├── .changeset ├── README.md └── config.json ├── .devcontainer └── devcontainer.json ├── .editorconfig ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md ├── pull_request_template.md └── workflows │ ├── ci.yml │ ├── dev-packages.yml │ └── release.yml ├── .gitignore ├── .gitmodules ├── .vscode ├── extensions.json └── settings.json ├── .yarn ├── releases │ └── yarn-4.0.2.cjs └── sdks │ ├── integrations.yml │ └── typescript │ ├── bin │ ├── tsc │ └── tsserver │ ├── lib │ ├── tsc.js │ ├── tsserver.js │ ├── tsserverlibrary.js │ └── typescript.js │ └── package.json ├── .yarnrc.yml ├── ACKNOWLEDGMENTS.md ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── demo ├── SharedService-sw │ ├── SharedService.js │ ├── SharedService_ServiceWorker.js │ ├── index.html │ └── index.js ├── SharedService │ ├── SharedService.js │ ├── SharedService_SharedWorker.js │ ├── index.html │ └── index.js ├── benchmarks │ ├── benchmark1.sql │ ├── benchmark10.sql │ ├── benchmark11.sql │ ├── benchmark12.sql │ ├── benchmark13.sql │ ├── benchmark14.sql │ ├── benchmark15.sql │ ├── benchmark16.sql │ ├── benchmark2.sql │ ├── benchmark3.sql │ ├── benchmark4.sql │ ├── benchmark5.sql │ ├── benchmark6.sql │ ├── benchmark7.sql │ ├── benchmark8.sql │ ├── benchmark9.sql │ ├── benchmarks.html │ ├── benchmarks.js │ └── index.html ├── contention │ ├── contention-worker.js │ ├── contention.html │ ├── contention.js │ └── index.html ├── demo-worker.js ├── demo.html ├── demo.js ├── file │ ├── index.html │ ├── index.js │ ├── service-worker.js │ └── verifier.js ├── hello │ ├── README.md │ ├── hello.html │ ├── hello.js │ └── index.html └── write-hint │ ├── index.html │ ├── index.js │ └── worker.js ├── dist ├── mc-wa-sqlite-async.mjs ├── mc-wa-sqlite-async.wasm ├── mc-wa-sqlite-jspi.mjs ├── mc-wa-sqlite-jspi.wasm ├── mc-wa-sqlite.mjs ├── mc-wa-sqlite.wasm ├── wa-sqlite-async-dynamic-main.mjs ├── wa-sqlite-async-dynamic-main.wasm ├── wa-sqlite-async.mjs ├── wa-sqlite-async.wasm ├── wa-sqlite-dynamic-main.mjs ├── wa-sqlite-dynamic-main.wasm ├── wa-sqlite-jspi.mjs ├── wa-sqlite-jspi.wasm ├── wa-sqlite.mjs └── wa-sqlite.wasm ├── docs ├── .nojekyll ├── assets │ ├── highlight.css │ ├── main.js │ ├── navigation.js │ ├── search.js │ └── style.css ├── index.html ├── interfaces │ ├── SQLiteAPI.html │ ├── SQLitePrepareOptions.html │ └── SQLiteVFS.html └── types │ └── SQLiteCompatibleType.html ├── jsconfig.json ├── multiple-ciphers └── mc_exported_functions.json ├── package.json ├── powersync-static └── powersync_exported_functions.json ├── powersync-version ├── scripts ├── docker-setup.sh ├── download-core-build.js ├── download-dynamic-core.js └── tools │ └── powersync-download.js ├── src ├── FacadeVFS.js ├── VFS.js ├── WebLocksMixin.js ├── asyncify_imports.json ├── examples │ ├── AccessHandlePoolVFS.js │ ├── IDBBatchAtomicVFS.js │ ├── IDBMirrorVFS.js │ ├── MemoryAsyncVFS.js │ ├── MemoryVFS.js │ ├── OPFSAdaptiveVFS.js │ ├── OPFSAnyContextVFS.js │ ├── OPFSCoopSyncVFS.js │ ├── OPFSPermutedVFS.js │ ├── README.md │ └── tag.js ├── exported_functions.json ├── extra_exported_runtime_methods.json ├── jspi_exports.json ├── libadapters.h ├── libadapters.js ├── libauthorizer.c ├── libauthorizer.js ├── libfunction.c ├── libfunction.js ├── libhook.c ├── libhook.js ├── libprogress.c ├── libprogress.js ├── libvfs.c ├── libvfs.js ├── main.c ├── sqlite-api.js ├── sqlite-constants.js └── types │ ├── globals.d.ts │ ├── index.d.ts │ └── tsconfig.json ├── test ├── AccessHandlePoolVFS.test.js ├── IDBBatchAtomicVFS.test.js ├── 
IDBMirrorVFS.test.js ├── MemoryAsyncVFS.test.js ├── MemoryVFS.test.js ├── OPFSAdaptiveVFS.test.js ├── OPFSAnyContextVFS.test.js ├── OPFSCoopSyncVFS.test.js ├── OPFSPermutedVFS.test.js ├── TestContext.js ├── WebLocksMixin.test.js ├── api.test.js ├── api_exec.js ├── api_misc.js ├── api_statements.js ├── callbacks.test.js ├── data │ └── idbv5.json ├── sql.test.js ├── sql_0001.js ├── sql_0002.js ├── sql_0003.js ├── sql_0004.js ├── sql_0005.js ├── test-worker.js ├── vfs_xAccess.js ├── vfs_xClose.js ├── vfs_xOpen.js ├── vfs_xRead.js └── vfs_xWrite.js ├── typedoc.json ├── web-test-runner.config.mjs └── yarn.lock /.changeset/README.md: -------------------------------------------------------------------------------- 1 | # Changesets 2 | 3 | Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works 4 | with multi-package repos, or single-package repos to help you version and publish your code. You can 5 | find the full documentation for it [in our repository](https://github.com/changesets/changesets) 6 | 7 | We have a quick list of common questions to get you started engaging with this project in 8 | [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md) 9 | -------------------------------------------------------------------------------- /.changeset/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://unpkg.com/@changesets/config@2.3.1/schema.json", 3 | "changelog": "@changesets/cli/changelog", 4 | "commit": false, 5 | "fixed": [], 6 | "linked": [], 7 | "access": "restricted", 8 | "baseBranch": "master", 9 | "updateInternalDependencies": "patch", 10 | "ignore": [] 11 | } 12 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/debian 3 | { 4 | "name": "Debian", 5 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 6 | "build": { 7 | "dockerfile": "../Dockerfile" 8 | }, 9 | "features": { 10 | "ghcr.io/ebaskoro/devcontainer-features/emscripten:1": {} 11 | }, 12 | 13 | // Features to add to the dev container. More info: https://containers.dev/features. 14 | // "features": {}, 15 | 16 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 17 | "forwardPorts": [8000], 18 | 19 | // Configure tool-specific properties. 20 | // "customizations": {}, 21 | 22 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
23 | "remoteUser": "root" 24 | } 25 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | 7 | [*.{js,json,.yml}] 8 | charset = utf-8 9 | indent_style = space 10 | indent_size = 2 11 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | /.yarn/** linguist-vendored 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report potential problems in project code 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | In this project, Issues are only for possible bugs in project code. Please don't ask for help debugging _your_ code; I have plenty of challenges debugging _my_ code. 🥲 11 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Checklist 2 | - [ ] I grant to recipients of this Project distribution a perpetual, 3 | non-exclusive, royalty-free, irrevocable copyright license to reproduce, prepare 4 | derivative works of, publicly display, sublicense, and distribute this 5 | Contribution and such derivative works. 6 | - [ ] I certify that I am legally entitled to grant this license, and that this 7 | Contribution contains no content requiring a license from any third party. 8 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # This workflow will do a clean installation of node dependencies, cache/restore them, build the source code and run tests across different versions of node 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs 3 | 4 | name: wa-sqlite CI 5 | 6 | on: 7 | push: 8 | branches: ['master'] 9 | pull_request: 10 | branches: ['master'] 11 | 12 | env: 13 | EM_VERSION: 3.1.64 14 | EM_CACHE_FOLDER: 'emsdk-cache' 15 | 16 | jobs: 17 | build: 18 | runs-on: ubuntu-latest 19 | 20 | strategy: 21 | matrix: 22 | node-version: [20.x] 23 | 24 | steps: 25 | - uses: actions/checkout@v4 26 | - name: Use Node.js ${{ matrix.node-version }} 27 | uses: actions/setup-node@v4 28 | with: 29 | node-version: ${{ matrix.node-version }} 30 | cache: 'npm' 31 | 32 | - uses: browser-actions/setup-chrome@v1 33 | id: setup-chrome 34 | with: 35 | chrome-version: 129 36 | - run: | 37 | ${{ steps.setup-chrome.outputs.chrome-path }} --version 38 | 39 | # Install yarn dependencies. 
40 | - name: Get yarn cache directory path 41 | id: yarn-cache-dir-path 42 | run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT 43 | - uses: actions/cache@v4 44 | id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`) 45 | with: 46 | path: ${{ steps.yarn-cache-dir-path.outputs.dir }} 47 | key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }} 48 | restore-keys: | 49 | ${{ runner.os }}-yarn- 50 | - run: yarn install 51 | - name: Test with checked-in WASM files 52 | run: yarn test 53 | 54 | # Install EMSDK 55 | - name: Setup Emscripten 56 | id: cache-system-libraries 57 | uses: actions/cache@v4 58 | with: 59 | path: ${{env.EM_CACHE_FOLDER}} 60 | key: ${{env.EM_VERSION}}-${{ runner.os }} 61 | - uses: mymindstorm/setup-emsdk@v14 62 | with: 63 | version: ${{env.EM_VERSION}} 64 | actions-cache-folder: ${{env.EM_CACHE_FOLDER}} 65 | - run: emcc -v 66 | 67 | # For some reason the Makefile fails to execute this with a "Permission denied" 68 | # error on Github actions. Doing this here prevents the error. 69 | - name: Download PowerSync Core 70 | run: node scripts/download-core-build.js 71 | 72 | - name: Test WASM build 73 | run: | 74 | make clean && make 75 | yarn test 76 | -------------------------------------------------------------------------------- /.github/workflows/dev-packages.yml: -------------------------------------------------------------------------------- 1 | # Action to publish packages under the `next` tag for testing 2 | # Packages are versioned as `0.0.0-{tag}-DATETIMESTAMP` 3 | name: Packages Deploy 4 | 5 | on: workflow_dispatch 6 | 7 | jobs: 8 | publish: 9 | name: Publish Dev Packages 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | - name: Setup NodeJS 15 | uses: actions/setup-node@v2 16 | with: 17 | node-version: 20 18 | 19 | - name: Setup Yarn 20 | run: | 21 | npm install -g yarn 22 | echo "Yarn version: $(yarn -v)" 23 | echo "//registry.npmjs.org/:_authToken=${{secrets.NPM_TOKEN}}" >> ~/.npmrc 24 | 25 | - name: Install Dependencies 26 | run: yarn install --frozen-lockfile 27 | 28 | - name: Version packages 29 | run: yarn changeset version --no-git-tag --snapshot dev 30 | 31 | - name: Publish 32 | run: yarn changeset publish --tag dev 33 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | concurrency: ${{ github.workflow }}-${{ github.ref }} 9 | 10 | jobs: 11 | release: 12 | name: Release 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout Repo 16 | uses: actions/checkout@v3 17 | 18 | - name: Setup Node.js 16 19 | uses: actions/setup-node@v3 20 | with: 21 | node-version: 20 22 | 23 | - name: Install Dependencies 24 | run: yarn 25 | 26 | - name: Create Release Pull Request or Publish to npm 27 | id: changesets 28 | uses: changesets/action@v1 29 | with: 30 | # This expects you to have a script called release which does a build for your packages and calls changeset publish 31 | publish: yarn release 32 | version: yarn changeset version 33 | env: 34 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 35 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | # 
https://yarnpkg.com/getting-started/qa/#which-files-should-be-gitignored 4 | .yarn/* 5 | !.yarn/patches 6 | !.yarn/releases 7 | !.yarn/plugins 8 | !.yarn/sdks 9 | !.yarn/versions 10 | .pnp.* 11 | 12 | # PowerSync Rust Core 13 | libpowersync*.wasm 14 | powersync-libs 15 | 16 | /cache 17 | /debug 18 | /deps 19 | /tmp 20 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/.gitmodules -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "arcanis.vscode-zipfs" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "search.exclude": { 3 | "**/.yarn": true, 4 | "**/.pnp.*": true 5 | }, 6 | "typescript.tsdk": ".yarn/sdks/typescript/lib", 7 | "typescript.enablePromptUseWorkspaceTsdk": true 8 | } 9 | -------------------------------------------------------------------------------- /.yarn/sdks/integrations.yml: -------------------------------------------------------------------------------- 1 | # This file is automatically generated by @yarnpkg/sdks. 2 | # Manual changes might be lost! 3 | 4 | integrations: 5 | - vscode 6 | -------------------------------------------------------------------------------- /.yarn/sdks/typescript/bin/tsc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const {existsSync} = require(`fs`); 4 | const {createRequire} = require(`module`); 5 | const {resolve} = require(`path`); 6 | 7 | const relPnpApiPath = "../../../../.pnp.cjs"; 8 | 9 | const absPnpApiPath = resolve(__dirname, relPnpApiPath); 10 | const absRequire = createRequire(absPnpApiPath); 11 | 12 | if (existsSync(absPnpApiPath)) { 13 | if (!process.versions.pnp) { 14 | // Setup the environment to be able to require typescript/bin/tsc 15 | require(absPnpApiPath).setup(); 16 | } 17 | } 18 | 19 | // Defer to the real typescript/bin/tsc your application uses 20 | module.exports = absRequire(`typescript/bin/tsc`); 21 | -------------------------------------------------------------------------------- /.yarn/sdks/typescript/bin/tsserver: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const {existsSync} = require(`fs`); 4 | const {createRequire} = require(`module`); 5 | const {resolve} = require(`path`); 6 | 7 | const relPnpApiPath = "../../../../.pnp.cjs"; 8 | 9 | const absPnpApiPath = resolve(__dirname, relPnpApiPath); 10 | const absRequire = createRequire(absPnpApiPath); 11 | 12 | if (existsSync(absPnpApiPath)) { 13 | if (!process.versions.pnp) { 14 | // Setup the environment to be able to require typescript/bin/tsserver 15 | require(absPnpApiPath).setup(); 16 | } 17 | } 18 | 19 | // Defer to the real typescript/bin/tsserver your application uses 20 | module.exports = absRequire(`typescript/bin/tsserver`); 21 | -------------------------------------------------------------------------------- /.yarn/sdks/typescript/lib/tsc.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const {existsSync} = 
require(`fs`); 4 | const {createRequire} = require(`module`); 5 | const {resolve} = require(`path`); 6 | 7 | const relPnpApiPath = "../../../../.pnp.cjs"; 8 | 9 | const absPnpApiPath = resolve(__dirname, relPnpApiPath); 10 | const absRequire = createRequire(absPnpApiPath); 11 | 12 | if (existsSync(absPnpApiPath)) { 13 | if (!process.versions.pnp) { 14 | // Setup the environment to be able to require typescript/lib/tsc.js 15 | require(absPnpApiPath).setup(); 16 | } 17 | } 18 | 19 | // Defer to the real typescript/lib/tsc.js your application uses 20 | module.exports = absRequire(`typescript/lib/tsc.js`); 21 | -------------------------------------------------------------------------------- /.yarn/sdks/typescript/lib/typescript.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const {existsSync} = require(`fs`); 4 | const {createRequire} = require(`module`); 5 | const {resolve} = require(`path`); 6 | 7 | const relPnpApiPath = "../../../../.pnp.cjs"; 8 | 9 | const absPnpApiPath = resolve(__dirname, relPnpApiPath); 10 | const absRequire = createRequire(absPnpApiPath); 11 | 12 | if (existsSync(absPnpApiPath)) { 13 | if (!process.versions.pnp) { 14 | // Setup the environment to be able to require typescript 15 | require(absPnpApiPath).setup(); 16 | } 17 | } 18 | 19 | // Defer to the real typescript your application uses 20 | module.exports = absRequire(`typescript`); 21 | -------------------------------------------------------------------------------- /.yarn/sdks/typescript/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "typescript", 3 | "version": "5.3.3-sdk", 4 | "main": "./lib/typescript.js", 5 | "type": "commonjs", 6 | "bin": { 7 | "tsc": "./bin/tsc", 8 | "tsserver": "./bin/tsserver" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.yarnrc.yml: -------------------------------------------------------------------------------- 1 | compressionLevel: mixed 2 | 3 | enableGlobalCache: false 4 | 5 | yarnPath: .yarn/releases/yarn-4.0.2.cjs 6 | -------------------------------------------------------------------------------- /ACKNOWLEDGMENTS.md: -------------------------------------------------------------------------------- 1 | # Acknowledgments 2 | This project does not use code from [SQL.js](https://sql.js.org/#/), but is inspired and influenced by it. 3 | 4 | This project uses or derives material from the following sources. 5 | 6 | ## [sqlite-wasm](https://github.com/mandel59/sqlite-wasm) 7 | Copyright 2017 Ryusei Yamaguchi 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy of 10 | this software and associated documentation files (the "Software"), to deal in 11 | the Software without restriction, including without limitation the rights to 12 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 13 | the Software, and to permit persons to whom the Software is furnished to do so, 14 | subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 21 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR 22 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 23 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 24 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 | 26 | ## [monaco-editor](https://github.com/microsoft/monaco-editor) 27 | Copyright (c) 2016 - present Microsoft Corporation 28 | 29 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 30 | 31 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 32 | 33 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 34 | 35 | ## [github-corners](https://github.com/tholman/github-corners) 36 | Copyright (c) 2016 Tim Holman 37 | 38 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 39 | 40 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 41 | 42 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
43 | 44 | ## Pull Request submitters 45 | [//]: # "Use this format (replace 'name' and 'username'): * [name](your URL choice) - [PRs](https://github.com/rhashimoto/wa-sqlite/pulls?q=is%3Apr+author%3Ausername)" 46 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # @journeyapps/wa-sqlite 2 | 3 | ## 1.2.4 4 | 5 | ### Patch Changes 6 | 7 | - 54ad303: Fix issue where OPFS VFS could freeze in infinite loop 8 | 9 | ## 1.2.3 10 | 11 | ### Patch Changes 12 | 13 | - b1fa32b: Update PowerSync core extension to version 0.3.14 14 | 15 | ## 1.2.2 16 | 17 | ### Patch Changes 18 | 19 | - 36d6cd5: Update core PowerSync extension to 0.3.12 20 | 21 | ## 1.2.1 22 | 23 | ### Patch Changes 24 | 25 | - 3980a48: Reflect correct feature status in Readme 26 | - 5b49db8: Update core extension to 0.3.11, supporting bucket priorities. 27 | 28 | ## 1.2.0 29 | 30 | ### Minor Changes 31 | 32 | - 9d9e03c: powersync-sqlite-core 0.3.8: Support 1999 columns per table 33 | 34 | ## 1.1.1 35 | 36 | ### Patch Changes 37 | 38 | - a2693c6: powersync-sqlite-core 0.3.7; sqlite 3.47.2 39 | 40 | ## 1.1.0 41 | 42 | ### Minor Changes 43 | 44 | - 7536ead: Enable database encryption with multiple ciphers 45 | 46 | ## 1.0.0 47 | 48 | ### Major Changes 49 | 50 | - 187d9cd: Updated from upstream to v1.0.4 51 | 52 | ### Minor Changes 53 | 54 | - f9f5c84: Updated PowerSync core build steps by removing Git submodules. Added ability to load PowerSync extension as a dynamic side module. 55 | 56 | ## 0.4.2 57 | 58 | ### Patch Changes 59 | 60 | - f23bd69: Use powersync-sqlite-core 0.3.6 to fix dangling rows 61 | 62 | ## 0.4.1 63 | 64 | ### Patch Changes 65 | 66 | - 588937a: Fix bug where table change update notifications would access invalid memory locations under certain conditions. 67 | 68 | ## 0.4.0 69 | 70 | ### Minor Changes 71 | 72 | - 95b8ba1: powersync-sqlite-core 0.3.0 73 | 74 | ## 0.3.0 75 | 76 | ### Minor Changes 77 | 78 | - b0124d4: Update powersync-sqlite-core to v0.2.0 79 | - b0124d4: Rename back to @journeyapps/wa-sqlite 80 | 81 | ### Patch Changes 82 | 83 | - b0124d4: Fix some type declarations 84 | 85 | ## 0.2.1 86 | 87 | ### Patch Changes 88 | 89 | - a220f80: Rename package to @powersync/wa-sqlite 90 | 91 | ## For the following previous versions refer to `@journeyapps/wa-sqlite` 92 | 93 | ## 0.2.0 94 | 95 | ### Minor Changes 96 | 97 | - 0077436: Added full text search (FTS5) support out of the box by default. There should be no configuration required. 98 | Updated from upstream changes 99 | 100 | ## 0.1.1 101 | 102 | ### Patch Changes 103 | 104 | - fabeb50: Updated from upstream changes 105 | - fabeb50: Updated powersync-sqlite-core to v0.1.6 106 | 107 | ## 0.1.0 108 | 109 | ### Minor Changes 110 | 111 | - aba7b5b: Beta release 112 | 113 | ## 0.0.2 114 | 115 | ### Patch Changes 116 | 117 | - 1cce650: Fixed Typescript declaration files. 118 | 119 | ## 0.0.1 120 | 121 | ### Patch Changes 122 | 123 | - 9bb4bb4: Initial loading of PowerSync Rust SQLite extension. 
124 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/devcontainers/base:bullseye 2 | 3 | # Need Chrome: 4 | RUN sudo apt update 5 | RUN sudo apt install chromium clang default-jre -y 6 | 7 | 8 | COPY ./scripts/docker-setup.sh /tmp/setup.sh 9 | 10 | RUN sudo /bin/bash /tmp/setup.sh -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Roy T. Hashimoto 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![wa-sqlite CI](https://github.com/powersync-ja/wa-sqlite/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/powersync-ja/wa-sqlite/actions/workflows/ci.yml?branch=master) 2 | 3 | # wa-sqlite 4 | This is a WebAssembly build of SQLite with support for writing SQLite virtual filesystems completely in Javascript. This allows alternative browser storage options such as IndexedDB and Origin Private File System. Applications can opt to use either a synchronous or asynchronous (using Asyncify or JSPI) SQLite library build (an asynchronous build is required for asynchronous extensions). 5 | 6 | IndexedDB and several Origin Private File System virtual file systems are among the examples provided as proof of concept. A table comparing the different VFS classes is [here](https://github.com/rhashimoto/wa-sqlite/tree/master/src/examples#vfs-comparison). 7 | 8 | [Try the demo](https://rhashimoto.github.io/wa-sqlite/demo/?build=asyncify&config=IDBBatchAtomicVFS&reset) or run [benchmarks](https://rhashimoto.github.io/wa-sqlite/demo/benchmarks/?config=asyncify,IDBBatchAtomicVFS;asyncify,IDBMirrorVFS;default,AccessHandlePoolVFS;default,OPFSCoopSyncVFS;asyncify,OPFSAdaptiveVFS;asyncify,OPFSPermutedVFS) with a modern desktop web browser. More information is available in the [FAQ](https://github.com/rhashimoto/wa-sqlite/issues?q=is%3Aissue+label%3Afaq+), [discussion forums](https://github.com/rhashimoto/wa-sqlite/discussions), and [API reference](https://rhashimoto.github.io/wa-sqlite/docs/). 
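As a quick illustration of how one of those example VFS classes is wired up for persistent storage, here is a minimal sketch. It assumes the Asyncify build and the `IDBBatchAtomicVFS` example; the import paths and the `IDBBatchAtomicVFS.create()` signature shown here are assumptions and should be verified against `src/examples` and the hello demo for the version you are using:

```javascript
// Minimal sketch only: persistent storage via the IndexedDB example VFS.
// Assumes the Asyncify build (asynchronous VFS) and that the example class
// exposes a static create(name, module) helper; check src/examples/IDBBatchAtomicVFS.js.
import SQLiteESMFactory from '@journeyapps/wa-sqlite/dist/wa-sqlite-async.mjs';
import * as SQLite from '@journeyapps/wa-sqlite';
import { IDBBatchAtomicVFS } from '@journeyapps/wa-sqlite/src/examples/IDBBatchAtomicVFS.js';

async function demo() {
  const module = await SQLiteESMFactory();
  const sqlite3 = SQLite.Factory(module);

  // Register the VFS and make it the default so open_v2 uses it.
  const vfs = await IDBBatchAtomicVFS.create('demo-vfs', module);
  sqlite3.vfs_register(vfs, true);

  const db = await sqlite3.open_v2('myDB');
  try {
    await sqlite3.exec(db, `CREATE TABLE IF NOT EXISTS t(x)`);
    await sqlite3.exec(db, `INSERT INTO t VALUES (datetime('now'))`);
    await sqlite3.exec(db, `SELECT * FROM t`, (row, columns) => console.log(columns, row));
  } finally {
    await sqlite3.close(db);
  }
}

demo();
```

The plain `wa-sqlite.mjs` build only supports synchronous VFS implementations; the Asyncify (or JSPI) build is needed for asynchronous ones such as this IndexedDB example.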
9 | 10 | ## Build 11 | The primary motivation for this project is to enable additions to SQLite with only Javascript. Most developers should be able to use the pre-built artifacts in 12 | [./dist](https://github.com/powersync-ja/wa-sqlite/tree/master/dist). 13 | Note that earlier versions of the project only provided pre-built artifacts in the 14 | "buildless" branch; that branch will no longer be maintained. 15 | 16 | If you do want to build - e.g. you want to change build flags, use a specific EMSDK version, or modify wa-sqlite itself - here are the prerequisites: 17 | 18 | * Building on Debian Linux is known to work; compatibility with other platforms is unknown. 19 | * `yarn` - If you use a different package manager (e.g. `npm`) then file paths in the demo will need adjustment. 20 | * [Emscripten SDK](https://emscripten.org/docs/getting_started/downloads.html) 3.1.61+. 21 | * `curl`, `make`, `openssl`, `sed`, `tclsh`, `unzip` 22 | 23 | Here are the build steps: 24 | * Make sure `emcc` works. 25 | * `git clone git@github.com:rhashimoto/wa-sqlite.git` 26 | * `cd wa-sqlite` 27 | * `yarn install` 28 | * `make` 29 | 30 | The default build produces ES6 modules + WASM, [synchronous and asynchronous](https://github.com/rhashimoto/wa-sqlite/issues/7) (using Asyncify and JSPI) in `dist/`. 31 | 32 | ## PowerSync instructions 33 | 34 | As noted above, this project is known to compile under Debian. 35 | 36 | Make sure to have OpenSSL@3 installed on your system. 37 | 38 | Development has been done using VSCode's [development container](https://code.visualstudio.com/docs/devcontainers/containers) feature. 39 | 40 | ```bash 41 | git clone [this repo] 42 | ``` 43 | 44 | ```bash 45 | make -B 46 | ``` 47 | 48 | ### PowerSync Core 49 | 50 | The PowerSync Rust SQLite extension is used both statically and dynamically in this package. The static core binary is downloaded in the `Makefile` during compilation. The dynamic WASM files are not committed to this repository's `dist` folder; instead, they are downloaded by a `postinstall` script. 51 | 52 | Update the `powersync-version` file in order to update the PowerSync core version. 53 | 54 | ### Usage with PowerSync 55 | 56 | This fork of WA-SQLite can be used with PowerSync either statically or dynamically. See the [demo worker](./demo/demo-worker.js) for usage. 57 | 58 | ## API 59 | Javascript wrappers for core SQLite C API functions (and some others) are provided. Some convenience functions are also provided to reduce boilerplate. Here is sample code to load the library and call the API: 60 | 61 | ```javascript 62 | import SQLiteESMFactory from '@journeyapps/wa-sqlite/dist/wa-sqlite.mjs'; 63 | import * as SQLite from '@journeyapps/wa-sqlite'; 64 | 65 | async function hello() { 66 | const module = await SQLiteESMFactory(); 67 | const sqlite3 = SQLite.Factory(module); 68 | const db = await sqlite3.open_v2('myDB'); 69 | await sqlite3.exec(db, `SELECT 'Hello, world!'`, (row, columns) => { 70 | console.log(row); 71 | }); 72 | await sqlite3.close(db); 73 | } 74 | 75 | hello(); 76 | ``` 77 | 78 | There is a slightly more complicated example [here](https://github.com/rhashimoto/wa-sqlite/tree/master/demo/hello) that also shows how to use a virtual filesystem (VFS) for persistent storage. 79 | 80 | The [implementation of `sqlite3.exec`](https://github.com/rhashimoto/wa-sqlite/blob/eb6e62584b2864d5029f51c6afe155d71ba0caa8/src/sqlite-api.js#L409-L418) may be of interest to anyone wanting more fine-grained use of SQLite statement objects (e.g.
for binding parameters, explicit column datatypes, etc.). 81 | 82 | [API reference](https://rhashimoto.github.io/wa-sqlite/docs/) 83 | 84 | ## Demo 85 | To serve the demo directly from the source tree: 86 | * `yarn start` 87 | * Open a browser on http://localhost:8000/demo/?build=asyncify&config=IDBBatchAtomicVFS&reset 88 | 89 | The demo page provides access to databases on multiple VFS implementations. Query parameters on the demo page URL can be used to specify the configuration and initial state: 90 | 91 | | Parameter | Purpose | Values | Default | 92 | |----|----|----|----| 93 | | build | Emscripten build type | default, asyncify, jspi | default | 94 | | config | select VFS | MemoryVFS, MemoryAsyncVFS, IDBBatchAtomicVFS, IDBMirrorVFS, AccessHandlePoolVFS, OPFSAdaptiveVFS, OPFSAnyContextVFS, OPFSCoopSyncVFS, OPFSPermutedVFS | uses SQLite internal memory | 95 | | reset | clear persistent storage | | | 96 | 97 | For convenience, if any text region is selected in the editor, only that region will be executed. In addition, the editor contents are restored across page reloads using browser localStorage. 98 | 99 | ## License 100 | MIT License as of February 10, 2023, changed by generous sponsors 101 | [Fleet Device Management](https://fleetdm.com/) and [Reflect](https://reflect.app/). 102 | Existing licensees may continue under the GPLv3 or switch to the new license. 103 | -------------------------------------------------------------------------------- /demo/SharedService-sw/SharedService_ServiceWorker.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | // Install the service worker as soon as possible. 4 | globalThis.addEventListener('install', (/** @type {ExtendableEvent} */ event) => { 5 | event.waitUntil(globalThis.skipWaiting()); 6 | }); 7 | globalThis.addEventListener('activate', (/** @type {ExtendableEvent} */ event) => { 8 | event.waitUntil(globalThis.clients.claim()); 9 | }); 10 | 11 | // Forward messages (and ports) from client to client. 12 | globalThis.addEventListener('message', async event => { 13 | if (event.data?.sharedService) { 14 | const client = await globalThis.clients.get(event.data.clientId); 15 | client.postMessage(event.data, event.ports); 16 | } 17 | }); 18 | 19 | // Tell clients their clientId. A service worker isn't actually needed 20 | // for a context to get its clientId, but this also doubles as a way 21 | // to verify that the service worker is active. 22 | globalThis.addEventListener('fetch', async (/** @type {FetchEvent} */ event) => { 23 | if (event.request.url === globalThis.registration.scope + 'clientId') { 24 | return event.respondWith(new Response(event.clientId, { 25 | headers: { "Content-Type": "text/plain" } 26 | })); 27 | } 28 | }); -------------------------------------------------------------------------------- /demo/SharedService-sw/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | SharedService test 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | -------------------------------------------------------------------------------- /demo/SharedService-sw/index.js: -------------------------------------------------------------------------------- 1 | import { SharedService, createSharedServicePort } from "./SharedService.js"; 2 | 3 | // This is a sample service. Only methods with structured cloneable 4 | // arguments and results can be called by proxy. 5 | const target = { 6 | async add(x, y) { 7 | log(`evaluating ${x} + ${y}`); 8 | return x + y; 9 | }, 10 | 11 | multiply(x, y) { 12 | log(`evaluating ${x} * ${y}`); 13 | return x * y; 14 | }, 15 | 16 | async slow_subtract(x, y) { 17 | log(`evaluating ${x} - ${y} with 5s delay`); 18 | await new Promise(resolve => setTimeout(resolve, 5000)); 19 | return x - y; 20 | }, 21 | 22 | throw_error(x, y) { 23 | log('throwing Error'); 24 | throw new Error('test error'); 25 | } 26 | }; 27 | 28 | // This function is called when this instance is designated as the 29 | // service provider. The port is created locally here but it could 30 | // come from a different context, e.g. a Worker. 31 | function portProvider() { 32 | log('appointed provider'); 33 | return createSharedServicePort(target); 34 | } 35 | 36 | // Load the service worker. 37 | navigator.serviceWorker.register('SharedService_ServiceWorker.js'); 38 | 39 | // Create the shared service. 40 | log('start'); 41 | const sharedService = new SharedService('test-sw', portProvider); 42 | sharedService.activate(); 43 | 44 | for (const button of Array.from(document.getElementsByTagName('button'))) { 45 | button.addEventListener('click', async () => { 46 | // Call the service. 47 | const op = button.getAttribute('data-op'); 48 | const x = Math.trunc(Math.random() * 100); 49 | const y = Math.trunc(Math.random() * 100); 50 | log(`requesting ${op}(${x}, ${y})`); 51 | try { 52 | const result = await sharedService.proxy[op](x, y); 53 | log(`result ${result}`); 54 | } catch (e) { 55 | const text = e.stack.includes(e.message) ? e.stack : `${e.message}\n${e.stack}`; 56 | log(text); 57 | } 58 | }); 59 | } 60 | 61 | function log(s) { 62 | const TIME_FORMAT = { 63 | hour12: false, 64 | hour: '2-digit', 65 | minute: '2-digit', 66 | second: '2-digit', 67 | fractionalSecondDigits: 3 68 | }; 69 | // @ts-ignore 70 | const timestamp = new Date().toLocaleTimeString(undefined, TIME_FORMAT); 71 | document.getElementById('output').textContent += `${timestamp} ${s}\n`; 72 | } -------------------------------------------------------------------------------- /demo/SharedService/SharedService_SharedWorker.js: -------------------------------------------------------------------------------- 1 | /** @type {Map} */ const mapClientIdToPort = new Map(); 2 | 3 | globalThis.addEventListener('connect', event => { 4 | // The first message from a client associates the clientId with the port. 5 | const workerPort = event.ports[0]; 6 | workerPort.addEventListener('message', event => { 7 | mapClientIdToPort.set(event.data.clientId, workerPort); 8 | 9 | // Remove the entry when the client goes away, which we detect when 10 | // the lock on its name becomes available. 11 | navigator.locks.request(event.data.clientId, { mode: 'shared' }, () => { 12 | mapClientIdToPort.get(event.data.clientId)?.close(); 13 | mapClientIdToPort.delete(event.data.clientId); 14 | }); 15 | 16 | // Subsequent messages will be forwarded. 
17 | workerPort.addEventListener('message', event => { 18 | const port = mapClientIdToPort.get(event.data.clientId); 19 | port.postMessage(event.data, event.ports); 20 | }); 21 | }, { once: true }); 22 | workerPort.start(); 23 | }); -------------------------------------------------------------------------------- /demo/SharedService/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | SharedService test 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | -------------------------------------------------------------------------------- /demo/SharedService/index.js: -------------------------------------------------------------------------------- 1 | import { SharedService, createSharedServicePort } from "./SharedService.js"; 2 | 3 | // This is a sample service. Only methods with structured cloneable 4 | // arguments and results can be called by proxy. 5 | const target = { 6 | async add(x, y) { 7 | log(`evaluating ${x} + ${y}`); 8 | return x + y; 9 | }, 10 | 11 | multiply(x, y) { 12 | log(`evaluating ${x} * ${y}`); 13 | return x * y; 14 | }, 15 | 16 | async slow_subtract(x, y) { 17 | log(`evaluating ${x} - ${y} with 5s delay`); 18 | await new Promise(resolve => setTimeout(resolve, 5000)); 19 | return x - y; 20 | }, 21 | 22 | throw_error(x, y) { 23 | log('throwing Error'); 24 | throw new Error('test error'); 25 | } 26 | }; 27 | 28 | // This function is called when this instance is designated as the 29 | // service provider. The port is created locally here but it could 30 | // come from a different context, e.g. a Worker. 31 | function portProvider() { 32 | log('appointed provider'); 33 | return createSharedServicePort(target); 34 | } 35 | 36 | // Create the shared service. 37 | log('start'); 38 | const sharedService = new SharedService('test', portProvider); 39 | sharedService.activate(); 40 | 41 | for (const button of Array.from(document.getElementsByTagName('button'))) { 42 | button.addEventListener('click', async () => { 43 | // Call the service. 44 | const op = button.getAttribute('data-op'); 45 | const x = Math.trunc(Math.random() * 100); 46 | const y = Math.trunc(Math.random() * 100); 47 | log(`requesting ${op}(${x}, ${y})`); 48 | try { 49 | const result = await sharedService.proxy[op](x, y); 50 | log(`result ${result}`); 51 | } catch (e) { 52 | const text = e.stack.includes(e.message) ? 
e.stack : `${e.message}\n${e.stack}`; 53 | log(text); 54 | } 55 | }); 56 | } 57 | 58 | 59 | function log(s) { 60 | const TIME_FORMAT = { 61 | hour12: false, 62 | hour: '2-digit', 63 | minute: '2-digit', 64 | second: '2-digit', 65 | fractionalSecondDigits: 3 66 | }; 67 | // @ts-ignore 68 | const timestamp = new Date().toLocaleTimeString(undefined, TIME_FORMAT); 69 | document.getElementById('output').textContent += `${timestamp} ${s}\n`; 70 | } -------------------------------------------------------------------------------- /demo/benchmarks/benchmark11.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | INSERT INTO t1 SELECT b,a,c FROM t2; 3 | INSERT INTO t2 SELECT b,a,c FROM t1; 4 | COMMIT; 5 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark12.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM t2 WHERE c LIKE '%fifty%'; 2 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark13.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM t2 WHERE a>10 AND a<20000; 2 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark14.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO t2 SELECT * FROM t1; 2 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark16.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE t1; 2 | DROP TABLE t2; 3 | DROP TABLE t3; 4 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark4.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<1000; 3 | SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<1100; 4 | SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<1200; 5 | SELECT count(*), avg(b) FROM t2 WHERE b>=300 AND b<1300; 6 | SELECT count(*), avg(b) FROM t2 WHERE b>=400 AND b<1400; 7 | SELECT count(*), avg(b) FROM t2 WHERE b>=500 AND b<1500; 8 | SELECT count(*), avg(b) FROM t2 WHERE b>=600 AND b<1600; 9 | SELECT count(*), avg(b) FROM t2 WHERE b>=700 AND b<1700; 10 | SELECT count(*), avg(b) FROM t2 WHERE b>=800 AND b<1800; 11 | SELECT count(*), avg(b) FROM t2 WHERE b>=900 AND b<1900; 12 | SELECT count(*), avg(b) FROM t2 WHERE b>=1000 AND b<2000; 13 | SELECT count(*), avg(b) FROM t2 WHERE b>=1100 AND b<2100; 14 | SELECT count(*), avg(b) FROM t2 WHERE b>=1200 AND b<2200; 15 | SELECT count(*), avg(b) FROM t2 WHERE b>=1300 AND b<2300; 16 | SELECT count(*), avg(b) FROM t2 WHERE b>=1400 AND b<2400; 17 | SELECT count(*), avg(b) FROM t2 WHERE b>=1500 AND b<2500; 18 | SELECT count(*), avg(b) FROM t2 WHERE b>=1600 AND b<2600; 19 | SELECT count(*), avg(b) FROM t2 WHERE b>=1700 AND b<2700; 20 | SELECT count(*), avg(b) FROM t2 WHERE b>=1800 AND b<2800; 21 | SELECT count(*), avg(b) FROM t2 WHERE b>=1900 AND b<2900; 22 | SELECT count(*), avg(b) FROM t2 WHERE b>=2000 AND b<3000; 23 | SELECT count(*), avg(b) FROM t2 WHERE b>=2100 AND b<3100; 24 | SELECT count(*), avg(b) FROM t2 WHERE b>=2200 AND b<3200; 25 | SELECT count(*), avg(b) FROM t2 WHERE b>=2300 AND b<3300; 26 | SELECT count(*), avg(b) FROM t2 WHERE b>=2400 AND b<3400; 27 | SELECT count(*), avg(b) FROM t2 WHERE b>=2500 AND 
b<3500; 28 | SELECT count(*), avg(b) FROM t2 WHERE b>=2600 AND b<3600; 29 | SELECT count(*), avg(b) FROM t2 WHERE b>=2700 AND b<3700; 30 | SELECT count(*), avg(b) FROM t2 WHERE b>=2800 AND b<3800; 31 | SELECT count(*), avg(b) FROM t2 WHERE b>=2900 AND b<3900; 32 | SELECT count(*), avg(b) FROM t2 WHERE b>=3000 AND b<4000; 33 | SELECT count(*), avg(b) FROM t2 WHERE b>=3100 AND b<4100; 34 | SELECT count(*), avg(b) FROM t2 WHERE b>=3200 AND b<4200; 35 | SELECT count(*), avg(b) FROM t2 WHERE b>=3300 AND b<4300; 36 | SELECT count(*), avg(b) FROM t2 WHERE b>=3400 AND b<4400; 37 | SELECT count(*), avg(b) FROM t2 WHERE b>=3500 AND b<4500; 38 | SELECT count(*), avg(b) FROM t2 WHERE b>=3600 AND b<4600; 39 | SELECT count(*), avg(b) FROM t2 WHERE b>=3700 AND b<4700; 40 | SELECT count(*), avg(b) FROM t2 WHERE b>=3800 AND b<4800; 41 | SELECT count(*), avg(b) FROM t2 WHERE b>=3900 AND b<4900; 42 | SELECT count(*), avg(b) FROM t2 WHERE b>=4000 AND b<5000; 43 | SELECT count(*), avg(b) FROM t2 WHERE b>=4100 AND b<5100; 44 | SELECT count(*), avg(b) FROM t2 WHERE b>=4200 AND b<5200; 45 | SELECT count(*), avg(b) FROM t2 WHERE b>=4300 AND b<5300; 46 | SELECT count(*), avg(b) FROM t2 WHERE b>=4400 AND b<5400; 47 | SELECT count(*), avg(b) FROM t2 WHERE b>=4500 AND b<5500; 48 | SELECT count(*), avg(b) FROM t2 WHERE b>=4600 AND b<5600; 49 | SELECT count(*), avg(b) FROM t2 WHERE b>=4700 AND b<5700; 50 | SELECT count(*), avg(b) FROM t2 WHERE b>=4800 AND b<5800; 51 | SELECT count(*), avg(b) FROM t2 WHERE b>=4900 AND b<5900; 52 | SELECT count(*), avg(b) FROM t2 WHERE b>=5000 AND b<6000; 53 | SELECT count(*), avg(b) FROM t2 WHERE b>=5100 AND b<6100; 54 | SELECT count(*), avg(b) FROM t2 WHERE b>=5200 AND b<6200; 55 | SELECT count(*), avg(b) FROM t2 WHERE b>=5300 AND b<6300; 56 | SELECT count(*), avg(b) FROM t2 WHERE b>=5400 AND b<6400; 57 | SELECT count(*), avg(b) FROM t2 WHERE b>=5500 AND b<6500; 58 | SELECT count(*), avg(b) FROM t2 WHERE b>=5600 AND b<6600; 59 | SELECT count(*), avg(b) FROM t2 WHERE b>=5700 AND b<6700; 60 | SELECT count(*), avg(b) FROM t2 WHERE b>=5800 AND b<6800; 61 | SELECT count(*), avg(b) FROM t2 WHERE b>=5900 AND b<6900; 62 | SELECT count(*), avg(b) FROM t2 WHERE b>=6000 AND b<7000; 63 | SELECT count(*), avg(b) FROM t2 WHERE b>=6100 AND b<7100; 64 | SELECT count(*), avg(b) FROM t2 WHERE b>=6200 AND b<7200; 65 | SELECT count(*), avg(b) FROM t2 WHERE b>=6300 AND b<7300; 66 | SELECT count(*), avg(b) FROM t2 WHERE b>=6400 AND b<7400; 67 | SELECT count(*), avg(b) FROM t2 WHERE b>=6500 AND b<7500; 68 | SELECT count(*), avg(b) FROM t2 WHERE b>=6600 AND b<7600; 69 | SELECT count(*), avg(b) FROM t2 WHERE b>=6700 AND b<7700; 70 | SELECT count(*), avg(b) FROM t2 WHERE b>=6800 AND b<7800; 71 | SELECT count(*), avg(b) FROM t2 WHERE b>=6900 AND b<7900; 72 | SELECT count(*), avg(b) FROM t2 WHERE b>=7000 AND b<8000; 73 | SELECT count(*), avg(b) FROM t2 WHERE b>=7100 AND b<8100; 74 | SELECT count(*), avg(b) FROM t2 WHERE b>=7200 AND b<8200; 75 | SELECT count(*), avg(b) FROM t2 WHERE b>=7300 AND b<8300; 76 | SELECT count(*), avg(b) FROM t2 WHERE b>=7400 AND b<8400; 77 | SELECT count(*), avg(b) FROM t2 WHERE b>=7500 AND b<8500; 78 | SELECT count(*), avg(b) FROM t2 WHERE b>=7600 AND b<8600; 79 | SELECT count(*), avg(b) FROM t2 WHERE b>=7700 AND b<8700; 80 | SELECT count(*), avg(b) FROM t2 WHERE b>=7800 AND b<8800; 81 | SELECT count(*), avg(b) FROM t2 WHERE b>=7900 AND b<8900; 82 | SELECT count(*), avg(b) FROM t2 WHERE b>=8000 AND b<9000; 83 | SELECT count(*), avg(b) FROM t2 WHERE b>=8100 AND b<9100; 84 | SELECT 
count(*), avg(b) FROM t2 WHERE b>=8200 AND b<9200; 85 | SELECT count(*), avg(b) FROM t2 WHERE b>=8300 AND b<9300; 86 | SELECT count(*), avg(b) FROM t2 WHERE b>=8400 AND b<9400; 87 | SELECT count(*), avg(b) FROM t2 WHERE b>=8500 AND b<9500; 88 | SELECT count(*), avg(b) FROM t2 WHERE b>=8600 AND b<9600; 89 | SELECT count(*), avg(b) FROM t2 WHERE b>=8700 AND b<9700; 90 | SELECT count(*), avg(b) FROM t2 WHERE b>=8800 AND b<9800; 91 | SELECT count(*), avg(b) FROM t2 WHERE b>=8900 AND b<9900; 92 | SELECT count(*), avg(b) FROM t2 WHERE b>=9000 AND b<10000; 93 | SELECT count(*), avg(b) FROM t2 WHERE b>=9100 AND b<10100; 94 | SELECT count(*), avg(b) FROM t2 WHERE b>=9200 AND b<10200; 95 | SELECT count(*), avg(b) FROM t2 WHERE b>=9300 AND b<10300; 96 | SELECT count(*), avg(b) FROM t2 WHERE b>=9400 AND b<10400; 97 | SELECT count(*), avg(b) FROM t2 WHERE b>=9500 AND b<10500; 98 | SELECT count(*), avg(b) FROM t2 WHERE b>=9600 AND b<10600; 99 | SELECT count(*), avg(b) FROM t2 WHERE b>=9700 AND b<10700; 100 | SELECT count(*), avg(b) FROM t2 WHERE b>=9800 AND b<10800; 101 | SELECT count(*), avg(b) FROM t2 WHERE b>=9900 AND b<10900; 102 | COMMIT; 103 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark5.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one%'; 3 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%two%'; 4 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%three%'; 5 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%four%'; 6 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%five%'; 7 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%six%'; 8 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seven%'; 9 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eight%'; 10 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%nine%'; 11 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ten%'; 12 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eleven%'; 13 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twelve%'; 14 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirteen%'; 15 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fourteen%'; 16 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifteen%'; 17 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixteen%'; 18 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventeen%'; 19 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighteen%'; 20 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%nineteen%'; 21 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty%'; 22 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-one%'; 23 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-two%'; 24 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-three%'; 25 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-four%'; 26 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-five%'; 27 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-six%'; 28 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-seven%'; 29 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-eight%'; 30 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%twenty-nine%'; 31 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty%'; 32 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-one%'; 33 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-two%'; 34 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-three%'; 35 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-four%'; 36 | SELECT count(*), avg(b) FROM t2 
WHERE c LIKE '%thirty-five%'; 37 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-six%'; 38 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-seven%'; 39 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-eight%'; 40 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%thirty-nine%'; 41 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty%'; 42 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-one%'; 43 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-two%'; 44 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-three%'; 45 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-four%'; 46 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-five%'; 47 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-six%'; 48 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-seven%'; 49 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-eight%'; 50 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%forty-nine%'; 51 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty%'; 52 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-one%'; 53 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-two%'; 54 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-three%'; 55 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-four%'; 56 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-five%'; 57 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-six%'; 58 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-seven%'; 59 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-eight%'; 60 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%fifty-nine%'; 61 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty%'; 62 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-one%'; 63 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-two%'; 64 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-three%'; 65 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-four%'; 66 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-five%'; 67 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-six%'; 68 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-seven%'; 69 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-eight%'; 70 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%sixty-nine%'; 71 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy%'; 72 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-one%'; 73 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-two%'; 74 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-three%'; 75 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-four%'; 76 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-five%'; 77 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-six%'; 78 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-seven%'; 79 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-eight%'; 80 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%seventy-nine%'; 81 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty%'; 82 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-one%'; 83 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-two%'; 84 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-three%'; 85 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-four%'; 86 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-five%'; 87 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-six%'; 88 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-seven%'; 89 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-eight%'; 90 | SELECT 
count(*), avg(b) FROM t2 WHERE c LIKE '%eighty-nine%'; 91 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety%'; 92 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-one%'; 93 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-two%'; 94 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-three%'; 95 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-four%'; 96 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-five%'; 97 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-six%'; 98 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-seven%'; 99 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-eight%'; 100 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety-nine%'; 101 | SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one hundred%'; 102 | COMMIT; 103 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmark6.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX i2a ON t2(a); 2 | CREATE INDEX i2b ON t2(b); 3 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmarks.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | wa-sqlite benchmarks 6 | 35 | 36 | 37 |

wa-sqlite benchmarks

38 | See discussion post 39 | for details. Demos may require more browser features 40 | (e.g. ES6 module Worker) 41 | than the library needs. 42 | 43 |
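A quick way to check the module Worker requirement up front is the usual options-getter probe: browsers that support module Workers read the type option while constructing the Worker, older ones ignore it. This sketch is illustrative only and is not code from this repository:

function supportsModuleWorker() {
  let supported = false;
  const url = URL.createObjectURL(new Blob([''], { type: 'text/javascript' }));
  try {
    new Worker(url, {
      get type() {
        supported = true;   // evaluated only when the option is understood
        return 'module';
      }
    }).terminate();
  } catch {
    // A constructor failure also means module Workers are unavailable.
  } finally {
    URL.revokeObjectURL(url);
  }
  return supported;
}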
44 | 48 | 49 |
50 |
51 |
52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 |
Test
Test 1: 1000 INSERTs
Test 2: 25000 INSERTs in a transaction
Test 3: 25000 INSERTs into an indexed table
Test 4: 100 SELECTs without an index
Test 5: 100 SELECTs on a string comparison
Test 6: Creating an index
Test 7: 5000 SELECTs with an index
Test 8: 1000 UPDATEs without an index
Test 9: 25000 UPDATEs with an index
Test 10: 25000 text UPDATEs with an index
Test 11: INSERTs from a SELECT
Test 12: DELETE without an index
Test 13: DELETE with an index
Test 14: A big INSERT after a big DELETE
Test 15: A big DELETE followed by many small INSERTs
Test 16: DROP TABLE
109 |
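Each row above maps to one of the benchmarkN.sql files in this directory, and each timing column comes from the config search parameter on the page URL: semicolon-separated build,vfs pairs that benchmarks.js (below) forwards to demo-worker.js. A condensed, illustrative sketch of that parsing; the example URL and VFS name are placeholders, not a recommendation:

// e.g. benchmarks.html?config=default,;asyncify,IDBBatchAtomicVFS
const searchParams = new URLSearchParams(location.search);
const CONFIGURATIONS = (searchParams.get('config') ?? 'default,')
  .split(';')                          // one timing column per configuration
  .map(config => config.split(','));   // each entry is a [build, vfsConfig] pair
// benchmarks.js passes each pair to demo-worker.js as ?build=...&config=...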
110 | 111 | 112 | 113 | 120 | 121 | 122 | 123 | 124 | -------------------------------------------------------------------------------- /demo/benchmarks/benchmarks.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | 3 | const searchParams = new URLSearchParams(location.search); 4 | 5 | // Load benchmark SQL from files. 6 | const benchmarksReady = Promise.all(Array.from(new Array(16), (_, i) => { 7 | const filename = `./benchmark${i + 1}.sql`; 8 | return fetch(filename).then(response => response.text()); 9 | })); 10 | 11 | // Parse configurations from the URL and add table columns. 12 | const CONFIGURATIONS = (searchParams.get('config') ?? 'default,') 13 | .split(';') 14 | .map(config => config.split(',')); 15 | const headers = document.querySelector('thead').firstElementChild; 16 | for (const config of CONFIGURATIONS) { 17 | addEntry(headers, config.join(' ')); 18 | } 19 | 20 | document.getElementById('start').addEventListener('click', async event => { 21 | // @ts-ignore 22 | event.target.disabled = true; 23 | 24 | // Clear timings from the table. 25 | Array.from(document.getElementsByTagName('tr'), element => { 26 | if (element.parentElement.tagName === 'TBODY') { 27 | // Keep only the first child. 28 | while (element.firstElementChild.nextElementSibling) { 29 | element.firstElementChild.nextElementSibling.remove(); 30 | } 31 | } 32 | }); 33 | 34 | const benchmarks = await benchmarksReady; 35 | try { 36 | // @ts-ignore 37 | const preamble = document.getElementById('preamble').value; 38 | document.getElementById('error').textContent = ''; 39 | for (const config of CONFIGURATIONS) { 40 | const workerURL = new URL('../demo-worker.js', import.meta.url); 41 | workerURL.searchParams.set('reset', 'true'); 42 | workerURL.searchParams.set('build', config[0]); 43 | workerURL.searchParams.set('config', config[1]); 44 | const worker = new Worker(workerURL, { type: 'module' }); 45 | try { 46 | await Promise.race([ 47 | new Promise((resolve, reject) => { 48 | worker.addEventListener('message', event => { 49 | if (event.data?.error) { 50 | reject(cvtCloneableToError(event.data.error)); 51 | } else { 52 | resolve(); 53 | } 54 | }, { once: true }); 55 | }), 56 | new Promise((_, reject) => setTimeout(() => { 57 | reject(new Error(`Worker initialization timeout`)); 58 | }, 1000_5000)) 59 | ]); 60 | 61 | 62 | // Execute the preamble. 63 | await query(worker, preamble); 64 | 65 | // Loop over the benchmarks. 66 | let tr = document.querySelector('tbody').firstElementChild; 67 | for (const benchmark of benchmarks) { 68 | const results = await query(worker, benchmark); 69 | if (results.error) { 70 | throw cvtCloneableToError(results.error); 71 | } 72 | 73 | addEntry(tr, results.elapsed.toString()); 74 | tr = tr.nextElementSibling; 75 | } 76 | } finally { 77 | worker.terminate(); 78 | } 79 | } 80 | } catch (e) { 81 | document.getElementById('error').textContent = e.stack.includes(e.message) ? e.stack : `${e.message}\n${e.stack}`; 82 | } finally { 83 | // @ts-ignore 84 | event.target.disabled = false; 85 | } 86 | }); 87 | 88 | function addEntry(parent, text) { 89 | const tag = parent.parentElement.tagName === 'TBODY' ? 
'td' : 'th'; 90 | const child = document.createElement(tag); 91 | child.textContent = text; 92 | parent.appendChild(child); 93 | } 94 | 95 | async function query(worker, sql) { 96 | worker.postMessage(sql); 97 | return new Promise((resolve, reject) => { 98 | worker.addEventListener('message', event => { 99 | if (event.data?.error) { 100 | reject(cvtCloneableToError(event.data.error)); 101 | } else { 102 | resolve(event.data); 103 | } 104 | }, { once: true }); 105 | }); 106 | } 107 | 108 | function cvtCloneableToError(e) { 109 | if (Object.hasOwn(e, 'message')) { 110 | const error = new Error(e.message); 111 | for (const [k, v] of Object.entries(e)) { 112 | try { 113 | error[k] = v; 114 | } catch (e) { 115 | // Ignore any properties that can't be set. 116 | } 117 | } 118 | return error; 119 | } 120 | return e; 121 | } -------------------------------------------------------------------------------- /demo/benchmarks/index.html: -------------------------------------------------------------------------------- 1 | benchmarks.html -------------------------------------------------------------------------------- /demo/contention/contention-worker.js: -------------------------------------------------------------------------------- 1 | import * as SQLite from "../../src/sqlite-api.js"; 2 | 3 | const BROADCAST_CHANNEL_NAME = 'contention'; 4 | 5 | const BUILDS = new Map([ 6 | ['default', '../../dist/wa-sqlite.mjs'], 7 | ['asyncify', '../../dist/wa-sqlite-async.mjs'], 8 | ['jspi', '../../dist/wa-sqlite-jspi.mjs'], 9 | // ['default', '../../debug/wa-sqlite.mjs'], 10 | // ['asyncify', '../../debug/wa-sqlite-async.mjs'], 11 | // ['jspi', '../../debug/wa-sqlite-jspi.mjs'], 12 | ]); 13 | 14 | /** 15 | * @typedef Config 16 | * @property {string} name 17 | * @property {string} vfsModule path of the VFS module 18 | * @property {string} [vfsClassName] name of the VFS class 19 | * @property {object} [vfsOptions] VFS constructor arguments 20 | */ 21 | 22 | /** @type {Map} */ const VFS_CONFIGS = new Map([ 23 | { 24 | name: 'default', 25 | vfsModule: null 26 | }, 27 | { 28 | name: 'MemoryVFS', 29 | vfsModule: '../../src/examples/MemoryVFS.js', 30 | }, 31 | { 32 | name: 'MemoryAsyncVFS', 33 | vfsModule: '../../src/examples/MemoryAsyncVFS.js', 34 | }, 35 | { 36 | name: 'IDBBatchAtomicVFS', 37 | vfsModule: '../../src/examples/IDBBatchAtomicVFS.js', 38 | vfsOptions: { lockPolicy: 'shared+hint' } 39 | }, 40 | { 41 | name: 'IDBMirrorVFS', 42 | vfsModule: '../../src/examples/IDBMirrorVFS.js', 43 | }, 44 | { 45 | name: 'OPFSAdaptiveVFS', 46 | vfsModule: '../../src/examples/OPFSAdaptiveVFS.js', 47 | vfsOptions: { lockPolicy: 'shared+hint' } 48 | }, 49 | { 50 | name: 'OPFSCoopSyncVFS', 51 | vfsModule: '../../src/examples/OPFSCoopSyncVFS.js', 52 | }, 53 | { 54 | name: 'OPFSPermutedVFS', 55 | vfsModule: '../../src/examples/OPFSPermutedVFS.js', 56 | }, 57 | { 58 | name: 'AccessHandlePoolVFS', 59 | vfsModule: '../src/examples/AccessHandlePoolVFS.js', 60 | }, 61 | { 62 | name: 'FLOOR', 63 | vfsModule: '../../src/examples/FLOOR.js', 64 | }, 65 | ].map(config => [config.name, config])); 66 | 67 | const releaseTask = (function() { 68 | const { port1, port2 } = new MessageChannel(); 69 | port1.start(); 70 | port2.start(); 71 | 72 | return function() { 73 | return new Promise(resolve => { 74 | port2.onmessage = resolve; 75 | port1.postMessage(null); 76 | }); 77 | }; 78 | })(); 79 | 80 | (async function() { 81 | const broadcastChannel = new BroadcastChannel(BROADCAST_CHANNEL_NAME); 82 | const searchParams = new 
URLSearchParams(globalThis.location.search); 83 | const index = Number(searchParams.get('index')); 84 | const type = searchParams.get('type'); 85 | 86 | if (index === 0) { 87 | console.debug('primary worker clearing storage'); 88 | await clearStorage(); 89 | } 90 | 91 | postMessage(null); 92 | const { build, config: configName, queries } = await new Promise(resolve => { 93 | broadcastChannel.addEventListener('message', event => { 94 | resolve(event.data); 95 | }, { once: true }); 96 | }); 97 | const config = VFS_CONFIGS.get(configName); 98 | 99 | // Instantiate SQLite. 100 | const { default: moduleFactory } = await import(BUILDS.get(build)); 101 | const module = await moduleFactory(); 102 | const sqlite3 = SQLite.Factory(module); 103 | 104 | const dbName = searchParams.get('dbName') ?? 'hello'; 105 | const vfsName = searchParams.get('vfsName') ?? 'demo'; 106 | if (config.vfsModule) { 107 | // Create the VFS and register it as the default file system. 108 | const namespace = await import(config.vfsModule); 109 | const className = config.vfsClassName ?? config.vfsModule.match(/([^/]+)\.js$/)[1]; 110 | const vfs = await namespace[className].create(vfsName, module, config.vfsOptions); 111 | sqlite3.vfs_register(vfs, true); 112 | } 113 | 114 | // Open the database. 115 | if (index === 0) { 116 | const db = await sqlite3.open_v2(dbName); 117 | await query(sqlite3, db, queries.global); 118 | await sqlite3.close(db); 119 | } 120 | const db = await sqlite3.open_v2(dbName); 121 | await new Promise(resolve => setTimeout(resolve)); 122 | await query(sqlite3, db, queries.connection); 123 | 124 | postMessage(null); 125 | const { endTime } = await new Promise(resolve => { 126 | broadcastChannel.addEventListener('message', event => { 127 | resolve(event.data); 128 | }, { once: true }); 129 | }); 130 | 131 | // Run contention test 132 | let nIterations = 0; 133 | const sql = type === 'writer' ? queries.writer : queries.reader; 134 | while (Date.now() < endTime) { 135 | await query(sqlite3, db, sql); 136 | await releaseTask(); 137 | nIterations++; 138 | } 139 | postMessage(`worker ${index} ${type} ${nIterations} iterations`); 140 | postMessage(null); 141 | })().catch(e => { 142 | console.error(e); 143 | postMessage({ error: e }); 144 | }); 145 | 146 | async function query(sqlite3, db, sql) { 147 | while (true) { 148 | try { 149 | const rc = await sqlite3.exec(db, sql); 150 | return rc; 151 | } catch (e) { 152 | if (e.code === SQLite.SQLITE_BUSY) { 153 | if (!sqlite3.get_autocommit(db)) { 154 | await sqlite3.exec(db, 'ROLLBACK;'); 155 | } 156 | continue; 157 | } 158 | throw e; 159 | } 160 | } 161 | } 162 | 163 | async function clearStorage() { 164 | const root = await navigator.storage?.getDirectory(); 165 | if (root) { 166 | // @ts-ignore 167 | for await (const name of root.keys()) { 168 | await root.removeEntry(name, { recursive: true }); 169 | } 170 | } 171 | 172 | // Clear IndexedDB. 173 | const dbList = indexedDB.databases ? 
174 | await indexedDB.databases() : 175 | ['demo', 'demo-floor'].map(name => ({ name })); 176 | await Promise.all(dbList.map(({name}) => { 177 | return new Promise((resolve, reject) => { 178 | const request = indexedDB.deleteDatabase(name); 179 | request.onsuccess = resolve; 180 | request.onerror = reject; 181 | }); 182 | })); 183 | } -------------------------------------------------------------------------------- /demo/contention/contention.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | wa-sqlite contention 6 | 35 | 36 | 37 | 38 |

wa-sqlite contention test

39 | 40 |
41 |
42 | Global init 43 | 44 |
45 |
46 | Connection init 47 | 48 |
49 |
50 | Writer task 51 | 52 |
53 |
54 | Reader task 55 | 56 |
57 |
58 | 59 | 60 | 61 |
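The build, VFS, worker counts, and duration for a run are not entered in the form above; contention.js (below) reads them from the page URL and falls back to the defaults shown here. A condensed sketch of a typical invocation, with illustrative parameter values:

// contention.html?build=asyncify&config=IDBBatchAtomicVFS&nWriters=2&nReaders=4&nSeconds=10
const searchParams = new URLSearchParams(globalThis.location.search);
const build    = searchParams.get('build')  ?? 'default';    // SQLite build variant
const config   = searchParams.get('config') ?? 'default';    // VFS configuration name
const nWriters = Number(searchParams.get('nWriters') ?? 1);  // writer Workers
const nReaders = Number(searchParams.get('nReaders') ?? 1);  // reader Workers
const nSeconds = Number(searchParams.get('nSeconds') ?? 10); // test duration in seconds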
62 | 63 | -------------------------------------------------------------------------------- /demo/contention/contention.js: -------------------------------------------------------------------------------- 1 | const WORKER_URL = 'contention-worker.js'; 2 | const BROADCAST_CHANNEL_NAME = 'contention'; 3 | 4 | const searchParams = new URLSearchParams(globalThis.location.search); 5 | 6 | const queries = JSON.parse(localStorage.getItem('contention') ?? 'null') || { 7 | global: ` 8 | BEGIN IMMEDIATE; 9 | CREATE TABLE kv(key PRIMARY KEY, value); 10 | REPLACE INTO kv VALUES ('counter', 0); 11 | COMMIT; 12 | `.split('\n').map(line => line.replace(/^[ ]{4}/, '')).join('\n').trim(), 13 | 14 | connection: ` 15 | PRAGMA synchronous = NORMAL; 16 | `.split('\n').map(line => line.replace(/^[ ]{4}/, '')).join('\n').trim(), 17 | 18 | writer: ` 19 | BEGIN IMMEDIATE; 20 | UPDATE kv SET value = value + 1 WHERE key='counter'; 21 | COMMIT; 22 | `.split('\n').map(line => line.replace(/^[ ]{4}/, '')).join('\n').trim(), 23 | 24 | reader: ` 25 | SELECT max(rowid) FROM kv; 26 | `.split('\n').map(line => line.replace(/^[ ]{4}/, '')).join('\n').trim(), 27 | }; 28 | 29 | for (const name of Object.keys(queries)) { 30 | const element = /** @type {HTMLTextAreaElement} */ (document.getElementById(name)); 31 | element.value = queries[name]; 32 | element.addEventListener('keyup', event => { 33 | // @ts-ignore 34 | queries[name] = event.target.value; 35 | localStorage.setItem('contention', JSON.stringify(queries)); 36 | }); 37 | } 38 | 39 | const build = searchParams.get('build') ?? 'default'; 40 | const config = searchParams.get('config') ?? 'default'; 41 | const nWriters = Number(searchParams.get('nWriters') ?? 1); 42 | const nReaders = Number(searchParams.get('nReaders') ?? 1); 43 | const nSeconds = Number(searchParams.get('nSeconds') ?? 10); 44 | log(`build: ${build}`); 45 | log(`config: ${config}`); 46 | log(`nWriters: ${nWriters}`); 47 | log(`nReaders: ${nReaders}`); 48 | log(`nSeconds: ${nSeconds}`); 49 | 50 | function log(item) { 51 | const element = document.createElement('pre'); 52 | 53 | let text; 54 | if (typeof item === 'string') { 55 | text = item; 56 | } else if (item.error) { 57 | element.style.color = 'red'; 58 | text = item.error.message; 59 | } else if (item) { 60 | element.style.color = 'green'; 61 | text = JSON.stringify(item); 62 | } else { 63 | return; 64 | } 65 | 66 | const now = new Date(); 67 | const hours = now.getHours().toString().padStart(2, '0'); 68 | const minutes = now.getMinutes().toString().padStart(2, '0'); 69 | const seconds = now.getSeconds().toString().padStart(2, '0'); 70 | const milliseconds = now.getMilliseconds().toString().padStart(3, '0'); 71 | const timestamp = `${hours}:${minutes}:${seconds}.${milliseconds}`; 72 | 73 | element.textContent = `[${timestamp}] ${text}`; 74 | document.getElementById('output').appendChild(element); 75 | } 76 | 77 | document.getElementById('start').addEventListener('click', async event => { 78 | try { 79 | // @ts-ignore 80 | event.target.disabled = true; 81 | 82 | const broadcastChannel = new BroadcastChannel(BROADCAST_CHANNEL_NAME); 83 | 84 | log('launch workers'); 85 | const workers = Array.from({ length: nWriters + nReaders }, (_, i) => { 86 | const url = new URL(WORKER_URL, import.meta.url); 87 | url.searchParams.set('index', i.toString()); 88 | url.searchParams.set('type', i < nWriters ? 
'writer' : 'reader'); 89 | 90 | const worker = new Worker(url, { type: 'module' }); 91 | worker.addEventListener('message', event => { 92 | if (event.data) { 93 | log(event.data); 94 | } 95 | }); 96 | return worker; 97 | }); 98 | await syncWorkers(workers); 99 | 100 | broadcastChannel.postMessage({ 101 | build, 102 | config, 103 | queries 104 | }); 105 | await syncWorkers(workers); 106 | 107 | log('start') 108 | broadcastChannel.postMessage({ endTime: Date.now() + nSeconds * 1000 }); 109 | await syncWorkers(workers); 110 | 111 | log('complete'); 112 | workers.forEach(worker => worker.terminate()); 113 | 114 | const demo = document.getElementById('demo'); 115 | demo.innerHTML = ` 116 | 117 | Open SQL demo 118 | 119 | (close demo before rerunning contention test) 120 | `; 121 | } catch (e) { 122 | console.error(e); 123 | log({ error: e }); 124 | } finally { 125 | // @ts-ignore 126 | event.target.disabled = false; 127 | } 128 | }); 129 | 130 | document.getElementById('reset').addEventListener('click', async event => { 131 | localStorage.removeItem('contention'); 132 | window.location.reload(); 133 | }); 134 | 135 | function syncWorkers(workers) { 136 | return Promise.all(workers.map(worker => new Promise(resolve => { 137 | const abortController = new AbortController(); 138 | worker.addEventListener('message', event => { 139 | if (event.data === null) { 140 | resolve(); 141 | abortController.abort(); 142 | } 143 | }, { signal: abortController.signal }); 144 | }))); 145 | } -------------------------------------------------------------------------------- /demo/contention/index.html: -------------------------------------------------------------------------------- 1 | contention.html -------------------------------------------------------------------------------- /demo/demo.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | wa-sqlite demo 6 | 35 | 36 | 37 |
38 |
39 |
40 |
41 | 42 | 43 | 44 | 45 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /demo/demo.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | 3 | // This is the path to the Monaco editor distribution. For development 4 | // this loads from the local server (uses Yarn 2 path). 5 | const MONACO_VS = location.hostname.endsWith('localhost') ? 6 | '/.yarn/unplugged/monaco-editor-npm-0.34.1-03d887d213/node_modules/monaco-editor/dev/vs' : 7 | 'https://cdnjs.cloudflare.com/ajax/libs/monaco-editor/0.34.1/min/vs'; 8 | 9 | const SQL_KEY = 'wa-sqlite demo sql'; 10 | const DEFAULT_SQL = ` 11 | -- Optionally select statements to execute. 12 | 13 | CREATE TABLE IF NOT EXISTS t(x PRIMARY KEY, y); 14 | INSERT OR REPLACE INTO t VALUES ('good', 'bad'), ('hot', 'cold'), ('up', 'down'); 15 | SELECT * FROM t; 16 | `.trim(); 17 | 18 | window.addEventListener('DOMContentLoaded', async function() { 19 | // Load the Monaco editor 20 | const button = /** @type {HTMLButtonElement} */(document.getElementById('execute')); 21 | const editorReady = createMonacoEditor().then(editor => { 22 | // Change the button text with selection. 23 | editor.onDidChangeCursorSelection(({selection}) => { 24 | button.textContent = selection.isEmpty() ? 25 | 'Execute' : 26 | 'Execute selection'; 27 | }); 28 | 29 | // Persist editor content across page loads. 30 | let change; 31 | editor.onDidChangeModelContent(function() { 32 | clearTimeout(change); 33 | change = setTimeout(function() { 34 | localStorage.setItem(SQL_KEY, editor.getValue()); 35 | }, 1000); 36 | }); 37 | editor.setValue(localStorage.getItem(SQL_KEY) ?? DEFAULT_SQL); 38 | 39 | return editor; 40 | }); 41 | 42 | // Start the Worker. 43 | // Propagate the main page search parameters to the Worker URL. 44 | const workerURL = new URL('./demo-worker.js', import.meta.url); 45 | workerURL.search = location.search; 46 | const worker = new Worker(workerURL, { type: 'module' }); 47 | worker.addEventListener('message', function(event) { 48 | // The Worker will response with null on successful start, or with 49 | // an error message on failure. 50 | if (event.data) { 51 | document.getElementById('output').innerHTML = `
${event.data.error.stack}
`; 52 | } else { 53 | document.getElementById('output').innerHTML = 54 | JSON.stringify([...new URLSearchParams(location.search).entries()]); 55 | button.disabled = false; 56 | } 57 | }, { once: true }); 58 | 59 | // Execute SQL on button click. 60 | button.addEventListener('click', async function() { 61 | button.disabled = true; 62 | 63 | // Get SQL from editor. 64 | const editor = await editorReady; 65 | const selection = editor.getSelection(); 66 | const queries = selection.isEmpty() ? 67 | editor.getValue() : 68 | editor.getModel().getValueInRange(selection); 69 | 70 | // Clear any previous output on the page. 71 | const output = document.getElementById('output'); 72 | while (output.firstChild) output.removeChild(output.lastChild); 73 | 74 | const timestamp = document.getElementById('timestamp'); 75 | timestamp.textContent = new Date().toLocaleTimeString(); 76 | 77 | let time = performance.now(); 78 | worker.postMessage(queries); 79 | worker.addEventListener('message', async function(event) { 80 | timestamp.textContent += ` ${(performance.now() - time).toFixed(1)} milliseconds`; 81 | if (event.data.results) { 82 | // Format the results as tables. 83 | event.data.results 84 | .map(formatTable) 85 | .forEach(table => output.append(table)); 86 | } else { 87 | output.innerHTML = `
${event.data.error.message}
`; 88 | } 89 | button.disabled = false; 90 | }, { once: true }); 91 | }); 92 | }); 93 | 94 | async function createMonacoEditor() { 95 | // Insert a script element to bootstrap the monaco loader. 96 | await new Promise(resolve => { 97 | const loader = document.createElement('script'); 98 | loader.src = `${MONACO_VS}/loader.js`; 99 | loader.async = true; 100 | loader.addEventListener('load', resolve, { once: true }); 101 | document.head.appendChild(loader); 102 | }); 103 | 104 | // Load monaco itself. 105 | /** @type {any} */ const require = globalThis.require; 106 | require.config({ paths: { vs: MONACO_VS } }); 107 | const monaco = await new Promise(resolve => { 108 | require(['vs/editor/editor.main'], resolve); 109 | }); 110 | 111 | // Create editor. 112 | // https://microsoft.github.io/monaco-editor/api/modules/monaco.editor.html#create 113 | return monaco.editor.create(document.getElementById('editor-container'), { 114 | language: 'sql', 115 | minimap: { enabled: false }, 116 | automaticLayout: true 117 | }); 118 | } 119 | 120 | function formatTable({ columns, rows }) { 121 | const table = document.createElement('table'); 122 | 123 | const thead = table.appendChild(document.createElement('thead')); 124 | thead.appendChild(formatRow(columns, 'th')); 125 | 126 | const tbody = table.appendChild(document.createElement('tbody')); 127 | for (const row of rows) { 128 | tbody.appendChild(formatRow(row)); 129 | } 130 | 131 | return table; 132 | } 133 | 134 | function formatRow(data, tag = 'td') { 135 | const row = document.createElement('tr'); 136 | for (const value of data) { 137 | const cell = row.appendChild(document.createElement(tag)); 138 | cell.textContent = value !== null ? value.toString() : 'null'; 139 | } 140 | return row; 141 | } -------------------------------------------------------------------------------- /demo/file/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | wa-sqlite import/export 6 | 10 | 11 | 12 | 13 |

Sample database import/export

14 |
15 | Import from local file 16 |
17 |
18 | URL: 19 | 20 |
21 |
22 | 23 | 24 |
25 |
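Export on this page is handled by service-worker.js (below): any fetch in its scope whose path ends with "export" is answered by streaming the database straight out of the VFS, and a request carrying a check parameter simply confirms the service worker is active. The following is only a sketch of how a page might trigger the download; the idb and db defaults mirror verifier.js, and the real page wiring lives in index.js, which is not reproduced in this section:

async function downloadDatabase(idb = 'sqlite-vfs', db = 'sqlite.db') {
  // Verify the service worker is controlling the page before exporting.
  const check = await fetch('./export?check');
  if (!check.ok) throw new Error('service worker is not active yet');

  // Navigating to the export URL streams the file; the Content-Disposition
  // header set by the service worker makes the browser save it.
  const params = new URLSearchParams({ idb, db });
  window.location.href = `./export?${params}`;
}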
26 | 27 | -------------------------------------------------------------------------------- /demo/file/service-worker.js: -------------------------------------------------------------------------------- 1 | import * as VFS from "../../src/VFS.js"; 2 | import { IDBBatchAtomicVFS as MyVFS } from "../../src/examples/IDBBatchAtomicVFS.js"; 3 | // import { IDBMirrorVFS as MyVFS } from "../../src/examples/IDBMirrorVFS.js"; 4 | 5 | // Install the service worker as soon as possible. 6 | globalThis.addEventListener('install', (/** @type {ExtendableEvent} */ event) => { 7 | event.waitUntil(globalThis.skipWaiting()); 8 | }); 9 | globalThis.addEventListener('activate', (/** @type {ExtendableEvent} */ event) => { 10 | event.waitUntil(globalThis.clients.claim()); 11 | }); 12 | 13 | globalThis.addEventListener('fetch', async (/** @type {FetchEvent} */ event) => { 14 | const url = new URL(event.request.url); 15 | if (!url.href.includes(globalThis.registration.scope)) return; 16 | if (!url.pathname.endsWith('export')) return; 17 | 18 | // A "check" request just returns a valid response. This lets any 19 | // client context test whether the service worker is active. 20 | if (url.searchParams.has('check')) { 21 | return event.respondWith(new Response('OK')); 22 | } 23 | 24 | // Keep the service worker alive until the download is complete. 25 | let releaseEvent; 26 | event.waitUntil(new Promise(resolve => releaseEvent = resolve)); 27 | 28 | return event.respondWith((async () => { 29 | // Create the VFS and streaming source using the request parameters. 30 | const vfs = await MyVFS.create(url.searchParams.get('idb'), null); 31 | const path = url.searchParams.get('db'); 32 | const source = new DatabaseSource(vfs, path); 33 | 34 | source.isDone.finally(() => { 35 | vfs.close(); 36 | releaseEvent(); 37 | }); 38 | 39 | return new Response(new ReadableStream(source), { 40 | headers: { 41 | "Content-Type": 'application/vnd.sqlite3', 42 | "Content-Disposition": `attachment; filename=${path.match(/[^/]+$/)[0]}` 43 | } 44 | }); 45 | })()); 46 | }); 47 | 48 | // This is a stateful source object for a ReadableStream. 49 | class DatabaseSource { 50 | isDone; 51 | 52 | #vfs; 53 | #path; 54 | #fileId = Math.floor(Math.random() * 0x100000000); 55 | #iOffset = 0; 56 | #bytesRemaining = 0; 57 | 58 | #onDone = []; 59 | #resolve; 60 | #reject; 61 | 62 | constructor(vfs, path) { 63 | this.#vfs = vfs; 64 | this.#path = path; 65 | this.isDone = new Promise((resolve, reject) => { 66 | this.#resolve = resolve; 67 | this.#reject = reject; 68 | }).finally(async () => { 69 | while (this.#onDone.length) { 70 | await this.#onDone.pop()(); 71 | } 72 | }); 73 | } 74 | 75 | async start(controller) { 76 | try { 77 | // Open the file for reading. 78 | const flags = VFS.SQLITE_OPEN_MAIN_DB | VFS.SQLITE_OPEN_READONLY; 79 | await check(this.#vfs.jOpen(this.#path, this.#fileId, flags, {setInt32(){}})); 80 | this.#onDone.push(() => this.#vfs.jClose(this.#fileId)); 81 | await check(this.#vfs.jLock(this.#fileId, VFS.SQLITE_LOCK_SHARED)); 82 | this.#onDone.push(() => this.#vfs.jUnlock(this.#fileId, VFS.SQLITE_LOCK_NONE)); 83 | 84 | // Get the file size. 
85 | const fileSize = new DataView(new ArrayBuffer(8)); 86 | await check(this.#vfs.jFileSize(this.#fileId, fileSize)); 87 | this.#bytesRemaining = Number(fileSize.getBigUint64(0, true)); 88 | } catch (e) { 89 | controller.error(e); 90 | this.#reject(e); 91 | } 92 | } 93 | 94 | async pull(controller) { 95 | try { 96 | const buffer = new Uint8Array(Math.min(this.#bytesRemaining, 65536)); 97 | await check(this.#vfs.jRead(this.#fileId, buffer, this.#iOffset)); 98 | controller.enqueue(buffer); 99 | 100 | this.#iOffset += buffer.byteLength; 101 | this.#bytesRemaining -= buffer.byteLength; 102 | if (this.#bytesRemaining === 0) { 103 | controller.close(); 104 | this.#resolve(); 105 | } 106 | } catch (e) { 107 | controller.error(e); 108 | this.#reject(e); 109 | } 110 | } 111 | 112 | cancel(reason) { 113 | this.#reject(new Error(reason)); 114 | } 115 | }; 116 | 117 | async function check(code) { 118 | if (await code !== VFS.SQLITE_OK) { 119 | throw new Error(`Error code: ${await code}`); 120 | } 121 | } -------------------------------------------------------------------------------- /demo/file/verifier.js: -------------------------------------------------------------------------------- 1 | import SQLiteESMFactory from '../../dist/wa-sqlite-async.mjs'; 2 | import * as SQLite from '../../src/sqlite-api.js'; 3 | import { IDBBatchAtomicVFS as MyVFS } from "../../src/examples/IDBBatchAtomicVFS.js"; 4 | // import { IDBMirrorVFS as MyVFS } from "../../src/examples/IDBMirrorVFS.js"; 5 | 6 | const SEARCH_PARAMS = new URLSearchParams(location.search); 7 | const IDB_NAME = SEARCH_PARAMS.get('idb') ?? 'sqlite-vfs'; 8 | const DB_NAME = SEARCH_PARAMS.get('db') ?? 'sqlite.db'; 9 | 10 | (async function() { 11 | const module = await SQLiteESMFactory(); 12 | const sqlite3 = SQLite.Factory(module); 13 | 14 | const vfs = await MyVFS.create(IDB_NAME, module); 15 | // @ts-ignore 16 | sqlite3.vfs_register(vfs, true); 17 | 18 | const db = await sqlite3.open_v2(DB_NAME, SQLite.SQLITE_OPEN_READWRITE, IDB_NAME); 19 | 20 | const results = [] 21 | await sqlite3.exec(db, 'PRAGMA integrity_check;', (row, columns) => { 22 | results.push(row[0]); 23 | }); 24 | await sqlite3.close(db); 25 | 26 | postMessage(results); 27 | })(); -------------------------------------------------------------------------------- /demo/hello/README.md: -------------------------------------------------------------------------------- 1 | # hello demo 2 | This is a simpler demo program that may be easier to read for those 3 | who want to see how set up and query the database. 4 | 5 | The file hello.js can be loaded either in a Window or Worker context. 6 | By default the Window context will be used. Add the query parameter 7 | "worker" to the URL to use a Worker. 8 | 9 | By default the hello.js script loads the Asyncify build and uses 10 | the IDBBatchAtomicVFS filesystem. Modify the imports at the top of 11 | the script to try other combinations. Note that not all combinations 12 | are valid: 13 | 14 | * As of May 2024, the JSPI build works only on recent Chromium browsers 15 | behind an experiment flag. 16 | * As of May 2024, OPFSPermutedVFS works only on recent Chromium browsers 17 | as it requires FileSystemSyncAccessHandle "readwrite-unsafe" locking. 18 | * Some VFS classes work only with an asynchronous build (Asyncify or JSPI). 19 | * OPFS VFS classes work only within a Worker. 
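For example, pairing the synchronous build with a synchronous OPFS VFS is a two-line change at the top of hello.js. This is only a sketch: the combination below must run in a Worker, and switching VFS classes may require clearing previously created storage first.

// Synchronous build plus a synchronous, Worker-only VFS.
import SQLiteESMFactory from '../../dist/wa-sqlite.mjs';
import { AccessHandlePoolVFS as MyVFS } from '../../src/examples/AccessHandlePoolVFS.js';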
20 | -------------------------------------------------------------------------------- /demo/hello/hello.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Hello wa-sqlite 6 | 22 | 23 | 24 |

25 | 29 | 30 |
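The page's inline script is not reproduced here. Its job is the page-side half of the handshake that hello.js expects: hand over one end of a MessageChannel tagged 'messagePort', then exchange SQL and results over that port. An illustrative sketch of the Worker variant follows (the README notes hello.js can also run directly in the Window context):

const worker = new Worker('./hello.js', { type: 'module' });
const { port1, port2 } = new MessageChannel();
worker.postMessage('messagePort', [port2]);   // hello.js waits for this exact message

port1.addEventListener('message', event => {
  // Either an array of { columns, rows } result sets or { error } from hello.js.
  console.log(event.data);
});
port1.start();
port1.postMessage('SELECT 6 * 7;');           // any SQL accepted by sqlite3.exec()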
31 | 32 | 93 | 94 | -------------------------------------------------------------------------------- /demo/hello/hello.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | 3 | // Uncomment one of the following imports to choose which SQLite build 4 | // to use. Note that an asynchronous VFS requires an asynchronous build 5 | // (Asyncify or JSPI). As of 2024-05-26, JSPI is only available behind 6 | // a flag on Chromium browsers. 7 | // import SQLiteESMFactory from '../dist/wa-sqlite.mjs'; 8 | import SQLiteESMFactory from '../../dist/wa-sqlite-async.mjs'; 9 | // import SQLiteESMFactory from '../dist/wa-sqlite-jspi.mjs'; 10 | 11 | // Uncomment one of the following imports to choose a VFS. Note that an 12 | // asynchronous VFS requires an asynchronous build, and an VFS using 13 | // FileSystemSyncAccessHandle (generally any OPFS VFS) will run only 14 | // in a Worker. 15 | // 16 | // Note that certain VFS classes cannot read each others' databases, e.g. 17 | // IDBBatchAtomicVFS and IDBMirrorVFS, OPFSPermutedVFS and any other OPFS 18 | // VFS. If you change this demo to use a different VFS, you may need to 19 | // clear the appropriate storage for things to work. 20 | import { IDBBatchAtomicVFS as MyVFS } from '../../src/examples/IDBBatchAtomicVFS.js'; 21 | // import { IDBMirrorVFS as MyVFS } from '../../src/examples/IDBMirrorVFS.js'; 22 | // import { AccessHandlePoolVFS as MyVFS } from '../src/examples/AccessHandlePoolVFS.js'; 23 | // import { OPFSAdaptiveVFS as MyVFS } from '../src/examples/OPFSAdaptiveVFS.js'; 24 | // import { OPFSAnyContextVFS as MyVFS } from '../../src/examples/OPFSAnyContextVFS.js'; 25 | // import { OPFSCoopSyncVFS as MyVFS } from '../src/examples/OPFSCoopSyncVFS.js'; 26 | // import { OPFSPermutedVFS as MyVFS } from '../src/examples/OPFSPermutedVFS.js'; 27 | 28 | import * as SQLite from '../../src/sqlite-api.js'; 29 | 30 | Promise.resolve().then(async () => { 31 | // Set up communications with the main thread. 32 | const messagePort = await new Promise(resolve => { 33 | addEventListener('message', function handler(event) { 34 | if (event.data === 'messagePort') { 35 | resolve(event.ports[0]); 36 | removeEventListener('message', handler); 37 | } 38 | }); 39 | }); 40 | 41 | // Initialize SQLite. 42 | const module = await SQLiteESMFactory(); 43 | const sqlite3 = SQLite.Factory(module); 44 | 45 | // Register a custom file system. 46 | const vfs = await MyVFS.create('hello', module); 47 | // @ts-ignore 48 | sqlite3.vfs_register(vfs, true); 49 | 50 | // Open the database. 51 | const db = await sqlite3.open_v2('test'); 52 | 53 | // Handle SQL from the main thread. 54 | messagePort.addEventListener('message', async event => { 55 | const sql = event.data; 56 | try { 57 | // Query the database. Note that although sqlite3.exec() accepts 58 | // multiple statements in a single call, this usage is not recommended 59 | // unless the statements are idempotent (i.e. resubmitting them is 60 | // harmless) or you know your VFS will never return SQLITE_BUSY. 61 | // See https://github.com/rhashimoto/wa-sqlite/discussions/171 62 | const results = []; 63 | await sqlite3.exec(db, sql, (row, columns) => { 64 | if (columns != results.at(-1)?.columns) { 65 | results.push({ columns, rows: [] }); 66 | } 67 | results.at(-1).rows.push(row); 68 | }); 69 | 70 | // Return the results. 
71 | messagePort.postMessage(results); 72 | } catch (error) { 73 | messagePort.postMessage({ error: error.message }); 74 | } 75 | }); 76 | messagePort.start(); 77 | }); 78 | -------------------------------------------------------------------------------- /demo/hello/index.html: -------------------------------------------------------------------------------- 1 | hello.html -------------------------------------------------------------------------------- /demo/write-hint/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | write_hint test 6 | 11 | 12 | 13 | 14 |

Test write transaction hinting

15 |

16 | This page tests write transaction hinting, which allows deadlock-free 17 | synchronization. The proposal is for SQLite to pass the hint via 18 | xFileControl; here it is simulated with a PRAGMA. 19 |

20 |

21 | The test creates a number of Worker contexts that each repeatedly submit 22 | a write transaction until their combined total reaches a target. 23 |
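The write-hint checkbox on this page ends up as a VFS construction option: worker.js in this directory passes it through as the IDBBatchAtomicVFS lock policy. A condensed, illustrative sketch of that wiring, where writeHint stands in for the value received from the page:

import SQLiteESMFactory from '../../dist/wa-sqlite-async.mjs';
import { IDBBatchAtomicVFS } from '../../src/examples/IDBBatchAtomicVFS.js';
import * as SQLite from '../../src/sqlite-api.js';

const writeHint = true;                  // from the checkbox, delivered via postMessage
const module = await SQLiteESMFactory();
const sqlite3 = SQLite.Factory(module);
const vfs = await IDBBatchAtomicVFS.create('write_hint', module, {
  lockPolicy: writeHint ? 'shared+hint' : 'shared'   // hinted vs. plain shared locking
});
sqlite3.vfs_register(vfs, true);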

24 |
25 | 26 | 27 |
28 |
29 | 30 | 31 |
32 |
33 |
Preamble:
34 | 37 |
38 |
39 |
Transaction:
40 | 46 |
47 |
48 | 49 |
50 | 51 |
52 | 53 | -------------------------------------------------------------------------------- /demo/write-hint/index.js: -------------------------------------------------------------------------------- 1 | document.getElementById('run').addEventListener('click', async function(event) { 2 | const button = /** @type {HTMLButtonElement} */(event.target); 3 | button.disabled = true; 4 | 5 | // @ts-ignore 6 | const nWorkers = parseInt(document.getElementById('nWorkers').value); 7 | // @ts-ignore 8 | const writeHint = document.getElementById('writeHint').checked; 9 | // @ts-ignore 10 | const preamble = document.getElementById('preamble').value; 11 | // @ts-ignore 12 | const transaction = document.getElementById('transaction').value; 13 | 14 | let startTime = 0; 15 | const workers = []; 16 | try { 17 | await new Promise(resolve => { 18 | navigator.locks.request('reset', () => { 19 | resolve(); 20 | return new Promise(() => {}); 21 | }).catch(() => {}); 22 | }); 23 | for (let i = 0; i < nWorkers; i++) { 24 | const worker = new Worker('worker.js', { type: 'module' }); 25 | workers.push(worker); 26 | } 27 | await Promise.all(workers.map((worker, index) => { 28 | return new Promise(resolve => { 29 | worker.postMessage({ index, writeHint, preamble, transaction }); 30 | worker.addEventListener('message', resolve, { once: true }); 31 | }); 32 | })); 33 | 34 | log(`start ${JSON.stringify({ nWorkers, writeHint })}`); 35 | startTime = performance.now(); 36 | new BroadcastChannel('write_hint').postMessage('start'); 37 | const results = await Promise.all(workers.map((worker, index) => { 38 | return new Promise(resolve => { 39 | worker.addEventListener('message', event => { 40 | log(JSON.stringify(event.data)); 41 | resolve(event.data); 42 | }, { once: true }); 43 | }); 44 | })); 45 | } finally { 46 | log(`complete ${Math.round(performance.now() - startTime)} ms`); 47 | for (const worker of workers) { 48 | worker.terminate(); 49 | } 50 | button.disabled = false; 51 | } 52 | }); 53 | 54 | function log(text) { 55 | const now = new Date(); 56 | const hours = now.getHours().toString().padStart(2, '0'); 57 | const minutes = now.getMinutes().toString().padStart(2, '0'); 58 | const seconds = now.getSeconds().toString().padStart(2, '0'); 59 | const milliseconds = now.getMilliseconds().toString().padStart(3, '0'); 60 | const timestamp = `${hours}:${minutes}:${seconds}.${milliseconds}`; 61 | 62 | const element = document.createElement('pre'); 63 | element.textContent = `[${timestamp}] ${text}`; 64 | 65 | const output = document.getElementById('output'); 66 | output.appendChild(element); 67 | } -------------------------------------------------------------------------------- /demo/write-hint/worker.js: -------------------------------------------------------------------------------- 1 | import SQLiteESMFactory from '../../dist/wa-sqlite-async.mjs'; 2 | import { IDBBatchAtomicVFS as MyVFS } from '../../src/examples/IDBBatchAtomicVFS.js'; 3 | import * as SQLite from '../../src/sqlite-api.js'; 4 | 5 | addEventListener('message', async event => { 6 | try { 7 | const config = event.data; 8 | if (config.index === 0) { 9 | await new Promise((resolve, reject) => { 10 | const request = indexedDB.deleteDatabase('write_hint'); 11 | request.onsuccess = resolve; 12 | request.onerror = () => reject(request.error); 13 | }); 14 | 15 | await navigator.locks.request('reset', { steal: true }, () => {}); 16 | } 17 | await navigator.locks.request('reset', { mode: 'shared' }, () => {}); 18 | 19 | const module = await SQLiteESMFactory(); 20 | 
const sqlite3 = SQLite.Factory(module); 21 | const vfs = await MyVFS.create('write_hint', module, { 22 | lockPolicy: config.writeHint ? 'shared+hint' : 'shared' 23 | }); 24 | // @ts-ignore 25 | sqlite3.vfs_register(vfs, true); 26 | const db = await sqlite3.open_v2('test'); 27 | 28 | if (config.index === 0) { 29 | await sqlite3.exec(db, config.preamble); 30 | } 31 | 32 | await new Promise(resolve => { 33 | const broadcast = new BroadcastChannel('write_hint'); 34 | broadcast.addEventListener('message', resolve, { once: true }); 35 | postMessage('ready'); 36 | }); 37 | 38 | let nBusy = 0; 39 | let nSuccess = 0; 40 | while (true) { 41 | try { 42 | const rows = []; 43 | await sqlite3.exec(db, config.transaction, row => rows.push(row)); 44 | if (!rows.length) break; 45 | nSuccess++; 46 | } catch (e) { 47 | if (e.code !== SQLite.SQLITE_BUSY) throw e; 48 | nBusy++; 49 | } 50 | } 51 | 52 | await sqlite3.close(db); 53 | postMessage({ index: config.index, nBusy, nSuccess }); 54 | } catch (e) { 55 | postMessage({ error: e.message }); 56 | throw e; 57 | } 58 | }); -------------------------------------------------------------------------------- /dist/mc-wa-sqlite-async.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/mc-wa-sqlite-async.wasm -------------------------------------------------------------------------------- /dist/mc-wa-sqlite-jspi.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/mc-wa-sqlite-jspi.wasm -------------------------------------------------------------------------------- /dist/mc-wa-sqlite.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/mc-wa-sqlite.wasm -------------------------------------------------------------------------------- /dist/wa-sqlite-async-dynamic-main.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/wa-sqlite-async-dynamic-main.wasm -------------------------------------------------------------------------------- /dist/wa-sqlite-async.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/wa-sqlite-async.wasm -------------------------------------------------------------------------------- /dist/wa-sqlite-dynamic-main.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/wa-sqlite-dynamic-main.wasm -------------------------------------------------------------------------------- /dist/wa-sqlite-jspi.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/wa-sqlite-jspi.wasm -------------------------------------------------------------------------------- /dist/wa-sqlite.wasm: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/powersync-ja/wa-sqlite/e87e9e94389cafd633c45a21aa130739358525b8/dist/wa-sqlite.wasm -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- 1 | TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. -------------------------------------------------------------------------------- /docs/assets/highlight.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --light-hl-0: #008000; 3 | --dark-hl-0: #6A9955; 4 | --light-hl-1: #AF00DB; 5 | --dark-hl-1: #C586C0; 6 | --light-hl-2: #000000; 7 | --dark-hl-2: #D4D4D4; 8 | --light-hl-3: #001080; 9 | --dark-hl-3: #9CDCFE; 10 | --light-hl-4: #A31515; 11 | --dark-hl-4: #CE9178; 12 | --light-hl-5: #0000FF; 13 | --dark-hl-5: #569CD6; 14 | --light-hl-6: #0070C1; 15 | --dark-hl-6: #4FC1FF; 16 | --light-hl-7: #795E26; 17 | --dark-hl-7: #DCDCAA; 18 | --light-hl-8: #267F99; 19 | --dark-hl-8: #4EC9B0; 20 | --light-hl-9: #098658; 21 | --dark-hl-9: #B5CEA8; 22 | --light-code-background: #FFFFFF; 23 | --dark-code-background: #1E1E1E; 24 | } 25 | 26 | @media (prefers-color-scheme: light) { :root { 27 | --hl-0: var(--light-hl-0); 28 | --hl-1: var(--light-hl-1); 29 | --hl-2: var(--light-hl-2); 30 | --hl-3: var(--light-hl-3); 31 | --hl-4: var(--light-hl-4); 32 | --hl-5: var(--light-hl-5); 33 | --hl-6: var(--light-hl-6); 34 | --hl-7: var(--light-hl-7); 35 | --hl-8: var(--light-hl-8); 36 | --hl-9: var(--light-hl-9); 37 | --code-background: var(--light-code-background); 38 | } } 39 | 40 | @media (prefers-color-scheme: dark) { :root { 41 | --hl-0: var(--dark-hl-0); 42 | --hl-1: var(--dark-hl-1); 43 | --hl-2: var(--dark-hl-2); 44 | --hl-3: var(--dark-hl-3); 45 | --hl-4: var(--dark-hl-4); 46 | --hl-5: var(--dark-hl-5); 47 | --hl-6: var(--dark-hl-6); 48 | --hl-7: var(--dark-hl-7); 49 | --hl-8: var(--dark-hl-8); 50 | --hl-9: var(--dark-hl-9); 51 | --code-background: var(--dark-code-background); 52 | } } 53 | 54 | :root[data-theme='light'] { 55 | --hl-0: var(--light-hl-0); 56 | --hl-1: var(--light-hl-1); 57 | --hl-2: var(--light-hl-2); 58 | --hl-3: var(--light-hl-3); 59 | --hl-4: var(--light-hl-4); 60 | --hl-5: var(--light-hl-5); 61 | --hl-6: var(--light-hl-6); 62 | --hl-7: var(--light-hl-7); 63 | --hl-8: var(--light-hl-8); 64 | --hl-9: var(--light-hl-9); 65 | --code-background: var(--light-code-background); 66 | } 67 | 68 | :root[data-theme='dark'] { 69 | --hl-0: var(--dark-hl-0); 70 | --hl-1: var(--dark-hl-1); 71 | --hl-2: var(--dark-hl-2); 72 | --hl-3: var(--dark-hl-3); 73 | --hl-4: var(--dark-hl-4); 74 | --hl-5: var(--dark-hl-5); 75 | --hl-6: var(--dark-hl-6); 76 | --hl-7: var(--dark-hl-7); 77 | --hl-8: var(--dark-hl-8); 78 | --hl-9: var(--dark-hl-9); 79 | --code-background: var(--dark-code-background); 80 | } 81 | 82 | .hl-0 { color: var(--hl-0); } 83 | .hl-1 { color: var(--hl-1); } 84 | .hl-2 { color: var(--hl-2); } 85 | .hl-3 { color: var(--hl-3); } 86 | .hl-4 { color: var(--hl-4); } 87 | .hl-5 { color: var(--hl-5); } 88 | .hl-6 { color: var(--hl-6); } 89 | .hl-7 { color: var(--hl-7); } 90 | .hl-8 { color: var(--hl-8); } 91 | .hl-9 { color: var(--hl-9); } 92 | pre, code { background: var(--code-background); } 93 | -------------------------------------------------------------------------------- /docs/assets/navigation.js: 
-------------------------------------------------------------------------------- 1 | window.navigationData = "data:application/octet-stream;base64,H4sIAAAAAAAAA4uuVipJrShRslIKDvTJLEl1DPBU0lEqSCzJULJSyswrSS1KS0xOLdaHy+pllOTmKOkoZWfmpShZGZma1eqgGRFQlFqQWJTqX1CSmZ9XjMc0VIWEDQ5zC8ZjWphbMGEjnPNzCxJLMpNyUkMqC1IRppVUFsANQlWDZqaBpbmhqVFtLAAsbW1yOAEAAA==" -------------------------------------------------------------------------------- /docs/assets/search.js: -------------------------------------------------------------------------------- 1 | window.searchData = "data:application/octet-stream;base64,H4sIAAAAAAAAA61bW2/buBr8L+qr0Q3Fi5K8FT0oUGCx293s6T4YgaHITCJUlryS7Lob9L8fUL7NJ39UGOk8Np3hSN8MLzLJl6iuvjfR7fwl+paXy+g2vrpJhI5nUZmubHQb3f3xa97aj9Vqnbb5Q2H/+rG20Sza1EV0G7U/1rb5hYO8f25XRTSLsiJtGttEt1H0c3bS0KbX/tdPd6dG87K19WOanVr++ulusDlxFatTe6vdl7R9/s3949UG3xEwtD2L1mlty5Y8Hzz/lbo+Ce4+FlUTInYCjhX606bLEJ0DbqzM33XeBr3PEThW6K96U2ZpmBZgx8rd/SizEKkDbqzMp7ywd/m/QW8F2LFyv1bZtxCpA26szH/LIlDohBzdn55t9u1P29h6a5ehr8eRpjj4sSrbuipCTTzDx4r+x27zzH58Tus0a22dN22eNSHyPuLYB/l9bcsQ3QNu/PsWNqzrn5BjpT5kmW2CanlCvkHqYi77UlvH+H3d5lU5IEtx4TPcpmyyam0HpgKm5XfAGnq53sN7HuGxSJ/e9mbvjpRR4pdF/vDls1//w5fPw+sPjMdDXi4XWVUUNnOyr7f67pIx9FLuSQekA/WmiSweiuoh9M0O2Clyy2rzUAx0bip4Qk+RzMs2VG8PnShm1BvkOvAUwXJTDMxFVO+AnSK3Tut0ZVtbL7JqE17YS9r/5yHKwdW87xnK15f1rz1Ca3fBL3/AjpTLntPyyQ6MqSexM3KsVGHTeuGeOS+HRvGzYp8wWnjwQwn0Xv9OGpKpis0qZBw/AScJhY6tFD1R8kcblhUKnyYaOhT04NNEg2eSPn6abNhsQsCTBYNmlB58mmjguErR0yXfkNwjfJpo4DBO0RMl8Wey1yX36LGStU1bu3jclKEr2EvGSOll2qbBAwMBjxS0OzvwI85J6gAbKfKYl2kx+BPOSQigI8WebLtIN22VVatVHlLGC8JI4SJ/2Nq6CUsMAU8WXJSb1YOt36R75oyWD6vvETdSplrbcrGNA4TOyJFS67p6qm3TLJ7TclkE1ZOhjBSvbWNDynnEjZfZFIE6e+AkodDVHEVPkwxe4/Tx02TD1jgEPFkwaI3Tg08TDfxypuhpkoHLDYoeK1l9D5HqUCMlmv1881zV+b9BY8wFYazwPyHO7VFjJdq0tStbtiFrUgIeLWjXQVIdbKTIZr10y7vnqhrYOzlpUfRIyW1abEKGsCNuikzoGE3A0wQDv7cpepJk8KTQg08SDZsSEDtVLmhCoOhJkoFjMwFPEwz7ECTgsYKPzaK2T3nTBg3TPfirovezKC+XdhfdvkTHb4HbKH4v399Es+gxt8XSHSSJjr8TVCs3Vkb3h//7arO2qh1iD/nlKprNr2by5r1R9/ez+ZHQ/b37Q4cS0WwuGJQgqDiazWMGFROUjGZzyaAkQaloNlcMShGUjmZzzaA0QZloNjcMyhBUEs3mCYNKCOo6ms2vGdQ1Qd1Es/kNg7qhVXVFFlz1Ra/8Xf1ZA6gDwpVacB4IaoJw1RacDYL6IFzBBeeEoFYIV3PBmSGoG8KVXXB+CGqIcJUXnCWCeiJc8QXniqC2CFd/wRkjqDOxq3/MORNTZ2JX/5hzJu71ja5zsL2DOhO7+secMzF1Jnb1jzlnYupM7Oofc87E1JnY1T/mnImpM7Grf8w5E1NnYlf/mHMmps7Erv4x50xMnZGu/pJzRlJnpKu/5JyR1Bnp6i85Z2Rv4OpGLnboos5IV3/JOSOpM9LVX3LOSOqMdPWXnDOSOiNd/SXnjKTOSFd/yTkjqTPS1V9yzkjqjHL1V5wzijqjXP0V54yizihXf8U5o6gzytVfcc6o3qzSTSvsvEKdUa7+inNGUWeUq7/inFHUGeXqrzhnFHVGuforzhlFnVGu/opzRlFntKu/5pzR1Bnt6q85ZzR1Rrv6a84ZTZ3Rrv6ac0ZTZ7Srv+ac0b0pv5vz2UmfOqNd/TXnjKbOaFd/zTmjqTPa1V9zzmjqjHb115wzmjpjXP0N54yhzhhXf8M5Y6gzxtXfcM4Y6oxx9TecM4Y645b7c8M5Y6gzxtXfcM6Y3nqsW5CxKzLqjHH1N5wzhjpjXP0N54yhzhhXf8M5Y6gziat/wjmTUGcSV/+EcyahziSu/gnnTEKdSVz9E86ZhDqTuPonnDMJdSZx9U84ZxLqTOLqn3DOJL3FcrdaZpfLe2e675StrVu7/Lz/XpnPD0etXqLF4RMmPp1wf4niOLp9+fnz/Mni/uUahuNQwJTAlENMPCIGfAF8McQ/ftMDVwFXDXG7r3MgaiDqV4iucwHVANUMUfc/ngIzAWYyxLw4OASNXEMj12GN7L9AoY0baONmqI39F/+ZKa/OTHnlYZ5O5AAPTJY+k/vnaoAO6ZS+dB6OyQALkil9yTweegEahEr6QkVOrwAXciV9uaKnUIAMyZK+ZNHTJECGcElfuHqHQoANqZK+VOHxDqBCmKQvTPSYxpmsIE/Kmyc8bwFcyJTyZoqcmwAyJEp5E4UHIIALuVLDuTr8ggVcCJfyhqt/HAH4EDDlCxgeLAAqxEv54rU/KAAkiJXyxeq86Q9ESJTyJepwCPrMEhAm4QtTf8MfRIGufHTcuj9TNSRR+5LI7L5DC5BH7cvjYTMdWBBE7QviardO2+eLYRxGcQ/xtKUOgpBe7Uvv5bY4NAAR1r4IH3a5gQXB1b7gHvesgQah1b7Qks1n4EJ2tS+7vT1kYEOAtS/AuBsMVIih9sWQ7uqeyQaCaHxBJNuzwIUIGl8EyT4rcCFOxpenbuMUOJAl48tSf/sT6JAk40tSt58JHMiR8eWo+afIW5uuc9JdoLCxr7B7Zna6u9kfv6GN4SbW+7sb1fHWC4xxEEvhi+W+ke0jZQLRx4M9WSga9CLj60X7PVYgwXMa33Oer9HAY0LPEb6eQ3ZZQRS4xsfd7jdNgQUdzvg63Ba2P8/UBB
xNfJZucR8TuGBI4nNkS/YjgQz9LfH1t+15axGY0OsSX6/b4h4hcKHLJb4ut4XdPqBCz0t8PW8L+3ZAhfwlvvzR/TcgQw4TXw536eGqGsQQRIVPdJe5i5L14aLk/qYmNIE9zmfw7vLDA1bEPtLycIsPxKC+wlff3bK73pj1rzdCM6AuvPKPeWGz4yVNIEMohS+UHbnprfeg3/q67a5fXui1vk67q7prlfCEEGDhC/Cu7q58w8IQ1oU+TtPdc4bcQex8nPZ0DRsGIxiLfLxNeRE1GIeEbxzafd9fMYelBqw0ONL9LFrna1vkpY1u5/c/f/4PkqjPcOFAAAA="; -------------------------------------------------------------------------------- /jsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "exclude": [".yarn", "dist", "debug"], 3 | "compilerOptions": { 4 | "checkJs": true, 5 | "target":"ESNext", 6 | "module": "ESNext", 7 | "moduleResolution": "Node", 8 | "lib": ["DOM", "ESNext", "WebWorker"] 9 | }, 10 | "typeAcquisition": { 11 | "include": ["jasmine"] 12 | } 13 | } -------------------------------------------------------------------------------- /multiple-ciphers/mc_exported_functions.json: -------------------------------------------------------------------------------- 1 | [ 2 | "_sqlite3mc_vfs_create", 3 | "_sqlite3mc_config", 4 | "_sqlite3mc_cipher_index", 5 | "_sqlite3mc_config_cipher", 6 | "_sqlite3mc_cipher_count", 7 | "_sqlite3mc_cipher_name" 8 | ] -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@journeyapps/wa-sqlite", 3 | "version": "1.2.4", 4 | "publishConfig": { 5 | "access": "public" 6 | }, 7 | "repository": { 8 | "type": "git", 9 | "url": "git+https://github.com/powersync-ja/wa-sqlite.git" 10 | }, 11 | "type": "module", 12 | "main": "src/sqlite-api.js", 13 | "types": "src/types/index.d.ts", 14 | "files": [ 15 | "src/sqlite-constants.js", 16 | "src/sqlite-api.js", 17 | "src/types/*", 18 | "src/FacadeVFS.js", 19 | "src/VFS.js", 20 | "src/WebLocksMixin.js", 21 | "src/examples/*", 22 | "dist/*", 23 | "test/*", 24 | "scripts/download-dynamic-core.js", 25 | "scripts/tools/powersync-download.js", 26 | "powersync-version" 27 | ], 28 | "scripts": { 29 | "build-docs": "typedoc", 30 | "postinstall": "npm run powersync-core:download", 31 | "powersync-core:download": "node scripts/download-dynamic-core.js", 32 | "release": "yarn changeset publish", 33 | "start": "web-dev-server --node-resolve", 34 | "test": "web-test-runner", 35 | "test-manual": "web-test-runner --manual" 36 | }, 37 | "devDependencies": { 38 | "@changesets/cli": "^2.26.2", 39 | "@types/jasmine": "^5.1.4", 40 | "@web/dev-server": "^0.4.6", 41 | "@web/test-runner": "^0.20.0", 42 | "@web/test-runner-core": "^0.13.4", 43 | "comlink": "^4.4.1", 44 | "jasmine-core": "^4.5.0", 45 | "monaco-editor": "^0.34.1", 46 | "typedoc": "^0.25.7", 47 | "typescript": "^5.3.3", 48 | "web-test-runner-jasmine": "^0.0.6" 49 | }, 50 | "dependenciesMeta": { 51 | "monaco-editor@0.34.1": { 52 | "unplugged": true 53 | }, 54 | "web-test-runner-jasmine@0.0.6": { 55 | "unplugged": true 56 | } 57 | }, 58 | "packageManager": "yarn@4.0.2" 59 | } 60 | -------------------------------------------------------------------------------- /powersync-static/powersync_exported_functions.json: -------------------------------------------------------------------------------- 1 | ["_powersync_init_static"] 2 | -------------------------------------------------------------------------------- /powersync-version: -------------------------------------------------------------------------------- 1 | v0.3.14 -------------------------------------------------------------------------------- 
/scripts/docker-setup.sh: -------------------------------------------------------------------------------- 1 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y && \ 2 | . $HOME/.cargo/env && \ 3 | rustup toolchain install nightly-2024-05-18-aarch64-unknown-linux-gnu && \ 4 | rustup component add rust-src --toolchain nightly-2024-05-18-aarch64-unknown-linux-gnu 5 | 6 | sudo apt-get update -y 7 | sudo apt-get install -y tclsh 8 | 9 | # Need NVM: 10 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash 11 | source ~/.bashrc 12 | nvm install 18.12.0 && nvm use 18.12.0 13 | npm install -g yarn -------------------------------------------------------------------------------- /scripts/download-core-build.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This script downloads PowerSync SQLite Core static WASM binaries. 3 | * This is used in the linking phase of a statically linked WA-SQLite-PowerSync WASM distributable. 4 | */ 5 | import fs from 'fs/promises'; 6 | import path from 'path'; 7 | import { fileURLToPath } from 'url'; 8 | import { downloadReleaseAsset } from './tools/powersync-download.js'; 9 | 10 | const __filename = fileURLToPath(import.meta.url); 11 | const __dirname = path.dirname(__filename); 12 | 13 | const BUILD_FILES = [`libpowersync-wasm.a`]; 14 | 15 | const LIBS_DIR = path.resolve(__dirname, '../powersync-libs'); 16 | 17 | async function directoryExists(path) { 18 | console.log(`Checking directory ${path}`); 19 | try { 20 | const stats = await fs.stat(path); 21 | return stats.isDirectory(); 22 | } catch (error) { 23 | if (error.code === 'ENOENT') { 24 | // Directory does not exist 25 | return false; 26 | } 27 | // Some other error occurred 28 | throw error; 29 | } 30 | } 31 | 32 | async function downloadDynamicCore() { 33 | console.log('Downloading PowerSync Core'); 34 | 35 | const exists = await directoryExists(LIBS_DIR); 36 | if (!exists) { 37 | console.log(`Creating libs directory ${LIBS_DIR}`); 38 | await fs.mkdir(LIBS_DIR); 39 | } 40 | 41 | for (const asset of BUILD_FILES) { 42 | await downloadReleaseAsset({ 43 | asset, 44 | outputPath: path.join(LIBS_DIR, asset) 45 | }); 46 | } 47 | } 48 | 49 | downloadDynamicCore(); 50 | -------------------------------------------------------------------------------- /scripts/download-dynamic-core.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This script downloads PowerSync SQLite Core side-module binaries. 3 | * These modules are placed in the `dist` folder along other binaries. 4 | */ 5 | import path from 'path'; 6 | import { fileURLToPath } from 'url'; 7 | import { downloadReleaseAsset } from './tools/powersync-download.js'; 8 | 9 | const __filename = fileURLToPath(import.meta.url); 10 | const __dirname = path.dirname(__filename); 11 | 12 | const RELEASE_FILES = [`libpowersync.wasm`, `libpowersync-async.wasm`]; 13 | 14 | const DIST_DIR = path.resolve(__dirname, '../dist'); 15 | 16 | async function downloadDynamicCore() { 17 | try { 18 | for (const asset of RELEASE_FILES) { 19 | await downloadReleaseAsset({ 20 | asset, 21 | outputPath: path.join(DIST_DIR, asset) 22 | }); 23 | } 24 | } catch (ex) { 25 | console.warn( 26 | `Could not download PowerSync SQLite core for dynamic linking. Dynamic builds require ${RELEASE_FILES.join( 27 | '/' 28 | )} asset files. Static builds should still function correctly. 
${ex}` 29 | ); 30 | } 31 | } 32 | 33 | downloadDynamicCore(); 34 | -------------------------------------------------------------------------------- /scripts/tools/powersync-download.js: -------------------------------------------------------------------------------- 1 | import fs from 'fs/promises'; 2 | import path from 'path'; 3 | import { fileURLToPath } from 'url'; 4 | 5 | const __filename = fileURLToPath(import.meta.url); 6 | const __dirname = path.dirname(__filename); 7 | 8 | const getPowerSyncVersion = async () => { 9 | const versionPath = path.resolve(__dirname, '../../powersync-version'); 10 | const versionContent = await fs.readFile(versionPath, 'utf8'); 11 | return versionContent.trim(); 12 | }; 13 | 14 | export const downloadReleaseAsset = async ({ asset, outputPath }) => { 15 | const version = await getPowerSyncVersion(); 16 | console.info(`Downloading ${asset}@${version}`); 17 | 18 | const response = await fetch( 19 | `https://github.com/powersync-ja/powersync-sqlite-core/releases/download/${version}/${asset}` 20 | ); 21 | if (!response.ok) { 22 | throw new Error(`Could not download PowerSync core asset "${asset}". ${await response.text()}`); 23 | } 24 | 25 | const fileContent = await response.arrayBuffer(); 26 | await fs.writeFile(outputPath, Buffer.from(fileContent)); 27 | }; 28 | -------------------------------------------------------------------------------- /src/VFS.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | import * as VFS from './sqlite-constants.js'; 3 | export * from './sqlite-constants.js'; 4 | 5 | const DEFAULT_SECTOR_SIZE = 512; 6 | 7 | // Base class for a VFS. 8 | export class Base { 9 | name; 10 | mxPathname = 64; 11 | _module; 12 | 13 | /** 14 | * @param {string} name 15 | * @param {object} module 16 | */ 17 | constructor(name, module) { 18 | this.name = name; 19 | this._module = module; 20 | } 21 | 22 | /** 23 | * @returns {void|Promise} 24 | */ 25 | close() { 26 | } 27 | 28 | /** 29 | * @returns {boolean|Promise} 30 | */ 31 | isReady() { 32 | return true; 33 | } 34 | 35 | /** 36 | * Overload in subclasses to indicate which methods are asynchronous. 
37 | * @param {string} methodName 38 | * @returns {boolean} 39 | */ 40 | hasAsyncMethod(methodName) { 41 | return false; 42 | } 43 | 44 | /** 45 | * @param {number} pVfs 46 | * @param {number} zName 47 | * @param {number} pFile 48 | * @param {number} flags 49 | * @param {number} pOutFlags 50 | * @returns {number|Promise} 51 | */ 52 | xOpen(pVfs, zName, pFile, flags, pOutFlags) { 53 | return VFS.SQLITE_CANTOPEN; 54 | } 55 | 56 | /** 57 | * @param {number} pVfs 58 | * @param {number} zName 59 | * @param {number} syncDir 60 | * @returns {number|Promise} 61 | */ 62 | xDelete(pVfs, zName, syncDir) { 63 | return VFS.SQLITE_OK; 64 | } 65 | 66 | /** 67 | * @param {number} pVfs 68 | * @param {number} zName 69 | * @param {number} flags 70 | * @param {number} pResOut 71 | * @returns {number|Promise} 72 | */ 73 | xAccess(pVfs, zName, flags, pResOut) { 74 | return VFS.SQLITE_OK; 75 | } 76 | 77 | /** 78 | * @param {number} pVfs 79 | * @param {number} zName 80 | * @param {number} nOut 81 | * @param {number} zOut 82 | * @returns {number|Promise} 83 | */ 84 | xFullPathname(pVfs, zName, nOut, zOut) { 85 | return VFS.SQLITE_OK; 86 | } 87 | 88 | /** 89 | * @param {number} pVfs 90 | * @param {number} nBuf 91 | * @param {number} zBuf 92 | * @returns {number|Promise} 93 | */ 94 | xGetLastError(pVfs, nBuf, zBuf) { 95 | return VFS.SQLITE_OK; 96 | } 97 | 98 | /** 99 | * @param {number} pFile 100 | * @returns {number|Promise} 101 | */ 102 | xClose(pFile) { 103 | return VFS.SQLITE_OK; 104 | } 105 | 106 | /** 107 | * @param {number} pFile 108 | * @param {number} pData 109 | * @param {number} iAmt 110 | * @param {number} iOffsetLo 111 | * @param {number} iOffsetHi 112 | * @returns {number|Promise} 113 | */ 114 | xRead(pFile, pData, iAmt, iOffsetLo, iOffsetHi) { 115 | return VFS.SQLITE_OK; 116 | } 117 | 118 | /** 119 | * @param {number} pFile 120 | * @param {number} pData 121 | * @param {number} iAmt 122 | * @param {number} iOffsetLo 123 | * @param {number} iOffsetHi 124 | * @returns {number|Promise} 125 | */ 126 | xWrite(pFile, pData, iAmt, iOffsetLo, iOffsetHi) { 127 | return VFS.SQLITE_OK; 128 | } 129 | 130 | /** 131 | * @param {number} pFile 132 | * @param {number} sizeLo 133 | * @param {number} sizeHi 134 | * @returns {number|Promise} 135 | */ 136 | xTruncate(pFile, sizeLo, sizeHi) { 137 | return VFS.SQLITE_OK; 138 | } 139 | 140 | /** 141 | * @param {number} pFile 142 | * @param {number} flags 143 | * @returns {number|Promise} 144 | */ 145 | xSync(pFile, flags) { 146 | return VFS.SQLITE_OK; 147 | } 148 | 149 | /** 150 | * 151 | * @param {number} pFile 152 | * @param {number} pSize 153 | * @returns {number|Promise} 154 | */ 155 | xFileSize(pFile, pSize) { 156 | return VFS.SQLITE_OK; 157 | } 158 | 159 | /** 160 | * @param {number} pFile 161 | * @param {number} lockType 162 | * @returns {number|Promise} 163 | */ 164 | xLock(pFile, lockType) { 165 | return VFS.SQLITE_OK; 166 | } 167 | 168 | /** 169 | * @param {number} pFile 170 | * @param {number} lockType 171 | * @returns {number|Promise} 172 | */ 173 | xUnlock(pFile, lockType) { 174 | return VFS.SQLITE_OK; 175 | } 176 | 177 | /** 178 | * @param {number} pFile 179 | * @param {number} pResOut 180 | * @returns {number|Promise} 181 | */ 182 | xCheckReservedLock(pFile, pResOut) { 183 | return VFS.SQLITE_OK; 184 | } 185 | 186 | /** 187 | * @param {number} pFile 188 | * @param {number} op 189 | * @param {number} pArg 190 | * @returns {number|Promise} 191 | */ 192 | xFileControl(pFile, op, pArg) { 193 | return VFS.SQLITE_NOTFOUND; 194 | } 195 | 196 | /** 197 | * @param 
{number} pFile 198 | * @returns {number|Promise} 199 | */ 200 | xSectorSize(pFile) { 201 | return DEFAULT_SECTOR_SIZE; 202 | } 203 | 204 | /** 205 | * @param {number} pFile 206 | * @returns {number|Promise} 207 | */ 208 | xDeviceCharacteristics(pFile) { 209 | return 0; 210 | } 211 | } 212 | 213 | export const FILE_TYPE_MASK = [ 214 | VFS.SQLITE_OPEN_MAIN_DB, 215 | VFS.SQLITE_OPEN_MAIN_JOURNAL, 216 | VFS.SQLITE_OPEN_TEMP_DB, 217 | VFS.SQLITE_OPEN_TEMP_JOURNAL, 218 | VFS.SQLITE_OPEN_TRANSIENT_DB, 219 | VFS.SQLITE_OPEN_SUBJOURNAL, 220 | VFS.SQLITE_OPEN_SUPER_JOURNAL, 221 | VFS.SQLITE_OPEN_WAL 222 | ].reduce((mask, element) => mask | element); -------------------------------------------------------------------------------- /src/asyncify_imports.json: -------------------------------------------------------------------------------- 1 | [ 2 | "ipp", 3 | "ipp_async", 4 | "ippp", 5 | "ippp_async", 6 | "vppp", 7 | "vppp_async", 8 | "ipppj", 9 | "ipppj_async", 10 | "ipppi", 11 | "ipppi_async", 12 | "ipppp", 13 | "ipppp_async", 14 | "ipppip", 15 | "ipppip_async", 16 | "vpppip", 17 | "vpppip_async", 18 | "ippppi", 19 | "ippppi_async", 20 | "ippppij", 21 | "ippppij_async", 22 | "ipppiii", 23 | "ipppiii_async", 24 | "ippppip", 25 | "ippppip_async", 26 | "ippipppp", 27 | "ippipppp_async", 28 | "ipppppip", 29 | "ipppppip_async", 30 | "ipppiiip", 31 | "ipppiiip_async", 32 | "vppippii", 33 | "vppippii_async" 34 | ] 35 | -------------------------------------------------------------------------------- /src/examples/MemoryAsyncVFS.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | import { MemoryVFS } from './MemoryVFS.js'; 3 | 4 | // Sample asynchronous in-memory filesystem. This filesystem requires an 5 | // asynchronous WebAssembly build (Asyncify or JSPI). 
6 | export class MemoryAsyncVFS extends MemoryVFS { 7 | 8 | static async create(name, module) { 9 | const vfs = new MemoryVFS(name, module); 10 | await vfs.isReady(); 11 | return vfs; 12 | } 13 | 14 | constructor(name, module) { 15 | super(name, module); 16 | } 17 | 18 | async close() { 19 | for (const fileId of this.mapIdToFile.keys()) { 20 | await this.xClose(fileId); 21 | } 22 | } 23 | 24 | /** 25 | * @param {string?} name 26 | * @param {number} fileId 27 | * @param {number} flags 28 | * @param {DataView} pOutFlags 29 | * @returns {Promise} 30 | */ 31 | async jOpen(name, fileId, flags, pOutFlags) { 32 | return super.jOpen(name, fileId, flags, pOutFlags); 33 | } 34 | 35 | /** 36 | * @param {number} fileId 37 | * @returns {Promise} 38 | */ 39 | async jClose(fileId) { 40 | return super.jClose(fileId); 41 | } 42 | 43 | /** 44 | * @param {number} fileId 45 | * @param {Uint8Array} pData 46 | * @param {number} iOffset 47 | * @returns {Promise} 48 | */ 49 | async jRead(fileId, pData, iOffset) { 50 | return super.jRead(fileId, pData, iOffset); 51 | } 52 | 53 | /** 54 | * @param {number} fileId 55 | * @param {Uint8Array} pData 56 | * @param {number} iOffset 57 | * @returns {Promise} 58 | */ 59 | async jWrite(fileId, pData, iOffset) { 60 | return super.jWrite(fileId, pData, iOffset); 61 | } 62 | 63 | /** 64 | * @param {number} fileId 65 | * @param {number} iSize 66 | * @returns {Promise} 67 | */ 68 | async xTruncate(fileId, iSize) { 69 | return super.jTruncate(fileId, iSize); 70 | } 71 | 72 | /** 73 | * @param {number} fileId 74 | * @param {DataView} pSize64 75 | * @returns {Promise} 76 | */ 77 | async jFileSize(fileId, pSize64) { 78 | return super.jFileSize(fileId, pSize64); 79 | } 80 | 81 | /** 82 | * 83 | * @param {string} name 84 | * @param {number} syncDir 85 | * @returns {Promise} 86 | */ 87 | async jDelete(name, syncDir) { 88 | return super.jDelete(name, syncDir); 89 | } 90 | 91 | /** 92 | * @param {string} name 93 | * @param {number} flags 94 | * @param {DataView} pResOut 95 | * @returns {Promise} 96 | */ 97 | async jAccess(name, flags, pResOut) { 98 | return super.jAccess(name, flags, pResOut); 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/examples/MemoryVFS.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | import { FacadeVFS } from '../FacadeVFS.js'; 3 | import * as VFS from '../VFS.js'; 4 | 5 | // Sample in-memory filesystem. 6 | export class MemoryVFS extends FacadeVFS { 7 | // Map of existing files, keyed by filename. 8 | mapNameToFile = new Map(); 9 | 10 | // Map of open files, keyed by id (sqlite3_file pointer). 
11 | mapIdToFile = new Map(); 12 | 13 | static async create(name, module) { 14 | const vfs = new MemoryVFS(name, module); 15 | await vfs.isReady(); 16 | return vfs; 17 | } 18 | 19 | constructor(name, module) { 20 | super(name, module); 21 | } 22 | 23 | close() { 24 | for (const fileId of this.mapIdToFile.keys()) { 25 | this.jClose(fileId); 26 | } 27 | } 28 | 29 | /** 30 | * @param {string?} filename 31 | * @param {number} fileId 32 | * @param {number} flags 33 | * @param {DataView} pOutFlags 34 | * @returns {number|Promise} 35 | */ 36 | jOpen(filename, fileId, flags, pOutFlags) { 37 | const url = new URL(filename || Math.random().toString(36).slice(2), 'file://'); 38 | const pathname = url.pathname; 39 | 40 | let file = this.mapNameToFile.get(pathname); 41 | if (!file) { 42 | if (flags & VFS.SQLITE_OPEN_CREATE) { 43 | // Create a new file object. 44 | file = { 45 | pathname, 46 | flags, 47 | size: 0, 48 | data: new ArrayBuffer(0) 49 | }; 50 | this.mapNameToFile.set(pathname, file); 51 | } else { 52 | return VFS.SQLITE_CANTOPEN; 53 | } 54 | } 55 | 56 | // Put the file in the opened files map. 57 | this.mapIdToFile.set(fileId, file); 58 | pOutFlags.setInt32(0, flags, true); 59 | return VFS.SQLITE_OK; 60 | } 61 | 62 | /** 63 | * @param {number} fileId 64 | * @returns {number|Promise} 65 | */ 66 | jClose(fileId) { 67 | const file = this.mapIdToFile.get(fileId); 68 | this.mapIdToFile.delete(fileId); 69 | 70 | if (file.flags & VFS.SQLITE_OPEN_DELETEONCLOSE) { 71 | this.mapNameToFile.delete(file.pathname); 72 | } 73 | return VFS.SQLITE_OK; 74 | } 75 | 76 | /** 77 | * @param {number} fileId 78 | * @param {Uint8Array} pData 79 | * @param {number} iOffset 80 | * @returns {number|Promise} 81 | */ 82 | jRead(fileId, pData, iOffset) { 83 | const file = this.mapIdToFile.get(fileId); 84 | 85 | // Clip the requested read to the file boundary. 86 | const bgn = Math.min(iOffset, file.size); 87 | const end = Math.min(iOffset + pData.byteLength, file.size); 88 | const nBytes = end - bgn; 89 | 90 | if (nBytes) { 91 | pData.set(new Uint8Array(file.data, bgn, nBytes)); 92 | } 93 | 94 | if (nBytes < pData.byteLength) { 95 | // Zero unused area of read buffer. 96 | pData.fill(0, nBytes); 97 | return VFS.SQLITE_IOERR_SHORT_READ; 98 | } 99 | return VFS.SQLITE_OK; 100 | } 101 | 102 | /** 103 | * @param {number} fileId 104 | * @param {Uint8Array} pData 105 | * @param {number} iOffset 106 | * @returns {number|Promise} 107 | */ 108 | jWrite(fileId, pData, iOffset) { 109 | const file = this.mapIdToFile.get(fileId); 110 | if (iOffset + pData.byteLength > file.data.byteLength) { 111 | // Resize the ArrayBuffer to hold more data. 112 | const newSize = Math.max(iOffset + pData.byteLength, 2 * file.data.byteLength); 113 | const data = new ArrayBuffer(newSize); 114 | new Uint8Array(data).set(new Uint8Array(file.data, 0, file.size)); 115 | file.data = data; 116 | } 117 | 118 | // Copy data. 119 | new Uint8Array(file.data, iOffset, pData.byteLength).set(pData); 120 | file.size = Math.max(file.size, iOffset + pData.byteLength); 121 | return VFS.SQLITE_OK; 122 | } 123 | 124 | /** 125 | * @param {number} fileId 126 | * @param {number} iSize 127 | * @returns {number|Promise} 128 | */ 129 | jTruncate(fileId, iSize) { 130 | const file = this.mapIdToFile.get(fileId); 131 | 132 | // For simplicity we don't make the ArrayBuffer smaller. 
133 | file.size = Math.min(file.size, iSize); 134 | return VFS.SQLITE_OK; 135 | } 136 | 137 | /** 138 | * @param {number} fileId 139 | * @param {DataView} pSize64 140 | * @returns {number|Promise} 141 | */ 142 | jFileSize(fileId, pSize64) { 143 | const file = this.mapIdToFile.get(fileId); 144 | 145 | pSize64.setBigInt64(0, BigInt(file.size), true); 146 | return VFS.SQLITE_OK; 147 | } 148 | 149 | /** 150 | * @param {string} name 151 | * @param {number} syncDir 152 | * @returns {number|Promise} 153 | */ 154 | jDelete(name, syncDir) { 155 | const url = new URL(name, 'file://'); 156 | const pathname = url.pathname; 157 | 158 | this.mapNameToFile.delete(pathname); 159 | return VFS.SQLITE_OK; 160 | } 161 | 162 | /** 163 | * @param {string} name 164 | * @param {number} flags 165 | * @param {DataView} pResOut 166 | * @returns {number|Promise} 167 | */ 168 | jAccess(name, flags, pResOut) { 169 | const url = new URL(name, 'file://'); 170 | const pathname = url.pathname; 171 | 172 | const file = this.mapNameToFile.get(pathname); 173 | pResOut.setInt32(0, file ? 1 : 0, true); 174 | return VFS.SQLITE_OK; 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/examples/README.md: -------------------------------------------------------------------------------- 1 | # wa-sqlite example code 2 | These examples are intended to help developers get started with writing extensions, 3 | and to experiment with interesting approaches and techniques. Using them as-is in 4 | production is not prohibited but that isn't their primary purpose. 5 | 6 | ## VFS examples 7 | ### MemoryVFS and MemoryAsyncVFS 8 | These implementations store database pages in memory. The default SQLite VFS already does that, so their value is mainly to provide minimal working examples for writing a VFS or to help debugging investigations by providing a comparative baseline for behavior and/or performance. First-time VFS implementers should probably start by looking at these classes, as well as the [SQLite VFS documentation](https://www.sqlite.org/vfs.html). 9 | 10 | ### IDBBatchAtomicVFS 11 | This VFS stores database pages in IndexedDB. IndexedDB works on all contexts - Window, Worker, SharedWorker, service worker, extension - which makes IDBBatchAtomicVFS a good general purpose VFS. 12 | 13 | SQLite supports a special mode for filesystems that can make "batch atomic" writes, i.e. guaranteeing that an arbitrary set of changes is made either completely or not at all, and IDBBatchAtomicVFS leverages IndexedDB to do this. When this mode can be used, an external journal file is not needed which improves performance. The journal will instead be kept in the page cache, so a requirement for triggering batch atomic mode is that the cache size must be set large enough to hold the journal. 14 | 15 | IDBBatchAtomicVFS can trade durability for performance by setting `PRAGMA synchronous=normal`. 16 | 17 | Changing the page size after the database is created is not supported (this is a change from pre-1.0). 18 | 19 | ### IDBMirrorVFS 20 | This VFS keeps all files in memory, persisting database files to IndexedDB. It works on all contexts. 21 | 22 | IDBMirrorVFS can trade durability for performance by setting `PRAGMA synchronous=normal`. 23 | 24 | Changing the page size after the database is created is not supported. 25 | 26 | IDBMirrorVFS has the same characteristics as IDBBatchAtomicVFS in the table below. 
The differences from IDBBatchAtomicVFS are (1) it is much faster both with and without contention, and (2) it can only use databases that fit in available memory. 27 | 28 | ### AccessHandlePoolVFS 29 | This is an OPFS VFS that has all synchronous methods, i.e. they don't return Promises. This allows it to be used with a synchronous WebAssembly build, which has definite performance advantages. 30 | 31 | AccessHandlePoolVFS works by pre-opening a number of access handles and associating them with SQLite open requests as needed. Operation is restricted to a single wa-sqlite instance, so multiple connections are not supported. 32 | 33 | The silver lining to not allowing multiple connections is that there is no drawback to using `PRAGMA locking_mode=exclusive`. This in turn allows `PRAGMA journal_mode=wal`, which can significantly reduce write transaction overhead. 34 | 35 | This VFS is not filesystem transparent, which means that its database files in OPFS cannot be directly imported and exported. 36 | 37 | ### OPFSAdaptiveVFS 38 | This VFS is fundamentally a straightforward mapping of OPFS access handles to VFS methods, but adds two different techniques to support multiple connections. 39 | 40 | The current OPFS spec allows only one open access handle on a file at a time. Supporting multiple connections to a database thus requires closing the access handle on one connection before opening it on another. This open/close is expensive so OPFSAdaptiveVFS does this lazily, i.e. it only closes the access handle when another connection needs it. 41 | 42 | A proposed change to OPFS allows there to be multiple open access handles on a file. OPFSAdaptiveVFS will take advantage of this on browsers that support it, and this will improve performance as well as allow overlapping multiple read transactions with a write transaction. 43 | 44 | If multiple open access handles are not supported then only journaling modes "delete" (default), "memory", and "off" are allowed. 45 | 46 | ### OPFSAnyContextVFS 47 | This VFS uses the slower File and FileSystemWritableFileStream OPFS APIs instead of synchronous access handles. This should allow it to be used on any context, i.e. not just a dedicated Worker. 48 | 49 | Read performance should be only somewhat slower, and might even be faster than paying the messaging overhead to communicate with a Worker. Write performance, however, will be very poor and will degrade further as the file grows. It is recommended to use it only for read-only or nearly read-only databases. 50 | 51 | ### OPFSCoopSyncVFS 52 | This VFS is a synchronous OPFS VFS (like AccessHandlePoolVFS) that allows multiple connections and is filesystem transparent (unlike AccessHandlePoolVFS). 53 | 54 | OPFSCoopSyncVFS uses an access handle pool for files other than the main database and its journal file. For the shared files, it closes them lazily (like OPFSAdaptiveVFS) to support multiple connections while retaining performance with a single connection. 55 | 56 | To keep all the methods synchronous, when asynchronous operations are necessary (e.g. for locking) a method returns an error. The library wrapper API internally handles the error, waits for the asynchronous operation to complete, and then repeats the operation. This is not very efficient, but is only necessary when opening a database or under active multiple connection contention. 57 | 58 | Transactions that access more than one main (non-temporary) database are not supported.
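As a rough orientation for the example VFS classes described above, here is a minimal sketch of how one of them is created, registered, and used through the library API. It mirrors the calls in the test suite (e.g. `test/IDBBatchAtomicVFS.test.js`); the VFS name `'demo-vfs'`, the database name `'demo'`, and the import paths are placeholders whose correct values depend on where the code lives relative to this repository.

```js
// Minimal usage sketch. IDBBatchAtomicVFS has asynchronous methods, so an
// asynchronous build (Asyncify or JSPI) is assumed.
import SQLiteESMFactory from '../../dist/wa-sqlite-async.mjs';
import * as SQLite from '../sqlite-api.js';
import { IDBBatchAtomicVFS } from './IDBBatchAtomicVFS.js';

const module = await SQLiteESMFactory();   // instantiate the WebAssembly module
const sqlite3 = SQLite.Factory(module);    // wrap it with the JavaScript API

// Create the VFS and register it as the default VFS.
const vfs = await IDBBatchAtomicVFS.create('demo-vfs', module);
sqlite3.vfs_register(vfs, true);

// Open a database on the registered VFS and run a query.
const db = await sqlite3.open_v2('demo');
await sqlite3.exec(db, 'PRAGMA integrity_check', (row, columns) => {
  console.log(row[0]);
});
await sqlite3.close(db);
await vfs.close();
```

The comparison table below summarizes which WebAssembly build each VFS supports.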
59 | 60 | ### OPFSPermutedVFS 61 | This is a hybrid OPFS/IndexedDB VFS that allows high concurrency - simultaneous access by multiple readers and a single writer. It requires the proposed "readwrite-unsafe" locking mode for OPFS access handles (only on Chromium browsers as of June 2024). 62 | 63 | OPFSPermutedVFS is a lot like SQLite WAL except that it writes directly to the database file instead of a separate write-ahead log file, so there may be more than one version of a page in the file and the location of pages is generally not sequential. All the page data is stored in the file and IndexedDB is used to manage the page versions and locations. 64 | 65 | OPFSPermutedVFS can trade durability for performance by setting `PRAGMA synchronous=normal`. 66 | 67 | Changing the page size after the database is created is not supported. Not filesystem transparent except immediately after VACUUM. 68 | 69 | ## VFS Comparison 70 | 71 | ||MemoryVFS|MemoryAsyncVFS|IDBBatchAtomicVFS|OPFSAdaptiveVFS|AccessHandlePoolVFS|OPFSAnyContextVFS|OPFSCoopSyncVFS|OPFSPermutedVFS| 72 | |-|-|-|-|-|-|-|-|-| 73 | |Storage|RAM|RAM|IndexedDB|OPFS|OPFS|OPFS|OPFS|OPFS/IndexedDB| 74 | |Synchronous build|✅|:x:|:x:|:x:|✅|:x:|✅|:x:| 75 | |Asyncify build|✅|✅|✅|✅|✅|✅|✅|✅| 76 | |JSPI build|✅|✅|✅|✅|✅|✅|✅|✅| 77 | |Contexts|All|All|All|Worker|Worker|All|Worker|Worker| 78 | |Multiple connections|:x:|:x:|✅|✅|:x:|✅|✅|✅[^1]| 79 | |Full durability|✅|✅|✅|✅|✅|✅|✅|✅| 80 | |Relaxed durability|:x:|:x:|✅|:x:|:x:|:x:|:x:|✅| 81 | |Filesystem transparency|:x:|:x:|:x:|✅|:x:|✅|✅|:x:[^2]| 82 | |Write-ahead logging|:x:|:x:|:x:|:x:|:x:|:x:|:x:|✅[^3]| 83 | |Multi-database transactions|✅|✅|✅|✅|✅|✅|:x:|✅| 84 | |Change page size|✅|✅|:x:|✅|✅|✅|✅|:x:| 85 | |No COOP/COEP requirements|✅|✅|✅|✅|✅|✅|✅|✅| 86 | 87 | [^1]: Requires FileSystemSyncAccessHandle readwrite-unsafe locking mode support. 88 | [^2]: Only filesystem transparent immediately after VACUUM. 89 | [^3]: [Sort of](https://github.com/rhashimoto/wa-sqlite/discussions/152). 90 | -------------------------------------------------------------------------------- /src/examples/tag.js: -------------------------------------------------------------------------------- 1 | import * as SQLite from '../sqlite-api.js'; 2 | 3 | /** 4 | * @typedef SQLiteResults 5 | * @property {string[]} columns 6 | * @property {SQLiteCompatibleType[][]} rows 7 | */ 8 | 9 | /** 10 | * Build a query function for a database. 11 | * 12 | * The returned function can be invoked in two ways: (1) as a template 13 | * tag, or (2) as a regular function. 14 | * 15 | * When used as a template tag, multiple SQL statements are accepted and 16 | * string interpolants can be used, e.g. 17 | * ``` 18 | * const results = await tag` 19 | * PRAGMA integrity_check; 20 | * SELECT * FROM ${tblName}; 21 | * `; 22 | * ``` 23 | * 24 | * When called as a regular function, only one statement can be used 25 | * and SQLite placeholder substitution is performed, e.g. 26 | * ``` 27 | * const results = await tag('INSERT INTO tblName VALUES (?, ?)', [ 28 | * ['foo', 1], 29 | * ['bar', 17], 30 | * ['baz', 42] 31 | * ]); 32 | * ``` 33 | * @param {SQLiteAPI} sqlite3 34 | * @param {number} db 35 | * @returns {(sql: string|TemplateStringsArray, ...values: string[]|SQLiteCompatibleType[][][]) => Promise} 36 | */ 37 | export function createTag(sqlite3, db) { 38 | // Helper function to execute the query.
39 | async function execute(sql, bindings) { 40 | const results = []; 41 | for await (const stmt of sqlite3.statements(db, sql)) { 42 | let columns; 43 | for (const binding of bindings ?? [[]]) { 44 | sqlite3.reset(stmt); 45 | if (bindings) { 46 | sqlite3.bind_collection(stmt, binding); 47 | } 48 | 49 | const rows = []; 50 | while (await sqlite3.step(stmt) === SQLite.SQLITE_ROW) { 51 | const row = sqlite3.row(stmt); 52 | rows.push(row); 53 | } 54 | 55 | columns = columns ?? sqlite3.column_names(stmt) 56 | if (columns.length) { 57 | results.push({ columns, rows }); 58 | } 59 | } 60 | 61 | // When binding parameters, only a single statement is executed. 62 | if (bindings) { 63 | return results; 64 | } 65 | } 66 | return results; 67 | } 68 | 69 | return async function(sql, ...values) { 70 | if (Array.isArray(sql)) { 71 | // Tag usage. 72 | const interleaved = []; 73 | sql.forEach((s, i) => { 74 | interleaved.push(s, values[i]); 75 | }); 76 | return execute(interleaved.join('')); 77 | } else { 78 | // Binding usage. 79 | return execute(sql, values[0]); 80 | } 81 | } 82 | } -------------------------------------------------------------------------------- /src/extra_exported_runtime_methods.json: -------------------------------------------------------------------------------- 1 | [ 2 | "addFunction", 3 | "ccall", 4 | "cwrap", 5 | "getTempRet0", 6 | "getValue", 7 | "setValue", 8 | "lengthBytesUTF8", 9 | "stringToUTF8", 10 | "stringToUTF16", 11 | "stringToUTF32", 12 | "AsciiToString", 13 | "UTF8ToString", 14 | "UTF16ToString", 15 | "UTF32ToString", 16 | "intArrayFromString", 17 | "intArrayToString", 18 | "writeArrayToMemory", 19 | "loadDynamicLibrary" 20 | ] 21 | -------------------------------------------------------------------------------- /src/jspi_exports.json: -------------------------------------------------------------------------------- 1 | [ 2 | "sqlite3_close", 3 | "sqlite3_finalize", 4 | "sqlite3_open_v2", 5 | "sqlite3_prepare", 6 | "sqlite3_prepare16", 7 | "sqlite3_prepare_v2", 8 | "sqlite3_prepare16_v2", 9 | "sqlite3_prepare_v3", 10 | "sqlite3_prepare16_v3", 11 | "sqlite3_reset", 12 | "sqlite3_step" 13 | ] -------------------------------------------------------------------------------- /src/libadapters.h: -------------------------------------------------------------------------------- 1 | #ifndef __LIBADAPTERS_H__ 2 | #define __LIBADAPTERS_H__ 3 | 4 | // Declarations for synchronous and asynchronous JavaScript relay methods. 5 | // The function name contains the C signature of the JavaScript function. 6 | // The first two arguments of each relay method is the target (e.g. VFS) 7 | // and method name (e.g. xOpen) to call. The remaining arguments are the 8 | // parameters to the method. 9 | // 10 | // Relaying is necessary because Emscripten only allows calling a statically 11 | // defined JavaScript function via a C function pointer. 12 | #define P const void* 13 | #define I int 14 | #define J int64_t 15 | #define DECLARE(TYPE, NAME, ...) 
\ 16 | extern TYPE NAME(__VA_ARGS__); \ 17 | extern TYPE NAME##_async(__VA_ARGS__); 18 | 19 | DECLARE(I, ipp, P, P); 20 | DECLARE(I, ippp, P, P, P); 21 | DECLARE(void, vppp, P, P, P); 22 | DECLARE(I, ipppj, P, P, P, J); 23 | DECLARE(I, ipppi, P, P, P, I); 24 | DECLARE(I, ipppp, P, P, P, P); 25 | DECLARE(I, ipppip, P, P, P, I, P); 26 | DECLARE(void, vpppip, P, P, P, I, P); 27 | DECLARE(I, ippppi, P, P, P, P, I); 28 | DECLARE(I, ipppiii, P, P, P, I, I, I); 29 | DECLARE(I, ippppij, P, P, P, P, I, J); 30 | DECLARE(I, ippppip, P, P, P, P, I, P); 31 | DECLARE(I, ippipppp, P, P, I, P, P, P, P); 32 | DECLARE(I, ipppppip, P, P, P, P, P, I, P); 33 | DECLARE(I, ipppiiip, P, P, P, I, I, I, P); 34 | DECLARE(void, vppippii, P, P, I, P, P, I, I); 35 | #undef DECLARE 36 | #undef P 37 | #undef I 38 | #undef J 39 | 40 | #endif -------------------------------------------------------------------------------- /src/libadapters.js: -------------------------------------------------------------------------------- 1 | // Method names for these signatures must be in src/asyncify_imports.json. 2 | const SIGNATURES = [ 3 | 'ipp', // xProgress, xCommitHook 4 | 'ippp', // xClose, xSectorSize, xDeviceCharacteristics 5 | 'vppp', // xShmBarrier, xFinal 6 | 'ipppj', // xTruncate 7 | 'ipppi', // xSleep, xSync, xLock, xUnlock, xShmUnmap 8 | 'ipppp', // xFileSize, xCheckReservedLock, xCurrentTime, xCurrentTimeInt64 9 | 'ipppip', // xFileControl, xRandomness, xGetLastError 10 | 'vpppip', // xFunc, xStep 11 | 'ippppi', // xDelete 12 | 'ippppij', // xRead, xWrite 13 | 'ipppiii', // xShmLock 14 | 'ippppip', // xAccess, xFullPathname 15 | 'ippipppp', // xAuthorize 16 | 'ipppppip', // xOpen 17 | 'ipppiiip', // xShmMap 18 | 'vppippii', // xUpdateHook 19 | ]; 20 | 21 | // This object will define the methods callable from WebAssembly. 22 | // See https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html#implement-a-c-api-in-javascript 23 | // 24 | // At this writing, asynchronous JavaScript functions to be called from 25 | // WebAssembly must be statically defined, i.e. they cannot be registered 26 | // at runtime. The workaround here is to define synchronous and asynchronous 27 | // relaying functions for each needed call signature. 28 | // 29 | // On the C side, calls are made to the relaying function with one or two 30 | // prepended arguments - the first argument is a key to look up the callback 31 | // object and the second argument is the name of the method if the callback 32 | // object is not a function. 33 | const adapters = { 34 | $adapters_support: function() { 35 | // @ts-ignore 36 | // Expose handleAsync to library code. 37 | const handleAsync = typeof Asyncify === 'object' ? 38 | Asyncify.handleAsync.bind(Asyncify) : 39 | null; 40 | Module['handleAsync'] = handleAsync; 41 | 42 | // This map contains the objects to which calls will be relayed, e.g. 43 | // a VFS. The key is typically the corresponding WebAssembly pointer. 44 | const targets = new Map(); 45 | Module['setCallback'] = (key, target) => targets.set(key, target); 46 | Module['getCallback'] = key => targets.get(key); 47 | Module['deleteCallback'] = key => targets.delete(key); 48 | 49 | // @ts-ignore 50 | // Overwrite this function with the relay service function. 51 | adapters_support = function(isAsync, key, ...args) { 52 | // If the receiver found with the key is a function, just call it. 53 | // Otherwise, the next argument is the name of the method to be called. 
54 | const receiver = targets.get(key); 55 | let methodName = null; 56 | const f = typeof receiver === 'function' ? 57 | receiver : 58 | receiver[methodName = UTF8ToString(args.shift())]; 59 | 60 | if (isAsync) { 61 | // Call async function via handleAsync. This works for both 62 | // Asyncify and JSPI builds. 63 | if (handleAsync) { 64 | return handleAsync(() => f.apply(receiver, args)); 65 | } 66 | throw new Error('Synchronous WebAssembly cannot call async function'); 67 | } 68 | 69 | // The function should not be async so call it directly. 70 | const result = f.apply(receiver, args); 71 | if (typeof result?.then == 'function') { 72 | console.error('unexpected Promise', f); 73 | throw new Error(`${methodName} unexpectedly returned a Promise`); 74 | } 75 | return result; 76 | }; 77 | }, 78 | $adapters_support__deps: ['$UTF8ToString'], 79 | $adapters_support__postset: 'adapters_support();', 80 | }; 81 | 82 | function injectMethod(signature, isAsync) { 83 | const method = `${signature}${isAsync ? '_async' : ''}`; 84 | adapters[`${method}`] = isAsync ? 85 | // @ts-ignore 86 | function(...args) { return adapters_support(true, ...args) } : 87 | // @ts-ignore 88 | function(...args) { return adapters_support(false, ...args) }; 89 | adapters[`${method}__deps`] = ['$adapters_support']; 90 | adapters[`${method}__async`] = isAsync; 91 | 92 | // Emscripten "legalizes" 64-bit integer arguments by passing them as 93 | // two 32-bit signed integers. 94 | adapters[`${method}__sig`] = `${signature[0]}${signature.substring(1).replaceAll('j', 'ii')}`; 95 | } 96 | 97 | // For each function signature, inject a synchronous and asynchronous 98 | // relaying method definition. 99 | for (const signature of SIGNATURES) { 100 | injectMethod(signature, false); 101 | injectMethod(signature, true); 102 | } 103 | 104 | // @ts-ignore 105 | addToLibrary(adapters); -------------------------------------------------------------------------------- /src/libauthorizer.c: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | #include 3 | #include 4 | #include 5 | 6 | #include "libadapters.h" 7 | 8 | #define CALL_JS(SIGNATURE, KEY, ...) \ 9 | (asyncFlags ? \ 10 | SIGNATURE##_async(KEY, __VA_ARGS__) : \ 11 | SIGNATURE(KEY, __VA_ARGS__)) 12 | 13 | static int libauthorizer_xAuthorize( 14 | void* pApp, 15 | int iAction, 16 | const char* param3, 17 | const char* param4, 18 | const char* param5, 19 | const char* param6) { 20 | const int asyncFlags = pApp ? *(int *)pApp : 0; 21 | return CALL_JS(ippipppp, pApp, pApp, iAction, param3, param4, param5, param6); 22 | } 23 | 24 | int EMSCRIPTEN_KEEPALIVE libauthorizer_set_authorizer( 25 | sqlite3* db, 26 | int xAuthorizer, 27 | void* pApp) { 28 | return sqlite3_set_authorizer( 29 | db, 30 | xAuthorizer ? &libauthorizer_xAuthorize : NULL, 31 | pApp); 32 | } -------------------------------------------------------------------------------- /src/libauthorizer.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | // This file should be included in the build with --post-js. 
3 | 4 | (function() { 5 | const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor; 6 | let pAsyncFlags = 0; 7 | 8 | Module['set_authorizer'] = function(db, xAuthorizer, pApp) { 9 | if (pAsyncFlags) { 10 | Module['deleteCallback'](pAsyncFlags); 11 | Module['_sqlite3_free'](pAsyncFlags); 12 | pAsyncFlags = 0; 13 | } 14 | 15 | pAsyncFlags = Module['_sqlite3_malloc'](4); 16 | setValue(pAsyncFlags, xAuthorizer instanceof AsyncFunction ? 1 : 0, 'i32'); 17 | 18 | const result = ccall( 19 | 'libauthorizer_set_authorizer', 20 | 'number', 21 | ['number', 'number', 'number'], 22 | [db, xAuthorizer ? 1 : 0, pAsyncFlags]); 23 | if (!result && xAuthorizer) { 24 | Module['setCallback'](pAsyncFlags, (_, iAction, p3, p4, p5, p6) => { 25 | return xAuthorizer(pApp, iAction, p3, p4, p5, p6); 26 | }); 27 | } 28 | return result; 29 | }; 30 | })(); -------------------------------------------------------------------------------- /src/libfunction.c: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "libadapters.h" 9 | 10 | enum { 11 | xFunc, 12 | xStep, 13 | xFinal 14 | }; 15 | 16 | #define FUNC_JS(SIGNATURE, KEY, METHOD, ...) \ 17 | (asyncFlags & (1 << METHOD) ? \ 18 | SIGNATURE##_async(KEY, #METHOD, __VA_ARGS__) : \ 19 | SIGNATURE(KEY, #METHOD, __VA_ARGS__)) 20 | 21 | static void libfunction_xFunc(sqlite3_context* ctx, int argc, sqlite3_value** argv) { 22 | const void* pApp = sqlite3_user_data(ctx); 23 | const int asyncFlags = pApp ? *(int *)pApp : 0; 24 | FUNC_JS(vpppip, pApp, xFunc, ctx, argc, argv); 25 | } 26 | 27 | static void libfunction_xStep(sqlite3_context* ctx, int argc, sqlite3_value** argv) { 28 | const void* pApp = sqlite3_user_data(ctx); 29 | const int asyncFlags = pApp ? *(int *)pApp : 0; 30 | FUNC_JS(vpppip, pApp, xStep, ctx, argc, argv); 31 | } 32 | 33 | static void libfunction_xFinal(sqlite3_context* ctx) { 34 | const void* pApp = sqlite3_user_data(ctx); 35 | const int asyncFlags = pApp ? *(int *)pApp : 0; 36 | FUNC_JS(vppp, pApp, xFinal, ctx); 37 | } 38 | 39 | int EMSCRIPTEN_KEEPALIVE libfunction_create_function( 40 | sqlite3* db, 41 | const char* zFunctionName, 42 | int nArg, 43 | int eTextRep, 44 | void* pApp, 45 | void* xFunc, 46 | void* xStep, 47 | void* xFinal) { 48 | return sqlite3_create_function_v2( 49 | db, 50 | zFunctionName, 51 | nArg, 52 | eTextRep, 53 | pApp, 54 | xFunc ? &libfunction_xFunc : NULL, 55 | xStep ? &libfunction_xStep : NULL, 56 | xFinal ? &libfunction_xFinal : NULL, 57 | &sqlite3_free); 58 | } 59 | -------------------------------------------------------------------------------- /src/libfunction.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | // This file should be included in the build with --post-js. 3 | 4 | (function() { 5 | const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor; 6 | 7 | // This list of methods must match exactly with libfunction.c. 8 | const FUNC_METHODS = [ 9 | 'xFunc', 10 | 'xStep', 11 | 'xFinal' 12 | ]; 13 | 14 | const mapFunctionNameToKey = new Map(); 15 | 16 | Module['create_function'] = function(db, zFunctionName, nArg, eTextRep, pApp, xFunc, xStep, xFinal) { 17 | // Allocate some memory to store the async flags. 
In addition, this 18 | // pointer is passed to SQLite as the application data (the user's 19 | // application data is ignored), and is used to look up the JavaScript 20 | // target object. 21 | const pAsyncFlags = Module['_sqlite3_malloc'](4); 22 | const target = { xFunc, xStep, xFinal }; 23 | setValue(pAsyncFlags, FUNC_METHODS.reduce((mask, method, i) => { 24 | if (target[method] instanceof AsyncFunction) { 25 | return mask | 1 << i; 26 | } 27 | return mask; 28 | }, 0), 'i32'); 29 | 30 | const result = ccall( 31 | 'libfunction_create_function', 32 | 'number', 33 | ['number', 'string', 'number', 'number', 'number', 'number', 'number', 'number'], 34 | [ 35 | db, 36 | zFunctionName, 37 | nArg, 38 | eTextRep, 39 | pAsyncFlags, 40 | xFunc ? 1 : 0, 41 | xStep ? 1 : 0, 42 | xFinal? 1 : 0 43 | ]); 44 | if (!result) { 45 | if (mapFunctionNameToKey.has(zFunctionName)) { 46 | // Reclaim the old resources used with this name. 47 | const oldKey = mapFunctionNameToKey.get(zFunctionName); 48 | Module['deleteCallback'](oldKey); 49 | } 50 | mapFunctionNameToKey.set(zFunctionName, pAsyncFlags); 51 | Module['setCallback'](pAsyncFlags, { xFunc, xStep, xFinal }); 52 | } 53 | return result; 54 | }; 55 | })(); -------------------------------------------------------------------------------- /src/libhook.c: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | #include 3 | #include 4 | #include 5 | 6 | #include "libadapters.h" 7 | 8 | #define CALL_JS(SIGNATURE, KEY, ...) \ 9 | (asyncFlags ? \ 10 | SIGNATURE##_async(KEY, __VA_ARGS__) : \ 11 | SIGNATURE(KEY, __VA_ARGS__)) 12 | 13 | static int libhook_xCommitHook(void* pApp) { 14 | const int asyncFlags = pApp ? *(int *)pApp : 0; 15 | return CALL_JS(ipp, pApp, pApp); 16 | } 17 | 18 | static void libhook_xUpdateHook( 19 | void* pApp, 20 | int iUpdateType, 21 | const char* dbName, 22 | const char* tblName, 23 | sqlite3_int64 rowid) { 24 | int hi32 = ((rowid & 0xFFFFFFFF00000000LL) >> 32); 25 | int lo32 = (rowid & 0xFFFFFFFFLL); 26 | const int asyncFlags = pApp ? *(int *)pApp : 0; 27 | CALL_JS(vppippii, pApp, pApp, iUpdateType, dbName, tblName, lo32, hi32); 28 | } 29 | 30 | void EMSCRIPTEN_KEEPALIVE libhook_commit_hook(sqlite3* db, int xCommitHook, void* pApp) { 31 | sqlite3_commit_hook(db, xCommitHook ? &libhook_xCommitHook : NULL, pApp); 32 | } 33 | 34 | void EMSCRIPTEN_KEEPALIVE libhook_update_hook(sqlite3* db, int xUpdateHook, void* pApp) { 35 | sqlite3_update_hook(db, xUpdateHook ? &libhook_xUpdateHook : NULL, pApp); 36 | } -------------------------------------------------------------------------------- /src/libhook.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | // This file should be included in the build with --post-js. 3 | 4 | (function() { 5 | const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor; 6 | let pAsyncFlags = 0; 7 | 8 | Module['update_hook'] = function(db, xUpdateHook) { 9 | if (pAsyncFlags) { 10 | Module['deleteCallback'](pAsyncFlags); 11 | Module['_sqlite3_free'](pAsyncFlags); 12 | pAsyncFlags = 0; 13 | } 14 | 15 | pAsyncFlags = Module['_sqlite3_malloc'](4); 16 | setValue(pAsyncFlags, xUpdateHook instanceof AsyncFunction ? 1 : 0, 'i32'); 17 | 18 | ccall( 19 | 'libhook_update_hook', 20 | 'void', 21 | ['number', 'number', 'number'], 22 | [db, xUpdateHook ? 
1 : 0, pAsyncFlags]); 23 | if (xUpdateHook) { 24 | Module['setCallback'](pAsyncFlags, (_, iUpdateType, dbName, tblName, lo32, hi32) => { 25 | return xUpdateHook(iUpdateType, dbName, tblName, lo32, hi32); 26 | }); 27 | } 28 | }; 29 | })(); 30 | 31 | (function() { 32 | const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor; 33 | let pAsyncFlags = 0; 34 | 35 | Module['commit_hook'] = function(db, xCommitHook) { 36 | if (pAsyncFlags) { 37 | Module['deleteCallback'](pAsyncFlags); 38 | Module['_sqlite3_free'](pAsyncFlags); 39 | pAsyncFlags = 0; 40 | } 41 | 42 | pAsyncFlags = Module['_sqlite3_malloc'](4); 43 | setValue(pAsyncFlags, xCommitHook instanceof AsyncFunction ? 1 : 0, 'i32'); 44 | 45 | ccall( 46 | 'libhook_commit_hook', 47 | 'void', 48 | ['number', 'number', 'number'], 49 | [db, xCommitHook ? 1 : 0, pAsyncFlags]); 50 | if (xCommitHook) { 51 | Module['setCallback'](pAsyncFlags, (_) => { 52 | return xCommitHook(); 53 | }); 54 | } 55 | }; 56 | })(); 57 | -------------------------------------------------------------------------------- /src/libprogress.c: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | #include 3 | #include 4 | #include 5 | 6 | #include "libadapters.h" 7 | 8 | #define PROGRESS_JS(SIGNATURE, KEY, ...) \ 9 | (asyncFlags ? \ 10 | SIGNATURE##_async(KEY, __VA_ARGS__) : \ 11 | SIGNATURE(KEY, __VA_ARGS__)) 12 | 13 | static int libprogress_xProgress(void* pApp) { 14 | const int asyncFlags = pApp ? *(int *)pApp : 0; 15 | return PROGRESS_JS(ipp, pApp, pApp); 16 | } 17 | 18 | void EMSCRIPTEN_KEEPALIVE libprogress_progress_handler( 19 | sqlite3* db, 20 | int nOps, 21 | int xProgress, 22 | void* pApp) { 23 | sqlite3_progress_handler( 24 | db, 25 | nOps, 26 | xProgress ? &libprogress_xProgress : NULL, 27 | pApp); 28 | } -------------------------------------------------------------------------------- /src/libprogress.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | // This file should be included in the build with --post-js. 3 | 4 | (function() { 5 | const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor; 6 | let pAsyncFlags = 0; 7 | 8 | Module['progress_handler'] = function(db, nOps, xProgress, pApp) { 9 | if (pAsyncFlags) { 10 | Module['deleteCallback'](pAsyncFlags); 11 | Module['_sqlite3_free'](pAsyncFlags); 12 | pAsyncFlags = 0; 13 | } 14 | 15 | pAsyncFlags = Module['_sqlite3_malloc'](4); 16 | setValue(pAsyncFlags, xProgress instanceof AsyncFunction ? 1 : 0, 'i32'); 17 | 18 | ccall( 19 | 'libprogress_progress_handler', 20 | 'number', 21 | ['number', 'number', 'number', 'number'], 22 | [db, nOps, xProgress ? 1 : 0, pAsyncFlags]); 23 | if (xProgress) { 24 | Module['setCallback'](pAsyncFlags, _ => xProgress(pApp)); 25 | } 26 | }; 27 | })(); -------------------------------------------------------------------------------- /src/libvfs.c: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "libadapters.h" 9 | 10 | // This list of methods must match exactly with libvfs.js. 
11 | enum { 12 | xOpen, 13 | xDelete, 14 | xAccess, 15 | xFullPathname, 16 | xRandomness, 17 | xSleep, 18 | xCurrentTime, 19 | xGetLastError, 20 | xCurrentTimeInt64, 21 | 22 | xClose, 23 | xRead, 24 | xWrite, 25 | xTruncate, 26 | xSync, 27 | xFileSize, 28 | xLock, 29 | xUnlock, 30 | xCheckReservedLock, 31 | xFileControl, 32 | xSectorSize, 33 | xDeviceCharacteristics, 34 | xShmMap, 35 | xShmLock, 36 | xShmBarrier, 37 | xShmUnmap 38 | }; 39 | 40 | // Attach extra information to the VFS and file objects. 41 | typedef struct VFS { 42 | sqlite3_vfs base; 43 | int methodMask; // Bitmask of methods defined in JavaScript. 44 | int asyncMask; // Bitmask of methods that are asynchronous. 45 | } VFS; 46 | 47 | typedef struct VFSFile { 48 | sqlite3_file base; 49 | VFS* pVfs; // Pointer back to the VFS. 50 | } VFSFile; 51 | 52 | #define VFS_JS(SIGNATURE, KEY, METHOD, ...) \ 53 | (((VFS*)KEY)->asyncMask & (1 << METHOD) ? \ 54 | SIGNATURE##_async(KEY, #METHOD, __VA_ARGS__) : \ 55 | SIGNATURE(KEY, #METHOD, __VA_ARGS__)) 56 | 57 | static int libvfs_xClose(sqlite3_file* pFile) { 58 | return VFS_JS(ippp, ((VFSFile*)pFile)->pVfs, xClose, pFile); 59 | } 60 | 61 | static int libvfs_xRead(sqlite3_file* pFile, void* pData, int iAmt, sqlite3_int64 iOffset) { 62 | return VFS_JS(ippppij, ((VFSFile*)pFile)->pVfs, xRead, pFile, pData, iAmt, iOffset); 63 | } 64 | 65 | static int libvfs_xWrite(sqlite3_file* pFile, const void* pData, int iAmt, sqlite3_int64 iOffset) { 66 | return VFS_JS(ippppij, ((VFSFile*)pFile)->pVfs, xWrite, pFile, pData, iAmt, iOffset); 67 | } 68 | 69 | static int libvfs_xTruncate(sqlite3_file* pFile, sqlite3_int64 size) { 70 | return VFS_JS(ipppj, ((VFSFile*)pFile)->pVfs, xTruncate, pFile, size); 71 | } 72 | 73 | static int libvfs_xSync(sqlite3_file* pFile, int flags) { 74 | return VFS_JS(ipppi, ((VFSFile*)pFile)->pVfs, xSync, pFile, flags); 75 | } 76 | 77 | static int libvfs_xFileSize(sqlite3_file* pFile, sqlite3_int64* pSize) { 78 | return VFS_JS(ipppp, ((VFSFile*)pFile)->pVfs, xFileSize, pFile, pSize); 79 | } 80 | 81 | static int libvfs_xLock(sqlite3_file* pFile, int lockType) { 82 | return VFS_JS(ipppi, ((VFSFile*)pFile)->pVfs, xLock, pFile, lockType); 83 | } 84 | 85 | static int libvfs_xUnlock(sqlite3_file* pFile, int lockType) { 86 | return VFS_JS(ipppi, ((VFSFile*)pFile)->pVfs, xUnlock, pFile, lockType); 87 | } 88 | 89 | static int libvfs_xCheckReservedLock(sqlite3_file* pFile, int* pResOut) { 90 | return VFS_JS(ipppp, ((VFSFile*)pFile)->pVfs, xCheckReservedLock, pFile, pResOut); 91 | } 92 | 93 | static int libvfs_xFileControl(sqlite3_file* pFile, int flags, void* pOut) { 94 | return VFS_JS(ipppip, ((VFSFile*)pFile)->pVfs, xFileControl, pFile, flags, pOut); 95 | } 96 | 97 | static int libvfs_xSectorSize(sqlite3_file* pFile) { 98 | return VFS_JS(ippp, ((VFSFile*)pFile)->pVfs, xSectorSize, pFile); 99 | } 100 | 101 | static int libvfs_xDeviceCharacteristics(sqlite3_file* pFile) { 102 | return VFS_JS(ippp, ((VFSFile*)pFile)->pVfs, xDeviceCharacteristics, pFile); 103 | } 104 | 105 | static int libvfs_xShmMap(sqlite3_file* pFile, int iPg, int pgsz, int unused, void volatile** p) { 106 | return VFS_JS(ipppiiip, ((VFSFile*)pFile)->pVfs, xShmMap, pFile, iPg, pgsz, unused, p); 107 | } 108 | 109 | static int libvfs_xShmLock(sqlite3_file* pFile, int offset, int n, int flags) { 110 | return VFS_JS(ipppiii, ((VFSFile*)pFile)->pVfs, xShmLock, pFile, offset, n, flags); 111 | } 112 | 113 | static void libvfs_xShmBarrier(sqlite3_file* pFile) { 114 | VFS_JS(vppp, ((VFSFile*)pFile)->pVfs, xShmBarrier, pFile); 
115 | } 116 | 117 | static int libvfs_xShmUnmap(sqlite3_file* pFile, int deleteFlag) { 118 | return VFS_JS(ipppi, ((VFSFile*)pFile)->pVfs, xShmUnmap, pFile, deleteFlag); 119 | } 120 | 121 | 122 | static int libvfs_xOpen(sqlite3_vfs* pVfs, const char* zName, sqlite3_file* pFile, int flags, int* pOutFlags) { 123 | const int result = VFS_JS(ipppppip, pVfs, xOpen, pVfs, (void*)zName, pFile, flags, pOutFlags); 124 | 125 | VFS* pVfsExt = (VFS*)pVfs; 126 | sqlite3_io_methods* pMethods = (sqlite3_io_methods*)sqlite3_malloc(sizeof(sqlite3_io_methods)); 127 | pMethods->iVersion = 2; 128 | #define METHOD(NAME) pMethods->NAME = (pVfsExt->methodMask & (1 << NAME)) ? libvfs_##NAME : NULL 129 | METHOD(xClose); 130 | METHOD(xRead); 131 | METHOD(xWrite); 132 | METHOD(xTruncate); 133 | METHOD(xSync); 134 | METHOD(xFileSize); 135 | METHOD(xLock); 136 | METHOD(xUnlock); 137 | METHOD(xCheckReservedLock); 138 | METHOD(xFileControl); 139 | METHOD(xSectorSize); 140 | METHOD(xDeviceCharacteristics); 141 | METHOD(xShmMap); 142 | METHOD(xShmLock); 143 | METHOD(xShmBarrier); 144 | METHOD(xShmUnmap); 145 | #undef METHOD 146 | pFile->pMethods = pMethods; 147 | ((VFSFile*)pFile)->pVfs = pVfsExt; 148 | return result; 149 | } 150 | 151 | static int libvfs_xDelete(sqlite3_vfs* pVfs, const char* zName, int syncDir) { 152 | return VFS_JS(ippppi, pVfs, xDelete, pVfs, zName, syncDir); 153 | } 154 | 155 | static int libvfs_xAccess(sqlite3_vfs* pVfs, const char* zName, int flags, int* pResOut) { 156 | return VFS_JS(ippppip, pVfs, xAccess, pVfs, zName, flags, pResOut); 157 | } 158 | 159 | static int libvfs_xFullPathname(sqlite3_vfs* pVfs, const char* zName, int nOut, char* zOut) { 160 | return VFS_JS(ippppip, pVfs, xFullPathname, pVfs, zName, nOut, zOut); 161 | } 162 | 163 | static int libvfs_xRandomness(sqlite3_vfs* pVfs, int nBuf, char* zBuf) { 164 | return VFS_JS(ipppip, pVfs, xRandomness, pVfs, nBuf, zBuf); 165 | } 166 | 167 | static int libvfs_xSleep(sqlite3_vfs* pVfs, int microseconds) { 168 | return VFS_JS(ipppi, pVfs, xSleep, pVfs, microseconds); 169 | } 170 | 171 | static int libvfs_xCurrentTime(sqlite3_vfs* pVfs, double* pJulianDay) { 172 | return VFS_JS(ipppp, pVfs, xCurrentTime, pVfs, pJulianDay); 173 | } 174 | 175 | static int libvfs_xGetLastError(sqlite3_vfs* pVfs, int nBuf, char* zBuf) { 176 | return VFS_JS(ipppip, pVfs, xGetLastError, pVfs, nBuf, zBuf); 177 | } 178 | 179 | static int libvfs_xCurrentTimeInt64(sqlite3_vfs* pVfs, sqlite3_int64* pTime) { 180 | return VFS_JS(ipppp, pVfs, xCurrentTimeInt64, pVfs, pTime); 181 | } 182 | 183 | int EMSCRIPTEN_KEEPALIVE libvfs_vfs_register( 184 | const char* zName, 185 | int mxPathName, 186 | int methodMask, 187 | int asyncMask, 188 | int makeDefault, 189 | void** ppVfs) { 190 | // Get the current default VFS to use if methods are not defined. 191 | const sqlite3_vfs* backupVfs = sqlite3_vfs_find(NULL); 192 | 193 | // Allocate and populate the new VFS. 194 | VFS* vfs = (VFS*)sqlite3_malloc(sizeof(VFS)); 195 | if (!vfs) return SQLITE_NOMEM; 196 | bzero(vfs, sizeof(VFS)); 197 | 198 | vfs->base.iVersion = 2; 199 | vfs->base.szOsFile = sizeof(VFSFile); 200 | vfs->base.mxPathname = mxPathName; 201 | vfs->base.zName = strdup(zName); 202 | 203 | // The VFS methods go to the adapter implementations in this file, 204 | // or to the default VFS if the JavaScript method is not defined. 205 | #define METHOD(NAME) vfs->base.NAME = \ 206 | (methodMask & (1 << NAME)) ? 
libvfs_##NAME : backupVfs->NAME 207 | 208 | METHOD(xOpen); 209 | METHOD(xDelete); 210 | METHOD(xAccess); 211 | METHOD(xFullPathname); 212 | METHOD(xRandomness); 213 | METHOD(xSleep); 214 | METHOD(xCurrentTime); 215 | METHOD(xGetLastError); 216 | METHOD(xCurrentTimeInt64); 217 | #undef METHOD 218 | 219 | vfs->methodMask = methodMask; 220 | vfs->asyncMask = asyncMask; 221 | 222 | *ppVfs = vfs; 223 | return sqlite3_vfs_register(&vfs->base, makeDefault); 224 | } 225 | -------------------------------------------------------------------------------- /src/libvfs.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | // This file should be included in the build with --post-js. 3 | 4 | (function() { 5 | // This list of methods must match exactly with libvfs.c. 6 | const VFS_METHODS = [ 7 | 'xOpen', 8 | 'xDelete', 9 | 'xAccess', 10 | 'xFullPathname', 11 | 'xRandomness', 12 | 'xSleep', 13 | 'xCurrentTime', 14 | 'xGetLastError', 15 | 'xCurrentTimeInt64', 16 | 17 | 'xClose', 18 | 'xRead', 19 | 'xWrite', 20 | 'xTruncate', 21 | 'xSync', 22 | 'xFileSize', 23 | 'xLock', 24 | 'xUnlock', 25 | 'xCheckReservedLock', 26 | 'xFileControl', 27 | 'xSectorSize', 28 | 'xDeviceCharacteristics', 29 | 'xShmMap', 30 | 'xShmLock', 31 | 'xShmBarrier', 32 | 'xShmUnmap' 33 | ]; 34 | 35 | const mapVFSNameToKey = new Map(); 36 | 37 | Module['vfs_register'] = function(vfs, makeDefault) { 38 | // Determine which methods exist and which are asynchronous. This is 39 | // needed for the C wrapper to know which relaying function to call. 40 | let methodMask = 0; 41 | let asyncMask = 0; 42 | VFS_METHODS.forEach((method, i) => { 43 | if (vfs[method]) { 44 | methodMask |= 1 << i; 45 | if (vfs['hasAsyncMethod'](method)) { 46 | asyncMask |= 1 << i; 47 | } 48 | } 49 | }); 50 | 51 | // Allocate space for libvfs_vfs_register to write the sqlite3_vfs 52 | // pointer. This pointer will be used to look up the JavaScript VFS 53 | // object. 54 | const vfsReturn = Module['_sqlite3_malloc'](4); 55 | try { 56 | // Call the C function that makes the sqlite3_vfs_register() call. 57 | const result = ccall( 58 | 'libvfs_vfs_register', 59 | 'number', 60 | ['string', 'number', 'number', 'number', 'number', 'number'], 61 | [vfs.name, vfs.mxPathname, methodMask, asyncMask, makeDefault ? 1 : 0, vfsReturn]); 62 | if (!result) { 63 | if (mapVFSNameToKey.has(vfs.name)) { 64 | // Reclaim the old resources used with this name. 65 | const oldKey = mapVFSNameToKey.get(vfs.name); 66 | Module['deleteCallback'](oldKey); 67 | } 68 | 69 | // Associate the sqlite3_vfs* pointer with the JavaScript VFS instance. 70 | const key = getValue(vfsReturn, '*'); 71 | mapVFSNameToKey.set(vfs.name, key); 72 | Module['setCallback'](key, vfs); 73 | } 74 | return result; 75 | } finally { 76 | Module['_sqlite3_free'](vfsReturn); 77 | } 78 | }; 79 | })(); -------------------------------------------------------------------------------- /src/main.c: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 2 | #include 3 | #include 4 | 5 | // Some SQLite API functions take a pointer to a function that frees 6 | // memory. Although we could add a C binding to a JavaScript function 7 | // that calls sqlite3_free(), it is more efficient to pass the sqlite3_free 8 | // function pointer directly. This function provides the C pointer to 9 | // JavaScript. 
10 | void* EMSCRIPTEN_KEEPALIVE getSqliteFree() { 11 | return sqlite3_free; 12 | } 13 | 14 | int main() { 15 | sqlite3_initialize(); 16 | return 0; 17 | } -------------------------------------------------------------------------------- /src/types/globals.d.ts: -------------------------------------------------------------------------------- 1 | declare namespace Asyncify { 2 | function handleAsync(f: () => Promise); 3 | } 4 | 5 | declare function UTF8ToString(ptr: number): string; 6 | declare function lengthBytesUTF8(s: string): number; 7 | declare function stringToUTF8(s: string, p: number, n: number); 8 | declare function ccall(name: string, returns: string, args: Array, options?: object): any; 9 | declare function getValue(ptr: number, type: string): number; 10 | declare function setValue(ptr: number, value: number, type: string): number; 11 | declare function mergeInto(library: object, methods: object): void; 12 | declare function __onTablesChanged(db: number, opType: number, tableName: number, rowId: number); 13 | declare var HEAPU8: Uint8Array; 14 | declare var HEAPU32: Uint32Array; 15 | declare var LibraryManager; 16 | declare var Module; 17 | declare var _vfsAccess; 18 | declare var _vfsCheckReservedLock; 19 | declare var _vfsClose; 20 | declare var _vfsDelete; 21 | declare var _vfsDeviceCharacteristics; 22 | declare var _vfsFileControl; 23 | declare var _vfsFileSize; 24 | declare var _vfsLock; 25 | declare var _vfsOpen; 26 | declare var _vfsRead; 27 | declare var _vfsSectorSize; 28 | declare var _vfsSync; 29 | declare var _vfsTruncate; 30 | declare var _vfsUnlock; 31 | declare var _vfsWrite; 32 | 33 | declare var _jsFunc; 34 | declare var _jsStep; 35 | declare var _jsFinal; 36 | 37 | declare var _modStruct; 38 | declare var _modCreate; 39 | declare var _modConnect; 40 | declare var _modBestIndex; 41 | declare var _modDisconnect; 42 | declare var _modDestroy; 43 | declare var _modOpen; 44 | declare var _modClose; 45 | declare var _modFilter; 46 | declare var _modNext; 47 | declare var _modEof; 48 | declare var _modColumn; 49 | declare var _modRowid; 50 | declare var _modUpdate; 51 | declare var _modBegin; 52 | declare var _modSync; 53 | declare var _modCommit; 54 | declare var _modRollback; 55 | declare var _modFindFunction; 56 | declare var _modRename; 57 | 58 | declare var _jsAuth; 59 | 60 | declare var _jsProgress; 61 | -------------------------------------------------------------------------------- /src/types/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020" 4 | }, 5 | "files": ["index.d.ts"] 6 | } -------------------------------------------------------------------------------- /test/AccessHandlePoolVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'AccessHandlePoolVFS'; 9 | const BUILDS = ['default', 'asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 
| vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/IDBBatchAtomicVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | import SQLiteESMFactory from '../dist/wa-sqlite-async.mjs'; 9 | import * as SQLite from '../src/sqlite-api.js'; 10 | import { IDBBatchAtomicVFS } from "../src/examples/IDBBatchAtomicVFS.js"; 11 | 12 | const CONFIG = 'IDBBatchAtomicVFS'; 13 | const BUILDS = ['asyncify', 'jspi']; 14 | 15 | const supportsJSPI = await TestContext.supportsJSPI(); 16 | 17 | describe(CONFIG, function() { 18 | for (const build of BUILDS) { 19 | if (build === 'jspi' && !supportsJSPI) return; 20 | 21 | describe(build, function() { 22 | const context = new TestContext({ build, config: CONFIG }); 23 | 24 | vfs_xAccess(context); 25 | vfs_xOpen(context); 26 | vfs_xClose(context); 27 | vfs_xRead(context); 28 | vfs_xWrite(context); 29 | }); 30 | } 31 | 32 | it('should upgrade v5', async function() { 33 | await idbX(indexedDB.deleteDatabase('test')); 34 | 35 | { 36 | // Load IndexedDB with v5 data. 37 | const db = await new Promise((resolve, reject) => { 38 | const request = indexedDB.open('test', 5); 39 | request.onupgradeneeded = () => { 40 | const db = request.result; 41 | db.createObjectStore('blocks', { 42 | keyPath: ['path', 'offset', 'version'] 43 | }).createIndex('version', ['path', 'version']); 44 | }; 45 | request.onsuccess = () => resolve(request.result); 46 | request.onerror = () => reject(request.error); 47 | }); 48 | 49 | const data = await fetch(new URL('./data/idbv5.json', import.meta.url)) 50 | .then(response => response.json()); 51 | const blocks = db.transaction('blocks', 'readwrite').objectStore('blocks'); 52 | await Promise.all(data.blocks.map(block => { 53 | block.data = new Uint8Array(block.data); 54 | return idbX(blocks.put(block)); 55 | })); 56 | db.close(); 57 | } 58 | 59 | // Initialize SQLite. 
60 | const module = await SQLiteESMFactory(); 61 | const sqlite3 = SQLite.Factory(module); 62 | 63 | const vfs = await IDBBatchAtomicVFS.create('test', module); 64 | // @ts-ignore 65 | sqlite3.vfs_register(vfs, true); 66 | 67 | const db = await sqlite3.open_v2('demo'); 68 | 69 | let integrity = ''; 70 | await sqlite3.exec(db, 'PRAGMA integrity_check', (row, columns) => { 71 | integrity = /** @type {string} */(row[0]); 72 | }); 73 | expect(integrity).toBe('ok'); 74 | 75 | const rows = []; 76 | await sqlite3.exec(db, 'SELECT x FROM foo ORDER BY rowid LIMIT 3', (row, columns) => { 77 | rows.push(row[0]); 78 | }); 79 | expect(rows).toEqual([1, 2, 3]); 80 | 81 | await sqlite3.close(db); 82 | await vfs.close(); 83 | 84 | await idbX(indexedDB.deleteDatabase('test')); 85 | }); 86 | }); 87 | 88 | /** 89 | * @param {IDBRequest} request 90 | * @returns {Promise} 91 | */ 92 | function idbX(request) { 93 | return new Promise((resolve, reject) => { 94 | request.onsuccess = () => resolve(request.result); 95 | request.onerror = () => reject(request.error); 96 | }); 97 | } -------------------------------------------------------------------------------- /test/IDBMirrorVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'IDBMirrorVFS'; 9 | const BUILDS = ['asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/MemoryAsyncVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'MemoryAsyncVFS'; 9 | const BUILDS = ['asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/MemoryVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } 
from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'MemoryVFS'; 9 | const BUILDS = ['default', 'asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/OPFSAdaptiveVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'OPFSAdaptiveVFS'; 9 | const BUILDS = ['asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/OPFSAnyContextVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'OPFSAnyContextVFS'; 9 | const BUILDS = ['asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/OPFSCoopSyncVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'OPFSCoopSyncVFS'; 9 | const BUILDS = ['default', 'asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ 
build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/OPFSPermutedVFS.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { vfs_xOpen } from "./vfs_xOpen.js"; 3 | import { vfs_xAccess } from "./vfs_xAccess.js"; 4 | import { vfs_xClose } from "./vfs_xClose.js"; 5 | import { vfs_xRead } from "./vfs_xRead.js"; 6 | import { vfs_xWrite } from "./vfs_xWrite.js"; 7 | 8 | const CONFIG = 'OPFSPermutedVFS'; 9 | const BUILDS = ['asyncify', 'jspi']; 10 | 11 | const supportsJSPI = await TestContext.supportsJSPI(); 12 | 13 | describe(CONFIG, function() { 14 | for (const build of BUILDS) { 15 | if (build === 'jspi' && !supportsJSPI) return; 16 | 17 | describe(build, function() { 18 | const context = new TestContext({ build, config: CONFIG }); 19 | 20 | vfs_xAccess(context); 21 | vfs_xOpen(context); 22 | vfs_xClose(context); 23 | vfs_xRead(context); 24 | vfs_xWrite(context); 25 | }); 26 | } 27 | }); 28 | -------------------------------------------------------------------------------- /test/TestContext.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | 3 | const TEST_WORKER_URL = './test-worker.js'; 4 | const TEST_WORKER_TERMINATE = true; 5 | 6 | const mapProxyToReleaser = new WeakMap(); 7 | const workerFinalization = new FinalizationRegistry(release => release()); 8 | 9 | /** 10 | * @typedef TestContextParams 11 | * @property {string} [build] 12 | * @property {string} [config] 13 | * @property {boolean} [reset] 14 | */ 15 | 16 | /** @type {TestContextParams} */ 17 | const DEFAULT_PARAMS = Object.freeze({ 18 | build: 'default', 19 | config: 'default', 20 | reset: true 21 | }); 22 | 23 | export class TestContext { 24 | #params = structuredClone(DEFAULT_PARAMS); 25 | 26 | /** 27 | * @param {TestContextParams} params 28 | */ 29 | constructor(params = {}) { 30 | Object.assign(this.#params, params); 31 | } 32 | 33 | async create(extras = {}) { 34 | const url = new URL(TEST_WORKER_URL, import.meta.url); 35 | for (const [key, value] of Object.entries(this.#params)) { 36 | url.searchParams.set(key, value.toString()); 37 | } 38 | for (const [key, value] of Object.entries(extras)) { 39 | url.searchParams.set(key, value.toString()); 40 | } 41 | 42 | const worker = new Worker(url, { type: 'module' }); 43 | const port = await new Promise(resolve => { 44 | worker.addEventListener('message', (event) => { 45 | if (event.ports[0]) { 46 | return resolve(event.ports[0]); 47 | } 48 | const e = new Error(event.data.message); 49 | throw Object.assign(e, event.data); 50 | }, { once: true }); 51 | }); 52 | 53 | const proxy = Comlink.wrap(port); 54 | if (TEST_WORKER_TERMINATE) { 55 | function releaser() { 56 | worker.terminate(); 57 | } 58 | mapProxyToReleaser.set(proxy, releaser); 59 | workerFinalization.register(proxy, releaser); 60 | } 61 | 62 | return proxy; 63 | } 64 | 65 | async destroy(proxy) { 66 | proxy[Comlink.releaseProxy](); 67 | const releaser = mapProxyToReleaser.get(proxy); 68 | if (releaser) { 69 | workerFinalization.unregister(releaser); 70 | releaser(); 71 | } 72 | } 73 | 74 | // https://github.com/WebAssembly/js-promise-integration/issues/21#issuecomment-1634843621 75 | static async supportsJSPI() { 76 | try { 77 | 
const m = new Uint8Array([ 78 | 0, 97, 115, 109, 1, 0, 0, 0, 1, 5, 1, 96, 1, 111, 0, 3, 2, 1, 0, 7, 5, 1, 79 | 1, 111, 0, 0, 10, 4, 1, 2, 0, 11, 80 | ]); 81 | const { instance } = await WebAssembly.instantiate(m); 82 | // @ts-ignore 83 | new WebAssembly.Function( 84 | { 85 | parameters: [], 86 | results: ["externref"], 87 | }, 88 | instance.exports.o, 89 | { promising: "first" } 90 | ); 91 | return true; 92 | } catch (e) { 93 | return false; 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /test/api.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { api_exec } from "./api_exec.js"; 3 | import { api_misc } from "./api_misc.js"; 4 | import { api_statements } from "./api_statements.js"; 5 | 6 | const ALL_BUILDS = ['default', 'asyncify']; 7 | const ASYNC_BUILDS = ['asyncify']; 8 | 9 | const supportsJSPI = await TestContext.supportsJSPI(); 10 | if (supportsJSPI) { 11 | ALL_BUILDS.push('jspi'); 12 | ASYNC_BUILDS.push('jspi'); 13 | } 14 | 15 | /** @type {Map} */ 16 | const CONFIGS = new Map([ 17 | ['', ALL_BUILDS], 18 | ['MemoryVFS', ALL_BUILDS], 19 | ['AccessHandlePoolVFS', ALL_BUILDS], 20 | ['OPFSCoopSyncVFS', ALL_BUILDS], 21 | ['MemoryAsyncVFS', ASYNC_BUILDS], 22 | ['IDBBatchAtomicVFS', ASYNC_BUILDS], 23 | ['IDBMirrorVFS', ASYNC_BUILDS], 24 | ['OPFSAdaptiveVFS', ASYNC_BUILDS], 25 | ['OPFSAnyContextVFS', ASYNC_BUILDS], 26 | ['OPFSPermutedVFS', ASYNC_BUILDS], 27 | ]); 28 | 29 | describe('SQLite API', function() { 30 | for (const [config, builds] of CONFIGS) { 31 | describe(config, function() { 32 | for (const build of builds) { 33 | describe(build, function() { 34 | apiSpecs(build, config); 35 | }); 36 | } 37 | }); 38 | } 39 | }); 40 | 41 | function apiSpecs(build, config) { 42 | const context = new TestContext({ build, config }); 43 | 44 | describe(`SQLite ${build} ${config}`, function() { 45 | api_exec(context); 46 | api_misc(context); 47 | api_statements(context); 48 | }); 49 | } 50 | -------------------------------------------------------------------------------- /test/api_exec.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | import * as SQLite from '../src/sqlite-api.js'; 3 | 4 | export function api_exec(context) { 5 | describe('exec', function() { 6 | let proxy, sqlite3, db; 7 | beforeEach(async function() { 8 | proxy = await context.create(); 9 | sqlite3 = proxy.sqlite3; 10 | db = await sqlite3.open_v2('demo'); 11 | }); 12 | 13 | afterEach(async function() { 14 | await sqlite3.close(db); 15 | await context.destroy(proxy); 16 | }); 17 | 18 | it('should execute a query', async function() { 19 | let rc; 20 | rc = await sqlite3.exec(db, 'CREATE TABLE t(x)'); 21 | expect(rc).toEqual(SQLite.SQLITE_OK); 22 | 23 | rc = await sqlite3.exec(db, 'INSERT INTO t VALUES (1), (2), (3)'); 24 | expect(rc).toEqual(SQLite.SQLITE_OK); 25 | 26 | const nChanges = await sqlite3.changes(db); 27 | expect(nChanges).toEqual(3); 28 | }); 29 | 30 | it('should execute multiple queries', async function() { 31 | let rc; 32 | rc = await sqlite3.exec(db, ` 33 | CREATE TABLE t(x); 34 | INSERT INTO t VALUES (1), (2), (3); 35 | `); 36 | expect(rc).toEqual(SQLite.SQLITE_OK); 37 | await expectAsync(sqlite3.changes(db)).toBeResolvedTo(3); 38 | }); 39 | 40 | it('should return query results via callback', async function() { 41 | const results = { rows: [], columns: [] }; 42 | const rc = await 
sqlite3.exec(db, ` 43 | CREATE TABLE t(x); 44 | INSERT INTO t VALUES (1), (2), (3); 45 | SELECT * FROM t ORDER BY x; 46 | `, Comlink.proxy((row, columns) => { 47 | if (columns.length) { 48 | results.columns = columns; 49 | results.rows.push(row); 50 | } 51 | })); 52 | expect(rc).toEqual(SQLite.SQLITE_OK); 53 | expect(results).toEqual({ columns: ['x'], rows: [[1], [2], [3]] }); 54 | }); 55 | 56 | it('should allow a transaction to span multiple calls', async function() { 57 | let rc; 58 | rc = await sqlite3.get_autocommit(db); 59 | expect(rc).not.toEqual(0); 60 | 61 | rc = await sqlite3.exec(db, 'BEGIN TRANSACTION'); 62 | expect(rc).toEqual(SQLite.SQLITE_OK); 63 | 64 | rc = await sqlite3.get_autocommit(db); 65 | expect(rc).toEqual(0); 66 | 67 | rc = await sqlite3.exec(db, ` 68 | CREATE TABLE t AS 69 | WITH RECURSIVE cnt(x) AS ( 70 | SELECT 1 71 | UNION ALL 72 | SELECT x+1 FROM cnt 73 | LIMIT 100 74 | ) 75 | SELECT x FROM cnt; 76 | `); 77 | expect(rc).toEqual(SQLite.SQLITE_OK); 78 | 79 | rc = await sqlite3.get_autocommit(db); 80 | expect(rc).toEqual(0); 81 | 82 | rc = await sqlite3.exec(db, 'COMMIT'); 83 | expect(rc).toEqual(SQLite.SQLITE_OK); 84 | 85 | rc = await sqlite3.get_autocommit(db); 86 | expect(rc).not.toEqual(0); 87 | }); 88 | }); 89 | } -------------------------------------------------------------------------------- /test/api_misc.js: -------------------------------------------------------------------------------- 1 | import * as SQLite from '../src/sqlite-api.js'; 2 | 3 | export function api_misc(context) { 4 | describe('libversion', function() { 5 | let proxy, sqlite3, db; 6 | beforeEach(async function() { 7 | proxy = await context.create(); 8 | sqlite3 = proxy.sqlite3; 9 | db = await sqlite3.open_v2('demo'); 10 | }); 11 | 12 | afterEach(async function() { 13 | await sqlite3.close(db); 14 | await context.destroy(proxy); 15 | }); 16 | 17 | it('should return the library version', async function() { 18 | const versionString = await sqlite3.libversion(); 19 | expect(versionString).toMatch(/^\d+\.\d+\.\d+$/); 20 | 21 | const components = versionString.split('.') 22 | .map((component, i) => { 23 | return i ? 
component.padStart(3, '0') : component; 24 | }); 25 | 26 | const versionNumber = await sqlite3.libversion_number(); 27 | expect(versionNumber.toString()).toEqual(components.join('')); 28 | }); 29 | }); 30 | 31 | describe('limit', function() { 32 | let proxy, sqlite3, db; 33 | beforeEach(async function() { 34 | proxy = await context.create(); 35 | sqlite3 = proxy.sqlite3; 36 | db = await sqlite3.open_v2('demo'); 37 | }); 38 | 39 | afterEach(async function() { 40 | await sqlite3.close(db); 41 | await context.destroy(proxy); 42 | }); 43 | 44 | it('should constrain usage', async function() { 45 | const sql = ` 46 | SELECT 1, 2, 3, 4, 5, 6; 47 | `.trim(); 48 | 49 | let rc; 50 | await expectAsync(sqlite3.exec(db, sql)).toBeResolvedTo(SQLite.SQLITE_OK); 51 | 52 | rc = await sqlite3.limit(db, SQLite.SQLITE_LIMIT_COLUMN, 5); 53 | expect(rc).toBeGreaterThan(0); 54 | 55 | await expectAsync(sqlite3.exec(db, sql)).toBeRejectedWithError(/too many columns/); 56 | 57 | rc = await sqlite3.limit(db, SQLite.SQLITE_LIMIT_COLUMN, rc); 58 | expect(rc).toEqual(5); 59 | 60 | await expectAsync(sqlite3.exec(db, sql)).toBeResolvedTo(SQLite.SQLITE_OK); 61 | }); 62 | }); 63 | } -------------------------------------------------------------------------------- /test/sql.test.js: -------------------------------------------------------------------------------- 1 | import { TestContext } from "./TestContext.js"; 2 | import { sql_0001 } from "./sql_0001.js"; 3 | import { sql_0002 } from "./sql_0002.js"; 4 | import { sql_0003 } from "./sql_0003.js"; 5 | import { sql_0004 } from "./sql_0004.js"; 6 | import { sql_0005 } from "./sql_0005.js"; 7 | 8 | const ALL_BUILDS = ['default', 'asyncify', 'jspi']; 9 | const ASYNC_BUILDS = ['asyncify', 'jspi']; 10 | 11 | // Not all browsers support JSPI yet. 12 | const supportsJSPI = await TestContext.supportsJSPI(); 13 | 14 | /** @type {Map} */ 15 | const CONFIGS = new Map([ 16 | ['', ALL_BUILDS], 17 | ['MemoryVFS', ALL_BUILDS], 18 | ['AccessHandlePoolVFS', ALL_BUILDS], 19 | ['OPFSCoopSyncVFS', ALL_BUILDS], 20 | ['MemoryAsyncVFS', ASYNC_BUILDS], 21 | ['IDBBatchAtomicVFS', ASYNC_BUILDS], 22 | ['IDBMirrorVFS', ASYNC_BUILDS], 23 | ['OPFSAdaptiveVFS', ASYNC_BUILDS], 24 | ['OPFSAnyContextVFS', ASYNC_BUILDS], 25 | ['OPFSPermutedVFS', ASYNC_BUILDS], 26 | ]); 27 | 28 | const DISALLOWS_PAGE_SIZE_CHANGE = ['IDBBatchAtomicVFS', 'IDBMirrorVFS', 'OPFSPermutedVFS', 'FLOOR']; 29 | const NOT_PERSISTENT = ['', 'MemoryVFS', 'MemoryAsyncVFS']; 30 | const SINGLE_CONNECTION = ['', 'MemoryVFS', 'MemoryAsyncVFS', 'AccessHandlePoolVFS']; 31 | 32 | describe('SQL', function() { 33 | for (const [config, builds] of CONFIGS) { 34 | describe(config, function() { 35 | for (const build of builds) { 36 | // Skip JSPI tests if the browser does not support it. 37 | if (build === 'jspi' && !supportsJSPI) continue; 38 | 39 | describe(build, function() { 40 | sqlSpecs(build, config); 41 | }); 42 | } 43 | }); 44 | } 45 | }); 46 | 47 | function sqlSpecs(build, config) { 48 | const context = new TestContext({ build, config }); 49 | 50 | sql_0001(context); 51 | sql_0002(context); 52 | if (!DISALLOWS_PAGE_SIZE_CHANGE.includes(config)) { 53 | // These tests change the page size. 54 | sql_0003(context); 55 | } 56 | if (!NOT_PERSISTENT.includes(config)) { 57 | // These tests require persistent storage. 58 | sql_0004(context); 59 | } 60 | if (!SINGLE_CONNECTION.includes(config)) { 61 | // These tests require multiple connections. 
62 | sql_0005(context); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /test/sql_0001.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | 3 | export function sql_0001(context) { 4 | describe('sql_0001', function() { 5 | let proxy, sqlite3, db; 6 | beforeEach(async function() { 7 | proxy = await context.create(); 8 | sqlite3 = proxy.sqlite3; 9 | db = await sqlite3.open_v2('demo'); 10 | }); 11 | 12 | afterEach(async function() { 13 | await sqlite3.close(db); 14 | await context.destroy(proxy); 15 | }); 16 | 17 | it('should rollback a transaction', async function() { 18 | let count; 19 | await sqlite3.exec(db, ` 20 | CREATE TABLE foo (x PRIMARY KEY); 21 | INSERT INTO foo VALUES ('foo'), ('bar'), ('baz'); 22 | SELECT COUNT(*) FROM foo; 23 | `, Comlink.proxy(row => count = row[0])); 24 | expect(count).toBe(3); 25 | 26 | count = undefined; 27 | await sqlite3.exec(db, ` 28 | BEGIN TRANSACTION; 29 | WITH numbers(n) AS (SELECT 1 UNION ALL SELECT n + 1 FROM numbers LIMIT 100) 30 | INSERT INTO foo SELECT * FROM numbers; 31 | SELECT COUNT(*) FROM foo; 32 | `, Comlink.proxy(row => count = row[0])); 33 | expect(count).toBe(103); 34 | 35 | count = undefined; 36 | await sqlite3.exec(db, ` 37 | ROLLBACK; 38 | SELECT COUNT(*) FROM foo; 39 | `, Comlink.proxy(row => count = row[0])); 40 | expect(count).toBe(3); 41 | 42 | let checkStatus; 43 | await sqlite3.exec(db, ` 44 | PRAGMA integrity_check; 45 | `, Comlink.proxy(row => checkStatus = row[0])); 46 | expect(checkStatus).toBe('ok'); 47 | }); 48 | }); 49 | } 50 | -------------------------------------------------------------------------------- /test/sql_0002.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | 3 | export function sql_0002(context) { 4 | describe('sql_0002', function() { 5 | let proxy, sqlite3, db; 6 | beforeEach(async function() { 7 | proxy = await context.create(); 8 | sqlite3 = proxy.sqlite3; 9 | db = await sqlite3.open_v2('demo'); 10 | }); 11 | 12 | afterEach(async function() { 13 | await sqlite3.close(db); 14 | await context.destroy(proxy); 15 | }); 16 | 17 | it('should vacuum to minimize page count', async function() { 18 | await sqlite3.exec(db, ` 19 | CREATE TABLE t AS 20 | WITH numbers(n) AS 21 | (SELECT 1 UNION ALL SELECT n + 1 FROM numbers LIMIT 10000) 22 | SELECT n FROM numbers; 23 | `); 24 | 25 | let nPagesBeforeVacuum; 26 | await sqlite3.exec(db, ` 27 | PRAGMA page_count; 28 | `, Comlink.proxy(row => nPagesBeforeVacuum = row[0])); 29 | 30 | await sqlite3.exec(db, ` 31 | DELETE FROM t WHERE sqrt(n) != floor(sqrt(n)); 32 | `); 33 | 34 | await sqlite3.exec(db, ` 35 | VACUUM; 36 | `); 37 | 38 | let nPagesAfterVacuum; 39 | await sqlite3.exec(db, ` 40 | PRAGMA page_count; 41 | `, Comlink.proxy(row => nPagesAfterVacuum = row[0])); 42 | 43 | expect(nPagesAfterVacuum).toBeLessThan(nPagesBeforeVacuum); 44 | 45 | let checkStatus; 46 | await sqlite3.exec(db, ` 47 | PRAGMA integrity_check; 48 | `, Comlink.proxy(row => checkStatus = row[0])); 49 | expect(checkStatus).toBe('ok'); 50 | }); 51 | }); 52 | } 53 | -------------------------------------------------------------------------------- /test/sql_0003.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | 3 | export function sql_0003(context) { 4 | describe('sql_0003', function() { 5 | let proxy, sqlite3, db; 6 | 
beforeEach(async function() { 7 | proxy = await context.create(); 8 | sqlite3 = proxy.sqlite3; 9 | db = await sqlite3.open_v2('demo'); 10 | }); 11 | 12 | afterEach(async function() { 13 | await sqlite3.close(db); 14 | await context.destroy(proxy); 15 | }); 16 | 17 | it('should vacuum to decrease page size', async function() { 18 | await sqlite3.exec(db, ` 19 | PRAGMA page_size=8192; 20 | CREATE TABLE t AS 21 | WITH numbers(n) AS 22 | (SELECT 1 UNION ALL SELECT n + 1 FROM numbers LIMIT 10000) 23 | SELECT n FROM numbers; 24 | `); 25 | 26 | let pageSizeBeforeVacuum; 27 | await sqlite3.exec(db, ` 28 | PRAGMA page_size; 29 | `, Comlink.proxy(row => pageSizeBeforeVacuum = row[0])); 30 | expect(pageSizeBeforeVacuum).toBe(8192); 31 | 32 | await sqlite3.exec(db, ` 33 | PRAGMA page_size=4096; 34 | VACUUM; 35 | `); 36 | 37 | let pageSizeAfterVacuum; 38 | await sqlite3.exec(db, ` 39 | PRAGMA page_size; 40 | `, Comlink.proxy(row => pageSizeAfterVacuum = row[0])); 41 | expect(pageSizeAfterVacuum).toBe(4096); 42 | 43 | let checkStatus; 44 | await sqlite3.exec(db, ` 45 | PRAGMA integrity_check; 46 | `, Comlink.proxy(row => checkStatus = row[0])); 47 | expect(checkStatus).toBe('ok'); 48 | }); 49 | 50 | it('should vacuum to increase page size', async function() { 51 | await sqlite3.exec(db, ` 52 | PRAGMA page_size=8192; 53 | CREATE TABLE t AS 54 | WITH numbers(n) AS 55 | (SELECT 1 UNION ALL SELECT n + 1 FROM numbers LIMIT 10000) 56 | SELECT n FROM numbers; 57 | `); 58 | 59 | let pageSizeBeforeVacuum; 60 | await sqlite3.exec(db, ` 61 | PRAGMA page_size; 62 | `, Comlink.proxy(row => pageSizeBeforeVacuum = row[0])); 63 | expect(pageSizeBeforeVacuum).toBe(8192); 64 | 65 | await sqlite3.exec(db, ` 66 | PRAGMA page_size=16384; 67 | VACUUM; 68 | `); 69 | 70 | let pageSizeAfterVacuum; 71 | await sqlite3.exec(db, ` 72 | PRAGMA page_size; 73 | `, Comlink.proxy(row => pageSizeAfterVacuum = row[0])); 74 | expect(pageSizeAfterVacuum).toBe(16384); 75 | 76 | let checkStatus; 77 | await sqlite3.exec(db, ` 78 | PRAGMA integrity_check; 79 | `, Comlink.proxy(row => checkStatus = row[0])); 80 | expect(checkStatus).toBe('ok'); 81 | }); 82 | }); 83 | } 84 | -------------------------------------------------------------------------------- /test/sql_0004.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | 3 | export function sql_0004(context) { 4 | const cleanup = []; 5 | beforeEach(async function() { 6 | cleanup.splice(0); 7 | }); 8 | 9 | afterEach(async function() { 10 | for (const fn of cleanup) { 11 | await fn(); 12 | } 13 | }); 14 | 15 | describe('sql_0004', function() { 16 | it('should recover after crash', async function() { 17 | const proxyA = await context.create(); 18 | try { 19 | const sqlite3 = proxyA.sqlite3; 20 | const db = await sqlite3.open_v2('demo'); 21 | await sqlite3.exec(db, ` 22 | PRAGMA cache_size=0; 23 | CREATE TABLE t(x); 24 | INSERT INTO t VALUES (1), (2), (3); 25 | `); 26 | 27 | let sum; 28 | await sqlite3.exec(db, ` 29 | SELECT sum(x) FROM t; 30 | `, Comlink.proxy(row => sum = row[0])); 31 | expect(sum).toBe(6); 32 | 33 | let check; 34 | await sqlite3.exec(db, ` 35 | PRAGMA integrity_check; 36 | `, Comlink.proxy(row => check = row[0])); 37 | expect(check).toBe('ok'); 38 | 39 | // Begin a transaction but don't commit it. 
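        // With cache_size=0, uncommitted pages are forced out to storage, and
        // destroying the proxy in the finally block terminates the worker
        // while the transaction is still open, simulating a crash. The second
        // connection below (created with reset: false) is expected to roll
        // the interrupted transaction back, so the sum and integrity checks
        // should still see only the original three rows.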
40 | await sqlite3.exec(db, ` 41 | BEGIN TRANSACTION; 42 | WITH RECURSIVE cnt(x) AS 43 | (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10000) 44 | INSERT INTO t SELECT * FROM cnt; 45 | `); 46 | } finally { 47 | await context.destroy(proxyA); 48 | } 49 | 50 | await new Promise(resolve => setTimeout(resolve, 250)); 51 | 52 | const proxyB = await context.create({ reset: false }); 53 | try { 54 | const sqlite3 = proxyB.sqlite3; 55 | const db = await sqlite3.open_v2('demo'); 56 | 57 | let sum; 58 | await sqlite3.exec(db, ` 59 | SELECT sum(x) FROM t; 60 | `, Comlink.proxy(row => sum = row[0])); 61 | expect(sum).toBe(6); 62 | 63 | let check; 64 | await sqlite3.exec(db, ` 65 | PRAGMA integrity_check; 66 | `, Comlink.proxy(row => check = row[0])); 67 | expect(check).toBe('ok'); 68 | 69 | await sqlite3.exec(db, ` 70 | INSERT INTO t VALUES (4), (5); 71 | `); 72 | await sqlite3.exec(db, ` 73 | SELECT sum(x) FROM t; 74 | `, Comlink.proxy(row => sum = row[0])); 75 | expect(sum).toBe(15); 76 | } finally { 77 | await context.destroy(proxyB); 78 | } 79 | }); 80 | }); 81 | } -------------------------------------------------------------------------------- /test/sql_0005.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | 3 | export function sql_0005(context) { 4 | describe('sql_0005', function() { 5 | beforeAll(async function() { 6 | // Clear persistent storage. 7 | const proxy = await context.create(); 8 | await context.destroy(proxy); 9 | }); 10 | 11 | const cleanup = []; 12 | beforeEach(async function() { 13 | cleanup.splice(0); 14 | }); 15 | 16 | afterEach(async function() { 17 | for (const fn of cleanup) { 18 | await fn(); 19 | } 20 | }); 21 | 22 | it('should transact atomically', async function() { 23 | const instances = []; 24 | for (let i = 0; i < 8; ++i) { 25 | const proxy = await context.create({ reset: false }); 26 | const sqlite3 = proxy.sqlite3; 27 | const db = await sqlite3.open_v2('demo'); 28 | instances.push({ sqlite3, db }); 29 | cleanup.push(async () => { 30 | await sqlite3.close(db); 31 | await context.destroy(proxy); 32 | }); 33 | 34 | if (i === 0) { 35 | await sqlite3.exec(db, ` 36 | BEGIN IMMEDIATE; 37 | CREATE TABLE IF NOT EXISTS t(key PRIMARY KEY, value); 38 | INSERT OR IGNORE INTO t VALUES ('foo', 0); 39 | COMMIT; 40 | `); 41 | } 42 | } 43 | 44 | const iterations = 32; 45 | const values = new Set(); 46 | await Promise.all(instances.map(async instance => { 47 | for (let i = 0; i < iterations; ++i) { 48 | const rows = await transact(instance, ` 49 | BEGIN IMMEDIATE; 50 | UPDATE t SET value = value + 1 WHERE key = 'foo'; 51 | SELECT value FROM t WHERE key = 'foo'; 52 | COMMIT; 53 | `); 54 | values.add(rows[0][0]); 55 | } 56 | })); 57 | 58 | expect(values.size).toBe(instances.length * iterations); 59 | expect(Array.from(values).sort((a, b) => b - a).at(0)).toBe(values.size); 60 | }); 61 | }); 62 | } 63 | 64 | async function transact({ sqlite3, db }, sql) { 65 | while (true) { 66 | try { 67 | const rows = []; 68 | await sqlite3.exec(db, sql, Comlink.proxy(row => rows.push(row))); 69 | return rows; 70 | } catch (e) { 71 | if (e.message !== 'database is locked') { 72 | throw e; 73 | } 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /test/test-worker.js: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Roy T. Hashimoto. All Rights Reserved. 
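//
// This worker is spawned by TestContext with the desired build and VFS
// config encoded in its URL search parameters. It optionally clears OPFS and
// IndexedDB, loads the selected WASM build, creates and registers the
// requested VFS, and then exposes the module plus sqlite3 and VFS proxies to
// the test page over a MessageChannel via Comlink.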
2 | 3 | import * as Comlink from 'comlink'; 4 | import * as SQLite from '../src/sqlite-api.js'; 5 | 6 | const BUILDS = new Map([ 7 | ['default', '../dist/wa-sqlite.mjs'], 8 | ['asyncify', '../dist/wa-sqlite-async.mjs'], 9 | ['jspi', '../dist/wa-sqlite-jspi.mjs'], 10 | ]); 11 | 12 | const MODULE = Symbol('module'); 13 | const VFS_CONFIGS = new Map([ 14 | { 15 | name: 'default', 16 | vfsModule: null 17 | }, 18 | { 19 | name: 'AccessHandlePoolVFS', 20 | vfsModule: '../src/examples/AccessHandlePoolVFS.js', 21 | }, 22 | { 23 | name: 'OPFSCoopSyncVFS', 24 | vfsModule: '../src/examples/OPFSCoopSyncVFS.js', 25 | }, 26 | { 27 | name: 'FLOOR', 28 | vfsModule: '../src/examples/FLOOR.js', 29 | }, 30 | { 31 | name: 'MemoryVFS', 32 | vfsModule: '../src/examples/MemoryVFS.js', 33 | }, 34 | { 35 | name: 'MemoryAsyncVFS', 36 | vfsModule: '../src/examples/MemoryAsyncVFS.js', 37 | }, 38 | { 39 | name: 'IDBBatchAtomicVFS', 40 | vfsModule: '../src/examples/IDBBatchAtomicVFS.js', 41 | }, 42 | { 43 | name: 'IDBMirrorVFS', 44 | vfsModule: '../src/examples/IDBMirrorVFS.js', 45 | }, 46 | { 47 | name: 'OPFSAdaptiveVFS', 48 | vfsModule: '../src/examples/OPFSAdaptiveVFS.js', 49 | }, 50 | { 51 | name: 'OPFSAnyContextVFS', 52 | vfsModule: '../src/examples/OPFSAnyContextVFS.js', 53 | }, 54 | { 55 | name: 'OPFSPermutedVFS', 56 | vfsModule: '../src/examples/OPFSPermutedVFS.js', 57 | }, 58 | ].map(config => [config.name, config])); 59 | 60 | const INDEXEDDB_DBNAMES = ['demo']; 61 | 62 | const searchParams = new URLSearchParams(location.search); 63 | 64 | maybeReset().then(async () => { 65 | const buildName = searchParams.get('build') || BUILDS.keys().next().value; 66 | const configName = searchParams.get('config') || VFS_CONFIGS.keys().next().value; 67 | const config = VFS_CONFIGS.get(configName); 68 | 69 | // Instantiate SQLite. 70 | const { default: moduleFactory } = await import(BUILDS.get(buildName)); 71 | const module = await moduleFactory(); 72 | const sqlite3 = SQLite.Factory(module); 73 | 74 | const vfs = await (async function() { 75 | if (config.vfsModule) { 76 | // Create the VFS and register it as the default file system. 77 | const namespace = await import(config.vfsModule); 78 | const className = config.vfsClass ?? config.vfsModule.match(/([^/]+)\.js$/)[1]; 79 | const vfsArgs = (config.vfsArgs ?? ['demo', MODULE]) 80 | .map(arg => arg === MODULE ? module : arg); 81 | const vfs = await namespace[className].create(...vfsArgs); 82 | sqlite3.vfs_register(vfs, true); 83 | return vfs; 84 | } 85 | return {}; 86 | })(); 87 | 88 | const sqlite3Proxy = new Proxy(sqlite3, { 89 | get(target, p, receiver) { 90 | // Comlink intercepts some function property names, e.g. "bind", 91 | // so allow aliases to avoid the problem. 92 | if (typeof p === 'string') p = p.replaceAll('$', ''); 93 | 94 | const value = Reflect.get(target, p, receiver); 95 | if (typeof value === 'function') { 96 | return async (...args) => { 97 | const result = await value.apply(target, args); 98 | if (p === 'statements') { 99 | return Comlink.proxy(result); 100 | } 101 | return result; 102 | }; 103 | } 104 | } 105 | }); 106 | 107 | const vfsProxy = new Proxy(vfs, { 108 | get(target, p, receiver) { 109 | const value = Reflect.get(target, p, receiver); 110 | if (typeof value === 'function') { 111 | return async (...args) => { 112 | if (p === 'jRead') { 113 | // The read buffer Uint8Array will be passed by proxy so all 114 | // access is asynchronous. Pass a local buffer to the VFS 115 | // and copy the local buffer to the proxy on completion. 
116 | const proxyBuffer = args[1]; 117 | args[1] = new Uint8Array(await proxyBuffer.length); 118 | const result = await value.apply(target, args); 119 | await proxyBuffer.set(args[1]); 120 | return result; 121 | } 122 | return value.apply(target, args); 123 | }; 124 | } 125 | } 126 | }); 127 | 128 | const { port1, port2 } = new MessageChannel(); 129 | Comlink.expose({ 130 | module, 131 | sqlite3: sqlite3Proxy, 132 | vfs: vfsProxy, 133 | }, port1); 134 | postMessage(null, [port2]); 135 | }).catch(e => { 136 | console.error(e); 137 | postMessage(cvtErrorToCloneable(e)); 138 | }); 139 | 140 | async function maybeReset() { 141 | if (searchParams.get('reset') !== 'true') { 142 | return; 143 | } 144 | 145 | // Limit the amount of time in this function. 146 | const abortController = new AbortController(); 147 | setTimeout(() => abortController.abort(), 10_000); 148 | 149 | // Clear OPFS. 150 | const root = await navigator.storage?.getDirectory(); 151 | if (root) { 152 | let opfsDeleted = false; 153 | while (!opfsDeleted) { 154 | abortController.signal.throwIfAborted(); 155 | try { 156 | // @ts-ignore 157 | for await (const name of root.keys()) { 158 | await root.removeEntry(name, { recursive: true }); 159 | } 160 | opfsDeleted = true; 161 | } catch (e) { 162 | // A NoModificationAllowedError is thrown if an entry can't be 163 | // deleted because it isn't closed. Just try again. 164 | if (e.name === 'NoModificationAllowedError') { 165 | await new Promise(resolve => setTimeout(resolve)); 166 | continue; 167 | } 168 | throw e; 169 | } 170 | } 171 | } 172 | 173 | // Clear IndexedDB. 174 | const dbList = indexedDB.databases ? 175 | await indexedDB.databases() : 176 | INDEXEDDB_DBNAMES.map(name => ({ name })); 177 | await Promise.all(dbList.map(({name}) => { 178 | return new Promise((resolve, reject) => { 179 | const request = indexedDB.deleteDatabase(name); 180 | request.onsuccess = resolve; 181 | request.onerror = reject; 182 | }); 183 | })); 184 | } 185 | 186 | function cvtErrorToCloneable(e) { 187 | if (e instanceof Error) { 188 | const props = new Set([ 189 | ...['name', 'message', 'stack'].filter(k => e[k] !== undefined), 190 | ...Object.getOwnPropertyNames(e) 191 | ]); 192 | return Object.fromEntries(Array.from(props, k =>  [k, e[k]]) 193 | .filter(([_, v]) => { 194 | // Skip any non-cloneable properties. 
195 | try { 196 | structuredClone(v); 197 | return true; 198 | } catch (e) { 199 | return false; 200 | } 201 | })); 202 | } 203 | return e; 204 | } -------------------------------------------------------------------------------- /test/vfs_xAccess.js: -------------------------------------------------------------------------------- 1 | export function vfs_xAccess(context) { 2 | } -------------------------------------------------------------------------------- /test/vfs_xClose.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | import * as VFS from '../src/VFS.js'; 3 | 4 | const FILEID = 1; 5 | 6 | export function vfs_xClose(context) { 7 | describe('vfs_xClose', function() { 8 | let proxy, vfs; 9 | beforeEach(async function() { 10 | proxy = await context.create(); 11 | vfs = proxy.vfs; 12 | }); 13 | 14 | afterEach(async function() { 15 | await context.destroy(proxy); 16 | }); 17 | 18 | it('should leave an accessible file', async function() { 19 | let rc; 20 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 21 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE; 22 | rc = await vfs.jOpen('test', FILEID, openFlags, pOpenOutput); 23 | expect(rc).toEqual(VFS.SQLITE_OK); 24 | 25 | await vfs.jClose(FILEID); 26 | 27 | const pAccessOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 28 | rc = await vfs.jAccess('test', VFS.SQLITE_ACCESS_READWRITE, pAccessOutput); 29 | expect(rc).toEqual(VFS.SQLITE_OK); 30 | expect(pAccessOutput.getInt32(0, true)).not.toEqual(0); 31 | }); 32 | 33 | it('should delete on close', async function() { 34 | let rc; 35 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 36 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE | VFS.SQLITE_OPEN_DELETEONCLOSE; 37 | rc = await vfs.jOpen('test', FILEID, openFlags, pOpenOutput); 38 | expect(rc).toEqual(VFS.SQLITE_OK); 39 | 40 | const pAccessOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 41 | rc = await vfs.jAccess('test', VFS.SQLITE_ACCESS_READWRITE, pAccessOutput); 42 | expect(rc).toEqual(VFS.SQLITE_OK); 43 | expect(pAccessOutput.getInt32(0, true)).toEqual(1); 44 | 45 | await vfs.jClose(FILEID); 46 | 47 | rc = await vfs.jAccess('test', VFS.SQLITE_ACCESS_READWRITE, pAccessOutput); 48 | expect(rc).toEqual(VFS.SQLITE_OK); 49 | expect(pAccessOutput.getInt32(0, true)).toEqual(0); 50 | }); 51 | }); 52 | } -------------------------------------------------------------------------------- /test/vfs_xOpen.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | import * as VFS from '../src/VFS.js'; 3 | 4 | const FILEID = 1; 5 | 6 | export function vfs_xOpen(context) { 7 | describe('vfs_xOpen', function() { 8 | let proxy, vfs; 9 | beforeEach(async function() { 10 | proxy = await context.create(); 11 | vfs = proxy.vfs; 12 | }); 13 | 14 | afterEach(async function() { 15 | await context.destroy(proxy); 16 | }); 17 | 18 | it('should create a file', async function() { 19 | let rc; 20 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 21 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE; 22 | rc = await vfs.jOpen('test', FILEID, openFlags, pOpenOutput); 23 | expect(rc).toEqual(VFS.SQLITE_OK); 24 | expect(pOpenOutput.getInt32(0, true)).toEqual(openFlags); 25 | 26 | const pAccessOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 27 | rc = await vfs.jAccess('test', 
VFS.SQLITE_ACCESS_READWRITE, pAccessOutput); 28 | expect(rc).toEqual(VFS.SQLITE_OK); 29 | expect(pAccessOutput.getInt32(0, true)).not.toEqual(0); 30 | }); 31 | 32 | it('should create a database file', async function() { 33 | let rc; 34 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 35 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE | VFS.SQLITE_OPEN_MAIN_DB; 36 | 37 | do { 38 | const nRetryOps = await proxy.module.retryOps.length; 39 | for (let i = 0; i < nRetryOps; i++) { 40 | await proxy.module.retryOps[i]; 41 | } 42 | rc = await vfs.jOpen('test', 1, openFlags, pOpenOutput); 43 | } while (rc === VFS.SQLITE_BUSY); 44 | expect(rc).toEqual(VFS.SQLITE_OK); 45 | expect(pOpenOutput.getInt32(0, true)).toEqual(openFlags); 46 | 47 | const pAccessOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 48 | rc = await vfs.jAccess('test', VFS.SQLITE_ACCESS_READWRITE, pAccessOutput); 49 | expect(rc).toEqual(VFS.SQLITE_OK); 50 | expect(pAccessOutput.getInt32(0, true)).not.toEqual(0); 51 | }); 52 | 53 | it('should not create a file', async function() { 54 | let rc; 55 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 56 | const openFlags = VFS.SQLITE_OPEN_READWRITE; 57 | rc = await vfs.jOpen('test', 1, openFlags, pOpenOutput); 58 | expect(rc).toEqual(VFS.SQLITE_CANTOPEN); 59 | 60 | const pAccessOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 61 | rc = await vfs.jAccess('test', VFS.SQLITE_ACCESS_READWRITE, pAccessOutput); 62 | expect(rc).toEqual(VFS.SQLITE_OK); 63 | expect(pAccessOutput.getInt32(0, true)).toEqual(0); 64 | }); 65 | 66 | it('should open an existing file', async function() { 67 | let rc; 68 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 69 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE; 70 | rc = await vfs.jOpen('test', FILEID, openFlags, pOpenOutput); 71 | expect(rc).toEqual(VFS.SQLITE_OK); 72 | 73 | // Close the file because some VFS implementations don't allow 74 | // multiple open handles. 
75 | await vfs.jClose(FILEID); 76 | 77 | rc = await vfs.jOpen('test', FILEID, VFS.SQLITE_OPEN_READWRITE, pOpenOutput); 78 | expect(rc).toEqual(VFS.SQLITE_OK); 79 | expect(pOpenOutput.getInt32(0, true)).toEqual(VFS.SQLITE_OPEN_READWRITE); 80 | }); 81 | 82 | it('should create an anonymous file', async function() { 83 | let rc; 84 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 85 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE; 86 | rc = await vfs.jOpen(null, FILEID, openFlags, pOpenOutput); 87 | expect(rc).toEqual(VFS.SQLITE_OK); 88 | expect(pOpenOutput.getInt32(0, true)).toEqual(openFlags); 89 | }); 90 | }); 91 | } -------------------------------------------------------------------------------- /test/vfs_xRead.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | import * as VFS from '../src/VFS.js'; 3 | 4 | const FILEID = 1; 5 | 6 | export function vfs_xRead(context) { 7 | describe('vfs_xRead', function() { 8 | let proxy, vfs; 9 | beforeEach(async function() { 10 | proxy = await context.create(); 11 | vfs = proxy.vfs; 12 | }); 13 | 14 | afterEach(async function() { 15 | await context.destroy(proxy); 16 | }); 17 | 18 | it('should signal short read', async function() { 19 | let rc; 20 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 21 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE; 22 | rc = await vfs.jOpen('test', FILEID, openFlags, pOpenOutput); 23 | expect(rc).toEqual(VFS.SQLITE_OK); 24 | 25 | const pData = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]); 26 | const iOffset = 0; 27 | rc = await vfs.jWrite(FILEID, pData, iOffset); 28 | expect(rc).toEqual(VFS.SQLITE_OK); 29 | 30 | const pReadData = Comlink.proxy(new Uint8Array(pData.length * 2).fill(0xfb)); 31 | rc = await vfs.jRead(FILEID, pReadData, iOffset); 32 | expect(rc).toEqual(VFS.SQLITE_IOERR_SHORT_READ); 33 | expect(pReadData.subarray(0, pData.length)).toEqual(pData); 34 | expect(pReadData.subarray(pData.length)) 35 | .toEqual(new Uint8Array(pReadData.length - pData.length)); 36 | }); 37 | }); 38 | } -------------------------------------------------------------------------------- /test/vfs_xWrite.js: -------------------------------------------------------------------------------- 1 | import * as Comlink from 'comlink'; 2 | import * as VFS from '../src/VFS.js'; 3 | 4 | const FILEID = 1; 5 | 6 | export function vfs_xWrite(context) { 7 | describe('vfs_xWrite', function() { 8 | let proxy, vfs; 9 | beforeEach(async function() { 10 | proxy = await context.create(); 11 | vfs = proxy.vfs; 12 | }); 13 | 14 | afterEach(async function() { 15 | await context.destroy(proxy); 16 | }); 17 | 18 | it('should round-trip data', async function() { 19 | let rc; 20 | const pOpenOutput = Comlink.proxy(new DataView(new ArrayBuffer(4))); 21 | const openFlags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE; 22 | rc = await vfs.jOpen('test', FILEID, openFlags, pOpenOutput); 23 | expect(rc).toEqual(VFS.SQLITE_OK); 24 | 25 | const pData = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]); 26 | const iOffset = 0; 27 | rc = await vfs.jWrite(FILEID, pData, iOffset); 28 | expect(rc).toEqual(VFS.SQLITE_OK); 29 | 30 | const pReadData = Comlink.proxy(new Uint8Array(pData.length)); 31 | rc = await vfs.jRead(FILEID, pReadData, iOffset); 32 | expect(rc).toEqual(VFS.SQLITE_OK); 33 | expect([...pReadData]).toEqual([...pData]); 34 | }); 35 | }); 36 | } 
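The vfs_x* modules above are parameterized suites: each VFS gets a thin *.test.js file that creates a TestContext per build and passes it to every suite, as in the test files earlier in this section. As a sketch only, wiring a hypothetical additional VFS into the matrix would look like the following; the 'MyCustomVFS' name is illustrative and would also need a matching entry in VFS_CONFIGS in test-worker.js.

import { TestContext } from "./TestContext.js";
import { vfs_xOpen } from "./vfs_xOpen.js";
import { vfs_xAccess } from "./vfs_xAccess.js";
import { vfs_xClose } from "./vfs_xClose.js";
import { vfs_xRead } from "./vfs_xRead.js";
import { vfs_xWrite } from "./vfs_xWrite.js";

// Hypothetical VFS under test; include 'default' in BUILDS only if the VFS
// is fully synchronous, matching the existing test files.
const CONFIG = 'MyCustomVFS';
const BUILDS = ['asyncify', 'jspi'];

const supportsJSPI = await TestContext.supportsJSPI();

describe(CONFIG, function() {
  for (const build of BUILDS) {
    // Skip JSPI tests if the browser does not support it.
    if (build === 'jspi' && !supportsJSPI) continue;

    describe(build, function() {
      const context = new TestContext({ build, config: CONFIG });

      vfs_xAccess(context);
      vfs_xOpen(context);
      vfs_xClose(context);
      vfs_xRead(context);
      vfs_xWrite(context);
    });
  }
});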
-------------------------------------------------------------------------------- /typedoc.json: -------------------------------------------------------------------------------- 1 | { 2 | "disableSources": true, 3 | "entryPoints": ["src/types/index.d.ts"], 4 | "excludeNotDocumented": true, 5 | "out": "docs", 6 | "readme": "none", 7 | "tsconfig": "src/types/tsconfig.json" 8 | } -------------------------------------------------------------------------------- /web-test-runner.config.mjs: -------------------------------------------------------------------------------- 1 | import { chromeLauncher } from '@web/test-runner'; 2 | import { jasmineTestRunnerConfig } from 'web-test-runner-jasmine'; 3 | 4 | export default /** @type {import("@web/test-runner").TestRunnerConfig} */ ({ 5 | ...jasmineTestRunnerConfig(), 6 | testFramework: { 7 | config: { 8 | defaultTimeoutInterval: 5 * 60 * 1000 9 | }, 10 | }, 11 | browserLogs: true, 12 | browserStartTimeout: 60_000, 13 | nodeResolve: true, 14 | files: ['./test/*.test.js'], 15 | concurrency: 1, 16 | concurrentBrowsers: 1, 17 | browsers: [ 18 | chromeLauncher({ 19 | launchOptions: { 20 | args: [ 21 | '--flag-switches-begin', 22 | '--enable-features=WebAssemblyExperimentalJSPI', 23 | '--flag-switches-end' 24 | ], 25 | }, 26 | }), 27 | ], 28 | }); --------------------------------------------------------------------------------
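For reference, these pieces fit together as follows: a spec constructs a TestContext with a build and VFS config, create() spawns test-worker.js with those values in its URL, and the spec receives Comlink proxies for the module, the sqlite3 API, and the VFS. Row callbacks cross the worker boundary, so they must be wrapped with Comlink.proxy, and because Comlink reserves a few property names (e.g. bind), the sqlite3 proxy also accepts '$'-suffixed aliases that resolve to the same methods. Below is a minimal sketch of a spec using this flow, assuming it lives alongside the other specs in test/; the suite name and SQL are illustrative only.

import * as Comlink from 'comlink';
import * as SQLite from '../src/sqlite-api.js';
import { TestContext } from './TestContext.js';

const context = new TestContext({ build: 'asyncify', config: 'MemoryVFS' });

describe('worker proxy sketch', function() {
  let proxy, sqlite3, db;
  beforeEach(async function() {
    // Spawns test-worker.js with ?build=asyncify&config=MemoryVFS&reset=true
    // and receives the Comlink-wrapped proxies in return.
    proxy = await context.create();
    sqlite3 = proxy.sqlite3;
    db = await sqlite3.open_v2('demo');
  });

  afterEach(async function() {
    await sqlite3.close(db);
    await context.destroy(proxy);
  });

  it('should execute SQL through the worker', async function() {
    const rows = [];
    // The row callback runs in the worker, so it is passed as a Comlink proxy.
    const rc = await sqlite3.exec(db, `
      CREATE TABLE t(x);
      INSERT INTO t VALUES (1), (2), (3);
      SELECT x FROM t ORDER BY x;
    `, Comlink.proxy(row => rows.push(row[0])));
    expect(rc).toEqual(SQLite.SQLITE_OK);
    expect(rows).toEqual([1, 2, 3]);
  });
});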