├── .cspell.json ├── .czrc ├── .eslintrc.json ├── .github └── workflows │ └── rxdb-supabase.yml ├── .gitignore ├── .npmignore ├── .prettierrc.json ├── .releaserc.json ├── .vscode ├── extensions.json ├── launch.json └── settings.json ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── package-lock.json ├── package.json ├── scripts ├── build.ts └── clean.ts ├── src ├── __tests__ │ ├── supabase-replication.integration.test.ts │ └── supabase-replication.test.ts ├── supabase-replication.ts └── test-utils │ ├── humans.sql │ ├── supabase-backend-mock.ts │ ├── test-types.ts │ └── test-utils.ts ├── tsconfig.build.json └── tsconfig.json /.cspell.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.1", 3 | "language": "en", 4 | "words": [ 5 | "degit", 6 | "esbuild", 7 | "rxdb-supabase", 8 | "octocat", 9 | "rmrf", 10 | "socio" 11 | ], 12 | "flagWords": [], 13 | "ignorePaths": [ 14 | "package.json", 15 | "package-lock.json", 16 | "yarn.lock", 17 | "tsconfig.json", 18 | "node_modules/**" 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /.czrc: -------------------------------------------------------------------------------- 1 | { 2 | "path": "./node_modules/@ryansonshine/cz-conventional-changelog" 3 | } 4 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "env": { 4 | "es2021": true, 5 | "node": true 6 | }, 7 | "parser": "@typescript-eslint/parser", 8 | "parserOptions": { 9 | "project": "./tsconfig.json" 10 | }, 11 | "plugins": ["import", "@typescript-eslint"], 12 | "ignorePatterns": ["scripts/*"], 13 | "extends": [ 14 | "eslint:recommended", 15 | "plugin:eslint-comments/recommended", 16 | "plugin:@typescript-eslint/eslint-recommended", 17 | "plugin:@typescript-eslint/recommended", 18 | "plugin:@typescript-eslint/recommended-requiring-type-checking", 19 | "plugin:@typescript-eslint/strict", 20 | "plugin:import/recommended", 21 | "plugin:import/typescript", 22 | "prettier" 23 | ], 24 | "settings": { 25 | "import/resolver": { 26 | "typescript": { 27 | "project": "tsconfig.json" 28 | } 29 | } 30 | }, 31 | "rules": { 32 | "import/order": [ 33 | "error", 34 | { 35 | "alphabetize": { 36 | "order": "asc", 37 | "caseInsensitive": true 38 | } 39 | } 40 | ], 41 | "import/no-extraneous-dependencies": "error", 42 | "import/no-mutable-exports": "error", 43 | "import/no-unused-modules": "error" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /.github/workflows/rxdb-supabase.yml: -------------------------------------------------------------------------------- 1 | name: rxdb-supabase 2 | 3 | on: [push] 4 | 5 | jobs: 6 | rxdb-supabase: 7 | runs-on: ubuntu-latest 8 | 9 | concurrency: 10 | group: ${{ github.ref }} 11 | cancel-in-progress: true 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | - uses: volta-cli/action@v1 16 | - run: npm ci 17 | 18 | - name: Dependencies audit 19 | run: npm audit --audit-level=moderate --omit=dev 20 | 21 | - name: Build 22 | run: npm run build 23 | 24 | - name: Format check 25 | run: npm run format:check 26 | 27 | - name: Lint check 28 | run: npm run lint:check 29 | continue-on-error: true 30 | 31 | - name: Spell check 32 | run: npm run spell:check 33 | continue-on-error: true 34 | 35 | - name: Tests 36 | run: npm run test 37 | 38 | - 
name: Integration Tests 39 | run: npm run integration-test 40 | env: 41 | TEST_SUPABASE_URL: ${{ secrets.TEST_SUPABASE_URL }} 42 | TEST_SUPABASE_API_KEY: ${{ secrets.TEST_SUPABASE_API_KEY }} 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | node_modules/ 3 | .nyc_output/ 4 | coverage/ 5 | .DS_Store 6 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .nyc_output/ 3 | coverage/ 4 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 100, 3 | "semi": false 4 | } 5 | -------------------------------------------------------------------------------- /.releaserc.json: -------------------------------------------------------------------------------- 1 | { 2 | "branches": [ 3 | "+([0-9])?(.{+([0-9]),x}).x", 4 | "main", 5 | "master", 6 | "next", 7 | "next-major", 8 | { 9 | "name": "beta", 10 | "prerelease": true 11 | }, 12 | { 13 | "name": "alpha", 14 | "prerelease": true 15 | } 16 | ], 17 | "repositoryUrl": "https://github.com/marceljuenemann/rxdb-supabase.git", 18 | "plugins": [ 19 | "@semantic-release/commit-analyzer", 20 | "@semantic-release/release-notes-generator", 21 | "@semantic-release/changelog", 22 | "@semantic-release/npm", 23 | "@semantic-release/github" 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "dbaeumer.vscode-eslint", 4 | "esbenp.prettier-vscode", 5 | "eamodio.gitlens", 6 | "streetsidesoftware.code-spell-checker" 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 3 | "version": "0.2.0", 4 | "configurations": [ 5 | { 6 | "type": "pwa-node", 7 | "request": "launch", 8 | "name": "Debug Current Test File", 9 | "autoAttachChildProcesses": true, 10 | "skipFiles": ["/**", "**/node_modules/**"], 11 | "program": "${workspaceRoot}/node_modules/vitest/vitest.mjs", 12 | "args": ["run", "${relativeFile}"], 13 | "smartStep": true, 14 | "console": "integratedTerminal" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | // only use words from .cspell.json 3 | "cSpell.userWords": [], 4 | "cSpell.enabled": true, 5 | "editor.formatOnSave": true, 6 | "typescript.tsdk": "node_modules/typescript/lib", 7 | "typescript.enablePromptUseWorkspaceTsdk": true 8 | } 9 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | 7 | 8 | ## 1.0.4 (2023-08-16) 9 | 10 | - Fix to enable multiple replications to run simultaneously, now for real (#9, #10) 11 | 12 | ## 1.0.1 (2023-06-24) 13 | 14 | - Fix to enable multiple replications to run 
simultaneously (#9) 15 | - Using new ReplicationOptions types from rxdb directly (#2) 16 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | dev@marcel.world. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 
68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | _Pull requests, bug reports, and all other forms of contribution are welcomed and highly encouraged!_ :octocat: 4 | 5 | ### Contents 6 | 7 | - [Code of Conduct](#book-code-of-conduct) 8 | - [Asking Questions](#bulb-asking-questions) 9 | - [How can I Contribute?](#inbox_tray-how-can-i-contribute) 10 | 11 | > **This guide serves to set clear expectations for everyone involved with the project so that we can improve it together while also creating a welcoming space for everyone to participate. 
Following these guidelines will help ensure a positive experience for contributors and maintainers.** 12 | 13 | ## :book: Code of Conduct 14 | 15 | Please review our [Code of Conduct](./CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. 16 | 17 | ## :bulb: Asking Questions 18 | 19 | If you have any question that does not relate to a bug or a feature request, please use [GitHub Discussions](https://github.com/marceljuenemann/rxdb-supabase/discussions) instead of GitHub issues. 20 | 21 | ## :inbox_tray: How can I Contribute? 22 | 23 | **GitHub issues** 24 | 25 | If you encounter a problem with this library or if you have a new feature you'd like to see in this project, please create [a new issue](https://github.com/marceljuenemann/rxdb-supabase/issues/new/choose). 26 | 27 | **GitHub Pull requests** 28 | 29 | Please leverage the repository's own tools to make sure the code is aligned with our standards: 30 | 31 | 1. Run all check commands before submitting the PR (`type:check`, `format:check`, `lint:check`, `test:coverage` and `spell:check`) 32 | 2. Please commit your changes and run a `setup` command so you can actually check how would the template look like once cleaned up 33 | 3. Always leverage the `cz` command to create a commit. We heavily rely on this for automatic releases. 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Marcel Juenemann 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rxdb-supabase: Offline Support for Supabase 2 | 3 | ![NPM](https://img.shields.io/npm/l/rxdb-supabase) 4 | ![NPM](https://img.shields.io/npm/v/rxdb-supabase) 5 | ![GitHub Workflow Status](https://github.com/marceljuenemann/rxdb-supabase/actions/workflows/rxdb-supabase.yml/badge.svg?branch=main) 6 | 7 | > [!WARNING] 8 | > **Currently only RxDB 14 is supported. I'm planning to work on support for RxDB 16 soon!** 9 | 10 | [RxDB](https://rxdb.info/) is a client-side, offline-first database that supports various storage layers including IndexedDB. [Supabase](https://supabase.com/) is an open-source Firebase alternative that stores data in a Postgres database with row level security. 
This library uses RxDB's replication logic to enable a two-way sync of your client-side RxDB database with a remote Supabase table, while allowing you to define custom conflict resolution strategies. 11 | 12 | ## How it works 13 | 14 | RxDB is an **offline-first database**, so all reads and writes are performed against the client-side RxDB database, which gets synced with the corresponding Supabase table in the background. Put another way, it stores a **full copy of the Supabase table locally** (or more specifically, the subset of rows accessible to the user after row-level security is applied). Everything is configured on a per-table basis though, so you could enable offline support for some tables while querying other tables using the SupabaseClient only when online. 15 | 16 | Most of the replication and conflict resolution is handled by RxDB's [replication protocol](https://rxdb.info/replication.html). It works similarly to git by always pulling all changes from Supabase before merging changes locally and then pushing them to Supabase. When you start the replication (e.g. when the user opens your web app), these three stages are executed in order: 17 | 18 | 1. **Pull changes from Supabase:** As the Supabase table might have been changed since the last sync on this particular client, we need to fetch all rows that were modified in the meantime. In order for this to be possible with Supabase, some restrictions apply to the table you want to sync: 19 | - **`_modified` field:** Your table needs a field with the timestamp of the last modification. This is easy to implement in Supabase, see the Getting Started guide below. 20 | - **`_deleted` field:** You can't actually delete rows from Supabase unless you are sure all clients have replicated the deletion locally. Instead, you need a boolean field that indicates whether the row has been deleted. You won't have to deal with this on the client-side though, as RxDB will handle this for you transparently. 21 | 1. **Push changes to Supabase:** Next, we fire INSERT and UPDATE queries to Supabase with all local writes. By default, rows are only updated if they have not changed in the meantime, i.e. all fields of the row need to have the value that they had when the local write was performed. Otherwise, RxDB's conflict handler is invoked, which you can customize to build your own strategy for merging changes. 22 | 1. **Watch Supabase changes in realtime:** After the initial sync is complete, we use Supabase's realtime feature to subscribe to any changes of the table. Note that this will miss any changes if the client is offline intermittently, so you might want to call `reSync()` on the replication object whenever your app comes [back online](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/onLine#listening_for_changes_in_network_status). 23 | 24 | ## Getting Started 25 | 26 | ### Install 27 | 28 | `npm install rxdb-supabase rxdb @supabase/supabase-js --save` 29 | 30 | ### Create your RxDB 31 | 32 | If you're new to RxDB, read the [Quickstart guide](https://rxdb.info/quickstart.html) for more details. 33 | 34 | ```typescript 35 | import { createRxDatabase } from "rxdb" 36 | import { getRxStorageDexie } from "rxdb/plugins/storage-dexie" 37 | 38 | // Create your database 39 | const myDatabase = await createRxDatabase({ 40 | name: "humans", 41 | storage: getRxStorageDexie(), // Uses IndexedDB 42 | }) 43 | 44 | // Create a collection matching your Supabase table structure.
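// Note: the replication-specific columns introduced in the next section (`_deleted`,
// `_modified`) live only in the Supabase table and are not declared in the RxDB schema:
// RxDB tracks deletions internally, and the replication uses the modified timestamp
// for its checkpoints rather than storing it in the local documents.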
45 | const mySchema = { 46 | title: "human schema", 47 | version: 0, 48 | primaryKey: "id", 49 | type: "object", 50 | properties: { 51 | id: { 52 | type: "string", 53 | maxLength: 100, 54 | }, 55 | name: { 56 | type: "string", 57 | }, 58 | age: { 59 | description: "age in years", 60 | type: "integer", 61 | }, 62 | }, 63 | required: ["id", "name", "age"], 64 | indexes: ["age"], 65 | } 66 | const myCollections = await db.addCollections({ 67 | humans: { 68 | schema: mySchema, 69 | /** 70 | * Whenever we attempt to replicate a local write to a row that was changed in 71 | * Supabase in the meantime (e.g. by another client), the conflict handler is 72 | * invoked. By default, RxDB will dismiss the local write and update the local 73 | * state to match the state in Supabase. With a custom conflict handler you can 74 | * implement other strategies, e.g. you might want to still perform an update 75 | * on a per-field basis as long as that field didn't change. 76 | */ 77 | // conflictHandler: ... 78 | }, 79 | }) 80 | ``` 81 | 82 | Use RxDB's functions for reading and writing the database. For example: 83 | 84 | ```typescript 85 | const myCollection = myCollections.humans 86 | myCollection.find({}).$.subscribe((documents) => { 87 | console.log("query has found " + documents.length + " documents") 88 | }) 89 | 90 | const doc = await myCollection.insert({ id: "1", name: "Alice" }) 91 | await doc.patch({ age: 21 }) 92 | await doc.remove() 93 | ``` 94 | 95 | ### Create your Supabase table 96 | 97 | As stated above, your table needs a `_modified` timestamp and a `_deleted` field in order 98 | for the replication to be able to detect which rows changed in Supabase. You can configure a different name for these fields with the `lastModifiedField` and `deletedField` options. 99 | 100 | ```sql 101 | CREATE TABLE public.humans ( 102 | id text NOT NULL, 103 | name text NOT NULL, 104 | age smallint, 105 | _deleted boolean DEFAULT false NOT NULL, 106 | _modified timestamp with time zone DEFAULT now() NOT NULL 107 | ); 108 | ALTER TABLE ONLY public.humans ADD CONSTRAINT humans_pkey PRIMARY KEY (id); 109 | ``` 110 | 111 | Create a trigger that keeps the `_modified` field updated: 112 | 113 | ```sql 114 | CREATE TRIGGER update_modified_datetime BEFORE UPDATE ON public.humans FOR EACH ROW 115 | EXECUTE FUNCTION extensions.moddatetime('_modified'); 116 | ``` 117 | 118 | ### Start the Replication 119 | 120 | Make sure you've [initialized your SupabaseClient](https://supabase.com/docs/reference/javascript/initializing) and, if you're using row level security, that the client is authenticated. 121 | 122 | ```typescript 123 | const replication = new SupabaseReplication({ 124 | supabaseClient: supabaseClient, 125 | collection: myCollection, 126 | /** 127 | * An ID for the replication, so that RxDB is able to resume the replication 128 | * on app reload. It is recommended to add the supabase URL to make sure you're 129 | * not mixing up replications against different databases. 130 | * 131 | * If you're using row-level security, you might also want to append the user ID 132 | * in case the logged in user changes, although depending on your application you 133 | * might want to re-create the entire RxDB from scratch in that case or have one 134 | * RxDB per user ID (you could add the user ID to the RxDB name). 135 | */ 136 | replicationIdentifier: "myId" + SUPABASE_URL, // TODO: Add Supabase user ID? 
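  // The following two options are only needed if your Supabase table differs from the
  // RxDB collection (see the Options section below); shown here commented out:
  // table: "humans",     // defaults to the collection name
  // primaryKey: "id",    // defaults to the collection's primary key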
137 | pull: {}, // If absent, no data is pulled from Supabase 138 | push: {}, // If absent, no changes are pushed to Supabase 139 | }) 140 | ``` 141 | 142 | That's it, your replication is now running! Any errors can be observed with 143 | `replication.errors$` and you can stop the replication again with `replication.cancel()`. 144 | To ensure you don't miss any changes in Supabase, you might want to listen to the 145 | [network status](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/onLine#listening_for_changes_in_network_status) and call `replication.reSync()` when the 146 | client gets back online. 147 | 148 | ## Options 149 | 150 | These are all the available options, including the options inherited from RxDB. 151 | 152 | ```typescript 153 | /** 154 | * The RxDB collection to replicate. 155 | */ 156 | collection: RxCollection 157 | 158 | /** 159 | * The SupabaseClient to replicate with. 160 | */ 161 | supabaseClient: SupabaseClient 162 | 163 | /** 164 | * The table to replicate to, if different from the name of the collection. 165 | * @default the name of the RxDB collection. 166 | */ 167 | table?: string 168 | 169 | /** 170 | * The primary key of the supabase table, if different from the primary key of the RxDB. 171 | * @default the primary key of the RxDB collection 172 | */ 173 | primaryKey?: string 174 | 175 | /** 176 | * Options for pulling data from supabase. Set to {} to pull with the default 177 | * options, as no data will be pulled if the field is absent. 178 | */ 179 | pull?: { 180 | /** 181 | * Whether to subscribe to realtime Postgres changes for the table. If set to false, 182 | * only an initial pull will be performed. Only has an effect if the live option is set 183 | * to true. 184 | * @default true 185 | */ 186 | realtimePostgresChanges?: boolean 187 | 188 | /** 189 | * The name of the supabase field that is automatically updated to the last 190 | * modified timestamp by postgres. This field is required for the pull sync 191 | * to work and can easily be implemented with moddatetime in supabase. 192 | * @default '_modified' 193 | */ 194 | lastModifiedField?: string 195 | 196 | /** 197 | * Amount of documents to fetch from Supabase in one request. 198 | * @default 100 199 | */ 200 | batchSize?: number 201 | 202 | /** 203 | * A modifier that runs on all documents that are pulled, 204 | * before they are used by RxDB. 205 | */ 206 | modifier?: (docData: any) => Promise> | WithDeleted 207 | 208 | /** 209 | * If set, the pull replication will start from the given checkpoint. 210 | */ 211 | initialCheckpoint?: SupabaseReplicationCheckpoint 212 | } 213 | 214 | /** 215 | * Options for pushing data to supabase. Set to {} to push with the default 216 | * options, as no data will be pushed if the field is absent. 217 | */ 218 | push?: { 219 | /** 220 | * Handler for pushing row updates to supabase. Must return true iff the UPDATE was 221 | * applied to the supabase table. Returning false signalises a write conflict, in 222 | * which case the current state of the row will be fetched from supabase and passed to 223 | * the RxDB collection's conflict handler. 224 | * @default the default handler will update the row only iff all fields match the 225 | * local state (before the update was applied), otherwise the conflict handler is 226 | * invoked. The default handler does not support JSON fields at the moment. 
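   * For tables with JSON columns you therefore need to supply your own handler (see
   * the Notes section at the end of this README), for example one that matches rows
   * on the primary key only and accepts last-write-wins semantics.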
227 | */ 228 | updateHandler?: (row: RxReplicationWriteToMasterRow) => Promise 229 | 230 | /** 231 | * A modifier that runs on all pushed documents before they are send to Supabase 232 | */ 233 | modifier?: (docData: WithDeleted) => Promise | any 234 | 235 | /** 236 | * If set, the push replication will start from the given checkpoint. 237 | */ 238 | initialCheckpoint?: SupabaseReplicationCheckpoint 239 | } 240 | 241 | /** 242 | * An ID for the replication, so that RxDB is able to resume the replication 243 | * on app reload. It is recommended to add the supabase URL to make sure you're 244 | * not mixing up replications against different databases. 245 | * 246 | * If you're using row-level security, you might also want to append the user ID 247 | * in case the logged in user changes, although depending on your application you 248 | * might want to re-create the entire RxDB from scratch in that case or have one 249 | * RxDB per user ID (you could add the user ID to the RxDB name). 250 | */ 251 | replicationIdentifier: string 252 | 253 | /** 254 | * The name of the database field to mark a row as deleted. 255 | * @default '_deleted' 256 | */ 257 | deletedField?: "_deleted" | string 258 | 259 | /** 260 | * By default it will do an ongoing realtime replication. 261 | * By settings live: false the replication will run once until the local state 262 | * is in sync with the remote state, then it will cancel itself. 263 | * @default true 264 | */ 265 | live?: boolean 266 | 267 | /** 268 | * Time in milliseconds after which a Supabase request will be retried. 269 | * This time will be skipped if a offline->online switch is detected 270 | * via `navigator.onLine` 271 | * @default 5000 272 | */ 273 | retryTime?: number 274 | 275 | /** 276 | * If set to `true`, the replication is started automatically. Otherwise you need 277 | * to call `replication.start()` manually. 278 | * @default true 279 | */ 280 | autoStart?: boolean 281 | } 282 | ``` 283 | 284 | ## Notes 285 | 286 | - **JSON fields require a custom `updateHandler`.** This is because the default update handler tries to check that all fields of a row have the expected value, but the supabase client doesn't currently have a simple way to add an equality check for JSON fields. 287 | - If you delete rows frequently, you might want to enable RxDB's [cleanup plugin](https://rxdb.info/cleanup.html) to clear deleted rows from the local database after they were deleted. There's no recommended way for cleaning up those rows in Supabase yet. 288 | 289 | ## Future work 290 | 291 | While the offline-first paradigm comes with [many advantages](https://rxdb.info/offline-first.html), there are also [downsides](https://rxdb.info/downsides-of-offline-first.html), most notably that the entire table needs to be downloaded to the client. Here are a few ideas for how this project could mitigate that in the future: 292 | 293 | - [#4](https://github.com/marceljuenemann/rxdb-supabase/issues/4) Support "partitions", i.e. replicating subsets of the Supabase table, similar to subcollections in FireStore 294 | - [#5](https://github.com/marceljuenemann/rxdb-supabase/issues/5) Add better support for a "push-only" mode 295 | - [#6](https://github.com/marceljuenemann/rxdb-supabase/issues/6) Support using RxDB as a offline cache rather than a offline-first database 296 | 297 | ## Development 298 | 299 | **Build:** `npm run build` 300 | 301 | **Unit tests:** `npm run test` or `npm run test:watch` 302 | 303 | **Unit test coverage:** `npm run test:coverage` (Not working yet!) 
304 | 305 | **Integration tests:** We also run integration tests against a real Supabase instance: 306 | 307 | - Set up a Supabase project and use `src/test-utils/humans.sql` to create the table used in tests. It does not use row level security, so that should be disabled for the table. 308 | - It requires the environment variables `TEST_SUPABASE_URL` and `TEST_SUPABASE_API_KEY` (the public API key) to be set 309 | - `npm run integration-test` 310 | 311 | **Format code:** `npm run format` (checked as part of the workflow, run for pull requests please :) 312 | 313 | **Lint:** `npm run lint` (not passing or required for pull requests yet) 314 | 315 | **Spell check:** `npm run spell:check` (not passing or required for pull requests yet) 316 | 317 | **Release checklist:** 318 | 319 | - Bump version 320 | - Update `CHANGELOG.md` 321 | - Update dependencies (`ncu -u`) 322 | - `npm i` 323 | - Build, test, format 324 | - `npm publish` 325 | - Commit 326 | - Create GitHub release 327 | 328 | _TODO: Set up semantic-releases workflow_ 329 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "rxdb-supabase", 3 | "version": "1.0.4", 4 | "description": "Offline support for Supabase using RxDB, an offline-first database", 5 | "keywords": [ 6 | "rxdb", 7 | "supabase", 8 | "offline", 9 | "replication", 10 | "sync" 11 | ], 12 | "repository": { 13 | "type": "git", 14 | "url": "https://github.com/marceljuenemann/rxdb-supabase.git" 15 | }, 16 | "author": { 17 | "name": "Marcel Juenemann", 18 | "email": "rxdb-supabase@marcel.world", 19 | "url": "https://marcel.world/" 20 | }, 21 | "type": "module", 22 | "exports": "./build/supabase-replication.js", 23 | "types": "./build/src/supabase-replication.d.ts", 24 | "license": "MIT", 25 | "engines": { 26 | "node": "^18.15.0", 27 | "npm": "^9.5.0" 28 | }, 29 | "volta": { 30 | "node": "18.15.0", 31 | "npm": "9.5.0" 32 | }, 33 | "publishConfig": { 34 | "access": "public" 35 | }, 36 | "scripts": { 37 | "build": "npm run clean && npm run type:dts && npm run build:main", 38 | "build:main": "tsx ./scripts/build.ts", 39 | "clean": "tsx ./scripts/clean.ts", 40 | "type:dts": "tsc --emitDeclarationOnly --project tsconfig.build.json", 41 | "type:check": "tsc --noEmit", 42 | "format": "prettier \"src/**/*.ts\" --write", 43 | "format:check": "prettier \"src/**/*.ts\" --check", 44 | "lint": "eslint src --ext .ts --fix", 45 | "lint:check": "eslint src --ext .ts", 46 | "test": "vitest run", 47 | "test:watch": "vitest watch", 48 | "test:coverage": "vitest run --coverage", 49 | "test:setup": "tsx ./scripts/test-setup.ts", 50 | "integration-test": "INTEGRATION_TEST=1 vitest run", 51 | "integration-test:watch": "INTEGRATION_TEST=1 vitest watch", 52 | "spell:check": "cspell \"{README.md,CODE_OF_CONDUCT.md,CONTRIBUTING.md,.github/*.md,src/**/*.ts}\"", 53 | "cz": "cz", 54 | "semantic-release": "semantic-release" 55 | }, 56 | "dependencies": { 57 | "@supabase/supabase-js": "^2.32.0", 58 | "rxdb": "^14.15.1", 59 | "rxjs": "^7.8.1" 60 | }, 61 | "devDependencies": { 62 | "@supabase/postgrest-js": "^1.8.0", 63 | "@types/node": "^20.5.0", 64 | "@types/prompts": "^2.4.4", 65 | "@typescript-eslint/eslint-plugin": "^6.4.0", 66 | "@typescript-eslint/parser": "^6.4.0", 67 | "c8": "^8.0.1", 68 | "cspell": "^7.0.0", 69 | "esbuild": "^0.19.2", 70 | "eslint": "^8.47.0", 71 | "eslint-config-prettier": "^9.0.0", 72 | "eslint-import-resolver-typescript": "^3.6.0", 73 |
"eslint-plugin-eslint-comments": "^3.2.0", 74 | "eslint-plugin-import": "^2.28.0", 75 | "node-fetch": "^3.3.2", 76 | "nyc": "^15.1.0", 77 | "prettier": "^3.0.2", 78 | "source-map-support": "^0.5.21", 79 | "ts-mockito": "^2.6.1", 80 | "tsx": "^3.12.7", 81 | "typescript": "^5.1.6", 82 | "vitest": "^0.34.1" 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /scripts/build.ts: -------------------------------------------------------------------------------- 1 | import path from "node:path"; 2 | import url from "node:url"; 3 | import { build as esbuild, BuildOptions } from "esbuild"; 4 | 5 | const __dirname = url.fileURLToPath(new URL(".", import.meta.url)); 6 | 7 | const baseConfig: BuildOptions = { 8 | platform: "node", 9 | target: "node18", 10 | format: "esm", 11 | nodePaths: [path.join(__dirname, "../src")], 12 | sourcemap: true, 13 | external: [], 14 | bundle: false, 15 | }; 16 | 17 | async function build() { 18 | await esbuild({ 19 | ...baseConfig, 20 | outdir: path.join(__dirname, "../build"), 21 | entryPoints: [ 22 | path.join(__dirname, "../src/supabase-replication.ts") 23 | ], 24 | }); 25 | } 26 | 27 | if (import.meta.url.startsWith("file:")) { 28 | if (process.argv[1] === url.fileURLToPath(import.meta.url)) { 29 | await build(); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /scripts/clean.ts: -------------------------------------------------------------------------------- 1 | import fs from "node:fs/promises"; 2 | import url from "node:url"; 3 | import path from "node:path"; 4 | 5 | const __dirname = url.fileURLToPath(new URL(".", import.meta.url)); 6 | 7 | async function clean() { 8 | await Promise.all([rmrf("build"), rmrf("coverage"), rmrf(".nyc_output")]); 9 | } 10 | 11 | async function rmrf(pathFromRoot: string): Promise { 12 | await fs.rm(path.join(__dirname, "../", pathFromRoot), { 13 | recursive: true, 14 | force: true, 15 | }); 16 | } 17 | 18 | if (import.meta.url.startsWith("file:")) { 19 | if (process.argv[1] === url.fileURLToPath(import.meta.url)) { 20 | await clean(); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/__tests__/supabase-replication.integration.test.ts: -------------------------------------------------------------------------------- 1 | import process from "process" 2 | import { SupabaseClient, createClient } from "@supabase/supabase-js" 3 | 4 | import { 5 | createRxDatabase, 6 | RxCollection, 7 | RxConflictHandler, 8 | RxConflictHandlerInput, 9 | RxDatabase, 10 | WithDeleted, 11 | addRxPlugin, 12 | } from "rxdb" 13 | import { RxDBDevModePlugin } from "rxdb/plugins/dev-mode" 14 | import { RxReplicationState } from "rxdb/plugins/replication" 15 | import { getRxStorageMemory } from "rxdb/plugins/storage-memory" 16 | import { lastValueFrom, take } from "rxjs" 17 | import { afterEach, beforeAll, beforeEach, describe, expect, it } from "vitest" 18 | import { 19 | SupabaseReplication, 20 | SupabaseReplicationCheckpoint, 21 | SupabaseReplicationOptions, 22 | } from "../supabase-replication.js" 23 | import { Human, HUMAN_SCHEMA } from "../test-utils/test-types.js" 24 | import { withReplication } from "../test-utils/test-utils.js" 25 | 26 | /** 27 | * Integration test running against an actual Supabase instance. 28 | * 29 | * This test will only run if the TEST_SUPABASE_URL and TEST_SUPABASE_API_KEY environment 30 | * variables are present. 
It requires a "humans" table to be created, see humans.sql for 31 | * the table structure. 32 | */ 33 | describe.skipIf(!process.env.INTEGRATION_TEST)( 34 | "replicateSupabase with actual SupabaseClient", 35 | () => { 36 | let supabase: SupabaseClient 37 | let db: RxDatabase 38 | let collection: RxCollection 39 | 40 | beforeAll(() => { 41 | supabase = createClient(process.env.TEST_SUPABASE_URL!, process.env.TEST_SUPABASE_API_KEY!, { 42 | auth: { persistSession: false }, 43 | }) 44 | addRxPlugin(RxDBDevModePlugin) 45 | }) 46 | 47 | beforeEach(async () => { 48 | // Empty the supabase table. 49 | const { error } = await supabase.from("humans").delete().neq("id", -1) 50 | if (error) throw error 51 | 52 | // Create an in-memory RxDB database. 53 | db = await createRxDatabase({ 54 | name: "test", 55 | storage: getRxStorageMemory(), 56 | }) 57 | collection = ( 58 | await db.addCollections({ 59 | humans: { schema: HUMAN_SCHEMA }, 60 | }) 61 | ).humans 62 | 63 | // Start with Alice :) 64 | await replication({}, async () => { 65 | // TODO: remove explicit null, should be set by pull anyways 66 | await collection.insert({ id: "1", name: "Alice", age: null }) 67 | }) 68 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 69 | expect(await supabaseContents()).toEqual([ 70 | { id: "1", name: "Alice", age: null, _deleted: false }, 71 | ]) 72 | }) 73 | 74 | describe("on client-side insertion", () => { 75 | describe("without conflict", () => { 76 | it("inserts into supabase", async () => { 77 | await replication({}, async () => { 78 | await collection.insert({ id: "2", name: "Bob", age: null }) 79 | }) 80 | 81 | expect(await supabaseContents()).toEqual([ 82 | { id: "1", name: "Alice", age: null, _deleted: false }, 83 | { id: "2", name: "Bob", age: null, _deleted: false }, 84 | ]) 85 | }) 86 | }) 87 | 88 | describe("with conflict", () => { 89 | describe("with default conflict handler", () => { 90 | it("drops insertion", async () => { 91 | await supabase.from("humans").insert({ id: "2", name: "Bob" }) 92 | await collection.insert({ id: "2", name: "Bob 2", age: 2 }) 93 | await replication() 94 | 95 | expect(await supabaseContents()).toEqual([ 96 | { id: "1", name: "Alice", age: null, _deleted: false }, 97 | { id: "2", name: "Bob", age: null, _deleted: false }, 98 | ]) 99 | expect(await rxdbContents()).toEqual([ 100 | { id: "1", name: "Alice", age: null }, 101 | { id: "2", name: "Bob", age: null }, 102 | ]) 103 | }) 104 | }) 105 | 106 | describe("with custom conflict handler", () => { 107 | it("invokes conflict handler", async () => { 108 | collection.conflictHandler = resolveConflictWithName("Bob resolved") 109 | 110 | await supabase.from("humans").insert({ id: "2", name: "Bob remote" }) 111 | await collection.insert({ id: "2", name: "Bob local", age: 42 }) 112 | await replication() 113 | 114 | expect(await supabaseContents()).toEqual([ 115 | { id: "1", name: "Alice", age: null, _deleted: false }, 116 | { id: "2", name: "Bob resolved", age: 42, _deleted: false }, 117 | ]) 118 | expect(await rxdbContents()).toEqual([ 119 | { id: "1", name: "Alice", age: null }, 120 | { id: "2", name: "Bob resolved", age: 42 }, 121 | ]) 122 | }) 123 | }) 124 | }) 125 | }) 126 | 127 | describe("on client-side update", () => { 128 | describe("without conflict", () => { 129 | it("updates supabase", async () => { 130 | await replication({}, async () => { 131 | const doc = await collection.findOne("1").exec() 132 | await doc!.patch({ age: 42 }) 133 | }) 134 | expect(await supabaseContents()).toEqual([ 135 
| { id: "1", name: "Alice", age: 42, _deleted: false }, 136 | ]) 137 | }) 138 | 139 | describe("with postgREST special characters", () => { 140 | it("updates supabase", async () => { 141 | // Prepare database with rows that contain special characters. 142 | await collection.insert({ 143 | id: "special-,.()-id", 144 | name: 'Robert "Bob" Doe', 145 | age: null, 146 | }) 147 | await replication() 148 | expect(await supabaseContents()).toEqual([ 149 | { id: "1", name: "Alice", age: null, _deleted: false }, 150 | { 151 | id: "special-,.()-id", 152 | name: 'Robert "Bob" Doe', 153 | age: null, 154 | _deleted: false, 155 | }, 156 | ]) 157 | const doc = await collection.findOne("special-,.()-id").exec() 158 | 159 | // The UPDATE query will now contain the special characters in the URL params. 160 | await doc!.patch({ age: 42 }) 161 | await replication() 162 | 163 | expect(await rxdbContents()).toEqual([ 164 | { id: "1", name: "Alice", age: null }, 165 | { id: "special-,.()-id", name: 'Robert "Bob" Doe', age: 42 }, 166 | ]) 167 | expect(await supabaseContents()).toEqual([ 168 | { id: "1", name: "Alice", age: null, _deleted: false }, 169 | { 170 | id: "special-,.()-id", 171 | name: 'Robert "Bob" Doe', 172 | age: 42, 173 | _deleted: false, 174 | }, 175 | ]) 176 | }) 177 | }) 178 | }) 179 | 180 | describe("with conflict", () => { 181 | beforeEach(async () => { 182 | // Set Alice's age to 42 locally, while changing her name on the server. 183 | const doc = await collection.findOne("1").exec() 184 | await doc!.patch({ age: 42 }) 185 | await supabase.from("humans").update({ name: "Alex" }).eq("id", "1") 186 | }) 187 | 188 | describe("with default conflict handler", () => { 189 | it("applies supabase changes", async () => { 190 | await replication() 191 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alex", age: null }]) 192 | expect(await supabaseContents()).toEqual([ 193 | { id: "1", name: "Alex", age: null, _deleted: false }, 194 | ]) 195 | }) 196 | }) 197 | 198 | describe("with custom conflict handler", () => { 199 | it("invokes conflict handler", async () => { 200 | collection.conflictHandler = resolveConflictWithName("Conflict resolved") 201 | await replication() 202 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Conflict resolved", age: 42 }]) 203 | expect(await supabaseContents()).toEqual([ 204 | { id: "1", name: "Conflict resolved", age: 42, _deleted: false }, 205 | ]) 206 | }) 207 | }) 208 | }) 209 | }) 210 | 211 | describe("when supabase changed while offline", () => { 212 | it("pulls new rows", async () => { 213 | await supabase.from("humans").insert({ id: "2", name: "Bob", age: 42 }) 214 | await replication() 215 | 216 | expect(await rxdbContents()).toEqual([ 217 | { id: "1", name: "Alice", age: null }, 218 | { id: "2", name: "Bob", age: 42 }, 219 | ]) 220 | }) 221 | 222 | it("pulls rows in multiple batches", async () => { 223 | // In this test, we set the batchSize to 1, but insert multiple rows into supabase such that 224 | // they have the same _modified timestamp. The test will only pass if the pull query fetches 225 | // rows with the same timestamp as the last checkpoint (but higher primary key). 
226 | await supabase.from("humans").insert([ 227 | { id: "2", name: "Human 2" }, 228 | { id: "3", name: "Human 3" }, 229 | ]) 230 | await replication({ pull: { batchSize: 1 } }) 231 | 232 | expect(await rxdbContents()).toHaveLength(3) 233 | }) 234 | 235 | it("pulls updated rows", async () => { 236 | await supabase.from("humans").update({ age: 42 }).eq("id", "1") 237 | await replication() 238 | 239 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: 42 }]) 240 | }) 241 | 242 | it("removes rows marked as deleted", async () => { 243 | await supabase.from("humans").update({ _deleted: true }).eq("id", "1") 244 | await replication() 245 | 246 | expect(await rxdbContents()).toEqual([]) 247 | }) 248 | 249 | it("ignores actually deleted rows", async () => { 250 | await supabase.from("humans").delete().eq("id", "1") 251 | await replication() 252 | 253 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 254 | }) 255 | }) 256 | 257 | describe("when supabase changed while online", () => { 258 | describe("without realtime replication", () => { 259 | it("does not pull new rows", async () => { 260 | await replication({}, async () => { 261 | await supabase.from("humans").insert({ id: "2", name: "Bob", age: 42 }) 262 | await new Promise((resolve) => setTimeout(() => resolve(true), 1000)) // Wait for some time 263 | }) 264 | 265 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 266 | }) 267 | }) 268 | 269 | describe("with realtime replication", () => { 270 | it("pulls new rows", async () => { 271 | await replication({ pull: { realtimePostgresChanges: true } }, async (replication) => { 272 | await supabase.from("humans").insert({ id: "2", name: "Bob", age: 42 }) 273 | await lastValueFrom(replication.remoteEvents$.pipe(take(1))) // Wait for remote event 274 | }) 275 | 276 | expect(await rxdbContents()).toEqual([ 277 | { id: "1", name: "Alice", age: null }, 278 | { id: "2", name: "Bob", age: 42 }, 279 | ]) 280 | }) 281 | 282 | it("pulls updated rows", async () => { 283 | await replication({ pull: { realtimePostgresChanges: true } }, async (replication) => { 284 | await supabase.from("humans").update({ age: 42 }).eq("id", "1") 285 | await lastValueFrom(replication.remoteEvents$.pipe(take(1))) // Wait for remote event 286 | }) 287 | 288 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: 42 }]) 289 | }) 290 | 291 | it("removes rows marked as deleted", async () => { 292 | await replication({ pull: { realtimePostgresChanges: true } }, async (replication) => { 293 | await supabase.from("humans").update({ _deleted: true }).eq("id", "1") 294 | await lastValueFrom(replication.remoteEvents$.pipe(take(1))) // Wait for remote event 295 | }) 296 | 297 | expect(await rxdbContents()).toEqual([]) 298 | }) 299 | 300 | it("ignores actually deleted rows", async () => { 301 | await replication({ pull: { realtimePostgresChanges: true } }, async (replication) => { 302 | await supabase.from("humans").delete().eq("id", "1") 303 | await new Promise((resolve) => setTimeout(() => resolve(true), 1000)) // Wait for some time 304 | }) 305 | 306 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 307 | }) 308 | }) 309 | }) 310 | 311 | const replication = ( 312 | options: Partial> = {}, 313 | callback: ( 314 | state: RxReplicationState, 315 | ) => Promise = async () => {}, 316 | expectErrors = false, 317 | ): Promise => { 318 | return withReplication(() => startReplication(options), callback, expectErrors) 319 | } 320 | 
321 | const startReplication = ( 322 | options: Partial> = {}, 323 | ): SupabaseReplication => { 324 | const status = new SupabaseReplication({ 325 | replicationIdentifier: "test", 326 | supabaseClient: supabase, 327 | collection, 328 | pull: { realtimePostgresChanges: false }, 329 | push: {}, 330 | ...options, 331 | }) 332 | return status 333 | } 334 | 335 | const resolveConflictWithName = (name: string): RxConflictHandler => { 336 | return async (input: RxConflictHandlerInput) => { 337 | return { 338 | isEqual: false, 339 | documentData: { ...input.newDocumentState, name }, 340 | } 341 | } 342 | } 343 | 344 | const supabaseContents = async (stripModified = true): Promise[]> => { 345 | const { data, error } = await supabase.from("humans").select().order("id") 346 | if (error) throw error 347 | if (stripModified) data.forEach((human) => delete human._modified) 348 | return data as WithDeleted[] 349 | } 350 | 351 | const rxdbContents = async (): Promise => { 352 | const results = await collection.find().exec() 353 | return results.map((doc) => doc.toJSON()) 354 | } 355 | 356 | afterEach(async () => { 357 | await db.remove() 358 | }) 359 | }, 360 | ) 361 | -------------------------------------------------------------------------------- /src/__tests__/supabase-replication.test.ts: -------------------------------------------------------------------------------- 1 | import { RealtimePostgresChangesPayload } from "@supabase/realtime-js" 2 | import { 3 | addRxPlugin, 4 | createRxDatabase, 5 | RxCollection, 6 | RxDatabase, 7 | RxReplicationWriteToMasterRow, 8 | } from "rxdb" 9 | import { RxDBDevModePlugin } from "rxdb/plugins/dev-mode" 10 | import { RxReplicationState } from "rxdb/plugins/replication" 11 | import { getRxStorageMemory } from "rxdb/plugins/storage-memory" 12 | import { afterEach, beforeAll, beforeEach, describe, expect, it } from "vitest" 13 | import { 14 | SupabaseReplication, 15 | SupabaseReplicationCheckpoint, 16 | SupabaseReplicationOptions, 17 | } from "../supabase-replication.js" 18 | import { SupabaseBackendMock } from "../test-utils/supabase-backend-mock.js" 19 | import { Human, HumanRow, HUMAN_SCHEMA } from "../test-utils/test-types.js" 20 | import { withReplication, resolveConflictWithName } from "../test-utils/test-utils.js" 21 | 22 | describe.skipIf(process.env.INTEGRATION_TEST)("replicateSupabase", () => { 23 | let supabaseMock: SupabaseBackendMock 24 | let db: RxDatabase 25 | let collection: RxCollection 26 | 27 | beforeAll(() => { 28 | addRxPlugin(RxDBDevModePlugin) 29 | }) 30 | 31 | beforeEach(async () => { 32 | // Create an in-memory RxDB database. 33 | db = await createRxDatabase({ 34 | name: "test", 35 | storage: getRxStorageMemory(), 36 | ignoreDuplicate: true, 37 | }) 38 | collection = ( 39 | await db.addCollections({ 40 | humans: { schema: HUMAN_SCHEMA }, 41 | }) 42 | ).humans 43 | 44 | // Supabase client with mocked HTTP. 
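    // The mock intercepts the HTTP requests the Supabase client would send to PostgREST,
    // so each test can declare exactly which queries it expects and what they return
    // (see the expectQuery/expectPull/expectInsert helpers used throughout this file).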
45 | supabaseMock = new SupabaseBackendMock() 46 | }) 47 | 48 | describe("initial pull", () => { 49 | describe("without initial checkpoint", () => { 50 | it("pulls all rows from supabase", async () => { 51 | expectPull().thenReturn(createHumans(1)) 52 | await replication() 53 | 54 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Human 1", age: 11 }]) 55 | }) 56 | }) 57 | 58 | describe("with previous checkpoint", () => { 59 | it("pulls only modified rows", async () => { 60 | const checkpoint: SupabaseReplicationCheckpoint = { 61 | modified: "timestamp", 62 | primaryKeyValue: "pkv", 63 | } 64 | expectPull({ 65 | withFilter: { lastModified: "timestamp", lastPrimaryKey: "pkv" }, 66 | }).thenReturn(createHumans(1)) 67 | await replication({ 68 | pull: { 69 | initialCheckpoint: checkpoint, 70 | realtimePostgresChanges: false, 71 | }, 72 | }) 73 | 74 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Human 1", age: 11 }]) 75 | }) 76 | 77 | describe("with custom modified field", () => { 78 | it("pulls only modified rows using the specified field", async () => { 79 | const checkpoint: SupabaseReplicationCheckpoint = { 80 | modified: "timestamp", 81 | primaryKeyValue: "pkv", 82 | } 83 | expectPull({ 84 | withFilter: { 85 | lastModified: "timestamp", 86 | lastPrimaryKey: "pkv", 87 | modifiedField: "myfield", 88 | }, 89 | }).thenReturn([ 90 | { 91 | id: "1", 92 | name: "Alice", 93 | age: null, 94 | _deleted: false, 95 | myfield: "timestamp", 96 | }, 97 | ]) 98 | await replication({ 99 | pull: { 100 | initialCheckpoint: checkpoint, 101 | realtimePostgresChanges: false, 102 | lastModifiedField: "myfield", 103 | }, 104 | }) 105 | 106 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 107 | }) 108 | }) 109 | }) 110 | 111 | describe("with zero rows", () => { 112 | it("pulls no rows", async () => { 113 | expectPull().thenReturn([]) 114 | await replication() 115 | 116 | expect(await rxdbContents()).toEqual([]) 117 | }) 118 | }) 119 | 120 | describe("with many rows", () => { 121 | it("pulls in batches", async () => { 122 | const expectedQuery = (lastHuman: number) => { 123 | const human = createHuman(lastHuman) 124 | return { 125 | withLimit: BATCH_SIZE, 126 | withFilter: { 127 | lastModified: human._modified, 128 | lastPrimaryKey: human.id, 129 | }, 130 | } 131 | } 132 | 133 | // Expect three queries 134 | const BATCH_SIZE = 13 135 | const humans = createHumans(BATCH_SIZE * 2 + 3) 136 | expectPull({ withLimit: BATCH_SIZE }).thenReturn(humans.slice(0, BATCH_SIZE)) 137 | expectPull(expectedQuery(BATCH_SIZE)).thenReturn(humans.slice(BATCH_SIZE, BATCH_SIZE * 2)) 138 | expectPull(expectedQuery(BATCH_SIZE * 2)).thenReturn(humans.slice(BATCH_SIZE * 2)) 139 | 140 | await replication({ 141 | pull: { batchSize: BATCH_SIZE, realtimePostgresChanges: false }, 142 | }) 143 | 144 | expect(await rxdbContents()).toHaveLength(humans.length) 145 | }) 146 | }) 147 | 148 | describe("with query failing", () => { 149 | it("retries automatically", async () => { 150 | expectPull().thenFail() 151 | expectPull().thenReturn(createHumans(1)) 152 | 153 | const errors = await replication({ retryTime: 10 }, async () => {}, true) 154 | expect(errors).toHaveLength(1) 155 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Human 1", age: 11 }]) 156 | }) 157 | }) 158 | 159 | describe("with deletion", () => { 160 | it("deletes row locally", async () => { 161 | // Fill database first 162 | await collection.insert({ id: "1", name: "Alice", age: null }) 163 | expectPull().thenReturn([]) 164 | 
expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 165 | await replication() 166 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 167 | 168 | // Now return deletion 169 | expectPull().thenReturn([ 170 | { 171 | id: "1", 172 | name: "Alice", 173 | age: null, 174 | _deleted: true, 175 | _modified: "time", 176 | }, 177 | ]) 178 | await replication() 179 | expect(await rxdbContents()).toEqual([]) 180 | }) 181 | 182 | describe("with custom delete field name", () => { 183 | it("uses specified field name", async () => { 184 | // Fill database first (using default here) 185 | await collection.insert({ id: "1", name: "Alice", age: null }) 186 | expectPull().thenReturn([]) 187 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 188 | await replication() 189 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 190 | 191 | // Now return deletion (using custom field here) 192 | expectPull().thenReturn([ 193 | { 194 | id: "1", 195 | name: "Alice", 196 | age: null, 197 | myfield: true, 198 | _modified: "time", 199 | }, 200 | ]) 201 | await replication({ deletedField: "myfield" }) 202 | expect(await rxdbContents()).toEqual([]) 203 | }) 204 | }) 205 | }) 206 | }) 207 | 208 | describe("with client-side insertion", () => { 209 | describe("with single insertion", () => { 210 | it("inserts row to supabase", async () => { 211 | await collection.insert({ id: "1", name: "Alice", age: null }) 212 | expectPull().thenReturn([]) 213 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 214 | 215 | await replication() 216 | }) 217 | }) 218 | 219 | describe("with multiple insertions", () => { 220 | it("triggers multiple INSERT calls", async () => { 221 | // TODO: Batch insertion would be nice in this case. 
222 | await collection.insert({ id: "1", name: "Alice", age: null }) 223 | await collection.insert({ id: "2", name: "Bob", age: 42 }) 224 | expectPull().thenReturn([]) 225 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 226 | expectInsert('{"id":"2","name":"Bob","age":42,"_deleted":false}').thenReturn() 227 | 228 | await replication() 229 | }) 230 | }) 231 | 232 | describe("with custom _delete field", () => { 233 | it("uses specified field", async () => { 234 | await collection.insert({ id: "1", name: "Alice", age: null }) 235 | expectPull().thenReturn([]) 236 | expectInsert('{"id":"1","name":"Alice","age":null,"removed":false}').thenReturn() 237 | 238 | await replication({ deletedField: "removed" }) 239 | }) 240 | }) 241 | 242 | describe("with network error", () => { 243 | it("automatically retries", async () => { 244 | await collection.insert({ id: "1", name: "Alice", age: null }) 245 | expectPull().thenReturn([]) 246 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenFail() 247 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 248 | 249 | const errors = await replication({ retryTime: 10 }, async () => {}, true) 250 | expect(errors).toHaveLength(1) 251 | }) 252 | }) 253 | 254 | describe("with postgres error", () => { 255 | it("automatically retries", async () => { 256 | await collection.insert({ id: "1", name: "Alice", age: null }) 257 | expectPull().thenReturn([]) 258 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturnError( 259 | "53000", 260 | 503, 261 | ) 262 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 263 | 264 | const errors = await replication({ retryTime: 10 }, async () => {}, true) 265 | expect(errors).toHaveLength(1) 266 | }) 267 | }) 268 | 269 | describe("with duplicate key error", () => { 270 | it("fetches current state and invokes conflict handler ", async () => { 271 | collection.conflictHandler = resolveConflictWithName("Resolved Alice") 272 | await collection.insert({ id: "1", name: "Local Alice", age: null }) 273 | expectPull().thenReturn([]) 274 | expectInsert('{"id":"1","name":"Local Alice","age":null,"_deleted":false}').thenReturnError( 275 | "23505", 276 | ) 277 | // Should fetch current state on duplicate key error... 278 | expectSelectById("1").thenReturn([ 279 | { 280 | id: "1", 281 | name: "Remote Alice", 282 | age: 42, 283 | _deleted: false, 284 | _modified: "mod", 285 | }, 286 | ]) 287 | // Should update remote with the result of the conflict handler and the real master state as assumed state. 
288 | supabaseMock 289 | .expectQuery("UPDATE Alice", { 290 | method: "PATCH", 291 | table: "humans", 292 | params: "id=eq.1&name=eq.Remote+Alice&age=eq.42&_deleted=is.false", 293 | body: '{"id":"1","name":"Resolved Alice","age":null,"_deleted":false}', 294 | }) 295 | .thenReturn({}, { "Content-Range": "0-1/1" }) 296 | 297 | await replication() 298 | }) 299 | }) 300 | }) 301 | 302 | describe("with client-side update", () => { 303 | describe("without conflict", () => { 304 | it("performs UPDATE with equality checks", async () => { 305 | await collection.insert({ 306 | id: "1", 307 | name: 'Robert "Bob" Simpson', 308 | age: null, 309 | }) 310 | expectPull().thenReturn([]) 311 | expectInsert( 312 | '{"id":"1","name":"Robert \\"Bob\\" Simpson","age":null,"_deleted":false}', 313 | ).thenReturn() 314 | 315 | await replication({}, async (replication) => { 316 | supabaseMock 317 | .expectQuery("UPDATE Bob", { 318 | method: "PATCH", 319 | table: "humans", 320 | params: "id=eq.1&name=eq.Robert+%22Bob%22+Simpson&age=is.null&_deleted=is.false", 321 | body: '{"id":"1","name":"Bobby","age":42,"_deleted":false}', 322 | }) 323 | .thenReturn({}, { "Content-Range": "0-1/1" }) // TODO: Not sure this is the correct header result 324 | await collection.upsert({ id: "1", name: "Bobby", age: 42 }) 325 | }) 326 | 327 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Bobby", age: 42 }]) 328 | }) 329 | }) 330 | 331 | describe("with conflict", () => { 332 | it("invokes conflict handler and updates again", async () => { 333 | collection.conflictHandler = resolveConflictWithName("Resolved Alice") 334 | const doc = await collection.insert({ 335 | id: "1", 336 | name: "Alice", 337 | age: null, 338 | }) 339 | expectPull().thenReturn([]) 340 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 341 | 342 | await replication({}, async (replication) => { 343 | supabaseMock 344 | .expectQuery("UPDATE Alice (conflicting)", { 345 | method: "PATCH", 346 | table: "humans", 347 | params: "id=eq.1&name=eq.Alice&age=is.null&_deleted=is.false", 348 | body: '{"id":"1","name":"Alice local","age":42,"_deleted":false}', 349 | }) 350 | .thenReturn({}, { "Content-Range": "0-0/0" }) // Zero rows updated 351 | 352 | expectSelectById("1").thenReturn([{ id: "1", name: "Alice remote", age: 54 }]) 353 | supabaseMock 354 | .expectQuery("UPDATE Alice (after resolution)", { 355 | method: "PATCH", 356 | table: "humans", 357 | params: "id=eq.1&name=eq.Alice+remote&age=eq.54&_deleted=is.false", 358 | body: '{"id":"1","name":"Resolved Alice","age":42,"_deleted":false}', 359 | }) 360 | .thenReturn({}, { "Content-Range": "0-1/1" }) // One row updated 361 | 362 | await doc.patch({ name: "Alice local", age: 42 }) 363 | }) 364 | 365 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Resolved Alice", age: 42 }]) 366 | }) 367 | }) 368 | 369 | describe("with custom updateHandler", () => { 370 | describe("returning true", () => { 371 | it("does not trigger any queries", async () => { 372 | const doc = await collection.insert({ 373 | id: "1", 374 | name: "Alice", 375 | age: null, 376 | }) 377 | expectPull().thenReturn([]) 378 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 379 | 380 | await replication( 381 | { push: { updateHandler: () => Promise.resolve(true) } }, 382 | async (replication) => { 383 | await doc.patch({ name: "Alice local", age: 42 }) 384 | }, 385 | ) 386 | 387 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice local", age: 42 }]) 388 | }) 389 | }) 
390 | 391 | describe("returning false", () => { 392 | it("invokes conflict handler and updates again", async () => { 393 | let callCount = 0 394 | const customUpdateHandler = ( 395 | row: RxReplicationWriteToMasterRow, 396 | ): Promise => { 397 | callCount++ 398 | // Only return true (i.e. successful update) if we already fetched the updated state 399 | return Promise.resolve(row.assumedMasterState?.name === "Alice remote") 400 | } 401 | 402 | const doc = await collection.insert({ 403 | id: "1", 404 | name: "Alice", 405 | age: null, 406 | }) 407 | expectPull().thenReturn([]) 408 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 409 | 410 | collection.conflictHandler = resolveConflictWithName("Resolved Alice") 411 | await replication( 412 | { push: { updateHandler: customUpdateHandler } }, 413 | async (replication) => { 414 | expectSelectById("1").thenReturn([{ id: "1", name: "Alice remote", age: 54 }]) 415 | await doc.patch({ name: "Alice local", age: 42 }) 416 | }, 417 | ) 418 | 419 | expect(callCount).toEqual(2) 420 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Resolved Alice", age: 42 }]) 421 | }) 422 | }) 423 | }) 424 | 425 | describe("with network error", () => { 426 | it("automatically retries", async () => { 427 | await collection.insert({ id: "1", name: "Alice", age: null }) 428 | expectPull().thenReturn([]) 429 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 430 | 431 | const errors = await replication( 432 | { retryTime: 10 }, 433 | async () => { 434 | supabaseMock 435 | .expectQuery("UPDATE Alice (failing)", { 436 | method: "PATCH", 437 | table: "humans", 438 | params: "id=eq.1&name=eq.Alice&age=is.null&_deleted=is.false", 439 | body: '{"id":"1","name":"Alice 2","age":42,"_deleted":false}', 440 | }) 441 | .thenFail() 442 | supabaseMock 443 | .expectQuery("UPDATE Alice (retry)", { 444 | method: "PATCH", 445 | table: "humans", 446 | params: "id=eq.1&name=eq.Alice&age=is.null&_deleted=is.false", 447 | body: '{"id":"1","name":"Alice 2","age":42,"_deleted":false}', 448 | }) 449 | .thenReturn({}, { "Content-Range": "0-1/1" }) 450 | await collection.upsert({ id: "1", name: "Alice 2", age: 42 }) 451 | }, 452 | true, 453 | ) 454 | expect(errors).toHaveLength(1) 455 | }) 456 | }) 457 | 458 | // TODO: Test for unsupported field types (i.e. 
jsonb) 459 | }) 460 | 461 | describe("with client-side delete", () => { 462 | describe("with default deleted field", () => { 463 | it("performs UPDATE with equality checks", async () => { 464 | const doc = await collection.insert({ 465 | id: "1", 466 | name: "Alice", 467 | age: null, 468 | }) 469 | expectPull().thenReturn([]) 470 | expectInsert('{"id":"1","name":"Alice","age":null,"_deleted":false}').thenReturn() 471 | 472 | await replication({}, async (replication) => { 473 | supabaseMock 474 | .expectQuery("UPDATE Alice", { 475 | method: "PATCH", 476 | table: "humans", 477 | params: "id=eq.1&name=eq.Alice&age=is.null&_deleted=is.false", 478 | body: '{"id":"1","name":"Alice","age":null,"_deleted":true}', 479 | }) 480 | .thenReturn({}, { "Content-Range": "0-1/1" }) 481 | await doc.remove() 482 | }) 483 | 484 | expect(await rxdbContents()).toEqual([]) 485 | }) 486 | }) 487 | 488 | describe("with default custom deleted field", () => { 489 | it("performs UPDATE with equality checks and custom deleted field", async () => { 490 | const doc = await collection.insert({ 491 | id: "1", 492 | name: "Alice", 493 | age: null, 494 | }) 495 | expectPull().thenReturn([]) 496 | expectInsert('{"id":"1","name":"Alice","age":null,"mydelete":false}').thenReturn() 497 | 498 | await replication({ deletedField: "mydelete" }, async (replication) => { 499 | supabaseMock 500 | .expectQuery("UPDATE Alice", { 501 | method: "PATCH", 502 | table: "humans", 503 | params: "id=eq.1&name=eq.Alice&age=is.null&mydelete=is.false", 504 | body: '{"id":"1","name":"Alice","age":null,"mydelete":true}', 505 | }) 506 | .thenReturn({}, { "Content-Range": "0-1/1" }) 507 | await doc.remove() 508 | }) 509 | 510 | expect(await rxdbContents()).toEqual([]) 511 | }) 512 | }) 513 | }) 514 | 515 | describe("with realtime enabled", () => { 516 | let realtimeSubscription: { 517 | verifyUnsubscribed: any 518 | next: (event: Partial>) => any 519 | } 520 | 521 | beforeEach(() => { 522 | const topic = "rxdb-supabase-test" 523 | realtimeSubscription = supabaseMock.expectRealtimeSubscription("humans", topic) 524 | }) 525 | 526 | describe("without events received", () => { 527 | it("subscribes to and unsubscribes from RealtimeChannel", async () => { 528 | expectPull().thenReturn([]) 529 | await replication({ pull: { realtimePostgresChanges: true } }, async () => { 530 | realtimeSubscription.verifyUnsubscribed.never() 531 | }) 532 | realtimeSubscription.verifyUnsubscribed.once() 533 | }) 534 | }) 535 | 536 | describe("with insert event received", () => { 537 | it("inserts new row locally", async () => { 538 | expectPull().thenReturn([]) 539 | await replication({ pull: { realtimePostgresChanges: true } }, async () => { 540 | realtimeSubscription.next({ 541 | eventType: "INSERT", 542 | new: { 543 | id: "2", 544 | name: "Bob", 545 | age: null, 546 | _deleted: false, 547 | _modified: "2023-1", 548 | }, 549 | }) 550 | }) 551 | expect(await rxdbContents()).toEqual([{ id: "2", name: "Bob", age: null }]) 552 | }) 553 | }) 554 | 555 | describe("with multiple realtime events received", () => { 556 | it("updates local state", async () => { 557 | expectPull().thenReturn([]) 558 | await replication({ pull: { realtimePostgresChanges: true } }, async () => { 559 | realtimeSubscription.next({ 560 | eventType: "INSERT", 561 | new: { 562 | id: "2", 563 | name: "Bob", 564 | age: null, 565 | _deleted: false, 566 | _modified: "2023-1", 567 | }, 568 | }) 569 | realtimeSubscription.next({ 570 | eventType: "UPDATE", 571 | new: { 572 | id: "2", 573 | name: "Bob", 574 | age: 
42, 575 | _deleted: false, 576 | _modified: "2023-2", 577 | }, 578 | }) 579 | realtimeSubscription.next({ 580 | eventType: "INSERT", 581 | new: { 582 | id: "3", 583 | name: "Carl", 584 | age: null, 585 | _deleted: false, 586 | _modified: "2023-3", 587 | }, 588 | }) 589 | realtimeSubscription.next({ 590 | eventType: "UPDATE", 591 | new: { 592 | id: "1", 593 | name: "Alice", 594 | age: null, 595 | _deleted: true, 596 | _modified: "2023-4", 597 | }, 598 | }) 599 | }) 600 | expect(await rxdbContents()).toEqual([ 601 | { id: "2", name: "Bob", age: 42 }, 602 | { id: "3", name: "Carl", age: null }, 603 | ]) 604 | }) 605 | }) 606 | 607 | describe("with DELETE event received", () => { 608 | it("ignores event", async () => { 609 | expectPull().thenReturn([]) 610 | await replication({ pull: { realtimePostgresChanges: true } }, async () => { 611 | realtimeSubscription.next({ 612 | eventType: "INSERT", 613 | new: { 614 | id: "1", 615 | name: "Alice", 616 | age: null, 617 | _deleted: false, 618 | _modified: "2023-1", 619 | }, 620 | }) 621 | realtimeSubscription.next({ 622 | eventType: "DELETE", 623 | old: { 624 | id: "1", 625 | name: "Alice", 626 | age: null, 627 | _deleted: false, 628 | _modified: "2023-1", 629 | }, 630 | }) 631 | }) 632 | expect(await rxdbContents()).toEqual([{ id: "1", name: "Alice", age: null }]) 633 | }) 634 | }) 635 | }) 636 | 637 | const replication = ( 638 | options: Partial> = {}, 639 | callback: ( 640 | state: RxReplicationState, 641 | ) => Promise = async () => {}, 642 | expectErrors = false, 643 | ): Promise => { 644 | return withReplication(() => startReplication(options), callback, expectErrors) 645 | } 646 | 647 | const startReplication = ( 648 | options: Partial> = {}, 649 | ): SupabaseReplication => { 650 | const status = new SupabaseReplication({ 651 | replicationIdentifier: "test", 652 | supabaseClient: supabaseMock.client, 653 | collection, 654 | pull: { realtimePostgresChanges: false }, 655 | push: {}, 656 | ...options, 657 | }) 658 | return status 659 | } 660 | 661 | const expectPull = ( 662 | options: { 663 | withLimit?: number 664 | withFilter?: { 665 | lastModified: string 666 | lastPrimaryKey: string 667 | modifiedField?: string 668 | } 669 | } = {}, 670 | ) => { 671 | // TODO: test double quotes inside a search string 672 | const modifiedField = options.withFilter?.modifiedField || "_modified" 673 | let expectedFilter = "" 674 | if (options.withFilter) { 675 | expectedFilter = 676 | `&or=%28${modifiedField}.gt.%22${options.withFilter.lastModified}%22%2C` + 677 | `and%28${modifiedField}.eq.%22${options.withFilter.lastModified}%22%2C` + 678 | `id.gt.%22${options.withFilter.lastPrimaryKey}%22%29%29` 679 | } 680 | return supabaseMock.expectQuery(`Pull query with filter ${expectedFilter}`, { 681 | table: "humans", 682 | params: `select=*${expectedFilter}&order=${modifiedField}.asc%2Cid.asc&limit=${ 683 | options.withLimit || 100 684 | }`, 685 | }) 686 | } 687 | 688 | const expectSelectById = (id: string) => { 689 | return supabaseMock.expectQuery(`Select by id ${id}`, { 690 | table: "humans", 691 | params: `select=*&id=eq.${id}&limit=1`, 692 | }) 693 | } 694 | 695 | const expectInsert = (body: string) => { 696 | return supabaseMock.expectInsert("humans", body) 697 | } 698 | 699 | const rxdbContents = async (): Promise => { 700 | const results = await collection.find().exec() 701 | return results.map((doc) => doc.toJSON()) 702 | } 703 | 704 | const createHumans = (count: number): HumanRow[] => { 705 | return Array.from(Array(count).keys()).map((id) => createHuman(id 
+ 1))
706 |   }
707 | 
708 |   const createHuman = (id: number): HumanRow => {
709 |     return {
710 |       id: id.toString(),
711 |       name: `Human ${id}`,
712 |       age: id % 2 == 0 ? null : id * 11,
713 |       _deleted: false,
714 |       _modified: "2023-" + id,
715 |     }
716 |   }
717 | 
718 |   afterEach(async () => {
719 |     supabaseMock.verifyNoMoreQueriesExpected()
720 |     await db.remove()
721 |   })
722 | })
723 | 
--------------------------------------------------------------------------------
/src/supabase-replication.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeChannel, SupabaseClient } from "@supabase/supabase-js"
2 | import {
3 |   ReplicationOptions,
4 |   ReplicationPullHandlerResult,
5 |   ReplicationPullOptions,
6 |   ReplicationPushOptions,
7 |   RxReplicationPullStreamItem,
8 |   RxReplicationWriteToMasterRow,
9 |   WithDeleted,
10 | } from "rxdb"
11 | import { RxReplicationState } from "rxdb/plugins/replication"
12 | import { Subject } from "rxjs"
13 | 
14 | const DEFAULT_LAST_MODIFIED_FIELD = "_modified"
15 | const DEFAULT_DELETED_FIELD = "_deleted"
16 | const POSTGRES_DUPLICATE_KEY_ERROR_CODE = "23505"
17 | 
18 | export type SupabaseReplicationOptions<RxDocType> = {
19 |   /**
20 |    * The SupabaseClient to replicate with.
21 |    */
22 |   supabaseClient: SupabaseClient
23 | 
24 |   /**
25 |    * The table to replicate to, if different from the name of the collection.
26 |    * @default the name of the RxDB collection.
27 |    */
28 |   table?: string
29 | 
30 |   /**
31 |    * The primary key of the supabase table, if different from the primary key of the RxDB collection.
32 |    * @default the primary key of the RxDB collection
33 |    */
34 |   // TODO: Support composite primary keys.
35 |   primaryKey?: string
36 | 
37 |   /**
38 |    * Options for pulling data from supabase. Set to {} to pull with the default
39 |    * options, as no data will be pulled if the field is absent.
40 |    */
41 |   pull?: Omit<
42 |     ReplicationPullOptions<RxDocType, SupabaseReplicationCheckpoint>,
43 |     "handler" | "stream$"
44 |   > & {
45 |     /**
46 |      * Whether to subscribe to realtime Postgres changes for the table. If set to false,
47 |      * only an initial pull will be performed. Only has an effect if the live option is set
48 |      * to true.
49 |      * @default true
50 |      */
51 |     realtimePostgresChanges?: boolean
52 | 
53 |     /**
54 |      * The name of the supabase field that is automatically updated to the last
55 |      * modified timestamp by postgres. This field is required for the pull sync
56 |      * to work and can easily be implemented with moddatetime in supabase.
57 |      * @default '_modified'
58 |      */
59 |     lastModifiedField?: string
60 |   }
61 | 
62 |   /**
63 |    * Options for pushing data to supabase. Set to {} to push with the default
64 |    * options, as no data will be pushed if the field is absent.
65 |    */
66 |   // TODO: enable custom batch size (currently always one row at a time)
67 |   push?: Omit<ReplicationPushOptions<RxDocType>, "handler" | "batchSize"> & {
68 |     /**
69 |      * Handler for pushing row updates to supabase. Must return true iff the UPDATE was
70 |      * applied to the supabase table. Returning false signals a write conflict, in
71 |      * which case the current state of the row will be fetched from supabase and passed to
72 |      * the RxDB collection's conflict handler.
73 |      * @default the default handler will update the row only iff all fields match the
74 |      * local state (before the update was applied), otherwise the conflict handler is
75 |      * invoked. The default handler does not support JSON fields at the moment.
76 |      */
77 |     // TODO: Support JSON fields
78 |     updateHandler?: (row: RxReplicationWriteToMasterRow<RxDocType>) => Promise<boolean>
79 |   }
80 | } & Omit<
81 |   // We don't support waitForLeadership. You should just run in a SharedWorker anyways, no?
82 |   ReplicationOptions<RxDocType, SupabaseReplicationCheckpoint>,
83 |   "pull" | "push" | "waitForLeadership"
84 | >
85 | 
86 | /**
87 |  * The checkpoint stores until which point the client and supabase have been synced.
88 |  * For this to work, we require each row to have a datetime field that contains the
89 |  * last modified time. In case two rows have the same timestamp, we use the primary
90 |  * key to define a strict order.
91 |  */
92 | export interface SupabaseReplicationCheckpoint {
93 |   modified: string
94 |   primaryKeyValue: string | number
95 | }
96 | 
97 | /**
98 |  * Replicates the local RxDB database with the given Supabase client.
99 |  *
100 |  * See SupabaseReplicationOptions for the various configuration options. For a general introduction
101 |  * to RxDB's replication protocol, see https://rxdb.info/replication.html
102 |  */
103 | export class SupabaseReplication<RxDocType> extends RxReplicationState<
104 |   RxDocType,
105 |   SupabaseReplicationCheckpoint
106 | > {
107 |   private readonly table: string
108 |   private readonly primaryKey: string
109 |   private readonly lastModifiedFieldName: string
110 | 
111 |   private readonly realtimeChanges: Subject<
112 |     RxReplicationPullStreamItem<RxDocType, SupabaseReplicationCheckpoint>
113 |   >
114 |   private realtimeChannel?: RealtimeChannel
115 | 
116 |   constructor(private options: SupabaseReplicationOptions<RxDocType>) {
117 |     const realtimeChanges = new Subject<
118 |       RxReplicationPullStreamItem<RxDocType, SupabaseReplicationCheckpoint>
119 |     >()
120 |     super(
121 |       options.replicationIdentifier,
122 |       options.collection,
123 |       options.deletedField || DEFAULT_DELETED_FIELD,
124 |       options.pull && {
125 |         ...options.pull,
126 |         stream$: realtimeChanges,
127 |         handler: (lastCheckpoint, batchSize) => this.pullHandler(lastCheckpoint, batchSize),
128 |       },
129 |       options.push && {
130 |         ...options.push,
131 |         batchSize: 1, // TODO: support batch insertion
132 |         handler: (rows) => this.pushHandler(rows),
133 |       },
134 |       typeof options.live === "undefined" ? true : options.live,
135 |       typeof options.retryTime === "undefined" ? 5000 : options.retryTime,
136 |       typeof options.autoStart === "undefined" ? true : options.autoStart,
137 |     )
138 |     this.realtimeChanges = realtimeChanges
139 |     this.table = options.table || options.collection.name
140 |     this.primaryKey = options.primaryKey || options.collection.schema.primaryPath
141 |     this.lastModifiedFieldName = options.pull?.lastModifiedField || DEFAULT_LAST_MODIFIED_FIELD
142 | 
143 |     if (this.autoStart) {
144 |       this.start()
145 |     }
146 |   }
147 | 
148 |   public override async start(): Promise<void> {
149 |     if (
150 |       this.live &&
151 |       this.options.pull &&
152 |       (this.options.pull.realtimePostgresChanges ||
153 |         typeof this.options.pull.realtimePostgresChanges === "undefined")
154 |     ) {
155 |       this.watchPostgresChanges()
156 |     }
157 |     return super.start()
158 |   }
159 | 
160 |   public override async cancel(): Promise<any> {
161 |     if (this.realtimeChannel) {
162 |       return Promise.all([super.cancel(), this.realtimeChannel.unsubscribe()])
163 |     }
164 |     return super.cancel()
165 |   }
166 | 
167 |   /**
168 |    * Pulls all changes since the last checkpoint from supabase.
169 |    */
170 |   private async pullHandler(
171 |     lastCheckpoint: SupabaseReplicationCheckpoint,
172 |     batchSize: number,
173 |   ): Promise<ReplicationPullHandlerResult<RxDocType, SupabaseReplicationCheckpoint>> {
174 |     let query = this.options.supabaseClient.from(this.table).select()
175 |     if (lastCheckpoint && lastCheckpoint.modified) {
176 |       // Construct the PostgREST query for the following condition:
177 |       // WHERE _modified > lastModified OR (_modified = lastModified AND primaryKey > lastPrimaryKey)
178 |       const lastModified = JSON.stringify(lastCheckpoint.modified)
179 |       const lastPrimaryKey = JSON.stringify(lastCheckpoint.primaryKeyValue) // TODO: Add test for an integer primary key
180 |       const isNewer = `${this.lastModifiedFieldName}.gt.${lastModified}`
181 |       const isSameAge = `${this.lastModifiedFieldName}.eq.${lastModified}`
182 |       query = query.or(`${isNewer},and(${isSameAge},${this.primaryKey}.gt.${lastPrimaryKey})`)
183 |     }
184 |     query = query.order(this.lastModifiedFieldName).order(this.primaryKey).limit(batchSize)
185 |     //console.debug("Pulling changes since", lastCheckpoint?.modified, "with query", (query as any)['url'].toString())
186 | 
187 |     const { data, error } = await query
188 |     if (error) throw error
189 |     if (data.length == 0) {
190 |       return {
191 |         checkpoint: lastCheckpoint,
192 |         documents: [],
193 |       }
194 |     } else {
195 |       return {
196 |         checkpoint: this.rowToCheckpoint(data[data.length - 1]),
197 |         documents: data.map(this.rowToRxDoc.bind(this)),
198 |       }
199 |     }
200 |   }
201 | 
202 |   /**
203 |    * Pushes local changes to supabase.
204 |    */
205 |   private async pushHandler(
206 |     rows: RxReplicationWriteToMasterRow<RxDocType>[],
207 |   ): Promise<WithDeleted<RxDocType>[]> {
208 |     if (rows.length != 1) throw new Error("Invalid batch size")
209 |     const row = rows[0]
210 |     //console.debug("Pushing changes...", row.newDocumentState)
211 |     return row.assumedMasterState
212 |       ? this.handleUpdate(row)
213 |       : this.handleInsertion(row.newDocumentState)
214 |   }
215 | 
216 |   /**
217 |    * Tries to insert a new row. Returns the current state of the row in case of a conflict.
218 |    */
219 |   private async handleInsertion(doc: WithDeleted<RxDocType>): Promise<WithDeleted<RxDocType>[]> {
220 |     const { error } = await this.options.supabaseClient.from(this.table).insert(doc)
221 |     if (!error) {
222 |       return [] // Success :)
223 |     } else if (error.code == POSTGRES_DUPLICATE_KEY_ERROR_CODE) {
224 |       // The row was already inserted. Fetch current state and let conflict handler resolve it.
225 |       return [await this.fetchByPrimaryKey((doc as any)[this.primaryKey])]
226 |     } else {
227 |       throw error
228 |     }
229 |   }
230 | 
231 |   /**
232 |    * Updates a row in supabase if all fields match the local state. Otherwise, the current
233 |    * state is fetched and passed to the conflict handler.
234 |    */
235 |   private async handleUpdate(
236 |     row: RxReplicationWriteToMasterRow<RxDocType>,
237 |   ): Promise<WithDeleted<RxDocType>[]> {
238 |     const updateHandler = this.options.push?.updateHandler || this.defaultUpdateHandler.bind(this)
239 |     if (await updateHandler(row)) return [] // Success :)
240 |     // Fetch current state and let conflict handler resolve it.
241 |     return [await this.fetchByPrimaryKey((row.newDocumentState as any)[this.primaryKey])]
242 |   }
243 | 
244 |   /**
245 |    * Updates the row only if all database fields match the expected state.
246 |    */
247 |   private async defaultUpdateHandler(
248 |     row: RxReplicationWriteToMasterRow<RxDocType>,
249 |   ): Promise<boolean> {
250 |     let query = this.options.supabaseClient
251 |       .from(this.table)
252 |       .update(row.newDocumentState, { count: "exact" })
253 |     Object.entries(row.assumedMasterState!).forEach(([field, value]) => {
254 |       const type = typeof value
255 |       if (type === "string" || type === "number") {
256 |         query = query.eq(field, value)
257 |       } else if (type === "boolean" || value === null) {
258 |         query = query.is(field, value)
259 |       } else {
260 |         throw new Error(`replicateSupabase: Unsupported field of type ${type}`)
261 |       }
262 |     })
263 |     const { error, count } = await query
264 |     if (error) throw error
265 |     return count == 1
266 |   }
267 | 
268 |   private watchPostgresChanges() {
269 |     this.realtimeChannel = this.options.supabaseClient
270 |       .channel(`rxdb-supabase-${this.replicationIdentifierHash}`)
271 |       .on("postgres_changes", { event: "*", schema: "public", table: this.table }, (payload) => {
272 |         if (payload.eventType === "DELETE" || !payload.new) return // Should have set _deleted field already
273 |         //console.debug('Realtime event received:', payload)
274 |         this.realtimeChanges.next({
275 |           checkpoint: this.rowToCheckpoint(payload.new),
276 |           documents: [this.rowToRxDoc(payload.new)],
277 |         })
278 |       })
279 |       .subscribe()
280 |   }
281 | 
282 |   private async fetchByPrimaryKey(primaryKeyValue: any): Promise<WithDeleted<RxDocType>> {
283 |     const { data, error } = await this.options.supabaseClient
284 |       .from(this.table)
285 |       .select()
286 |       .eq(this.primaryKey, primaryKeyValue)
287 |       .limit(1)
288 |     if (error) throw error
289 |     if (data.length != 1) throw new Error("No row with given primary key")
290 |     return this.rowToRxDoc(data[0])
291 |   }
292 | 
293 |   private rowToRxDoc(row: any): WithDeleted<RxDocType> {
294 |     // TODO: Don't delete the field if it is actually part of the collection
295 |     delete row[this.lastModifiedFieldName]
296 |     return row as WithDeleted<RxDocType>
297 |   }
298 | 
299 |   private rowToCheckpoint(row: any): SupabaseReplicationCheckpoint {
300 |     return {
301 |       modified: row[this.lastModifiedFieldName],
302 |       primaryKeyValue: row[this.primaryKey],
303 |     }
304 |   }
305 | }
306 | 
--------------------------------------------------------------------------------
/src/test-utils/humans.sql:
--------------------------------------------------------------------------------
1 | --
2 | -- Table required for integration test against real supabase instance
3 | --
4 | 
5 | CREATE TABLE public.humans (
6 |     id text NOT NULL,
7 |     name text NOT NULL,
8 |     age smallint,
9 |     _deleted boolean DEFAULT false NOT NULL,
10 |     _modified timestamp with time zone DEFAULT now() NOT NULL
11 | );
12 | 
13 | ALTER TABLE public.humans OWNER TO postgres;
14 | 
15 | ALTER TABLE ONLY public.humans
16 |     ADD CONSTRAINT humans_pkey PRIMARY KEY (id);
17 | 
18 | CREATE TRIGGER update_modified_datetime BEFORE UPDATE ON public.humans FOR EACH ROW EXECUTE FUNCTION extensions.moddatetime('_modified');
19 | 
20 | GRANT ALL ON TABLE public.humans TO anon;
21 | GRANT ALL ON TABLE public.humans TO authenticated;
22 | GRANT ALL ON TABLE public.humans TO service_role;
23 | 
--------------------------------------------------------------------------------
/src/test-utils/supabase-backend-mock.ts:
--------------------------------------------------------------------------------
1 | import {
2 |   RealtimeChannel,
3 |   RealtimeClient,
4 |   RealtimePostgresChangesFilter,
5 |   RealtimePostgresChangesPayload,
6 |   SupabaseClient,
7 | } from "@supabase/supabase-js"
8 | import { Response,
RequestInfo, RequestInit } from "node-fetch" 9 | import { anyFunction, anyString, anything, instance, mock, verify, when } from "ts-mockito" 10 | import { expect, vi } from "vitest" 11 | 12 | type RequestCheck = (input: URL | RequestInfo, options?: RequestInit | undefined) => void 13 | 14 | interface ExpectedFetch { 15 | name: string 16 | requestCheck: RequestCheck 17 | response: Promise 18 | } 19 | 20 | /** 21 | * Runs a real SuperbaseClient against a mock backend by using a custom fetch implementation. 22 | * Any calls to the RealtimeClient will be mocked as well. 23 | */ 24 | // TODO: Use fetch-mock package 25 | export class SupabaseBackendMock { 26 | readonly url = "http://example.com/" 27 | readonly key = "ABCDEF" 28 | readonly client: SupabaseClient 29 | 30 | private expectedFetches: ExpectedFetch[] = [] 31 | private realtimeClientMock = mock(RealtimeClient) 32 | 33 | constructor(options: any = {}) { 34 | this.client = new SupabaseClient(this.url, this.key, { 35 | ...options, 36 | global: { 37 | fetch: this.fetch.bind(this), 38 | }, 39 | auth: { 40 | persistSession: false, 41 | }, 42 | }) 43 | const hackedClient = this.client as any 44 | hackedClient.realtime = instance(this.realtimeClientMock) 45 | } 46 | 47 | expectFetch(name: string, requestCheck: RequestCheck) { 48 | return { 49 | thenReturn: (body: any = {}, headers: Record = {}) => { 50 | const response = new Response(JSON.stringify(body), { 51 | status: 200, 52 | statusText: "OK", 53 | headers, 54 | }) 55 | this.expectedFetches.push({ 56 | name, 57 | requestCheck, 58 | response: Promise.resolve(response), 59 | }) 60 | }, 61 | thenReturnError: (errorCode: string, httpCode = 409, message = "Test error message") => { 62 | const response = new Response(JSON.stringify({ code: errorCode, message }), { 63 | status: httpCode, 64 | statusText: "ERROR", 65 | }) 66 | this.expectedFetches.push({ 67 | name, 68 | requestCheck, 69 | response: Promise.resolve(response), 70 | }) 71 | }, 72 | thenFail: (error: any = {}) => { 73 | this.expectedFetches.push({ 74 | name, 75 | requestCheck, 76 | response: Promise.reject(error), 77 | }) 78 | }, 79 | } 80 | } 81 | 82 | expectQuery( 83 | name: string, 84 | expected: { table: string; params?: string; method?: string; body?: string }, 85 | ) { 86 | let expectedUrl = `${this.url}rest/v1/${expected.table}` 87 | if (expected.params) expectedUrl += `?${expected.params}` 88 | return this.expectFetch(name, (input: URL | RequestInfo, options?: RequestInit | undefined) => { 89 | // Set custom message to prevent output being truncated 90 | expect(options?.method).toEqual(expected.method || "GET") 91 | expect(input.toString(), `Expected ${input.toString()} to equal ${expectedUrl}`).toEqual( 92 | expectedUrl, 93 | ) 94 | expect(options?.body, `Expected ${options?.body} to equal ${expected.body}`).toEqual( 95 | expected.body, 96 | ) 97 | }) 98 | } 99 | 100 | expectInsert(table: string, body: string) { 101 | return this.expectQuery(`INSERT to ${table}: ${body}`, { 102 | table, 103 | method: "POST", 104 | body, 105 | }) 106 | } 107 | 108 | verifyNoMoreQueriesExpected() { 109 | expect( 110 | this.expectedFetches.map((exp) => exp.name), 111 | "Expected more Supabase calls", 112 | ).toEqual([]) 113 | } 114 | 115 | private fetch(input: URL | RequestInfo, options?: RequestInit | undefined): Promise { 116 | expect( 117 | this.expectedFetches, 118 | `Did not expect any requests. 
Got ${options?.method} ${input}`, 119 | ).not.toHaveLength(0) 120 | const expected = this.expectedFetches[0] 121 | this.expectedFetches = this.expectedFetches.slice(1) 122 | expected.requestCheck(input, options) 123 | return expected.response 124 | } 125 | 126 | expectRealtimeSubscription>( 127 | table: string, 128 | topic: string, 129 | event = "*", 130 | schema = "public", 131 | ) { 132 | const channelMock = mock(RealtimeChannel) 133 | let capturedCallback: (payload: RealtimePostgresChangesPayload) => void 134 | when(this.realtimeClientMock.channel(topic, anything())).thenReturn(instance(channelMock)) 135 | when(channelMock.on(anyString(), anything(), anyFunction())).thenCall( 136 | ( 137 | type, 138 | filter: RealtimePostgresChangesFilter, 139 | callback: (payload: RealtimePostgresChangesPayload) => void, 140 | ) => { 141 | expect(filter.event).toEqual(event) 142 | expect(filter.table).toEqual(table) 143 | expect(filter.schema).toEqual(schema) 144 | capturedCallback = callback 145 | return instance(channelMock) 146 | }, 147 | ) 148 | when(channelMock.subscribe()).thenReturn(instance(channelMock)) 149 | return { 150 | next: (event: Partial>) => { 151 | expect(capturedCallback, "Expected realtime subscription did not happen").toBeTruthy() 152 | capturedCallback(event as RealtimePostgresChangesPayload) 153 | }, 154 | verifyUnsubscribed: verify(channelMock.unsubscribe()), 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /src/test-utils/test-types.ts: -------------------------------------------------------------------------------- 1 | import { WithDeleted } from "rxdb" 2 | 3 | export interface Human { 4 | id: string 5 | name: string 6 | age: number | null 7 | } 8 | 9 | export type HumanRow = WithDeleted & { 10 | _modified: string 11 | } 12 | 13 | export const HUMAN_SCHEMA = { 14 | title: "human schema", 15 | version: 0, 16 | primaryKey: "id", 17 | type: "object", 18 | properties: { 19 | id: { 20 | type: "string", 21 | maxLength: 100, 22 | }, 23 | name: { 24 | type: "string", 25 | }, 26 | age: { 27 | description: "age in years", 28 | type: "integer", 29 | minimum: 0, 30 | maximum: 150, 31 | multipleOf: 1, 32 | }, 33 | }, 34 | required: ["name", "id", "age"], 35 | indexes: ["age"], 36 | } 37 | -------------------------------------------------------------------------------- /src/test-utils/test-utils.ts: -------------------------------------------------------------------------------- 1 | import { RxConflictHandler, RxConflictHandlerInput, RxError } from "rxdb" 2 | import { RxReplicationState } from "rxdb/plugins/replication" 3 | import { SupabaseReplication, SupabaseReplicationCheckpoint } from "../supabase-replication.js" 4 | import { Human, HumanRow } from "./test-types.js" 5 | 6 | /** 7 | * Starts a SupabaseReplication with the given factory, executes the given callback while the replication 8 | * is running, and then stops the replication again. 9 | * 10 | * Throws on any errors that happened within the replication code, unless expectErrors is set to true, in 11 | * which case all errors are returned. 
12 |  */
13 | export async function withReplication(
14 |   replicationFactory: () => SupabaseReplication<Human>,
15 |   callback: (
16 |     state: RxReplicationState<Human, SupabaseReplicationCheckpoint>,
17 |   ) => Promise<void> = async () => {},
18 |   expectErrors = false,
19 | ): Promise<Error[]> {
20 |   return new Promise(async (resolve, reject) => {
21 |     const errors: Error[] = []
22 |     const replication = replicationFactory()
23 |     replication.error$.subscribe((error: any) => {
24 |       if (expectErrors) {
25 |         errors.push(error)
26 |       } else {
27 |         console.error("Replication emitted an unexpected error:", error)
28 |         reject(error.rxdb ? error.parameters.errors![0] : error)
29 |       }
30 |     })
31 |     await replication.awaitInitialReplication()
32 |     await callback(replication)
33 |     await replication.awaitInSync()
34 |     await replication.cancel()
35 |     resolve(errors)
36 |   })
37 | }
38 | 
39 | /**
40 |  * A simple conflict handler for tests
41 |  */
42 | export function resolveConflictWithName(name: string): RxConflictHandler<Human> {
43 |   return async (input: RxConflictHandlerInput<Human>) => {
44 |     return {
45 |       isEqual: false,
46 |       documentData: { ...input.newDocumentState, name },
47 |     }
48 |   }
49 | }
50 | 
--------------------------------------------------------------------------------
/tsconfig.build.json:
--------------------------------------------------------------------------------
1 | {
2 |   "extends": "./tsconfig.json",
3 |   "exclude": ["./src/**/__tests__"],
4 | }
5 | 
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 |   "include": ["./src/**/*.ts"],
3 |   "compilerOptions": {
4 |     "lib": ["es2022"],
5 |     "module": "esnext",
6 |     "target": "es2022",
7 |     "moduleResolution": "nodenext",
8 | 
9 |     "rootDir": "./",
10 |     "outDir": "build",
11 | 
12 |     "strict": true,
13 |     "sourceMap": true,
14 |     "esModuleInterop": true,
15 |     "skipLibCheck": true,
16 |     "forceConsistentCasingInFileNames": true,
17 |     "declaration": true,
18 |     "resolveJsonModule": true
19 |   }
20 | }
21 | 
--------------------------------------------------------------------------------
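
For orientation, a minimal usage sketch of the replication class defined in src/supabase-replication.ts follows. It is not a file from this repository: the database name, schema, storage plugin, Supabase URL, and API key are illustrative assumptions; only the constructor options (replicationIdentifier, supabaseClient, collection, pull, push) are taken from the source and tests above.

```ts
import { createClient } from "@supabase/supabase-js"
import { createRxDatabase } from "rxdb"
import { getRxStorageDexie } from "rxdb/plugins/storage-dexie"
import { SupabaseReplication } from "rxdb-supabase"

// Assumed example schema mirroring the "humans" table used by the tests.
const humanSchema = {
  title: "human schema",
  version: 0,
  primaryKey: "id",
  type: "object",
  properties: {
    id: { type: "string", maxLength: 100 },
    name: { type: "string" },
    age: { type: "integer" },
  },
  required: ["id", "name"],
}

async function startSync() {
  // Local RxDB database and collection (storage choice is an assumption).
  const db = await createRxDatabase({ name: "humans-db", storage: getRxStorageDexie() })
  const collections = await db.addCollections({ humans: { schema: humanSchema } })

  // Supabase client for your project (URL and key are placeholders).
  const supabase = createClient("https://YOUR-PROJECT.supabase.co", "YOUR-ANON-KEY")

  // Start two-way replication. Passing `pull: {}` and `push: {}` enables both
  // directions with default options; realtime Postgres changes are on by default
  // while the replication is live.
  const replication = new SupabaseReplication({
    replicationIdentifier: "humans-replication",
    supabaseClient: supabase,
    collection: collections.humans,
    pull: {},
    push: {},
  })

  replication.error$.subscribe((err) => console.error("replication error", err))
  return replication
}
```

As humans.sql above shows, the server-side table is expected to carry a boolean deleted flag (default `_deleted`) and a last-modified timestamp column (default `_modified`) kept up to date by the moddatetime trigger.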