├── .editorconfig ├── .eslintignore ├── .eslintrc.js ├── .gitignore ├── .prettierrc ├── .vscode └── settings.json ├── LICENSE ├── README.md ├── assets ├── demo.gif ├── s2-black.png └── s2-white.png ├── package-lock.json ├── package.json ├── src ├── index.ts ├── logger.ts ├── protocol.ts ├── snapshot.ts ├── types.ts └── utils.ts ├── test ├── env.d.ts ├── index.spec.ts └── tsconfig.json ├── tsconfig.json ├── vitest.config.mts ├── worker-configuration.d.ts └── wrangler.jsonc /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | indent_style = tab 6 | end_of_line = lf 7 | charset = utf-8 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | 11 | [*.yml] 12 | indent_style = space 13 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | *.js 4 | .wrangler/ 5 | test/ 6 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | env: { 4 | es2022: true, 5 | node: true, 6 | worker: true, 7 | }, 8 | extends: [ 9 | 'eslint:recommended', 10 | ], 11 | parser: '@typescript-eslint/parser', 12 | parserOptions: { 13 | ecmaVersion: 2022, 14 | sourceType: 'module', 15 | project: './tsconfig.json', 16 | }, 17 | plugins: [ 18 | '@typescript-eslint', 19 | ], 20 | globals: { 21 | WebSocketPair: 'readonly', 22 | crypto: 'readonly', 23 | NodeJS: 'readonly', 24 | }, 25 | rules: { 26 | '@typescript-eslint/no-unused-vars': ['error', { 27 | argsIgnorePattern: '^_', 28 | varsIgnorePattern: '^_', 29 | ignoreRestSiblings: true 30 | }], 31 | '@typescript-eslint/no-explicit-any': 'warn', 32 | '@typescript-eslint/no-non-null-assertion': 'warn', 33 | '@typescript-eslint/no-inferrable-types': 'error', 34 | '@typescript-eslint/no-unnecessary-type-assertion': 'error', 35 | '@typescript-eslint/ban-ts-comment': ['error', { 36 | 'ts-expect-error': 'allow-with-description', 37 | 'ts-ignore': 'allow-with-description', 38 | 'ts-nocheck': true, 39 | 'ts-check': false, 40 | }], 41 | '@typescript-eslint/no-empty-function': 'warn', 42 | '@typescript-eslint/no-empty-interface': 'error', 43 | '@typescript-eslint/prefer-optional-chain': 'error', 44 | '@typescript-eslint/prefer-nullish-coalescing': 'error', 45 | 46 | 'no-console': 'off', 47 | 'no-undef': 'off', 48 | 'no-redeclare': 'off', 49 | 'prefer-const': 'error', 50 | 'no-var': 'error', 51 | 'eqeqeq': ['error', 'always'], 52 | 'curly': ['error', 'all'], 53 | 'no-throw-literal': 'error', 54 | 'no-duplicate-imports': 'error', 55 | 'no-unused-expressions': 'error', 56 | 57 | 'object-shorthand': 'error', 58 | 'prefer-template': 'error', 59 | 'prefer-arrow-callback': 'error', 60 | 'arrow-spacing': 'error', 61 | 'no-multiple-empty-lines': ['error', { max: 2, maxEOF: 1 }], 62 | 'comma-dangle': ['error', 'always-multiline'], 63 | }, 64 | ignorePatterns: [ 65 | 'dist/', 66 | 'node_modules/', 67 | '*.js', 68 | ], 69 | }; 70 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | 3 | logs 4 | _.log 5 | npm-debug.log_ 6 | yarn-debug.log* 7 | yarn-error.log* 8 | lerna-debug.log* 9 | .pnpm-debug.log* 10 | 11 | # Diagnostic reports 
(https://nodejs.org/api/report.html) 12 | 13 | report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json 14 | 15 | # Runtime data 16 | 17 | pids 18 | _.pid 19 | _.seed 20 | \*.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | 24 | lib-cov 25 | 26 | # Coverage directory used by tools like istanbul 27 | 28 | coverage 29 | \*.lcov 30 | 31 | # nyc test coverage 32 | 33 | .nyc_output 34 | 35 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 36 | 37 | .grunt 38 | 39 | # Bower dependency directory (https://bower.io/) 40 | 41 | bower_components 42 | 43 | # node-waf configuration 44 | 45 | .lock-wscript 46 | 47 | # Compiled binary addons (https://nodejs.org/api/addons.html) 48 | 49 | build/Release 50 | 51 | # Dependency directories 52 | 53 | node_modules/ 54 | jspm_packages/ 55 | 56 | # Snowpack dependency directory (https://snowpack.dev/) 57 | 58 | web_modules/ 59 | 60 | # TypeScript cache 61 | 62 | \*.tsbuildinfo 63 | 64 | # Optional npm cache directory 65 | 66 | .npm 67 | 68 | # Optional eslint cache 69 | 70 | .eslintcache 71 | 72 | # Optional stylelint cache 73 | 74 | .stylelintcache 75 | 76 | # Microbundle cache 77 | 78 | .rpt2_cache/ 79 | .rts2_cache_cjs/ 80 | .rts2_cache_es/ 81 | .rts2_cache_umd/ 82 | 83 | # Optional REPL history 84 | 85 | .node_repl_history 86 | 87 | # Output of 'npm pack' 88 | 89 | \*.tgz 90 | 91 | # Yarn Integrity file 92 | 93 | .yarn-integrity 94 | 95 | # parcel-bundler cache (https://parceljs.org/) 96 | 97 | .cache 98 | .parcel-cache 99 | 100 | # Next.js build output 101 | 102 | .next 103 | out 104 | 105 | # Nuxt.js build / generate output 106 | 107 | .nuxt 108 | dist 109 | 110 | # Gatsby files 111 | 112 | .cache/ 113 | 114 | # Comment in the public line in if your project uses Gatsby and not Next.js 115 | 116 | # https://nextjs.org/blog/next-9-1#public-directory-support 117 | 118 | # public 119 | 120 | # vuepress build output 121 | 122 | .vuepress/dist 123 | 124 | # vuepress v2.x temp and cache directory 125 | 126 | .temp 127 | .cache 128 | 129 | # Docusaurus cache and generated files 130 | 131 | .docusaurus 132 | 133 | # Serverless directories 134 | 135 | .serverless/ 136 | 137 | # FuseBox cache 138 | 139 | .fusebox/ 140 | 141 | # DynamoDB Local files 142 | 143 | .dynamodb/ 144 | 145 | # TernJS port file 146 | 147 | .tern-port 148 | 149 | # Stores VSCode versions used for testing VSCode extensions 150 | 151 | .vscode-test 152 | 153 | # yarn v2 154 | 155 | .yarn/cache 156 | .yarn/unplugged 157 | .yarn/build-state.yml 158 | .yarn/install-state.gz 159 | .pnp.\* 160 | 161 | # wrangler project 162 | 163 | .dev.vars* 164 | !.dev.vars.example 165 | .env* 166 | !.env.example 167 | .wrangler/ 168 | 169 | local.wrangler.jsonc 170 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 140, 3 | "singleQuote": true, 4 | "semi": true, 5 | "useTabs": true 6 | } 7 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "wrangler.json": "jsonc" 4 | } 5 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU AFFERO GENERAL PUBLIC LICENSE 2 | Version 3, 19 November 2007 3 | 4 | Copyright 
(C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU Affero General Public License is a free, copyleft license for 11 | software and other kinds of works, specifically designed to ensure 12 | cooperation with the community in the case of network server software. 13 | 14 | The licenses for most software and other practical works are designed 15 | to take away your freedom to share and change the works. By contrast, 16 | our General Public Licenses are intended to guarantee your freedom to 17 | share and change all versions of a program--to make sure it remains free 18 | software for all its users. 19 | 20 | When we speak of free software, we are referring to freedom, not 21 | price. Our General Public Licenses are designed to make sure that you 22 | have the freedom to distribute copies of free software (and charge for 23 | them if you wish), that you receive source code or can get it if you 24 | want it, that you can change the software or use pieces of it in new 25 | free programs, and that you know you can do these things. 26 | 27 | Developers that use our General Public Licenses protect your rights 28 | with two steps: (1) assert copyright on the software, and (2) offer 29 | you this License which gives you legal permission to copy, distribute 30 | and/or modify the software. 31 | 32 | A secondary benefit of defending all users' freedom is that 33 | improvements made in alternate versions of the program, if they 34 | receive widespread use, become available for other developers to 35 | incorporate. Many developers of free software are heartened and 36 | encouraged by the resulting cooperation. However, in the case of 37 | software used on network servers, this result may fail to come about. 38 | The GNU General Public License permits making a modified version and 39 | letting the public access it on a server without ever releasing its 40 | source code to the public. 41 | 42 | The GNU Affero General Public License is designed specifically to 43 | ensure that, in such cases, the modified source code becomes available 44 | to the community. It requires the operator of a network server to 45 | provide the source code of the modified version running there to the 46 | users of that server. Therefore, public use of a modified version, on 47 | a publicly accessible server, gives the public access to the source 48 | code of the modified version. 49 | 50 | An older license, called the Affero General Public License and 51 | published by Affero, was designed to accomplish similar goals. This is 52 | a different license, not a version of the Affero GPL, but Affero has 53 | released a new version of the Affero GPL which permits relicensing under 54 | this license. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | TERMS AND CONDITIONS 60 | 61 | 0. Definitions. 62 | 63 | "This License" refers to version 3 of the GNU Affero General Public License. 64 | 65 | "Copyright" also means copyright-like laws that apply to other kinds of 66 | works, such as semiconductor masks. 67 | 68 | "The Program" refers to any copyrightable work licensed under this 69 | License. Each licensee is addressed as "you". "Licensees" and 70 | "recipients" may be individuals or organizations. 
71 | 72 | To "modify" a work means to copy from or adapt all or part of the work 73 | in a fashion requiring copyright permission, other than the making of an 74 | exact copy. The resulting work is called a "modified version" of the 75 | earlier work or a work "based on" the earlier work. 76 | 77 | A "covered work" means either the unmodified Program or a work based 78 | on the Program. 79 | 80 | To "propagate" a work means to do anything with it that, without 81 | permission, would make you directly or secondarily liable for 82 | infringement under applicable copyright law, except executing it on a 83 | computer or modifying a private copy. Propagation includes copying, 84 | distribution (with or without modification), making available to the 85 | public, and in some countries other activities as well. 86 | 87 | To "convey" a work means any kind of propagation that enables other 88 | parties to make or receive copies. Mere interaction with a user through 89 | a computer network, with no transfer of a copy, is not conveying. 90 | 91 | An interactive user interface displays "Appropriate Legal Notices" 92 | to the extent that it includes a convenient and prominently visible 93 | feature that (1) displays an appropriate copyright notice, and (2) 94 | tells the user that there is no warranty for the work (except to the 95 | extent that warranties are provided), that licensees may convey the 96 | work under this License, and how to view a copy of this License. If 97 | the interface presents a list of user commands or options, such as a 98 | menu, a prominent item in the list meets this criterion. 99 | 100 | 1. Source Code. 101 | 102 | The "source code" for a work means the preferred form of the work 103 | for making modifications to it. "Object code" means any non-source 104 | form of a work. 105 | 106 | A "Standard Interface" means an interface that either is an official 107 | standard defined by a recognized standards body, or, in the case of 108 | interfaces specified for a particular programming language, one that 109 | is widely used among developers working in that language. 110 | 111 | The "System Libraries" of an executable work include anything, other 112 | than the work as a whole, that (a) is included in the normal form of 113 | packaging a Major Component, but which is not part of that Major 114 | Component, and (b) serves only to enable use of the work with that 115 | Major Component, or to implement a Standard Interface for which an 116 | implementation is available to the public in source code form. A 117 | "Major Component", in this context, means a major essential component 118 | (kernel, window system, and so on) of the specific operating system 119 | (if any) on which the executable work runs, or a compiler used to 120 | produce the work, or an object code interpreter used to run it. 121 | 122 | The "Corresponding Source" for a work in object code form means all 123 | the source code needed to generate, install, and (for an executable 124 | work) run the object code and to modify the work, including scripts to 125 | control those activities. However, it does not include the work's 126 | System Libraries, or general-purpose tools or generally available free 127 | programs which are used unmodified in performing those activities but 128 | which are not part of the work. 
For example, Corresponding Source 129 | includes interface definition files associated with source files for 130 | the work, and the source code for shared libraries and dynamically 131 | linked subprograms that the work is specifically designed to require, 132 | such as by intimate data communication or control flow between those 133 | subprograms and other parts of the work. 134 | 135 | The Corresponding Source need not include anything that users 136 | can regenerate automatically from other parts of the Corresponding 137 | Source. 138 | 139 | The Corresponding Source for a work in source code form is that 140 | same work. 141 | 142 | 2. Basic Permissions. 143 | 144 | All rights granted under this License are granted for the term of 145 | copyright on the Program, and are irrevocable provided the stated 146 | conditions are met. This License explicitly affirms your unlimited 147 | permission to run the unmodified Program. The output from running a 148 | covered work is covered by this License only if the output, given its 149 | content, constitutes a covered work. This License acknowledges your 150 | rights of fair use or other equivalent, as provided by copyright law. 151 | 152 | You may make, run and propagate covered works that you do not 153 | convey, without conditions so long as your license otherwise remains 154 | in force. You may convey covered works to others for the sole purpose 155 | of having them make modifications exclusively for you, or provide you 156 | with facilities for running those works, provided that you comply with 157 | the terms of this License in conveying all material for which you do 158 | not control copyright. Those thus making or running the covered works 159 | for you must do so exclusively on your behalf, under your direction 160 | and control, on terms that prohibit them from making any copies of 161 | your copyrighted material outside their relationship with you. 162 | 163 | Conveying under any other circumstances is permitted solely under 164 | the conditions stated below. Sublicensing is not allowed; section 10 165 | makes it unnecessary. 166 | 167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 168 | 169 | No covered work shall be deemed part of an effective technological 170 | measure under any applicable law fulfilling obligations under article 171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 172 | similar laws prohibiting or restricting circumvention of such 173 | measures. 174 | 175 | When you convey a covered work, you waive any legal power to forbid 176 | circumvention of technological measures to the extent such circumvention 177 | is effected by exercising rights under this License with respect to 178 | the covered work, and you disclaim any intention to limit operation or 179 | modification of the work as a means of enforcing, against the work's 180 | users, your or third parties' legal rights to forbid circumvention of 181 | technological measures. 182 | 183 | 4. Conveying Verbatim Copies. 184 | 185 | You may convey verbatim copies of the Program's source code as you 186 | receive it, in any medium, provided that you conspicuously and 187 | appropriately publish on each copy an appropriate copyright notice; 188 | keep intact all notices stating that this License and any 189 | non-permissive terms added in accord with section 7 apply to the code; 190 | keep intact all notices of the absence of any warranty; and give all 191 | recipients a copy of this License along with the Program. 
192 | 193 | You may charge any price or no price for each copy that you convey, 194 | and you may offer support or warranty protection for a fee. 195 | 196 | 5. Conveying Modified Source Versions. 197 | 198 | You may convey a work based on the Program, or the modifications to 199 | produce it from the Program, in the form of source code under the 200 | terms of section 4, provided that you also meet all of these conditions: 201 | 202 | a) The work must carry prominent notices stating that you modified 203 | it, and giving a relevant date. 204 | 205 | b) The work must carry prominent notices stating that it is 206 | released under this License and any conditions added under section 207 | 7. This requirement modifies the requirement in section 4 to 208 | "keep intact all notices". 209 | 210 | c) You must license the entire work, as a whole, under this 211 | License to anyone who comes into possession of a copy. This 212 | License will therefore apply, along with any applicable section 7 213 | additional terms, to the whole of the work, and all its parts, 214 | regardless of how they are packaged. This License gives no 215 | permission to license the work in any other way, but it does not 216 | invalidate such permission if you have separately received it. 217 | 218 | d) If the work has interactive user interfaces, each must display 219 | Appropriate Legal Notices; however, if the Program has interactive 220 | interfaces that do not display Appropriate Legal Notices, your 221 | work need not make them do so. 222 | 223 | A compilation of a covered work with other separate and independent 224 | works, which are not by their nature extensions of the covered work, 225 | and which are not combined with it such as to form a larger program, 226 | in or on a volume of a storage or distribution medium, is called an 227 | "aggregate" if the compilation and its resulting copyright are not 228 | used to limit the access or legal rights of the compilation's users 229 | beyond what the individual works permit. Inclusion of a covered work 230 | in an aggregate does not cause this License to apply to the other 231 | parts of the aggregate. 232 | 233 | 6. Conveying Non-Source Forms. 234 | 235 | You may convey a covered work in object code form under the terms 236 | of sections 4 and 5, provided that you also convey the 237 | machine-readable Corresponding Source under the terms of this License, 238 | in one of these ways: 239 | 240 | a) Convey the object code in, or embodied in, a physical product 241 | (including a physical distribution medium), accompanied by the 242 | Corresponding Source fixed on a durable physical medium 243 | customarily used for software interchange. 244 | 245 | b) Convey the object code in, or embodied in, a physical product 246 | (including a physical distribution medium), accompanied by a 247 | written offer, valid for at least three years and valid for as 248 | long as you offer spare parts or customer support for that product 249 | model, to give anyone who possesses the object code either (1) a 250 | copy of the Corresponding Source for all the software in the 251 | product that is covered by this License, on a durable physical 252 | medium customarily used for software interchange, for a price no 253 | more than your reasonable cost of physically performing this 254 | conveying of source, or (2) access to copy the 255 | Corresponding Source from a network server at no charge. 
256 | 257 | c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | 263 | d) Convey the object code by offering access from a designated 264 | place (gratis or for a charge), and offer equivalent access to the 265 | Corresponding Source in the same way through the same place at no 266 | further charge. You need not require recipients to copy the 267 | Corresponding Source along with the object code. If the place to 268 | copy the object code is a network server, the Corresponding Source 269 | may be on a different server (operated by you or a third party) 270 | that supports equivalent copying facilities, provided you maintain 271 | clear directions next to the object code saying where to find the 272 | Corresponding Source. Regardless of what server hosts the 273 | Corresponding Source, you remain obligated to ensure that it is 274 | available for as long as needed to satisfy these requirements. 275 | 276 | e) Convey the object code using peer-to-peer transmission, provided 277 | you inform other peers where the object code and Corresponding 278 | Source of the work are being offered to the general public at no 279 | charge under subsection 6d. 280 | 281 | A separable portion of the object code, whose source code is excluded 282 | from the Corresponding Source as a System Library, need not be 283 | included in conveying the object code work. 284 | 285 | A "User Product" is either (1) a "consumer product", which means any 286 | tangible personal property which is normally used for personal, family, 287 | or household purposes, or (2) anything designed or sold for incorporation 288 | into a dwelling. In determining whether a product is a consumer product, 289 | doubtful cases shall be resolved in favor of coverage. For a particular 290 | product received by a particular user, "normally used" refers to a 291 | typical or common use of that class of product, regardless of the status 292 | of the particular user or of the way in which the particular user 293 | actually uses, or expects or is expected to use, the product. A product 294 | is a consumer product regardless of whether the product has substantial 295 | commercial, industrial or non-consumer uses, unless such uses represent 296 | the only significant mode of use of the product. 297 | 298 | "Installation Information" for a User Product means any methods, 299 | procedures, authorization keys, or other information required to install 300 | and execute modified versions of a covered work in that User Product from 301 | a modified version of its Corresponding Source. The information must 302 | suffice to ensure that the continued functioning of the modified object 303 | code is in no case prevented or interfered with solely because 304 | modification has been made. 305 | 306 | If you convey an object code work under this section in, or with, or 307 | specifically for use in, a User Product, and the conveying occurs as 308 | part of a transaction in which the right of possession and use of the 309 | User Product is transferred to the recipient in perpetuity or for a 310 | fixed term (regardless of how the transaction is characterized), the 311 | Corresponding Source conveyed under this section must be accompanied 312 | by the Installation Information. 
But this requirement does not apply 313 | if neither you nor any third party retains the ability to install 314 | modified object code on the User Product (for example, the work has 315 | been installed in ROM). 316 | 317 | The requirement to provide Installation Information does not include a 318 | requirement to continue to provide support service, warranty, or updates 319 | for a work that has been modified or installed by the recipient, or for 320 | the User Product in which it has been modified or installed. Access to a 321 | network may be denied when the modification itself materially and 322 | adversely affects the operation of the network or violates the rules and 323 | protocols for communication across the network. 324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders of 351 | that material) supplement the terms of this License with terms: 352 | 353 | a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | 356 | b) Requiring preservation of specified reasonable legal notices or 357 | author attributions in that material or in the Appropriate Legal 358 | Notices displayed by works containing it; or 359 | 360 | c) Prohibiting misrepresentation of the origin of that material, or 361 | requiring that modified versions of such material be marked in 362 | reasonable ways as different from the original version; or 363 | 364 | d) Limiting the use for publicity purposes of names of licensors or 365 | authors of the material; or 366 | 367 | e) Declining to grant rights under trademark law for use of some 368 | trade names, trademarks, or service marks; or 369 | 370 | f) Requiring indemnification of licensors and authors of that 371 | material by anyone who conveys the material (or modified versions of 372 | it) with contractual assumptions of liability to the recipient, for 373 | any liability that these contractual assumptions directly impose on 374 | those licensors and authors. 
375 | 376 | All other non-permissive additional terms are considered "further 377 | restrictions" within the meaning of section 10. If the Program as you 378 | received it, or any part of it, contains a notice stating that it is 379 | governed by this License along with a term that is a further 380 | restriction, you may remove that term. If a license document contains 381 | a further restriction but permits relicensing or conveying under this 382 | License, you may add to a covered work material governed by the terms 383 | of that license document, provided that the further restriction does 384 | not survive such relicensing or conveying. 385 | 386 | If you add terms to a covered work in accord with this section, you 387 | must place, in the relevant source files, a statement of the 388 | additional terms that apply to those files, or a notice indicating 389 | where to find the applicable terms. 390 | 391 | Additional terms, permissive or non-permissive, may be stated in the 392 | form of a separately written license, or stated as exceptions; 393 | the above requirements apply either way. 394 | 395 | 8. Termination. 396 | 397 | You may not propagate or modify a covered work except as expressly 398 | provided under this License. Any attempt otherwise to propagate or 399 | modify it is void, and will automatically terminate your rights under 400 | this License (including any patent licenses granted under the third 401 | paragraph of section 11). 402 | 403 | However, if you cease all violation of this License, then your 404 | license from a particular copyright holder is reinstated (a) 405 | provisionally, unless and until the copyright holder explicitly and 406 | finally terminates your license, and (b) permanently, if the copyright 407 | holder fails to notify you of the violation by some reasonable means 408 | prior to 60 days after the cessation. 409 | 410 | Moreover, your license from a particular copyright holder is 411 | reinstated permanently if the copyright holder notifies you of the 412 | violation by some reasonable means, this is the first time you have 413 | received notice of violation of this License (for any work) from that 414 | copyright holder, and you cure the violation prior to 30 days after 415 | your receipt of the notice. 416 | 417 | Termination of your rights under this section does not terminate the 418 | licenses of parties who have received copies or rights from you under 419 | this License. If your rights have been terminated and not permanently 420 | reinstated, you do not qualify to receive new licenses for the same 421 | material under section 10. 422 | 423 | 9. Acceptance Not Required for Having Copies. 424 | 425 | You are not required to accept this License in order to receive or 426 | run a copy of the Program. Ancillary propagation of a covered work 427 | occurring solely as a consequence of using peer-to-peer transmission 428 | to receive a copy likewise does not require acceptance. However, 429 | nothing other than this License grants you permission to propagate or 430 | modify any covered work. These actions infringe copyright if you do 431 | not accept this License. Therefore, by modifying or propagating a 432 | covered work, you indicate your acceptance of this License to do so. 433 | 434 | 10. Automatic Licensing of Downstream Recipients. 435 | 436 | Each time you convey a covered work, the recipient automatically 437 | receives a license from the original licensors, to run, modify and 438 | propagate that work, subject to this License. 
You are not responsible 439 | for enforcing compliance by third parties with this License. 440 | 441 | An "entity transaction" is a transaction transferring control of an 442 | organization, or substantially all assets of one, or subdividing an 443 | organization, or merging organizations. If propagation of a covered 444 | work results from an entity transaction, each party to that 445 | transaction who receives a copy of the work also receives whatever 446 | licenses to the work the party's predecessor in interest had or could 447 | give under the previous paragraph, plus a right to possession of the 448 | Corresponding Source of the work from the predecessor in interest, if 449 | the predecessor has it or can get it with reasonable efforts. 450 | 451 | You may not impose any further restrictions on the exercise of the 452 | rights granted or affirmed under this License. For example, you may 453 | not impose a license fee, royalty, or other charge for exercise of 454 | rights granted under this License, and you may not initiate litigation 455 | (including a cross-claim or counterclaim in a lawsuit) alleging that 456 | any patent claim is infringed by making, using, selling, offering for 457 | sale, or importing the Program or any portion of it. 458 | 459 | 11. Patents. 460 | 461 | A "contributor" is a copyright holder who authorizes use under this 462 | License of the Program or a work on which the Program is based. The 463 | work thus licensed is called the contributor's "contributor version". 464 | 465 | A contributor's "essential patent claims" are all patent claims 466 | owned or controlled by the contributor, whether already acquired or 467 | hereafter acquired, that would be infringed by some manner, permitted 468 | by this License, of making, using, or selling its contributor version, 469 | but do not include claims that would be infringed only as a 470 | consequence of further modification of the contributor version. For 471 | purposes of this definition, "control" includes the right to grant 472 | patent sublicenses in a manner consistent with the requirements of 473 | this License. 474 | 475 | Each contributor grants you a non-exclusive, worldwide, royalty-free 476 | patent license under the contributor's essential patent claims, to 477 | make, use, sell, offer for sale, import and otherwise run, modify and 478 | propagate the contents of its contributor version. 479 | 480 | In the following three paragraphs, a "patent license" is any express 481 | agreement or commitment, however denominated, not to enforce a patent 482 | (such as an express permission to practice a patent or covenant not to 483 | sue for patent infringement). To "grant" such a patent license to a 484 | party means to make such an agreement or commitment not to enforce a 485 | patent against the party. 486 | 487 | If you convey a covered work, knowingly relying on a patent license, 488 | and the Corresponding Source of the work is not available for anyone 489 | to copy, free of charge and under the terms of this License, through a 490 | publicly available network server or other readily accessible means, 491 | then you must either (1) cause the Corresponding Source to be so 492 | available, or (2) arrange to deprive yourself of the benefit of the 493 | patent license for this particular work, or (3) arrange, in a manner 494 | consistent with the requirements of this License, to extend the patent 495 | license to downstream recipients. 
"Knowingly relying" means you have 496 | actual knowledge that, but for the patent license, your conveying the 497 | covered work in a country, or your recipient's use of the covered work 498 | in a country, would infringe one or more identifiable patents in that 499 | country that you have reason to believe are valid. 500 | 501 | If, pursuant to or in connection with a single transaction or 502 | arrangement, you convey, or propagate by procuring conveyance of, a 503 | covered work, and grant a patent license to some of the parties 504 | receiving the covered work authorizing them to use, propagate, modify 505 | or convey a specific copy of the covered work, then the patent license 506 | you grant is automatically extended to all recipients of the covered 507 | work and works based on it. 508 | 509 | A patent license is "discriminatory" if it does not include within 510 | the scope of its coverage, prohibits the exercise of, or is 511 | conditioned on the non-exercise of one or more of the rights that are 512 | specifically granted under this License. You may not convey a covered 513 | work if you are a party to an arrangement with a third party that is 514 | in the business of distributing software, under which you make payment 515 | to the third party based on the extent of your activity of conveying 516 | the work, and under which the third party grants, to any of the 517 | parties who would receive the covered work from you, a discriminatory 518 | patent license (a) in connection with copies of the covered work 519 | conveyed by you (or copies made from those copies), or (b) primarily 520 | for and in connection with specific products or compilations that 521 | contain the covered work, unless you entered into that arrangement, 522 | or that patent license was granted, prior to 28 March 2007. 523 | 524 | Nothing in this License shall be construed as excluding or limiting 525 | any implied license or other defenses to infringement that may 526 | otherwise be available to you under applicable patent law. 527 | 528 | 12. No Surrender of Others' Freedom. 529 | 530 | If conditions are imposed on you (whether by court order, agreement or 531 | otherwise) that contradict the conditions of this License, they do not 532 | excuse you from the conditions of this License. If you cannot convey a 533 | covered work so as to satisfy simultaneously your obligations under this 534 | License and any other pertinent obligations, then as a consequence you may 535 | not convey it at all. For example, if you agree to terms that obligate you 536 | to collect a royalty for further conveying from those to whom you convey 537 | the Program, the only way you could satisfy both those terms and this 538 | License would be to refrain entirely from conveying the Program. 539 | 540 | 13. Remote Network Interaction; Use with the GNU General Public License. 541 | 542 | Notwithstanding any other provision of this License, if you modify the 543 | Program, your modified version must prominently offer all users 544 | interacting with it remotely through a computer network (if your version 545 | supports such interaction) an opportunity to receive the Corresponding 546 | Source of your version by providing access to the Corresponding Source 547 | from a network server at no charge, through some standard or customary 548 | means of facilitating copying of software. 
This Corresponding Source 549 | shall include the Corresponding Source for any work covered by version 3 550 | of the GNU General Public License that is incorporated pursuant to the 551 | following paragraph. 552 | 553 | Notwithstanding any other provision of this License, you have 554 | permission to link or combine any covered work with a work licensed 555 | under version 3 of the GNU General Public License into a single 556 | combined work, and to convey the resulting work. The terms of this 557 | License will continue to apply to the part which is the covered work, 558 | but the work with which it is combined will remain governed by version 559 | 3 of the GNU General Public License. 560 | 561 | 14. Revised Versions of this License. 562 | 563 | The Free Software Foundation may publish revised and/or new versions of 564 | the GNU Affero General Public License from time to time. Such new versions 565 | will be similar in spirit to the present version, but may differ in detail to 566 | address new problems or concerns. 567 | 568 | Each version is given a distinguishing version number. If the 569 | Program specifies that a certain numbered version of the GNU Affero General 570 | Public License "or any later version" applies to it, you have the 571 | option of following the terms and conditions either of that numbered 572 | version or of any later version published by the Free Software 573 | Foundation. If the Program does not specify a version number of the 574 | GNU Affero General Public License, you may choose any version ever published 575 | by the Free Software Foundation. 576 | 577 | If the Program specifies that a proxy can decide which future 578 | versions of the GNU Affero General Public License can be used, that proxy's 579 | public statement of acceptance of a version permanently authorizes you 580 | to choose that version for the Program. 581 | 582 | Later license versions may give you additional or different 583 | permissions. However, no additional obligations are imposed on any 584 | author or copyright holder as a result of your choosing to follow a 585 | later version. 586 | 587 | 15. Disclaimer of Warranty. 588 | 589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 597 | 598 | 16. Limitation of Liability. 599 | 600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 608 | SUCH DAMAGES. 609 | 610 | 17. Interpretation of Sections 15 and 16. 
611 | 612 | If the disclaimer of warranty and limitation of liability provided 613 | above cannot be given local legal effect according to their terms, 614 | reviewing courts shall apply local law that most closely approximates 615 | an absolute waiver of all civil liability in connection with the 616 | Program, unless a warranty or assumption of liability accompanies a 617 | copy of the Program in return for a fee. 618 | 619 | END OF TERMS AND CONDITIONS 620 | 621 | How to Apply These Terms to Your New Programs 622 | 623 | If you develop a new program, and you want it to be of the greatest 624 | possible use to the public, the best way to achieve this is to make it 625 | free software which everyone can redistribute and change under these terms. 626 | 627 | To do so, attach the following notices to the program. It is safest 628 | to attach them to the start of each source file to most effectively 629 | state the exclusion of warranty; and each file should have at least 630 | the "copyright" line and a pointer to where the full notice is found. 631 | 632 | 633 | Copyright (C) 634 | 635 | This program is free software: you can redistribute it and/or modify 636 | it under the terms of the GNU Affero General Public License as published 637 | by the Free Software Foundation, either version 3 of the License, or 638 | (at your option) any later version. 639 | 640 | This program is distributed in the hope that it will be useful, 641 | but WITHOUT ANY WARRANTY; without even the implied warranty of 642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 643 | GNU Affero General Public License for more details. 644 | 645 | You should have received a copy of the GNU Affero General Public License 646 | along with this program. If not, see . 647 | 648 | Also add information on how to contact you by electronic and paper mail. 649 | 650 | If your software can interact with users remotely through a computer 651 | network, you should also make sure that it provides a way for users to 652 | get its source. For example, if your program is a web application, its 653 | interface could display a "Source" link that leads users to an archive 654 | of the code. There are many ways you could offer source, and different 655 | solutions will be better for different programs; see section 13 for the 656 | specific requirements. 657 | 658 | You should also get your employer (if you work as a programmer) or school, 659 | if any, to sign a "copyright disclaimer" for the program, if necessary. 660 | For more information on this, and how to apply and follow the GNU AGPL, see 661 | . 662 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
# Yjs - S2 Cloudflare Worker

Try Demo
28 | 29 | ## Overview 30 | 31 | Y-S2 is a Cloudflare Worker that provides real-time collaborative document editing using Yjs with S2.dev as the distribution channel and an R2 bucket as the storage provider. It provides scalable WebSocket-based document synchronization where document updates are made durable on an S2 stream and distributed to connected clients and reactively persisted to the R2 bucket. 32 | 33 | ## Getting Started 34 | 35 | ### Prerequisites 36 | 37 | - Cloudflare account with Workers enabled. 38 | - S2.dev account, a basin with `Create stream on append/read` enabled, and a scoped access token to the basin you want to use. 39 | - R2 bucket for snapshot storage. 40 | 41 | ### Environment Variables 42 | 43 | ```bash 44 | S2_ACCESS_TOKEN=your_s2_access_token 45 | S2_BASIN=your_s2_basin_name 46 | R2_BUCKET=your_r2_bucket_name 47 | LOG_MODE=CONSOLE|S2_SINGLE|S2_SHARED # Optional 48 | SNAPSHOT_BACKLOG_SIZE=100 # Optional 49 | ``` 50 | 51 | ### Deployment 52 | 53 | ```bash 54 | # Install dependencies 55 | npm install 56 | 57 | # Deploy to Cloudflare Workers 58 | npm run deploy 59 | 60 | # Or run locally for development 61 | npm run dev 62 | ``` 63 | 64 | ### Client Integration 65 | 66 | Connect to the deployed worker using the Yjs WebSocket provider: 67 | 68 | ```javascript 69 | import * as Y from 'yjs' 70 | import { WebsocketProvider } from 'y-websocket' 71 | 72 | const doc = new Y.Doc() 73 | const provider = new WebsocketProvider('wss://your-worker.your-subdomain.workers.dev', 'room-name', doc, { 74 | params: { authToken: 'your-auth-token' } 75 | }) 76 | 77 | ``` 78 | 79 | ### Credits 80 | 81 | Portions of this project are derived from [y-redis](https://github.com/yjs/y-redis), licensed under the GNU Affero General Public License v3.0. 82 | -------------------------------------------------------------------------------- /assets/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/s2-streamstore/y-s2/9241675e16b01cc4f49ec1da92bd79562b4faa9b/assets/demo.gif -------------------------------------------------------------------------------- /assets/s2-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/s2-streamstore/y-s2/9241675e16b01cc4f49ec1da92bd79562b4faa9b/assets/s2-black.png -------------------------------------------------------------------------------- /assets/s2-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/s2-streamstore/y-s2/9241675e16b01cc4f49ec1da92bd79562b4faa9b/assets/s2-white.png -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "y-s2", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "deploy": "wrangler deploy", 7 | "dev": "wrangler dev", 8 | "start": "wrangler dev", 9 | "test": "vitest", 10 | "cf-typegen": "wrangler types", 11 | "format": "prettier --write \"src/**/*.{ts,js,json}\"", 12 | "format:check": "prettier --check \"src/**/*.{ts,js,json}\"", 13 | "lint": "eslint src --ext .ts,.js", 14 | "lint:fix": "eslint src --ext .ts,.js --fix" 15 | }, 16 | "devDependencies": { 17 | "@cloudflare/vitest-pool-workers": "^0.8.19", 18 | "@types/node": "^24.3.0", 19 | "@typescript-eslint/eslint-plugin": "^6.0.0", 20 | "@typescript-eslint/parser": "^6.0.0", 21 | "eslint": "^8.0.0", 22 | "install": 
"^0.13.0", 23 | "npm": "^11.5.2", 24 | "prettier": "^3.0.0", 25 | "typescript": "^5.5.2", 26 | "vitest": "~3.2.0", 27 | "wrangler": "^4.33.1" 28 | }, 29 | "dependencies": { 30 | "@s2-dev/streamstore": "^0.15.9", 31 | "js-base64": "^3.7.8", 32 | "lib0": "^0.2.114", 33 | "y-protocols": "^1.0.5", 34 | "yjs": "^13.6.27" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { S2 } from '@s2-dev/streamstore'; 2 | import type { EventStream } from '@s2-dev/streamstore/lib/event-streams.js'; 3 | import { S2Format, type ReadEvent } from '@s2-dev/streamstore/models/components'; 4 | import { TailResponse } from '@s2-dev/streamstore/models/errors'; 5 | import * as Y from 'yjs'; 6 | import * as decoding from 'lib0/decoding'; 7 | import * as awarenessProtocol from 'y-protocols/awareness'; 8 | import * as array from 'lib0/array'; 9 | import { toUint8Array } from 'js-base64'; 10 | import { createLogger, S2Logger } from './logger.js'; 11 | import { 12 | encodeAwarenessUpdate, 13 | encodeAwarenessUserDisconnected, 14 | encodeSyncStep1, 15 | encodeSyncStep2, 16 | messageAwareness, 17 | messageSync, 18 | messageSyncStep1, 19 | messageSyncStep2, 20 | messageSyncUpdate, 21 | } from './protocol.js'; 22 | import { retrieveSnapshot, uploadSnapshot, getSnapshotETag } from './snapshot.js'; 23 | import { 24 | decodeBigEndian64AsNumber, 25 | generateDeadlineFencingToken, 26 | isFenceCommand, 27 | isTrimCommand, 28 | MessageBatcher, 29 | parseConfig, 30 | parseFencingToken, 31 | Room, 32 | } from './utils.js'; 33 | import { createSnapshotState, createUserState, SnapshotState } from './types.js'; 34 | 35 | export interface Env { 36 | // S2 access token 37 | S2_ACCESS_TOKEN: string; 38 | // S2 basin name 39 | S2_BASIN: string; 40 | // R2 bucket for snapshots 41 | R2_BUCKET: R2Bucket; 42 | // Logging mode: CONSOLE | S2_SINGLE | S2_SHARED 43 | // CONSOLE: logs to console only 44 | // S2_SINGLE: logs to a single S2 stream with a unique worker ID 45 | // S2_SHARED: logs to a shared S2 stream with a worker ID 46 | LOG_MODE?: string; 47 | // Size of the record backlog to trigger a snapshot 48 | SNAPSHOT_BACKLOG_SIZE?: string; 49 | // Maximum age of a collected snapshot buffer before it is persisted to R2 50 | BACKLOG_BUFFER_AGE?: number; 51 | // Maximum batch size to reach before flushing to S2 52 | S2_BATCH_SIZE?: string; 53 | // Maximum time to wait before flushing a batch to S2 54 | S2_LINGER_TIME?: string; 55 | // Fencing token lease duration in seconds 56 | // Represented as: `{id} {leaseDeadline}` 57 | LEASE_DURATION?: number; 58 | } 59 | 60 | export default { 61 | async fetch(request: Request, env: Env): Promise { 62 | const url = new URL(request.url); 63 | 64 | if (request.method === 'OPTIONS') { 65 | return new Response(null, { 66 | headers: { 67 | 'Access-Control-Allow-Origin': '*', 68 | 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS', 69 | 'Access-Control-Allow-Headers': 'Content-Type, Authorization', 70 | }, 71 | }); 72 | } 73 | 74 | if (url.pathname === '/auth/token') { 75 | return handleAuthToken(env, request); 76 | } 77 | 78 | if (url.pathname.startsWith('/ws/')) { 79 | return handleWebSocket(request, env); 80 | } 81 | 82 | if (url.pathname.match(/^\/auth\/perm\/(.+)\/(.+)$/)) { 83 | return handlePermissionCheck(request); 84 | } 85 | 86 | return new Response('Y-S2', { status: 200 }); 87 | }, 88 | }; 89 | 90 | async function handleWebSocket(request: Request, 
env: Env): Promise { 91 | const logger = createLogger(env, request); 92 | 93 | if (request.headers.get('Upgrade') !== 'websocket') { 94 | return new Response('Expected Upgrade: websocket', { status: 426 }); 95 | } 96 | 97 | const url = new URL(request.url); 98 | const roomName = url.searchParams.get('room'); 99 | const authToken = url.searchParams.get('yauth'); 100 | 101 | if (!roomName || !authToken) { 102 | logger.error('Missing required parameters', { room: !!roomName, authToken: !!authToken }, 'WebSocketValidation'); 103 | return new Response('Missing room or auth token', { status: 400 }); 104 | } 105 | 106 | try { 107 | if (!authToken || authToken.length === 0) { 108 | logger.error('Invalid auth token', { tokenLength: authToken?.length || 0 }, 'WebSocketAuth'); 109 | return new Response('Invalid auth token', { status: 401 }); 110 | } 111 | 112 | const pair = new WebSocketPair(); 113 | const client = pair[0]; 114 | const server = pair[1]; 115 | server.accept(); 116 | logger.info('WebSocket connection established', { roomName }, 'WebSocketConnection'); 117 | 118 | const s2Client = new S2({ accessToken: env.S2_ACCESS_TOKEN }); 119 | const streamName = `rooms/${encodeURIComponent(roomName)}/index`; 120 | 121 | const { maxBacklog, batchSize, lingerTime, leaseDuration, backlogBufferAge } = parseConfig(env); 122 | 123 | const room = new Room(s2Client, streamName, env.S2_BASIN); 124 | 125 | const ydoc = new Y.Doc(); 126 | const awareness = new awarenessProtocol.Awareness(ydoc); 127 | awareness.setLocalState(null); 128 | 129 | const userState = createUserState(roomName); 130 | 131 | const initializeFromSnapshot = async () => { 132 | const checkpoint = await retrieveSnapshot(env, roomName, logger); 133 | 134 | ydoc.destroy(); 135 | const newYdoc = new Y.Doc(); 136 | Object.setPrototypeOf(ydoc, Object.getPrototypeOf(newYdoc)); 137 | Object.assign(ydoc, newYdoc); 138 | 139 | awareness.destroy(); 140 | const newAwareness = new awarenessProtocol.Awareness(ydoc); 141 | Object.setPrototypeOf(awareness, Object.getPrototypeOf(newAwareness)); 142 | Object.assign(awareness, newAwareness); 143 | awareness.setLocalState(null); 144 | 145 | if (checkpoint?.snapshot) { 146 | logger.info( 147 | 'Snapshot retrieved, applying to document', 148 | { 149 | room, 150 | lastSeqNum: checkpoint.lastSeqNum, 151 | snapshotSize: checkpoint.snapshot.length, 152 | }, 153 | 'SnapshotRestore', 154 | ); 155 | Y.applyUpdateV2(ydoc, checkpoint.snapshot); 156 | } else { 157 | logger.debug('No snapshot found, starting with empty document', { room }, 'SnapshotRestore'); 158 | } 159 | 160 | const newSnapshotState = createSnapshotState(); 161 | 162 | const lastSeqNum = checkpoint?.lastSeqNum ?? 0; 163 | newSnapshotState.lastProcessedTrimSeqNum = lastSeqNum; 164 | const catchupSeqNum = checkpoint ? 
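// If a checkpoint snapshot was found, resume reading the room's S2 stream at the
// record after the last sequence number already folded into that snapshot;
// otherwise start from sequence number 0.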
lastSeqNum + 1 : 0; 165 | 166 | return { catchupSeqNum, snapshotState: newSnapshotState }; 167 | }; 168 | 169 | let { catchupSeqNum, snapshotState } = await initializeFromSnapshot(); 170 | 171 | const tailResponse = await s2Client.records.checkTail({ 172 | stream: streamName, 173 | s2Basin: env.S2_BASIN, 174 | }); 175 | 176 | const tailSeqNum = tailResponse.tail.seqNum; 177 | 178 | logger.info( 179 | 'Starting catchup from S2', 180 | { 181 | room, 182 | fromSeqNum: catchupSeqNum, 183 | tailSeqNum, 184 | }, 185 | 'S2Catchup', 186 | ); 187 | 188 | const messageBatcher = new MessageBatcher(s2Client, streamName, env.S2_BASIN, logger, roomName, batchSize, lingerTime); 189 | 190 | const sendSyncMessages = (): void => { 191 | server.send(encodeSyncStep1(Y.encodeStateVector(ydoc))); 192 | server.send(encodeSyncStep2(Y.encodeStateAsUpdate(ydoc))); 193 | 194 | if (awareness.states.size > 0) { 195 | server.send(encodeAwarenessUpdate(awareness, array.from(awareness.states.keys()))); 196 | } 197 | 198 | ydoc.destroy(); 199 | awareness.destroy(); 200 | }; 201 | 202 | const readStreamWithRetry = async (): Promise => { 203 | let currentCatchupSeqNum = catchupSeqNum; 204 | let currentSnapshotState = snapshotState; 205 | let attempts = 0; 206 | const maxAttempts = 3; 207 | 208 | while (attempts < maxAttempts) { 209 | attempts++; 210 | try { 211 | logger.info('Starting S2 event stream', { room, catchupSeqNum: currentCatchupSeqNum, attempt: attempts }, 'S2EventStream'); 212 | const events = await s2Client.records.read( 213 | { 214 | stream: streamName, 215 | s2Basin: env.S2_BASIN, 216 | seqNum: currentCatchupSeqNum, 217 | s2Format: S2Format.Base64, 218 | clamp: true, 219 | }, 220 | { acceptHeaderOverride: 'text/event-stream' as any }, 221 | ); 222 | 223 | return await processEventStream(events as EventStream, currentCatchupSeqNum, currentSnapshotState); 224 | } catch (err) { 225 | if (err instanceof TailResponse) { 226 | logger.warn( 227 | 'TailResponse received - sequence number out of range', 228 | { 229 | room, 230 | requestedSeqNum: currentCatchupSeqNum, 231 | actualTailSeqNum: err.tail.seqNum, 232 | error: err.message, 233 | attempt: attempts, 234 | }, 235 | 'TailResponseReceived', 236 | ); 237 | 238 | if (currentCatchupSeqNum < err.tail.seqNum) { 239 | logger.info( 240 | 'Stale state from R2 - restarting from fresh snapshot', 241 | { 242 | room, 243 | requestedSeqNum: currentCatchupSeqNum, 244 | tailSeqNum: err.tail.seqNum, 245 | attempt: attempts, 246 | }, 247 | 'RestartFromSnapshot', 248 | ); 249 | 250 | const newState = await initializeFromSnapshot(); 251 | currentCatchupSeqNum = newState.catchupSeqNum; 252 | currentSnapshotState = newState.snapshotState; 253 | catchupSeqNum = currentCatchupSeqNum; 254 | snapshotState = currentSnapshotState; 255 | 256 | continue; 257 | } else { 258 | logger.warn('TailResponse for sequence number higher than tail - not restarting', { 259 | room, 260 | requestedSeqNum: currentCatchupSeqNum, 261 | tailSeqNum: err.tail.seqNum, 262 | attempt: attempts, 263 | }); 264 | throw err; 265 | } 266 | } else { 267 | logger.error( 268 | 'S2 stream read error', 269 | { 270 | error: err instanceof Error ? 
err.message : String(err), 271 | room, 272 | attempt: attempts, 273 | }, 274 | 'S2StreamError', 275 | ); 276 | throw err; 277 | } 278 | } 279 | } 280 | 281 | logger.error('Max retry attempts reached for stream reading', { room, maxAttempts }, 'MaxRetriesReached'); 282 | }; 283 | 284 | const processEventStream = async ( 285 | events: EventStream, 286 | currentCatchupSeqNum: number, 287 | currentSnapshotState: SnapshotState, 288 | ): Promise => { 289 | let isCatchingUp = currentCatchupSeqNum < tailSeqNum; 290 | 291 | if (!isCatchingUp) { 292 | logger.info( 293 | 'Already caught up, sending existing snapshot', 294 | { 295 | room, 296 | catchupSeqNum: currentCatchupSeqNum, 297 | tailSeqNum, 298 | recordCount: currentSnapshotState.recordBuffer.length, 299 | }, 300 | 'SnapshotSend', 301 | ); 302 | 303 | sendSyncMessages(); 304 | } 305 | 306 | for await (const event of events) { 307 | if (event.event === 'batch' && event.data?.records) { 308 | for (const r of event.data.records) { 309 | currentSnapshotState.trimSeqNum = r.seqNum; 310 | 311 | if (isFenceCommand(r)) { 312 | if (r.seqNum > currentSnapshotState.lastProcessedFenceSeqNum) { 313 | currentSnapshotState.currentFencingToken = atob(r.body ?? ''); 314 | currentSnapshotState.lastProcessedFenceSeqNum = r.seqNum; 315 | if (!r.body) { 316 | currentSnapshotState.blocked = false; 317 | } 318 | logger.debug( 319 | 'Received fencing token', 320 | { 321 | room, 322 | fencingToken: currentSnapshotState.currentFencingToken, 323 | }, 324 | 'FencingToken', 325 | ); 326 | } 327 | continue; 328 | } 329 | 330 | if (!r.body) { 331 | continue; 332 | } 333 | 334 | if (isTrimCommand(r)) { 335 | const trimSeqNum = decodeBigEndian64AsNumber(r.body); 336 | if (trimSeqNum > currentSnapshotState.lastProcessedTrimSeqNum) { 337 | currentSnapshotState.firstRecordAge = null; 338 | currentSnapshotState.lastProcessedTrimSeqNum = trimSeqNum; 339 | currentSnapshotState.recordBuffer = currentSnapshotState.recordBuffer.filter((r) => r.seqNum > trimSeqNum); 340 | logger.debug( 341 | 'Received trim command', 342 | { 343 | room, 344 | seqNum: trimSeqNum, 345 | }, 346 | 'TrimCommand', 347 | ); 348 | } 349 | continue; 350 | } 351 | 352 | currentSnapshotState.recordBuffer.push(r); 353 | 354 | if (currentSnapshotState.firstRecordAge === null) { 355 | currentSnapshotState.firstRecordAge = r.timestamp; 356 | } 357 | 358 | if (isCatchingUp) { 359 | if (r.seqNum + 1 >= tailSeqNum) { 360 | isCatchingUp = false; 361 | logger.info( 362 | 'Catchup completed, processing records', 363 | { 364 | room, 365 | recordCount: currentSnapshotState.recordBuffer.length, 366 | finalSeqNum: r.seqNum, 367 | }, 368 | 'S2Catchup', 369 | ); 370 | 371 | let docChanged = false; 372 | ydoc.once('afterTransaction', (tr) => { 373 | docChanged = tr.changed.size > 0; 374 | }); 375 | 376 | ydoc.transact(() => { 377 | for (const record of currentSnapshotState.recordBuffer) { 378 | try { 379 | const recordBytes = toUint8Array(record.body!); 380 | 381 | const decoder = decoding.createDecoder(recordBytes); 382 | const messageType = decoding.readUint8(decoder); 383 | 384 | if (messageType === messageSync) { 385 | const syncType = decoding.readUint8(decoder); 386 | if (syncType === messageSyncUpdate || syncType === messageSyncStep2) { 387 | const update = decoding.readVarUint8Array(decoder); 388 | Y.applyUpdate(ydoc, update); 389 | } 390 | } else if (messageType === messageAwareness) { 391 | const awarenessUpdate = decoding.readVarUint8Array(decoder); 392 | awarenessProtocol.applyAwarenessUpdate(awareness, 
awarenessUpdate, null); 393 | } 394 | } catch (err) { 395 | logger.error( 396 | 'Failed to apply catchup record', 397 | { 398 | room, 399 | error: err instanceof Error ? err.message : String(err), 400 | }, 401 | 'CatchupError', 402 | ); 403 | } 404 | } 405 | }); 406 | 407 | logger.debug( 408 | 'Catchup transaction completed', 409 | { 410 | room, 411 | docChanged, 412 | recordCount: currentSnapshotState.recordBuffer.length, 413 | }, 414 | 'CatchupComplete', 415 | ); 416 | 417 | sendSyncMessages(); 418 | } 419 | continue; 420 | } 421 | 422 | const recordBytes = toUint8Array(r.body); 423 | server.send(recordBytes); 424 | 425 | const leaseExpired = (() => { 426 | if (!currentSnapshotState.currentFencingToken) return true; 427 | try { 428 | const { deadline } = parseFencingToken(currentSnapshotState.currentFencingToken); 429 | return Date.now() > deadline * 1000; 430 | } catch { 431 | logger.error( 432 | 'Invalid fencing token format', 433 | { room, token: currentSnapshotState.currentFencingToken }, 434 | 'FencingTokenError', 435 | ); 436 | return false; 437 | } 438 | })(); 439 | 440 | const firstRecordExpired = 441 | currentSnapshotState.firstRecordAge !== null && Date.now() - currentSnapshotState.firstRecordAge > backlogBufferAge; 442 | 443 | const backlogSize = 444 | currentSnapshotState.trimSeqNum !== null 445 | ? currentSnapshotState.trimSeqNum + 1 - currentSnapshotState.lastProcessedTrimSeqNum 446 | : 0; 447 | 448 | const shouldSnapshot = 449 | (backlogSize >= maxBacklog && leaseExpired) || 450 | (firstRecordExpired && leaseExpired) || 451 | (currentSnapshotState.currentFencingToken && leaseExpired && backlogSize > 0); 452 | 453 | if (!shouldSnapshot || currentSnapshotState.blocked) { 454 | continue; 455 | } 456 | 457 | currentSnapshotState.blocked = true; 458 | 459 | takeSnapshot( 460 | env, 461 | leaseDuration, 462 | roomName, 463 | { ...currentSnapshotState, recordBuffer: [...currentSnapshotState.recordBuffer] }, 464 | room, 465 | logger, 466 | ); 467 | } 468 | } 469 | } 470 | }; 471 | 472 | (async () => { 473 | try { 474 | await readStreamWithRetry(); 475 | } catch (err) { 476 | logger.error( 477 | 'Failed to read stream after all retries', 478 | { error: err instanceof Error ? err.message : String(err), room }, 479 | 'StreamReadFailure', 480 | ); 481 | } 482 | })(); 483 | 484 | server.addEventListener('message', async (event: MessageEvent) => { 485 | try { 486 | const buffer = event.data instanceof ArrayBuffer ? 
new Uint8Array(event.data) : new TextEncoder().encode(event.data); 487 | 488 | const messageType = buffer[0]; 489 | 490 | if (messageType === messageSync) { 491 | const syncType = buffer[1]; 492 | if (syncType === messageSyncStep1) { 493 | return; 494 | } else if (syncType === messageSyncStep2) { 495 | if (buffer.length >= 4) { 496 | buffer[1] = messageSyncUpdate; 497 | } else { 498 | return; 499 | } 500 | } 501 | } 502 | 503 | const shouldPropagate = (messageType === messageSync && buffer[1] === messageSyncUpdate) || messageType === messageAwareness; 504 | 505 | if (!shouldPropagate) { 506 | logger.warn('Unexpected message type', { messageType, syncType: buffer[1] }, 'UnexpectedMessage'); 507 | return; 508 | } 509 | 510 | if (messageType === messageAwareness) { 511 | try { 512 | const decoder = decoding.createDecoder(buffer); 513 | decoding.readVarUint(decoder); 514 | decoding.readVarUint(decoder); 515 | const alen = decoding.readVarUint(decoder); 516 | const awId = decoding.readVarUint(decoder); 517 | 518 | if (alen === 1 && (userState.awarenessId === null || userState.awarenessId === awId)) { 519 | userState.awarenessId = awId; 520 | userState.awarenessLastClock = decoding.readVarUint(decoder); 521 | } 522 | } catch (err) { 523 | logger.error( 524 | 'Failed to decode awareness message', 525 | { 526 | room, 527 | error: err instanceof Error ? err.message : String(err), 528 | }, 529 | 'AwarenessError', 530 | ); 531 | return; 532 | } 533 | } 534 | messageBatcher.addMessage(buffer); 535 | } catch (err) { 536 | logger.error( 537 | 'Message processing error', 538 | { 539 | room, 540 | error: err instanceof Error ? err.message : String(err), 541 | }, 542 | 'MessageError', 543 | ); 544 | } 545 | }); 546 | 547 | server.addEventListener('close', async () => { 548 | logger.info( 549 | 'WebSocket connection closed', 550 | { 551 | room, 552 | userId: userState.awarenessId, 553 | }, 554 | 'WebSocketClose', 555 | ); 556 | 557 | if (userState.awarenessId !== null) { 558 | try { 559 | const disconnectMessage = encodeAwarenessUserDisconnected(userState.awarenessId, userState.awarenessLastClock); 560 | messageBatcher.addMessage(disconnectMessage); 561 | logger.info( 562 | 'User disconnect message queued', 563 | { 564 | room, 565 | userId: userState.awarenessId, 566 | clock: userState.awarenessLastClock, 567 | }, 568 | 'UserDisconnect', 569 | ); 570 | } catch (err) { 571 | logger.error( 572 | 'Failed to create disconnect message', 573 | { 574 | room, 575 | userId: userState.awarenessId, 576 | error: err instanceof Error ? err.message : String(err), 577 | }, 578 | 'DisconnectError', 579 | ); 580 | } 581 | } 582 | await messageBatcher.flush(); 583 | }); 584 | 585 | return new Response(null, { 586 | status: 101, 587 | webSocket: client, 588 | }); 589 | } catch (err) { 590 | logger.error( 591 | 'WebSocket handler failed', 592 | { 593 | error: err instanceof Error ? 
err.message : String(err), 594 | room: roomName, 595 | }, 596 | 'WebSocketHandlerError', 597 | ); 598 | return new Response('Authentication failed', { status: 401 }); 599 | } 600 | } 601 | 602 | async function handleAuthToken(env: Env, request: Request): Promise<Response> { 603 | const logger = createLogger(env, request); 604 | try { 605 | const demoToken = 'demo-jwt-token-12345'; 606 | return new Response(demoToken, { 607 | headers: { 608 | 'Access-Control-Allow-Origin': '*', 609 | 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS', 610 | 'Access-Control-Allow-Headers': 'Content-Type, Authorization', 611 | }, 612 | }); 613 | } catch (error) { 614 | logger.error( 615 | 'Auth token generation failed', 616 | { 617 | error: error instanceof Error ? error.message : String(error), 618 | }, 619 | 'AuthTokenError', 620 | ); 621 | return new Response('Auth error', { status: 500 }); 622 | } 623 | } 624 | 625 | async function handlePermissionCheck(request: Request): Promise<Response> { 626 | const url = new URL(request.url); 627 | 628 | const match = url.pathname.match(/^\/auth\/perm\/(.+)\/(.+)$/); 629 | if (!match || !match[1] || !match[2]) { 630 | return new Response('Invalid path', { status: 400 }); 631 | } 632 | 633 | const [, room, userid] = match; 634 | const permissionResponse = { 635 | yroom: decodeURIComponent(room), 636 | yaccess: 'rw', 637 | yuserid: userid, 638 | }; 639 | return new Response(JSON.stringify(permissionResponse), { 640 | headers: { 641 | 'Content-Type': 'application/json', 642 | 'Access-Control-Allow-Origin': '*', 643 | 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS', 644 | 'Access-Control-Allow-Headers': 'Content-Type, Authorization', 645 | }, 646 | }); 647 | } 648 | 649 | async function takeSnapshot( 650 | env: Env, 651 | leaseDuration: number, 652 | roomName: string, 653 | snapshotStateCopy: SnapshotState, 654 | room: Room, 655 | logger: S2Logger, 656 | ): Promise<void> { 657 | const newFencingToken = generateDeadlineFencingToken(leaseDuration); 658 | const currentETag = await getSnapshotETag(env, roomName, logger); 659 | 660 | try { 661 | await room.acquireLease(newFencingToken, snapshotStateCopy.currentFencingToken); 662 | } catch (err) { 663 | logger.error('Lease acquisition failed, skipping snapshot', { roomName, error: err }, 'LeaseAcquisitionError'); 664 | return; 665 | } 666 | 667 | try { 668 | const snapshot = await retrieveSnapshot(env, roomName, logger); 669 | const startSeqNum = (snapshot?.lastSeqNum ??
-1) + 1; 670 | 671 | const ydoc = new Y.Doc(); 672 | if (snapshot?.snapshot) { 673 | Y.applyUpdateV2(ydoc, snapshot.snapshot); 674 | } 675 | 676 | const { recordBuffer } = snapshotStateCopy; 677 | if (recordBuffer.length > 0 && recordBuffer[0].seqNum < startSeqNum) { 678 | logger.warn('Record buffer is stale, aborting snapshot', { 679 | roomName, 680 | firstRecordSeqNum: recordBuffer[0].seqNum, 681 | startSeqNum, 682 | }); 683 | ydoc.destroy(); 684 | return; 685 | } 686 | 687 | ydoc.transact(() => { 688 | for (const record of recordBuffer) { 689 | if (!record.body) continue; 690 | 691 | try { 692 | const bytes = toUint8Array(record.body); 693 | const decoder = decoding.createDecoder(bytes); 694 | const messageType = decoding.readUint8(decoder); 695 | 696 | if (messageType === messageSync) { 697 | const syncType = decoding.readUint8(decoder); 698 | if (syncType === messageSyncUpdate || syncType === messageSyncStep2) { 699 | const update = decoding.readVarUint8Array(decoder); 700 | Y.applyUpdate(ydoc, update); 701 | } 702 | } 703 | } catch (err) { 704 | logger.error('Failed to apply record during snapshot', { 705 | roomName, 706 | recordSeqNum: record.seqNum, 707 | error: err, 708 | }); 709 | } 710 | } 711 | }); 712 | 713 | const newSnapshot = Y.encodeStateAsUpdateV2(ydoc); 714 | await uploadSnapshot(env, roomName, newSnapshot, snapshotStateCopy.trimSeqNum!, currentETag, logger); 715 | ydoc.destroy(); 716 | 717 | await room.releaseLease(snapshotStateCopy.trimSeqNum!, newFencingToken); 718 | 719 | logger.info('Snapshot completed successfully', { 720 | roomName, 721 | recordsProcessed: recordBuffer.length, 722 | finalSeqNum: snapshotStateCopy.trimSeqNum, 723 | }); 724 | } catch (err) { 725 | logger.error('Snapshot failed after acquiring lease', { roomName, error: err }); 726 | } finally { 727 | try { 728 | await room.forceReleaseLease(newFencingToken); 729 | } catch (err) { 730 | logger.error('Failed to release lease', { roomName, error: err }); 731 | } 732 | } 733 | } 734 | -------------------------------------------------------------------------------- /src/logger.ts: -------------------------------------------------------------------------------- 1 | import { S2 } from '@s2-dev/streamstore'; 2 | 3 | export interface LogLevel { 4 | DEBUG: 'debug'; 5 | INFO: 'info'; 6 | WARN: 'warn'; 7 | ERROR: 'error'; 8 | } 9 | 10 | export const LogLevel: LogLevel = { 11 | DEBUG: 'debug', 12 | INFO: 'info', 13 | WARN: 'warn', 14 | ERROR: 'error', 15 | } as const; 16 | 17 | export type LogLevelType = LogLevel[keyof LogLevel]; 18 | 19 | export interface LogEntry { 20 | timestamp: number; 21 | workerId: string; 22 | level: LogLevelType; 23 | message: string; 24 | data?: any; 25 | source?: string; 26 | cfRay?: string; 27 | cfRequestId?: string; 28 | } 29 | 30 | enum LogMode { 31 | CONSOLE = 'CONSOLE', 32 | S2_SINGLE = 'S2_SINGLE', 33 | S2_SHARED = 'S2_SHARED', 34 | } 35 | 36 | export interface LoggerConfig { 37 | s2Client?: S2; 38 | s2Basin?: string; 39 | streamName?: string; 40 | workerId?: string; 41 | minLevel?: LogLevelType; 42 | request?: Request; 43 | logMode?: LogMode; 44 | } 45 | 46 | export class S2Logger { 47 | private s2Client?: S2; 48 | private s2Basin?: string; 49 | private streamName?: string; 50 | private workerId: string; 51 | private minLevel: LogLevelType; 52 | private cfRay?: string; 53 | private cfRequestId?: string; 54 | private logMode: LogMode; 55 | private readonly levelPriority = { 56 | debug: 0, 57 | info: 1, 58 | warn: 2, 59 | error: 3, 60 | }; 61 | 62 | constructor(config: 
LoggerConfig) { 63 | this.logMode = config.logMode ?? LogMode.S2_SHARED; 64 | this.workerId = config.workerId ?? this.generateWorkerId(); 65 | this.minLevel = config.minLevel ?? LogLevel.DEBUG; 66 | 67 | if (config.request) { 68 | this.cfRay = config.request.headers.get('cf-ray') ?? undefined; 69 | this.cfRequestId = config.request.headers.get('cf-request-id') ?? undefined; 70 | } 71 | 72 | if (this.logMode !== LogMode.CONSOLE) { 73 | this.s2Client = config.s2Client; 74 | this.s2Basin = config.s2Basin; 75 | 76 | if (this.logMode === LogMode.S2_SINGLE) { 77 | this.streamName = config.streamName ?? (this.cfRay ? `logs/worker-${this.cfRay}` : `logs/worker-${this.workerId}`); 78 | } else { 79 | this.streamName = config.streamName ?? 'logs/workers-shared'; 80 | } 81 | } 82 | } 83 | 84 | private generateWorkerId(): string { 85 | const timestamp = Date.now(); 86 | const random = Math.random().toString(36).substring(2, 8); 87 | return `worker-${timestamp}-${random}`; 88 | } 89 | 90 | private shouldLog(level: LogLevelType): boolean { 91 | return this.levelPriority[level] >= this.levelPriority[this.minLevel]; 92 | } 93 | 94 | private createLogEntry(level: LogLevelType, message: string, data?: any, source?: string): LogEntry { 95 | return { 96 | timestamp: Date.now(), 97 | workerId: this.workerId, 98 | level, 99 | message, 100 | data, 101 | source, 102 | cfRay: this.cfRay, 103 | cfRequestId: this.cfRequestId, 104 | }; 105 | } 106 | 107 | private async logEntry(entry: LogEntry): Promise { 108 | if (this.logMode === LogMode.CONSOLE) { 109 | const timestamp = new Date(entry.timestamp).toISOString(); 110 | const prefix = `[${timestamp}] ${entry.level.toUpperCase()} [${entry.workerId}]`; 111 | const message = `${prefix} ${entry.message}`; 112 | const data = entry.data ? ` | Data: ${JSON.stringify(entry.data)}` : ''; 113 | const source = entry.source ? ` | Source: ${entry.source}` : ''; 114 | const cf = entry.cfRay ? 
` | CF-Ray: ${entry.cfRay}` : ''; 115 | 116 | console.log(message + data + source + cf); 117 | return; 118 | } 119 | 120 | if (!this.s2Client || !this.s2Basin || !this.streamName) { 121 | console.error('S2 client not configured for log mode', this.logMode); 122 | return; 123 | } 124 | 125 | try { 126 | await this.s2Client.records.append({ 127 | stream: this.streamName, 128 | s2Basin: this.s2Basin, 129 | appendInput: { 130 | records: [ 131 | { 132 | body: JSON.stringify(entry), 133 | }, 134 | ], 135 | }, 136 | }); 137 | } catch (error) { 138 | console.error('Failed to log to S2:', error); 139 | } 140 | } 141 | 142 | debug(message: string, data?: any, source?: string): void { 143 | if (!this.shouldLog(LogLevel.DEBUG)) return; 144 | const entry = this.createLogEntry(LogLevel.DEBUG, message, data, source); 145 | this.logEntry(entry); 146 | } 147 | 148 | info(message: string, data?: any, source?: string): void { 149 | if (!this.shouldLog(LogLevel.INFO)) return; 150 | const entry = this.createLogEntry(LogLevel.INFO, message, data, source); 151 | this.logEntry(entry); 152 | } 153 | 154 | warn(message: string, data?: any, source?: string): void { 155 | if (!this.shouldLog(LogLevel.WARN)) return; 156 | const entry = this.createLogEntry(LogLevel.WARN, message, data, source); 157 | this.logEntry(entry); 158 | } 159 | 160 | error(message: string, data?: any, source?: string): void { 161 | if (!this.shouldLog(LogLevel.ERROR)) return; 162 | const entry = this.createLogEntry(LogLevel.ERROR, message, data, source); 163 | this.logEntry(entry); 164 | } 165 | 166 | getWorkerId(): string { 167 | return this.workerId; 168 | } 169 | 170 | setLevel(level: LogLevelType): void { 171 | this.minLevel = level; 172 | } 173 | } 174 | 175 | export function createLogger(env: { S2_ACCESS_TOKEN?: string; S2_BASIN?: string; LOG_MODE?: string }, request?: Request): S2Logger { 176 | const logMode = (env.LOG_MODE ?? 'CONSOLE') as LogMode; 177 | 178 | return new S2Logger({ 179 | s2Client: logMode !== LogMode.CONSOLE ? new S2({ accessToken: env.S2_ACCESS_TOKEN }) : undefined, 180 | s2Basin: env.S2_BASIN, 181 | logMode: logMode, 182 | request, 183 | }); 184 | } 185 | -------------------------------------------------------------------------------- /src/protocol.ts: -------------------------------------------------------------------------------- 1 | /* 2 | * Portions of this file are derived from y-redis 3 | * Licensed under the GNU Affero General Public License v3.0 4 | * See for license details. 
5 | * 6 | * Modifications to this file are 7 | * Licensed under the GNU Affero General Public License v3.0 8 | */ 9 | 10 | import { decoding, encoding } from 'lib0'; 11 | import * as awarenessProtocol from 'y-protocols/awareness'; 12 | import * as Y from 'yjs'; 13 | import * as array from 'lib0/array'; 14 | 15 | export const messageSync = 0; 16 | export const messageSyncStep1 = 0; 17 | export const messageSyncStep2 = 1; 18 | export const messageSyncUpdate = 2; 19 | export const messageAwareness = 1; 20 | 21 | export const encodeSyncStep1 = (sv: Uint8Array): Uint8Array => 22 | encoding.encode((encoder) => { 23 | encoding.writeVarUint(encoder, messageSync); 24 | encoding.writeVarUint(encoder, messageSyncStep1); 25 | encoding.writeVarUint8Array(encoder, sv); 26 | }); 27 | 28 | export const encodeSyncStep2 = (diff: Uint8Array): Uint8Array => 29 | encoding.encode((encoder) => { 30 | encoding.writeVarUint(encoder, messageSync); 31 | encoding.writeVarUint(encoder, messageSyncStep2); 32 | encoding.writeVarUint8Array(encoder, diff); 33 | }); 34 | 35 | export const encodeAwarenessUpdate = (awareness: awarenessProtocol.Awareness, changedClients: number[]): Uint8Array => 36 | encoding.encode((encoder) => { 37 | encoding.writeVarUint(encoder, messageAwareness); 38 | encoding.writeVarUint8Array(encoder, awarenessProtocol.encodeAwarenessUpdate(awareness, changedClients)); 39 | }); 40 | 41 | export const encodeAwarenessUserDisconnected = (clientid: number, lastClock: number): Uint8Array => 42 | encoding.encode((encoder) => { 43 | encoding.writeVarUint(encoder, messageAwareness); 44 | encoding.writeVarUint8Array( 45 | encoder, 46 | encoding.encode((encoder) => { 47 | encoding.writeVarUint(encoder, 1); 48 | encoding.writeVarUint(encoder, clientid); 49 | encoding.writeVarUint(encoder, lastClock + 1); 50 | encoding.writeVarString(encoder, JSON.stringify(null)); 51 | }), 52 | ); 53 | }); 54 | 55 | export function mergeMessages(messages: Uint8Array[]): Uint8Array[] { 56 | if (messages.length < 2) { 57 | return messages; 58 | } 59 | 60 | const updates: Uint8Array[] = []; 61 | const aw = new awarenessProtocol.Awareness(new Y.Doc()); 62 | 63 | messages.forEach((m) => { 64 | try { 65 | const decoder = decoding.createDecoder(m); 66 | const messageType = decoding.readUint8(decoder); 67 | switch (messageType) { 68 | case messageSync: { 69 | const syncType = decoding.readUint8(decoder); 70 | if (syncType === messageSyncUpdate) { 71 | updates.push(decoding.readVarUint8Array(decoder)); 72 | } 73 | break; 74 | } 75 | case messageAwareness: { 76 | awarenessProtocol.applyAwarenessUpdate(aw, decoding.readVarUint8Array(decoder), null); 77 | break; 78 | } 79 | } 80 | } catch (e) { 81 | console.error('Error parsing message for merging:', e); 82 | } 83 | }); 84 | 85 | const result: Uint8Array[] = []; 86 | 87 | updates.length > 0 && 88 | result.push( 89 | encoding.encode((encoder) => { 90 | encoding.writeVarUint(encoder, messageSync); 91 | encoding.writeVarUint(encoder, messageSyncUpdate); 92 | encoding.writeVarUint8Array(encoder, Y.mergeUpdates(updates)); 93 | }), 94 | ); 95 | 96 | aw.states.size > 0 && 97 | result.push( 98 | encoding.encode((encoder) => { 99 | encoding.writeVarUint(encoder, messageAwareness); 100 | encoding.writeVarUint8Array(encoder, awarenessProtocol.encodeAwarenessUpdate(aw, array.from(aw.getStates().keys()))); 101 | }), 102 | ); 103 | 104 | return result; 105 | } 106 | -------------------------------------------------------------------------------- /src/snapshot.ts: 
-------------------------------------------------------------------------------- 1 | import { S2Logger } from './logger'; 2 | 3 | export async function uploadSnapshot( 4 | env: Env, 5 | room: string, 6 | ydocUpdate: Uint8Array, 7 | lastSeqNum: number | null | undefined, 8 | expectedETag: string | null, 9 | logger: S2Logger, 10 | ): Promise<void> { 11 | if (!env.R2_BUCKET) { 12 | logger.warn('No R2 bucket configured, skipping snapshot upload', { room }, 'SnapshotUpload'); 13 | return; 14 | } 15 | 16 | const key = `snapshots/${encodeURIComponent(room)}/latest.bin`; 17 | logger.debug('Uploading snapshot to R2', { room, key, size: ydocUpdate.length, expectedETag }, 'SnapshotUpload'); 18 | 19 | const putOptions: R2PutOptions = { 20 | customMetadata: { 21 | timestamp: Date.now().toString(), 22 | room: room, 23 | lastSeqNum: lastSeqNum?.toString() ?? '0', 24 | }, 25 | }; 26 | 27 | if (expectedETag === null) { 28 | putOptions.onlyIf = { 29 | etagDoesNotMatch: '*', 30 | }; 31 | } else { 32 | putOptions.onlyIf = { 33 | etagMatches: expectedETag, 34 | }; 35 | } 36 | 37 | try { 38 | await env.R2_BUCKET.put(key, ydocUpdate, putOptions); 39 | logger.info('Snapshot uploaded successfully', { room, key, lastSeqNum, expectedETag }, 'SnapshotUpload'); 40 | } catch (error) { 41 | logger.warn( 42 | 'Snapshot upload failed', 43 | { 44 | room, 45 | key, 46 | expectedETag, 47 | error: error instanceof Error ? error.message : String(error), 48 | }, 49 | 'SnapshotUpload', 50 | ); 51 | 52 | throw error; 53 | } 54 | } 55 | 56 | export async function retrieveSnapshot( 57 | env: Env, 58 | room: string, 59 | logger: S2Logger, 60 | ): Promise<{ snapshot: Uint8Array; lastSeqNum: number; etag: string | null } | null> { 61 | if (!env.R2_BUCKET) { 62 | logger.warn('No R2 bucket configured, no snapshot to retrieve', { room }, 'SnapshotRetrieve'); 63 | return null; 64 | } 65 | 66 | const key = `snapshots/${encodeURIComponent(room)}/latest.bin`; 67 | logger.debug('Retrieving snapshot from R2', { room, key }, 'SnapshotRetrieve'); 68 | 69 | const object = await env.R2_BUCKET.get(key); 70 | if (!object) { 71 | logger.debug('No snapshot found in R2', { room, key }, 'SnapshotRetrieve'); 72 | return null; 73 | } 74 | 75 | const arrayBuffer = await object.arrayBuffer(); 76 | const lastSeqNum = parseInt(object.customMetadata?.lastSeqNum || '0', 10); 77 | return { 78 | snapshot: new Uint8Array(arrayBuffer), 79 | lastSeqNum, 80 | etag: object.etag, 81 | }; 82 | } 83 | 84 | export async function getSnapshotETag(env: Env, room: string, logger: S2Logger): Promise<string | null> { 85 | if (!env.R2_BUCKET) { 86 | logger.warn('No R2 bucket configured, cannot get ETag', { room }, 'SnapshotETag'); 87 | return null; 88 | } 89 | 90 | const key = `snapshots/${encodeURIComponent(room)}/latest.bin`; 91 | logger.debug('Getting snapshot ETag from R2', { room, key }, 'SnapshotETag'); 92 | 93 | const object = await env.R2_BUCKET.head(key); 94 | if (!object) { 95 | logger.debug('No snapshot found in R2 for ETag check', { room, key }, 'SnapshotETag'); 96 | return null; 97 | } 98 | 99 | return object.etag; 100 | } 101 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | import { SequencedRecord } from '@s2-dev/streamstore/models/components'; 2 | 3 | export interface UserState { 4 | awarenessId: number | null; 5 | awarenessLastClock: number; 6 | room: string; 7 | } 8 | 9 | export function createUserState(room: string): UserState { 10 | return { 11 |
awarenessId: null, 12 | awarenessLastClock: 0, 13 | room, 14 | }; 15 | } 16 | 17 | export interface SnapshotState { 18 | firstRecordAge: number | null; 19 | currentFencingToken: string; 20 | trimSeqNum: number | null; 21 | lastProcessedFenceSeqNum: number; 22 | lastProcessedTrimSeqNum: number; 23 | blocked: boolean; 24 | recordBuffer: SequencedRecord[]; 25 | } 26 | 27 | export function createSnapshotState(): SnapshotState { 28 | return { 29 | firstRecordAge: null, 30 | currentFencingToken: '', 31 | trimSeqNum: null, 32 | lastProcessedFenceSeqNum: -1, 33 | lastProcessedTrimSeqNum: -1, 34 | blocked: false, 35 | recordBuffer: [], 36 | }; 37 | } 38 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import { S2 } from '@s2-dev/streamstore'; 2 | import { AppendAck, S2Format, SequencedRecord } from '@s2-dev/streamstore/models/components'; 3 | import { fromUint8Array } from 'js-base64'; 4 | import { S2Logger } from './logger'; 5 | import { mergeMessages } from './protocol'; 6 | 7 | interface Config { 8 | maxBacklog: number; 9 | batchSize: number; 10 | lingerTime: number; 11 | leaseDuration: number; 12 | backlogBufferAge: number; 13 | } 14 | 15 | export function parseConfig(env: any): Config { 16 | return { 17 | maxBacklog: env.SNAPSHOT_BACKLOG_SIZE ? parseInt(env.SNAPSHOT_BACKLOG_SIZE, 10) : 400, 18 | batchSize: env.S2_BATCH_SIZE ? parseInt(env.S2_BATCH_SIZE, 10) : 8, 19 | lingerTime: env.S2_LINGER_TIME ? parseInt(env.S2_LINGER_TIME, 10) : 50, 20 | leaseDuration: env.LEASE_DURATION ? parseInt(env.LEASE_DURATION, 10) : 30, 21 | backlogBufferAge: env.BACKLOG_BUFFER_AGE ? parseInt(env.BACKLOG_BUFFER_AGE, 10) : 60_000, 22 | }; 23 | } 24 | 25 | export function encodeBigEndian64(num: number): string { 26 | const buffer = new ArrayBuffer(8); 27 | const view = new DataView(buffer); 28 | view.setBigUint64(0, BigInt(num), false); 29 | const bytes = new Uint8Array(buffer); 30 | return btoa(String.fromCharCode(...bytes)); 31 | } 32 | 33 | export function decodeBigEndian64AsNumber(base64: string): number { 34 | const binary = atob(base64); 35 | 36 | if (binary.length !== 8) { 37 | throw new Error('Invalid input length, must be 8 bytes'); 38 | } 39 | 40 | const buffer = new ArrayBuffer(8); 41 | const bytes = new Uint8Array(buffer); 42 | 43 | for (let i = 0; i < 8; i++) { 44 | bytes[i] = binary.charCodeAt(i); 45 | } 46 | const value = new DataView(buffer).getBigUint64(0, false); 47 | return Number(value); 48 | } 49 | 50 | enum CommandType { 51 | FENCE = 'fence', 52 | TRIM = 'trim', 53 | } 54 | 55 | export function isCommandType(record: SequencedRecord, type: CommandType): boolean { 56 | return record.headers?.length === 1 && record.headers[0]?.[0] === '' && record.headers[0]?.[1] === btoa(type); 57 | } 58 | 59 | export function isFenceCommand(record: SequencedRecord): boolean { 60 | return isCommandType(record, CommandType.FENCE); 61 | } 62 | 63 | export function isTrimCommand(record: SequencedRecord): boolean { 64 | return isCommandType(record, CommandType.TRIM); 65 | } 66 | 67 | export function parseFencingToken(token: string): { id: string; deadline: number } { 68 | const [id, deadline] = token.split(' '); 69 | if (!id || !deadline) { 70 | throw new Error(`Invalid fencing token format: ${token}`); 71 | } 72 | return { id, deadline: Number(deadline) }; 73 | } 74 | 75 | export function generateDeadlineFencingToken(leaseDuration: number): string { 76 | const newDeadline = 
Math.floor(Date.now() / 1000 + leaseDuration); 77 | const id = crypto.getRandomValues(new Uint8Array(12)); 78 | const idBase64 = fromUint8Array(id); 79 | return `${idBase64} ${newDeadline}`; 80 | } 81 | 82 | export class Room { 83 | private s2Client: S2; 84 | private stream: string; 85 | private s2Basin: string; 86 | 87 | constructor(s2Client: S2, stream: string, s2Basin: string) { 88 | this.s2Client = s2Client; 89 | this.stream = stream; 90 | this.s2Basin = s2Basin; 91 | } 92 | 93 | async acquireLease(newFencingToken: string, prevFencingToken: string): Promise<AppendAck> { 94 | return await this.s2Client.records.append({ 95 | stream: this.stream, 96 | s2Format: S2Format.Base64, 97 | appendInput: { 98 | records: [ 99 | { 100 | body: btoa(newFencingToken), 101 | headers: [[btoa(''), btoa('fence')]], 102 | }, 103 | ], 104 | fencingToken: prevFencingToken, 105 | }, 106 | s2Basin: this.s2Basin, 107 | }); 108 | } 109 | 110 | async forceReleaseLease(currentFencingToken: string): Promise<AppendAck> { 111 | return await this.s2Client.records.append({ 112 | stream: this.stream, 113 | s2Format: S2Format.Base64, 114 | appendInput: { 115 | records: [ 116 | { 117 | body: '', 118 | headers: [[btoa(''), btoa('fence')]], 119 | }, 120 | ], 121 | fencingToken: currentFencingToken, 122 | }, 123 | s2Basin: this.s2Basin, 124 | }); 125 | } 126 | 127 | async releaseLease(trimSeqNum: number, prevFencingToken: string): Promise<AppendAck> { 128 | return await this.s2Client.records.append({ 129 | s2Format: S2Format.Base64, 130 | stream: this.stream, 131 | appendInput: { 132 | records: [ 133 | { 134 | body: '', 135 | headers: [[btoa(''), btoa('fence')]], 136 | }, 137 | { 138 | body: encodeBigEndian64(trimSeqNum), 139 | headers: [[btoa(''), btoa('trim')]], 140 | }, 141 | ], 142 | fencingToken: prevFencingToken, 143 | }, 144 | s2Basin: this.s2Basin, 145 | }); 146 | } 147 | } 148 | 149 | export class MessageBatcher { 150 | private messageBatch: Uint8Array[] = []; 151 | private batchTimeout: NodeJS.Timeout | null = null; 152 | 153 | constructor( 154 | private readonly s2Client: S2, 155 | private readonly streamName: string, 156 | private readonly s2Basin: string, 157 | private readonly logger: S2Logger, 158 | private readonly room: string, 159 | private readonly batchSize: number, 160 | private readonly lingerTime: number, 161 | ) {} 162 | 163 | addMessage(message: Uint8Array): void { 164 | this.messageBatch.push(message); 165 | 166 | if (this.messageBatch.length >= this.batchSize) { 167 | this.flush(); 168 | } else { 169 | this.resetTimeout(); 170 | } 171 | } 172 | 173 | async flush(): Promise<void> { 174 | if (this.messageBatch.length === 0) return; 175 | 176 | const batch = [...this.messageBatch]; 177 | this.clearBatch(); 178 | this.clearTimeout(); 179 | 180 | try { 181 | const messagesToSend = mergeMessages(batch); 182 | const base64Messages = messagesToSend.map((msg) => fromUint8Array(msg)); 183 | await this.s2Client.records.append({ 184 | stream: this.streamName, 185 | s2Basin: this.s2Basin, 186 | appendInput: { records: base64Messages.map((body) => ({ body })) }, 187 | s2Format: S2Format.Base64, 188 | }); 189 | } catch (err) { 190 | this.logger.error( 191 | 'Failed to append batch to S2', 192 | { 193 | room: this.room, 194 | streamName: this.streamName, 195 | error: err instanceof Error ?
err.message : String(err), 196 | }, 197 | 'S2AppendError', 198 | ); 199 | } 200 | } 201 | 202 | private clearBatch(): void { 203 | this.messageBatch = []; 204 | } 205 | 206 | private clearTimeout(): void { 207 | if (this.batchTimeout) { 208 | clearTimeout(this.batchTimeout); 209 | this.batchTimeout = null; 210 | } 211 | } 212 | 213 | private resetTimeout(): void { 214 | this.clearTimeout(); 215 | this.batchTimeout = setTimeout(() => this.flush(), this.lingerTime); 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /test/env.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'cloudflare:test' { 2 | interface ProvidedEnv extends Env {} 3 | } 4 | -------------------------------------------------------------------------------- /test/index.spec.ts: -------------------------------------------------------------------------------- 1 | import { env, createExecutionContext, waitOnExecutionContext, SELF } from 'cloudflare:test'; 2 | import { describe, it, expect } from 'vitest'; 3 | import worker from '../src/index'; 4 | 5 | // For now, you'll need to do something like this to get a correctly-typed 6 | // `Request` to pass to `worker.fetch()`. 7 | const IncomingRequest = Request; 8 | 9 | describe('Hello World worker', () => { 10 | it('responds with Hello World! (unit style)', async () => { 11 | const request = new IncomingRequest('http://example.com'); 12 | // Create an empty context to pass to `worker.fetch()`. 13 | const ctx = createExecutionContext(); 14 | const response = await worker.fetch(request, env, ctx); 15 | // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions 16 | await waitOnExecutionContext(ctx); 17 | expect(await response.text()).toMatchInlineSnapshot(`"Hello World!"`); 18 | }); 19 | 20 | it('responds with Hello World! (integration style)', async () => { 21 | const response = await SELF.fetch('https://example.com'); 22 | expect(await response.text()).toMatchInlineSnapshot(`"Hello World!"`); 23 | }); 24 | }); 25 | -------------------------------------------------------------------------------- /test/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "types": ["@cloudflare/vitest-pool-workers"] 5 | }, 6 | "include": ["./**/*.ts", "../worker-configuration.d.ts"], 7 | "exclude": [] 8 | } 9 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */ 4 | 5 | /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ 6 | "target": "es2021", 7 | /* Specify a set of bundled library declaration files that describe the target runtime environment. */ 8 | "lib": ["es2021"], 9 | /* Specify what JSX code is generated. */ 10 | "jsx": "react-jsx", 11 | 12 | /* Specify what module code is generated. */ 13 | "module": "es2022", 14 | /* Specify how TypeScript looks up a file from a given module specifier. */ 15 | "moduleResolution": "Bundler", 16 | /* Enable importing .json files */ 17 | "resolveJsonModule": true, 18 | 19 | /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. 
*/ 20 | "allowJs": true, 21 | /* Enable error reporting in type-checked JavaScript files. */ 22 | "checkJs": false, 23 | 24 | /* Disable emitting files from a compilation. */ 25 | "noEmit": true, 26 | 27 | /* Ensure that each file can be safely transpiled without relying on other imports. */ 28 | "isolatedModules": true, 29 | /* Allow 'import x from y' when a module doesn't have a default export. */ 30 | "allowSyntheticDefaultImports": true, 31 | /* Ensure that casing is correct in imports. */ 32 | "forceConsistentCasingInFileNames": true, 33 | 34 | /* Enable all strict type-checking options. */ 35 | "strict": true, 36 | 37 | /* Skip type checking all .d.ts files. */ 38 | "skipLibCheck": true, 39 | "types": [ 40 | "node", 41 | "./worker-configuration.d.ts" 42 | ] 43 | }, 44 | "exclude": ["test"], 45 | "include": ["worker-configuration.d.ts", "src/**/*.ts"] 46 | } 47 | -------------------------------------------------------------------------------- /vitest.config.mts: -------------------------------------------------------------------------------- 1 | import { defineWorkersConfig } from '@cloudflare/vitest-pool-workers/config'; 2 | 3 | export default defineWorkersConfig({ 4 | test: { 5 | poolOptions: { 6 | workers: { 7 | wrangler: { configPath: './wrangler.jsonc' }, 8 | }, 9 | }, 10 | }, 11 | }); 12 | -------------------------------------------------------------------------------- /wrangler.jsonc: -------------------------------------------------------------------------------- 1 | /** 2 | * For more details on how to configure Wrangler, refer to: 3 | * https://developers.cloudflare.com/workers/wrangler/configuration/ 4 | */ 5 | { 6 | "$schema": "node_modules/wrangler/config-schema.json", 7 | "name": "y-s2", 8 | "main": "src/index.ts", 9 | "compatibility_date": "2025-08-19", 10 | "observability": { 11 | "enabled": true 12 | }, 13 | /** 14 | * Smart Placement 15 | * Docs: https://developers.cloudflare.com/workers/configuration/smart-placement/#smart-placement 16 | */ 17 | // "placement": { "mode": "smart" } 18 | /** 19 | * Bindings 20 | * Bindings allow your Worker to interact with resources on the Cloudflare Developer Platform, including 21 | * databases, object storage, AI inference, real-time communication and more. 22 | * https://developers.cloudflare.com/workers/runtime-apis/bindings/ 23 | */ 24 | /** 25 | * Environment Variables 26 | * https://developers.cloudflare.com/workers/wrangler/configuration/#environment-variables 27 | */ 28 | "vars": { "ENVIRONMENT": "production", "S2_ACCESS_TOKEN": "", "S2_BASIN": "" }, 29 | "r2_buckets": [ 30 | { "binding": "R2_BUCKET", "bucket_name": "" } 31 | ] 32 | /** 33 | * Note: Use secrets to store sensitive data. 34 | * https://developers.cloudflare.com/workers/configuration/secrets/ 35 | */ 36 | /** 37 | * Static Assets 38 | * https://developers.cloudflare.com/workers/static-assets/binding/ 39 | */ 40 | // "assets": { "directory": "./public/", "binding": "ASSETS" } 41 | /** 42 | * Service Bindings (communicate between multiple Workers) 43 | * https://developers.cloudflare.com/workers/wrangler/configuration/#service-bindings 44 | */ 45 | // "services": [{ "binding": "MY_SERVICE", "service": "my-service" }] 46 | } --------------------------------------------------------------------------------
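Hypothetical usage sketch (not a file in this repository): a minimal browser client for the Worker above. It relies only on what the sources show, namely the WebSocket upgrade with room and yauth query parameters and the binary y-protocols sync/awareness framing from src/protocol.ts; the hostname, the root WebSocket path, and how token is obtained are illustrative assumptions.

// Hypothetical client sketch. Assumes the Worker is reachable at wss://y-s2.example.workers.dev
// and that the WebSocket handler is served at the root path; `token` stands in for whatever the
// auth endpoint issues. Everything else mirrors the message constants in src/protocol.ts.
import * as Y from 'yjs';
import { decoding, encoding } from 'lib0';

const messageSync = 0; // same values as src/protocol.ts
const messageSyncStep1 = 0;
const messageSyncStep2 = 1;
const messageSyncUpdate = 2;

export function connect(room: string, token: string): Y.Doc {
	const ydoc = new Y.Doc();
	const ws = new WebSocket(`wss://y-s2.example.workers.dev/?room=${encodeURIComponent(room)}&yauth=${encodeURIComponent(token)}`);
	ws.binaryType = 'arraybuffer';

	ws.addEventListener('open', () => {
		// SyncStep1: announce our state vector; the Worker answers with SyncStep1 + SyncStep2.
		const encoder = encoding.createEncoder();
		encoding.writeVarUint(encoder, messageSync);
		encoding.writeVarUint(encoder, messageSyncStep1);
		encoding.writeVarUint8Array(encoder, Y.encodeStateVector(ydoc));
		ws.send(encoding.toUint8Array(encoder));
	});

	ws.addEventListener('message', (ev) => {
		// Apply incoming SyncStep2 / update payloads to the local document.
		const decoder = decoding.createDecoder(new Uint8Array(ev.data as ArrayBuffer));
		if (decoding.readVarUint(decoder) === messageSync) {
			const syncType = decoding.readVarUint(decoder);
			if (syncType === messageSyncStep2 || syncType === messageSyncUpdate) {
				Y.applyUpdate(ydoc, decoding.readVarUint8Array(decoder));
			}
		}
	});

	return ydoc;
}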