20 | The content of this editor is shared with every client that visits this
21 | domain.
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/demos/blocksuite/client/sync.js:
--------------------------------------------------------------------------------
1 | // eslint-disable-next-line no-unused-vars
2 | import { Doc } from '@blocksuite/store'
3 | import { WebsocketProvider } from 'y-websocket'
4 | import { authToken } from './api.js'
5 | import { getCurrentRoom } from './route.js'
6 | import { editor } from './editor.js'
7 |
// Websocket endpoint of the y-redis server (see bin/server.js, default port 3002).
const endpoint = 'ws://localhost:3002'

// Module-level state: at most one provider/doc pair is connected at a time.
/** @type {WebsocketProvider | null} */
let currentProvider = null
/** @type {Doc | null} */
let currentDoc = null
14 |
/**
 * Connect `doc` to the collaboration backend, replacing any previous
 * connection. No-op when `doc` is already the connected doc.
 *
 * @param {Doc} doc
 */
export function sync (doc) {
  // already connected to this exact doc — nothing to do
  if (doc === currentDoc) return
  // tear down the previous websocket connection, if any
  currentProvider?.destroy()

  const wsProvider = new WebsocketProvider(endpoint, getCurrentRoom(), doc.spaceDoc, {
    params: { yauth: authToken }
  })
  wsProvider.on('sync', () => {
    // once the initial sync completed, load the doc and show it in the editor
    doc.load()
    editor.doc = doc
  })
  currentProvider = wsProvider
  currentDoc = doc
}
30 |
--------------------------------------------------------------------------------
/tests/utils.js:
--------------------------------------------------------------------------------
1 | import * as env from 'lib0/environment'
2 | import * as json from 'lib0/json'
3 | import * as ecdsa from 'lib0/crypto/ecdsa'
4 |
5 | import { createMemoryStorage } from '../src/storage/memory.js'
6 |
/**
 * Clients created by previous test cases; destroyed at the start of each test.
 * @type {Array<{ destroy: function():Promise}>}
 */
export const prevClients = []
// shared in-memory store used by the test suite
export const store = createMemoryStorage()

// Signing keys for auth tokens, read from env conf (see .env.template).
// Note: top-level await — importing this module requires the conf to be set.
export const authPrivateKey = await ecdsa.importKeyJwk(json.parse(env.ensureConf('auth-private-key')))
export const authPublicKey = await ecdsa.importKeyJwk(json.parse(env.ensureConf('auth-public-key')))

// redis key prefix used by tests so test keys can be flushed safely
export const redisPrefix = 'ytests'

// Endpoints of the example auth server (bin/auth-server-example.js).
export const authDemoServerPort = 5173
export const authDemoServerUrl = `http://localhost:${authDemoServerPort}`
export const checkPermCallbackUrl = `${authDemoServerUrl}/auth/perm/`
export const authTokenUrl = `${authDemoServerUrl}/auth/token`

// Endpoint of the y-redis websocket server under test.
export const yredisPort = 9999
export const yredisUrl = `ws://localhost:${yredisPort}/`
25 |
--------------------------------------------------------------------------------
/demos/auth-express/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "y-redis-auth-demo",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "server.js",
6 | "type": "module",
7 | "scripts": {
8 | "dist": "rollup -c",
9 | "start": "npm run dist && node --env-file ../../.env server.js",
10 | "debug": "node --env-file ../../.env --inspect-brk server.js"
11 | },
12 | "author": "",
13 | "license": "MIT",
14 | "dependencies": {
15 | "express": "^4.18.3",
16 | "formidable": "^3.5.1",
17 | "y-websocket": "^2.0.3",
18 | "yjs": "^13.6.15",
19 | "@codemirror/lang-javascript": "^6.2.2",
20 | "@codemirror/state": "^6.4.1",
21 | "@codemirror/view": "^6.24.1",
22 | "codemirror": "^6.0.1",
23 | "y-codemirror.next": "^0.3.2",
24 | "@types/express": "^4.17.21",
25 | "@types/formidable": "^3.4.5"
26 | },
27 | "devDependencies": {
28 | "@rollup/plugin-commonjs": "^25.0.7",
29 | "@rollup/plugin-node-resolve": "^15.2.3",
30 | "rollup": "^4.12.0"
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Instructions to build the Docker image
2 | # docker build -t y-redis .
3 |
4 | # Run the worker as follows:
5 | # docker run --env-file ./.env y-redis npm run start:worker
6 |
7 | # Run the server as follows:
8 | # docker run -p 3002:3002 --env-file ./.env y-redis npm run start:server
9 |
10 | # Use an official Node.js runtime as a parent image
11 | # FROM node:20-alpine
12 | FROM node:lts-alpine3.19
13 |
14 | # Install glibc compatibility for alpine
15 | # See more at https://wiki.alpinelinux.org/wiki/Running_glibc_programs
16 | RUN apk add gcompat
17 |
18 | # Set the working directory in the container
19 | WORKDIR /usr/src/app
20 |
21 | # Copy package.json and package-lock.json (if available) to the working directory
22 | COPY package*.json ./
23 |
24 | # Install any dependencies
25 | RUN npm install
26 |
27 | # Bundle your app source inside the Docker image
28 | COPY . .
29 |
30 | # Make port 3002 available to the world outside this container,
31 | # assuming your app runs on port 3002
32 | EXPOSE 3002
33 |
34 | # Removed CMD instruction to allow dynamic command execution at runtime
35 |
--------------------------------------------------------------------------------
/tests/auth.tests.js:
--------------------------------------------------------------------------------
1 | import * as t from 'lib0/testing'
2 | import * as utils from './utils.js'
3 | import * as jwt from 'lib0/crypto/jwt'
4 | import * as json from 'lib0/json'
5 |
6 | import { authServerStarted } from '../bin/auth-server-example.js' // starts the example server
7 |
/**
 * Exercises the example auth server: fetch a token, verify its signature,
 * then query the permission callback for a sample room.
 *
 * @param {t.TestCase} _tc
 */
export const testSampleAuthServer = async _tc => {
  await authServerStarted
  const room = 'sample-room'
  const tokenResponse = await fetch(utils.authTokenUrl)
  const token = await tokenResponse.text()
  // verify that the user has a valid token
  const { payload: userToken } = await jwt.verifyJwt(utils.authPublicKey, token)
  console.log('server created', { userToken })
  if (userToken.yuserid == null) {
    throw new Error('Missing userid in user token!')
  }
  const permUrl = new URL(`${room}/${userToken.yuserid}`, utils.checkPermCallbackUrl)
  const perm = await fetch(permUrl).then(response => response.json())
  t.info(`retrieved permission: ${json.stringify(perm)}`)
  t.assert(perm.yroom === room)
  t.assert(perm.yaccess === 'rw')
  t.assert(perm.yuserid != null)
}
27 |
--------------------------------------------------------------------------------
/demos/auth-express/Dockerfile:
--------------------------------------------------------------------------------
1 | # Instructions to build the Docker image
2 | # docker build -t y-redis .
3 |
4 | # Run the worker as follows:
5 | # docker run --env-file ./.env y-redis npm run start:worker
6 |
7 | # Run the server as follows:
8 | # docker run -p 3002:3002 --env-file ./.env y-redis npm run start:server
9 |
10 | # Use an official Node.js runtime as a parent image
11 | # FROM node:20-alpine
12 | FROM node:lts-alpine3.19
13 |
14 | # Install glibc compatibility for alpine
15 | # See more at https://wiki.alpinelinux.org/wiki/Running_glibc_programs
16 | RUN apk add gcompat
17 |
18 | # Set the working directory in the container
19 | WORKDIR /usr/src/app
20 |
21 | # Copy package.json and package-lock.json (if available) to the working directory
22 | COPY package*.json ./
23 |
24 | # Install any dependencies
25 | RUN npm install
26 |
27 | # Bundle your app source inside the Docker image
28 | COPY . .
29 |
30 | # build files
31 | RUN npm run dist
32 |
33 | # Make port 3002 available to the world outside this container,
34 | # assuming your app runs on port 3002
35 | EXPOSE 3002
36 |
# Default command: start the demo server (override on `docker run` if needed)
38 | CMD node ./server.js
39 |
--------------------------------------------------------------------------------
/demos/blocksuite/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "y-redis-blocksuite-demo",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "server.js",
6 | "type": "module",
7 | "scripts": {
8 | "dist": "rollup -c",
9 | "watch": "rollup -c -w",
10 | "serve": "node --env-file ../../.env server.js",
11 | "dev": "concurrently -r 'npm run watch' 'npm run serve'",
12 | "start": "npm run dist && npm run serve",
13 | "debug": "node --env-file ../../.env --inspect-brk server.js"
14 | },
15 | "author": "",
16 | "license": "MIT",
17 | "dependencies": {
18 | "@blocksuite/blocks": "0.14.0-canary-202403201020-e0ac77d",
19 | "@blocksuite/presets": "0.14.0-canary-202403201020-e0ac77d",
20 | "@blocksuite/store": "0.14.0-canary-202403201020-e0ac77d",
21 | "express": "^4.18.3",
22 | "formidable": "^3.5.1",
23 | "lowdb": "^7.0.1"
24 | },
25 | "devDependencies": {
26 | "@rollup/plugin-alias": "^5.1.0",
27 | "@rollup/plugin-commonjs": "^25.0.7",
28 | "@rollup/plugin-json": "^6.1.0",
29 | "@rollup/plugin-node-resolve": "^15.2.3",
30 | "@types/express": "^4.17.21",
31 | "@types/formidable": "^3.4.5",
32 | "rollup-plugin-postcss": "^4.0.2"
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/bin/server.js:
--------------------------------------------------------------------------------
#!/usr/bin/env node

import * as number from 'lib0/number'
import * as env from 'lib0/environment'
import * as yredis from '@y/redis'

// Server configuration, read from env vars / CLI params (see .env.template).
const port = number.parseInt(env.getConf('port') || '3002')
const redisPrefix = env.getConf('redis-prefix') || 'y'
const postgresUrl = env.getConf('postgres')
const s3Endpoint = env.getConf('s3-endpoint')
// NOTE(review): the other conf names here use kebab-case (e.g. 'redis-prefix');
// 'AUTH_PERM_CALLBACK' is inconsistent — confirm whether 'auth-perm-callback'
// was intended.
const checkPermCallbackUrl = env.ensureConf('AUTH_PERM_CALLBACK')

// Select the persistent storage backend. S3 takes precedence over Postgres;
// the in-memory store is a non-persistent fallback for development.
let store
if (s3Endpoint) {
  console.log('using s3 store')
  const { createS3Storage } = await import('../src/storage/s3.js')
  const bucketName = 'ydocs'
  store = createS3Storage(bucketName)
  try {
    // make sure the bucket exists
    await store.client.makeBucket(bucketName)
  } catch (e) {} // best-effort: creation fails when the bucket already exists
} else if (postgresUrl) {
  console.log('using postgres store')
  const { createPostgresStorage } = await import('../src/storage/postgres.js')
  store = await createPostgresStorage()
} else {
  console.log('ATTENTION! using in-memory store')
  const { createMemoryStorage } = await import('../src/storage/memory.js')
  store = createMemoryStorage()
}

// Start the websocket server component.
yredis.createYWebsocketServer({ port, store, checkPermCallbackUrl, redisPrefix })
34 |
--------------------------------------------------------------------------------
/demos/blocksuite/client/api.js:
--------------------------------------------------------------------------------
// Same-origin HTTP endpoint — the demo server also serves the doc routes.
const endpoint = `http://${window.location.host}`

// Fetch the list of doc metadata entries from the server.
async function getDocMetaList () {
  const response = await fetch(`${endpoint}/docs`)
  /** @type {{id: string, title: string}[]} */
  const docList = await response.json()
  return docList
}
9 |
/**
 * Create a new doc metadata entry on the server.
 * @returns {Promise<{id: string, title: string}>} the newly created doc meta
 */
async function addDocMeta () {
  const requestInit = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' }
  }
  const response = await fetch(`${endpoint}/docs`, requestInit)
  return response.json()
}
21 |
/**
 * Persist a new title for the doc with the given id.
 * @param {string} id
 * @param {string} title
 */
async function updateDocMeta (id, title) {
  const payload = JSON.stringify({ title })
  await fetch(`${endpoint}/docs/${id}/title`, {
    method: 'PATCH',
    headers: { 'Content-Type': 'application/json' },
    body: payload
  })
}
32 |
/**
 * Delete the doc metadata entry with the given id.
 * @param {string} id
 */
async function deleteDocMeta (id) {
  await fetch(`${endpoint}/docs/${id}`, { method: 'DELETE' })
}
39 |
/**
 * Request a fresh auth token from the demo server.
 * @returns {Promise<string>}
 */
async function getAuthToken () {
  const response = await fetch(`${endpoint}/auth/token`)
  return await response.text()
}
44 |
// Top-level await: the module is only ready after a token was obtained.
export const authToken = await getAuthToken()

// Public doc-metadata API surface used by the UI module.
export const api = {
  getDocMetaList,
  addDocMeta,
  updateDocMeta,
  deleteDocMeta
}
53 |
--------------------------------------------------------------------------------
/demos/blocksuite/client/editor.js:
--------------------------------------------------------------------------------
1 | import '@toeverything/theme/style.css'
2 |
3 | import { AffineSchemas } from '@blocksuite/blocks'
4 | import { AffineEditorContainer } from '@blocksuite/presets'
5 | // eslint-disable-next-line no-unused-vars
6 | import { Schema, DocCollection, Text, Doc, Slot } from '@blocksuite/store'
7 |
// Editor singletons shared across the client modules.
const schema = new Schema().register(AffineSchemas)
export const collection = new DocCollection({ schema })
export const editor = new AffineEditorContainer()
export const emptyDoc = collection.createDoc() // empty placeholder
12 |
/**
 * Mount the editor into the page, showing the empty placeholder doc.
 * @returns slots the UI can subscribe to (doc-updated notifications)
 */
export function initEditor () {
  editor.doc = emptyDoc
  document.body.append(editor)

  const slots = { onDocUpdated: collection.slots.docUpdated }
  return slots
}
21 |
/**
 * Return the doc with the given id from the local collection, creating it
 * when the collection does not have it yet.
 * @param {string} id
 */
export function loadDoc (id) {
  // nullish result from getDoc means unknown id — fall back to creating it
  return collection.getDoc(id) ?? collection.createDoc({ id })
}
29 |
/**
 * Create a new doc with a default page/surface/note scaffold containing a
 * "Hello World!" paragraph.
 *
 * @param {string} id
 * @returns the newly created doc. (Previously the doc was created but not
 *   returned; returning it is backward-compatible and saves callers a
 *   `collection.getDoc(id)` round-trip.)
 */
export function createDoc (id) {
  const doc = collection.createDoc({ id })

  doc.load(() => {
    const pageBlockId = doc.addBlock('affine:page')
    doc.addBlock('affine:surface', {}, pageBlockId)
    const noteId = doc.addBlock('affine:note', {}, pageBlockId)
    doc.addBlock(
      'affine:paragraph',
      { text: new Text('Hello World!') },
      noteId
    )
  })
  return doc
}
45 |
--------------------------------------------------------------------------------
/src/storage.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as err from 'lib0/error'
3 |
export class AbstractStorage {
  /**
   * Persist a snapshot of `ydoc` under `room`/`docname`.
   *
   * @param {string} _room
   * @param {string} _docname
   * @param {Y.Doc} _ydoc
   * @return {Promise<void>}
   */
  persistDoc (_room, _docname, _ydoc) {
    err.methodUnimplemented()
  }

  /**
   * Retrieve the stored doc as a (merged) v2 update together with the
   * references of the snapshots it was built from.
   *
   * @param {string} _room
   * @param {string} _docname
   * @return {Promise<{ doc: Uint8Array, references: Array<any> }|null>}
   */
  retrieveDoc (_room, _docname) {
    err.methodUnimplemented()
  }

  /**
   * This can be implemented by the storage provider for better efficiency. The state vector must be
   * updated when persistDoc is called. Otherwise, we pull the ydoc and compute the state vector.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise<Uint8Array|null>}
   */
  async retrieveStateVector (room, docname) {
    const r = await this.retrieveDoc(room, docname)
    return r ? Y.encodeStateVectorFromUpdateV2(r.doc) : null
  }

  /**
   * Delete the listed snapshot references for `room`/`docname`.
   *
   * @param {string} _room
   * @param {string} _docname
   * @param {Array<any>} _storeReferences
   * @return {Promise<void>}
   */
  deleteReferences (_room, _docname, _storeReferences) {
    err.methodUnimplemented()
  }

  // No-op by default; override to release backend resources.
  async destroy () {
  }
}
50 |
--------------------------------------------------------------------------------
/bin/worker.js:
--------------------------------------------------------------------------------
#!/usr/bin/env node

import * as env from 'lib0/environment'
import * as yredis from '@y/redis'
import * as Y from 'yjs'

// Worker configuration, read from env vars / CLI params (see .env.template).
const redisPrefix = env.getConf('redis-prefix') || 'y'
const postgresUrl = env.getConf('postgres')
const s3Endpoint = env.getConf('s3-endpoint')

// Select the persistent storage backend. S3 takes precedence over Postgres;
// the in-memory store is a non-persistent fallback for development.
let store
if (s3Endpoint) {
  console.log('using s3 store')
  const { createS3Storage } = await import('../src/storage/s3.js')
  const bucketName = 'ydocs'
  store = createS3Storage(bucketName)
  try {
    // make sure the bucket exists
    await store.client.makeBucket(bucketName)
  } catch (e) {} // best-effort: creation fails when the bucket already exists
} else if (postgresUrl) {
  console.log('using postgres store')
  const { createPostgresStorage } = await import('../src/storage/postgres.js')
  store = await createPostgresStorage()
} else {
  console.log('ATTENTION! using in-memory store')
  const { createMemoryStorage } = await import('../src/storage/memory.js')
  store = createMemoryStorage()
}

// Normalize the optional callback url with a trailing slash so that
// `new URL(room, ydocUpdateCallback)` below resolves room as a path segment.
let ydocUpdateCallback = env.getConf('ydoc-update-callback')
if (ydocUpdateCallback != null && ydocUpdateCallback.slice(-1) !== '/') {
  ydocUpdateCallback += '/'
}

/**
 * Invoked by the worker when a ydoc changed; PUTs the full doc state
 * (encoded as a v2 update) to YDOC_UPDATE_CALLBACK when configured.
 *
 * @type {(room: string, ydoc: Y.Doc) => Promise<void>}
 */
const updateCallback = async (room, ydoc) => {
  if (ydocUpdateCallback != null) {
    // call YDOC_UPDATE_CALLBACK here
    const formData = new FormData()
    // @todo only convert ydoc to updatev2 once
    formData.append('ydoc', new Blob([Y.encodeStateAsUpdateV2(ydoc)]))
    // @todo should add a timeout to fetch (see fetch signal abortcontroller)
    const res = await fetch(new URL(room, ydocUpdateCallback), { body: formData, method: 'PUT' })
    if (!res.ok) {
      console.error(`Issue sending data to YDOC_UPDATE_CALLBACK. status="${res.status}" statusText="${res.statusText}"`)
    }
  }
}

// Start the worker component.
yredis.createWorker(store, redisPrefix, {
  updateCallback
})
56 |
--------------------------------------------------------------------------------
/.env.template:
--------------------------------------------------------------------------------
1 | ### Redis endpoint
2 | REDIS=redis://localhost:6379
3 | # REDIS_PREFIX=y # by default "y:" is prepended to all used redis keys
4 |
5 | ### y-redis server component
6 | # PORT=3002 # The port that is used for listening for websocket connections
7 |
8 | ### Storage provider
9 | ## Either setup Postgres or S3 to persist data. If S3 is set up, then the
10 | ## postgres config is not used.
11 |
12 | ## S3 configuration for minio (start with `npm run minio`)
13 | S3_ENDPOINT=localhost
14 | S3_PORT=9000
15 | S3_SSL=false
16 | S3_ACCESS_KEY=minioadmin
17 | S3_SECRET_KEY=minioadmin
18 |
19 | ## PostgreSQL connection string format: postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
20 | # POSTGRES=postgres://user:pass@localhost/database
21 | # POSTGRES_TESTDB=yredis_tests ## use a different database for testing
22 |
23 | ### Auth signature
24 | ## The auth server authenticates web clients using json-web-tokens (jwt).
25 | ## They are generated and validated using the following json-web-keys (jwk).
26 | ## Generate your own keys by calling: `npx 0ecdsa-generate-keypair --name auth`
27 | ## These keys should be kept secret!
28 | AUTH_PUBLIC_KEY=..
29 | AUTH_PRIVATE_KEY=..
30 |
31 | ### Auth backend
32 | ## This REST endpoint is called to check whether a user has access to a document. It
33 | ## is provided by you. However, there are multiple sample implementations in this
34 | ## repository.
35 | AUTH_PERM_CALLBACK=http://localhost:5173/auth/perm
36 |
37 | ### Optional change callback
38 | ## This REST endpoint is called in regular intervals when a ydoc is changed.
39 | ## Remove this if you don't need it.
40 | YDOC_UPDATE_CALLBACK=http://localhost:5173/ydoc
41 |
42 | ### Logging
43 | ## Configure what you want to log. LOG is a regular expression that is applied
44 | ## to the logged component.
45 | LOG=* # log everything
46 | # LOG="" # no logging
47 | # LOG="^(yjs|@y)" # Supports regexes
48 | # LOG=@y/redis # print all messages generated by y-redis
49 | # LOG=@y/redis/[componentname] - print only messages from a specific y-redis component
50 |
51 | ### Expert configurations
52 | ## For debugging purposes and expert use.
53 | ## Changing these variables can be very dangerous. The default is carefully
54 | ## chosen. Be sure that you know what you are doing!
55 | ##
56 | ## Redis messages exist for at least one minute on the redis stream
57 | # REDIS_MIN_MESSAGE_LIFETIME=60000
58 | ## After this timeout, a worker will pick up a task and clean up a stream.
59 | # REDIS_TASK_DEBOUNCE=10000
60 |
--------------------------------------------------------------------------------
/src/subscriber.js:
--------------------------------------------------------------------------------
1 | import * as api from './api.js'
2 | import * as map from 'lib0/map'
3 | import * as array from 'lib0/array'
4 |
/**
 * Handler invoked with (stream, messages) for each batch of messages received
 * on a subscribed stream.
 *
 * @typedef {function(string,Array<any>):void} SubHandler
 */

/**
 * Endless polling loop: fetch new messages for all current subscriptions and
 * dispatch them to the registered handlers. Errors are logged and the loop
 * continues.
 *
 * @param {Subscriber} subscriber
 */
const run = async subscriber => {
  while (true) {
    try {
      const ms = await subscriber.client.getMessages(array.from(subscriber.subs.entries()).map(([stream, s]) => ({ key: stream, id: s.id })))
      for (let i = 0; i < ms.length; i++) {
        const m = ms[i]
        const sub = subscriber.subs.get(m.stream)
        // subscription may have been removed while getMessages was in flight
        if (sub == null) continue
        sub.id = m.lastId
        if (sub.nextId != null) {
          // a cursor rewind was requested via ensureSubId — resume from there
          sub.id = sub.nextId
          sub.nextId = null
        }
        sub.fs.forEach(f => f(m.stream, m.messages))
      }
    } catch (e) {
      console.error(e)
    }
  }
}
32 |
/**
 * Create a Subscriber backed by a freshly created api client.
 *
 * @param {import('./storage.js').AbstractStorage} store
 * @param {string} redisPrefix
 * @return {Promise<Subscriber>}
 */
export const createSubscriber = async (store, redisPrefix) =>
  new Subscriber(await api.createApiClient(store, redisPrefix))
41 |
export class Subscriber {
  /**
   * @param {api.Api} client
   */
  constructor (client) {
    this.client = client
    /**
     * Active subscriptions keyed by stream name. `fs` holds the handlers,
     * `id` is the last-delivered redis id, `nextId` optionally rewinds the
     * cursor before the next poll (see ensureSubId / run).
     *
     * @type {Map<string,{fs:Set<SubHandler>,id:string,nextId:string?}>}
     */
    this.subs = new Map()
    // start the endless polling loop (intentionally not awaited)
    run(this)
  }

  /**
   * Rewind the cursor of `stream` to `id` if `id` is older than the current
   * cursor, so already-delivered messages can be re-read.
   *
   * @param {string} stream
   * @param {string} id
   */
  ensureSubId (stream, id) {
    const sub = this.subs.get(stream)
    if (sub != null && api.isSmallerRedisId(id, sub.id)) {
      sub.nextId = id
    }
  }

  /**
   * Register handler `f` for messages on `stream`.
   *
   * @param {string} stream
   * @param {SubHandler} f
   * @return {{redisId: string}} the current cursor for the stream
   */
  subscribe (stream, f) {
    const sub = map.setIfUndefined(this.subs, stream, () => ({ fs: new Set(), id: '0', nextId: null }))
    sub.fs.add(f)
    return {
      redisId: sub.id
    }
  }

  /**
   * Remove handler `f`; the stream subscription is dropped entirely once no
   * handlers remain.
   *
   * @param {string} stream
   * @param {SubHandler} f
   */
  unsubscribe (stream, f) {
    const sub = this.subs.get(stream)
    if (sub) {
      sub.fs.delete(f)
      if (sub.fs.size === 0) {
        this.subs.delete(stream)
      }
    }
  }

  // release the underlying api client
  destroy () {
    this.client.destroy()
  }
}
96 |
--------------------------------------------------------------------------------
/demos/auth-express/demo.js:
--------------------------------------------------------------------------------
1 | /* eslint-env browser */
2 |
3 | import * as Y from 'yjs'
4 | // @ts-ignore
5 | import { yCollab, yUndoManagerKeymap } from 'y-codemirror.next'
6 | import { WebsocketProvider } from 'y-websocket'
7 |
8 | import { EditorView, basicSetup } from 'codemirror'
9 | import { keymap } from '@codemirror/view'
10 | import { javascript } from '@codemirror/lang-javascript'
11 |
12 | import * as random from 'lib0/random'
13 | import { EditorState } from '@codemirror/state'
14 |
// Palette for awareness cursors: a solid color plus a translucent variant
// (same hex with alpha suffix) for selections.
export const usercolors = [
  { color: '#30bced', light: '#30bced33' },
  { color: '#6eeb83', light: '#6eeb8333' },
  { color: '#ffbc42', light: '#ffbc4233' },
  { color: '#ecd444', light: '#ecd44433' },
  { color: '#ee6352', light: '#ee635233' },
  { color: '#9ac2c9', light: '#9ac2c933' },
  { color: '#8acb88', light: '#8acb8833' },
  { color: '#1be7ff', light: '#1be7ff33' }
]

// pick a random color for this client
export const userColor = usercolors[random.uint32() % usercolors.length]

const room = 'y-redis-demo-app'

// request an auth token before trying to connect
const authToken = await fetch(`http://${location.host}/auth/token`).then(request => request.text())

// shared doc, connected to the y-redis server with the token as a ws param
const ydoc = new Y.Doc()
const provider = new WebsocketProvider('ws://localhost:3002', room, ydoc, { params: { yauth: authToken } })
35 |
// The auth token expires eventually (by default in one hour)
// Periodically pull a new auth token (e.g. every 30 minutes) and update the auth parameter
const _updateAuthToken = async () => {
  try {
    provider.params.yauth = await fetch(`http://${location.host}/auth/token`).then(request => request.text())
  } catch (e) {
    setTimeout(_updateAuthToken, 1000) // in case of an error, retry in a second
    return
  }
  // BUGFIX: was `30 * 60 * 60 * 1000` (30 hours). The token expires after one
  // hour, so the refresh must run every 30 minutes as the comment above intends.
  setTimeout(_updateAuthToken, 30 * 60 * 1000) // send a new request in 30 minutes
}
_updateAuthToken()
48 |
// shared text type bound to the CodeMirror editor below
const ytext = ydoc.getText('codemirror')

// publish local user info so remote clients can render our cursor
provider.awareness.setLocalStateField('user', {
  name: 'Anonymous ' + Math.floor(Math.random() * 100),
  color: userColor.color,
  colorLight: userColor.light
})

// CodeMirror 6 editor wired to the shared ytext via yCollab
const state = EditorState.create({
  doc: ytext.toString(),
  extensions: [
    keymap.of([
      ...yUndoManagerKeymap
    ]),
    basicSetup,
    javascript(),
    EditorView.lineWrapping,
    yCollab(ytext, provider.awareness)
    // oneDark
  ]
})

const view = new EditorView({ state, parent: /** @type {HTMLElement} */ (document.querySelector('#editor')) })

// expose for debugging in the browser console
// @ts-ignore
window.example = { provider, ydoc, ytext, view }
75 |
--------------------------------------------------------------------------------
/src/storage/memory.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as map from 'lib0/map'
3 | import * as array from 'lib0/array'
4 | import * as random from 'lib0/random'
5 | import * as promise from 'lib0/promise'
6 |
/**
 * @typedef {import('../storage.js').AbstractStorage} AbstractStorage
 */

/**
 * Options for MemoryStorage (currently none).
 *
 * @typedef {Object} MemoryStorageOpts
 */

/**
 * Create a new in-memory storage instance (testing / single-server use only).
 *
 * @param {MemoryStorageOpts} opts
 * @return {MemoryStorage}
 */
export const createMemoryStorage = (opts = {}) => new MemoryStorage(opts)
19 |
/**
 * A helper Storage implementation for testing when only using one server. For production use
 * Postgres or something persistent that other clients can also read.
 *
 * @implements {AbstractStorage}
 */
export class MemoryStorage {
  /**
   * @param {MemoryStorageOpts} _opts
   */
  constructor (_opts) {
    /**
     * path := room.docid.referenceid
     * @type {Map<string, Map<string, Map<string, Uint8Array>>>}
     */
    this.docs = new Map()
  }

  /**
   * Persist a snapshot of `ydoc` (encoded as a v2 update) under a fresh
   * uuid reference; earlier snapshots are kept until deleteReferences.
   *
   * @param {string} room
   * @param {string} docname
   * @param {Y.Doc} ydoc
   * @returns {Promise<void>}
   */
  persistDoc (room, docname, ydoc) {
    map.setIfUndefined(
      map.setIfUndefined(this.docs, room, map.create),
      docname,
      map.create
    ).set(random.uuidv4(), Y.encodeStateAsUpdateV2(ydoc))
    return promise.resolve()
  }

  /**
   * Merge all stored snapshots of `room`/`docname` into a single v2 update.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise<{ doc: Uint8Array, references: Array<string> } | null>} null when nothing is stored
   */
  async retrieveDoc (room, docname) {
    const refs = this.docs.get(room)?.get(docname)
    return promise.resolveWith((refs == null || refs.size === 0) ? null : { doc: Y.mergeUpdatesV2(array.from(refs.values())), references: array.from(refs.keys()) })
  }

  /**
   * This can be implemented by the storage provider for better efficiency. The state vector must be
   * updated when persistDoc is called. Otherwise, we pull the ydoc and compute the state vector.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise<Uint8Array|null>}
   */
  async retrieveStateVector (room, docname) {
    const r = await this.retrieveDoc(room, docname)
    return r ? Y.encodeStateVectorFromUpdateV2(r.doc) : null
  }

  /**
   * Delete the listed snapshot references for `room`/`docname`.
   *
   * @param {string} room
   * @param {string} docname
   * @param {Array<string>} storeReferences
   * @return {Promise<void>}
   */
  deleteReferences (room, docname, storeReferences) {
    storeReferences.forEach(r => {
      this.docs.get(room)?.get(docname)?.delete(r)
    })
    return promise.resolve()
  }

  // nothing to release for the in-memory store
  async destroy () {
  }
}
92 |
--------------------------------------------------------------------------------
/demos/blocksuite/client/ui.js:
--------------------------------------------------------------------------------
1 | // eslint-disable-next-line no-unused-vars
2 | import { Slot } from '@blocksuite/store'
3 | import { api } from './api.js'
4 | import { collection, createDoc, editor, emptyDoc, loadDoc } from './editor.js'
5 | import { getCurrentRoom, setRoom } from './route.js'
6 | import { sync } from './sync.js'
7 |
// DOM handles for the doc picker and the toolbar buttons.
/** @type HTMLSelectElement */ // @ts-ignore
const docListElement = document.getElementById('doc-list')
const addDocBtn = document.getElementById('add-doc')
const deleteDocBtn = document.getElementById('delete-doc')
12 |
/**
 * Rebuild the doc <select> from the server-side doc list.
 * @param {string} id optional doc id to select after rebuilding
 */
export async function resetDocList (id = '') {
  const docList = await api.getDocMetaList()

  // clear in all cases; repopulate only when there are docs
  docListElement.innerHTML = ''
  for (const doc of docList) {
    const option = document.createElement('option')
    option.value = doc.id
    option.textContent = doc.title || 'Untitled'
    docListElement.appendChild(option)
  }
  if (docList.length > 0 && id) docListElement.value = id
}
30 |
/**
 * Update the visible title of the option for doc `id` (no-op when absent).
 * @param {string} id
 * @param {string} title
 */
function updateDocList (id, title) {
  const option = docListElement.querySelector(`option[value="${id}"]`)
  if (option) {
    option.textContent = title
  }
}
37 |
/** Create a new doc on the server and switch the editor to it. */
async function addDoc () {
  const { id } = await api.addDocMeta()
  createDoc(id)
  await resetDocList(id)
  const options = Array.from(docListElement.options)
  docListElement.selectedIndex = options.findIndex((option) => option.value === id)
  switchDoc()
}
45 |
/** Delete the currently selected doc and fall back to the first remaining one. */
async function deleteDoc () {
  const currentDocId = docListElement.value
  if (currentDocId === '') return
  await api.deleteDocMeta(currentDocId)
  await resetDocList()
  docListElement.selectedIndex = 0
  switchDoc()
}
54 |
/** Push the selected doc's title (from its meta) to the server and the list UI. */
async function updateDocTitle () {
  const id = docListElement.value
  if (id === '') return
  const doc = collection.getDoc(id)
  const title = doc?.meta?.title ?? ''
  await api.updateDocMeta(id, title)
  updateDocList(id, title)
}
62 |
/**
 * Show the doc with the given id in the editor (defaults to the current
 * selection), or the empty placeholder when no id is given.
 * @param {string} [id]
 */
function switchDoc (id = docListElement.value) {
  if (!id) {
    editor.doc = emptyDoc
    setRoom('')
    return
  }
  const doc = collection.getDoc(id) || loadDoc(id)
  setRoom(id)
  sync(doc)
}
74 |
/**
 * Wire up toolbar buttons, doc selection, and history navigation.
 * @param {{onDocUpdated: Slot}} editorSlots
 */
export function initUI (editorSlots) {
  addDocBtn?.addEventListener('click', addDoc)
  deleteDocBtn?.addEventListener('click', deleteDoc)

  docListElement.addEventListener('change', () => switchDoc())
  window.addEventListener('popstate', () => switchDoc(getCurrentRoom()))

  editorSlots.onDocUpdated.on(() => updateDocTitle())
}
85 |
--------------------------------------------------------------------------------
/tests/api.tests.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as t from 'lib0/testing'
3 | import * as api from '../src/api.js'
4 | import * as encoding from 'lib0/encoding'
5 | import * as promise from 'lib0/promise'
6 | import * as redis from 'redis'
7 | import { prevClients, store } from './utils.js'
8 |
// same value as utils.redisPrefix — keys under this prefix are flushed per test
const redisPrefix = 'ytests'
10 |
/**
 * Per-test setup: destroys clients from previous tests, flushes all redis
 * keys under the test prefix, and creates a fresh api client plus a ydoc
 * whose updates are forwarded to the redis stream of the test room.
 *
 * @param {t.TestCase} tc
 */
const createTestCase = async tc => {
  await promise.all(prevClients.map(c => c.destroy()))
  prevClients.length = 0
  const redisClient = redis.createClient({ url: api.redisUrl })
  await redisClient.connect()
  // flush existing content
  const keysToDelete = await redisClient.keys(redisPrefix + ':*')
  keysToDelete.length > 0 && await redisClient.del(keysToDelete)
  await redisClient.quit()
  const client = await api.createApiClient(store, redisPrefix)
  prevClients.push(client)
  const room = tc.testName
  const docid = 'main'
  const stream = api.computeRedisRoomStreamName(room, docid, redisPrefix)
  const ydoc = new Y.Doc()
  // forward every local ydoc update to the redis stream, wrapped in a
  // y-protocols sync message
  ydoc.on('update', update => {
    const m = encoding.encode(encoder => {
      encoding.writeVarUint(encoder, 0) // sync protocol
      encoding.writeVarUint(encoder, 2) // update message
      encoding.writeVarUint8Array(encoder, update)
    })
    // NOTE(review): addMessage returns a promise that is not awaited here
    // (event handlers are synchronous) — confirm ordering is acceptable.
    client.addMessage(room, docid, Buffer.from(m))
  })
  return {
    client,
    ydoc,
    room,
    docid,
    stream
  }
}
45 |
/**
 * Spawn a background worker with timings suited for these tests and register
 * its client for cleanup after the test run.
 */
const createWorker = async () => {
  const worker = await api.createWorker(store, redisPrefix, {})
  const { client } = worker
  client.redisMinMessageLifetime = 10000
  client.redisTaskDebounce = 5000
  prevClients.push(client)
  return worker
}
53 |
/**
 * Writes to a local Y.Doc must be reflected when the doc is loaded back
 * through the api client (i.e. updates round-trip through redis).
 *
 * @param {t.TestCase} tc
 */
export const testUpdateApiMessages = async tc => {
  const { client, ydoc, room, docid } = await createTestCase(tc)
  const map = ydoc.getMap()
  map.set('key1', 'val1')
  map.set('key2', 'val2')
  const loaded = await client.getDoc(room, docid)
  const loadedMap = loaded.ydoc.getMap()
  t.compare(loadedMap.get('key1'), 'val1')
  t.compare(loadedMap.get('key2'), 'val2')
}
65 |
/**
 * A worker must persist doc updates to the storage backend, delete the
 * processed room stream, and eventually drain its own task queue.
 *
 * @param {t.TestCase} tc
 */
export const testWorker = async tc => {
  const { client, ydoc, stream, room, docid } = await createTestCase(tc)
  await createWorker()
  ydoc.getMap().set('key1', 'val1')
  ydoc.getMap().set('key2', 'val2')
  // wait until the worker persisted the doc and removed the room stream.
  // Poll with a small delay instead of hammering redis in a tight loop.
  let streamexists = true
  while (streamexists) {
    streamexists = (await client.redis.exists(stream)) === 1
    if (streamexists) await promise.wait(50)
  }
  const { ydoc: loadedDoc } = await client.getDoc(room, docid)
  t.assert(loadedDoc.getMap().get('key1') === 'val1')
  t.assert(loadedDoc.getMap().get('key2') === 'val2')
  // wait until the worker's own task queue is fully drained
  let workertasksEmpty = false
  while (!workertasksEmpty) {
    workertasksEmpty = await client.redis.xLen(client.redisWorkerStreamName) === 0
    if (!workertasksEmpty) await promise.wait(50)
  }
}
86 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@y/redis",
3 | "version": "0.1.6",
4 | "description": "Scalable websocket provider for Yjs using redis",
5 | "sideEffects": false,
6 | "type": "module",
7 | "funding": {
8 | "type": "GitHub Sponsors ❤",
9 | "url": "https://github.com/sponsors/dmonad"
10 | },
11 | "scripts": {
12 | "dev": "concurrently -r 'npm run start:server' 'npm run start:worker' 'npm run minio'",
13 | "start:server": "node --env-file .env ./bin/server.js",
    "debug:server": "node --env-file .env --inspect-brk ./bin/server.js",
15 | "start:worker": "node --env-file .env ./bin/worker.js",
16 | "debug:worker": "node --env-file .env --inspect-brk ./bin/worker.js",
17 | "minio": "docker run -p 9000:9000 -p 9001:9001 quay.io/minio/minio server /data --console-address \":9001\"",
18 | "redis": "docker run -p 6379:6379 redis",
19 | "dist": "tsc",
20 | "lint": "standard && tsc",
21 | "test": "node --env-file .env tests/index.js",
22 | "test-inspect": "node --env-file .env --inspect-brk tests/index.js",
23 | "preversion": "npm run lint && npm run dist",
24 | "clean": "rm -rf **/dist "
25 | },
26 | "bin": {
27 | "y-redis-server": "./bin/server.js",
28 | "y-redis-worker": "./bin/worker.js"
29 | },
30 | "files": [
31 | "dist/*",
32 | "bin/*",
33 | "src/*"
34 | ],
35 | "exports": {
36 | "./package.json": "./package.json",
37 | ".": {
38 | "default": "./src/index.js",
39 | "types": "./dist/src/index.d.ts"
40 | },
41 | "./storage/memory": {
42 | "default": "./src/storage/memory.js",
43 | "types": "./dist/src/storage/memory.d.ts"
44 | },
45 | "./storage/s3": {
46 | "default": "./src/storage/s3.js",
47 | "types": "./dist/src/storage/s3.d.ts"
48 | },
49 | "./storage/postgres": {
50 | "default": "./src/storage/postgres.js",
51 | "types": "./dist/src/storage/postgres.d.ts"
52 | }
53 | },
54 | "repository": {
55 | "type": "git",
56 | "url": "git+https://github.com/yjs/y-redis.git"
57 | },
58 | "keywords": [
59 | "Yjs"
60 | ],
61 | "author": "Kevin Jahns ",
62 | "license": "AGPL-3.0 OR PROPRIETARY",
63 | "bugs": {
64 | "url": "https://github.com/yjs/y-redis/issues"
65 | },
66 | "homepage": "https://github.com/yjs/y-redis#readme",
67 | "standard": {
68 | "ignore": [
69 | "/dist",
70 | "/node_modules"
71 | ]
72 | },
73 | "dependencies": {
74 | "lib0": "^0.2.93",
75 | "redis": "^4.6.12",
76 | "uws": "github:uNetworking/uWebSockets.js#v20.40.0",
77 | "yjs": "^13.5.6"
78 | },
79 | "optionalDependencies": {
80 | "postgres": "^3.4.3",
81 | "minio": "^7.1.3"
82 | },
83 | "engines": {
84 | "npm": ">=8.0.0",
85 | "node": ">=20.0.0"
86 | },
87 | "devDependencies": {
88 | "@types/node": "^20.11.5",
89 | "@types/ws": "^8.5.10",
90 | "concurrently": "^8.2.2",
91 | "standard": "^17.1.0",
92 | "typescript": "^5.3.3",
93 | "ws": "^8.16.0",
94 | "y-websocket": "^2.0.4"
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/src/server.js:
--------------------------------------------------------------------------------
1 | import * as uws from 'uws'
2 | import * as env from 'lib0/environment'
3 | import * as logging from 'lib0/logging'
4 | import * as error from 'lib0/error'
5 | import * as jwt from 'lib0/crypto/jwt'
6 | import * as ecdsa from 'lib0/crypto/ecdsa'
7 | import * as json from 'lib0/json'
8 | import { registerYWebsocketServer } from '../src/ws.js'
9 | import * as promise from 'lib0/promise'
10 |
11 | const wsServerPublicKey = await ecdsa.importKeyJwk(json.parse(env.ensureConf('auth-public-key')))
12 | // const wsServerPrivateKey = await ecdsa.importKeyJwk(json.parse(env.ensureConf('auth-private-key')))
13 |
class YWebsocketServer {
  /**
   * Thin handle around the listening uws app that allows the server to be
   * shut down via `destroy()`.
   *
   * @param {uws.TemplatedApp} app
   */
  constructor (app) {
    this.app = app
  }

  /**
   * Stop listening and close the underlying uws app.
   */
  async destroy () {
    this.app.close()
  }
}
26 |
/**
 * Create and start the y-redis websocket server. Every connecting client is
 * authenticated via a JWT and authorized through the external permission
 * callback before it may join a room.
 *
 * @param {Object} opts
 * @param {number} opts.port
 * @param {import('./storage.js').AbstractStorage} opts.store
 * @param {string} [opts.redisPrefix]
 * @param {string} opts.checkPermCallbackUrl
 * @param {(room:string,docname:string,client:import('./api.js').Api)=>void} [opts.initDocCallback] -
 * this is called when a doc is accessed, but it doesn't exist. You could populate the doc here.
 * However, this function could be called several times, until some content exists. So you need to
 * handle concurrent calls.
 */
export const createYWebsocketServer = async ({
  redisPrefix = 'y',
  port,
  store,
  checkPermCallbackUrl,
  initDocCallback = () => {}
}) => {
  // normalize the base url so relative room paths resolve below it
  checkPermCallbackUrl += checkPermCallbackUrl.slice(-1) !== '/' ? '/' : ''
  const app = uws.App({})
  await registerYWebsocketServer(app, '/:room', store, async (req) => {
    const room = req.getParameter(0)
    // the auth token is transported either in the websocket subprotocol header
    // (`yauth-<token>`) or in the `yauth` query parameter
    const headerWsProtocol = req.getHeader('sec-websocket-protocol')
    const [, , token] = /(^|,)yauth-(((?!,).)*)/.exec(headerWsProtocol) ?? [null, null, req.getQuery('yauth')]
    if (token == null) {
      throw new Error('Missing Token')
    }
    // verify that the user has a valid token
    const { payload: userToken } = await jwt.verifyJwt(wsServerPublicKey, token)
    if (userToken.yuserid == null) {
      throw new Error('Missing userid in user token!')
    }
    // ask the application backend whether this user may access this room
    const permUrl = new URL(`${room}/${userToken.yuserid}`, checkPermCallbackUrl)
    try {
      const perm = await fetch(permUrl).then(req => req.json())
      return { hasWriteAccess: perm.yaccess === 'rw', room, userid: perm.yuserid || '' }
    } catch (e) {
      console.error('Failed to pull permissions from', { permUrl })
      throw e
    }
  }, { redisPrefix, initDocCallback })

  await promise.create((resolve, reject) => {
    app.listen(port, (token) => {
      if (token) {
        logging.print(logging.GREEN, '[y-redis] Listening to port ', port)
        resolve()
      } else {
        // fixed typo in the error message ("lisen" -> "listen")
        const err = error.create('[y-redis] Failed to listen to port ' + port)
        reject(err)
        throw err
      }
    })
  })
  return new YWebsocketServer(app)
}
83 |
--------------------------------------------------------------------------------
/bin/auth-server-example.js:
--------------------------------------------------------------------------------
1 | import * as uws from 'uws'
2 | import * as jwt from 'lib0/crypto/jwt'
3 | import * as ecdsa from 'lib0/crypto/ecdsa'
4 | import * as json from 'lib0/json'
5 | import * as time from 'lib0/time'
6 | import * as env from 'lib0/environment'
7 | import * as logging from 'lib0/logging'
8 | import * as error from 'lib0/error'
9 | import * as promise from 'lib0/promise'
10 | import * as encoding from 'lib0/encoding'
11 | import * as Y from 'yjs'
12 |
13 | const appName = 'Auth-Server-Example'
14 | const authPrivateKey = await ecdsa.importKeyJwk(json.parse(env.ensureConf('auth-private-key')))
15 | const port = 5173
16 |
17 | const app = uws.App({})
18 |
// Receives the periodically-pushed document state as multipart form data,
// logs the contained codemirror text, and acknowledges the request.
app.put('/ydoc/:room', async (res, req) => {
  let aborted = false
  res.onAborted(() => {
    aborted = true
  })
  const room = req.getParameter(0)
  const header = req.getHeader('content-type')
  // this "encoder" will accumulate the received data until all data arrived
  const contentEncoder = encoding.createEncoder()
  res.onData((chunk, isLast) => {
    encoding.writeUint8Array(contentEncoder, new Uint8Array(chunk))
    if (isLast && !aborted) {
      const fullContent = encoding.toUint8Array(contentEncoder)
      const parts = uws.getParts(fullContent, header)
      const ydocUpdateData = parts?.find(part => part.name === 'ydoc')?.data
      if (ydocUpdateData == null) {
        console.error('Received empty data')
        // answer the request instead of leaving it hanging forever
        res.cork(() => {
          res.writeStatus('400 Bad Request').endWithoutBody()
        })
        return
      }
      const ydocUpdate = new Uint8Array(ydocUpdateData)
      const ydoc = new Y.Doc()
      Y.applyUpdateV2(ydoc, ydocUpdate)
      console.log(`Ydoc in room "${room}" updated. New codemirror content: "${ydoc.getText('codemirror')}"`)
      // uws requires response writes outside the request handler to be corked
      res.cork(() => {
        res.endWithoutBody()
      })
    }
  })
})
46 |
// Issues a signed JWT for the connecting client. This example server
// authorizes every request — replace with real authentication in production!
app.get('/auth/token', async (res, _req) => {
  let clientGone = false
  res.onAborted(() => {
    clientGone = true
  })
  const claims = {
    iss: appName,
    exp: time.getUnixTime() + 60 * 60 * 1000, // token expires in one hour
    yuserid: 'user1'
  }
  const token = await jwt.encodeJwt(authPrivateKey, claims)
  // the client may have disconnected while we were signing
  if (clientGone) return
  res.cork(() => {
    res.end(token)
  })
})
64 |
// Permission check called by the y-redis server: this example always grants
// read-write access to every user in every room.
app.get('/auth/perm/:room/:userid', async (res, req) => {
  const perm = {
    yroom: req.getParameter(0),
    yaccess: 'rw',
    yuserid: req.getParameter(1)
  }
  res.end(json.stringify(perm))
})
74 |
/**
 * Resolves when the server started. Also installs SIGTERM/SIGINT handlers so
 * the server shuts down cleanly when running in Docker.
 */
export const authServerStarted = promise.create((resolve, reject) => {
  const server = app.listen(port, (token) => {
    if (token) {
      logging.print(logging.GREEN, `[${appName}] Listening to port ${port}`)
      resolve()
    } else {
      // fixed typo in the error message ("lisen" -> "listen")
      const err = error.create(`[${appName}] Failed to listen to port ${port}`)
      reject(err)
      throw err
    }
  })

  // Gracefully shut down the server when running in Docker
  process.on('SIGTERM', shutDown)
  process.on('SIGINT', shutDown)

  function shutDown () {
    console.log('Received SIGTERM/SIGINT - shutting down')
    server.close()
    process.exit(0)
  }
})
100 |
--------------------------------------------------------------------------------
/tests/storage.tests.js:
--------------------------------------------------------------------------------
1 | import * as t from 'lib0/testing'
2 | import { createPostgresStorage } from '../src/storage/postgres.js'
3 | import { createMemoryStorage } from '../src/storage/memory.js'
4 | import * as Y from 'yjs'
5 | import { createS3Storage } from '../src/storage/s3.js'
6 | import * as env from 'lib0/environment'
7 |
8 | const s3TestBucketName = 'yredis-tests'
9 |
/**
 * Exercise the shared storage contract (persist / retrieve / state vector /
 * reference deletion) against all three backends: s3, postgres and in-memory.
 *
 * @param {t.TestCase} _tc
 */
export const testStorages = async _tc => {
  const s3 = createS3Storage(s3TestBucketName)
  try {
    // make sure the bucket exists
    await s3.client.makeBucket(s3TestBucketName)
  } catch (e) {}
  try {
    // start from an empty bucket so leftovers from earlier runs don't interfere
    const files = await s3.client.listObjectsV2(s3TestBucketName, '', true).toArray()
    await s3.client.removeObjects(s3TestBucketName, files.map(file => file.name))
  } catch (e) {}
  const postgres = await createPostgresStorage({ database: env.ensureConf('postgres-testdb') })
  await postgres.sql`DELETE from yredis_docs_v1`
  const memory = createMemoryStorage()

  /**
   * @type {Object}
   */
  const storages = { s3, postgres, memory }
  // run the identical scenario against every backend
  for (const storageName in storages) {
    const storage = storages[storageName]
    await t.groupAsync(`storage: ${storageName}`, async () => {
      {
        t.info('persisting docs')
        // index doc for baseline
        const ydoc1 = new Y.Doc()
        ydoc1.getMap().set('a', 1)
        await storage.persistDoc('room', 'index', ydoc1)
        const sv1 = await storage.retrieveStateVector('room', 'index')
        t.assert(sv1)
        t.compare(new Uint8Array(sv1), Y.encodeStateVector(ydoc1), 'state vectors match')
        // second doc with different changes under the same index key
        const ydoc2 = new Y.Doc()
        ydoc2.getMap().set('b', 1)
        await storage.persistDoc('room', 'index', ydoc2)
        // third doc that will be stored under a different key
        const ydoc3 = new Y.Doc()
        ydoc3.getMap().set('a', 2)
        await storage.persistDoc('room', 'doc3', ydoc3)
        const sv2 = await storage.retrieveStateVector('room', 'doc3')
        t.assert(sv2)
        t.compare(new Uint8Array(sv2), Y.encodeStateVector(ydoc3), 'state vectors match')
      }
      {
        t.info('retrieving docs')
        const r1 = await storage.retrieveDoc('room', 'index')
        t.assert(r1)
        t.assert(r1.references.length === 2) // we stored two different versions that should be merged now
        const doc1 = new Y.Doc()
        Y.applyUpdateV2(doc1, r1.doc)
        // should have merged both changes..
        t.assert(doc1.getMap().get('a') === 1 && doc1.getMap().get('b') === 1)
        // retrieve other doc..
        const doc3 = new Y.Doc()
        const r3 = await storage.retrieveDoc('room', 'doc3')
        t.assert(r3)
        t.assert(r3.references.length === 1)
        Y.applyUpdateV2(doc3, r3.doc)
        t.assert(doc3.getMap().get('a') === 2)
        t.info('delete references')
        // deleting references one by one must shrink the doc until it is gone
        await storage.deleteReferences('room', 'index', [r1.references[0]])
        const r1v2 = await storage.retrieveDoc('room', 'index')
        t.assert(r1v2 && r1v2.references.length === 1)
        await storage.deleteReferences('room', 'index', [r1.references[1]])
        const r1v3 = await storage.retrieveDoc('room', 'index')
        t.assert(r1v3 == null)
      }
      {
        // unknown room/doc combinations must yield null, not throw
        const sv = await storage.retrieveStateVector('nonexistend', 'nonexistend')
        t.assert(sv === null)
      }
      await storage.destroy()
    })
  }
}
87 |
--------------------------------------------------------------------------------
/demos/auth-express/server.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import express from 'express'
3 | import formidable from 'formidable'
4 | import * as jwt from 'lib0/crypto/jwt'
5 | import * as time from 'lib0/time'
6 | import * as ecdsa from 'lib0/crypto/ecdsa'
7 | import * as env from 'lib0/environment'
8 | import * as fs from 'fs/promises'
9 | import * as promise from 'lib0/promise'
10 |
11 | const app = express()
12 | const port = 5173
13 |
14 | // Read the AUTH_PRIVATE_KEY environment variable and import the JWK
15 | export const authPrivateKey = await ecdsa.importKeyJwk(JSON.parse(env.ensureConf('auth-private-key')))
16 | // Read the AUTH_PUBLIC_KEY environment variable and import the JWK
17 | export const authPublicKey = await ecdsa.importKeyJwk(JSON.parse(env.ensureConf('auth-public-key')))
18 |
19 | const appName = 'my-express-app'
20 |
// This endpoint is called in regular intervals when the document changes.
// The request contains a multi-part formdata field that can be read, for example, with formidable:
app.put('/ydoc/:room', async (req, res, next) => {
  const room = req.params.room
  try {
    const ydocUpdate = await promise.create((resolve, reject) => {
      const form = formidable({})
      form.parse(req, (err, _fields, files) => {
        if (err) {
          reject(err)
          return
        }
        if (files.ydoc) {
          // formidable writes the data to a file by default. This might be a good idea for your
          // application. Check the documentation to find a non-temporary location for the read file.
          // You should probably delete it if it is no longer being used.
          const file = files.ydoc[0]
          // we are just going to log the content and delete the temporary file
          fs.readFile(file.filepath).then(resolve, reject)
        } else {
          // without this branch the promise would never settle and the request would hang
          reject(new Error('Missing "ydoc" field in form data'))
        }
      })
    })
    const ydoc = new Y.Doc()
    Y.applyUpdateV2(ydoc, ydocUpdate)
    console.log(`codemirror content in room "${room}" updated: "${ydoc.getText('codemirror').toString().replaceAll('\n', '\\n')}"`)
    res.sendStatus(200)
  } catch (err) {
    // express 4 does not catch rejections from async handlers on its own
    next(err)
  }
})
48 |
// This example server always grants read-write permission to all requests.
// Modify it to your own needs or implement the same API in your own backend!
app.get('/auth/token', async (_req, res) => {
  // the yuserid claim ties the client to an identity that is later used for
  // permission checks; the token expires in one hour
  const claims = {
    iss: appName,
    exp: time.getUnixTime() + 60 * 60 * 1000,
    yuserid: 'user1'
  }
  const token = await jwt.encodeJwt(authPrivateKey, claims)
  res.send(token)
})
59 |
// This api is called to check whether a specific user (identified by the unique "yuserid") has
// access to a specific room. This rest endpoint is called by the yredis server, not the client.
app.get('/auth/perm/:room/:userid', async (req, res) => {
  // this sample server always grants full access; alternatives for yaccess
  // are "read-only" and "no-access"
  const { room: yroom, userid: yuserid } = req.params
  res.send(JSON.stringify({
    yroom,
    yaccess: 'rw',
    yuserid
  }))
})
72 |
// serve static files
app.use(express.static('./'))

const server = app.listen(port, () => {
  console.log(`Express Demo Auth server listening on port ${port}`)
})

// Gracefully shut down the server when running in Docker
process.on('SIGTERM', shutDown)
process.on('SIGINT', shutDown)

// Stop accepting new connections and exit once existing ones are closed —
// or force-exit after a ten-second grace period.
function shutDown () {
  console.log('Received SIGTERM/SIGINT - shutting down gracefully')
  server.close(() => {
    console.log('Closed out remaining connections - shutting down')
    process.exit(0)
  })
  setTimeout(() => {
    console.error("Couldn't close connections - forcefully shutting down")
    process.exit(1)
  }, 10000)
}
95 |
--------------------------------------------------------------------------------
/src/storage/postgres.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import postgres from 'postgres'
3 | import * as error from 'lib0/error'
4 | import * as env from 'lib0/environment'
5 |
6 | /**
7 | * @typedef {import('../storage.js').AbstractStorage} AbstractStorage
8 | */
9 |
/**
 * Create a PostgreSQL-backed storage. Reads the connection url from the
 * `POSTGRES` environment config and creates the docs table if necessary.
 *
 * @param {Object} [conf]
 * @param {string} [conf.database] - overrides the database from the connection url
 */
export const createPostgresStorage = async ({ database } = {}) => {
  // postgres://username:password@host:port/database
  const postgresUrl = env.ensureConf('postgres')
  // note: the previously-built `postgresConf` object was dead code — the
  // database override is passed directly to the postgres client here
  const sql = postgres(postgresUrl, { database })
  const docsTableExists = await sql`
    SELECT EXISTS (
      SELECT FROM
          pg_tables
      WHERE
          tablename  = 'yredis_docs_v1'
      );
  `
  // we perform a check beforehand to avoid a pesky log message if the table already exists
  if (!docsTableExists || docsTableExists.length === 0 || !docsTableExists[0].exists) {
    await sql`
      CREATE TABLE IF NOT EXISTS yredis_docs_v1 (
          room text,
          doc text,
          r SERIAL,
          update bytea,
          sv bytea,
          PRIMARY KEY (room,doc,r)
      );
    `
  }
  return new PostgresStorage(sql)
}
45 |
/**
 * A Storage implementation that persists documents in PostgreSQL.
 *
 * Each `persistDoc` call appends a new row (v2 update + state vector); rows
 * for the same (room,doc) are merged on retrieval.
 *
 * You probably want to adapt this to your own needs.
 *
 * @implements AbstractStorage
 */
class PostgresStorage {
  /**
   * @param {postgres.Sql} sql
   */
  constructor (sql) {
    this.sql = sql
  }

  /**
   * Append the doc's current state as a new row.
   *
   * @param {string} room
   * @param {string} docname
   * @param {Y.Doc} ydoc
   * @returns {Promise}
   */
  async persistDoc (room, docname, ydoc) {
    await this.sql`
      INSERT INTO yredis_docs_v1 (room,doc,r,update, sv)
      VALUES (${room},${docname},DEFAULT,${Y.encodeStateAsUpdateV2(ydoc)},${Y.encodeStateVector(ydoc)})
    `
  }

  /**
   * Merge all stored rows for (room,docname) into a single v2 update.
   * The returned `references` (the rows' serial ids) can be handed to
   * `deleteReferences` after the merged doc has been re-persisted.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise<{ doc: Uint8Array, references: Array } | null>}
   */
  async retrieveDoc (room, docname) {
    /**
     * @type {Array<{ room: string, doc: string, r: number, update: Buffer }>}
     */
    const rows = await this.sql`SELECT update,r from yredis_docs_v1 WHERE room = ${room} AND doc = ${docname}`
    if (rows.length === 0) {
      return null
    }
    const doc = Y.mergeUpdatesV2(rows.map(row => row.update))
    const references = rows.map(row => row.r)
    return { doc, references }
  }

  /**
   * Return a stored state vector for (room,docname), or null if none exists.
   * NOTE(review): with several persisted rows this returns the state vector of
   * a single arbitrary row, not of the merged document — presumably callers
   * tolerate an outdated state vector until compaction; verify against the worker.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise}
   */
  async retrieveStateVector (room, docname) {
    const rows = await this.sql`SELECT sv from yredis_docs_v1 WHERE room = ${room} AND doc = ${docname} LIMIT 1`
    if (rows.length > 1) {
      // expect that result is limited
      error.unexpectedCase()
    }
    return rows.length === 0 ? null : rows[0].sv
  }

  /**
   * Delete the rows whose serial ids are listed in `storeReferences`.
   *
   * @param {string} room
   * @param {string} docname
   * @param {Array} storeReferences
   * @return {Promise}
   */
  async deleteReferences (room, docname, storeReferences) {
    await this.sql`DELETE FROM yredis_docs_v1 WHERE room = ${room} AND doc = ${docname} AND r in (${storeReferences})`
  }

  async destroy () {
    await this.sql.end({ timeout: 5 }) // existing queries have five seconds to finish
  }
}

export const Storage = PostgresStorage
122 |
--------------------------------------------------------------------------------
/src/protocol.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as error from 'lib0/error'
3 | import * as encoding from 'lib0/encoding'
4 | import * as decoding from 'lib0/decoding'
5 | import * as array from 'lib0/array'
6 | import * as awarenessProtocol from 'y-protocols/awareness'
7 | import * as buffer from 'lib0/buffer'
8 | import * as logging from 'lib0/logging'
9 |
const log = logging.createModuleLogger('@y/redis/protocol')

// top-level message types of the y-protocol framing
export const messageSync = 0
export const messageAwareness = 1
export const messageAuth = 2
export const messageQueryAwareness = 3

// sub-types of a `messageSync` message
export const messageSyncStep1 = 0
export const messageSyncStep2 = 1
export const messageSyncUpdate = 2
20 |
/**
 * @todo this should emit a single message
 *
 * Merge messages for easier consumption by the client.
 *
 * This is useful, for example, when the server catches messages from a pubsub / stream.
 * Before the server sends the messages to the clients, we can merge updates, and filter out older
 * awareness messages.
 *
 * @param {Array} messages
 */
export const mergeMessages = messages => {
  // nothing to merge for zero or one message
  if (messages.length < 2) {
    return messages
  }
  // accumulate all awareness changes in a single Awareness instance so that
  // only the latest state per client survives
  const aw = new awarenessProtocol.Awareness(new Y.Doc())
  /**
   * @type {Array}
   */
  const updates = []
  messages.forEach(m => {
    const decoder = decoding.createDecoder(m)
    try {
      const messageType = decoding.readUint8(decoder)
      switch (messageType) {
        case messageSync: {
          const syncType = decoding.readUint8(decoder)
          if (syncType === messageSyncUpdate) {
            updates.push(decoding.readVarUint8Array(decoder))
          } else {
            // only sync-update messages are expected in this stream
            error.unexpectedCase()
          }
          break
        }
        case messageAwareness: {
          awarenessProtocol.applyAwarenessUpdate(aw, decoding.readVarUint8Array(decoder), null)
          break
        }
        default: {
          error.unexpectedCase()
        }
      }
    } catch (e) {
      // a malformed message is logged and skipped instead of failing the batch
      log(logging.RED, 'Error parsing message', buffer.toBase64(m), e)
    }
  })
  /**
   * @type {Array}
   */
  const result = []
  // emit at most one merged sync-update message ...
  updates.length > 0 && result.push(encoding.encode(encoder => {
    encoding.writeVarUint(encoder, messageSync)
    encoding.writeVarUint(encoder, messageSyncUpdate) // update
    encoding.writeVarUint8Array(encoder, Y.mergeUpdates(updates))
  }))
  // ... and at most one awareness message covering all observed clients
  aw.states.size > 0 && result.push(encoding.encode(encoder => {
    encoding.writeVarUint(encoder, messageAwareness)
    encoding.writeVarUint8Array(
      encoder,
      awarenessProtocol.encodeAwarenessUpdate(
        aw,
        array.from(aw.getStates().keys())
      )
    )
  }))
  return result
}
88 |
/**
 * Frame a state vector as a y-protocol sync-step-1 message.
 *
 * @param {Uint8Array} sv - the sender's state vector
 */
export const encodeSyncStep1 = sv =>
  encoding.encode(enc => {
    encoding.writeVarUint(enc, messageSync)
    encoding.writeVarUint(enc, messageSyncStep1)
    encoding.writeVarUint8Array(enc, sv)
  })
97 |
/**
 * Frame a document diff as a y-protocol sync-step-2 message.
 *
 * @param {Uint8Array} diff - update containing the missing state
 */
export const encodeSyncStep2 = diff =>
  encoding.encode(enc => {
    encoding.writeVarUint(enc, messageSync)
    encoding.writeVarUint(enc, messageSyncStep2)
    encoding.writeVarUint8Array(enc, diff)
  })
106 |
/**
 * Frame the awareness states of the given clients as an awareness message.
 *
 * @param {awarenessProtocol.Awareness} awareness
 * @param {Array} clients - client ids whose states should be encoded
 */
export const encodeAwarenessUpdate = (awareness, clients) =>
  encoding.encode(enc => {
    encoding.writeVarUint(enc, messageAwareness)
    encoding.writeVarUint8Array(enc, awarenessProtocol.encodeAwarenessUpdate(awareness, clients))
  })
115 |
/**
 * Build an awareness message that marks a client as disconnected: its state
 * is set to `null` with an incremented clock.
 *
 * @param {number} clientid
 * @param {number} lastClock - the client's last known awareness clock
 */
export const encodeAwarenessUserDisconnected = (clientid, lastClock) =>
  encoding.encode(outer => {
    encoding.writeVarUint(outer, messageAwareness)
    // the payload is itself an encoded awareness update for a single client
    encoding.writeVarUint8Array(outer, encoding.encode(inner => {
      encoding.writeVarUint(inner, 1) // one change
      encoding.writeVarUint(inner, clientid)
      encoding.writeVarUint(inner, lastClock + 1)
      encoding.writeVarString(inner, JSON.stringify(null))
    }))
  })
130 |
--------------------------------------------------------------------------------
/src/storage/s3.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as random from 'lib0/random'
3 | import * as promise from 'lib0/promise'
4 | import * as minio from 'minio'
5 | import * as env from 'lib0/environment'
6 | import * as number from 'lib0/number'
7 | import * as logging from 'lib0/logging'
8 |
9 | const log = logging.createModuleLogger('@y/redis/s3')
10 |
11 | /**
12 | * @typedef {import('../storage.js').AbstractStorage} AbstractStorage
13 | */
14 |
/**
 * Build an S3Storage from the `S3_*` environment configuration.
 *
 * @todo perform some sanity checks here before starting (bucket exists, ..)
 * @param {string} bucketName
 */
export const createS3Storage = (bucketName) => {
  const conf = {
    endPoint: env.ensureConf('s3-endpoint'),
    port: number.parseInt(env.ensureConf('s3-port')),
    // ssl is off unless explicitly enabled; 'false'/'0' (or unset) disable it
    useSSL: !['false', '0'].includes(env.getConf('s3-ssl') || 'false'),
    accessKey: env.ensureConf('s3-access-key'),
    secretKey: env.ensureConf('s3-secret-key')
  }
  return new S3Storage(bucketName, conf)
}
33 |
/**
 * Build the object key `<room>/<docid>/<r>` with room and doc id url-encoded
 * so they cannot contain the `/` separator.
 *
 * @param {string} room
 * @param {string} docid
 * @param {string} [r] - unique suffix; defaults to a fresh uuid
 */
export const encodeS3ObjectName = (room, docid, r = random.uuidv4()) =>
  [encodeURIComponent(room), encodeURIComponent(docid), r].join('/')
39 |
/**
 * Inverse of `encodeS3ObjectName`: split an object key back into its
 * room / docid / r components (room and docid are url-decoded).
 *
 * @param {string} objectName
 */
export const decodeS3ObjectName = objectName => {
  const match = /(.*)\/(.*)\/(.*)$/.exec(objectName)
  if (match == null) {
    throw new Error('Malformed y:room stream name!')
  }
  const [, encodedRoom, encodedDocid, r] = match
  return { room: decodeURIComponent(encodedRoom), docid: decodeURIComponent(encodedDocid), r }
}
50 |
51 | /**
52 | * @typedef {Object} S3StorageConf
53 | * @property {string} S3StorageConf.endPoint
54 | * @property {number} S3StorageConf.port
55 | * @property {boolean} S3StorageConf.useSSL
56 | * @property {string} S3StorageConf.accessKey
57 | * @property {string} S3StorageConf.secretKey
58 | */
59 |
/**
 * Drain a readable stream into a single Buffer.
 *
 * @param {import('stream').Stream} stream
 * @return {Promise}
 */
const readStream = stream => promise.create((resolve, reject) => {
  /**
   * @type {Array}
   */
  const collected = []
  stream.on('error', reject)
  stream.on('data', part => {
    collected.push(Buffer.from(part))
  })
  stream.on('end', () => {
    resolve(Buffer.concat(collected))
  })
})
73 |
/**
 * A Storage implementation that persists documents as objects in an
 * S3-compatible bucket (via minio). Each `persistDoc` call adds a new object;
 * `retrieveDoc` merges all objects stored for a doc.
 *
 * @implements {AbstractStorage}
 */
export class S3Storage {
  /**
   * @param {string} bucketName
   * @param {S3StorageConf} conf
   */
  constructor (bucketName, { endPoint, port, useSSL, accessKey, secretKey }) {
    this.bucketName = bucketName
    this.client = new minio.Client({
      endPoint,
      port,
      useSSL,
      accessKey,
      secretKey
    })
  }

  /**
   * Store the doc as a new object under a fresh `<room>/<docname>/<uuid>` key.
   *
   * @param {string} room
   * @param {string} docname
   * @param {Y.Doc} ydoc
   * @returns {Promise}
   */
  async persistDoc (room, docname, ydoc) {
    const objectName = encodeS3ObjectName(room, docname)
    await this.client.putObject(this.bucketName, objectName, Buffer.from(Y.encodeStateAsUpdateV2(ydoc)))
  }

  /**
   * List and download all objects stored for (room,docname) and merge them
   * into a single v2 update. The returned `references` (object names) can be
   * handed to `deleteReferences` after the merged doc has been re-persisted.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise<{ doc: Uint8Array, references: Array } | null>}
   */
  async retrieveDoc (room, docname) {
    log('retrieving doc room=' + room + ' docname=' + docname)
    // the empty `r` suffix turns the object name into a listing prefix
    const objNames = await this.client.listObjectsV2(this.bucketName, encodeS3ObjectName(room, docname, ''), true).toArray()
    const references = objNames.map(obj => obj.name)
    log('retrieved doc room=' + room + ' docname=' + docname + ' refs=' + JSON.stringify(references))

    if (references.length === 0) {
      return null
    }
    // download all stored updates in parallel and drop any empty results
    let updates = await promise.all(references.map(ref => this.client.getObject(this.bucketName, ref).then(readStream)))
    updates = updates.filter(update => update != null)
    log('retrieved doc room=' + room + ' docname=' + docname + ' updatesLen=' + updates.length)
    return { doc: Y.mergeUpdatesV2(updates), references }
  }

  /**
   * Compute the state vector of the merged doc. With plain object storage
   * there is no cheaper way than downloading and merging the whole doc.
   *
   * @param {string} room
   * @param {string} docname
   * @return {Promise}
   */
  async retrieveStateVector (room, docname) {
    const r = await this.retrieveDoc(room, docname)
    return r ? Y.encodeStateVectorFromUpdateV2(r.doc) : null
  }

  /**
   * Remove the listed objects from the bucket.
   *
   * @param {string} _room
   * @param {string} _docname
   * @param {Array} storeReferences - object names as returned by `retrieveDoc`
   * @return {Promise}
   */
  async deleteReferences (_room, _docname, storeReferences) {
    await this.client.removeObjects(this.bucketName, storeReferences)
  }

  // nothing to tear down — the minio client keeps no persistent connection here
  async destroy () {
  }
}
147 |
--------------------------------------------------------------------------------
/tests/ws.tests.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as t from 'lib0/testing'
3 | import * as api from '../src/api.js'
4 | import * as promise from 'lib0/promise'
5 | import { WebSocket } from 'ws'
6 | import { createYWebsocketServer } from '../src/server.js'
7 | import * as array from 'lib0/array'
8 | import { WebsocketProvider } from 'y-websocket'
9 | import * as redis from 'redis'
10 | import * as time from 'lib0/time'
11 | import * as jwt from 'lib0/crypto/jwt'
12 | import * as utils from './utils.js'
13 |
// JWT presented by the test websocket clients; signed with the test private
// key from ./utils.js so the server under test accepts it.
const authToken = await jwt.encodeJwt(utils.authPrivateKey, {
  iss: 'my-auth-server',
  exp: time.getUnixTime() + 60 * 60 * 1000, // token expires in one hour
  yuserid: 'user1' // fill this with a unique id of the authorized user
})
19 |
/**
 * Create a y-websocket client (with its own Y.Doc) that connects to the
 * y-redis server under test. The room is prefixed with the test name so that
 * test cases don't share streams.
 *
 * @param {t.TestCase} tc
 * @param {string} room
 */
const createWsClient = (tc, room) => {
  const ydoc = new Y.Doc()
  const fullRoomName = `${tc.testName}-${room}`
  const provider = new WebsocketProvider(utils.yredisUrl, fullRoomName, ydoc, {
    WebSocketPolyfill: /** @type {any} */ (WebSocket),
    disableBc: true,
    params: {},
    protocols: [`yauth-${authToken}`]
  })
  return { ydoc, provider }
}
30 |
/**
 * Spawn a y-redis worker with short debounce/lifetime values suitable for
 * fast tests, and register its client for cleanup.
 */
const createWorker = async () => {
  const worker = await api.createWorker(utils.store, utils.redisPrefix, {})
  const { client } = worker
  client.redisMinMessageLifetime = 800
  client.redisTaskDebounce = 500
  utils.prevClients.push(client)
  return worker
}
38 |
/**
 * Start a y-redis websocket server for the tests and register it for cleanup.
 */
const createServer = async () => {
  const server = await createYWebsocketServer({
    port: utils.yredisPort,
    store: utils.store,
    redisPrefix: utils.redisPrefix,
    checkPermCallbackUrl: utils.checkPermCallbackUrl
  })
  utils.prevClients.push(server)
  return server
}
44 |
/**
 * Create an api client connected to the test store/redis and register it for
 * cleanup.
 */
const createApiClient = async () => {
  const apiClient = await api.createApiClient(utils.store, utils.redisPrefix)
  utils.prevClients.push(apiClient)
  return apiClient
}
50 |
/**
 * Reset global test state (previous clients, redis keys) and spin up a fresh
 * server / api-client / worker trio for a test case.
 *
 * @param {t.TestCase} tc
 */
const createTestCase = async tc => {
  await promise.all(utils.prevClients.map(c => c.destroy()))
  utils.prevClients.length = 0
  const redisClient = redis.createClient({ url: api.redisUrl })
  await redisClient.connect()
  // flush content left over from a previous run. Redis' DEL requires at least
  // one key, so guard against an empty key list (e.g. on the very first run).
  const keysToDelete = await redisClient.keys(utils.redisPrefix + ':*')
  if (keysToDelete.length > 0) {
    await redisClient.del(keysToDelete)
  }
  utils.prevClients.push({ destroy: () => redisClient.quit().then(() => {}) })
  const server = await createServer()
  const [apiClient, worker] = await promise.all([createApiClient(), createWorker()])
  return {
    redisClient,
    apiClient,
    server,
    worker,
    createWsClient: /** @param {string} room */ (room) => createWsClient(tc, room)
  }
}
73 |
/**
 * Resolve once both docs encode to byte-identical state (polled via
 * promise.until).
 *
 * @param {Y.Doc} ydoc1
 * @param {Y.Doc} ydoc2
 */
const waitDocsSynced = (ydoc1, ydoc2) => {
  console.info('waiting for docs to sync...')
  return promise.until(0, () => {
    const e1 = Y.encodeStateAsUpdateV2(ydoc1)
    const e2 = Y.encodeStateAsUpdateV2(ydoc2)
    const isSynced = array.equalFlat(e1, e2)
    // fixed typo in the log message ("sycned" -> "synced")
    isSynced && console.info('docs synced!')
    return isSynced
  })
}
88 |
/**
 * End-to-end test: clients sync through the redis stream, the worker persists
 * the document to the store and cleans up the stream, and late-joining
 * clients can still retrieve the document from the store.
 *
 * @param {t.TestCase} tc
 */
export const testSyncAndCleanup = async tc => {
  const { createWsClient, worker, redisClient } = await createTestCase(tc)
  const { ydoc: doc1 } = createWsClient('map')
  // doc2: can retrieve changes propagated on stream
  const { ydoc: doc2 } = createWsClient('map')
  doc1.getMap().set('a', 1)
  t.info('docs syncing (0)')
  await waitDocsSynced(doc1, doc2)
  t.info('docs synced (1)')
  const docStreamExistsBefore = await redisClient.exists(api.computeRedisRoomStreamName(tc.testName + '-' + 'map', 'index', utils.redisPrefix))
  t.assert(doc2.getMap().get('a') === 1)
  // doc3 can retrieve older changes from stream
  const { ydoc: doc3 } = createWsClient('map')
  await waitDocsSynced(doc1, doc3)
  t.info('docs synced (2)')
  t.assert(doc3.getMap().get('a') === 1)
  // wait long enough for the worker to persist the doc and clean up redis
  await promise.wait(worker.client.redisMinMessageLifetime * 5)
  const docStreamExists = await redisClient.exists(api.computeRedisRoomStreamName(tc.testName + '-' + 'map', 'index', utils.redisPrefix))
  const workerLen = await redisClient.xLen(utils.redisPrefix + ':worker')
  // the room stream must be gone now (it existed before the cleanup window)
  t.assert(!docStreamExists && docStreamExistsBefore)
  t.assert(workerLen === 0)
  t.info('stream cleanup after initial changes')
  // doc4 can retrieve the document again from MemoryStore
  const { ydoc: doc4 } = createWsClient('map')
  await waitDocsSynced(doc3, doc4)
  t.info('docs synced (3)')
  t.assert(doc3.getMap().get('a') === 1)
  const memRetrieved = await utils.store.retrieveDoc(tc.testName + '-' + 'map', 'index')
  t.assert(memRetrieved?.references.length === 1)
  t.info('doc retrieved')
  // now write more updates that the worker will collect
  doc1.getMap().set('a', 2)
  await promise.wait(worker.client.redisMinMessageLifetime * 2)
  t.assert(doc2.getMap().get('a') === 2)
  const memRetrieved2 = await utils.store.retrieveDoc(tc.testName + '-' + 'map', 'index')
  t.info('map retrieved')
  // should delete old references
  t.assert(memRetrieved2?.references.length === 1)
  await promise.all(utils.prevClients.reverse().map(c => c.destroy()))
}
132 |
--------------------------------------------------------------------------------
/demos/blocksuite/server.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import { fileURLToPath } from 'url'
3 | import { dirname, resolve } from 'path'
4 | import express from 'express'
5 | import formidable from 'formidable'
6 | import { JSONFilePreset } from 'lowdb/node'
7 | import * as jwt from 'lib0/crypto/jwt'
8 | import * as time from 'lib0/time'
9 | import * as ecdsa from 'lib0/crypto/ecdsa'
10 | import * as env from 'lib0/environment'
11 | import * as fs from 'fs/promises'
12 | import * as promise from 'lib0/promise'
13 |
/** @type {{docs: {id: string, title: string, updated: string, created: string}[]}} */
const defaultData = { docs: [] }
// lowdb JSON database holding the document metadata. The initial read+write
// pair bootstraps db.json on disk if it does not exist yet.
const db = await JSONFilePreset('db.json', defaultData)
await db.read()
await db.write()

const app = express()
const port = 5173

// serve static files
app.use(express.static('./'))
app.use(express.json())

// Read the AUTH_PRIVATE_KEY environment variable and import the JWK
export const authPrivateKey = await ecdsa.importKeyJwk(
  JSON.parse(env.ensureConf('auth-private-key'))
)
// Read the AUTH_PUBLIC_KEY environment variable and import the JWK
export const authPublicKey = await ecdsa.importKeyJwk(
  JSON.parse(env.ensureConf('auth-public-key'))
)

// issuer claim written into the JWTs generated by /auth/token
const appName = 'my-express-app'
37 |
// This endpoint is called in regular intervals when the document changes.
// The request contains a multi-part formdata field that can be read, for example, with formidable:
app.put('/ydoc/:room', async (req, res, next) => {
  const room = req.params.room
  const timestamp = new Date().toISOString()

  let ydocUpdate
  try {
    ydocUpdate = await promise.create((resolve, reject) => {
      const form = formidable({})
      form.parse(req, (err, _fields, files) => {
        if (err) {
          reject(err)
          return
        }
        if (files.ydoc) {
          // formidable writes the data to a file by default. This might be a good idea for your
          // application. Check the documentation to find a non-temporary location for the read file.
          // You should probably delete it if it is no longer being used.
          const file = files.ydoc[0]
          fs.readFile(file.filepath).then(resolve, reject)
        } else {
          // Previously the promise never settled in this case and the request
          // hung forever. Reject so the handler can respond with an error.
          reject(new Error('missing "ydoc" form field'))
        }
      })
    })
  } catch (err) {
    // forward the error to express exactly once (rejecting AND calling
    // next(err) inside the parse callback caused an unhandled rejection)
    next(err)
    return
  }
  const ydoc = new Y.Doc()
  Y.applyUpdateV2(ydoc, ydocUpdate)
  console.log(
    `BlockSuite doc in room "${room}" updated, block count: ${ydoc.getMap('blocks').size}`
  )

  // record the update time on the document's metadata entry, if it exists
  await db.read()
  const docIndex = db.data.docs.findIndex((doc) => doc.id === room)
  if (docIndex !== -1) {
    db.data.docs[docIndex].updated = timestamp
    await db.write()
  }

  res.sendStatus(200)
})
77 |
// This example server always grants read-write permission to all requests.
// Modify it to your own needs or implement the same API in your own backend!
app.get('/auth/token', async (_req, res) => {
  const token = await jwt.encodeJwt(authPrivateKey, {
    iss: appName,
    exp: time.getUnixTime() + 1000 * 60 * 60, // token expires in an hour
    yuserid: 'user1' // associate the client with a unique id that will be used to check permissions
  })
  res.send(token)
})

// This api is called to check whether a specific user (identified by the unique "yuserid") has
// access to a specific room. This rest endpoint is called by the yredis server, not the client.
app.get('/auth/perm/:room/:userid', async (req, res) => {
  const yroom = req.params.room
  const yuserid = req.params.userid
  // This sample-server always grants full access
  res.send(
    JSON.stringify({
      yroom,
      yaccess: 'rw', // alternatively, specify "read-only" or "no-access"
      yuserid
    })
  )
})
103 |
// List the metadata of all known documents.
app.get('/docs', async (_req, res) => {
  await db.read()
  res.json(db.data.docs)
})

// Create a new, untitled document entry and return its id.
app.post('/docs', async (_req, res) => {
  const now = new Date().toISOString()
  const doc = { id: `${Date.now()}`, title: '', created: now, updated: now }
  await db.read()
  db.data.docs.push(doc)
  await db.write()

  res.status(201).json({ id: doc.id, title: doc.title })
})
119 |
// Remove a document's metadata entry.
app.delete('/docs/:id', async (req, res) => {
  const docId = req.params.id
  await db.read()
  db.data.docs = db.data.docs.filter(({ id }) => id !== docId)
  await db.write()

  res.send('Document removed')
})

// Rename a document. Responds 400 on a missing title, 404 on an unknown id.
app.patch('/docs/:id/title', async (req, res) => {
  const { id } = req.params
  const { title } = req.body

  if (typeof title !== 'string') return res.status(400).send('Missing title')

  await db.read()
  const doc = db.data.docs.find((d) => d.id === id)
  if (!doc) {
    return res.status(404).send('Document not found')
  }
  doc.title = title
  doc.updated = new Date().toISOString()
  await db.write()
  res.status(200).json({ id: doc.id, title: doc.title })
})
146 |
// Resolve the demo's index.html once at startup – it is constant for the
// process lifetime, so there is no need to recompute it per request.
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const indexHtmlPath = resolve(__dirname, 'index.html')

// SPA fallback: serve index.html for every route not handled above.
app.get('*', (req, res) => {
  res.sendFile(indexHtmlPath)
})

const server = app.listen(port, () => {
  console.log(`Express Demo BlockSuite server listening on port ${port}`)
})

// Gracefully shut down the server when running in Docker
process.on('SIGTERM', shutDown)
process.on('SIGINT', shutDown)
160 |
/**
 * Close the http server and exit cleanly; force-exit if connections are still
 * open after a 10s grace period.
 */
function shutDown () {
  console.log('Received SIGTERM/SIGINT - shutting down gracefully')
  server.close(() => {
    console.log('Closed out remaining connections - shutting down')
    process.exit(0)
  })
  const gracePeriodMs = 10000
  setTimeout(() => {
    console.error("Couldn't close connections - forcefully shutting down")
    process.exit(1)
  }, gracePeriodMs)
}
172 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # y-redis :tophat:
3 | > y-websocket compatible backend using Redis for scalability. **This is beta
4 | > software!**
5 |
6 | y-redis is an alternative backend for y-websocket. It only requires a redis
7 | instance and a storage provider (S3 or Postgres-compatible).
8 |
9 | * **Memory efficient:** The server doesn't maintain a Y.Doc in-memory. It
10 | streams updates through redis. The Yjs document is only loaded to memory for the
11 | initial sync.
12 | * **Scalable:** You can start as many y-redis instances as you want to handle
13 | a fluctuating number of clients. No coordination is needed.
* **Auth:** y-redis works together with your existing infrastructure to
authenticate clients and check whether a client has read-only / read-write
access to a document.
* **Database agnostic:** You can persist documents in S3-compatible backends, in
Postgres, or implement your own storage provider.
19 |
20 | ### Licensing
21 |
22 | y-redis is dual-licensed (either [AGPL](./LICENSE) or proprietary).
23 |
24 | Please contact me to buy a license if you intend to use y-redis in your
25 | commercial product:
26 |
27 | Otherwise, you may use this software under the terms of the AGPL, which requires
28 | you to publish your source code under the terms of the AGPL too.
29 |
30 | ### Components
31 |
32 | Redis is used as a "cache" and a distribution channel for document updates.
33 | Normal databases are not fast enough for handling real-time updates of
34 | fast-changing applications (e.g. collaborative drawing applications that
35 | generate hundreds of operations per second). Hence a redis-cache for temporary
36 | storage makes sense to distribute documents as fast as possible to all peers.
37 |
38 | A persistent storage (e.g. S3 or Postgres) is used to persist document updates
39 | permanently. You can configure in which intervals you want to persist data from
40 | redis to the persistent storage. You can even implement a custom persistent
41 | storage technology.
42 |
43 | The y-redis **server component** (`/bin/server.js`) is responsible for accepting
44 | websocket-connections and distributing the updates via redis streams. Each
45 | "room" is represented as a redis stream. The server component assembles updates
stored in redis and in the persistent storage (e.g. S3 or Postgres) for the initial
47 | sync. After the initial sync, the server doesn't keep any Yjs state in-memory.
48 | You can start as many server components as you need. It makes sense to put the
49 | server component behind a loadbalancer, which can potentially auto-scale the
50 | server component based on CPU or network usage.
51 |
52 | The separate y-redis **worker component** (`/bin/worker.js`) is responsible for
53 | extracting data from the redis cache to a persistent database like S3 or
54 | Postgres. Once the data is persisted, the worker component cleans up stale data
55 | in redis. You can start as many worker components as you need. It is recommended
56 | to run at least one worker, so that the data is eventually persisted. The worker
57 | components coordinate which room needs to be persisted using a separate
58 | worker-queue (see `y:worker` stream in redis).
59 |
60 | You are responsible for providing a REST backend that y-redis will call to check
61 | whether a specific client (authenticated via a JWT token) has access to a
62 | specific room / document. Example servers can be found in
63 | `/bin/auth-server-example.js` and `/demos/auth-express/server.js`.
64 |
65 | ### Missing Features
66 |
67 | I'm looking for sponsors that want to sponsor the following work:
68 |
69 | - Ability to kick out users when permissions on a document changed
70 | - Configurable docker containers for y-redis server & worker
71 | - Helm chart
72 | - More exhaustive logging and reporting of possible issues
73 | - More exhaustive testing
74 | - Better documentation & more documentation for specific use-cases
75 | - Support for Bun and Deno
76 | - Perform expensive tasks (computing sync messages) in separate threads
77 |
78 | If you are interested in sponsoring some of this work, please send a mail to
79 | .
80 |
81 | # Quick Start (docker-compose)
82 |
83 | You can get everything running quickly using
84 | [docker-compose](https://docs.docker.com/compose/). The compose file runs the
85 | following components:
86 |
87 | - redis
88 | - minio as a s3 endpoint
89 | - a single y-redis server
90 | - a single y-redis worker
91 |
92 | This can be a good starting point for your application. If your cloud provider
93 | has a managed s3 service, you should probably use that instead of minio. If you
94 | want to use minio, you need to setup proper volumes and backups.
95 |
96 | The full setup gives insight into more specialized configuration options.
97 |
98 | ```sh
99 | git clone https://github.com/yjs/y-redis.git
100 | cd y-redis
101 | npm i
102 | ```
103 |
104 | ### Setup the environment variables
105 |
106 | ```sh
107 | cp .env.docker.template .env
108 | # generate unique authentication tokens
109 | npx 0ecdsa-generate-keypair --name auth >> .env
110 | ```
111 |
112 | The sample configuration configures s3 using minio.
113 | Have a look at `.env.template` for more configuration options.
114 |
115 | ### Run demo
116 |
117 | ```sh
118 | cd ./demos/auth-express
119 | docker compose up
120 | # open http://localhost:5173 in a browser
121 | ```
122 |
123 | # Full setup
124 |
125 | Components are configured via environment variables. It makes sense to start by
126 | cloning y-redis and getting one of the demos to work.
127 |
128 | Note: If you want to use any of the docker commands, feel free to use podman (a
129 | more modern alternative) instead.
130 |
131 | #### Start a redis instance
132 |
133 | Setup redis on your computer. Follow the [official
134 | documentation](https://redis.io/docs/install/install-redis/). This is
135 | recommended if you want to debug the redis stream.
136 |
137 | Alternatively, simply run redis via docker:
138 |
139 | ```sh
140 | # start the official redis docker container on port 6379
141 | docker run -p 6379:6379 redis
142 | # or `npm run redis`
143 | ```
144 |
145 | #### Start an S3 instance
146 |
147 | Setup an S3-compatible store at your favorite cloud provider.
148 |
149 | Alternatively, simply run a *minio* store as a docker container:
150 |
151 | ```sh
152 | docker run -p 9000:9000 -p 9001:9001 quay.io/minio/minio server /data --console-address \":9001\"
153 | # or `npm run minio`
154 | ```
155 |
156 | This is just a dev setup. Have a look at the minio documentation if you want to
157 | run it in production.
158 |
159 | #### Clone demo
160 |
161 | ```sh
162 | git clone https://github.com/yjs/y-redis.git
163 | cd y-redis
164 | npm i
165 | ```
166 |
167 | All features are configurable using environment variables. For local development
168 | it makes sense to setup a `.env` file, that stores project-specific secrets. Use
169 | `.env.template` as a template to setup environment variables. Make sure to read
170 | the documentation carefully and configure every single variable.
171 |
172 | ```sh
173 | # setup environment variables
174 | cp .env.template .env
175 | nano .env
176 | ```
177 |
178 | Then you can run the different components in separate terminals:
179 |
180 | ```sh
181 | # run the server
182 | npm run start:server
183 | # run a single worker in a separate terminal
184 | npm run start:worker
# start the express server in a separate terminal
186 | cd demos/auth-express
187 | npm i
188 | npm start
189 | ```
190 |
191 | Open [`http://localhost:5173`](http://localhost:5173) in a browser.
192 |
--------------------------------------------------------------------------------
/src/ws.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as uws from 'uws'
3 | import * as promise from 'lib0/promise'
4 | import * as api from './api.js'
5 | import * as array from 'lib0/array'
6 | import * as encoding from 'lib0/encoding'
7 | import * as decoding from 'lib0/decoding'
8 | import * as protocol from './protocol.js'
9 | import * as logging from 'lib0/logging'
10 | import { createSubscriber } from './subscriber.js'
11 |
12 | const log = logging.createModuleLogger('@y/redis/ws')
13 |
14 | /**
15 | * how to sync
16 | * receive sync-step 1
17 | * // @todo y-websocket should only accept updates after receiving sync-step 2
18 | * redisId = ws.sub(conn)
19 | * {doc,redisDocLastId} = api.getdoc()
20 | * compute sync-step 2
21 | * if (redisId > redisDocLastId) {
22 | * subscriber.ensureId(redisDocLastId)
23 | * }
24 | */
25 |
/**
 * Handle to a running y-websocket server. Bundles the uws app, the redis api
 * client, and the stream subscriber so they can be torn down together.
 */
class YWebsocketServer {
  /**
   * @param {uws.TemplatedApp} app
   * @param {api.Api} client
   * @param {import('./subscriber.js').Subscriber} subscriber
   */
  constructor (app, client, subscriber) {
    this.app = app
    this.subscriber = subscriber
    this.client = client
  }

  /**
   * Destroy the subscriber and the api client. Note that the uws app itself
   * is not closed here.
   */
  async destroy () {
    this.subscriber.destroy()
    await this.client.destroy()
  }
}
43 |
// monotonically increasing counter used to assign per-connection debug ids
let _idCnt = 0

/**
 * Per-connection state. An instance is attached to each websocket as its uws
 * user data during the upgrade handshake.
 */
class User {
  /**
   * @param {string} room
   * @param {boolean} hasWriteAccess
   * @param {string} userid identifies the user globally.
   */
  constructor (room, hasWriteAccess, userid) {
    this.room = room
    this.hasWriteAccess = hasWriteAccess
    /**
     * Redis stream id at which this connection's subscription started. The
     * open handler compares it against the fetched doc's last id to decide
     * whether the subscription has to be rewound.
     * @type {string}
     */
    this.initialRedisSubId = '0'
    // uws topics (redis stream names) this connection has subscribed to
    this.subs = new Set()
    /**
     * This is just an identifier to keep track of the user for logging purposes.
     */
    this.id = _idCnt++
    /**
     * Identifies the User globally.
     * Note that several clients can have the same userid (e.g. if a user opened several browser
     * windows)
     */
    this.userid = userid
    /**
     * Awareness client id of this connection, once known; used together with
     * awarenessLastClock to emit a disconnect message in the close handler.
     * @type {number|null}
     */
    this.awarenessId = null
    this.awarenessLastClock = 0
    // set in the close handler; checked before sending the initial sync
    this.isClosed = false
  }
}
78 |
/**
 * Register the y-websocket endpoint on a uws app and wire it to redis via an
 * api client (writes) and a stream subscriber (reads).
 *
 * @param {uws.TemplatedApp} app
 * @param {uws.RecognizedString} pattern
 * @param {import('./storage.js').AbstractStorage} store
 * @param {function(uws.HttpRequest): Promise<{ hasWriteAccess: boolean, room: string, userid: string }>} checkAuth
 * @param {Object} conf
 * @param {string} [conf.redisPrefix]
 * @param {(room:string,docname:string,client:api.Api)=>void} [conf.initDocCallback] - this is called when a doc is
 * accessed, but it doesn't exist. You could populate the doc here. However, this function could be
 * called several times, until some content exists. So you need to handle concurrent calls.
 */
export const registerYWebsocketServer = async (app, pattern, store, checkAuth, { redisPrefix = 'y', initDocCallback = () => {} } = {}) => {
  const [client, subscriber] = await promise.all([
    api.createApiClient(store, redisPrefix),
    createSubscriber(store, redisPrefix)
  ])
  /**
   * Forward messages arriving on a redis stream to all websocket clients that
   * are subscribed to the matching uws topic.
   *
   * @param {string} stream
   * @param {Array} messages
   */
  const redisMessageSubscriber = (stream, messages) => {
    // nobody listens on this topic anymore - stop consuming the stream
    if (app.numSubscribers(stream) === 0) {
      subscriber.unsubscribe(stream, redisMessageSubscriber)
    }
    // batch multiple stream entries into a single websocket frame
    const message = messages.length === 1
      ? messages[0]
      : encoding.encode(encoder => messages.forEach(message => {
        encoding.writeUint8Array(encoder, message)
      }))
    app.publish(stream, message, true, false)
  }
  app.ws(pattern, /** @type {uws.WebSocketBehavior} */ ({
    compression: uws.SHARED_COMPRESSOR,
    maxPayloadLength: 100 * 1024 * 1024,
    idleTimeout: 60,
    sendPingsAutomatically: true,
    upgrade: async (res, req, context) => {
      const url = req.getUrl()
      // NOTE(review): headers are captured before the async checkAuth call -
      // presumably the uws request is not readable after an await; confirm
      // before reordering.
      const headerWsKey = req.getHeader('sec-websocket-key')
      const headerWsProtocol = req.getHeader('sec-websocket-protocol')
      const headerWsExtensions = req.getHeader('sec-websocket-extensions')
      let aborted = false
      res.onAborted(() => {
        console.log('Upgrading client aborted', { url })
        aborted = true
      })
      try {
        const { hasWriteAccess, room, userid } = await checkAuth(req)
        if (aborted) return
        res.cork(() => {
          res.upgrade(
            new User(room, hasWriteAccess, userid),
            headerWsKey,
            headerWsProtocol,
            headerWsExtensions,
            context
          )
        })
      } catch (err) {
        console.log(`Failed to auth to endpoint ${url}`, err)
        if (aborted) return
        res.cork(() => {
          res.writeStatus('401 Unauthorized').end('Unauthorized')
        })
      }
    },
    open: async (ws) => {
      const user = ws.getUserData()
      log(() => ['client connected (uid=', user.id, ', ip=', Buffer.from(ws.getRemoteAddressAsText()).toString(), ')'])
      const stream = api.computeRedisRoomStreamName(user.room, 'index', redisPrefix)
      // subscribe to the room stream BEFORE fetching the doc so no update
      // published in between is lost
      user.subs.add(stream)
      ws.subscribe(stream)
      user.initialRedisSubId = subscriber.subscribe(stream, redisMessageSubscriber).redisId
      const indexDoc = await client.getDoc(user.room, 'index')
      // empty doc: give the integrator a chance to populate it
      if (indexDoc.ydoc.store.clients.size === 0) {
        initDocCallback(user.room, 'index', client)
      }
      // the connection may have closed while we awaited the doc
      if (user.isClosed) return
      ws.cork(() => {
        ws.send(protocol.encodeSyncStep1(Y.encodeStateVector(indexDoc.ydoc)), true, false)
        ws.send(protocol.encodeSyncStep2(Y.encodeStateAsUpdate(indexDoc.ydoc)), true, true)
        if (indexDoc.awareness.states.size > 0) {
          ws.send(protocol.encodeAwarenessUpdate(indexDoc.awareness, array.from(indexDoc.awareness.states.keys())), true, true)
        }
      })
      if (api.isSmallerRedisId(indexDoc.redisLastId, user.initialRedisSubId)) {
        // our subscription is newer than the content that we received from the api
        // need to renew subscription id and make sure that we catch the latest content.
        subscriber.ensureSubId(stream, indexDoc.redisLastId)
      }
    },
    message: (ws, messageBuffer) => {
      const user = ws.getUserData()
      // don't read any messages from users without write access
      if (!user.hasWriteAccess) return
      // It is important to copy the data here
      const message = Buffer.from(messageBuffer.slice(0, messageBuffer.byteLength))
      if ( // filter out messages that we simply want to propagate to all clients
        // sync update or sync step 2
        (message[0] === protocol.messageSync && (message[1] === protocol.messageSyncUpdate || message[1] === protocol.messageSyncStep2)) ||
        // awareness update
        message[0] === protocol.messageAwareness
      ) {
        if (message[0] === protocol.messageAwareness) {
          // track the client's awareness id + clock so a proper disconnect
          // message can be emitted when the socket closes
          const decoder = decoding.createDecoder(message)
          decoding.readVarUint(decoder) // read message type
          decoding.readVarUint(decoder) // read length of awareness update
          const alen = decoding.readVarUint(decoder) // number of awareness updates
          const awId = decoding.readVarUint(decoder)
          if (alen === 1 && (user.awarenessId === null || user.awarenessId === awId)) { // only update awareness if len=1
            user.awarenessId = awId
            user.awarenessLastClock = decoding.readVarUint(decoder)
          }
        }
        client.addMessage(user.room, 'index', message)
      } else if (message[0] === protocol.messageSync && message[1] === protocol.messageSyncStep1) { // sync step 1
        // can be safely ignored because we send the full initial state at the beginning
      } else {
        console.error('Unexpected message type', message)
      }
    },
    close: (ws, code, message) => {
      const user = ws.getUserData()
      // tell the other clients that this awareness client disconnected
      user.awarenessId && client.addMessage(user.room, 'index', Buffer.from(protocol.encodeAwarenessUserDisconnected(user.awarenessId, user.awarenessLastClock)))
      user.isClosed = true
      log(() => ['client connection closed (uid=', user.id, ', code=', code, ', message="', Buffer.from(message).toString(), '")'])
      // drop redis subscriptions that no longer have any uws listeners
      user.subs.forEach(topic => {
        if (app.numSubscribers(topic) === 0) {
          subscriber.unsubscribe(topic, redisMessageSubscriber)
        }
      })
    }
  }))
  return new YWebsocketServer(app, client, subscriber)
}
214 |
--------------------------------------------------------------------------------
/src/api.js:
--------------------------------------------------------------------------------
1 | import * as Y from 'yjs'
2 | import * as redis from 'redis'
3 | import * as map from 'lib0/map'
4 | import * as decoding from 'lib0/decoding'
5 | import * as awarenessProtocol from 'y-protocols/awareness'
6 | import * as array from 'lib0/array'
7 | import * as random from 'lib0/random'
8 | import * as number from 'lib0/number'
9 | import * as promise from 'lib0/promise'
10 | import * as math from 'lib0/math'
11 | import * as protocol from './protocol.js'
12 | import * as env from 'lib0/environment'
13 | import * as logging from 'lib0/logging'
14 |
// module loggers for the worker and the api client
const logWorker = logging.createModuleLogger('@y/redis/api/worker')
const logApi = logging.createModuleLogger('@y/redis/api')

// Redis connection url, read from the required 'redis' configuration entry
// (env.ensureConf presumably throws when it is unset - see lib0/environment).
export const redisUrl = env.ensureConf('redis')
19 |
/**
 * Compare two redis stream ids of the form "<millis>-<seq>". A missing
 * sequence part defaults to 0.
 *
 * @param {string} a
 * @param {string} b
 * @return {boolean} iff a < b
 */
export const isSmallerRedisId = (a, b) => {
  const [a1, a2 = '0'] = a.split('-')
  const [b1, b2 = '0'] = b.split('-')
  // use the standard library parser with an explicit radix instead of the
  // lib0 wrapper
  const a1n = Number.parseInt(a1, 10)
  const b1n = Number.parseInt(b1, 10)
  return a1n < b1n || (a1n === b1n && Number.parseInt(a2, 10) < Number.parseInt(b2, 10))
}
32 |
/**
 * Regroup a raw redis XREAD reply into a nested room -> docid -> messages
 * mapping, remembering the last stream id per doc.
 *
 * @param {import('@redis/client/dist/lib/commands/generic-transformers.js').StreamsMessagesReply} streamReply
 * @param {string} prefix
 */
const extractMessagesFromStreamReply = (streamReply, prefix) => {
  /**
   * @type {Map }>>}
   */
  const messages = new Map()
  streamReply?.forEach(docStreamReply => {
    const { room, docid } = decodeRedisRoomStreamName(docStreamReply.name.toString(), prefix)
    let roomEntries = messages.get(room)
    if (roomEntries === undefined) {
      roomEntries = new Map()
      messages.set(room, roomEntries)
    }
    let docMessages = roomEntries.get(docid)
    if (docMessages === undefined) {
      const streamMessages = docStreamReply.messages
      docMessages = {
        lastId: streamMessages[streamMessages.length - 1].id,
        messages: /** @type {Array} */ ([])
      }
      roomEntries.set(docid, docMessages)
    }
    docStreamReply.messages.forEach(m => {
      if (m.message.m != null) {
        docMessages.messages.push(/** @type {Uint8Array} */ (m.message.m))
      }
    })
  })
  return messages
}
61 |
/**
 * Build the redis stream key for a room/doc pair. Room and doc id are
 * URI-encoded so that ':' remains a safe separator within the key.
 *
 * @param {string} room
 * @param {string} docid
 * @param {string} prefix
 */
export const computeRedisRoomStreamName = (room, docid, prefix) =>
  [prefix, 'room', encodeURIComponent(room), encodeURIComponent(docid)].join(':')
68 |
/**
 * Inverse of computeRedisRoomStreamName: parse a redis stream key back into
 * its room and docid components, validating the prefix.
 *
 * @param {string} rediskey
 * @param {string} expectedPrefix
 */
const decodeRedisRoomStreamName = (rediskey, expectedPrefix) => {
  const match = rediskey.match(/^(.*):room:(.*):(.*)$/)
  if (match == null || match[1] !== expectedPrefix) {
    throw new Error(`Malformed stream name! prefix="${match?.[1]}" expectedPrefix="${expectedPrefix}", rediskey="${rediskey}"`)
  }
  const [, , encodedRoom, encodedDocid] = match
  return { room: decodeURIComponent(encodedRoom), docid: decodeURIComponent(encodedDocid) }
}
80 |
/**
 * Create and connect an Api client and ensure the worker consumer group
 * exists on the worker stream.
 *
 * @param {import('./storage.js').AbstractStorage} store
 * @param {string} redisPrefix
 */
export const createApiClient = async (store, redisPrefix) => {
  const a = new Api(store, redisPrefix)
  await a.redis.connect()
  try {
    await a.redis.xGroupCreate(a.redisWorkerStreamName, a.redisWorkerGroupName, '0', { MKSTREAM: true })
  } catch (e) {
    // XGROUP CREATE fails with BUSYGROUP when the group already exists, which
    // is expected on restarts. Anything else is a real error - rethrow it
    // instead of silently swallowing it.
    if (!/BUSYGROUP/.test(String(e))) {
      throw e
    }
  }
  return a
}
93 |
94 | export class Api {
/**
 * Create an api client around a redis connection and a persistent storage
 * provider. Reads debounce/lifetime tuning from the environment and defines
 * the Lua scripts used for atomic stream operations.
 *
 * @param {import('./storage.js').AbstractStorage} store
 * @param {string} prefix
 */
constructor (store, prefix) {
  this.store = store
  this.prefix = prefix
  // unique consumer name for this process within the worker group
  this.consumername = random.uuidv4()
  /**
   * After this timeout, a worker will pick up a task and clean up a stream.
   */
  this.redisTaskDebounce = number.parseInt(env.getConf('redis-task-debounce') || '10000') // default: 10 seconds
  /**
   * Minimum lifetime of y* update messages in redis streams.
   */
  this.redisMinMessageLifetime = number.parseInt(env.getConf('redis-min-message-lifetime') || '60000') // default: 1 minute
  this.redisWorkerStreamName = this.prefix + ':worker'
  this.redisWorkerGroupName = this.prefix + ':worker'
  this._destroyed = false
  this.redis = redis.createClient({
    url: redisUrl,
    // scripting: https://github.com/redis/node-redis/#lua-scripts
    scripts: {
      // Atomically append a message to a room stream. If the stream did not
      // exist yet, a "compact" task for it is also added to the worker stream
      // (and immediately read into the group's pending list).
      addMessage: redis.defineScript({
        NUMBER_OF_KEYS: 1,
        SCRIPT: `
if redis.call("EXISTS", KEYS[1]) == 0 then
  redis.call("XADD", "${this.redisWorkerStreamName}", "*", "compact", KEYS[1])
  redis.call("XREADGROUP", "GROUP", "${this.redisWorkerGroupName}", "pending", "STREAMS", "${this.redisWorkerStreamName}", ">")
end
redis.call("XADD", KEYS[1], "*", "m", ARGV[1])
`,
        /**
         * @param {string} key
         * @param {Buffer} message
         */
        transformArguments (key, message) {
          return [key, message]
        },
        /**
         * @param {null} x
         */
        transformReply (x) {
          return x
        }
      }),
      // Atomically delete a stream key, but only when the stream is empty.
      xDelIfEmpty: redis.defineScript({
        NUMBER_OF_KEYS: 1,
        SCRIPT: `
if redis.call("XLEN", KEYS[1]) == 0 then
  redis.call("DEL", KEYS[1])
end
`,
        /**
         * @param {string} key
         */
        transformArguments (key) {
          return [key]
        },
        /**
         * @param {null} x
         */
        transformReply (x) {
          return x
        }
      })
    }
  })
}
164 |
165 | /**
166 | * @param {Array<{key:string,id:string}>} streams streamname-clock pairs
167 | * @return {Promise, lastId: string }>>}
168 | */
169 | async getMessages (streams) {
170 | if (streams.length === 0) {
171 | await promise.wait(50)
172 | return []
173 | }
174 | const reads = await this.redis.xRead(
175 | redis.commandOptions({ returnBuffers: true }),
176 | streams,
177 | { BLOCK: 1000, COUNT: 1000 }
178 | )
179 | /**
180 | * @type {Array<{ stream: string, messages: Array, lastId: string }>}
181 | */
182 | const res = []
183 | reads?.forEach(stream => {
184 | res.push({
185 | stream: stream.name.toString(),
186 | messages: protocol.mergeMessages(stream.messages.map(message => message.message.m).filter(m => m != null)),
187 | lastId: array.last(stream.messages).id.toString()
188 | })
189 | })
190 | return res
191 | }
192 |
193 | /**
194 | * @param {string} room
195 | * @param {string} docid
196 | * @param {Buffer} m
197 | */
198 | addMessage (room, docid, m) {
199 | // handle sync step 2 like a normal update message
200 | if (m[0] === protocol.messageSync && m[1] === protocol.messageSyncStep2) {
201 | if (m.byteLength < 4) {
202 | // message does not contain any content, don't distribute
203 | return promise.resolve()
204 | }
205 | m[1] = protocol.messageSyncUpdate
206 | }
207 | return this.redis.addMessage(computeRedisRoomStreamName(room, docid, this.prefix), m)
208 | }
209 |
210 | /**
211 | * @param {string} room
212 | * @param {string} docid
213 | */
214 | async getStateVector (room, docid = '/') {
215 | return this.store.retrieveStateVector(room, docid)
216 | }
217 |
218 | /**
219 | * @param {string} room
220 | * @param {string} docid
221 | */
222 | async getDoc (room, docid) {
223 | logApi(`getDoc(${room}, ${docid})`)
224 | const ms = extractMessagesFromStreamReply(await this.redis.xRead(redis.commandOptions({ returnBuffers: true }), { key: computeRedisRoomStreamName(room, docid, this.prefix), id: '0' }), this.prefix)
225 | logApi(`getDoc(${room}, ${docid}) - retrieved messages`)
226 | const docMessages = ms.get(room)?.get(docid) || null
227 | const docstate = await this.store.retrieveDoc(room, docid)
228 | logApi(`getDoc(${room}, ${docid}) - retrieved doc`)
229 | const ydoc = new Y.Doc()
230 | const awareness = new awarenessProtocol.Awareness(ydoc)
231 | awareness.setLocalState(null) // we don't want to propagate awareness state
232 | if (docstate) { Y.applyUpdateV2(ydoc, docstate.doc) }
233 | let docChanged = false
234 | ydoc.once('afterTransaction', tr => {
235 | docChanged = tr.changed.size > 0
236 | })
237 | ydoc.transact(() => {
238 | docMessages?.messages.forEach(m => {
239 | const decoder = decoding.createDecoder(m)
240 | switch (decoding.readVarUint(decoder)) {
241 | case 0: { // sync message
242 | if (decoding.readVarUint(decoder) === 2) { // update message
243 | Y.applyUpdate(ydoc, decoding.readVarUint8Array(decoder))
244 | }
245 | break
246 | }
247 | case 1: { // awareness message
248 | awarenessProtocol.applyAwarenessUpdate(awareness, decoding.readVarUint8Array(decoder), null)
249 | break
250 | }
251 | }
252 | })
253 | })
254 | return { ydoc, awareness, redisLastId: docMessages?.lastId.toString() || '0', storeReferences: docstate?.references || null, docChanged }
255 | }
256 |
257 | /**
258 | * @param {WorkerOpts} opts
259 | */
260 | async consumeWorkerQueue ({ tryClaimCount = 5, updateCallback = async () => {} }) {
261 | /**
262 | * @type {Array<{stream: string, id: string}>}
263 | */
264 | const tasks = []
265 | const reclaimedTasks = await this.redis.xAutoClaim(this.redisWorkerStreamName, this.redisWorkerGroupName, this.consumername, this.redisTaskDebounce, '0', { COUNT: tryClaimCount })
266 | reclaimedTasks.messages.forEach(m => {
267 | const stream = m?.message.compact
268 | stream && tasks.push({ stream, id: m?.id })
269 | })
270 | if (tasks.length === 0) {
271 | logWorker('No tasks available, pausing..', { tasks })
272 | await promise.wait(1000)
273 | return []
274 | }
275 | logWorker('Accepted tasks ', { tasks })
276 | await promise.all(tasks.map(async task => {
277 | const streamlen = await this.redis.xLen(task.stream)
278 | if (streamlen === 0) {
279 | await this.redis.multi()
280 | .xDelIfEmpty(task.stream)
281 | .xDel(this.redisWorkerStreamName, task.id)
282 | .exec()
283 | logWorker('Stream still empty, removing recurring task from queue ', { stream: task.stream })
284 | } else {
285 | const { room, docid } = decodeRedisRoomStreamName(task.stream, this.prefix)
286 | // @todo, make sure that awareness by this.getDoc is eventually destroyed, or doesn't
287 | // register a timeout anymore
288 | logWorker('requesting doc from store')
289 | const { ydoc, storeReferences, redisLastId, docChanged } = await this.getDoc(room, docid)
290 | logWorker('retrieved doc from store. redisLastId=' + redisLastId, ' storeRefs=' + JSON.stringify(storeReferences))
291 | const lastId = math.max(number.parseInt(redisLastId.split('-')[0]), number.parseInt(task.id.split('-')[0]))
292 | if (docChanged) {
293 | try {
294 | logWorker('doc changed, calling update callback')
295 | await updateCallback(room, ydoc)
296 | } catch (e) {
297 | console.error(e)
298 | }
299 | logWorker('persisting doc')
300 | await this.store.persistDoc(room, docid, ydoc)
301 | }
302 | await promise.all([
303 | storeReferences && docChanged ? this.store.deleteReferences(room, docid, storeReferences) : promise.resolve(),
304 | // if `redisTaskDebounce` is small, or if updateCallback taskes too long, then we might
305 | // add a task twice to this list.
306 | // @todo either use a different datastructure or make sure that task doesn't exist yet
307 | // before adding it to the worker queue
308 | // This issue is not critical, as no data will be lost if this happens.
309 | this.redis.multi()
310 | .xTrim(task.stream, 'MINID', lastId - this.redisMinMessageLifetime)
311 | .xAdd(this.redisWorkerStreamName, '*', { compact: task.stream })
312 | .xReadGroup(this.redisWorkerGroupName, 'pending', { key: this.redisWorkerStreamName, id: '>' }, { COUNT: 50 }) // immediately claim this entry, will be picked up by worker after timeout
313 | .xDel(this.redisWorkerStreamName, task.id)
314 | .exec()
315 | ])
316 | logWorker('Compacted stream ', { stream: task.stream, taskId: task.id, newLastId: lastId - this.redisMinMessageLifetime })
317 | }
318 | }))
319 | return tasks
320 | }
321 |
322 | async destroy () {
323 | this._destroyed = true
324 | try {
325 | await this.redis.quit()
326 | } catch (e) {}
327 | }
328 | }
329 |
330 | /**
331 | * @typedef {Object} WorkerOpts
 * @property {(room: string, ydoc: Y.Doc) => Promise<void>} [WorkerOpts.updateCallback]
333 | * @property {number} [WorkerOpts.tryClaimCount]
334 | */
335 |
336 | /**
337 | * @param {import('./storage.js').AbstractStorage} store
338 | * @param {string} redisPrefix
339 | * @param {WorkerOpts} opts
340 | */
341 | export const createWorker = async (store, redisPrefix, opts) => {
342 | const a = await createApiClient(store, redisPrefix)
343 | return new Worker(a, opts)
344 | }
345 |
export class Worker {
  /**
   * Background worker that repeatedly claims and processes compaction tasks
   * from the client's worker queue until the client is destroyed. The
   * processing loop is started immediately by the constructor.
   *
   * @param {Api} client
   * @param {WorkerOpts} opts
   */
  constructor (client, opts) {
    this.client = client
    logWorker('Created worker process ', { id: client.consumername, prefix: client.prefix, minMessageLifetime: client.redisMinMessageLifetime })
    const run = async () => {
      while (!client._destroyed) {
        try {
          await client.consumeWorkerQueue(opts)
        } catch (err) {
          // a failed iteration must not kill the loop; log and keep going
          console.error(err)
        }
      }
      logWorker('Ended worker process ', { id: client.consumername })
    }
    // fire-and-forget: the loop lives as long as the client
    run()
  }
}
366 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU AFFERO GENERAL PUBLIC LICENSE
2 | Version 3, 19 November 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU Affero General Public License is a free, copyleft license for
11 | software and other kinds of works, specifically designed to ensure
12 | cooperation with the community in the case of network server software.
13 |
14 | The licenses for most software and other practical works are designed
15 | to take away your freedom to share and change the works. By contrast,
16 | our General Public Licenses are intended to guarantee your freedom to
17 | share and change all versions of a program--to make sure it remains free
18 | software for all its users.
19 |
20 | When we speak of free software, we are referring to freedom, not
21 | price. Our General Public Licenses are designed to make sure that you
22 | have the freedom to distribute copies of free software (and charge for
23 | them if you wish), that you receive source code or can get it if you
24 | want it, that you can change the software or use pieces of it in new
25 | free programs, and that you know you can do these things.
26 |
27 | Developers that use our General Public Licenses protect your rights
28 | with two steps: (1) assert copyright on the software, and (2) offer
29 | you this License which gives you legal permission to copy, distribute
30 | and/or modify the software.
31 |
32 | A secondary benefit of defending all users' freedom is that
33 | improvements made in alternate versions of the program, if they
34 | receive widespread use, become available for other developers to
35 | incorporate. Many developers of free software are heartened and
36 | encouraged by the resulting cooperation. However, in the case of
37 | software used on network servers, this result may fail to come about.
38 | The GNU General Public License permits making a modified version and
39 | letting the public access it on a server without ever releasing its
40 | source code to the public.
41 |
42 | The GNU Affero General Public License is designed specifically to
43 | ensure that, in such cases, the modified source code becomes available
44 | to the community. It requires the operator of a network server to
45 | provide the source code of the modified version running there to the
46 | users of that server. Therefore, public use of a modified version, on
47 | a publicly accessible server, gives the public access to the source
48 | code of the modified version.
49 |
50 | An older license, called the Affero General Public License and
51 | published by Affero, was designed to accomplish similar goals. This is
52 | a different license, not a version of the Affero GPL, but Affero has
53 | released a new version of the Affero GPL which permits relicensing under
54 | this license.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | TERMS AND CONDITIONS
60 |
61 | 0. Definitions.
62 |
63 | "This License" refers to version 3 of the GNU Affero General Public License.
64 |
65 | "Copyright" also means copyright-like laws that apply to other kinds of
66 | works, such as semiconductor masks.
67 |
68 | "The Program" refers to any copyrightable work licensed under this
69 | License. Each licensee is addressed as "you". "Licensees" and
70 | "recipients" may be individuals or organizations.
71 |
72 | To "modify" a work means to copy from or adapt all or part of the work
73 | in a fashion requiring copyright permission, other than the making of an
74 | exact copy. The resulting work is called a "modified version" of the
75 | earlier work or a work "based on" the earlier work.
76 |
77 | A "covered work" means either the unmodified Program or a work based
78 | on the Program.
79 |
80 | To "propagate" a work means to do anything with it that, without
81 | permission, would make you directly or secondarily liable for
82 | infringement under applicable copyright law, except executing it on a
83 | computer or modifying a private copy. Propagation includes copying,
84 | distribution (with or without modification), making available to the
85 | public, and in some countries other activities as well.
86 |
87 | To "convey" a work means any kind of propagation that enables other
88 | parties to make or receive copies. Mere interaction with a user through
89 | a computer network, with no transfer of a copy, is not conveying.
90 |
91 | An interactive user interface displays "Appropriate Legal Notices"
92 | to the extent that it includes a convenient and prominently visible
93 | feature that (1) displays an appropriate copyright notice, and (2)
94 | tells the user that there is no warranty for the work (except to the
95 | extent that warranties are provided), that licensees may convey the
96 | work under this License, and how to view a copy of this License. If
97 | the interface presents a list of user commands or options, such as a
98 | menu, a prominent item in the list meets this criterion.
99 |
100 | 1. Source Code.
101 |
102 | The "source code" for a work means the preferred form of the work
103 | for making modifications to it. "Object code" means any non-source
104 | form of a work.
105 |
106 | A "Standard Interface" means an interface that either is an official
107 | standard defined by a recognized standards body, or, in the case of
108 | interfaces specified for a particular programming language, one that
109 | is widely used among developers working in that language.
110 |
111 | The "System Libraries" of an executable work include anything, other
112 | than the work as a whole, that (a) is included in the normal form of
113 | packaging a Major Component, but which is not part of that Major
114 | Component, and (b) serves only to enable use of the work with that
115 | Major Component, or to implement a Standard Interface for which an
116 | implementation is available to the public in source code form. A
117 | "Major Component", in this context, means a major essential component
118 | (kernel, window system, and so on) of the specific operating system
119 | (if any) on which the executable work runs, or a compiler used to
120 | produce the work, or an object code interpreter used to run it.
121 |
122 | The "Corresponding Source" for a work in object code form means all
123 | the source code needed to generate, install, and (for an executable
124 | work) run the object code and to modify the work, including scripts to
125 | control those activities. However, it does not include the work's
126 | System Libraries, or general-purpose tools or generally available free
127 | programs which are used unmodified in performing those activities but
128 | which are not part of the work. For example, Corresponding Source
129 | includes interface definition files associated with source files for
130 | the work, and the source code for shared libraries and dynamically
131 | linked subprograms that the work is specifically designed to require,
132 | such as by intimate data communication or control flow between those
133 | subprograms and other parts of the work.
134 |
135 | The Corresponding Source need not include anything that users
136 | can regenerate automatically from other parts of the Corresponding
137 | Source.
138 |
139 | The Corresponding Source for a work in source code form is that
140 | same work.
141 |
142 | 2. Basic Permissions.
143 |
144 | All rights granted under this License are granted for the term of
145 | copyright on the Program, and are irrevocable provided the stated
146 | conditions are met. This License explicitly affirms your unlimited
147 | permission to run the unmodified Program. The output from running a
148 | covered work is covered by this License only if the output, given its
149 | content, constitutes a covered work. This License acknowledges your
150 | rights of fair use or other equivalent, as provided by copyright law.
151 |
152 | You may make, run and propagate covered works that you do not
153 | convey, without conditions so long as your license otherwise remains
154 | in force. You may convey covered works to others for the sole purpose
155 | of having them make modifications exclusively for you, or provide you
156 | with facilities for running those works, provided that you comply with
157 | the terms of this License in conveying all material for which you do
158 | not control copyright. Those thus making or running the covered works
159 | for you must do so exclusively on your behalf, under your direction
160 | and control, on terms that prohibit them from making any copies of
161 | your copyrighted material outside their relationship with you.
162 |
163 | Conveying under any other circumstances is permitted solely under
164 | the conditions stated below. Sublicensing is not allowed; section 10
165 | makes it unnecessary.
166 |
167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168 |
169 | No covered work shall be deemed part of an effective technological
170 | measure under any applicable law fulfilling obligations under article
171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172 | similar laws prohibiting or restricting circumvention of such
173 | measures.
174 |
175 | When you convey a covered work, you waive any legal power to forbid
176 | circumvention of technological measures to the extent such circumvention
177 | is effected by exercising rights under this License with respect to
178 | the covered work, and you disclaim any intention to limit operation or
179 | modification of the work as a means of enforcing, against the work's
180 | users, your or third parties' legal rights to forbid circumvention of
181 | technological measures.
182 |
183 | 4. Conveying Verbatim Copies.
184 |
185 | You may convey verbatim copies of the Program's source code as you
186 | receive it, in any medium, provided that you conspicuously and
187 | appropriately publish on each copy an appropriate copyright notice;
188 | keep intact all notices stating that this License and any
189 | non-permissive terms added in accord with section 7 apply to the code;
190 | keep intact all notices of the absence of any warranty; and give all
191 | recipients a copy of this License along with the Program.
192 |
193 | You may charge any price or no price for each copy that you convey,
194 | and you may offer support or warranty protection for a fee.
195 |
196 | 5. Conveying Modified Source Versions.
197 |
198 | You may convey a work based on the Program, or the modifications to
199 | produce it from the Program, in the form of source code under the
200 | terms of section 4, provided that you also meet all of these conditions:
201 |
202 | a) The work must carry prominent notices stating that you modified
203 | it, and giving a relevant date.
204 |
205 | b) The work must carry prominent notices stating that it is
206 | released under this License and any conditions added under section
207 | 7. This requirement modifies the requirement in section 4 to
208 | "keep intact all notices".
209 |
210 | c) You must license the entire work, as a whole, under this
211 | License to anyone who comes into possession of a copy. This
212 | License will therefore apply, along with any applicable section 7
213 | additional terms, to the whole of the work, and all its parts,
214 | regardless of how they are packaged. This License gives no
215 | permission to license the work in any other way, but it does not
216 | invalidate such permission if you have separately received it.
217 |
218 | d) If the work has interactive user interfaces, each must display
219 | Appropriate Legal Notices; however, if the Program has interactive
220 | interfaces that do not display Appropriate Legal Notices, your
221 | work need not make them do so.
222 |
223 | A compilation of a covered work with other separate and independent
224 | works, which are not by their nature extensions of the covered work,
225 | and which are not combined with it such as to form a larger program,
226 | in or on a volume of a storage or distribution medium, is called an
227 | "aggregate" if the compilation and its resulting copyright are not
228 | used to limit the access or legal rights of the compilation's users
229 | beyond what the individual works permit. Inclusion of a covered work
230 | in an aggregate does not cause this License to apply to the other
231 | parts of the aggregate.
232 |
233 | 6. Conveying Non-Source Forms.
234 |
235 | You may convey a covered work in object code form under the terms
236 | of sections 4 and 5, provided that you also convey the
237 | machine-readable Corresponding Source under the terms of this License,
238 | in one of these ways:
239 |
240 | a) Convey the object code in, or embodied in, a physical product
241 | (including a physical distribution medium), accompanied by the
242 | Corresponding Source fixed on a durable physical medium
243 | customarily used for software interchange.
244 |
245 | b) Convey the object code in, or embodied in, a physical product
246 | (including a physical distribution medium), accompanied by a
247 | written offer, valid for at least three years and valid for as
248 | long as you offer spare parts or customer support for that product
249 | model, to give anyone who possesses the object code either (1) a
250 | copy of the Corresponding Source for all the software in the
251 | product that is covered by this License, on a durable physical
252 | medium customarily used for software interchange, for a price no
253 | more than your reasonable cost of physically performing this
254 | conveying of source, or (2) access to copy the
255 | Corresponding Source from a network server at no charge.
256 |
257 | c) Convey individual copies of the object code with a copy of the
258 | written offer to provide the Corresponding Source. This
259 | alternative is allowed only occasionally and noncommercially, and
260 | only if you received the object code with such an offer, in accord
261 | with subsection 6b.
262 |
263 | d) Convey the object code by offering access from a designated
264 | place (gratis or for a charge), and offer equivalent access to the
265 | Corresponding Source in the same way through the same place at no
266 | further charge. You need not require recipients to copy the
267 | Corresponding Source along with the object code. If the place to
268 | copy the object code is a network server, the Corresponding Source
269 | may be on a different server (operated by you or a third party)
270 | that supports equivalent copying facilities, provided you maintain
271 | clear directions next to the object code saying where to find the
272 | Corresponding Source. Regardless of what server hosts the
273 | Corresponding Source, you remain obligated to ensure that it is
274 | available for as long as needed to satisfy these requirements.
275 |
276 | e) Convey the object code using peer-to-peer transmission, provided
277 | you inform other peers where the object code and Corresponding
278 | Source of the work are being offered to the general public at no
279 | charge under subsection 6d.
280 |
281 | A separable portion of the object code, whose source code is excluded
282 | from the Corresponding Source as a System Library, need not be
283 | included in conveying the object code work.
284 |
285 | A "User Product" is either (1) a "consumer product", which means any
286 | tangible personal property which is normally used for personal, family,
287 | or household purposes, or (2) anything designed or sold for incorporation
288 | into a dwelling. In determining whether a product is a consumer product,
289 | doubtful cases shall be resolved in favor of coverage. For a particular
290 | product received by a particular user, "normally used" refers to a
291 | typical or common use of that class of product, regardless of the status
292 | of the particular user or of the way in which the particular user
293 | actually uses, or expects or is expected to use, the product. A product
294 | is a consumer product regardless of whether the product has substantial
295 | commercial, industrial or non-consumer uses, unless such uses represent
296 | the only significant mode of use of the product.
297 |
298 | "Installation Information" for a User Product means any methods,
299 | procedures, authorization keys, or other information required to install
300 | and execute modified versions of a covered work in that User Product from
301 | a modified version of its Corresponding Source. The information must
302 | suffice to ensure that the continued functioning of the modified object
303 | code is in no case prevented or interfered with solely because
304 | modification has been made.
305 |
306 | If you convey an object code work under this section in, or with, or
307 | specifically for use in, a User Product, and the conveying occurs as
308 | part of a transaction in which the right of possession and use of the
309 | User Product is transferred to the recipient in perpetuity or for a
310 | fixed term (regardless of how the transaction is characterized), the
311 | Corresponding Source conveyed under this section must be accompanied
312 | by the Installation Information. But this requirement does not apply
313 | if neither you nor any third party retains the ability to install
314 | modified object code on the User Product (for example, the work has
315 | been installed in ROM).
316 |
317 | The requirement to provide Installation Information does not include a
318 | requirement to continue to provide support service, warranty, or updates
319 | for a work that has been modified or installed by the recipient, or for
320 | the User Product in which it has been modified or installed. Access to a
321 | network may be denied when the modification itself materially and
322 | adversely affects the operation of the network or violates the rules and
323 | protocols for communication across the network.
324 |
325 | Corresponding Source conveyed, and Installation Information provided,
326 | in accord with this section must be in a format that is publicly
327 | documented (and with an implementation available to the public in
328 | source code form), and must require no special password or key for
329 | unpacking, reading or copying.
330 |
331 | 7. Additional Terms.
332 |
333 | "Additional permissions" are terms that supplement the terms of this
334 | License by making exceptions from one or more of its conditions.
335 | Additional permissions that are applicable to the entire Program shall
336 | be treated as though they were included in this License, to the extent
337 | that they are valid under applicable law. If additional permissions
338 | apply only to part of the Program, that part may be used separately
339 | under those permissions, but the entire Program remains governed by
340 | this License without regard to the additional permissions.
341 |
342 | When you convey a copy of a covered work, you may at your option
343 | remove any additional permissions from that copy, or from any part of
344 | it. (Additional permissions may be written to require their own
345 | removal in certain cases when you modify the work.) You may place
346 | additional permissions on material, added by you to a covered work,
347 | for which you have or can give appropriate copyright permission.
348 |
349 | Notwithstanding any other provision of this License, for material you
350 | add to a covered work, you may (if authorized by the copyright holders of
351 | that material) supplement the terms of this License with terms:
352 |
353 | a) Disclaiming warranty or limiting liability differently from the
354 | terms of sections 15 and 16 of this License; or
355 |
356 | b) Requiring preservation of specified reasonable legal notices or
357 | author attributions in that material or in the Appropriate Legal
358 | Notices displayed by works containing it; or
359 |
360 | c) Prohibiting misrepresentation of the origin of that material, or
361 | requiring that modified versions of such material be marked in
362 | reasonable ways as different from the original version; or
363 |
364 | d) Limiting the use for publicity purposes of names of licensors or
365 | authors of the material; or
366 |
367 | e) Declining to grant rights under trademark law for use of some
368 | trade names, trademarks, or service marks; or
369 |
370 | f) Requiring indemnification of licensors and authors of that
371 | material by anyone who conveys the material (or modified versions of
372 | it) with contractual assumptions of liability to the recipient, for
373 | any liability that these contractual assumptions directly impose on
374 | those licensors and authors.
375 |
376 | All other non-permissive additional terms are considered "further
377 | restrictions" within the meaning of section 10. If the Program as you
378 | received it, or any part of it, contains a notice stating that it is
379 | governed by this License along with a term that is a further
380 | restriction, you may remove that term. If a license document contains
381 | a further restriction but permits relicensing or conveying under this
382 | License, you may add to a covered work material governed by the terms
383 | of that license document, provided that the further restriction does
384 | not survive such relicensing or conveying.
385 |
386 | If you add terms to a covered work in accord with this section, you
387 | must place, in the relevant source files, a statement of the
388 | additional terms that apply to those files, or a notice indicating
389 | where to find the applicable terms.
390 |
391 | Additional terms, permissive or non-permissive, may be stated in the
392 | form of a separately written license, or stated as exceptions;
393 | the above requirements apply either way.
394 |
395 | 8. Termination.
396 |
397 | You may not propagate or modify a covered work except as expressly
398 | provided under this License. Any attempt otherwise to propagate or
399 | modify it is void, and will automatically terminate your rights under
400 | this License (including any patent licenses granted under the third
401 | paragraph of section 11).
402 |
403 | However, if you cease all violation of this License, then your
404 | license from a particular copyright holder is reinstated (a)
405 | provisionally, unless and until the copyright holder explicitly and
406 | finally terminates your license, and (b) permanently, if the copyright
407 | holder fails to notify you of the violation by some reasonable means
408 | prior to 60 days after the cessation.
409 |
410 | Moreover, your license from a particular copyright holder is
411 | reinstated permanently if the copyright holder notifies you of the
412 | violation by some reasonable means, this is the first time you have
413 | received notice of violation of this License (for any work) from that
414 | copyright holder, and you cure the violation prior to 30 days after
415 | your receipt of the notice.
416 |
417 | Termination of your rights under this section does not terminate the
418 | licenses of parties who have received copies or rights from you under
419 | this License. If your rights have been terminated and not permanently
420 | reinstated, you do not qualify to receive new licenses for the same
421 | material under section 10.
422 |
423 | 9. Acceptance Not Required for Having Copies.
424 |
425 | You are not required to accept this License in order to receive or
426 | run a copy of the Program. Ancillary propagation of a covered work
427 | occurring solely as a consequence of using peer-to-peer transmission
428 | to receive a copy likewise does not require acceptance. However,
429 | nothing other than this License grants you permission to propagate or
430 | modify any covered work. These actions infringe copyright if you do
431 | not accept this License. Therefore, by modifying or propagating a
432 | covered work, you indicate your acceptance of this License to do so.
433 |
434 | 10. Automatic Licensing of Downstream Recipients.
435 |
436 | Each time you convey a covered work, the recipient automatically
437 | receives a license from the original licensors, to run, modify and
438 | propagate that work, subject to this License. You are not responsible
439 | for enforcing compliance by third parties with this License.
440 |
441 | An "entity transaction" is a transaction transferring control of an
442 | organization, or substantially all assets of one, or subdividing an
443 | organization, or merging organizations. If propagation of a covered
444 | work results from an entity transaction, each party to that
445 | transaction who receives a copy of the work also receives whatever
446 | licenses to the work the party's predecessor in interest had or could
447 | give under the previous paragraph, plus a right to possession of the
448 | Corresponding Source of the work from the predecessor in interest, if
449 | the predecessor has it or can get it with reasonable efforts.
450 |
451 | You may not impose any further restrictions on the exercise of the
452 | rights granted or affirmed under this License. For example, you may
453 | not impose a license fee, royalty, or other charge for exercise of
454 | rights granted under this License, and you may not initiate litigation
455 | (including a cross-claim or counterclaim in a lawsuit) alleging that
456 | any patent claim is infringed by making, using, selling, offering for
457 | sale, or importing the Program or any portion of it.
458 |
459 | 11. Patents.
460 |
461 | A "contributor" is a copyright holder who authorizes use under this
462 | License of the Program or a work on which the Program is based. The
463 | work thus licensed is called the contributor's "contributor version".
464 |
465 | A contributor's "essential patent claims" are all patent claims
466 | owned or controlled by the contributor, whether already acquired or
467 | hereafter acquired, that would be infringed by some manner, permitted
468 | by this License, of making, using, or selling its contributor version,
469 | but do not include claims that would be infringed only as a
470 | consequence of further modification of the contributor version. For
471 | purposes of this definition, "control" includes the right to grant
472 | patent sublicenses in a manner consistent with the requirements of
473 | this License.
474 |
475 | Each contributor grants you a non-exclusive, worldwide, royalty-free
476 | patent license under the contributor's essential patent claims, to
477 | make, use, sell, offer for sale, import and otherwise run, modify and
478 | propagate the contents of its contributor version.
479 |
480 | In the following three paragraphs, a "patent license" is any express
481 | agreement or commitment, however denominated, not to enforce a patent
482 | (such as an express permission to practice a patent or covenant not to
483 | sue for patent infringement). To "grant" such a patent license to a
484 | party means to make such an agreement or commitment not to enforce a
485 | patent against the party.
486 |
487 | If you convey a covered work, knowingly relying on a patent license,
488 | and the Corresponding Source of the work is not available for anyone
489 | to copy, free of charge and under the terms of this License, through a
490 | publicly available network server or other readily accessible means,
491 | then you must either (1) cause the Corresponding Source to be so
492 | available, or (2) arrange to deprive yourself of the benefit of the
493 | patent license for this particular work, or (3) arrange, in a manner
494 | consistent with the requirements of this License, to extend the patent
495 | license to downstream recipients. "Knowingly relying" means you have
496 | actual knowledge that, but for the patent license, your conveying the
497 | covered work in a country, or your recipient's use of the covered work
498 | in a country, would infringe one or more identifiable patents in that
499 | country that you have reason to believe are valid.
500 |
501 | If, pursuant to or in connection with a single transaction or
502 | arrangement, you convey, or propagate by procuring conveyance of, a
503 | covered work, and grant a patent license to some of the parties
504 | receiving the covered work authorizing them to use, propagate, modify
505 | or convey a specific copy of the covered work, then the patent license
506 | you grant is automatically extended to all recipients of the covered
507 | work and works based on it.
508 |
509 | A patent license is "discriminatory" if it does not include within
510 | the scope of its coverage, prohibits the exercise of, or is
511 | conditioned on the non-exercise of one or more of the rights that are
512 | specifically granted under this License. You may not convey a covered
513 | work if you are a party to an arrangement with a third party that is
514 | in the business of distributing software, under which you make payment
515 | to the third party based on the extent of your activity of conveying
516 | the work, and under which the third party grants, to any of the
517 | parties who would receive the covered work from you, a discriminatory
518 | patent license (a) in connection with copies of the covered work
519 | conveyed by you (or copies made from those copies), or (b) primarily
520 | for and in connection with specific products or compilations that
521 | contain the covered work, unless you entered into that arrangement,
522 | or that patent license was granted, prior to 28 March 2007.
523 |
524 | Nothing in this License shall be construed as excluding or limiting
525 | any implied license or other defenses to infringement that may
526 | otherwise be available to you under applicable patent law.
527 |
528 | 12. No Surrender of Others' Freedom.
529 |
530 | If conditions are imposed on you (whether by court order, agreement or
531 | otherwise) that contradict the conditions of this License, they do not
532 | excuse you from the conditions of this License. If you cannot convey a
533 | covered work so as to satisfy simultaneously your obligations under this
534 | License and any other pertinent obligations, then as a consequence you may
535 | not convey it at all. For example, if you agree to terms that obligate you
536 | to collect a royalty for further conveying from those to whom you convey
537 | the Program, the only way you could satisfy both those terms and this
538 | License would be to refrain entirely from conveying the Program.
539 |
540 | 13. Remote Network Interaction; Use with the GNU General Public License.
541 |
542 | Notwithstanding any other provision of this License, if you modify the
543 | Program, your modified version must prominently offer all users
544 | interacting with it remotely through a computer network (if your version
545 | supports such interaction) an opportunity to receive the Corresponding
546 | Source of your version by providing access to the Corresponding Source
547 | from a network server at no charge, through some standard or customary
548 | means of facilitating copying of software. This Corresponding Source
549 | shall include the Corresponding Source for any work covered by version 3
550 | of the GNU General Public License that is incorporated pursuant to the
551 | following paragraph.
552 |
553 | Notwithstanding any other provision of this License, you have
554 | permission to link or combine any covered work with a work licensed
555 | under version 3 of the GNU General Public License into a single
556 | combined work, and to convey the resulting work. The terms of this
557 | License will continue to apply to the part which is the covered work,
558 | but the work with which it is combined will remain governed by version
559 | 3 of the GNU General Public License.
560 |
561 | 14. Revised Versions of this License.
562 |
563 | The Free Software Foundation may publish revised and/or new versions of
564 | the GNU Affero General Public License from time to time. Such new versions
565 | will be similar in spirit to the present version, but may differ in detail to
566 | address new problems or concerns.
567 |
568 | Each version is given a distinguishing version number. If the
569 | Program specifies that a certain numbered version of the GNU Affero General
570 | Public License "or any later version" applies to it, you have the
571 | option of following the terms and conditions either of that numbered
572 | version or of any later version published by the Free Software
573 | Foundation. If the Program does not specify a version number of the
574 | GNU Affero General Public License, you may choose any version ever published
575 | by the Free Software Foundation.
576 |
577 | If the Program specifies that a proxy can decide which future
578 | versions of the GNU Affero General Public License can be used, that proxy's
579 | public statement of acceptance of a version permanently authorizes you
580 | to choose that version for the Program.
581 |
582 | Later license versions may give you additional or different
583 | permissions. However, no additional obligations are imposed on any
584 | author or copyright holder as a result of your choosing to follow a
585 | later version.
586 |
587 | 15. Disclaimer of Warranty.
588 |
589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597 |
598 | 16. Limitation of Liability.
599 |
600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608 | SUCH DAMAGES.
609 |
610 | 17. Interpretation of Sections 15 and 16.
611 |
612 | If the disclaimer of warranty and limitation of liability provided
613 | above cannot be given local legal effect according to their terms,
614 | reviewing courts shall apply local law that most closely approximates
615 | an absolute waiver of all civil liability in connection with the
616 | Program, unless a warranty or assumption of liability accompanies a
617 | copy of the Program in return for a fee.
618 |
619 | END OF TERMS AND CONDITIONS
620 |
621 | How to Apply These Terms to Your New Programs
622 |
623 | If you develop a new program, and you want it to be of the greatest
624 | possible use to the public, the best way to achieve this is to make it
625 | free software which everyone can redistribute and change under these terms.
626 |
627 | To do so, attach the following notices to the program. It is safest
628 | to attach them to the start of each source file to most effectively
629 | state the exclusion of warranty; and each file should have at least
630 | the "copyright" line and a pointer to where the full notice is found.
631 |
632 |     <one line to give the program's name and a brief idea of what it does.>
633 |     Copyright (C) <year>  <name of author>
634 |
635 | This program is free software: you can redistribute it and/or modify
636 | it under the terms of the GNU Affero General Public License as published by
637 | the Free Software Foundation, either version 3 of the License, or
638 | (at your option) any later version.
639 |
640 | This program is distributed in the hope that it will be useful,
641 | but WITHOUT ANY WARRANTY; without even the implied warranty of
642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643 | GNU Affero General Public License for more details.
644 |
645 | You should have received a copy of the GNU Affero General Public License
646 | along with this program. If not, see <https://www.gnu.org/licenses/>.
647 |
648 | Also add information on how to contact you by electronic and paper mail.
649 |
650 | If your software can interact with users remotely through a computer
651 | network, you should also make sure that it provides a way for users to
652 | get its source. For example, if your program is a web application, its
653 | interface could display a "Source" link that leads users to an archive
654 | of the code. There are many ways you could offer source, and different
655 | solutions will be better for different programs; see section 13 for the
656 | specific requirements.
657 |
658 | You should also get your employer (if you work as a programmer) or school,
659 | if any, to sign a "copyright disclaimer" for the program, if necessary.
660 | For more information on this, and how to apply and follow the GNU AGPL, see
661 | <https://www.gnu.org/licenses/>.
662 |
--------------------------------------------------------------------------------