├── .eslintrc.cjs ├── .github └── workflows │ └── broker-image.yml ├── .gitignore ├── .prettierrc.json ├── Dockerfile ├── LICENSE ├── README.md ├── build.js ├── diagrams.excalidraw ├── jest.config.js ├── map.png ├── package-lock.json ├── package.json ├── src ├── README.md ├── client │ ├── README.md │ ├── diagram.png │ ├── incremental-response.ts │ ├── map.png │ ├── pending-request.ts │ ├── request-manager.ts │ ├── session.ts │ └── socket-manager.ts ├── common.ts ├── index.ts ├── map.png ├── protocol │ ├── README.md │ ├── common.ts │ ├── decoder.ts │ ├── encoder.ts │ ├── header.ts │ ├── internal │ │ ├── README.md │ │ ├── common.ts │ │ ├── fetch.ts │ │ ├── list-offsets.ts │ │ ├── map.png │ │ └── produce.ts │ ├── kafka │ │ ├── README.md │ │ ├── common.ts │ │ ├── fetch.ts │ │ ├── list-offsets.ts │ │ ├── map.png │ │ ├── metadata.ts │ │ └── produce.ts │ └── map.png └── state │ ├── README.md │ ├── chunk.ts │ ├── cluster.ts │ ├── map.png │ ├── partition.ts │ └── pending-fetch.ts ├── test ├── README.md ├── __snapshots__ │ └── index.test.ts.snap ├── client │ └── incremental-response.test.ts ├── common.ts ├── globals.d.ts ├── index.test.ts ├── protocol │ ├── __snapshots__ │ │ └── header.test.ts.snap │ ├── common.ts │ ├── header.test.ts │ ├── internal │ │ ├── fetch.test.ts │ │ ├── list-offsets.test.ts │ │ └── produce.test.ts │ └── kafka │ │ ├── __snapshots__ │ │ ├── fetch.test.ts.snap │ │ ├── list-offsets.test.ts.snap │ │ ├── metadata.test.ts.snap │ │ └── produce.test.ts.snap │ │ ├── fetch.test.ts │ │ ├── list-offsets.test.ts │ │ ├── metadata.test.ts │ │ └── produce.test.ts ├── state │ ├── __snapshots__ │ │ └── chunk.test.ts.snap │ ├── chunk.test.ts │ └── cluster.test.ts └── tsconfig.json ├── tsconfig.json └── wrangler.toml /.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | extends: [ 3 | "plugin:@typescript-eslint/recommended", 4 | "plugin:@typescript-eslint/recommended-requiring-type-checking", 5 | "plugin:@typescript-eslint/strict", 6 | "plugin:import/recommended", 7 | "plugin:import/typescript", 8 | "prettier", 9 | ], 10 | parser: "@typescript-eslint/parser", 11 | parserOptions: { 12 | tsconfigRootDir: __dirname, 13 | project: ["./tsconfig.json", "./test/tsconfig.json"], 14 | }, 15 | plugins: ["@typescript-eslint", "import"], 16 | root: true, 17 | ignorePatterns: [".eslintrc.js", "jest.config.js", "build.js"], 18 | settings: { 19 | "import/resolver": { 20 | typescript: true, 21 | node: true, 22 | }, 23 | }, 24 | rules: { 25 | // Sort import statements by module name 26 | "import/order": [ 27 | "warn", 28 | { 29 | groups: ["external", "internal"], 30 | alphabetize: { 31 | order: "asc", 32 | }, 33 | }, 34 | ], 35 | // Sort imported members within the same import statement by name 36 | "sort-imports": [ 37 | "warn", 38 | { 39 | ignoreDeclarationSort: true, 40 | }, 41 | ], 42 | }, 43 | }; 44 | -------------------------------------------------------------------------------- /.github/workflows/broker-image.yml: -------------------------------------------------------------------------------- 1 | # Adapted from https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages 2 | # Also from https://github.com/docker/build-push-action/blob/master/docs/advanced/multi-platform.md 3 | 4 | on: 5 | push: 6 | branches: ['main'] 7 | 8 | env: 9 | REGISTRY: ghcr.io 10 | IMAGE_NAME: kafka-worker 11 | 12 | jobs: 13 | build-and-push-image: 14 | runs-on: ubuntu-latest 15 | permissions: 16 | 
contents: read 17 | packages: write 18 | 19 | steps: 20 | - name: Checkout repository 21 | uses: actions/checkout@v3 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v2 25 | 26 | - name: Set up Docker Buildx 27 | uses: docker/setup-buildx-action@v2 28 | 29 | - name: Log in to the Container registry 30 | uses: docker/login-action@v2 31 | with: 32 | registry: ${{ env.REGISTRY }} 33 | username: ${{ github.actor }} 34 | password: ${{ secrets.GITHUB_TOKEN }} 35 | 36 | - name: Extract metadata (tags, labels) for Docker 37 | id: meta 38 | uses: docker/metadata-action@v4 39 | with: 40 | images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.IMAGE_NAME }} 41 | 42 | - name: Build and push Docker image 43 | uses: docker/build-push-action@v3 44 | with: 45 | context: . 46 | platforms: linux/amd64,linux/arm64 47 | push: true 48 | tags: ${{ steps.meta.outputs.tags }} 49 | labels: ${{ steps.meta.outputs.labels }} 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | 3 | logs 4 | _.log 5 | npm-debug.log_ 6 | yarn-debug.log* 7 | yarn-error.log* 8 | lerna-debug.log* 9 | .pnpm-debug.log* 10 | 11 | # Diagnostic reports (https://nodejs.org/api/report.html) 12 | 13 | report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json 14 | 15 | # Runtime data 16 | 17 | pids 18 | _.pid 19 | _.seed 20 | \*.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | 24 | lib-cov 25 | 26 | # Coverage directory used by tools like istanbul 27 | 28 | coverage 29 | \*.lcov 30 | 31 | # nyc test coverage 32 | 33 | .nyc_output 34 | 35 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 36 | 37 | .grunt 38 | 39 | # Bower dependency directory (https://bower.io/) 40 | 41 | bower_components 42 | 43 | # node-waf configuration 44 | 45 | .lock-wscript 46 | 47 | # Compiled binary addons (https://nodejs.org/api/addons.html) 48 | 49 | build/Release 50 | 51 | # Dependency directories 52 | 53 | node_modules/ 54 | jspm_packages/ 55 | 56 | # Snowpack dependency directory (https://snowpack.dev/) 57 | 58 | web_modules/ 59 | 60 | # TypeScript cache 61 | 62 | \*.tsbuildinfo 63 | 64 | # Optional npm cache directory 65 | 66 | .npm 67 | 68 | # Optional eslint cache 69 | 70 | .eslintcache 71 | 72 | # Optional stylelint cache 73 | 74 | .stylelintcache 75 | 76 | # Microbundle cache 77 | 78 | .rpt2_cache/ 79 | .rts2_cache_cjs/ 80 | .rts2_cache_es/ 81 | .rts2_cache_umd/ 82 | 83 | # Optional REPL history 84 | 85 | .node_repl_history 86 | 87 | # Output of 'npm pack' 88 | 89 | \*.tgz 90 | 91 | # Yarn Integrity file 92 | 93 | .yarn-integrity 94 | 95 | # dotenv environment variable files 96 | 97 | .env 98 | .env.development.local 99 | .env.test.local 100 | .env.production.local 101 | .env.local 102 | 103 | # parcel-bundler cache (https://parceljs.org/) 104 | 105 | .cache 106 | .parcel-cache 107 | 108 | # Next.js build output 109 | 110 | .next 111 | out 112 | 113 | # Nuxt.js build / generate output 114 | 115 | .nuxt 116 | dist 117 | 118 | # Gatsby files 119 | 120 | .cache/ 121 | 122 | # Comment in the public line in if your project uses Gatsby and not Next.js 123 | 124 | # https://nextjs.org/blog/next-9-1#public-directory-support 125 | 126 | # public 127 | 128 | # vuepress build output 129 | 130 | .vuepress/dist 131 | 132 | # vuepress v2.x temp and cache directory 133 | 134 | .temp 135 | .cache 136 | 137 | # Docusaurus cache and generated files 138 | 
139 | .docusaurus 140 | 141 | # Serverless directories 142 | 143 | .serverless/ 144 | 145 | # FuseBox cache 146 | 147 | .fusebox/ 148 | 149 | # DynamoDB Local files 150 | 151 | .dynamodb/ 152 | 153 | # TernJS port file 154 | 155 | .tern-port 156 | 157 | # Stores VSCode versions used for testing VSCode extensions 158 | 159 | .vscode-test 160 | 161 | # yarn v2 162 | 163 | .yarn/cache 164 | .yarn/unplugged 165 | .yarn/build-state.yml 166 | .yarn/install-state.gz 167 | .pnp.\* 168 | 169 | # wrangler project 170 | 171 | .dev.vars 172 | 173 | .log 174 | *.~undo-tree~ -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM node:16.17-alpine3.16 4 | 5 | WORKDIR /app 6 | 7 | COPY package.json ./ 8 | COPY package-lock.json ./ 9 | RUN npm install 10 | 11 | COPY src ./src/ 12 | COPY build.js tsconfig.json ./ 13 | RUN npm run build 14 | 15 | COPY ./wrangler.toml ./ 16 | 17 | EXPOSE 8787 18 | 19 | ENTRYPOINT [ "npm", "start" ] 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Max Peterson 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kafka Worker 2 | 3 | A Kafka 0.8.0 broker implementation on top of Cloudflare Workers and Durable Objects. This broker supports 4 client-facing APIs: 4 | 5 | 1. [Produce API](https://kafka.apache.org/protocol.html#The_Messages_Produce) (Version: 0) 6 | 2. [Fetch API](https://kafka.apache.org/protocol.html#The_Messages_Fetch) (Version: 0) 7 | 3. [ListOffsets API](https://kafka.apache.org/protocol.html#The_Messages_ListOffsets) (Version: 0) 8 | 4. [Metadata API](https://kafka.apache.org/protocol.html#The_Messages_Metadata) (Version: 0) 9 | 10 | No other APIs (such as internal APIs used for administrative purposes) or API versions are supported. 
This project is a relatively simple proof of concept, and not intended to be used for anything serious. 11 | 12 | ## Local Demo 13 | 14 | For a fully local demo, no Cloudflare account required, check out the [`kafka-worker-demo`](https://github.com/maxwellpeterson/kafka-worker-demo) project. 15 | 16 | ## WebSocket Shim 17 | 18 | The Kafka protocol uses TCP. Cloudflare Workers does not support plain TCP connections (at least until [Socket Workers](https://blog.cloudflare.com/introducing-socket-workers/) is available), but it does support WebSocket connections. To make this work, we need to frame Kafka protocol messages sent over TCP into WebSocket messages. The [`kafka-websocket-shim`](https://github.com/maxwellpeterson/kafka-websocket-shim) project implements this functionality, and must be used to connect to this broker. 19 | 20 | ## Design 21 | 22 | If you are unfamiliar with Kafka's design philosophy, I would recommend reading [this article by Jay Kreps](https://engineering.linkedin.com/distributed-systems/log-what-every-software-engineer-should-know-about-real-time-datas-unifying) first. 23 | 24 | Kafka 0.8.0 dates all the way back to 2013, which was a much simpler era. There are only 2 types of state that we need to keep track of: records stored in partitions, and information about which topics and partitions exist. In this design, a Durable Object is created for each partition, and a single Durable Object is created for topic and partition information (this is referred to as the "global cluster DO"). Just like normal Kafka, writes to separate partitions are independent operations, and records within each partition are guaranteed to be read in the same order they were written. 25 | 26 | Each client connection is handled by a "gateway worker" that runs in the Cloudflare data center (also referred to as "colo") closest to the client's location. The gateway worker handles requests from the client, and makes internal subrequests to partition DOs and the global cluster DO as needed. Each DO contacted by the gateway worker may be located in the same data center as the gateway worker instance (the ideal case), or may be located somewhere else. 27 | 28 | ### What about replication? What about leadership election? 29 | 30 | There is none, at least not in the application code. The point of using Durable Objects here is that we can offload these complexities onto the infrastructure layer, and keep our application code focused and minimal. As described in the [initial blog post](https://blog.cloudflare.com/introducing-workers-durable-objects/), this is the serverless philosophy applied to persistent state. Replication and strong consistency mechanisms are implemented internally, and all we need to do is use the Durable Object API to reap these benefits. Building a Kafka broker without implementing replication and leadership election feels like cheating, but also makes this a much more tractable project. 31 | 32 | In this design, each deployment has one logical broker that spans Cloudflare's entire network. The hostname and port of this broker is the hostname and port of the gateway worker. Translated to the traditional Kafka model, this broker is the leader node for all partitions, and all partitions have zero replica nodes and zero ISR nodes. Again, there is still replication happening, it's just not visible to the broker implementation. 
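
To make this concrete, here is a rough sketch (condensed and hypothetical, not the actual source) of how the gateway worker addresses a partition. The real logic lives in `src/client/socket-manager.ts`, which also deduplicates in-flight connections and handles aborts; the point is that "finding the leader for a partition" collapses into a single `idFromName` call, and the Workers runtime takes care of placement, persistence, and consistency behind that name.

```typescript
// Sketch only: see src/client/socket-manager.ts for the real implementation
import { Env } from "src/common";
import { PartitionInfo, partitionStubUrl } from "src/state/partition";

async function openPartitionSocket(
  env: Env,
  topic: string,
  index: number
): Promise<WebSocket> {
  // One Durable Object per partition: the (topic, index) pair becomes the DO
  // name, and the runtime routes every request for that name to one instance
  const partition = new PartitionInfo(topic, index);
  const objId = env.PARTITION.idFromName(partition.id);
  const stub = env.PARTITION.get(objId);

  // Internal requests are exchanged over a WebSocket opened against the stub
  const response = await stub.fetch(partitionStubUrl, {
    headers: { Upgrade: "websocket" },
  });
  const socket = response.webSocket;
  if (socket === null) {
    throw new Error("Expected WebSocket in response from Partition DO");
  }
  socket.accept();
  return socket;
}
```

The global cluster DO is reached the same way, presumably just under a fixed name rather than one derived from a topic and partition index, since there is only one of it per deployment.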
33 | 34 | ## Map 35 | 36 | ![kafka worker map](map.png) 37 | -------------------------------------------------------------------------------- /build.js: -------------------------------------------------------------------------------- 1 | import { build } from "esbuild"; 2 | import path from "path"; 3 | import { fileURLToPath } from "url"; 4 | 5 | // Build script, taken from: 6 | // https://github.com/cloudflare/miniflare-typescript-esbuild-jest/blob/master/build.js 7 | 8 | const __filename = fileURLToPath(import.meta.url); 9 | const __dirname = path.dirname(__filename); 10 | 11 | try { 12 | await build({ 13 | bundle: true, 14 | sourcemap: true, 15 | format: "esm", 16 | target: "esnext", 17 | external: ["__STATIC_CONTENT_MANIFEST"], 18 | conditions: ["worker", "browser"], 19 | entryPoints: [path.join(__dirname, "src", "index.ts")], 20 | outdir: path.join(__dirname, "dist"), 21 | outExtension: { ".js": ".mjs" }, 22 | }); 23 | } catch { 24 | process.exitCode = 1; 25 | } 26 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | // Jest integration with miniflare, taken from: 2 | // https://github.com/cloudflare/miniflare-typescript-esbuild-jest/blob/master/jest.config.js 3 | export default { 4 | preset: "ts-jest/presets/default-esm", 5 | globals: { 6 | "ts-jest": { 7 | tsconfig: "test/tsconfig.json", 8 | useESM: true, 9 | }, 10 | }, 11 | moduleNameMapper: { 12 | "^src/(.*)$": "/src/$1", 13 | "^test/(.*)$": "/test/$1", 14 | "^(\\.{1,2}/.*)\\.js$": "$1", 15 | }, 16 | testEnvironment: "miniflare", 17 | testEnvironmentOptions: { 18 | // Miniflare doesn't yet support the `main` field in `wrangler.toml` so we 19 | // need to explicitly tell it where our built worker is. We also need to 20 | // explicitly mark it as an ES module. 
21 | scriptPath: "dist/index.mjs", 22 | modules: true, 23 | }, 24 | resetMocks: true, 25 | clearMocks: true, 26 | }; 27 | -------------------------------------------------------------------------------- /map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/map.png -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kafka-worker", 3 | "version": "0.0.0", 4 | "devDependencies": { 5 | "@cloudflare/workers-types": "^3.14.1", 6 | "@types/jest": "^28.1.8", 7 | "@typescript-eslint/eslint-plugin": "^5.35.1", 8 | "@typescript-eslint/parser": "^5.35.1", 9 | "esbuild": "^0.15.5", 10 | "eslint": "^8.22.0", 11 | "eslint-config-prettier": "^8.5.0", 12 | "eslint-import-resolver-typescript": "^3.5.0", 13 | "eslint-plugin-import": "^2.26.0", 14 | "jest": "^28.1.3", 15 | "jest-environment-miniflare": "^2.8.2", 16 | "prettier": "2.7.1", 17 | "ts-jest": "^28.0.8", 18 | "typescript": "^4.7.4", 19 | "wrangler": "^2.0.26" 20 | }, 21 | "private": true, 22 | "type": "module", 23 | "module": "./dist/index.mjs", 24 | "scripts": { 25 | "check": "tsc && tsc -p test/tsconfig.json", 26 | "lint": "eslint .", 27 | "test": "npm run build && node --experimental-vm-modules --no-warnings node_modules/jest/bin/jest.js", 28 | "build": "node build.js", 29 | "all": "npm run check && npm run lint && npm run test", 30 | "start": "wrangler dev --local", 31 | "deploy": "wrangler publish" 32 | }, 33 | "dependencies": { 34 | "crc-32": "^1.2.2" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | # Source 2 | 3 | This folder contains all source files, not including tests. 4 | 5 | The `client` folder contains files related to the gateway worker, which manages client connections. This includes converting Kafka API requests into internal API requests, converting internal API responses into Kafka API responses, forwarding internal API requests to Durable Objects, and handling the fan out of internal requests and fan in of internal responses. 6 | 7 | The `protocol` folder contains files related to the Kafka wire protocol and internal wire protocol. This includes API request and response type definitions and encoding and decoding functions. All this code does is convert JavaScript objects to ArrayBuffers, and vice-versa, so it's not super interesting. 8 | 9 | The `state` folder contains files related to Durable Objects. These Durable Objects store partition data, as well as information about which topics and partitions exist. The code in the `client` folder interacts with the code in the `state` folder by making an HTTP request or sending a WebSocket message to one of these Durable Objects. 10 | 11 | The `common.ts` file contains general-purpose types and functions that are used across the entire project. 12 | 13 | The `index.ts` file contains the entrypoint to the gateway worker, which creates WebSocket connections with clients and sets up a new session for each connection. All messages exchanged with the client flow through the `fetch` handler in this file. 
The high-level code path for handling a Kafka API request is `index.ts` :arrow_right: `client` :arrow_right: `state` :arrow_right: `client` :arrow_right: `index.ts`, with code from the `protocol` folder used in both `client` and `state`. 14 | 15 | ## Map 16 | 17 | ![kafka worker map](map.png) 18 | -------------------------------------------------------------------------------- /src/client/README.md: -------------------------------------------------------------------------------- 1 | # Client 2 | 3 | This folder contains files related to the gateway worker, which manages client connections. Each file contains one class, with the exception of `pending-request.ts`, which contains an interface and set of API-specific implementations. The general structure of these classes is described by the diagram below, and more detailed descriptions are included in the comments of each file. 4 | 5 | ## Class Structure 6 | 7 | ![client class structure](diagram.png) 8 | 9 | ## Map 10 | 11 | ![kafka worker map](map.png) 12 | -------------------------------------------------------------------------------- /src/client/diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/client/diagram.png -------------------------------------------------------------------------------- /src/client/incremental-response.ts: -------------------------------------------------------------------------------- 1 | import { ElemOf } from "src/common"; 2 | import { PartitionInfo } from "src/state/partition"; 3 | 4 | // This is an extremely common response structure in the Kafka protocol 5 | interface BaseResponse { 6 | topics: { 7 | name: string; 8 | partitions: { 9 | index: number; 10 | }[]; 11 | }[]; 12 | } 13 | 14 | export type DoneHandler = (response: T) => void; 15 | type PartitionId = string; 16 | 17 | export type PartitionResponse = Omit< 18 | ElemOf["partitions"]>, 19 | "index" 20 | >; 21 | 22 | // Represents a response that is filled in asynchronously from multiple 23 | // Partition DO subresponses. Keeps track of the partitions that haven't 24 | // responded yet, and executes a callback when the response is complete (this 25 | // callback usually resolves a Promise) 26 | // 27 | // The constructor takes a "template" response that is filled in one "template 28 | // slot" at a time by each subresponse. You can think of this template response 29 | // as a tree with blank leaves, and each subresponse fills in a new leaf that 30 | // was previously blank. When all the leaves have been filled in, the tree is 31 | // complete and can be returned. Note that no leaves are created or removed as 32 | // part of this process, only updated in place. 33 | export class IncrementalResponse { 34 | private readonly response: T; 35 | private readonly done: DoneHandler; 36 | 37 | private readonly pendingPartitions: Set; 38 | 39 | constructor(stubResponse: T, done: DoneHandler) { 40 | this.response = stubResponse; 41 | this.done = done; 42 | 43 | // Assumes that all (topic.name, partition.index) pairs are unique. 
I'm not 44 | // sure if this uniqueness constraint is officially part of the Kafka 45 | // protocol, but it doesn't make sense for duplicate pairs to appear in the 46 | // same request (unless you wanted to consume the same partition at different 47 | // offsets, which I don't think is possible) 48 | const partitionIds = stubResponse.topics.flatMap((topic) => 49 | topic.partitions.map( 50 | (partition) => new PartitionInfo(topic.name, partition.index).id 51 | ) 52 | ); 53 | this.pendingPartitions = new Set(partitionIds); 54 | 55 | // Make sure we don't block forever if the template response is already complete 56 | this.checkDone(); 57 | } 58 | 59 | addPartition(partition: PartitionInfo, response: PartitionResponse) { 60 | if (!this.pendingPartitions.has(partition.id)) { 61 | // Partition is no longer pending, discard response 62 | return; 63 | } 64 | 65 | // Find the placeholder subresponse that we want to fill in 66 | const stubResponse = this.response.topics 67 | .find((topic) => topic.name === partition.topic) 68 | ?.partitions.find(({ index }) => index === partition.index); 69 | 70 | if (stubResponse === undefined) { 71 | // This should be an unreachable state 72 | return; 73 | } 74 | 75 | // Fill in the matching placeholder with the actual subresponse, remove 76 | // the partition from the pending set, and check if this was the last one 77 | Object.assign(stubResponse, { ...response, index: partition.index }); 78 | this.pendingPartitions.delete(partition.id); 79 | this.checkDone(); 80 | } 81 | 82 | private checkDone() { 83 | // If all partitions are settled, then the response is complete. Done cannot 84 | // be called again because the pending set only decreases in size over time 85 | if (this.pendingPartitions.size === 0) { 86 | this.done(this.response); 87 | } 88 | } 89 | 90 | // Fill in all pending subresponses with the given subresponse 91 | cancel(fillerResponse: PartitionResponse) { 92 | this.pendingPartitions.forEach((id) => { 93 | this.addPartition(PartitionInfo.fromId(id), fillerResponse); 94 | }); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/client/map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/client/map.png -------------------------------------------------------------------------------- /src/client/pending-request.ts: -------------------------------------------------------------------------------- 1 | import { 2 | DoneHandler, 3 | IncrementalResponse, 4 | } from "src/client/incremental-response"; 5 | import { ErrorCode } from "src/protocol/common"; 6 | import { Decoder } from "src/protocol/decoder"; 7 | import { decodeInternalFetchResponse } from "src/protocol/internal/fetch"; 8 | import { decodeInternalListOffsetsResponse } from "src/protocol/internal/list-offsets"; 9 | import { decodeInternalProduceResponse } from "src/protocol/internal/produce"; 10 | import { 11 | KafkaFetchRequest, 12 | KafkaFetchResponse, 13 | stubKafkaFetchResponse, 14 | } from "src/protocol/kafka/fetch"; 15 | import { 16 | KafkaListOffsetsRequest, 17 | KafkaListOffsetsResponse, 18 | stubKafkaListOffsetsResponse, 19 | } from "src/protocol/kafka/list-offsets"; 20 | import { 21 | KafkaProduceRequest, 22 | KafkaProduceResponse, 23 | stubKafkaProduceResponse, 24 | } from "src/protocol/kafka/produce"; 25 | import { PartitionInfo } from "src/state/partition"; 26 | 27 | // Represents a client 
request that is waiting on subresponses from Partition 28 | // DOs. These subresponses arrive asynchronously as WebSocket messages, and need 29 | // to be pieced together into a complete response that is sent back to the client. 30 | export interface PendingRequest { 31 | handlePartitionMessage(partition: PartitionInfo, decoder: Decoder): void; 32 | // If request does not depend on given partition, this should be a no-op 33 | handlePartitionClose(partition: PartitionInfo): void; 34 | abort(): void; 35 | } 36 | 37 | export class PendingProduceRequest { 38 | private readonly response: IncrementalResponse; 39 | readonly abort: () => void; 40 | 41 | constructor( 42 | request: KafkaProduceRequest, 43 | done: DoneHandler, 44 | abort: () => void 45 | ) { 46 | const timeoutId = setTimeout(() => { 47 | this.response.cancel({ 48 | errorCode: ErrorCode.RequestTimedOut, 49 | baseOffset: BigInt(0), 50 | }); 51 | }, request.timeoutMs); 52 | 53 | this.response = new IncrementalResponse( 54 | stubKafkaProduceResponse(request, ErrorCode.None), 55 | (response) => { 56 | clearTimeout(timeoutId); 57 | done(response); 58 | } 59 | ); 60 | this.abort = () => { 61 | clearTimeout(timeoutId); 62 | abort(); 63 | }; 64 | } 65 | 66 | handlePartitionMessage(partition: PartitionInfo, decoder: Decoder): void { 67 | const response = decodeInternalProduceResponse(decoder); 68 | this.response.addPartition(partition, response); 69 | } 70 | 71 | handlePartitionClose(partition: PartitionInfo): void { 72 | this.response.addPartition(partition, { 73 | errorCode: ErrorCode.NetworkException, 74 | baseOffset: BigInt(0), 75 | }); 76 | } 77 | } 78 | 79 | export class PendingFetchRequest { 80 | private readonly response: IncrementalResponse; 81 | readonly abort: () => void; 82 | 83 | constructor( 84 | request: KafkaFetchRequest, 85 | done: DoneHandler, 86 | abort: () => void 87 | ) { 88 | this.response = new IncrementalResponse( 89 | stubKafkaFetchResponse(request, ErrorCode.None), 90 | done 91 | ); 92 | this.abort = abort; 93 | } 94 | 95 | handlePartitionMessage(partition: PartitionInfo, decoder: Decoder): void { 96 | const response = decodeInternalFetchResponse(decoder); 97 | this.response.addPartition(partition, response); 98 | } 99 | 100 | handlePartitionClose(partition: PartitionInfo): void { 101 | this.response.addPartition(partition, { 102 | errorCode: ErrorCode.NetworkException, 103 | highWatermark: BigInt(0), 104 | messageSet: new Uint8Array(), 105 | }); 106 | } 107 | } 108 | 109 | export class PendingListOffsetsRequest { 110 | private readonly response: IncrementalResponse; 111 | readonly abort: () => void; 112 | 113 | constructor( 114 | request: KafkaListOffsetsRequest, 115 | done: DoneHandler, 116 | abort: () => void 117 | ) { 118 | this.response = new IncrementalResponse( 119 | stubKafkaListOffsetsResponse(request, ErrorCode.None), 120 | done 121 | ); 122 | this.abort = abort; 123 | } 124 | 125 | handlePartitionMessage(partition: PartitionInfo, decoder: Decoder): void { 126 | const response = decodeInternalListOffsetsResponse(decoder); 127 | this.response.addPartition(partition, response); 128 | } 129 | 130 | handlePartitionClose(partition: PartitionInfo): void { 131 | this.response.addPartition(partition, { 132 | errorCode: ErrorCode.NetworkException, 133 | oldStyleOffsets: [], 134 | }); 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/client/request-manager.ts: -------------------------------------------------------------------------------- 1 | import { 2 | 
PendingFetchRequest, 3 | PendingListOffsetsRequest, 4 | PendingProduceRequest, 5 | PendingRequest, 6 | } from "src/client/pending-request"; 7 | import { SocketManager } from "src/client/socket-manager"; 8 | import { AbortedRequestError, Env } from "src/common"; 9 | import { Acks } from "src/protocol/common"; 10 | import { Decoder } from "src/protocol/decoder"; 11 | import { Encoder } from "src/protocol/encoder"; 12 | import { RequestMetadata, encodeRequestHeader } from "src/protocol/header"; 13 | import { PartitionApiKey } from "src/protocol/internal/common"; 14 | import { encodeInternalFetchRequest } from "src/protocol/internal/fetch"; 15 | import { encodeInternalListOffsetsRequest } from "src/protocol/internal/list-offsets"; 16 | import { encodeInternalProduceRequest } from "src/protocol/internal/produce"; 17 | import { 18 | KafkaFetchRequest, 19 | KafkaFetchResponse, 20 | } from "src/protocol/kafka/fetch"; 21 | import { 22 | KafkaListOffsetsRequest, 23 | KafkaListOffsetsResponse, 24 | } from "src/protocol/kafka/list-offsets"; 25 | import { 26 | KafkaProduceRequest, 27 | KafkaProduceResponse, 28 | } from "src/protocol/kafka/produce"; 29 | import { PartitionInfo } from "src/state/partition"; 30 | 31 | type CorrelationId = number; 32 | 33 | // One Kafka protocol request can operate on multiple partitions, which means 34 | // multiple subrequests to Partition DOs that fan out, and multiple subresponses 35 | // that need to be fanned in to complete the final response sent back to the 36 | // client. The RequestManager class handles the fan out and fan in process. 37 | // 38 | // Public RequestManager methods wrap the fan out and fan in process in a 39 | // "super-promise" that resolves when all subrequests have completed 40 | // successfully, failed, or timed out. All the caller needs to do is await this 41 | // promise to get the complete response. 42 | // 43 | // Note that error handling and timeouts have not been implemented, but should 44 | // be a simple extension of this existing structure (TODO). 
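//
// At runtime, each pending client request is stored in a map keyed by its
// correlation ID. Subrequests to Partition DOs reuse that correlation ID in
// their request headers, so when a subresponse arrives over a WebSocket (see
// handlePartitionMessage below), it can be matched back to the pending
// request it belongs to.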
45 | export class RequestManager { 46 | private readonly pending: Map; 47 | private readonly socket: SocketManager; 48 | 49 | constructor(env: Env) { 50 | this.pending = new Map(); 51 | this.socket = new SocketManager(env, this); 52 | } 53 | 54 | produceRequest( 55 | metadata: RequestMetadata, 56 | request: KafkaProduceRequest 57 | ): Promise { 58 | return new Promise((resolve, reject) => { 59 | if (request.acks !== Acks.None) { 60 | // If the request will return a response, resolve the promise when the 61 | // response is complete 62 | const done = (response: KafkaProduceResponse) => { 63 | this.pending.delete(metadata.correlationId); 64 | resolve(response); 65 | }; 66 | const abort = () => { 67 | this.pending.delete(metadata.correlationId); 68 | reject(new AbortedRequestError()); 69 | }; 70 | this.pending.set( 71 | metadata.correlationId, 72 | new PendingProduceRequest(request, done, abort) 73 | ); 74 | } 75 | 76 | Promise.all( 77 | request.topics.flatMap((topic) => 78 | topic.partitions.map(async (partition) => { 79 | const encoder = new Encoder(); 80 | encodeRequestHeader(encoder, { 81 | apiKey: PartitionApiKey.Produce, 82 | apiVersion: 0, 83 | correlationId: metadata.correlationId, 84 | clientId: metadata.clientId, 85 | }); 86 | 87 | const partitionRequest = encodeInternalProduceRequest(encoder, { 88 | acks: request.acks, 89 | messageSet: partition.messageSet, 90 | }); 91 | 92 | await this.socket.sendPartition( 93 | new PartitionInfo(topic.name, partition.index), 94 | partitionRequest 95 | ); 96 | }) 97 | ) 98 | ) 99 | .then(() => { 100 | if (request.acks === Acks.None) { 101 | // If the request will not return a response, resolve the promise as 102 | // soon as the subrequests are sent 103 | resolve(null); 104 | } 105 | }) 106 | .catch(reject); 107 | }); 108 | } 109 | 110 | fetchRequest( 111 | metadata: RequestMetadata, 112 | request: KafkaFetchRequest 113 | ): Promise { 114 | return new Promise((resolve, reject) => { 115 | const done = (response: KafkaFetchResponse) => { 116 | this.pending.delete(metadata.correlationId); 117 | resolve(response); 118 | }; 119 | const abort = () => { 120 | this.pending.delete(metadata.correlationId); 121 | reject(new AbortedRequestError()); 122 | }; 123 | this.pending.set( 124 | metadata.correlationId, 125 | new PendingFetchRequest(request, done, abort) 126 | ); 127 | 128 | // TODO: Convert to Promise.allSettled and handle errors individually 129 | Promise.all( 130 | request.topics.flatMap((topic) => 131 | topic.partitions.map(async (partition) => { 132 | const encoder = new Encoder(); 133 | encodeRequestHeader(encoder, { 134 | apiKey: PartitionApiKey.Fetch, 135 | apiVersion: 0, 136 | correlationId: metadata.correlationId, 137 | clientId: metadata.clientId, 138 | }); 139 | 140 | const partitionRequest = encodeInternalFetchRequest(encoder, { 141 | maxWaitMs: request.maxWaitMs, 142 | fetchOffset: partition.fetchOffset, 143 | minBytes: request.minBytes, 144 | maxBytes: partition.maxBytes, 145 | }); 146 | 147 | await this.socket.sendPartition( 148 | new PartitionInfo(topic.name, partition.index), 149 | partitionRequest 150 | ); 151 | }) 152 | ) 153 | ).catch(reject); 154 | }); 155 | } 156 | 157 | listOffsetsRequest( 158 | metadata: RequestMetadata, 159 | request: KafkaListOffsetsRequest 160 | ): Promise { 161 | return new Promise((resolve, reject) => { 162 | const done = (response: KafkaListOffsetsResponse) => { 163 | this.pending.delete(metadata.correlationId); 164 | resolve(response); 165 | }; 166 | const abort = () => { 167 | 
this.pending.delete(metadata.correlationId); 168 | reject(new AbortedRequestError()); 169 | }; 170 | this.pending.set( 171 | metadata.correlationId, 172 | new PendingListOffsetsRequest(request, done, abort) 173 | ); 174 | 175 | Promise.all( 176 | request.topics.flatMap((topic) => 177 | topic.partitions.map(async (partition) => { 178 | const encoder = new Encoder(); 179 | encodeRequestHeader(encoder, { 180 | apiKey: PartitionApiKey.ListOffsets, 181 | apiVersion: 0, 182 | correlationId: metadata.correlationId, 183 | clientId: metadata.clientId, 184 | }); 185 | 186 | const partitionRequest = encodeInternalListOffsetsRequest(encoder, { 187 | timestamp: partition.timestamp, 188 | maxNumOffsets: partition.maxNumOffsets, 189 | }); 190 | 191 | await this.socket.sendPartition( 192 | new PartitionInfo(topic.name, partition.index), 193 | partitionRequest 194 | ); 195 | }) 196 | ) 197 | ).catch(reject); 198 | }); 199 | } 200 | 201 | // Matches WebSocket messages received from Partition DOs to pending client 202 | // requests 203 | handlePartitionMessage(partition: PartitionInfo, message: ArrayBuffer): void { 204 | const decoder = new Decoder(message); 205 | const correlationId = decoder.readInt32(); 206 | 207 | const request = this.pending.get(correlationId); 208 | if (request !== undefined) { 209 | request.handlePartitionMessage(partition, decoder); 210 | return; 211 | } 212 | console.log("Couldn't match Partition response to pending request"); 213 | } 214 | 215 | handlePartitionClose(partition: PartitionInfo): void { 216 | this.pending.forEach((request) => request.handlePartitionClose(partition)); 217 | } 218 | 219 | close() { 220 | this.pending.forEach((request) => request.abort()); 221 | this.socket.close(); 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /src/client/session.ts: -------------------------------------------------------------------------------- 1 | import { RequestManager } from "src/client/request-manager"; 2 | import { AbortedRequestError, Env, stringify } from "src/common"; 3 | import { ApiKey, ErrorCode, validApiKey } from "src/protocol/common"; 4 | import { Decoder } from "src/protocol/decoder"; 5 | import { Encoder } from "src/protocol/encoder"; 6 | import { RequestMetadata, decodeRequestHeader } from "src/protocol/header"; 7 | import { KafkaDecoder, KafkaResponseEncoder } from "src/protocol/kafka/common"; 8 | import { 9 | decodeKafkaFetchRequest, 10 | encodeKafkaFetchResponse, 11 | stubKafkaFetchResponse, 12 | } from "src/protocol/kafka/fetch"; 13 | import { 14 | decodeKafkaListOffsetsRequest, 15 | encodeKafkaListOffsetsResponse, 16 | stubKafkaListOffsetsResponse, 17 | } from "src/protocol/kafka/list-offsets"; 18 | import { 19 | decodeKafkaMetadataRequest, 20 | encodeKafkaMetadataResponse, 21 | } from "src/protocol/kafka/metadata"; 22 | import { 23 | decodeKafkaProduceRequest, 24 | encodeKafkaProduceResponse, 25 | stubKafkaProduceResponse, 26 | } from "src/protocol/kafka/produce"; 27 | import { fetchClusterMetadata } from "src/state/cluster"; 28 | 29 | // Coordinator class that handles one client connection and forwards incoming 30 | // requests to Partition DOs and the global Cluster DO 31 | export class Session { 32 | private readonly env: Env; 33 | 34 | // Client for making internal requests to DOs 35 | private readonly internal: RequestManager; 36 | 37 | constructor(env: Env) { 38 | this.env = env; 39 | 40 | this.internal = new RequestManager(env); 41 | } 42 | 43 | async handleRequest(buffer: ArrayBuffer): Promise { 44 
| const decoder = new KafkaDecoder(buffer); 45 | const header = decodeRequestHeader(decoder, validApiKey); 46 | 47 | if (header.apiVersion !== 0) { 48 | throw new Error( 49 | `Unsupported version of api ${header.apiKey}: expected 0 but got ${header.apiVersion}` 50 | ); 51 | } 52 | 53 | const encoder = new KafkaResponseEncoder(header.correlationId); 54 | 55 | switch (header.apiKey) { 56 | case ApiKey.Produce: 57 | return this.handleProduceRequest(header, decoder, encoder); 58 | case ApiKey.Fetch: 59 | return this.handleFetchRequest(header, decoder, encoder); 60 | case ApiKey.ListOffsets: 61 | return this.handleListOffsetsRequest(header, decoder, encoder); 62 | case ApiKey.Metadata: 63 | return this.handleMetadataRequest(header, decoder, encoder); 64 | } 65 | } 66 | 67 | private async handleProduceRequest( 68 | metadata: RequestMetadata, 69 | decoder: Decoder, 70 | encoder: Encoder 71 | ): Promise { 72 | const request = decodeKafkaProduceRequest(decoder); 73 | console.log(`Produce request: ${stringify(request)}`); 74 | 75 | try { 76 | const response = await this.internal.produceRequest(metadata, request); 77 | console.log(`Produce response: ${stringify(response)}`); 78 | 79 | if (response === null) { 80 | return null; 81 | } 82 | return encodeKafkaProduceResponse(encoder, response); 83 | } catch (e) { 84 | if (e instanceof AbortedRequestError) { 85 | return null; 86 | } 87 | console.log( 88 | `[Gateway Worker] Error while handling Produce request: ${stringify(e)}` 89 | ); 90 | return encodeKafkaProduceResponse( 91 | encoder, 92 | stubKafkaProduceResponse(request, ErrorCode.UnknownServerError) 93 | ); 94 | } 95 | } 96 | 97 | private async handleFetchRequest( 98 | metadata: RequestMetadata, 99 | decoder: Decoder, 100 | encoder: Encoder 101 | ): Promise { 102 | const request = decodeKafkaFetchRequest(decoder); 103 | console.log(`Fetch request: ${stringify(request)}`); 104 | 105 | try { 106 | const response = await this.internal.fetchRequest(metadata, request); 107 | console.log(`Fetch response: ${stringify(response)}`); 108 | 109 | return encodeKafkaFetchResponse(encoder, response); 110 | } catch (e) { 111 | if (e instanceof AbortedRequestError) { 112 | return null; 113 | } 114 | console.log( 115 | `[Gateway Worker] Error while handling Fetch request: ${stringify(e)}` 116 | ); 117 | return encodeKafkaFetchResponse( 118 | encoder, 119 | stubKafkaFetchResponse(request, ErrorCode.UnknownServerError) 120 | ); 121 | } 122 | } 123 | 124 | private async handleListOffsetsRequest( 125 | metadata: RequestMetadata, 126 | decoder: Decoder, 127 | encoder: Encoder 128 | ): Promise { 129 | const request = decodeKafkaListOffsetsRequest(decoder); 130 | console.log(`ListOffsets request: ${stringify(request)}`); 131 | 132 | try { 133 | const response = await this.internal.listOffsetsRequest( 134 | metadata, 135 | request 136 | ); 137 | console.log(`ListOffsets response: ${stringify(response)}`); 138 | 139 | return encodeKafkaListOffsetsResponse(encoder, response); 140 | } catch (e) { 141 | if (e instanceof AbortedRequestError) { 142 | return null; 143 | } 144 | console.log( 145 | `[Gateway Worker] Error while handling ListOffsets request: ${stringify( 146 | e 147 | )}` 148 | ); 149 | return encodeKafkaListOffsetsResponse( 150 | encoder, 151 | stubKafkaListOffsetsResponse(request, ErrorCode.UnknownServerError) 152 | ); 153 | } 154 | } 155 | 156 | private async handleMetadataRequest( 157 | metadata: RequestMetadata, 158 | decoder: Decoder, 159 | encoder: Encoder 160 | ): Promise { 161 | const request = 
decodeKafkaMetadataRequest(decoder); 162 | console.log(`Metadata request: ${stringify(request)}`); 163 | 164 | const response = await fetchClusterMetadata(this.env, request.topics); 165 | console.log(`Metadata response: ${stringify(response)}`); 166 | 167 | return encodeKafkaMetadataResponse(encoder, response); 168 | } 169 | 170 | close() { 171 | this.internal.close(); 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /src/client/socket-manager.ts: -------------------------------------------------------------------------------- 1 | import { AbortedRequestError, Env } from "src/common"; 2 | import { PartitionInfo, partitionStubUrl } from "src/state/partition"; 3 | 4 | interface PartitionHandler { 5 | handlePartitionMessage(partition: PartitionInfo, message: ArrayBuffer): void; 6 | handlePartitionClose(partition: PartitionInfo): void; 7 | } 8 | 9 | interface SocketState { 10 | active: Map; 11 | pending: Map>; 12 | } 13 | type SocketId = string; 14 | 15 | // WebSocket connections to DOs are created asynchronously using fetch(). This 16 | // means connection creation can overlap, and always creating a new connection 17 | // when there is no existing connection can result in multiple connections to 18 | // the same DO. The SocketManager class controls all DO WebSocket connections 19 | // and makes sure there are no duplicates. 20 | export class SocketManager { 21 | private readonly env: Env; 22 | private readonly partitionHandler: PartitionHandler; 23 | 24 | private readonly sockets: SocketState; 25 | private readonly controller: AbortController; 26 | 27 | constructor(env: Env, partitionHandler: PartitionHandler) { 28 | this.env = env; 29 | this.partitionHandler = partitionHandler; 30 | 31 | this.sockets = { 32 | active: new Map(), 33 | pending: new Map>(), 34 | }; 35 | this.controller = new AbortController(); 36 | } 37 | 38 | // Send a WebSocket message to the given partition 39 | async sendPartition( 40 | partition: PartitionInfo, 41 | message: ArrayBuffer 42 | ): Promise { 43 | // If there is already an active connection, just use that 44 | const existing = this.sockets.active.get(partition.id); 45 | if (existing !== undefined) { 46 | existing.send(message); 47 | return; 48 | } 49 | 50 | // If there is already a pending connection, wait for it to be ready to use 51 | const pending = this.sockets.pending.get(partition.id); 52 | if (pending !== undefined) { 53 | const socket = await pending; 54 | socket.send(message); 55 | return; 56 | } 57 | 58 | // Otherwise, we'll need to create a new connection ourselves 59 | const socket = await this.dialPartition(partition); 60 | socket.send(message); 61 | } 62 | 63 | // Enforces the invariant that a maximum of one socket connection per 64 | // partition can be open at the same time. Prevents the case where a second 65 | // connection is initiated after the first connection has been initiated but 66 | // before the first connection is ready to use 67 | private async dialPartition(partition: PartitionInfo): Promise { 68 | const objId = this.env.PARTITION.idFromName(partition.id); 69 | const obj = this.env.PARTITION.get(objId); 70 | 71 | const socketPromise = obj 72 | .fetch(partitionStubUrl, { 73 | headers: { 74 | Upgrade: "websocket", 75 | }, 76 | signal: this.controller.signal, 77 | }) 78 | .then((response) => { 79 | // Unmark the connection as pending. 
If there is no WebSocket in the 80 | // response, an error is thrown and the caller can start over with 81 | // creating a new connection (but this is a bad sign) 82 | this.sockets.pending.delete(partition.id); 83 | const socket = response.webSocket; 84 | 85 | if (socket === null) { 86 | throw new Error("Expected websocket in response from Partition"); 87 | } 88 | 89 | socket.accept(); 90 | this.sockets.active.set(partition.id, socket); 91 | 92 | socket.addEventListener("message", (event) => { 93 | if (typeof event.data === "string") { 94 | console.log("Received string data, but we want binary data!"); 95 | return; 96 | } 97 | 98 | this.partitionHandler.handlePartitionMessage(partition, event.data); 99 | }); 100 | 101 | socket.addEventListener("close", () => { 102 | this.sockets.active.delete(partition.id); 103 | this.partitionHandler.handlePartitionClose(partition); 104 | }); 105 | 106 | // Once this.partitions.pending and this.partitions.active have been 107 | // updated, and the socket event handlers have been registered, the 108 | // connection is ready for sending 109 | return socket; 110 | }) 111 | .catch((e) => { 112 | if (this.controller.signal.aborted) { 113 | throw new AbortedRequestError(); 114 | } 115 | throw e; 116 | }); 117 | 118 | // Mark connection as pending to prevent duplicate connections from being 119 | // created before this connection is ready to use 120 | this.sockets.pending.set(partition.id, socketPromise); 121 | return socketPromise; 122 | } 123 | 124 | close() { 125 | // Close all open conections 126 | this.sockets.active.forEach((socket) => socket.close()); 127 | // Abort pending connections 128 | this.controller.abort(); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/common.ts: -------------------------------------------------------------------------------- 1 | // We make this a read-only object to prevent any accidental funny business 2 | export type Env = Readonly<{ 3 | // The hostname of this worker (required) 4 | HOSTNAME: string; 5 | // The port of this worker (required) 6 | PORT: string; 7 | // The size of partition storage chunks in bytes (required) 8 | PARTITION_CHUNK_SIZE: string; 9 | // A JSON string describing which topics and partitions should exist 10 | INITIAL_CLUSTER_METADATA: string; 11 | CLUSTER: DurableObjectNamespace; 12 | PARTITION: DurableObjectNamespace; 13 | }>; 14 | 15 | // Generic utility types used in many places 16 | export type ValueOf = T[keyof T]; 17 | export type ElemOf = E; 18 | 19 | // Wrapper around JSON.stringify that converts BigInts to strings 20 | export const stringify = (value: T): string => 21 | JSON.stringify( 22 | value, 23 | // Make BigInt serializable into JSON for logging, taken from: 24 | // https://github.com/GoogleChromeLabs/jsbi/issues/30#issuecomment-521460510 25 | // eslint-disable-next-line @typescript-eslint/no-unsafe-return 26 | (_key, value) => (typeof value === "bigint" ? 
value.toString() : value), 27 | 2 28 | ); 29 | 30 | export class AbortedRequestError extends Error { 31 | constructor(message?: string) { 32 | super(message); 33 | this.name = this.constructor.name; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { Session } from "src/client/session"; 2 | import { Env, stringify } from "src/common"; 3 | 4 | export { Cluster } from "src/state/cluster"; 5 | export { Partition } from "src/state/partition"; 6 | 7 | export default { 8 | fetch(request: Request, env: Env): Response { 9 | if (request.headers.get("Upgrade") !== "websocket") { 10 | return new Response("Expected Upgrade: websocket", { status: 426 }); 11 | } 12 | console.log(`Opening new client connection!`); 13 | 14 | const session = new Session(env); 15 | const webSocketPair = new WebSocketPair(); 16 | const [client, server] = Object.values(webSocketPair); 17 | 18 | server.accept(); 19 | server.addEventListener("message", (event) => { 20 | if (typeof event.data === "string") { 21 | console.log("Received string data, but we want binary data!"); 22 | return; 23 | } 24 | 25 | session 26 | .handleRequest(event.data) 27 | .then((response) => { 28 | if (response !== null) { 29 | server.send(response); 30 | } 31 | }) 32 | .catch((error) => 33 | console.log( 34 | `[Gateway Worker] Uncaught error while handling request: ${stringify( 35 | error 36 | )}` 37 | ) 38 | ); 39 | }); 40 | server.addEventListener("close", () => { 41 | session.close(); 42 | }); 43 | 44 | return new Response(null, { 45 | status: 101, 46 | webSocket: client, 47 | }); 48 | }, 49 | }; 50 | -------------------------------------------------------------------------------- /src/map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/map.png -------------------------------------------------------------------------------- /src/protocol/README.md: -------------------------------------------------------------------------------- 1 | # Protocol 2 | 3 | This folder contains files related to the Kafka wire protocol and internal wire protocol. The `kafka` folder contains files specific to the Kafka protocol, and the `internal` folder contains files specific to the internal protocol. 4 | 5 | The files in the root of this folder contain code that is shared across both protocols, which use the same primitives and message structure. 6 | 7 | ## Map 8 | 9 | ![kafka worker map](map.png) 10 | -------------------------------------------------------------------------------- /src/protocol/common.ts: -------------------------------------------------------------------------------- 1 | import { ValueOf } from "src/common"; 2 | 3 | // These don't add type safety, but help with labeling 4 | export type Int8 = number; 5 | export type Int16 = number; 6 | export type Int32 = number; 7 | // This one does add some type safety 8 | export type Int64 = bigint; 9 | 10 | export const int8Size = 1; 11 | export const int16Size = 2; 12 | export const int32Size = 4; 13 | export const int64Size = 8; 14 | 15 | // In the Kafka protocol, all arrays are nullable. 
In most cases, there is no 16 | // semantic difference between null and empty arrays, and in these cases we 17 | // replace null arrays with empty arrays to avoid code pollution from null checks 18 | export type KafkaArray = T[] | null; 19 | export type NullableString = string | null; 20 | export type MessageSet = Uint8Array; 21 | 22 | // Returns a type predicate for the given "enum" object (this project doesn't 23 | // use TypeScript enums). These predicates are useful for decoding arbitrary 24 | // numbers into "enum" values in a type-safe way (see usage in Decoder class) 25 | export const generateEnumPredicate = 26 | , V>(enumObj: E) => 27 | (value: V): value is ValueOf => 28 | Object.values(enumObj).includes(value); 29 | 30 | // ApiKey is an Int16 31 | // https://kafka.apache.org/protocol.html#protocol_api_keys 32 | export const ApiKey = { 33 | Produce: 0, 34 | Fetch: 1, 35 | ListOffsets: 2, 36 | Metadata: 3, 37 | } as const; 38 | export type ApiKey = ValueOf; 39 | export const validApiKey = generateEnumPredicate(ApiKey); 40 | 41 | // ErrorCode is an Int16 42 | // https://kafka.apache.org/protocol.html#protocol_error_codes 43 | export const ErrorCode = { 44 | UnknownServerError: -1, 45 | None: 0, 46 | OffsetOutOfRange: 1, 47 | CorruptMessage: 2, 48 | UnknownTopicOrPartition: 3, 49 | InvalidMessageSize: 4, 50 | NotLeaderForPartition: 6, 51 | RequestTimedOut: 7, 52 | NetworkException: 13, 53 | } as const; 54 | export type ErrorCode = ValueOf; 55 | export const validErrorCode = generateEnumPredicate(ErrorCode); 56 | 57 | // Acks is an Int16 58 | // https://kafka.apache.org/protocol.html#The_Messages_Produce 59 | export const Acks = { 60 | None: 0, 61 | Leader: 1, 62 | FullISR: -1, 63 | } as const; 64 | export type Acks = ValueOf; 65 | export const validAcks = generateEnumPredicate(Acks); 66 | -------------------------------------------------------------------------------- /src/protocol/decoder.ts: -------------------------------------------------------------------------------- 1 | import { 2 | Acks, 3 | ErrorCode, 4 | Int16, 5 | Int32, 6 | Int64, 7 | KafkaArray, 8 | MessageSet, 9 | NullableString, 10 | int16Size, 11 | int32Size, 12 | int64Size, 13 | validAcks, 14 | validErrorCode, 15 | } from "src/protocol/common"; 16 | 17 | // This implementation borrows heavily from the kafkajs Node library: 18 | // https://github.com/tulios/kafkajs/blob/master/src/protocol/decoder.js 19 | 20 | export type EnumPredicate = (value: Int16) => value is T; 21 | 22 | export class Decoder { 23 | private readonly view: DataView; 24 | private offset: number; 25 | 26 | constructor(buffer: ArrayBuffer) { 27 | this.view = new DataView(buffer); 28 | this.offset = 0; 29 | } 30 | 31 | readInt16(): Int16 { 32 | const value = this.view.getInt16(this.offset); 33 | this.offset += int16Size; 34 | return value; 35 | } 36 | 37 | readEnum(predicate: EnumPredicate): T { 38 | const value = this.readInt16(); 39 | if (!predicate(value)) { 40 | throw new Error(`Invalid enum value: ${value}`); 41 | } 42 | return value; 43 | } 44 | 45 | readErrorCode(): ErrorCode { 46 | return this.readEnum(validErrorCode); 47 | } 48 | 49 | readAcks(): Acks { 50 | return this.readEnum(validAcks); 51 | } 52 | 53 | readInt32(): Int32 { 54 | const value = this.view.getInt32(this.offset); 55 | this.offset += int32Size; 56 | return value; 57 | } 58 | 59 | readInt64(): Int64 { 60 | const value = this.view.getBigInt64(this.offset); 61 | this.offset += int64Size; 62 | return value; 63 | } 64 | 65 | private readStringSize(size: number): string { 66 | const 
stringBuffer = this.view.buffer.slice( 67 | this.offset, 68 | this.offset + size 69 | ); 70 | const value = new TextDecoder().decode(stringBuffer); 71 | this.offset += size; 72 | return value; 73 | } 74 | 75 | readString(): string { 76 | const size = this.readInt16(); 77 | if (size === -1) { 78 | throw new Error("Unexpected null string!"); 79 | } 80 | return this.readStringSize(size); 81 | } 82 | 83 | readNullableString(): NullableString { 84 | const size = this.readInt16(); 85 | if (size === -1) { 86 | return null; 87 | } 88 | return this.readStringSize(size); 89 | } 90 | 91 | readKafkaArray(readElement: (index: number) => T): KafkaArray { 92 | const size = this.readInt32(); 93 | if (size === -1) { 94 | return null; 95 | } 96 | const values = new Array(size); 97 | for (let i = 0; i < size; i++) { 98 | values[i] = readElement(i); 99 | } 100 | return values; 101 | } 102 | 103 | // Convenience method for converting null array to empty array in cases where 104 | // there is no semantic difference between them 105 | readArray(readElement: (index: number) => T): T[] { 106 | return this.readKafkaArray(readElement) ?? []; 107 | } 108 | 109 | readInt32Array(): Int32[] { 110 | return this.readArray(() => this.readInt32()); 111 | } 112 | 113 | readInt64Array(): Int64[] { 114 | return this.readArray(() => this.readInt64()); 115 | } 116 | 117 | readStringArray(): string[] { 118 | return this.readArray(() => this.readString()); 119 | } 120 | 121 | private readSlice(size: Int32): Uint8Array { 122 | const slice = new Uint8Array(this.view.buffer, this.offset, size); 123 | this.offset += size; 124 | return slice; 125 | } 126 | 127 | readMessageSet(): MessageSet { 128 | const size = this.readInt32(); 129 | return this.readSlice(size); 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/protocol/encoder.ts: -------------------------------------------------------------------------------- 1 | import { 2 | Int16, 3 | Int32, 4 | Int64, 5 | Int8, 6 | KafkaArray, 7 | MessageSet, 8 | NullableString, 9 | int16Size, 10 | int32Size, 11 | int64Size, 12 | int8Size, 13 | } from "src/protocol/common"; 14 | 15 | // This implementation borrows heavily from the kafkajs Node library: 16 | // https://github.com/tulios/kafkajs/blob/master/src/protocol/encoder.js 17 | 18 | export class Encoder { 19 | protected view: DataView; 20 | protected offset: number; 21 | 22 | constructor(initialBufferSize = 64) { 23 | const buffer = new ArrayBuffer(initialBufferSize); 24 | this.view = new DataView(buffer); 25 | this.offset = 0; 26 | } 27 | 28 | private checkCapacity(size: number) { 29 | const nextLength = this.offset + size; 30 | if (nextLength > this.view.byteLength) { 31 | // If we run out of space, reallocate the backing array 32 | const newCapacity = 2 * nextLength; 33 | const newBuffer = new ArrayBuffer(newCapacity); 34 | new Uint8Array(newBuffer).set(new Uint8Array(this.view.buffer)); 35 | this.view = new DataView(newBuffer); 36 | } 37 | } 38 | 39 | writeInt8(value: Int8): this { 40 | this.checkCapacity(int8Size); 41 | this.view.setInt8(this.offset, value); 42 | this.offset += int8Size; 43 | return this; 44 | } 45 | 46 | writeInt16(value: Int16): this { 47 | this.checkCapacity(int16Size); 48 | this.view.setInt16(this.offset, value); 49 | this.offset += int16Size; 50 | return this; 51 | } 52 | 53 | // Convenience method that makes sure "enum" values are encoded as Int16 54 | writeEnum(value: T): this { 55 | return this.writeInt16(value); 56 | } 57 | 58 | writeInt32(value: 
59 |     this.checkCapacity(int32Size);
60 |     this.view.setInt32(this.offset, value);
61 |     this.offset += int32Size;
62 |     return this;
63 |   }
64 |
65 |   writeInt64(value: Int64): this {
66 |     this.checkCapacity(int64Size);
67 |     this.view.setBigInt64(this.offset, value);
68 |     this.offset += int64Size;
69 |     return this;
70 |   }
71 |
72 |   writeString(value: string): this {
73 |     const bytes = new TextEncoder().encode(value);
74 |     this.writeInt16(bytes.length);
75 |     this.checkCapacity(bytes.length);
76 |     new Uint8Array(this.view.buffer).set(bytes, this.offset);
77 |     this.offset += bytes.length;
78 |     return this;
79 |   }
80 |
81 |   writeNullableString(value: NullableString): this {
82 |     if (value === null) {
83 |       this.writeInt16(-1);
84 |       return this;
85 |     }
86 |     return this.writeString(value);
87 |   }
88 |
89 |   writeArray<T>(values: KafkaArray<T>, writeElement: (value: T) => void): this {
90 |     if (values === null) {
91 |       this.writeInt32(-1);
92 |       return this;
93 |     }
94 |     this.writeInt32(values.length);
95 |     values.forEach(writeElement);
96 |     return this;
97 |   }
98 |
99 |   writeInt32Array(values: Int32[]): this {
100 |     return this.writeArray(values, (value) => this.writeInt32(value));
101 |   }
102 |
103 |   writeInt64Array(values: Int64[]): this {
104 |     return this.writeArray(values, (value) => this.writeInt64(value));
105 |   }
106 |
107 |   writeStringArray(values: KafkaArray<string>): this {
108 |     return this.writeArray(values, (value) => this.writeString(value));
109 |   }
110 |
111 |   writeSlice(slice: Uint8Array): this {
112 |     this.checkCapacity(slice.length);
113 |     new Uint8Array(this.view.buffer).set(slice, this.offset);
114 |     this.offset += slice.length;
115 |     return this;
116 |   }
117 |
118 |   writeBytes(bytes: Uint8Array): this {
119 |     this.writeInt32(bytes.length);
120 |     return this.writeSlice(bytes);
121 |   }
122 |
123 |   writeMessageSet(messageSet: MessageSet): this {
124 |     return this.writeBytes(messageSet);
125 |   }
126 |
127 |   buffer(): ArrayBuffer {
128 |     if (this.view.buffer.byteLength === this.offset) {
129 |       // Avoid an allocation/copy if the underlying buffer does not need to be
130 |       // resized (probably because initialBufferSize was set optimally)
131 |       return this.view.buffer;
132 |     }
133 |     return this.view.buffer.slice(0, this.offset);
134 |   }
135 |
136 |   slice(): Uint8Array {
137 |     return new Uint8Array(this.view.buffer, 0, this.offset);
138 |   }
139 | }
140 |
--------------------------------------------------------------------------------
/src/protocol/header.ts:
--------------------------------------------------------------------------------
1 | import { Int16, Int32, NullableString } from "src/protocol/common";
2 | import { Decoder, EnumPredicate } from "src/protocol/decoder";
3 | import { Encoder } from "src/protocol/encoder";
4 |
5 | // Request Header (Version: 1) => api_key api_version correlation_id client_id
6 | // api_key => INT16
7 | // api_version => INT16
8 | // correlation_id => INT32
9 | // client_id => NULLABLE_STRING
10 | //
11 | // https://kafka.apache.org/protocol.html#protocol_messages
12 | //
13 | // This header format is also used by internal requests.
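//
// Editor's illustration (not in the original source): a header such as
// { apiKey: ApiKey.Produce, apiVersion: 0, correlationId: 7, clientId: "demo" }
// written by encodeRequestHeader below occupies 2 + 2 + 4 + (2 + 4) = 14 bytes:
//
//   00 00 | 00 00 | 00 00 00 07 | 00 04 64 65 6d 6f
//
// (The 4-byte message size prefix used on the wire is added separately, by the
// KafkaEncoder subclass in src/protocol/kafka/common.ts.)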
14 |
15 | export interface RequestHeader<T extends Int16> {
16 |   apiKey: T;
17 |   apiVersion: Int16;
18 |   correlationId: Int32;
19 |   clientId: NullableString;
20 | }
21 |
22 | export const encodeRequestHeader = <T extends Int16>(
23 |   encoder: Encoder,
24 |   header: RequestHeader<T>
25 | ): Encoder => {
26 |   return encoder
27 |     .writeEnum(header.apiKey)
28 |     .writeInt16(header.apiVersion)
29 |     .writeInt32(header.correlationId)
30 |     .writeNullableString(header.clientId);
31 | };
32 |
33 | export const decodeRequestHeader = <T extends Int16>(
34 |   decoder: Decoder,
35 |   predicate: EnumPredicate<T>
36 | ): RequestHeader<T> => {
37 |   return {
38 |     apiKey: decoder.readEnum(predicate),
39 |     apiVersion: decoder.readInt16(),
40 |     correlationId: decoder.readInt32(),
41 |     clientId: decoder.readNullableString(),
42 |   };
43 | };
44 |
45 | export type RequestMetadata = Omit<RequestHeader<Int16>, "apiKey">;
46 |
--------------------------------------------------------------------------------
/src/protocol/internal/README.md:
--------------------------------------------------------------------------------
1 | # Internal Protocol
2 |
3 | This folder contains files related to the internal wire protocol used for sending messages between the gateway worker and Durable Objects. This protocol uses the same primitives and message structure as the Kafka protocol.
4 |
5 | Additionally, each internal message type corresponds one-to-one with a Kafka message type of the same name. In most cases, these internal messages contain a subset of the corresponding Kafka message that is scoped to a single partition. Since Kafka messages can address multiple partitions, each Kafka message can be split apart into many internal messages of the same type, with one internal message for each partition, as sketched below.
6 |
7 | Each file contains request and response type definitions and encoding and decoding functions for one of the internal APIs, which again correspond one-to-one with Kafka APIs of the same name. The exception is `common.ts`, which contains code shared across multiple internal APIs.
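As a rough illustration of this splitting (the `splitProduceRequest` helper is hypothetical and not part of the repository, but it only uses types defined here), a Kafka Produce request could be fanned out into one internal Produce request per partition:

```ts
import { InternalProduceRequest } from "src/protocol/internal/produce";
import { KafkaProduceRequest } from "src/protocol/kafka/produce";
import { PartitionInfo } from "src/state/partition";

// Hypothetical sketch: map each (topic, partition) pair in a Kafka Produce
// request to its own internal Produce request, keyed by partition id
const splitProduceRequest = (
  request: KafkaProduceRequest
): Map<string, InternalProduceRequest> => {
  const split = new Map<string, InternalProduceRequest>();
  request.topics.forEach((topic) =>
    topic.partitions.forEach((partition) => {
      const info = new PartitionInfo(topic.name, partition.index);
      split.set(info.id, {
        acks: request.acks,
        messageSet: partition.messageSet,
      });
    })
  );
  return split;
};
```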
8 | 9 | ## Map 10 | 11 | ![kafka worker map](map.png) 12 | -------------------------------------------------------------------------------- /src/protocol/internal/common.ts: -------------------------------------------------------------------------------- 1 | import { ValueOf } from "src/common"; 2 | import { 3 | ApiKey, 4 | Int32, 5 | generateEnumPredicate, 6 | int32Size, 7 | } from "src/protocol/common"; 8 | import { Encoder } from "src/protocol/encoder"; 9 | 10 | // PartitionApiKey is an Int16 11 | export const PartitionApiKey = { 12 | Produce: ApiKey.Produce, 13 | Fetch: ApiKey.Fetch, 14 | ListOffsets: ApiKey.ListOffsets, 15 | } as const; 16 | export type PartitionApiKey = ValueOf; 17 | export const validPartitionApiKey = generateEnumPredicate(PartitionApiKey); 18 | 19 | export class InternalResponseEncoder extends Encoder { 20 | constructor(correlationId: Int32, initialBufferSize = 64) { 21 | super(initialBufferSize + int32Size); 22 | this.writeInt32(correlationId); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/protocol/internal/fetch.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode, Int32, Int64, MessageSet } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // Internal Fetch Request (Version: 0) => max_wait_ms fetch_offset min_bytes max_bytes 6 | // max_wait_ms => INT32 7 | // fetch_offset => INT64 8 | // min_bytes => INT32 9 | // max_bytes => INT32 10 | 11 | export interface InternalFetchRequest { 12 | maxWaitMs: Int32; 13 | fetchOffset: Int64; 14 | minBytes: Int32; 15 | maxBytes: Int32; 16 | } 17 | 18 | export const encodeInternalFetchRequest = ( 19 | encoder: Encoder, 20 | request: InternalFetchRequest 21 | ): ArrayBuffer => { 22 | return encoder 23 | .writeInt32(request.maxWaitMs) 24 | .writeInt64(request.fetchOffset) 25 | .writeInt32(request.minBytes) 26 | .writeInt32(request.maxBytes) 27 | .buffer(); 28 | }; 29 | 30 | export const decodeInternalFetchRequest = ( 31 | decoder: Decoder 32 | ): InternalFetchRequest => { 33 | return { 34 | maxWaitMs: decoder.readInt32(), 35 | fetchOffset: decoder.readInt64(), 36 | minBytes: decoder.readInt32(), 37 | maxBytes: decoder.readInt32(), 38 | }; 39 | }; 40 | 41 | // Internal Fetch Response (Version: 0) => error_code high_watermark message_set_size message_set 42 | // error_code => INT16 43 | // high_watermark => INT64 44 | // message_set_size => INT32 45 | // message_set => BYTES 46 | 47 | export interface InternalFetchResponse { 48 | errorCode: ErrorCode; 49 | highWatermark: Int64; 50 | messageSet: MessageSet; 51 | } 52 | 53 | export const encodeInternalFetchResponse = ( 54 | encoder: Encoder, 55 | response: InternalFetchResponse 56 | ): ArrayBuffer => { 57 | return encoder 58 | .writeEnum(response.errorCode) 59 | .writeInt64(response.highWatermark) 60 | .writeMessageSet(response.messageSet) 61 | .buffer(); 62 | }; 63 | 64 | export const decodeInternalFetchResponse = ( 65 | decoder: Decoder 66 | ): InternalFetchResponse => { 67 | return { 68 | errorCode: decoder.readErrorCode(), 69 | highWatermark: decoder.readInt64(), 70 | messageSet: decoder.readMessageSet(), 71 | }; 72 | }; 73 | 74 | export const stubInternalFetchResponse = ( 75 | errorCode: ErrorCode 76 | ): InternalFetchResponse => ({ 77 | errorCode, 78 | highWatermark: BigInt(0), 79 | messageSet: new Uint8Array(), 80 | }); 81 | 
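//
// Editor's illustration (not part of the original file): these helpers
// round-trip through the shared Encoder/Decoder classes, for example:
//
//   const buffer = encodeInternalFetchRequest(new Encoder(), {
//     maxWaitMs: 500,
//     fetchOffset: BigInt(0),
//     minBytes: 1,
//     maxBytes: 1024,
//   });
//   const request = decodeInternalFetchRequest(new Decoder(buffer));
//   // request deep-equals the object that was encoded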
-------------------------------------------------------------------------------- /src/protocol/internal/list-offsets.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode, Int32, Int64 } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // Internal ListOffsets Request (Version: 0) => timestamp max_num_offsets 6 | // timestamp => INT64 7 | // max_num_offsets => INT32 8 | 9 | export interface InternalListOffsetsRequest { 10 | timestamp: Int64; 11 | maxNumOffsets: Int32; 12 | } 13 | 14 | export const encodeInternalListOffsetsRequest = ( 15 | encoder: Encoder, 16 | request: InternalListOffsetsRequest 17 | ): ArrayBuffer => { 18 | return encoder 19 | .writeInt64(request.timestamp) 20 | .writeInt32(request.maxNumOffsets) 21 | .buffer(); 22 | }; 23 | 24 | export const decodeInternalListOffsetsRequest = ( 25 | decoder: Decoder 26 | ): InternalListOffsetsRequest => { 27 | return { 28 | timestamp: decoder.readInt64(), 29 | maxNumOffsets: decoder.readInt32(), 30 | }; 31 | }; 32 | 33 | // Internal ListOffsets Response (Version: 0) => error_code [old_style_offsets] 34 | // error_code => INT16 35 | // old_style_offsets => INT64 36 | 37 | export interface InternalListOffsetsResponse { 38 | errorCode: ErrorCode; 39 | oldStyleOffsets: Int64[]; 40 | } 41 | 42 | export const encodeInternalListOffsetsResponse = ( 43 | encoder: Encoder, 44 | response: InternalListOffsetsResponse 45 | ): ArrayBuffer => { 46 | return encoder 47 | .writeEnum(response.errorCode) 48 | .writeInt64Array(response.oldStyleOffsets) 49 | .buffer(); 50 | }; 51 | 52 | export const decodeInternalListOffsetsResponse = ( 53 | decoder: Decoder 54 | ): InternalListOffsetsResponse => { 55 | return { 56 | errorCode: decoder.readErrorCode(), 57 | oldStyleOffsets: decoder.readInt64Array(), 58 | }; 59 | }; 60 | 61 | export const stubInternalListOffsetsResponse = ( 62 | errorCode: ErrorCode 63 | ): InternalListOffsetsResponse => ({ 64 | errorCode, 65 | oldStyleOffsets: [], 66 | }); 67 | -------------------------------------------------------------------------------- /src/protocol/internal/map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/protocol/internal/map.png -------------------------------------------------------------------------------- /src/protocol/internal/produce.ts: -------------------------------------------------------------------------------- 1 | import { Acks, ErrorCode, Int64, MessageSet } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // Internal Produce Request (Version: 0) => acks message_set_size message_set 6 | // acks => INT16 7 | // message_set_size => INT32 8 | // message_set => BYTES 9 | 10 | export interface InternalProduceRequest { 11 | acks: Acks; 12 | messageSet: MessageSet; 13 | } 14 | 15 | export const encodeInternalProduceRequest = ( 16 | encoder: Encoder, 17 | request: InternalProduceRequest 18 | ): ArrayBuffer => { 19 | return encoder 20 | .writeEnum(request.acks) 21 | .writeMessageSet(request.messageSet) 22 | .buffer(); 23 | }; 24 | 25 | export const decodeInternalProduceRequest = ( 26 | decoder: Decoder 27 | ): InternalProduceRequest => ({ 28 | acks: decoder.readAcks(), 29 | messageSet: decoder.readMessageSet(), 30 | }); 31 | 32 | // 
Internal Produce Response (Version: 0) => error_code base_offset 33 | // error_code => INT16 34 | // base_offset => INT64 35 | 36 | export interface InternalProduceResponse { 37 | errorCode: ErrorCode; 38 | baseOffset: Int64; 39 | } 40 | 41 | export const encodeInternalProduceResponse = ( 42 | encoder: Encoder, 43 | response: InternalProduceResponse 44 | ): ArrayBuffer => { 45 | return encoder 46 | .writeEnum(response.errorCode) 47 | .writeInt64(response.baseOffset) 48 | .buffer(); 49 | }; 50 | 51 | export const decodeInternalProduceResponse = ( 52 | decoder: Decoder 53 | ): InternalProduceResponse => { 54 | return { 55 | errorCode: decoder.readErrorCode(), 56 | baseOffset: decoder.readInt64(), 57 | }; 58 | }; 59 | 60 | export const stubInternalProduceResponse = ( 61 | errorCode: ErrorCode 62 | ): InternalProduceResponse => ({ 63 | errorCode, 64 | baseOffset: BigInt(0), 65 | }); 66 | -------------------------------------------------------------------------------- /src/protocol/kafka/README.md: -------------------------------------------------------------------------------- 1 | # Kafka Protocol 2 | 3 | This folder contains files related to the Kafka wire protocol, as described in [A Guide To The Kafka Protocol](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) and the [Kafka Protocol Guide](https://kafka.apache.org/protocol.html). These two references contain much of the same information, but neither is a strict subset of the other. _A Guide To The Kafka Protocol_ is older, shorter, more readable, and contains almost all the information needed to implement message encoding and decoding for our (somewhat ancient) version of Kafka. 4 | 5 | Each file contains request and response type definitions and encoding and decoding functions for one of the Kafka APIs, with the exception of `common.ts`, which contains code shared across multiple Kafka APIs. 
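As a rough sketch of how these pieces fit together (illustrative only, not the actual gateway code, which lives elsewhere under `src/`), an incoming request buffer can be decoded by validating the size header, reading the request header, and then dispatching on the API key:

```ts
import { ApiKey, validApiKey } from "src/protocol/common";
import { decodeRequestHeader } from "src/protocol/header";
import { KafkaDecoder } from "src/protocol/kafka/common";
import { decodeKafkaFetchRequest } from "src/protocol/kafka/fetch";
import { decodeKafkaListOffsetsRequest } from "src/protocol/kafka/list-offsets";
import { decodeKafkaMetadataRequest } from "src/protocol/kafka/metadata";
import { decodeKafkaProduceRequest } from "src/protocol/kafka/produce";

// Hypothetical dispatcher: KafkaDecoder checks the size header, then the
// request header tells us which body decoder to use
const decodeKafkaRequest = (buffer: ArrayBuffer) => {
  const decoder = new KafkaDecoder(buffer);
  const header = decodeRequestHeader(decoder, validApiKey);
  switch (header.apiKey) {
    case ApiKey.Produce:
      return decodeKafkaProduceRequest(decoder);
    case ApiKey.Fetch:
      return decodeKafkaFetchRequest(decoder);
    case ApiKey.ListOffsets:
      return decodeKafkaListOffsetsRequest(decoder);
    case ApiKey.Metadata:
      return decodeKafkaMetadataRequest(decoder);
  }
};
```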
6 | 7 | ## Map 8 | 9 | ![kafka worker map](map.png) 10 | -------------------------------------------------------------------------------- /src/protocol/kafka/common.ts: -------------------------------------------------------------------------------- 1 | import { ApiKey, Int32, int32Size } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | import { RequestHeader, encodeRequestHeader } from "src/protocol/header"; 5 | 6 | export class KafkaEncoder extends Encoder { 7 | constructor(initialBufferSize = 64) { 8 | super(initialBufferSize + int32Size); 9 | // Reserve space for size header at front of buffer 10 | this.offset += int32Size; 11 | } 12 | 13 | // Overrides parent method, adds size header to buffer 14 | buffer(): ArrayBuffer { 15 | // Write size header to reserved space at front of buffer 16 | this.view.setInt32(0, this.offset - int32Size); 17 | return super.buffer(); 18 | } 19 | } 20 | 21 | export class KafkaRequestEncoder extends KafkaEncoder { 22 | constructor(header: RequestHeader) { 23 | super(); 24 | encodeRequestHeader(this, header); 25 | } 26 | } 27 | 28 | export class KafkaResponseEncoder extends KafkaEncoder { 29 | constructor(correlationId: Int32, initialBufferSize = 64) { 30 | super(initialBufferSize + int32Size); 31 | this.writeInt32(correlationId); 32 | } 33 | } 34 | 35 | export class KafkaDecoder extends Decoder { 36 | constructor(buffer: ArrayBuffer) { 37 | super(buffer); 38 | 39 | const expectedSize = this.readInt32(); 40 | const actualSize = buffer.byteLength - int32Size; 41 | 42 | if (expectedSize !== actualSize) { 43 | throw new Error( 44 | `Message length does not match size header: expected ${expectedSize} but got ${actualSize}` 45 | ); 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/protocol/kafka/fetch.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode, Int32, Int64, MessageSet } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // Fetch Request (Version: 0) => replica_id max_wait_ms min_bytes [topics] 6 | // replica_id => INT32 7 | // max_wait_ms => INT32 8 | // min_bytes => INT32 9 | // topics => topic [partitions] 10 | // topic => STRING 11 | // partitions => partition fetch_offset partition_max_bytes 12 | // partition => INT32 13 | // fetch_offset => INT64 14 | // partition_max_bytes => INT32 15 | // 16 | // https://kafka.apache.org/protocol.html#The_Messages_Fetch 17 | 18 | export interface KafkaFetchRequest { 19 | replicaId: Int32; 20 | maxWaitMs: Int32; 21 | minBytes: Int32; 22 | topics: { 23 | name: string; 24 | partitions: { 25 | index: Int32; 26 | fetchOffset: Int64; 27 | maxBytes: Int32; 28 | }[]; 29 | }[]; 30 | } 31 | 32 | export const encodeKafkaFetchRequest = ( 33 | encoder: Encoder, 34 | request: KafkaFetchRequest 35 | ): ArrayBuffer => { 36 | return encoder 37 | .writeInt32(request.replicaId) 38 | .writeInt32(request.maxWaitMs) 39 | .writeInt32(request.minBytes) 40 | .writeArray(request.topics, (topic) => 41 | encoder 42 | .writeString(topic.name) 43 | .writeArray(topic.partitions, (partition) => 44 | encoder 45 | .writeInt32(partition.index) 46 | .writeInt64(partition.fetchOffset) 47 | .writeInt32(partition.maxBytes) 48 | ) 49 | ) 50 | .buffer(); 51 | }; 52 | 53 | export const decodeKafkaFetchRequest = ( 54 | decoder: Decoder 55 | ): KafkaFetchRequest => { 
56 | return { 57 | replicaId: decoder.readInt32(), 58 | maxWaitMs: decoder.readInt32(), 59 | minBytes: decoder.readInt32(), 60 | topics: decoder.readArray(() => ({ 61 | name: decoder.readString(), 62 | partitions: decoder.readArray(() => ({ 63 | index: decoder.readInt32(), 64 | fetchOffset: decoder.readInt64(), 65 | maxBytes: decoder.readInt32(), 66 | })), 67 | })), 68 | }; 69 | }; 70 | 71 | // Fetch Response (Version: 0) => [responses] 72 | // responses => topic [partitions] 73 | // topic => STRING 74 | // partitions => partition_index error_code high_watermark records 75 | // partition_index => INT32 76 | // error_code => INT16 77 | // high_watermark => INT64 78 | // records => RECORDS 79 | // 80 | // https://kafka.apache.org/protocol.html#The_Messages_Fetch 81 | // 82 | // There some variation here between the current protocol spec and 83 | // the older protocol guide: 84 | // 85 | // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol 86 | // 87 | // Here we follow the older description of message set (now called record batch). 88 | 89 | export interface KafkaFetchResponse { 90 | topics: { 91 | name: string; 92 | partitions: { 93 | index: Int32; 94 | errorCode: ErrorCode; 95 | highWatermark: Int64; 96 | messageSet: MessageSet; 97 | }[]; 98 | }[]; 99 | } 100 | 101 | export const encodeKafkaFetchResponse = ( 102 | encoder: Encoder, 103 | response: KafkaFetchResponse 104 | ): ArrayBuffer => { 105 | return encoder 106 | .writeArray(response.topics, (topic) => 107 | encoder 108 | .writeString(topic.name) 109 | .writeArray(topic.partitions, (partition) => 110 | encoder 111 | .writeInt32(partition.index) 112 | .writeEnum(partition.errorCode) 113 | .writeInt64(partition.highWatermark) 114 | .writeMessageSet(partition.messageSet) 115 | ) 116 | ) 117 | .buffer(); 118 | }; 119 | 120 | export const decodeKafkaFetchResponse = ( 121 | decoder: Decoder 122 | ): KafkaFetchResponse => { 123 | return { 124 | topics: decoder.readArray(() => ({ 125 | name: decoder.readString(), 126 | partitions: decoder.readArray(() => ({ 127 | index: decoder.readInt32(), 128 | errorCode: decoder.readErrorCode(), 129 | highWatermark: decoder.readInt64(), 130 | messageSet: decoder.readMessageSet(), 131 | })), 132 | })), 133 | }; 134 | }; 135 | 136 | export const stubKafkaFetchResponse = ( 137 | request: KafkaFetchRequest, 138 | errorCode: ErrorCode 139 | ): KafkaFetchResponse => ({ 140 | topics: request.topics.map((topic) => ({ 141 | name: topic.name, 142 | partitions: topic.partitions.map((partition) => ({ 143 | index: partition.index, 144 | errorCode, 145 | highWatermark: BigInt(0), 146 | messageSet: new Uint8Array(), 147 | })), 148 | })), 149 | }); 150 | -------------------------------------------------------------------------------- /src/protocol/kafka/list-offsets.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode, Int32, Int64 } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // ListOffsets Request (Version: 0) => replica_id [topics] 6 | // replica_id => INT32 7 | // topics => name [partitions] 8 | // name => STRING 9 | // partitions => partition_index timestamp max_num_offsets 10 | // partition_index => INT32 11 | // timestamp => INT64 12 | // max_num_offsets => INT32 13 | // 14 | // https://kafka.apache.org/protocol.html#The_Messages_ListOffsets 15 | 16 | export interface KafkaListOffsetsRequest { 17 | replicaId: Int32; 18 | topics: { 19 | 
name: string; 20 | partitions: { 21 | index: Int32; 22 | timestamp: Int64; 23 | maxNumOffsets: Int32; 24 | }[]; 25 | }[]; 26 | } 27 | 28 | export const encodeKafkaListOffsetsRequest = ( 29 | encoder: Encoder, 30 | request: KafkaListOffsetsRequest 31 | ): ArrayBuffer => { 32 | return encoder 33 | .writeInt32(request.replicaId) 34 | .writeArray(request.topics, (topic) => 35 | encoder 36 | .writeString(topic.name) 37 | .writeArray(topic.partitions, (partition) => 38 | encoder 39 | .writeInt32(partition.index) 40 | .writeInt64(partition.timestamp) 41 | .writeInt32(partition.maxNumOffsets) 42 | ) 43 | ) 44 | .buffer(); 45 | }; 46 | 47 | export const decodeKafkaListOffsetsRequest = ( 48 | decoder: Decoder 49 | ): KafkaListOffsetsRequest => { 50 | return { 51 | replicaId: decoder.readInt32(), 52 | topics: decoder.readArray(() => ({ 53 | name: decoder.readString(), 54 | partitions: decoder.readArray(() => ({ 55 | index: decoder.readInt32(), 56 | timestamp: decoder.readInt64(), 57 | maxNumOffsets: decoder.readInt32(), 58 | })), 59 | })), 60 | }; 61 | }; 62 | 63 | // ListOffsets Response (Version: 0) => [topics] 64 | // topics => name [partitions] 65 | // name => STRING 66 | // partitions => partition_index error_code [old_style_offsets] 67 | // partition_index => INT32 68 | // error_code => INT16 69 | // old_style_offsets => INT64 70 | // 71 | // https://kafka.apache.org/protocol.html#The_Messages_ListOffsets 72 | 73 | export interface KafkaListOffsetsResponse { 74 | topics: { 75 | name: string; 76 | partitions: { 77 | index: Int32; 78 | errorCode: ErrorCode; 79 | oldStyleOffsets: Int64[]; 80 | }[]; 81 | }[]; 82 | } 83 | 84 | export const encodeKafkaListOffsetsResponse = ( 85 | encoder: Encoder, 86 | response: KafkaListOffsetsResponse 87 | ): ArrayBuffer => { 88 | return encoder 89 | .writeArray(response.topics, (topic) => 90 | encoder 91 | .writeString(topic.name) 92 | .writeArray(topic.partitions, (partition) => 93 | encoder 94 | .writeInt32(partition.index) 95 | .writeEnum(partition.errorCode) 96 | .writeInt64Array(partition.oldStyleOffsets) 97 | ) 98 | ) 99 | .buffer(); 100 | }; 101 | 102 | export const decodeKafkaListOffsetsResponse = ( 103 | decoder: Decoder 104 | ): KafkaListOffsetsResponse => { 105 | return { 106 | topics: decoder.readArray(() => ({ 107 | name: decoder.readString(), 108 | partitions: decoder.readArray(() => ({ 109 | index: decoder.readInt32(), 110 | errorCode: decoder.readErrorCode(), 111 | oldStyleOffsets: decoder.readInt64Array(), 112 | })), 113 | })), 114 | }; 115 | }; 116 | 117 | export const stubKafkaListOffsetsResponse = ( 118 | request: KafkaListOffsetsRequest, 119 | errorCode: ErrorCode 120 | ): KafkaListOffsetsResponse => ({ 121 | topics: request.topics.map((topic) => ({ 122 | name: topic.name, 123 | partitions: topic.partitions.map((partition) => ({ 124 | index: partition.index, 125 | errorCode, 126 | oldStyleOffsets: [], 127 | })), 128 | })), 129 | }); 130 | -------------------------------------------------------------------------------- /src/protocol/kafka/map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/protocol/kafka/map.png -------------------------------------------------------------------------------- /src/protocol/kafka/metadata.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode, Int32 } from "src/protocol/common"; 2 | import { Decoder } from 
"src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // Metadata Request (Version: 0) => [topics] 6 | // topics => name 7 | // name => STRING 8 | // 9 | // https://kafka.apache.org/protocol.html#The_Messages_Metadata 10 | 11 | export interface KafkaMetadataRequest { 12 | topics: string[]; 13 | } 14 | 15 | export const encodeKafkaMetadataRequest = ( 16 | encoder: Encoder, 17 | request: KafkaMetadataRequest 18 | ): ArrayBuffer => { 19 | return encoder.writeStringArray(request.topics).buffer(); 20 | }; 21 | 22 | export const decodeKafkaMetadataRequest = ( 23 | decoder: Decoder 24 | ): KafkaMetadataRequest => { 25 | return { topics: decoder.readStringArray() }; 26 | }; 27 | 28 | // Metadata Response (Version: 0) => [brokers] [topics] 29 | // brokers => node_id host port 30 | // node_id => INT32 31 | // host => STRING 32 | // port => INT32 33 | // topics => error_code name [partitions] 34 | // error_code => INT16 35 | // name => STRING 36 | // partitions => error_code partition_index leader_id [replica_nodes] [isr_nodes] 37 | // error_code => INT16 38 | // partition_index => INT32 39 | // leader_id => INT32 40 | // replica_nodes => INT32 41 | // isr_nodes => INT32 42 | // 43 | // https://kafka.apache.org/protocol.html#The_Messages_Metadata 44 | 45 | export interface KafkaMetadataResponse { 46 | brokers: { 47 | nodeId: Int32; 48 | host: string; 49 | port: Int32; 50 | }[]; 51 | topics: { 52 | errorCode: ErrorCode; 53 | name: string; 54 | partitions: { 55 | errorCode: ErrorCode; 56 | partitionIndex: Int32; 57 | leaderId: Int32; 58 | replicaNodes: Int32[]; 59 | isrNodes: Int32[]; 60 | }[]; 61 | }[]; 62 | } 63 | 64 | export const encodeKafkaMetadataResponse = ( 65 | encoder: Encoder, 66 | response: KafkaMetadataResponse 67 | ): ArrayBuffer => { 68 | return encoder 69 | .writeArray(response.brokers, (broker) => 70 | encoder 71 | .writeInt32(broker.nodeId) 72 | .writeString(broker.host) 73 | .writeInt32(broker.port) 74 | ) 75 | .writeArray(response.topics, (topic) => 76 | encoder 77 | .writeEnum(topic.errorCode) 78 | .writeString(topic.name) 79 | .writeArray(topic.partitions, (partition) => 80 | encoder 81 | .writeEnum(partition.errorCode) 82 | .writeInt32(partition.partitionIndex) 83 | .writeInt32(partition.leaderId) 84 | .writeInt32Array(partition.replicaNodes) 85 | .writeInt32Array(partition.isrNodes) 86 | ) 87 | ) 88 | .buffer(); 89 | }; 90 | 91 | export const decodeKafkaMetadataResponse = ( 92 | decoder: Decoder 93 | ): KafkaMetadataResponse => { 94 | return { 95 | brokers: decoder.readArray(() => ({ 96 | nodeId: decoder.readInt32(), 97 | host: decoder.readString(), 98 | port: decoder.readInt32(), 99 | })), 100 | topics: decoder.readArray(() => ({ 101 | errorCode: decoder.readErrorCode(), 102 | name: decoder.readString(), 103 | partitions: decoder.readArray(() => ({ 104 | errorCode: decoder.readErrorCode(), 105 | partitionIndex: decoder.readInt32(), 106 | leaderId: decoder.readInt32(), 107 | replicaNodes: decoder.readInt32Array(), 108 | isrNodes: decoder.readInt32Array(), 109 | })), 110 | })), 111 | }; 112 | }; 113 | 114 | export const stubKafkaMetadataResponse = ( 115 | request: KafkaMetadataRequest, 116 | errorCode: ErrorCode 117 | ): KafkaMetadataResponse => ({ 118 | brokers: [], 119 | topics: request.topics.map((topic) => ({ 120 | errorCode, 121 | name: topic, 122 | partitions: [], 123 | })), 124 | }); 125 | -------------------------------------------------------------------------------- /src/protocol/kafka/produce.ts: 
-------------------------------------------------------------------------------- 1 | import { Acks, ErrorCode, Int32, Int64, MessageSet } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | 5 | // Produce Request (Version: 0) => acks timeout_ms [topic_data] 6 | // acks => INT16 7 | // timeout_ms => INT32 8 | // topic_data => name [partition_data] 9 | // name => STRING 10 | // partition_data => index records 11 | // index => INT32 12 | // records => RECORDS 13 | // 14 | // https://kafka.apache.org/protocol.html#The_Messages_Produce 15 | // 16 | // There some variation here between the current protocol spec and 17 | // the older protocol guide: 18 | // 19 | // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol 20 | // 21 | // Here we follow the older description of message set (now called record batch). 22 | 23 | export interface KafkaProduceRequest { 24 | acks: Acks; 25 | timeoutMs: Int32; 26 | topics: { 27 | name: string; 28 | partitions: { 29 | index: Int32; 30 | messageSet: MessageSet; 31 | }[]; 32 | }[]; 33 | } 34 | 35 | export const encodeKafkaProduceRequest = ( 36 | encoder: Encoder, 37 | request: KafkaProduceRequest 38 | ): ArrayBuffer => { 39 | return encoder 40 | .writeEnum(request.acks) 41 | .writeInt32(request.timeoutMs) 42 | .writeArray(request.topics, (topic) => 43 | encoder 44 | .writeString(topic.name) 45 | .writeArray(topic.partitions, (partition) => 46 | encoder 47 | .writeInt32(partition.index) 48 | .writeMessageSet(partition.messageSet) 49 | ) 50 | ) 51 | .buffer(); 52 | }; 53 | 54 | export const decodeKafkaProduceRequest = ( 55 | decoder: Decoder 56 | ): KafkaProduceRequest => { 57 | return { 58 | acks: decoder.readAcks(), 59 | timeoutMs: decoder.readInt32(), 60 | topics: decoder.readArray(() => ({ 61 | name: decoder.readString(), 62 | partitions: decoder.readArray(() => ({ 63 | index: decoder.readInt32(), 64 | messageSet: decoder.readMessageSet(), 65 | })), 66 | })), 67 | }; 68 | }; 69 | 70 | // Produce Response (Version: 0) => [responses] 71 | // responses => name [partition_responses] 72 | // name => STRING 73 | // partition_responses => index error_code base_offset 74 | // index => INT32 75 | // error_code => INT16 76 | // base_offset => INT64 77 | // 78 | // https://kafka.apache.org/protocol.html#The_Messages_Produce 79 | 80 | export interface KafkaProduceResponse { 81 | topics: { 82 | name: string; 83 | partitions: { 84 | index: Int32; 85 | errorCode: ErrorCode; 86 | baseOffset: Int64; 87 | }[]; 88 | }[]; 89 | } 90 | 91 | export const encodeKafkaProduceResponse = ( 92 | encoder: Encoder, 93 | response: KafkaProduceResponse 94 | ): ArrayBuffer => { 95 | return encoder 96 | .writeArray(response.topics, (topic) => 97 | encoder 98 | .writeString(topic.name) 99 | .writeArray(topic.partitions, (partition) => 100 | encoder 101 | .writeInt32(partition.index) 102 | .writeEnum(partition.errorCode) 103 | .writeInt64(partition.baseOffset) 104 | ) 105 | ) 106 | .buffer(); 107 | }; 108 | 109 | export const decodeKafkaProduceResponse = ( 110 | decoder: Decoder 111 | ): KafkaProduceResponse => { 112 | return { 113 | topics: decoder.readArray(() => ({ 114 | name: decoder.readString(), 115 | partitions: decoder.readArray(() => ({ 116 | index: decoder.readInt32(), 117 | errorCode: decoder.readErrorCode(), 118 | baseOffset: decoder.readInt64(), 119 | })), 120 | })), 121 | }; 122 | }; 123 | 124 | export const stubKafkaProduceResponse = ( 125 | request: KafkaProduceRequest, 
126 | errorCode: ErrorCode 127 | ): KafkaProduceResponse => ({ 128 | topics: request.topics.map((topic) => ({ 129 | name: topic.name, 130 | partitions: topic.partitions.map((partition) => ({ 131 | index: partition.index, 132 | errorCode, 133 | baseOffset: BigInt(0), 134 | })), 135 | })), 136 | }); 137 | -------------------------------------------------------------------------------- /src/protocol/map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/protocol/map.png -------------------------------------------------------------------------------- /src/state/README.md: -------------------------------------------------------------------------------- 1 | # State 2 | 3 | This folder contains files related to Durable Objects. This project uses two Durable Object classes, which are defined in `cluster.ts` and `partition.ts`. Partition data is split into a continuous sequence of fixed-size chunks, whose format is defined in `chunk.ts`. 4 | 5 | ## Map 6 | 7 | ![kafka worker map](map.png) 8 | -------------------------------------------------------------------------------- /src/state/chunk.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line import/default 2 | import crc32 from "crc-32"; 3 | import { 4 | ErrorCode, 5 | Int64, 6 | MessageSet, 7 | int32Size, 8 | int64Size, 9 | int8Size, 10 | } from "src/protocol/common"; 11 | 12 | export interface Chunk { 13 | // The offset of the first message in the chunk 14 | offsetStart: Int64; 15 | buffer: ArrayBuffer; 16 | // Framing information about messages stored in the buffer 17 | frames: MessageFrame[]; 18 | // The next available index in the buffer (the next message should be written 19 | // to the buffer starting at this index) 20 | nextIndex: number; 21 | } 22 | // TODO: Change to interface/object type for better readability 23 | // export interface MessageFrame { 24 | // startIndex: number; 25 | // size: number 26 | // } 27 | // Tuple of [startIndex, size] 28 | export type MessageFrame = [number, number]; 29 | 30 | // Performs required preprocessing and validation on a message set from a 31 | // Produce request before it can be written to chunks 32 | export const prepareMessageSet = ( 33 | messageSet: MessageSet, 34 | initialOffset: Int64 35 | ): { filler: ChunkFiller } | { error: ErrorCode } => { 36 | // TODO: What is going on here? Node-specific weirdness? 
Instantiating this 37 | // with `new DataView(messageSet)` type-checks, but crashes under test with 38 | // the error "First argument to DataView constructor must be an ArrayBuffer" 39 | const view = new DataView( 40 | messageSet.buffer, 41 | messageSet.byteOffset, 42 | messageSet.byteLength 43 | ); 44 | const frames: MessageFrame[] = []; 45 | 46 | let nextOffset = initialOffset; 47 | let viewIndex = 0; 48 | while (viewIndex < view.byteLength) { 49 | const frameStart = viewIndex; 50 | 51 | // Set message offset field 52 | view.setBigInt64(viewIndex, nextOffset); 53 | viewIndex += int64Size; 54 | nextOffset++; 55 | 56 | // Read message size field 57 | const messageSize = view.getInt32(viewIndex); 58 | if (messageSize < 0) { 59 | return { error: ErrorCode.InvalidMessageSize }; 60 | } 61 | viewIndex += int32Size; 62 | 63 | // Verify crc of remainder of message 64 | const crcExpected = view.getInt32(viewIndex); 65 | // eslint-disable-next-line import/no-named-as-default-member 66 | const crcActual = crc32.buf( 67 | new Uint8Array( 68 | view.buffer, 69 | view.byteOffset + viewIndex + int32Size, 70 | messageSize - int32Size 71 | ) 72 | ); 73 | if (crcActual !== crcExpected) { 74 | return { error: ErrorCode.CorruptMessage }; 75 | } 76 | 77 | // If the magic byte is one, there is an additional attributes byte right 78 | // after the magic byte, and if the magic byte is zero then there is no 79 | // attributes byte 80 | // https://kafka.apache.org/08/documentation/#messageformat 81 | const magicByte = view.getInt8(viewIndex + int32Size); 82 | if (magicByte === 1) { 83 | // Check attributes byte to make sure that message set is not compressed, 84 | // since we don't support compression 85 | const attributes = view.getInt8(viewIndex + int32Size + int8Size); 86 | // If message set is compressed, attributes byte will be nonzero 87 | if (attributes !== 0) { 88 | console.log(`Attributes check failed, attributes: ${attributes}`); 89 | // There isn't a good error code for this that dates back to 0.8.0 90 | return { error: ErrorCode.UnknownServerError }; 91 | } 92 | } else if (magicByte !== 0) { 93 | // The magic byte can only be 0 or 1 (for this version of Kafka) 94 | console.log(`Magic byte check failed, magic byte: ${magicByte}`); 95 | // There isn't a good error code for this that dates back to 0.8.0 96 | return { error: ErrorCode.UnknownServerError }; 97 | } 98 | 99 | // Save framing information about message 100 | frames.push([frameStart, int64Size + int32Size + messageSize]); 101 | viewIndex += messageSize; 102 | } 103 | 104 | return { filler: new ChunkFiller(messageSet, frames) }; 105 | }; 106 | 107 | // Stores a preprocessed message set that can be written incrementally to 108 | // multiple chunks 109 | export class ChunkFiller { 110 | private messageSet: MessageSet; 111 | private frames: MessageFrame[]; 112 | 113 | constructor(messageSet: MessageSet, frames: MessageFrame[]) { 114 | this.messageSet = messageSet; 115 | this.frames = frames; 116 | } 117 | 118 | // Returns the number of messages written to chunk 119 | fillChunk(chunk: Chunk): number { 120 | // Number of available bytes remaining in chunk 121 | const chunkSpace = chunk.buffer.byteLength - chunk.nextIndex; 122 | const stopIndex = this.frames.findIndex( 123 | ([start, size]) => start + size > chunkSpace 124 | ); 125 | 126 | // Number of messages that will be copied into the chunk 127 | const frameCount = stopIndex === -1 ? 
this.frames.length : stopIndex; 128 | if (frameCount === 0) { 129 | return 0; 130 | } 131 | const [finalFrameStart, finalFrameSize] = this.frames[frameCount - 1]; 132 | const copySize = finalFrameStart + finalFrameSize; 133 | 134 | // Copy message data into chunk 135 | new Uint8Array(chunk.buffer).set( 136 | this.messageSet.subarray(0, copySize), 137 | chunk.nextIndex 138 | ); 139 | // Copy message framing information into chunk 140 | chunk.frames.push( 141 | ...this.frames 142 | .slice(0, frameCount) 143 | // Reindex message frames based on chunk buffer 144 | .map(([start, size]) => [start + chunk.nextIndex, size] as MessageFrame) 145 | ); 146 | chunk.nextIndex += copySize; 147 | 148 | // Delete message data and framing information that was copied into chunk 149 | this.messageSet = this.messageSet.subarray(copySize); 150 | this.frames = this.frames 151 | .slice(frameCount) 152 | // Reindex message frames after copied data is discarded 153 | .map(([start, size]) => [start - copySize, size]); 154 | 155 | return frameCount; 156 | } 157 | 158 | // Returns true if there are no more messages that need be written 159 | done(): boolean { 160 | return this.frames.length === 0; 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/state/cluster.ts: -------------------------------------------------------------------------------- 1 | import { ElemOf, Env } from "src/common"; 2 | import { ErrorCode, Int32 } from "src/protocol/common"; 3 | import { KafkaMetadataResponse } from "src/protocol/kafka/metadata"; 4 | 5 | // These are made up values 6 | const globalClusterName = "global"; 7 | export const globalBrokerId = 333; 8 | 9 | const searchParam = "topics"; 10 | const sep = ","; 11 | 12 | export const fetchClusterMetadata = async ( 13 | env: Env, 14 | topics: string[] 15 | ): Promise => { 16 | const id = env.CLUSTER.idFromName(globalClusterName); 17 | const obj = env.CLUSTER.get(id); 18 | 19 | const request = new Request( 20 | `https://cluster.state/metadata?${searchParam}=${topics.join(sep)}` 21 | ); 22 | const response = await obj.fetch(request); 23 | 24 | if (!response.ok) { 25 | const message = await response.text(); 26 | throw new Error(`Error from Cluster DO: ${message}`); 27 | } 28 | 29 | return await response.json(); 30 | }; 31 | 32 | interface ClusterMetadata { 33 | topics: { 34 | name: string; 35 | partitions: { 36 | index: Int32; 37 | }[]; 38 | }[]; 39 | } 40 | const clusterMetadataKey = "cluster_metadata"; 41 | 42 | export class Cluster { 43 | private readonly state: DurableObjectState; 44 | private readonly env: Env; 45 | 46 | private readonly initialMetadata: ClusterMetadata; 47 | 48 | constructor(state: DurableObjectState, env: Env) { 49 | this.state = state; 50 | this.env = env; 51 | 52 | this.initialMetadata = JSON.parse( 53 | env.INITIAL_CLUSTER_METADATA 54 | ) as ClusterMetadata; 55 | } 56 | 57 | // TODO: This should be converted to a WebSocket protocol 58 | async fetch(request: Request): Promise { 59 | const topicQuery = new URL(request.url).searchParams.get(searchParam); 60 | if (topicQuery === null) { 61 | return new Response(`Missing search param: ${searchParam}`, { 62 | status: 400, 63 | }); 64 | } 65 | 66 | const topicNames = topicQuery === "" ? [] : topicQuery.split(","); 67 | // Metadata is never written to disk (for now) so the intial metadata will 68 | // always be used here 69 | const state = 70 | (await this.state.storage.get(clusterMetadataKey)) ?? 
71 | this.initialMetadata; 72 | 73 | const brokers = [ 74 | { 75 | nodeId: globalBrokerId, 76 | host: this.env.HOSTNAME, 77 | port: parseInt(this.env.PORT), 78 | }, 79 | ]; 80 | 81 | // Empty topic list means return metadata for all topics 82 | if (topicNames.length === 0) { 83 | return new Response( 84 | JSON.stringify({ brokers, topics: state.topics.map(generateMetadata) }) 85 | ); 86 | } 87 | 88 | return new Response( 89 | JSON.stringify({ 90 | brokers, 91 | topics: topicNames.map((topicName) => { 92 | const topic = state.topics.find(({ name }) => name === topicName); 93 | if (topic === undefined) { 94 | return { 95 | errorCode: ErrorCode.UnknownTopicOrPartition, 96 | name: topicName, 97 | partitions: [], 98 | }; 99 | } 100 | return generateMetadata(topic); 101 | }), 102 | }) 103 | ); 104 | } 105 | } 106 | 107 | type TopicState = ElemOf; 108 | type TopicMetadata = ElemOf; 109 | 110 | const generateMetadata = (topic: TopicState): TopicMetadata => ({ 111 | errorCode: ErrorCode.None, 112 | name: topic.name, 113 | partitions: topic.partitions.map((partition) => ({ 114 | errorCode: ErrorCode.None, 115 | partitionIndex: partition.index, 116 | leaderId: globalBrokerId, 117 | replicaNodes: [], 118 | isrNodes: [], 119 | })), 120 | }); 121 | -------------------------------------------------------------------------------- /src/state/map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwellpeterson/kafka-worker/784715d42177a83d6af23edd4851723d1afee70a/src/state/map.png -------------------------------------------------------------------------------- /src/state/partition.ts: -------------------------------------------------------------------------------- 1 | import { AbortedRequestError, Env, stringify } from "src/common"; 2 | import { Acks, ErrorCode, Int64, MessageSet } from "src/protocol/common"; 3 | import { Decoder } from "src/protocol/decoder"; 4 | import { Encoder } from "src/protocol/encoder"; 5 | import { RequestMetadata, decodeRequestHeader } from "src/protocol/header"; 6 | import { 7 | InternalResponseEncoder, 8 | PartitionApiKey, 9 | validPartitionApiKey, 10 | } from "src/protocol/internal/common"; 11 | import { 12 | InternalFetchRequest, 13 | InternalFetchResponse, 14 | decodeInternalFetchRequest, 15 | encodeInternalFetchResponse, 16 | stubInternalFetchResponse, 17 | } from "src/protocol/internal/fetch"; 18 | import { 19 | InternalListOffsetsRequest, 20 | InternalListOffsetsResponse, 21 | decodeInternalListOffsetsRequest, 22 | encodeInternalListOffsetsResponse, 23 | stubInternalListOffsetsResponse, 24 | } from "src/protocol/internal/list-offsets"; 25 | import { 26 | InternalProduceResponse, 27 | decodeInternalProduceRequest, 28 | encodeInternalProduceResponse, 29 | stubInternalProduceResponse, 30 | } from "src/protocol/internal/produce"; 31 | import { Chunk, prepareMessageSet } from "src/state/chunk"; 32 | import { PendingFetch } from "src/state/pending-fetch"; 33 | 34 | export const partitionStubUrl = "https://partition.state"; 35 | 36 | interface OffsetInfo { 37 | nextOffset: Int64; 38 | chunkOffsets: Int64[]; 39 | } 40 | const offsetInfoKey = "offset-info"; 41 | const initialOffsetInfo = (): OffsetInfo => ({ 42 | nextOffset: BigInt(0), 43 | chunkOffsets: [], 44 | }); 45 | 46 | type ChunkId = string; 47 | const chunkIdPrefix = "chunk"; 48 | 49 | type RequestId = string; 50 | interface PartitionRequestMetadata extends RequestMetadata { 51 | requestId: RequestId; 52 | } 53 | 54 | export class Partition { 55 | 
private readonly state: DurableObjectState; 56 | private readonly chunkSize: number; 57 | 58 | private readonly pending = new Map(); 59 | private connCount = 0; 60 | 61 | constructor(state: DurableObjectState, env: Env) { 62 | this.state = state; 63 | 64 | this.chunkSize = parseInt(env.PARTITION_CHUNK_SIZE); 65 | } 66 | 67 | fetch(request: Request): Response { 68 | if (request.headers.get("Upgrade") !== "websocket") { 69 | return new Response("Expected Upgrade: websocket", { status: 426 }); 70 | } 71 | 72 | const webSocketPair = new WebSocketPair(); 73 | const [client, server] = Object.values(webSocketPair); 74 | 75 | const requestPrefix = this.connCount; 76 | this.connCount++; 77 | 78 | const requestIdDelim = "_"; 79 | let requestCount = 0; 80 | 81 | server.accept(); 82 | server.addEventListener("message", (event) => { 83 | if (typeof event.data === "string") { 84 | console.log("Received string data, but we want binary data!"); 85 | return; 86 | } 87 | 88 | // Every internal request sent to this DO is uniquely identified (within 89 | // each in-memory lifetime) by the tuple (connCount, requestCount), where 90 | // connCount identifies the socket connection that the request belongs to, 91 | // and requestCount identifies the request within the scope of its socket 92 | // connection 93 | const requestId = `${requestPrefix}${requestIdDelim}${requestCount}`; 94 | requestCount++; 95 | 96 | this.handleRequest(requestId, event.data) 97 | .then((response) => { 98 | if ( 99 | response !== null && 100 | // TODO: Needed? Added because of issues related to: 101 | // https://github.com/cloudflare/miniflare/issues/339 102 | server.readyState === WebSocket.READY_STATE_OPEN 103 | ) { 104 | server.send(response); 105 | } 106 | }) 107 | .catch((error) => 108 | console.log( 109 | `[Partition DO] Uncaught error while handling request: ${stringify( 110 | error 111 | )}` 112 | ) 113 | ); 114 | }); 115 | server.addEventListener("close", () => { 116 | this.pending.forEach((request, id) => { 117 | // eslint-disable-next-line @typescript-eslint/prefer-string-starts-ends-with 118 | if (id.slice(0, id.indexOf(requestIdDelim)) === `${requestPrefix}`) { 119 | // Abort pending requests tied to this connection 120 | request.abort(); 121 | } 122 | }); 123 | }); 124 | 125 | return new Response(null, { 126 | status: 101, 127 | webSocket: client, 128 | }); 129 | } 130 | 131 | private async handleRequest( 132 | requestId: RequestId, 133 | buffer: ArrayBuffer 134 | ): Promise { 135 | const decoder = new Decoder(buffer); 136 | const header = decodeRequestHeader(decoder, validPartitionApiKey); 137 | const encoder = new InternalResponseEncoder(header.correlationId); 138 | 139 | const metadata = { ...header, requestId }; 140 | 141 | switch (header.apiKey) { 142 | case PartitionApiKey.Produce: 143 | return this.handleProduceRequest(metadata, decoder, encoder); 144 | case PartitionApiKey.Fetch: 145 | return this.handleFetchRequest(metadata, decoder, encoder); 146 | case PartitionApiKey.ListOffsets: 147 | return this.handleListOffsetsRequest(metadata, decoder, encoder); 148 | } 149 | } 150 | 151 | private async handleProduceRequest( 152 | metadata: PartitionRequestMetadata, 153 | decoder: Decoder, 154 | encoder: Encoder 155 | ): Promise { 156 | try { 157 | const request = decodeInternalProduceRequest(decoder); 158 | const response = await this.appendMessageSet(request.messageSet); 159 | 160 | if (request.acks === Acks.None) { 161 | return null; 162 | } 163 | return encodeInternalProduceResponse(encoder, response); 164 | } catch (e) 
{ 165 | console.log( 166 | `[Partition DO] Error while handling Produce request: ${stringify(e)}` 167 | ); 168 | return encodeInternalProduceResponse( 169 | encoder, 170 | stubInternalProduceResponse(ErrorCode.UnknownServerError) 171 | ); 172 | } 173 | } 174 | 175 | private async appendMessageSet( 176 | messageSet: MessageSet 177 | ): Promise { 178 | const cursor = await this.getCursor(); 179 | const baseOffset = cursor.nextOffset; 180 | 181 | const prepared = prepareMessageSet(messageSet, cursor.nextOffset); 182 | if ("error" in prepared) { 183 | return { 184 | errorCode: prepared.error, 185 | baseOffset: BigInt(0), 186 | }; 187 | } 188 | const currentChunk = await this.getCurrentChunk(cursor); 189 | 190 | const chunks: Record = {}; 191 | 192 | for ( 193 | let chunk = currentChunk; 194 | !prepared.filler.done(); 195 | chunk = this.nextChunk(cursor) 196 | ) { 197 | chunks[chunkId(chunk.offsetStart)] = chunk; 198 | cursor.nextOffset += BigInt(prepared.filler.fillChunk(chunk)); 199 | // Add freshly created chunks to chunk list (chunks that existed before 200 | // this request will already have been added to the chunk list) 201 | if (cursor.chunkOffsets.at(-1) !== chunk.offsetStart) { 202 | cursor.chunkOffsets.push(chunk.offsetStart); 203 | } 204 | } 205 | 206 | await this.state.storage.put({ 207 | ...chunks, 208 | [offsetInfoKey]: cursor, 209 | }); 210 | this.pending.forEach((pending) => 211 | pending.addChunks(cursor.nextOffset, Object.values(chunks)) 212 | ); 213 | 214 | return { errorCode: ErrorCode.None, baseOffset }; 215 | } 216 | 217 | private async getCursor(): Promise { 218 | return ( 219 | (await this.state.storage.get(offsetInfoKey)) ?? 220 | initialOffsetInfo() 221 | ); 222 | } 223 | 224 | private async getCurrentChunk(cursor: OffsetInfo): Promise { 225 | const currentChunkStart = cursor.chunkOffsets.at(-1); 226 | if (currentChunkStart === undefined) { 227 | return this.nextChunk(cursor); 228 | } 229 | // Chunk must exist, because offset and chunk are updated together 230 | return this.state.storage.get( 231 | chunkId(currentChunkStart) 232 | ) as Promise; 233 | } 234 | 235 | private nextChunk(cursor: OffsetInfo): Chunk { 236 | return { 237 | offsetStart: cursor.nextOffset, 238 | buffer: new ArrayBuffer(this.chunkSize), 239 | frames: [], 240 | nextIndex: 0, 241 | }; 242 | } 243 | 244 | private async handleFetchRequest( 245 | metadata: PartitionRequestMetadata, 246 | decoder: Decoder, 247 | encoder: Encoder 248 | ): Promise { 249 | try { 250 | const request = decodeInternalFetchRequest(decoder); 251 | const response = await this.fillMessageSet(metadata, request); 252 | return encodeInternalFetchResponse(encoder, response); 253 | } catch (e) { 254 | if (e instanceof AbortedRequestError) { 255 | return null; 256 | } 257 | console.log( 258 | `[Partition DO] Error while handling Fetch request: ${stringify(e)}` 259 | ); 260 | return encodeInternalFetchResponse( 261 | encoder, 262 | stubInternalFetchResponse(ErrorCode.UnknownServerError) 263 | ); 264 | } 265 | } 266 | 267 | private async fillMessageSet( 268 | metadata: PartitionRequestMetadata, 269 | request: InternalFetchRequest 270 | ): Promise { 271 | const cursor = await this.getCursor(); 272 | if (request.fetchOffset < 0 || request.fetchOffset > cursor.nextOffset) { 273 | return { 274 | errorCode: ErrorCode.OffsetOutOfRange, 275 | highWatermark: cursor.nextOffset, 276 | messageSet: new Uint8Array(), 277 | }; 278 | } 279 | 280 | // The index of the chunk one position to the right of the chunk that the 281 | // fetch request should 
start from (could be a binary search) 282 | const startChunkRight = cursor.chunkOffsets.findIndex( 283 | (chunkOffset) => request.fetchOffset < chunkOffset 284 | ); 285 | // The index of the chunk that the fetch request should start from 286 | const startChunk = 287 | startChunkRight === -1 288 | ? // Start from the most recent chunk 289 | cursor.chunkOffsets.length - 1 290 | : startChunkRight - 1; 291 | const maxChunks = Math.ceil(request.maxBytes / this.chunkSize) + 1; 292 | 293 | // Load the subset of chunks we need to read from storage 294 | const chunks = await this.state.storage.get( 295 | cursor.chunkOffsets 296 | .slice(startChunk, startChunk + maxChunks) 297 | .map((chunkOffset) => chunkId(chunkOffset)) 298 | ); 299 | 300 | return new Promise((resolve, reject) => { 301 | const done = (response: InternalFetchResponse) => { 302 | this.pending.delete(metadata.requestId); 303 | resolve(response); 304 | }; 305 | const abort = () => { 306 | this.pending.delete(metadata.requestId); 307 | reject(new AbortedRequestError()); 308 | }; 309 | const pending = new PendingFetch(request, cursor.nextOffset, done, abort); 310 | this.pending.set(metadata.requestId, pending); 311 | pending.addChunks(cursor.nextOffset, chunks.values()); 312 | }); 313 | } 314 | 315 | private async handleListOffsetsRequest( 316 | metadata: PartitionRequestMetadata, 317 | decoder: Decoder, 318 | encoder: Encoder 319 | ): Promise { 320 | try { 321 | const request = decodeInternalListOffsetsRequest(decoder); 322 | const response = await this.listOffsets(request); 323 | return encodeInternalListOffsetsResponse(encoder, response); 324 | } catch (e) { 325 | console.log( 326 | `[Partition DO] Error while handling ListOffsets request: ${stringify( 327 | e 328 | )}` 329 | ); 330 | return encodeInternalListOffsetsResponse( 331 | encoder, 332 | stubInternalListOffsetsResponse(ErrorCode.UnknownServerError) 333 | ); 334 | } 335 | } 336 | 337 | private async listOffsets( 338 | request: InternalListOffsetsRequest 339 | ): Promise { 340 | if (request.timestamp === BigInt(-2)) { 341 | // Only send earliest available offset 342 | return { 343 | errorCode: ErrorCode.None, 344 | oldStyleOffsets: [initialOffsetInfo().nextOffset], 345 | }; 346 | } 347 | 348 | const cursor = await this.getCursor(); 349 | 350 | // Send requested number of offsets 351 | const stopOffset = cursor.nextOffset - BigInt(request.maxNumOffsets); 352 | const clampedStopOffset = stopOffset < 0 ? -1 : stopOffset; 353 | const offsets: Int64[] = []; 354 | for (let i = cursor.nextOffset; i > clampedStopOffset; i--) { 355 | // TODO: More efficient approach here? 
356 | offsets.push(i); 357 | } 358 | return { 359 | errorCode: ErrorCode.None, 360 | oldStyleOffsets: offsets, 361 | }; 362 | } 363 | } 364 | 365 | const chunkId = (offsetStart: Chunk["offsetStart"]): ChunkId => 366 | `${chunkIdPrefix}-${offsetStart.toString()}`; 367 | 368 | export class PartitionInfo { 369 | private static readonly delim = "_"; 370 | readonly topic: string; 371 | readonly index: number; 372 | readonly id: string; 373 | 374 | constructor(topic: string, index: number) { 375 | this.topic = topic; 376 | this.index = index; 377 | this.id = `${topic}${PartitionInfo.delim}${index}`; 378 | } 379 | 380 | static fromId(id: string): PartitionInfo { 381 | const delimIndex = id.lastIndexOf(this.delim); 382 | return new PartitionInfo( 383 | id.slice(0, delimIndex), 384 | parseInt(id.slice(delimIndex + 1)) 385 | ); 386 | } 387 | } 388 | -------------------------------------------------------------------------------- /src/state/pending-fetch.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode, Int64, MessageSet } from "src/protocol/common"; 2 | import { 3 | InternalFetchRequest, 4 | InternalFetchResponse, 5 | } from "src/protocol/internal/fetch"; 6 | import { Chunk } from "src/state/chunk"; 7 | 8 | export class PendingFetch { 9 | private readonly request: InternalFetchRequest; 10 | 11 | private highWatermark: Int64; 12 | private readonly messageSet: MessageSet; 13 | private nextOffset: Int64; 14 | private bytesWritten = 0; 15 | 16 | private readonly done: () => void; 17 | readonly abort: () => void; 18 | 19 | constructor( 20 | request: InternalFetchRequest, 21 | highWatermark: Int64, 22 | done: (response: InternalFetchResponse) => void, 23 | abort: () => void 24 | ) { 25 | this.request = request; 26 | 27 | this.highWatermark = highWatermark; 28 | this.messageSet = new Uint8Array(new ArrayBuffer(request.maxBytes)); 29 | this.nextOffset = request.fetchOffset; 30 | 31 | const timeoutId = setTimeout(() => { 32 | this.done(); 33 | }, request.maxWaitMs); 34 | 35 | this.done = () => { 36 | clearTimeout(timeoutId); 37 | done({ 38 | errorCode: ErrorCode.None, 39 | highWatermark: this.highWatermark, 40 | messageSet: this.messageSet.subarray(0, this.bytesWritten), 41 | }); 42 | }; 43 | this.abort = () => { 44 | clearTimeout(timeoutId); 45 | abort(); 46 | }; 47 | } 48 | 49 | addChunks(highWatermark: Int64, chunks: Iterable) { 50 | this.highWatermark = highWatermark; 51 | 52 | for (const chunk of chunks) { 53 | const messageSetFull = this.readChunk(chunk); 54 | // This covers a weird case where individual messages are larger than the 55 | // difference between the upper and lower response byte limits, the 56 | // response is below the lower limit, and another message can't be read 57 | // without exceeding the upper limit. Note that the protocol allows for 58 | // partial messages to be returned (which would cover this case), but as 59 | // implemented we only return full messages. 
60 | if (messageSetFull || this.bytesWritten >= this.request.minBytes) { 61 | this.done(); 62 | } 63 | } 64 | } 65 | 66 | // Copies messages from the chunk into the response message set buffer, and 67 | // returns true if no more messages can be copied into the message set buffer 68 | private readChunk(chunk: Chunk): boolean { 69 | if ( 70 | chunk.frames.length === 0 || 71 | this.nextOffset < chunk.offsetStart || 72 | this.nextOffset >= chunk.offsetStart + BigInt(chunk.frames.length) 73 | ) { 74 | // This chunk doesn't contain the data we're looking for 75 | return false; 76 | } 77 | 78 | // The offset we want to read from might be in the middle of the chunk, 79 | // so we need to skip over the messages with smaller offsets 80 | const frameOffset = Math.max( 81 | 0, 82 | Number(this.nextOffset - chunk.offsetStart) 83 | ); 84 | // The buffer index that we want to start reading message data from 85 | const bufferOffset = chunk.frames[frameOffset][0]; 86 | 87 | const frames = chunk.frames.slice(frameOffset); 88 | // Find the index of the message that would put us over the response byte 89 | // limit. We want to read all messages up to but not including this message. 90 | const stopFrameIndex = frames.findIndex(([startIndex, size]) => { 91 | const writeSize = startIndex - bufferOffset + size; 92 | return this.bytesWritten + writeSize > this.request.maxBytes; 93 | }); 94 | 95 | if (stopFrameIndex !== -1) { 96 | // We can't read the remainder of the chunk without exceeding the byte 97 | // limit, so instead we only read part of the chunk. This read fills up 98 | // the remainder of the buffer, and is the last read that we can do. 99 | const copySize = frames[stopFrameIndex][0] - bufferOffset; 100 | this.messageSet.set( 101 | new Uint8Array(chunk.buffer, bufferOffset, copySize), 102 | this.bytesWritten 103 | ); 104 | this.bytesWritten += copySize; 105 | this.nextOffset += BigInt(stopFrameIndex); 106 | return true; 107 | } 108 | 109 | // We can read the remainder of the chunk without exceeding the byte limit 110 | const copySize = chunk.nextIndex - bufferOffset; 111 | this.messageSet.set( 112 | new Uint8Array(chunk.buffer, bufferOffset, copySize), 113 | this.bytesWritten 114 | ); 115 | this.bytesWritten += copySize; 116 | this.nextOffset += BigInt(frames.length); 117 | return false; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Test 2 | 3 | This folder contains all tests. 
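These are Jest tests (the `__snapshots__` folders hold the snapshot files they produce). Assuming a standard Jest setup, they can be run from the repository root with `npx jest` (or via the project's own test script, if one is defined).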
4 | -------------------------------------------------------------------------------- /test/__snapshots__/index.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`Kafka API fetch: fetch on empty partition 1`] = `"AAAAKgAAAAAAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAAAAAAAAAAAAA=="`; 4 | 5 | exports[`Kafka API fetch: fetch on empty partition 2`] = ` 6 | Object { 7 | "topics": Array [ 8 | Object { 9 | "name": "test-topic", 10 | "partitions": Array [ 11 | Object { 12 | "errorCode": 0, 13 | "highWatermark": 0n, 14 | "index": 0, 15 | "messageSet": Uint8Array [], 16 | }, 17 | ], 18 | }, 19 | ], 20 | } 21 | `; 22 | 23 | exports[`Kafka API listOffsets: full offset list after producing records 1`] = `"AAAAJgAAAAAAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAAAAAAA"`; 24 | 25 | exports[`Kafka API listOffsets: full offset list after producing records 2`] = ` 26 | Object { 27 | "topics": Array [ 28 | Object { 29 | "name": "test-topic", 30 | "partitions": Array [ 31 | Object { 32 | "baseOffset": 0n, 33 | "errorCode": 0, 34 | "index": 0, 35 | }, 36 | ], 37 | }, 38 | ], 39 | } 40 | `; 41 | 42 | exports[`Kafka API listOffsets: full offset list after producing records 3`] = `"AAAAegAAAAEAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAsAAAAAAAAACgAAAAAAAAAJAAAAAAAAAAgAAAAAAAAABwAAAAAAAAAGAAAAAAAAAAUAAAAAAAAABAAAAAAAAAADAAAAAAAAAAIAAAAAAAAAAQAAAAAAAAAA"`; 43 | 44 | exports[`Kafka API listOffsets: full offset list after producing records 4`] = ` 45 | Object { 46 | "topics": Array [ 47 | Object { 48 | "name": "test-topic", 49 | "partitions": Array [ 50 | Object { 51 | "errorCode": 0, 52 | "index": 0, 53 | "oldStyleOffsets": Array [ 54 | 10n, 55 | 9n, 56 | 8n, 57 | 7n, 58 | 6n, 59 | 5n, 60 | 4n, 61 | 3n, 62 | 2n, 63 | 1n, 64 | 0n, 65 | ], 66 | }, 67 | ], 68 | }, 69 | ], 70 | } 71 | `; 72 | 73 | exports[`Kafka API listOffsets: initial offsets 1`] = `"AAAAKgAAAAAAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAEAAAAAAAAAAA=="`; 74 | 75 | exports[`Kafka API listOffsets: initial offsets 2`] = ` 76 | Object { 77 | "topics": Array [ 78 | Object { 79 | "name": "test-topic", 80 | "partitions": Array [ 81 | Object { 82 | "errorCode": 0, 83 | "index": 0, 84 | "oldStyleOffsets": Array [ 85 | 0n, 86 | ], 87 | }, 88 | ], 89 | }, 90 | ], 91 | } 92 | `; 93 | 94 | exports[`Kafka API listOffsets: maximum number of offsets is respected 1`] = `"AAAAJgAAAAAAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAAAAAAA"`; 95 | 96 | exports[`Kafka API listOffsets: maximum number of offsets is respected 2`] = ` 97 | Object { 98 | "topics": Array [ 99 | Object { 100 | "name": "test-topic", 101 | "partitions": Array [ 102 | Object { 103 | "baseOffset": 0n, 104 | "errorCode": 0, 105 | "index": 0, 106 | }, 107 | ], 108 | }, 109 | ], 110 | } 111 | `; 112 | 113 | exports[`Kafka API listOffsets: maximum number of offsets is respected 3`] = `"AAAAQgAAAAEAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAQAAAAAAAAACgAAAAAAAAAJAAAAAAAAAAgAAAAAAAAABw=="`; 114 | 115 | exports[`Kafka API listOffsets: maximum number of offsets is respected 4`] = ` 116 | Object { 117 | "topics": Array [ 118 | Object { 119 | "name": "test-topic", 120 | "partitions": Array [ 121 | Object { 122 | "errorCode": 0, 123 | "index": 0, 124 | "oldStyleOffsets": Array [ 125 | 10n, 126 | 9n, 127 | 8n, 128 | 7n, 129 | ], 130 | }, 131 | ], 132 | }, 133 | ], 134 | } 135 | `; 136 | 137 | exports[`Kafka API listOffsets: oldest offset after producing records 1`] = `"AAAAJgAAAAAAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAAAAAAA"`; 138 | 139 
| exports[`Kafka API listOffsets: oldest offset after producing records 2`] = ` 140 | Object { 141 | "topics": Array [ 142 | Object { 143 | "name": "test-topic", 144 | "partitions": Array [ 145 | Object { 146 | "baseOffset": 0n, 147 | "errorCode": 0, 148 | "index": 0, 149 | }, 150 | ], 151 | }, 152 | ], 153 | } 154 | `; 155 | 156 | exports[`Kafka API listOffsets: oldest offset after producing records 3`] = `"AAAAKgAAAAEAAAABAAp0ZXN0LXRvcGljAAAAAQAAAAAAAAAAAAEAAAAAAAAAAA=="`; 157 | 158 | exports[`Kafka API listOffsets: oldest offset after producing records 4`] = ` 159 | Object { 160 | "topics": Array [ 161 | Object { 162 | "name": "test-topic", 163 | "partitions": Array [ 164 | Object { 165 | "errorCode": 0, 166 | "index": 0, 167 | "oldStyleOffsets": Array [ 168 | 0n, 169 | ], 170 | }, 171 | ], 172 | }, 173 | ], 174 | } 175 | `; 176 | 177 | exports[`Kafka API metadata: fetch all topics 1`] = `"AAAAQwAAAAUAAAABAAABTQAJbG9jYWxob3N0AAAiUwAAAAEAAAAKdGVzdC10b3BpYwAAAAEAAAAAAAAAAAFNAAAAAAAAAAA="`; 178 | 179 | exports[`Kafka API metadata: fetch all topics 2`] = ` 180 | Object { 181 | "brokers": Array [ 182 | Object { 183 | "host": "localhost", 184 | "nodeId": 333, 185 | "port": 8787, 186 | }, 187 | ], 188 | "topics": Array [ 189 | Object { 190 | "errorCode": 0, 191 | "name": "test-topic", 192 | "partitions": Array [ 193 | Object { 194 | "errorCode": 0, 195 | "isrNodes": Array [], 196 | "leaderId": 333, 197 | "partitionIndex": 0, 198 | "replicaNodes": Array [], 199 | }, 200 | ], 201 | }, 202 | ], 203 | } 204 | `; 205 | 206 | exports[`Kafka API metadata: fetch nonexistent topic 1`] = `"AAAAMgAAAAUAAAABAAABTQAJbG9jYWxob3N0AAAiUwAAAAEAAwALb3RoZXItdG9waWMAAAAA"`; 207 | 208 | exports[`Kafka API metadata: fetch nonexistent topic 2`] = ` 209 | Object { 210 | "brokers": Array [ 211 | Object { 212 | "host": "localhost", 213 | "nodeId": 333, 214 | "port": 8787, 215 | }, 216 | ], 217 | "topics": Array [ 218 | Object { 219 | "errorCode": 3, 220 | "name": "other-topic", 221 | "partitions": Array [], 222 | }, 223 | ], 224 | } 225 | `; 226 | 227 | exports[`Kafka API metadata: fetch specific topic 1`] = `"AAAAQwAAAAUAAAABAAABTQAJbG9jYWxob3N0AAAiUwAAAAEAAAAKdGVzdC10b3BpYwAAAAEAAAAAAAAAAAFNAAAAAAAAAAA="`; 228 | 229 | exports[`Kafka API metadata: fetch specific topic 2`] = ` 230 | Object { 231 | "brokers": Array [ 232 | Object { 233 | "host": "localhost", 234 | "nodeId": 333, 235 | "port": 8787, 236 | }, 237 | ], 238 | "topics": Array [ 239 | Object { 240 | "errorCode": 0, 241 | "name": "test-topic", 242 | "partitions": Array [ 243 | Object { 244 | "errorCode": 0, 245 | "isrNodes": Array [], 246 | "leaderId": 333, 247 | "partitionIndex": 0, 248 | "replicaNodes": Array [], 249 | }, 250 | ], 251 | }, 252 | ], 253 | } 254 | `; 255 | 256 | exports[`Kafka API metadata: fetch specific topic and nonexistent topic 1`] = `"AAAAVgAAAAUAAAABAAABTQAJbG9jYWxob3N0AAAiUwAAAAIAAAAKdGVzdC10b3BpYwAAAAEAAAAAAAAAAAFNAAAAAAAAAAAAAwALb3RoZXItdG9waWMAAAAA"`; 257 | 258 | exports[`Kafka API metadata: fetch specific topic and nonexistent topic 2`] = ` 259 | Object { 260 | "brokers": Array [ 261 | Object { 262 | "host": "localhost", 263 | "nodeId": 333, 264 | "port": 8787, 265 | }, 266 | ], 267 | "topics": Array [ 268 | Object { 269 | "errorCode": 0, 270 | "name": "test-topic", 271 | "partitions": Array [ 272 | Object { 273 | "errorCode": 0, 274 | "isrNodes": Array [], 275 | "leaderId": 333, 276 | "partitionIndex": 0, 277 | "replicaNodes": Array [], 278 | }, 279 | ], 280 | }, 281 | Object { 282 | "errorCode": 3, 283 | "name": "other-topic", 
284 | "partitions": Array [], 285 | }, 286 | ], 287 | } 288 | `; 289 | 290 | exports[`Kafka API produce: send multiple message batches to one partition 1`] = `"AAAAJwAAAAUAAAABAAtvdGhlci10b3BpYwAAAAEAAAPnAAAAAAAAAAAAAA=="`; 291 | 292 | exports[`Kafka API produce: send multiple message batches to one partition 2`] = ` 293 | Object { 294 | "topics": Array [ 295 | Object { 296 | "name": "other-topic", 297 | "partitions": Array [ 298 | Object { 299 | "baseOffset": 0n, 300 | "errorCode": 0, 301 | "index": 999, 302 | }, 303 | ], 304 | }, 305 | ], 306 | } 307 | `; 308 | 309 | exports[`Kafka API produce: send multiple message batches to one partition 3`] = `"AAAAJwAAAAYAAAABAAtvdGhlci10b3BpYwAAAAEAAAPnAAAAAAAAAAAAAw=="`; 310 | 311 | exports[`Kafka API produce: send multiple message batches to one partition 4`] = ` 312 | Object { 313 | "topics": Array [ 314 | Object { 315 | "name": "other-topic", 316 | "partitions": Array [ 317 | Object { 318 | "baseOffset": 3n, 319 | "errorCode": 0, 320 | "index": 999, 321 | }, 322 | ], 323 | }, 324 | ], 325 | } 326 | `; 327 | 328 | exports[`Kafka API produce: send one message to one partition 1`] = `"AAAAJgAAAAUAAAABAAp0ZXN0LXRvcGljAAAAAQAAAU0AAAAAAAAAAAAA"`; 329 | 330 | exports[`Kafka API produce: send one message to one partition 2`] = ` 331 | Object { 332 | "topics": Array [ 333 | Object { 334 | "name": "test-topic", 335 | "partitions": Array [ 336 | Object { 337 | "baseOffset": 0n, 338 | "errorCode": 0, 339 | "index": 333, 340 | }, 341 | ], 342 | }, 343 | ], 344 | } 345 | `; 346 | -------------------------------------------------------------------------------- /test/client/incremental-response.test.ts: -------------------------------------------------------------------------------- 1 | import { jest } from "@jest/globals"; 2 | import { 3 | IncrementalResponse, 4 | PartitionResponse, 5 | } from "src/client/incremental-response"; 6 | import { ErrorCode } from "src/protocol/common"; 7 | import { PartitionInfo } from "src/state/partition"; 8 | 9 | describe("IncrementalResponse", () => { 10 | const done = jest.fn<(response: TestResponse) => void>(); 11 | 12 | interface TestResponse { 13 | topics: { 14 | name: string; 15 | partitions: { 16 | index: number; 17 | errorCode: ErrorCode; 18 | }[]; 19 | }[]; 20 | } 21 | 22 | type TestCase = [ 23 | string, 24 | TestResponse, 25 | [PartitionInfo, PartitionResponse][], 26 | TestResponse 27 | ]; 28 | const cases: TestCase[] = [ 29 | [ 30 | "fills in response with one subresponse", 31 | { 32 | topics: [ 33 | { 34 | name: "topic-one", 35 | partitions: [{ index: 0, errorCode: ErrorCode.None }], 36 | }, 37 | ], 38 | }, 39 | [ 40 | [ 41 | new PartitionInfo("topic-one", 0), 42 | { errorCode: ErrorCode.CorruptMessage }, 43 | ], 44 | ], 45 | { 46 | topics: [ 47 | { 48 | name: "topic-one", 49 | partitions: [{ index: 0, errorCode: ErrorCode.CorruptMessage }], 50 | }, 51 | ], 52 | }, 53 | ], 54 | [ 55 | "fills in response with multiple topics", 56 | { 57 | topics: [ 58 | { 59 | name: "topic-one", 60 | partitions: [{ index: 0, errorCode: ErrorCode.None }], 61 | }, 62 | { 63 | name: "topic-two", 64 | partitions: [{ index: 5, errorCode: ErrorCode.None }], 65 | }, 66 | ], 67 | }, 68 | [ 69 | [new PartitionInfo("topic-one", 0), { errorCode: ErrorCode.None }], 70 | [ 71 | new PartitionInfo("topic-two", 5), 72 | { errorCode: ErrorCode.CorruptMessage }, 73 | ], 74 | ], 75 | { 76 | topics: [ 77 | { 78 | name: "topic-one", 79 | partitions: [{ index: 0, errorCode: ErrorCode.None }], 80 | }, 81 | { 82 | name: "topic-two", 83 | partitions: [{ 
index: 5, errorCode: ErrorCode.CorruptMessage }], 84 | }, 85 | ], 86 | }, 87 | ], 88 | [ 89 | "fills in response with multiple partitions", 90 | { 91 | topics: [ 92 | { 93 | name: "topic-one", 94 | partitions: [ 95 | { index: 0, errorCode: ErrorCode.None }, 96 | { index: 12, errorCode: ErrorCode.None }, 97 | ], 98 | }, 99 | ], 100 | }, 101 | [ 102 | [ 103 | new PartitionInfo("topic-one", 0), 104 | { errorCode: ErrorCode.CorruptMessage }, 105 | ], 106 | [new PartitionInfo("topic-one", 12), { errorCode: ErrorCode.None }], 107 | ], 108 | { 109 | topics: [ 110 | { 111 | name: "topic-one", 112 | partitions: [ 113 | { index: 0, errorCode: ErrorCode.CorruptMessage }, 114 | { index: 12, errorCode: ErrorCode.None }, 115 | ], 116 | }, 117 | ], 118 | }, 119 | ], 120 | ]; 121 | 122 | test.each(cases)("%s", (_name, stub, subresponses, expected) => { 123 | const incremental = new IncrementalResponse(stub, done); 124 | 125 | subresponses.forEach((subresponse) => { 126 | expect(done).not.toHaveBeenCalled(); 127 | incremental.addPartition(...subresponse); 128 | }); 129 | expect(done).toHaveBeenCalledTimes(1); 130 | expect(done).toHaveBeenCalledWith(expected); 131 | 132 | // Make sure that done is not called again and response is not mutated, even 133 | // after receiving more subresponses 134 | 135 | const response = done.mock.calls[0][0]; 136 | const snapshot = structuredClone(response) as TestResponse; 137 | 138 | subresponses.forEach((subresponse) => { 139 | incremental.addPartition(...subresponse); 140 | expect(done).toHaveBeenCalledTimes(1); 141 | expect(response).toEqual(snapshot); 142 | }); 143 | }); 144 | 145 | test("calls done immediately when stub response has no topics", () => { 146 | const stub = { topics: [] }; 147 | 148 | new IncrementalResponse(stub, done); 149 | expect(done).toHaveBeenCalledTimes(1); 150 | expect(done).toHaveBeenCalledWith(stub); 151 | }); 152 | 153 | test("calls done immediately when stub response has no partitions", () => { 154 | const stub = { 155 | topics: [{ name: "topic-one", partitions: [] }], 156 | }; 157 | 158 | new IncrementalResponse(stub, done); 159 | expect(done).toHaveBeenCalledTimes(1); 160 | expect(done).toHaveBeenCalledWith(stub); 161 | }); 162 | }); 163 | -------------------------------------------------------------------------------- /test/common.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line import/default 2 | import crc32 from "crc-32"; 3 | import { MessageSet, int32Size, int8Size } from "src/protocol/common"; 4 | import { Encoder } from "src/protocol/encoder"; 5 | 6 | // These functions are only intended to be used in test files! 
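// Typical usage (illustrative; see the protocol and end-to-end tests):
//   expect(base64(buffer)).toMatchSnapshot(); // snapshot the encoded wire format
//   const messageSet = fillMessageSet(5);     // five arbitrary test messages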
7 | 8 | // Convert an ArrayBuffer to a string that can be snapshotted 9 | // Taken from https://stackoverflow.com/a/9458996 10 | export const base64 = (buffer: ArrayBuffer): string => { 11 | let binary = ""; 12 | const len = buffer.byteLength; 13 | const bytes = new Uint8Array(buffer); 14 | for (let i = 0; i < len; i++) { 15 | binary += String.fromCharCode(bytes[i]); 16 | } 17 | return btoa(binary); 18 | }; 19 | 20 | // Generate an arbitrary message set for testing 21 | export const fillMessageSet = ( 22 | numMessages: number, 23 | baseOffset = BigInt(0), 24 | keySizes = [5, 17, 76, 32, 57], 25 | valueSizes = [48, 128, 155, 96, 67] 26 | ): MessageSet => { 27 | const encoder = new Encoder(); 28 | for (let i = 0; i < numMessages; i++) { 29 | const keySize = keySizes[i % keySizes.length]; 30 | const valueSize = valueSizes[i % valueSizes.length]; 31 | 32 | // Write message offset (discarded by broker) 33 | encoder.writeInt64(baseOffset + BigInt(i)); 34 | // Write message size (crc + magic byte + attributes + (size +) key + (size +) value) 35 | encoder.writeInt32( 36 | int32Size + 37 | int8Size + 38 | int8Size + 39 | int32Size + 40 | keySize + 41 | int32Size + 42 | valueSize 43 | ); 44 | 45 | const remainder = new Encoder(); 46 | // Write magic byte (with magic value of 1) 47 | remainder.writeInt8(1); 48 | // Write attributes (no compression) 49 | remainder.writeInt8(0); 50 | // Write key and value 51 | remainder.writeBytes(fillBytes(keySize)); 52 | remainder.writeBytes(fillBytes(valueSize)); 53 | 54 | // Calculate and write crc 55 | const crcInput = remainder.slice(); 56 | // eslint-disable-next-line import/no-named-as-default-member 57 | encoder.writeInt32(crc32.buf(crcInput)); 58 | // Write remainder of message 59 | encoder.writeSlice(crcInput); 60 | } 61 | return encoder.slice(); 62 | }; 63 | 64 | const fillBytes = (length: number): Uint8Array => { 65 | const view = new DataView(new ArrayBuffer(length)); 66 | for (let i = 0; i < length; i++) { 67 | // This will wrap around when i > 127 but that's fine 68 | view.setInt8(i, i); 69 | } 70 | return new Uint8Array(view.buffer); 71 | }; 72 | -------------------------------------------------------------------------------- /test/globals.d.ts: -------------------------------------------------------------------------------- 1 | import { Env } from "src/common"; 2 | 3 | declare global { 4 | function getMiniflareBindings(): Env; 5 | } 6 | -------------------------------------------------------------------------------- /test/index.test.ts: -------------------------------------------------------------------------------- 1 | import handler from "src/index"; 2 | import { Acks, ApiKey, Int32 } from "src/protocol/common"; 3 | import { Decoder } from "src/protocol/decoder"; 4 | import { KafkaDecoder, KafkaRequestEncoder } from "src/protocol/kafka/common"; 5 | import { 6 | KafkaFetchRequest, 7 | KafkaFetchResponse, 8 | decodeKafkaFetchResponse, 9 | encodeKafkaFetchRequest, 10 | } from "src/protocol/kafka/fetch"; 11 | import { 12 | KafkaListOffsetsRequest, 13 | KafkaListOffsetsResponse, 14 | decodeKafkaListOffsetsResponse, 15 | encodeKafkaListOffsetsRequest, 16 | } from "src/protocol/kafka/list-offsets"; 17 | import { 18 | KafkaMetadataResponse, 19 | decodeKafkaMetadataResponse, 20 | encodeKafkaMetadataRequest, 21 | } from "src/protocol/kafka/metadata"; 22 | import { 23 | KafkaProduceRequest, 24 | KafkaProduceResponse, 25 | decodeKafkaProduceResponse, 26 | encodeKafkaProduceRequest, 27 | } from "src/protocol/kafka/produce"; 28 | import { base64, fillMessageSet 
} from "test/common"; 29 | 30 | class GatewayConn { 31 | private readonly socket: WebSocket; 32 | private readonly unhandled: (ArrayBuffer | string)[]; 33 | private handleMessage?: (message: ArrayBuffer) => void; 34 | 35 | constructor() { 36 | const env = getMiniflareBindings(); 37 | 38 | const response = handler.fetch( 39 | new Request("https://fetch.handler", { 40 | headers: { Upgrade: "websocket" }, 41 | }), 42 | env 43 | ); 44 | if (!response.ok) { 45 | throw new Error(`Initial request failed with status ${response.status}`); 46 | } 47 | 48 | const socket = response.webSocket; 49 | if (socket === null) { 50 | throw new Error("Initial response does not include WebSocket"); 51 | } 52 | 53 | socket.accept(); 54 | this.socket = socket; 55 | this.unhandled = []; 56 | 57 | this.socket.addEventListener("message", (event) => { 58 | if (typeof event.data === "string" || this.handleMessage === undefined) { 59 | this.unhandled.push(event.data); 60 | return; 61 | } 62 | this.handleMessage(event.data); 63 | }); 64 | } 65 | 66 | request(buffer: ArrayBuffer): Promise { 67 | if (this.unhandled.length > 0) { 68 | throw new Error( 69 | `Can't send next message when there are ${this.unhandled.length} unhandled messages` 70 | ); 71 | } 72 | 73 | if (this.handleMessage !== undefined) { 74 | throw new Error( 75 | "Can't send next message when there is a request in flight" 76 | ); 77 | } 78 | 79 | this.socket.send(buffer); 80 | 81 | return new Promise((resolve) => { 82 | this.handleMessage = (message: ArrayBuffer) => { 83 | this.handleMessage = undefined; 84 | resolve(message); 85 | }; 86 | }); 87 | } 88 | 89 | close() { 90 | if (this.unhandled.length > 0) { 91 | throw new Error( 92 | `Can't close connection when there are ${this.unhandled.length} unhandled messages` 93 | ); 94 | } 95 | 96 | this.socket.close(); 97 | } 98 | } 99 | 100 | type DecodeFunc = (decoder: Decoder) => T; 101 | type RequestResponse = [Int32, ArrayBuffer, DecodeFunc]; 102 | type TestCase = [string, string, RequestResponse[]]; 103 | 104 | const makeMetadataPair = ( 105 | correlationId: Int32, 106 | topics: string[] 107 | ): RequestResponse => { 108 | const encoder = new KafkaRequestEncoder({ 109 | apiKey: ApiKey.Metadata, 110 | apiVersion: 0, 111 | correlationId, 112 | clientId: null, 113 | }); 114 | const request = encodeKafkaMetadataRequest(encoder, { 115 | topics, 116 | }); 117 | return [correlationId, request, decodeKafkaMetadataResponse]; 118 | }; 119 | 120 | const makeProducePair = ( 121 | correlationId: Int32, 122 | topics: KafkaProduceRequest["topics"] 123 | ): RequestResponse => { 124 | const encoder = new KafkaRequestEncoder({ 125 | apiKey: ApiKey.Produce, 126 | apiVersion: 0, 127 | correlationId, 128 | clientId: null, 129 | }); 130 | const request = encodeKafkaProduceRequest(encoder, { 131 | acks: Acks.Leader, 132 | timeoutMs: 10_000, 133 | topics, 134 | }); 135 | return [correlationId, request, decodeKafkaProduceResponse]; 136 | }; 137 | 138 | const makeFetchPair = ( 139 | correlationId: Int32, 140 | request: KafkaFetchRequest 141 | ): RequestResponse => { 142 | const encoder = new KafkaRequestEncoder({ 143 | apiKey: ApiKey.Fetch, 144 | apiVersion: 0, 145 | correlationId, 146 | clientId: null, 147 | }); 148 | const encoded = encodeKafkaFetchRequest(encoder, request); 149 | return [correlationId, encoded, decodeKafkaFetchResponse]; 150 | }; 151 | 152 | const makeListOffsetsPair = ( 153 | correlationId: Int32, 154 | request: KafkaListOffsetsRequest 155 | ): RequestResponse => { 156 | const encoder = new KafkaRequestEncoder({ 
157 | apiKey: ApiKey.ListOffsets, 158 | apiVersion: 0, 159 | correlationId, 160 | clientId: null, 161 | }); 162 | const encoded = encodeKafkaListOffsetsRequest(encoder, request); 163 | return [correlationId, encoded, decodeKafkaListOffsetsResponse]; 164 | }; 165 | 166 | describe("Kafka API", () => { 167 | const cases: TestCase< 168 | | KafkaMetadataResponse 169 | | KafkaProduceResponse 170 | | KafkaListOffsetsResponse 171 | | KafkaFetchResponse 172 | >[] = [ 173 | ["metadata", "fetch all topics", [makeMetadataPair(5, [])]], 174 | ["metadata", "fetch specific topic", [makeMetadataPair(5, ["test-topic"])]], 175 | [ 176 | "metadata", 177 | "fetch nonexistent topic", 178 | [makeMetadataPair(5, ["other-topic"])], 179 | ], 180 | [ 181 | "metadata", 182 | "fetch specific topic and nonexistent topic", 183 | [makeMetadataPair(5, ["test-topic", "other-topic"])], 184 | ], 185 | [ 186 | "produce", 187 | "send one message to one partition", 188 | [ 189 | makeProducePair(5, [ 190 | { 191 | name: "test-topic", 192 | partitions: [{ index: 333, messageSet: fillMessageSet(1) }], 193 | }, 194 | ]), 195 | ], 196 | ], 197 | [ 198 | "produce", 199 | "send multiple message batches to one partition", 200 | [ 201 | makeProducePair(5, [ 202 | { 203 | name: "other-topic", 204 | partitions: [{ index: 999, messageSet: fillMessageSet(3) }], 205 | }, 206 | ]), 207 | makeProducePair(6, [ 208 | { 209 | name: "other-topic", 210 | partitions: [{ index: 999, messageSet: fillMessageSet(2) }], 211 | }, 212 | ]), 213 | ], 214 | ], 215 | [ 216 | "listOffsets", 217 | "initial offsets", 218 | [ 219 | makeListOffsetsPair(0, { 220 | replicaId: -1, 221 | topics: [ 222 | { 223 | name: "test-topic", 224 | partitions: [ 225 | { index: 0, timestamp: BigInt(0), maxNumOffsets: 16 }, 226 | ], 227 | }, 228 | ], 229 | }), 230 | ], 231 | ], 232 | [ 233 | "listOffsets", 234 | "full offset list after producing records", 235 | [ 236 | makeProducePair(0, [ 237 | { 238 | name: "test-topic", 239 | partitions: [{ index: 0, messageSet: fillMessageSet(10) }], 240 | }, 241 | ]), 242 | makeListOffsetsPair(1, { 243 | replicaId: -1, 244 | topics: [ 245 | { 246 | name: "test-topic", 247 | partitions: [ 248 | { index: 0, timestamp: BigInt(0), maxNumOffsets: 16 }, 249 | ], 250 | }, 251 | ], 252 | }), 253 | ], 254 | ], 255 | [ 256 | "listOffsets", 257 | "oldest offset after producing records", 258 | [ 259 | makeProducePair(0, [ 260 | { 261 | name: "test-topic", 262 | partitions: [{ index: 0, messageSet: fillMessageSet(10) }], 263 | }, 264 | ]), 265 | makeListOffsetsPair(1, { 266 | replicaId: -1, 267 | topics: [ 268 | { 269 | name: "test-topic", 270 | partitions: [ 271 | { index: 0, timestamp: BigInt(-2), maxNumOffsets: 1 }, 272 | ], 273 | }, 274 | ], 275 | }), 276 | ], 277 | ], 278 | [ 279 | "listOffsets", 280 | "maximum number of offsets is respected", 281 | [ 282 | makeProducePair(0, [ 283 | { 284 | name: "test-topic", 285 | partitions: [{ index: 0, messageSet: fillMessageSet(10) }], 286 | }, 287 | ]), 288 | makeListOffsetsPair(1, { 289 | replicaId: -1, 290 | topics: [ 291 | { 292 | name: "test-topic", 293 | partitions: [ 294 | { index: 0, timestamp: BigInt(0), maxNumOffsets: 4 }, 295 | ], 296 | }, 297 | ], 298 | }), 299 | ], 300 | ], 301 | [ 302 | "fetch", 303 | "fetch on empty partition", 304 | [ 305 | makeFetchPair(0, { 306 | replicaId: -1, 307 | maxWaitMs: 500, 308 | minBytes: 256, 309 | topics: [ 310 | { 311 | name: "test-topic", 312 | partitions: [ 313 | { index: 0, fetchOffset: BigInt(0), maxBytes: 8192 }, 314 | ], 315 | }, 316 | ], 317 | }), 318 | ], 
319 | ], 320 | ]; 321 | 322 | test.each(cases)("%s: %s", async (_api, _name, pairs) => { 323 | const gateway = new GatewayConn(); 324 | 325 | for (const [correlationId, request, decodeResponse] of pairs) { 326 | const response = await gateway.request(request); 327 | expect(base64(response)).toMatchSnapshot(); 328 | 329 | const decoder = new KafkaDecoder(response); 330 | expect(decoder.readInt32()).toEqual(correlationId); 331 | 332 | const decoded = decodeResponse(decoder); 333 | expect(decoded).toMatchSnapshot(); 334 | } 335 | 336 | gateway.close(); 337 | }); 338 | 339 | test("produce message set then fetch message set", async () => { 340 | const gateway = new GatewayConn(); 341 | const messageSet = fillMessageSet(5); 342 | 343 | const produceCorrelationId = 0; 344 | const produceEncoder = new KafkaRequestEncoder({ 345 | apiKey: ApiKey.Produce, 346 | apiVersion: 0, 347 | correlationId: produceCorrelationId, 348 | clientId: null, 349 | }); 350 | const produceRequest = encodeKafkaProduceRequest(produceEncoder, { 351 | acks: Acks.Leader, 352 | timeoutMs: 10_000, 353 | topics: [{ name: "test-topic", partitions: [{ index: 0, messageSet }] }], 354 | }); 355 | const produceResponse = await gateway.request(produceRequest); 356 | const produceDecoder = new KafkaDecoder(produceResponse); 357 | expect(produceDecoder.readInt32()).toEqual(produceCorrelationId); 358 | expect(decodeKafkaProduceResponse(produceDecoder)).toMatchInlineSnapshot(` 359 | Object { 360 | "topics": Array [ 361 | Object { 362 | "name": "test-topic", 363 | "partitions": Array [ 364 | Object { 365 | "baseOffset": 0n, 366 | "errorCode": 0, 367 | "index": 0, 368 | }, 369 | ], 370 | }, 371 | ], 372 | } 373 | `); 374 | 375 | const fetchCorrelationId = 1; 376 | const fetchEncoder = new KafkaRequestEncoder({ 377 | apiKey: ApiKey.Fetch, 378 | apiVersion: 0, 379 | correlationId: fetchCorrelationId, 380 | clientId: null, 381 | }); 382 | const fetchRequest = encodeKafkaFetchRequest(fetchEncoder, { 383 | replicaId: -1, 384 | maxWaitMs: 1000, 385 | minBytes: 64, 386 | topics: [ 387 | { 388 | name: "test-topic", 389 | partitions: [{ index: 0, fetchOffset: BigInt(0), maxBytes: 8192 }], 390 | }, 391 | ], 392 | }); 393 | const fetchResponse = await gateway.request(fetchRequest); 394 | const fetchDecoder = new KafkaDecoder(fetchResponse); 395 | expect(fetchDecoder.readInt32()).toEqual(fetchCorrelationId); 396 | const fetchResponseDecoded = decodeKafkaFetchResponse(fetchDecoder); 397 | expect(fetchResponseDecoded.topics[0].partitions[0].messageSet).toEqual( 398 | messageSet 399 | ); 400 | // Exclude buffer data from inline snapshot (because it's huge) 401 | fetchResponseDecoded.topics[0].partitions[0].messageSet = new Uint8Array(); 402 | expect(fetchResponseDecoded).toMatchInlineSnapshot(` 403 | Object { 404 | "topics": Array [ 405 | Object { 406 | "name": "test-topic", 407 | "partitions": Array [ 408 | Object { 409 | "errorCode": 0, 410 | "highWatermark": 5n, 411 | "index": 0, 412 | "messageSet": Uint8Array [], 413 | }, 414 | ], 415 | }, 416 | ], 417 | } 418 | `); 419 | }); 420 | }); 421 | -------------------------------------------------------------------------------- /test/protocol/__snapshots__/header.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`RequestHeader baseline header 1`] = `"AAMAAAAAAGQAA21heA=="`; 4 | 5 | exports[`RequestHeader empty client id 1`] = `"AAMAAAAABAAAAA=="`; 6 | 7 | exports[`RequestHeader nonzero api version 1`] 
= `"AAAABQAAABkAA21heA=="`; 8 | 9 | exports[`RequestHeader null client id 1`] = `"AAMAAAAABAD//w=="`; 10 | -------------------------------------------------------------------------------- /test/protocol/common.ts: -------------------------------------------------------------------------------- 1 | import { Decoder } from "src/protocol/decoder"; 2 | import { Encoder } from "src/protocol/encoder"; 3 | import { base64 } from "test/common"; 4 | 5 | type EncodeFunc = (encoder: Encoder, value: T) => ArrayBuffer; 6 | type DecodeFunc = (decoder: Decoder) => T; 7 | 8 | export const testEncodeDecode = ( 9 | value: T, 10 | encode: EncodeFunc, 11 | decode: DecodeFunc 12 | ) => { 13 | const encoder = new Encoder(); 14 | const buffer = encode(encoder, value); 15 | 16 | const decoder = new Decoder(buffer); 17 | const decoded = decode(decoder); 18 | 19 | expect(decoded).toEqual(value); 20 | }; 21 | 22 | // Snapshot wire format of encoded message 23 | export const testEncodeDecodeSnapshot = ( 24 | value: T, 25 | encode: EncodeFunc, 26 | decode: DecodeFunc 27 | ) => { 28 | const encoder = new Encoder(); 29 | const buffer = encode(encoder, value); 30 | 31 | expect(base64(buffer)).toMatchSnapshot(); 32 | 33 | const decoder = new Decoder(buffer); 34 | const decoded = decode(decoder); 35 | 36 | expect(decoded).toEqual(value); 37 | }; 38 | -------------------------------------------------------------------------------- /test/protocol/header.test.ts: -------------------------------------------------------------------------------- 1 | import { ApiKey, validApiKey } from "src/protocol/common"; 2 | import { Decoder } from "src/protocol/decoder"; 3 | import { Encoder } from "src/protocol/encoder"; 4 | import { 5 | RequestHeader, 6 | decodeRequestHeader, 7 | encodeRequestHeader, 8 | } from "src/protocol/header"; 9 | import { testEncodeDecodeSnapshot } from "test/protocol/common"; 10 | 11 | describe("RequestHeader", () => { 12 | type TestCase = [string, RequestHeader]; 13 | const cases: TestCase[] = [ 14 | [ 15 | "baseline header", 16 | { 17 | apiKey: ApiKey.Metadata, 18 | apiVersion: 0, 19 | correlationId: 100, 20 | clientId: "max", 21 | }, 22 | ], 23 | [ 24 | "nonzero api version", 25 | { 26 | apiKey: ApiKey.Produce, 27 | apiVersion: 5, 28 | correlationId: 25, 29 | clientId: "max", 30 | }, 31 | ], 32 | [ 33 | "empty client id", 34 | { 35 | apiKey: ApiKey.Metadata, 36 | apiVersion: 0, 37 | correlationId: 1024, 38 | clientId: "", 39 | }, 40 | ], 41 | [ 42 | "null client id", 43 | { 44 | apiKey: ApiKey.Metadata, 45 | apiVersion: 0, 46 | correlationId: 1024, 47 | clientId: null, 48 | }, 49 | ], 50 | ]; 51 | 52 | test.each(cases)("%s", (_name, value) => { 53 | testEncodeDecodeSnapshot( 54 | value, 55 | (encoder: Encoder, header: RequestHeader) => 56 | encodeRequestHeader(encoder, header).buffer(), 57 | (decoder: Decoder) => decodeRequestHeader(decoder, validApiKey) 58 | ); 59 | }); 60 | }); 61 | -------------------------------------------------------------------------------- /test/protocol/internal/fetch.test.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode } from "src/protocol/common"; 2 | import { 3 | InternalFetchRequest, 4 | InternalFetchResponse, 5 | decodeInternalFetchRequest, 6 | decodeInternalFetchResponse, 7 | encodeInternalFetchRequest, 8 | encodeInternalFetchResponse, 9 | } from "src/protocol/internal/fetch"; 10 | import { fillMessageSet } from "test/common"; 11 | import { testEncodeDecode } from "test/protocol/common"; 12 | 13 | 
describe("InternalFetchRequest", () => { 14 | type TestCase = [string, InternalFetchRequest]; 15 | const cases: TestCase[] = [ 16 | [ 17 | "min bytes set", 18 | { 19 | maxWaitMs: 10_000, 20 | fetchOffset: BigInt(17), 21 | minBytes: 512, 22 | maxBytes: 8192, 23 | }, 24 | ], 25 | [ 26 | "min bytes not set", 27 | { 28 | maxWaitMs: 5000, 29 | fetchOffset: BigInt(438), 30 | minBytes: 0, 31 | maxBytes: 10_000, 32 | }, 33 | ], 34 | ]; 35 | 36 | test.each(cases)("%s", (_name, value) => { 37 | testEncodeDecode( 38 | value, 39 | encodeInternalFetchRequest, 40 | decodeInternalFetchRequest 41 | ); 42 | }); 43 | }); 44 | 45 | describe("InternalFetchResponse", () => { 46 | type TestCase = [string, InternalFetchResponse]; 47 | const cases: TestCase[] = [ 48 | [ 49 | "baseline response", 50 | { 51 | errorCode: ErrorCode.None, 52 | highWatermark: BigInt(8932), 53 | messageSet: fillMessageSet(6), 54 | }, 55 | ], 56 | ]; 57 | 58 | test.each(cases)("%s", (_name, value) => { 59 | testEncodeDecode( 60 | value, 61 | encodeInternalFetchResponse, 62 | decodeInternalFetchResponse 63 | ); 64 | }); 65 | }); 66 | -------------------------------------------------------------------------------- /test/protocol/internal/list-offsets.test.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode } from "src/protocol/common"; 2 | import { 3 | InternalListOffsetsRequest, 4 | InternalListOffsetsResponse, 5 | decodeInternalListOffsetsRequest, 6 | decodeInternalListOffsetsResponse, 7 | encodeInternalListOffsetsRequest, 8 | encodeInternalListOffsetsResponse, 9 | } from "src/protocol/internal/list-offsets"; 10 | import { testEncodeDecode } from "test/protocol/common"; 11 | 12 | describe("InternalListOffsetsRequest", () => { 13 | type TestCase = [string, InternalListOffsetsRequest]; 14 | const cases: TestCase[] = [ 15 | ["latest offset", { timestamp: BigInt(-1), maxNumOffsets: 1 }], 16 | ["multiple offsets", { timestamp: BigInt(0), maxNumOffsets: 64 }], 17 | ]; 18 | 19 | test.each(cases)("%s", (_name, value) => { 20 | testEncodeDecode( 21 | value, 22 | encodeInternalListOffsetsRequest, 23 | decodeInternalListOffsetsRequest 24 | ); 25 | }); 26 | }); 27 | 28 | describe("InternalListOffsetsResponse", () => { 29 | type TestCase = [string, InternalListOffsetsResponse]; 30 | const cases: TestCase[] = [ 31 | [ 32 | "one offset", 33 | { errorCode: ErrorCode.None, oldStyleOffsets: [BigInt(16)] }, 34 | ], 35 | [ 36 | "multiple offsets", 37 | { 38 | errorCode: ErrorCode.None, 39 | oldStyleOffsets: [BigInt(3), BigInt(2), BigInt(1), BigInt(0)], 40 | }, 41 | ], 42 | ]; 43 | 44 | test.each(cases)("%s", (_name, value) => { 45 | testEncodeDecode( 46 | value, 47 | encodeInternalListOffsetsResponse, 48 | decodeInternalListOffsetsResponse 49 | ); 50 | }); 51 | }); 52 | -------------------------------------------------------------------------------- /test/protocol/internal/produce.test.ts: -------------------------------------------------------------------------------- 1 | import { Acks, ErrorCode } from "src/protocol/common"; 2 | import { 3 | InternalProduceRequest, 4 | InternalProduceResponse, 5 | decodeInternalProduceRequest, 6 | decodeInternalProduceResponse, 7 | encodeInternalProduceRequest, 8 | encodeInternalProduceResponse, 9 | } from "src/protocol/internal/produce"; 10 | import { fillMessageSet } from "test/common"; 11 | import { testEncodeDecode } from "test/protocol/common"; 12 | 13 | describe("InternalProduceRequest", () => { 14 | type TestCase = [string, InternalProduceRequest]; 15 | const 
cases: TestCase[] = [ 16 | [ 17 | "baseline request", 18 | { 19 | acks: Acks.Leader, 20 | messageSet: fillMessageSet(1), 21 | }, 22 | ], 23 | [ 24 | "no acks", 25 | { 26 | acks: Acks.None, 27 | messageSet: fillMessageSet(2), 28 | }, 29 | ], 30 | [ 31 | "full isr acks", 32 | { 33 | acks: Acks.FullISR, 34 | messageSet: fillMessageSet(5), 35 | }, 36 | ], 37 | [ 38 | "empty message set", 39 | { 40 | acks: Acks.Leader, 41 | messageSet: fillMessageSet(0), 42 | }, 43 | ], 44 | ]; 45 | 46 | test.each(cases)("%s", (_name, value) => { 47 | testEncodeDecode( 48 | value, 49 | encodeInternalProduceRequest, 50 | decodeInternalProduceRequest 51 | ); 52 | }); 53 | }); 54 | 55 | describe("InternalProduceResponse", () => { 56 | type TestCase = [string, InternalProduceResponse]; 57 | const cases: TestCase[] = [ 58 | [ 59 | "baseline response", 60 | { errorCode: ErrorCode.None, baseOffset: BigInt(127) }, 61 | ], 62 | ["offset zero", { errorCode: ErrorCode.None, baseOffset: BigInt(0) }], 63 | ]; 64 | 65 | test.each(cases)("%s", (_name, value) => { 66 | testEncodeDecode( 67 | value, 68 | encodeInternalProduceResponse, 69 | decodeInternalProduceResponse 70 | ); 71 | }); 72 | }); 73 | -------------------------------------------------------------------------------- /test/protocol/kafka/__snapshots__/fetch.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`KafkaFetchRequest one topic and partition 1`] = `"/////wAAJxAAAAEAAAAAAQAJdG9waWMtb25lAAAAAQAAAAAAAAAAAAAAEAAAEAA="`; 4 | 5 | exports[`KafkaFetchResponse one topic and partition 1`] = `"AAAAAQAJdG9waWMtb25lAAAAAQAAAAAAAAAAAAAAAAAgAAAB+wAAAAAAAAAAAAAAQ/zFP+wBAAAAAAUAAQIDBAAAADAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8AAAAAAAAAAQAAAJ9tvRFPAQAAAAARAAECAwQFBgcICQoLDA0ODxAAAACAAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn8AAAAAAAAAAgAAAPXs8KwsAQAAAABMAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKSwAAAJsAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4CBgoOEhYaHiImKi4yNjo+QkZKTlJWWl5iZmg=="`; 6 | -------------------------------------------------------------------------------- /test/protocol/kafka/__snapshots__/list-offsets.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`KafkaListOffsetsRequest earliest offset 1`] = `"AAABTQAAAAEACXRvcGljLW9uZQAAAAEAAAAA//////////4AAAAB"`; 4 | 5 | exports[`KafkaListOffsetsRequest latest offset 1`] = `"AAABTQAAAAEACXRvcGljLW9uZQAAAAEAAAAA//////////8AAAAB"`; 6 | 7 | exports[`KafkaListOffsetsRequest multiple offsets 1`] = `"AAABTQAAAAEACXRvcGljLW9uZQAAAAEAAAAAAAAAAAAAAAAAAABA"`; 8 | 9 | exports[`KafkaListOffsetsResponse multiple offsets 1`] = `"AAAAAQAJdG9waWMtb25lAAAAAQAAAAAAAAAAAAQAAAAAAAAAAwAAAAAAAAACAAAAAAAAAAEAAAAAAAAAAA=="`; 10 | 11 | exports[`KafkaListOffsetsResponse offset out of range 1`] = `"AAAAAQAJdG9waWMtb25lAAAAAQAAAAAAAQAAAAA="`; 12 | 13 | exports[`KafkaListOffsetsResponse one offset 1`] = `"AAAAAQAJdG9waWMtb25lAAAAAQAAAAAAAAAAAAEAAAAAAAAAEA=="`; 14 | -------------------------------------------------------------------------------- /test/protocol/kafka/__snapshots__/metadata.test.ts.snap: 
-------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`KafkaMetadataRequest multiple topics 1`] = `"AAAAAwAJdG9waWMtb25lAAl0b3BpYy10d28AC3RvcGljLXRocmVl"`; 4 | 5 | exports[`KafkaMetadataRequest no topics 1`] = `"AAAAAA=="`; 6 | 7 | exports[`KafkaMetadataRequest one topic 1`] = `"AAAAAQAJdG9waWMtb25l"`; 8 | 9 | exports[`KafkaMetadataResponse multi-partition topic 1`] = `"AAAAAQAAAAEAC2V4YW1wbGUuY29tAAABuwAAAAEAAAAJdG9waWMtb25lAAAAAgAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAgAAAAEAAAAAAAAAAA=="`; 10 | 11 | exports[`KafkaMetadataResponse multiple topics 1`] = `"AAAAAQAAAAEAC2V4YW1wbGUuY29tAAABuwAAAAIAAAAJdG9waWMtb25lAAAAAQAAAAAAAQAAAAEAAAAAAAAAAAAAAAl0b3BpYy10d28AAAACAAAAAAABAAAAAQAAAAAAAAAAAAAAAAACAAAAAQAAAAAAAAAA"`; 12 | 13 | exports[`KafkaMetadataResponse multiple topics with errors 1`] = `"AAAAAQAAAAEAC2V4YW1wbGUuY29tAAABuwAAAAMAAwAJdG9waWMtb25lAAAAAAADAAl0b3BpYy10d28AAAAAAAAAC3RvcGljLXRocmVlAAAAAgAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAgAAAAEAAAAAAAAAAA=="`; 14 | 15 | exports[`KafkaMetadataResponse no topics 1`] = `"AAAAAQAAAAEAC2V4YW1wbGUuY29tAAABuwAAAAA="`; 16 | 17 | exports[`KafkaMetadataResponse single-partition topic 1`] = `"AAAAAQAAAAEAC2V4YW1wbGUuY29tAAABuwAAAAEAAAAJdG9waWMtb25lAAAAAQAAAAAAAQAAAAEAAAAAAAAAAA=="`; 18 | -------------------------------------------------------------------------------- /test/protocol/kafka/__snapshots__/produce.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`KafkaProduceRequest full isr acks 1`] = `"//8AAAD6AAAAAQAJdG9waWMtdHdvAAAAAQAAAAcAAAD6AAAAAAAAAAAAAABD/MU/7AEAAAAABQABAgMEAAAAMAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLwAAAAAAAAABAAAAn229EU8BAAAAABEAAQIDBAUGBwgJCgsMDQ4PEAAAAIAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+fw=="`; 4 | 5 | exports[`KafkaProduceRequest multiple topics and partitions 1`] = `"AAAAAAAyAAAAAgAJdG9waWMtb25lAAAAAgAAAAIAAAH7AAAAAAAAAAAAAABD/MU/7AEAAAAABQABAgMEAAAAMAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLwAAAAAAAAABAAAAn229EU8BAAAAABEAAQIDBAUGBwgJCgsMDQ4PEAAAAIAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+fwAAAAAAAAACAAAA9ezwrCwBAAAAAEwAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLAAAAmwABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gIGCg4SFhoeIiYqLjI2Oj5CRkpOUlZaXmJmaAAAABAAAAE8AAAAAAAAAAAAAAEP8xT/sAQAAAAAFAAECAwQAAAAwAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vAAl0b3BpYy10d28AAAABAAAABQAAAPoAAAAAAAAAAAAAAEP8xT/sAQAAAAAFAAECAwQAAAAwAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vAAAAAAAAAAEAAACfbb0RTwEAAAAAEQABAgMEBQYHCAkKCwwNDg8QAAAAgAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/"`; 6 | 7 | exports[`KafkaProduceRequest one topic and parition 1`] = 
`"AAEAAABkAAAAAQAJdG9waWMtb25lAAAAAQAAAAEAAAD6AAAAAAAAAAAAAABD/MU/7AEAAAAABQABAgMEAAAAMAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLwAAAAAAAAABAAAAn229EU8BAAAAABEAAQIDBAUGBwgJCgsMDQ4PEAAAAIAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+fw=="`; 8 | 9 | exports[`KafkaProduceResponse multiple topics and partitions 1`] = `"AAAAAgAJdG9waWMtb25lAAAAAgAAAAMAAAAAAAAAAAB/AAAABQAAAAAAAAAAAAIACXRvcGljLXR3bwAAAAEAAAAIAAAAAAAAAAAD8g=="`; 10 | 11 | exports[`KafkaProduceResponse multiple topics and partitions with errors 1`] = `"AAAAAgAJdG9waWMtb25lAAAAAgAAAAMAAAAAAAAAAAB/AAAABwACAAAAAAAAABoACXRvcGljLXR3bwAAAAEAAAAPAAIAAAAAAAADsw=="`; 12 | 13 | exports[`KafkaProduceResponse one topic and partition 1`] = `"AAAAAQAJdG9waWMtb25lAAAAAQAAAAEAAAAAAAAAAAAQ"`; 14 | -------------------------------------------------------------------------------- /test/protocol/kafka/fetch.test.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode } from "src/protocol/common"; 2 | import { 3 | KafkaFetchRequest, 4 | KafkaFetchResponse, 5 | decodeKafkaFetchRequest, 6 | decodeKafkaFetchResponse, 7 | encodeKafkaFetchRequest, 8 | encodeKafkaFetchResponse, 9 | } from "src/protocol/kafka/fetch"; 10 | import { fillMessageSet } from "test/common"; 11 | import { testEncodeDecodeSnapshot } from "test/protocol/common"; 12 | 13 | describe("KafkaFetchRequest", () => { 14 | type TestCase = [string, KafkaFetchRequest]; 15 | const cases: TestCase[] = [ 16 | [ 17 | "one topic and partition", 18 | { 19 | replicaId: -1, 20 | maxWaitMs: 10_000, 21 | minBytes: 256, 22 | topics: [ 23 | { 24 | name: "topic-one", 25 | partitions: [{ index: 0, fetchOffset: BigInt(16), maxBytes: 4096 }], 26 | }, 27 | ], 28 | }, 29 | ], 30 | ]; 31 | 32 | test.each(cases)("%s", (_name, value) => { 33 | testEncodeDecodeSnapshot( 34 | value, 35 | encodeKafkaFetchRequest, 36 | decodeKafkaFetchRequest 37 | ); 38 | }); 39 | }); 40 | 41 | describe("KafkaFetchResponse", () => { 42 | type TestCase = [string, KafkaFetchResponse]; 43 | const cases: TestCase[] = [ 44 | [ 45 | "one topic and partition", 46 | { 47 | topics: [ 48 | { 49 | name: "topic-one", 50 | partitions: [ 51 | { 52 | index: 0, 53 | errorCode: ErrorCode.None, 54 | highWatermark: BigInt(32), 55 | messageSet: fillMessageSet(3), 56 | }, 57 | ], 58 | }, 59 | ], 60 | }, 61 | ], 62 | ]; 63 | 64 | test.each(cases)("%s", (_name, value) => { 65 | testEncodeDecodeSnapshot( 66 | value, 67 | encodeKafkaFetchResponse, 68 | decodeKafkaFetchResponse 69 | ); 70 | }); 71 | }); 72 | -------------------------------------------------------------------------------- /test/protocol/kafka/list-offsets.test.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode } from "src/protocol/common"; 2 | import { 3 | KafkaListOffsetsRequest, 4 | KafkaListOffsetsResponse, 5 | decodeKafkaListOffsetsRequest, 6 | decodeKafkaListOffsetsResponse, 7 | encodeKafkaListOffsetsRequest, 8 | encodeKafkaListOffsetsResponse, 9 | } from "src/protocol/kafka/list-offsets"; 10 | import { testEncodeDecodeSnapshot } from "test/protocol/common"; 11 | 12 | describe("KafkaListOffsetsRequest", () => { 13 | type TestCase = [string, KafkaListOffsetsRequest]; 14 | const cases: TestCase[] = [ 15 | [ 16 | "latest offset", 17 | { 18 | replicaId: 333, 19 | topics: [ 20 | { 21 | name: "topic-one", 22 | partitions: [ 23 | { 24 | index: 0, 25 
| timestamp: BigInt(-1), 26 | maxNumOffsets: 1, 27 | }, 28 | ], 29 | }, 30 | ], 31 | }, 32 | ], 33 | [ 34 | "earliest offset", 35 | { 36 | replicaId: 333, 37 | topics: [ 38 | { 39 | name: "topic-one", 40 | partitions: [ 41 | { 42 | index: 0, 43 | timestamp: BigInt(-2), 44 | maxNumOffsets: 1, 45 | }, 46 | ], 47 | }, 48 | ], 49 | }, 50 | ], 51 | [ 52 | "multiple offsets", 53 | { 54 | replicaId: 333, 55 | topics: [ 56 | { 57 | name: "topic-one", 58 | partitions: [ 59 | { 60 | index: 0, 61 | timestamp: BigInt(0), 62 | maxNumOffsets: 64, 63 | }, 64 | ], 65 | }, 66 | ], 67 | }, 68 | ], 69 | ]; 70 | 71 | test.each(cases)("%s", (_name, value) => { 72 | testEncodeDecodeSnapshot( 73 | value, 74 | encodeKafkaListOffsetsRequest, 75 | decodeKafkaListOffsetsRequest 76 | ); 77 | }); 78 | }); 79 | 80 | describe("KafkaListOffsetsResponse", () => { 81 | type TestCase = [string, KafkaListOffsetsResponse]; 82 | const cases: TestCase[] = [ 83 | [ 84 | "one offset", 85 | { 86 | topics: [ 87 | { 88 | name: "topic-one", 89 | partitions: [ 90 | { 91 | index: 0, 92 | errorCode: ErrorCode.None, 93 | oldStyleOffsets: [BigInt(16)], 94 | }, 95 | ], 96 | }, 97 | ], 98 | }, 99 | ], 100 | [ 101 | "multiple offsets", 102 | { 103 | topics: [ 104 | { 105 | name: "topic-one", 106 | partitions: [ 107 | { 108 | index: 0, 109 | errorCode: ErrorCode.None, 110 | oldStyleOffsets: [BigInt(3), BigInt(2), BigInt(1), BigInt(0)], 111 | }, 112 | ], 113 | }, 114 | ], 115 | }, 116 | ], 117 | [ 118 | "offset out of range", 119 | { 120 | topics: [ 121 | { 122 | name: "topic-one", 123 | partitions: [ 124 | { 125 | index: 0, 126 | errorCode: ErrorCode.OffsetOutOfRange, 127 | oldStyleOffsets: [], 128 | }, 129 | ], 130 | }, 131 | ], 132 | }, 133 | ], 134 | ]; 135 | 136 | test.each(cases)("%s", (_name, value) => { 137 | testEncodeDecodeSnapshot( 138 | value, 139 | encodeKafkaListOffsetsResponse, 140 | decodeKafkaListOffsetsResponse 141 | ); 142 | }); 143 | }); 144 | -------------------------------------------------------------------------------- /test/protocol/kafka/metadata.test.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode } from "src/protocol/common"; 2 | import { 3 | KafkaMetadataRequest, 4 | KafkaMetadataResponse, 5 | decodeKafkaMetadataRequest, 6 | decodeKafkaMetadataResponse, 7 | encodeKafkaMetadataRequest, 8 | encodeKafkaMetadataResponse, 9 | } from "src/protocol/kafka/metadata"; 10 | import { testEncodeDecodeSnapshot } from "test/protocol/common"; 11 | 12 | describe("KafkaMetadataRequest", () => { 13 | type TestCase = [string, KafkaMetadataRequest]; 14 | const cases: TestCase[] = [ 15 | ["no topics", { topics: [] }], 16 | ["one topic", { topics: ["topic-one"] }], 17 | ["multiple topics", { topics: ["topic-one", "topic-two", "topic-three"] }], 18 | ]; 19 | 20 | test.each(cases)("%s", (_name, value) => { 21 | testEncodeDecodeSnapshot( 22 | value, 23 | encodeKafkaMetadataRequest, 24 | decodeKafkaMetadataRequest 25 | ); 26 | }); 27 | }); 28 | 29 | describe("KafkaMetadataResponse", () => { 30 | type TestCase = [string, KafkaMetadataResponse]; 31 | const cases: TestCase[] = [ 32 | [ 33 | "no topics", 34 | { 35 | brokers: [{ nodeId: 1, host: "example.com", port: 443 }], 36 | topics: [], 37 | }, 38 | ], 39 | [ 40 | "single-partition topic", 41 | { 42 | brokers: [{ nodeId: 1, host: "example.com", port: 443 }], 43 | topics: [ 44 | { 45 | errorCode: ErrorCode.None, 46 | name: "topic-one", 47 | partitions: [ 48 | { 49 | errorCode: ErrorCode.None, 50 | partitionIndex: 1, 51 | leaderId: 1, 
52 | replicaNodes: [], 53 | isrNodes: [], 54 | }, 55 | ], 56 | }, 57 | ], 58 | }, 59 | ], 60 | [ 61 | "multi-partition topic", 62 | { 63 | brokers: [{ nodeId: 1, host: "example.com", port: 443 }], 64 | topics: [ 65 | { 66 | errorCode: ErrorCode.None, 67 | name: "topic-one", 68 | partitions: [ 69 | { 70 | errorCode: ErrorCode.None, 71 | partitionIndex: 1, 72 | leaderId: 1, 73 | replicaNodes: [], 74 | isrNodes: [], 75 | }, 76 | { 77 | errorCode: ErrorCode.None, 78 | partitionIndex: 2, 79 | leaderId: 1, 80 | replicaNodes: [], 81 | isrNodes: [], 82 | }, 83 | ], 84 | }, 85 | ], 86 | }, 87 | ], 88 | [ 89 | "multiple topics", 90 | { 91 | brokers: [{ nodeId: 1, host: "example.com", port: 443 }], 92 | topics: [ 93 | { 94 | errorCode: ErrorCode.None, 95 | name: "topic-one", 96 | partitions: [ 97 | { 98 | errorCode: ErrorCode.None, 99 | partitionIndex: 1, 100 | leaderId: 1, 101 | replicaNodes: [], 102 | isrNodes: [], 103 | }, 104 | ], 105 | }, 106 | { 107 | errorCode: ErrorCode.None, 108 | name: "topic-two", 109 | partitions: [ 110 | { 111 | errorCode: ErrorCode.None, 112 | partitionIndex: 1, 113 | leaderId: 1, 114 | replicaNodes: [], 115 | isrNodes: [], 116 | }, 117 | { 118 | errorCode: ErrorCode.None, 119 | partitionIndex: 2, 120 | leaderId: 1, 121 | replicaNodes: [], 122 | isrNodes: [], 123 | }, 124 | ], 125 | }, 126 | ], 127 | }, 128 | ], 129 | [ 130 | "multiple topics with errors", 131 | { 132 | brokers: [{ nodeId: 1, host: "example.com", port: 443 }], 133 | topics: [ 134 | { 135 | errorCode: ErrorCode.UnknownTopicOrPartition, 136 | name: "topic-one", 137 | partitions: [], 138 | }, 139 | { 140 | errorCode: ErrorCode.UnknownTopicOrPartition, 141 | name: "topic-two", 142 | partitions: [], 143 | }, 144 | { 145 | errorCode: ErrorCode.None, 146 | name: "topic-three", 147 | partitions: [ 148 | { 149 | errorCode: ErrorCode.None, 150 | partitionIndex: 1, 151 | leaderId: 1, 152 | replicaNodes: [], 153 | isrNodes: [], 154 | }, 155 | { 156 | errorCode: ErrorCode.None, 157 | partitionIndex: 2, 158 | leaderId: 1, 159 | replicaNodes: [], 160 | isrNodes: [], 161 | }, 162 | ], 163 | }, 164 | ], 165 | }, 166 | ], 167 | ]; 168 | 169 | test.each(cases)("%s", (_name, value) => { 170 | testEncodeDecodeSnapshot( 171 | value, 172 | encodeKafkaMetadataResponse, 173 | decodeKafkaMetadataResponse 174 | ); 175 | }); 176 | }); 177 | -------------------------------------------------------------------------------- /test/protocol/kafka/produce.test.ts: -------------------------------------------------------------------------------- 1 | import { Acks, ErrorCode } from "src/protocol/common"; 2 | import { 3 | KafkaProduceRequest, 4 | KafkaProduceResponse, 5 | decodeKafkaProduceRequest, 6 | decodeKafkaProduceResponse, 7 | encodeKafkaProduceRequest, 8 | encodeKafkaProduceResponse, 9 | } from "src/protocol/kafka/produce"; 10 | import { fillMessageSet } from "test/common"; 11 | import { testEncodeDecodeSnapshot } from "test/protocol/common"; 12 | 13 | describe("KafkaProduceRequest", () => { 14 | type TestCase = [string, KafkaProduceRequest]; 15 | const cases: TestCase[] = [ 16 | [ 17 | "one topic and parition", 18 | { 19 | acks: Acks.Leader, 20 | timeoutMs: 100, 21 | topics: [ 22 | { 23 | name: "topic-one", 24 | partitions: [ 25 | { 26 | index: 1, 27 | messageSet: fillMessageSet(2), 28 | }, 29 | ], 30 | }, 31 | ], 32 | }, 33 | ], 34 | [ 35 | "multiple topics and partitions", 36 | { 37 | acks: Acks.None, 38 | timeoutMs: 50, 39 | topics: [ 40 | { 41 | name: "topic-one", 42 | partitions: [ 43 | { 44 | index: 2, 45 | messageSet: 
fillMessageSet(3), 46 | }, 47 | { 48 | index: 4, 49 | messageSet: fillMessageSet(1), 50 | }, 51 | ], 52 | }, 53 | { 54 | name: "topic-two", 55 | partitions: [ 56 | { 57 | index: 5, 58 | messageSet: fillMessageSet(2), 59 | }, 60 | ], 61 | }, 62 | ], 63 | }, 64 | ], 65 | [ 66 | "full isr acks", 67 | { 68 | acks: Acks.FullISR, 69 | timeoutMs: 250, 70 | topics: [ 71 | { 72 | name: "topic-two", 73 | partitions: [ 74 | { 75 | index: 7, 76 | messageSet: fillMessageSet(2), 77 | }, 78 | ], 79 | }, 80 | ], 81 | }, 82 | ], 83 | ]; 84 | 85 | test.each(cases)("%s", (_name, value) => { 86 | testEncodeDecodeSnapshot( 87 | value, 88 | encodeKafkaProduceRequest, 89 | decodeKafkaProduceRequest 90 | ); 91 | }); 92 | }); 93 | 94 | describe("KafkaProduceResponse", () => { 95 | type TestCase = [string, KafkaProduceResponse]; 96 | const cases: TestCase[] = [ 97 | [ 98 | "one topic and partition", 99 | { 100 | topics: [ 101 | { 102 | name: "topic-one", 103 | partitions: [ 104 | { 105 | index: 1, 106 | errorCode: ErrorCode.None, 107 | baseOffset: BigInt(16), 108 | }, 109 | ], 110 | }, 111 | ], 112 | }, 113 | ], 114 | [ 115 | "multiple topics and partitions", 116 | { 117 | topics: [ 118 | { 119 | name: "topic-one", 120 | partitions: [ 121 | { 122 | index: 3, 123 | errorCode: ErrorCode.None, 124 | baseOffset: BigInt(127), 125 | }, 126 | { 127 | index: 5, 128 | errorCode: ErrorCode.None, 129 | baseOffset: BigInt(2), 130 | }, 131 | ], 132 | }, 133 | { 134 | name: "topic-two", 135 | partitions: [ 136 | { 137 | index: 8, 138 | errorCode: ErrorCode.None, 139 | baseOffset: BigInt(1010), 140 | }, 141 | ], 142 | }, 143 | ], 144 | }, 145 | ], 146 | [ 147 | "multiple topics and partitions with errors", 148 | { 149 | topics: [ 150 | { 151 | name: "topic-one", 152 | partitions: [ 153 | { 154 | index: 3, 155 | errorCode: ErrorCode.None, 156 | baseOffset: BigInt(127), 157 | }, 158 | { 159 | index: 7, 160 | errorCode: ErrorCode.CorruptMessage, 161 | baseOffset: BigInt(26), 162 | }, 163 | ], 164 | }, 165 | { 166 | name: "topic-two", 167 | partitions: [ 168 | { 169 | index: 15, 170 | errorCode: ErrorCode.CorruptMessage, 171 | baseOffset: BigInt(947), 172 | }, 173 | ], 174 | }, 175 | ], 176 | }, 177 | ], 178 | ]; 179 | 180 | test.each(cases)("%s", (_name, value) => { 181 | testEncodeDecodeSnapshot( 182 | value, 183 | encodeKafkaProduceResponse, 184 | decodeKafkaProduceResponse 185 | ); 186 | }); 187 | }); 188 | -------------------------------------------------------------------------------- /test/state/__snapshots__/chunk.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`prepareMessageSet one chunk: muliple messages 1`] = 
`"AAAAAAAAAAAAAABD/MU/7AEAAAAABQABAgMEAAAAMAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLwAAAAAAAAABAAAAn229EU8BAAAAABEAAQIDBAUGBwgJCgsMDQ4PEAAAAIAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+fwAAAAAAAAACAAAA9ezwrCwBAAAAAEwAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLAAAAmwABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gIGCg4SFhoeIiYqLjI2Oj5CRkpOUlZaXmJmaAAAAAAAAAAMAAACOwJ6pzAEAAAAAIAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fAAAAYAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eXwAAAAAAAAAEAAAAih0lQM8BAAAAADkAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2NzgAAABDAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="`; 4 | 5 | exports[`prepareMessageSet one chunk: muliple messages 2`] = ` 6 | Object { 7 | "buffer": ArrayBuffer [], 8 | "frames": Array [ 9 | Array [ 10 | 0, 11 | 79, 12 | ], 13 | Array [ 14 | 79, 15 | 171, 16 | ], 17 | Array [ 18 | 250, 19 | 257, 20 | ], 21 | Array [ 22 | 507, 23 | 154, 24 | ], 25 | Array [ 26 | 661, 27 | 150, 28 | ], 29 | ], 30 | "nextIndex": 811, 31 | "offsetStart": 0n, 32 | } 33 | `; 34 | 35 | exports[`prepareMessageSet one chunk: muliple messages with existing messages 1`] = `"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ/zFP+wBAAAAAAUAAQIDBAAAADAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8AAAAAAAAAAQAAAJ9tvRFPAQAAAAARAAECAwQFBgcICQoLDA0ODxAAAACAAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn8AAAAAAAAAAgAAAPXs8KwsAQAAAABMAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKSwAAAJsAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4CBgoOEhYaHiImKi4yNjo+QkZKTlJWWl5iZmgAAAAAAAAADAAAAjsCeqcwBAAAAACAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHwAAAGAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl8AAAAAAAAABAAAAIodJUDPAQAAAAA5AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4AAAAQwABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="`; 36 | 37 | exports[`prepareMessageSet one chunk: muliple messages with existing messages 2`] = ` 38 | Object { 39 | "buffer": ArrayBuffer [], 40 | "frames": Array [ 41 | Array [ 42 | 0, 43 | 96, 44 | ], 45 | Array [ 46 | 96, 47 | 64, 48 | ], 49 | Array [ 50 | 160, 51 | 79, 52 | ], 53 | Array [ 54 | 239, 55 
| 171, 56 | ], 57 | Array [ 58 | 410, 59 | 257, 60 | ], 61 | Array [ 62 | 667, 63 | 154, 64 | ], 65 | Array [ 66 | 821, 67 | 150, 68 | ], 69 | ], 70 | "nextIndex": 971, 71 | "offsetStart": 0n, 72 | } 73 | `; 74 | 75 | exports[`prepareMessageSet one chunk: one message 1`] = `"AAAAAAAAAAAAAABD/MU/7AEAAAAABQABAgMEAAAAMAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="`; 76 | 77 | exports[`prepareMessageSet one chunk: one message 2`] = ` 78 | Object { 79 | "buffer": ArrayBuffer [], 80 | "frames": Array [ 81 | Array [ 82 | 0, 83 | 79, 84 | ], 85 | ], 86 | "nextIndex": 79, 87 | "offsetStart": 0n, 88 | } 89 | `; 90 | 91 | exports[`prepareMessageSet one chunk: one message with existing message 1`] = `"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEP8xT/sAQAAAAAFAAECAwQAAAAwAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="`; 92 | 93 | exports[`prepareMessageSet one chunk: one message with existing message 2`] = ` 94 | Object { 95 | "buffer": ArrayBuffer [], 96 | "frames": Array [ 97 | Array [ 98 | 0, 99 | 56, 100 | ], 101 | Array [ 102 | 56, 103 | 79, 104 | ], 105 | ], 106 | "nextIndex": 135, 107 | "offsetStart": 0n, 108 | } 109 | `; 110 | -------------------------------------------------------------------------------- /test/state/chunk.test.ts: -------------------------------------------------------------------------------- 1 | import { Chunk, ChunkFiller, prepareMessageSet } from "src/state/chunk"; 2 | import { base64, fillMessageSet } from "test/common"; 3 | 4 | describe("prepareMessageSet", () => { 5 | type TestCase = [string, number, Chunk]; 6 | const cases: TestCase[] = [ 7 | [ 8 | "one message", 9 | 1, 10 | { 11 | offsetStart: BigInt(0), 12 | buffer: new ArrayBuffer(256), 13 | frames: [], 14 | nextIndex: 0, 15 | }, 16 | ], 17 | [ 18 | "one message with existing message", 19 | 1, 20 | { 21 | offsetStart: BigInt(0), 22 | buffer: new ArrayBuffer(256), 23 | frames: [[0, 56]], 24 | nextIndex: 56, 25 | }, 26 | ], 27 | [ 28 | "muliple messages", 29 | 5, 30 | { 31 | offsetStart: BigInt(0), 32 | buffer: new ArrayBuffer(1024), 33 | frames: [], 34 | nextIndex: 0, 35 | }, 36 | ], 37 | [ 38 | "muliple messages with existing messages", 39 | 5, 40 | { 41 | offsetStart: BigInt(0), 42 | buffer: new ArrayBuffer(1024), 43 | frames: [ 44 | [0, 96], 45 | [96, 64], 46 | ], 47 | nextIndex: 160, 48 | }, 49 | ], 50 | ]; 51 | 52 | test.each(cases)("one chunk: %s", (_name, numMessages, chunk) => { 53 | const initialChunk = structuredClone(chunk) as Chunk; 54 | const result = prepareMessageSet( 55 | fillMessageSet(numMessages), 56 | initialChunk.offsetStart 57 | ); 58 | expect("error" in result).toEqual(false); 59 | const filler = (result as { filler: ChunkFiller }).filler; 60 | 61 | const messagesFilled = filler.fillChunk(chunk); 62 | expect(messagesFilled).toEqual(numMessages); 63 | 64 | // Check that existing chunk contents are not modified 65 | expect(chunk.offsetStart).toEqual(initialChunk.offsetStart); 66 | expect(chunk.buffer.slice(0, initialChunk.nextIndex)).toEqual( 67 | initialChunk.buffer.slice(0, initialChunk.nextIndex) 68 | ); 69 | 
expect(chunk.frames.slice(0, initialChunk.frames.length)).toEqual( 70 | initialChunk.frames.slice(0, initialChunk.frames.length) 71 | ); 72 | 73 | // Check invariant that chunk.nextIndex equals end of chunk contents 74 | const finalFrame = chunk.frames.at(-1); 75 | if (finalFrame) { 76 | const [start, size] = finalFrame; 77 | expect(chunk.nextIndex).toEqual(start + size); 78 | } else { 79 | // Chunk has no contents 80 | expect(chunk.nextIndex).toEqual(0); 81 | } 82 | 83 | expect(base64(chunk.buffer)).toMatchSnapshot(); 84 | expect(chunk).toMatchSnapshot(); 85 | }); 86 | 87 | // test("one message", () => { 88 | // const filler = prepareMessageSet(fillMessageSet(1), BigInt(0)); 89 | // const chunk: Chunk = { 90 | // offsetStart: BigInt(0), 91 | // buffer: new ArrayBuffer(256), 92 | // frames: [], 93 | // nextIndex: 0, 94 | // }; 95 | 96 | // const messageCount = filler.fillChunk(chunk); 97 | // expect(messageCount).toEqual(1); 98 | 99 | // expect(base64(chunk.buffer)).toMatchSnapshot(); 100 | // expect(chunk).toMatchInlineSnapshot(` 101 | // Object { 102 | // "buffer": ArrayBuffer [], 103 | // "frames": Array [ 104 | // Array [ 105 | // 0, 106 | // 79, 107 | // ], 108 | // ], 109 | // "nextIndex": 79, 110 | // "offsetStart": 0n, 111 | // } 112 | // `); 113 | // }); 114 | 115 | // test("multiple messages", () => { 116 | // const filler = prepareMessageSet(fillMessageSet(5), BigInt(0)); 117 | // const chunk: Chunk = { 118 | // offsetStart: BigInt(0), 119 | // buffer: new ArrayBuffer(1024), 120 | // frames: [], 121 | // nextIndex: 0, 122 | // }; 123 | 124 | // const messageCount = filler.fillChunk(chunk); 125 | // expect(messageCount).toEqual(5); 126 | 127 | // expect(base64(chunk.buffer)).toMatchSnapshot(); 128 | // expect(chunk).toMatchInlineSnapshot(` 129 | // Object { 130 | // "buffer": ArrayBuffer [], 131 | // "frames": Array [ 132 | // Array [ 133 | // 0, 134 | // 79, 135 | // ], 136 | // Array [ 137 | // 79, 138 | // 171, 139 | // ], 140 | // Array [ 141 | // 250, 142 | // 257, 143 | // ], 144 | // Array [ 145 | // 507, 146 | // 154, 147 | // ], 148 | // Array [ 149 | // 661, 150 | // 150, 151 | // ], 152 | // ], 153 | // "nextIndex": 811, 154 | // "offsetStart": 0n, 155 | // } 156 | // `); 157 | // }); 158 | }); 159 | -------------------------------------------------------------------------------- /test/state/cluster.test.ts: -------------------------------------------------------------------------------- 1 | import { fetchClusterMetadata } from "src/state/cluster"; 2 | 3 | describe("fetchClusterMetadata", () => { 4 | const env = getMiniflareBindings(); 5 | 6 | test("returns static metadata when fetching all topics", async () => { 7 | const response = await fetchClusterMetadata(env, []); 8 | expect(response).toMatchInlineSnapshot(` 9 | Object { 10 | "brokers": Array [ 11 | Object { 12 | "host": "localhost", 13 | "nodeId": 333, 14 | "port": 8787, 15 | }, 16 | ], 17 | "topics": Array [ 18 | Object { 19 | "errorCode": 0, 20 | "name": "test-topic", 21 | "partitions": Array [ 22 | Object { 23 | "errorCode": 0, 24 | "isrNodes": Array [], 25 | "leaderId": 333, 26 | "partitionIndex": 0, 27 | "replicaNodes": Array [], 28 | }, 29 | ], 30 | }, 31 | ], 32 | } 33 | `); 34 | }); 35 | }); 36 | -------------------------------------------------------------------------------- /test/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "types": [ 5 | "@cloudflare/workers-types", 6 | 
"@types/jest" 7 | ] 8 | }, 9 | "include": [ 10 | "../src/**/*", 11 | "./**/*" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */ 4 | 5 | /* Projects */ 6 | // "incremental": true, /* Enable incremental compilation */ 7 | // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ 8 | // "tsBuildInfoFile": "./", /* Specify the folder for .tsbuildinfo incremental compilation files. */ 9 | // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects */ 10 | // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ 11 | // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ 12 | 13 | /* Language and Environment */ 14 | "target": "es2022" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */, 15 | "lib": [ 16 | "es2022" 17 | ] /* Specify a set of bundled library declaration files that describe the target runtime environment. */, 18 | "jsx": "react" /* Specify what JSX code is generated. */, 19 | // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */ 20 | // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ 21 | // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h' */ 22 | // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ 23 | // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using `jsx: react-jsx*`.` */ 24 | // "reactNamespace": "", /* Specify the object invoked for `createElement`. This only applies when targeting `react` JSX emit. */ 25 | // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ 26 | // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ 27 | 28 | /* Modules */ 29 | "module": "es2022" /* Specify what module code is generated. */, 30 | // "rootDir": "./", /* Specify the root folder within your source files. */ 31 | "moduleResolution": "node" /* Specify how TypeScript looks up a file from a given module specifier. */, 32 | // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ 33 | "paths": { "src/*": ["./src/*"], "test/*": ["./test/*"] }, /* Specify a set of entries that re-map imports to additional lookup locations. */ 34 | // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ 35 | // "typeRoots": [], /* Specify multiple folders that act like `./node_modules/@types`. */ 36 | "types": [ 37 | "@cloudflare/workers-types" 38 | ] /* Specify type package names to be included without being referenced in a source file. */, 39 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. 
*/ 40 | "resolveJsonModule": true /* Enable importing .json files */, 41 | // "noResolve": true, /* Disallow `import`s, `require`s or ``s from expanding the number of files TypeScript should add to a project. */ 42 | 43 | /* JavaScript Support */ 44 | "allowJs": true /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */, 45 | "checkJs": false /* Enable error reporting in type-checked JavaScript files. */, 46 | // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */ 47 | 48 | /* Emit */ 49 | // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ 50 | // "declarationMap": true, /* Create sourcemaps for d.ts files. */ 51 | // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ 52 | // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ 53 | // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If `declaration` is true, also designates a file that bundles all .d.ts output. */ 54 | // "outDir": "./", /* Specify an output folder for all emitted files. */ 55 | // "removeComments": true, /* Disable emitting comments. */ 56 | "noEmit": true /* Disable emitting files from a compilation. */, 57 | // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ 58 | // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types */ 59 | // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ 60 | // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ 61 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ 62 | // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ 63 | // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ 64 | // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ 65 | // "newLine": "crlf", /* Set the newline character for emitting files. */ 66 | // "stripInternal": true, /* Disable emitting declarations that have `@internal` in their JSDoc comments. */ 67 | // "noEmitHelpers": true, /* Disable generating custom helper functions like `__extends` in compiled output. */ 68 | // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ 69 | // "preserveConstEnums": true, /* Disable erasing `const enum` declarations in generated code. */ 70 | // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ 71 | // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ 72 | 73 | /* Interop Constraints */ 74 | "isolatedModules": true /* Ensure that each file can be safely transpiled without relying on other imports. */, 75 | "allowSyntheticDefaultImports": true /* Allow 'import x from y' when a module doesn't have a default export. */, 76 | // "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. 
*/, 77 | // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ 78 | "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */, 79 | 80 | /* Type Checking */ 81 | "strict": true /* Enable all strict type-checking options. */, 82 | // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied `any` type.. */ 83 | // "strictNullChecks": true, /* When type checking, take into account `null` and `undefined`. */ 84 | // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ 85 | // "strictBindCallApply": true, /* Check that the arguments for `bind`, `call`, and `apply` methods match the original function. */ 86 | // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ 87 | // "noImplicitThis": true, /* Enable error reporting when `this` is given the type `any`. */ 88 | // "useUnknownInCatchVariables": true, /* Type catch clause variables as 'unknown' instead of 'any'. */ 89 | // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ 90 | // "noUnusedLocals": true, /* Enable error reporting when a local variables aren't read. */ 91 | // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read */ 92 | // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ 93 | // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ 94 | // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ 95 | // "noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */ 96 | // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ 97 | // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type */ 98 | // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ 99 | // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ 100 | 101 | /* Completeness */ 102 | // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ 103 | "skipLibCheck": true /* Skip type checking all .d.ts files. 
*/ 104 | }, 105 | "include": [ 106 | "src/**/*" 107 | ] 108 | } 109 | -------------------------------------------------------------------------------- /wrangler.toml: -------------------------------------------------------------------------------- 1 | name = "kafka-worker" 2 | main = "dist/index.mjs" 3 | compatibility_date = "2022-08-21" 4 | 5 | [build] 6 | command = "node build.js" 7 | 8 | [vars] 9 | HOSTNAME="localhost" 10 | PORT="8787" 11 | PARTITION_CHUNK_SIZE="4096" 12 | INITIAL_CLUSTER_METADATA="{\"topics\":[{\"name\":\"test-topic\",\"partitions\":[{\"index\":0}]}]}" 13 | 14 | [durable_objects] 15 | bindings = [ 16 | {name="CLUSTER", class_name="Cluster"}, 17 | {name="PARTITION", class_name="Partition"}, 18 | ] 19 | 20 | [[migrations]] 21 | tag = "v1" 22 | new_classes = ["Cluster"] 23 | 24 | [[migrations]] 25 | tag = "v2" 26 | new_classes = ["Session"] 27 | 28 | [[migrations]] 29 | tag = "v3" 30 | new_classes = ["Partition"] 31 | 32 | [[migrations]] 33 | tag = "v4" 34 | deleted_classes = ["Session"] 35 | --------------------------------------------------------------------------------
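For orientation, the Durable Object bindings and [vars] declared in wrangler.toml above surface to the Worker as an environment object, and it is this object that the Miniflare test bindings in test/state/cluster.test.ts stand in for via getMiniflareBindings(). The sketch below shows one plausible shape for that object and how the string vars might be read; it is an illustration only — the repository's own environment type and config handling are defined in its source and may differ, so every name here is an assumption. It relies on the ambient DurableObjectNamespace type from @cloudflare/workers-types, which the tsconfig above already includes.

// Sketch only: a plausible Env shape for the bindings and vars declared in
// wrangler.toml above. The repository's actual environment type (defined in
// its source, not reproduced here) may differ; all names are illustrative.
interface Env {
  // Durable Object namespaces from [durable_objects].bindings
  CLUSTER: DurableObjectNamespace;
  PARTITION: DurableObjectNamespace;
  // Plain-text [vars]; numeric settings arrive as strings and must be parsed
  HOSTNAME: string;
  PORT: string;
  PARTITION_CHUNK_SIZE: string;
  // JSON-encoded seed metadata, e.g.
  // {"topics":[{"name":"test-topic","partitions":[{"index":0}]}]}
  INITIAL_CLUSTER_METADATA: string;
}

// Example of turning the string vars into typed config values.
const readConfig = (env: Env) => ({
  hostname: env.HOSTNAME,
  port: parseInt(env.PORT, 10),
  chunkSize: parseInt(env.PARTITION_CHUNK_SIZE, 10),
  initialMetadata: JSON.parse(env.INITIAL_CLUSTER_METADATA) as {
    topics: { name: string; partitions: { index: number }[] }[];
  },
});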