├── bun.lockb ├── examples └── cloudflare-worker │ ├── .gitignore │ ├── bindings.d.ts │ ├── tsconfig.json │ ├── wrangler.toml │ ├── build.js │ ├── package.json │ └── src │ └── index.ts ├── .env.example ├── pkg ├── index.ts ├── http.test.ts ├── types.ts ├── test_teardown.ts ├── error.ts ├── test_setup.ts ├── base64.ts ├── kafka.ts ├── http.ts ├── produce.test.ts ├── producer.ts ├── admin.ts ├── consumer.test.ts ├── admin.test.ts └── consumer.ts ├── .gitignore ├── tsup.config.js ├── tsconfig.json ├── .github └── workflows │ ├── ci.yml │ └── release.yml ├── biome.json ├── LICENSE ├── package.json └── README.md /bun.lockb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/kafka-js/HEAD/bun.lockb -------------------------------------------------------------------------------- /examples/cloudflare-worker/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist 3 | .mf 4 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | UPSTASH_KAFKA_REST_URL= 2 | UPSTASH_KAFKA_REST_USERNAME= 3 | UPSTASH_KAFKA_REST_PASSWORD= -------------------------------------------------------------------------------- /pkg/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./kafka"; 2 | export * from "./error"; 3 | export * from "./types"; 4 | -------------------------------------------------------------------------------- /examples/cloudflare-worker/bindings.d.ts: -------------------------------------------------------------------------------- 1 | export interface Bindings { 2 | UPSTASH_KAFKA_REST_URL: string 3 | UPSTASH_KAFKA_REST_USERNAME: string 4 | UPSTASH_KAFKA_REST_PASSWORD: string 5 | } 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | npm/ 3 | node_modules/ 4 | coverage 5 | .env* 6 | !.env.example 7 | .pnpm-debug.log 8 | dist/ 9 | .idea/ 10 | 11 | examples/**/pnpm-lock.yaml 12 | examples/**/yarn.lock 13 | examples/**/package-lock.json -------------------------------------------------------------------------------- /pkg/http.test.ts: -------------------------------------------------------------------------------- 1 | import { HttpClient } from "./http"; 2 | 3 | import { expect, test } from "bun:test"; 4 | test("remove trailing slash from urls", () => { 5 | const client = new HttpClient({ baseUrl: "https://example.com/" }); 6 | 7 | expect(client.baseUrl).toBe("https://example.com"); 8 | }); 9 | -------------------------------------------------------------------------------- /tsup.config.js: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "tsup"; 2 | 3 | export default defineConfig({ 4 | entry: ["./pkg/index.ts"], 5 | format: ["cjs", "esm"], 6 | splitting: false, 7 | sourcemap: false, 8 | clean: true, 9 | bundle: true, 10 | dts: true, 11 | minify: true, 12 | minifyWhitespace: true, 13 | }); 14 | -------------------------------------------------------------------------------- /pkg/types.ts: -------------------------------------------------------------------------------- 1 | export type { Producer } from "./producer"; 2 | export type { Consumer } from "./consumer"; 3 | 4 | export type Header = 
{ key: string; value: string }; 5 | 6 | export type Message = { 7 | topic: string; 8 | partition: number; 9 | offset: number; 10 | timestamp: number; 11 | key: string; 12 | value: string; 13 | headers: Header[]; 14 | }; 15 | -------------------------------------------------------------------------------- /pkg/test_teardown.ts: -------------------------------------------------------------------------------- 1 | import { kafka } from "./test_setup"; 2 | export default async function teardown(): Promise<void> { 3 | const a = kafka.admin(); 4 | const existingConsumers = await a.consumers(); 5 | await Promise.all( 6 | existingConsumers.flatMap((group) => 7 | group.instances.map((instance) => a.removeConsumerInstance(group.name, instance.name)), 8 | ), 9 | ); 10 | } 11 | -------------------------------------------------------------------------------- /examples/cloudflare-worker/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "esnext", 5 | "lib": ["esnext"], 6 | "types": ["@cloudflare/workers-types"], 7 | "moduleResolution": "node", 8 | "strict": true, 9 | "noEmit": true, 10 | "esModuleInterop": true, 11 | "baseUrl": "./", 12 | "paths": { 13 | "@/*": ["src/*"] 14 | } 15 | }, 16 | "include": ["src/**/*", "bindings.d.ts"] 17 | } 18 | -------------------------------------------------------------------------------- /pkg/error.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Result of a bad request to upstash 3 | */ 4 | export class UpstashError extends Error { 5 | public readonly result: string; 6 | public readonly error: string; 7 | public readonly status: number; 8 | constructor(res: { result: string; error: string; status: number }) { 9 | super(res.error); 10 | this.name = "UpstashError"; 11 | this.result = res.result; 12 | this.error = res.error; 13 | this.status = res.status; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /examples/cloudflare-worker/wrangler.toml: -------------------------------------------------------------------------------- 1 | name = "upstash-kafka" 2 | type = "javascript" 3 | 4 | workers_dev = true 5 | # route = "" 6 | # zone_id = "" 7 | 8 | compatibility_date = "2022-03-22" 9 | compatibility_flags = [] 10 | 11 | 12 | [build] 13 | command = "pnpm build" 14 | 15 | [build.upload] 16 | format = "modules" 17 | dir = "dist" 18 | main = "./index.mjs" 19 | 20 | 21 | # Set variables here or on cloudflare 22 | # [vars] 23 | # UPSTASH_KAFKA_REST_URL="" 24 | # UPSTASH_KAFKA_REST_USERNAME="" 25 | # UPSTASH_KAFKA_REST_PASSWORD="" 26 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["ESNext"], 4 | "module": "esnext", 5 | "target": "esnext", 6 | "moduleResolution": "bundler", 7 | "moduleDetection": "force", 8 | "allowImportingTsExtensions": true, 9 | "noEmit": true, 10 | "strict": true, 11 | "downlevelIteration": true, 12 | "skipLibCheck": true, 13 | "allowSyntheticDefaultImports": true, 14 | "forceConsistentCasingInFileNames": true, 15 | "allowJs": true, 16 | "types": [ 17 | "bun-types" 18 | ] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /examples/cloudflare-worker/build.js: -------------------------------------------------------------------------------- 1
| import path from "path" 2 | import { fileURLToPath } from "url" 3 | import { build } from "esbuild" 4 | 5 | const __filename = fileURLToPath(import.meta.url) 6 | const __dirname = path.dirname(__filename) 7 | 8 | try { 9 | await build({ 10 | bundle: true, 11 | sourcemap: true, 12 | format: "esm", 13 | target: "esnext", 14 | entryPoints: [path.join(__dirname, "src", "index.ts")], 15 | outdir: path.join(__dirname, "dist"), 16 | outExtension: { ".js": ".mjs" }, 17 | }) 18 | } catch (err) { 19 | console.error(err) 20 | process.exitCode = 1 21 | } 22 | -------------------------------------------------------------------------------- /pkg/test_setup.ts: -------------------------------------------------------------------------------- 1 | import { Kafka } from "./kafka"; 2 | const url = process.env.UPSTASH_KAFKA_REST_URL; 3 | if (!url) { 4 | throw new Error("UPSTASH_KAFKA_REST_URL env missing"); 5 | } 6 | 7 | const username = process.env.UPSTASH_KAFKA_REST_USERNAME; 8 | if (!username) { 9 | throw new Error("UPSTASH_KAFKA_REST_USERNAME env missing"); 10 | } 11 | 12 | const password = process.env.UPSTASH_KAFKA_REST_PASSWORD; 13 | if (!password) { 14 | throw new Error("UPSTASH_KAFKA_REST_PASSWORD env missing"); 15 | } 16 | 17 | export const kafka = new Kafka({ url, username, password }); 18 | 19 | /* eslint-disable no-unused-vars */ 20 | export enum Topic { 21 | GREEN = "green", 22 | BLUE = "blue", 23 | RED = "red", 24 | } 25 | /* eslint-enable no-unused-vars */ 26 | -------------------------------------------------------------------------------- /examples/cloudflare-worker/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "miniflare-typescript-esbuild-jest", 3 | "version": "1.0.0", 4 | "description": "Example project using Miniflare, TypeScript, esbuild and Jest", 5 | "type": "module", 6 | "module": "./dist/index.mjs", 7 | "scripts": { 8 | "build": "node build.js", 9 | "dev": "miniflare --live-reload --debug", 10 | "types:check": "tsc && tsc -p test/tsconfig.json" 11 | }, 12 | "keywords": [], 13 | "author": "", 14 | "license": "MIT", 15 | "devDependencies": { 16 | "@cloudflare/workers-types": "^3.4.0", 17 | "esbuild": "^0.13.15", 18 | "esbuild-darwin-arm64": "^0.14.34", 19 | "miniflare": "^2.4.0", 20 | "prettier": "^2.6.2", 21 | "typescript": "^4.6.3" 22 | }, 23 | "dependencies": { 24 | "@upstash/kafka": "../.." 
25 | } 26 | } 27 | -------------------------------------------------------------------------------- /examples/cloudflare-worker/src/index.ts: -------------------------------------------------------------------------------- 1 | import { Kafka } from "@upstash/kafka" 2 | import type { Bindings } from "bindings" 3 | 4 | export default { 5 | async fetch(_request: Request, env: Bindings) { 6 | const kafka = new Kafka({ 7 | url: env.UPSTASH_KAFKA_REST_URL, 8 | username: env.UPSTASH_KAFKA_REST_USERNAME, 9 | password: env.UPSTASH_KAFKA_REST_PASSWORD, 10 | }) 11 | 12 | const p = kafka.producer() 13 | const c = kafka.consumer() 14 | const topicA = "a" 15 | 16 | await p.produce(topicA, "Hello World") 17 | 18 | const messages = await c.consume({ 19 | consumerGroupId: "group_1", 20 | instanceId: "instance_1", 21 | topics: [topicA], 22 | autoOffsetReset: "earliest", 23 | }) 24 | 25 | return new Response(JSON.stringify(messages), { headers: { "content-type": "text/plain" } }) 26 | }, 27 | } 28 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | pull_request: 4 | 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Setup repo 10 | uses: actions/checkout@v2 11 | 12 | - name: Setup Bun 13 | uses: oven-sh/setup-bun@v1 14 | with: 15 | bun-version: latest 16 | 17 | - name: Install dependencies 18 | run: bun install 19 | 20 | - name: Verify formatting 21 | run: bunx @biomejs/biome check . 22 | 23 | - name: Run tests 24 | run: bun run test 25 | env: 26 | UPSTASH_KAFKA_REST_URL: ${{ secrets.UPSTASH_KAFKA_REST_URL }} 27 | UPSTASH_KAFKA_REST_USERNAME: ${{ secrets.UPSTASH_KAFKA_REST_USERNAME }} 28 | UPSTASH_KAFKA_REST_PASSWORD: ${{ secrets.UPSTASH_KAFKA_REST_PASSWORD }} 29 | 30 | - name: Build 31 | run: bun run build 32 | -------------------------------------------------------------------------------- /pkg/base64.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Encode a string as base64 3 | * 4 | * Credit to https://base64.guru/developers/javascript/examples/polyfill 5 | */ 6 | export function base64(origin: string): string { 7 | const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; 8 | 9 | const len = origin.length - 1; 10 | let i = -1; 11 | let encoded = ""; 12 | 13 | while (i < len) { 14 | const code = 15 | (origin.charCodeAt(++i) << 16) | (origin.charCodeAt(++i) << 8) | origin.charCodeAt(++i); 16 | encoded += 17 | alphabet[(code >>> 18) & 63] + 18 | alphabet[(code >>> 12) & 63] + 19 | alphabet[(code >>> 6) & 63] + 20 | alphabet[code & 63]; 21 | } 22 | 23 | const pads = origin.length % 3; 24 | if (pads > 0) { 25 | encoded = encoded.slice(0, pads - 3); 26 | 27 | while (encoded.length % 4 !== 0) { 28 | encoded += "="; 29 | } 30 | } 31 | 32 | return encoded; 33 | } 34 | -------------------------------------------------------------------------------- /biome.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://biomejs.dev/schemas/1.0.0/schema.json", 3 | "linter": { 4 | "enabled": true, 5 | "rules": { 6 | "recommended": true, 7 | "a11y": { 8 | "noSvgWithoutTitle": "off" 9 | }, 10 | "correctness": { 11 | "noUnusedVariables": "warn" 12 | }, 13 | "security": { 14 | "noDangerouslySetInnerHtml": "off" 15 | }, 16 | "style": { 17 | "useBlockStatements": "error", 18 | "noNonNullAssertion": "off" 19 | }, 
20 | "performance": { 21 | "noDelete": "off" 22 | }, 23 | "suspicious": { 24 | "noExplicitAny": "off" 25 | } 26 | }, 27 | "ignore": ["node_modules", ".next", "dist", ".nuxt", ".contentlayer", "examples"] 28 | }, 29 | "formatter": { 30 | "indentStyle": "space", 31 | "indentWidth": 2, 32 | "enabled": true, 33 | "lineWidth": 100, 34 | "ignore": ["node_modules", ".next", "dist", ".nuxt", ".contentlayer"] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | release: 5 | types: 6 | - published 7 | 8 | jobs: 9 | release: 10 | name: Release 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout Repo 14 | uses: actions/checkout@v2 15 | 16 | - name: Set env 17 | run: echo "VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV 18 | 19 | - name: Setup Bun 20 | uses: oven-sh/setup-bun@v1 21 | with: 22 | bun-version: latest 23 | 24 | - name: Install dependencies 25 | run: bun install 26 | 27 | - name: Set version 28 | run: echo $(jq --arg v "$VERSION" '(.version) = $v' package.json) > package.json 29 | 30 | - name: Set NPM_TOKEN 31 | run: npm config set //registry.npmjs.org/:_authToken=${{secrets.NPM_TOKEN}} 32 | 33 | - name: Build 34 | run: bun run build 35 | - name: Publish 36 | run: | 37 | npm publish --access public 38 | env: 39 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 40 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Upstash, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@upstash/kafka", 3 | "version": "1.3.0", 4 | "engines": { 5 | "node": ">=18" 6 | }, 7 | "description": "An HTTP/REST based Kafka client built on top of Upstash REST API.", 8 | "main": "./dist/index.js", 9 | "types": "./dist/index.d.ts", 10 | "module": "./dist/index.mjs", 11 | "files": [ 12 | "./dist" 13 | ], 14 | "scripts": { 15 | "test": "bun test pkg --coverage", 16 | "fmt": "bunx @biomejs/biome check --apply ./pkg", 17 | "build": "tsup" 18 | }, 19 | "repository": { 20 | "type": "git", 21 | "url": "git+https://github.com/upstash/upstash-kafka.git" 22 | }, 23 | "keywords": [ 24 | "kafka", 25 | "database", 26 | "serverless", 27 | "edge", 28 | "upstash" 29 | ], 30 | "author": "Andreas Thomas ", 31 | "license": "MIT", 32 | "bugs": { 33 | "url": "https://github.com/upstash/upstash-kafka/issues" 34 | }, 35 | "homepage": "https://github.com/upstash/upstash-kafka#readme", 36 | "directories": { 37 | "examples": "examples" 38 | }, 39 | "devDependencies": { 40 | "@biomejs/biome": "^1.3.0", 41 | "bun-types": "^1.0.7", 42 | "tsup": "^7.2.0", 43 | "typescript": "latest" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /pkg/kafka.ts: -------------------------------------------------------------------------------- 1 | import { Admin } from "./admin"; 2 | import { base64 } from "./base64"; 3 | import { Consumer } from "./consumer"; 4 | import { HttpClient } from "./http"; 5 | import { Producer } from "./producer"; 6 | /** 7 | * Connection credentials for upstash kafka. 8 | * Get them from https://console.upstash.com/kafka/ 9 | */ 10 | export type KafkaConfig = { 11 | /** 12 | * UPSTASH_KAFKA_REST_URL 13 | */ 14 | url: string; 15 | /** 16 | * UPSTASH_KAFKA_REST_USERNAME 17 | */ 18 | username: string; 19 | /** 20 | * UPSTASH_KAFKA_REST_PASSWORD 21 | */ 22 | password: string; 23 | }; 24 | 25 | /** 26 | * Serverless Kafka client for upstash. 
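 *
 * @example
 * A minimal end-to-end sketch; the credentials, topic name, and consumer ids below are
 * placeholders (they echo the Cloudflare Worker example in this repo):
 * ```typescript
 * const kafka = new Kafka({ url: "...", username: "...", password: "..." });
 * await kafka.producer().produce("my.topic", { hello: "world" });
 * const messages = await kafka.consumer().consume({
 *   consumerGroupId: "group_1",
 *   instanceId: "instance_1",
 *   topics: ["my.topic"],
 *   autoOffsetReset: "earliest",
 * });
 * ```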
27 | */ 28 | export class Kafka { 29 | private readonly client: HttpClient; 30 | 31 | /** 32 | * Create a new kafka client 33 | * 34 | * @example 35 | * ```typescript 36 | * const kafka = new Kafka({ 37 | * url: "", 38 | * username: "", 39 | * password: "", 40 | * }); 41 | * ``` 42 | */ 43 | constructor(config: KafkaConfig) { 44 | this.client = new HttpClient({ 45 | baseUrl: config.url, 46 | headers: { 47 | authorization: `Basic ${base64(`${config.username}:${config.password}`)}`, 48 | }, 49 | }); 50 | } 51 | /** 52 | * Create a new producer client 53 | */ 54 | public producer(): Producer { 55 | return new Producer(this.client); 56 | } 57 | 58 | /** 59 | * Create a new consumer client 60 | */ 61 | public consumer(): Consumer { 62 | return new Consumer(this.client); 63 | } 64 | 65 | /** 66 | * Create a new admin client 67 | */ 68 | public admin(): Admin { 69 | return new Admin(this.client); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /pkg/http.ts: -------------------------------------------------------------------------------- 1 | import { UpstashError } from "./error"; 2 | 3 | export type Request = { 4 | path: string[]; 5 | /** 6 | * Request body will be serialized to json 7 | */ 8 | body?: unknown; 9 | headers?: Record<string, string>; 10 | retries?: number; 11 | }; 12 | 13 | export type HttpClientConfig = { 14 | headers?: Record<string, string>; 15 | baseUrl: string; 16 | }; 17 | type ErrorResponse = { result: string; error: string; status: number }; 18 | 19 | export class HttpClient { 20 | public readonly baseUrl: string; 21 | public readonly headers: Record<string, string>; 22 | 23 | public constructor(config: HttpClientConfig) { 24 | this.baseUrl = config.baseUrl.replace(/\/$/, ""); 25 | this.headers = config.headers ?? {}; 26 | } 27 | 28 | private async request<TResponse>( 29 | method: "GET" | "POST" | "PUT" | "DELETE", 30 | req: Request, 31 | ): Promise<TResponse> { 32 | const headers = { 33 | "Content-Type": "application/json", 34 | ...this.headers, 35 | ...req.headers, 36 | }; 37 | 38 | let err = new Error(); 39 | for (let attempt = 0; attempt <= (req.retries ??
5); attempt++) { 40 | if (attempt > 0) { 41 | // 0.25s up to 8s timeouts 42 | await new Promise((r) => setTimeout(r, 2 ** attempt * 250)); 43 | } 44 | 45 | try { 46 | // fetch is defined by isomorphic fetch 47 | // eslint-disable-next-line no-undef 48 | const res = await fetch([this.baseUrl, ...req.path].join("/"), { 49 | method, 50 | headers, 51 | keepalive: true, 52 | body: JSON.stringify(req.body), 53 | }); 54 | 55 | const body = await res.json(); 56 | if (!res.ok) { 57 | throw new UpstashError(body as ErrorResponse); 58 | } 59 | 60 | return body as TResponse; 61 | } catch (e) { 62 | err = e as Error; 63 | } 64 | } 65 | throw err; 66 | } 67 | 68 | public async get<TResponse>(req: Request): Promise<TResponse> { 69 | return await this.request<TResponse>("GET", req); 70 | } 71 | 72 | public async post<TResponse>(req: Request): Promise<TResponse> { 73 | return await this.request<TResponse>("POST", req); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /pkg/produce.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, it } from "bun:test"; 2 | import { Topic, kafka } from "./test_setup"; 3 | 4 | it("publishes a single message successfully", async () => { 5 | const p = kafka.producer(); 6 | const c = kafka.consumer(); 7 | const message = { hello: "test" }; 8 | const header = { key: "signature", value: "abcd" }; 9 | 10 | const { partition, offset, topic } = await p.produce(Topic.RED, message, { headers: [header] }); 11 | 12 | const found = await c.fetch({ topic, partition, offset }); 13 | expect(JSON.parse(found[0].value)).toEqual(message); 14 | expect(found[0].headers[0]).toEqual(header); 15 | }); 16 | it("publishes a serialized message successfully", async () => { 17 | const p = kafka.producer(); 18 | const c = kafka.consumer(); 19 | const message = "hello world"; 20 | const header = { key: "signature", value: "abcd" }; 21 | 22 | const { partition, offset, topic } = await p.produce(Topic.RED, message, { headers: [header] }); 23 | 24 | const found = await c.fetch({ topic, partition, offset }); 25 | expect(found[0].value).toEqual(message); 26 | expect(found[0].headers[0]).toEqual(header); 27 | }); 28 | 29 | it("publishes multiple messages to different topics successfully", async () => { 30 | const p = kafka.producer(); 31 | const c = kafka.consumer(); 32 | const message0 = "test"; 33 | const message1 = "world"; 34 | 35 | const res = await p.produceMany([ 36 | { topic: Topic.RED, value: message0 }, 37 | { topic: Topic.GREEN, value: message1 }, 38 | ]); 39 | 40 | const found = await c.fetch({ 41 | topicPartitionOffsets: res.map((r) => ({ 42 | topic: r.topic, 43 | partition: r.partition, 44 | offset: r.offset, 45 | })), 46 | }); 47 | 48 | expect(found[0].value).toEqual(message0); 49 | expect(found[1].value).toEqual(message1); 50 | }); 51 | it("publishes multiple serialized messages to different topics successfully", async () => { 52 | const p = kafka.producer(); 53 | const c = kafka.consumer(); 54 | const key0 = "k0"; 55 | const key1 = "k1"; 56 | const message0 = { hello: "test" }; 57 | const message1 = { hello: "world" }; 58 | 59 | const res = await p.produceMany([ 60 | { topic: Topic.RED, key: key0, value: message0 }, 61 | { topic: Topic.GREEN, key: key1, value: message1 }, 62 | ]); 63 | 64 | const found = await c.fetch({ 65 | topicPartitionOffsets: res.map((r) => ({ 66 | topic: r.topic, 67 | partition: r.partition, 68 | offset: r.offset, 69 | })), 70 | }); 71 | 72 | expect(JSON.parse(found[0].value)).toStrictEqual(message0); 73 | expect(found[0].key).toEqual(key0);
74 | 75 | expect(JSON.parse(found[1].value)).toEqual(message1); 76 | expect(found[1].key).toEqual(key1); 77 | }); 78 | -------------------------------------------------------------------------------- /pkg/producer.ts: -------------------------------------------------------------------------------- 1 | import { HttpClient } from "./http"; 2 | 3 | /** 4 | * Optional parameters for each produced message 5 | */ 6 | export type ProduceOptions = { 7 | /** 8 | * The partition to produce to. 9 | * Will be assigned by kafka if left empty. 10 | */ 11 | partition?: number; 12 | /** 13 | * The unix timestamp in seconds. 14 | * Will be assigned by kafka if left empty. 15 | */ 16 | timestamp?: number; 17 | /** 18 | * Events with the same event key (e.g., a customer or vehicle ID) are written 19 | * to the same partition, and Kafka guarantees that any consumer of a given 20 | * topic-partition will always read that partition's events in exactly the 21 | * same order as they were written. 22 | */ 23 | key?: string; 24 | headers?: { key: string; value: string }[]; 25 | }; 26 | 27 | /** 28 | * Request payload to produce a message to a topic. 29 | */ 30 | export type ProduceRequest = ProduceOptions & { 31 | /** 32 | * The topic where the message gets published. 33 | * Make sure this topic exists in upstash beforehand, otherwise the request will throw an error. 34 | */ 35 | topic: string; 36 | /** 37 | * The message itself. This will be serialized using `JSON.stringify` 38 | */ 39 | value: unknown; 40 | }; 41 | 42 | /** 43 | * Response for each successfully produced message 44 | */ 45 | export type ProduceResponse = { 46 | topic: string; 47 | partition: number; 48 | offset: number; 49 | timestamp: number; 50 | }; 51 | 52 | export class Producer { 53 | private readonly client: HttpClient; 54 | 55 | constructor(client: HttpClient) { 56 | this.client = client; 57 | } 58 | /** 59 | * Produce a single message to a single topic 60 | */ 61 | public async produce<TMessage>( 62 | topic: string, 63 | message: TMessage, 64 | opts?: ProduceOptions, 65 | ): Promise<ProduceResponse> { 66 | const request: ProduceRequest = { 67 | topic, 68 | value: typeof message === "string" ? message : JSON.stringify(message), 69 | ...opts, 70 | }; 71 | 72 | const res = await this.client.post<ProduceResponse[]>({ 73 | path: ["produce"], 74 | body: request, 75 | }); 76 | 77 | return res[0]; 78 | } 79 | 80 | /** 81 | * Produce multiple messages to different topics at the same time 82 | * 83 | * Each entry in the response array corresponds to the request at the same position in the input array. 84 | */ 85 | public async produceMany(requests: ProduceRequest[]): Promise<ProduceResponse[]> { 86 | const transformedRequests = requests.map(({ value, ...rest }) => ({ 87 | ...rest, 88 | value: typeof value === "string" ?
value : JSON.stringify(value), 89 | })); 90 | 91 | return await this.client.post<ProduceResponse[]>({ 92 | path: ["produce"], 93 | body: transformedRequests, 94 | }); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /pkg/admin.ts: -------------------------------------------------------------------------------- 1 | import { TopicPartition, TopicPartitionOffset } from "./consumer"; 2 | import { HttpClient } from "./http"; 3 | 4 | export type OffsetsRequest = { consumerGroupId: string; instanceId: string } & ( 5 | | { topicPartition?: never; topicPartitions: TopicPartition[] } 6 | | { topicPartition: TopicPartition; topicPartitions?: never } 7 | ); 8 | 9 | export type TopicPartitionOffsetsRequest = { 10 | /** 11 | * Unix timestamp in milliseconds or `earliest` or `latest` 12 | */ 13 | timestamp: number | "earliest" | "latest"; 14 | } & ( 15 | | { topicPartition?: never; topicPartitions: TopicPartition[] } 16 | | { topicPartition: TopicPartition; topicPartitions?: never } 17 | ); 18 | 19 | /** 20 | * Topic names and their partitions 21 | */ 22 | export type GetTopicsResponse = { [topic: string]: number }; 23 | 24 | type TopicAssignments = { topic: string; partitions: number[] }; 25 | 26 | type InstanceAssignments = { name: string; topics: TopicAssignments[] }; 27 | 28 | type GroupAssignments = { name: string; instances: InstanceAssignments[] }; 29 | export class Admin { 30 | private readonly client: HttpClient; 31 | 32 | constructor(client: HttpClient) { 33 | this.client = client; 34 | } 35 | /** 36 | * List all topics belonging to the user 37 | */ 38 | public async topics(): Promise<GetTopicsResponse> { 39 | return await this.client.get<GetTopicsResponse>({ path: ["topics"] }); 40 | } 41 | 42 | /** 43 | * Lists consumers belonging to the user known by the REST server. 44 | */ 45 | public async consumers(): Promise<GroupAssignments[]> { 46 | return await this.client.get<GroupAssignments[]>({ path: ["consumers"] }); 47 | } 48 | 49 | /** 50 | * Stops and removes a previously created consumer group instance. 51 | */ 52 | public async removeConsumerInstance(consumerGroup: string, instanceId: string): Promise<void> { 53 | await this.client.post({ 54 | path: ["delete-consumer", consumerGroup, instanceId], 55 | }); 56 | } 57 | /** 58 | * Returns the last committed offsets for the topic partitions inside the group. Can be used 59 | * alongside Commit Consumer API. 60 | */ 61 | public async committedOffsets(req: OffsetsRequest): Promise<TopicPartitionOffset[]> { 62 | return await this.client.post<TopicPartitionOffset[]>({ 63 | path: ["committed", req.consumerGroupId, req.instanceId], 64 | body: req.topicPartition ? [req.topicPartition] : req.topicPartitions, 65 | }); 66 | } 67 | /** 68 | * Returns the offsets for the given partitions by timestamp. The returned offset for each 69 | * partition is the earliest offset whose timestamp is greater than or equal to the given 70 | * timestamp in the corresponding partition. 71 | */ 72 | public async topicPartitionOffsets( 73 | req: TopicPartitionOffsetsRequest, 74 | ): Promise<TopicPartitionOffset[]> { 75 | return await this.client.post<TopicPartitionOffset[]>({ 76 | path: ["offsets", req.timestamp.toString()], 77 | body: req.topicPartition ?
[req.topicPartition] : req.topicPartitions, 78 | }); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /pkg/consumer.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, expect, test } from "bun:test"; 2 | import { randomUUID } from "crypto"; 3 | import { Topic, kafka } from "./test_setup"; 4 | 5 | describe("consume()", () => { 6 | test("Consume from a single topic", async () => { 7 | const p = kafka.producer(); 8 | const c = kafka.consumer(); 9 | const consumerGroupId = randomUUID(); 10 | const instanceId = randomUUID(); 11 | const topic = Topic.BLUE; 12 | 13 | const message = randomUUID(); 14 | await p.produce(topic, message); 15 | 16 | let messageFound = false; 17 | for (let i = 0; i < 30; i++) { 18 | const messages = await c.consume({ 19 | consumerGroupId, 20 | instanceId, 21 | topics: [topic], 22 | autoOffsetReset: "earliest", 23 | timeout: 3000, 24 | }); 25 | if (messages.map((m) => m.value).includes(message)) { 26 | messageFound = true; 27 | break; 28 | } 29 | await new Promise((r) => setTimeout(r, 1000)); 30 | } 31 | await kafka.admin().removeConsumerInstance(consumerGroupId, instanceId); 32 | 33 | expect(messageFound).toBe(true); 34 | }); 35 | 36 | test( 37 | "Consume from multiple topics", 38 | async () => { 39 | const p = kafka.producer(); 40 | const c = kafka.consumer(); 41 | const consumerGroupId = randomUUID(); 42 | const instanceId = randomUUID(); 43 | const topics = [Topic.BLUE, Topic.RED]; 44 | 45 | const message = randomUUID(); 46 | await p.produce(topics[0], message); 47 | 48 | let messageFound = false; 49 | for (let i = 0; i < 30; i++) { 50 | const messages = await c.consume({ 51 | consumerGroupId, 52 | instanceId, 53 | topics, 54 | autoCommit: false, 55 | autoCommitInterval: 7000, 56 | autoOffsetReset: "earliest", 57 | }); 58 | if (messages.map((m) => m.value).includes(message)) { 59 | messageFound = true; 60 | break; 61 | } 62 | await new Promise((r) => setTimeout(r, 1000)); 63 | } 64 | await kafka.admin().removeConsumerInstance(consumerGroupId, instanceId); 65 | 66 | expect(messageFound).toBe(true); 67 | }, 68 | { timeout: 10000 }, 69 | ); 70 | }); 71 | describe("commit()", () => { 72 | test("Commit offset", async () => { 73 | const p = kafka.producer(); 74 | const c = kafka.consumer(); 75 | const consumerGroupId = randomUUID(); 76 | const instanceId = randomUUID(); 77 | 78 | const message = randomUUID(); 79 | const { offset, partition, topic } = await p.produce(Topic.BLUE, message); 80 | 81 | await c.consume({ 82 | consumerGroupId, 83 | instanceId, 84 | topics: [Topic.BLUE], 85 | autoCommit: false, 86 | }); 87 | 88 | const preCommit = await c.committed({ 89 | consumerGroupId, 90 | instanceId, 91 | topicPartitions: [{ topic, partition }], 92 | }); 93 | 94 | await c.commit({ 95 | consumerGroupId, 96 | instanceId, 97 | offset: { partition, topic, offset }, 98 | }); 99 | 100 | const postCommit = await c.committed({ 101 | consumerGroupId, 102 | instanceId, 103 | topicPartitions: [{ topic, partition }], 104 | }); 105 | expect(postCommit).not.toEqual(preCommit); 106 | expect(postCommit[0].offset).toEqual(offset); 107 | await kafka.admin().removeConsumerInstance(consumerGroupId, instanceId); 108 | }); 109 | }); 110 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # upstash-kafka 2 | 3 | An HTTP/REST based Kafka client built on top of 4 | 
[Upstash REST API](https://docs.upstash.com/kafka/rest). 5 | 6 | [![codecov](https://codecov.io/gh/chronark/upstash-kafka/branch/main/graph/badge.svg?token=BBJ1FCHPF0)](https://codecov.io/gh/chronark/upstash-kafka) 7 | ![npm (scoped)](https://img.shields.io/npm/v/@upstash/kafka) 8 | ![npm bundle size](https://img.shields.io/bundlephobia/minzip/@upstash/kafka) 9 | 10 | > [!NOTE] 11 | > **This project is in GA Stage.** 12 | > 13 | > The Upstash Professional Support fully covers this project. It receives regular updates and bug fixes. The Upstash team is committed to maintaining and improving its functionality. 14 | 15 | It is the only connectionless (HTTP based) Kafka client, designed for: 16 | 17 | - Serverless functions (AWS Lambda ...) 18 | - Cloudflare Workers (see the example) 19 | - Fastly Compute@Edge 20 | - Next.js Edge, Remix ... 21 | - Client side web/mobile applications 22 | - WebAssembly and other environments where HTTP is preferred over TCP 23 | connections. 24 | 25 | # Installation 26 | 27 | ```bash 28 | npm install @upstash/kafka 29 | ``` 30 | 31 | # Quickstart 32 | 33 | ## Auth 34 | 35 | 1. Go to [upstash](https://console.upstash.com/kafka) and select your database. 36 | 2. Copy the `REST API` secrets at the bottom of the page. 37 | 38 | ```typescript 39 | import { Kafka } from "@upstash/kafka" 40 | 41 | const kafka = new Kafka({ 42 | url: "", 43 | username: "", 44 | password: "", 45 | }) 46 | ``` 47 | 48 | ## Produce a single message 49 | 50 | ```typescript 51 | const p = kafka.producer() 52 | const message = { hello: "world" } // Objects will get serialized using `JSON.stringify` 53 | const res = await p.produce("", message) 54 | const resWithOptions = await p.produce("", message, { 55 | partition: 1, 56 | timestamp: 12345, 57 | key: "", 58 | headers: [{ key: "traceId", value: "85a9f12" }], 59 | }) 60 | ``` 61 | 62 | ## Produce multiple messages 63 | 64 | The same options from the example above can be set for every message. 65 | 66 | ```typescript 67 | const p = kafka.producer() 68 | const res = await p.produceMany([ 69 | { 70 | topic: "my.topic", 71 | value: { hello: "world" }, 72 | // ...options 73 | }, 74 | { 75 | topic: "another.topic", 76 | value: "another message", 77 | // ...options 78 | }, 79 | ]) 80 | ``` 81 | 82 | ## Consume 83 | 84 | The first time a consumer is created, it needs to figure out the group 85 | coordinator by asking the Kafka brokers and then joins the consumer group. This 86 | process takes some time to complete. That's why, when a consumer instance is 87 | created for the first time, it may return empty messages until consumer group 88 | coordination is completed. 89 | 90 | ```typescript 91 | const c = kafka.consumer() 92 | const messages = await c.consume({ 93 | consumerGroupId: "group_1", 94 | instanceId: "instance_1", 95 | topics: ["test.topic"], 96 | autoOffsetReset: "earliest", 97 | }) 98 | ``` 99 | 100 | More examples can be found in the 101 | [docstring](https://github.com/upstash/upstash-kafka/blob/main/pkg/consumer.ts#L265) 102 | 103 | ## Commit manually 104 | 105 | While `consume` can handle committing automatically, you can also use 106 | `Consumer.commit` to manually commit.
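`commit` accepts either a single offset or an array of offsets (see `CommitRequest` in `pkg/consumer.ts`), so one option is to commit everything you just consumed in a single call. A minimal sketch, reusing the consumer group and topic names from the examples above:

```typescript
const c = kafka.consumer()
const messages = await c.consume({
  consumerGroupId: "group_1",
  instanceId: "instance_1",
  topics: ["test.topic"],
  autoCommit: false,
})

// commit every consumed offset in one request
await c.commit({
  consumerGroupId: "group_1",
  instanceId: "instance_1",
  offset: messages.map((m) => ({
    topic: m.topic,
    partition: m.partition,
    offset: m.offset,
  })),
})
```

For finer-grained control, you can also commit after each message: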
107 | 108 | ```typescript 109 | const consumerGroupId = "mygroup" 110 | const instanceId = "myinstance" 111 | const topic = "my.topic" 112 | 113 | const c = kafka.consumer() 114 | const messages = await c.consume({ 115 | consumerGroupId, 116 | instanceId, 117 | topics: [topic], 118 | autoCommit: false, 119 | }) 120 | 121 | for (const message of messages) { 122 | // message handling logic 123 | 124 | await c.commit({ 125 | consumerGroupId, 126 | instanceId, 127 | offset: { 128 | topic: message.topic, 129 | partition: message.partition, 130 | offset: message.offset, 131 | }, 132 | }) 133 | } 134 | ``` 135 | 136 | ## Fetch 137 | 138 | You can also manage offsets manually by using `Consumer.fetch`. 139 | 140 | ```typescript 141 | const c = kafka.consumer() 142 | const messages = await c.fetch({ 143 | topic: "greeting", 144 | partition: 3, 145 | offset: 42, 146 | timeout: 1000, 147 | }) 148 | ``` 149 | 150 | ## Examples 151 | 152 | See [/examples](https://github.com/upstash/upstash-kafka/tree/main/examples) as 153 | well as various examples in the docstrings of each method. 154 | 155 | # Contributing 156 | 157 | ## Requirements 158 | 159 | - [nodejs](https://nodejs.org) v18.x or higher 160 | - [pnpm](https://pnpm.io/installation) 161 | 162 | ## Setup 163 | 164 | 0. Install dependencies using `pnpm install` 165 | 1. Create a kafka instance on upstash. 166 | [docs](https://docs.upstash.com/kafka#create-a-kafka-cluster) 167 | 2. Create the following topics: `blue`, `red`, `green`. 168 | [docs](https://docs.upstash.com/kafka#create-a-topic) 169 | 170 | The partitions or retention settings don't matter at this time. 171 | 172 | 3. Create a `.env` file with your kafka secrets: `cp .env.example .env` 173 | 174 | ## Running tests 175 | 176 | ```bash 177 | pnpm test 178 | ``` 179 | -------------------------------------------------------------------------------- /pkg/admin.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, expect, test } from "bun:test"; 2 | import { randomUUID } from "crypto"; 3 | import { Kafka } from "./kafka"; 4 | import { Topic, kafka } from "./test_setup"; 5 | 6 | test("fails with wrong auth", async () => { 7 | const url = process.env.UPSTASH_KAFKA_REST_URL; 8 | if (!url) { 9 | throw new Error("TEST SETUP FAILED"); 10 | } 11 | const admin = new Kafka({ url, username: "username", password: "password" }).admin(); 12 | 13 | expect(() => admin.topics()).rejects.pass(); 14 | }); 15 | 16 | test("returns all topics", async () => { 17 | const topics = await kafka.admin().topics(); 18 | 19 | const expectedTopics = ["green", "blue", "red"]; 20 | for (const topic of expectedTopics) { 21 | expect(Object.keys(topics)).toContain(topic); 22 | } 23 | }); 24 | 25 | describe("consumers()", () => { 26 | test("returns all consumers", async () => { 27 | const consumerGroupId = randomUUID(); 28 | const instanceId = randomUUID(); 29 | const c = kafka.consumer(); 30 | await c.consume({ consumerGroupId, instanceId, topics: [Topic.BLUE] }); 31 | const admin = kafka.admin(); 32 | const consumers = await admin.consumers(); 33 | await admin.removeConsumerInstance(consumerGroupId, instanceId); 34 | 35 | expect(consumers.map((c) => c.name)).toContain(consumerGroupId); 36 | expect( 37 | consumers.find((c) => c.name === consumerGroupId)!.instances.map((i) => i.name), 38 | ).toContain(instanceId); 39 | }); 40 | test("fails if the consumerGroup or instanceId does not exist", async () => { 41 | const consumerGroupId = randomUUID(); 42 | const instanceId =
randomUUID(); 43 | 44 | const admin = kafka.admin(); 45 | expect(() => admin.removeConsumerInstance(consumerGroupId, instanceId)).rejects.pass(); 46 | }); 47 | }); 48 | 49 | describe("committedOffsets()", () => { 50 | test( 51 | "returns the latest offsets for one topicPartition", 52 | async () => { 53 | const consumerGroupId = randomUUID(); 54 | const instanceId = randomUUID(); 55 | 56 | const admin = kafka.admin(); 57 | const p = kafka.producer(); 58 | const c = kafka.consumer(); 59 | 60 | await p.produce(Topic.BLUE, randomUUID()); 61 | 62 | /** 63 | * Try consuming until the consumergroup is set up. This can take a few seconds. 64 | */ 65 | let ready = false; 66 | while (!ready) { 67 | const consumerGroups = await admin.consumers(); 68 | const consumer = consumerGroups.find((c) => c.name === consumerGroupId); 69 | if (consumer) { 70 | const instance = consumer.instances.find((i) => i.name === instanceId); 71 | if (instance) { 72 | if (instance.topics.find((t) => t.topic === Topic.BLUE)) { 73 | ready = true; 74 | } 75 | } 76 | } 77 | 78 | await c.consume({ 79 | consumerGroupId, 80 | instanceId, 81 | topics: [Topic.BLUE], 82 | }); 83 | 84 | await new Promise((res) => setTimeout(res, 1000)); 85 | } 86 | 87 | const offsets = await admin.committedOffsets({ 88 | consumerGroupId, 89 | instanceId, 90 | topicPartition: { topic: Topic.BLUE, partition: 0 }, 91 | }); 92 | 93 | expect(offsets.length).toBe(1); 94 | const offset = offsets[0]!; 95 | expect(offset.topic).toEqual(Topic.BLUE); 96 | expect(typeof offset.offset).toBe("number"); 97 | 98 | await admin.removeConsumerInstance(consumerGroupId, instanceId); 99 | }, 100 | { timeout: 10000 }, 101 | ); 102 | test( 103 | "returns the latest offsets for multiple topicPartitions", 104 | async () => { 105 | const consumerGroupId = randomUUID(); 106 | const instanceId = randomUUID(); 107 | 108 | const admin = kafka.admin(); 109 | const p = kafka.producer(); 110 | const c = kafka.consumer(); 111 | 112 | await p.produce(Topic.BLUE, randomUUID()); 113 | await p.produce(Topic.RED, randomUUID()); 114 | 115 | /** 116 | * Try consuming until the consumergroup is set up. This can take a few seconds. 
117 | */ 118 | let ready = false; 119 | while (!ready) { 120 | const consumerGroups = await admin.consumers(); 121 | const consumer = consumerGroups.find((c) => c.name === consumerGroupId); 122 | if (consumer) { 123 | const instance = consumer.instances.find((i) => i.name === instanceId); 124 | if (instance) { 125 | if (instance.topics.find((t) => t.topic === Topic.BLUE)) { 126 | ready = true; 127 | } 128 | } 129 | } 130 | 131 | await c.consume({ 132 | consumerGroupId, 133 | instanceId, 134 | topics: [Topic.BLUE, Topic.RED], 135 | }); 136 | 137 | await new Promise((res) => setTimeout(res, 1000)); 138 | } 139 | 140 | const offsets = await admin.committedOffsets({ 141 | consumerGroupId, 142 | instanceId, 143 | topicPartitions: [ 144 | { topic: Topic.BLUE, partition: 0 }, 145 | { topic: Topic.RED, partition: 0 }, 146 | ], 147 | }); 148 | 149 | expect(offsets.length).toBe(2); 150 | for (const offset of offsets) { 151 | expect(typeof offset.offset).toBe("number"); 152 | } 153 | await admin.removeConsumerInstance(consumerGroupId, instanceId); 154 | }, 155 | { timeout: 10000 }, 156 | ); 157 | }); 158 | 159 | describe("topicPartitionOffsets()", () => { 160 | test("returns the offsets of a single partition and topic", async () => { 161 | const admin = kafka.admin(); 162 | const p = kafka.producer(); 163 | 164 | const { topic, partition } = await p.produce(Topic.RED, randomUUID(), { partition: 0 }); 165 | 166 | const partitionOffsets = await admin.topicPartitionOffsets({ 167 | timestamp: Math.floor(Date.now() / 1000), 168 | topicPartition: { topic, partition }, 169 | }); 170 | 171 | expect(partitionOffsets.length).toBe(1); 172 | const partitionOffset = partitionOffsets[0]!; 173 | expect(partitionOffset.topic).toEqual(topic); 174 | expect(typeof partitionOffset.offset).toBe("number"); 175 | }); 176 | test("returns the offsets of multiple partition and topic", async () => { 177 | const admin = kafka.admin(); 178 | const p = kafka.producer(); 179 | 180 | await p.produce(Topic.RED, randomUUID(), { partition: 0 }); 181 | await p.produce(Topic.BLUE, randomUUID(), { partition: 0 }); 182 | 183 | const partitionOffsets = await admin.topicPartitionOffsets({ 184 | timestamp: Math.floor(Date.now() / 1000), 185 | topicPartitions: [ 186 | { topic: Topic.BLUE, partition: 0 }, 187 | { topic: Topic.RED, partition: 0 }, 188 | ], 189 | }); 190 | 191 | expect(partitionOffsets.length).toBe(2); 192 | for (const partitionOffset of partitionOffsets) { 193 | expect(typeof partitionOffset.offset).toBe("number"); 194 | } 195 | }); 196 | }); 197 | -------------------------------------------------------------------------------- /pkg/consumer.ts: -------------------------------------------------------------------------------- 1 | import { HttpClient } from "./http"; 2 | import { Message } from "./types"; 3 | 4 | export type TopicPartition = { topic: string; partition: number }; 5 | export type TopicPartitionOffset = TopicPartition & { offset: number }; 6 | 7 | export type FetchRequest = { timeout?: number; topicPartitionOffsets?: TopicPartitionOffset[] } & ( 8 | | { topic: string; partition: number; offset: number } 9 | | { topic?: never; partition?: never; offset?: never } 10 | ); 11 | 12 | type BaseConsumerRequest = { 13 | /** 14 | * The name of the consumer group which is used as Kafka consumer group id 15 | * @see https://kafka.apache.org/documentation/#consumerconfigs_group.id 16 | */ 17 | consumerGroupId: string; 18 | /** 19 | * Used to identify kafka consumer instances in the same consumer group. 
20 | * Each consumer instance id is handled by a separate consumer client. 21 | * @see https://kafka.apache.org/documentation/#consumerconfigs_group.instance.id 22 | */ 23 | instanceId: string; 24 | }; 25 | export type ConsumeRequest = BaseConsumerRequest & { 26 | topics: string[]; 27 | /** 28 | * Defines the time to wait at most for the fetch request in milliseconds. 29 | * It's optional and its default value is 1000. 30 | */ 31 | timeout?: number; 32 | /** 33 | * If true, the consumer's offset will be periodically committed in the background. 34 | */ 35 | autoCommit?: boolean; 36 | /** 37 | * The frequency in milliseconds that the consumer offsets are auto-committed to Kafka 38 | * if auto commit is enabled. 39 | * Default is 5000. 40 | */ 41 | autoCommitInterval?: number; 42 | /** 43 | * What to do when there is no initial offset in Kafka or if the current 44 | * offset does not exist any more on the server. Default value is `latest`. 45 | * 46 | * `earliest`: Automatically reset the offset to the earliest offset 47 | * 48 | * `latest`: Automatically reset the offset to the latest offset 49 | * 50 | * `none`: Throw an exception to the consumer if no previous offset is found for the 51 | * consumer's group. 52 | */ 53 | autoOffsetReset?: "earliest" | "latest" | "none"; 54 | }; 55 | 56 | export type FetchOptions = { 57 | /** 58 | * If true, `fetch` will call upstash once for each topic in your request. 59 | * This circumvents the issue where upstash only returns from a single topic 60 | * at a time when using fetch. 61 | * 62 | * All requests are executed in parallel. 63 | * 64 | * Default: true 65 | */ 66 | parallel?: boolean; 67 | }; 68 | 69 | export type CommitRequest = BaseConsumerRequest & { 70 | /** 71 | * Commits the last consumed messages if left empty 72 | */ 73 | offset?: TopicPartitionOffset | TopicPartitionOffset[]; 74 | }; 75 | 76 | export type CommittedRequest = BaseConsumerRequest & { topicPartitions: TopicPartition[] }; 77 | 78 | /** 79 | * Consumer APIs are used to fetch/consume messages from Kafka topics. Similar 80 | * to Kafka clients, there are two mechanisms to consume messages; one is 81 | * seeking offsets manually and the other is to use consumer groups, which 82 | * manage offsets automatically inside a special Kafka topic. 83 | * 84 | * We call the first one the Fetch API and the second one the Consume API. 85 | * The Consume API has some additional methods if you wish to commit offsets manually. 86 | */ 87 | export class Consumer { 88 | private readonly client: HttpClient; 89 | 90 | constructor(client: HttpClient) { 91 | this.client = client; 92 | } 93 | /** 94 | * Fetches the message(s) starting with a given offset inside the partition. 95 | * This API doesn't use consumer groups. 96 | * 97 | * When fetching from multiple topics it is important to understand that 98 | * upstash only returns data for a single topic at a time, so you should 99 | * call `fetch` multiple times.
100 | * 101 | * Fetch from a single <topic, partition, offset> triple: 102 | * ```ts 103 | * fetch({ 104 | * topic: "greeting", 105 | * partition: 3, 106 | * timeout: 1000 107 | * }) 108 | * ``` 109 | * 110 | * Fetch from multiple <topic, partition, offset> triples: 111 | * ```ts 112 | * fetch({ 113 | * "topicPartitionOffsets": [ 114 | * {"topic": "greetings", "partition": 1, "offset": 1}, 115 | * {"topic": "greetings", "partition": 2, "offset": 1}, 116 | * {"topic": "greetings", "partition": 3, "offset": 1}, 117 | * {"topic": "cities", "partition": 1, "offset": 10}, 118 | * {"topic": "cities", "partition": 2, "offset": 20} 119 | * ], 120 | * timeout: 1000 121 | * }) 122 | * ``` 123 | * 124 | * You can even combine both: 125 | * ```ts 126 | * fetch({ 127 | * topic: "words", 128 | * partition: 0, 129 | * offset: 0, 130 | * topicPartitionOffsets: [ 131 | * { topic: "cities", partition: 1, offset: 10}, 132 | * { topic: "cities", partition: 2, offset: 20} 133 | * ], 134 | * timeout: 5000 135 | * }) 136 | * ``` 137 | */ 138 | public async fetch( 139 | req: FetchRequest, 140 | opts: FetchOptions = { parallel: true }, 141 | ): Promise<Message[]> { 142 | let requests = [req]; 143 | 144 | if (opts?.parallel) { 145 | requests = (req.topicPartitionOffsets ?? []).map((r) => ({ ...r, timeout: req.timeout })); 146 | if (req.topic) { 147 | requests.push({ 148 | topic: req.topic, 149 | partition: req.partition, 150 | offset: req.offset, 151 | timeout: req.timeout, 152 | }); 153 | } 154 | } 155 | const responses = await Promise.all( 156 | requests.map(async (r) => await this.client.post<Message[]>({ path: ["fetch"], body: r })), 157 | ); 158 | 159 | return responses.flat(); 160 | } 161 | 162 | /** 163 | * Fetches the message(s) using Kafka consumer group mechanism and may commit 164 | * the offsets automatically. 165 | * 166 | * The first time a consumer is created, it needs to figure out the group 167 | * coordinator by asking the Kafka brokers and then joins the consumer group. 168 | * This process takes some time to complete. That's why, when a consumer 169 | * instance is created for the first time, it may return empty messages until consumer 170 | * group coordination is completed.
171 | * 172 | * Consume from a single topic with timeout: 173 | * ```ts 174 | * consume({ 175 | * consumerGroupId: "mygroup", 176 | * instanceId: "myconsumer", 177 | * topics: ["greetings"] 178 | * }) 179 | * ``` 180 | * 181 | * Consume from multiple topics: 182 | * ```ts 183 | * consume({ 184 | * consumerGroupId: "mygroup", 185 | * instanceId: "myconsumer", 186 | * topics: ["greetings", "cities", "world"], 187 | * timeout: 1000 188 | * }) 189 | * ``` 190 | * 191 | * Consume from topics without auto commit: 192 | * ```ts 193 | * consume({ 194 | * consumerGroupId: "mygroup", 195 | * instanceId: "myconsumer", 196 | * topics: ["greetings", "cities", "world"], 197 | * timeout: 1000, 198 | * autoCommit: false 199 | * }) 200 | * ``` 201 | * 202 | * Consume from topics starting from the earliest message: 203 | * ```ts 204 | * consume({ 205 | * consumerGroupId: "mygroup", 206 | * instanceId: "myconsumer", 207 | * topics: ["greetings", "cities", "world"], 208 | * timeout: 1000, 209 | * autoOffsetReset: "earliest" 210 | * }) 211 | * ``` 212 | * 213 | * Consume from topics with custom auto commit interval: 214 | * ```ts 215 | * consume({ 216 | * consumerGroupId: "mygroup", 217 | * instanceId: "myconsumer", 218 | * topics: ["greetings", "cities", "world"], 219 | * timeout: 1000, 220 | * autoCommit: true, 221 | * autoCommitInterval: 3000 222 | * }) 223 | * ``` 224 | */ 225 | public async consume(req: ConsumeRequest): Promise<Message[]> { 226 | const body: Record<string, unknown> = {}; 227 | if (req.topics.length === 1) { 228 | body.topic = req.topics[0]; 229 | } else { 230 | body.topics = req.topics; 231 | } 232 | 233 | if (typeof req.timeout === "number") { 234 | body.timeout = req.timeout; 235 | } 236 | 237 | const headers: Record<string, string> = {}; 238 | if (typeof req.autoCommit === "boolean") { 239 | headers["Kafka-Enable-Auto-Commit"] = req.autoCommit.toString(); 240 | } 241 | if (typeof req.autoCommitInterval === "number") { 242 | headers["Kafka-Auto-Commit-Interval"] = req.autoCommitInterval.toString(); 243 | } 244 | if (typeof req.autoOffsetReset === "string") { 245 | headers["Kafka-Auto-Offset-Reset"] = req.autoOffsetReset; 246 | } 247 | 248 | return await this.client.post<Message[]>({ 249 | path: ["consume", req.consumerGroupId, req.instanceId], 250 | headers, 251 | body, 252 | }); 253 | } 254 | 255 | /** 256 | * Commits the fetched message offsets. `commit` should be used alongside 257 | * `consume`, especially when auto commit is disabled.
258 | * 259 | * Commit single topic partition offset: 260 | * ```ts 261 | * commit({ 262 | * consumerGroupId: "mygroup", 263 | * instanceId: "myconsumer", 264 | * offset: { 265 | * topic: "cities", 266 | * partition: 1, 267 | * offset: 10, 268 | * } 269 | * }) 270 | * ``` 271 | * 272 | * Commit multiple topic partition offsets: 273 | * ```ts 274 | * commit({ 275 | * consumerGroupId: "mygroup", 276 | * instanceId: "myconsumer", 277 | * offset: [ 278 | * { topic: "cities", partition: 0, offset: 13 }, 279 | * { topic: "cities", partition: 1, offset: 37 }, 280 | * { topic: "greetings", partition: 0, offset: 19 }, 281 | * ] 282 | * }) 283 | * ``` 284 | * 285 | * Commit all latest consumed message offsets: 286 | * ```ts 287 | * commit({ 288 | * consumerGroupId: "mygroup", 289 | * instanceId: "myconsumer", 290 | * }) 291 | * ``` 292 | */ 293 | public async commit(req: CommitRequest): Promise<void> { 294 | return await this.client.post<void>({ 295 | path: ["commit", req.consumerGroupId, req.instanceId], 296 | body: req.offset, 297 | }); 298 | } 299 | 300 | /** 301 | * Returns the last committed offsets for the topic partitions inside the group. 302 | * 303 | * List committed offsets for multiple topic partitions: 304 | * ```ts 305 | * committed({ 306 | * consumerGroupId: "mygroup", 307 | * instanceId: "myconsumer", 308 | * topicPartitions: [ 309 | * { topic: "cities", partition: 0 }, 310 | * { topic: "cities", partition: 1 }, 311 | * { topic: "greetings", partition: 0}, 312 | * ] 313 | * }) 314 | * ``` 315 | */ 316 | public async committed(req: CommittedRequest): Promise<TopicPartitionOffset[]> { 317 | return await this.client.post<TopicPartitionOffset[]>({ 318 | path: ["committed", req.consumerGroupId, req.instanceId], 319 | body: req.topicPartitions, 320 | }); 321 | } 322 | } 323 | --------------------------------------------------------------------------------