├── .codeclimate.yml ├── .npmrc ├── .huskyrc.json ├── test ├── tsconfig.json ├── util.test.ts ├── helpers │ └── read-file.ts ├── fixtures │ ├── metadata │ │ ├── metadata_no_leader.json │ │ └── metadata_simple.json │ └── partition │ │ ├── metadata_1_partition.json │ │ └── metadata_5_partitions.json ├── partition.test.ts ├── stream.test.ts ├── message.test.ts └── metadata.test.ts ├── .eslintrc.test.json ├── .npmignore ├── .nycrc.json ├── .eslintrc.json ├── typedoc.json ├── .github └── workflows │ ├── docs.yml │ └── pr.yml ├── tsconfig.json ├── LICENSE ├── .gitignore ├── README.md ├── package.json ├── src ├── utils.ts ├── debug.ts ├── partition.ts ├── stream.ts ├── message.ts ├── errors.ts ├── metadata.ts └── index.ts ├── scripts └── generate_grpc_code.sh ├── grpc ├── generated │ ├── api_grpc_pb.d.ts │ ├── api_grpc_pb.js │ └── api_pb.d.ts └── api.proto └── media └── node-liftbridge.svg /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | exclude_patterns: 2 | - "grpc/" 3 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | registry=https://registry.yarnpkg.com 2 | save-exact=true 3 | -------------------------------------------------------------------------------- /.huskyrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "hooks": { 3 | "pre-commit": "yarn lint && yarn lint:test" 4 | } 5 | } -------------------------------------------------------------------------------- /test/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "include": [ 4 | "*.ts" 5 | ] 6 | } -------------------------------------------------------------------------------- /.eslintrc.test.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"extends": [ 3 | "./.eslintrc.json" 4 | ], 5 | "parserOptions": { 6 | "project": "./test/tsconfig.json" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | src/ 2 | test/ 3 | examples/ 4 | scripts/ 5 | docs/ 6 | media/ 7 | coverage/ 8 | .nyc_output/ 9 | .codeclimate.yml 10 | .eslintrc.json 11 | .eslintrc.test.json 12 | .huskyrc.json 13 | .nycrc.json 14 | typedoc.json 15 | tsconfig.json 16 | -------------------------------------------------------------------------------- /test/util.test.ts: -------------------------------------------------------------------------------- 1 | import test from 'tape'; 2 | import { constructAddress } from '../src/utils'; 3 | 4 | test('🧰 Utility — `constructAddress()` should, well, construct an address.', async t => { 5 | t.plan(1); 6 | const address = constructAddress('localhost', 4200); 7 | t.equal(address, 'localhost:4200', 'should construct correctly.'); 8 | t.end(); 9 | }); 10 | -------------------------------------------------------------------------------- /.nycrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extension": [ 3 | ".ts" 4 | ], 5 | "require": [ 6 | "ts-node/register/transpile-only" 7 | ], 8 | "exclude": [ 9 | "**/*.d.ts", 10 | "coverage/", 11 | "lib/", 12 | "test/", 13 | "grpc/" 14 | ], 15 | "reporter": [ 16 | "text", 17 | "lcov" 18 | ], 19 | "cache": false 20 | } -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "parser": "@typescript-eslint/parser", 3 | "parserOptions": { 4 | "project": "./tsconfig.json" 5 | }, 6 | "plugins": [ 7 | "@typescript-eslint" 8 | ], 9 | "extends": [ 10 | "airbnb-typescript/base" 11 | ], 12 | "rules": { 13 | "@typescript-eslint/indent": ["error", 4], 14 | 
"no-console": 0, 15 | "max-len": ["error", { "code": 180 }], 16 | "max-classes-per-file": [2, 3], 17 | "arrow-parens": ["error", "as-needed"] 18 | } 19 | } -------------------------------------------------------------------------------- /typedoc.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Liftbridge Node.js Client API", 3 | "mode": "file", 4 | "out": "docs", 5 | "theme": "node_modules/typedoc-twilio-theme/bin", 6 | "logger": "console", 7 | "media": "media", 8 | "hideGenerator": true, 9 | "ignoreCompilerErrors": true, 10 | "excludePrivate": true, 11 | "listInvalidSymbolLinks": true, 12 | "categorizeByGroup": true, 13 | "categoryOrder": [ 14 | "Client", 15 | "Stream", 16 | "Message", 17 | "Metadata", 18 | "Error" 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: documentation 2 | on: 3 | push: 4 | branches: 5 | - master 6 | tags: 7 | - '!*' # Do not execute on tags 8 | paths: 9 | - '!test/' # Do not execute on changes in `test/` directory 10 | jobs: 11 | build-and-deploy-docs: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout master branch 15 | uses: actions/checkout@master 16 | 17 | - name: Build documentation and deploy 18 | uses: JamesIves/github-pages-deploy-action@master 19 | env: 20 | ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} 21 | BASE_BRANCH: master 22 | BRANCH: gh-pages 23 | FOLDER: docs 24 | BUILD_SCRIPT: yarn install && yarn docs 25 | COMMIT_EMAIL: exchequer598@gmail.com 26 | COMMIT_NAME: GP 27 | -------------------------------------------------------------------------------- /test/helpers/read-file.ts: -------------------------------------------------------------------------------- 1 | import { readFile as read } from 'fs'; 2 | import { join, resolve } from 'path'; 3 | import { promisify } from 'util'; 4 | 5 | const 
readFileAsync = promisify(read); 6 | 7 | /** 8 | * Read file asynchronously. 9 | * 10 | * @example Reading file using `readFile()` function. 11 | * ``` 12 | * const contents = await readFile('/tmp/example.txt'); 13 | * console.log(contents); // Prints contents of /tmp/example.txt 14 | * ``` 15 | * @param filename - Filename to read. 16 | * @param encoding - (Optional) file encoding; defaults to 'utf8'. 17 | * @returns File contents as string. 18 | */ 19 | export default async function readFile(filename: string, encoding: string = 'utf8'): Promise { 20 | const absolutePath = resolve(join(__dirname, '../', filename)); 21 | return readFileAsync(absolutePath, { encoding }); 22 | } 23 | -------------------------------------------------------------------------------- /test/fixtures/metadata/metadata_no_leader.json: -------------------------------------------------------------------------------- 1 | { 2 | "brokersList": [ 3 | { 4 | "id": "DMxXSQifWCW2rdFsr2vk4S", 5 | "host": "localhost", 6 | "port": 9292 7 | } 8 | ], 9 | "metadataList": [ 10 | { 11 | "name": "test-stream-1", 12 | "subject": "test-subject-1", 13 | "error": 0, 14 | "partitionsMap": [ 15 | [ 16 | 0, 17 | { 18 | "id": 0, 19 | "leader": null, 20 | "replicasList": [ 21 | "DMxXSQifWCW2rdFsr2vk4S" 22 | ], 23 | "isrList": [ 24 | "DMxXSQifWCW2rdFsr2vk4S" 25 | ] 26 | } 27 | ] 28 | ] 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /test/fixtures/metadata/metadata_simple.json: -------------------------------------------------------------------------------- 1 | { 2 | "brokersList": [ 3 | { 4 | "id": "DMxXSQifWCW2rdFsr2vk4S", 5 | "host": "", 6 | "port": 9292 7 | } 8 | ], 9 | "metadataList": [ 10 | { 11 | "name": "test-stream-1", 12 | "subject": "test-subject-1", 13 | "error": 0, 14 | "partitionsMap": [ 15 | [ 16 | 0, 17 | { 18 | "id": 0, 19 | "leader": "DMxXSQifWCW2rdFsr2vk4S", 20 | "replicasList": [ 21 | "DMxXSQifWCW2rdFsr2vk4S" 22 | ], 23 | "isrList": [ 24 | 
"DMxXSQifWCW2rdFsr2vk4S" 25 | ] 26 | } 27 | ] 28 | ] 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "outDir": "./lib/", 4 | "inlineSourceMap": false, 5 | "noImplicitAny": false, 6 | "noImplicitThis": true, 7 | "strictNullChecks": true, 8 | "alwaysStrict": false, 9 | "strictFunctionTypes": true, 10 | "strictPropertyInitialization": true, 11 | "noImplicitUseStrict": true, 12 | "preserveConstEnums": true, 13 | "declaration": true, 14 | "module": "commonjs", 15 | "moduleResolution": "node", 16 | "target": "es6", 17 | "esModuleInterop": true, 18 | "resolveJsonModule": true, 19 | "downlevelIteration": true, 20 | "experimentalDecorators": true, 21 | "forceConsistentCasingInFileNames": true, 22 | "lib": [ 23 | "es2018", 24 | "es2017" 25 | ], 26 | "types": [ 27 | "node" 28 | ] 29 | }, 30 | "include": [ 31 | "src/*.ts" 32 | ], 33 | "exclude": [ 34 | "node_modules" 35 | ] 36 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 GP ✅ 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (https://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # TypeScript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | .yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | # next.js build output 61 | .next 62 | 63 | # macOS .DS_Store files 64 | .DS_Store 65 | 66 | # Built output files 67 | lib/ 68 | 69 | #TypeDocs output files 70 
| docs/ 71 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | name: pull-request-checks 2 | on: 3 | pull_request: 4 | branches: 5 | - master 6 | jobs: 7 | build-lint-test-checks: 8 | strategy: 9 | matrix: 10 | platform: [ 'ubuntu-latest' ] 11 | node: [ '12', '10', '8' ] 12 | name: checks/node ${{ matrix.node }} 13 | runs-on: ${{ matrix.platform }} 14 | steps: 15 | - name: Checkout master branch 16 | uses: actions/checkout@master 17 | - name: Install Node.js 18 | uses: actions/setup-node@master 19 | with: 20 | node-version: ${{ matrix.node }} 21 | - name: Install dependencies 22 | run: yarn install 23 | - name: Lint 24 | run: yarn lint && yarn lint:test 25 | - name: Build 26 | run: yarn build 27 | - name: Run tests 28 | run: yarn test 29 | coverage-check: 30 | needs: [ build-lint-test-checks ] 31 | name: coverage 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Checkout master branch 35 | uses: actions/checkout@master 36 | - name: Install Node.js 37 | uses: actions/setup-node@master 38 | with: 39 | node-version: '12' 40 | - name: Install dependencies 41 | run: yarn install 42 | - name: Build 43 | run: yarn build 44 | - name: Publish coverage report 45 | uses: paambaati/codeclimate-action@v2.2.0 46 | env: 47 | CC_TEST_REPORTER_ID: b565f7cd498145fafc38120f55678a656651299ce9371f87cf446ac20433196c 48 | with: 49 | coverageCommand: yarn coverage 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # node-liftbridge 2 | 3 | ![](liftbridge.svg) 4 | 5 |

6 | 7 |

8 | 9 | Node.js client for [Liftbridge](https://github.com/liftbridge-io/liftbridge). 10 | 11 | > Liftbridge provides lightweight, fault-tolerant message streams by implementing a durable stream augmentation for the [NATS messaging system](https://nats.io/). It extends NATS with a Kafka-like publish-subscribe log API that is highly available and horizontally scalable. Use Liftbridge as a simpler and lighter alternative to systems like Kafka and Pulsar or use it to add streaming semantics to an existing NATS deployment. 12 | 13 | 🚧 **This module is still under active development!** [Would you like to contribute?](https://github.com/paambaati/node-liftbridge) 🚧 14 | 15 | ## Installation 16 | 17 | ```bash 18 | yarn add liftbridge 19 | # or 20 | npm install liftbridge 21 | ``` 22 | 23 | ## Usage 24 | 25 | ```typescript 26 | import LiftbridgeClient from 'liftbridge'; 27 | 28 | const client = new LiftbridgeClient('localhost:9292'); 29 | await client.connect(); 30 | 31 | await client.createStream(new LiftbridgeStream({ 32 | subject: 'my-subject', 33 | name: 'stream-name', 34 | partitions: 5, 35 | maxReplication: true 36 | })); 37 | ``` 38 | 39 | 📚 See [Documentation](https://paambaati.github.io/node-liftbridge/globals.html) for more detailed examples. 40 | 41 | ## Developer Notes 42 | 43 | 1. 
To regenerate the gRPC bindings, update the path to the [latest proto file](https://github.com/liftbridge-io/liftbridge-grpc/blob/master/api.proto) and then run `./scripts/generate_grpc_code.sh` 44 | 45 | ## Roadmap 46 | 47 | - [ ] Tests & coverage 48 | - [ ] CI 49 | - [ ] Contribution guide 50 | - [ ] gRPC Connection pool 51 | - [ ] Logging 52 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "liftbridge", 3 | "version": "0.0.1", 4 | "description": "Node.js client for Liftbridge", 5 | "main": "lib/index.js", 6 | "repository": { 7 | "url": "https://github.com/paambaati/node-liftbridge", 8 | "type": "git" 9 | }, 10 | "author": "GP ", 11 | "contributors": [ 12 | "André König (https://andrekoenig.de/)" 13 | ], 14 | "license": "MIT", 15 | "private": false, 16 | "scripts": { 17 | "build": "tsc", 18 | "debug": "NODE_OPTIONS='--stack-trace-limit=10000' DEBUG=node-liftbridge:* ts-node src/debug.ts", 19 | "debug:grpc": "GRPC_VERBOSITY=DEBUG GRPC_TRACE=connectivity_state,call_error yarn debug", 20 | "format": "eslint --fix --ext .ts src/ && eslint --fix --config .eslintrc.test.json --ext .ts test/", 21 | "lint": "eslint --ext .ts src/", 22 | "lint:test": "eslint --config .eslintrc.test.json --ext .ts test/", 23 | "test": "tape -r ts-node/register/transpile-only test/*.test.ts", 24 | "coverage": "rm -rf ./node_modules/.cache && rm -rf coverage/ && rm -rf .nyc_output/ && nyc tape -r ts-node/register/transpile-only test/*.test.ts", 25 | "docs": "typedoc" 26 | }, 27 | "dependencies": { 28 | "@sindresorhus/fnv1a": "2.0.0", 29 | "bluebird": "3.7.2", 30 | "debug": "4.1.1", 31 | "exponential-backoff": "2.2.0", 32 | "grpc": "1.24.2", 33 | "hyperid": "2.0.3" 34 | }, 35 | "devDependencies": { 36 | "@types/bluebird": "3.5.29", 37 | "@types/debug": "4.1.5", 38 | "@types/node": "12.12.7", 39 | "@types/tape": "4.2.34", 40 | 
"@typescript-eslint/eslint-plugin": "2.21.0", 41 | "@typescript-eslint/parser": "2.21.0", 42 | "eslint": "6.8.0", 43 | "eslint-config-airbnb-typescript": "7.0.0", 44 | "eslint-plugin-import": "2.20.1", 45 | "grpc-tools": "1.8.1", 46 | "husky": "4.2.3", 47 | "nyc": "15.0.0", 48 | "tape": "4.13.0", 49 | "ts-node": "8.6.2", 50 | "ts-protoc-gen": "0.12.0", 51 | "typedoc": "0.16.10", 52 | "typedoc-twilio-theme": "1.0.1", 53 | "typescript": "3.8.2" 54 | }, 55 | "resolutions": { 56 | "protobufjs": "6.8.8" 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import { backOff, IBackOffOptions } from 'exponential-backoff'; 2 | import { JitterTypes } from 'exponential-backoff/dist/options'; 3 | 4 | /** 5 | * Randomly shuffles an array. 6 | * 7 | * Simple implementation of Durstenfeld shuffle, 8 | * which is a computer-ready implementation of the [Fisher-Yates shuffle](https://wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm). 9 | * 10 | * @param array Array of items to shuffle. 11 | * @returns Copy of original array in shuffled order. 12 | * @hidden 13 | */ 14 | /* istanbul ignore next */ 15 | export function shuffleArray(array: any[]) { 16 | const arrayCopy = array.slice(); 17 | for (let i = arrayCopy.length - 1; i > 0; i -= 1) { 18 | const j = Math.floor(Math.random() * (i + 1)); 19 | [arrayCopy[i], arrayCopy[j]] = [arrayCopy[j], arrayCopy[i]]; 20 | } 21 | return arrayCopy; 22 | } 23 | 24 | /** 25 | * Execute the `Promise` wrapped inside a function with retry, exponential backoff & [jitter](https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/). 26 | * Defaults to 5 retries, full jitter, backoff multiple of 1.5 and a delay interval of 100 milliseconds. 27 | * 28 | * @param call Function returning a `Promise` that you want to retry. 
29 | * @param retryOptions Retry & exponential backoff options (has own defaults - read source). 30 | * @returns A Promise that settles after all the retries are done. 31 | * @hidden 32 | */ 33 | /* istanbul ignore next */ 34 | export function faultTolerantCall(call: () => Promise, retryOptions?: Partial): Promise { 35 | const retryDefaults: Partial = { 36 | delayFirstAttempt: false, 37 | numOfAttempts: 5, 38 | jitter: JitterTypes.Full, 39 | startingDelay: 100, 40 | timeMultiple: 1.5, 41 | }; 42 | return backOff(call, Object.assign(retryDefaults, retryOptions || {})); 43 | } 44 | 45 | /** 46 | * Construct an address of the form : 47 | * 48 | * @param host Hostname. 49 | * @param port Port. 50 | * @returns Constructed address. 51 | * @hidden 52 | */ 53 | export function constructAddress(host: string, port: number): string { 54 | return `${host}:${port}`; 55 | } 56 | -------------------------------------------------------------------------------- /scripts/generate_grpc_code.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | PROTOC_VERSION="3.9.1" 6 | 7 | # Switch to script location so all relative paths work. 8 | PARENT_PATH=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) 9 | cd "$PARENT_PATH" 10 | 11 | # Setup cleanup trap. 12 | cleanup () { 13 | echo "Something went wrong! Cleaning up downloaded artifacts..." 14 | ARG=$? 15 | rm -f protoc-*.zip readme.txt 16 | rm -rf ./bin/ 17 | exit $ARG 18 | } 19 | trap cleanup EXIT 20 | 21 | # Fetch Liftbridge gRPC Proto definition. 22 | wget -q https://raw.githubusercontent.com/liftbridge-io/liftbridge-grpc/8382298b935c78e9072d870570ba0e1585a29660/api.proto -O ../grpc/api.proto 23 | echo "Downloaded Liftbridge Proto file..." 24 | 25 | # Fetch & extract protoc. 
26 | OS=$(uname) 27 | if [ "$OS" == "Darwin" ]; then 28 | wget -q "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-osx-x86_64.zip" 29 | elif [ "$OS" == "Linux" ]; then 30 | wget -q "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip" 31 | else 32 | echo "Unsupported operating system! Please generate this on Linux or macOS." 33 | exit 1 34 | fi 35 | echo "Downloaded Google Protobuf compiler..." 36 | 37 | unzip -qq protoc-*.zip bin/* 38 | rm -f protoc-*.zip readme.txt 39 | 40 | # Calculate absolute paths for the protoc command. 41 | IN_DIR=$(cd ../grpc/ 2> /dev/null && pwd -P) 42 | NODE_MODULES_BIN_DIR=$(cd ../node_modules/.bin/ 2> /dev/null && pwd -P) 43 | 44 | # Set up path variables for the generators. 45 | PROTOC_GEN_TS_PATH="${NODE_MODULES_BIN_DIR}/protoc-gen-ts" 46 | PROTOC_GEN_GRPC_PATH="${NODE_MODULES_BIN_DIR}/grpc_tools_node_protoc_plugin" 47 | 48 | # Directory to write generated code to (.js and .d.ts files) 49 | OUT_DIR="../grpc/generated" 50 | mkdir -p ${OUT_DIR} 51 | 52 | echo "Running static code generator..." 53 | # Generate gRPC bindings. 54 | ./bin/protoc \ 55 | --plugin="protoc-gen-ts=${PROTOC_GEN_TS_PATH}" \ 56 | --plugin="protoc-gen-grpc=${PROTOC_GEN_GRPC_PATH}" \ 57 | --js_out="import_style=commonjs,binary:${OUT_DIR}" \ 58 | --ts_out="service=grpc-node:${OUT_DIR}" \ 59 | --grpc_out="${OUT_DIR}" \ 60 | --proto_path="${IN_DIR}" \ 61 | ${IN_DIR}/api.proto 62 | 63 | # Cleanup protoc download. 64 | rm -rf ./bin/ 65 | 66 | # Remove exit trap. 67 | trap - EXIT 68 | 69 | echo "Done generating!" 
70 | -------------------------------------------------------------------------------- /src/debug.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable */ 2 | 3 | import { randomBytes } from 'crypto'; 4 | import LiftbridgeStream, { StartPosition } from './stream'; 5 | import LiftbridgeMessage, { AckPolicy } from './message'; 6 | import LiftbridgeClient from './index'; 7 | import { ErrorCodes } from './errors'; 8 | 9 | if (!module.parent) { 10 | const subject = 'test7'; 11 | const streamName = 'test-stream-gp-7'; 12 | 13 | function msg() { 14 | const key = 'KEY-' + randomBytes(10).toString('hex'); 15 | return new LiftbridgeMessage({ subject, key, value: `VALUE-ok-${key}`, ackPolicy: AckPolicy.ALL, partitionStrategy: 'key' }); 16 | } 17 | 18 | const lbClient = new LiftbridgeClient(['localhost:9292']); 19 | const stream = new LiftbridgeStream({ subject, name: streamName, partitions: 1 }); 20 | 21 | lbClient.connect().then((client) => { 22 | console.log('connected to -> ', client.getChannel().getTarget()); 23 | lbClient.createStream(stream).then(response => { 24 | console.log('response for create stream = ', response.toObject()); 25 | }).catch(err => { 26 | if (err.code !== ErrorCodes.ERR_PARTITION_ALREADY_EXISTS) { 27 | throw err; 28 | } 29 | }).finally(async () => { 30 | console.log('going to publish', msg().toObject()); 31 | const pubres1 = await lbClient.publish(msg()); 32 | console.log('publish result 1 = ', pubres1.toObject()); 33 | const pubres2 = await lbClient.publish(msg()); 34 | console.log('publish result 2 = ', pubres2.toObject()); 35 | const pubres3 = await lbClient.publish(msg()); 36 | console.log('publish result 3 = ', pubres3.toObject()); 37 | await lbClient.publish(msg()); 38 | console.log('going to subscribe'); 39 | const sub = lbClient.subscribe(new LiftbridgeStream({ subject, name: streamName, startPosition: StartPosition.EARLIEST })); 40 | sub.on('status', (data) => { 41 | console.log('subscribe 
on status = ', data); 42 | }); 43 | sub.on('data', (data) => { 44 | console.log('subscribe on data = ', data.toObject()); 45 | }); 46 | sub.on('error', err => { 47 | console.error('subscribe on error! ', err); 48 | }); 49 | sub.on('close', () => { 50 | console.log('subscribe on close!'); 51 | }); 52 | await lbClient.publish(msg()); 53 | await lbClient.publish(msg()); 54 | await lbClient.publish(msg()); 55 | }); 56 | }); 57 | } 58 | -------------------------------------------------------------------------------- /test/fixtures/partition/metadata_1_partition.json: -------------------------------------------------------------------------------- 1 | { 2 | "brokers": { 3 | "dTmEw414ooPUMS8hu1kW03": { 4 | "id": "dTmEw414ooPUMS8hu1kW03", 5 | "host": "", 6 | "port": 9292 7 | } 8 | }, 9 | "addresses": {}, 10 | "streams": { 11 | "byName": { 12 | "test-stream-1": { 13 | "subject": "test-subject-1", 14 | "name": "test-stream-1", 15 | "partitions": [ 16 | { 17 | "id": 0, 18 | "leader": { 19 | "id": "dTmEw414ooPUMS8hu1kW03", 20 | "host": "", 21 | "port": 9292 22 | }, 23 | "replicas": [ 24 | { 25 | "id": "dTmEw414ooPUMS8hu1kW03", 26 | "host": "", 27 | "port": 9292 28 | } 29 | ], 30 | "isr": [ 31 | { 32 | "id": "dTmEw414ooPUMS8hu1kW03", 33 | "host": "", 34 | "port": 9292 35 | } 36 | ] 37 | } 38 | ] 39 | } 40 | }, 41 | "bySubject": { 42 | "test-subject-1": { 43 | "subject": "test-subject-1", 44 | "name": "test-stream-1", 45 | "partitions": [ 46 | { 47 | "id": 0, 48 | "leader": { 49 | "id": "dTmEw414ooPUMS8hu1kW03", 50 | "host": "", 51 | "port": 9292 52 | }, 53 | "replicas": [ 54 | { 55 | "id": "dTmEw414ooPUMS8hu1kW03", 56 | "host": "", 57 | "port": 9292 58 | } 59 | ], 60 | "isr": [ 61 | { 62 | "id": "dTmEw414ooPUMS8hu1kW03", 63 | "host": "", 64 | "port": 9292 65 | } 66 | ] 67 | } 68 | ] 69 | } 70 | } 71 | }, 72 | "lastUpdated": "2019-09-17T04:35:49.214Z" 73 | } -------------------------------------------------------------------------------- /grpc/generated/api_grpc_pb.d.ts: 
-------------------------------------------------------------------------------- 1 | // GENERATED CODE -- DO NOT EDIT! 2 | 3 | // package: proto 4 | // file: api.proto 5 | 6 | import * as api_pb from "./api_pb"; 7 | import * as grpc from "grpc"; 8 | 9 | interface IAPIService extends grpc.ServiceDefinition { 10 | createStream: grpc.MethodDefinition; 11 | subscribe: grpc.MethodDefinition; 12 | fetchMetadata: grpc.MethodDefinition; 13 | publish: grpc.MethodDefinition; 14 | } 15 | 16 | export const APIService: IAPIService; 17 | 18 | export class APIClient extends grpc.Client { 19 | constructor(address: string, credentials: grpc.ChannelCredentials, options?: object); 20 | createStream(argument: api_pb.CreateStreamRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; 21 | createStream(argument: api_pb.CreateStreamRequest, metadataOrOptions: grpc.Metadata | grpc.CallOptions | null, callback: grpc.requestCallback): grpc.ClientUnaryCall; 22 | createStream(argument: api_pb.CreateStreamRequest, metadata: grpc.Metadata | null, options: grpc.CallOptions | null, callback: grpc.requestCallback): grpc.ClientUnaryCall; 23 | subscribe(argument: api_pb.SubscribeRequest, metadataOrOptions?: grpc.Metadata | grpc.CallOptions | null): grpc.ClientReadableStream; 24 | subscribe(argument: api_pb.SubscribeRequest, metadata?: grpc.Metadata | null, options?: grpc.CallOptions | null): grpc.ClientReadableStream; 25 | fetchMetadata(argument: api_pb.FetchMetadataRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; 26 | fetchMetadata(argument: api_pb.FetchMetadataRequest, metadataOrOptions: grpc.Metadata | grpc.CallOptions | null, callback: grpc.requestCallback): grpc.ClientUnaryCall; 27 | fetchMetadata(argument: api_pb.FetchMetadataRequest, metadata: grpc.Metadata | null, options: grpc.CallOptions | null, callback: grpc.requestCallback): grpc.ClientUnaryCall; 28 | publish(argument: api_pb.PublishRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; 29 | 
publish(argument: api_pb.PublishRequest, metadataOrOptions: grpc.Metadata | grpc.CallOptions | null, callback: grpc.requestCallback): grpc.ClientUnaryCall; 30 | publish(argument: api_pb.PublishRequest, metadata: grpc.Metadata | null, options: grpc.CallOptions | null, callback: grpc.requestCallback): grpc.ClientUnaryCall; 31 | } 32 | -------------------------------------------------------------------------------- /media/node-liftbridge.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /test/partition.test.ts: -------------------------------------------------------------------------------- 1 | import test from 'tape'; 2 | import fnv1a from '@sindresorhus/fnv1a'; 3 | import { KeyPartitioner, RoundRobinPartitioner, BasePartitioner } from '../src/partition'; 4 | import { StreamNotFoundInMetadataError, ErrorCodes } from '../src/errors'; 5 | import readFile from './helpers/read-file'; 6 | import { IMetadata } from '../src/metadata'; 7 | 8 | // Dummy implementation that always returns total partitions + 420. 
9 | class TestPartitioner extends BasePartitioner { 10 | public calculatePartition(): number { 11 | const totalPartitions = this.getPartitionCount(); 12 | return 420 + totalPartitions; 13 | } 14 | } 15 | 16 | test('⚖️ Partition — `BasePartitioner` should have values correctly set and all methods should work correctly on an implementation.', async t => { 17 | t.plan(4); 18 | const metadata: IMetadata = JSON.parse(await readFile('./fixtures/partition/metadata_1_partition.json')); 19 | const testPartitioner1 = new TestPartitioner('test-subject-1', 'some-key', metadata); 20 | t.equal(testPartitioner1.calculatePartition(), 421, 'custom implementation should work correctly.'); 21 | const testPartitioner2 = new TestPartitioner('test-subject-1', Buffer.from('some-key'), metadata); 22 | t.equal(testPartitioner2.calculatePartition(), 421, 'keys as Buffers should still work.'); 23 | try { 24 | const testPartitioner3 = new TestPartitioner('unknown-subject-1', 'some-key', metadata); 25 | testPartitioner3.calculatePartition(); 26 | t.fail('`calculatePartition()` should throw error for unknown subject.'); 27 | } catch (err) { 28 | t.true(err instanceof StreamNotFoundInMetadataError, 'thrown error should be correct.'); 29 | t.equal(err.code, ErrorCodes.ERR_STREAM_NOT_FOUND_IN_METADATA, 'error code should be correct.'); 30 | } 31 | t.end(); 32 | }); 33 | 34 | test('⚖️ Partition — `KeyPartitioner` should correctly partition on keys by hashing them.', async t => { 35 | t.plan(2); 36 | const metadata1: IMetadata = JSON.parse(await readFile('./fixtures/partition/metadata_1_partition.json')); 37 | const keyPartitioner1 = new KeyPartitioner('test-subject-1', 'some-key', metadata1); 38 | t.equal(keyPartitioner1.calculatePartition(), 0, 'should always return 0 for 1 partition.'); 39 | 40 | const metadata2: IMetadata = JSON.parse(await readFile('./fixtures/partition/metadata_5_partitions.json')); 41 | const keyPartitioner2 = new KeyPartitioner('test-subject-1', 'some-other-key', metadata2); 
42 | const expectedPartition = fnv1a('some-other-key') % Object.keys(metadata2.streams.bySubject['test-subject-1'].partitions).length; 43 | t.equal(keyPartitioner2.calculatePartition(), expectedPartition, 'should correctly partition by hashing the key.'); 44 | t.end(); 45 | }); 46 | 47 | test('⚖️ Partition — `RoundRobinPartitioner` should correctly partition on keys in a round-robin fashion.', async t => { 48 | t.plan(7); 49 | const metadata1: IMetadata = JSON.parse(await readFile('./fixtures/partition/metadata_1_partition.json')); 50 | const rrPartitioner1 = new RoundRobinPartitioner('test-subject-1', 'some-key', metadata1); 51 | t.equal(rrPartitioner1.calculatePartition(), 0, 'should always return 0 for 1 partition.'); 52 | 53 | const metadata2: IMetadata = JSON.parse(await readFile('./fixtures/partition/metadata_5_partitions.json')); 54 | const rrPartitioner2 = new RoundRobinPartitioner('test-subject-1', 'some-other-key', metadata2); 55 | t.equal(rrPartitioner2.calculatePartition(), 0, 'should first return partition #0.'); 56 | t.equal(rrPartitioner2.calculatePartition(), 1, 'should then return partition #1.'); 57 | t.equal(rrPartitioner2.calculatePartition(), 2, 'should then return partition #2.'); 58 | t.equal(rrPartitioner2.calculatePartition(), 3, 'should then return partition #3.'); 59 | t.equal(rrPartitioner2.calculatePartition(), 4, 'should then return partition #4.'); 60 | t.equal(rrPartitioner2.calculatePartition(), 0, 'should cycle back and then return partition #0 again.'); 61 | t.end(); 62 | }); 63 | -------------------------------------------------------------------------------- /test/stream.test.ts: -------------------------------------------------------------------------------- 1 | import test from 'tape'; 2 | import LiftbridgeStream, { StartPosition } from '../src/stream'; 3 | import { 4 | InvalidPartitionsError, ErrorCodes, OffsetNotSpecifiedError, TimestampNotSpecifiedError, 5 | } from '../src/errors'; 6 | 7 | test('🏞 Stream — constructor should 
return a `Stream` object with the correct default values set.', t => { 8 | t.plan(5); 9 | const stream = new LiftbridgeStream({ 10 | subject: 'test-subject', 11 | name: 'test-stream', 12 | }); 13 | t.equal(stream.subject, 'test-subject', 'should have the subject set.'); 14 | t.equal(stream.name, 'test-stream', 'should have the stream name set.'); 15 | t.equal(stream.partitions, 1, 'should default to 1 partition.'); 16 | t.equal(stream.replicationFactor, 1, 'should default to a replication factor of 1.'); 17 | t.equal(stream.startPosition, StartPosition.LATEST, 'should default to `LATEST` start position.'); 18 | t.end(); 19 | }); 20 | 21 | test('🏞 Stream — constructor should return a `Stream` object with the correct optional values set.', t => { 22 | t.plan(3); 23 | const stream = new LiftbridgeStream({ 24 | subject: 'test-subject', 25 | name: 'test-stream', 26 | group: 'my-fun-group', 27 | replicationFactor: 6, 28 | startPosition: StartPosition.EARLIEST, 29 | }); 30 | t.equal(stream.group, 'my-fun-group', 'should have the group set.'); 31 | t.equal(stream.replicationFactor, 6, 'should have the replication factor set.'); 32 | t.equal(stream.startPosition, StartPosition.EARLIEST, 'should have the start position set.'); 33 | t.end(); 34 | }); 35 | 36 | 37 | test('🏞 Stream — constructor should throw when an invalid value is set for `partitions`.', t => { 38 | t.plan(2); 39 | try { 40 | // eslint-disable-next-line no-new 41 | new LiftbridgeStream({ 42 | subject: 'test-subject', 43 | name: 'test-stream', 44 | partitions: -1, 45 | }); 46 | t.fail('constructor should throw an error.'); 47 | } catch (err) { 48 | t.true(err instanceof InvalidPartitionsError, 'thrown error should be correct.'); 49 | t.equal(err.code, ErrorCodes.ERR_INVALID_PARTITIONS, 'error code should be correct.'); 50 | t.end(); 51 | } 52 | }); 53 | 54 | test('🏞 Stream — constructor should throw when start position is set to offset but no offset is specified.', t => { 55 | t.plan(2); 56 | try { 57 | // 
eslint-disable-next-line no-new 58 | new LiftbridgeStream({ 59 | subject: 'test-subject', 60 | name: 'test-stream', 61 | startPosition: StartPosition.OFFSET, 62 | }); 63 | t.fail('constructor should throw an error.'); 64 | } catch (err) { 65 | t.true(err instanceof OffsetNotSpecifiedError, 'thrown error should be correct.'); 66 | t.equal(err.code, ErrorCodes.ERR_OFFSET_NOT_SPECIFIED, 'error code should be correct.'); 67 | t.end(); 68 | } 69 | }); 70 | 71 | test('🏞 Stream — constructor should throw when start position is set to timestamp but no timestamp is specified.', t => { 72 | t.plan(2); 73 | try { 74 | // eslint-disable-next-line no-new 75 | new LiftbridgeStream({ 76 | subject: 'test-subject', 77 | name: 'test-stream', 78 | startPosition: StartPosition.TIMESTAMP, 79 | }); 80 | t.fail('constructor should throw an error.'); 81 | } catch (err) { 82 | t.true(err instanceof TimestampNotSpecifiedError, 'thrown error should be correct.'); 83 | t.equal(err.code, ErrorCodes.ERR_TIMESTAMP_NOT_SPECIFIED, 'error code should be correct.'); 84 | t.end(); 85 | } 86 | }); 87 | 88 | test('🏞 Stream — constructor should return a `Stream` object with the correct `partitions` when `maxReplication` is set to `true`.', t => { 89 | t.plan(1); 90 | const stream = new LiftbridgeStream({ 91 | subject: 'test-subject', 92 | name: 'test-stream', 93 | maxReplication: true, 94 | partitions: 10, 95 | }); 96 | t.equal(stream.replicationFactor, -1, 'replication factor should be correctly set to -1.'); 97 | t.end(); 98 | }); 99 | 100 | test('🏞 Stream — constructor should return a `Stream` object with the correct offset or partitions values set.', t => { 101 | t.plan(4); 102 | const stream1 = new LiftbridgeStream({ 103 | subject: 'test-subject', 104 | name: 'test-stream', 105 | startOffset: 69, 106 | }); 107 | t.equal(stream1.startOffset, 69, 'offset should be correctly set.'); 108 | t.equal(stream1.startTimestamp, undefined, 'timestamp should not be set.'); 109 | 110 | const stream2 = new 
LiftbridgeStream({ 111 | subject: 'test-subject', 112 | name: 'test-stream', 113 | startTimestamp: 1568630702733000000, 114 | }); 115 | t.equal(stream2.startTimestamp, 1568630702733000000, 'timestamp should be correctly set.'); 116 | t.equal(stream2.startOffset, undefined, 'offset should not be set.'); 117 | t.end(); 118 | }); 119 | -------------------------------------------------------------------------------- /test/message.test.ts: -------------------------------------------------------------------------------- 1 | import test from 'tape'; 2 | import LiftbridgeMessage, { AckPolicy } from '../src/message'; 3 | 4 | test('✉️ Message — constructor should correctly set all values.', t => { 5 | t.plan(17); 6 | const message1 = new LiftbridgeMessage({ 7 | value: 'some-value', 8 | }); 9 | t.equal(message1.getValue().toString(), 'some-value', 'value should be set.'); 10 | t.equal(message1.getKey().toString(), '', 'key should be set to empty string.'); 11 | t.equal(message1.getAckpolicy(), AckPolicy.NONE, 'ackpolicy should be set to NONE.'); 12 | t.false((message1.getCorrelationid() === null 13 | || message1.getCorrelationid() === undefined 14 | || message1.getCorrelationid() === ''), 15 | 'correlation ID should be set.'); 16 | 17 | const message2 = new LiftbridgeMessage({ 18 | value: 'some-value', 19 | subject: 'test-subject-1', 20 | }); 21 | t.equal(message2.getSubject(), 'test-subject-1', 'subject should be set.'); 22 | 23 | const message3 = new LiftbridgeMessage({ 24 | value: 'some-value', 25 | key: 'some-key', 26 | }); 27 | t.equal(message3.getKey().toString(), 'some-key', 'key as a string should be set.'); 28 | 29 | const message4 = new LiftbridgeMessage({ 30 | value: 'some-value', 31 | key: Buffer.from('some-key'), 32 | }); 33 | t.equal(message4.getKey().toString(), 'some-key', 'key as a Buffer should be set.'); 34 | 35 | const message5 = new LiftbridgeMessage({ 36 | value: Buffer.from('some-value'), 37 | }); 38 | t.equal(message5.getValue().toString(), 
'some-value', 'value as a Buffer should be set.'); 39 | 40 | const message6 = new LiftbridgeMessage({ 41 | value: 'some-value', 42 | partition: 9, 43 | }); 44 | t.equal(message6.partition, 9, 'partition should be set if passed as an option.'); 45 | t.equal(message6.partitionStrategy, undefined, 'partition strategy should be unset if partition is set.'); 46 | 47 | const message7 = new LiftbridgeMessage({ 48 | value: 'some-value', 49 | partitionStrategy: 'key', 50 | }); 51 | t.equal(message7.partitionStrategy, 'key', 'partition strategy should be set if passed as an option.'); 52 | t.equal(message7.partition, undefined, 'partition should be unset if partition strategy is set.'); 53 | 54 | const message8 = new LiftbridgeMessage({ 55 | value: 'some-value', 56 | ackInbox: 'subject.9', 57 | correlationId: 'abcdfegh', 58 | ackPolicy: AckPolicy.LEADER, 59 | }); 60 | t.equal(message8.getAckinbox(), 'subject.9', 'ackInbox should be set if passed as an option.'); 61 | t.equal(message8.getCorrelationid(), 'abcdfegh', 'correlation ID should be set if passed as an option.'); 62 | t.equal(message8.getAckpolicy(), AckPolicy.LEADER, 'ackPolicy should be set if passed as an option.'); 63 | 64 | const message9 = new LiftbridgeMessage({ 65 | value: 'some-value', 66 | // @ts-ignore Force setting `null`. 67 | headers: { test: 'hello', example: null }, 68 | }); 69 | t.deepEqual(message9.getHeadersMap().toObject().sort(), [ 70 | ['test', Buffer.from('hello')], 71 | ['example', Buffer.from('')], // `null` gets turned to an empty string. 
72 | ].sort(), 'headers map should be set if passed as an option.'); 73 | 74 | const message10 = new LiftbridgeMessage({ 75 | value: 'some-value', 76 | headers: { }, 77 | }); 78 | t.deepEqual(message10.getHeadersMap().toObject(), [], 'headers map should be empty if no key-value pairs are set in headers.'); 79 | t.end(); 80 | }); 81 | 82 | test('✉️ Message — `serializeMessage()` should serialize a Liftbridge message.', t => { 83 | t.plan(1); 84 | const message = new LiftbridgeMessage({ 85 | key: 'hello', 86 | value: 'test', 87 | ackPolicy: AckPolicy.ALL, 88 | headers: { 89 | example: 'sweet', 90 | }, 91 | }); 92 | const envelopeCookie = Buffer.from('LIFT'); 93 | const envelopeCookieLength = envelopeCookie.length; 94 | const serializedMessage = message.serializeBinary(); 95 | const expectedOutput = Buffer.concat([envelopeCookie, serializedMessage], envelopeCookieLength + serializedMessage.length); 96 | t.equal(Buffer.compare(expectedOutput, message.serializeMessage()), 0, '`serializeMessage()` should correctly generate a NATS-ready Liftbridge message.'); 97 | t.end(); 98 | }); 99 | 100 | test('✉️ Message — `toJSON()` should correctly transform a Liftbridge message to human-friendly JSON.', t => { 101 | t.plan(1); 102 | const message = new LiftbridgeMessage({ 103 | key: 'hello', 104 | value: 'test', 105 | ackPolicy: AckPolicy.ALL, 106 | partition: 4, 107 | correlationId: 'xxx', 108 | // @ts-ignore So we can try to pass in buffers too and see if they're toJSON()-able. 
109 | headers: { 110 | example: 'sweet', 111 | lol: Buffer.from('420'), 112 | }, 113 | }); 114 | t.deepEqual(LiftbridgeMessage.toJSON(message), { 115 | offset: '0', 116 | key: 'hello', 117 | value: 'test', 118 | timestamp: '0', 119 | subject: '', 120 | reply: '', 121 | ackinbox: '', 122 | correlationid: 'xxx', 123 | ackpolicy: AckPolicy.ALL, 124 | headers: { example: 'sweet', lol: '420' }, 125 | }, 'should correctly return a nice JSON.'); 126 | t.end(); 127 | }); 128 | -------------------------------------------------------------------------------- /src/partition.ts: -------------------------------------------------------------------------------- 1 | import fnv1a from '@sindresorhus/fnv1a'; 2 | import { IMetadata } from './metadata'; 3 | import { StreamNotFoundInMetadataError } from './errors'; 4 | 5 | /** 6 | * @hidden Module-level closure that holds a subject counter for use in the RoundRobinPartitioner. 7 | */ 8 | const subjectCounter = (function subjectCounter() { 9 | const subjectCounterMap: Map = new Map(); 10 | return { 11 | add(key: string, value: number) { 12 | return subjectCounterMap.set(key, value); 13 | }, 14 | has(key: string) { 15 | return subjectCounterMap.has(key); 16 | }, 17 | get(key: string) { 18 | return subjectCounterMap.get(key); 19 | }, 20 | }; 21 | }()); 22 | 23 | /** 24 | * Abstract class for Liftbridge partitioner. 25 | * 26 | * All custom implementations must implment the [[calculatePartition]] method. 27 | * 28 | * @category Partition 29 | */ 30 | export abstract class BasePartitioner { 31 | protected readonly subject: string; 32 | 33 | protected readonly key: string; 34 | 35 | protected readonly metadata: IMetadata; 36 | 37 | /** 38 | * Partitioner base class. 39 | * 40 | * Custom partitioners are expected to extends this class and implement 41 | * the [[calculatePartition]] method. 42 | * @param message Liftbridge Message object. 43 | * @param metadata Metadata object. 
44 | */ 45 | constructor(subject: string, key: string | Uint8Array, metadata: IMetadata) { 46 | this.subject = subject; 47 | this.key = typeof key === 'string' ? key : key.toString(); 48 | this.metadata = metadata; 49 | } 50 | 51 | // Gets total number of partitions for given stream subject. 52 | protected getPartitionCount(): number { 53 | const streamMeta = this.metadata.streams.bySubject[this.subject]; 54 | if (!streamMeta) { 55 | throw new StreamNotFoundInMetadataError(); 56 | } 57 | const partitionsCount = Object.keys(streamMeta.partitions).length; 58 | return partitionsCount; 59 | } 60 | 61 | /** 62 | * Calculate the partition for the given message. 63 | * 64 | * @returns Partition to send the message to. 65 | */ 66 | public abstract calculatePartition(): number; 67 | } 68 | 69 | /** 70 | * Computes the partition number for a given message by hashing the key (using the 71 | * super-simple [FNV-1A](https://softwareengineering.stackexchange.com/questions/49550/which-hashing-algorithm-is-best-for-uniqueness-and-speed/145633#145633) 72 | * algorithm) and modding by the number of partitions for the first stream found with 73 | * the subject of the message. This does not work with streams containing 74 | * wildcards in their subjects, e.g. "foo.*", since this matches on the subject 75 | * literal of the published message. This also has undefined behavior if there 76 | * are multiple streams for the given subject. 77 | * 78 | * @category Partition 79 | */ 80 | export class KeyPartitioner extends BasePartitioner { 81 | /** 82 | * Calculate the partition for the given message by hashing the key. 83 | * 84 | * @returns Partition to send the message to. 
85 | */ 86 | public calculatePartition(): number { 87 | const partitionsCount = this.getPartitionCount(); 88 | if (partitionsCount <= 1) return 0; 89 | const partition = fnv1a(this.key) % partitionsCount; 90 | return partition; 91 | } 92 | } 93 | 94 | /** 95 | * Computes the partition number for a given message in a 96 | * round-robin fashion by atomically incrementing a counter for the message 97 | * subject and modding by the number of partitions for the first stream found 98 | * with the subject. This does not work with streams containing wildcards in 99 | * their subjects, e.g. "foo.*", since this matches on the subject literal of 100 | * the published message. This also has undefined behavior if there are multiple 101 | * streams for the given subject. 102 | * 103 | * @category Partition 104 | */ 105 | export class RoundRobinPartitioner extends BasePartitioner { 106 | /** 107 | * Calculate the partition for the given message by rotating the 108 | * message subject in a round-robin fashion. 109 | * 110 | * @returns Partition to send the message to. 111 | */ 112 | public calculatePartition(): number { 113 | const partitionsCount = this.getPartitionCount(); 114 | if (partitionsCount <= 1) return 0; 115 | let counter = 0; 116 | if (subjectCounter.has(this.subject)) { 117 | counter = subjectCounter.get(this.subject); 118 | subjectCounter.add(this.subject, counter += 1); 119 | } else { 120 | subjectCounter.add(this.subject, counter); 121 | } 122 | return counter % partitionsCount; 123 | } 124 | } 125 | 126 | /** 127 | * Builtin partioners as simple strings. 128 | * 129 | * @category Partition 130 | */ 131 | export const builtinPartitioners = { 132 | key: KeyPartitioner, 133 | roundrobin: RoundRobinPartitioner, 134 | }; 135 | 136 | /** 137 | * All available builtin partitioners. 
138 | * 139 | * @category Partition 140 | */ 141 | export type BuiltinPartitioners = typeof builtinPartitioners; 142 | 143 | /** 144 | * Pluggable partitioner that must be an implementation of [[BasePartitioner]]. 145 | * 146 | * @category Partition 147 | */ 148 | export type PartitionerLike = new(subject: string, key: string | Uint8Array, metadata: IMetadata) => BasePartitioner; 149 | -------------------------------------------------------------------------------- /src/stream.ts: -------------------------------------------------------------------------------- 1 | import { StartPosition, StartPositionMap } from '../grpc/generated/api_pb'; 2 | import { InvalidPartitionsError, OffsetNotSpecifiedError, TimestampNotSpecifiedError } from './errors'; 3 | 4 | /** 5 | * Liftbridge stream options. 6 | * @category Stream 7 | */ 8 | export interface IStreamOptions { 9 | /** 10 | * Stream subject. 11 | */ 12 | subject: string; 13 | /** 14 | * Unique stream name. 15 | */ 16 | name: string; 17 | /** 18 | * Name of a load-balance group. When there are multiple 19 | * streams in the same group, messages will be balanced among them. 20 | */ 21 | group?: string; 22 | /** 23 | * Controls the number of servers to replicate a stream to. For 24 | * example a value of `1` would mean only 1 server would have the data, 25 | * and a value of `3` would be 3 servers would have it. If this is not set, it 26 | * defaults to `1`. A value of `-1` will signal to the server to set the 27 | * replication factor equal to the current number of servers in the 28 | * cluster. 29 | */ 30 | replicationFactor?: number; 31 | /** 32 | * Sets the stream replication factor equal 33 | * to the current number of servers in the cluster. 34 | */ 35 | maxReplication?: boolean; 36 | /** 37 | * Use in [[subscribe]] when you want to read messages 38 | * from a particular offset. 39 | */ 40 | startOffset?: number; 41 | /** 42 | * Use in [[subscribe]] when you want to read messages 43 | * from a particular timestamp. 
Timestamp has to be specified 44 | * as nanoseconds since UNIX epoch time. 45 | * 46 | * @example 47 | * ```typescript 48 | * const startTimestamp = Date.now() * 1e6; 49 | * ``` 50 | */ 51 | startTimestamp?: number; 52 | /** 53 | * Control where to begin consuming from in the stream. 54 | * Defaults to `LATEST`. 55 | * 56 | * Available positions are `EARLIEST`, `LATEST`, `NEW_ONLY`, 57 | * `OFFSET` ([[startOffset]] has to be set) & `TIMESTAMP` ([[startTimestamp]] has to be set). 58 | * 59 | * 1. `EARLIEST` sets the subscription start position to the earliest message received in the stream. 60 | * 2. `LATEST` sets the subscription start position to the last message received in the stream. 61 | * 3. `NEW_ONLY` sets the subscription start position to the timestamp when the subscription began. 62 | * 4. `OFFSET` sets the subscription start position to a specific offset. 63 | * 5. `TIMESTAMP` sets the subscription start position to a specific timestamp. 64 | */ 65 | startPosition?: StartPositionMap[keyof StartPositionMap]; 66 | /** 67 | * Determines how many partitions to create for a stream. If `0`, 68 | * this will behave as a stream with a single partition. If this is not 69 | * set, it defaults to `1`. 70 | */ 71 | partitions?: number; 72 | } 73 | 74 | /** 75 | * Liftbridge Stream. 76 | * Use to represent a Liftbridge stream in [[subscribe]] and [[createStream]] operations. 
77 | * 78 | * @category Stream 79 | */ 80 | export default class LiftbridgeStream { 81 | /** 82 | * See [[IStreamOptions.subject]] 83 | */ 84 | public readonly subject: string; 85 | 86 | /** 87 | * See [[IStreamOptions.name]] 88 | */ 89 | public readonly name: string; 90 | 91 | /** 92 | * See [[IStreamOptions.group]] 93 | */ 94 | public readonly group: string | undefined; 95 | 96 | /** 97 | * See [[IStreamOptions.replicationFactor]] 98 | */ 99 | public readonly replicationFactor: number = 1; 100 | 101 | /** 102 | * See [[IStreamOptions.startOffset]] 103 | */ 104 | public readonly startOffset: number | undefined; 105 | 106 | /** 107 | * See [[IStreamOptions.startTimestamp]] 108 | */ 109 | public readonly startTimestamp: number | undefined; 110 | 111 | /** 112 | * See [[IStreamOptions.startPosition]] 113 | */ 114 | public readonly startPosition: StartPositionMap[keyof StartPositionMap] | undefined = StartPosition.LATEST; 115 | 116 | /** 117 | * See [[IStreamOptions.partitions]] 118 | */ 119 | public partitions: number | undefined = 1; 120 | 121 | /** 122 | * Creates a Stream object. 123 | * 124 | * @param stream Stream options. 125 | */ 126 | public constructor(stream: IStreamOptions) { 127 | this.subject = stream.subject; 128 | this.name = stream.name; 129 | if (Object.prototype.hasOwnProperty.call(stream, 'group')) this.group = stream.group; 130 | this.partitions = stream.partitions ? stream.partitions : 1; 131 | if (this.partitions < 0) { 132 | throw new InvalidPartitionsError(); 133 | } 134 | if (stream.startPosition === StartPosition.OFFSET && !stream.startOffset) throw new OffsetNotSpecifiedError(); 135 | if (stream.startPosition === StartPosition.TIMESTAMP && !stream.startTimestamp) throw new TimestampNotSpecifiedError(); 136 | this.replicationFactor = Object.prototype.hasOwnProperty.call(stream, 'replicationFactor') ? stream.replicationFactor as number : 1; 137 | this.replicationFactor = Object.prototype.hasOwnProperty.call(stream, 'maxReplication') ? 
-1 : this.replicationFactor; 138 | if (Object.prototype.hasOwnProperty.call(stream, 'startOffset')) this.startOffset = stream.startOffset; 139 | if (Object.prototype.hasOwnProperty.call(stream, 'startTimestamp')) this.startTimestamp = stream.startTimestamp; 140 | if (stream.startPosition && !stream.startOffset && !this.startTimestamp) this.startPosition = stream.startPosition; 141 | } 142 | } 143 | 144 | export { 145 | StartPosition, 146 | StartPositionMap, 147 | }; 148 | -------------------------------------------------------------------------------- /grpc/generated/api_grpc_pb.js: -------------------------------------------------------------------------------- 1 | // GENERATED CODE -- DO NOT EDIT! 2 | 3 | 'use strict'; 4 | var grpc = require('grpc'); 5 | var api_pb = require('./api_pb.js'); 6 | 7 | function serialize_proto_CreateStreamRequest(arg) { 8 | if (!(arg instanceof api_pb.CreateStreamRequest)) { 9 | throw new Error('Expected argument of type proto.CreateStreamRequest'); 10 | } 11 | return Buffer.from(arg.serializeBinary()); 12 | } 13 | 14 | function deserialize_proto_CreateStreamRequest(buffer_arg) { 15 | return api_pb.CreateStreamRequest.deserializeBinary(new Uint8Array(buffer_arg)); 16 | } 17 | 18 | function serialize_proto_CreateStreamResponse(arg) { 19 | if (!(arg instanceof api_pb.CreateStreamResponse)) { 20 | throw new Error('Expected argument of type proto.CreateStreamResponse'); 21 | } 22 | return Buffer.from(arg.serializeBinary()); 23 | } 24 | 25 | function deserialize_proto_CreateStreamResponse(buffer_arg) { 26 | return api_pb.CreateStreamResponse.deserializeBinary(new Uint8Array(buffer_arg)); 27 | } 28 | 29 | function serialize_proto_FetchMetadataRequest(arg) { 30 | if (!(arg instanceof api_pb.FetchMetadataRequest)) { 31 | throw new Error('Expected argument of type proto.FetchMetadataRequest'); 32 | } 33 | return Buffer.from(arg.serializeBinary()); 34 | } 35 | 36 | function deserialize_proto_FetchMetadataRequest(buffer_arg) { 37 | return 
api_pb.FetchMetadataRequest.deserializeBinary(new Uint8Array(buffer_arg)); 38 | } 39 | 40 | function serialize_proto_FetchMetadataResponse(arg) { 41 | if (!(arg instanceof api_pb.FetchMetadataResponse)) { 42 | throw new Error('Expected argument of type proto.FetchMetadataResponse'); 43 | } 44 | return Buffer.from(arg.serializeBinary()); 45 | } 46 | 47 | function deserialize_proto_FetchMetadataResponse(buffer_arg) { 48 | return api_pb.FetchMetadataResponse.deserializeBinary(new Uint8Array(buffer_arg)); 49 | } 50 | 51 | function serialize_proto_Message(arg) { 52 | if (!(arg instanceof api_pb.Message)) { 53 | throw new Error('Expected argument of type proto.Message'); 54 | } 55 | return Buffer.from(arg.serializeBinary()); 56 | } 57 | 58 | function deserialize_proto_Message(buffer_arg) { 59 | return api_pb.Message.deserializeBinary(new Uint8Array(buffer_arg)); 60 | } 61 | 62 | function serialize_proto_PublishRequest(arg) { 63 | if (!(arg instanceof api_pb.PublishRequest)) { 64 | throw new Error('Expected argument of type proto.PublishRequest'); 65 | } 66 | return Buffer.from(arg.serializeBinary()); 67 | } 68 | 69 | function deserialize_proto_PublishRequest(buffer_arg) { 70 | return api_pb.PublishRequest.deserializeBinary(new Uint8Array(buffer_arg)); 71 | } 72 | 73 | function serialize_proto_PublishResponse(arg) { 74 | if (!(arg instanceof api_pb.PublishResponse)) { 75 | throw new Error('Expected argument of type proto.PublishResponse'); 76 | } 77 | return Buffer.from(arg.serializeBinary()); 78 | } 79 | 80 | function deserialize_proto_PublishResponse(buffer_arg) { 81 | return api_pb.PublishResponse.deserializeBinary(new Uint8Array(buffer_arg)); 82 | } 83 | 84 | function serialize_proto_SubscribeRequest(arg) { 85 | if (!(arg instanceof api_pb.SubscribeRequest)) { 86 | throw new Error('Expected argument of type proto.SubscribeRequest'); 87 | } 88 | return Buffer.from(arg.serializeBinary()); 89 | } 90 | 91 | function deserialize_proto_SubscribeRequest(buffer_arg) { 92 | 
return api_pb.SubscribeRequest.deserializeBinary(new Uint8Array(buffer_arg)); 93 | } 94 | 95 | 96 | // API is the main Liftbridge server interface clients interact with. 97 | var APIService = exports.APIService = { 98 | // CreateStream creates a new stream attached to a NATS subject. It returns 99 | // an AlreadyExists status code if a stream with the given subject and name 100 | // already exists. 101 | createStream: { 102 | path: '/proto.API/CreateStream', 103 | requestStream: false, 104 | responseStream: false, 105 | requestType: api_pb.CreateStreamRequest, 106 | responseType: api_pb.CreateStreamResponse, 107 | requestSerialize: serialize_proto_CreateStreamRequest, 108 | requestDeserialize: deserialize_proto_CreateStreamRequest, 109 | responseSerialize: serialize_proto_CreateStreamResponse, 110 | responseDeserialize: deserialize_proto_CreateStreamResponse, 111 | }, 112 | // Subscribe creates an ephemeral subscription for the given stream. It 113 | // begins to receive messages starting at the given offset and waits for 114 | // new messages when it reaches the end of the stream. Use the request 115 | // context to close the subscription. 116 | subscribe: { 117 | path: '/proto.API/Subscribe', 118 | requestStream: false, 119 | responseStream: true, 120 | requestType: api_pb.SubscribeRequest, 121 | responseType: api_pb.Message, 122 | requestSerialize: serialize_proto_SubscribeRequest, 123 | requestDeserialize: deserialize_proto_SubscribeRequest, 124 | responseSerialize: serialize_proto_Message, 125 | responseDeserialize: deserialize_proto_Message, 126 | }, 127 | // FetchMetadata retrieves the latest cluster metadata, including stream 128 | // broker information. 
129 | fetchMetadata: { 130 | path: '/proto.API/FetchMetadata', 131 | requestStream: false, 132 | responseStream: false, 133 | requestType: api_pb.FetchMetadataRequest, 134 | responseType: api_pb.FetchMetadataResponse, 135 | requestSerialize: serialize_proto_FetchMetadataRequest, 136 | requestDeserialize: deserialize_proto_FetchMetadataRequest, 137 | responseSerialize: serialize_proto_FetchMetadataResponse, 138 | responseDeserialize: deserialize_proto_FetchMetadataResponse, 139 | }, 140 | // Publish a new message to a subject. If the AckPolicy is not NONE and a 141 | // deadline is provided, this will synchronously block until the ack is 142 | // received. If the ack is not received in time, a DeadlineExceeded status 143 | // code is returned. 144 | publish: { 145 | path: '/proto.API/Publish', 146 | requestStream: false, 147 | responseStream: false, 148 | requestType: api_pb.PublishRequest, 149 | responseType: api_pb.PublishResponse, 150 | requestSerialize: serialize_proto_PublishRequest, 151 | requestDeserialize: deserialize_proto_PublishRequest, 152 | responseSerialize: serialize_proto_PublishResponse, 153 | responseDeserialize: deserialize_proto_PublishResponse, 154 | }, 155 | }; 156 | 157 | exports.APIClient = grpc.makeGenericClientConstructor(APIService); 158 | -------------------------------------------------------------------------------- /test/metadata.test.ts: -------------------------------------------------------------------------------- 1 | import test from 'tape'; 2 | import LiftbridgeMetadata from '../src/metadata'; 3 | import { 4 | FetchMetadataResponse, Broker, StreamMetadata, PartitionMetadata, 5 | } from '../grpc/generated/api_pb'; 6 | import readFile from './helpers/read-file'; 7 | import { 8 | NoSuchPartitionError, ErrorCodes, NoKnownPartitionError, NoKnownLeaderForPartitionError, 9 | } from '../src/errors'; 10 | 11 | /** 12 | * Converts a dump of Liftbridge RPC Metadata JSON to a `FetchMetadataResponse` object. 13 | * @param meta Metadata JSON. 
14 | * @returns An instance of `FetchMetadataResponse`. 15 | */ 16 | function metadataJsonToResponse(meta: object): FetchMetadataResponse { 17 | const metadataResponse = new FetchMetadataResponse(); 18 | 19 | meta.brokersList.forEach(broker => { 20 | const b = new Broker(); 21 | b.setHost(broker.host); 22 | b.setPort(broker.port); 23 | b.setId(broker.id); 24 | metadataResponse.addBrokers(b); 25 | }); 26 | 27 | meta.metadataList.forEach(metadata => { 28 | const sm = new StreamMetadata(); 29 | sm.setName(metadata.name); 30 | sm.setSubject(metadata.subject); 31 | metadataResponse.addMetadata(sm); 32 | 33 | metadata.partitionsMap.forEach(pMap => { 34 | const idx = pMap[0]; 35 | const pMeta = pMap[1]; 36 | const pm = new PartitionMetadata(); 37 | pm.setId(idx); 38 | pm.setLeader(pMeta.leader); 39 | pMeta.replicasList.forEach(_ => pm.addReplicas(_)); 40 | pMeta.isrList.forEach(_ => pm.addIsr(_)); 41 | sm.getPartitionsMap().set(idx, pm); 42 | }); 43 | }); 44 | return metadataResponse; 45 | } 46 | 47 | test('Ⓜ️ Metadata — `build()` should return a nice human-friendly JSON interface for given Liftbridge raw metadata.', async t => { 48 | t.plan(4); 49 | const metadataResponse = metadataJsonToResponse(JSON.parse(await readFile('./fixtures/metadata/metadata_simple.json'))); 50 | // @ts-ignore No need to construct and pass a Client instance for this test. 
51 | const metadata = new LiftbridgeMetadata(null, metadataResponse); 52 | t.deepEqual(Object.keys(metadata.get()).sort(), ['addresses', 'brokers', 'lastUpdated', 'streams'].sort(), 'should contain all the expected keys.'); 53 | t.deepEqual(metadata.get().brokers, { 54 | DMxXSQifWCW2rdFsr2vk4S: { 55 | id: 'DMxXSQifWCW2rdFsr2vk4S', 56 | host: '127.0.0.1', 57 | port: 9292, 58 | }, 59 | }, 'brokers metadata should be correctly set.'); 60 | t.deepEqual(metadata.get().streams, { 61 | byName: { 62 | 'test-stream-1': { 63 | subject: 'test-subject-1', 64 | name: 'test-stream-1', 65 | partitions: [{ 66 | id: 0, 67 | leader: { 68 | id: 'DMxXSQifWCW2rdFsr2vk4S', 69 | host: '127.0.0.1', 70 | port: 9292, 71 | }, 72 | replicas: [{ 73 | id: 'DMxXSQifWCW2rdFsr2vk4S', 74 | host: '127.0.0.1', 75 | port: 9292, 76 | }], 77 | isr: [{ 78 | id: 'DMxXSQifWCW2rdFsr2vk4S', 79 | host: '127.0.0.1', 80 | port: 9292, 81 | }], 82 | }], 83 | }, 84 | }, 85 | bySubject: { 86 | 'test-subject-1': { 87 | subject: 'test-subject-1', 88 | name: 'test-stream-1', 89 | partitions: [{ 90 | id: 0, 91 | leader: { 92 | id: 'DMxXSQifWCW2rdFsr2vk4S', 93 | host: '127.0.0.1', 94 | port: 9292, 95 | }, 96 | replicas: [{ 97 | id: 'DMxXSQifWCW2rdFsr2vk4S', 98 | host: '127.0.0.1', 99 | port: 9292, 100 | }], 101 | isr: [{ 102 | id: 'DMxXSQifWCW2rdFsr2vk4S', 103 | host: '127.0.0.1', 104 | port: 9292, 105 | }], 106 | }], 107 | }, 108 | }, 109 | }, 'streams/partitions metadata should be correctly set.'); 110 | t.deepEqual(metadata.get().addresses, {}, 'addresses metadata should be correctly set.'); 111 | t.end(); 112 | }); 113 | 114 | test('Ⓜ️ Metadata — `getAddress()` should return broker address for the given stream partition.', async t => { 115 | t.plan(7); 116 | const metadataResponse1 = metadataJsonToResponse(JSON.parse(await readFile('./fixtures/metadata/metadata_simple.json'))); 117 | // @ts-ignore No need to construct and pass a Client instance for this test. 
118 | const metadata = new LiftbridgeMetadata(null, metadataResponse1); 119 | 120 | t.equal(metadata.getAddress('test-stream-1', 0), '127.0.0.1:9292', 'address should be correct.'); 121 | 122 | try { 123 | metadata.getAddress('unknown-stream-1', 1); 124 | t.fail('should throw for unknown stream.'); 125 | } catch (err) { 126 | t.true(err instanceof NoSuchPartitionError, 'thrown error should be correct.'); 127 | t.equal(err.code, ErrorCodes.ERR_PARTITION_DOES_NOT_EXIST, 'error code should be correct.'); 128 | } 129 | 130 | try { 131 | metadata.getAddress('test-stream-1', 1); 132 | } catch (err) { 133 | t.true(err instanceof NoKnownPartitionError, 'thrown error should be correct.'); 134 | t.equal(err.code, ErrorCodes.ERR_NO_KNOWN_PARTITION, 'error code should be correct.'); 135 | } 136 | 137 | try { 138 | const metadataResponse2 = metadataJsonToResponse(JSON.parse(await readFile('./fixtures/metadata/metadata_no_leader.json'))); 139 | // @ts-ignore No need to construct and pass a Client instance for this test. 140 | const metadata2 = new LiftbridgeMetadata(null, metadataResponse2); 141 | metadata2.getAddress('test-stream-1', 0); 142 | t.fail('should throw when there is no leader.'); 143 | } catch (err) { 144 | t.true(err instanceof NoKnownLeaderForPartitionError, 'thrown error should be correct.'); 145 | t.equal(err.code, ErrorCodes.ERR_NO_KNOWN_LEADER_FOR_PARTITION, 'error code should be correct.'); 146 | } 147 | 148 | t.end(); 149 | }); 150 | 151 | test('Ⓜ️ Metadata — `hasSubjectMetadata()` should tell if the given subject has any metadata for it.', async t => { 152 | t.plan(2); 153 | const metadataResponse = metadataJsonToResponse(JSON.parse(await readFile('./fixtures/metadata/metadata_simple.json'))); 154 | // @ts-ignore No need to construct and pass a Client instance for this test. 
155 | const metadata = new LiftbridgeMetadata(null, metadataResponse); 156 | t.true(metadata.hasSubjectMetadata('test-subject-1'), 'should return `true` for a subject we know has metadata.'); 157 | t.false(metadata.hasSubjectMetadata('unknown-subject-1'), 'should return `false` unknown subject.'); 158 | t.end(); 159 | }); 160 | -------------------------------------------------------------------------------- /grpc/api.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package proto; 3 | 4 | option java_package = "io.liftbridge.proto"; 5 | 6 | // CreateStreamRequest is sent to create a new stream. 7 | message CreateStreamRequest { 8 | string subject = 1; // Stream NATS subject 9 | string name = 2; // Stream name (unique per subject) 10 | string group = 3; // Partitions NATS subject amongst group members 11 | int32 replicationFactor = 4; // Number of stream replicas 12 | int32 partitions = 5; // Number of stream partitions 13 | } 14 | 15 | // CreateStreamResponse is sent by server after creating a stream. 16 | message CreateStreamResponse { 17 | // Intentionally empty. 18 | } 19 | 20 | // StartPosition determines the start-position type on a subscription. 21 | enum StartPosition { 22 | NEW_ONLY = 0; // Start at new messages after the latest 23 | OFFSET = 1; // Start at a specified offset 24 | EARLIEST = 2; // Start at the oldest message 25 | LATEST = 3; // Start at the newest message 26 | TIMESTAMP = 4; // Start at a specified timestamp 27 | } 28 | 29 | // SubscribeRequest is sent to subscribe to a stream partition. 
message SubscribeRequest {
    string stream = 1;                                // Stream name to subscribe to
    int32 partition = 2;                              // Stream partition to subscribe to
    StartPosition startPosition = 3;                  // Where to begin consuming from
    int64 startOffset = 4 [jstype=JS_STRING];         // Offset to begin consuming from
    int64 startTimestamp = 5 [jstype=JS_STRING];      // Timestamp to begin consuming from
}

// FetchMetadataRequest is sent to retrieve the latest cluster metadata.
message FetchMetadataRequest {
    repeated string streams = 1; // The streams to fetch metadata for (all if empty)
}

// FetchMetadataResponse contains the cluster metadata requested.
message FetchMetadataResponse {
    repeated Broker brokers = 1;          // Information for all brokers
    repeated StreamMetadata metadata = 2; // Information for all streams
}

// PublishRequest is sent to publish a new message.
message PublishRequest {
    bytes key = 1;                    // Message key
    bytes value = 2;                  // Message payload
    string stream = 3;                // Stream name to publish to
    int32 partition = 4;              // Stream partition to publish to
    string subject = 5;               // NATS subject to publish to
    string replySubject = 6;          // NATS reply subject
    map<string, bytes> headers = 7;   // Message headers (type params restored; lost in extraction)
    string ackInbox = 8;              // NATS subject to publish acks to
    string correlationId = 9;         // User-supplied value to correlate acks to publishes
    AckPolicy ackPolicy = 10;         // Controls the behavior of acks
}

// PublishResponse is sent by the server after publishing a message.
message PublishResponse {
    Ack ack = 1; // The ack for the published message if AckPolicy was not NONE
}

// Broker contains information for a Liftbridge broker.
message Broker {
    string id = 1;   // Broker id
    string host = 2; // Broker host
    int32 port = 3;  // Broker port
}

// StreamMetadata contains information for a stream.
message StreamMetadata {
    enum Error {
        OK = 0;
        UNKNOWN_STREAM = 1;
    }
    string name = 1;                             // The name of the stream being described
    string subject = 2;                          // The stream subject
    Error error = 3;                             // Indicates if there was something wrong with the requested stream
    map<int32, PartitionMetadata> partitions = 4; // Information for the stream partitions (type params restored; lost in extraction)
}

// PartitionMetadata contains information for a stream partition.
message PartitionMetadata {
    int32 id = 1;                 // Partition id
    string leader = 2;            // Broker id of the partition leader
    repeated string replicas = 3; // Broker ids of the partition replicas
    repeated string isr = 4;      // Broker ids of the in-sync replica set
}

// AckPolicy controls the behavior of message acknowledgements.
enum AckPolicy {
    LEADER = 0; // The ack will be sent once the leader has written the message to its log
    ALL = 1;    // The ack will be sent after the ISR replicas have written the message to their logs
    NONE = 2;   // No ack will be sent
}

// Message represents a message from a stream.
message Message {
    int64 offset = 1 [jstype=JS_STRING];    // Monotonic message offset in the stream
    bytes key = 2;                          // Message key
    bytes value = 3;                        // Message payload
    int64 timestamp = 4 [jstype=JS_STRING]; // When the message was received by the broker
    string stream = 5;                      // Stream name message was received on
    int32 partition = 6;                    // Stream partition message was assigned to
    string subject = 7;                     // NATS subject message was received on
    string replySubject = 8;                // NATS reply subject
    map<string, bytes> headers = 9;         // Message headers (type params restored; lost in extraction)
    string ackInbox = 10;                   // NATS subject to publish acks to
    string correlationId = 11;              // User-supplied value to correlate acks to publishes
    AckPolicy ackPolicy = 12;               // Controls the behavior of acks
}

// Ack represents an acknowledgement that a message was committed to a stream
// partition.
message Ack {
    string stream = 1;                   // Name of the stream
    string partitionSubject = 2;         // NATS subject partition is attached to
    string msgSubject = 3;               // NATS subject the message was received on
    int64 offset = 4 [jstype=JS_STRING]; // Stream offset the message was committed to
    string ackInbox = 5;                 // NATS subject to publish acks to
    string correlationId = 6;            // User-supplied value from the message
    AckPolicy ackPolicy = 7;             // The AckPolicy sent on the message
}

// API is the main Liftbridge server interface clients interact with.
service API {
    // CreateStream creates a new stream attached to a NATS subject. It returns
    // an AlreadyExists status code if a stream with the given subject and name
    // already exists.
    rpc CreateStream(CreateStreamRequest) returns (CreateStreamResponse) {}

    // Subscribe creates an ephemeral subscription for the given stream.
It 138 | // begins to receive messages starting at the given offset and waits for 139 | // new messages when it reaches the end of the stream. Use the request 140 | // context to close the subscription. 141 | rpc Subscribe(SubscribeRequest) returns (stream Message) {} 142 | 143 | // FetchMetadata retrieves the latest cluster metadata, including stream 144 | // broker information. 145 | rpc FetchMetadata(FetchMetadataRequest) returns (FetchMetadataResponse) {} 146 | 147 | // Publish a new message to a subject. If the AckPolicy is not NONE and a 148 | // deadline is provided, this will synchronously block until the ack is 149 | // received. If the ack is not received in time, a DeadlineExceeded status 150 | // code is returned. 151 | rpc Publish(PublishRequest) returns (PublishResponse) {} 152 | } 153 | -------------------------------------------------------------------------------- /src/message.ts: -------------------------------------------------------------------------------- 1 | import hyperId from 'hyperid'; 2 | import { AckPolicy, AckPolicyMap, Message } from '../grpc/generated/api_pb'; 3 | import { BuiltinPartitioners, PartitionerLike } from './partition'; 4 | 5 | /** 6 | * @hidden 7 | */ 8 | const envelopeMagicNumber = Buffer.from([0xB9, 0x0E, 0x43, 0xB4]); 9 | /** 10 | * @hidden 11 | */ 12 | const envelopeMagicNumberLength = envelopeMagicNumber.length; 13 | /** 14 | * @hidden 15 | */ 16 | const envelopeProtoV0 = 0x00; 17 | /** 18 | * @hidden 19 | */ 20 | const envelopeMinHeaderLength = 8; 21 | 22 | /** 23 | * Liftbridge message headers. 24 | * @category Message 25 | */ 26 | interface IMessageHeader { 27 | [key: string]: string; 28 | } 29 | 30 | /** 31 | * Liftbridge message interface. 32 | * @category Message 33 | */ 34 | export interface IMessage { 35 | /** 36 | * Message subject. 37 | */ 38 | subject?: string; 39 | /** 40 | * Key to set on the Message. 
If Liftbridge has stream compaction enabled, 41 | * the stream will retain only the last value for each key. 42 | */ 43 | key?: Uint8Array | string | null; 44 | /** 45 | * Value to set on the Message for the key. 46 | */ 47 | value: Uint8Array | string; 48 | /** 49 | * Sets the identifier used to correlate an ack with the 50 | * published message. 51 | * 52 | * Defaults to a unique ID generated by [`hyperid`](https://github.com/mcollina/hyperid/blob/master/README.md). 53 | */ 54 | correlationId?: string; 55 | /** 56 | * Key-value pairs to set on the Message headers map. 57 | */ 58 | headers?: IMessageHeader; 59 | /** 60 | * Sets the NATS subject Liftbridge should publish the message ack 61 | * to. If it's not set or if there's no Deadline, Liftbridge will not send an ack. 62 | * This is usually not needed when using the [[publish]] API. 63 | */ 64 | ackInbox?: string; 65 | /** 66 | * Controls the behavior of message acks sent by the server. By 67 | * default, Liftbridge will send an ack when the stream leader has written 68 | * the message to its write-ahead log. 69 | */ 70 | ackPolicy?: AckPolicyMap[keyof AckPolicyMap]; 71 | /** 72 | * Specifies the stream partition to publish the message to. If 73 | * this is set, any `partitioner` will not be used. This is a pointer to 74 | * allow distinguishing between undefined and 0. 75 | */ 76 | partition?: number | undefined; 77 | /** 78 | * Specifies the strategy for mapping a message to a stream partition. 79 | * 80 | * Can be either a string (see [[BuiltinPartitioners]]) or a custom implementation 81 | * of [[BasePartitioner]]. 82 | */ 83 | partitionStrategy?: keyof BuiltinPartitioners | PartitionerLike | undefined; 84 | } 85 | 86 | /** 87 | * Liftbridge message subclass. 88 | * 89 | * Includes some helpful methods to convert to/from native JSON objects 90 | * to Liftbridge gRPC objects. 
91 | * 92 | * @category Message 93 | */ 94 | export default class LiftbridgeMessage extends Message { 95 | /** 96 | * See [[IMessage.correlationId]]. 97 | */ 98 | public correlationId = hyperId().uuid; 99 | 100 | /** 101 | * See [[IMessage.partition]]. 102 | */ 103 | public partition: number | undefined = undefined; 104 | 105 | /** 106 | * See [[IMessage.partitionStrategy]]. 107 | */ 108 | public partitionStrategy: keyof BuiltinPartitioners | PartitionerLike | undefined = 'key'; 109 | 110 | /** 111 | * Creates a Message object that is a representation of the gRPC `Message` object. 112 | * @param message Message object. 113 | * @returns Message object (with some extra helpful methods). 114 | */ 115 | constructor(message: IMessage) { 116 | super(); 117 | if (message.subject) this.setSubject(message.subject); 118 | this.setValue(typeof message.value === 'string' ? Buffer.from(message.value) : message.value); 119 | 120 | if (message.key) { 121 | this.setKey(typeof message.key === 'string' ? 
Buffer.from(message.key) : message.key); 122 | } else { 123 | this.setKey(Buffer.from('')); 124 | } 125 | 126 | if (Object.prototype.hasOwnProperty.call(message, 'partition')) { 127 | this.partition = message.partition; 128 | this.partitionStrategy = undefined; 129 | } else { 130 | this.partitionStrategy = message.partitionStrategy; 131 | this.partition = undefined; 132 | } 133 | 134 | if (Object.prototype.hasOwnProperty.call(message, 'correlationId')) { 135 | this.setCorrelationid(message.correlationId as string); 136 | } else { 137 | this.setCorrelationid(this.correlationId); 138 | } 139 | 140 | if (Object.prototype.hasOwnProperty.call(message, 'ackPolicy')) { 141 | this.setAckpolicy(message.ackPolicy); 142 | } else { 143 | this.setAckpolicy(AckPolicy.NONE); 144 | } 145 | 146 | if (message.ackInbox) this.setAckinbox(message.ackInbox); 147 | 148 | if (message.headers) { 149 | const headerKeys = Object.keys(message.headers); 150 | if (headerKeys.length) { 151 | headerKeys.forEach(headerKey => { 152 | const headerValue = (message.headers)[headerKey] || ''; 153 | this.getHeadersMap().set(headerKey, Buffer.from(headerValue, 'utf8')); 154 | }); 155 | } 156 | } 157 | } 158 | 159 | /** 160 | * Create a serialized `Message` object with the Liftbridge envelope. 161 | * 162 | * ℹ️ Use only when you're talking directly to NATS systems. 163 | * 164 | * @returns Buffer of Protobuf message. 165 | */ 166 | public serializeMessage(): Buffer { 167 | const serializedMessage = this.serializeBinary(); 168 | let position = 0; 169 | const returnMessage = Buffer.from(envelopeMagicNumber, position, envelopeMagicNumberLength + 4 + serializedMessage.length); 170 | position += envelopeMagicNumberLength; 171 | returnMessage.write(envelopeProtoV0.toString(), position); 172 | position += 1; 173 | returnMessage.write('0x00', position); // Flags. 174 | position += 1; 175 | returnMessage.write('0x00', position); // Reserved for future use. 
176 | position += 1; 177 | if (position !== envelopeMinHeaderLength) { 178 | // TODO: Throw https://github.com/liftbridge-io/go-liftbridge/blob/6490950e9e465ae75f64feba3de3823a0f744528/message.go#L523 179 | } 180 | returnMessage.write(serializedMessage.toString(), position); 181 | return returnMessage; 182 | } 183 | 184 | /** 185 | * Returns a deserialized `Message` in JSON form. 186 | * 187 | * Buffers & byte-arrays are deserialized to UTF-8 strings, 188 | * and maps of the form `[[k1, v1], [k2, v2]]` are converted to 189 | * objects of the form `{ k1: v1, k2, v2 }`. 190 | * 191 | * @param message Liftbridge `Message` to deserialize. 192 | * @returns JSON form of given `Message` as [[IMessage]]. 193 | */ 194 | public static toJSON(message: Message): IMessage { 195 | const rawObject = message.toObject(); 196 | const { headersMap, ...messageWithoutHeadersMap } = rawObject; 197 | return { 198 | ...messageWithoutHeadersMap, 199 | key: Buffer.from(rawObject.key.toString(), 'base64').toString('utf8'), 200 | value: Buffer.from(rawObject.value.toString(), 'base64').toString('utf8'), 201 | headers: rawObject.headersMap.reduce((k, v) => { 202 | const nk = { ...k }; 203 | nk[v[0]] = typeof v[1] === 'string' ? v[1] : Buffer.from(v[1]).toString('utf8'); 204 | return nk; 205 | }, {}), 206 | }; 207 | } 208 | } 209 | 210 | export { AckPolicy } from '../grpc/generated/api_pb'; 211 | -------------------------------------------------------------------------------- /src/errors.ts: -------------------------------------------------------------------------------- 1 | /* istanbul ignore file */ 2 | /* eslint max-classes-per-file: 0 */ 3 | /** 4 | * Custom errors. 5 | */ 6 | 7 | /** 8 | * Error code enums. 
9 | */ 10 | 11 | /** 12 | * @hidden 13 | */ 14 | enum ConnectionErrorCodes { 15 | ERR_NO_ADDRESSES = 'ERR_NO_ADDRESSES', 16 | ERR_COULD_NOT_CONNECT = 'ERR_COULD_NOT_CONNECT', 17 | ERR_DEADLINE_EXCEEDED = 'ERR_DEADLINE_EXCEEDED', 18 | } 19 | 20 | /** 21 | * @hidden 22 | */ 23 | enum CreateStreamErrorCodes { 24 | ERR_PARTITION_ALREADY_EXISTS = 'ERR_PARTITION_ALREADY_EXISTS', 25 | ERR_INVALID_PARTITIONS = 'ERR_INVALID_PARTITIONS', 26 | } 27 | 28 | /** 29 | * @hidden 30 | */ 31 | enum SubscribeErrorCodes { 32 | ERR_PARTITION_DOES_NOT_EXIST = 'ERR_PARTITION_DOES_NOT_EXIST', 33 | ERR_OFFSET_NOT_SPECIFIED = 'ERR_OFFSET_NOT_SPECIFIED', 34 | ERR_TIMESTAMP_NOT_SPECIFIED = 'ERR_TIMESTAMP_NOT_SPECIFIED', 35 | } 36 | 37 | /** 38 | * @hidden 39 | */ 40 | enum MetadataErrorCodes { 41 | ERR_STREAM_NOT_FOUND_IN_METADATA = 'ERR_STREAM_NOT_FOUND_IN_METADATA', 42 | ERR_SUBJECT_NOT_FOUND_IN_METADATA = 'ERR_SUBJECT_NOT_FOUND_IN_METADATA', 43 | ERR_NO_KNOWN_PARTITION = 'ERR_NO_KNOWN_PARTITION', 44 | ERR_NO_KNOWN_LEADER_FOR_PARTITION = 'ERR_NO_KNOWN_LEADER_FOR_PARTITION', 45 | } 46 | 47 | /** 48 | * @hidden 49 | */ 50 | enum MessageErrorCodes { 51 | ERR_MESSAGE_MISSING_ENVELOPE_HEADER = 'ERR_MESSAGE_MISSING_ENVELOPE_HEADER', 52 | ERR_MESSAGE_UNEXPECTED_ENVELOPE_MAGIC_NUMBER = 'ERR_MESSAGE_UNEXPECTED_ENVELOPE_MAGIC_NUMBER', 53 | ERR_MESSAGE_UNKNOWN_ENVELOPE_PROTOCOL = 'ERR_MESSAGE_UNKNOWN_ENVELOPE_PROTOCOL', 54 | } 55 | 56 | /** 57 | * Liftbridge error codes. 58 | * 59 | * All errors include a `code` field that will include a unique 60 | * code for the error which can be handled gracefully. 61 | * 62 | * @example Handling a custom error. 63 | * ```typescript 64 | * import LiftbridgeClient from 'liftbridge'; 65 | * import { ErrorCodes } from 'liftbridge/errors'; 66 | * 67 | * try { 68 | * const client = new LiftbridgeClient([]); 69 | * } catch (err) { 70 | * if (err.code === ErrorCodes.ERR_NO_ADDRESSES) { 71 | * // NoAddressesError thrown. Now handle this. 
72 | * } 73 | * } 74 | * ``` 75 | * 76 | * @category Error 77 | */ 78 | export const ErrorCodes = { 79 | ...ConnectionErrorCodes, 80 | ...CreateStreamErrorCodes, 81 | ...SubscribeErrorCodes, 82 | ...MetadataErrorCodes, 83 | ...MessageErrorCodes, 84 | }; 85 | 86 | /** 87 | * Base Error classes. 88 | */ 89 | 90 | /** 91 | * Connection Errors. 92 | * Master class for all errors from connectivity to the Liftbridge cluster. 93 | * @category Error 94 | */ 95 | class ConnectionError extends Error { 96 | constructor(public message: string = 'Unexpected error while connecting to Liftbridge server(s)', public code?: string) { 97 | super(); 98 | Object.setPrototypeOf(this, new.target.prototype); 99 | this.name = 'ConnectionError'; 100 | this.stack = new Error(message).stack; 101 | return this; 102 | } 103 | } 104 | 105 | /** 106 | * CreateStream Errors. 107 | * Master class for all errors from creating a Liftbridge stream. 108 | * @category Error 109 | */ 110 | class CreateStreamError extends Error { 111 | constructor(public message: string = 'Unexpected error while creating Liftbridge stream', public code?: string) { 112 | super(); 113 | Object.setPrototypeOf(this, new.target.prototype); 114 | this.name = 'CreateStreamError'; 115 | this.stack = new Error(message).stack; 116 | return this; 117 | } 118 | } 119 | 120 | /** 121 | * Subscribe Errors. 122 | * Master class for all errors from subscribing to subjects on a Liftbridge cluster. 123 | * @category Error 124 | */ 125 | class SubscribeError extends Error { 126 | constructor(public message: string = 'Unexpected error while subscribing to stream', public code?: string) { 127 | super(); 128 | Object.setPrototypeOf(this, new.target.prototype); 129 | this.name = 'SubscribeError'; 130 | this.stack = new Error(message).stack; 131 | return this; 132 | } 133 | } 134 | 135 | /** 136 | * Metadata Errors. 137 | * Master class for all errors from fetching stream/partition metadata from a Liftbridge cluster. 
138 | * @category Error 139 | */ 140 | class MetadataError extends Error { 141 | constructor(public message: string = 'Unexpected error while fetching metadata for Liftbridge stream', public code?: string) { 142 | super(); 143 | Object.setPrototypeOf(this, new.target.prototype); 144 | this.name = 'MetadataError'; 145 | this.stack = new Error(message).stack; 146 | return this; 147 | } 148 | } 149 | 150 | /** 151 | * Message Errors. 152 | * Master class for all errors from reading messages from a Liftbridge subject. 153 | * @category Error 154 | */ 155 | class MessageError extends Error { 156 | constructor(public message: string = 'Unexpected error while reading message from Liftbridge subject', public code?: string) { 157 | super(); 158 | Object.setPrototypeOf(this, new.target.prototype); 159 | this.name = 'MessageError'; 160 | this.stack = new Error(message).stack; 161 | return this; 162 | } 163 | } 164 | 165 | /** 166 | * Error classes. 167 | */ 168 | 169 | /** 170 | * @category Error 171 | */ 172 | export class NoAddressesError extends ConnectionError { 173 | name = 'NoAddressesError'; 174 | 175 | message = 'No cluster addresses to connect to!'; 176 | 177 | code = ConnectionErrorCodes.ERR_NO_ADDRESSES; 178 | } 179 | 180 | /** 181 | * @category Error 182 | */ 183 | export class CouldNotConnectToAnyServerError extends ConnectionError { 184 | name = 'CouldNotConnectToAnyServerError'; 185 | 186 | message = 'Could not connect to any of the given addresses!'; 187 | 188 | code = ConnectionErrorCodes.ERR_COULD_NOT_CONNECT; 189 | } 190 | 191 | /** 192 | * @category Error 193 | */ 194 | export class DeadlineExceededError extends ConnectionError { 195 | name = 'DeadlineExceededError'; 196 | 197 | message = 'Could not get back a response within the deadline!'; 198 | 199 | code = ConnectionErrorCodes.ERR_DEADLINE_EXCEEDED; 200 | } 201 | 202 | /** 203 | * @category Error 204 | */ 205 | export class PartitionAlreadyExistsError extends CreateStreamError { 206 | name = 
'PartitionAlreadyExistsError'; 207 | 208 | message = 'Partition already exists!'; 209 | 210 | code = CreateStreamErrorCodes.ERR_PARTITION_ALREADY_EXISTS; 211 | } 212 | 213 | /** 214 | * @category Error 215 | */ 216 | export class InvalidPartitionsError extends CreateStreamError { 217 | name = 'InvalidPartitionsError'; 218 | 219 | message = 'Invalid number of stream partitions! Partitions should be equal to or greater than zero.'; 220 | 221 | code = CreateStreamErrorCodes.ERR_INVALID_PARTITIONS; 222 | } 223 | 224 | /** 225 | * @category Error 226 | */ 227 | export class NoSuchPartitionError extends SubscribeError { 228 | name = 'NoSuchPartitionrror'; 229 | 230 | message = 'No such partition exists!'; 231 | 232 | code = SubscribeErrorCodes.ERR_PARTITION_DOES_NOT_EXIST; 233 | } 234 | 235 | /** 236 | * @category Error 237 | */ 238 | export class OffsetNotSpecifiedError extends SubscribeError { 239 | name = 'OffsetNotSpecifiedError'; 240 | 241 | message = 'Offset must be specified when startPosition is set to OFFSET!'; 242 | 243 | code = SubscribeErrorCodes.ERR_OFFSET_NOT_SPECIFIED; 244 | } 245 | 246 | /** 247 | * @category Error 248 | */ 249 | export class TimestampNotSpecifiedError extends SubscribeError { 250 | name = 'TimestampNotSpecifiedError'; 251 | 252 | message = 'Start timestamp must be specified when startPosition is set to TIMESTAMP!'; 253 | 254 | code = SubscribeErrorCodes.ERR_TIMESTAMP_NOT_SPECIFIED; 255 | } 256 | 257 | /** 258 | * @category Error 259 | */ 260 | export class StreamNotFoundInMetadataError extends MetadataError { 261 | name = 'StreamNotFoundInMetadataError'; 262 | 263 | message = 'No matching stream found in metadata!'; 264 | 265 | code = MetadataErrorCodes.ERR_STREAM_NOT_FOUND_IN_METADATA; 266 | } 267 | 268 | /** 269 | * @category Error 270 | */ 271 | export class SubjectNotFoundInMetadataError extends MetadataError { 272 | name = 'SubjectNotFoundInMetadataError'; 273 | 274 | message = 'No matching subject found in metadata!'; 275 | 276 | 
code = MetadataErrorCodes.ERR_SUBJECT_NOT_FOUND_IN_METADATA; 277 | } 278 | 279 | /** 280 | * @category Error 281 | */ 282 | export class NoKnownPartitionError extends MetadataError { 283 | name = 'NoKnownPartitionError'; 284 | 285 | message = 'No known partitions in metadata!'; 286 | 287 | code = MetadataErrorCodes.ERR_NO_KNOWN_PARTITION; 288 | } 289 | 290 | /** 291 | * @category Error 292 | */ 293 | export class NoKnownLeaderForPartitionError extends MetadataError { 294 | name = 'NoKnownLeaderForPartitionError'; 295 | 296 | message = 'No known leader for partition!'; 297 | 298 | code = MetadataErrorCodes.ERR_NO_KNOWN_LEADER_FOR_PARTITION; 299 | } 300 | 301 | /** 302 | * @category Error 303 | */ 304 | export class MissingEnvelopeHeaderError extends MessageError { 305 | name = 'MissingEnvelopeHeaderError'; 306 | 307 | message = 'Data missing envelope header!'; 308 | 309 | code = MessageErrorCodes.ERR_MESSAGE_MISSING_ENVELOPE_HEADER; 310 | } 311 | 312 | export class UnexpectedEnvelopeMagicNumberError extends MessageError { 313 | name = 'UnexpectedEnvelopeMagicNumberError'; 314 | 315 | message = 'Unexpected envelope magic number!'; 316 | 317 | code = MessageErrorCodes.ERR_MESSAGE_UNEXPECTED_ENVELOPE_MAGIC_NUMBER; 318 | } 319 | 320 | export class UnknownEnvelopeProtocolError extends MessageError { 321 | name = 'UnknownEnvelopeProtocolError'; 322 | 323 | message = 'Unknown envelope protocol!'; 324 | 325 | code = MessageErrorCodes.ERR_MESSAGE_UNEXPECTED_ENVELOPE_MAGIC_NUMBER; 326 | } 327 | -------------------------------------------------------------------------------- /test/fixtures/partition/metadata_5_partitions.json: -------------------------------------------------------------------------------- 1 | { 2 | "brokers": { 3 | "dTmEw414ooPUMS8hu1kW03": { 4 | "id": "dTmEw414ooPUMS8hu1kW03", 5 | "host": "", 6 | "port": 9292 7 | } 8 | }, 9 | "addresses": {}, 10 | "streams": { 11 | "byName": { 12 | "test-stream-1": { 13 | "subject": "test-subject-1", 14 | "name": 
"test-stream-1", 15 | "partitions": [ 16 | { 17 | "id": 0, 18 | "leader": { 19 | "id": "dTmEw414ooPUMS8hu1kW03", 20 | "host": "", 21 | "port": 9292 22 | }, 23 | "replicas": [ 24 | { 25 | "id": "dTmEw414ooPUMS8hu1kW03", 26 | "host": "", 27 | "port": 9292 28 | } 29 | ], 30 | "isr": [ 31 | { 32 | "id": "dTmEw414ooPUMS8hu1kW03", 33 | "host": "", 34 | "port": 9292 35 | } 36 | ] 37 | }, 38 | { 39 | "id": 1, 40 | "leader": { 41 | "id": "dTmEw414ooPUMS8hu1kW03", 42 | "host": "", 43 | "port": 9292 44 | }, 45 | "replicas": [ 46 | { 47 | "id": "dTmEw414ooPUMS8hu1kW03", 48 | "host": "", 49 | "port": 9292 50 | } 51 | ], 52 | "isr": [ 53 | { 54 | "id": "dTmEw414ooPUMS8hu1kW03", 55 | "host": "", 56 | "port": 9292 57 | } 58 | ] 59 | }, 60 | { 61 | "id": 2, 62 | "leader": { 63 | "id": "dTmEw414ooPUMS8hu1kW03", 64 | "host": "", 65 | "port": 9292 66 | }, 67 | "replicas": [ 68 | { 69 | "id": "dTmEw414ooPUMS8hu1kW03", 70 | "host": "", 71 | "port": 9292 72 | } 73 | ], 74 | "isr": [ 75 | { 76 | "id": "dTmEw414ooPUMS8hu1kW03", 77 | "host": "", 78 | "port": 9292 79 | } 80 | ] 81 | }, 82 | { 83 | "id": 3, 84 | "leader": { 85 | "id": "dTmEw414ooPUMS8hu1kW03", 86 | "host": "", 87 | "port": 9292 88 | }, 89 | "replicas": [ 90 | { 91 | "id": "dTmEw414ooPUMS8hu1kW03", 92 | "host": "", 93 | "port": 9292 94 | } 95 | ], 96 | "isr": [ 97 | { 98 | "id": "dTmEw414ooPUMS8hu1kW03", 99 | "host": "", 100 | "port": 9292 101 | } 102 | ] 103 | }, 104 | { 105 | "id": 4, 106 | "leader": { 107 | "id": "dTmEw414ooPUMS8hu1kW03", 108 | "host": "", 109 | "port": 9292 110 | }, 111 | "replicas": [ 112 | { 113 | "id": "dTmEw414ooPUMS8hu1kW03", 114 | "host": "", 115 | "port": 9292 116 | } 117 | ], 118 | "isr": [ 119 | { 120 | "id": "dTmEw414ooPUMS8hu1kW03", 121 | "host": "", 122 | "port": 9292 123 | } 124 | ] 125 | } 126 | ] 127 | } 128 | }, 129 | "bySubject": { 130 | "test-subject-1": { 131 | "subject": "test-subject-1", 132 | "name": "test-stream-1", 133 | "partitions": [ 134 | { 135 | "id": 0, 136 | "leader": { 137 | 
"id": "dTmEw414ooPUMS8hu1kW03", 138 | "host": "", 139 | "port": 9292 140 | }, 141 | "replicas": [ 142 | { 143 | "id": "dTmEw414ooPUMS8hu1kW03", 144 | "host": "", 145 | "port": 9292 146 | } 147 | ], 148 | "isr": [ 149 | { 150 | "id": "dTmEw414ooPUMS8hu1kW03", 151 | "host": "", 152 | "port": 9292 153 | } 154 | ] 155 | }, 156 | { 157 | "id": 1, 158 | "leader": { 159 | "id": "dTmEw414ooPUMS8hu1kW03", 160 | "host": "", 161 | "port": 9292 162 | }, 163 | "replicas": [ 164 | { 165 | "id": "dTmEw414ooPUMS8hu1kW03", 166 | "host": "", 167 | "port": 9292 168 | } 169 | ], 170 | "isr": [ 171 | { 172 | "id": "dTmEw414ooPUMS8hu1kW03", 173 | "host": "", 174 | "port": 9292 175 | } 176 | ] 177 | }, 178 | { 179 | "id": 2, 180 | "leader": { 181 | "id": "dTmEw414ooPUMS8hu1kW03", 182 | "host": "", 183 | "port": 9292 184 | }, 185 | "replicas": [ 186 | { 187 | "id": "dTmEw414ooPUMS8hu1kW03", 188 | "host": "", 189 | "port": 9292 190 | } 191 | ], 192 | "isr": [ 193 | { 194 | "id": "dTmEw414ooPUMS8hu1kW03", 195 | "host": "", 196 | "port": 9292 197 | } 198 | ] 199 | }, 200 | { 201 | "id": 3, 202 | "leader": { 203 | "id": "dTmEw414ooPUMS8hu1kW03", 204 | "host": "", 205 | "port": 9292 206 | }, 207 | "replicas": [ 208 | { 209 | "id": "dTmEw414ooPUMS8hu1kW03", 210 | "host": "", 211 | "port": 9292 212 | } 213 | ], 214 | "isr": [ 215 | { 216 | "id": "dTmEw414ooPUMS8hu1kW03", 217 | "host": "", 218 | "port": 9292 219 | } 220 | ] 221 | }, 222 | { 223 | "id": 4, 224 | "leader": { 225 | "id": "dTmEw414ooPUMS8hu1kW03", 226 | "host": "", 227 | "port": 9292 228 | }, 229 | "replicas": [ 230 | { 231 | "id": "dTmEw414ooPUMS8hu1kW03", 232 | "host": "", 233 | "port": 9292 234 | } 235 | ], 236 | "isr": [ 237 | { 238 | "id": "dTmEw414ooPUMS8hu1kW03", 239 | "host": "", 240 | "port": 9292 241 | } 242 | ] 243 | } 244 | ] 245 | } 246 | } 247 | }, 248 | "lastUpdated": "2019-09-17T04:35:49.214Z" 249 | } -------------------------------------------------------------------------------- /src/metadata.ts: 
-------------------------------------------------------------------------------- 1 | import Debug from 'debug'; 2 | import { ServiceError } from 'grpc'; 3 | import { APIClient } from '../grpc/generated/api_grpc_pb'; 4 | import { FetchMetadataRequest, FetchMetadataResponse } from '../grpc/generated/api_pb'; 5 | import { 6 | NoSuchPartitionError, NoKnownPartitionError, NoKnownLeaderForPartitionError, SubjectNotFoundInMetadataError, 7 | } from './errors'; 8 | import { faultTolerantCall, constructAddress } from './utils'; 9 | 10 | const debug = Debug.debug('node-liftbridge:metadata'); 11 | 12 | const DEFAULTS = { // TODO: look at how to expose this. 13 | metadataUpdateRetryConfig: { 14 | numOfAttempts: 15, 15 | startingDelay: 200, 16 | }, 17 | waitForSubjectMetadataUntil: 30000, 18 | hostname: '127.0.0.1', 19 | }; 20 | 21 | /** 22 | * Metadata interface. 23 | * 24 | * @category Metadata 25 | */ 26 | export interface IMetadata { 27 | /** 28 | * List of brokers and their information. 29 | */ 30 | brokers: { 31 | [brokerId: string]: IBrokerInfo, 32 | }; 33 | /** 34 | * List of addresses. 35 | */ 36 | addresses: { 37 | [address: string]: object, 38 | }; 39 | /** 40 | * Stream metadata by name and subject. 41 | */ 42 | streams: IStreamIndex; 43 | /** 44 | * Last updated timestamp. 45 | */ 46 | lastUpdated: Date; 47 | } 48 | 49 | /** 50 | * Stream information interface. 51 | * 52 | * @category Metadata 53 | */ 54 | interface IStreamInfo { 55 | /** 56 | * Stream subject. 57 | */ 58 | subject: string; 59 | /** 60 | * Stream name. 61 | */ 62 | name: string; 63 | /** 64 | * Partition IDs and their information. 65 | */ 66 | partitions: { 67 | [partitionId: number]: IPartitionInfo, 68 | }; 69 | } 70 | 71 | /** 72 | * Stream index interface. 73 | * Holds all the streams by name and subject for easy lookups. 74 | * 75 | * @category Metadata 76 | */ 77 | interface IStreamIndex { 78 | /** 79 | * Stream information by name. 
80 | */ 81 | byName: { 82 | [name: string]: IStreamInfo, 83 | }; 84 | /** 85 | * Stream information by subject. 86 | */ 87 | bySubject: { 88 | [subject: string]: IStreamInfo, 89 | }; 90 | } 91 | 92 | /** 93 | * Partition information interface. 94 | * 95 | * @category Metadata 96 | */ 97 | interface IPartitionInfo { 98 | /** 99 | * Partition ID. 100 | */ 101 | id: number; 102 | /** 103 | * Partition leader. 104 | */ 105 | leader: IBrokerInfo; 106 | /** 107 | * Partition replicas. 108 | */ 109 | replicas: IBrokerInfo[]; 110 | /** 111 | * Partition's in-sync replica(s). 112 | */ 113 | isr: IBrokerInfo[]; 114 | } 115 | 116 | /** 117 | * Broker information interface. 118 | * 119 | * @category Metadata 120 | */ 121 | interface IBrokerInfo { 122 | /** 123 | * Unique broker ID. 124 | */ 125 | id: string; 126 | /** 127 | * Broker hostname. 128 | */ 129 | host: string; 130 | /** 131 | * Broker port. 132 | */ 133 | port: number; 134 | } 135 | 136 | /** 137 | * Liftbridge stream & partition metadata. 138 | * 139 | * Includes useful methods to fetch/refresh Liftbridge metadata and convert 140 | * them into usable JSON objects. 141 | * 142 | * @category Metadata 143 | */ 144 | export default class LiftbridgeMetadata { 145 | private readonly client: APIClient; 146 | 147 | private metadata: IMetadata; 148 | 149 | /** 150 | * Metadata class. 151 | * 152 | * Holds all metadata of brokers, streams & partitions. 153 | * @param client Liftbridge client instance. 154 | * @param metadataResponse `MetadataResponse` gRPC object. 155 | */ 156 | constructor(client: APIClient, metadataResponse: FetchMetadataResponse = new FetchMetadataResponse()) { 157 | this.client = client; 158 | this.metadata = LiftbridgeMetadata.build(metadataResponse); 159 | } 160 | 161 | // Turn the MetadataResponse into a neatly readable and parse-able native JSON object. 
162 | private static build(metadataResponse: FetchMetadataResponse): IMetadata { 163 | const latestMetadata: IMetadata = { 164 | brokers: {}, 165 | addresses: {}, 166 | streams: { 167 | byName: {}, 168 | bySubject: {}, 169 | }, 170 | lastUpdated: new Date(), 171 | }; 172 | const brokersList = metadataResponse.getBrokersList().map(_ => _.toObject()); 173 | const metadataList = metadataResponse.getMetadataList().map(_ => _.toObject()); 174 | brokersList.forEach(broker => { 175 | latestMetadata.brokers[broker.id] = { 176 | id: broker.id, 177 | host: broker.host || DEFAULTS.hostname, 178 | port: broker.port, 179 | }; 180 | }); 181 | 182 | let partitions: IPartitionInfo[] = []; 183 | metadataList.forEach(meta => { 184 | meta.partitionsMap.forEach(_partitionMap => { 185 | const thisPartition = _partitionMap[1]; 186 | partitions.push({ 187 | id: thisPartition.id, 188 | leader: latestMetadata.brokers[thisPartition.leader], 189 | replicas: thisPartition.replicasList.map(_ => latestMetadata.brokers[_]), 190 | isr: thisPartition.isrList.map(_ => latestMetadata.brokers[_]), 191 | }); 192 | }); 193 | const streamInfo: IStreamInfo = { 194 | subject: meta.subject, 195 | name: meta.name, 196 | partitions, 197 | }; 198 | latestMetadata.streams.byName[meta.name] = streamInfo; 199 | latestMetadata.streams.bySubject[meta.subject] = streamInfo; 200 | latestMetadata.lastUpdated = new Date(); 201 | partitions = []; 202 | }); 203 | // TODO: figure out how to implement newMetadata.addresses 204 | return latestMetadata; 205 | } 206 | 207 | private fetchMetadata(streams?: string[]): Promise { 208 | return new Promise((resolve, reject) => { 209 | const metadataRequest = new FetchMetadataRequest(); 210 | if (streams && streams.length) { 211 | streams.forEach(metadataRequest.addStreams); 212 | } 213 | this.client.fetchMetadata(metadataRequest, (err: ServiceError | null, response: FetchMetadataResponse | undefined) => { 214 | if (err) return reject(err); 215 | return resolve(response); 216 | }); 
217 | }); 218 | } 219 | 220 | // Wait for subject metadata to appear until `DEFAULTS.waitForSubjectMetadataUntil`. 221 | private async waitForSubjectMetadata(subject: string): Promise { 222 | // eslint-disable-next-line no-async-promise-executor 223 | return new Promise(async (resolve, reject) => { 224 | if (this.hasSubjectMetadata(subject)) return resolve(this.metadata.streams.bySubject[subject]); 225 | debug('metadata not found for subject', subject, 'so going to wait'); 226 | const start = process.hrtime(); 227 | let wait = true; 228 | const waiter = setTimeout(() => { 229 | wait = false; 230 | }, DEFAULTS.waitForSubjectMetadataUntil); 231 | while (wait) { 232 | // eslint-disable-next-line no-await-in-loop 233 | const metadata = await this.update(); // Keep updating and then checking for subject metadata to appear. 234 | if (this.hasSubjectMetadata(subject)) { 235 | const end = process.hrtime(start); 236 | const ms = (end[0] * 1e9 + end[1]) / 1e6; 237 | debug('metadata for subject', subject, 'found after', ms, 'milliseconds'); 238 | waiter.unref(); 239 | return resolve(metadata.streams.bySubject[subject]); 240 | } 241 | } 242 | return reject(new SubjectNotFoundInMetadataError()); 243 | }); 244 | } 245 | 246 | /** 247 | * Returns a map containing stream names and the number 248 | * of partitions for the stream. This does not match on 249 | * wildcard subjects, e.g. "foo.*". 250 | * 251 | * @param subject Subject to fetch partitions count for. 252 | * @returns total partitions for the subject. 
253 | */ 254 | public async getPartitionsCountForSubject(subject: string): Promise { 255 | const subjectMeta = this.metadata.streams.bySubject[subject]; 256 | if (!subjectMeta) { 257 | const freshSubjectMeta = await this.waitForSubjectMetadata(subject); 258 | return Object.keys(freshSubjectMeta.partitions).length; 259 | } 260 | return Object.keys(subjectMeta.partitions).length; 261 | } 262 | 263 | /** 264 | * Indicates if the Metadata has info for at 265 | * least one stream with the given subject. 266 | * 267 | * @param subject Subject to check metadata for. 268 | * @returns `true` if metadata was found for subject, or `false` otherwise. 269 | */ 270 | public hasSubjectMetadata(subject: string): boolean { 271 | return !!this.metadata.streams.bySubject[subject]; 272 | } 273 | 274 | /** 275 | * Fetches the latest cluster metadata, including stream 276 | * and broker information. Also updates the local copy of metadata. 277 | * 278 | * @param streams Stream(s) to fetch metadata for. 279 | * @returns Metadata. 280 | */ 281 | public async update(streams: string | string[] = []): Promise { 282 | const streamsToUpdate = (typeof streams === 'string') ? [streams] : streams; 283 | const metadataResponse = await faultTolerantCall(() => this.fetchMetadata(streamsToUpdate), DEFAULTS.metadataUpdateRetryConfig); 284 | this.metadata = LiftbridgeMetadata.build(metadataResponse); 285 | return this.metadata; 286 | } 287 | 288 | /** 289 | * Returns the cluster metadata. 290 | * 291 | * @returns Metadata. 292 | */ 293 | public get(): IMetadata { 294 | return this.metadata; 295 | } 296 | 297 | /** 298 | * Returns the broker address for the given stream partition. 299 | * 300 | * @param stream Stream. 301 | * @param partition Stream partition. 302 | * @returns Broker address. 
303 | */ 304 | public getAddress(stream: string, partition: number): string { 305 | const streamMetadata = this.metadata.streams.byName[stream]; 306 | if (!streamMetadata) throw new NoSuchPartitionError(); 307 | const partitionMetadata = streamMetadata.partitions[partition]; 308 | if (!partitionMetadata) throw new NoKnownPartitionError(); 309 | const { leader } = partitionMetadata; 310 | if (!leader) throw new NoKnownLeaderForPartitionError(); 311 | return constructAddress(leader.host, leader.port); 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /grpc/generated/api_pb.d.ts: -------------------------------------------------------------------------------- 1 | // package: proto 2 | // file: api.proto 3 | 4 | import * as jspb from "google-protobuf"; 5 | 6 | export class CreateStreamRequest extends jspb.Message { 7 | getSubject(): string; 8 | setSubject(value: string): void; 9 | 10 | getName(): string; 11 | setName(value: string): void; 12 | 13 | getGroup(): string; 14 | setGroup(value: string): void; 15 | 16 | getReplicationfactor(): number; 17 | setReplicationfactor(value: number): void; 18 | 19 | getPartitions(): number; 20 | setPartitions(value: number): void; 21 | 22 | serializeBinary(): Uint8Array; 23 | toObject(includeInstance?: boolean): CreateStreamRequest.AsObject; 24 | static toObject(includeInstance: boolean, msg: CreateStreamRequest): CreateStreamRequest.AsObject; 25 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 26 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 27 | static serializeBinaryToWriter(message: CreateStreamRequest, writer: jspb.BinaryWriter): void; 28 | static deserializeBinary(bytes: Uint8Array): CreateStreamRequest; 29 | static deserializeBinaryFromReader(message: CreateStreamRequest, reader: jspb.BinaryReader): CreateStreamRequest; 30 | } 31 | 32 | export namespace CreateStreamRequest { 33 | export type AsObject = { 34 | subject: string, 35 | 
name: string, 36 | group: string, 37 | replicationfactor: number, 38 | partitions: number, 39 | } 40 | } 41 | 42 | export class CreateStreamResponse extends jspb.Message { 43 | serializeBinary(): Uint8Array; 44 | toObject(includeInstance?: boolean): CreateStreamResponse.AsObject; 45 | static toObject(includeInstance: boolean, msg: CreateStreamResponse): CreateStreamResponse.AsObject; 46 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 47 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 48 | static serializeBinaryToWriter(message: CreateStreamResponse, writer: jspb.BinaryWriter): void; 49 | static deserializeBinary(bytes: Uint8Array): CreateStreamResponse; 50 | static deserializeBinaryFromReader(message: CreateStreamResponse, reader: jspb.BinaryReader): CreateStreamResponse; 51 | } 52 | 53 | export namespace CreateStreamResponse { 54 | export type AsObject = { 55 | } 56 | } 57 | 58 | export class SubscribeRequest extends jspb.Message { 59 | getStream(): string; 60 | setStream(value: string): void; 61 | 62 | getPartition(): number; 63 | setPartition(value: number): void; 64 | 65 | getStartposition(): StartPositionMap[keyof StartPositionMap]; 66 | setStartposition(value: StartPositionMap[keyof StartPositionMap]): void; 67 | 68 | getStartoffset(): string; 69 | setStartoffset(value: string): void; 70 | 71 | getStarttimestamp(): string; 72 | setStarttimestamp(value: string): void; 73 | 74 | serializeBinary(): Uint8Array; 75 | toObject(includeInstance?: boolean): SubscribeRequest.AsObject; 76 | static toObject(includeInstance: boolean, msg: SubscribeRequest): SubscribeRequest.AsObject; 77 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 78 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 79 | static serializeBinaryToWriter(message: SubscribeRequest, writer: jspb.BinaryWriter): void; 80 | static deserializeBinary(bytes: Uint8Array): SubscribeRequest; 81 | static 
deserializeBinaryFromReader(message: SubscribeRequest, reader: jspb.BinaryReader): SubscribeRequest; 82 | } 83 | 84 | export namespace SubscribeRequest { 85 | export type AsObject = { 86 | stream: string, 87 | partition: number, 88 | startposition: StartPositionMap[keyof StartPositionMap], 89 | startoffset: string, 90 | starttimestamp: string, 91 | } 92 | } 93 | 94 | export class FetchMetadataRequest extends jspb.Message { 95 | clearStreamsList(): void; 96 | getStreamsList(): Array; 97 | setStreamsList(value: Array): void; 98 | addStreams(value: string, index?: number): string; 99 | 100 | serializeBinary(): Uint8Array; 101 | toObject(includeInstance?: boolean): FetchMetadataRequest.AsObject; 102 | static toObject(includeInstance: boolean, msg: FetchMetadataRequest): FetchMetadataRequest.AsObject; 103 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 104 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 105 | static serializeBinaryToWriter(message: FetchMetadataRequest, writer: jspb.BinaryWriter): void; 106 | static deserializeBinary(bytes: Uint8Array): FetchMetadataRequest; 107 | static deserializeBinaryFromReader(message: FetchMetadataRequest, reader: jspb.BinaryReader): FetchMetadataRequest; 108 | } 109 | 110 | export namespace FetchMetadataRequest { 111 | export type AsObject = { 112 | streamsList: Array, 113 | } 114 | } 115 | 116 | export class FetchMetadataResponse extends jspb.Message { 117 | clearBrokersList(): void; 118 | getBrokersList(): Array; 119 | setBrokersList(value: Array): void; 120 | addBrokers(value?: Broker, index?: number): Broker; 121 | 122 | clearMetadataList(): void; 123 | getMetadataList(): Array; 124 | setMetadataList(value: Array): void; 125 | addMetadata(value?: StreamMetadata, index?: number): StreamMetadata; 126 | 127 | serializeBinary(): Uint8Array; 128 | toObject(includeInstance?: boolean): FetchMetadataResponse.AsObject; 129 | static toObject(includeInstance: boolean, msg: 
FetchMetadataResponse): FetchMetadataResponse.AsObject; 130 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 131 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 132 | static serializeBinaryToWriter(message: FetchMetadataResponse, writer: jspb.BinaryWriter): void; 133 | static deserializeBinary(bytes: Uint8Array): FetchMetadataResponse; 134 | static deserializeBinaryFromReader(message: FetchMetadataResponse, reader: jspb.BinaryReader): FetchMetadataResponse; 135 | } 136 | 137 | export namespace FetchMetadataResponse { 138 | export type AsObject = { 139 | brokersList: Array, 140 | metadataList: Array, 141 | } 142 | } 143 | 144 | export class PublishRequest extends jspb.Message { 145 | getKey(): Uint8Array | string; 146 | getKey_asU8(): Uint8Array; 147 | getKey_asB64(): string; 148 | setKey(value: Uint8Array | string): void; 149 | 150 | getValue(): Uint8Array | string; 151 | getValue_asU8(): Uint8Array; 152 | getValue_asB64(): string; 153 | setValue(value: Uint8Array | string): void; 154 | 155 | getStream(): string; 156 | setStream(value: string): void; 157 | 158 | getPartition(): number; 159 | setPartition(value: number): void; 160 | 161 | getSubject(): string; 162 | setSubject(value: string): void; 163 | 164 | getReplysubject(): string; 165 | setReplysubject(value: string): void; 166 | 167 | getHeadersMap(): jspb.Map; 168 | clearHeadersMap(): void; 169 | getAckinbox(): string; 170 | setAckinbox(value: string): void; 171 | 172 | getCorrelationid(): string; 173 | setCorrelationid(value: string): void; 174 | 175 | getAckpolicy(): AckPolicyMap[keyof AckPolicyMap]; 176 | setAckpolicy(value: AckPolicyMap[keyof AckPolicyMap]): void; 177 | 178 | serializeBinary(): Uint8Array; 179 | toObject(includeInstance?: boolean): PublishRequest.AsObject; 180 | static toObject(includeInstance: boolean, msg: PublishRequest): PublishRequest.AsObject; 181 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 182 | static extensionsBinary: 
{[key: number]: jspb.ExtensionFieldBinaryInfo}; 183 | static serializeBinaryToWriter(message: PublishRequest, writer: jspb.BinaryWriter): void; 184 | static deserializeBinary(bytes: Uint8Array): PublishRequest; 185 | static deserializeBinaryFromReader(message: PublishRequest, reader: jspb.BinaryReader): PublishRequest; 186 | } 187 | 188 | export namespace PublishRequest { 189 | export type AsObject = { 190 | key: Uint8Array | string, 191 | value: Uint8Array | string, 192 | stream: string, 193 | partition: number, 194 | subject: string, 195 | replysubject: string, 196 | headersMap: Array<[string, Uint8Array | string]>, 197 | ackinbox: string, 198 | correlationid: string, 199 | ackpolicy: AckPolicyMap[keyof AckPolicyMap], 200 | } 201 | } 202 | 203 | export class PublishResponse extends jspb.Message { 204 | hasAck(): boolean; 205 | clearAck(): void; 206 | getAck(): Ack | undefined; 207 | setAck(value?: Ack): void; 208 | 209 | serializeBinary(): Uint8Array; 210 | toObject(includeInstance?: boolean): PublishResponse.AsObject; 211 | static toObject(includeInstance: boolean, msg: PublishResponse): PublishResponse.AsObject; 212 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 213 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 214 | static serializeBinaryToWriter(message: PublishResponse, writer: jspb.BinaryWriter): void; 215 | static deserializeBinary(bytes: Uint8Array): PublishResponse; 216 | static deserializeBinaryFromReader(message: PublishResponse, reader: jspb.BinaryReader): PublishResponse; 217 | } 218 | 219 | export namespace PublishResponse { 220 | export type AsObject = { 221 | ack?: Ack.AsObject, 222 | } 223 | } 224 | 225 | export class Broker extends jspb.Message { 226 | getId(): string; 227 | setId(value: string): void; 228 | 229 | getHost(): string; 230 | setHost(value: string): void; 231 | 232 | getPort(): number; 233 | setPort(value: number): void; 234 | 235 | serializeBinary(): Uint8Array; 236 | 
toObject(includeInstance?: boolean): Broker.AsObject; 237 | static toObject(includeInstance: boolean, msg: Broker): Broker.AsObject; 238 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 239 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 240 | static serializeBinaryToWriter(message: Broker, writer: jspb.BinaryWriter): void; 241 | static deserializeBinary(bytes: Uint8Array): Broker; 242 | static deserializeBinaryFromReader(message: Broker, reader: jspb.BinaryReader): Broker; 243 | } 244 | 245 | export namespace Broker { 246 | export type AsObject = { 247 | id: string, 248 | host: string, 249 | port: number, 250 | } 251 | } 252 | 253 | export class StreamMetadata extends jspb.Message { 254 | getName(): string; 255 | setName(value: string): void; 256 | 257 | getSubject(): string; 258 | setSubject(value: string): void; 259 | 260 | getError(): StreamMetadata.ErrorMap[keyof StreamMetadata.ErrorMap]; 261 | setError(value: StreamMetadata.ErrorMap[keyof StreamMetadata.ErrorMap]): void; 262 | 263 | getPartitionsMap(): jspb.Map; 264 | clearPartitionsMap(): void; 265 | serializeBinary(): Uint8Array; 266 | toObject(includeInstance?: boolean): StreamMetadata.AsObject; 267 | static toObject(includeInstance: boolean, msg: StreamMetadata): StreamMetadata.AsObject; 268 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 269 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 270 | static serializeBinaryToWriter(message: StreamMetadata, writer: jspb.BinaryWriter): void; 271 | static deserializeBinary(bytes: Uint8Array): StreamMetadata; 272 | static deserializeBinaryFromReader(message: StreamMetadata, reader: jspb.BinaryReader): StreamMetadata; 273 | } 274 | 275 | export namespace StreamMetadata { 276 | export type AsObject = { 277 | name: string, 278 | subject: string, 279 | error: StreamMetadata.ErrorMap[keyof StreamMetadata.ErrorMap], 280 | partitionsMap: Array<[number, PartitionMetadata.AsObject]>, 281 | } 
282 | 283 | export interface ErrorMap { 284 | OK: 0; 285 | UNKNOWN_STREAM: 1; 286 | } 287 | 288 | export const Error: ErrorMap; 289 | } 290 | 291 | export class PartitionMetadata extends jspb.Message { 292 | getId(): number; 293 | setId(value: number): void; 294 | 295 | getLeader(): string; 296 | setLeader(value: string): void; 297 | 298 | clearReplicasList(): void; 299 | getReplicasList(): Array; 300 | setReplicasList(value: Array): void; 301 | addReplicas(value: string, index?: number): string; 302 | 303 | clearIsrList(): void; 304 | getIsrList(): Array; 305 | setIsrList(value: Array): void; 306 | addIsr(value: string, index?: number): string; 307 | 308 | serializeBinary(): Uint8Array; 309 | toObject(includeInstance?: boolean): PartitionMetadata.AsObject; 310 | static toObject(includeInstance: boolean, msg: PartitionMetadata): PartitionMetadata.AsObject; 311 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 312 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 313 | static serializeBinaryToWriter(message: PartitionMetadata, writer: jspb.BinaryWriter): void; 314 | static deserializeBinary(bytes: Uint8Array): PartitionMetadata; 315 | static deserializeBinaryFromReader(message: PartitionMetadata, reader: jspb.BinaryReader): PartitionMetadata; 316 | } 317 | 318 | export namespace PartitionMetadata { 319 | export type AsObject = { 320 | id: number, 321 | leader: string, 322 | replicasList: Array, 323 | isrList: Array, 324 | } 325 | } 326 | 327 | export class Message extends jspb.Message { 328 | getOffset(): string; 329 | setOffset(value: string): void; 330 | 331 | getKey(): Uint8Array | string; 332 | getKey_asU8(): Uint8Array; 333 | getKey_asB64(): string; 334 | setKey(value: Uint8Array | string): void; 335 | 336 | getValue(): Uint8Array | string; 337 | getValue_asU8(): Uint8Array; 338 | getValue_asB64(): string; 339 | setValue(value: Uint8Array | string): void; 340 | 341 | getTimestamp(): string; 342 | setTimestamp(value: 
string): void; 343 | 344 | getStream(): string; 345 | setStream(value: string): void; 346 | 347 | getPartition(): number; 348 | setPartition(value: number): void; 349 | 350 | getSubject(): string; 351 | setSubject(value: string): void; 352 | 353 | getReplysubject(): string; 354 | setReplysubject(value: string): void; 355 | 356 | getHeadersMap(): jspb.Map; 357 | clearHeadersMap(): void; 358 | getAckinbox(): string; 359 | setAckinbox(value: string): void; 360 | 361 | getCorrelationid(): string; 362 | setCorrelationid(value: string): void; 363 | 364 | getAckpolicy(): AckPolicyMap[keyof AckPolicyMap]; 365 | setAckpolicy(value: AckPolicyMap[keyof AckPolicyMap]): void; 366 | 367 | serializeBinary(): Uint8Array; 368 | toObject(includeInstance?: boolean): Message.AsObject; 369 | static toObject(includeInstance: boolean, msg: Message): Message.AsObject; 370 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 371 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 372 | static serializeBinaryToWriter(message: Message, writer: jspb.BinaryWriter): void; 373 | static deserializeBinary(bytes: Uint8Array): Message; 374 | static deserializeBinaryFromReader(message: Message, reader: jspb.BinaryReader): Message; 375 | } 376 | 377 | export namespace Message { 378 | export type AsObject = { 379 | offset: string, 380 | key: Uint8Array | string, 381 | value: Uint8Array | string, 382 | timestamp: string, 383 | stream: string, 384 | partition: number, 385 | subject: string, 386 | replysubject: string, 387 | headersMap: Array<[string, Uint8Array | string]>, 388 | ackinbox: string, 389 | correlationid: string, 390 | ackpolicy: AckPolicyMap[keyof AckPolicyMap], 391 | } 392 | } 393 | 394 | export class Ack extends jspb.Message { 395 | getStream(): string; 396 | setStream(value: string): void; 397 | 398 | getPartitionsubject(): string; 399 | setPartitionsubject(value: string): void; 400 | 401 | getMsgsubject(): string; 402 | setMsgsubject(value: string): void; 
403 | 404 | getOffset(): string; 405 | setOffset(value: string): void; 406 | 407 | getAckinbox(): string; 408 | setAckinbox(value: string): void; 409 | 410 | getCorrelationid(): string; 411 | setCorrelationid(value: string): void; 412 | 413 | getAckpolicy(): AckPolicyMap[keyof AckPolicyMap]; 414 | setAckpolicy(value: AckPolicyMap[keyof AckPolicyMap]): void; 415 | 416 | serializeBinary(): Uint8Array; 417 | toObject(includeInstance?: boolean): Ack.AsObject; 418 | static toObject(includeInstance: boolean, msg: Ack): Ack.AsObject; 419 | static extensions: {[key: number]: jspb.ExtensionFieldInfo}; 420 | static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; 421 | static serializeBinaryToWriter(message: Ack, writer: jspb.BinaryWriter): void; 422 | static deserializeBinary(bytes: Uint8Array): Ack; 423 | static deserializeBinaryFromReader(message: Ack, reader: jspb.BinaryReader): Ack; 424 | } 425 | 426 | export namespace Ack { 427 | export type AsObject = { 428 | stream: string, 429 | partitionsubject: string, 430 | msgsubject: string, 431 | offset: string, 432 | ackinbox: string, 433 | correlationid: string, 434 | ackpolicy: AckPolicyMap[keyof AckPolicyMap], 435 | } 436 | } 437 | 438 | export interface StartPositionMap { 439 | NEW_ONLY: 0; 440 | OFFSET: 1; 441 | EARLIEST: 2; 442 | LATEST: 3; 443 | TIMESTAMP: 4; 444 | } 445 | 446 | export const StartPosition: StartPositionMap; 447 | 448 | export interface AckPolicyMap { 449 | LEADER: 0; 450 | ALL: 1; 451 | NONE: 2; 452 | } 453 | 454 | export const AckPolicy: AckPolicyMap; 455 | 456 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { readFileSync } from 'fs'; 2 | import { IBackOffOptions } from 'exponential-backoff/dist/options'; 3 | import Bluebird from 'bluebird'; 4 | import Debug from 'debug'; 5 | import { 6 | Client as GRPCClient, 7 | credentials as 
GRPCCredentials, 8 | ChannelCredentials, 9 | ClientReadableStream, 10 | ServiceError, 11 | status, 12 | } from 'grpc'; 13 | import { APIClient } from '../grpc/generated/api_grpc_pb'; 14 | import { 15 | SubscribeRequest, 16 | Message, 17 | CreateStreamRequest, 18 | CreateStreamResponse, 19 | PublishResponse, 20 | PublishRequest, 21 | StartPosition, 22 | } from '../grpc/generated/api_pb'; 23 | import LiftbridgeStream from './stream'; 24 | import LiftbridgeMessage from './message'; 25 | import LiftbridgeMetadata from './metadata'; 26 | import { 27 | NoAddressesError, CouldNotConnectToAnyServerError, PartitionAlreadyExistsError, DeadlineExceededError, 28 | } from './errors'; 29 | import { shuffleArray, faultTolerantCall } from './utils'; 30 | import { builtinPartitioners, PartitionerLike } from './partition'; 31 | 32 | /** 33 | * @hidden 34 | */ 35 | const debug = Debug.debug('node-liftbridge:client'); 36 | 37 | /** 38 | * @hidden 39 | */ 40 | const DEFAULTS = { 41 | timeout: 5000, 42 | }; 43 | 44 | /** 45 | * Liftbridge gRPC credentials. 46 | * 47 | * Read [`grpc #273`](https://github.com/grpc/grpc-node/issues/273#issuecomment-399506158) for more details. 48 | * 49 | * @category Client 50 | */ 51 | export interface ICredentials { 52 | /** 53 | * Root certificate file. 54 | * 55 | * Usually something like `ca.crt` 56 | */ 57 | rootCertificateFile?: string; 58 | /** 59 | * Client certificate private key file. 60 | * 61 | * Usually something like `.key` 62 | */ 63 | privateKeyFile: string; 64 | /** 65 | * Client certificate cert chain file. 66 | * 67 | * Usually something like `.crt` 68 | */ 69 | certificateChainFile: string; 70 | } 71 | 72 | /** 73 | * Create a client for working with a Liftbridge cluster. 74 | * 75 | * @example Insecure connection (default). 
76 | * ```typescript 77 | * import LiftbridgeClient from 'liftbridge'; 78 | * 79 | * const client = new LiftbridgeClient('localhost:9292'); 80 | * await client.connect(); 81 | * ``` 82 | * 83 | * @example Secure TLS connection (recommended in production). 84 | * ```typescript 85 | * import LiftbridgeClient from 'liftbridge'; 86 | * 87 | * const client = new LiftbridgeClient('localhost:9292', { 88 | * rootCertificateFile: './credentials/ca.crt', 89 | * privateKeyFile: './credentials/private.key', 90 | * certificateChainFile: './credentials/chain.crt' 91 | * }); 92 | * await client.connect(); 93 | * ``` 94 | * @category Client 95 | */ 96 | export default class LiftbridgeClient { 97 | private addresses: string[]; 98 | 99 | private options?: object; 100 | 101 | private credentials: ChannelCredentials; 102 | 103 | private client!: APIClient; 104 | 105 | private metadata!: LiftbridgeMetadata; 106 | 107 | /** 108 | * A client for use with a Liftbridge cluster. 109 | * 110 | * @param addresses String or array of strings of Liftbridge server addresses to connect to. 111 | * @param serverCredentials TLS credentials to use. Defaults to insecure context. 112 | * @param options Additional [options](https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) to pass on to low-level gRPC client for channel creation. 113 | */ 114 | constructor(addresses: string[] | string, serverCredentials: ICredentials | undefined = undefined, options?: object) { 115 | if (!addresses || addresses.length < 1) { 116 | throw new NoAddressesError(); 117 | } 118 | this.addresses = Array.isArray(addresses) ? addresses : [addresses]; 119 | this.credentials = LiftbridgeClient.loadCredentials(serverCredentials); 120 | this.options = options; 121 | } 122 | 123 | // Create gRPC channel credentials. 
    // Create gRPC channel credentials: an insecure channel when no credentials are
    // supplied, otherwise a TLS (SSL) channel built from the given PEM files.
    private static loadCredentials(credentials: ICredentials | undefined): ChannelCredentials {
        if (!credentials) return GRPCCredentials.createInsecure();
        return GRPCCredentials.createSsl(
            credentials.rootCertificateFile ? readFileSync(credentials.rootCertificateFile) : undefined,
            credentials.privateKeyFile ? readFileSync(credentials.privateKeyFile) : undefined,
            credentials.certificateChainFile ? readFileSync(credentials.certificateChainFile) : undefined,
        );
    }

    // Deadline is always UNIX epoch time + milliseconds in the future when you want the deadline to expire.
    private static getDeadline(timeout: number = DEFAULTS.timeout) {
        return new Date().getTime() + timeout;
    }

    // Make a fault-tolerant (retried with exponential backoff, per `options`)
    // connection to a single Liftbridge server address.
    private connectToLiftbridge(address: string, timeout?: number, options?: Partial<IBackOffOptions>): Promise<APIClient> {
        return faultTolerantCall(() => new Promise((resolve, reject) => {
            debug('attempting connection to', address);
            const connection = new GRPCClient(address, this.credentials, this.options);
            // `waitForReady` takes a deadline.
            connection.waitForReady(LiftbridgeClient.getDeadline(timeout), err => {
                if (err) return reject(err);
                debug('remote client connected and ready at', address);
                this.client = new APIClient(address, this.credentials, {
                    channelOverride: connection.getChannel(), // Reuse the working channel for APIClient.
                });
                return resolve(this.client);
            });
        }), options);
    }

    // Find the partition for the Message's subject. Returns 0 when the stream has
    // no partition metadata, no explicit partition and no partitioning strategy.
    private async findPartition(message: LiftbridgeMessage): Promise<number> {
        const subject = message.getSubject();
        const totalPartitions = await this.metadata.getPartitionsCountForSubject(subject);
        let partition: number = 0;
        // Calculate partition for the message by using the relevant partitioning strategy.
161 | if (totalPartitions > 0) { 162 | if (message.partition) { 163 | ({ partition } = message); 164 | } else if (message.partitionStrategy) { 165 | let PartitionerConstructor: PartitionerLike; 166 | if (typeof message.partitionStrategy === 'string') { 167 | PartitionerConstructor = builtinPartitioners[message.partitionStrategy]; 168 | } else { 169 | PartitionerConstructor = message.partitionStrategy; 170 | } 171 | partition = new PartitionerConstructor(subject, message.getKey(), this.metadata.get()).calculatePartition(); 172 | } 173 | } 174 | debug('calculated partition for message on subject', subject, partition); 175 | return partition; 176 | } 177 | 178 | private createStreamRequest(stream: LiftbridgeStream): Promise { 179 | return new Promise((resolve, reject) => { 180 | const createRequest = new CreateStreamRequest(); 181 | if (stream.group) createRequest.setGroup(stream.group); 182 | if (stream.partitions) createRequest.setPartitions(stream.partitions); 183 | createRequest.setName(stream.name); 184 | createRequest.setSubject(stream.subject); 185 | createRequest.setReplicationfactor(stream.replicationFactor); 186 | debug('attempting to create stream', stream.name, 'on subject', stream.subject); 187 | this.client.createStream(createRequest, { deadline: LiftbridgeClient.getDeadline() }, (err: ServiceError | null, response: CreateStreamResponse | undefined) => { 188 | if (err) { 189 | debug('create stream failed! error code =', err.code); 190 | if (err.code === status.ALREADY_EXISTS) return reject(new PartitionAlreadyExistsError()); 191 | if (err.code === status.DEADLINE_EXCEEDED) return reject(new DeadlineExceededError()); 192 | return reject(err); 193 | } 194 | debug('create stream successful'); 195 | return this.metadata.update(stream.name) // Stream created. Now update metadata to make sure we know about the newly created stream. 
                    .then(() => resolve(response))
                    .catch(reject);
            });
        });
    }

    // Build a gRPC subscription for the given Liftbridge stream, honouring an
    // explicit start offset or start timestamp when one is provided.
    private createSubscribeRequest(stream: LiftbridgeStream): ClientReadableStream<Message> {
        const subscribeRequest = new SubscribeRequest();
        subscribeRequest.setStream(stream.name);
        if (stream.startPosition) subscribeRequest.setStartposition(stream.startPosition);
        // TODO: debug this - figure out how best to allow to set specific partition.
        // subscribeRequest.setPartition(0);
        if (stream.startOffset) {
            debug('attempting to subscribe to stream', stream.name, 'at offset', stream.startOffset);
            subscribeRequest.setStartoffset(stream.startOffset.toString());
            return this.client.subscribe(subscribeRequest);
        } if (stream.startTimestamp) {
            debug('attempting to subscribe to stream', stream.name, 'at timestamp', stream.startTimestamp);
            subscribeRequest.setStarttimestamp(stream.startTimestamp.toString());
            return this.client.subscribe(subscribeRequest);
        }
        debug('attempting to subscribe to stream', stream.name);
        return this.client.subscribe(subscribeRequest);
    }

    // Publish a message: resolve its target partition first, then rewrite the
    // subject to `<subject>.<partition>` for non-zero partitions before sending.
    private createPublishRequest(message: LiftbridgeMessage): Promise<PublishResponse> {
        return new Promise((resolve, reject) => {
            const publishRequest = new PublishRequest();
            const subject = message.getSubject();
            this.findPartition(message).then(partition => {
                // Partition 0 keeps the base subject; others get a dotted suffix.
                const updatedSubject = (partition && partition > 0) ? `${subject}.${partition}` : subject;
                message.setSubject(updatedSubject);
                // NOTE(review): `PublishRequest` as declared in this repo's generated
                // `api_pb.d.ts` has no `setMessage()` — it carries the message fields
                // (key/value/stream/subject/...) directly. This call suggests src and
                // the generated gRPC code are out of sync; verify against the proto
                // version actually in use before relying on publish.
                publishRequest.setMessage(message);
                debug('going to publish message to subject', updatedSubject, 'at partition', partition, 'with key', message.getKey().toString());
                this.client.publish(publishRequest, { deadline: LiftbridgeClient.getDeadline() }, (err: ServiceError | null, response: PublishResponse | undefined) => {
                    if (err) {
                        if (err.code === status.DEADLINE_EXCEEDED) return reject(new DeadlineExceededError());
                        return reject(err);
                    }
                    return resolve(response);
                });
            }).catch(reject);
        });
    }

    /**
     * Establish a connection to the Liftbridge cluster.
     *
     * @example Connecting to a Liftbridge cluster with a custom timeout and multiple retries.
     *
     * ```typescript
     * import LiftbridgeClient from 'liftbridge';
     *
     * const client = new LiftbridgeClient('localhost:9292');
     * await client.connect(3000, {
     *     delayFirstAttempt: true,
     *     jitter: 'full',
     *     numOfAttempts: 10,
     *     timeMultiple: 1.5,
     *     startingDelay: 250
     * });
     * ```
     *
     * @param timeout Milliseconds before the connection attempt times out. This is set as the [gRPC Deadline](https://grpc.io/blog/deadlines/).
     * @param retryOptions Retry & backoff options.
     * @returns Client instance.
     */
    public connect(timeout?: number, retryOptions?: Partial<IBackOffOptions>): Promise<APIClient> {
        return new Promise((resolve, reject) => {
            // Try connecting to each Liftbridge server in random order and use the first successful connection for APIClient.
            const connectionAttempts = shuffleArray(this.addresses).map(address => this.connectToLiftbridge(address, timeout, retryOptions));
            Bluebird.any(connectionAttempts).then(client => {
                this.client = client;
                // Client connection succeeded. Now collect broker & partition metadata for all streams.
270 | const metadata = new LiftbridgeMetadata(this.client); 271 | metadata.update().then(() => { 272 | debug('initial cluster metadata fetch completed'); 273 | this.metadata = metadata; 274 | return resolve(this.client); 275 | }); 276 | }).catch(() => reject(new CouldNotConnectToAnyServerError())); 277 | }); 278 | } 279 | 280 | /** 281 | * Create a new stream attached to a NATS subject. Subject is 282 | * the NATS subject the stream is attached to, and name is the stream 283 | * identifier, unique per subject. It throws [[PartitionAlreadyExistsError]] if a 284 | * stream with the given subject and name already exists. 285 | * 286 | * @example Create a new stream on the Liftbridge cluster. 287 | * ```typescript 288 | * import LiftbridgeClient from 'liftbridge'; 289 | * 290 | * const client = new LiftbridgeClient('localhost:9292'); 291 | * await client.connect(); 292 | * await client.createStream(new LiftbridgeStream({ 293 | * subject: 'my-subject', 294 | * name: 'stream-name', 295 | * partitions: 5, 296 | * maxReplication: true 297 | * })).catch(err => { 298 | * if (err.code !== ErrorCodes.ERR_PARTITION_ALREADY_EXISTS) { 299 | * throw err; 300 | * } 301 | * }); 302 | * ``` 303 | * 304 | * @param stream Stream to create. 305 | * @returns CreateStreamResponse gRPC object. 306 | */ 307 | public createStream(stream: LiftbridgeStream): Promise { 308 | return this.createStreamRequest(stream); 309 | } 310 | 311 | /** 312 | * Create an ephemeral subscription for the given stream. It begins 313 | * receiving messages starting at the configured position and waits 314 | * for new messages when it reaches the end of the stream. The default 315 | * start position is the end of the stream. It throws [[NoSuchPartitionError]] 316 | * if the given stream does not exist. Use `subscribe().close()` to close 317 | * a subscription. 318 | * 319 | * @example Subscribing to a subject. 
320 | * ```typescript 321 | * import LiftbridgeClient from 'liftbridge'; 322 | * import LiftbridgeStream, { StartPosition } from 'liftbridge/stream'; 323 | * 324 | * const client = new LiftbridgeClient('localhost:9292'); 325 | * await client.connect(); 326 | * const subscription = client.subscribe(new LiftbridgeStream({ 327 | * subject: 'my-subject', 328 | * name: 'stream-name', 329 | * startPosition: StartPosition.EARLIEST 330 | * })); 331 | * 332 | * subscription.on('data', (data: Message) => { 333 | * console.log('subscribe on data = ', LiftbridgeMessage.toJSON(data)); 334 | * }); 335 | * 336 | * // When ready to finish subscription — 337 | * subscription.close(); 338 | * ``` 339 | * 340 | * @param stream Stream to subscribe to. 341 | * @event data On data from the subscribed Liftbridge stream. 342 | * @event status On gRPC process status. 343 | * @event error On some error. 344 | * @event end OnLiftbridge server finishing sending messages. 345 | * @returns `ReadableStream` of messages. 346 | */ 347 | public subscribe(stream: LiftbridgeStream): ClientReadableStream { 348 | const subscription = this.createSubscribeRequest(stream); 349 | return subscription; 350 | } 351 | 352 | /** 353 | * Publish a new message to the NATS subject. If the AckPolicy is 354 | * not NONE and a deadline is provided, this will synchronously block until 355 | * the first ack is received. If the ack is not received in time, a 356 | * DeadlineExceeded status code is returned. If an AckPolicy and deadline 357 | * are configured, this returns the first Ack on success, otherwise it 358 | * returns null. 359 | * 360 | * @example Publish a message to a subject. 
361 | * ```typescript 362 | * import LiftbridgeClient from 'liftbridge'; 363 | * import LiftbridgeMessage, { AckPolicy } from 'liftbridge/message'; 364 | * 365 | * const client = new LiftbridgeClient('localhost:9292'); 366 | * await client.connect(); 367 | * 368 | * await client.publish(new LiftbridgeMessage({ 369 | * subject: 'my-subject', 370 | * key: 'message-key', 371 | * value: 'message-value', 372 | * ackPolicy: AckPolicy.ALL, 373 | * partitionStrategy: 'roundrobin', 374 | * ackInbox: 'ack.my-subject', 375 | * headers: { 'some-header': '123' } 376 | * })); 377 | * ``` 378 | * 379 | * @param message Message to publish. 380 | * @returns PublishResponse gRPC object. 381 | */ 382 | public publish(message: LiftbridgeMessage): Promise { 383 | const publisher = this.createPublishRequest(message); 384 | return publisher; 385 | } 386 | 387 | /** 388 | * Close the client connection to the Liftbridge cluster. 389 | */ 390 | public close(): void { 391 | this.client.close(); 392 | } 393 | } 394 | 395 | export { 396 | APIClient, 397 | IBackOffOptions, 398 | CreateStreamResponse, 399 | PublishResponse, 400 | ClientReadableStream, 401 | Message, 402 | StartPosition, 403 | }; 404 | --------------------------------------------------------------------------------