├── .editorconfig ├── .eslintignore ├── .eslintrc.js ├── .github └── workflows │ ├── deploy.yml │ └── tests.yml ├── .gitignore ├── .jshintrc ├── .npmignore ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── docs ├── analytics.md ├── connect.md ├── drainer.md ├── health.md ├── hints.md ├── native.md ├── partition-drainer.md └── publisher.md ├── examples ├── best-practice-example │ ├── consumer.js │ └── producer.js ├── sasl-ssl-example │ ├── README.md │ ├── config.js │ ├── consumer.js │ └── producer.js └── ssl-example │ ├── README.md │ ├── config.js │ ├── consumer.js │ └── producer.js ├── kafka-setup ├── README.md ├── alpine.Dockerfile ├── client-jaas.conf ├── client.properties ├── debian.Dockerfile ├── docker-compose.yml ├── generate-certs.sh ├── kafka-console.sh ├── server-jaas.conf ├── start.sh └── stop.sh ├── package.json ├── src ├── index.ts └── lib │ ├── Sinek.ts │ ├── interfaces.ts │ ├── kafkajs │ ├── JSConsumer.ts │ ├── JSProducer.ts │ └── index.ts │ └── shared │ ├── Analytics.ts │ ├── CompressionTypes.ts │ ├── Health.ts │ ├── Metadata.ts │ └── index.ts ├── test ├── config.ts ├── int │ ├── Health.test.ts │ └── JSSinek.test.ts └── mocha.opts ├── tsconfig.dist.json ├── tsconfig.json └── yarn.lock /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | charset = utf-8 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | 9 | [{*.js,*.json,*.yml}] 10 | indent_size = 2 11 | indent_style = space 12 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | coverage/* 2 | /node_modules/* 3 | examples/* 4 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | "env": { 3 | "es6": true, 4 | "node": true, 5 | "mocha": true 6 | }, 7 | "parser": '@typescript-eslint/parser', 8 | "plugins": [ 9 | '@typescript-eslint', 10 | ], 11 | "extends": [ 12 | 'eslint:recommended', 13 | 'plugin:@typescript-eslint/recommended', 14 | ], 15 | "rules": { 16 | "indent": [ 17 | "error", 18 | 2 19 | ], 20 | "linebreak-style": [ 21 | "error", 22 | "unix" 23 | ], 24 | "quotes": [ 25 | "error", 26 | "double" 27 | ], 28 | "semi": [ 29 | "error", 30 | "always" 31 | ], 32 | "no-console": 0 33 | } 34 | }; 35 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Create npm package 2 | on: 3 | release: 4 | types: [created] 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | # Setup .npmrc file to publish to npm 11 | - uses: actions/setup-node@v1 12 | with: 13 | node-version: '12.x' 14 | registry-url: 'https://registry.npmjs.org' 15 | - run: yarn 16 | - run: npm publish --access public 17 | env: 18 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 19 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node 2 | # For more information see: 
https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions 3 | 4 | name: Node.js CI 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | matrix: 19 | node-version: [12.x] 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - name: Use Node.js ${{ matrix.node-version }} 24 | uses: actions/setup-node@v1 25 | with: 26 | node-version: ${{ matrix.node-version }} 27 | - run: npm install -g yarn 28 | - run: yarn 29 | - run: mkdir /tmp/kafka-data && mkdir /tmp/kafka-data/data && mkdir /tmp/kafka-data/logs && chmod -R 777 /tmp/kafka-data 30 | - run: ./kafka-setup/generate-certs.sh 31 | - name: Start Docker containers for Zookeeper, Kafka and Schema Registry 32 | run: cd kafka-setup && docker-compose up -d 33 | - run: sleep 240 34 | - run: yarn test 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 6 | # Runtime data 7 | pids 8 | *.pid 9 | *.seed 10 | 11 | # Directory for instrumented libs generated by jscoverage/JSCover 12 | lib-cov 13 | 14 | # Coverage directory used by tools like istanbul 15 | coverage 16 | 17 | # nyc test coverage 18 | .nyc_output 19 | 20 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 21 | .grunt 22 | 23 | # node-waf configuration 24 | .lock-wscript 25 | 26 | # Compiled binary addons (http://nodejs.org/api/addons.html) 27 | build/Release 28 | 29 | # Dependency directories 30 | node_modules 31 | jspm_packages 32 | 33 | # Optional npm cache directory 34 | .npm 35 | 36 | # Optional REPL history 37 | .node_repl_history 38 | .idea 39 | logs 40 | .vscode 41 | 42 | certs 43 | 44 | # generated files 45 | dist 46 | -------------------------------------------------------------------------------- /.jshintrc: -------------------------------------------------------------------------------- 1 | { 2 | "globals": { 3 | "LOG": true 4 | }, 5 | "esversion": 6, 6 | "node": true 7 | } -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | docs/ 2 | coverage/ 3 | kafka-setup/ 4 | perf/ 5 | sasl-ssl-example/ 6 | ssl-example/ 7 | test/ 8 | .editorconfig 9 | .eslintignore 10 | .eslintrc.js 11 | .gitignore 12 | .jshintrc 13 | .travis.yml 14 | src 15 | .github/ -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "12" 4 | env: 5 | global: 6 | - ZOOKEEPER_PEERS=localhost:2181 7 | - KAFKA_PEERS=localhost:9092 8 | - KST_TOPIC=travis 9 | - CXX=g++-4.8 10 | 11 | script: 12 | - rm -rf node_modules 13 | - yarn 14 | - yarn add --frozen-lockfile node-rdkafka@2.7.4 15 | - yarn lint 16 | - mocha --exit --timeout 60000 -R spec test/int/* 17 | 18 | before_install: 19 | - wget https://archive.apache.org/dist/kafka/1.1.0/kafka_2.11-1.1.0.tgz -O kafka.tgz 20 | - mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1 21 | - nohup bash -c "cd kafka && bin/zookeeper-server-start.sh config/zookeeper.properties &" 22 | - sleep 5 23 | - nohup bash -c "cd kafka && bin/kafka-server-start.sh config/server.properties &" 24 | - sleep 10 25 | - 
kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --topic sinek-test-topic-travis --zookeeper localhost:2181
26 | - kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --topic n-test-topic --zookeeper localhost:2181
27 | - sleep 2
28 |
29 | addons:
30 |   apt:
31 |     sources:
32 |     - ubuntu-toolchain-r-test
33 |     packages:
34 |     - g++-4.8
35 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # sinek CHANGELOG
2 |
3 | ## 2020-08-10, Version 10.0.0
4 | * **BREAKING** removed deprecated client versions
5 | * **BREAKING** removed node-rdkafka
6 | * KafkaJS is now the primary connection to Kafka
7 | * converted the codebase to TypeScript
8 |
9 | ## 2020-04-19, Version 9.1.0
10 |
11 | * support for message headers in NProducer
12 | * upgraded dependencies
13 |
14 | ## 2020-02-24, Version 9.0.0
15 |
16 | * upgraded deps
17 | * **BREAKING** node-rdkafka has been removed as optional dependency (see below)
18 |
19 | ### Please Note:
20 |
21 | You will have to manually install `node-rdkafka` alongside sinek.
22 | (This requires a Node.js version between 9 and 12 and will not work with Node.js >= 13, last tested with 12.16.1)
23 |
24 | On Mac OS High Sierra / Mojave:
25 | `CPPFLAGS=-I/usr/local/opt/openssl/include LDFLAGS=-L/usr/local/opt/openssl/lib yarn add --frozen-lockfile node-rdkafka@2.7.4`
26 |
27 | Otherwise:
28 | `yarn add --frozen-lockfile node-rdkafka@2.7.4`
29 |
30 | (Please also note: doing this with npm does not work, as it will remove your other dependencies; use yarn instead: `npm i -g yarn`)
31 |
32 | ## 2019-10-15, Version 8.1.0
33 |
34 | * upgraded deps
35 | * node-rdkafka is now capable of dealing with Node.js versions > 11.15.0
36 |
37 | ## 2019-10-14, Version 8.0.0
38 |
39 | * this is a major version bump and might break your setup
40 | * upgraded all dependencies
41 | * **BREAKING** node-rdkafka is now an optional dependency
42 | * **BREAKING** added kafkajs as default client dependency
43 | * `batchSize` on kafkajs is not used, so it is deprecated
44 | * the `consumer.commit()` function is not natively supported on kafkajs; manual commits need to be handled in the callback function
45 | * some statistics/analytics functions will not work with kafkajs
46 | * use the best practice from the `examples` directory
47 |
48 |
49 | ## 2019-07-30, Version 7.30.1
50 |
51 | * pinned node-rdkafka to 2.7.0, please only use this version with node 11.15.0
52 |
53 | ## 2019-07-04, Version 7.30.0
54 |
55 | * upgraded dependencies
56 | ```
57 | async        ~2.6.2  →  ~3.1.0
58 | bluebird     ~3.5.4  →  ~3.5.5
59 | node-rdkafka ~2.6.1  →  ~2.7.0
60 | eslint       ~5.16.0 →  ~6.0.1
61 | express      ~4.16.4 →  ~4.17.1
62 | ```
63 | * for now you have to stick with a maximum node version of v11.15.0 (node-rdkafka does not support any higher version yet)
64 |
65 | ## 2019-05-07, Version 7.29.3
66 |
67 | * dependency upgrade
68 |
69 | ## 2019-04-01, Version 7.29.2
70 |
71 | * "enable.auto.commit" now defaults to `false` to prevent frustration with batching logic
72 |
73 | ## 2019-03-29, Version 7.29.1
74 |
75 | * fixed bug in produce where partition and key would be ignored
76 |
77 | ## 2019-03-29, Version 7.29.0
78 |
79 | * moving away from semver minor resetting after major
80 | * removed rd-lt (old RD Load Test)
81 | * upgraded dependencies to the latest node-rdkafka
82 | * **BREAKING** marked (JS) Consumer as deprecated
83 | * **BREAKING** marked (JS) Producer as deprecated
84 | * **BREAKING** swapped node-rdkafka from optional to dependencies
85 | * **BREAKING** swapped kafka-node from dependencies to optional
86 | * cleaned-up documentation
87 | * added best-practice example for consumer and producer
88 | * adjusted test configuration to fit the best-practice example
89 | * **BREAKING** changed NProducer from Producer to HighLevelProducer to ensure message delivery based on the send promise
90 |
91 | ## 2019-03-12, Version 6.27.3
92 |
93 | * preventing undefined offset value commits for commitLocalOffsetsForTopic
94 | * added additional debug logs to NConsumer
95 |
96 | ## 2019-03-12, Version 6.27.1-6.27.2
97 |
98 | * fixed timings for analytics and lag interval in NConsumer and NProducer
99 | * fixed sortedManualBatch type error
100 |
101 | ## 2019-03-11, Version 6.27.0
102 |
103 | * (NConsumer) added batch option sortedManualBatch
104 | * (NConsumer) added commitLocalOffsetsForTopic method
105 | * **SEMI-BREAKING** (NConsumer) removed experimental resetTopicPartitionsToEarliest method
106 | * **SEMI-BREAKING** (NConsumer) removed experimental resetTopicPartitionsToLatest method
107 | * **SEMI-BREAKING** (NConsumer) removed experimental commitOffsetForAllPartitionsOfTopic method
108 | * **SEMI-BREAKING** (NConsumer) removed deprecated consumeOnce method
109 | * added "### Consuming Multiple Topics efficiently" to lib/librdkafka/README.md
110 | * (NConsumer) passing an empty array to "adjustSubscription" will unsubscribe from all topics
111 | * upgraded dependencies
112 | * fixed TypeScript bug (string for topic missing)
113 |
114 | ## 2019-03-07, Version 6.26.0
115 |
116 | * removed custom configs where possible to fall back to librdkafka defaults
117 |
118 | ## 2019-03-07, Version 6.25.0
119 |
120 | * added `manualBatching` option to NConsumer batch mode options; it will enable you to process messages faster and control your own commits easily (via callback)
121 | setting it to true will result in syncEvent() being called with the whole batch of messages instead of a single message
122 | * **SEMI-BREAKING** changed types to allow a single or multi message first argument for syncEvent
123 |
124 | ## 2019-01-30, Version 6.24.1
125 |
126 | * fixed bug in metadata partition for topic call
127 |
128 | ## 2019-01-09, Version 6.24.0
129 |
130 | * fixed error message typo
131 | * updated dependencies (node-rdkafka upgraded)
132 |
133 | ## 2018-10-16, Version 6.23.1-4
134 |
135 | * added tombstone function call to types
136 | * fixed missing exports
137 | * small release optimisations
138 | * fixed missing return type in tombstone declaration
139 |
140 | ## 2018-10-16, Version 6.23.0
141 |
142 | * added advanced configuration declaration to typescript declarations, thanks to Juri Wiens
143 | * permitted passing of null as message value
144 | * added .tombstone() function to NProducer to easily delete kafka messages
145 | * upgraded dependencies (node-rdkafka and kafka-node were updated)
146 |
147 | ## 2018-10-04, Version 6.22.3
148 |
149 | * fixed bug in typescript declaration, thanks to @holgeradam
150 |
151 | ## 2018-09-20, Version 6.22.2
152 |
153 | * fixed bug in NProducer partition identification, result of murmur was not passed correctly
154 | thanks to @elizehavi
155 |
156 | ## 2018-09-14, Version 6.22.1
157 |
158 | * typescript declaration optimizations
159 |
160 | ## 2018-09-10, Version 6.22.0
161 |
162 | * fixed some typescript declaration bugs
163 | * removed warning from Consumer and Producer, it didn't feel right
164 | * updated dependencies: kafka-node 2.6.1 -> 3.0.0
165 |
166 | ## 2018-09-02, Version 6.21.0
167 |
168 | * added TypeScript declarations for NConsumer, NProducer, Consumer, Producer and Config
169 |
170 | ## 2018-09-02, Version 6.20.0
171 |
172 | * Added new experimental functions to NConsumer: resetTopicPartitionsToEarliest, resetTopicPartitionsToLatest
173 | * Marked old clients as deprecated, added suggestion for Consumer and Producer to move to native versions
174 | * Updated dependencies:
175 |
176 | node-rdkafka ~2.3.4 → ~2.4.1
177 | eslint ~5.0.1 → ~5.5.0
178 | sinon ~6.1.0 → ~6.1.5
179 |
180 | ## 2018-07-03, Version 6.19.0
181 |
182 | * Health thresholds are now configurable, pass them via the health subobject in the parent config to consumer or producer
183 | * updated uuid and sinon deps
184 |
185 | ## 2018-06-27, Version 6.18.0
186 |
187 | * brought getTopicMetadata and getMetadata to NConsumer, was only available on NProducer so far
188 | * added getTopicList to NProducer and NConsumer to retrieve a list of available topics
189 | * updated dependencies:
190 |
191 | node-rdkafka ~2.3.3 → ~2.3.4
192 | (also sinon, eslint and uuid)
193 |
194 | ## 2018-05-31, Version 6.17.0
195 |
196 | * switched default encoding for message value and key of the JS Kafka client to Buffer
197 | * simplified integration tests
198 | * updated dependencies:
199 |
200 | kafka-node ~2.4.1 → ~2.6.1
201 | eslint ~4.18.2 → ~4.19.1
202 | mocha ~5.0.4 → ~5.2.0
203 | sinon ^4.4.6 → ^6.0.0
204 | node-rdkafka ~2.2.3 → ~2.3.3
205 | async ~2.6.0 → ~2.6.1
206 |
207 | ## 2018-05-31, Version 6.16.0
208 |
209 | * updated NConsumer and NProducer to debug and concat errors when requiring the native lib
210 |
211 | ## 2018-03-27, Version 6.15.1
212 |
213 | * node-rdkafka has seg fault bugs in 2.3.1 -> falling back to 2.2.3
214 |
215 | ## 2018-03-15, Version 6.15.0
216 |
217 | * corrected consumer callback error pass (now also logging a warning not to do it)
218 | * now allows passing a correlation-id (opaque key) when producing with NProducer
219 | * updated dependencies:
220 |
221 | uuid ~3.1.0 → ~3.2.1
222 | bluebird ~3.5.0 → ~3.5.1
223 | debug ^3.0.0 → ^3.1.0
224 | kafka-node ^2.3.0 → ^2.4.1
225 | eslint ^4.11.0 → ^4.18.2
226 | express ^4.16.2 → ^4.16.3
227 | mocha ~5.0.2 → ~5.0.4
228 | sinon ^4.1.2 → ^4.4.6
229 | node-rdkafka ^2.2.0 → ^2.3.1
230 |
231 | ## 2018-02-18, Version 6.14.0
232 |
233 | * now starting analytics immediately
234 | * propagating connection promise correctly
235 |
236 | ## 2017-11-13, Version 6.13.0
237 |
238 | * now proxying consumer_commit_cb
239 | * upgraded dependencies: eslint@4.11.0, sinon@4.1.2, node-rdkafka@2.2.0
240 |
241 | ## 2017-11-03, Version 6.12.0
242 |
243 | * upgraded node-librdkafka dependency to 2.1.1
244 | * added pause and resume functions for NConsumer
245 | * added commitMessage method to NConsumer
246 | * added option to switch partition selection to murmurv2
247 |
248 | ## 2017-10-22, Version 6.11.0
249 |
250 | * intelligent healthcheck, check out librdkafka/Health.md
251 | * average batch processing time in getStats() for nconsumer
252 | * clear rejects for operations, when the clients are not connected
253 | * added unit tests for Health.js
254 | * refactored readme
255 |
256 | ## 2017-10-21, Version 6.10.0
257 |
258 | * intelligent fetch grace times in batch mode
259 | * small optimisations on nconsumer
260 |
261 | ## 2017-10-21, Version 6.9.0
262 |
263 | * **BREAKING CHANGE** nconsumer 1:n (batch mode) does not commit on every x batches now,
264 | it will only commit when a certain amount of messages has been consumed and processed
requiredAmountOfMessagesForCommit = batchSize * commitEveryNBatch 266 | * this increases performance and makes less commit requests when a topic's lag has been 267 | resolved and the amount of "freshly" produced messages is clearly lower than batchSize. 268 | 269 | ## 2017-10-20, Version 6.8.0 270 | 271 | * comes with the new analytics class for nproducers and nconsumers 272 | * checkout librdkafka/Analytics.md 273 | 274 | ## 2017-10-18, Version 6.7.0 275 | 276 | * new offset info functions for NConsumer (checkout librdkafka/README.md) 277 | * new getLagStatus() function for NConsumer that fetches and compares partition offsets 278 | 279 | ## 2017-10-18, Version 6.6.1 280 | 281 | * updates `node-rdkafka` to @2.1.0 which ships fixes 282 | 283 | ## 2017-10-18, Version 6.6.0 284 | 285 | * added librdkafka/Metadata class 286 | * added new metadata functions to NProducer 287 | * send, buffer and _sendBufferFormat are now async functions 288 | * ^ **BREAKING CHANGE** sinek now requires min. Node.js Version 7.6 289 | * added `auto` mode for NProducer (automatically produces to latest partition count 290 | event if it changes during runtime of a producer -> updates every 5 minutes) 291 | * refactored and optimized NProducer send logic 292 | * updated librdkafka/README.md 293 | * added new tests for NProducer 294 | 295 | ## 2017-10-17, Version 6.5.1 296 | 297 | * fixed bug in NConsumer consume() consume options, where commitSync field was always true 298 | * added JSDOC for NConsumer and NProducer 299 | 300 | ## 2017-10-13, Version 6.5.0 301 | 302 | * new 1:N consumer mode (making 1:1 mode configurable with params -> see lib/librdkafka/README.md) 303 | * more stats for consumer batch mode 304 | * new consumer batch event 305 | * **BREAKING CHANGE** as consumer.consume(syncEvent) now rejects if you have `enabled.auto.commit: true` 306 | * updated librdkafka/README.md 307 | 308 | ## 2017-10-12, Version 6.4.1 309 | 310 | * Updated depdendencies 311 | * Re-created lockfile 312 | * fixed bug in sync commit (now catching timeout errors) 313 | 314 | ## 2017-10-12, Version 6.4.0 315 | 316 | * NConsumer automatically sets memory related configs (easier start if you missed those config params..) 
317 | * NConsumer in 1:1 mode will now use commitMessageSync instead of commitMessage (this reduces performance, but 318 | ensures we do not stack tons of commit-requests in the consumers-queue), sinek 6.5.0 will follow 319 | with an option to set the amount of messages that are consumed & committed in one step 1-10000 320 | 321 | ## 2017-10-11, Version 6.3.0 322 | 323 | * bugfix on NProducer (partitions ranged from 1-30 instead of 0-29) 324 | 325 | ## 2017-09-12, Version 6.2.0 326 | 327 | * added streaming mode to NConsumer, 328 | you can pass true to .connect(true) and omit .consume() 329 | to enable streaming mode consuming 330 | * adjusted sasl example 331 | 332 | ## 2017-08-29, Version 6.1.2 333 | 334 | * fixed connection event (ready) for connect/ consumers 335 | 336 | ## 2017-08-29, Version 6.1.0 337 | 338 | #### LIBRDKAFKA Clients fixes to 6.0.4 339 | 340 | * fixed a few small things 341 | * added tconf fields to config 342 | * updated docs 343 | * more and better examples 344 | 345 | #### LIBRDKAFKA Clients to 6.1.0 346 | 347 | * updated NProducer api to allow new node-rdkafka 2.0.0 348 | (as it had breaking changes regarding its topic api) 349 | 350 | ## 2017-08-29, Version 6.0.0 351 | 352 | #### LIBRDKAFKA Clients 353 | 354 | * sinek now ships with an optional dependency to node-rdkafka 355 | * 2 native clients embbed rdkafka in the usual sinek connector api interface 356 | * NConsumer and NProducer 357 | * sasl support 358 | * additional config params through noptions 359 | 360 | ## 2017-08-20, Version 5.4.0 361 | 362 | #### Focus on SSL 363 | 364 | * fixed a few option reference passes to allow for better ssl support 365 | * added /kafka-setup that allows for an easy local ssl kafka broker setup 366 | * added /ssl-example to show how ssl connections are configured 367 | * updated readme 368 | * added eslint and updated code style accordingly 369 | 370 | ## 2017-08-11, Version 5.3.0 371 | 372 | #### General Updates & Fixes 373 | 374 | * Updated to latest kafka-node 2.2.0 375 | * Fixed bug in logging message value length 376 | * Added 3 new format methhods publish, unpublish, update to connect producer 377 | * Added partitionKey (optional) to all bufferFormat operations of publisher and connect producer 378 | 379 | ## 2017-07-11, Version 5.0.0 380 | 381 | #### Kafka Client is now able to connect directly to the Kafka Broker 382 | 383 | * Updated all dependencies 384 | * Clients can now omit Zookeeper and connect directly to a Broker by omitting zkConStr and passing kafkaHost in the config 385 | 386 | #### Producer/Consumer Key Changes [#704](https://github.com/SOHU-Co/kafka-node/pull/704) 387 | 388 | * **BREAKING CHANGE** The `key` is decoded as a `string` by default. Previously was a `Buffer`. 
The preferred encoding for the key can be defined by the `keyEncoding` option on any of the consumers and will fallback to `encoding` if omitted 389 | 390 | ## 2017-07-10, Version 4.4.0 391 | 392 | * First entry in CHANGELOG.md 393 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016-2019 Chris Fröhlingsdorf 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # High Level Node.js Kafka Client 2 | 3 | [![Build Status](https://travis-ci.org/nodefluent/node-sinek.svg?branch=master)](https://travis-ci.org/nodefluent/node-sinek) 4 | [![npm version](https://badge.fury.io/js/sinek.svg)](https://badge.fury.io/js/sinek) 5 | 6 | The most advanced Kafka Client. 
7 |
8 | ## Features
9 |
10 | * easy promise-based API
11 | * a lot of Kafka pitfalls already taken care of
12 | * backpressure and stream consume modes
13 | * secure committing in backpressure (1:n, batch) mode
14 | * plain Javascript implementation based on `kafka-node` and a super fast native implementation based on `node-rdkafka`
15 | * SSL, SASL & Kerberos support
16 | * auto reconnects
17 | * auto partition recognition and deterministic spreading for producers
18 | * **intelligent health-checks** and **analytic events** for consumers and producers
19 |
20 | ## You might also like
21 |
22 | * check out :goberserk: [node-kafka-streams](https://github.com/nodefluent/kafka-streams) for a stream processing kafka api
23 | * check out :fire: [node-kafka-connect](https://github.com/nodefluent/kafka-connect) for an easy datastore <-> kafka transfer
24 |
25 | ## Latest Changes
26 |
27 | Can be found [here](CHANGELOG.md)
28 |
29 | ## Install
30 |
31 | ```shell
32 | npm install --save sinek
33 | ```
34 |
35 | ## Usage
36 |
37 | ### Usage - JS Client (based on KafkaJS)
38 |
39 | ```javascript
40 | const {
41 |   JSConsumer,
42 |   JSProducer
43 | } = require("sinek");
44 |
45 | const jsProducerConfig = {
46 |   clientId: "my-app",
47 |   brokers: ["kafka1:9092"]
48 | };
49 |
50 | // the consumer config mirrors the producer config, plus a consumer group id
51 | const jsConsumerConfig = {
52 |   clientId: "my-app",
53 |   brokers: ["kafka1:9092"],
54 |   groupId: "my-group"
55 | };
56 |
57 | (async () => {
58 |
59 |   const topic = "my-topic";
60 |
61 |   const producer = new JSProducer(jsProducerConfig);
62 |   const consumer = new JSConsumer(topic, jsConsumerConfig);
63 |
64 |   producer.on("error", error => console.error(error));
65 |   consumer.on("error", error => console.error(error));
66 |
67 |   await consumer.connect();
68 |
69 |   // consume from a topic
70 |   consumer.consume(async (messages) => {
71 |     messages.forEach((message) => {
72 |       console.log(message);
73 |     });
74 |   });
75 |
76 |   // produce messages to a topic
77 |   await producer.connect();
78 |   await producer.send(topic, "a message");
79 | })().catch(console.error);
80 |
81 | ```
82 |
83 | # Further Docs
84 |
85 | * [Best-practice example](examples/best-practice-example)
86 | * [SSL example](examples/ssl-example/)
87 | * [SASL+SSL example](examples/sasl-ssl-example/)
88 | * [Alpine based docker example](kafka-setup/alpine.Dockerfile)
89 | * [Debian based docker example](kafka-setup/debian.Dockerfile)
90 |
91 | > make it about them, not about you
92 | > - Simon Sinek
--------------------------------------------------------------------------------
/docs/analytics.md:
--------------------------------------------------------------------------------
1 | # NConsumer and NProducer analytics
2 |
3 | - as of sinek@6.8.0 the native clients support additional analytic features
4 | - check out the usage example below
5 |
6 | ```javascript
7 | "use strict";
8 | const {NConsumer, NProducer} = require("sinek");
9 |
10 | const consumer = new NConsumer(/* .. */);
11 | const producer = new NProducer(/* .. */);
12 |
13 | await consumer.connect();
14 | consumer.consume();
15 |
16 | await producer.connect();
17 |
18 | /*
19 |   You can enable the collection and computation of analytics
20 |   by calling the following functions on the instances;
21 |   make sure to be connected before enabling analytics
22 | */
23 |
24 | consumer.enableAnalytics({
25 |   analyticsInterval: 1000 * 60 * 2, //runs every 2 minutes
26 |   lagFetchInterval: 1000 * 60 * 5 //runs every 5 minutes (don't run it too often!)
27 | });
28 |
29 | producer.enableAnalytics({
30 |   analyticsInterval: 1000 * 60 * 2, //runs every 2 minutes
31 | });
32 |
33 | /*
34 |   You can access the data in different ways:
35 | */
36 |
37 | //emits on every interval
38 | consumer.on("analytics", result => {
39 |   console.log(result);
40 | });
41 |
42 | //returns the last analytics result
43 | console.log(consumer.getAnalytics());
44 |
45 | /*
46 |   Analytic results look like this:
47 | */
48 |
49 | //producer:
50 | {
51 |   generatedAt: 1508499993324, //generation of result
52 |   interval: 500, //configured interval
53 |   produced: 4 //produced messages since last lag fetch
54 | }
55 |
56 | //consumer:
57 | {
58 |   "generatedAt": 1508504204109, //generation of result
59 |   "interval": 500, //configured interval
60 |   "lagChange": {
61 |     "timelyDifference": 1000, //ms between the last 2 fetches
62 |     "fetchPerformance": -12, //ms difference between the execution of the last fetches
63 |     "newLags": {}, //lags that came up since last lag fetch
64 |     "changedLags": {}, //lags that are still present and have changed
65 |     "resolvedLags": { //lags that have been resolved between the last fetches
66 |       "n-test-topic": {
67 |         "0": 0 //the resolved offset lag on topic "n-test-topic", partition 0, is now 0
68 |       }
69 |     },
70 |     "stallLags": {} //lags that are still present and have not changed since last lag fetch
71 |   },
72 |   "largestLag": {
73 |     "topic": "n-test-topic",
74 |     "partition": 0,
75 |     "lowDistance": 319,
76 |     "highDistance": 0,
77 |     "detail": {
78 |       "lowOffset": 0,
79 |       "highOffset": 319,
80 |       "comittedOffset": 319
81 |     }
82 |   },
83 |   "consumed": 1, //consumed messages since last lag fetch
84 |   "errors": 0 //any client errors that occurred during the interval
85 | }
86 |
87 | /*
88 |   Analytics are stopped automatically if you call
89 |   .close() on the client instances, but you can also halt
90 |   them manually:
91 | */
92 |
93 | consumer.haltAnalytics();
94 | producer.haltAnalytics();
95 | ```
96 |
--------------------------------------------------------------------------------
/docs/connect.md:
--------------------------------------------------------------------------------
1 | # Consumer / Producer
2 |
3 | > Please **note**: the non-native clients have been deprecated; please switch to the noptions configuration.
4 |
5 | - these clients were designed for node-kafka-connect, however
6 | they are also exported via sinek and can be used as stand-alone
7 | consumers or producers (in fact, they are easier to set up)
8 | - clients will automatically re-connect 9 | - consumers will automatically rebalance 10 | - producers will refresh topic meta-data on connect() //if topics are passed in constructor 11 | 12 | ## Using the consumer 13 | 14 | ```javascript 15 | const {Consumer} = require("sinek"); 16 | const consumer = new Consumer("my-topic", config); //checkout config below 17 | 18 | // without backpressure 19 | const withBackpressure = false; 20 | consumer.connect(withBackpressure).then(_ => { 21 | consumer.consume(); //resolves a promise on the first message consumed 22 | }); 23 | 24 | consumer.on("message", message => console.log(message)); 25 | consumer.on("error", error => console.error(error)); 26 | 27 | // with backpressure 28 | const withBackpressure = true; 29 | consumer.connect(withBackpressure).then(_ => { 30 | consumer.consume((message, callback) => { 31 | console.log(message); 32 | //you must return this callback to receive further messages 33 | callback(); 34 | }); 35 | }); 36 | 37 | consumer.on("error", error => console.error(error)); 38 | 39 | /* messages are always objects that look like this: 40 | { 41 | value: "..", 42 | key: "..", 43 | partition: 0, 44 | offset: 15, 45 | highWaterOffset: 15, 46 | topic: "my-topic" 47 | } 48 | */ 49 | ``` 50 | 51 | ## Consuming a whole topic once 52 | 53 | ```javascript 54 | const drainThreshold = 10000; //time that should pass with no message being received 55 | const timeout = 0; //time that should maximally pass in total (0 = infinite) 56 | const messageCallback = null; //just like the usual .consume() this supports events and callbacks (for backpressure) 57 | 58 | consumer.consumeOnce(messageCallback, drainThreshold = 10000, timeout = 0) 59 | .then(consumedMessagesCount => console.log(consumedMessagesCount)); //resolves when topic is drained 60 | .catch(error => console.error(error)); //fires on error or timeout 61 | 62 | consumer.on("message", message => console.log(message)); 63 | consumer.on("error", error => console.error(error)); 64 | ``` 65 | 66 | ## Using the producer 67 | 68 | ```javascript 69 | const {Producer} = require("sinek"); 70 | const partitions = 1; //make sure the topic exists and has the given amount of partitions (relates to kafka broker config setup) 71 | const producer = new Producer(config, ["my-topic"], partitions); 72 | 73 | producer.connect().then(_ => { 74 | //all 3 return promises (buffer wont actually buffer, they will all be sent immediatly per default) 75 | producer.send("my-topic", "my message as string"); //messages will be automatically spread across partitions randomly 76 | 77 | const compressionType = 0; 78 | producer.buffer("my-topic", "my-message-key-identifier", {bla: "message as object"}, compressionType); 79 | //this will create a keyed-message (e.g. Kafka LogCompaction on Message-Keys), producer will automatically identfiy 80 | //the message-key to a topic partition (idempotent) 81 | 82 | //if you do not pass in an identifier, it will be created as uuid.v4() 83 | 84 | const version = 1; 85 | producer.bufferFormat("my-topic", "my-message-key-identifier", {bla: "message as object"}, version, compressionType); 86 | /* same as .buffer(..) 
but with the fact that it wraps your message in a certain "standard" message json format e.g.: 87 | 88 | { 89 | payload: {bla: "message as object"}, 90 | key: "my-message-key-identifier", 91 | id: "my-message-key-identifier", 92 | time: "2017-05-29T11:58:15.139Z", 93 | type: "my-topic-published" 94 | } 95 | */ 96 | 97 | //using these methods you can control the create, update and delete messages via message.value.type description 98 | //its an easy schema that helps you to keep a simple design pattern for all of your kafka topics 99 | producer.bufferFormatPublish("my-topic", "my-message-key-identifier", {bla: "message as object"}, version, compressionType); 100 | producer.bufferFormatUpdate("my-topic", "my-message-key-identifier", {bla: "message as object"}, version, compressionType); 101 | producer.bufferFormatUnpublish("my-topic", "my-message-key-identifier", {bla: "message as object"}, version, compressionType); 102 | 103 | //besides setting keys (message identifiers) you can also set a key to that will make a distinct decision for the 104 | //partition that is produced to (identifiers and partition keys) have to be strings (partitionKey is an optional parameter) 105 | const distinctPartitionKeyValue = "my-distinct-partition-key-value"; 106 | producer.bufferFormatUpdate("my-topic", "my-message-key-identifier", {bla: "message as object"}, version, compressionType, distinctPartitionKeyValue); 107 | //if the partition key is not provided or null, the producer will use the identifier to determine a kafka partition 108 | }); 109 | 110 | producer.on("error", error => console.error(error)); 111 | ``` 112 | 113 | ## Common Methods 114 | 115 | ```javascript 116 | client.getStats() //returns an object with information about the consumer/producer 117 | client.pause() //stops (dont pause continously; the backpressure functionality of the consumer is most likely what you are looking for) 118 | client.resume() //continues 119 | client.close() //close the connection |-> you can pass true/false to the consumer to commit (false per default) the last state before closing 120 | ``` 121 | 122 | ## Configuring the kafka connection 123 | 124 | ```javascript 125 | const config = { 126 | 127 | //either one of the following, if you want to connect directly to the broker 128 | //you should omit the zkConStr field and just provide kafkaHost 129 | //zkConStr: "localhost:2181/kafka", 130 | kafkaHost: "localhost:9092", //no trailing slash here! 
131 | 132 | logger: { 133 | debug: msg => console.log(msg), 134 | info: msg => console.log(msg), 135 | warn: msg => console.log(msg), 136 | error: msg => console.log(msg) 137 | }, 138 | groupId: "test-group", 139 | clientName: "an-unimportant-name", 140 | workerPerPartition: 1, 141 | options: { 142 | sessionTimeout: 8000, 143 | protocol: ["roundrobin"], 144 | fromOffset: "earliest", //latest 145 | fetchMaxBytes: 1024 * 100, 146 | fetchMinBytes: 1, 147 | fetchMaxWaitMs: 10, 148 | heartbeatInterval: 250, 149 | retryMinTimeout: 250, 150 | autoCommit: true, //if you set this to false and run with backpressure the consumer will commit on every successfull batch 151 | autoCommitIntervalMs: 1000, 152 | requireAcks: 0, 153 | ackTimeoutMs: 100, 154 | partitionerType: 3 155 | } 156 | }; 157 | ``` 158 | 159 | ## FAQ 160 | 161 | ### 1 162 | * Q: My consumer does not receive any messages but no error is logged :( 163 | * A: Most likely the consumer cannot establish a connection to Zookeeper (the module wont log any errors, check the host, port, url setup) 164 | 165 | ### 2 166 | * Q: I get a lot of leader not available errors when using the producer :( 167 | * A: Check your partition settings, does the topic really have the corresponding amount of partitions available 168 | * A: Otherwise this might related to topic metadata, run `producer.refreshMetadata(["topic"]).then()` (once) first. 169 | 170 | ### 3 171 | * Q: Can I connect directly to the Kafka Broker without Zookeeper 172 | * A: Yes, it is possible simply provide "kafkaHost" as config instead of "zkConStr" 173 | -------------------------------------------------------------------------------- /docs/drainer.md: -------------------------------------------------------------------------------- 1 | # Drainer 2 | 3 | ```javascript 4 | 5 | const kafkaClient = new Kafka("zk-host:2181/kafka"); 6 | //const kafkaClient = new Kafka("kafka-host:9092/", null, true); //connect directly to kafka broker 7 | 8 | kafkaClient.becomeConsumer(["a-topic"], "consumerGroupId123", options); 9 | 10 | kafkaClient.on("ready", () => { 11 | consumer = new Drainer(kafkaClient, 1); //1 = thread/worker/parallel count 12 | 13 | consumer.drain((message, done) => { 14 | console.log(message); 15 | done(); 16 | }); 17 | 18 | consumer.stopDrain(); 19 | 20 | consumer.drainOnce((message, done) => { 21 | console.log(message); 22 | done(); 23 | }, DRAIN_THRESHOLD, DRAIN_TIMEOUT).then(r => { 24 | console.log("drain done: " + r); 25 | }).catch(e => { 26 | console.log("drain timeout: " + e); 27 | }); 28 | }); 29 | 30 | kafkaClient.on("error", err => console.log("consumer error: " + err)); 31 | ``` 32 | -------------------------------------------------------------------------------- /docs/health.md: -------------------------------------------------------------------------------- 1 | # NConsumer and NProducer health checks 2 | 3 | - as of sinek@6.11.0 the native clients support additional health check features 4 | - intelligent health checks require enabled [analytics features](analytics.md) 5 | - checkout the usage example below 6 | 7 | ```javascript 8 | "use strict"; 9 | const {NConsumer, NProducer} = require("sinek"); 10 | 11 | const consumer = new NConsumer(/* .. */); 12 | const producer = new NProducer(/* .. */); 13 | 14 | await consumer.connect(); 15 | consumer.consume(); 16 | 17 | await producer.connect(); 18 | 19 | consumer.enableAnalytics(/* .. */); 20 | producer.enableAnalytics(/* .. 
*/); 21 | 22 | //if you test this, make sure to await the first analytics event consumer.once("analytics", () => {}) 23 | 24 | const consumerHealth = await consumer.checkHealth(); 25 | 26 | /* consumer result e.g. */ 27 | { 28 | status: 0, 29 | messages: [ 30 | "No problems detected, client is healthy.", 31 | "Consumed 1240 message/s in the last interval, with 0 errors." 32 | ] 33 | } 34 | 35 | const producerHealth = await producer.checkHealth(); 36 | 37 | /* producer result e.g. */ 38 | { 39 | status: 0, 40 | messages: [ 41 | "No problems detected, client is healthy.", 42 | "Produced 520 message/s in the last interval, with 0 errors." 43 | ] 44 | } 45 | ``` 46 | 47 | - available status codes: 48 | 49 | ```javascript 50 | { 51 | DIS_ANALYTICS: -4, //you have not enabled analytics 52 | NO_ANALYTICS: -3, //no analytics result are available yet 53 | UNKNOWN: -2, //status is unknown, internal error occured 54 | UNCONNECTED: -1, //client is not connected yet 55 | HEALTHY: 0, //client is healthy 56 | RISK: 1, //client might be healthy, but something does not seem 100% right 57 | WARNING: 2, //client might be in trouble soon 58 | CRITICAL: 3 //something is wrong 59 | } 60 | ``` 61 | -------------------------------------------------------------------------------- /docs/hints.md: -------------------------------------------------------------------------------- 1 | # Hints 2 | 3 | - interesting options for tweaking consumers 4 | 5 | ```javascript 6 | const options = { 7 | sessionTimeout: 12500, 8 | protocol: ["roundrobin"], 9 | fromOffset: "latest", //earliest 10 | fetchMaxBytes: 1024 * 100, 11 | fetchMinBytes: 1, 12 | fetchMaxWaitMs: 100, 13 | autoCommit: true, 14 | autoCommitIntervalMs: 5000 15 | }; 16 | ``` 17 | 18 | - remove and create topic api will require a special broker configuration 19 | or these will just result in nothing at all 20 | 21 | ```javascript 22 | drainer.removeTopics([]).then(..) 23 | publisher.createTopics([]).then(..) 24 | ``` 25 | 26 | - using the `.getStats()` functions on Drainer, Publisher or 27 | PartitionDrainer you can get some valuable insights into whats 28 | currently going on in your client 29 | 30 | - when using "Drainer" to consume and write upserts into a database 31 | that require ACID functionality and a build-up of models/message-payloads 32 | you must set the AsyncLimit of new Drainer(.., 1) to "1" or you will 33 | have trouble with data integrity 34 | 35 | - if your data is spread entity wise above partitions you can use the 36 | "PartitionDrainer" to drain multiple partitions at the same time 37 | 38 | - the "Publisher" offers a simple API to create such (keyed) partitioned 39 | topics 40 | 41 | - it is probably a good idea to spawn a Consumer per Topic 42 | 43 | - example implementations can be found [here](https://github.com/nodefluent/kafka-streams/blob/master/lib/KafkaClient.js) 44 | and [here](https://github.com/nodefluent/kafka-connect/blob/master/lib) 45 | 46 | - potential issues/gotcha using `process.exit()` alongside nConsumer instance 47 | 48 | If for any reason you want your application to fail in an error scenario (to trigger a pod to restart in Kubernetes for example), calling `process.exit()` may not cause the application to exit as normal. 
This is because the consumer runs on a separate thread.
49 | In this scenario, logic to close the consumer should be added, and the application will then exit as expected:
50 |
51 | ```javascript
52 | // assuming an instance of NConsumer assigned to a variable 'myConsumer'
53 | const shutdownConsumer = function(){
54 |   myConsumer.close();
55 | };
56 |
57 | process.on('exit', shutdownConsumer);
58 | process.on('SIGTERM', shutdownConsumer);
59 | process.on('SIGINT', shutdownConsumer);
60 |
61 | // whoops, something bad happened
62 | process.exit();
63 |
64 | ```
65 |
--------------------------------------------------------------------------------
/docs/native.md:
--------------------------------------------------------------------------------
1 | # Native (librdkafka) Consumer & Producer
2 |
3 | - they are incredibly fast: consume 2m messages/sec and produce 1m messages/sec
4 | - they have additional analytics and health-check features
5 | - they require the installation of librdkafka + the native module node-rdkafka
6 | - they have a slightly different API compared to the connect (Javascript) variants
7 | - they support SASL (and Kerberos)
8 | - you can work directly with Buffers
9 |
10 | ## Setup (required actions to use the clients)
11 |
12 | - `npm i -g yarn` # make sure to have yarn available
13 |
14 | ### Debian/Ubuntu
15 |
16 | - `sudo apt install librdkafka-dev libsasl2-dev`
17 | - `rm -rf node_modules`
18 | - `yarn` # node-rdkafka is installed as optional dependency
19 |
20 | ### MacOS
21 |
22 | - `brew install librdkafka`
23 | - `brew install openssl`
24 | - `rm -rf node_modules`
25 | - `yarn` # node-rdkafka is installed as optional dependency
26 |
27 | ```shell
28 | # If you have an ssl problem with an error like: `Invalid value for configuration property "security.protocol"`
29 | # Add to your shell profile:
30 | export CPPFLAGS=-I/usr/local/opt/openssl/include
31 | export LDFLAGS=-L/usr/local/opt/openssl/lib
32 | # and redo the installation. (check out the yarn:openssl script in the package.json of this project)
33 | ```
34 |
35 | ## Using NConsumer & NProducer
36 |
37 | > Please **note**: You can find a best-practice example on how to use the consumer or producer [here](../examples/best-practice-example).
38 |
39 | - the API is almost the same as the [Connect Clients](connect.md)
40 | - the only difference is that the clients are prefixed with an **N**
41 |
42 | > so exchange `const {Consumer, Producer} = require("sinek");`
43 |
44 | > with `const {NConsumer, NProducer} = require("sinek");`
45 |
46 | You can find an implementation [example here](../examples/sasl-ssl-example)
47 |
48 | ## New/Additional Configuration Parameters
49 |
50 | - as usual sinek tries to be as polite as possible and will offer you the same
51 | config that you are used to using with the other clients
52 | - however *librdkafka* brings a whole lot of different config settings and parameters
53 | - you can overwrite them (or use it entirely) with a config sub-object called **noptions**,
54 | e.g. `const config = { noptions: { "metadata.broker.list": "localhost:9092"} };`
55 | - a full list and descriptions of config params can be found [CONFIG HERE](https://github.com/edenhill/librdkafka/blob/0.9.5.x/CONFIGURATION.md)
56 | - producer poll interval can be configured via `const config = { options: { pollIntervalMs: 100 }};` - *default is 100ms*
57 | - consumer poll grace (only 1 by 1 mode) can be configured via `const config = { options: { consumeGraceMs: 125 }};` - *default is 1000ms*
58 | - when **noptions** is set, you do not have to set the old config params
59 |
60 | ## Producer Auto Partition Count Mode
61 |
62 | - it is possible to let the producer automatically handle the amount
63 | of partitions (max count) of topics, when producing
64 | - to do that you must pass `"auto"` as third argument of the constructor
65 | `new NProducer(config, null, "auto");`
66 | - and you must not pass a specific partition to the `send(), buffer() or bufferXXX()` functions
67 | - this way the producer will fetch the metadata for the specific topics,
68 | parse the partition count from it and use it as max value for its random or deterministic
69 | partition selection approach
70 | - *NOTE:* the topic must exist before producing if you are using the `auto` mode
71 | - when auto mode is enabled you can use `producer.getStoredPartitionCounts()` to grab
72 | the locally cached partition counts
73 |
74 | ### Getting Metadata via Producer
75 |
76 | - `producer.getMetadata()` returns generic information about all topics on the connected broker
77 | - `producer.getTopicMetadata("my-topic")` returns info for a specific topic (*NOTE:* will create the topic
78 | if it does not already exist)
79 | - `Metadata` instances offer a few handy formatting functions, check out `/lib/librdkafka/Metadata.js`
80 |
81 | ## Switching Producer Partition "Finder" mode
82 |
83 | - when passing no partition argument to the send or buffer methods, the producer will deterministically
84 | identify the partition to produce the message to, by running a modulo (to the partition count of the topic)
85 | on the key (or generated key) of the message.
86 | - as the key is a string, it has to be turned into a hash; by default sinek uses murmurhash version 3 for that
87 | - the JAVA clients use murmurhash version 2 -> so if you want to stay compatible simply pass a config field:
88 |
89 | ```javascript
90 | const config = {
91 |   options: {
92 |     murmurHashVersion: "2"
93 |   }
94 | };
95 | ```
96 |
97 | ## Consumer Modes
98 |
99 | - 1 by 1 mode by passing **a callback** to `.consume()` consumes a single message and commits after the callback each round
100 | - asap mode by passing **no callback** to `.consume()` consumes messages as fast as possible
101 |
102 | ### Advanced 1:n consumer mode
103 |
104 | - as stated above, passing an iteratee function to .consume() as first parameter will enable 1 by 1 mode,
105 | where every kafka message is consumed singlehandedly, passed to the function and committed afterwards, before
106 | consuming the next message -> while this is secure and ensures that no message is left untreated, even if your
107 | consumer dies, it is also very slow
108 | - which is why we added options to control the behavior as you wish:
109 |
110 | ```javascript
111 | /*
112 | * batchSize (default 1) amount of messages that is max.
fetched per round 113 | * commitEveryNBatch (default 1) amount of messages that should be processed before committing 114 | * concurrency (default 1) the concurrency of the execution per batch 115 | * commitSync (default true) if the commit action should be blocking or non-blocking 116 | * noBatchCommits (default false) if set to true, no commits will be made for batches 117 | * manualBatching (default false) if set to true, syncEvent will receive a whole batch instead of single messages 118 | * sortedManualBatch (default false) if set to true, syncEvent will receive a whole batch of sorted messages { topic: { partition: [ messages ] } } 119 | */ 120 | 121 | const options = { 122 | batchSize: 500, //grab up to 500 messages per batch round 123 | commitEveryNBatch: 5, //commit all offsets on every 5th batch 124 | concurrency: 2, //calls synFunction in parallel * 2 for messages in batch 125 | commitSync: false, //commits asynchronously (faster, but potential danger of growing offline commit request queue) => default is true 126 | noBatchCommits: false, //default is false, IF YOU SET THIS TO true THERE WONT BE ANY COMMITS FOR BATCHES 127 | manualBatching: false, // default is false, IF YOU SET THIS TO TRUE consume(syncEvent(messages: [{}])) (see above) 128 | sortedManualBatch: false, // default is false, IF YOU SET THIS TO TRUE consume(syncEvent({..})) (see above) 129 | }; 130 | 131 | myNConsumer.consume(syncFunction, true, false, options); 132 | ``` 133 | 134 | - when active, this mode will also expose more a field called `batch` with insight stats on the `.getStats()` object 135 | - and the consumer instance will emit a `consumer.on("batch", messages => {});` event 136 | 137 | ### Consuming Multiple Topics efficiently 138 | 139 | * Do not spawn multiple consumers unless you need a largely split offset handling or message processing 140 | * Spawn a single consumer and subscribe to multiple topics e.g. `new NConsumer(["topic1", "topic2", ..], ..)` 141 | * Choose Batch-Mode for consumption with manualy commits per topic like so 142 | 143 | ```javascript 144 | const kafkaConfig = {/* .. */}; 145 | const kafkaTopics = ["one", "two", "three"]; 146 | const batchOptions = { 147 | batchSize: 1000, // decides on the max size of our "batchOfMessages" 148 | commitEveryNBatch: 1, // will be ignored 149 | concurrency: 1, // will be ignored 150 | commitSync: false, // will be ignored 151 | noBatchCommits: true, // important, because we want to commit manually 152 | manualBatching: true, // important, because we want to control the concurrency of batches ourselves 153 | sortedManualBatch: true, // important, because we want to receive the batch in a per-partition format for easier processing 154 | }; 155 | 156 | const consumer = new NConsumer(kafkaTopics, kafkaConfig); 157 | await consumer.connect(); 158 | consumer.consume(async (batchOfMessages, callback) => { 159 | 160 | /* 161 | export interface SortedMessageBatch { 162 | [topic: string]: { 163 | [partition: number]: KafkaMessage[]; 164 | }; 165 | } 166 | */ 167 | 168 | // parallel processing on topic level 169 | const topicPromises = Object.keys(batchOfMessages).map(async (topic) => { 170 | 171 | // parallel processing on partition level 172 | const partitionPromises = Object.keys(batchOfMessages[topic]).map((partition) => { 173 | 174 | // sequential processing on message level (to respect ORDER) 175 | const messages = batchOfMessages[topic][partition]; 176 | // write batch of messages to db, process them e.g. 
async.eachLimit
177 |       return Promise.resolve();
178 |     });
179 |
180 |     // wait until all partitions of this topic are processed and commit its offset
181 |     // make sure to keep batch sizes large enough, you don't want to commit too often
182 |     await Promise.all(partitionPromises);
183 |     await consumer.commitLocalOffsetsForTopic(topic);
184 |   });
185 |
186 |   await Promise.all(topicPromises);
187 |   // the callback still controls the "backpressure"
188 |   // as soon as you call it, it will fetch the next batch of messages
189 |   callback();
190 |
191 | }, true, false, batchOptions);
192 | ```
193 |
194 | ### Accessing Consumer Offset Information
195 |
196 | - `consumer.getOffsetForTopicPartition("my-topic", 1).then(offsets => {});`
197 | - `consumer.getComittedOffsets().then(offsets => {});`
198 | - `const info = consumer.getAssignedPartitions();`
199 | - `consumer.getLagStatus().then(offsets => {});` -> automatically fetches and compares offsets for all assigned partitions for you
200 |
201 | ## Complex Analytics Access for Consumers and Producers
202 |
203 | - additional information regarding performance and offset lags is exposed through analytics functions
204 | - take a look at the description [here](analytics.md)
205 |
206 | ## Intelligent Health Check for Consumers and Producers
207 |
208 | - when analytics are enabled, the clients also offer an intelligent health check functionality
209 | - take a look at the description [here](health.md)
210 |
211 | ## Buffer, String or JSON as message values
212 |
213 | - you can call `producer.send()` with a string or with a Buffer instance
214 | - you can only call `producer.bufferXXX()` methods with objects
215 | - you can consume buffer message values with `consumer.consume(_, false, false)`
216 | - you can consume string message values with `consumer.consume(_, true, false)` - *this is the default*
217 | - you can consume json message values with `consumer.consume(_, true, true)`
218 |
219 | ## Debug Messages
220 |
221 | - if you do not pass a logger via config: `const config = { logger: { debug: console.log, info: console.log, .. }};`
222 | - the native clients will use the debug module to log messages
223 | - use e.g. `DEBUG=sinek:n* npm test`
224 |
225 | ## Memory Usage
226 |
227 | Make sure you read the explanation of the consumer's memory usage in the librdkafka [FAQ](https://github.com/edenhill/librdkafka/wiki/FAQ#explain-the-consumers-memory-usage-to-me).
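For orientation, here is a minimal sketch of how such librdkafka settings reach a native client through sinek's `noptions` sub-object; the broker address, group id and topic are placeholder values, and the memory related settings are the ones recommended in the next section:

```javascript
const { NConsumer } = require("sinek");

// placeholder broker/group/topic values; memory settings as recommended below
const consumer = new NConsumer(["my-topic"], {
  noptions: {
    "metadata.broker.list": "localhost:9092",
    "group.id": "my-group",
    "api.version.request": true,
    // limit librdkafka's internal queues to keep memory usage bounded
    "queued.min.messages": 1000,
    "queued.max.messages.kbytes": 5000,
    "fetch.message.max.bytes": 524288,
  },
});
```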
228 | 229 | ## Our experience 230 | 231 | To limit memory usage, you need to set noptions to: 232 | 233 | ```json 234 | { 235 | "noptions": { 236 | "metadata.broker.list": "kafka:9092", 237 | "group.id": "consumer-group-1", 238 | "api.version.request": true, 239 | "queued.min.messages": 1000, 240 | "queued.max.messages.kbytes": 5000, 241 | "fetch.message.max.bytes": 524288, 242 | } 243 | } 244 | ``` 245 | 246 | - these values ^ are now set as default (sinek >= 6.5.0) 247 | 248 | ## Altering subscriptions 249 | 250 | - `consumer.addSubscriptions(["topic1", "topic2"])` -> will add additional subscriptions 251 | - `consumer.adjustSubscription(["topic1"])` -> will change subcriptions to these only 252 | 253 | ## Resuming and Pausing Clients 254 | 255 | - you can resume and pause clients via 256 | - `client.pause(topicPartitions);` 257 | - `client.resume(topicPartitions);` 258 | - topicPartitions is an array of objects `[{topic: "test", partition: 0}]` 259 | - be carefull, as a paused producer rejects if you try to send 260 | 261 | ## Deleting messages from topics | producing tombstone messages 262 | 263 | - the producer has a simple API to help you with this `producer.tombstone("my-topic", "my-key", null);` 264 | - **NOTE**: this only works on topics that have key compaction enabled in their configuration 265 | - please ensure you produce to the right topic -------------------------------------------------------------------------------- /docs/partition-drainer.md: -------------------------------------------------------------------------------- 1 | # PartitionDrainer [faster ~ runs a queue per topic partition] 2 | 3 | ```javascript 4 | const kafkaClient = new Kafka("zk-host:2181/kafka"); 5 | //const kafkaClient = new Kafka("kafka-host:9092/", null, true); //connect directly to kafka broker 6 | 7 | kafkaClient.becomeConsumer(["a-topic"], "consumerGroupId123", options); 8 | 9 | kafkaClient.on("ready", () => { 10 | consumer = new PartitionDrainer(kafkaClient, 1); //1 = thread/worker/parallel count per partition 11 | 12 | //drain requires a topic-name and returns a promise 13 | consumer.drain(TEST_TOPIC, (message, done) => { 14 | console.log(message); 15 | done(); 16 | }).then(_ => ..).catch(e => console.log(e)); 17 | 18 | consumer.stopDrain(); 19 | 20 | //drainOnce requires a topic-name 21 | consumer.drainOnce(TEST_TOPIC, (message, done) => { 22 | console.log(message); 23 | done(); 24 | }, DRAIN_THRESHOLD, DRAIN_TIMEOUT).then(r => { 25 | console.log("drain done: " + r); 26 | }).catch(e => { 27 | console.log("drain timeout: " + e); 28 | }); 29 | }); 30 | 31 | kafkaClient.on("error", err => console.log("consumer error: " + err)); 32 | ``` 33 | -------------------------------------------------------------------------------- /docs/publisher.md: -------------------------------------------------------------------------------- 1 | # Publisher 2 | 3 | ```javascript 4 | const kafkaClient = new Kafka("zk-host:2181/kafka"); 5 | //const kafkaClient = new Kafka("kafka-host:9092/", null, true); //connect directly to kafka broker 6 | 7 | kafkaClient.becomeProducer(["my-topic"], "a-client-name", options); 8 | 9 | kafkaClient.on("ready", () => { 10 | producer = new Publisher(kafkaClient, 30); //partition count should be the default count on your brokers partiitons e.g. 
30 11 | 12 | producer.send(topic, messages, partitionKey, partition, compressionType) 13 | producer.batch(topic, []) 14 | 15 | producer.appendBuffer(topic, identifier, object, compressionType) 16 | producer.flushBuffer(topic) 17 | 18 | //easy api that uses a KeyedPartitioner Type and identifies the 19 | //target partition for the object's identifier by itself 20 | //it also brings your payload (object) in perfect shape for 21 | //a nicely consumable topic 22 | //call producer.flushBuffer(topic) to batch send the payloads 23 | producer.bufferPublishMessage(topic, identifier, object, version, compressionType) 24 | producer.bufferUnpublishMessage(topic, identifier, object, version, compressionType) 25 | producer.bufferUpdatehMessage(topic, identifier, object, version, compressionType) 26 | }); 27 | 28 | kafkaClient.on("error", err => console.log("producer error: " + err)); 29 | ``` 30 | -------------------------------------------------------------------------------- /examples/best-practice-example/consumer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const { JSConsumer } = require("sinek"); 4 | 5 | // const { NConsumer } = require("sinek"); 6 | // simply replace to use the native lib-rdkafka consumer 7 | 8 | const kafkaTopics = ["one", "two", "three"]; 9 | 10 | const consumerConfiguration = { 11 | noptions: { 12 | "metadata.broker.list": "localhost:9092", 13 | "group.id": "example-group", 14 | "enable.auto.commit": false, 15 | "socket.keepalive.enable": true, 16 | "api.version.request": true, 17 | "socket.blocking.max.ms": 100, 18 | }, 19 | tconf: { 20 | "auto.offset.reset": "earliest", 21 | }, 22 | }; 23 | 24 | (async () => { 25 | const consumer = new JSConsumer(kafkaTopics, consumerConfiguration); 26 | consumer.on("error", (error) => console.error(error)); 27 | await consumer.connect(); 28 | consumer.consume(async (messages, callback) => { 29 | // deal with the array of messages 30 | // and when you're done, call the callback to commit (depending on your batch settings) 31 | callback(); 32 | }, true, false); // batchOptions are only supported with NConsumer 33 | })().catch(console.error); 34 | -------------------------------------------------------------------------------- /examples/best-practice-example/producer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const { JSProducer } = require("sinek"); 4 | // const { NProducer } = require("sinek"); 5 | // simply replace to use the native lib-rdkafka producer 6 | 7 | const producerConfiguration = { 8 | noptions: { 9 | "metadata.broker.list": "localhost:9092", 10 | "client.id": "example-client", 11 | "compression.codec": "none", 12 | "socket.keepalive.enable": true, 13 | "api.version.request": true, 14 | "queue.buffering.max.ms": 1000, 15 | "batch.num.messages": 500, 16 | }, 17 | tconf: { 18 | "request.required.acks": 1 19 | }, 20 | }; 21 | 22 | // amount of partitions of the topics this producer produces to 23 | const partitionCount = 1; // all messages to partition 0 24 | 25 | (async () => { 26 | const producer = new JSProducer(producerConfiguration, null, partitionCount); 27 | producer.on("error", error => console.error(error)); 28 | await producer.connect(); 29 | const { offset } = await producer.send("my-topic", "my-message", 0, "my-key", "my-partition-key"); 30 | 31 | })().catch(console.error); 32 | -------------------------------------------------------------------------------- /examples/sasl-ssl-example/README.md: 
-------------------------------------------------------------------------------- 1 | # SASL-SSL-Example 2 | 3 | - start the zk+broker combo [here](../../kafka-setup): `yarn run kafka:start` 4 | - run `node producer.js` and wait until the producer is connected and sending (as the broker will have to create the topic during the first start) 5 | - run `node consumer.js` to receive the produced messages 6 | 7 | > use debug mode to see more information, e.g. `DEBUG=* node producer.js` 8 | -------------------------------------------------------------------------------- /examples/sasl-ssl-example/config.js: -------------------------------------------------------------------------------- 1 | const debug = require("debug"); 2 | const path = require("path"); 3 | 4 | const logger = { 5 | debug: debug("sinek:debug"), 6 | info: debug("sinek:info"), 7 | warn: debug("sinek:warn"), 8 | error: debug("sinek:error") 9 | }; 10 | 11 | const consumerConfig = { 12 | logger, 13 | noptions: { 14 | //"debug": "all", 15 | "metadata.broker.list": "localhost:9193", 16 | "group.id": "example-group", 17 | "enable.auto.commit": false, 18 | "event_cb": true, 19 | "compression.codec": "none", 20 | "retry.backoff.ms": 200, 21 | "message.send.max.retries": 10, 22 | "socket.keepalive.enable": true, 23 | "queue.buffering.max.messages": 100000, 24 | "queue.buffering.max.ms": 1000, 25 | "batch.num.messages": 1000000, 26 | 27 | "security.protocol": "sasl_ssl", 28 | "ssl.key.location": path.join(__dirname, "../certs/ca-key"), 29 | "ssl.key.password": "nodesinek", 30 | "ssl.certificate.location": path.join(__dirname,"../certs/ca-cert"), 31 | "ssl.ca.location": path.join(__dirname,"../certs/ca-cert"), 32 | "sasl.mechanisms": "PLAIN", 33 | "sasl.username": "admin", 34 | "sasl.password": "nodesinek", 35 | "api.version.request": true, 36 | }, 37 | tconf: { 38 | "auto.offset.reset": "earliest" 39 | } 40 | }; 41 | 42 | const producerConfig = { 43 | logger, 44 | noptions: { 45 | //"debug": "all", 46 | "metadata.broker.list": "localhost:9193", 47 | "client.id": "example-client", 48 | "event_cb": true, 49 | "compression.codec": "none", 50 | "retry.backoff.ms": 200, 51 | "message.send.max.retries": 10, 52 | "socket.keepalive.enable": true, 53 | "queue.buffering.max.messages": 100000, 54 | "queue.buffering.max.ms": 1000, 55 | "batch.num.messages": 1000000, 56 | 57 | "security.protocol": "sasl_ssl", 58 | "ssl.key.location": path.join(__dirname, "../certs/ca-key"), 59 | "ssl.key.password": "nodesinek", 60 | "ssl.certificate.location": path.join(__dirname,"../certs/ca-cert"), 61 | "ssl.ca.location": path.join(__dirname,"../certs/ca-cert"), 62 | "sasl.mechanisms": "PLAIN", 63 | "sasl.username": "admin", 64 | "sasl.password": "nodesinek", 65 | "api.version.request": true, 66 | }, 67 | tconf: { 68 | "request.required.acks": 1 69 | } 70 | }; 71 | 72 | module.exports = { 73 | consumerConfig, 74 | producerConfig 75 | }; 76 | -------------------------------------------------------------------------------- /examples/sasl-ssl-example/consumer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const {NConsumer} = require("./../index.js"); 4 | const { consumerConfig: config } = require("./config.js"); 5 | const consumer = new NConsumer("test", config); 6 | 7 | consumer.on("error", error => config.logger.error(error)); 8 | 9 | /* Flow Mode 10 | consumer.connect().then(() => { 11 | config.logger.info("connected"); 12 | consumer.consume(); 13 | }).catch(error => config.logger.error(error)); 14 | */ 15 | 
16 | /* Streaming Mode */ 17 | consumer.connect(true, {asString: true, asJSON: false}).then(() => { 18 | config.logger.info("connected"); 19 | }).catch(error => config.logger.error(error)); 20 | 21 | consumer.on("message", message => config.logger.info(message.offset, message.value)); 22 | -------------------------------------------------------------------------------- /examples/sasl-ssl-example/producer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const {NProducer} = require("./../index.js"); 4 | const { producerConfig: config } = require("./config.js"); 5 | const producer = new NProducer(config, ["test"], 1); 6 | 7 | producer.on("error", error => config.logger.error(error)); 8 | 9 | producer.connect().then(() => { 10 | config.logger.info("connected."); 11 | setInterval(() => { 12 | config.logger.info("send"); 13 | producer.send("test", "abc123"); 14 | }, 1000); 15 | }).catch(error => config.logger.error(error)); 16 | -------------------------------------------------------------------------------- /examples/ssl-example/README.md: -------------------------------------------------------------------------------- 1 | # SSL-Example 2 | 3 | - start the zk+broker combo [here](../../kafka-setup): `yarn run kafka:start` 4 | - run `node producer.js` and wait until the producer is connected and sending (as the broker will have to create the topic during the first start) 5 | - run `node consumer.js` to receive the produced messages 6 | 7 | > use debug mode to see more information, e.g. `DEBUG=* node producer.js` 8 | -------------------------------------------------------------------------------- /examples/ssl-example/config.js: -------------------------------------------------------------------------------- 1 | const debug = require("debug"); 2 | const fs = require("fs"); 3 | 4 | const config = { 5 | kafkaHost: "localhost:9093", 6 | logger: { 7 | debug: debug("sinek:debug"), 8 | info: debug("sinek:info"), 9 | warn: debug("sinek:warn"), 10 | error: debug("sinek:error") 11 | }, 12 | groupId: "example-group", 13 | clientName: "example-name", 14 | workerPerPartition: 1, 15 | options: { 16 | ssl: true, 17 | sslOptions: { 18 | // https://nodejs.org/dist/latest-v8.x/docs/api/tls.html#tls_tls_createsecurecontext_options 19 | rejectUnauthorized: true, 20 | key: fs.readFileSync("../certs/ca-key"), 21 | cert: fs.readFileSync("../certs/ca-cert"), 22 | ca:[fs.readFileSync("../certs/ca-cert")], 23 | passphrase: "nodesinek" 24 | }, 25 | sessionTimeout: 8000, 26 | protocol: ["roundrobin"], 27 | fromOffset: "latest", 28 | fetchMaxBytes: 1024 * 1024, 29 | fetchMinBytes: 1, 30 | fetchMaxWaitMs: 10, 31 | heartbeatInterval: 250, 32 | retryMinTimeout: 250, 33 | autoCommit: true, 34 | autoCommitIntervalMs: 1000, 35 | requireAcks: 1, 36 | ackTimeoutMs: 100, 37 | partitionerType: 3 38 | } 39 | }; 40 | 41 | module.exports = config; 42 | -------------------------------------------------------------------------------- /examples/ssl-example/consumer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const {Consumer} = require("./../index.js"); 4 | const consumer = new Consumer("test", require("./config.js")); 5 | 6 | consumer.on("error", error => console.error(error)); 7 | 8 | consumer.connect(false).then(() => { 9 | console.log("connected"); 10 | consumer.consume(); 11 | }).catch(error => console.error(error)); 12 | 13 | consumer.on("message", message => console.log(message.offset, message.value)); 14 | 
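// connect(false) connects the client without streaming mode; calling
// consume() without a callback then runs in flow mode, emitting one
// "message" event per received message (compare ../sasl-ssl-example/consumer.js,
// which also shows the streaming variant).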
-------------------------------------------------------------------------------- /examples/ssl-example/producer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const {Producer} = require("./../index.js"); 4 | const producer = new Producer(require("./config.js"), ["test"], 1); 5 | 6 | producer.on("error", error => console.error(error)); 7 | 8 | producer.connect().then(() => { 9 | console.log("connected."); 10 | setInterval(() => { 11 | console.log("send"); 12 | producer.send("test", "abc123"); 13 | }, 1000); 14 | }).catch(error => console.error(error)); 15 | -------------------------------------------------------------------------------- /kafka-setup/README.md: -------------------------------------------------------------------------------- 1 | ## Local Kafka PLAINTEXT/SSL/SSL_SASL Dev Setup 2 | 3 | ### Requirements 4 | 5 | * Docker 6 | * Docker Compose 7 | * JDK (for keytool, `sudo apt install openjdk-8-jdk`) 8 | 9 | ### Usage 10 | 11 | > Run commands from project root 12 | 13 | - Start kafka: `npm run kafka:start` 14 | - Stop kafka: `npm run kafka:stop` 15 | - Show kafka logs: `npm run kafka:logs` 16 | - Produce to **test** topic with SSL_SASL: `npm run kafka:console produce` 17 | - Consume from **test** topic with SSL_SASL: `npm run kafka:console consume` 18 | -------------------------------------------------------------------------------- /kafka-setup/alpine.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:alpine 2 | 3 | RUN apk add --upgrade --no-cache \ 4 | alpine-sdk \ 5 | libc6-compat \ 6 | bash \ 7 | make \ 8 | gcc \ 9 | g++ \ 10 | python \ 11 | cyrus-sasl-dev \ 12 | libressl2.5-libcrypto --repository http://dl-3.alpinelinux.org/alpine/edge/main/ --allow-untrusted \ 13 | libressl2.5-libssl --repository http://dl-3.alpinelinux.org/alpine/edge/main/ --allow-untrusted \ 14 | librdkafka-dev --repository http://dl-3.alpinelinux.org/alpine/edge/community/ --allow-untrusted \ 15 | dumb-init --repository http://dl-3.alpinelinux.org/alpine/edge/community/ --allow-untrusted 16 | 17 | ENV BUILD_LIBRDKAFKA=0 18 | 19 | # place Dockerfile in your app folder first 20 | WORKDIR /usr/src/app 21 | COPY . 
/usr/src/app/ 22 | RUN yarn install 23 | 24 | CMD ["yarn", "start"] 25 | -------------------------------------------------------------------------------- /kafka-setup/client-jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | org.apache.kafka.common.security.plain.PlainLoginModule required 3 | username="admin" 4 | password="nodesinek" 5 | user_admin="nodesinek" 6 | ; 7 | }; 8 | -------------------------------------------------------------------------------- /kafka-setup/client.properties: -------------------------------------------------------------------------------- 1 | compression.type=none 2 | security.protocol=SASL_SSL 3 | sasl.mechanism=PLAIN 4 | ssl.truststore.password=nodesinek 5 | ssl.keystore.password=nodesinek 6 | ssl.key.password=nodesinek 7 | 8 | # bootstrap.servers=localhost:9093 9 | # ssl.truststore.location=../certs/docker.kafka.server.truststore.jks 10 | # ssl.keystore.location=../certs/docker.kafka.server.keystore.jks 11 | -------------------------------------------------------------------------------- /kafka-setup/debian.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:8 2 | 3 | RUN mkdir -p /usr/src/app \ 4 | && apt-get update && apt-get install -y build-essential python librdkafka-dev libsasl2-dev libsasl2-modules openssl \ 5 | && apt-get autoremove -y && apt-get autoclean -y \ 6 | && rm -rf /var/lib/apt/lists/* 7 | 8 | # place Dockerfile in your app folder first 9 | WORKDIR /usr/src/app 10 | COPY . /usr/src/app/ 11 | RUN yarn install 12 | 13 | CMD ["yarn", "start"] 14 | -------------------------------------------------------------------------------- /kafka-setup/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | zookeeper: 4 | image: wurstmeister/zookeeper:latest 5 | ports: 6 | - 2181:2181 7 | environment: 8 | JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/server-jaas.conf" 9 | volumes: 10 | - ./server-jaas.conf:/etc/kafka/server-jaas.conf 11 | 12 | kafka: 13 | image: wurstmeister/kafka:2.11-1.1.1 14 | ports: 15 | - "9092:9092" 16 | - "9093:9093" 17 | - "9193:9193" 18 | links: 19 | - zookeeper:zookeeper 20 | depends_on: 21 | - zookeeper 22 | environment: 23 | KAFKA_BROKER_ID: 1 24 | KAFKA_PORT: 9092 25 | KAFKA_ADVERTISED_PORT: 9093 26 | KAFKA_ADVERTISED_HOST_NAME: "localhost" 27 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://localhost:9092,SSL://localhost:9093,SASL_SSL://localhost:9193" 28 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_SSL://:9193" 29 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 30 | KAFKA_SSL_KEYSTORE_LOCATION: "/certs/docker.kafka.server.keystore.jks" 31 | KAFKA_SSL_TRUSTSTORE_LOCATION: "/certs/docker.kafka.server.truststore.jks" 32 | KAFKA_SSL_KEYSTORE_PASSWORD: "nodesinek" 33 | KAFKA_SSL_KEY_PASSWORD: "nodesinek" 34 | KAFKA_SSL_TRUSTSTORE_PASSWORD: "nodesinek" 35 | KAFKA_SSL_CLIENT_AUTH: "required" 36 | KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "SASL_SSL" 37 | KAFKA_CREATE_TOPICS: "test:1:1,n-test-topic:1:1" 38 | KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" 39 | KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "PLAIN" 40 | KAFKA_SASL_ENABLED_MECHANISMS: "PLAIN" 41 | KAFKA_JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/server-jaas.conf" 42 | volumes: 43 | - /tmp/kafka-data/data:/data 44 | - /tmp/kafka-data/logs:/logs 45 | - ./server-jaas.conf:/etc/kafka/server-jaas.conf 46 | - ../certs:/certs 47 | - /var/run/docker.sock:/var/run/docker.sock 
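# note: the three advertised listeners above match the example configs:
# 9092 plaintext (best-practice-example), 9093 ssl (ssl-example),
# 9193 sasl_ssl (sasl-ssl-example)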
48 | -------------------------------------------------------------------------------- /kafka-setup/generate-certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | PASSWORD="nodesinek" 3 | CN_HOST="localhost" 4 | SERVER_KEYSTORE_JKS="docker.kafka.server.keystore.jks" 5 | SERVER_KEYSTORE_P12="docker.kafka.server.keystore.p12" 6 | SERVER_KEYSTORE_PEM="docker.kafka.server.keystore.pem" 7 | SERVER_TRUSTSTORE_JKS="docker.kafka.server.truststore.jks" 8 | CLIENT_TRUSTSTORE_JKS="docker.kafka.client.truststore.jks" 9 | echo "Clearing existing Kafka SSL certs..." 10 | 11 | BASEDIR=$(git rev-parse --show-toplevel) 12 | 13 | rm -rf ${BASEDIR}/certs 14 | mkdir ${BASEDIR}/certs 15 | ( 16 | echo "Generating new Kafka SSL certs in \"${BASEDIR}/certs\" folder..." 17 | cd ${BASEDIR}/certs 18 | keytool -keystore $SERVER_KEYSTORE_JKS -alias localhost -validity 730 -genkey -storepass $PASSWORD -keypass $PASSWORD \ 19 | -dname "CN=${CN_HOST}, OU=None, O=None, L=Cologne, S=Cologne, C=DE" 20 | openssl req -new -x509 -keyout ca-key -out ca-cert -days 730 -passout pass:$PASSWORD \ 21 | -subj "/C=DE/S=Cologne/L=Cologne/O=None/OU=None/CN=${CN_HOST}" 22 | keytool -keystore $SERVER_TRUSTSTORE_JKS -alias CARoot -import -file ca-cert -storepass $PASSWORD -noprompt 23 | keytool -keystore $CLIENT_TRUSTSTORE_JKS -alias CARoot -import -file ca-cert -storepass $PASSWORD -noprompt 24 | keytool -keystore $SERVER_KEYSTORE_JKS -alias localhost -certreq -file cert-file -storepass $PASSWORD -noprompt 25 | openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days 730 -CAcreateserial -passin pass:$PASSWORD 26 | keytool -keystore $SERVER_KEYSTORE_JKS -alias CARoot -import -file ca-cert -storepass $PASSWORD -noprompt 27 | keytool -keystore $SERVER_KEYSTORE_JKS -alias localhost -import -file cert-signed -storepass $PASSWORD -noprompt 28 | keytool -importkeystore -srckeystore $SERVER_KEYSTORE_JKS -destkeystore $SERVER_KEYSTORE_P12 -srcstoretype JKS -deststoretype PKCS12 -srcstorepass $PASSWORD -deststorepass $PASSWORD -noprompt 29 | # PEM for KafkaCat 30 | openssl pkcs12 -in $SERVER_KEYSTORE_P12 -out $SERVER_KEYSTORE_PEM -nodes -passin pass:$PASSWORD 31 | chmod +rx * 32 | ) 33 | -------------------------------------------------------------------------------- /kafka-setup/kafka-console.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | BASEDIR=$(git rev-parse --show-toplevel) 4 | 5 | COMMAND=${@} 6 | TOPIC=${TOPIC:-test} 7 | KAFKA_HOST=${KAFKA_HOST:-localhost:9193} 8 | ZHOST=${ZHOST:-localhost:2181} 9 | CLIENT_JAAS="${BASEDIR}/kafka-setup/client-jaas.conf" 10 | 11 | echo "Kafka host: ${KAFKA_HOST} // ZHost: ${ZHOST}" 12 | 13 | create_client_properties () { 14 | if [ -z "${CLIENT_PROPERTIES}" ]; then 15 | mkdir -p /tmp/kafka-data 16 | cp ${BASEDIR}/kafka-setup/client.properties /tmp/kafka-data/client.properties 17 | echo "bootstrap.servers=${KAFKA_HOST}" >> /tmp/kafka-data/client.properties 18 | echo "ssl.truststore.location=${BASEDIR}/certs/docker.kafka.server.truststore.jks" >> /tmp/kafka-data/client.properties 19 | echo "ssl.keystore.location=${BASEDIR}/certs/docker.kafka.server.keystore.jks" >> /tmp/kafka-data/client.properties 20 | CLIENT_PROPERTIES="/tmp/kafka-data/client.properties" 21 | fi 22 | } 23 | 24 | case "$COMMAND" in 25 | "consume") 26 | echo "Consume on ${TOPIC} topic." 
27 | create_client_properties 28 | KAFKA_LOG4J_OPTS="-Djava.security.auth.login.config=${CLIENT_JAAS}" kafka-console-consumer --from-beginning --bootstrap-server=${KAFKA_HOST} --topic=${TOPIC} --consumer.config=${CLIENT_PROPERTIES} 29 | ;; 30 | "produce") 31 | echo "Produce on ${TOPIC} topic." 32 | create_client_properties 33 | KAFKA_LOG4J_OPTS="-Djava.security.auth.login.config=${CLIENT_JAAS}" kafka-console-producer --broker-list=${KAFKA_HOST} --topic=${TOPIC} --producer.config=${CLIENT_PROPERTIES} 34 | ;; 35 | "create-topic") 36 | echo "Create ${TOPIC} topic:" 37 | kafka-topics --create --zookeeper=${ZHOST} --replication-factor=1 --partitions=1 --topic=${TOPIC} 38 | ;; 39 | "topics") 40 | echo "Topic list:" 41 | kafka-topics --zookeeper=${ZHOST} --list 42 | ;; 43 | *) 44 | echo "Invalid command." 45 | echo "Usage: $0 {consume|produce|create-topic|topics}" 46 | echo " consume : Consume on ${TOPIC} topic." 47 | echo " produce : Produce on ${TOPIC} topic." 48 | echo " create-topic : Create the ${TOPIC} topic." 49 | echo " topics : Show topic list." 50 | ;; 51 | esac 52 | -------------------------------------------------------------------------------- /kafka-setup/server-jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.plain.PlainLoginModule required 3 | username="admin" 4 | password="nodesinek" 5 | user_admin="nodesinek" 6 | ; 7 | }; 8 | KafkaClient { 9 | org.apache.kafka.common.security.plain.PlainLoginModule required 10 | username="admin" 11 | password="nodesinek" 12 | user_admin="nodesinek" 13 | ; 14 | }; 15 | -------------------------------------------------------------------------------- /kafka-setup/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | rm -rf /tmp/kafka-data 3 | mkdir /tmp/kafka-data 4 | mkdir /tmp/kafka-data/data 5 | mkdir /tmp/kafka-data/logs 6 | chmod -R 777 /tmp/kafka-data 7 | 8 | BASEDIR=$(git rev-parse --show-toplevel) 9 | 10 | if [ -z "$(docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml ps -q)" ]; then 11 | ${BASEDIR}/kafka-setup/generate-certs.sh 12 | docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml rm 13 | fi 14 | 15 | docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml up -d 16 | -------------------------------------------------------------------------------- /kafka-setup/stop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | BASEDIR=$(git rev-parse --show-toplevel) 3 | docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml rm -vfs 4 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sinek", 3 | "version": "10.0.0-alpha.0", 4 | "description": "Node.js kafka client, consumer, producer polite out of the box", 5 | "main": "dist/index.js", 6 | "types": "dist/index.d.ts", 7 | "engines": { 8 | "node": ">=8.11.3" 9 | }, 10 | "scripts": { 11 | "lint": "eslint src/. 
--ext .ts", 12 | "fix": "eslint --fix .", 13 | "kafka:start": "./kafka-setup/start.sh", 14 | "kafka:stop": "./kafka-setup/stop.sh", 15 | "kafka:logs": "docker-compose --file ./kafka-setup/docker-compose.yml logs -f", 16 | "kafka:console": "./kafka-setup/kafka-console.sh", 17 | "test": "_mocha --recursive --timeout 32500 --exit -R spec test/int", 18 | "yarn:openssl": "LDFLAGS='-L/usr/local/opt/openssl/lib' CPPFLAGS='-I/usr/local/opt/openssl/include' yarn", 19 | "prepare": "yarn prepublishOnly", 20 | "prepublishOnly": "tsc -p tsconfig.dist.json" 21 | }, 22 | "repository": { 23 | "type": "git", 24 | "url": "git+https://github.com/nodefluent/node-sinek.git" 25 | }, 26 | "keywords": [ 27 | "polite", 28 | "kafka", 29 | "client", 30 | "sinek", 31 | "simon", 32 | "nice", 33 | "easy", 34 | "producer", 35 | "consumer", 36 | "backpressure", 37 | "control", 38 | "flow", 39 | "queue", 40 | "ssl", 41 | "secure", 42 | "sasl", 43 | "kerberos", 44 | "librdkafka", 45 | "stream", 46 | "batch" 47 | ], 48 | "author": "Christian Fröhlingsdorf", 49 | "license": "MIT", 50 | "bugs": { 51 | "url": "https://github.com/nodefluent/node-sinek/issues" 52 | }, 53 | "homepage": "https://github.com/nodefluent/node-sinek#readme", 54 | "dependencies": { 55 | "async": "~3.2.0", 56 | "bluebird": "~3.7.2", 57 | "debug": "~4.1.1", 58 | "kafkajs": "1.12.0", 59 | "lodash.merge": "~4.6.2", 60 | "murmur2-partitioner": "~1.0.0", 61 | "murmurhash": "~1.0.0", 62 | "uuid": "~7.0.3" 63 | }, 64 | "devDependencies": { 65 | "@types/mocha": "^8.0.1", 66 | "@types/node": "^14.0.27", 67 | "@typescript-eslint/eslint-plugin": "^3.9.0", 68 | "@typescript-eslint/parser": "^3.9.0", 69 | "eslint": "~6.8.0", 70 | "expect.js": "~0.3.1", 71 | "express": "~4.17.1", 72 | "istanbul": "~0.4.5", 73 | "mocha": "~7.1.1", 74 | "sinon": "~9.0.2", 75 | "ts-node": "^8.10.2", 76 | "typescript": "^3.9.7" 77 | }, 78 | "mocha": { 79 | "extension": [ 80 | "ts" 81 | ], 82 | "exit": true, 83 | "timeout": 32500, 84 | "recursive": true, 85 | "require": [ 86 | "ts-node/register", 87 | "source-map-support/register" 88 | ] 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./lib/Sinek"; 2 | -------------------------------------------------------------------------------- /src/lib/Sinek.ts: -------------------------------------------------------------------------------- 1 | export * from "./kafkajs/JSConsumer"; 2 | export * from "./kafkajs/JSProducer"; 3 | export * from "./shared/Health"; 4 | export * from "./shared/Analytics"; 5 | -------------------------------------------------------------------------------- /src/lib/interfaces.ts: -------------------------------------------------------------------------------- 1 | import { CompressionTypes } from "kafkajs"; 2 | 3 | export interface KafkaHealthConfig { 4 | thresholds: { 5 | consumer: { 6 | errors: number; 7 | lag: number; 8 | stallLag: number; 9 | minMessages: number; 10 | }; 11 | producer: { 12 | errors: number; 13 | minMessages: number; 14 | }; 15 | }; 16 | } 17 | 18 | export interface NCommonKafkaOptions { 19 | "builtin.features"?: string; 20 | "client.id"?: string; 21 | "metadata.broker.list": string; 22 | "message.max.bytes"?: number; 23 | "message.copy.max.bytes"?: number; 24 | "receive.message.max.bytes"?: number; 25 | "max.in.flight.requests.per.connection"?: number; 26 | "metadata.request.timeout.ms"?: number; 27 | "topic.metadata.refresh.interval.ms"?: 
number; 28 | "metadata.max.age.ms"?: number; 29 | "topic.metadata.refresh.fast.interval.ms"?: number; 30 | "topic.metadata.refresh.fast.cnt"?: number; 31 | "topic.metadata.refresh.sparse"?: boolean; 32 | "topic.blacklist"?: string; 33 | "debug"?: string; 34 | "socket.timeout.ms"?: number; 35 | "socket.blocking.max.ms"?: number; 36 | "socket.send.buffer.bytes"?: number; 37 | "socket.receive.buffer.bytes"?: number; 38 | "socket.keepalive.enable"?: boolean; 39 | "socket.nagle.disable"?: boolean; 40 | "socket.max.fails"?: number; 41 | "broker.address.ttl"?: number; 42 | "broker.address.family"?: "any" | "v4" | "v6"; 43 | "reconnect.backoff.jitter.ms"?: number; 44 | "statistics.interval.ms"?: number; 45 | "enabled_events"?: number; 46 | "log_level"?: number; 47 | "log.queue"?: boolean; 48 | "log.thread.name"?: boolean; 49 | "log.connection.close"?: boolean; 50 | "internal.termination.signal"?: number; 51 | "api.version.request"?: boolean; 52 | "api.version.fallback.ms"?: number; 53 | "broker.version.fallback"?: string; 54 | "security.protocol"?: "plaintext" | "ssl" | "sasl_plaintext" | "sasl_ssl"; 55 | "ssl.cipher.suites"?: string; 56 | "ssl.key.location"?: string; 57 | "ssl.key.password"?: string; 58 | "ssl.certificate.location"?: string; 59 | "ssl.ca.location"?: string; 60 | "ssl.crl.location"?: string; 61 | "sasl.mechanisms"?: string; 62 | "sasl.kerberos.service.name"?: string; 63 | "sasl.kerberos.principal"?: string; 64 | "sasl.kerberos.kinit.cmd"?: string; 65 | "sasl.kerberos.keytab"?: string; 66 | "sasl.kerberos.min.time.before.relogin"?: number; 67 | "sasl.username"?: string; 68 | "sasl.password"?: string; 69 | "partition.assignment.strategy"?: string; 70 | "session.timeout.ms"?: number; 71 | "heartbeat.interval.ms"?: number; 72 | "group.protocol.type"?: string; 73 | "coordinator.query.interval.ms"?: number; 74 | "group.id"?: string; 75 | "event_cb"?: boolean; 76 | "dr_cb"?: boolean; 77 | } 78 | 79 | export interface NConsumerKafkaOptions extends NCommonKafkaOptions { 80 | "group.id": string; 81 | "enable.auto.commit"?: boolean; 82 | "auto.commit.interval.ms"?: number; 83 | "enable.auto.offset.store"?: boolean; 84 | "queued.min.messages"?: number; 85 | "queued.max.messages.kbytes"?: number; 86 | "fetch.wait.max.ms"?: number; 87 | "fetch.message.max.bytes"?: number; 88 | "fetch.min.bytes"?: number; 89 | "fetch.error.backoff.ms"?: number; 90 | "offset.store.method"?: "none" | "file" | "broker"; 91 | "enable.partition.eof"?: boolean; 92 | "check.crcs"?: boolean; 93 | } 94 | 95 | export interface NProducerKafkaOptions extends NCommonKafkaOptions { 96 | "queue.buffering.max.messages"?: number; 97 | "queue.buffering.max.kbytes"?: number; 98 | "queue.buffering.max.ms"?: number; 99 | "message.send.max.retries"?: number; 100 | "retry.backoff.ms"?: number; 101 | "compression.codec"?: CompressionTypes; 102 | "batch.num.messages"?: number; 103 | "delivery.report.only.error"?: boolean; 104 | } 105 | 106 | export interface KafkaConsumerConfig { 107 | kafkaHost?: string; 108 | groupId?: string; 109 | workerPerPartition?: number; 110 | options?: { 111 | sessionTimeout?: number; 112 | protocol?: [string]; 113 | fromOffset?: string; 114 | fetchMaxBytes?: number; 115 | fetchMinBytes?: number; 116 | fetchMaxWaitMs?: number; 117 | heartbeatInterval?: number; 118 | retryMinTimeout?: number; 119 | autoCommit?: boolean; 120 | autoCommitIntervalMs?: number; 121 | requireAcks?: number; 122 | ackTimeoutMs?: number; 123 | }; 124 | health?: KafkaHealthConfig; 125 | tconf?: { 126 | "auto.commit.enable"?: boolean; 127 
| "auto.commit.interval.ms"?: number; 128 | "auto.offset.reset"?: "smallest" | "earliest" | "beginning" | "largest" | "latest" | "end" | "error"; 129 | "offset.store.path"?: string; 130 | "offset.store.sync.interval.ms"?: number; 131 | "offset.store.method"?: "file" | "broker"; 132 | "consume.callback.max.messages"?: number; 133 | }; 134 | noptions?: NConsumerKafkaOptions; 135 | logger?: KafkaLogger; 136 | } 137 | 138 | export interface KafkaProducerConfig { 139 | kafkaHost?: string; 140 | clientName?: string; 141 | workerPerPartition?: number; 142 | options?: { 143 | sessionTimeout?: number; 144 | protocol?: [string]; 145 | fromOffset?: string; 146 | fetchMaxBytes?: number; 147 | fetchMinBytes?: number; 148 | fetchMaxWaitMs?: number; 149 | heartbeatInterval?: number; 150 | retryMinTimeout?: number; 151 | requireAcks?: number; 152 | ackTimeoutMs?: number; 153 | partitionerType?: number; 154 | murmurHashVersion?: string; 155 | }; 156 | health?: KafkaHealthConfig; 157 | tconf?: { 158 | "request.required.acks"?: number; 159 | "request.timeout.ms"?: number; 160 | "message.timeout.ms"?: number; 161 | "produce.offset.report"?: boolean; 162 | }; 163 | noptions?: NProducerKafkaOptions; 164 | logger?: KafkaLogger; 165 | } 166 | 167 | // Setting noptions to required. 168 | export interface JSKafkaProducerConfig extends KafkaProducerConfig { 169 | noptions: NProducerKafkaOptions 170 | } 171 | 172 | export interface JSKafkaConsumerConfig extends KafkaConsumerConfig { 173 | noptions: NConsumerKafkaOptions 174 | } 175 | 176 | export interface KafkaMessage { 177 | topic: string; 178 | partition: number; 179 | offset: number; 180 | key: Buffer | string; 181 | value: Buffer | string | any; 182 | size: number; 183 | timestamp: number; 184 | } 185 | 186 | export interface SortedMessageBatch { 187 | [topic: string]: { 188 | [partition: string]: KafkaMessage[]; 189 | }; 190 | } 191 | 192 | export interface BatchConfig { 193 | batchSize?: number; 194 | commitEveryNBatch?: number; 195 | concurrency?: number; 196 | commitSync?: boolean; 197 | noBatchCommits?: boolean; 198 | manualBatching?: boolean; 199 | sortedManualBatch?: boolean; 200 | } 201 | 202 | export interface ConsumerStats { 203 | totalIncoming: number; 204 | lastMessage: number; 205 | receivedFirstMsg: boolean; 206 | totalProcessed: number; 207 | lastProcessed: number; 208 | queueSize: null; 209 | isPaused: boolean; 210 | omittingQueue: boolean; 211 | autoComitting: boolean; 212 | consumedSinceCommit: number; 213 | batch: { 214 | current: number; 215 | committs: number; 216 | total: number; 217 | config: BatchConfig; 218 | currentEmptyFetches: number; 219 | avgProcessingTime: number; 220 | }; 221 | lag: any; 222 | totalErrors: number; 223 | drainStats: Record|null; 224 | } 225 | 226 | export interface LagStatus { 227 | topic: string; 228 | partition: number; 229 | lowDistance: number; 230 | highDistance: number; 231 | detail: { 232 | lowOffset: number; 233 | highOffset: number; 234 | comittedOffset: number; 235 | }; 236 | } 237 | 238 | export interface ProducerStats { 239 | totalPublished: number; 240 | last: number; 241 | isPaused: boolean; 242 | totalErrors: number; 243 | } 244 | 245 | export interface MessageReturn { 246 | key: string; 247 | partition: number; 248 | offset?: number | null; 249 | value: string; 250 | } 251 | 252 | export interface MessageProduce { 253 | id: string; 254 | version: number; 255 | } 256 | 257 | export interface KafkaLogger { 258 | debug(message: string): void; 259 | info(message: string): void; 260 | warn(message: 
string, error?: Error): void; 261 | error(error: string | Error): void; 262 | } 263 | 264 | export interface AnalyticsLagChange { 265 | timelyDifference: number; 266 | fetchPerformance: number; 267 | newLags: Record; 268 | changedLags: Record; 269 | resolvedLags: { 270 | [key: string]: Record; 271 | }, 272 | stallLags: Record; 273 | } 274 | 275 | export interface AnalyticsConfig { 276 | analyticsInterval: number; 277 | } 278 | -------------------------------------------------------------------------------- /src/lib/kafkajs/JSConsumer.ts: -------------------------------------------------------------------------------- 1 | import { Promise } from "bluebird"; 2 | import Debug from "debug"; 3 | import { Kafka, Admin, Consumer, SASLMechanism, KafkaMessage } from "kafkajs"; 4 | import fs from "fs"; 5 | import { EventEmitter } from "events"; 6 | import { BatchConfig, LagStatus, JSKafkaConsumerConfig, KafkaLogger, ConsumerStats, AnalyticsConfig } from "../interfaces"; 7 | import { ConsumerAnalytics, ConsumerHealth, Metadata, Check, ConsumerRunResult } from "../shared"; 8 | 9 | const MESSAGE_CHARSET = "utf8"; 10 | 11 | export interface FormattedKafkaMessage extends Omit { 12 | value: Buffer | string | Record; 13 | } 14 | 15 | export interface ComittedOffsets { 16 | partition: number; 17 | offset: string; 18 | metadata: string | null; 19 | topic: string; 20 | } 21 | 22 | const DEFAULT_LOGGER = { 23 | debug: Debug("sinek:jsconsumer:debug"), 24 | info: Debug("sinek:jsconsumer:info"), 25 | warn: Debug("sinek:jsconsumer:warn"), 26 | error: Debug("sinek:jsconsumer:error") 27 | }; 28 | 29 | type Lag = { 30 | status: LagStatus[], 31 | at: number, 32 | took: number 33 | } 34 | 35 | type ConsumeCallback = ((messages, callback) => void) | null; 36 | 37 | const defaultLag = { 38 | status: [], 39 | at: 0, 40 | took: 0, 41 | }; 42 | 43 | /** 44 | * wrapper around kafkajs that imitates NConsumer 45 | * @extends EventEmitter 46 | */ 47 | export class JSConsumer extends EventEmitter { 48 | 49 | kafkaClient: Kafka; 50 | topics: string[]; 51 | config: JSKafkaConsumerConfig; 52 | asString = true; 53 | asJSON = false; 54 | asStream = false; 55 | consumer: Consumer | undefined; 56 | 57 | private _firstMessageConsumed = false; 58 | private _totalIncomingMessages = 0; 59 | private _lastReceived = 0; 60 | private _totalProcessedMessages = 0; 61 | private _lastProcessed = 0; 62 | private _isAutoCommitting = false; 63 | private _batchCount = 0; 64 | private _batchCommitts = 0; 65 | private _batchConfig: BatchConfig = {}; 66 | private _totalBatches = 0; 67 | 68 | private _lastLagStatus: Lag = defaultLag; 69 | private _lagCache: Lag = defaultLag; 70 | 71 | private _analyticsOptions: AnalyticsConfig | null = null; 72 | _analytics: ConsumerAnalytics | undefined; 73 | private _consumedSinceCommit = 0; 74 | private _emptyFetches = 0; 75 | private _avgBatchProcessingTime = 0; 76 | private _extCommitCallback: ((e: Error, partitions: any[]) => void) | undefined; 77 | 78 | private _errors = 0; 79 | private _groupId = ""; 80 | private _adminClient: Admin; 81 | private _health: ConsumerHealth; 82 | private _inClosing = false; 83 | 84 | /** 85 | * creates a new consumer instance 86 | * @param {string|Array} topics - topic or topics to subscribe to 87 | * @param {object} config - configuration object 88 | */ 89 | constructor(topics: string | string[], config: JSKafkaConsumerConfig) { 90 | super(); 91 | 92 | if (!config) { 93 | throw new Error("You are missing a config object."); 94 | } 95 | 96 | if (!config.logger || typeof config.logger 
!== "object") { 97 | config.logger = DEFAULT_LOGGER; 98 | } 99 | 100 | const { 101 | "metadata.broker.list": brokerList, 102 | "client.id": clientId, 103 | "security.protocol": securityProtocol, 104 | "ssl.ca.location": sslCALocation, 105 | "ssl.certificate.location": sslCertLocation, 106 | "ssl.key.location": sslKeyLocation, 107 | "ssl.key.password": sslKeyPassword, 108 | "sasl.mechanisms": mechanism, 109 | "sasl.username": username, 110 | "sasl.password": password, 111 | } = config.noptions; 112 | 113 | const brokers = brokerList.split(","); 114 | 115 | if (!brokers || !clientId) { 116 | throw new Error("You are missing a broker or group configs"); 117 | } 118 | 119 | if (securityProtocol) { 120 | this.kafkaClient = new Kafka({ 121 | brokers, 122 | clientId, 123 | ssl: { 124 | ca: [fs.readFileSync(sslCALocation as string, "utf-8")], 125 | cert: fs.readFileSync(sslCertLocation as string, "utf-8"), 126 | key: fs.readFileSync(sslKeyLocation as string, "utf-8"), 127 | passphrase: sslKeyPassword, 128 | }, 129 | sasl: { 130 | mechanism: mechanism as SASLMechanism, 131 | username: username as string, 132 | password: password as string, 133 | }, 134 | }); 135 | } else { 136 | this.kafkaClient = new Kafka({ brokers, clientId }); 137 | } 138 | 139 | this._adminClient = this.kafkaClient.admin(); 140 | this.topics = Array.isArray(topics) ? topics : [topics]; 141 | this.config = config; 142 | this._health = new ConsumerHealth(this, this.config.health); 143 | 144 | this.on("error", () => { 145 | this._errors++; 146 | }); 147 | 148 | this.on("batch", (messages, { resolveOffset, syncEvent }) => { 149 | 150 | const startBPT = Date.now(); 151 | this._totalIncomingMessages += messages.length; 152 | this._lastReceived = Date.now(); 153 | 154 | const messageOffsets: any[] = []; 155 | 156 | const mappedMessages = messages.map((message) => { 157 | this.config.logger!.debug(message); 158 | message.value = this._convertMessageValue(message.value, this.asString, this.asJSON); 159 | this.emit("message", message); 160 | messageOffsets.push(message.offset); 161 | return message; 162 | }); 163 | 164 | //execute sync event and wrap callback (in this mode the sync event recieves all messages as batch) 165 | syncEvent(mappedMessages, async (__error) => { 166 | 167 | /* ### sync event callback does not handle errors ### */ 168 | if (__error && this.config && this.config.logger && this.config.logger.warn) { 169 | this.config.logger.warn("Please dont pass errors to sinek consume callback", __error); 170 | } 171 | 172 | this._bumpVariableOfBatch(startBPT, mappedMessages.length); 173 | 174 | try { 175 | messageOffsets.forEach((offset) => { 176 | resolveOffset(offset); 177 | }); 178 | } catch (error) { 179 | this.emit("error", error); 180 | } 181 | }); 182 | }); 183 | } 184 | 185 | /** 186 | * connect to broker 187 | * @param {boolean} asStream - optional, if client should be started in streaming mode 188 | * @param {object} opts - optional, options asString, asJSON (booleans) 189 | * @returns {Promise.<*>} 190 | */ 191 | connect(asStream = false): Promise { 192 | 193 | if (asStream) { 194 | return Promise.reject(new Error("JSConsumer does not support streaming mode.")); 195 | } 196 | 197 | const { logger, groupId } = this.config; 198 | let { noptions, tconf } = this.config; 199 | 200 | const config = { 201 | "broker.list": null, 202 | "group.id": typeof groupId === "string" ? 
groupId : "", 203 | "enable.auto.commit": false, // default in librdkafka is true - what makes this dangerous for our batching logic(s) 204 | }; 205 | 206 | const overwriteConfig = { 207 | "offset_commit_cb": this._onOffsetCommit.bind(this) 208 | }; 209 | 210 | if (noptions && noptions["offset_commit_cb"]) { 211 | if (typeof noptions["offset_commit_cb"] !== "function") { 212 | return Promise.reject(new Error("offset_commit_cb must be a function.")); 213 | } 214 | this._extCommitCallback = noptions["offset_commit_cb"]; 215 | } 216 | 217 | noptions = Object.assign({}, config, noptions, overwriteConfig); 218 | 219 | logger!.debug(JSON.stringify(noptions)); 220 | 221 | this._isAutoCommitting = noptions["enable.auto.commit"] || false; 222 | 223 | tconf = tconf || undefined; 224 | logger!.debug(JSON.stringify(tconf)); 225 | 226 | this._groupId = noptions["group.id"]; 227 | 228 | if (!this._groupId) { 229 | return Promise.reject(new Error("Group need to be configured on noptions['groupId.id']")); 230 | } 231 | 232 | return this._connectInFlow(logger as KafkaLogger); 233 | } 234 | 235 | /** 236 | * @private 237 | * event handler for async offset committs 238 | * @param {Error} error 239 | * @param {Array} partitions 240 | */ 241 | _onOffsetCommit(error: Error, partitions: any[]): void { 242 | 243 | if (this._extCommitCallback) { 244 | try { 245 | this._extCommitCallback(error, partitions); 246 | } catch (error) { 247 | this.emit("error", error); 248 | } 249 | } 250 | 251 | if (error) { 252 | return this.config.logger!.warn("commit request failed with an error: " + JSON.stringify(error)); 253 | } 254 | 255 | this.config.logger!.debug(JSON.stringify(partitions)); 256 | } 257 | 258 | /** 259 | * @private 260 | * connects in flow mode mode 261 | * @param {object} logger 262 | * @param {object} noptions 263 | * @param {object} tconf 264 | * @returns {Promise.<*>} 265 | */ 266 | _connectInFlow(logger: KafkaLogger): Promise { 267 | 268 | return new Promise(( resolve, reject ) => { 269 | 270 | this.consumer = this.kafkaClient.consumer({ groupId: this._groupId }); 271 | const { CONNECT, CRASH, DISCONNECT } = this.consumer.events; 272 | 273 | this.consumer.on(CRASH, error => { 274 | this.emit("error", error); 275 | }); 276 | 277 | this.consumer.on(DISCONNECT, () => { 278 | if (this._inClosing) { 279 | this._reset(); 280 | } 281 | logger.warn("Disconnected."); 282 | //auto-reconnect --> handled by consumer.consume(); 283 | }); 284 | 285 | this.consumer.on(CONNECT, payload => { 286 | logger.info(`KafkaJS consumer (flow) ready with group. 
Info: ${payload}.`); 287 | this.emit("ready"); 288 | }); 289 | 290 | logger.debug("Connecting.."); 291 | 292 | try { 293 | Promise.all([ 294 | this.consumer.connect(), 295 | this._adminClient.connect(), 296 | ]).then(resolve); 297 | } catch (error) { 298 | this.emit("error", error); 299 | return reject(error); 300 | } 301 | }); 302 | } 303 | 304 | /** 305 | * @private 306 | * runs (and calls itself) until it has successfully 307 | * read a certain size of messages from the broker 308 | * @returns {boolean} 309 | */ 310 | _consumerRun(syncEvent: ConsumeCallback): Promise { 311 | 312 | if (!this.resume || !this.consumer) { 313 | return false; 314 | } 315 | 316 | this.consumer.run({ 317 | eachBatchAutoResolve: false, 318 | eachBatch: async ({ batch, uncommittedOffsets, resolveOffset, heartbeat, isRunning, isStale }) => { 319 | 320 | const messages = batch.messages; 321 | 322 | if (!isRunning() || isStale() || !messages.length) { 323 | 324 | //always ensure longer wait on consume error 325 | if (!isRunning() || isStale()) { 326 | 327 | if (this.config && this.config.logger && this.config.logger.debug) { 328 | // @todo - not sure where error comes from? 329 | // this.config.logger.debug(`Consumed recursively with error ${error.message}`); 330 | this.config.logger.debug(`Consumed recursively with error ${messages}`); 331 | } 332 | 333 | this.emit("error", Error); 334 | } 335 | 336 | //retry asap 337 | this._emptyFetches++; 338 | } else { 339 | 340 | if (this.config && this.config.logger && this.config.logger.debug) { 341 | this.config.logger.debug(`Consumed recursively with success ${messages.length}`); 342 | } 343 | 344 | this._emptyFetches = 0; //reset 345 | await uncommittedOffsets(); 346 | this.emit("batch", batch.messages, { resolveOffset, syncEvent }); 347 | } 348 | await heartbeat(); 349 | } 350 | }); 351 | } 352 | 353 | /** 354 | * @private 355 | * converts message value according to booleans 356 | * @param {Buffer} _value 357 | * @param {boolean} asString 358 | * @param {boolean} asJSON 359 | * @returns {Buffer|string|object} 360 | */ 361 | _convertMessageValue( 362 | _value: Buffer, 363 | asString = true, 364 | asJSON = false 365 | ): Buffer | string| Record { 366 | if (!_value) { 367 | return _value; 368 | } 369 | 370 | if (!asString && !asJSON) { 371 | return _value; 372 | } 373 | 374 | let value; 375 | 376 | if (asString || asJSON) { 377 | value = _value.toString(MESSAGE_CHARSET); 378 | } 379 | 380 | if (asJSON) { 381 | try { 382 | value = JSON.parse(value); 383 | } catch (error) { 384 | this.config.logger!.warn(`Failed to parse message value as json: ${error.message}, ${value}`); 385 | } 386 | } 387 | 388 | return value; 389 | } 390 | 391 | _bumpVariableOfBatch(startBPT: number, batchLength: number): void { 392 | 393 | this._totalProcessedMessages += batchLength; 394 | this._lastProcessed = Date.now(); 395 | 396 | //when all messages from the batch are processed 397 | this._avgBatchProcessingTime = (this._avgBatchProcessingTime + (Date.now() - startBPT)) / 2; 398 | this._consumedSinceCommit += batchLength; 399 | this._totalBatches++; 400 | this._batchCount++; 401 | 402 | this.config.logger!.debug(`committing after ${this._batchCount}, batches, messages: ${this._consumedSinceCommit}`); 403 | this.emit("commit", this._consumedSinceCommit); 404 | this._batchCount = 0; 405 | this._batchCommitts++; 406 | this._consumedSinceCommit = 0; 407 | } 408 | 409 | async _consumeHandler(syncEvent: ConsumeCallback, { manualBatching }: { manualBatching: boolean }): Promise { 410 | 411 | if 
(this._isAutoCommitting !== null && typeof this._isAutoCommitting !== "undefined") { 412 | this.config.logger!.warn("enable.auto.commit has no effect in 1:n consume-mode, set to null or undefined to remove this message." + 413 | "You can pass 'noBatchCommits' as true via options to .consume(), if you want to commit manually."); 414 | } 415 | 416 | if (this._isAutoCommitting) { 417 | throw new Error("Please disable enable.auto.commit when using 1:n consume-mode."); 418 | } 419 | 420 | if (!manualBatching) { 421 | this.config.logger!.warn("The consumer only allows manual batching for now"); 422 | } 423 | 424 | this.config.logger!.info("Batching manually.."); 425 | this._consumerRun(syncEvent); 426 | } 427 | 428 | /** 429 | * subscribe and start to consume, should be called only once after connection is successful 430 | * options object supports the following fields: 431 | * batchSize amount of messages that is max. fetched per round 432 | * commitEveryNBatch amount of batches that should be processed before committing 433 | * concurrency the concurrency of the execution per batch 434 | * commitSync if the commit action should be blocking or non-blocking 435 | * noBatchCommits defaults to false, if set to true, no commits will be made for batches 436 | * 437 | * @param {function} syncEvent - callback (receives messages and callback as params) 438 | * @param {boolean} asString - optional, if message value should be decoded to utf8 439 | * @param {boolean} asJSON - optional, if message value should be json deserialised 440 | * @param {object} options - optional object containing options for 1:n mode: 441 | * @returns {Promise.<*>} 442 | */ 443 | consume(syncEvent: ConsumeCallback = null, asString = true, asJSON = false, options: BatchConfig): Promise { 444 | 445 | let { 446 | batchSize, 447 | commitEveryNBatch, 448 | concurrency, 449 | commitSync, 450 | noBatchCommits, 451 | manualBatching, 452 | sortedManualBatch, 453 | } = options; 454 | 455 | batchSize = batchSize || 1; 456 | commitEveryNBatch = commitEveryNBatch || 1; 457 | concurrency = concurrency || 1; 458 | commitSync = typeof commitSync === "undefined" ? true : commitSync; //default is true 459 | noBatchCommits = typeof noBatchCommits === "undefined" ? false : noBatchCommits; //default is false 460 | manualBatching = typeof manualBatching === "undefined" ? true : manualBatching; //default is true 461 | sortedManualBatch = typeof sortedManualBatch === "undefined" ? 
false : sortedManualBatch; //default is false 462 | 463 | this._batchConfig = { 464 | batchSize, 465 | commitEveryNBatch, 466 | concurrency, 467 | commitSync, 468 | noBatchCommits, 469 | manualBatching, 470 | sortedManualBatch 471 | } as BatchConfig; 472 | 473 | this.asString = asString; 474 | this.asJSON = asJSON; 475 | 476 | if (!this.consumer) { 477 | return Promise.reject(new Error("You must call and await .connect() before trying to consume messages.")); 478 | } 479 | 480 | if (syncEvent && this.asStream) { 481 | return Promise.reject(new Error("Usage of syncEvent is not permitted in streaming mode.")); 482 | } 483 | 484 | if (this.asStream) { 485 | return Promise.reject(new Error("Calling .consume() is not required in streaming mode.")); 486 | } 487 | 488 | if (sortedManualBatch && !manualBatching) { 489 | return Promise.reject(new Error("manualBatching batch option must be enabled, if you enable sortedManualBatch batch option.")); 490 | } 491 | 492 | if (this.config && this.config.logger) { 493 | this.config.logger.warn("batchSize is not supported by KafkaJS"); 494 | } 495 | 496 | const topics = this.topics; 497 | 498 | if (topics && topics.length) { 499 | this.config.logger!.info(`Subscribing to topics: ${topics.join(", ")}.`); 500 | topics.forEach(async (topic) => { 501 | await this.consumer!.subscribe({ topic }); 502 | }); 503 | } else { 504 | this.config.logger!.info("Not subscribing to any topics initially."); 505 | } 506 | 507 | if (!syncEvent) { 508 | return this.consumer.run({ 509 | eachMessage: async ({ message }) => { 510 | const m: FormattedKafkaMessage = message; 511 | 512 | this.config.logger!.debug(JSON.stringify(message)); 513 | 514 | this._totalIncomingMessages++; 515 | this._lastReceived = Date.now(); 516 | 517 | m.value = this._convertMessageValue(message.value, asString, asJSON); 518 | 519 | if (!this._firstMessageConsumed) { 520 | this._firstMessageConsumed = true; 521 | this.emit("first-drain-message", m); 522 | } 523 | 524 | this.emit("message", m); 525 | } 526 | }); 527 | } 528 | 529 | return this._consumeHandler(syncEvent, { 530 | manualBatching, 531 | }); 532 | } 533 | 534 | /** 535 | * pause the consumer for specific topics (partitions) 536 | * @param {Array.<{}>} topicPartitions 537 | * @throws {LibrdKafkaError} 538 | */ 539 | pause(topicPartitions = []): void { 540 | if (this.consumer) { 541 | return this.consumer.pause(topicPartitions); 542 | } 543 | } 544 | 545 | /** 546 | * resume the consumer for specific topic (partitions) 547 | * @param {Array.<{}>} topicPartitions 548 | * @throws {LibrdKafkaError} 549 | */ 550 | resume(topicPartitions = []): void { 551 | if (this.consumer) { 552 | return this.consumer.resume(topicPartitions); 553 | } 554 | } 555 | 556 | /** 557 | * returns consumer statistics 558 | * @todo - update type for consumer stats. 
559 | * @returns {object} 560 | */ 561 | getStats(): ConsumerStats { 562 | return { 563 | totalIncoming: this._totalIncomingMessages, 564 | lastMessage: this._lastReceived, 565 | receivedFirstMsg: this._firstMessageConsumed, 566 | totalProcessed: this._totalProcessedMessages, 567 | lastProcessed: this._lastProcessed, 568 | queueSize: null, 569 | isPaused: false, 570 | drainStats: null, 571 | omittingQueue: true, 572 | autoComitting: this._isAutoCommitting, 573 | consumedSinceCommit: this._consumedSinceCommit, 574 | batch: { 575 | current: this._batchCount, 576 | committs: this._batchCommitts, 577 | total: this._totalBatches, 578 | currentEmptyFetches: this._emptyFetches, 579 | avgProcessingTime: this._avgBatchProcessingTime, 580 | config: this._batchConfig, 581 | }, 582 | lag: this._lagCache, //read from private cache 583 | totalErrors: this._errors 584 | }; 585 | } 586 | 587 | /** 588 | * @private 589 | * resets internal values 590 | */ 591 | _reset(): void { 592 | this._firstMessageConsumed = false; 593 | this._inClosing = false; 594 | this._totalIncomingMessages = 0; 595 | this._lastReceived = 0; 596 | this._totalProcessedMessages = 0; 597 | this._lastProcessed = 0; 598 | this.asStream = false; 599 | this._batchCount = 0; 600 | this._batchCommitts = 0; 601 | this._totalBatches = 0; 602 | this._lagCache = defaultLag; 603 | this._analytics = undefined; 604 | this._consumedSinceCommit = 0; 605 | this._emptyFetches = 0; 606 | this._avgBatchProcessingTime = 0; 607 | this._errors = 0; 608 | this._extCommitCallback = undefined; 609 | } 610 | 611 | /** 612 | * closes connection if open 613 | */ 614 | async close(): Promise { 615 | 616 | if (this.consumer) { 617 | this._inClosing = true; 618 | 619 | return Promise.all([ 620 | this.consumer.disconnect(), 621 | this._adminClient.disconnect(), 622 | ]); 623 | } 624 | } 625 | 626 | /** 627 | * gets the lowest and highest offset that is available 628 | * for a given kafka topic 629 | * @param {string} topic - name of the kafka topic 630 | * @param {number} partition - optional, default is 0 631 | * @returns {Promise.} 632 | */ 633 | async getOffsetForTopicPartition(topic: string, partition = 0): Promise { 634 | 635 | if (!this.consumer) { 636 | return Promise.reject(new Error("Consumer not yet connected.")); 637 | } 638 | 639 | if (this.config && this.config.logger && this.config.logger.debug) { 640 | this.config.logger.debug(`Fetching offsets for topic partition ${topic} ${partition}.`); 641 | } 642 | 643 | const offsetInfos = await this._adminClient.fetchOffsets({ groupId: this._groupId, topic }); 644 | 645 | return offsetInfos.filter((offsetInfo) => offsetInfo.partition === partition)[0]; 646 | } 647 | 648 | /** 649 | * gets all comitted offsets 650 | * @param {number} timeout - optional, default is 2500 651 | * @returns {Promise.} 652 | */ 653 | async getComittedOffsets(timeout = 2500): Promise { 654 | 655 | if (!this.consumer) { 656 | return []; 657 | } 658 | 659 | if (this.config && this.config.logger && this.config.logger.debug) { 660 | this.config.logger.debug(`Fetching committed offsets ${timeout}`); 661 | } 662 | 663 | return [].concat([], 664 | await Promise.all( 665 | 666 | this.topics.map(async (topic) => { 667 | 668 | const offsets = await this._adminClient.fetchOffsets({ 669 | groupId: this._groupId, 670 | topic, 671 | }); 672 | 673 | return offsets.map((offsetInfo) => ({...offsetInfo, topic})); 674 | }) 675 | ) 676 | ); 677 | } 678 | 679 | /** 680 | * gets all topic-partitions which are assigned to this consumer 681 | * @returns 
{Array} 682 | */ 683 | async getAssignedPartitions(): Promise<[]> { 684 | try { 685 | return (await this.getComittedOffsets()); 686 | } catch (error) { 687 | this.emit("error", error); 688 | return []; 689 | } 690 | } 691 | 692 | /** 693 | * @static 694 | * return the offset that has been comitted for a given topic and partition 695 | * @param {string} topic - topic name 696 | * @param {number} partition - partition 697 | * @param {Array} offsets - commit offsets from getComittedOffsets() 698 | */ 699 | static findPartitionOffset(topic: string, partition: number, offsets: ComittedOffsets[]): string { 700 | 701 | for (let i = 0; i < offsets.length; i++) { 702 | if (offsets[i].topic === topic && offsets[i].partition === partition) { 703 | return offsets[i].offset; 704 | } 705 | } 706 | 707 | throw new Error(`no offset found for ${topic}:${partition} in comitted offsets.`); 708 | } 709 | 710 | /** 711 | * compares the local commit offset status with the remote broker 712 | * status for the topic partitions, for all assigned partitions of 713 | * the consumer 714 | * @param {boolean} noCache - when analytics are enabled the results can be taken from cache 715 | * @returns {Promise.} 716 | */ 717 | async getLagStatus(noCache = false): Promise { 718 | 719 | if (!this.consumer) { 720 | return []; 721 | } 722 | 723 | //if allowed serve from cache 724 | if (!noCache && this._lagCache && this._lagCache.status!) { 725 | return this._lagCache.status; 726 | } 727 | 728 | if (this.config && this.config.logger && this.config.logger.debug) { 729 | this.config.logger.debug(`Getting lag status ${noCache}`); 730 | } 731 | 732 | const startT = Date.now(); 733 | const assigned = this.getAssignedPartitions(); 734 | const comitted = await this.getComittedOffsets(); 735 | 736 | const status = await Promise.all(assigned.map(async topicPartition => { 737 | try { 738 | const brokerState = await this.getOffsetForTopicPartition(topicPartition.topic, topicPartition.partition); 739 | // const comittedOffset = NConsumer.findPartitionOffset(topicPartition.topic, topicPartition.partition, comitted); 740 | // const topicOffset = await (await this._adminClient.fetchTopicOffsets(topicPartition.topic)).pop(); 741 | // const comittedOffset = topicOffset.offset; 742 | return { 743 | topic: topicPartition.topic, 744 | partition: topicPartition.partition, 745 | lowDistance: comitted - brokerState.lowOffset, 746 | highDistance: brokerState.highOffset - comitted, 747 | detail: { 748 | lowOffset: brokerState.lowOffset, 749 | highOffset: brokerState.highOffset, 750 | } 751 | }; 752 | } catch (error) { 753 | return { 754 | topic: topicPartition.topic, 755 | partition: topicPartition.partition, 756 | error 757 | }; 758 | } 759 | })); 760 | 761 | const duration = Date.now() - startT; 762 | this.config.logger!.info(`fetching and comparing lag status took: ${duration} ms.`); 763 | 764 | //store cached version 765 | if (status && Array.isArray(status)) { 766 | 767 | //keep last version 768 | if (this._lagCache && this._lagCache.status) { 769 | this._lastLagStatus = Object.assign({}, this._lagCache); 770 | } 771 | 772 | //cache new version 773 | this._lagCache = { 774 | status, 775 | at: startT, 776 | took: Date.now() - startT 777 | }; 778 | } 779 | 780 | return status; 781 | } 782 | 783 | /** 784 | * called in interval 785 | * @private 786 | */ 787 | _runAnalytics(): Promise { 788 | 789 | if (!this._analytics) { 790 | this._analytics = new ConsumerAnalytics(this, this._analyticsOptions, this.config.logger as KafkaLogger); 791 | } 792 | 793 
793 | return this._analytics.run() 794 | .then(res => { this.emit("analytics", res); }) 795 | .catch(error => { this.emit("error", error); }); 796 | } 797 | 798 | /** 799 | * returns the last computed analytics results 800 | * @throws 801 | * @returns {object} 802 | */ 803 | getAnalytics(): ConsumerRunResult | null { 804 | 805 | if (!this._analytics) { 806 | this.emit("error", new Error("You have not enabled analytics on this consumer instance.")); 807 | return null; 808 | } 809 | 810 | return this._analytics.getLastResult(); 811 | } 812 | 813 | /** 814 | * called in interval 815 | * @private 816 | */ 817 | _runLagCheck(): Promise<LagStatus[]> { 818 | return this.getLagStatus(true).catch(error => { this.emit("error", error); return []; }); 819 | } 820 | 821 | /** 822 | * runs a health check and returns object with status and message 823 | * @returns {Promise.<Check>} 824 | */ 825 | checkHealth(): Promise<Check> { 826 | return this._health.check(); 827 | } 828 | 829 | /** 830 | * resolves the metadata information for a given topic, 831 | * will create the topic if it doesn't exist 832 | * @param {string} topic - name of the topic to query metadata for 833 | * @returns {Promise.<Metadata>} 834 | */ 835 | getTopicMetadata(topic: string): Promise<Metadata> { 836 | return new Promise(( resolve, reject ) => { 837 | 838 | if (!this.consumer) { 839 | return reject(new Error("You must call and await .connect() before trying to get metadata.")); 840 | } 841 | 842 | if (this.config && this.config.logger && this.config.logger.debug) { 843 | this.config.logger.debug(`Fetching topic metadata ${topic}`); 844 | } 845 | 846 | this._adminClient.fetchTopicMetadata({ 847 | topics: (topic === "") ? [] : [topic], 848 | }) 849 | .then((raw) => resolve(new Metadata(raw))) 850 | .catch((e) => reject(e)); 851 | }); 852 | } 853 | 854 | /** 855 | * @alias getTopicMetadata 856 | * (resolves the metadata of all topics) 857 | * @returns {Promise.<Metadata>} 858 | */ 859 | getMetadata(): Promise<Metadata> { 860 | return this.getTopicMetadata(""); 861 | } 862 | 863 | /** 864 | * returns a list of available kafka topics on the connected brokers 865 | */ 866 | async getTopicList(): Promise<string[]> { 867 | const metadata: Metadata = await this.getMetadata(); 868 | return metadata.asTopicList(); 869 | } 870 | 871 | /** 872 | * Gets the last lag status 873 | * 874 | * @returns {Lag} 875 | */ 876 | getLastLagStatus(): Lag { 877 | return this._lastLagStatus; 878 | } 879 | 880 | /** 881 | * Gets the lag cache 882 | * 883 | * @returns {Lag} 884 | */ 885 | getLagCache(): Lag { 886 | return this._lagCache; 887 | } 888 | } 889 | -------------------------------------------------------------------------------- /src/lib/kafkajs/JSProducer.ts: -------------------------------------------------------------------------------- 1 | import { Promise } from "bluebird"; 2 | import Debug from "debug"; 3 | import { EventEmitter } from "events"; 4 | import { v4 as uuidv4} from "uuid"; 5 | import { murmur } from "murmurhash"; 6 | import { murmur2Partitioner } from "murmur2-partitioner"; 7 | import { Kafka, KafkaConfig, SASLMechanism, Admin, Producer, RecordMetadata, CompressionTypes } from "kafkajs"; 8 | import { Metadata, ProducerAnalytics, ProducerHealth, Check, ProducerRunResult, defaultAnalyticsInterval } from "../shared"; 9 | import { MessageReturn, JSKafkaProducerConfig, ProducerStats, AnalyticsConfig, KafkaLogger } from "../interfaces"; 10 | import fs from "fs"; 11 | 12 | const MESSAGE_TYPES = { 13 | PUBLISH: "-published", 14 | UNPUBLISH: "-unpublished", 15 | UPDATE: "-updated" 16 | }; 17 | 18 | const MAX_PART_AGE_MS = 1e3 * 60 * 5; //5 minutes
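// Editorial note: these two constants bound the local partition-count cache
// used by getPartitionCountOfTopic() below — entries older than
// MAX_PART_AGE_MS are re-fetched from the broker metadata, and the whole
// store is flushed once it holds more than MAX_PART_STORE_SIZE topics.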
19 | const MAX_PART_STORE_SIZE = 1e4; 20 | const DEFAULT_MURMURHASH_VERSION = "3"; 21 | 22 | const DEFAULT_LOGGER = { 23 | debug: Debug("sinek:jsproducer:debug"), 24 | info: Debug("sinek:jsproducer:info"), 25 | warn: Debug("sinek:jsproducer:warn"), 26 | error: Debug("sinek:jsproducer:error") 27 | }; 28 | 29 | /** 30 | * producer wrapper around the kafkajs client 31 | * @extends EventEmitter 32 | */ 33 | export class JSProducer extends EventEmitter { 34 | 35 | kafkaClient: Kafka; 36 | config: JSKafkaProducerConfig; 37 | 38 | paused = false; 39 | producer: Producer | undefined; 40 | 41 | private _health: ProducerHealth; 42 | private _adminClient: Admin; 43 | private _producerPollIntv = 0; 44 | private _partitionCounts = {}; 45 | private _inClosing = false; 46 | private _totalSentMessages = 0; 47 | private _lastProcessed = 0; 48 | private _analyticsOptions: AnalyticsConfig | null = null; 49 | private _analyticsIntv: NodeJS.Timeout | null = null; 50 | _analytics: ProducerAnalytics | undefined; 51 | private _murmurHashVersion: string = DEFAULT_MURMURHASH_VERSION; 52 | private _murmur; 53 | private _errors = 0; 54 | 55 | defaultPartitionCount = 1; 56 | 57 | /** 58 | * creates a new producer instance 59 | * @param {object} config - configuration object 60 | * @param {number} defaultPartitionCount - amount of default partitions for the 61 | * topics to produce to 62 | */ 63 | constructor(config: JSKafkaProducerConfig, defaultPartitionCount = 1) { 64 | super(); 65 | 66 | if (!config) { 67 | throw new Error("You are missing a config object."); 68 | } 69 | 70 | if (!config.logger || typeof config.logger !== "object") { 71 | config.logger = DEFAULT_LOGGER; 72 | } 73 | 74 | if (!config.options) { 75 | config.options = {}; 76 | } 77 | 78 | const { 79 | "metadata.broker.list": brokerList, 80 | "client.id": clientId, 81 | "security.protocol": securityProtocol, 82 | "ssl.ca.location": sslCALocation, 83 | "ssl.certificate.location": sslCertLocation, 84 | "ssl.key.location": sslKeyLocation, 85 | "ssl.key.password": sslKeyPassword, 86 | "sasl.mechanisms": mechanism, 87 | "sasl.username": username, 88 | "sasl.password": password, 89 | } = config.noptions; 90 | 91 | const brokers = brokerList.split(","); 92 | 93 | if (!brokers.length || !clientId) { 94 | throw new Error("You are missing the broker list or the client id config."); 95 | } 96 | 97 | const conf = { 98 | brokers, 99 | clientId, 100 | } as KafkaConfig; 101 | if (securityProtocol) { 102 | if (securityProtocol.includes("sasl")) { 103 | conf.sasl = { 104 | mechanism: mechanism as SASLMechanism, 105 | username: username as string, 106 | password: password as string, 107 | }; 108 | } 109 | if (securityProtocol.includes("ssl")) { 110 | conf.ssl = { 111 | ca: [fs.readFileSync(sslCALocation as string, "utf-8")], 112 | cert: fs.readFileSync(sslCertLocation as string, "utf-8"), 113 | key: fs.readFileSync(sslKeyLocation as string, "utf-8"), 114 | passphrase: sslKeyPassword, 115 | }; 116 | } 117 | } 118 | this.kafkaClient = new Kafka(conf); 119 | 120 | this.config = config; 121 | this._health = new ProducerHealth(this, this.config.health); 122 | this._adminClient = this.kafkaClient.admin(); 123 | 124 | this._murmurHashVersion = this.config.options!.murmurHashVersion || DEFAULT_MURMURHASH_VERSION; 125 | this.config.logger!.info(`using murmur ${this._murmurHashVersion} partitioner.`); 126 | 127 | this.defaultPartitionCount = defaultPartitionCount; 128 | 129 | switch (this._murmurHashVersion) { 130 | case "2": 131 | this._murmur = (key,
partitionCount) => murmur2Partitioner.partition(key, partitionCount); 132 | break; 133 | 134 | case "3": 135 | this._murmur = (key, partitionCount) => murmur.v3(key) % partitionCount; 136 | break; 137 | 138 | default: 139 | throw new Error(`${this._murmurHashVersion} is not a supported murmur hash version. Choose '2' or '3'.`); 140 | } 141 | 142 | this.on("error", () => this._errors++); 143 | } 144 | 145 | /** 146 | * @throws 147 | * starts analytics tasks 148 | * @param {object} options - analytics options 149 | */ 150 | enableAnalytics(options: { analyticsInterval: number } = {analyticsInterval: defaultAnalyticsInterval}): void { 151 | 152 | if (this._analyticsIntv) { 153 | throw new Error("analytics intervals are already running."); 154 | } 155 | 156 | let { analyticsInterval } = options; 157 | this._analyticsOptions = options; 158 | 159 | analyticsInterval = analyticsInterval || defaultAnalyticsInterval; // 150 sec 160 | 161 | this._analyticsIntv = setInterval(this._runAnalytics.bind(this), analyticsInterval); 162 | } 163 | 164 | /** 165 | * halts all analytics tasks 166 | */ 167 | haltAnalytics(): void { 168 | if (this._analyticsIntv) { 169 | clearInterval(this._analyticsIntv); 170 | this._analyticsIntv = null; 171 | } 172 | } 173 | 174 | /** 175 | * connects to the broker 176 | * @returns {Promise.<*>} 177 | */ 178 | connect(): Promise<any> { 179 | return new Promise((resolve, reject) => { 180 | const { kafkaHost, logger } = this.config; 181 | let { noptions, tconf } = this.config; 182 | 183 | let conStr: string | null = null; 184 | 185 | if (typeof kafkaHost === "string") { 186 | conStr = kafkaHost; 187 | } 188 | 189 | if (conStr === null && !noptions) { 190 | return reject(new Error("KafkaHost must be defined.")); 191 | } 192 | 193 | const config = { 194 | "metadata.broker.list": conStr, 195 | "dr_cb": true 196 | }; 197 | 198 | noptions = Object.assign({}, config, noptions); 199 | logger!.debug(JSON.stringify(noptions)); 200 | 201 | tconf = tconf ? tconf : { 202 | "request.required.acks": 1 203 | }; 204 | 205 | logger!.debug(JSON.stringify(tconf)); 206 | 207 | this.producer = this.kafkaClient.producer(); 208 | const { CONNECT, DISCONNECT, REQUEST_TIMEOUT } = this.producer.events; 209 | 210 | this.producer.on(REQUEST_TIMEOUT, details => { 211 | this.emit("error", new Error(`Request timed out. Info ${JSON.stringify(details)}`)); 212 | }); 213 | 214 | /* ### EOF STUFF ### */ 215 | this.producer.on(DISCONNECT, () => { 216 | if (this._inClosing) { 217 | this._reset(); 218 | } 219 | logger!.warn("Disconnected."); 220 | //no manual reconnect needed here, kafkajs reconnects internally 221 | }); 222 | 
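// Editorial sketch (names illustrative, not in the original file): the typical
// connect flow awaited by callers; "ready" fires once the underlying kafkajs
// producer has connected.
//
//   const producer = new JSProducer(config);
//   producer.on("ready", () => console.log("producer connected"));
//   await producer.connect();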
223 | this.producer.on(CONNECT, () => { 224 | logger!.info("KafkaJS producer is ready."); 225 | this.emit("ready"); 226 | }); 227 | 228 | logger!.debug("Connecting..."); 229 | 230 | Promise.all([ 231 | this.producer.connect(), 232 | this._adminClient.connect(), 233 | ]) 234 | .then(resolve) 235 | .catch((error) => { 236 | this.emit("error", error); 237 | return reject(error); 238 | }); 239 | 240 | 241 | }); 242 | } 243 | 244 | /** 245 | * returns a partition for a key 246 | * @private 247 | * @param {string} key - message key 248 | * @param {number} partitionCount - partition count of topic, if 0 defaultPartitionCount is used 249 | * @returns {number} - deterministic partition value for key 250 | */ 251 | _getPartitionForKey(key: string, partitionCount = 1): number { 252 | 253 | if (typeof key !== "string") { 254 | throw new Error("key must be a string."); 255 | } 256 | 257 | if (typeof partitionCount !== "number") { 258 | throw new Error("partitionCount must be a number."); 259 | } 260 | 261 | return this._murmur(key, partitionCount); 262 | } 263 | 
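// Editorial sketch (illustrative only, this is a private helper): with the
// default murmur3 partitioner, a given key always maps to the same partition
// for a fixed partition count, e.g.:
//
//   const partition = producer._getPartitionForKey("user-42", 10); // stable value in [0, 9]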
264 | /** 265 | * @async 266 | * produces a kafka message to a certain topic 267 | * @param {string} topicName - name of the topic to produce to 268 | * @param {object|string|null} message - value object for the message 269 | * @param {number} _partition - optional partition to produce to 270 | * @param {string} _key - optional message key 271 | * @param {string} _partitionKey - optional key to evaluate partition for this message 272 | * @returns {Promise.<MessageReturn>} 273 | */ 274 | async send( 275 | topicName: string, 276 | message: Record<string, unknown> | string | null | Buffer, 277 | _partition: number | null = null, 278 | _key: string | null = null, 279 | _partitionKey: string | null = null 280 | ): Promise<MessageReturn> { 281 | 282 | /* 283 | these are not supported in the HighLevelProducer of node-rdkafka 284 | _opaqueKey = null, 285 | _headers = null, 286 | */ 287 | 288 | if (!this.producer) { 289 | throw new Error("You must call and await .connect() before trying to produce messages."); 290 | } 291 | 292 | if (this.paused) { 293 | throw new Error("producer is paused."); 294 | } 295 | 296 | if (typeof message === "undefined" || !(typeof message === "string" || Buffer.isBuffer(message) || message === null)) { 297 | throw new Error("message must be a string, an instance of Buffer or null."); 298 | } 299 | 300 | const key = _key ? _key : uuidv4(); 301 | let convertedMessage: Buffer | null = null; 302 | 303 | if (message !== null) { 304 | convertedMessage = Buffer.isBuffer(message) ? message : Buffer.from(message); 305 | } 306 | 307 | let maxPartitions = 0; 308 | //find correct max partition count 309 | if (typeof _partition !== "number") { //manual check to improve performance 310 | maxPartitions = await this.getPartitionCountOfTopic(topicName); 311 | if (maxPartitions === -1) { 312 | throw new Error("defaultPartition set to 'auto', but was not able to resolve partition count for topic " + 313 | topicName + ", please make sure the topic exists before starting the producer in auto mode."); 314 | } 315 | } else { 316 | maxPartitions = this.defaultPartitionCount; 317 | } 318 | 319 | let partition = 0; 320 | //find correct partition for this key 321 | if (maxPartitions >= 2 && typeof _partition !== "number") { //manual check to improve performance 322 | partition = this._getPartitionForKey(_partitionKey ? _partitionKey : key, maxPartitions); 323 | } 324 | 325 | //if _partition (manual) is set, it always overwrites a selected partition 326 | partition = typeof _partition === "number" ? _partition : partition; 327 | 328 | this.config.logger!.debug(JSON.stringify({ 329 | topicName, 330 | partition, 331 | key 332 | })); 333 | 334 | const producedAt = Date.now(); 335 | 336 | this._lastProcessed = producedAt; 337 | this._totalSentMessages++; 338 | const timestamp = producedAt.toString(); 339 | const acks = this.config && this.config.tconf && this.config.tconf["request.required.acks"] || 1; 340 | const compression = (this.config.noptions) 341 | ? this.config.noptions["compression.codec"] 342 | : CompressionTypes.None; 343 | 344 | return new Promise((resolve, reject) => { 345 | this.producer!.send({ 346 | topic: topicName, 347 | acks, 348 | compression, 349 | messages: [{ 350 | key, 351 | value: convertedMessage, 352 | partition, 353 | timestamp 354 | }], 355 | }) 356 | .then((metadata: RecordMetadata[]) => { 357 | resolve({ 358 | key, 359 | partition, 360 | offset: metadata[0].offset, 361 | }); 362 | }) 363 | .catch((error) => { 364 | reject(error); 365 | }); 366 | 367 | }); 368 | } 369 | 370 | /** 371 | * @async 372 | * produces a formatted message to a topic 373 | * @param {string} topic - topic to produce to 374 | * @param {string} identifier - identifier of message (is the key) 375 | * @param {object} payload - object (part of message value) 376 | * @param {number} partition - optional partition to produce to 377 | * @param {number} version - optional version of the message value 378 | * @param {string} partitionKey - optional key to evaluate partition for this message 379 | * @returns {Promise.<MessageReturn>} 380 | */ 381 | async buffer( 382 | topic: string, 383 | identifier: string, 384 | payload: Record<string, unknown>, 385 | partition: number | null = null, 386 | version: number | null = null, 387 | partitionKey: string | null = null 388 | ): Promise<MessageReturn> { 389 | 390 | if (typeof identifier === "undefined") { 391 | identifier = uuidv4(); 392 | } 393 | 394 | if (typeof identifier !== "string") { 395 | identifier = identifier + ""; 396 | } 397 | 398 | if (typeof payload !== "object") { 399 | throw new Error("expecting payload to be of type object."); 400 | } 401 | 402 | if (typeof payload.id === "undefined") { 403 | payload.id = identifier; 404 | } 405 | 406 | if (version && typeof payload.version === "undefined") { 407 | payload.version = version; 408 | } 409 | 410 | return await this.send(topic, JSON.stringify(payload), partition, identifier, partitionKey); 411 | } 412 | 
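// Editorial sketch (names illustrative, not part of the original file):
// sending a keyed message — when no explicit partition is passed, the
// partition is derived from the key (or partitionKey) via the murmur hash,
// so identical keys always land on the same partition:
//
//   const { key, partition, offset } = await producer.send("my-topic", "hello", null, "user-42");
//   await producer.buffer("my-topic", "user-42", { content: "hello" });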
413 | /** 414 | * @async 415 | * @private 416 | * produces a specially formatted message to a topic 417 | * @param {string} topic - topic to produce to 418 | * @param {string} identifier - identifier of message (is the key) 419 | * @param {object} _payload - object message value payload 420 | * @param {number} version - optional version (default is 1) 421 | * @param {*} _ - ignorable, here for api compatibility 422 | * @param {string} partitionKey - optional key to deterministically detect partition 423 | * @param {number} partition - optional partition (overwrites partitionKey) 424 | * @param {string} messageType - optional messageType (for the formatted message value) 425 | * @returns {Promise.<MessageReturn>} 426 | */ 427 | async _sendBufferFormat( 428 | topic: string, 429 | identifier: string, 430 | _payload: Record<string, unknown>, 431 | version = 1, 432 | _: null | number, 433 | partitionKey: string | null = null, 434 | partition: number | null = null, 435 | messageType = "" 436 | ): Promise<MessageReturn> { 437 | 438 | if (typeof identifier === "undefined") { 439 | identifier = uuidv4(); 440 | } 441 | 442 | if (typeof identifier !== "string") { 443 | identifier = identifier + ""; 444 | } 445 | 446 | if (typeof _payload !== "object") { 447 | throw new Error("expecting payload to be of type object."); 448 | } 449 | 450 | if (typeof _payload.id === "undefined") { 451 | _payload.id = identifier; 452 | } 453 | 454 | if (version && typeof _payload.version === "undefined") { 455 | _payload.version = version; 456 | } 457 | 458 | const payload = { 459 | payload: _payload, 460 | key: identifier, 461 | id: uuidv4(), 462 | time: (new Date()).toISOString(), 463 | type: topic + messageType 464 | }; 465 | 466 | return await this.send(topic, JSON.stringify(payload), partition, identifier, partitionKey); 467 | } 468 | 469 | /** 470 | * an alias for bufferFormatPublish() 471 | * @alias bufferFormatPublish 472 | */ 473 | bufferFormat( 474 | topic: string, 475 | identifier: string, 476 | payload: Record<string, unknown>, 477 | version = 1, 478 | compressionType = 0, 479 | partitionKey: string | null = null 480 | ): Promise<MessageReturn> { 481 | return this.bufferFormatPublish(topic, identifier, payload, version, compressionType, partitionKey); 482 | } 483 | 484 | /** 485 | * produces a specially formatted message to a topic, with type "publish" 486 | * @param {string} topic - topic to produce to 487 | * @param {string} identifier - identifier of message (is the key) 488 | * @param {object} _payload - object message value payload 489 | * @param {number} version - optional version (default is 1) 490 | * @param {*} _ - ignorable, here for api compatibility 491 | * @param {string} partitionKey - optional key to deterministically detect partition 492 | * @param {number} partition - optional partition (overwrites partitionKey) 493 | * @returns {Promise.<MessageReturn>} 494 | */ 495 | bufferFormatPublish( 496 | topic: string, 497 | identifier: string, 498 | _payload: Record<string, unknown>, 499 | version = 1, 500 | _: null | number, 501 | partitionKey: string | null = null, 502 | partition: number | null = null 503 | ): Promise<MessageReturn> { 504 | return this._sendBufferFormat(topic, identifier, _payload, version, _, partitionKey, partition, MESSAGE_TYPES.PUBLISH); 505 | } 506 | 507 | /** 508 | * produces a specially formatted message to a topic, with type "update" 509 | * @param {string} topic - topic to produce to 510 | * @param {string} identifier - identifier of message (is the key) 511 | * @param {object} _payload - object message value payload 512 | * @param {number} version - optional version (default is 1) 513 | * @param {*} _ - ignorable, here for api compatibility 514 | * @param {string} partitionKey - optional key to deterministically detect partition 515 | * @param {number} partition - optional partition (overwrites partitionKey) 516 | * @returns {Promise.<MessageReturn>} 517 | */ 518 | bufferFormatUpdate( 519 | topic: string, 520 | identifier: string, 521 | _payload: Record<string, unknown>, 522 | version = 1, 523 | _: null | number, 524 | partitionKey: string | null = null, 525 | partition: number | null = null 526 | ): Promise<MessageReturn> { 527 | return this._sendBufferFormat(topic, identifier, _payload, version, _, partitionKey, partition, MESSAGE_TYPES.UPDATE); 528 | } 529 | 
530 | /** 531 | * produces a specially formatted message to a topic, with type "unpublish" 532 | * @param {string} topic - topic to produce to 533 | * @param {string} identifier - identifier of message (is the key) 534 | * @param {object} _payload - object message value payload 535 | * @param {number} version - optional version (default is 1) 536 | * @param {*} _ - ignorable, here for api compatibility 537 | * @param {string} partitionKey - optional key to deterministically detect partition 538 | * @param {number} partition - optional partition (overwrites partitionKey) 539 | * @returns {Promise.<MessageReturn>} 540 | */ 541 | bufferFormatUnpublish( 542 | topic: string, 543 | identifier: string, 544 | _payload: Record<string, unknown>, 545 | version = 1, 546 | _: null | number, 547 | partitionKey: string | null = null, 548 | partition: number | null = null 549 | ): Promise<MessageReturn> { 550 | return this._sendBufferFormat(topic, identifier, _payload, version, _, partitionKey, partition, MESSAGE_TYPES.UNPUBLISH); 551 | } 552 | 553 | /** 554 | * produces a tombstone (null payload with -1 size) message 555 | * on a key compacted topic/partition this will delete all occurrences of the key 556 | * @param {string} topic - name of the topic 557 | * @param {string} key - key 558 | * @param {number|null} _partition - optional partition 559 | */ 560 | tombstone( 561 | topic: string, 562 | key: string, 563 | _partition: number | null = null 564 | ): Promise<MessageReturn> { 565 | 566 | if (!key) { 567 | return Promise.reject(new Error("Tombstone messages only work on a key compacted topic, please provide a key.")); 568 | } 569 | 570 | return this.send(topic, null, _partition, key, null); 571 | } 572 | 573 | /** 574 | * pauses production (sends will not be queued) 575 | */ 576 | pause(): void { 577 | this.paused = true; 578 | } 579 | 580 | /** 581 | * resumes production 582 | */ 583 | resume(): void { 584 | this.paused = false; 585 | } 586 | 587 | /** 588 | * returns producer statistics 589 | * @todo - update type for producer stats. 590 | * @returns {object} 591 | */ 592 | getStats(): ProducerStats { 593 | return { 594 | totalPublished: this._totalSentMessages, 595 | last: this._lastProcessed, 596 | isPaused: this.paused, 597 | totalErrors: this._errors 598 | }; 599 | } 600 | 601 | /** 602 | * @deprecated 603 | */ 604 | refreshMetadata(): void { 605 | throw new Error("refreshMetadata is not implemented for the kafkajs producer."); 606 | } 607 | 
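// Editorial sketch: deleting all values for a key on a log-compacted topic by
// producing a tombstone via the method above (the topic name is illustrative):
//
//   await producer.tombstone("my-compacted-topic", "user-42");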
608 | /** 609 | * resolves the metadata information for a given topic, 610 | * will create the topic if it doesn't exist 611 | * @param {string} topic - name of the topic to query metadata for 612 | * (an empty string resolves the metadata of all topics) 613 | * @returns {Promise.<Metadata>} 614 | */ 615 | getTopicMetadata(topic: string): Promise<Metadata> { 616 | return new Promise((resolve, reject) => { 617 | 618 | if (!this.producer) { 619 | return reject(new Error("You must call and await .connect() before trying to get metadata.")); 620 | } 621 | 622 | const topics = (topic === "") 623 | ? [] 624 | : [topic]; 625 | 626 | this._adminClient.fetchTopicMetadata({ 627 | topics, 628 | }).then((raw) => { 629 | resolve(new Metadata(raw)); 630 | }).catch((e) => reject(e)); 631 | }); 632 | } 633 | 634 | /** 635 | * @alias getTopicMetadata 636 | * @returns {Promise.<Metadata>} 637 | */ 638 | getMetadata(): Promise<Metadata> { 639 | return this.getTopicMetadata(""); 640 | } 641 | 642 | /** 643 | * returns a list of available kafka topics on the connected brokers 644 | */ 645 | async getTopicList(): Promise<string[]> { 646 | const metadata: Metadata = await this.getMetadata(); 647 | return metadata.asTopicList(); 648 | } 649 | 650 | /** 651 | * @async 652 | * gets the partition count of the topic from the brokers metadata 653 | * keeps a local cache to speed up future requests 654 | * resolves to -1 if an error occurs 655 | * @param {string} topic - name of topic 656 | * @returns {Promise.<number>} 657 | */ 658 | async getPartitionCountOfTopic(topic: string): Promise<number> { 659 | 660 | if (!this.producer) { 661 | throw new Error("You must call and await .connect() before trying to get metadata."); 662 | } 663 | 664 | //prevent long running leaks.. 665 | if (Object.keys(this._partitionCounts).length > MAX_PART_STORE_SIZE) { 666 | this._partitionCounts = {}; 667 | } 668 | 669 | const now = Date.now(); 670 | if (!this._partitionCounts[topic] || this._partitionCounts[topic].requested + MAX_PART_AGE_MS < now) { 671 | 672 | let count = -1; 673 | try { 674 | const metadata = await this.getMetadata(); //prevent creation of topic, if it does not exist 675 | count = metadata.getPartitionCountOfTopic(topic); 676 | } catch (error) { 677 | this.emit("error", new Error(`Failed to get metadata for topic ${topic}, because: ${error}.`)); 678 | return -1; 679 | } 680 | 681 | this._partitionCounts[topic] = { 682 | requested: now, 683 | count 684 | }; 685 | 686 | return count; 687 | } 688 | 689 | return this._partitionCounts[topic].count; 690 | } 691 | 692 | /** 693 | * gets the local partition count cache 694 | * @returns {object} 695 | */ 696 | getStoredPartitionCounts(): Record<string, unknown> { 697 | return this._partitionCounts; 698 | } 699 | 700 | /** 701 | * @private 702 | * resets internal values 703 | */ 704 | private _reset() { 705 | this._lastProcessed = 0; 706 | this._totalSentMessages = 0; 707 | this.paused = false; 708 | this._inClosing = false; 709 | this._partitionCounts = {}; 710 | this._analytics = undefined; 711 | this._errors = 0; 712 | } 713 | 714 | /** 715 | * closes connection if open 716 | * stops poll interval if open 717 | */ 718 | async close(): Promise<void> { 719 | 720 | this.haltAnalytics(); 721 | 722 | if (this.producer) { 723 | this._inClosing = true; 724 | clearInterval(this._producerPollIntv); 725 | 726 | try { 727 | await Promise.all([ 728 | this.producer.disconnect(), 729 | this._adminClient.disconnect(), 730 | ]); 731 | } catch (error) { 732 | // Do nothing, silently closing 733 | } 734 | 735 | //this.producer = null; 736 | } 737 | } 738 | 739 | /** 740 | * called in interval 741 | * @private 742 | */ 743 | private _runAnalytics(): void { 744 | 745 | if (!this._analytics) { 746 | this._analytics = new ProducerAnalytics(this, this._analyticsOptions, this.config.logger as KafkaLogger); 747 | } 748 | 749 | this._analytics.run() 750 | .then(res => this.emit("analytics", res)) 751 | .catch(error => this.emit("error", error)); 752 | } 753 | 
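// Editorial sketch: wiring up the analytics interval on a producer instance;
// the interval value is illustrative:
//
//   producer.enableAnalytics({ analyticsInterval: 1000 * 60 });
//   producer.on("analytics", (res) => console.log(res.produced, res.errors));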
this.emit("error", new Error("You have not enabled analytics on this consumer instance.")); 763 | return null; 764 | } 765 | 766 | return this._analytics.getLastResult(); 767 | } 768 | 769 | /** 770 | * runs a health check and returns object with status and message 771 | * @returns {Promise.} 772 | */ 773 | checkHealth(): Promise { 774 | return this._health.check(); 775 | } 776 | } 777 | -------------------------------------------------------------------------------- /src/lib/kafkajs/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./JSConsumer"; 2 | export * from "./JSProducer"; 3 | -------------------------------------------------------------------------------- /src/lib/shared/Analytics.ts: -------------------------------------------------------------------------------- 1 | import { LagStatus, AnalyticsLagChange, KafkaLogger, AnalyticsConfig, ConsumerStats, ProducerStats } from "../interfaces"; 2 | import { JSConsumer, JSProducer } from "../kafkajs"; 3 | 4 | const INTERESTING_DISTANCE = 10; 5 | export const defaultAnalyticsInterval = 1000 * 150; 6 | 7 | interface RunResult { 8 | generatedAt: number; 9 | interval: number; 10 | errors: number | null; 11 | } 12 | 13 | export interface ConsumerRunResult extends RunResult { 14 | lagChange: AnalyticsLagChange; 15 | largestLag: { 16 | topic: string; 17 | partition: number; 18 | lowDistance: number; 19 | highDistance: number; 20 | detail: { 21 | lowOffset: number, 22 | highOffset: number, 23 | comittedOffset: number 24 | } 25 | }; 26 | consumed: number | null; 27 | } 28 | 29 | export interface ProducerRunResult extends RunResult { 30 | produced: number | null; 31 | interval: number; 32 | } 33 | 34 | /** 35 | * parent analytics class 36 | */ 37 | abstract class Analytics { 38 | 39 | abstract client: JSConsumer | JSProducer; 40 | config: AnalyticsConfig | null = null; 41 | logger: KafkaLogger; 42 | 43 | _lastErrors = 0; 44 | _consumedCount = 0; 45 | 46 | abstract _lastRes: RunResult | null = null; 47 | _producedCount = 0; 48 | 49 | /** 50 | * creates a new instance 51 | * @param {object} config 52 | * @param {object} logger 53 | */ 54 | constructor(config: AnalyticsConfig | null = null, logger: KafkaLogger) { 55 | this.config = config; 56 | this.logger = logger; 57 | } 58 | 59 | /** 60 | * @private 61 | * returns occured errors in interval 62 | * @param {object} stats - getStats() client result 63 | * @returns {number} 64 | */ 65 | _errorsInInterval(stats): number { 66 | const diff = (stats.totalErrors || 0) - this._lastErrors; 67 | this._lastErrors = stats.totalErrors || 0; 68 | return diff; 69 | } 70 | 71 | /** 72 | * @static 73 | * @param {Array} offsets 74 | */ 75 | static statusArrayToKeyedObject(offsets: LagStatus[] = []) { 76 | 77 | const obj = {}; 78 | 79 | offsets.forEach(offset => { 80 | 81 | if (!obj[offset.topic]) { 82 | obj[offset.topic] = {}; 83 | } 84 | 85 | obj[offset.topic][offset.partition] = { 86 | lowDistance: offset.lowDistance, 87 | highDistance: offset.highDistance, 88 | detail: offset.detail 89 | }; 90 | }); 91 | 92 | return obj; 93 | } 94 | 95 | abstract run(); 96 | } 97 | 98 | /** 99 | * outsourced analytics for nconsumers 100 | */ 101 | export class ConsumerAnalytics extends Analytics { 102 | 103 | _lastRes: ConsumerRunResult | null = null; 104 | 105 | client: JSConsumer; 106 | 107 | /** 108 | * creates a new instance 109 | * @param {NConsumer|NProducer} client 110 | * @param {object} config 111 | * @param {object} logger 112 | */ 113 | constructor(client: 
98 | /** 99 | * outsourced analytics for consumers 100 | */ 101 | export class ConsumerAnalytics extends Analytics { 102 | 103 | _lastRes: ConsumerRunResult | null = null; 104 | 105 | client: JSConsumer; 106 | 107 | /** 108 | * creates a new instance 109 | * @param {JSConsumer} client 110 | * @param {object} config 111 | * @param {object} logger 112 | */ 113 | constructor(client: JSConsumer, config: AnalyticsConfig | null = null, logger: KafkaLogger) { 114 | super(config, logger); 115 | this.client = client; 116 | } 117 | 118 | /** 119 | * resolves a comparison between lag states 120 | * @private 121 | * @returns {Promise.<object>} 122 | */ 123 | async _checkLagChanges(): Promise<AnalyticsLagChange | { error: string }> { 124 | 125 | const last = this.client.getLastLagStatus(); 126 | await this.client.getLagStatus(); //await potential refresh 127 | const newest = this.client.getLagCache(); 128 | 129 | if (!last && !newest) { 130 | return { 131 | error: "No lag status fetched yet." 132 | }; 133 | } 134 | 135 | if (!last) { 136 | return { 137 | error: "Only newest status fetched yet." 138 | }; 139 | } 140 | 141 | if (!newest) { 142 | return { 143 | error: "Only last status fetched yet." 144 | }; 145 | } 146 | 147 | const newLags = {}; 148 | const changedLags = {}; 149 | const resolvedLags = {}; 150 | const stallLags = {}; 151 | 152 | const lastKeyed = Analytics.statusArrayToKeyedObject(last.status); 153 | 154 | newest.status.forEach(offset => { 155 | 156 | //didn't exist in last check 157 | if (!lastKeyed[offset.topic] || !lastKeyed[offset.topic][offset.partition]) { 158 | //distance is interesting 159 | if (offset.highDistance >= INTERESTING_DISTANCE) { 160 | if (!newLags[offset.topic]) { 161 | newLags[offset.topic] = {}; 162 | } 163 | 164 | //store new lag for this partition 165 | newLags[offset.topic][offset.partition] = offset.highDistance; 166 | } 167 | return; 168 | } 169 | //did exist in last check 170 | 171 | //distance decreased 172 | if (offset.highDistance < INTERESTING_DISTANCE) { 173 | 174 | if (!resolvedLags[offset.topic]) { 175 | resolvedLags[offset.topic] = {}; 176 | } 177 | 178 | resolvedLags[offset.topic][offset.partition] = offset.highDistance; 179 | return; 180 | } 181 | 182 | //distance unchanged (stalled) 183 | if (offset.highDistance === lastKeyed[offset.topic][offset.partition].highDistance) { 184 | 185 | if (!stallLags[offset.topic]) { 186 | stallLags[offset.topic] = {}; 187 | } 188 | 189 | stallLags[offset.topic][offset.partition] = offset.highDistance; 190 | return; 191 | } 192 | 193 | //distance changed (but did not decrease enough) 194 | if (!changedLags[offset.topic]) { 195 | changedLags[offset.topic] = {}; 196 | } 197 | 198 | changedLags[offset.topic][offset.partition] = offset.highDistance; 199 | }); 200 | 201 | return { 202 | timelyDifference: newest.at - last.at, 203 | fetchPerformance: last.took - newest.took, 204 | newLags, 205 | changedLags, 206 | resolvedLags, 207 | stallLags 208 | }; 209 | } 210 | 211 | /** 212 | * gets the largest lag in all assigned offsets 213 | * @private 214 | * @returns {object} 215 | */ 216 | _identifyLargestLag(): { highDistance?: number, error?: string } { 217 | 218 | let lag = { 219 | highDistance: -1 220 | }; 221 | 222 | const newest = this.client.getLagCache(); 223 | 224 | if (!newest) { 225 | return { 226 | error: "Only last status fetched yet."
227 | }; 228 | } 229 | 230 | newest.status.forEach(offset => { 231 | if (offset.highDistance > lag.highDistance) { 232 | lag = offset; 233 | } 234 | }); 235 | 236 | return lag; 237 | } 238 | 239 | /** 240 | * returns consumed amount of messages in interval 241 | * @private 242 | * @param {object} stats - getStats() client result 243 | * @returns {number} 244 | */ 245 | _consumed(stats: ConsumerStats): number { 246 | const diff = (stats.totalIncoming || 0) - this._consumedCount; 247 | this._consumedCount = stats.totalIncoming || 0; 248 | return diff; 249 | } 250 | 251 | /** 252 | * @async 253 | * called in interval 254 | * @returns {object} 255 | */ 256 | async run(): Promise<ConsumerRunResult> { 257 | 258 | const res = { 259 | generatedAt: Date.now(), 260 | interval: (this.config) ? this.config.analyticsInterval : defaultAnalyticsInterval, 261 | lagChange: {}, 262 | largestLag: {}, 263 | consumed: 0, 264 | errors: 0, 265 | }; 266 | 267 | try { 268 | res.lagChange = await this._checkLagChanges(); 269 | } catch (error) { 270 | this.logger.error(`Failed to calculate lag changes ${error.message}.`); 271 | // res.lagChange = null; 272 | } 273 | 274 | try { 275 | res.largestLag = this._identifyLargestLag(); 276 | } catch (error) { 277 | this.logger.error(`Failed to calculate largest lag ${error.message}.`); 278 | // res.largestLag = null; 279 | } 280 | 281 | const stats = this.client.getStats(); 282 | 283 | try { 284 | res.consumed = this._consumed(stats); 285 | } catch (error) { 286 | this.logger.error(`Failed to get consumed count ${error.message}.`); 287 | // res.consumed = null; 288 | } 289 | 290 | try { 291 | res.errors = this._errorsInInterval(stats); 292 | } catch (error) { 293 | this.logger.error(`Failed to get error count ${error.message}.`); 294 | // res.errors = null; 295 | } 296 | 297 | this.logger.debug(JSON.stringify(res)); 298 | this._lastRes = res as ConsumerRunResult; 299 | return res as ConsumerRunResult; 300 | } 301 | 302 | /** 303 | * returns the last result of run() 304 | * @returns {object} 305 | */ 306 | getLastResult(): ConsumerRunResult | null { 307 | return this._lastRes; 308 | } 309 | }
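// Editorial sketch: interpreting a ConsumerRunResult — stallLags holds the
// partitions whose highDistance did not move between two interval runs:
//
//   const res = consumer.getAnalytics();
//   if (res && res.lagChange && Object.keys(res.lagChange.stallLags).length > 0) {
//     console.warn("stalled partitions:", res.lagChange.stallLags);
//   }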
310 | 311 | /** 312 | * outsourced analytics for producers 313 | */ 314 | export class ProducerAnalytics extends Analytics { 315 | 316 | _lastRes: ProducerRunResult | null = null; 317 | 318 | client: JSProducer; 319 | 320 | /** 321 | * creates a new instance 322 | * @param {object} config 323 | * @param {object} logger 324 | */ 325 | constructor(client: JSProducer, config: AnalyticsConfig | null = null, logger: KafkaLogger) { 326 | super(config, logger); 327 | this.client = client; 328 | } 329 | 330 | /** 331 | * returns produced amount of messages in interval 332 | * @private 333 | * @param {object} stats - getStats() client result 334 | * @returns {number} 335 | */ 336 | _produced(stats: ProducerStats): number { 337 | const diff = (stats.totalPublished || 0) - this._producedCount; 338 | this._producedCount = stats.totalPublished || 0; 339 | return diff; 340 | } 341 | 342 | /** 343 | * called in interval 344 | * @returns {object} 345 | */ 346 | async run(): Promise<ProducerRunResult> { 347 | 348 | const res: ProducerRunResult = { 349 | generatedAt: Date.now(), 350 | interval: (this.config) ? this.config.analyticsInterval : defaultAnalyticsInterval, 351 | produced: 0, 352 | errors: null, 353 | }; 354 | 355 | const stats = this.client.getStats(); 356 | 357 | try { 358 | res.produced = this._produced(stats); 359 | } catch (error) { 360 | this.logger.error(`Failed to get produced count ${error.message}.`); 361 | res.produced = null; 362 | } 363 | 364 | try { 365 | res.errors = this._errorsInInterval(stats); 366 | } catch (error) { 367 | this.logger.error(`Failed to get error count ${error.message}.`); 368 | res.errors = null; 369 | } 370 | 371 | this.logger.debug(JSON.stringify(res)); 372 | this._lastRes = res; 373 | return res; 374 | } 375 | 376 | /** 377 | * returns the last result of run() 378 | * @returns {object} 379 | */ 380 | getLastResult(): ProducerRunResult | null { 381 | return this._lastRes; 382 | } 383 | } 384 | -------------------------------------------------------------------------------- /src/lib/shared/CompressionTypes.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const TYPES = [0, 1, 2]; 4 | 5 | class CompressionTypes { 6 | 7 | public NONE: number; 8 | public GZIP: number; 9 | public SNAPPY: number; 10 | 11 | constructor() { 12 | this.NONE = 0; 13 | this.GZIP = 1; 14 | this.SNAPPY = 2; 15 | } 16 | 17 | isValid(type) { 18 | 19 | if (typeof type !== "number") { 20 | return false; 21 | } 22 | 23 | return TYPES.indexOf(type) !== -1; 24 | } 25 | } 26 | 27 | export default new CompressionTypes(); 28 | --------------------------------------------------------------------------------
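A quick illustration (editorial addition, not a repository file): the exported
singleton above can be used to validate a numeric codec before it is placed
into a producer config.

  import CompressionTypes from "./CompressionTypes";

  CompressionTypes.isValid(CompressionTypes.GZIP); // true
  CompressionTypes.isValid(5); // false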
-------------------------------------------------------------------------------- /src/lib/shared/Health.ts: -------------------------------------------------------------------------------- 1 | import merge from "lodash.merge"; 2 | import { KafkaHealthConfig } from "../interfaces"; 3 | import { JSProducer, JSConsumer } from "../kafkajs"; 4 | 5 | const defaultConfig = { 6 | thresholds: { 7 | consumer: { 8 | errors: 5, 9 | lag: 1000, 10 | stallLag: 10, 11 | minMessages: 1 12 | }, 13 | producer: { 14 | errors: 4, 15 | minMessages: 1 16 | } 17 | } 18 | }; 19 | 20 | export const STATES = { 21 | DIS_ANALYTICS: -4, 22 | NO_ANALYTICS: -3, 23 | UNKNOWN: -2, 24 | UNCONNECTED: -1, 25 | HEALTHY: 0, 26 | RISK: 1, 27 | WARNING: 2, 28 | CRITICAL: 3 29 | }; 30 | 31 | const MESSAGES = { 32 | DIS_ANALYTICS: "Analytics are disabled, cannot measure required parameters. Please enable.", 33 | NO_ANALYTICS: "Analytics have not yet run, checks will be available after first run.", 34 | UNKNOWN: "State is unknown.", 35 | UNCONNECTED: "The client is not connected.", 36 | HEALTHY: "No problems detected, client is healthy.", 37 | ERRORS: "There was an error." 38 | }; 39 | 40 | /** 41 | * little pojo class around the check object 42 | */ 43 | export class Check { 44 | 45 | status: number; 46 | messages: string[]; 47 | 48 | /** 49 | * creates a new instance 50 | * @param {number} status - status code 51 | * @param {Array|string} message - message/s, pass an empty array to initialise clean 52 | */ 53 | constructor(status = STATES.HEALTHY, message: string | string[] = MESSAGES.HEALTHY) { 54 | this.status = status; 55 | this.messages = Array.isArray(message) ? message : [message]; 56 | } 57 | 58 | /** 59 | * escalates the status (a lower status never overwrites a higher one) 60 | * @param {number} status - new status code 61 | * @returns {boolean} 62 | */ 63 | changeStatus(status: number = STATES.UNKNOWN): boolean { 64 | 65 | if (status > this.status) { 66 | this.status = status; 67 | return true; 68 | } 69 | 70 | return false; 71 | } 72 | 73 | /** 74 | * adds a message to the check 75 | * @param {string} message - string message to attach 76 | * @returns {number} 77 | */ 78 | add(message: string = MESSAGES.UNKNOWN): number { 79 | return this.messages.push(message); 80 | } 81 | } 82 | 83 | /** 84 | * health parent class 85 | */ 86 | abstract class Health { 87 | 88 | config: KafkaHealthConfig; 89 | abstract client; 90 | STATES = STATES; 91 | MESSAGES = MESSAGES; 92 | 93 | /** 94 | * creates a new instance 95 | * @param {KafkaHealthConfig} config 96 | */ 97 | constructor(config?: KafkaHealthConfig) { 98 | this.config = merge({}, defaultConfig, config); 99 | } 100 | 101 | /** 102 | * returns a new check instance 103 | * @param {number} status 104 | * @param {Array|string} message 105 | */ 106 | createCheck(status: number, message: string | string[]): Check { 107 | return new Check(status, message); 108 | } 109 | } 110 | 
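// Editorial sketch: changeStatus() only ever escalates, so a later, milder
// finding cannot mask an earlier critical one:
//
//   const check = new Check(STATES.HEALTHY, []);
//   check.changeStatus(STATES.CRITICAL); // true  -> status is CRITICAL
//   check.changeStatus(STATES.WARNING);  // false -> CRITICAL is kept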
111 | /** 112 | * health check adapted for JSConsumers 113 | * @extends Health 114 | */ 115 | export class ConsumerHealth extends Health { 116 | 117 | client: JSConsumer; 118 | 119 | /** 120 | * creates a new instance 121 | * @param {JSConsumer} nconsumer 122 | * @param {KafkaHealthConfig} config - optional 123 | */ 124 | constructor(nconsumer: JSConsumer, config?: KafkaHealthConfig) { 125 | super(config); 126 | this.client = nconsumer; 127 | } 128 | 129 | /** 130 | * runs the health check 131 | * @async 132 | * @returns {Promise.<Check>} 133 | */ 134 | async check(): Promise<Check> { 135 | 136 | /* ### preparation ### */ 137 | 138 | if (!this.client.consumer) { 139 | return super.createCheck(STATES.UNCONNECTED, MESSAGES.UNCONNECTED); 140 | } 141 | 142 | if (!this.client._analytics) { 143 | return super.createCheck(STATES.DIS_ANALYTICS, MESSAGES.DIS_ANALYTICS); 144 | } 145 | 146 | const analytics = this.client._analytics.getLastResult(); 147 | 148 | if (!analytics || Object.keys(analytics).length === 0) { 149 | return super.createCheck(STATES.NO_ANALYTICS, MESSAGES.NO_ANALYTICS); 150 | } 151 | 152 | /* ### eof preparation ### */ 153 | 154 | const check = new Check(STATES.HEALTHY, []); 155 | 156 | if (analytics.errors !== null && analytics.errors >= this.config.thresholds.consumer.errors) { 157 | check.changeStatus(STATES.CRITICAL); 158 | check.add(MESSAGES.ERRORS); 159 | } 160 | 161 | if (analytics.largestLag !== null && analytics.largestLag.highDistance && 162 | analytics.largestLag.highDistance > this.config.thresholds.consumer.lag) { 163 | check.changeStatus(STATES.WARNING); 164 | check.add(`Lag exceeds threshold with a lag of ${analytics.largestLag.highDistance}` + 165 | ` on ${analytics.largestLag.topic}:${analytics.largestLag.partition}.`); 166 | } 167 | 168 | if (analytics.lagChange !== null && typeof analytics.lagChange.stallLags === "object" && 169 | Object.keys(analytics.lagChange.stallLags).length > this.config.thresholds.consumer.stallLag) { 170 | check.changeStatus(STATES.RISK); 171 | check.add(`Amount of stall lags exceeds threshold with ${Object.keys(analytics.lagChange.stallLags).length} unchanged lagging offsets.`); 172 | } 173 | 174 | if (analytics.consumed !== null && analytics.consumed < this.config.thresholds.consumer.minMessages) { 175 | check.changeStatus(STATES.RISK); 176 | check.add(`Amount of consumed messages is low: ${analytics.consumed}.`); 177 | } 178 | 179 | if (check.status === STATES.HEALTHY) { 180 | check.add(MESSAGES.HEALTHY); 181 | check.add(`Consumed ${analytics.consumed} message/s in the last interval, with ${analytics.errors} errors.`); 182 | } 183 | 184 | return check; 185 | } 186 | } 187 | 188 | /** 189 | * health check adapted for JSProducers 190 | * @extends Health 191 | */ 192 | export class ProducerHealth extends Health { 193 | 194 | client: JSProducer; 195 | 196 | /** 197 | * creates a new instance 198 | * @param {JSProducer} nproducer 199 | * @param {KafkaHealthConfig} config 200 | */ 201 | constructor(nproducer: JSProducer, config?: KafkaHealthConfig) { 202 | super(config); 203 | this.client = nproducer; 204 | } 205 | 206 | /** 207 | * runs the health check 208 | * @async 209 | * @returns {Promise.<Check>} 210 | */ 211 | async check(): Promise<Check> { 212 | 213 | /* ### preparation ### */ 214 | 215 | if (!this.client.producer) { 216 | return super.createCheck(STATES.UNCONNECTED, MESSAGES.UNCONNECTED); 217 | } 218 | 219 | if (!this.client._analytics) { 220 | return super.createCheck(STATES.DIS_ANALYTICS, MESSAGES.DIS_ANALYTICS); 221 | } 222 | 223 | const analytics = this.client._analytics.getLastResult(); 224 | 225 | if (!analytics || Object.keys(analytics).length === 0) { 226 | return super.createCheck(STATES.NO_ANALYTICS, MESSAGES.NO_ANALYTICS); 227 | } 228 | 229 | /* ### eof preparation ### */ 230 | 231 | const check = new Check(STATES.HEALTHY); 232 | 233 | if (analytics.errors !== null && analytics.errors >= this.config.thresholds.producer.errors) { 234 | check.changeStatus(STATES.CRITICAL); 235 | check.add(MESSAGES.ERRORS); 236 | } 237 | 238 | if (analytics.produced !== null && analytics.produced < this.config.thresholds.producer.minMessages) { 239 | check.changeStatus(STATES.RISK); 240 | check.add(`Amount of produced messages is low: ${analytics.produced}.`); 241 | } 242 | 243 | if (check.status === STATES.HEALTHY) { 244 | check.add(MESSAGES.HEALTHY); 245 | check.add(`Produced ${analytics.produced} message/s in the last interval, with ${analytics.errors} errors.`); 246 | } 247 | 248 | return check; 249 | } 250 | } 251 | -------------------------------------------------------------------------------- /src/lib/shared/Metadata.ts: -------------------------------------------------------------------------------- 1 | import { ITopicMetadata, PartitionMetadata } from "kafkajs"; 2 | 3 | // Defined from https://docs.confluent.io/current/kafka-rest/api.html#partitions 4 | type PartitionRestFormat = { 5 | partition: number; 6 | leader: number; 7 | replicas: PartitionReplicaRestFormat[] 8 | } 9 | 10 | type PartitionReplicaRestFormat = { 11 | broker: number; 12 | leader: boolean; 13 | in_sync: boolean; 14 | } 15 | 16 | type TopicMetaData = { 17 | topics: ITopicMetadata[] 18 | } 19 | 20 | /** 21 | * wrapper around the kafkajs metadata object 22 | */ 23 | export class Metadata { 24 | 25 | raw: TopicMetaData = {topics: []}; 26 | 27 | /** 28 | * creates a new instance 29 | * @param {object} raw - metadata object response of the kafkajs admin client 30 | */ 31 | constructor(raw: TopicMetaData) { 32 | this.raw = raw; 33 | } 34 | 35 | /** 36 | * @throws 37 | * returns the count of partitions of the given topic 38 | * @param {string} topicName - name of the kafka topic 39 | * @returns {number} 40 | */ 41 | getPartitionCountOfTopic(topicName: string): number { 42 | 43 | const topic = this.raw.topics.filter(topic => topic.name === topicName).pop(); 44 | 45 | if (!topic) { 46 | throw new Error(topicName + " does not exist in fetched metadata.");
47 | } 48 | 49 | return topic.partitions.length; 50 | } 51 | 52 | /** 53 | * @throws 54 | * returns a partition (id) array of the given topic 55 | * @param {string} topicName - name of the kafka topic 56 | * @returns {Array} 57 | */ 58 | getPartitionsForTopic(topicName: string): number[] { 59 | 60 | const topic = this.raw.topics.filter((topic: ITopicMetadata) => topic.name === topicName).pop(); 61 | 62 | if (!topic) { 63 | throw new Error(topicName + " does not exist in fetched metadata."); 64 | } 65 | 66 | return topic.partitions.map((partition) => partition.partitionId); 67 | } 68 | 69 | /** 70 | * @throws 71 | * returns a list of topic names 72 | */ 73 | asTopicList(): string[] { 74 | return this.raw.topics 75 | .filter((topic: ITopicMetadata) => topic.name !== "__consumer_offsets") 76 | .map((topic: ITopicMetadata) => topic.name); 77 | } 78 | 79 | /** 80 | * @throws 81 | * gets formatted metadata information about a given topic 82 | * @param {string} topicName - name of the kafka topic 83 | * @returns {object} 84 | */ 85 | asTopicDescription(topicName: string): Record<string, unknown> { 86 | 87 | if (!this.raw.topics || !this.raw.topics.length) { 88 | return {}; 89 | } 90 | 91 | let topic; 92 | for (let i = 0; i < this.raw.topics.length; i++) { 93 | if (this.raw.topics[i].name === topicName) { 94 | topic = this.raw.topics[i]; 95 | break; 96 | } 97 | } 98 | 99 | if (!topic) { 100 | return {}; 101 | } 102 | 103 | return { 104 | name: topic.name, 105 | configs: null, 106 | partitions: Metadata.formatPartitions(topic.partitions) 107 | }; 108 | } 109 | 110 | /** 111 | * @throws 112 | * gets a list of formatted partition info for topic 113 | * @param {string} topicName - name of the kafka topic 114 | * @returns {Array} 115 | */ 116 | asTopicPartitions(topicName: string): PartitionRestFormat[] { 117 | 118 | if (!this.raw.topics || !this.raw.topics.length) { 119 | return []; 120 | } 121 | 122 | let topic: ITopicMetadata | null = null; 123 | for (let i = 0; i < this.raw.topics.length; i++) { 124 | if (this.raw.topics[i].name === topicName) { 125 | topic = this.raw.topics[i]; 126 | break; 127 | } 128 | } 129 | 130 | if (!topic) { 131 | return []; 132 | } 133 | 134 | return Metadata.formatPartitions(topic.partitions); 135 | } 136 | 137 | /** 138 | * @deprecated 139 | * @throws 140 | * gets a broker object (list of broker ids) 141 | * @returns {object} 142 | */ 143 | asBrokers(): Record<string, unknown> { 144 | return { 145 | brokers: [] 146 | }; 147 | } 148 | 149 | /** 150 | * @throws 151 | * maps partitions into kafka-rest format 152 | * @param {Array} partitions - array of partitions 153 | * @returns {Array} 154 | */ 155 | static formatPartitions(partitions: PartitionMetadata[]): PartitionRestFormat[] { 156 | return partitions.map((p) => ({ 157 | partition: p.partitionId, 158 | leader: p.leader, 159 | replicas: p.replicas.map((r) => ({ 160 | broker: r, 161 | in_sync: p.isr.indexOf(r) !== -1, 162 | leader: r === p.leader 163 | })), 164 | })); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/lib/shared/index.ts: -------------------------------------------------------------------------------- 1 | import CompressionTypes from "./CompressionTypes"; 2 | 3 | export { CompressionTypes }; 4 | export * from "./Health"; 5 | export * from "./Metadata"; 6 | -------------------------------------------------------------------------------- /test/config.ts: 
-------------------------------------------------------------------------------- 1 | import { JSKafkaProducerConfig, JSKafkaConsumerConfig } from "../src/lib/interfaces"; 2 | import { CompressionTypes } from "kafkajs"; 3 | 4 | export const jsProducerConfig: JSKafkaProducerConfig = { 5 | noptions: { 6 | "metadata.broker.list": "localhost:9092", 7 | "client.id": "n-test-produce-js", 8 | "compression.codec": CompressionTypes.None, 9 | "socket.keepalive.enable": true, 10 | "queue.buffering.max.ms": 100, 11 | "batch.num.messages": 5, 12 | "log_level": 0, 13 | }, 14 | options: { 15 | murmurHashVersion: "2", 16 | }, 17 | tconf: { 18 | "request.required.acks": 1, 19 | "message.timeout.ms": 1000, 20 | }, 21 | }; 22 | 23 | export const jsConsumerConfig: JSKafkaConsumerConfig = { 24 | noptions: { 25 | "metadata.broker.list": "localhost:9092", 26 | "client.id": "n-test-consumer-js", 27 | "group.id": "n-test-group-js", 28 | "enable.auto.commit": false, 29 | "socket.keepalive.enable": true, 30 | "socket.blocking.max.ms": 5, 31 | }, 32 | options: {}, 33 | tconf: { 34 | "auto.offset.reset": "earliest", 35 | }, 36 | }; 37 | 38 | export const topic = "n-test-topic"; 39 | 40 | export const batchOptions = { 41 | batchSize: 1000, 42 | commitEveryNBatch: 1, 43 | manualBatching: true, 44 | }; 45 | -------------------------------------------------------------------------------- /test/int/Health.test.ts: -------------------------------------------------------------------------------- 1 | import assert from "assert"; 2 | import { ConsumerHealth, ProducerHealth, STATES } from "../../src/index"; 3 | 4 | describe("Health UNIT", () => { 5 | 6 | const getFakeProducerAnalyticsResult = (produced = 0, errors = 0) => { 7 | return { 8 | generatedAt: 1508679543026, 9 | interval: 500, 10 | produced, 11 | errors 12 | }; 13 | }; 14 | 15 | const getFakeConsumerAnalyticsResult = (highDistance = 0, consumed = 0, errors = 0) => { 16 | return { 17 | "generatedAt": 1508679543026, 18 | "interval": 500, 19 | "lagChange": { 20 | "timelyDifference": 1001, 21 | "fetchPerformance": -7, 22 | "newLags": {}, 23 | "changedLags": {}, 24 | "resolvedLags": { 25 | "n-test-topic": { 26 | "0": 0 27 | } 28 | }, 29 | "stallLags": {} 30 | }, 31 | "largestLag": { 32 | "topic": "n-test-topic", 33 | "partition": 0, 34 | "lowDistance": 337, 35 | highDistance, 36 | "detail": { 37 | "lowOffset": 0, 38 | "highOffset": 337, 39 | "comittedOffset": 337 40 | } 41 | }, 42 | consumed, 43 | errors 44 | }; 45 | }; 46 | 47 | const getFakeProducer = (ares = {}) => { 48 | return { 49 | producer: true, 50 | _analytics: { 51 | getLastResult: () => { 52 | return ares; 53 | } 54 | } 55 | }; 56 | }; 57 | 58 | const getFakeConsumer = (ares = {}) => { 59 | return { 60 | consumer: true, 61 | _analytics: { 62 | getLastResult: () => { 63 | return ares; 64 | } 65 | } 66 | }; 67 | }; 68 | 69 | const getPHI = fakeClient => new ProducerHealth(fakeClient); 70 | const getCHI = fakeClient => new ConsumerHealth(fakeClient); 71 | 72 | it("should be healthy", () => { 73 | 74 | const ph = getPHI(getFakeProducer(getFakeProducerAnalyticsResult(100, 0))); 75 | const ch = getCHI(getFakeConsumer(getFakeConsumerAnalyticsResult(0, 100, 0))); 76 | 77 | return Promise.all([ 78 | ph.check(), 79 | ch.check() 80 | ]).then(res => { 81 | assert.equal(res[0].status, 0); 82 | assert.equal(res[1].status, 0); 83 | }); 84 | }); 85 | 86 | it("should be critical", () => { 87 | 88 | const ph = getPHI(getFakeProducer(getFakeProducerAnalyticsResult(0, 100))); 89 | const ch = 
getCHI(getFakeConsumer(getFakeConsumerAnalyticsResult(0, 0, 100))); 90 | 91 | return Promise.all([ 92 | ph.check(), 93 | ch.check() 94 | ]).then(res => { 95 | assert.equal(res[0].status, STATES.CRITICAL); 96 | assert.equal(res[0].messages.length, 3); 97 | assert.equal(res[1].status, STATES.CRITICAL); 98 | assert.equal(res[1].messages.length, 2); 99 | }); 100 | }); 101 | 102 | it("should be risky", () => { 103 | 104 | const ph = getPHI(getFakeProducer(getFakeProducerAnalyticsResult(0, 2))); 105 | const ch = getCHI(getFakeConsumer(getFakeConsumerAnalyticsResult(0, 0, 2))); 106 | 107 | return Promise.all([ 108 | ph.check(), 109 | ch.check() 110 | ]).then(res => { 111 | assert.equal(res[0].status, STATES.RISK); 112 | assert.equal(res[1].status, STATES.RISK); 113 | }); 114 | }); 115 | 116 | it("should be a warning", () => { 117 | 118 | const ch = getCHI(getFakeConsumer(getFakeConsumerAnalyticsResult(1001, 100, 0))); 119 | 120 | return Promise.all([ 121 | ch.check() 122 | ]).then(res => { 123 | assert.equal(res[0].status, STATES.WARNING); 124 | assert.equal(res[0].messages.length, 1); 125 | }); 126 | }); 127 | 128 | it("should be no analytics", () => { 129 | 130 | const ph = getPHI(getFakeProducer()); 131 | const ch = getCHI(getFakeConsumer()); 132 | 133 | return Promise.all([ 134 | ph.check(), 135 | ch.check() 136 | ]).then(res => { 137 | assert.equal(res[0].status, STATES.NO_ANALYTICS); 138 | assert.equal(res[1].status, STATES.NO_ANALYTICS); 139 | }); 140 | }); 141 | 142 | it("should be disabled analytics", () => { 143 | 144 | const ph = getPHI({ 145 | producer: true 146 | }); 147 | const ch = getCHI({ 148 | consumer: true 149 | }); 150 | 151 | return Promise.all([ 152 | ph.check(), 153 | ch.check() 154 | ]).then(res => { 155 | assert.equal(res[0].status, STATES.DIS_ANALYTICS); 156 | assert.equal(res[1].status, STATES.DIS_ANALYTICS); 157 | }); 158 | }); 159 | 160 | it("should be not connected", () => { 161 | 162 | const ph = getPHI({ 163 | producer: false 164 | }); 165 | const ch = getCHI({ 166 | consumer: false 167 | }); 168 | 169 | return Promise.all([ 170 | ph.check(), 171 | ch.check() 172 | ]).then(res => { 173 | assert.equal(res[0].status, STATES.UNCONNECTED); 174 | assert.equal(res[1].status, STATES.UNCONNECTED); 175 | }); 176 | }); 177 | 178 | }); 179 | -------------------------------------------------------------------------------- /test/int/JSSinek.test.ts: -------------------------------------------------------------------------------- 1 | import assert from "assert"; 2 | import { JSConsumer, JSProducer } from '../../src' 3 | import { jsProducerConfig, jsConsumerConfig, topic } from '../config'; 4 | 5 | describe("Javascript Client INT", () => { 6 | 7 | let consumer: JSConsumer; 8 | let producer: JSProducer; 9 | const consumedMessages: any[] = []; 10 | let firstMessageReceived = false; 11 | let messagesChecker; 12 | 13 | before(done => { 14 | 15 | try { 16 | 17 | producer = new JSProducer(jsProducerConfig); 18 | consumer = new JSConsumer(topic, jsConsumerConfig); 19 | 20 | producer.on("error", error => console.error(error)); 21 | consumer.on("error", error => console.error(error)); 22 | 23 | Promise.all([ 24 | producer.connect(), 25 | consumer.connect(false) 26 | ]).then(() => { 27 | consumer.consume(async (messages, callback) => { 28 | messages.forEach((message) => { 29 | if(!firstMessageReceived){ 30 | firstMessageReceived = true; 31 | } 32 | consumedMessages.push(message); 33 | }); 34 | callback(); 35 | }, true, false, { 36 | batchSize: 1000, 37 | commitEveryNBatch: 1, 38 | 
manualBatching: true, 39 | }); 40 | setTimeout(done, 1000); 41 | }); 42 | } catch (e) { 43 | console.log(e); 44 | } 45 | 46 | }); 47 | 48 | after(done => { 49 | if (producer && consumer) { 50 | 51 | try { 52 | producer.close(); 53 | consumer.close(); //commit 54 | } catch (error) { 55 | console.error(error); 56 | } 57 | 58 | setTimeout(done, 500); 59 | } 60 | }); 61 | 62 | it("should be able to produce messages", async () => { 63 | 64 | // Use await instead of Promise.all in order to ensure the message order 65 | // for the tests further down. 66 | try { 67 | await producer.send(topic, "a message"); 68 | await producer.bufferFormatPublish(topic, "1", { content: "a message 1" }, 1, null, null, 0); 69 | await producer.bufferFormatUpdate(topic, "2", { content: "a message 2" }, 1, null, null, 0); 70 | await producer.bufferFormatUnpublish(topic, "3", { content: "a message 3" }, 1, null, null, 0); 71 | await producer.send(topic, Buffer.from("a message buffer")); 72 | 73 | return true; 74 | } catch (e) { 75 | console.error(e); 76 | return false; 77 | } 78 | }); 79 | 80 | it("should be able to wait", done => { 81 | messagesChecker = setInterval(() => { 82 | if (consumedMessages.length >= 5) { 83 | clearInterval(messagesChecker); 84 | done(); 85 | } 86 | }, 500); 87 | }); 88 | 89 | it("should have received first message", done => { 90 | assert.ok(firstMessageReceived); 91 | done(); 92 | }); 93 | 94 | it("should be able to consume messages", done => { 95 | assert.ok(consumedMessages.length); 96 | assert.ok(!Buffer.isBuffer(consumedMessages[0].value)); 97 | assert.equal(consumedMessages[0].value, "a message"); 98 | assert.equal(JSON.parse(consumedMessages[1].value).payload.content, "a message 1"); 99 | assert.equal(JSON.parse(consumedMessages[2].value).payload.content, "a message 2"); 100 | assert.equal(JSON.parse(consumedMessages[3].value).payload.content, "a message 3"); 101 | assert.equal(consumedMessages[4].value, "a message buffer"); 102 | done(); 103 | }); 104 | }); 105 | -------------------------------------------------------------------------------- /test/mocha.opts: -------------------------------------------------------------------------------- 1 | --exit 2 | -------------------------------------------------------------------------------- /tsconfig.dist.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig", 3 | "compilerOptions": { 4 | "rootDir": "src/", 5 | "noEmit": false, 6 | "outDir": "dist", 7 | "declaration": true, 8 | "declarationMap": true, 9 | "sourceMap": true 10 | }, 11 | "include": [ 12 | "src" 13 | ] 14 | } -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "esModuleInterop": true, 5 | "target": "es2016", 6 | "sourceMap": true, 7 | "noEmit": true, 8 | 9 | "noImplicitReturns": true, 10 | "noFallthroughCasesInSwitch": true, 11 | "noUnusedParameters": true, 12 | "noUnusedLocals": true, 13 | 14 | "strict": true, 15 | 16 | "noImplicitAny": false, 17 | "strictBindCallApply": true, 18 | "strictNullChecks": true 19 | }, 20 | "include": ["./src/**/*"] 21 | } --------------------------------------------------------------------------------
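Editor's appendix — a minimal end-to-end usage sketch (not a file of this
repository; it assumes the configs exported by test/config.ts above and a
Kafka broker reachable on localhost:9092):

  import { JSConsumer, JSProducer } from "sinek";
  import { jsProducerConfig, jsConsumerConfig, topic, batchOptions } from "./config";

  (async () => {
    const producer = new JSProducer(jsProducerConfig);
    const consumer = new JSConsumer(topic, jsConsumerConfig);

    await Promise.all([producer.connect(), consumer.connect(false)]);

    consumer.consume(async (messages, callback) => {
      messages.forEach((message) => console.log(message.value));
      callback();
    }, true, false, batchOptions);

    const { partition, offset } = await producer.send(topic, "hello world");
    console.log(`produced to ${topic}:${partition}@${offset}`);
  })();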