├── .editorconfig
├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── bug_report.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   └── pull_request_template.md
│   ├── stale.yml
│   └── workflows
│       ├── npm-publish.yml
│       └── test.yml
├── .gitignore
├── .gitmodules
├── .jshintignore
├── .jshintrc
├── .npmignore
├── CONTRIBUTING.md
├── LICENSE.txt
├── Makefile
├── README.md
├── bench
│   ├── consumer-raw-rdkafka.js
│   ├── consumer-subscribe.js
│   ├── kafka-consumer-stream.js
│   ├── producer-raw-rdkafka.js
│   ├── producer-rdkafka.js
│   └── seed.sh
├── binding.gyp
├── ci
│   ├── checks
│   │   ├── librdkafka-correct-version.js
│   │   └── librdkafka-exists.js
│   ├── librdkafka-defs-generator.js
│   ├── prepublish.js
│   └── update-version.js
├── config.d.ts
├── configure
├── cpplint.py
├── deploy.enc
├── deps
│   ├── librdkafka.gyp
│   └── windows-install.py
├── docker-compose.yml
├── e2e
│   ├── admin.spec.js
│   ├── both.spec.js
│   ├── consumer.spec.js
│   ├── groups.spec.js
│   ├── listener.js
│   ├── producer-transaction.spec.js
│   └── producer.spec.js
├── errors.d.ts
├── examples
│   ├── consumer-flow.md
│   ├── consumer.md
│   ├── docker-alpine.md
│   ├── high-level-producer.md
│   ├── metadata.md
│   ├── oauthbearer-default-flow.md
│   ├── producer-cluster.md
│   └── producer.md
├── index.d.ts
├── lib
│   ├── admin.js
│   ├── client.js
│   ├── error.js
│   ├── index.js
│   ├── kafka-consumer-stream.js
│   ├── kafka-consumer.js
│   ├── producer-stream.js
│   ├── producer.js
│   ├── producer
│   │   └── high-level-producer.js
│   ├── tools
│   │   └── ref-counter.js
│   ├── topic-partition.js
│   ├── topic.js
│   └── util.js
├── librdkafka.js
├── make_docs.sh
├── package-lock.json
├── package.json
├── run_docker.sh
├── src
│   ├── admin.cc
│   ├── admin.h
│   ├── binding.cc
│   ├── binding.h
│   ├── callbacks.cc
│   ├── callbacks.h
│   ├── common.cc
│   ├── common.h
│   ├── config.cc
│   ├── config.h
│   ├── connection.cc
│   ├── connection.h
│   ├── errors.cc
│   ├── errors.h
│   ├── kafka-consumer.cc
│   ├── kafka-consumer.h
│   ├── producer.cc
│   ├── producer.h
│   ├── topic.cc
│   ├── topic.h
│   ├── workers.cc
│   └── workers.h
├── test
│   ├── binding.spec.js
│   ├── consumer.spec.js
│   ├── error.spec.js
│   ├── index.spec.js
│   ├── kafka-consumer-stream.spec.js
│   ├── kafka-consumer.spec.js
│   ├── mocha.opts
│   ├── mock.js
│   ├── producer-stream.spec.js
│   ├── producer.spec.js
│   ├── producer
│   │   └── high-level-producer.spec.js
│   ├── tools
│   │   └── ref-counter.spec.js
│   ├── topic-partition.spec.js
│   └── util.spec.js
├── util
│   ├── configure.js
│   ├── get-env.js
│   ├── test-compile.js
│   └── test-producer-delivery.js
└── win_install.ps1
/.editorconfig:
--------------------------------------------------------------------------------
1 | [*]
2 | indent_style = space
3 | indent_size = 2
4 | trim_trailing_whitespace = true
5 | insert_final_newline = true
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 |
5 | ---
6 |
7 | **Environment Information**
8 | - OS [e.g. Mac, Arch, Windows 10]:
9 | - Node Version [e.g. 8.2.1]:
10 | - NPM Version [e.g. 5.4.2]:
11 | - C++ Toolchain [e.g. Visual Studio, llvm, g++]:
12 | - node-rdkafka version [e.g. 2.3.3]:
13 |
14 | **Steps to Reproduce**
15 |
16 | **node-rdkafka Configuration Settings**
17 |
18 |
19 | **Additional context**
20 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Pull Request Template
3 | ---
4 |
5 | Please prefix all TypeScript pull-requests with `[Typescript]`
6 |
--------------------------------------------------------------------------------
/.github/stale.yml:
--------------------------------------------------------------------------------
1 | daysUntilStale: 90
2 | # Number of days of inactivity before a stale issue is closed
3 | daysUntilClose: 7
4 | # Issues with these labels will never be considered stale
5 | exemptLabels:
6 | - help wanted
7 | - bug
8 | - enhancement
9 | # Label to use when marking an issue as stale
10 | staleLabel: stale
11 | # Comment to post when marking an issue as stale. Set to `false` to disable
12 | markComment: >
13 | This issue has been automatically marked as stale because it has not had
14 | recent activity. It will be closed if no further activity occurs.
15 | # Comment to post when closing a stale issue. Set to `false` to disable
16 | closeComment: false
17 |
--------------------------------------------------------------------------------
/.github/workflows/npm-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will run tests using node and then publish a package to GitHub Packages when a release is created
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/publishing-nodejs-packages
3 |
4 | name: Publish node-rdkafka
5 |
6 | on:
7 | release:
8 | types: [created]
9 |
10 | jobs:
11 | publish-npm:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v3
15 | with:
16 | submodules: recursive
17 | - uses: actions/setup-node@v3
18 | with:
19 | node-version: 20
20 | registry-url: https://registry.npmjs.org/
21 | cache: "npm"
22 | - run: npm ci
23 | - run: npm publish
24 | env:
25 | NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
26 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Build & Test
2 |
3 | on:
4 | push:
5 | branches: ["master"]
6 | pull_request:
7 | branches: ["master"]
8 |
9 | jobs:
10 | build:
11 | strategy:
12 | matrix:
13 | node: [16, 18, 20, 21, 22, 23]
14 | os: [ubuntu-22.04]
15 | include:
16 | # single mac test due to minute multipliers
17 | # https://docs.github.com/en/billing/managing-billing-for-github-actions/about-billing-for-github-actions
18 | - node: 22
19 | os: macos-14
20 | - node: 22
21 | os: windows-2022
22 | runs-on: ${{ matrix.os }}
23 | steps:
24 | - uses: actions/checkout@v3
25 | with:
26 | submodules: recursive
27 | - name: Install Node ${{ matrix.node }} in ${{ runner.os }}
28 | uses: actions/setup-node@v3
29 | with:
30 | node-version: ${{ matrix.node }}
31 | cache: "npm"
32 | - name: Update node-gyp
33 | run: npm install --global node-gyp@latest
34 | - name: Install Windows packages
35 | if: runner.os == 'Windows'
36 | run: ./win_install.ps1
37 | - name: Build
38 | run: npm ci
39 | # skipping on windows for now due to Make / mocha exit code issues
40 | - name: Test
41 | if: runner.os != 'Windows'
42 | run: npm test
43 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 | node_modules/
3 | deps/librdkafka
4 | npm-debug.log
5 |
6 | docs
7 |
8 | deps/*
9 | !deps/*.gyp
10 | !deps/windows-install.*
11 |
12 | .DS_Store
13 |
14 | .vscode
15 | .idea
16 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "deps/librdkafka"]
2 | path = deps/librdkafka
3 | url = https://github.com/edenhill/librdkafka.git
4 |
--------------------------------------------------------------------------------
/.jshintignore:
--------------------------------------------------------------------------------
1 | README.md
2 |
--------------------------------------------------------------------------------
/.jshintrc:
--------------------------------------------------------------------------------
1 | {
2 | "node": true,
3 | "mocha": true,
4 | "browser": false,
5 | "boss": true,
6 | "curly": true,
7 | "debug": false,
8 | "devel": false,
9 | "eqeqeq": true,
10 | "evil": true,
11 | "forin": false,
12 | "latedef": false,
13 | "noarg": true,
14 | "nonew": true,
15 | "nomen": false,
16 | "onevar": false,
17 | "plusplus": false,
18 | "regexp": false,
19 | "undef": true,
20 | "strict": false,
21 | "white": false,
22 | "eqnull": true
23 | }
24 |
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | deps/*
2 | !deps/librdkafka.gyp
3 | !deps/librdkafka
4 | !deps/windows-install.*
5 | .gitmodules
6 | Dockerfile
7 | deps/librdkafka/config.h
8 | build
9 | .github
10 | .vscode
11 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to `node-rdkafka`
2 |
3 | :+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
4 |
5 | The following is a set of guidelines for contributing to `node-rdkafka`
6 | which is hosted in the [Blizzard Organization](https://github.com/blizzard)
7 | on GitHub. This document lists rules, guidelines, and help getting started,
8 | so if you feel something is missing feel free to send a pull request.
9 |
10 | #### Table Of Contents
11 |
12 | [What should I know before I get started?](#what-should-i-know-before-i-get-started)
13 | * [Contributor Agreement](#contributor-agreement)
14 |
15 | [How Can I Contribute?](#how-can-i-contribute)
16 | * [Setting up the repository](#setting-up-the-repository)
17 | * [Reporting Bugs](#reporting-bugs)
18 | * [Suggesting Enhancements](#suggesting-enhancements)
19 | * [Pull Requests](#pull-requests)
20 |
21 | [Styleguides](#styleguides)
22 | * [Git Commit Messages](#git-commit-messages)
23 | * [JavaScript Styleguide](#javascript-styleguide)
24 | * [C++ Styleguide](#c++-styleguide)
25 | * [Specs Styleguide](#specs-styleguide)
26 | * [Documentation Styleguide](#documentation-styleguide)
27 |
28 | [Debugging](#debugging)
29 | * [Debugging C++](#debugging-c)
30 |
31 | [Updating librdkafka version](#updating-librdkafka-version)
32 |
33 | ## What should I know before I get started?
34 |
35 | ### Contributor Agreement
36 |
37 | Not currently required.
38 |
39 | ## How can I contribute?
40 |
41 | ### Setting up the repository
42 |
43 | To set up the library locally, do the following:
44 |
45 | 1) Clone this repository.
46 | 2) Install librdkafka with `git submodule update --init --recursive`
47 | 3) Install the dependencies `npm install`
48 |
49 | ### Reporting Bugs
50 |
51 | Please use __Github Issues__ to report bugs. When filing an issue report,
52 | make sure to copy any related code and stack traces so we can properly debug.
53 | Most of the time we need to be able to reproduce a failure to fix your issue,
54 | so a custom-written failing test is very helpful.
55 |
56 | Please also note the Kafka broker version that you are using and how many
57 | replicas, partitions, and brokers you are connecting to, because some issues
58 | might be related to Kafka. A list of `librdkafka` configuration key-value pairs
59 | also helps.
60 |
61 | ### Suggesting Enhancements
62 |
63 | Please use __Github Issues__ to suggest enhancements. We are happy to consider
64 | any extra functionality or features for the library, as long as they add real
65 | and related value to users. Describing your use case and why such an addition
66 | helps the user base can help guide the decision to implement it into the
67 | library's core.
68 |
69 | ### Pull Requests
70 |
71 | * Include new test cases (either end-to-end or unit tests) with your change.
72 | * Follow our style guides.
73 | * Make sure all tests are still passing and the `linter` does not report any issues.
74 | * End files with a new line.
75 | * Document the new code in the comments (if it is JavaScript) so the
76 | documentation generator can update the reference documentation.
77 | * Avoid platform-dependent code.
78 |
78 | **Note:** If making modifications to the underlying C++, please use built-in
79 | precompiler directives to detect such platform specificities. Use `Nan`
80 | whenever possible to abstract node/v8 version incompatibility.
81 | * Make sure your branch is up to date and rebased.
82 | * Squash extraneous commits unless their history truly adds value to the library.
83 |
84 | ## Styleguides
85 |
86 | ### General style guidelines
87 |
88 | Download the [EditorConfig](http://editorconfig.org) plugin for your preferred
89 | text editor to automate the application of the following guidelines:
90 |
91 | * Use 2-space indent (no tabs).
92 | * Do not leave trailing whitespace on lines.
93 | * Files should end with a final newline.
94 |
95 | Also, adhere to the following not enforced by EditorConfig:
96 |
97 | * Limit lines to 80 characters in length. A few extra characters (<= 5) are
98 | fine if they help readability; use good judgement.
99 | * Use `lf` line endings. (git's `core.autocrlf` setting can help)
100 |
101 | ### Git Commit Messages
102 |
103 | Commit messages should adhere to the guidelines in tpope's
104 | [A Note About Git Commit Messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)
105 |
106 | In short (an illustrative example follows the list):
107 |
108 | * Use the imperative mood. ("Fix bug", not "Fixed bug" or "Fixes bug")
109 | * Limit the first line to 50 characters or less, followed by a blank line
110 | and detail paragraphs (limit detail lines to about 72 characters).
111 | * Reference issue numbers or pull requests whenever possible.
112 |
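An illustrative example following these rules (the content and issue number
are made up):

```
Fix broker reconnect logic

Reconnecting after a broker restart could leave the client in a
half-open state. Reset the connection state before retrying so the
next connect starts clean.

Refs #123
```
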
113 | ### JavaScript Styleguide
114 |
115 | * Place `module.exports` at or near the top of the file.
116 | * Defined functions are hoisted, so it is appropriate to define the
117 | function after you export it.
118 | * When exporting an object, define it first, then export it, and then add
119 | methods or properties (a sketch follows this list).
120 | * Do not use ES2015 specific features (for example, do not use `let`, `const`,
121 | or `class`).
122 | * All callbacks should follow the standard Node.js callback signature.
123 | * Your JavaScript should properly pass the linter (`make jslint`).
124 |
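A minimal sketch of the export-first pattern described above (`Widget` is a
hypothetical module used only for illustration):

```js
module.exports = Widget;

var util = require('util');
var EventEmitter = require('events').EventEmitter;

// Defined functions are hoisted, so exporting before defining is fine
function Widget(name) {
  EventEmitter.call(this);
  this.name = name;
}

util.inherits(Widget, EventEmitter);

// Methods are added after the export, per the guideline above.
// Standard Node.js callback signature: (err, result)
Widget.prototype.greet = function(cb) {
  cb(null, 'hello from ' + this.name);
};
```
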
125 | ### C++ Styleguide
126 |
127 | * Class member variables should be prefixed with `m_`.
128 | * Use a comment when pointer ownership has changed hands.
129 | * Your C++ should properly pass the `cpplint.py` in the `make lint` test.
130 |
131 | ### Specs Styleguide
132 |
133 | * Write all JavaScript tests by using the `mocha` testing framework.
134 | * All `mocha` tests should use exports syntax (see the sketch after this list).
135 | * All `mocha` test files should be suffixed with `.spec.js` instead of `.js`.
136 | * Unit tests should mirror the JavaScript files they test (for example,
137 | `lib/client.js` is tested in `test/client.spec.js`).
138 | * Unit tests should have no outside service dependencies. Any time a dependency,
139 | like Kafka, exists, you should create an end-to-end test.
140 | * You may mock a connection in a unit test if it is reliably similar to its real
141 | variant.
142 |
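A minimal sketch of a unit test using `mocha`'s exports interface (the
`Widget` module and path are hypothetical):

```js
// test/widget.spec.js
var t = require('assert');
var Widget = require('../lib/widget');

module.exports = {
  'Widget': {
    'should construct with a name': function() {
      var widget = new Widget('test');
      t.equal(widget.name, 'test');
    }
  }
};
```
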
143 | ### Documentation Styleguide
144 |
145 | * Write all JavaScript documentation in jsdoc-compatible inline comments (a sketch follows this list).
146 | * Each docblock should have references to return types and parameters. If an
147 | object is a parameter, you should also document any required subproperties.
148 | * Use `@see` to reference similar pieces of code.
149 | * Use comments to document your code when its intent may be difficult to understand.
150 | * All documentation outside of the code should be in Github-compatible markdown.
151 | * Make good use of font variations like __bold__ and *italics*.
152 | * Use headers and tables of contents when they make sense.
153 |
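A sketch of a docblock following these rules (the function and its
parameters are hypothetical):

```js
/**
 * Fetch metadata for a single topic.
 *
 * @param {object} conf - Request configuration.
 * @param {string} conf.topic - Topic name. Required subproperty.
 * @param {number} [conf.timeout=5000] - Request timeout in milliseconds.
 * @param {Function} cb - Standard Node.js callback, called with
 * (err, metadata).
 * @return {undefined}
 * @see Client#getMetadata
 */
function fetchTopicMetadata(conf, cb) {
  // ...
}
```
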
154 | ## Editor
155 |
156 | I began using Visual Studio Code to develop on `node-rdkafka`. If you use it, you can configure the C++ plugin to resolve the paths needed to inform your IntelliSense. This is the config file I am using on a Mac to resolve the required paths:
157 |
158 | `c_cpp_properties.json`
159 | ```
160 | {
161 | "configurations": [
162 | {
163 | "name": "Mac",
164 | "includePath": [
165 | "${workspaceFolder}/**",
166 | "${workspaceFolder}",
167 | "${workspaceFolder}/src",
168 | "${workspaceFolder}/node_modules/nan",
169 | "${workspaceFolder}/deps/librdkafka/src",
170 | "${workspaceFolder}/deps/librdkafka/src-cpp",
171 | "/usr/local/include/node",
172 | "/usr/local/include/node/uv"
173 | ],
174 | "defines": [],
175 | "macFrameworkPath": [
176 | "/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/System/Library/Frameworks"
177 | ],
178 | "compilerPath": "/usr/bin/clang",
179 | "cStandard": "c11",
180 | "cppStandard": "c++17",
181 | "intelliSenseMode": "clang-x64"
182 | }
183 | ],
184 | "version": 4
185 | }
186 | ```
187 |
188 | ## Debugging
189 |
190 | ### Debugging C++
191 |
192 | Use `gdb` for debugging (as shown in the following example).
193 |
194 | ```
195 | node-gyp rebuild --debug
196 |
197 | gdb node
198 | (gdb) set args "path/to/file.js"
199 | (gdb) run
200 | [output here]
201 | ```
202 |
203 | You can add breakpoints and so on after that, as sketched below.
204 |
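For example, to break on a specific source line before running (the file and
line number here are illustrative):

```
(gdb) break src/producer.cc:42
(gdb) run
(gdb) backtrace
```
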
205 | ## Updating librdkafka version
206 |
207 | The librdkafka submodule should be periodically updated to the latest release at https://github.com/edenhill/librdkafka/releases
208 |
209 | Steps to update:
210 | 1. Update the `librdkafka` property in [`package.json`](https://github.com/Blizzard/node-rdkafka/blob/master/package.json) to the desired version.
211 |
212 | 1. Update the librdkafka git submodule to that version's release commit (example below)
213 |
214 | ```bash
215 | cd deps/librdkafka
216 | git checkout 063a9ae7a65cebdf1cc128da9815c05f91a2a996 # for version 1.8.2
217 | ```
218 |
219 | If you get an error during that checkout command, double check that the submodule was initialized / cloned! You may need to run `git submodule update --init --recursive`
220 |
221 | 1. Update [`config.d.ts`](https://github.com/Blizzard/node-rdkafka/blob/master/config.d.ts) and [`errors.d.ts`](https://github.com/Blizzard/node-rdkafka/blob/master/errors.d.ts) TypeScript definitions by running:
222 | ```bash
223 | node ci/librdkafka-defs-generator.js
224 | ```
225 | Note: This is run automatically during CI flows, but it's good to run it during the version upgrade pull request.
226 |
227 | 1. Run `npm install` to build with the new version and fix any build errors that occur.
228 |
229 | 1. Run unit tests: `npm run test`
230 |
231 | 1. Update the version numbers referenced in the [`README.md`](https://github.com/Blizzard/node-rdkafka/blob/master/README.md) file to the new version.
232 |
233 | ## Publishing new npm version
234 |
235 | 1. Increment the `version` in `package.json` and merge that change in.
236 |
237 | 1. Create a new GitHub release. Set the tag and release title to the same string as `version` in `package.json`.
238 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | Copyright (c) 2016 Blizzard Entertainment
3 |
4 | Permission is hereby granted, free of charge, to any person obtaining a copy of
5 | this software and associated documentation files (the "Software"), to deal in
6 | the Software without restriction, including without limitation the rights to
7 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
8 | of the Software, and to permit persons to whom the Software is furnished to do
9 | so, subject to the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be included in all
12 | copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 | IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | NODE-GYP ?= node_modules/.bin/node-gyp
2 |
3 | # Sick of changing this. Do a check and try to use python 2 if it doesn't work
4 | PYTHON_VERSION_FULL := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1)))
5 | PYTHON_VERSION_MAJOR := $(word 1,${PYTHON_VERSION_FULL})
6 |
7 | ifeq ($(PYTHON_VERSION_MAJOR), 2)
8 | PYTHON = python
9 | else
10 | PYTHON = python2
11 | endif
12 |
13 | NODE ?= node
14 | CPPLINT ?= cpplint.py
15 | BUILDTYPE ?= Release
16 | TESTS = "test/**/*.js"
17 | E2E_TESTS = $(wildcard e2e/*.spec.js)
18 | TEST_REPORTER =
19 | TEST_OUTPUT =
20 | CONFIG_OUTPUTS = \
21 | build/bindings.target.mk \
22 | build/Makefile \
23 | build/binding.Makefile build/config.gypi
24 |
25 | CPPLINT_FILES = $(wildcard src/*.cc src/*.h)
26 | CPPLINT_FILTER = -legal/copyright
27 | JSLINT_FILES = lib/*.js test/*.js e2e/*.js
28 |
29 | PACKAGE = $(shell node -pe 'require("./package.json").name.split("/")[1]')
30 | VERSION = $(shell node -pe 'require("./package.json").version')
31 |
32 | GYPBUILDARGS=
33 | ifeq ($(BUILDTYPE),Debug)
34 | GYPBUILDARGS=--debug
35 | endif
36 |
37 | .PHONY: all clean lint test lib docs e2e ghpages check
38 |
39 | all: lint lib test e2e
40 |
41 | lint: cpplint jslint
42 |
43 | cpplint:
44 | @$(PYTHON) $(CPPLINT) --filter=$(CPPLINT_FILTER) $(CPPLINT_FILES)
45 |
46 | jslint: node_modules/.dirstamp
47 | @./node_modules/.bin/jshint --verbose $(JSLINT_FILES)
48 |
49 | lib: node_modules/.dirstamp $(CONFIG_OUTPUTS)
50 | @PYTHONHTTPSVERIFY=0 $(NODE-GYP) build $(GYPBUILDARGS)
51 |
52 | node_modules/.dirstamp: package.json
53 | @npm update --loglevel warn
54 | @touch $@
55 |
56 | $(CONFIG_OUTPUTS): node_modules/.dirstamp binding.gyp
57 | @$(NODE-GYP) configure
58 |
59 | test: node_modules/.dirstamp
60 | @./node_modules/.bin/mocha --ui exports $(TEST_REPORTER) $(TESTS) $(TEST_OUTPUT)
61 |
62 | check: node_modules/.dirstamp
63 | @$(NODE) util/test-compile.js
64 |
65 | e2e: $(E2E_TESTS)
66 | @./node_modules/.bin/mocha --exit --timeout 120000 --ui exports $(TEST_REPORTER) $(E2E_TESTS) $(TEST_OUTPUT)
67 |
68 | define release
69 | NEXT_VERSION=$(shell node -pe 'require("semver").inc("$(VERSION)", "$(1)")')
70 | node -e "\
71 | var j = require('./package.json');\
72 | j.version = \"$$NEXT_VERSION\";\
73 | var s = JSON.stringify(j, null, 2);\
74 | require('fs').writeFileSync('./package.json', s);" && \
75 | git commit -m "release $$NEXT_VERSION" -- package.json && \
76 | git tag "$$NEXT_VERSION" -m "release $$NEXT_VERSION"
77 | endef
78 |
79 | docs: node_modules/.dirstamp
80 | @rm -rf docs
81 | @./node_modules/jsdoc/jsdoc.js --destination docs \
82 | --recurse -R ./README.md \
83 | -t "./node_modules/toolkit-jsdoc/" \
84 | --tutorials examples ./lib
85 |
86 | gh-pages: node_modules/.dirstamp
87 | @./make_docs.sh
88 |
89 | release-patch:
90 | @$(call release,patch)
91 |
92 | clean: node_modules/.dirstamp
93 | @rm -f deps/librdkafka/config.h
94 | @$(NODE-GYP) clean
95 |
--------------------------------------------------------------------------------
/bench/consumer-raw-rdkafka.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 | var count = 0;
12 | var total = 0;
13 | var store = [];
14 | var host = process.argv[2] || 'localhost:9092';
15 | var topic = process.argv[3] || 'test';
16 |
17 | var consumer = new Kafka.KafkaConsumer({
18 | 'metadata.broker.list': host,
19 | 'group.id': 'node-rdkafka-bench-s',
20 | 'fetch.wait.max.ms': 100,
21 | 'fetch.message.max.bytes': 1024 * 1024,
22 | 'enable.auto.commit': false
23 | // paused: true,
24 | }, {
25 | 'auto.offset.reset': 'earliest'
26 | });
27 |
28 | var interval;
29 |
30 | consumer.connect()
31 | .once('ready', function() {
32 | consumer.subscribe([topic]);
33 | consumer.consume();
34 | })
35 | .on('rebalance', function() {
36 | console.log('rebalance');
37 | })
38 | .once('data', function() {
39 | interval = setInterval(function() {
40 | console.log('%d messages per second', count);
41 | if (count > 0) {
42 | store.push(count);
43 | }
44 | count = 0;
45 | }, 1000);
46 | })
47 | .on('data', function(message) {
48 | count += 1;
49 | total += 1;
50 | });
51 |
52 | function shutdown() {
53 | clearInterval(interval);
54 |
55 | if (store.length > 0) {
56 | var calc = 0;
57 | for (var x in store) {
58 | calc += store[x];
59 | }
60 |
61 | var mps = parseFloat(calc * 1.0/store.length);
62 |
63 | console.log('%d messages per second on average', mps);
64 |
65 | }
66 |
67 | var killTimer = setTimeout(function() {
68 | process.exit();
69 | }, 5000);
70 |
71 | consumer.disconnect(function() {
72 | clearTimeout(killTimer);
73 | process.exit();
74 | });
75 |
76 | }
77 |
--------------------------------------------------------------------------------
/bench/consumer-subscribe.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 | var count = 0;
12 | var total = 0;
13 | var store = [];
14 | var host = process.argv[2] || 'localhost:9092';
15 | var topic = process.argv[3] || 'test';
16 |
17 | var consumer = new Kafka.KafkaConsumer({
18 | 'metadata.broker.list': host,
19 | 'group.id': 'node-rdkafka-bench',
20 | 'fetch.wait.max.ms': 100,
21 | 'fetch.message.max.bytes': 1024 * 1024,
22 | 'enable.auto.commit': false
23 | // paused: true,
24 | }, {
25 | 'auto.offset.reset': 'earliest'
26 | });
27 |
28 | var interval;
29 |
30 | consumer.connect()
31 | .once('ready', function() {
32 | consumer.subscribe([topic]);
33 | consumer.consume();
34 | })
35 | .once('data', function() {
36 | interval = setInterval(function() {
37 | console.log('%d messages per second', count);
38 | if (count > 0) {
39 | store.push(count);
40 | }
41 | count = 0;
42 | }, 1000);
43 | })
44 | .on('data', function(message) {
45 | count += 1;
46 | total += 1;
47 | });
48 |
49 | process.once('SIGTERM', shutdown);
50 | process.once('SIGINT', shutdown);
51 | process.once('SIGHUP', shutdown);
52 |
53 | function shutdown() {
54 | clearInterval(interval);
55 |
56 | if (store.length > 0) {
57 | var calc = 0;
58 | for (var x in store) {
59 | calc += store[x];
60 | }
61 |
62 | var mps = parseFloat(calc * 1.0/store.length);
63 |
64 | console.log('%d messages per second on average', mps);
65 |
66 | }
67 |
68 | var killTimer = setTimeout(function() {
69 | process.exit();
70 | }, 5000);
71 |
72 | consumer.disconnect(function() {
73 | clearTimeout(killTimer);
74 | process.exit();
75 | });
76 |
77 | }
78 |
--------------------------------------------------------------------------------
/bench/kafka-consumer-stream.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Writable = require('stream').Writable;
11 |
12 | var Kafka = require('../');
13 | var count = 0;
14 | var total = 0;
15 | var store = [];
16 | var host = process.argv[2] || 'localhost:9092';
17 | var topic = process.argv[3] || 'test';
18 |
19 | var stream = Kafka.createReadStream({
20 | 'metadata.broker.list': host,
21 | 'group.id': 'node-rdkafka-benchs',
22 | 'fetch.wait.max.ms': 100,
23 | 'fetch.message.max.bytes': 1024 * 1024,
24 | 'enable.auto.commit': false
25 | // paused: true,
26 | }, {
27 | 'auto.offset.reset': 'earliest'
28 | }, {
29 | fetchSize: 16,
30 | topics: [topic]
31 | });
32 |
33 | // Track how many messages we see per second
34 | var interval;
35 |
36 | var isShuttingDown = false;
37 |
38 | stream
39 | .on('error', function(err) {
40 | console.log('Shutting down due to error');
41 | console.log(err.stack);
42 | shutdown();
43 | })
44 | .once('data', function(d) {
45 | interval = setInterval(function() {
46 | if (isShuttingDown) {
47 | clearInterval(interval);
48 | }
49 | console.log('%d messages per second', count);
50 | if (count > 0) {
51 | // Don't store counts for intervals where we didn't get data, I guess?
52 | store.push(count);
53 | // setTimeout(shutdown, 500);
54 | }
55 | count = 0;
56 | }, 1000).unref();
57 | })
58 | .on('end', function() {
59 | // Can be called more than once without issue because of guard var
60 | console.log('Shutting down due to stream end');
61 | shutdown();
62 | })
63 | .pipe(new Writable({
64 | objectMode: true,
65 | write: function(message, encoding, cb) {
66 | count += 1;
67 | total += 1;
68 | setImmediate(cb);
69 | }
70 | }));
71 |
72 | process.once('SIGTERM', shutdown);
73 | process.once('SIGINT', shutdown);
74 | process.once('SIGHUP', shutdown);
75 |
76 | function shutdown() {
77 | if (isShuttingDown) {
78 | return;
79 | }
80 | clearInterval(interval);
81 | isShuttingDown = true;
82 | if (store.length > 0) {
83 | var calc = 0;
84 | for (var x in store) {
85 | calc += store[x];
86 | }
87 |
88 | var mps = parseFloat(calc * 1.0/store.length);
89 |
90 | console.log('%d messages per second on average', mps);
91 | }
92 |
93 | // Destroy the stream
94 | stream.destroy();
95 |
96 | stream.once('end', function() {
97 | console.log('total: %d', total);
98 | });
99 |
100 | }
101 |
--------------------------------------------------------------------------------
/bench/producer-raw-rdkafka.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 | var crypto = require('crypto');
12 | var count = 0;
13 | var total = 0;
14 | var totalComplete = 0;
15 | var verifiedComplete = 0;
16 | var errors = 0;
17 | var store = [];
18 | var started;
19 | var done = false;
20 | var host = process.argv[2] || '127.0.0.1:9092';
21 | var topicName = process.argv[3] || 'test';
22 | var compression = process.argv[4] || 'gzip';
23 | var MAX = process.argv[5] || 10000000;
24 |
25 | var producer = new Kafka.Producer({
26 | 'metadata.broker.list': host,
27 | 'group.id': 'node-rdkafka-bench',
28 | 'compression.codec': compression,
29 | 'retry.backoff.ms': 200,
30 | 'message.send.max.retries': 10,
31 | 'socket.keepalive.enable': true,
32 | 'queue.buffering.max.messages': 100000,
33 | 'queue.buffering.max.ms': 1000,
34 | 'batch.num.messages': 1000
35 | });
36 |
37 | // Track how many messages we see per second
38 | var interval;
39 | var ok = true;
40 |
41 | function getTimer() {
42 | if (!interval) {
43 | interval = setTimeout(function() {
44 | interval = false;
45 | if (!done) {
46 | console.log('%d messages sent per second', count);
47 | store.push(count);
48 | count = 0;
49 | getTimer();
50 |
51 | } else {
52 | console.log('%d messages sent in the final batch (<1000ms)', count);
53 | }
54 | }, 1000);
55 | }
56 |
57 | return interval;
58 | }
59 |
60 | var t;
61 |
62 | crypto.randomBytes(4096, function(ex, buffer) {
63 |
64 | producer.connect()
65 | .on('ready', function() {
66 | getTimer();
67 |
68 | started = new Date().getTime();
69 |
70 | var sendMessage = function() {
71 | try {
72 | var errorCode = producer.produce(topicName, null, buffer, null);
73 | verifiedComplete += 1;
74 | } catch (e) {
75 | console.error(e);
76 | errors++;
77 | }
78 |
79 | count += 1;
80 | totalComplete += 1;
81 | if (totalComplete === MAX) {
82 | shutdown();
83 | }
84 | if (total < MAX) {
85 | total += 1;
86 |
87 | // This is 100% sync so we need to setImmediate to give it time
88 | // to breathe.
89 | setImmediate(sendMessage);
90 | }
91 | };
92 |
93 | sendMessage();
94 |
95 | })
96 | .on('event.error', function(err) {
97 | console.error(err);
98 | process.exit(1);
99 | })
100 | .on('disconnected', shutdown);
101 |
102 | });
103 |
104 | function shutdown(e) {
105 | done = true;
106 |
107 | clearInterval(interval);
108 |
109 | var killTimer = setTimeout(function() {
110 | process.exit();
111 | }, 5000);
112 |
113 | producer.disconnect(function() {
114 | clearTimeout(killTimer);
115 | var ended = new Date().getTime();
116 | var elapsed = ended - started;
117 |
118 | // console.log('Ended %s', ended);
119 | console.log('total: %d messages over %d ms', total, elapsed);
120 |
121 | console.log('%d messages / second', parseInt(total / (elapsed / 1000)));
122 | process.exit();
123 | });
124 |
125 | }
126 |
--------------------------------------------------------------------------------
/bench/producer-rdkafka.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 | var crypto = require('crypto');
12 | var count = 0;
13 | var total = 0;
14 | var totalComplete = 0;
15 | var store = [];
16 | var host = process.argv[2] || '127.0.0.1:9092';
17 | var topicName = process.argv[3] || 'test';
18 | var compression = process.argv[4] || 'gzip';
19 | var MAX = process.argv[5] || 1000000;
20 |
21 | var stream = Kafka.Producer.createWriteStream({
22 | 'metadata.broker.list': host,
23 | 'group.id': 'node-rdkafka-bench',
24 | 'compression.codec': compression,
25 | 'retry.backoff.ms': 200,
26 | 'message.send.max.retries': 10,
27 | 'socket.keepalive.enable': true,
28 | 'queue.buffering.max.messages': 100000,
29 | 'queue.buffering.max.ms': 1000,
30 | 'batch.num.messages': 1000,
31 | }, {}, {
32 | topic: topicName,
33 | pollInterval: 20
34 | });
35 |
36 | stream.on('error', function(e) {
37 | console.log(e);
38 | process.exit(1);
39 | });
40 |
41 | // Track how many messages we see per second
42 | var interval;
43 | var done = false;
44 |
45 | function log() {
46 | console.log('%d messages sent per second', count);
47 | store.push(count);
48 | count = 0;
49 | }
50 |
51 | crypto.randomBytes(4096, function(ex, buffer) {
52 |
53 | var x = function(e) {
54 | if (e) {
55 | console.error(e);
56 | }
57 | count += 1;
58 | totalComplete += 1;
59 | if (totalComplete >= MAX && !done) {
60 | done = true;
61 | clearInterval(interval);
62 | setTimeout(shutdown, 5000);
63 | }
64 | };
65 |
66 | function write() {
67 | if (!stream.write(buffer, 'base64', x)) {
68 | return stream.once('drain', write);
69 | } else {
70 | total++;
71 | }
72 |
73 | if (total < MAX) {
74 | // we are not done
75 | setImmediate(write);
76 | }
77 |
78 | }
79 |
80 | write();
81 | interval = setInterval(log, 1000);
82 | stream.on('error', function(err) {
83 | console.log(err);
84 | });
85 | // stream.on('end', shutdown);
86 |
87 | });
88 |
89 |
90 | process.once('SIGTERM', shutdown);
91 | process.once('SIGINT', shutdown);
92 | process.once('SIGHUP', shutdown);
93 |
94 | function shutdown() {
95 |
96 | if (store.length > 0) {
97 | var calc = 0;
98 | for (var x in store) {
99 | calc += store[x];
100 | }
101 |
102 | var mps = parseFloat(calc * 1.0/store.length);
103 |
104 | console.log('%d messages per second on average', mps);
105 | console.log('%d messages total', total);
106 |
107 | }
108 |
109 | clearInterval(interval);
110 |
111 | stream.end();
112 |
113 | stream.on('close', function() {
114 | console.log('total: %d', total);
115 | });
116 |
117 | }
118 |
--------------------------------------------------------------------------------
/bench/seed.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | kafka_root=${KAFKA_ROOT:-/opt/kafka}
3 | # Generate and insert some messages
4 |
5 | OS=$(uname -s)
6 |
7 | function initializeTopic {
8 | topic=$1
9 | host=$2
10 | msg_size=$3
11 | batch_size=$4
12 | batch_count=$5
13 |
14 | if [ $host == "localhost:9092" ]; then
15 | ${kafka_root}/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
16 | --replication-factor 1 --partitions 1 --topic ${topic}
17 | fi
18 |
19 | echo "Generating messages (size: ${msg_size})"
20 | : > /tmp/msgs # Truncate /tmp/msgs
21 | for i in $(seq 1 ${batch_size}); do
22 | if [ $OS == 'Darwin' ]; then
23 | printf %s\\n "$(head -c${msg_size} /dev/urandom | base64)" >> /tmp/msgs
24 | else
25 | printf %s\\n "$(head --bytes=${msg_size} /dev/urandom | base64 --wrap=0)" >> /tmp/msgs
26 | fi
27 | done
28 |
29 | echo "Done generating messages"
30 |
31 | for i in $(seq 1 ${batch_count}); do
32 | echo "Adding $(wc -l /tmp/msgs) messages to topic ${topic}"
33 | "${kafka_root}/bin/kafka-console-producer.sh" \
34 | --broker-list ${host} --topic ${topic} < /tmp/msgs
35 | done
36 | }
37 |
38 | initializeTopic "librdtesting-01" "localhost:9092" "4096" "5000" "2000"
39 |
--------------------------------------------------------------------------------
/binding.gyp:
--------------------------------------------------------------------------------
1 | {
2 | "variables": {
3 | # may be redefined in command line on configuration stage
4 | # "BUILD_LIBRDKAFKA%": " line.startsWith('#def'));
18 | const definedLines = precompilerDefinitions.map(definedLine => {
19 | const content = definedLine.split(' ').filter(v => v != '');
20 |
21 | return {
22 | command: content[0],
23 | key: content[1],
24 | value: content[2]
25 | };
26 | });
27 |
28 | const defines = {};
29 |
30 | for (let item of definedLines) {
31 | if (item.command == '#define') {
32 | defines[item.key] = item.value;
33 | }
34 | }
35 |
36 | function parseLibrdkafkaVersion(version) {
37 | const intRepresentation = parseInt(version);
38 |
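// The packed version has one byte per component: 0xMMmmpprr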
39 | const major = (intRepresentation & majorMask) >> (8 * 3);
40 | const minor = (intRepresentation & minorMask) >> (8 * 2);
41 | const patch = (intRepresentation & patchMask) >> (8 * 1);
42 | const rev = (intRepresentation & revMask) >> (8 * 0);
43 |
44 | return {
45 | major,
46 | minor,
47 | patch,
48 | rev
49 | };
50 | }
51 |
52 | function versionAsString(version) {
53 | return [
54 | version.major,
55 | version.minor,
56 | version.patch,
57 | version.rev === 255 ? null : version.rev,
58 | ].filter(v => v != null).join('.');
59 | }
60 |
61 | const librdkafkaVersion = parseLibrdkafkaVersion(defines.RD_KAFKA_VERSION);
62 | const versionString = versionAsString(librdkafkaVersion);
63 |
64 | if (pjs.librdkafka !== versionString) {
65 | console.error(`Librdkafka version of ${versionString} does not match package json: ${pjs.librdkafka}`);
66 | process.exit(1);
67 | }
68 |
--------------------------------------------------------------------------------
/ci/checks/librdkafka-exists.js:
--------------------------------------------------------------------------------
1 | const path = require('path');
2 | const fs = require('fs');
3 |
4 | const root = path.resolve(__dirname, '..', '..');
5 | const librdkafkaPath = path.resolve(root, 'deps', 'librdkafka');
6 |
7 | // Ensure librdkafka is in the deps directory - this makes sure we don't accidentally
8 | // publish on a non recursive clone :)
9 |
10 | if (!fs.existsSync(librdkafkaPath)) {
11 | console.error(`Could not find librdkafka at path ${librdkafkaPath}`);
12 | process.exit(1);
13 | }
14 |
--------------------------------------------------------------------------------
/ci/librdkafka-defs-generator.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const path = require('path');
3 |
4 | const LIBRDKAFKA_VERSION = require('../package.json').librdkafka;
5 | const LIBRDKAFKA_DIR = path.resolve(__dirname, '../deps/librdkafka/');
6 |
7 | function getHeader(file) {
8 | return `// ====== Generated from librdkafka ${LIBRDKAFKA_VERSION} file ${file} ======`;
9 | }
10 |
11 | function readLibRDKafkaFile(file) {
12 | return fs.readFileSync(path.resolve(LIBRDKAFKA_DIR, file)).toString();
13 | }
14 |
15 | function extractConfigItems(configStr) {
16 | const [_header, config] = configStr.split(/-{5,}\|.*/);
17 |
18 | const re = /(.*?)\|(.*?)\|(.*?)\|(.*?)\|(.*?)\|(.*)/g;
19 |
20 | const configItems = [];
21 |
22 | let m;
23 | do {
24 | m = re.exec(config);
25 | if (m) {
26 | const [
27 | _fullString,
28 | property,
29 | consumerOrProducer,
30 | range,
31 | defaultValue,
32 | importance,
33 | descriptionWithType,
34 | ] = m.map(el => (typeof el === 'string' ? el.trim() : el));
35 |
36 | const splitDescriptionRe = /(.*?)\s*?<br>.*?:\s.*?(.*?)\*/;
37 | const [_, description, rawType] = splitDescriptionRe.exec(descriptionWithType);
38 |
39 | configItems.push({
40 | property,
41 | consumerOrProducer,
42 | range,
43 | defaultValue,
44 | importance,
45 | description,
46 | rawType,
47 | });
48 | }
49 | } while (m);
50 |
51 | return configItems.map(processItem);
52 | }
53 |
54 | function processItem(configItem) {
55 | // These items are overwritten by node-rdkafka
56 | switch (configItem.property) {
57 | case 'dr_msg_cb':
58 | return { ...configItem, type: 'boolean' };
59 | case 'dr_cb':
60 | return { ...configItem, type: 'boolean | Function' };
61 | case 'rebalance_cb':
62 | return { ...configItem, type: 'boolean | Function' };
63 | case 'offset_commit_cb':
64 | return { ...configItem, type: 'boolean | Function' };
65 | }
66 |
67 | switch (configItem.rawType) {
68 | case 'integer':
69 | return { ...configItem, type: 'number' };
70 | case 'boolean':
71 | return { ...configItem, type: 'boolean' };
72 | case 'string':
73 | case 'CSV flags':
74 | return { ...configItem, type: 'string' };
75 | case 'enum value':
76 | return {
77 | ...configItem,
78 | type: configItem.range
79 | .split(',')
80 | .map(str => `'${str.trim()}'`)
81 | .join(' | '),
82 | };
83 | default:
84 | return { ...configItem, type: 'any' };
85 | }
86 | }
87 |
88 | function generateInterface(interfaceDef, configItems) {
89 | const fields = configItems
90 | .map(item =>
91 | [
92 | `/**`,
93 | ` * ${item.description}`,
94 | ...(item.defaultValue ? [` *`, ` * @default ${item.defaultValue}`] : []),
95 | ` */`,
96 | `"${item.property}"?: ${item.type};`,
97 | ]
98 | .map(row => ` ${row}`)
99 | .join('\n')
100 | )
101 | .join('\n\n');
102 |
103 | return `export interface ` + interfaceDef + ' {\n' + fields + '\n}';
104 | }
105 |
106 | function addSpecialGlobalProps(globalProps) {
107 | globalProps.push({
108 | "property": "event_cb",
109 | "consumerOrProducer": "*",
110 | "range": "",
111 | "defaultValue": "true",
112 | "importance": "low",
113 | "description": "Enables or disables `event.*` emitting.",
114 | "rawType": "boolean",
115 | "type": "boolean"
116 | });
117 | }
118 |
119 | function generateConfigDTS(file) {
120 | const configuration = readLibRDKafkaFile(file);
121 | const [globalStr, topicStr] = configuration.split('Topic configuration properties');
122 |
123 | const [globalProps, topicProps] = [extractConfigItems(globalStr), extractConfigItems(topicStr)];
124 |
125 | addSpecialGlobalProps(globalProps);
126 |
127 | const [globalSharedProps, producerGlobalProps, consumerGlobalProps] = [
128 | globalProps.filter(i => i.consumerOrProducer === '*'),
129 | globalProps.filter(i => i.consumerOrProducer === 'P'),
130 | globalProps.filter(i => i.consumerOrProducer === 'C'),
131 | ];
132 |
133 | const [topicSharedProps, producerTopicProps, consumerTopicProps] = [
134 | topicProps.filter(i => i.consumerOrProducer === '*'),
135 | topicProps.filter(i => i.consumerOrProducer === 'P'),
136 | topicProps.filter(i => i.consumerOrProducer === 'C'),
137 | ];
138 |
139 | let output = `${getHeader(file)}
140 | // Code that generated this is a derivative work of the code from Nam Nguyen
141 | // https://gist.github.com/ntgn81/066c2c8ec5b4238f85d1e9168a04e3fb
142 |
143 | `;
144 |
145 | output += [
146 | generateInterface('GlobalConfig', globalSharedProps),
147 | generateInterface('ProducerGlobalConfig extends GlobalConfig', producerGlobalProps),
148 | generateInterface('ConsumerGlobalConfig extends GlobalConfig', consumerGlobalProps),
149 | generateInterface('TopicConfig', topicSharedProps),
150 | generateInterface('ProducerTopicConfig extends TopicConfig', producerTopicProps),
151 | generateInterface('ConsumerTopicConfig extends TopicConfig', consumerTopicProps),
152 | ].join('\n\n');
153 |
154 | fs.writeFileSync(path.resolve(__dirname, '../config.d.ts'), output);
155 | }
156 |
157 | function updateErrorDefinitions(file) {
158 | const rdkafkacpp_h = readLibRDKafkaFile(file);
159 | const m = /enum ErrorCode {([^}]+)}/g.exec(rdkafkacpp_h);
160 | if (!m) {
161 | throw new Error(`Can't read rdkafkacpp.h file`)
162 | }
163 | const body = m[1]
164 | .replace(/(\t)|( +)/g, ' ')
165 | .replace(/\n\n/g, '\n')
166 | .replace(/\s+=\s+/g, ': ')
167 | .replace(/[\t ]*#define +(\w+) +(\w+)/g, (_, define, original) => {
168 | const value = new RegExp(`${original}\\s+=\\s+(\\d+)`).exec(m[1])[1];
169 | return ` ${define}: ${value},`;
170 | })
171 |
172 | // validate body
173 | const emptyCheck = body
174 | .replace(/((\s+\/\*)|( ?\*)).*/g, '')
175 | .replace(/ ERR_\w+: -?\d+,?\r?\n/g, '')
176 | .trim()
177 | if (emptyCheck !== '') {
178 | throw new Error(`Fail to parse ${file}. It contains these extra details:\n${emptyCheck}`);
179 | }
180 |
181 | const error_js_file = path.resolve(__dirname, '../lib/error.js');
182 | const error_js = fs.readFileSync(error_js_file)
183 | .toString()
184 | .replace(/(\/\/.*\r?\n)?LibrdKafkaError.codes = {[^}]+/g, `${getHeader(file)}\nLibrdKafkaError.codes = {\n${body}`)
185 |
186 | fs.writeFileSync(error_js_file, error_js);
187 | fs.writeFileSync(path.resolve(__dirname, '../errors.d.ts'), `${getHeader(file)}\nexport const CODES: { ERRORS: {${body.replace(/[ \.]*(\*\/\r?\n \w+: )(-?\d+),?/g, ' (**$2**) $1number,')}}}`)
188 | }
189 |
190 | (async function updateTypeDefs() {
191 | generateConfigDTS('CONFIGURATION.md');
192 | updateErrorDefinitions('src-cpp/rdkafkacpp.h');
193 | })()
194 |
--------------------------------------------------------------------------------
/ci/prepublish.js:
--------------------------------------------------------------------------------
1 | require('./checks/librdkafka-exists');
2 | require('./checks/librdkafka-correct-version');
3 | require('./librdkafka-defs-generator.js');
4 | require('./update-version');
5 |
--------------------------------------------------------------------------------
/ci/update-version.js:
--------------------------------------------------------------------------------
1 | const path = require('path');
2 | const semver = require('semver');
3 | const { spawn } = require('child_process');
4 | const fs = require('fs');
5 |
6 | const root = path.resolve(__dirname, '..');
7 | const pjsPath = path.resolve(root, 'package.json');
8 | const pjs = require(pjsPath);
9 |
10 | function parseVersion(tag) {
11 | const { major, minor, prerelease, patch } = semver.parse(tag);
12 |
13 | // Describe will give us commits since last tag
14 | const [commitsSinceTag, hash] = prerelease[0] ? prerelease[0].split('-') : [
15 | 1,
16 | process.env.TRAVIS_COMMIT || ''
17 | ];
18 |
19 | return {
20 | major,
21 | minor,
22 | prerelease,
23 | patch,
24 | commit: commitsSinceTag - 1,
25 | hash
26 | };
27 | }
28 |
29 | function getCommandOutput(command, args, cb) {
30 | let output = '';
31 |
32 | const cmd = spawn(command, args);
33 |
34 | cmd.stdout.on('data', (data) => {
35 | output += data;
36 | });
37 |
38 | cmd.on('close', (code) => {
39 | if (code != 0) {
40 | cb(new Error(`Command returned unsuccessful code: ${code}`));
41 | return;
42 | }
43 |
44 | cb(null, output.trim());
45 | });
46 | }
47 |
48 | function getVersion(cb) {
49 | // https://docs.travis-ci.com/user/environment-variables/
50 | if (process.env.TRAVIS_TAG) {
51 | setImmediate(() => cb(null, parseVersion(process.env.TRAVIS_TAG.trim())));
52 | return;
53 | }
54 |
55 | getCommandOutput('git', ['describe', '--tags'], (err, result) => {
56 | if (err) {
57 | cb(err);
58 | return;
59 | }
60 |
61 | cb(null, parseVersion(result.trim()));
62 | });
63 | }
64 |
65 | function getBranch(cb) {
66 | if (process.env.TRAVIS_TAG) {
67 | // TRAVIS_BRANCH matches TRAVIS_TAG when TRAVIS_TAG is set
68 | // "git branch --contains tags/TRAVIS_TAG" doesn't work on travis so we have to assume 'master'
69 | setImmediate(() => cb(null, 'master'));
70 | return;
71 | } else if (process.env.TRAVIS_BRANCH) {
72 | setImmediate(() => cb(null, process.env.TRAVIS_BRANCH.trim()));
73 | return;
74 | }
75 |
76 | getCommandOutput('git', ['rev-parse', '--abbrev-ref', 'HEAD'], (err, result) => {
77 | if (err) {
78 | cb(err);
79 | return;
80 | }
81 |
82 | cb(null, result.trim());
83 | });
84 | }
85 |
86 | function getPackageVersion(tag, branch) {
87 | const baseVersion = `v${tag.major}.${tag.minor}.${tag.patch}`;
88 |
89 | console.log(`Package version is "${baseVersion}"`);
90 |
91 | // never publish with a suffix
92 | // fixes https://github.com/Blizzard/node-rdkafka/issues/981
93 | // baseVersion += '-';
94 |
95 | // if (tag.commit === 0 && branch === 'master') {
96 | // return baseVersion;
97 | // }
98 |
99 | // if (branch !== 'master') {
100 | // baseVersion += (tag.commit + 1 + '.' + branch);
101 | // } else {
102 | // baseVersion += (tag.commit + 1);
103 | // }
104 |
105 | return baseVersion;
106 | }
107 |
108 | getVersion((err, tag) => {
109 | if (err) {
110 | throw err;
111 | }
112 |
113 | getBranch((err, branch) => {
114 | if (err) {
115 | throw err;
116 | }
117 |
118 | pjs.version = getPackageVersion(tag, branch);
119 |
120 | fs.writeFileSync(pjsPath, JSON.stringify(pjs, null, 2));
121 | })
122 |
123 | });
124 |
--------------------------------------------------------------------------------
/configure:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is intended to be run on unix systems to configure librdkafka
4 | # inside the submodules
5 |
6 | # This does not get run on Windows, which uses the built-in solutions file
7 |
8 | # Get script directory
9 | scriptdir=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
10 |
11 | pushd ./deps/librdkafka &> /dev/null
12 |
13 | ./configure --prefix="${scriptdir}/build/deps" --libdir="${scriptdir}/build/deps" $*
14 |
15 | popd &> /dev/null
16 |
--------------------------------------------------------------------------------
/deploy.enc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Blizzard/node-rdkafka/24e6e0c2a8ffbaa6f7abadd3e4bd52233cf47d29/deploy.enc
--------------------------------------------------------------------------------
/deps/librdkafka.gyp:
--------------------------------------------------------------------------------
1 | {
2 | 'targets': [
3 | {
4 | "target_name": "librdkafka",
5 | "type": "none",
6 | "conditions": [
7 | [
8 | 'OS=="win"',
9 | {
10 | },
11 | {
12 | "actions": [
13 | {
14 | "action_name": "configure",
15 | "inputs": [],
16 | "outputs": [
17 | "librdkafka/config.h",
18 | ],
19 | "action": [
20 | "node", "../util/configure"
21 | ]
22 | },
23 | {
24 | "action_name": "build_dependencies",
25 | "inputs": [
26 | "librdkafka/config.h",
27 | ],
28 | "action": [
29 | "make", "-C", "librdkafka", "libs", "install"
30 | ],
31 | "conditions": [
32 | [
33 | 'OS=="mac"',
34 | {
35 | 'outputs': [
36 | 'deps/librdkafka/src-cpp/librdkafka++.dylib',
37 | 'deps/librdkafka/src-cpp/librdkafka++.1.dylib',
38 | 'deps/librdkafka/src/librdkafka.dylib',
39 | 'deps/librdkafka/src/librdkafka.1.dylib'
40 | ],
41 | },
42 | {
43 | 'outputs': [
44 | 'deps/librdkafka/src-cpp/librdkafka++.so',
45 | 'deps/librdkafka/src-cpp/librdkafka++.so.1',
46 | 'deps/librdkafka/src/librdkafka.so',
47 | 'deps/librdkafka/src/librdkafka.so.1',
48 | 'deps/librdkafka/src-cpp/librdkafka++.a',
49 | 'deps/librdkafka/src/librdkafka.a',
50 | ],
51 | }
52 | ]
53 | ],
54 | }
55 | ]
56 | }
57 |
58 | ]
59 | ]
60 | }
61 | ]
62 | }
63 |
--------------------------------------------------------------------------------
/deps/windows-install.py:
--------------------------------------------------------------------------------
1 | librdkafkaVersion = ''
2 | # read librdkafka version from package.json
3 | import json
4 | import os
5 | import glob
6 |
7 | with open('../package.json') as f:
8 | librdkafkaVersion = json.load(f)['librdkafka']
9 | librdkafkaWinSufix = '7' if librdkafkaVersion == '0.11.5' else ''
10 |
11 | depsPrecompiledDir = '../deps/precompiled'
12 | depsIncludeDir = '../deps/include'
13 | buildReleaseDir = 'Release'
14 |
15 | # alternative: 'https://api.nuget.org/v3-flatcontainer/librdkafka.redist/{}/librdkafka.redist.{}.nupkg'.format(librdkafkaVersion, librdkafkaVersion)
16 | env_dist = os.environ
17 | downloadBaseUrl = env_dist['NODE_RDKAFKA_NUGET_BASE_URL'] if 'NODE_RDKAFKA_NUGET_BASE_URL' in env_dist else 'https://globalcdn.nuget.org/packages/'
18 | librdkafkaNugetUrl = downloadBaseUrl + 'librdkafka.redist.{}.nupkg'.format(librdkafkaVersion)
19 | print('download librdkafka from ' + librdkafkaNugetUrl)
20 | outputDir = 'librdkafka.redist'
21 | outputFile = outputDir + '.zip'
22 | dllPath = outputDir + '/runtimes/win{}-x64/native'.format(librdkafkaWinSufix)
23 | libPath = outputDir + '/build/native/lib/win{}/x64/win{}-x64-Release/v142'.format(librdkafkaWinSufix, librdkafkaWinSufix)
24 | includePath = outputDir + '/build/native/include/librdkafka'
25 |
26 | # download librdkafka from nuget
27 | try:
28 | # For Python 3.0 and later
29 | from urllib.request import urlopen
30 | except ImportError:
31 | # Fall back to Python 2's urllib2
32 | from urllib2 import urlopen
33 | import ssl
34 |
35 | filedata = urlopen(librdkafkaNugetUrl, context=ssl._create_unverified_context())
36 |
37 | datatowrite = filedata.read()
38 | with open(outputFile, 'wb') as f:
39 | f.write(datatowrite)
40 |
41 | # extract package
42 | import zipfile
43 | zip_ref = zipfile.ZipFile(outputFile, 'r')
44 | zip_ref.extractall(outputDir)
45 | zip_ref.close()
46 |
47 | # copy files
48 | import shutil, os, errno
49 |
50 | def createdir(dir):
51 | try:
52 | os.makedirs(dir)
53 | except OSError as e:
54 | if errno.EEXIST != e.errno:
55 | raise
56 |
57 | createdir(depsPrecompiledDir)
58 | createdir(depsIncludeDir)
59 | createdir(buildReleaseDir)
60 |
61 | shutil.copy2(libPath + '/librdkafka.lib', depsPrecompiledDir)
62 | shutil.copy2(libPath + '/librdkafkacpp.lib', depsPrecompiledDir)
63 |
64 | shutil.copy2(includePath + '/rdkafka.h', depsIncludeDir)
65 | shutil.copy2(includePath + '/rdkafkacpp.h', depsIncludeDir)
66 |
67 | # copy all the required dlls
68 | for filename in glob.glob(os.path.join(dllPath, '*.dll')):
69 | shutil.copy2(filename, buildReleaseDir)
70 |
71 | # clean up
72 | os.remove(outputFile)
73 | shutil.rmtree(outputDir)
74 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ---
2 | zookeeper:
3 | image: confluentinc/cp-zookeeper
4 | ports:
5 | - "2181:2181"
6 | environment:
7 | ZOOKEEPER_CLIENT_PORT: 2181
8 | ZOOKEEPER_TICK_TIME: 2000
9 | kafka:
10 | image: confluentinc/cp-kafka
11 | links:
12 | - zookeeper
13 | ports:
14 | - "9092:9092"
15 | environment:
16 | KAFKA_BROKER_ID: 1
17 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
18 | KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092'
19 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
20 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
21 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
22 | KAFKA_DEFAULT_REPLICATION_FACTOR: 1
23 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
24 |
--------------------------------------------------------------------------------
/e2e/admin.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 | var t = require('assert');
12 |
13 | var eventListener = require('./listener');
14 | var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
15 | var time = Date.now();
16 |
17 | function pollForTopic(client, topicName, maxTries, tryDelay, cb, customCondition) {
18 | var tries = 0;
19 |
20 | function getTopicIfExists(innerCb) {
21 | client.getMetadata({
22 | topic: topicName,
23 | }, function(metadataErr, metadata) {
24 | if (metadataErr) {
25 | cb(metadataErr);
26 | return;
27 | }
28 |
29 | var topicFound = metadata.topics.filter(function(topicObj) {
30 | var foundTopic = topicObj.name === topicName;
31 |
32 | // If we have a custom condition for "foundedness", do it here after
33 | // we make sure we are operating on the correct topic
34 | if (foundTopic && customCondition) {
35 | return customCondition(topicObj);
36 | }
37 | return foundTopic;
38 | });
39 |
40 | if (topicFound.length >= 1) {
41 | innerCb(null, topicFound[0]);
42 | return;
43 | }
44 |
45 | innerCb(new Error('Could not find topic ' + topicName));
46 | });
47 | }
48 |
49 | function maybeFinish(err, obj) {
50 | if (err) {
51 | queueNextTry();
52 | return;
53 | }
54 |
55 | cb(null, obj);
56 | }
57 |
58 | function queueNextTry() {
59 | tries += 1;
60 | if (tries < maxTries) {
61 | setTimeout(function() {
62 | getTopicIfExists(maybeFinish);
63 | }, tryDelay);
64 | } else {
65 | cb(new Error('Exceeded max tries of ' + maxTries));
66 | }
67 | }
68 |
69 | queueNextTry();
70 | }
71 |
72 | describe('Admin', function() {
73 | var client;
74 | var producer;
75 |
76 | before(function(done) {
77 | producer = new Kafka.Producer({
78 | 'metadata.broker.list': kafkaBrokerList,
79 | });
80 | producer.connect(null, function(err) {
81 | t.ifError(err);
82 | done();
83 | });
84 | });
85 |
86 | after(function(done) {
87 | producer.disconnect(function() {
88 | done();
89 | });
90 | });
91 |
92 | beforeEach(function() {
93 | client = Kafka.AdminClient.create({
94 | 'client.id': 'kafka-test',
95 | 'metadata.broker.list': kafkaBrokerList
96 | });
97 | });
98 |
99 | describe('createTopic', function() {
100 |     it('should create topic successfully', function(done) {
101 | var topicName = 'admin-test-topic-' + time;
102 | client.createTopic({
103 | topic: topicName,
104 | num_partitions: 1,
105 | replication_factor: 1
106 | }, function(err) {
107 | pollForTopic(producer, topicName, 10, 1000, function(err) {
108 | t.ifError(err);
109 | done();
110 | });
111 | });
112 | });
113 |
114 | it('should raise an error when replication_factor is larger than number of brokers', function(done) {
115 | var topicName = 'admin-test-topic-bad-' + time;
116 | client.createTopic({
117 | topic: topicName,
118 | num_partitions: 9999,
119 | replication_factor: 9999
120 | }, function(err) {
121 | t.equal(typeof err, 'object', 'an error should be returned');
122 | done();
123 | });
124 | });
125 | });
126 |
127 | describe('deleteTopic', function() {
128 | it('should be able to delete a topic after creation', function(done) {
129 | var topicName = 'admin-test-topic-2bdeleted-' + time;
130 | client.createTopic({
131 | topic: topicName,
132 | num_partitions: 1,
133 | replication_factor: 1
134 | }, function(err) {
135 | pollForTopic(producer, topicName, 10, 1000, function(err) {
136 | t.ifError(err);
137 | client.deleteTopic(topicName, function(deleteErr) {
138 | // Fail if we got an error
139 | t.ifError(deleteErr);
140 | done();
141 | });
142 | });
143 | });
144 | });
145 | });
146 |
147 | describe('createPartitions', function() {
148 | it('should be able to add partitions to a topic after creation', function(done) {
149 | var topicName = 'admin-test-topic-newparts-' + time;
150 | client.createTopic({
151 | topic: topicName,
152 | num_partitions: 1,
153 | replication_factor: 1
154 | }, function(err) {
155 | pollForTopic(producer, topicName, 10, 1000, function(err) {
156 | t.ifError(err);
157 | client.createPartitions(topicName, 20, function(createErr) {
158 | pollForTopic(producer, topicName, 10, 1000, function(pollErr) {
159 | t.ifError(pollErr);
160 | done();
161 | }, function(topic) {
162 | return topic.partitions.length === 20;
163 | });
164 | });
165 | });
166 | });
167 | });
168 |
169 |     it('should NOT be able to reduce the number of partitions of a topic after creation', function(done) {
170 | var topicName = 'admin-test-topic-newparts2-' + time;
171 | client.createTopic({
172 | topic: topicName,
173 | num_partitions: 4,
174 | replication_factor: 1
175 | }, function(err) {
176 | pollForTopic(producer, topicName, 10, 1000, function(err) {
177 | t.ifError(err);
178 | client.createPartitions(topicName, 1, function(createErr) {
179 | t.equal(typeof createErr, 'object', 'an error should be returned');
180 | done();
181 | });
182 | });
183 | });
184 | });
185 | });
186 |
187 | });
188 |
--------------------------------------------------------------------------------
/e2e/groups.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var crypto = require('crypto');
11 | var t = require('assert');
12 |
13 | var Kafka = require('../');
14 | var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
15 | var eventListener = require('./listener');
16 |
17 | describe('Consumer group/Producer', function() {
18 |
19 | var producer;
20 | var consumer;
21 | var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex');
22 |
23 | var config = {
24 | 'metadata.broker.list': kafkaBrokerList,
25 | 'group.id': grp,
26 | 'fetch.wait.max.ms': 1000,
27 | 'session.timeout.ms': 10000,
28 | 'enable.auto.commit': false,
29 | 'debug': 'all'
30 | };
31 |
32 | beforeEach(function(done) {
33 | producer = new Kafka.Producer({
34 | 'client.id': 'kafka-mocha',
35 | 'metadata.broker.list': kafkaBrokerList,
36 | 'fetch.wait.max.ms': 1,
37 | 'debug': 'all',
38 | 'dr_cb': true
39 | });
40 |
41 | producer.connect({}, function(err, d) {
42 | t.ifError(err);
43 | t.equal(typeof d, 'object', 'metadata should be returned');
44 | done();
45 | });
46 |
47 | eventListener(producer);
48 | });
49 |
50 | beforeEach(function(done) {
51 | consumer = new Kafka.KafkaConsumer(config, {
52 | 'auto.offset.reset': 'largest'
53 | });
54 |
55 | consumer.connect({}, function(err, d) {
56 | t.ifError(err);
57 | t.equal(typeof d, 'object', 'metadata should be returned');
58 | done();
59 | });
60 |
61 | eventListener(consumer);
62 | });
63 |
64 | afterEach(function(done) {
65 | producer.disconnect(function() {
66 | done();
67 | });
68 | });
69 |
70 | it('should be able to commit, read committed and restart from the committed offset', function(done) {
71 | var topic = 'test';
72 | var key = 'key';
73 | var payload = Buffer.from('value');
74 | var count = 0;
75 | var offsets = {
76 | 'first': true
77 | };
78 |
79 | var tt = setInterval(function() {
80 | try {
81 | producer.produce(topic, null, payload, key);
82 | } catch (e) {
83 | clearInterval(tt);
84 | }
85 | }, 100);
86 |
87 | consumer.on('disconnected', function() {
88 | var consumer2 = new Kafka.KafkaConsumer(config, {
89 | 'auto.offset.reset': 'largest'
90 | });
91 |
92 | consumer2.on('data', function(message) {
93 | if (offsets.first) {
94 | offsets.first = false;
95 | t.deepStrictEqual(offsets.committed, message.offset, 'Offset read by consumer 2 incorrect');
96 | clearInterval(tt);
97 | consumer2.unsubscribe();
98 | consumer2.disconnect(function() {
99 | done();
100 | });
101 | }
102 | });
103 |
104 | consumer2.on('ready', function() {
105 | consumer2.subscribe([topic]);
106 | consumer2.consume();
107 | });
108 | consumer2.connect();
109 | });
110 |
111 | consumer.on('data', function(message) {
112 | count++;
113 | if (count === 3) {
114 | consumer.commitMessageSync(message);
115 |         // test the consumer.committed() API
116 | consumer.committed(null, 5000, function(err, topicPartitions) {
117 | t.ifError(err);
118 | t.deepStrictEqual(topicPartitions.length, 1);
119 | t.deepStrictEqual(topicPartitions[0].offset, message.offset + 1, 'Offset read by consumer 1 incorrect');
120 | offsets.committed = message.offset + 1;
121 | consumer.unsubscribe();
122 | consumer.disconnect();
123 | });
124 |
125 | }
126 | });
127 |
128 | consumer.subscribe([topic]);
129 | consumer.consume();
130 | });
131 |
132 | });
133 |
--------------------------------------------------------------------------------
/e2e/listener.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | * Copyright (c) 2016 Blizzard Entertainment
4 | *
5 | * This software may be modified and distributed under the terms
6 | * of the MIT license. See the LICENSE.txt file for details.
7 | */
8 |
9 | module.exports = eventListener;
10 |
11 | function eventListener(client) {
12 | if (!process.env.DEBUG) {
13 | return;
14 | }
15 |
16 | client
17 | .on('event.error', function (err) {
18 | console.error(err);
19 | })
20 | .on('event.log', function(event) {
21 | var info = {
22 | severity: event.severity,
23 | fac: event.fac,
24 | };
25 | if (event.severity >= 7) {
26 | console.error(info, event.message);
27 | } else if (event.severity === 6 || event.severity === 5) {
28 | console.error(info, event.message);
29 | } else if (event.severity === 4) {
30 | console.error(info, event.message);
31 | } else if (event.severity > 0) {
32 | console.error(info, event.message);
33 | } else {
34 | console.error(info, event.message);
35 | }
36 | })
37 | .on('event.stats', function(event) {
38 | console.log(event, event.message);
39 | })
40 | .on('event.throttle', function(event) {
41 | console.log(event, '%s#%d throttled.', event.brokerName, event.brokerId);
42 | // event.throttleTime;
43 | })
44 | .on('event.event', function(event) {
45 | console.log(event, event.message);
46 | })
47 | .on('ready', function(info) {
48 | console.log('%s connected to kafka server', info.name);
49 | });
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/e2e/producer-transaction.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 |
12 | var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
13 |
14 | describe('Transactional Producer', function () {
15 | var TRANSACTIONS_TIMEOUT_MS = 30000;
16 | var r = Date.now() + '_' + Math.round(Math.random() * 1000);
17 | var topicIn = 'transaction_input_' + r;
18 | var topicOut = 'transaction_output_' + r;
19 |
20 | var producerTras;
21 | var consumerTrans;
22 |
23 | before(function (done) {
24 | /*
25 | prepare:
26 | transactional consumer (read from input topic)
27 | transactional producer (write to output topic)
28 | write 3 messages to input topic: A, B, C
29 | A will be skipped, B will be committed, C will be aborted
30 | */
31 | var connecting = 3;
32 | var producerInput;
33 | function connectedCb(err) {
34 | if (err) {
35 | done(err);
36 | return;
37 | }
38 | connecting--;
39 | if (connecting === 0) {
40 | producerInput.produce(topicIn, -1, Buffer.from('A'));
41 | producerInput.produce(topicIn, -1, Buffer.from('B'));
42 | producerInput.produce(topicIn, -1, Buffer.from('C'));
43 | producerInput.disconnect(function (err) {
44 | consumerTrans.subscribe([topicIn]);
45 | done(err);
46 | })
47 | }
48 | }
49 |     producerInput = new Kafka.Producer({
50 | 'client.id': 'kafka-test',
51 | 'metadata.broker.list': kafkaBrokerList,
52 | 'enable.idempotence': true
53 | });
54 | producerInput.setPollInterval(100);
55 | producerInput.connect({}, connectedCb);
56 |
57 | producerTras = new Kafka.Producer({
58 | 'client.id': 'kafka-test',
59 | 'metadata.broker.list': kafkaBrokerList,
60 | 'dr_cb': true,
61 | 'debug': 'all',
62 | 'transactional.id': 'noderdkafka_transactions_send_offset',
63 | 'enable.idempotence': true
64 | });
65 | producerTras.setPollInterval(100);
66 | producerTras.connect({}, connectedCb);
67 |
68 | consumerTrans = new Kafka.KafkaConsumer({
69 | 'metadata.broker.list': kafkaBrokerList,
70 |       'group.id': 'group_transaction_consumer',
71 | 'enable.auto.commit': false
72 | }, {
73 | 'auto.offset.reset': 'earliest',
74 | });
75 | consumerTrans.connect({}, connectedCb);
76 | });
77 |
78 | after(function (done) {
79 | let connected = 2;
80 | function execDisconnect(client) {
81 |       if (!client.isConnected()) {
82 | connected--;
83 | if (connected === 0) {
84 | done();
85 | }
86 | } else {
87 | client.disconnect(function() {
88 | connected--;
89 | if (connected === 0) {
90 | done();
91 | }
92 | });
93 | }
94 | }
95 | execDisconnect(producerTras);
96 | execDisconnect(consumerTrans);
97 | });
98 |
99 | it('should init transactions', function(done) {
100 | producerTras.initTransactions(TRANSACTIONS_TIMEOUT_MS, function (err) {
101 | done(err);
102 | });
103 | });
104 |
105 | it('should complete transaction', function(done) {
106 | function readMessage() {
107 | consumerTrans.consume(1, function(err, m) {
108 | if (err) {
109 | done(err);
110 | return;
111 | }
112 | if (m.length === 0) {
113 | readMessage();
114 | } else {
115 | var v = m[0].value.toString();
116 | if (v === 'A') { // skip first message
117 | readMessage();
118 | return;
119 | }
120 | if (v !== 'B') {
121 | done('Expected B');
122 | return;
123 | }
124 | producerTras.beginTransaction(function (err) {
125 | if (err) {
126 | done(err);
127 | return;
128 | }
129 | producerTras.produce(topicOut, -1, Buffer.from(v));
130 | var position = consumerTrans.position();
131 | producerTras.sendOffsetsToTransaction(position, consumerTrans, function(err) {
132 | if (err) {
133 | done(err);
134 | return;
135 | }
136 | producerTras.commitTransaction(function(err) {
137 | if (err) {
138 | done(err);
139 | return;
140 | }
141 | consumerTrans.committed(5000, function(err, tpo) {
142 | if (err) {
143 | done(err);
144 | return;
145 | }
146 | if (JSON.stringify(position) !== JSON.stringify(tpo)) {
147 | done('Committed mismatch');
148 | return;
149 | }
150 | done();
151 | });
152 | });
153 | });
154 | });
155 | }
156 | });
157 | }
158 | readMessage();
159 | });
160 |
161 | describe('abort transaction', function() {
162 | var lastConsumerTransPosition;
163 | before(function(done) {
164 | function readMessage() {
165 | consumerTrans.consume(1, function(err, m) {
166 | if (err) {
167 | done(err);
168 | return;
169 | }
170 | if (m.length === 0) {
171 | readMessage();
172 | } else {
173 | var v = m[0].value.toString();
174 | if (v !== 'C') {
175 | done('Expected C');
176 | return;
177 | }
178 | producerTras.beginTransaction(function (err) {
179 | if (err) {
180 | done(err);
181 | return;
182 | }
183 | producerTras.produce(topicOut, -1, Buffer.from(v));
184 | lastConsumerTransPosition = consumerTrans.position();
185 | producerTras.sendOffsetsToTransaction(lastConsumerTransPosition, consumerTrans, function(err) {
186 | if (err) {
187 | done(err);
188 | return;
189 | }
190 | done();
191 | });
192 | });
193 | }
194 | });
195 | }
196 | readMessage();
197 | });
198 |
199 |     it('should consume committed and uncommitted for read_uncommitted', function(done) {
200 | var allMsgs = [];
201 | var consumer = new Kafka.KafkaConsumer({
202 | 'metadata.broker.list': kafkaBrokerList,
203 | 'group.id': 'group_read_uncommitted',
204 | 'enable.auto.commit': false,
205 | 'isolation.level': 'read_uncommitted'
206 | }, {
207 | 'auto.offset.reset': 'earliest',
208 | });
209 | consumer.connect({}, function(err) {
210 | if (err) {
211 | done(err);
212 | return;
213 | }
214 | consumer.subscribe([topicOut]);
215 | consumer.consume();
216 | });
217 | consumer.on('data', function(msg) {
218 | var v = msg.value.toString();
219 | allMsgs.push(v);
220 | // both B and C must be consumed
221 | if (allMsgs.length === 2 && allMsgs[0] === 'B' && allMsgs[1] === 'C') {
222 | consumer.disconnect(function(err) {
223 | if (err) {
224 | done(err);
225 | return;
226 | }
227 | done();
228 | })
229 | }
230 | });
231 | });
232 |
233 |     it('should consume only committed for read_committed', function(done) {
234 | var allMsgs = [];
235 | var consumer = new Kafka.KafkaConsumer({
236 | 'metadata.broker.list': kafkaBrokerList,
237 | 'group.id': 'group_read_committed',
238 | 'enable.partition.eof': true,
239 | 'enable.auto.commit': false,
240 | 'isolation.level': 'read_committed'
241 | }, {
242 | 'auto.offset.reset': 'earliest',
243 | });
244 | consumer.connect({}, function(err) {
245 | if (err) {
246 | done(err);
247 | return;
248 | }
249 | consumer.subscribe([topicOut]);
250 | consumer.consume();
251 | });
252 | consumer.on('data', function(msg) {
253 | var v = msg.value.toString();
254 | allMsgs.push(v);
255 | });
256 | consumer.on('partition.eof', function(eof) {
257 | if (allMsgs.length === 1 && allMsgs[0] === 'B') {
258 | consumer.disconnect(function(err) {
259 | if (err) {
260 | done(err);
261 | return;
262 | }
263 | done();
264 | })
265 | } else {
266 | done('Expected only B');
267 | return;
268 | }
269 | });
270 | });
271 |
272 | it('should abort transaction', function(done) {
273 | producerTras.abortTransaction(function(err) {
274 | if (err) {
275 | done(err);
276 | return;
277 | }
278 | consumerTrans.committed(5000, function(err, tpo) {
279 | if (err) {
280 | done(err);
281 | return;
282 | }
283 | if (lastConsumerTransPosition[0].offset <= tpo[0].offset) {
284 | done('Committed mismatch');
285 | return;
286 | }
287 | done();
288 | });
289 | });
290 | });
291 |
292 | it('should consume only committed', function(done) {
293 | var gotB = false;
294 | var consumer = new Kafka.KafkaConsumer({
295 | 'metadata.broker.list': kafkaBrokerList,
296 | 'group.id': 'group_default',
297 | 'enable.partition.eof': true,
298 | 'enable.auto.commit': false,
299 | }, {
300 | 'auto.offset.reset': 'earliest',
301 | });
302 | consumer.connect({}, function(err) {
303 | if (err) {
304 | done(err);
305 | return;
306 | }
307 | consumer.subscribe([topicOut]);
308 | consumer.consume();
309 | });
310 | consumer.on('data', function(msg) {
311 | var v = msg.value.toString();
312 | if (v !== 'B') {
313 | done('Expected B');
314 | return;
315 | }
316 | gotB = true;
317 | });
318 | consumer.on('partition.eof', function(eof) {
319 | if (!gotB) {
320 | done('Expected B');
321 | return;
322 | }
323 | consumer.disconnect(function(err) {
324 | if (err) {
325 | done(err);
326 | return;
327 | }
328 | done();
329 | });
330 | });
331 | });
332 | });
333 | });
334 |
--------------------------------------------------------------------------------
/e2e/producer.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Kafka = require('../');
11 | var t = require('assert');
12 | var crypto = require('crypto');
13 |
14 | var eventListener = require('./listener');
15 |
16 | var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
17 |
18 | var serviceStopped = false;
19 |
20 | describe('Producer', function() {
21 |
22 | var producer;
23 |
24 | describe('with dr_cb', function() {
25 | beforeEach(function(done) {
26 | producer = new Kafka.Producer({
27 | 'client.id': 'kafka-test',
28 | 'metadata.broker.list': kafkaBrokerList,
29 | 'dr_cb': true,
30 | 'debug': 'all'
31 | });
32 | producer.connect({}, function(err) {
33 | t.ifError(err);
34 | done();
35 | });
36 |
37 | eventListener(producer);
38 | });
39 |
40 | afterEach(function(done) {
41 | producer.disconnect(function() {
42 | done();
43 | });
44 | });
45 |
46 | it('should connect to Kafka', function(done) {
47 | producer.getMetadata({}, function(err, metadata) {
48 | t.ifError(err);
49 | t.ok(metadata);
50 |
51 | // Ensure it is in the correct format
52 | t.ok(metadata.orig_broker_name, 'Broker name is not set');
53 | t.notStrictEqual(metadata.orig_broker_id, undefined, 'Broker id is not set');
54 | t.equal(Array.isArray(metadata.brokers), true);
55 | t.equal(Array.isArray(metadata.topics), true);
56 |
57 | done();
58 | });
59 | });
60 |
61 | it('should produce a message with a null payload and null key', function(done) {
62 | var tt = setInterval(function() {
63 | producer.poll();
64 | }, 200);
65 |
66 | producer.once('delivery-report', function(err, report) {
67 | clearInterval(tt);
68 | t.ifError(err);
69 | t.notStrictEqual(report, undefined);
70 | t.strictEqual(typeof report.topic, 'string');
71 | t.strictEqual(typeof report.partition, 'number');
72 | t.strictEqual(typeof report.offset, 'number');
73 |         t.strictEqual(report.key, null);
74 | done();
75 | });
76 |
77 | producer.produce('test', null, null, null);
78 | });
79 |
80 | it('should produce a message with a payload and key', function(done) {
81 | var tt = setInterval(function() {
82 | producer.poll();
83 | }, 200);
84 |
85 | producer.once('delivery-report', function(err, report) {
86 | clearInterval(tt);
87 | t.ifError(err);
88 | t.notStrictEqual(report, undefined);
89 | t.strictEqual(report.value, undefined);
90 | t.strictEqual(typeof report.topic, 'string');
91 | t.strictEqual(typeof report.partition, 'number');
92 | t.strictEqual(typeof report.offset, 'number');
93 | t.equal(report.key, 'key');
94 | done();
95 | });
96 |
97 | producer.produce('test', null, Buffer.from('value'), 'key');
98 | });
99 |
100 | it('should produce a message with a payload and key buffer', function(done) {
101 | var tt = setInterval(function() {
102 | producer.poll();
103 | }, 200);
104 |
105 | producer.once('delivery-report', function(err, report) {
106 | clearInterval(tt);
107 | t.ifError(err);
108 | t.notStrictEqual(report, undefined);
109 | t.strictEqual(report.value, undefined);
110 | t.strictEqual(typeof report.topic, 'string');
111 | t.strictEqual(typeof report.partition, 'number');
112 | t.strictEqual(typeof report.offset, 'number');
113 | t.equal(report.key.length > 3, true);
114 | done();
115 | });
116 |
117 | producer.produce('test', null, Buffer.from('value'), Buffer.from('key\0s'));
118 | });
119 |
120 | it('should produce a message with an opaque', function(done) {
121 | var tt = setInterval(function() {
122 | producer.poll();
123 | }, 200);
124 |
125 | producer.once('delivery-report', function(err, report) {
126 | clearInterval(tt);
127 | t.ifError(err);
128 | t.notStrictEqual(report, undefined);
129 | t.strictEqual(typeof report.topic, 'string');
130 | t.strictEqual(typeof report.partition, 'number');
131 | t.strictEqual(typeof report.offset, 'number');
132 | t.equal(report.opaque, 'opaque');
133 | done();
134 | });
135 |
136 | producer.produce('test', null, Buffer.from('value'), null, null, 'opaque');
137 | });
138 |
139 |
140 | it('should get 100% deliverability', function(done) {
141 | var total = 0;
142 | var max = 10000;
143 | var verified_received = 0;
144 |
145 | var tt = setInterval(function() {
146 | producer.poll();
147 | }, 200);
148 |
149 | producer
150 | .on('delivery-report', function(err, report) {
151 | t.ifError(err);
152 | t.notStrictEqual(report, undefined);
153 | t.strictEqual(typeof report.topic, 'string');
154 | t.strictEqual(typeof report.partition, 'number');
155 | t.strictEqual(typeof report.offset, 'number');
156 | verified_received++;
157 | if (verified_received === max) {
158 | clearInterval(tt);
159 | done();
160 | }
161 | });
162 |
163 | // Produce
164 | for (total = 0; total <= max; total++) {
165 | producer.produce('test', null, Buffer.from('message ' + total), null);
166 | }
167 |
168 | });
169 |
170 | });
171 |
172 | describe('with_dr_msg_cb', function() {
173 | beforeEach(function(done) {
174 | producer = new Kafka.Producer({
175 | 'client.id': 'kafka-test',
176 | 'metadata.broker.list': kafkaBrokerList,
177 | 'dr_msg_cb': true,
178 | 'debug': 'all'
179 | });
180 | producer.connect({}, function(err) {
181 | t.ifError(err);
182 | done();
183 | });
184 |
185 | eventListener(producer);
186 | });
187 |
188 | afterEach(function(done) {
189 | producer.disconnect(function() {
190 | done();
191 | });
192 | });
193 |
194 | it('should produce a message with a payload and key', function(done) {
195 | var tt = setInterval(function() {
196 | producer.poll();
197 | }, 200);
198 |
199 | producer.once('delivery-report', function(err, report) {
200 | clearInterval(tt);
201 | t.ifError(err);
202 | t.notStrictEqual(report, undefined);
203 | t.strictEqual(typeof report.topic, 'string');
204 | t.strictEqual(typeof report.partition, 'number');
205 | t.strictEqual(typeof report.offset, 'number');
206 |         t.equal(report.key.toString(), 'key');
207 | t.equal(report.value.toString(), 'hai');
208 | done();
209 | });
210 |
211 | producer.produce('test', null, Buffer.from('hai'), 'key');
212 | });
213 |
214 | it('should produce a message with an empty payload and empty key (https://github.com/Blizzard/node-rdkafka/issues/117)', function(done) {
215 | var tt = setInterval(function() {
216 | producer.poll();
217 | }, 200);
218 |
219 | producer.once('delivery-report', function(err, report) {
220 | clearInterval(tt);
221 | t.ifError(err);
222 | t.notStrictEqual(report, undefined);
223 |
224 | t.strictEqual(typeof report.topic, 'string');
225 | t.strictEqual(typeof report.partition, 'number');
226 | t.strictEqual(typeof report.offset, 'number');
227 | t.equal(report.key.toString(), '', 'key should be an empty string');
228 | t.strictEqual(report.value.toString(), '', 'payload should be an empty string');
229 | done();
230 | });
231 |
232 | producer.produce('test', null, Buffer.from(''), '');
233 | });
234 |
235 | it('should produce a message with a null payload and null key (https://github.com/Blizzard/node-rdkafka/issues/117)', function(done) {
236 | producer.setPollInterval(10);
237 |
238 | producer.once('delivery-report', function(err, report) {
239 | t.ifError(err);
240 | t.notStrictEqual(report, undefined);
241 |
242 | t.strictEqual(typeof report.topic, 'string');
243 | t.strictEqual(typeof report.partition, 'number');
244 | t.strictEqual(typeof report.offset, 'number');
245 | t.strictEqual(report.key, null, 'key should be null');
246 | t.strictEqual(report.value, null, 'payload should be null');
247 | done();
248 | });
249 |
250 | producer.produce('test', null, null, null);
251 | });
252 |
253 | it('should produce an int64 key (https://github.com/Blizzard/node-rdkafka/issues/208)', function(done) {
254 |
255 |       var v1 = 0x0000000000000084; // int64 key value, written out byte-by-byte below
256 | var arr = new Uint8Array(8);
257 | arr[0] = 0x00;
258 | arr[1] = 0x00;
259 | arr[2] = 0x00;
260 | arr[3] = 0x00;
261 | arr[4] = 0x00;
262 | arr[5] = 0x00;
263 | arr[6] = 0x00;
264 |       arr[7] = 0x84;
265 | var buf = Buffer.from(arr.buffer);
266 |
267 | producer.setPollInterval(10);
268 |
269 | producer.once('delivery-report', function(err, report) {
270 | t.ifError(err);
271 | t.notStrictEqual(report, undefined);
272 |
273 | t.deepEqual(buf, report.key);
274 | done();
275 | });
276 |
277 |       producer.produce('test', null, null, buf);
278 |
279 | });
280 |
281 | });
282 |
283 | });
284 |
--------------------------------------------------------------------------------
/examples/consumer-flow.md:
--------------------------------------------------------------------------------
1 | Connecting a Kafka consumer is easy. Let's connect one using
2 | the flowing implementation:
3 |
4 | ```js
5 | /*
6 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
7 | *
8 | * Copyright (c) 2016 Blizzard Entertainment
9 | *
10 | * This software may be modified and distributed under the terms
11 | * of the MIT license. See the LICENSE.txt file for details.
12 | */
13 |
14 | var Kafka = require('../');
15 |
16 | var consumer = new Kafka.KafkaConsumer({
17 | //'debug': 'all',
18 | 'metadata.broker.list': 'localhost:9092',
19 | 'group.id': 'node-rdkafka-consumer-flow-example',
20 | 'enable.auto.commit': false
21 | });
22 |
23 | var topicName = 'test';
24 |
25 | //logging debug messages, if debug is enabled
26 | consumer.on('event.log', function(log) {
27 | console.log(log);
28 | });
29 |
30 | //logging all errors
31 | consumer.on('event.error', function(err) {
32 | console.error('Error from consumer');
33 | console.error(err);
34 | });
35 |
36 | //counter to commit offsets every numMessages are received
37 | var counter = 0;
38 | var numMessages = 5;
39 |
40 | consumer.on('ready', function(arg) {
41 | console.log('consumer ready.' + JSON.stringify(arg));
42 |
43 | consumer.subscribe([topicName]);
44 | //start consuming messages
45 | consumer.consume();
46 | });
47 |
48 |
49 | consumer.on('data', function(m) {
50 | counter++;
51 |
52 | //committing offsets every numMessages
53 | if (counter % numMessages === 0) {
54 | console.log('calling commit');
55 | consumer.commit(m);
56 | }
57 |
58 | // Output the actual message contents
59 | console.log(JSON.stringify(m));
60 | console.log(m.value.toString());
61 |
62 | });
63 |
64 | consumer.on('disconnected', function(arg) {
65 | console.log('consumer disconnected. ' + JSON.stringify(arg));
66 | });
67 |
68 | //starting the consumer
69 | consumer.connect();
70 |
71 | //stopping this example after 30s
72 | setTimeout(function() {
73 | consumer.disconnect();
74 | }, 30000);
75 |
76 | ```
77 |
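78 | The flowing mode above pushes each message to the `data` event as it
79 | arrives. If you would rather pull a fixed number of messages at a time,
80 | `consume()` also accepts a count and a callback, as the e2e tests use.
81 | A minimal sketch, assuming the same connected and subscribed `consumer`:
82 |
83 | ```js
84 | // Pull up to 10 messages in one batch instead of relying on 'data' events.
85 | consumer.consume(10, function(err, messages) {
86 |   if (err) {
87 |     console.error(err);
88 |     return;
89 |   }
90 |   messages.forEach(function(m) {
91 |     console.log(m.value.toString());
92 |   });
93 | });
94 | ```
95 |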
--------------------------------------------------------------------------------
/examples/consumer.md:
--------------------------------------------------------------------------------
1 | Connecting a Kafka consumer is easy. Let's connect one using
2 | the stream implementation:
3 |
4 | ```js
5 | /*
6 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
7 | *
8 | * Copyright (c) 2016 Blizzard Entertainment
9 | *
10 | * This software may be modified and distributed under the terms
11 | * of the MIT license. See the LICENSE.txt file for details.
12 | */
13 |
14 | var Transform = require('stream').Transform;
15 |
16 | var Kafka = require('../');
17 |
18 | var stream = Kafka.KafkaConsumer.createReadStream({
19 | 'metadata.broker.list': 'localhost:9092',
20 | 'group.id': 'librd-test',
21 | 'socket.keepalive.enable': true,
22 | 'enable.auto.commit': false
23 | }, {}, {
24 | topics: 'test',
25 | waitInterval: 0,
26 | objectMode: false
27 | });
28 |
29 | stream.on('error', function(err) {
30 | if (err) console.log(err);
31 | process.exit(1);
32 | });
33 |
34 | stream
35 |   .pipe(process.stdout);
36 |
37 | stream.consumer.on('event.error', function(err) {
38 |   console.log(err);
39 | });
40 | ```
41 |
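42 | Because `objectMode` is `false` here, the stream emits raw message value
43 | buffers, which is why it can be piped straight to `process.stdout`. With
44 | `objectMode: true` the stream emits message objects instead. A minimal
45 | sketch, assuming the same broker settings:
46 |
47 | ```js
48 | var objectStream = Kafka.KafkaConsumer.createReadStream({
49 |   'metadata.broker.list': 'localhost:9092',
50 |   'group.id': 'librd-test',
51 |   'enable.auto.commit': false
52 | }, {}, {
53 |   topics: 'test',
54 |   objectMode: true
55 | });
56 |
57 | // Each chunk is a message object with value, key, topic, partition and offset
58 | objectStream.on('data', function(message) {
59 |   console.log(message.topic, message.offset, message.value.toString());
60 | });
61 | ```
62 |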
--------------------------------------------------------------------------------
/examples/docker-alpine.md:
--------------------------------------------------------------------------------
1 | When using Docker to install `node-rdkafka`, you need to make sure you install the appropriate library dependencies. Alpine Linux is a lightweight distribution and does not come with the same base libraries as other distributions (such as glibc).
2 |
3 | You can see some of the differences here: https://linuxacademy.com/blog/cloud/alpine-linux-and-docker/
4 |
5 | ```dockerfile
6 | FROM node:14-alpine
7 |
8 | RUN apk --no-cache add \
9 | bash \
10 | g++ \
11 | ca-certificates \
12 | lz4-dev \
13 | musl-dev \
14 | cyrus-sasl-dev \
15 | openssl-dev \
16 | make \
17 | python3
18 |
19 | RUN apk add --no-cache --virtual .build-deps gcc zlib-dev libc-dev bsd-compat-headers py-setuptools bash
20 |
21 | # Create app directory
22 | RUN mkdir -p /usr/local/app
23 |
24 | # Move to the app directory
25 | WORKDIR /usr/local/app
26 |
27 | # Install node-rdkafka
28 | RUN npm install node-rdkafka
29 | # In a real app, COPY package.json first so Docker can cache the npm install layer
30 | ```
31 |
--------------------------------------------------------------------------------
/examples/high-level-producer.md:
--------------------------------------------------------------------------------
1 | ```js
2 | var Kafka = require('../');
3 |
4 | var producer = new Kafka.HighLevelProducer({
5 | 'metadata.broker.list': 'localhost:9092',
6 | });
7 |
8 | // Throw away the keys
9 | producer.setKeySerializer(function(v) {
10 | return new Promise((resolve, reject) => {
11 | setTimeout(() => {
12 | resolve(null);
13 | }, 20);
14 | });
15 | });
16 |
17 | // Take the message field
18 | producer.setValueSerializer(function(v) {
19 | return Buffer.from(v.message);
20 | });
21 |
22 | producer.connect(null, function() {
23 | producer.produce('test', null, {
24 | message: 'alliance4ever',
25 | }, null, Date.now(), function(err, offset) {
26 |     // offset is set here if our acknowledgement level allows us to receive delivery offsets
27 | setImmediate(function() {
28 | producer.disconnect();
29 | });
30 | });
31 | });
32 | ```
33 |
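34 | The key serializer above is asynchronous: it returns a Promise, and the
35 | produce call waits for it to resolve before sending. Serializers can also
36 | be plain synchronous functions, like the value serializer above, or accept
37 | a `(value, callback)` pair. A minimal synchronous key serializer, for
38 | comparison:
39 |
40 | ```js
41 | // Synchronous form: just return the serialized key (or null to drop it).
42 | producer.setKeySerializer(function(key) {
43 |   return key === null ? null : Buffer.from(String(key));
44 | });
45 | ```
46 |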
--------------------------------------------------------------------------------
/examples/metadata.md:
--------------------------------------------------------------------------------
1 | ```js
2 | /*
3 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
4 | *
5 | * Copyright (c) 2016 Blizzard Entertainment
6 | *
7 | * This software may be modified and distributed under the terms
8 | * of the MIT license. See the LICENSE.txt file for details.
9 | */
10 |
11 | var Kafka = require('../');
12 |
13 | var producer = new Kafka.Producer({
14 | 'metadata.broker.list': 'localhost:9092',
15 | 'client.id': 'hey',
16 | 'compression.codec': 'snappy'
17 | });
18 |
19 | producer.connect()
20 | .on('ready', function(i, metadata) {
21 | console.log(i);
22 | console.log(metadata);
23 | })
24 | .on('event.error', function(err) {
25 | console.log(err);
26 | });
27 | ```
28 |
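29 | The `ready` event is not the only way to get metadata. Once connected, you
30 | can also request it on demand with `getMetadata()`, as the e2e tests do.
31 | A minimal sketch:
32 |
33 | ```js
34 | producer.getMetadata({}, function(err, metadata) {
35 |   if (err) {
36 |     console.error(err);
37 |     return;
38 |   }
39 |   // metadata.brokers and metadata.topics are arrays
40 |   console.log(metadata.orig_broker_name, metadata.topics.length);
41 | });
42 | ```
43 |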
--------------------------------------------------------------------------------
/examples/oauthbearer-default-flow.md:
--------------------------------------------------------------------------------
1 | Producer, Consumer and HighLevelProducer:
2 | ```js
3 | /*
4 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
5 | *
6 | * Copyright (c) 2016 Blizzard Entertainment
7 | *
8 | * This software may be modified and distributed under the terms
9 | * of the MIT license. See the LICENSE.txt file for details.
10 | */
11 |
12 | var Kafka = require('../');
13 |
14 | var token = "your_token";
15 |
16 | var producer = new Kafka.Producer({
17 | //'debug' : 'all',
18 | 'metadata.broker.list': 'localhost:9093',
19 | 'security.protocol': 'SASL_SSL',
20 | 'sasl.mechanisms': 'OAUTHBEARER',
21 | }).setOauthBearerToken(token);
22 |
23 | //start the producer
24 | producer.connect();
25 |
26 | //refresh the token
27 | producer.setOauthBearerToken(token);
28 | ```
29 |
30 | AdminClient:
31 | ```js
32 | /*
33 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
34 | *
35 | * Copyright (c) 2016 Blizzard Entertainment
36 | *
37 | * This software may be modified and distributed under the terms
38 | * of the MIT license. See the LICENSE.txt file for details.
39 | */
40 | var Kafka = require('../');
41 |
42 | var token = "your_token";
43 |
44 | var admin = Kafka.AdminClient.create({
45 | 'metadata.broker.list': 'localhost:9093',
46 | 'security.protocol': 'SASL_SSL',
47 | 'sasl.mechanisms': 'OAUTHBEARER',
48 | }, token);
49 |
50 | //refresh the token
51 | admin.refreshOauthBearerToken(token);
52 | ```
53 |
54 | ConsumerStream:
55 | ```js
56 | /*
57 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
58 | *
59 | * Copyright (c) 2016 Blizzard Entertainment
60 | *
61 | * This software may be modified and distributed under the terms
62 | * of the MIT license. See the LICENSE.txt file for details.
63 | */
64 | var Kafka = require('../');
65 |
66 | var token = "your_token";
67 |
68 | var stream = Kafka.KafkaConsumer.createReadStream({
69 | 'metadata.broker.list': 'localhost:9093',
70 | 'group.id': 'myGroup',
71 | 'security.protocol': 'SASL_SSL',
72 | 'sasl.mechanisms': 'OAUTHBEARER'
73 | }, {}, {
74 | topics: 'test1',
75 | initOauthBearerToken: token,
76 | });
77 |
78 | //refresh the token
79 | stream.refreshOauthBearerToken(token);
80 | ```
81 |
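82 | Tokens expire, so a real application should refresh them on a timer before
83 | expiry rather than setting them once. A minimal sketch, where `fetchToken()`
84 | is a hypothetical function that obtains a fresh token from your identity
85 | provider:
86 |
87 | ```js
88 | // Keep the producer's token fresh; the interval should be comfortably
89 | // shorter than the token lifetime.
90 | setInterval(function() {
91 |   // fetchToken() is a placeholder for your own token retrieval logic
92 |   fetchToken().then(function(newToken) {
93 |     producer.setOauthBearerToken(newToken);
94 |   });
95 | }, 60 * 1000);
96 | ```
97 |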
--------------------------------------------------------------------------------
/examples/producer-cluster.md:
--------------------------------------------------------------------------------
1 | ```js
2 | /*
3 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
4 | *
5 | * Copyright (c) 2016 Blizzard Entertainment
6 | *
7 | * This software may be modified and distributed under the terms
8 | * of the MIT license. See the LICENSE.txt file for details.
9 | */
10 |
11 | var cluster = require('cluster');
12 | var numCPUs = 6;
13 | var Kafka = require('../');
14 |
15 | if (cluster.isMaster) {
16 | // Fork workers.
17 | for (var i = 0; i < numCPUs; i++) {
18 | cluster.fork();
19 | }
20 |
21 | var exited_workers = 0;
22 |
23 | cluster.on('exit', function(worker, code, signal) {
24 | exited_workers++;
25 |     if (exited_workers === numCPUs) {
26 | process.exit();
27 | }
28 | });
29 | } else {
30 | // Configure client
31 | var producer = new Kafka.Producer({
32 | 'client.id': 'kafka',
33 | 'metadata.broker.list': 'localhost:9092',
34 | 'compression.codec': 'none',
35 | 'retry.backoff.ms': 200,
36 | 'message.send.max.retries': 10,
37 | 'socket.keepalive.enable': true,
38 | 'queue.buffering.max.messages': 100000,
39 | 'queue.buffering.max.ms': 1000,
40 | 'batch.num.messages': 1000000,
41 | 'dr_cb': true
42 | });
43 |
44 | producer.setPollInterval(100);
45 |
46 | var total = 0;
47 | var totalSent = 0;
48 | var max = 20000;
49 | var errors = 0;
50 | var started = Date.now();
51 |
52 | var sendMessage = function() {
53 |     // produce(topic, partition, value, key) is the Producer API used
54 |     // by the other examples in this repo
55 |     producer.produce('librdtesting-01', null,
56 |       Buffer.from('message ' + total),
57 |       null);
58 | total++;
59 | if (total >= max) {
60 | } else {
61 | setImmediate(sendMessage);
62 | }
63 | };
64 |
65 | var verified_received = 0;
66 | var exitNextTick = false;
67 | var errorsArr = [];
68 |
69 | var t = setInterval(function() {
70 | producer.poll();
71 |
72 | if (exitNextTick) {
73 | clearInterval(t);
74 | return setTimeout(function() {
75 | console.log('[%d] Received: %d, Errors: %d, Total: %d', process.pid, verified_received, errors, total);
76 | // console.log('[%d] Finished sending %d in %d seconds', process.pid, total, parseInt((Date.now() - started) / 1000));
77 | if (errors > 0) {
78 | console.error(errorsArr[0]);
79 | return process.exitCode = 1;
80 | }
81 | process.exitCode = 0;
82 | setTimeout(process.exit, 1000);
83 | }, 2000);
84 | }
85 |
86 | if (verified_received + errors === max) {
87 | exitNextTick = true;
88 | }
89 |
90 | }, 1000);
91 | producer.connect()
92 | .on('event.error', function(e) {
93 | errors++;
94 | errorsArr.push(e);
95 | })
96 | .on('delivery-report', function() {
97 | verified_received++;
98 | })
99 | .on('ready', sendMessage);
100 |
101 |
102 | }
103 | ```
104 |
--------------------------------------------------------------------------------
/examples/producer.md:
--------------------------------------------------------------------------------
1 | ```js
2 | /*
3 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
4 | *
5 | * Copyright (c) 2016 Blizzard Entertainment
6 | *
7 | * This software may be modified and distributed under the terms
8 | * of the MIT license. See the LICENSE.txt file for details.
9 | */
10 |
11 | var Kafka = require('../');
12 |
13 | var producer = new Kafka.Producer({
14 | //'debug' : 'all',
15 | 'metadata.broker.list': 'localhost:9092',
16 | 'dr_cb': true //delivery report callback
17 | });
18 |
19 | var topicName = 'test';
20 |
21 | //logging debug messages, if debug is enabled
22 | producer.on('event.log', function(log) {
23 | console.log(log);
24 | });
25 |
26 | //logging all errors
27 | producer.on('event.error', function(err) {
28 | console.error('Error from producer');
29 | console.error(err);
30 | });
31 |
32 | //counter to stop this sample after maxMessages are sent
33 | var counter = 0;
34 | var maxMessages = 10;
35 |
36 | producer.on('delivery-report', function(err, report) {
37 | console.log('delivery-report: ' + JSON.stringify(report));
38 | counter++;
39 | });
40 |
41 | //Wait for the ready event before producing
42 | producer.on('ready', function(arg) {
43 | console.log('producer ready.' + JSON.stringify(arg));
44 |
45 | for (var i = 0; i < maxMessages; i++) {
46 | var value = Buffer.from('value-' +i);
47 | var key = "key-"+i;
48 | // if partition is set to -1, librdkafka will use the default partitioner
49 | var partition = -1;
50 | var headers = [
51 | { header: "header value" }
52 | ]
53 | producer.produce(topicName, partition, value, key, Date.now(), "", headers);
54 | }
55 |
56 | //need to keep polling for a while to ensure the delivery reports are received
57 | var pollLoop = setInterval(function() {
58 | producer.poll();
59 | if (counter === maxMessages) {
60 | clearInterval(pollLoop);
61 | producer.disconnect();
62 | }
63 | }, 1000);
64 |
65 | });
66 |
67 | producer.on('disconnected', function(arg) {
68 | console.log('producer disconnected. ' + JSON.stringify(arg));
69 | });
70 |
71 | //starting the producer
72 | producer.connect();
73 | ```
74 |
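75 | Instead of running your own poll loop, you can let the client poll for
76 | delivery reports in the background with `setPollInterval()`, as several of
77 | the other examples and tests do. A minimal sketch:
78 |
79 | ```js
80 | // Poll every 100ms in the background; delivery-report events fire
81 | // without a manual producer.poll() loop.
82 | producer.setPollInterval(100);
83 | ```
84 |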
--------------------------------------------------------------------------------
/lib/admin.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 | 'use strict';
10 |
11 | module.exports = {
12 | create: createAdminClient,
13 | };
14 |
15 | var Client = require('./client');
16 | var util = require('util');
17 | var Kafka = require('../librdkafka');
18 | var LibrdKafkaError = require('./error');
19 | var shallowCopy = require('./util').shallowCopy;
20 |
21 | /**
22 |  * Create a new AdminClient for creating topics, partitions, and more.
23 | *
24 | * This is a factory method because it immediately starts an
25 | * active handle with the brokers.
26 | *
27 | */
28 | function createAdminClient(conf, initOauthBearerToken) {
29 | var client = new AdminClient(conf);
30 |
31 | if (initOauthBearerToken) {
32 | client.refreshOauthBearerToken(initOauthBearerToken);
33 | }
34 |
35 | // Wrap the error so we throw if it failed with some context
36 | LibrdKafkaError.wrap(client.connect(), true);
37 |
38 | // Return the client if we succeeded
39 | return client;
40 | }
41 |
42 | /**
43 | * AdminClient class for administering Kafka
44 | *
45 | * This client is the way you can interface with the Kafka Admin APIs.
46 |  * This class should not be instantiated using the constructor, but
47 |  * instead should be created using the factory method:
48 | *
49 | *
50 | * var client = AdminClient.create({ ... });
51 | *
52 | *
53 | * Once you instantiate this object, it will have a handle to the kafka broker.
54 | * Unlike the other node-rdkafka classes, this class does not ensure that
55 | * it is connected to the upstream broker. Instead, making an action will
56 | * validate that.
57 | *
58 |  * @param {object} conf - Key value pairs to configure the admin client.
59 |  *
60 | * @constructor
61 | */
62 | function AdminClient(conf) {
63 | if (!(this instanceof AdminClient)) {
64 | return new AdminClient(conf);
65 | }
66 |
67 | conf = shallowCopy(conf);
68 |
69 | /**
70 | * NewTopic model.
71 | *
72 |    * This is the representation of a new topic that is requested to be created
73 | * using the Admin client.
74 | *
75 | * @typedef {object} AdminClient~NewTopic
76 | * @property {string} topic - the topic name to create
77 | * @property {number} num_partitions - the number of partitions to give the topic
78 | * @property {number} replication_factor - the replication factor of the topic
79 | * @property {object} config - a list of key values to be passed as configuration
80 | * for the topic.
81 | */
82 |
83 | this._client = new Kafka.AdminClient(conf);
84 | this._isConnected = false;
85 | this.globalConfig = conf;
86 | }
87 |
88 | /**
89 | * Connect using the admin client.
90 | *
91 | * Should be run using the factory method, so should never
92 | * need to be called outside.
93 | *
94 | * Unlike the other connect methods, this one is synchronous.
95 | */
96 | AdminClient.prototype.connect = function() {
97 | LibrdKafkaError.wrap(this._client.connect(), true);
98 | this._isConnected = true;
99 | };
100 |
101 | /**
102 | * Disconnect the admin client.
103 | *
104 | * This is a synchronous method, but all it does is clean up
105 | * some memory and shut some threads down
106 | */
107 | AdminClient.prototype.disconnect = function() {
108 | LibrdKafkaError.wrap(this._client.disconnect(), true);
109 | this._isConnected = false;
110 | };
111 |
112 | /**
113 | * Refresh OAuthBearer token, initially provided in factory method.
114 | * Expiry is always set to maximum value, as the callback of librdkafka
115 | * for token refresh is not used.
116 | *
117 | * @param {string} tokenStr - OAuthBearer token string
118 | * @see connection.cc
119 | */
120 | AdminClient.prototype.refreshOauthBearerToken = function (tokenStr) {
121 | if (!tokenStr || typeof tokenStr !== 'string') {
122 | throw new Error("OAuthBearer token is undefined/empty or not a string");
123 | }
124 |
125 | this._client.setToken(tokenStr);
126 | };
127 |
128 | /**
129 | * Create a topic with a given config.
130 | *
131 | * @param {NewTopic} topic - Topic to create.
132 | * @param {number} timeout - Number of milliseconds to wait while trying to create the topic.
133 | * @param {function} cb - The callback to be executed when finished
134 | */
135 | AdminClient.prototype.createTopic = function(topic, timeout, cb) {
136 | if (!this._isConnected) {
137 | throw new Error('Client is disconnected');
138 | }
139 |
140 | if (typeof timeout === 'function') {
141 | cb = timeout;
142 | timeout = 5000;
143 | }
144 |
145 | if (!timeout) {
146 | timeout = 5000;
147 | }
148 |
149 | this._client.createTopic(topic, timeout, function(err) {
150 | if (err) {
151 | if (cb) {
152 | cb(LibrdKafkaError.create(err));
153 | }
154 | return;
155 | }
156 |
157 | if (cb) {
158 | cb();
159 | }
160 | });
161 | };
162 |
163 | /**
164 | * Delete a topic.
165 | *
166 | * @param {string} topic - The topic to delete, by name.
167 | * @param {number} timeout - Number of milliseconds to wait while trying to delete the topic.
168 | * @param {function} cb - The callback to be executed when finished
169 | */
170 | AdminClient.prototype.deleteTopic = function(topic, timeout, cb) {
171 | if (!this._isConnected) {
172 | throw new Error('Client is disconnected');
173 | }
174 |
175 | if (typeof timeout === 'function') {
176 | cb = timeout;
177 | timeout = 5000;
178 | }
179 |
180 | if (!timeout) {
181 | timeout = 5000;
182 | }
183 |
184 | this._client.deleteTopic(topic, timeout, function(err) {
185 | if (err) {
186 | if (cb) {
187 | cb(LibrdKafkaError.create(err));
188 | }
189 | return;
190 | }
191 |
192 | if (cb) {
193 | cb();
194 | }
195 | });
196 | };
197 |
198 | /**
199 | * Create new partitions for a topic.
200 | *
201 | * @param {string} topic - The topic to add partitions to, by name.
202 | * @param {number} totalPartitions - The total number of partitions the topic should have
203 | * after the request
204 | * @param {number} timeout - Number of milliseconds to wait while trying to create the partitions.
205 | * @param {function} cb - The callback to be executed when finished
206 | */
207 | AdminClient.prototype.createPartitions = function(topic, totalPartitions, timeout, cb) {
208 | if (!this._isConnected) {
209 | throw new Error('Client is disconnected');
210 | }
211 |
212 | if (typeof timeout === 'function') {
213 | cb = timeout;
214 | timeout = 5000;
215 | }
216 |
217 | if (!timeout) {
218 | timeout = 5000;
219 | }
220 |
221 | this._client.createPartitions(topic, totalPartitions, timeout, function(err) {
222 | if (err) {
223 | if (cb) {
224 | cb(LibrdKafkaError.create(err));
225 | }
226 | return;
227 | }
228 |
229 | if (cb) {
230 | cb();
231 | }
232 | });
233 | };
234 |
--------------------------------------------------------------------------------
/lib/index.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var KafkaConsumer = require('./kafka-consumer');
11 | var Producer = require('./producer');
12 | var HighLevelProducer = require('./producer/high-level-producer');
13 | var error = require('./error');
14 | var util = require('util');
15 | var lib = require('../librdkafka');
16 | var Topic = require('./topic');
17 | var Admin = require('./admin');
18 | var features = lib.features().split(',');
19 |
20 | module.exports = {
21 | Consumer: util.deprecate(KafkaConsumer, 'Use KafkaConsumer instead. This may be changed in a later version'),
22 | Producer: Producer,
23 | HighLevelProducer: HighLevelProducer,
24 | AdminClient: Admin,
25 | KafkaConsumer: KafkaConsumer,
26 | createReadStream: KafkaConsumer.createReadStream,
27 | createWriteStream: Producer.createWriteStream,
28 | CODES: {
29 | ERRORS: error.codes,
30 | },
31 | Topic: Topic,
32 | features: features,
33 | librdkafkaVersion: lib.librdkafkaVersion
34 | };
35 |
--------------------------------------------------------------------------------
/lib/producer-stream.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | 'use strict';
11 |
12 | module.exports = ProducerStream;
13 |
14 | var Writable = require('stream').Writable;
15 | var util = require('util');
16 | var ErrorCode = require('./error').codes;
17 |
18 | util.inherits(ProducerStream, Writable);
19 |
20 | /**
21 | * Writable stream integrating with the Kafka Producer.
22 | *
23 | * This class is used to write data to Kafka in a streaming way. It takes
24 | * buffers of data and puts them into the appropriate Kafka topic. If you need
25 | * finer control over partitions or keys, this is probably not the class for
26 | * you. In that situation just use the Producer itself.
27 | *
28 | * The stream detects if Kafka is already connected. You can safely begin
29 | * writing right away.
30 | *
31 | * This stream does not operate in Object mode and can only be given buffers.
32 | *
33 |  * @param {Producer} producer - The Kafka Producer object.
34 |  * @param {object} options - Stream options, including the topic to
35 |  *        produce to when not operating in object mode.
36 | * @constructor
37 | * @extends stream.Writable
38 | */
39 | function ProducerStream(producer, options) {
40 | if (!(this instanceof ProducerStream)) {
41 | return new ProducerStream(producer, options);
42 | }
43 |
44 | if (options === undefined) {
45 | options = {};
46 | } else if (typeof options === 'string') {
47 | options = { encoding: options };
48 | } else if (options === null || typeof options !== 'object') {
49 | throw new TypeError('"streamOptions" argument must be a string or an object');
50 | }
51 |
52 | if (!options.objectMode && !options.topic) {
53 | throw new TypeError('ProducerStreams not using objectMode must provide a topic to produce to.');
54 | }
55 |
56 | if (options.objectMode !== true) {
57 | this._write = this._write_buffer;
58 | } else {
59 | this._write = this._write_message;
60 | }
61 |
62 | Writable.call(this, options);
63 |
64 | this.producer = producer;
65 | this.topicName = options.topic;
66 |
67 | this.autoClose = options.autoClose === undefined ? true : !!options.autoClose;
68 | this.connectOptions = options.connectOptions || {};
69 |
70 | this.producer.setPollInterval(options.pollInterval || 1000);
71 |
72 | if (options.encoding) {
73 | this.setDefaultEncoding(options.encoding);
74 | }
75 |
76 | // Connect to the producer. Unless we are already connected
77 | if (!this.producer.isConnected()) {
78 | this.connect(this.connectOptions);
79 | }
80 |
81 | var self = this;
82 |
83 | this.once('finish', function() {
84 | if (this.autoClose) {
85 | this.close();
86 | }
87 | });
88 |
89 | }
90 |
91 | ProducerStream.prototype.connect = function(options) {
92 | this.producer.connect(options, function(err, data) {
93 | if (err) {
94 | this.emit('error', err);
95 | return;
96 | }
97 |
98 | }.bind(this));
99 | };
100 |
101 | /**
102 | * Internal stream write method for ProducerStream when writing buffers.
103 | *
104 | * This method should never be called externally. It has some recursion to
105 | * handle cases where the producer is not yet connected.
106 | *
107 | * @param {buffer} chunk - Chunk to write.
108 | * @param {string} encoding - Encoding for the buffer
109 | * @param {Function} cb - Callback to call when the stream is done processing
110 | * the data.
111 | * @private
112 | * @see https://github.com/nodejs/node/blob/master/lib/fs.js#L1901
113 | */
114 | ProducerStream.prototype._write_buffer = function(data, encoding, cb) {
115 | if (!(data instanceof Buffer)) {
116 | this.emit('error', new Error('Invalid data. Can only produce buffers'));
117 | return;
118 | }
119 |
120 | var self = this;
121 |
122 | if (!this.producer.isConnected()) {
123 | this.producer.once('ready', function() {
124 | self._write(data, encoding, cb);
125 | });
126 | return;
127 | }
128 |
129 | try {
130 | this.producer.produce(self.topicName, null, data, null);
131 | setImmediate(cb);
132 | } catch (e) {
133 | if (ErrorCode.ERR__QUEUE_FULL === e.code) {
134 | // Poll for good measure
135 | self.producer.poll();
136 |
137 | // Just delay this thing a bit and pass the params
138 | // backpressure will get exerted this way.
139 | setTimeout(function() {
140 | self._write(data, encoding, cb);
141 | }, 500);
142 | } else {
143 | if (self.autoClose) {
144 | self.close();
145 | }
146 | setImmediate(function() {
147 | cb(e);
148 | });
149 | }
150 | }
151 | };
152 |
153 | /**
154 | * Internal stream write method for ProducerStream when writing objects.
155 | *
156 | * This method should never be called externally. It has some recursion to
157 | * handle cases where the producer is not yet connected.
158 | *
159 | * @param {object} message - Message to write.
160 | * @param {string} encoding - Encoding for the buffer
161 | * @param {Function} cb - Callback to call when the stream is done processing
162 | * the data.
163 | * @private
164 | * @see https://github.com/nodejs/node/blob/master/lib/fs.js#L1901
165 | */
166 | ProducerStream.prototype._write_message = function(message, encoding, cb) {
167 | var self = this;
168 |
169 | if (!this.producer.isConnected()) {
170 | this.producer.once('ready', function() {
171 | self._write(message, encoding, cb);
172 | });
173 | return;
174 | }
175 |
176 | try {
177 | this.producer.produce(message.topic, message.partition, message.value, message.key, message.timestamp, message.opaque, message.headers);
178 | setImmediate(cb);
179 | } catch (e) {
180 | if (ErrorCode.ERR__QUEUE_FULL === e.code) {
181 | // Poll for good measure
182 | self.producer.poll();
183 |
184 | // Just delay this thing a bit and pass the params
185 | // backpressure will get exerted this way.
186 | setTimeout(function() {
187 | self._write(message, encoding, cb);
188 | }, 500);
189 | } else {
190 | if (self.autoClose) {
191 | self.close();
192 | }
193 | setImmediate(function() {
194 | cb(e);
195 | });
196 | }
197 | }
198 | };
199 |
200 | function writev(producer, topic, chunks, cb) {
201 |
202 | // @todo maybe a produce batch method?
203 | var doneCount = 0;
204 | var err = null;
205 | var chunk = null;
206 |
207 | function maybeDone(e) {
208 | if (e) {
209 | err = e;
210 | }
211 | doneCount ++;
212 | if (doneCount === chunks.length) {
213 | cb(err);
214 | }
215 | }
216 |
217 | function retry(restChunks) {
218 | // Poll for good measure
219 | producer.poll();
220 |
221 | // Just delay this thing a bit and pass the params
222 | // backpressure will get exerted this way.
223 | setTimeout(function() {
224 | writev(producer, topic, restChunks, cb);
225 | }, 500);
226 | }
227 |
228 | for (var i = 0; i < chunks.length; i++) {
229 | chunk = chunks[i];
230 |
231 | try {
232 | if (Buffer.isBuffer(chunk)) {
233 | producer.produce(topic, null, chunk, null);
234 | } else {
235 | producer.produce(chunk.topic, chunk.partition, chunk.value, chunk.key, chunk.timestamp, chunk.opaque, chunk.headers);
236 | }
237 | maybeDone();
238 | } catch (e) {
239 | if (ErrorCode.ERR__QUEUE_FULL === e.code) {
240 | retry(chunks.slice(i));
241 | } else {
242 | cb(e);
243 | }
244 | break;
245 | }
246 | }
247 |
248 | }
249 |
250 | ProducerStream.prototype._writev = function(data, cb) {
251 | if (!this.producer.isConnected()) {
252 | this.once('ready', function() {
253 | this._writev(data, cb);
254 | });
255 | return;
256 | }
257 |
258 | var self = this;
259 | var len = data.length;
260 | var chunks = new Array(len);
261 | var size = 0;
262 |
263 | for (var i = 0; i < len; i++) {
264 | var chunk = data[i].chunk;
265 |
266 | chunks[i] = chunk;
267 | size += chunk.length;
268 | }
269 |
270 | writev(this.producer, this.topicName, chunks, function(err) {
271 | if (err) {
272 | self.close();
273 | cb(err);
274 | return;
275 | }
276 | cb();
277 | });
278 |
279 | };
280 |
281 | ProducerStream.prototype.close = function(cb) {
282 | var self = this;
283 | if (cb) {
284 | this.once('close', cb);
285 | }
286 |
287 |   // Use internal variables in here
288 | if (self.producer._isConnected) {
289 | self.producer.disconnect(function() {
290 | // Previously this set the producer to null. I'm not sure there is any benefit
291 | // to that other than I guess helping flag it for GC?
292 | // https://github.com/Blizzard/node-rdkafka/issues/344
293 | close();
294 | });
295 | } else if (self.producer._isConnecting){
296 | self.producer.once('ready', function() {
297 | // Don't pass CB this time because it has already been passed
298 | self.close();
299 | });
300 | } else {
301 | setImmediate(close);
302 | }
303 |
304 | function close() {
305 | self.emit('close');
306 | }
307 | };
308 |
--------------------------------------------------------------------------------
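
Usage sketch (illustrative, not a file in this repository): the write path above is typically driven through the documented Producer.createWriteStream factory; the broker address and topic name are placeholders.

    var Kafka = require('node-rdkafka');

    // createWriteStream wires a Producer into the ProducerStream above
    var stream = Kafka.Producer.createWriteStream({
      'metadata.broker.list': 'localhost:9092'
    }, {}, { topic: 'test' });

    // Buffers flow through _write_buffer; a full librdkafka queue is retried
    // internally (the ERR__QUEUE_FULL branch above), so backpressure surfaces
    // through write()'s boolean return value and the 'drain' event.
    if (!stream.write(Buffer.from('hello'))) {
      stream.once('drain', function() { /* resume writing */ });
    }

    stream.on('error', function(err) { console.error(err); });
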
/lib/producer/high-level-producer.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | module.exports = HighLevelProducer;
11 |
12 | var util = require('util');
13 | var Producer = require('../producer');
14 | var LibrdKafkaError = require('../error');
15 | var EventEmitter = require('events').EventEmitter;
16 | var RefCounter = require('../tools/ref-counter');
17 | var shallowCopy = require('../util').shallowCopy;
18 | var isObject = require('../util').isObject;
19 |
20 | util.inherits(HighLevelProducer, Producer);
21 |
22 | var noopSerializer = createSerializer(function (v) { return v; });
23 |
24 | /**
25 | * Create a serializer
26 | *
27 |  * Simply wraps a serializer provided by the user so that errors
28 |  * thrown during serialization carry additional context.
29 |  *
30 |  * @returns {object} Serialization wrapper with `apply` and `async` properties
31 | */
32 | function createSerializer(serializer) {
33 | var applyFn = function serializationWrapper(v, cb) {
34 | try {
35 | return cb ? serializer(v, cb) : serializer(v);
36 | } catch (e) {
37 | var modifiedError = new Error('Could not serialize value: ' + e.message);
38 | modifiedError.value = v;
39 | modifiedError.serializer = serializer;
40 | throw modifiedError;
41 | }
42 | };
43 |
44 | // We can check how many parameters the function has and activate the asynchronous
45 | // operation if the number of parameters the function accepts is > 1
46 | return {
47 | apply: applyFn,
48 | async: serializer.length > 1
49 | };
50 | }
51 |
52 | /**
53 | * Producer class for sending messages to Kafka in a higher level fashion
54 | *
55 | * This is the main entry point for writing data to Kafka if you want more
56 | * functionality than librdkafka supports out of the box. You
57 | * configure this like you do any other client, with a global
58 | * configuration and default topic configuration.
59 | *
60 | * Once you instantiate this object, you need to connect to it first.
61 | * This allows you to get the metadata and make sure the connection
62 |  * can be made before you depend on it. After that, problems with
63 |  * the connection will be surfaced by polling, which automatically
64 |  * runs whenever an operation is performed on the object.
65 | *
66 | * This has a few restrictions, so it is not for free!
67 | *
68 | * 1. You may not define opaque tokens
69 | * The higher level producer is powered by opaque tokens.
70 | * 2. Every message ack will dispatch an event on the node thread.
71 |  * 3. A ref counter is used to determine whether there are outstanding produces.
72 | *
73 | * This will return the new object you should use instead when doing your
74 | * produce calls
75 | *
76 | * @param {object} conf - Key value pairs to configure the producer
77 | * @param {object} topicConf - Key value pairs to create a default
78 | * topic configuration
79 | * @extends Producer
80 | * @constructor
81 | */
82 | function HighLevelProducer(conf, topicConf) {
83 | if (!(this instanceof HighLevelProducer)) {
84 | return new HighLevelProducer(conf, topicConf);
85 | }
86 |
87 | // Force this to be true for the high level producer
88 | conf = shallowCopy(conf);
89 | conf.dr_cb = true;
90 |
91 |   // producer is an initialized producer object
92 | // @see NodeKafka::Producer::Init
93 | Producer.call(this, conf, topicConf);
94 | var self = this;
95 |
96 | // Add a delivery emitter to the producer
97 | this._hl = {
98 | deliveryEmitter: new EventEmitter(),
99 | messageId: 0,
100 | // Special logic for polling. We use a reference counter to know when we need
101 | // to be doing it and when we can stop. This means when we go into fast polling
102 | // mode we don't need to do multiple calls to poll since they all will yield
103 | // the same result
104 | pollingRefTimeout: null,
105 | };
106 |
107 | // Add the polling ref counter to the class which ensures we poll when we go active
108 | this._hl.pollingRef = new RefCounter(function() {
109 | self._hl.pollingRefTimeout = setInterval(function() {
110 | try {
111 | self.poll();
112 | } catch (e) {
113 | if (!self._isConnected) {
114 | // If we got disconnected for some reason there is no point
115 | // in polling anymore
116 | clearInterval(self._hl.pollingRefTimeout);
117 | }
118 | }
119 | }, 1);
120 | }, function() {
121 | clearInterval(self._hl.pollingRefTimeout);
122 | });
123 |
124 |   // Default poll interval. More sophisticated polling is also done by the ref counter above
125 | this.setPollInterval(1000);
126 |
127 | // Listen to all delivery reports to propagate elements with a _message_id to the emitter
128 | this.on('delivery-report', function(err, report) {
129 | if (report.opaque && report.opaque.__message_id !== undefined) {
130 | self._hl.deliveryEmitter.emit(report.opaque.__message_id, err, report.offset);
131 | }
132 | });
133 |
134 | // Save old produce here since we are making some modifications for it
135 | this._oldProduce = this.produce;
136 | this.produce = this._modifiedProduce;
137 |
138 | // Serializer information
139 | this.keySerializer = noopSerializer;
140 | this.valueSerializer = noopSerializer;
141 | }
142 |
143 | /**
144 | * Produce a message to Kafka asynchronously.
145 | *
146 | * This is the method mainly used in this class. Use it to produce
147 | * a message to Kafka.
148 | *
149 |  * When this is sent off, and you receive your callback, the assurances afforded
150 | * to you will be equal to those provided by your ack level.
151 | *
152 | * @param {string} topic - The topic name to produce to.
153 | * @param {number|null} partition - The partition number to produce to.
154 | * @param {Buffer|null} message - The message to produce.
155 | * @param {string} key - The key associated with the message.
156 | * @param {number|null} timestamp - Timestamp to send with the message.
157 | * @param {object} headers - A list of custom key value pairs that provide message metadata.
158 |  * @param {function} callback - Callback to call when the delivery report is received.
159 | * @throws {LibrdKafkaError} - Throws a librdkafka error if it failed.
160 |  * @return {boolean} - the return value of the underlying produce call
161 | * @see Producer#produce
162 | */
163 | HighLevelProducer.prototype._modifiedProduce = function(topic, partition, message, key, timestamp, headers, callback) {
164 | // headers are optional
165 | if (arguments.length === 6) {
166 | callback = headers;
167 | headers = undefined;
168 | }
169 |
170 | // Add the message id
171 | var opaque = {
172 | __message_id: this._hl.messageId++,
173 | };
174 |
175 | this._hl.pollingRef.increment();
176 |
177 | var self = this;
178 |
179 | var resolvedSerializedValue;
180 | var resolvedSerializedKey;
181 | var calledBack = false;
182 |
183 |   // Actually do the produce with the new key and value based on the
184 |   // serialized results
185 | function doProduce(v, k) {
186 | try {
187 | var r = self._oldProduce(topic, partition,
188 | v, k,
189 | timestamp, opaque, headers);
190 |
191 | self._hl.deliveryEmitter.once(opaque.__message_id, function(err, offset) {
192 | self._hl.pollingRef.decrement();
193 | setImmediate(function() {
194 | // Offset must be greater than or equal to 0 otherwise it is a null offset
195 | // Possibly because we have acks off
196 | callback(err, offset >= 0 ? offset : null);
197 | });
198 | });
199 |
200 | return r;
201 | } catch (e) {
202 | callback(e);
203 | }
204 | }
205 |
206 | function produceIfComplete() {
207 | if (resolvedSerializedKey !== undefined && resolvedSerializedValue !== undefined) {
208 | doProduce(resolvedSerializedValue, resolvedSerializedKey);
209 | }
210 | }
211 |
212 | // To run on a promise if returned by the serializer
213 | function finishSerializedValue(v) {
214 | if (!calledBack) {
215 | resolvedSerializedValue = v;
216 | produceIfComplete();
217 | }
218 | }
219 |
220 |   // To run on a promise if returned by the serializer
221 | function finishSerializedKey(k) {
222 | resolvedSerializedKey = k;
223 |
224 | if (!calledBack) {
225 | produceIfComplete();
226 | }
227 | }
228 |
229 | function failSerializedValue(err) {
230 | if (!calledBack) {
231 | calledBack = true;
232 | callback(err);
233 | }
234 | }
235 |
236 | function failSerializedKey(err) {
237 | if (!calledBack) {
238 | calledBack = true;
239 | callback(err);
240 | }
241 | }
242 |
243 | function valueSerializerCallback(err, v) {
244 | if (err) {
245 | failSerializedValue(err);
246 | } else {
247 | finishSerializedValue(v);
248 | }
249 | }
250 |
251 | function keySerializerCallback(err, v) {
252 | if (err) {
253 | failSerializedKey(err);
254 | } else {
255 | finishSerializedKey(v);
256 | }
257 | }
258 |
259 | try {
260 | if (this.valueSerializer.async) {
261 | // If this is async we need to give it a callback
262 | this.valueSerializer.apply(message, valueSerializerCallback);
263 | } else {
264 | var serializedValue = this.valueSerializer.apply(message);
265 | // Check if we were returned a promise in order to support promise behavior
266 | if (serializedValue &&
267 | typeof serializedValue.then === 'function' &&
268 | typeof serializedValue.catch === 'function') {
269 | // This is a promise. We need to hook into its then and catch
270 | serializedValue.then(finishSerializedValue).catch(failSerializedValue);
271 | } else {
272 | resolvedSerializedValue = serializedValue;
273 | }
274 | }
275 |
276 | if (this.keySerializer.async) {
277 | // If this is async we need to give it a callback
278 | this.keySerializer.apply(key, keySerializerCallback);
279 | } else {
280 | var serializedKey = this.keySerializer.apply(key);
281 | // Check if we were returned a promise in order to support promise behavior
282 | if (serializedKey &&
283 | typeof serializedKey.then === 'function' &&
284 | typeof serializedKey.catch === 'function') {
285 | // This is a promise. We need to hook into its then and catch
286 | serializedKey.then(finishSerializedKey).catch(failSerializedKey);
287 | } else {
288 | resolvedSerializedKey = serializedKey;
289 | }
290 | }
291 |
292 | // Only do the produce here if we are complete. That is, if the key
293 | // and value have been serialized.
294 | produceIfComplete();
295 | } catch (e) {
296 | setImmediate(function() {
297 | calledBack = true;
298 | callback(e);
299 | });
300 | }
301 | };
302 |
303 | /**
304 | * Set the key serializer
305 | *
306 |  * This allows the key passed to the produce call to differ from the key
307 |  * actually produced to Kafka. Good if, for example, you want to serialize
308 | * it to a particular format.
309 | */
310 | HighLevelProducer.prototype.setKeySerializer = function(serializer) {
311 | this.keySerializer = createSerializer(serializer);
312 | };
313 |
314 | /**
315 | * Set the value serializer
316 | *
317 |  * This allows the value passed to the produce call to differ from the value
318 |  * actually produced to Kafka. Good if, for example, you want to serialize
319 | * it to a particular format.
320 | */
321 | HighLevelProducer.prototype.setValueSerializer = function(serializer) {
322 | this.valueSerializer = createSerializer(serializer);
323 | };
324 |
--------------------------------------------------------------------------------
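
Usage sketch (illustrative, not a file in this repository): the serializer hooks defined above accept synchronous functions, callback-style functions (arity > 1, which createSerializer marks async), and promise-returning functions. Broker address and topic are placeholders.

    var Kafka = require('node-rdkafka');

    var producer = new Kafka.HighLevelProducer({
      'metadata.broker.list': 'localhost:9092'
    });

    // Synchronous serializer: plain return value
    producer.setValueSerializer(function(v) {
      return Buffer.from(JSON.stringify(v));
    });

    // Callback serializer: two parameters, so it is treated as asynchronous
    producer.setKeySerializer(function(k, cb) {
      cb(null, k === null ? null : Buffer.from(String(k)));
    });

    producer.connect();
    producer.on('ready', function() {
      // The callback fires when the delivery report for this message arrives
      producer.produce('test', null, { hello: 'world' }, 'key', Date.now(),
        function(err, offset) {
          if (err) { console.error(err); } else { console.log('offset', offset); }
        });
    });
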
/lib/tools/ref-counter.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | module.exports = RefCounter;
11 |
12 | /**
13 | * Ref counter class.
14 | *
15 |  * Used to track transitions between active and inactive states, and to run
16 |  * a callback that hooks into each transition.
17 | *
18 | * For the producer, it is used to begin rapid polling after a produce until
19 | * the delivery report is dispatched.
20 | */
21 | function RefCounter(onActive, onPassive) {
22 | this.context = {};
23 | this.onActive = onActive;
24 | this.onPassive = onPassive;
25 | this.currentValue = 0;
26 | this.isRunning = false;
27 | }
28 |
29 | /**
30 | * Increment the ref counter
31 | */
32 | RefCounter.prototype.increment = function() {
33 | this.currentValue += 1;
34 |
35 |   // If the current value rises above 0, fire the activation callback
36 | if (this.currentValue > 0 && !this.isRunning) {
37 | this.isRunning = true;
38 | this.onActive(this.context);
39 | }
40 | };
41 |
42 | /**
43 | * Decrement the ref counter
44 | */
45 | RefCounter.prototype.decrement = function() {
46 | this.currentValue -= 1;
47 |
48 | if (this.currentValue <= 0 && this.isRunning) {
49 | this.isRunning = false;
50 | this.onPassive(this.context);
51 | }
52 | };
53 |
--------------------------------------------------------------------------------
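
Illustration (not a file in this repository): the transition behavior of the RefCounter above. The callbacks fire only on the 0 -> 1 and 1 -> 0 edges, which is how the high-level producer starts and stops its fast-poll interval.

    var RefCounter = require('./lib/tools/ref-counter');  // path from the repo root

    var counter = new RefCounter(
      function onActive() { console.log('first outstanding item: start polling'); },
      function onPassive() { console.log('none outstanding: stop polling'); });

    counter.increment();  // 0 -> 1: onActive fires
    counter.increment();  // 1 -> 2: no callback
    counter.decrement();  // 2 -> 1: no callback
    counter.decrement();  // 1 -> 0: onPassive fires
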
/lib/topic-partition.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Topic = require('./topic');
11 |
12 | module.exports = TopicPartition;
13 |
14 | /**
15 | * Map an array of topic partition js objects to real topic partition objects.
16 | *
17 | * @param array The array of topic partition raw objects to map to topic
18 | * partition objects
19 | */
20 | TopicPartition.map = function(array) {
21 | return array.map(function(element) {
22 | return TopicPartition.create(element);
23 | });
24 | };
25 |
26 | /**
27 | * Take a topic partition javascript object and convert it to the class.
28 | * The class will automatically convert offset identifiers to special constants
29 | *
30 | * @param element The topic partition raw javascript object
31 | */
32 | TopicPartition.create = function(element) {
33 |   // Just ensure we take something that can have properties. The TopicPartition
34 |   // class will handle the rest of the validation
35 | element = element || {};
36 | return new TopicPartition(element.topic, element.partition, element.offset);
37 | };
38 |
39 | /**
40 | * Create a topic partition. Just does some validation and decoration
41 | * on topic partitions provided.
42 | *
43 | * Goal is still to behave like a plain javascript object but with validation
44 | * and potentially some extra methods
45 | */
46 | function TopicPartition(topic, partition, offset) {
47 | if (!(this instanceof TopicPartition)) {
48 | return new TopicPartition(topic, partition, offset);
49 | }
50 |
51 | // Validate that the elements we are iterating over are actual topic partition
52 | // js objects. They do not need an offset, but they do need partition
53 | if (!topic) {
54 | throw new TypeError('"topic" must be a string and must be set');
55 | }
56 |
57 | if (partition === null || partition === undefined) {
58 |     throw new TypeError('"partition" must be a number and must be set');
59 | }
60 |
61 | // We can just set topic and partition as they stand.
62 | this.topic = topic;
63 | this.partition = partition;
64 |
65 | if (offset === undefined || offset === null) {
66 | this.offset = Topic.OFFSET_STORED;
67 | } else if (typeof offset === 'string') {
68 | switch (offset.toLowerCase()) {
69 | case 'earliest':
70 | case 'beginning':
71 | this.offset = Topic.OFFSET_BEGINNING;
72 | break;
73 | case 'latest':
74 | case 'end':
75 | this.offset = Topic.OFFSET_END;
76 | break;
77 | case 'stored':
78 | this.offset = Topic.OFFSET_STORED;
79 | break;
80 | default:
81 |         throw new TypeError('"offset", if provided as a string, must be earliest, beginning, latest, end, or stored.');
82 | }
83 | } else if (typeof offset === 'number') {
84 | this.offset = offset;
85 | } else {
86 | throw new TypeError('"offset" must be a special string or number if it is set');
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
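
Illustration (not a file in this repository): the offset normalization performed by the constructor above.

    var TopicPartition = require('./lib/topic-partition');  // paths from the repo root
    var Topic = require('./lib/topic');

    // String offsets map to librdkafka's special constants
    var tp = TopicPartition.create({ topic: 'test', partition: 0, offset: 'beginning' });
    // tp.offset === Topic.OFFSET_BEGINNING

    // Omitted offsets default to the stored offset
    var tp2 = new TopicPartition('test', 1);
    // tp2.offset === Topic.OFFSET_STORED

    // Numeric offsets pass through unchanged
    var tp3 = new TopicPartition('test', 2, 42);
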
/lib/topic.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var librdkafka = require('../librdkafka');
11 |
12 | module.exports = Topic;
13 |
14 | var topicKey = 'RdKafka::Topic::';
15 | var topicKeyLength = topicKey.length;
16 |
17 | // Take all of the topic special codes from librdkafka and add them
18 | // to the object
19 | // You can find this list in the C++ code at
20 | // https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L1250
21 | for (var key in librdkafka.topic) {
22 |   // Skip it if it doesn't start with the RdKafka::Topic:: prefix
23 | if (key.indexOf('RdKafka::Topic::') !== 0) {
24 | continue;
25 | }
26 |
27 |   // Strip the prefix and attach the constant to the Topic function
28 | var newKey = key.substring(topicKeyLength);
29 | Topic[newKey] = librdkafka.topic[key];
30 | }
31 |
32 | /**
33 | * Create a topic. Just returns the string you gave it right now.
34 | *
35 | * Looks like a class, but all it does is return the topic name.
36 | * This is so that one day if there are interface changes that allow
37 | * different use of topic parameters, we can just add to this constructor and
38 | * have it return something richer
39 | */
40 | function Topic(topicName) {
41 | return topicName;
42 | }
43 |
--------------------------------------------------------------------------------
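
Usage sketch (illustrative, not a file in this repository): the constants copied onto Topic above are what consumers use to request absolute positions, e.g. with a connected KafkaConsumer named consumer:

    var Topic = require('./lib/topic');  // path from the repo root

    // Start reading partition 0 from the very beginning
    consumer.assign([{
      topic: 'test',
      partition: 0,
      offset: Topic.OFFSET_BEGINNING
    }]);
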
/lib/util.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var util = module.exports = {};
11 |
12 | util.shallowCopy = function (obj) {
13 |
14 | if (!util.isObject(obj)) { return obj; }
15 |
16 | var copy = {};
17 |
18 | for (var k in obj) {
19 | if (obj.hasOwnProperty(k)) {
20 | copy[k] = obj[k];
21 | }
22 | }
23 |
24 | return copy;
25 | };
26 |
27 | util.isObject = function (obj) {
28 | return obj && typeof obj === 'object';
29 | };
30 |
--------------------------------------------------------------------------------
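
Illustration (not a file in this repository): shallowCopy clones only one level, which is exactly what HighLevelProducer relies on when it forces dr_cb to true without mutating the caller's config object. Nested objects remain shared references.

    var util = require('./lib/util');  // path from the repo root

    var conf = { 'dr_cb': false, nested: { a: 1 } };
    var copy = util.shallowCopy(conf);

    copy.dr_cb = true;     // does not affect conf.dr_cb
    copy.nested.a = 2;     // shared reference: conf.nested.a is now 2 as well
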
/librdkafka.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var kafka = require('bindings')('node-librdkafka');
11 |
12 | module.exports = kafka;
13 |
--------------------------------------------------------------------------------
/make_docs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [[ `git status --porcelain` ]]; then
4 | # changes
5 | >&2 echo "You have unstaged changes. Please commit before you run this."
6 | exit 1
7 | fi
8 |
9 | # REPO=git@github.com:Blizzard/node-rdkafka.git
10 | REPO=https://github.com/Blizzard/node-rdkafka.git
11 |
12 | git remote add deploy $REPO
13 |
14 | # Get the most recent stuff if we don't have it
15 | git fetch deploy gh-pages || exit $?
16 |
17 | make docs || exit $?
18 |
19 | # Get package version and save to variable
20 |
21 | PACKAGE=$(node -pe 'require("./package.json").name.split("/")[1]')
22 | VERSION=$(node -pe 'require("./package.json").version')
23 |
24 | # Make a temporary folder
25 |
26 | TEMPDIR=$(mktemp -d)
27 |
28 | VERSIONDIR="$TEMPDIR/$VERSION"
29 | cp -r docs $VERSIONDIR
30 |
31 | # Now, checkout the gh-pages, but first get current checked out branch
32 | #
33 |
34 | CURRENT_BRANCH=$(git rev-parse --symbolic-full-name --abbrev-ref HEAD)
35 |
36 | COMMIT_MESSAGE=$(git log --pretty='format:%B' -1)
37 | COMMIT_AUTHOR=$(git log --pretty='format:%aN <%aE>' -1)
38 |
39 | if ! git checkout --quiet -b gh-pages deploy/gh-pages; then
40 | >&2 echo "Could not checkout gh-pages"
41 | exit 1
42 | fi
43 |
44 | rm -rf current
45 | rm -rf $VERSION
46 |
47 | cp -r $VERSIONDIR $VERSION
48 | cp -r $VERSIONDIR current
49 |
50 | git add --all
51 | git commit --author="$COMMIT_AUTHOR" -m "Updated docs for '$COMMIT_MESSAGE'"
52 |
53 | rm -rf $TEMPDIR
54 |
55 | git push $REPO gh-pages || exit $?
56 |
57 | git checkout $CURRENT_BRANCH
58 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "node-rdkafka",
3 | "version": "v3.4.0",
4 | "description": "Node.js bindings for librdkafka",
5 | "librdkafka": "2.10.0",
6 | "main": "lib/index.js",
7 | "scripts": {
8 | "configure": "node-gyp configure",
9 | "build": "node-gyp build",
10 | "test": "make test",
11 | "install": "node-gyp rebuild",
12 | "prepack": "node ./ci/prepublish.js"
13 | },
14 | "keywords": [
15 | "kafka",
16 | "librdkafka"
17 | ],
18 | "repository": {
19 | "type": "git",
20 | "url": "git@github.com:Blizzard/node-rdkafka.git"
21 | },
22 | "contributors": [
23 | {
24 | "name": "Stephen Parente",
25 | "email": "webmakersteve@gmail.com"
26 | },
27 | {
28 | "name": "Matt Gollob",
29 | "email": "mattness@users.noreply.github.com"
30 | }
31 | ],
32 | "license": "MIT",
33 | "devDependencies": {
34 | "bluebird": "^3.5.3",
35 | "jsdoc": "^4.0.2",
36 | "jshint": "^2.10.1",
37 | "mocha": "^10.2.0",
38 | "node-gyp": "^11.1.0",
39 | "toolkit-jsdoc": "^1.0.0"
40 | },
41 | "dependencies": {
42 | "bindings": "^1.3.1",
43 | "nan": "^2.22.0"
44 | },
45 | "engines": {
46 | "node": ">=16"
47 | }
48 | }
--------------------------------------------------------------------------------
/run_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | COMPOSE_VERSION=$(docker-compose --version)
4 | DOCKER_VERSION=$(docker --version)
5 |
6 | # Start the docker compose file
7 | echo "Running docker compose up. Docker version $DOCKER_VERSION. Compose version $COMPOSE_VERSION. "
8 |
9 | docker-compose up -d
10 |
11 | if [ "$?" == "1" ]; then
12 | echo "Failed to start docker images."
13 | exit 1
14 | fi
15 |
16 | # List of topics to create in container
17 | topics=(
18 | "test"
19 | "test2"
20 | "test3"
21 | "test4"
22 | "test5"
23 | "test6"
24 | )
25 |
26 | # Run docker-compose exec to make them
27 | for topic in "${topics[@]}"
28 | do
29 | echo "Making topic $topic"
30 | until docker-compose exec kafka \
31 | kafka-topics --create --topic $topic --partitions 1 --replication-factor 1 --if-not-exists --zookeeper zookeeper:2181
32 | do
33 | topic_result="$?"
34 | if [ "$topic_result" == "1" ]; then
35 | echo "Bad status code: $topic_result. Trying again."
36 | else
37 | # If it is some unknown status code, die.
38 | exit 1
39 | fi
40 | done
41 |
42 | done
43 |
--------------------------------------------------------------------------------
/src/admin.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_ADMIN_H_
11 | #define SRC_ADMIN_H_
12 |
13 | #include <nan.h>
14 | #include <uv.h>
15 | #include <iostream>
16 | #include <string>
17 | #include <vector>
18 |
19 | #include "rdkafkacpp.h"
20 | #include "rdkafka.h" // NOLINT
21 |
22 | #include "src/common.h"
23 | #include "src/connection.h"
24 | #include "src/callbacks.h"
25 |
26 | namespace NodeKafka {
27 |
28 | /**
29 |  * @brief AdminClient v8 wrapped object.
30 |  *
31 |  * Specializes the connection to wrap an admin client through compositional
32 |  * inheritance. Establishes its prototype in node through `Init`
33 | *
34 | * @sa RdKafka::Handle
35 | * @sa NodeKafka::Client
36 | */
37 |
38 | class AdminClient : public Connection {
39 | public:
40 |   static void Init(v8::Local<v8::Object>);
41 |   static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);
42 |
43 | void ActivateDispatchers();
44 | void DeactivateDispatchers();
45 |
46 | Baton Connect();
47 | Baton Disconnect();
48 |
49 | Baton CreateTopic(rd_kafka_NewTopic_t* topic, int timeout_ms);
50 | Baton DeleteTopic(rd_kafka_DeleteTopic_t* topic, int timeout_ms);
51 | Baton CreatePartitions(rd_kafka_NewPartitions_t* topic, int timeout_ms);
52 | // Baton AlterConfig(rd_kafka_NewTopic_t* topic, int timeout_ms);
53 | // Baton DescribeConfig(rd_kafka_NewTopic_t* topic, int timeout_ms);
54 |
55 | protected:
56 |   static Nan::Persistent<v8::Function> constructor;
57 |   static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
58 |
59 | explicit AdminClient(Conf* globalConfig);
60 | ~AdminClient();
61 |
62 | rd_kafka_queue_t* rkqu;
63 |
64 | private:
65 | // Node methods
66 | // static NAN_METHOD(NodeValidateTopic);
67 | static NAN_METHOD(NodeCreateTopic);
68 | static NAN_METHOD(NodeDeleteTopic);
69 | static NAN_METHOD(NodeCreatePartitions);
70 |
71 | static NAN_METHOD(NodeConnect);
72 | static NAN_METHOD(NodeDisconnect);
73 | };
74 |
75 | } // namespace NodeKafka
76 |
77 | #endif // SRC_ADMIN_H_
78 |
--------------------------------------------------------------------------------
/src/binding.cc:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #include <iostream>
11 | #include "src/binding.h"
12 |
13 | using NodeKafka::Producer;
14 | using NodeKafka::KafkaConsumer;
15 | using NodeKafka::AdminClient;
16 | using NodeKafka::Topic;
17 |
18 | using RdKafka::ErrorCode;
19 |
20 | NAN_METHOD(NodeRdKafkaErr2Str) {
21 |   int points = Nan::To<int>(info[0]).FromJust();
22 |   // Cast to error code
23 |   RdKafka::ErrorCode err = static_cast<RdKafka::ErrorCode>(points);
24 |
25 | std::string errstr = RdKafka::err2str(err);
26 |
27 | info.GetReturnValue().Set(Nan::New(errstr).ToLocalChecked());
28 | }
29 |
30 | NAN_METHOD(NodeRdKafkaBuildInFeatures) {
31 | RdKafka::Conf * config = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
32 |
33 | std::string features;
34 |
35 | if (RdKafka::Conf::CONF_OK == config->get("builtin.features", features)) {
36 | info.GetReturnValue().Set(Nan::New(features).ToLocalChecked());
37 | } else {
38 | info.GetReturnValue().Set(Nan::Undefined());
39 | }
40 |
41 | delete config;
42 | }
43 |
44 | void ConstantsInit(v8::Local<v8::Object> exports) {
45 |   v8::Local<v8::Object> topicConstants = Nan::New<v8::Object>();
46 |
47 |   // RdKafka Topic constant definitions
48 | NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::PARTITION_UA);
49 | NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_BEGINNING);
50 | NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_END);
51 | NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_STORED);
52 | NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_INVALID);
53 |
54 | Nan::Set(exports, Nan::New("topic").ToLocalChecked(), topicConstants);
55 |
56 | Nan::Set(exports, Nan::New("err2str").ToLocalChecked(),
57 |     Nan::GetFunction(Nan::New<v8::FunctionTemplate>(NodeRdKafkaErr2Str)).ToLocalChecked());  // NOLINT
58 |
59 | Nan::Set(exports, Nan::New("features").ToLocalChecked(),
60 |     Nan::GetFunction(Nan::New<v8::FunctionTemplate>(NodeRdKafkaBuildInFeatures)).ToLocalChecked());  // NOLINT
61 | }
62 |
63 | void Init(v8::Local<v8::Object> exports, v8::Local<v8::Value> m_, void* v_) {
64 | KafkaConsumer::Init(exports);
65 | Producer::Init(exports);
66 | AdminClient::Init(exports);
67 | Topic::Init(exports);
68 | ConstantsInit(exports);
69 |
70 | Nan::Set(exports, Nan::New("librdkafkaVersion").ToLocalChecked(),
71 | Nan::New(RdKafka::version_str().c_str()).ToLocalChecked());
72 | }
73 |
74 | NODE_MODULE(kafka, Init)
75 |
--------------------------------------------------------------------------------
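
Usage sketch (illustrative, not a file in this repository): the exports installed by Init and ConstantsInit above are loaded through lib/librdkafka.js, and a few are re-exported from the package root:

    var Kafka = require('node-rdkafka');

    // Populated from RdKafka::version_str() in Init above
    console.log(Kafka.librdkafkaVersion);

    // Populated from the builtin.features configuration property
    console.log(Kafka.features);
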
/src/binding.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_BINDING_H_
11 | #define SRC_BINDING_H_
12 |
13 | #include <nan.h>
14 | #include <string>
15 | #include "rdkafkacpp.h"
16 | #include "src/common.h"
17 | #include "src/errors.h"
18 | #include "src/config.h"
19 | #include "src/connection.h"
20 | #include "src/kafka-consumer.h"
21 | #include "src/producer.h"
22 | #include "src/topic.h"
23 | #include "src/admin.h"
24 |
25 | #endif // SRC_BINDING_H_
26 |
--------------------------------------------------------------------------------
/src/callbacks.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | * Copyright (c) 2016 Blizzard Entertainment
4 | *
5 | * This software may be modified and distributed under the terms
6 | * of the MIT license. See the LICENSE.txt file for details.
7 | */
8 |
9 | #ifndef SRC_CALLBACKS_H_
10 | #define SRC_CALLBACKS_H_
11 |
12 | #include <uv.h>
13 | #include <nan.h>
14 |
15 | #include <string>
16 | #include <vector>
17 |
18 | #include "rdkafkacpp.h"
19 | #include "src/common.h"
20 |
21 | namespace NodeKafka {
22 |
23 | class KafkaConsumer;
24 |
25 | namespace Callbacks {
26 |
27 | class Dispatcher {
28 | public:
29 | Dispatcher();
30 | ~Dispatcher();
31 |   void Dispatch(const int, v8::Local<v8::Value> []);
32 |   void AddCallback(const v8::Local<v8::Function>&);
33 |   void RemoveCallback(const v8::Local<v8::Function>&);
34 | bool HasCallbacks();
35 | virtual void Flush() = 0;
36 | void Execute();
37 | void Activate();
38 | void Deactivate();
39 |
40 | protected:
41 |   std::vector<v8::Persistent<v8::Function, v8::CopyablePersistentTraits<v8::Function> > > callbacks;  // NOLINT
42 |
43 | uv_mutex_t async_lock;
44 |
45 | private:
46 | NAN_INLINE static NAUV_WORK_CB(AsyncMessage_) {
47 | Dispatcher *dispatcher =
48 |       static_cast<Dispatcher*>(async->data);
49 | dispatcher->Flush();
50 | }
51 |
52 | uv_async_t *async;
53 | };
54 |
55 | struct event_t {
56 | RdKafka::Event::Type type;
57 | std::string message;
58 |
59 | RdKafka::Event::Severity severity;
60 | std::string fac;
61 |
62 | std::string broker_name;
63 | int throttle_time;
64 | int broker_id;
65 |
66 | explicit event_t(const RdKafka::Event &);
67 | ~event_t();
68 | };
69 |
70 | class EventDispatcher : public Dispatcher {
71 | public:
72 | EventDispatcher();
73 | ~EventDispatcher();
74 | void Add(const event_t &);
75 | void Flush();
76 | protected:
77 |   std::vector<event_t> events;
78 | };
79 |
80 | class Event : public RdKafka::EventCb {
81 | public:
82 | Event();
83 | ~Event();
84 | void event_cb(RdKafka::Event&);
85 | EventDispatcher dispatcher;
86 | };
87 |
88 | /**
89 | * Delivery report class
90 | *
91 | * Class exists because the callback needs to be able to give information
92 | * to the v8 main thread that it can use to formulate its object.
93 | */
94 | class DeliveryReport {
95 | public:
96 | DeliveryReport(RdKafka::Message &, bool);
97 | ~DeliveryReport();
98 |
99 | // Whether we include the payload. Is the second parameter to the constructor
100 | bool m_include_payload;
101 |
102 | // If it is an error these will be set
103 | bool is_error;
104 | std::string error_string;
105 | RdKafka::ErrorCode error_code;
106 |
107 | // If it is not
108 | std::string topic_name;
109 | int32_t partition;
110 | int64_t offset;
111 | int64_t timestamp;
112 |
113 | // Opaque token used. Local value
114 | void* opaque;
115 |
116 | // Key. It is a pointer to avoid corrupted values
117 | // https://github.com/Blizzard/node-rdkafka/issues/208
118 | void* key;
119 | size_t key_len;
120 |
121 | size_t len;
122 | void* payload;
123 | };
124 |
125 | class DeliveryReportDispatcher : public Dispatcher {
126 | public:
127 | DeliveryReportDispatcher();
128 | ~DeliveryReportDispatcher();
129 | void Flush();
130 | size_t Add(const DeliveryReport &);
131 | protected:
132 |   std::deque<DeliveryReport> events;
133 | };
134 |
135 | class Delivery : public RdKafka::DeliveryReportCb {
136 | public:
137 | Delivery();
138 | ~Delivery();
139 | void dr_cb(RdKafka::Message&);
140 | DeliveryReportDispatcher dispatcher;
141 | void SendMessageBuffer(bool dr_copy_payload);
142 | protected:
143 | bool m_dr_msg_cb;
144 | };
145 |
146 | // Rebalance dispatcher
147 |
148 | struct event_topic_partition_t {
149 | std::string topic;
150 | int partition;
151 | int64_t offset;
152 |
153 | event_topic_partition_t(std::string p_topic, int p_partition, int64_t p_offset): // NOLINT
154 | topic(p_topic),
155 | partition(p_partition),
156 | offset(p_offset) {}
157 | };
158 |
159 | struct rebalance_event_t {
160 | RdKafka::ErrorCode err;
161 |   std::vector<event_topic_partition_t> partitions;
162 |
163 | rebalance_event_t(RdKafka::ErrorCode p_err,
164 |     std::vector<RdKafka::TopicPartition*> p_partitions):
165 | err(p_err) {
166 | // Iterate over the topic partitions because we won't have them later
167 | for (size_t topic_partition_i = 0;
168 | topic_partition_i < p_partitions.size(); topic_partition_i++) {
169 | RdKafka::TopicPartition* topic_partition =
170 | p_partitions[topic_partition_i];
171 |
172 | event_topic_partition_t tp(
173 | topic_partition->topic(),
174 | topic_partition->partition(),
175 | topic_partition->offset());
176 |
177 | partitions.push_back(tp);
178 | }
179 | }
180 | };
181 |
182 | struct offset_commit_event_t {
183 | RdKafka::ErrorCode err;
184 |   std::vector<event_topic_partition_t> partitions;
185 |
186 | offset_commit_event_t(RdKafka::ErrorCode p_err,
187 |     const std::vector<RdKafka::TopicPartition*> &p_partitions):
188 | err(p_err) {
189 | // Iterate over the topic partitions because we won't have them later
190 | for (size_t topic_partition_i = 0;
191 | topic_partition_i < p_partitions.size(); topic_partition_i++) {
192 | RdKafka::TopicPartition* topic_partition =
193 | p_partitions[topic_partition_i];
194 |
195 | // Just reuse this thing because it's the same exact thing we need
196 | event_topic_partition_t tp(
197 | topic_partition->topic(),
198 | topic_partition->partition(),
199 | topic_partition->offset());
200 |
201 | partitions.push_back(tp);
202 | }
203 | }
204 | };
205 |
206 | class RebalanceDispatcher : public Dispatcher {
207 | public:
208 | RebalanceDispatcher();
209 | ~RebalanceDispatcher();
210 | void Add(const rebalance_event_t &);
211 | void Flush();
212 | protected:
213 |   std::vector<rebalance_event_t> m_events;
214 | };
215 |
216 | class Rebalance : public RdKafka::RebalanceCb {
217 | public:
218 | void rebalance_cb(RdKafka::KafkaConsumer *, RdKafka::ErrorCode,
219 |     std::vector<RdKafka::TopicPartition*> &);
220 |
221 | RebalanceDispatcher dispatcher;
222 | private:
223 |   v8::Persistent<v8::Function> m_cb;
224 | };
225 |
226 | class OffsetCommitDispatcher : public Dispatcher {
227 | public:
228 | OffsetCommitDispatcher();
229 | ~OffsetCommitDispatcher();
230 | void Add(const offset_commit_event_t &);
231 | void Flush();
232 | protected:
233 |   std::vector<offset_commit_event_t> m_events;
234 | };
235 |
236 | class OffsetCommit : public RdKafka::OffsetCommitCb {
237 | public:
238 |   void offset_commit_cb(RdKafka::ErrorCode, std::vector<RdKafka::TopicPartition*> &);  // NOLINT
239 |
240 | OffsetCommitDispatcher dispatcher;
241 | private:
242 |   v8::Persistent<v8::Function> m_cb;
243 | };
244 |
245 | class Partitioner : public RdKafka::PartitionerCb {
246 | public:
247 | Partitioner();
248 | ~Partitioner();
249 | int32_t partitioner_cb( const RdKafka::Topic*, const std::string*, int32_t, void*); // NOLINT
250 | Nan::Callback callback; // NOLINT
251 |   void SetCallback(v8::Local<v8::Function>);
252 | private:
253 | static unsigned int djb_hash(const char*, size_t);
254 | static unsigned int random(const RdKafka::Topic*, int32_t);
255 | };
256 |
257 | } // namespace Callbacks
258 |
259 | } // namespace NodeKafka
260 |
261 | #endif // SRC_CALLBACKS_H_
262 |
--------------------------------------------------------------------------------
/src/common.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_COMMON_H_
11 | #define SRC_COMMON_H_
12 |
13 | #include <nan.h>
14 |
15 | #include <iostream>
16 | #include <string>
17 | #include <vector>
18 |
19 | #include "rdkafkacpp.h"
20 | #include "rdkafka.h" // NOLINT
21 |
22 | #include "src/errors.h"
23 |
24 | typedef std::vector<const RdKafka::BrokerMetadata*> BrokerMetadataList;
25 | typedef std::vector<const RdKafka::PartitionMetadata*> PartitionMetadataList;
26 | typedef std::vector<const RdKafka::TopicMetadata*> TopicMetadataList;
27 |
28 | namespace NodeKafka {
29 |
30 | void Log(std::string);
31 |
32 | template<typename T> T GetParameter(v8::Local<v8::Object>, std::string, T);
33 | template<> std::string GetParameter<std::string>(
34 |   v8::Local<v8::Object>, std::string, std::string);
35 | template<> std::vector<std::string> GetParameter<std::vector<std::string> >(
36 |   v8::Local<v8::Object>, std::string, std::vector<std::string>);
37 | // template<> int GetParameter<int>(v8::Local<v8::Object>, std::string, int);
38 | std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array>);
39 |
40 | class scoped_mutex_lock {
41 | public:
42 | explicit scoped_mutex_lock(uv_mutex_t& lock_) : // NOLINT
43 | async_lock(lock_) {
44 | uv_mutex_lock(&async_lock);
45 | }
46 |
47 | ~scoped_mutex_lock() {
48 | uv_mutex_unlock(&async_lock);
49 | }
50 |
51 | private:
52 | uv_mutex_t &async_lock;
53 | };
54 |
55 | /*
56 | int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock)
57 |
58 | int uv_rwlock_trywrlock(uv_rwlock_t* rwlock)
59 | */
60 |
61 | class scoped_shared_write_lock {
62 | public:
63 | explicit scoped_shared_write_lock(uv_rwlock_t& lock_) : // NOLINT
64 | async_lock(lock_) {
65 | uv_rwlock_wrlock(&async_lock);
66 | }
67 |
68 | ~scoped_shared_write_lock() {
69 | uv_rwlock_wrunlock(&async_lock);
70 | }
71 |
72 | private:
73 | uv_rwlock_t &async_lock;
74 | };
75 |
76 | class scoped_shared_read_lock {
77 | public:
78 | explicit scoped_shared_read_lock(uv_rwlock_t& lock_) : // NOLINT
79 | async_lock(lock_) {
80 | uv_rwlock_rdlock(&async_lock);
81 | }
82 |
83 | ~scoped_shared_read_lock() {
84 | uv_rwlock_rdunlock(&async_lock);
85 | }
86 |
87 | private:
88 | uv_rwlock_t &async_lock;
89 | };
90 |
91 | namespace Conversion {
92 |
93 | namespace Admin {
94 | // Topics from topic object, or topic object array
95 | rd_kafka_NewTopic_t* FromV8TopicObject(
96 |     v8::Local<v8::Object>, std::string &errstr);  // NOLINT
97 | rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local<v8::Array>);
98 | }
99 |
100 | namespace Topic {
101 |   std::vector<std::string> ToStringVector(v8::Local<v8::Array>);
102 |   v8::Local<v8::Array> ToV8Array(std::vector<std::string>);
103 | } // namespace Topic
104 |
105 | namespace TopicPartition {
106 |
107 | v8::Local<v8::Array> ToV8Array(std::vector<RdKafka::TopicPartition*> &);
108 | RdKafka::TopicPartition * FromV8Object(v8::Local<v8::Object>);
109 | std::vector<RdKafka::TopicPartition*> FromV8Array(const v8::Local<v8::Array> &);  // NOLINT
110 |
111 | } // namespace TopicPartition
112 |
113 | namespace Metadata {
114 |
115 | v8::Local<v8::Object> ToV8Object(RdKafka::Metadata*);
116 |
117 | } // namespace Metadata
118 |
119 | namespace Message {
120 |
121 | v8::Local<v8::Object> ToV8Object(RdKafka::Message*);
122 | v8::Local<v8::Object> ToV8Object(RdKafka::Message*, bool, bool);
123 |
124 | }
125 |
126 | } // namespace Conversion
127 |
128 | namespace Util {
129 |   std::string FromV8String(v8::Local<v8::String>);
130 | }
131 |
132 | } // namespace NodeKafka
133 |
134 | #endif // SRC_COMMON_H_
135 |
--------------------------------------------------------------------------------
/src/config.cc:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #include <iostream>
11 | #include <string>
12 | #include <list>
13 |
14 | #include "src/config.h"
15 |
16 | using Nan::MaybeLocal;
17 | using Nan::Maybe;
18 | using v8::Local;
19 | using v8::String;
20 | using v8::Object;
21 | using std::cout;
22 | using std::endl;
23 |
24 | namespace NodeKafka {
25 |
26 | void Conf::DumpConfig(std::list<std::string> *dump) {
27 |   for (std::list<std::string>::iterator it = dump->begin();
28 | it != dump->end(); ) {
29 | std::cout << *it << " = ";
30 | it++;
31 | std::cout << *it << std::endl;
32 | it++;
33 | }
34 | std::cout << std::endl;
35 | }
36 |
37 | Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local<v8::Object> object, std::string &errstr) {  // NOLINT
38 |   v8::Local<v8::Context> context = Nan::GetCurrentContext();
39 |   Conf* rdconf = static_cast<Conf*>(RdKafka::Conf::create(type));
40 |
41 |   v8::MaybeLocal<v8::Array> _property_names = object->GetOwnPropertyNames(
42 |     Nan::GetCurrentContext());
43 |   v8::Local<v8::Array> property_names = _property_names.ToLocalChecked();
44 |
45 | for (unsigned int i = 0; i < property_names->Length(); ++i) {
46 | std::string string_value;
47 | std::string string_key;
48 |
49 |     v8::Local<v8::Value> key = Nan::Get(property_names, i).ToLocalChecked();
50 |     v8::Local<v8::Value> value = Nan::Get(object, key).ToLocalChecked();
51 |
52 | if (key->IsString()) {
53 | Nan::Utf8String utf8_key(key);
54 | string_key = std::string(*utf8_key);
55 | } else {
56 | continue;
57 | }
58 |
59 | if (!value->IsFunction()) {
60 | #if NODE_MAJOR_VERSION > 6
61 | if (value->IsInt32()) {
62 | string_value = std::to_string(
63 | value->Int32Value(context).ToChecked());
64 | } else if (value->IsUint32()) {
65 | string_value = std::to_string(
66 | value->Uint32Value(context).ToChecked());
67 | } else if (value->IsBoolean()) {
68 |         const bool v = Nan::To<bool>(value).ToChecked();
69 | string_value = v ? "true" : "false";
70 | } else {
71 |         Nan::Utf8String utf8_value(value.As<v8::String>());
72 | string_value = std::string(*utf8_value);
73 | }
74 | #else
75 |     Nan::Utf8String utf8_value(value.As<v8::String>());
76 | string_value = std::string(*utf8_value);
77 | #endif
78 | if (rdconf->set(string_key, string_value, errstr)
79 | != Conf::CONF_OK) {
80 | delete rdconf;
81 | return NULL;
82 | }
83 | } else {
84 |       v8::Local<v8::Function> cb = value.As<v8::Function>();
85 | rdconf->ConfigureCallback(string_key, cb, true, errstr);
86 | if (!errstr.empty()) {
87 | delete rdconf;
88 | return NULL;
89 | }
90 | rdconf->ConfigureCallback(string_key, cb, false, errstr);
91 | if (!errstr.empty()) {
92 | delete rdconf;
93 | return NULL;
94 | }
95 | }
96 | }
97 |
98 | return rdconf;
99 | }
100 |
101 | void Conf::ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add, std::string &errstr) {
102 | if (string_key.compare("rebalance_cb") == 0) {
103 | if (add) {
104 | if (this->m_rebalance_cb == NULL) {
105 | this->m_rebalance_cb = new NodeKafka::Callbacks::Rebalance();
106 | }
107 | this->m_rebalance_cb->dispatcher.AddCallback(cb);
108 | this->set(string_key, this->m_rebalance_cb, errstr);
109 | } else {
110 | if (this->m_rebalance_cb != NULL) {
111 | this->m_rebalance_cb->dispatcher.RemoveCallback(cb);
112 | }
113 | }
114 | } else if (string_key.compare("offset_commit_cb") == 0) {
115 | if (add) {
116 | if (this->m_offset_commit_cb == NULL) {
117 | this->m_offset_commit_cb = new NodeKafka::Callbacks::OffsetCommit();
118 | }
119 | this->m_offset_commit_cb->dispatcher.AddCallback(cb);
120 | this->set(string_key, this->m_offset_commit_cb, errstr);
121 | } else {
122 | if (this->m_offset_commit_cb != NULL) {
123 | this->m_offset_commit_cb->dispatcher.RemoveCallback(cb);
124 | }
125 | }
126 | }
127 | }
128 |
129 | void Conf::listen() {
130 | if (m_rebalance_cb) {
131 | m_rebalance_cb->dispatcher.Activate();
132 | }
133 |
134 | if (m_offset_commit_cb) {
135 | m_offset_commit_cb->dispatcher.Activate();
136 | }
137 | }
138 |
139 | void Conf::stop() {
140 | if (m_rebalance_cb) {
141 | m_rebalance_cb->dispatcher.Deactivate();
142 | }
143 |
144 | if (m_offset_commit_cb) {
145 | m_offset_commit_cb->dispatcher.Deactivate();
146 | }
147 | }
148 |
149 | Conf::~Conf() {
150 | if (m_rebalance_cb) {
151 | delete m_rebalance_cb;
152 | }
153 | }
154 |
155 | } // namespace NodeKafka
156 |
--------------------------------------------------------------------------------
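
Usage sketch (illustrative, not a file in this repository): the coercion rules in Conf::create above are why a plain JavaScript configuration object works end to end: int32, uint32, and boolean values are stringified for librdkafka, while function-valued properties are routed through Conf::ConfigureCallback.

    var Kafka = require('node-rdkafka');

    var consumer = new Kafka.KafkaConsumer({
      'group.id': 'example-group',
      'metadata.broker.list': 'localhost:9092',  // placeholder broker
      'fetch.wait.max.ms': 100,                  // number: becomes the string "100"
      'enable.auto.commit': false,               // boolean: becomes the string "false"
      'rebalance_cb': function(err, assignment) {
        // wired up as a callback rather than set as a string value
      }
    }, {});
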
/src/config.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_CONFIG_H_
11 | #define SRC_CONFIG_H_
12 |
13 | #include <uv.h>
14 | #include <nan.h>
15 | #include <iostream>
16 | #include <string>
17 | #include <list>
18 |
19 | #include "rdkafkacpp.h"
20 | #include "src/common.h"
21 | #include "src/callbacks.h"
22 |
23 | namespace NodeKafka {
24 |
25 | class Conf : public RdKafka::Conf {
26 | public:
27 | ~Conf();
28 |
29 |   static Conf* create(RdKafka::Conf::ConfType, v8::Local<v8::Object>, std::string &);  // NOLINT
30 |   static void DumpConfig(std::list<std::string> *);
31 |
32 | void listen();
33 | void stop();
34 |
35 |   void ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add, std::string &errstr);
36 | protected:
37 | NodeKafka::Callbacks::Rebalance * m_rebalance_cb = NULL;
38 | NodeKafka::Callbacks::OffsetCommit * m_offset_commit_cb = NULL;
39 | };
40 |
41 | } // namespace NodeKafka
42 |
43 | #endif // SRC_CONFIG_H_
44 |
--------------------------------------------------------------------------------
/src/connection.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_CONNECTION_H_
11 | #define SRC_CONNECTION_H_
12 |
13 | #include <nan.h>
14 | #include <string>
15 | #include <vector>
16 | #include <memory>
17 |
18 | #include "rdkafkacpp.h"
19 |
20 | #include "src/common.h"
21 | #include "src/errors.h"
22 | #include "src/config.h"
23 | #include "src/callbacks.h"
24 |
25 | namespace NodeKafka {
26 |
27 | /**
28 | * @brief Connection v8 wrapped object.
29 | *
30 |  * Wraps the RdKafka::Handle object with compositional inheritance and
31 | * provides sensible defaults for exposing callbacks to node
32 | *
33 | * This object can't itself expose methods to the prototype directly, as far
34 | * as I can tell. But it can provide the NAN_METHODS that just need to be added
35 | * to the prototype. Since connections, etc. are managed differently based on
36 | * whether it is a producer or consumer, they manage that. This base class
37 | * handles some of the wrapping functionality and more importantly, the
38 | * configuration of callbacks
39 | *
40 | * Any callback available to both consumers and producers, like logging or
41 | * events will be handled in here.
42 | *
43 | * @sa RdKafka::Handle
44 | * @sa NodeKafka::Client
45 | */
46 |
47 | class Connection : public Nan::ObjectWrap {
48 | struct OauthBearerToken
49 | {
50 | std::string token;
51 | int64_t expiry;
52 | };
53 |
54 | public:
55 | bool IsConnected();
56 | bool IsClosing();
57 |
58 | // Baton
59 | Baton CreateTopic(std::string);
60 | Baton CreateTopic(std::string, RdKafka::Conf*);
61 | Baton GetMetadata(bool, std::string, int);
62 | Baton QueryWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*, int);
63 |   Baton OffsetsForTimes(std::vector<RdKafka::TopicPartition*> &, int);
64 |
65 | RdKafka::Handle* GetClient();
66 |
67 | static RdKafka::TopicPartition* GetPartition(std::string &);
68 | static RdKafka::TopicPartition* GetPartition(std::string &, int);
69 |
70 | Callbacks::Event m_event_cb;
71 |
72 | virtual void ActivateDispatchers() = 0;
73 | virtual void DeactivateDispatchers() = 0;
74 |
75 |   virtual void ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add);
76 |
77 | protected:
78 | Connection(Conf*, Conf*);
79 | ~Connection();
80 |
81 |   static Nan::Persistent<v8::Function> constructor;
82 |   static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
83 | static Baton rdkafkaErrorToBaton(RdKafka::Error* error);
84 |
85 | bool m_has_been_disconnected;
86 | bool m_is_closing;
87 |
88 | Conf* m_gconfig;
89 | Conf* m_tconfig;
90 | std::string m_errstr;
91 |
92 |   std::unique_ptr<OauthBearerToken> m_init_oauthToken;
93 |
94 | uv_rwlock_t m_connection_lock;
95 |
96 | RdKafka::Handle* m_client;
97 |
98 | static NAN_METHOD(NodeSetToken);
99 | static NAN_METHOD(NodeConfigureCallbacks);
100 | static NAN_METHOD(NodeGetMetadata);
101 | static NAN_METHOD(NodeQueryWatermarkOffsets);
102 | static NAN_METHOD(NodeOffsetsForTimes);
103 | };
104 |
105 | } // namespace NodeKafka
106 |
107 | #endif // SRC_CONNECTION_H_
108 |
--------------------------------------------------------------------------------
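
Usage sketch (illustrative, not a file in this repository): the batons above back the metadata and watermark queries that both producers and consumers expose in JavaScript; given any connected client:

    client.getMetadata({ topic: 'test', timeout: 10000 }, function(err, metadata) {
      if (!err) { console.log(metadata.topics); }
    });

    client.queryWatermarkOffsets('test', 0, 5000, function(err, offsets) {
      if (!err) { console.log(offsets.lowOffset, offsets.highOffset); }
    });
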
/src/errors.cc:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #include <string>
11 |
12 | #include "src/errors.h"
13 |
14 | namespace NodeKafka {
15 |
16 | v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err, std::string errstr) {  // NOLINT
17 |   //
18 |   int code = static_cast<int>(err);
19 |
20 |   v8::Local<v8::Object> ret = Nan::New<v8::Object>();
21 |
22 | Nan::Set(ret, Nan::New("message").ToLocalChecked(),
23 | Nan::New(errstr).ToLocalChecked());
24 | Nan::Set(ret, Nan::New("code").ToLocalChecked(),
25 | Nan::New(code));
26 |
27 | return ret;
28 | }
29 |
30 | v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err) {
31 | return RdKafkaError(err, RdKafka::err2str(err));
32 | }
33 |
34 | v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err, std::string errstr,
35 |   bool isFatal, bool isRetriable, bool isTxnRequiresAbort) {
36 |   v8::Local<v8::Object> ret = RdKafkaError(err, errstr);
37 |
38 | Nan::Set(ret, Nan::New("isFatal").ToLocalChecked(),
39 | Nan::New(isFatal));
40 | Nan::Set(ret, Nan::New("isRetriable").ToLocalChecked(),
41 | Nan::New(isRetriable));
42 | Nan::Set(ret, Nan::New("isTxnRequiresAbort").ToLocalChecked(),
43 | Nan::New(isTxnRequiresAbort));
44 |
45 | return ret;
46 | }
47 |
48 | Baton::Baton(const RdKafka::ErrorCode &code) {
49 | m_err = code;
50 | }
51 |
52 | Baton::Baton(const RdKafka::ErrorCode &code, std::string errstr) {
53 | m_err = code;
54 | m_errstr = errstr;
55 | }
56 |
57 | Baton::Baton(void* data) {
58 | m_err = RdKafka::ERR_NO_ERROR;
59 | m_data = data;
60 | }
61 |
62 | Baton::Baton(const RdKafka::ErrorCode &code, std::string errstr, bool isFatal,
63 | bool isRetriable, bool isTxnRequiresAbort) {
64 | m_err = code;
65 | m_errstr = errstr;
66 | m_isFatal = isFatal;
67 | m_isRetriable = isRetriable;
68 | m_isTxnRequiresAbort = isTxnRequiresAbort;
69 | }
70 |
71 |
72 | v8::Local<v8::Object> Baton::ToObject() {
73 | if (m_errstr.empty()) {
74 | return RdKafkaError(m_err);
75 | } else {
76 | return RdKafkaError(m_err, m_errstr);
77 | }
78 | }
79 |
80 | v8::Local<v8::Object> Baton::ToTxnObject() {
81 | return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort);
82 | }
83 |
84 | RdKafka::ErrorCode Baton::err() {
85 | return m_err;
86 | }
87 |
88 | std::string Baton::errstr() {
89 | if (m_errstr.empty()) {
90 | return RdKafka::err2str(m_err);
91 | } else {
92 | return m_errstr;
93 | }
94 | }
95 |
96 | } // namespace NodeKafka
97 |
--------------------------------------------------------------------------------
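
Usage sketch (illustrative, not a file in this repository): the objects built by RdKafkaError above surface in JavaScript as LibrdKafkaError instances (see lib/error.js), so application code usually branches on the numeric code; given a connected producer:

    var Kafka = require('node-rdkafka');

    try {
      producer.produce('test', null, Buffer.from('x'), null);
    } catch (err) {
      // err.code mirrors RdKafka::ErrorCode; err.message mirrors errstr
      if (err.code === Kafka.CODES.ERRORS.ERR__QUEUE_FULL) {
        producer.poll();  // drain delivery reports, then retry
      }
    }
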
/src/errors.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_ERRORS_H_
11 | #define SRC_ERRORS_H_
12 |
13 | #include <nan.h>
14 | #include <uv.h>
15 | #include <string>
16 |
17 | #include "rdkafkacpp.h"
18 |
19 | #include "src/common.h"
20 |
21 | namespace NodeKafka {
22 |
23 | class Baton {
24 | public:
25 | explicit Baton(const RdKafka::ErrorCode &);
26 | explicit Baton(void* data);
27 | explicit Baton(const RdKafka::ErrorCode &, std::string);
28 | explicit Baton(const RdKafka::ErrorCode &, std::string, bool isFatal,
29 | bool isRetriable, bool isTxnRequiresAbort);
30 |
31 |   template<typename T> T data() {
32 |     return static_cast<T>(m_data);
33 | }
34 |
35 | RdKafka::ErrorCode err();
36 | std::string errstr();
37 |
38 |   v8::Local<v8::Object> ToObject();
39 |   v8::Local<v8::Object> ToTxnObject();
40 |
41 | private:
42 | void* m_data;
43 | std::string m_errstr;
44 | RdKafka::ErrorCode m_err;
45 | bool m_isFatal;
46 | bool m_isRetriable;
47 | bool m_isTxnRequiresAbort;
48 | };
49 |
50 | v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &);
51 |
52 | } // namespace NodeKafka
53 |
54 | #endif // SRC_ERRORS_H_
55 |
--------------------------------------------------------------------------------
/src/kafka-consumer.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_KAFKA_CONSUMER_H_
11 | #define SRC_KAFKA_CONSUMER_H_
12 |
13 | #include <nan.h>
14 | #include <uv.h>
15 | #include <iostream>
16 | #include <string>
17 | #include <vector>
18 |
19 | #include "rdkafkacpp.h"
20 |
21 | #include "src/common.h"
22 | #include "src/connection.h"
23 | #include "src/callbacks.h"
24 |
25 | namespace NodeKafka {
26 |
27 | /**
28 | * @brief KafkaConsumer v8 wrapped object.
29 | *
30 | * Specializes the connection to wrap a consumer object through compositional
31 |  * inheritance. Establishes its prototype in node through `Init`
32 | *
33 | * @sa RdKafka::Handle
34 | * @sa NodeKafka::Client
35 | */
36 |
37 | class KafkaConsumer : public Connection {
38 | friend class Producer;
39 | public:
40 |   static void Init(v8::Local<v8::Object>);
41 |   static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);
42 |
43 | Baton Connect();
44 | Baton Disconnect();
45 |
46 | Baton Subscription();
47 | Baton Unsubscribe();
48 | bool IsSubscribed();
49 |
50 |   Baton Pause(std::vector<RdKafka::TopicPartition*> &);
51 |   Baton Resume(std::vector<RdKafka::TopicPartition*> &);
52 |
53 | // Asynchronous commit events
54 |   Baton Commit(std::vector<RdKafka::TopicPartition*>);
55 | Baton Commit(RdKafka::TopicPartition*);
56 | Baton Commit();
57 |
58 |   Baton OffsetsStore(std::vector<RdKafka::TopicPartition*> &);
59 | Baton GetWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*);
60 |
61 | // Synchronous commit events
62 |   Baton CommitSync(std::vector<RdKafka::TopicPartition*>);
63 | Baton CommitSync(RdKafka::TopicPartition*);
64 | Baton CommitSync();
65 |
66 |   Baton Committed(std::vector<RdKafka::TopicPartition*> &, int timeout_ms);
67 |   Baton Position(std::vector<RdKafka::TopicPartition*> &);
68 |
69 | Baton RefreshAssignments();
70 |
71 | bool HasAssignedPartitions();
72 | int AssignedPartitionCount();
73 |
74 |   Baton Assign(std::vector<RdKafka::TopicPartition*>);
75 | Baton Unassign();
76 |
77 |   Baton IncrementalAssign(std::vector<RdKafka::TopicPartition*>);
78 |   Baton IncrementalUnassign(std::vector<RdKafka::TopicPartition*>);
79 |
80 | Baton Seek(const RdKafka::TopicPartition &partition, int timeout_ms);
81 |
82 | std::string Name();
83 | std::string RebalanceProtocol();
84 |
85 | Baton Subscribe(std::vector<std::string>);
86 | Baton Consume(int timeout_ms);
87 |
88 | void ActivateDispatchers();
89 | void DeactivateDispatchers();
90 |
91 | protected:
92 | static Nan::Persistent<v8::Function> constructor;
93 | static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
94 |
95 | KafkaConsumer(Conf *, Conf *);
96 | ~KafkaConsumer();
97 |
98 | private:
99 | static void part_list_print(const std::vector<RdKafka::TopicPartition*>&);
100 |
101 | std::vector<RdKafka::TopicPartition*> m_partitions;
102 | int m_partition_cnt;
103 | bool m_is_subscribed = false;
104 |
105 | void* m_consume_loop = nullptr;
106 |
107 | // Node methods
108 | static NAN_METHOD(NodeConnect);
109 | static NAN_METHOD(NodeSubscribe);
110 | static NAN_METHOD(NodeDisconnect);
111 | static NAN_METHOD(NodeAssign);
112 | static NAN_METHOD(NodeUnassign);
113 | static NAN_METHOD(NodeIncrementalAssign);
114 | static NAN_METHOD(NodeIncrementalUnassign);
115 | static NAN_METHOD(NodeRebalanceProtocol);
116 | static NAN_METHOD(NodeAssignments);
117 | static NAN_METHOD(NodeUnsubscribe);
118 | static NAN_METHOD(NodeCommit);
119 | static NAN_METHOD(NodeCommitSync);
120 | static NAN_METHOD(NodeOffsetsStore);
121 | static NAN_METHOD(NodeCommitted);
122 | static NAN_METHOD(NodePosition);
123 | static NAN_METHOD(NodeSubscription);
124 | static NAN_METHOD(NodeSeek);
125 | static NAN_METHOD(NodeGetWatermarkOffsets);
126 | static NAN_METHOD(NodeConsumeLoop);
127 | static NAN_METHOD(NodeConsume);
128 |
129 | static NAN_METHOD(NodePause);
130 | static NAN_METHOD(NodeResume);
131 | };
132 |
133 | } // namespace NodeKafka
134 |
135 | #endif // SRC_KAFKA_CONSUMER_H_
136 |
--------------------------------------------------------------------------------
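Each NAN_METHOD above backs a like-named method on the JavaScript KafkaConsumer. A minimal sketch of the subscribe/consume/commit flow those declarations support, assuming a reachable broker and an existing topic named 'awesome-topic':

    var Kafka = require('node-rdkafka');

    var consumer = new Kafka.KafkaConsumer({
      'group.id': 'kafka-mocha-grp',
      'metadata.broker.list': 'localhost:9092'
    }, {});

    consumer.connect();

    consumer.on('ready', function() {
      consumer.subscribe(['awesome-topic']);  // NodeSubscribe
      consumer.consume();                     // NodeConsumeLoop
    });

    consumer.on('data', function(message) {
      consumer.commit(message);  // NodeCommit (async; commitSync also exists)
    });
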
/src/producer.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_PRODUCER_H_
11 | #define SRC_PRODUCER_H_
12 |
13 | #include <nan.h>
14 | #include <node.h>
15 | #include <string>
16 | #include <vector>
17 |
18 | #include "rdkafkacpp.h"
19 |
20 | #include "src/common.h"
21 | #include "src/connection.h"
22 | #include "src/callbacks.h"
23 | #include "src/topic.h"
24 |
25 | namespace NodeKafka {
26 |
27 | class ProducerMessage {
28 | public:
29 | explicit ProducerMessage(v8::Local<v8::Object>, NodeKafka::Topic*);
30 | ~ProducerMessage();
31 |
32 | void* Payload();
33 | size_t Size();
34 | bool IsEmpty();
35 | RdKafka::Topic * GetTopic();
36 |
37 | std::string m_errstr;
38 |
39 | Topic * m_topic;
40 | int32_t m_partition;
41 | std::string m_key;
42 |
43 | void* m_buffer_data;
44 | size_t m_buffer_length;
45 |
46 | bool m_is_empty;
47 | };
48 |
49 | class Producer : public Connection {
50 | public:
51 | static void Init(v8::Local<v8::Object>);
52 | static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);
53 |
54 | Baton Connect();
55 | void Disconnect();
56 | void Poll();
57 | #if RD_KAFKA_VERSION > 0x00090200
58 | Baton Flush(int timeout_ms);
59 | #endif
60 |
61 | Baton Produce(void* message, size_t message_size,
62 | RdKafka::Topic* topic, int32_t partition,
63 | const void* key, size_t key_len,
64 | void* opaque);
65 |
66 | Baton Produce(void* message, size_t message_size,
67 | std::string topic, int32_t partition,
68 | std::string* key,
69 | int64_t timestamp, void* opaque,
70 | RdKafka::Headers* headers);
71 |
72 | Baton Produce(void* message, size_t message_size,
73 | std::string topic, int32_t partition,
74 | const void* key, size_t key_len,
75 | int64_t timestamp, void* opaque,
76 | RdKafka::Headers* headers);
77 |
78 | std::string Name();
79 |
80 | void ActivateDispatchers();
81 | void DeactivateDispatchers();
82 |
83 | void ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add) override;
84 |
85 | Baton InitTransactions(int32_t timeout_ms);
86 | Baton BeginTransaction();
87 | Baton CommitTransaction(int32_t timeout_ms);
88 | Baton AbortTransaction(int32_t timeout_ms);
89 | Baton SendOffsetsToTransaction(
90 | std::vector<RdKafka::TopicPartition*> &offsets,
91 | NodeKafka::KafkaConsumer* consumer,
92 | int timeout_ms
93 | );
94 |
95 | protected:
96 | static Nan::Persistent<v8::Function> constructor;
97 | static void New(const Nan::FunctionCallbackInfo<v8::Value>&);
98 |
99 | Producer(Conf*, Conf*);
100 | ~Producer();
101 |
102 | private:
103 | static NAN_METHOD(NodeProduce);
104 | static NAN_METHOD(NodeSetPartitioner);
105 | static NAN_METHOD(NodeConnect);
106 | static NAN_METHOD(NodeDisconnect);
107 | static NAN_METHOD(NodePoll);
108 | #if RD_KAFKA_VERSION > 0x00090200
109 | static NAN_METHOD(NodeFlush);
110 | #endif
111 | static NAN_METHOD(NodeInitTransactions);
112 | static NAN_METHOD(NodeBeginTransaction);
113 | static NAN_METHOD(NodeCommitTransaction);
114 | static NAN_METHOD(NodeAbortTransaction);
115 | static NAN_METHOD(NodeSendOffsetsToTransaction);
116 |
117 | Callbacks::Delivery m_dr_cb;
118 | Callbacks::Partitioner m_partitioner_cb;
119 | };
120 |
121 | } // namespace NodeKafka
122 |
123 | #endif // SRC_PRODUCER_H_
124 |
--------------------------------------------------------------------------------
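The transactional Batons at the end of this header surface as like-named methods on the JavaScript producer. A sketch of the init/begin/commit cycle, assuming a transaction-capable broker; the 'transactional.id' value here is an illustrative placeholder:

    var Kafka = require('node-rdkafka');

    var producer = new Kafka.Producer({
      'metadata.broker.list': 'localhost:9092',
      'transactional.id': 'example-txn-id'  // placeholder id, required for transactions
    }, {});

    producer.connect();
    producer.on('ready', function() {
      producer.initTransactions(5000, function(err) {   // InitTransactions
        if (err) { throw err; }
        producer.beginTransaction(function(err) {       // BeginTransaction
          if (err) { throw err; }
          producer.produce('node', null, Buffer.from('hello'), 'key');
          producer.commitTransaction(5000, function(err) {  // CommitTransaction
            if (err) { throw err; }
            producer.disconnect();
          });
        });
      });
    });
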
/src/topic.cc:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #include <string>
11 | #include <vector>
12 |
13 | #include "src/common.h"
14 | #include "src/connection.h"
15 | #include "src/topic.h"
16 |
17 | namespace NodeKafka {
18 |
19 | /**
20 | * @brief Topic v8 wrapped object.
21 | *
22 | * Wraps a topic name and an optional RdKafka topic configuration and
23 | * provides methods for interacting with them exposed to node.
24 | *
25 | * Unlike the clients, Topic holds no live handle of its own; the
26 | * underlying RdKafka::Topic is created lazily through toRDKafkaTopic.
27 | *
28 | * @sa RdKafka::Topic
29 | * @sa NodeKafka::Connection
30 | */
31 |
32 | Topic::Topic(std::string topic_name, RdKafka::Conf* config):
33 | m_topic_name(topic_name),
34 | m_config(config) {
35 | // We probably want to copy the config. May require refactoring if we do not
36 | }
37 |
38 | Topic::~Topic() {
39 | if (m_config) {
40 | delete m_config;
41 | }
42 | }
43 |
44 | std::string Topic::name() {
45 | return m_topic_name;
46 | }
47 |
48 | Baton Topic::toRDKafkaTopic(Connection* handle) {
49 | if (m_config) {
50 | return handle->CreateTopic(m_topic_name, m_config);
51 | } else {
52 | return handle->CreateTopic(m_topic_name);
53 | }
54 | }
55 |
56 | /*
57 |
58 | bool partition_available(int32_t partition) {
59 | return topic_->partition_available(partition);
60 | }
61 |
62 | Baton offset_store (int32_t partition, int64_t offset) {
63 | RdKafka::ErrorCode err = topic_->offset_store(partition, offset);
64 |
65 | switch (err) {
66 | case RdKafka::ERR_NO_ERROR:
67 |
68 | break;
69 | default:
70 |
71 | break;
72 | }
73 | }
74 |
75 | */
76 |
77 | Nan::Persistent<v8::Function> Topic::constructor;
78 |
79 | void Topic::Init(v8::Local<v8::Object> exports) {
80 | Nan::HandleScope scope;
81 |
82 | v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);
83 | tpl->SetClassName(Nan::New("Topic").ToLocalChecked());
84 | tpl->InstanceTemplate()->SetInternalFieldCount(1);
85 |
86 | Nan::SetPrototypeMethod(tpl, "name", NodeGetName);
87 |
88 | // connect. disconnect. resume. pause. get meta data
89 | constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext()))
90 | .ToLocalChecked());
91 |
92 | Nan::Set(exports, Nan::New("Topic").ToLocalChecked(),
93 | tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked());
94 | }
95 |
96 | void Topic::New(const Nan::FunctionCallbackInfo<v8::Value>& info) {
97 | if (!info.IsConstructCall()) {
98 | return Nan::ThrowError("non-constructor invocation not supported");
99 | }
100 |
101 | if (info.Length() < 1) {
102 | return Nan::ThrowError("topic name is required");
103 | }
104 |
105 | if (!info[0]->IsString()) {
106 | return Nan::ThrowError("Topic name must be a string");
107 | }
108 |
109 | RdKafka::Conf* config = NULL;
110 |
111 | if (info.Length() >= 2 && !info[1]->IsUndefined() && !info[1]->IsNull()) {
112 | // Only build a topic config if a second parameter was provided and it
113 | // is neither null nor undefined; otherwise the config stays NULL
114 |
115 | std::string errstr;
116 | if (!info[1]->IsObject()) {
117 | return Nan::ThrowError("Configuration data must be specified");
118 | }
119 |
120 | config = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); // NOLINT
121 |
122 | if (!config) {
123 | return Nan::ThrowError(errstr.c_str());
124 | }
125 | }
126 |
127 | Nan::Utf8String parameterValue(Nan::To<v8::String>(info[0]).ToLocalChecked());
128 | std::string topic_name(*parameterValue);
129 |
130 | Topic* topic = new Topic(topic_name, config);
131 |
132 | // Wrap it
133 | topic->Wrap(info.This());
134 |
135 | // Then there is some weird initialization that happens
136 | // basically it sets the configuration data
137 | // we don't need to do that because we lazy load it
138 |
139 | info.GetReturnValue().Set(info.This());
140 | }
141 |
142 | // handle
143 |
144 | v8::Local<v8::Object> Topic::NewInstance(v8::Local<v8::Value> arg) {
145 | Nan::EscapableHandleScope scope;
146 |
147 | const unsigned argc = 1;
148 |
149 | v8::Local<v8::Value> argv[argc] = { arg };
150 | v8::Local<v8::Function> cons = Nan::New<v8::Function>(constructor);
151 | v8::Local<v8::Object> instance =
152 | Nan::NewInstance(cons, argc, argv).ToLocalChecked();
153 |
154 | return scope.Escape(instance);
155 | }
156 |
157 | NAN_METHOD(Topic::NodeGetName) {
158 | Nan::HandleScope scope;
159 |
160 | Topic* topic = ObjectWrap::Unwrap<Topic>(info.This());
161 |
162 | info.GetReturnValue().Set(Nan::New(topic->name()).ToLocalChecked());
163 | }
164 |
165 | NAN_METHOD(Topic::NodePartitionAvailable) {
166 | // @TODO(sparente)
167 | }
168 |
169 | NAN_METHOD(Topic::NodeOffsetStore) {
170 | // @TODO(sparente)
171 | }
172 |
173 | } // namespace NodeKafka
174 |
--------------------------------------------------------------------------------
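Topic::New above accepts a topic name plus an optional topic-configuration object, so the raw binding can be exercised directly. A sketch against the addon exports, in the style of the binding specs below; invalid config keys make Conf::create fail and throw:

    var addon = require('bindings')('node-librdkafka');

    // Name only: the config stays NULL and is resolved lazily in toRDKafkaTopic
    var topic = new addon.Topic('awesome-topic');
    console.log(topic.name());  // 'awesome-topic', via NodeGetName

    // With a topic-level configuration object
    var configured = new addon.Topic('awesome-topic', { 'request.required.acks': 1 });
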
/src/topic.h:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | #ifndef SRC_TOPIC_H_
11 | #define SRC_TOPIC_H_
12 |
13 | #include <nan.h>
14 | #include <string>
15 |
16 | #include "rdkafkacpp.h"
17 |
18 | #include "src/config.h"
19 |
20 | namespace NodeKafka {
21 |
22 | class Topic : public Nan::ObjectWrap {
23 | public:
24 | static void Init(v8::Local<v8::Object>);
25 | static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value> arg);
26 |
27 | Baton toRDKafkaTopic(Connection *handle);
28 |
29 | protected:
30 | static Nan::Persistent<v8::Function> constructor;
31 | static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
32 |
33 | static NAN_METHOD(NodeGetMetadata);
34 |
35 | // TopicConfig * config_;
36 |
37 | std::string errstr;
38 | std::string name();
39 |
40 | private:
41 | Topic(std::string, RdKafka::Conf *);
42 | ~Topic();
43 |
44 | std::string m_topic_name;
45 | RdKafka::Conf * m_config;
46 |
47 | static NAN_METHOD(NodeGetName);
48 | static NAN_METHOD(NodePartitionAvailable);
49 | static NAN_METHOD(NodeOffsetStore);
50 | };
51 |
52 | } // namespace NodeKafka
53 |
54 | #endif // SRC_TOPIC_H_
55 |
--------------------------------------------------------------------------------
/test/binding.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var addon = require('bindings')('node-librdkafka');
11 | var t = require('assert');
12 |
13 | var consumerConfig = {
14 | 'group.id': 'awesome'
15 | };
16 |
17 | var producerConfig = {
18 | 'client.id': 'kafka-mocha',
19 | 'metadata.broker.list': 'localhost:9092',
20 | 'socket.timeout.ms': 250
21 | };
22 |
23 | var client;
24 |
25 | module.exports = {
26 | 'native addon': {
27 | 'exports something': function() {
28 | t.equal(typeof(addon), 'object');
29 | },
30 | 'exports valid producer': function() {
31 | t.equal(typeof(addon.Producer), 'function');
32 | t.throws(addon.Producer); // Requires constructor
33 | t.equal(typeof(new addon.Producer({}, {})), 'object');
34 | },
35 | 'exports valid consumer': function() {
36 | t.equal(typeof(addon.KafkaConsumer), 'function');
37 | t.throws(addon.KafkaConsumer); // Requires constructor
38 | t.equal(typeof(new addon.KafkaConsumer(consumerConfig, {})), 'object');
39 | },
40 | 'exports version': function() {
41 | t.ok(addon.librdkafkaVersion);
42 | },
43 | 'Producer client': {
44 | 'beforeEach': function() {
45 | client = new addon.Producer(producerConfig, {});
46 | },
47 | 'afterEach': function() {
48 | client = null;
49 | },
50 | 'is an object': function() {
51 | t.equal(typeof(client), 'object');
52 | },
53 | 'requires configuration': function() {
54 | t.throws(function() {
55 | return new addon.Producer();
56 | });
57 | },
58 | 'has necessary methods from superclass': function() {
59 | var methods = ['connect', 'disconnect', 'setToken', 'configureCallbacks', 'getMetadata'];
60 | methods.forEach(function(m) {
61 | t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
62 | });
63 | }
64 | }
65 | },
66 | };
67 |
--------------------------------------------------------------------------------
/test/consumer.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var addon = require('bindings')('node-librdkafka');
11 | var t = require('assert');
12 |
13 | var client;
14 | var defaultConfig = {
15 | 'client.id': 'kafka-mocha',
16 | 'group.id': 'kafka-mocha-grp',
17 | 'metadata.broker.list': 'localhost:9092'
18 | };
19 |
20 | module.exports = {
21 | 'Consumer': {
22 | 'afterEach': function() {
23 | client = null;
24 | },
25 | 'cannot be set without a topic config': function() {
26 | t.throws(function() {
27 | client = new addon.KafkaConsumer(defaultConfig);
28 | });
29 | },
30 | 'can be given a topic config': function() {
31 | client = new addon.KafkaConsumer(defaultConfig, {});
32 | },
33 | 'throws us an error if we provide an invalid configuration value': function() {
34 | t.throws(function() {
35 | client = new addon.KafkaConsumer({
36 | 'foo': 'bar'
37 | });
38 | }, 'should throw because the key is invalid');
39 | },
40 | 'throws us an error if topic config is given something invalid': function() {
41 | t.throws(function() {
42 | client = new addon.KafkaConsumer(defaultConfig, { 'foo': 'bar' });
43 | });
44 | },
45 | 'ignores function arguments for global configuration': function() {
46 | client = new addon.KafkaConsumer({
47 | 'event_cb': function() {},
48 | 'group.id': 'mocha-test'
49 | }, {});
50 | t.ok(client);
51 | },
52 | 'ignores function arguments for topic configuration': function() {
53 | client = new addon.KafkaConsumer(defaultConfig, {
54 | 'partitioner_cb': function() {}
55 | });
56 | }
57 | },
58 | 'KafkaConsumer client': {
59 | 'beforeEach': function() {
60 | client = new addon.KafkaConsumer(defaultConfig, {});
61 | },
62 | 'afterEach': function() {
63 | client = null;
64 | },
65 | 'is an object': function() {
66 | t.equal(typeof(client), 'object');
67 | },
68 | 'requires configuration': function() {
69 | t.throws(function() {
70 | return new addon.KafkaConsumer();
71 | });
72 | },
73 | 'has necessary methods from superclass': function() {
74 | var methods = ['connect', 'disconnect', 'setToken', 'configureCallbacks', 'getMetadata'];
75 | methods.forEach(function(m) {
76 | t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
77 | });
78 | },
79 | 'has necessary bindings for librdkafka 1:1 binding': function() {
80 | var methods = ['assign', 'unassign', 'subscribe'];
81 | methods.forEach(function(m) {
82 | t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
83 | });
84 | }
85 | },
86 | };
87 |
--------------------------------------------------------------------------------
/test/error.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
--------------------------------------------------------------------------------
/test/index.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
--------------------------------------------------------------------------------
/test/kafka-consumer-stream.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var KafkaConsumerStream = require('../lib/kafka-consumer-stream');
11 | var t = require('assert');
12 | var Writable = require('stream').Writable;
13 | var Emitter = require('events');
14 |
15 | var fakeClient;
16 |
17 | module.exports = {
18 | 'KafkaConsumerStream stream': {
19 | 'beforeEach': function() {
20 | fakeClient = new Emitter();
21 | fakeClient._isConnecting = false;
22 | fakeClient._isConnected = true;
23 | fakeClient.isConnected = function() {
24 | return true;
25 | };
26 | fakeClient.unsubscribe = function() {
27 | this.emit('unsubscribed');
28 | return true;
29 | };
30 | fakeClient.disconnect = function(cb) {
31 | this.emit('disconnected');
32 | if (cb) {
33 | t.equal(typeof cb, 'function');
34 | setImmediate(cb);
35 | }
36 | };
37 | fakeClient.consume = function(size, cb) {
38 | if (!cb) {
39 | cb = size; // called as consume(cb): first argument is the callback
40 | }
41 |
42 | t.equal(typeof cb, 'function',
43 | 'Provided callback should always be a function');
44 | setImmediate(function() {
45 | cb(null, [{
46 | value: Buffer.from('test'),
47 | key: 'testkey',
48 | offset: 1
49 | }]);
50 | });
51 | };
52 | fakeClient.subscribe = function(topics) {
53 | t.equal(Array.isArray(topics), true);
54 | return this;
55 | };
56 | },
57 |
58 | 'exports a stream class': function() {
59 | t.equal(typeof(KafkaConsumerStream), 'function');
60 | },
61 |
62 | 'can be instantiated': function() {
63 | t.equal(typeof new KafkaConsumerStream(fakeClient, {
64 | topics: 'topic'
65 | }), 'object');
66 | },
67 |
68 | 'properly reads off the fake client': function(cb) {
69 | var stream = new KafkaConsumerStream(fakeClient, {
70 | topics: 'topic'
71 | });
72 | stream.on('error', function(err) {
73 | t.fail(err);
74 | });
75 | stream.once('readable', function() {
76 | var message = stream.read();
77 | t.notEqual(message, null);
78 | t.ok(Buffer.isBuffer(message.value));
79 | t.equal('test', message.value.toString());
80 | t.equal('testkey', message.key);
81 | t.equal(typeof message.offset, 'number');
82 | stream.pause();
83 | cb();
84 | });
85 | },
86 |
87 | 'properly reads off the fake with a topic function': function(cb) {
88 | fakeClient._metadata = {
89 | orig_broker_id: 1,
90 | orig_broker_name: "broker_name",
91 | brokers: [
92 | {
93 | id: 1,
94 | host: 'localhost',
95 | port: 40
96 | }
97 | ],
98 | topics: [
99 | {
100 | name: 'awesome-topic',
101 | partitions: [
102 | {
103 | id: 1,
104 | leader: 20,
105 | replicas: [1, 2],
106 | isrs: [1, 2]
107 | }
108 | ]
109 | }
110 | ]
111 | };
112 |
113 | var stream = new KafkaConsumerStream(fakeClient, {
114 | topics: function(metadata) {
115 | var topics = metadata.topics.map(function(v) {
116 | return v.name;
117 | });
118 |
119 | return topics;
120 | }
121 | });
122 | fakeClient.subscribe = function(topics) {
123 | t.equal(Array.isArray(topics), true);
124 | t.equal(topics[0], 'awesome-topic');
125 | t.equal(topics.length, 1);
126 | return this;
127 | };
128 |
129 | stream.on('error', function(err) {
130 | t.fail(err);
131 | });
132 | stream.once('readable', function() {
133 | var message = stream.read();
134 | t.notEqual(message, null);
135 | t.ok(Buffer.isBuffer(message.value));
136 | t.equal('test', message.value.toString());
137 | t.equal('testkey', message.key);
138 | t.equal(typeof message.offset, 'number');
139 | stream.pause();
140 | cb();
141 | });
142 | },
143 |
144 | 'properly reads correct number of messages but does not stop': function(next) {
145 | var numMessages = 10;
146 | var numReceived = 0;
147 | var numSent = 0;
148 |
149 | fakeClient.consume = function(size, cb) {
150 | if (numSent < numMessages) {
151 | numSent++;
152 | setImmediate(function() {
153 | cb(null, [{
154 | value: Buffer.from('test'),
155 | offset: 1
156 | }]);
157 | });
158 | }
159 | // once all messages are sent, stop invoking the callback
160 | };
161 | var stream = new KafkaConsumerStream(fakeClient, {
162 | topics: 'topic'
163 | });
164 | stream.on('error', function(err) {
165 | // Ignore
166 | });
167 | stream.on('readable', function() {
168 | var message = stream.read();
169 | numReceived++;
170 | t.notEqual(message, null);
171 | t.ok(Buffer.isBuffer(message.value));
172 | t.equal(typeof message.offset, 'number');
173 | if (numReceived === numMessages) {
174 | // give it a second to get an error
175 | next();
176 | }
177 | });
178 | },
179 |
180 | 'can be piped around': function(cb) {
181 | var stream = new KafkaConsumerStream(fakeClient, {
182 | topics: 'topic'
183 | });
184 | var writable = new Writable({
185 | write: function(message, encoding, next) {
186 | t.notEqual(message, null);
187 | t.ok(Buffer.isBuffer(message.value));
188 | t.equal(typeof message.offset, 'number');
189 | this.cork();
190 | cb();
191 | },
192 | objectMode: true
193 | });
194 |
195 | stream.pipe(writable);
196 | stream.on('error', function(err) {
197 | t.fail(err);
198 | });
199 |
200 | },
201 |
202 | 'streams as batch when specified': function(next) {
203 | var numMessages = 10;
204 | var numReceived = 0;
205 | var numSent = 0;
206 |
207 | fakeClient.consume = function(size, cb) {
208 | if (numSent < numMessages) {
209 | numSent++;
210 | setImmediate(function() {
211 | cb(null, [{
212 | value: Buffer.from('test'),
213 | offset: 1
214 | }]);
215 | });
216 | }
217 | // once all messages are sent, stop invoking the callback
218 | };
219 | var stream = new KafkaConsumerStream(fakeClient, {
220 | topics: 'topic',
221 | streamAsBatch: true
222 | });
223 | stream.on('error', function(err) {
224 | // Ignore
225 | });
226 | stream.on('readable', function() {
227 | var messages = stream.read();
228 | numReceived++;
229 | t.equal(Array.isArray(messages), true);
230 | t.equal(messages.length, 1);
231 | var message = messages[0];
232 |
233 | t.notEqual(message, null);
234 | t.ok(Buffer.isBuffer(message.value));
235 | t.equal(typeof message.offset, 'number');
236 | if (numReceived === numMessages) {
237 | // give it a second to get an error
238 | next();
239 | }
240 | });
241 | },
242 |
243 | 'stops reading on unsubscribe': function(next) {
244 | var numMessages = 10;
245 | var numReceived = 0;
246 | var numSent = 0;
247 |
248 | fakeClient.consume = function(size, cb) {
249 | if (numSent < numMessages) {
250 | numSent++;
251 | setImmediate(function() {
252 | cb(null, [{
253 | value: Buffer.from('test'),
254 | offset: 1
255 | }]);
256 | });
257 | }
258 | // once all messages are sent, stop invoking the callback
259 | };
260 |
261 | var stream = new KafkaConsumerStream(fakeClient, {
262 | topics: 'topic'
263 | });
264 | stream.on('error', function(err) {
265 | // Ignore
266 | });
267 | stream.on('readable', function() {
268 | var message = stream.read();
269 | numReceived++;
270 | if (message) {
271 | t.ok(Buffer.isBuffer(message.value));
272 | t.equal(typeof message.offset, 'number');
273 | if (numReceived === numMessages) {
274 | // give it a second to get an error
275 | fakeClient.emit('unsubscribed');
276 | }
277 | }
278 | });
279 |
280 | stream.on('end', function() {
281 | next();
282 | });
283 | },
284 |
285 | 'calls the callback on destroy': function (next) {
286 |
287 | fakeClient.unsubscribe = function () {};
288 | var stream = new KafkaConsumerStream(fakeClient, {
289 | topics: 'topic'
290 | });
291 | stream.once('readable', function () {
292 | stream.destroy();
293 | stream.once('close', next);
294 | });
295 |
296 | },
297 | }
298 | };
299 |
--------------------------------------------------------------------------------
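The fake client above stands in for a real consumer; in normal use the stream comes from KafkaConsumer.createReadStream, which accepts the same topics and streamAsBatch options exercised in these tests. A minimal sketch, assuming a reachable broker:

    var Kafka = require('node-rdkafka');

    var stream = Kafka.KafkaConsumer.createReadStream({
      'group.id': 'kafka-mocha-grp',
      'metadata.broker.list': 'localhost:9092'
    }, {}, {
      topics: ['awesome-topic'],
      streamAsBatch: false  // true yields arrays of messages, as tested above
    });

    stream.on('data', function(message) {
      console.log(message.value.toString());
    });
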
/test/kafka-consumer.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var KafkaConsumer = require('../lib/kafka-consumer');
11 | var t = require('assert');
12 |
13 | var client;
14 | var defaultConfig = {
15 | 'client.id': 'kafka-mocha',
16 | 'group.id': 'kafka-mocha-grp',
17 | 'metadata.broker.list': 'localhost:9092'
18 | };
19 | var topicConfig = {};
20 |
21 | module.exports = {
22 | 'KafkaConsumer client': {
23 | 'beforeEach': function() {
24 | client = new KafkaConsumer(defaultConfig, topicConfig);
25 | },
26 | 'afterEach': function() {
27 | client = null;
28 | },
29 | 'does not modify config and clones it': function () {
30 | t.deepStrictEqual(defaultConfig, {
31 | 'client.id': 'kafka-mocha',
32 | 'group.id': 'kafka-mocha-grp',
33 | 'metadata.broker.list': 'localhost:9092'
34 | });
35 | t.deepStrictEqual(client.globalConfig, {
36 | 'client.id': 'kafka-mocha',
37 | 'group.id': 'kafka-mocha-grp',
38 | 'metadata.broker.list': 'localhost:9092'
39 | });
40 | t.notEqual(defaultConfig, client.globalConfig);
41 | },
42 | 'does not modify topic config and clones it': function () {
43 | t.deepStrictEqual(topicConfig, {});
44 | t.deepStrictEqual(client.topicConfig, {});
45 | t.notEqual(topicConfig, client.topicConfig);
46 | },
47 | },
48 | };
49 |
--------------------------------------------------------------------------------
/test/mocha.opts:
--------------------------------------------------------------------------------
1 | --ui exports
2 |
--------------------------------------------------------------------------------
/test/mock.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | 'use strict';
11 |
12 | var net = require('net');
13 | var util = require('util');
14 | var Emitter = require('events');
15 |
16 | function KafkaServer(config) {
17 | if (!(this instanceof KafkaServer)) {
18 | return new KafkaServer(config);
19 | }
20 |
21 | if (config === undefined) {
22 | config = {};
23 | } else if (typeof config !== 'object') {
24 | throw new TypeError('"config" must be an object');
25 | }
26 |
27 | Emitter.call(this);
28 |
29 | var self = this;
30 |
31 | this.socket = net.createServer(function(socket) {
32 | socket.end();
33 | }); //.unref();
34 |
35 | this.socket.on('error', function(err) {
36 | console.error(err);
37 | });
38 |
39 | this.socket.listen({
40 | port: 9092,
41 | host: 'localhost'
42 | }, function() {
43 | self.address = self.socket.address();
44 | self.emit('ready');
45 | });
46 |
47 | }
48 |
49 | util.inherits(KafkaServer, Emitter);
50 |
51 | KafkaServer.prototype.close = function(cb) {
52 | this.socket.close(cb);
53 | };
54 |
55 | module.exports = KafkaServer;
56 |
--------------------------------------------------------------------------------
/test/producer.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var Producer = require('../lib/producer');
11 | var t = require('assert');
12 | // var Mock = require('./mock');
13 |
14 | var client;
15 | var defaultConfig = {
16 | 'client.id': 'kafka-mocha',
17 | 'metadata.broker.list': 'localhost:9092',
18 | 'socket.timeout.ms': 250
19 | };
20 | var topicConfig = {};
21 |
22 | var server;
23 |
24 | module.exports = {
25 | 'Producer client': {
26 | 'beforeEach': function() {
27 | client = new Producer(defaultConfig, topicConfig);
28 | },
29 | 'afterEach': function() {
30 | client = null;
31 | },
32 | 'is an object': function() {
33 | t.equal(typeof(client), 'object');
34 | },
35 | 'requires configuration': function() {
36 | t.throws(function() {
37 | return new Producer();
38 | });
39 | },
40 | 'has necessary methods from superclass': function() {
41 | var methods = ['connect', 'disconnect', 'getMetadata'];
42 | methods.forEach(function(m) {
43 | t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
44 | });
45 | },
46 | 'has "_disconnect" override': function() {
47 | t.equal(typeof(client._disconnect), 'function', 'Producer is missing base _disconnect method');
48 | },
49 | 'does not modify config and clones it': function () {
50 | t.deepStrictEqual(defaultConfig, {
51 | 'client.id': 'kafka-mocha',
52 | 'metadata.broker.list': 'localhost:9092',
53 | 'socket.timeout.ms': 250
54 | });
55 | t.deepStrictEqual(client.globalConfig, {
56 | 'client.id': 'kafka-mocha',
57 | 'metadata.broker.list': 'localhost:9092',
58 | 'socket.timeout.ms': 250
59 | });
60 | t.notEqual(defaultConfig, client.globalConfig);
61 | },
62 | 'does not modify topic config and clones it': function () {
63 | t.deepStrictEqual(topicConfig, {});
64 | t.deepStrictEqual(client.topicConfig, {});
65 | t.notEqual(topicConfig, client.topicConfig);
66 | },
67 | 'disconnect method': {
68 | 'calls flush before it runs': function(next) {
69 | var providedTimeout = 1;
70 |
71 | client.flush = function(timeout, cb) {
72 | t.equal(providedTimeout, timeout, 'Timeouts do not match');
73 | t.equal(typeof(cb), 'function');
74 | setImmediate(cb);
75 | };
76 |
77 | client._disconnect = function(cb) {
78 | setImmediate(cb);
79 | };
80 |
81 | client.disconnect(providedTimeout, next);
82 | },
83 | 'provides a default timeout when none is provided': function(next) {
84 | client.flush = function(timeout, cb) {
85 | t.notEqual(timeout, undefined);
86 | t.notEqual(timeout, null);
87 | t.notEqual(timeout, 0);
88 | t.equal(typeof(cb), 'function');
89 | setImmediate(cb);
90 | };
91 |
92 | client._disconnect = function(cb) {
93 | setImmediate(cb);
94 | };
95 |
96 | client.disconnect(next);
97 | }
98 | }
99 | },
100 | };
101 |
--------------------------------------------------------------------------------
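As the disconnect tests above assert, Producer#disconnect runs flush first (falling back to a default timeout when none is given) and only then tears down the connection. A minimal sketch of the call shape, assuming a reachable broker:

    var Kafka = require('node-rdkafka');

    var producer = new Kafka.Producer({
      'metadata.broker.list': 'localhost:9092'
    }, {});

    producer.connect();
    producer.on('ready', function() {
      // flush(1000, ...) completes before the underlying _disconnect runs
      producer.disconnect(1000, function() {
        console.log('flushed and disconnected');
      });
    });
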
/test/tools/ref-counter.spec.js:
--------------------------------------------------------------------------------
1 | var t = require('assert');
2 | var RefCounter = require('../../lib/tools/ref-counter');
3 |
4 | function noop() {}
5 |
6 | module.exports = {
7 | 'RefCounter': {
8 | 'is an object': function() {
9 | t.equal(typeof(RefCounter), 'function');
10 | },
11 | 'should become active when incremented': function(next) {
12 | var refCounter = new RefCounter(function() { next(); }, noop);
13 |
14 | refCounter.increment();
15 | },
16 | 'should become inactive when incremented and decremented': function(next) {
17 | var refCounter = new RefCounter(noop, function() { next(); });
18 |
19 | refCounter.increment();
20 | setImmediate(function() {
21 | refCounter.decrement();
22 | });
23 | },
24 | 'should support multiple accesses': function(next) {
25 | var refCounter = new RefCounter(noop, function() { next(); });
26 |
27 | refCounter.increment();
28 | refCounter.increment();
29 | refCounter.decrement();
30 | setImmediate(function() {
31 | refCounter.decrement();
32 | });
33 | },
34 | 'should be reusable': function(next) {
35 | var numActives = 0;
36 | var numPassives = 0;
37 | var refCounter = new RefCounter(function() {
38 | numActives += 1;
39 | }, function() {
40 | numPassives += 1;
41 |
42 | if (numActives === 2 && numPassives === 2) {
43 | next();
44 | }
45 | });
46 |
47 | refCounter.increment();
48 | refCounter.decrement();
49 | refCounter.increment();
50 | refCounter.decrement();
51 | }
52 | }
53 | };
54 |
--------------------------------------------------------------------------------
/test/topic-partition.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var TopicPartition = require('../lib/topic-partition');
11 | var Topic = require('../lib/topic');
12 |
13 | var t = require('assert');
14 |
15 | module.exports = {
16 | 'TopicPartition': {
17 | 'is a function': function() {
18 | t.equal(typeof(TopicPartition), 'function');
19 | },
20 | 'be constructable': function() {
21 | var toppar = new TopicPartition('topic', 1, 0);
22 |
23 | t.equal(toppar.topic, 'topic');
24 | t.equal(toppar.offset, 0);
25 | t.equal(toppar.partition, 1);
26 | },
27 | 'be creatable using 0 as the partition': function() {
28 | var toppar = new TopicPartition('topic', 0, 0);
29 |
30 | t.equal(toppar.topic, 'topic');
31 | t.equal(toppar.offset, 0);
32 | t.equal(toppar.partition, 0);
33 | },
34 | 'throw if partition is null or undefined': function() {
35 | t.throws(function() {
36 | var tp = new TopicPartition('topic', undefined, 0);
37 | });
38 |
39 | t.throws(function() {
40 | var tp = new TopicPartition('topic', null, 0);
41 | });
42 | },
43 | 'sets offset to stored by default': function() {
44 | var toppar = new TopicPartition('topic', 1);
45 |
46 | t.equal(toppar.topic, 'topic');
47 | t.equal(toppar.partition, 1);
48 | t.equal(toppar.offset, Topic.OFFSET_STORED);
49 | },
50 | 'sets offset to end if "end" is provided': function() {
51 | var toppar = new TopicPartition('topic', 1, 'end');
52 |
53 | t.equal(toppar.topic, 'topic');
54 | t.equal(toppar.partition, 1);
55 | t.equal(toppar.offset, Topic.OFFSET_END);
56 | },
57 | 'sets offset to end if "latest" is provided': function() {
58 | var toppar = new TopicPartition('topic', 1, 'latest');
59 |
60 | t.equal(toppar.topic, 'topic');
61 | t.equal(toppar.partition, 1);
62 | t.equal(toppar.offset, Topic.OFFSET_END);
63 | },
64 | 'sets offset to beginning if "beginning" is provided': function() {
65 | var toppar = new TopicPartition('topic', 1, 'beginning');
66 |
67 | t.equal(toppar.topic, 'topic');
68 | t.equal(toppar.partition, 1);
69 | t.equal(toppar.offset, Topic.OFFSET_BEGINNING);
70 | },
71 | 'sets offset to start if "beginning" is provided': function() {
72 | var toppar = new TopicPartition('topic', 1, 'beginning');
73 |
74 | t.equal(toppar.topic, 'topic');
75 | t.equal(toppar.partition, 1);
76 | t.equal(toppar.offset, Topic.OFFSET_BEGINNING);
77 | },
78 | 'sets offset to stored if "stored" is provided': function() {
79 | var toppar = new TopicPartition('topic', 1, 'stored');
80 |
81 | t.equal(toppar.topic, 'topic');
82 | t.equal(toppar.partition, 1);
83 | t.equal(toppar.offset, Topic.OFFSET_STORED);
84 | },
85 | 'throws when an invalid special offset is provided': function() {
86 | t.throws(function() {
87 | var toppar = new TopicPartition('topic', 1, 'fake');
88 | });
89 | }
90 | },
91 | 'TopicPartition.map': {
92 | 'is a function': function() {
93 | t.equal(typeof(TopicPartition.map), 'function');
94 | },
95 | 'converts offsets inside the array': function() {
96 | var result = TopicPartition.map([{ topic: 'topic', partition: 1, offset: 'stored' }]);
97 | var toppar = result[0];
98 |
99 | t.equal(toppar.topic, 'topic');
100 | t.equal(toppar.partition, 1);
101 | t.equal(toppar.offset, Topic.OFFSET_STORED);
102 | },
103 | },
104 | };
105 |
--------------------------------------------------------------------------------
/test/util.spec.js:
--------------------------------------------------------------------------------
1 | /*
2 | * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
3 | *
4 | * Copyright (c) 2016 Blizzard Entertainment
5 | *
6 | * This software may be modified and distributed under the terms
7 | * of the MIT license. See the LICENSE.txt file for details.
8 | */
9 |
10 | var shallowCopy = require('../lib/util').shallowCopy;
11 | var t = require('assert');
12 |
13 | module.exports = {
14 | 'shallowCopy utility': {
15 | 'returns value itself when it is not an object': function () {
16 | t.strictEqual(10, shallowCopy(10));
17 | t.strictEqual('str', shallowCopy('str'));
18 | t.strictEqual(null, shallowCopy(null));
19 | t.strictEqual(undefined, shallowCopy(undefined));
20 | t.strictEqual(false, shallowCopy(false));
21 | },
22 | 'returns shallow copy of the passed object': function () {
23 | var obj = {
24 | sub: { a: 10 },
25 | b: 'str',
26 | };
27 | var copy = shallowCopy(obj);
28 |
29 | t.notEqual(obj, copy);
30 | t.deepStrictEqual(obj, copy);
31 | t.equal(obj.sub, copy.sub);
32 | },
33 | 'does not copy non-enumerable and inherited properties': function () {
34 | var obj = Object.create({
35 | a: 10,
36 | }, {
37 | b: { value: 'str' },
38 | c: { value: true, enumerable: true },
39 | });
40 | var copy = shallowCopy(obj);
41 |
42 | t.notEqual(obj, copy);
43 | t.deepStrictEqual(copy, { c: true });
44 | },
45 | },
46 | };
47 |
--------------------------------------------------------------------------------
/util/configure.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var query = process.argv[2];
4 |
5 | var fs = require('fs');
6 | var path = require('path');
7 |
8 | var baseDir = path.resolve(__dirname, '../');
9 | var releaseDir = path.join(baseDir, 'build', 'deps');
10 |
11 | var isWin = /^win/.test(process.platform);
12 |
13 | // Skip running this if we are running on a windows system
14 | if (isWin) {
15 | process.stderr.write('Skipping run because we are on windows\n');
16 | process.exit(0);
17 | }
18 |
19 | var childProcess = require('child_process');
20 |
21 | try {
22 | childProcess.execSync('./configure --prefix=' + releaseDir + ' --libdir=' + releaseDir, {
23 | cwd: baseDir,
24 | stdio: [0,1,2]
25 | });
26 | process.exit(0);
27 | } catch (e) {
28 | process.stderr.write(e.message + '\n');
29 | process.exit(1);
30 | }
31 |
--------------------------------------------------------------------------------
/util/get-env.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var env = process.argv[2];
4 | var def = process.argv[3] || '';
5 |
6 | process.stdout.write(process.env[env] || def);
7 |
--------------------------------------------------------------------------------
/util/test-compile.js:
--------------------------------------------------------------------------------
1 | var kafka = require('../lib');
2 |
3 | var p = new kafka.Producer({ 'bootstrap.servers': 'localhost:9092' }, {});
4 |
5 | p.connect({ timeout: 1000 }, function(err) {
6 | if (!err) {
7 | p.disconnect();
8 | } else {
9 | process.exit(0); // a connect failure is fine; we only verify the addon loads
10 | }
11 | });
12 |
--------------------------------------------------------------------------------
/util/test-producer-delivery.js:
--------------------------------------------------------------------------------
1 | const Kafka = require("../lib/index.js");
2 |
3 | const wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
4 |
5 | const sendData = async (producer, totalMessages) => {
6 | const topic = "node";
7 | const msg = "dkfljaskldfajkldsjfklasdjfalk;dsjfkl;asjfdskl;fjda;lkfjsdklfsajlkfjdsklfajsklfjsklanklsalkjkljkasfak";
8 | const buffer = Buffer.from(msg);
9 | const key = "test";
10 | for (let n = 0; n < totalMessages; ++n) {
11 | let bufferIsFull = false;
12 | do {
13 | bufferIsFull = false;
14 | try {
15 | producer.produce(topic, -1, buffer, key, null, n);
16 | }
17 | catch (error) {
18 | // With this config and message count, the local queue fills and this branch runs
19 | if (error.code === Kafka.CODES.ERRORS.ERR__QUEUE_FULL) {
20 | producer.poll();
21 | // The wait introduces 11-12 seconds of latency when dr_cb is true
22 | const start = process.hrtime();
23 | await wait(50);
24 | const latency = process.hrtime(start);
25 | console.info(`Wait took ${latency[0]} seconds`);
26 | bufferIsFull = true;
27 | } else {
28 | throw error;
29 | }
30 | }
31 | } while (bufferIsFull);
32 | }
33 | console.log("Finished producing");
34 | };
35 |
36 | const verifyReports = async (reports, reportsComplete, totalMessages) => {
37 | const reportsTimeout = new Promise((resolve, reject) => {
38 | setTimeout(() => {
39 | reject("Delivery report timed out");
40 | }, 10000);
41 | });
42 | await Promise.race([reportsComplete, reportsTimeout]);
43 | await wait(500); // wait for some more delivery reports.
44 | if (reports.length === totalMessages) {
45 | console.log("Reports count match");
46 | } else {
47 | console.error("Reports count doesn't match");
48 | return;
49 | }
50 | for(let n = 0; n < totalMessages; ++n) {
51 | if(reports[n].opaque !== n) {
52 | console.error("Expect message number does not match");
53 | }
54 | }
55 | };
56 |
57 | const run = async () => {
58 | const reports = [];
59 | const totalMessages = 1000100;
60 | const producer = new Kafka.Producer({
61 | "batch.num.messages": 50000,
62 | "compression.codec": "lz4",
63 | "delivery.report.only.error": false,
64 | "dr_cb": true,
65 | "metadata.broker.list": "localhost:9092",
66 | "message.send.max.retries": 10000000,
67 | "queue.buffering.max.kbytes": 2000000,
68 | "queue.buffering.max.messages": 1000000,
69 | "queue.buffering.max.ms": 0,
70 | "socket.keepalive.enable": true,
71 | }, {});
72 |
73 | producer.setPollInterval(100);
74 | producer.on("event.log", (obj) => console.log(obj));
75 | const reportsComplete = new Promise((resolve) => {
76 | producer.on("delivery-report", (err, report) => {
77 | reports.push(report);
78 | if(reports.length === totalMessages) {
79 | resolve();
80 | }
81 | });
82 | });
83 |
84 | const readyPromise = new Promise((resolve) => {
85 | producer.on("ready", async () => {
86 | console.log("Producer is ready");
87 | resolve();
88 | });
89 | producer.connect();
90 | });
91 | await readyPromise;
92 |
93 | await sendData(producer, totalMessages);
94 | await verifyReports(reports, reportsComplete, totalMessages);
95 | process.exit(0);
96 | };
97 |
98 | run().catch((err) => {
99 | console.error(err);
100 | });
101 |
--------------------------------------------------------------------------------
/win_install.ps1:
--------------------------------------------------------------------------------
1 | choco install openssl.light
2 | choco install make
3 |
--------------------------------------------------------------------------------