├── .editorconfig
├── .eslintignore
├── .eslintrc.json
├── .github
│   └── ISSUE_TEMPLATE.md
├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── docker-compose.yml
├── docker
│   ├── certs
│   │   ├── ca-cert
│   │   ├── ca-cert.srl
│   │   ├── ca-key
│   │   ├── cert-file
│   │   ├── cert-signed
│   │   ├── client.truststore.jks
│   │   ├── server.keystore.jks
│   │   └── server.truststore.jks
│   ├── createTopic.js
│   ├── docker-compose.0.10.yml
│   ├── docker-compose.0.11.yml
│   ├── docker-compose.0.9.yml
│   ├── docker-compose.1.0.yml
│   ├── docker-compose.1.1.yml
│   ├── docker-compose.2.0.yml
│   ├── sasl
│   │   └── sasl.conf
│   └── start-kafka.sh
├── example
│   ├── consumer.js
│   ├── consumerGroupMember.js
│   ├── high-level-producer.js
│   ├── offset.js
│   ├── producer.js
│   └── streaming.js
├── kafka.js
├── lib
│   ├── admin.js
│   ├── assignment
│   │   ├── index.js
│   │   ├── range.js
│   │   └── roundrobin.js
│   ├── baseClient.js
│   ├── baseProducer.js
│   ├── batch
│   │   └── KafkaBuffer.js
│   ├── codec
│   │   ├── index.js
│   │   └── snappy.js
│   ├── commitStream.js
│   ├── consumer.js
│   ├── consumerGroup.js
│   ├── consumerGroupHeartbeat.js
│   ├── consumerGroupRecovery.js
│   ├── consumerGroupStream.js
│   ├── consumerStream.js
│   ├── errors
│   │   ├── ApiNotSupportedError.js
│   │   ├── BrokerNotAvailableError.js
│   │   ├── FailedToRebalanceConsumerError.js
│   │   ├── FailedToRegisterConsumerError.js
│   │   ├── GroupCoordinatorNotAvailableError.js
│   │   ├── GroupLoadInProgressError.js
│   │   ├── HeartbeatTimeoutError.js
│   │   ├── IllegalGenerationError.js
│   │   ├── InvalidConfigError.js
│   │   ├── InvalidConsumerOffsetError.js
│   │   ├── InvalidRequestError.js
│   │   ├── MessageSizeTooLargeError.js
│   │   ├── NotControllerError.js
│   │   ├── NotCoordinatorForGroupError.js
│   │   ├── RebalanceInProgressError.js
│   │   ├── SaslAuthenticationError.js
│   │   ├── TimeoutError.js
│   │   ├── TopicsNotExistError.js
│   │   ├── UnknownMemberIdError.js
│   │   └── index.js
│   ├── highLevelProducer.js
│   ├── kafkaClient.js
│   ├── logging.js
│   ├── offset.js
│   ├── partitioner.js
│   ├── producer.js
│   ├── producerStream.js
│   ├── protocol
│   │   ├── index.js
│   │   ├── protocol.js
│   │   ├── protocolVersions.js
│   │   └── protocol_struct.js
│   ├── resources
│   │   └── index.js
│   ├── utils.js
│   └── wrapper
│       ├── BrokerReadable.js
│       ├── BrokerTransform.js
│       └── BrokerWrapper.js
├── logging.js
├── package.json
├── run-tests.sh
├── start-docker.sh
├── test
│   ├── .eslintrc.json
│   ├── assignment
│   │   ├── test.range.js
│   │   └── test.roundrobin.js
│   ├── helpers
│   │   ├── Childrearer.js
│   │   ├── EventCounter.js
│   │   ├── child-cg-kafka-client.js
│   │   ├── sendMessage.js
│   │   └── sendMessageEach.js
│   ├── manual.gracefulexit.js
│   ├── mocha.opts
│   ├── mocks
│   │   ├── mockClient.js
│   │   └── mockSocket.js
│   ├── test.admin.js
│   ├── test.baseProducer.js
│   ├── test.consumer.js
│   ├── test.consumerGroup.js
│   ├── test.consumerGroupHeartbeat.js
│   ├── test.consumerGroupRecovery.js
│   ├── test.consumerGroupStream.js
│   ├── test.consumerStream.js
│   ├── test.errors.js
│   ├── test.highlevelProducer.js
│   ├── test.kafka-node.js
│   ├── test.kafkaClient.js
│   ├── test.logging.js
│   ├── test.offset.js
│   ├── test.partitioner.js
│   ├── test.producer.js
│   ├── test.producerBatch.js
│   ├── test.producerStream.js
│   ├── test.protocol.js
│   └── test.rebalance.js
└── types
    ├── index.d.ts
    ├── kafka-node-tests.ts
    ├── tsconfig.json
    └── tslint.json
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | indent_style = space
5 | indent_size = 2
6 | end_of_line = lf
7 | charset = utf-8
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 |
11 | [*.md]
12 | trim_trailing_whitespace = false
13 |
--------------------------------------------------------------------------------
/.eslintignore:
--------------------------------------------------------------------------------
1 | coverage
2 | types
3 |
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "semistandard",
3 | "env": {
4 | "node": true
5 | },
6 | "plugins": [
7 | "dependencies"
8 | ],
9 | "rules": {
10 | "dependencies/case-sensitive": 1
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Questions?
2 |
3 | * Check the [Troubleshooting / FAQ](https://github.com/SOHU-Co/kafka-node#troubleshooting--faq)?
4 | * Search the [issues](https://github.com/SOHU-Co/kafka-node/issues)?
5 |
6 | ## Bug Report
7 |
8 | ### Environment
9 |
10 | * Node version:
11 | * Kafka-node version:
12 | * Kafka version:
13 |
14 | ### For specific cases also provide
15 | * Number of Brokers:
16 | * Number of partitions for topic:
17 |
18 | ### Include Sample Code to reproduce behavior
19 |
20 | ```js
21 | // include code here
22 | ```
23 |
24 | ### Include output with [Debug](https://github.com/SOHU-Co/kafka-node#how-do-i-debug-an-issue) turned on
25 |
26 |
27 | Thanks for your contribution!
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | *.swp
3 | *.log
4 | *.swo
5 | test/data.txt
6 | docs
7 | .idea
8 | coverage
9 | .DS_Store
10 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | cache:
3 | directories:
4 | - node_modules
5 | node_js:
6 | - "12"
7 | - "10"
8 | - "8"
9 | sudo: required
10 | services:
11 | - docker
12 | env:
13 | matrix:
14 | - KAFKA_VERSION=0.9
15 | - KAFKA_VERSION=0.10
16 | - KAFKA_VERSION=0.11
17 | - KAFKA_VERSION=1.0
18 | - KAFKA_VERSION=1.1
19 | - KAFKA_VERSION=2.0
20 | global:
21 | # - DEBUG=kafka-node:*
22 | - KAFKA_ADVERTISED_HOST_NAME=127.0.0.1
23 | before_install:
24 | - sudo apt-get update
25 | - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
26 | - docker-compose -f docker-compose.yml -f docker/docker-compose.${KAFKA_VERSION}.yml up -d
27 | before_script:
28 | - npm prune
29 | after_failure:
30 | - docker ps
31 | - docker-compose logs
32 | after_success:
33 | - cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js
34 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to contribute
2 |
3 | All patches and feature improvements are welcome.
4 |
5 | ## Getting Started
6 |
7 | * Make sure you have a [GitHub account](https://github.com/signup/free)
8 | * Submit a ticket for your issue, assuming one does not already exist.
9 | * Clearly describe the issue including steps to reproduce when it is a bug.
10 | * Make sure you fill in the earliest version that you know has the issue.
11 | * Fork the repository on GitHub
12 |
13 | ## Making Changes
14 |
15 | * Create a topic branch from where you want to base your work
16 | (This is usually the master branch on your forked project).
17 | * Make commits of logical units.
18 | * Check for unnecessary whitespace with `git diff --check` before committing.
19 | * The code style of the current code base should be preserved.
20 | * Make sure you have added the necessary tests for your changes, especially if
21 | you added a new feature.
22 | * Run _all_ the tests to ensure nothing else was accidentally broken.
23 |
24 | ## Submitting Changes
25 |
26 | * Push your changes to a topic branch in your fork of the repository.
27 | * Submit a pull request to the repository.
28 | * Make sure that the PR has a clean commit message, and don't hesitate to squash
29 | and rebase your commits in order to keep the history clean.
30 |
31 | ## Code reviewers
32 |
33 | * For small fixes, one can merge the PR directly.
34 | * For new features or big changes to the current code base, at least two
35 | collaborators should LGTM before merging.
36 | * Rebasing instead of merging is recommended, to avoid those "Merge ..." commits
37 | (see https://github.com/blog/2141-squash-your-commits).
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2015 sohu.com
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | this software and associated documentation files (the "Software"), to deal in
5 | the Software without restriction, including without limitation the rights to
6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
7 | of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | zookeeper:
4 | image: jplock/zookeeper
5 | kafka:
6 | image: wurstmeister/kafka:latest
7 | ports:
8 | - "9092:9092"
9 | - "9093:9093"
10 | - "9094:9094"
11 | depends_on:
12 | - zookeeper
13 | environment:
14 | KAFKA_ADVERTISED_HOST_NAME: ${KAFKA_ADVERTISED_HOST_NAME}
15 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093"
17 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9092,SSL://${KAFKA_ADVERTISED_HOST_NAME}:9093"
18 | KAFKA_SSL_KEYSTORE_LOCATION: "/var/private/ssl/certs/server.keystore.jks"
19 | KAFKA_SSL_KEYSTORE_PASSWORD: "password"
20 | KAFKA_SSL_KEY_PASSWORD: "password"
21 | KAFKA_SSL_TRUSTSTORE_LOCATION: "/var/private/ssl/certs/server.truststore.jks"
22 | KAFKA_SSL_TRUSTSTORE_PASSWORD: "password"
23 | KAFKA_CREATE_TOPICS: "DuplicateMessageTest:1:1,RebalanceTopic:3:1,ExampleTopic:1:1,RebalanceTest:1:1"
24 | volumes:
25 | - /var/run/docker.sock:/var/run/docker.sock
26 | - ./docker/certs:/var/private/ssl/certs
27 | - ./docker/sasl:/var/private/sasl
28 |
--------------------------------------------------------------------------------
/docker/certs/ca-cert:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICsDCCAhmgAwIBAgIJAIyGnMEdl6tCMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
3 | BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
4 | aWRnaXRzIFB0eSBMdGQwHhcNMTYwNjIxMTQxMDA5WhcNNDMxMTA2MTQxMDA5WjBF
5 | MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
6 | ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
7 | gQCebmAuz/u/vaGmm+rd+bSu4xDqWOD0nTlHgVcPps1mGNy8I+Je5ft4r8UXN6B4
8 | 65rmJgXztg/HuBKU8mRO1VjwH7huA7NwsO+ve6i1eYQYBq3MtfxRj9gH4tE76147
9 | ET5FoFc8xn/2K09Lc/W5zhJHLeUDlYKP/SUiw5dFqKURMQIDAQABo4GnMIGkMB0G
10 | A1UdDgQWBBTOi9ZSqUCykQrG0fwkA91MsC8HDTB1BgNVHSMEbjBsgBTOi9ZSqUCy
11 | kQrG0fwkA91MsC8HDaFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUt
12 | U3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJAIyGnMEd
13 | l6tCMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEATpA/7+o740mEIVYY
14 | xnZ3QR0TpW4SEudRBKoCH+obaSJH5LVxy59VohV8JeA0yQTRwn2SF1WQ5ZC5EuVO
15 | daAfUwaYWwkGXrzS30KDuj5ospv6kUaIoSnHZvulFPQbqamO5mTsvoCZxKM79Ttz
16 | EoM/vfy/Dehln4zt7ti1YdcU8ZM=
17 | -----END CERTIFICATE-----
18 |
--------------------------------------------------------------------------------
/docker/certs/ca-cert.srl:
--------------------------------------------------------------------------------
1 | 839412E1B55B2B15
2 |
--------------------------------------------------------------------------------
/docker/certs/ca-key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | Proc-Type: 4,ENCRYPTED
3 | DEK-Info: DES-EDE3-CBC,6641B585EC61FD26
4 |
5 | x6SWVdRDkrpsjLFwacn5vq1dOQwkwFvuGlJ9X1lf/lMYa//JUZtDRMi2Wea+WYhj
6 | V31UEECgZHZxbzp74No5e4Bh05hN5id00Js0MRHMYDB3S0Hs7y75SQoAlqBqsGaC
7 | VQX42xrNSzLW8jSvGxa+oX4YV54zdRdhmaNzEoWQ+qXMMdGezR1dRp6SGpwBnMv3
8 | UXlSYP+v6EFeGiUxhqatM9Sm6oXAcg71bJrc8VmHz6IPOeXm9+kRKZze/lQYmH0e
9 | m1V4H46+kVBWGiStNiQa5IgJyDd2vAVO/p+/v3bbm9+U7I7LAW09NSpTVgTCb0Vv
10 | RXEyBiBT1P48Aa9zNzk+DCOsSugdOjDj6HqJHs0mqC8D9wNuRmIEZm3D6SkZGf7C
11 | XXJF0fDlt3pDXwOyJEC96SEFoYOef4h7Z0IEfAcRSOkhgrywZg9IzrAHbCbsX5Ij
12 | /8NgGAaKDhef0p0niAXT+KmNV4AO8KHQKBVTga1jxcnCQEhBD4J8RqS7B6WQ+GqB
13 | IE/fgV41JRyGOO6Hw/Y/0hjKbmYJTC9sjoUJ0a9QdVK7gZxGpd4Svo2XyVy2PfdR
14 | DDVF4UJYKLaEWxXvUK1p7Dx+n2dqmkCoxJzomk0hXUfk/DtfeFD5++6STtAQJVgb
15 | QUPWC3Enfmf79Fv8tbHPM7KClsd6Gc3pPLHC7UPIc3/Pgip5jrEFu8Jgm4kB2dJy
16 | RQO/MB6LtU3nexdWaI6VDD3eTmrSNyRK/lCWLy8QHKK95+/xNmnZKfeXSm0D6ErR
17 | Tm6nomuO0YleeWImoLzvA7WKXjwpqaUTzHnpZEsktYRAzUOIiG4ZlA==
18 | -----END RSA PRIVATE KEY-----
19 |
--------------------------------------------------------------------------------
/docker/certs/cert-file:
--------------------------------------------------------------------------------
1 | -----BEGIN NEW CERTIFICATE REQUEST-----
2 | MIICoDCCAl4CAQAwbDEQMA4GA1UEBhMHVW5rbm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UE
3 | BxMHVW5rbm93bjEQMA4GA1UEChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMH
4 | VW5rbm93bjCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD
5 | Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+
6 | t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/IiAxmd0UgBxwIVAJdgUI8VIwvM
7 | spK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/m
8 | cQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk
9 | 8b6oUZCJqIPf4VrlnwaSi2ZegHtVJWQBTDv+z0kqA4GEAAKBgDtgBe2nMFIxCeMGzzyiqyYxDWbp
10 | BgQgsowJG3O1gq7grLDgokoo5nFd4YwoSGTFapnGKjU46y4jj7jkhMDn2oj9ufmxBoikUN0y39q0
11 | IdFiB/6y4eIXe1p61FVtKaFXn+RZWARRlFW3lTSOaLqnplRuqMGtw75M9y1c1k+I0oheoDAwLgYJ
12 | KoZIhvcNAQkOMSEwHzAdBgNVHQ4EFgQUSH9fbKplQZ0gqwB0h3Kk0vC442wwCwYHKoZIzjgEAwUA
13 | Ay8AMCwCFGHc9tgI+oUoFuuTnyQHp4EH14UFAhRhUfVRQdo4jBA/h9I5wVsxdfZSPw==
14 | -----END NEW CERTIFICATE REQUEST-----
15 |
--------------------------------------------------------------------------------
/docker/certs/cert-signed:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDQTCCAqoCCQCDlBLhtVsrFTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
3 | VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
4 | cyBQdHkgTHRkMB4XDTE2MDYyMTE0MTIyN1oXDTQzMTEwNjE0MTIyN1owbDEQMA4G
5 | A1UEBhMHVW5rbm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93
6 | bjEQMA4GA1UEChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMH
7 | VW5rbm93bjCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn
8 | 9hG3UjzvRADDHj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3
9 | a5lQpaSfn+gEexAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1
10 | 864rYdcq7/IiAxmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXW
11 | mz3ey7yrXDa4V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hM
12 | KBYTt88JMozIpuE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6o
13 | UZCJqIPf4VrlnwaSi2ZegHtVJWQBTDv+z0kqA4GEAAKBgDtgBe2nMFIxCeMGzzyi
14 | qyYxDWbpBgQgsowJG3O1gq7grLDgokoo5nFd4YwoSGTFapnGKjU46y4jj7jkhMDn
15 | 2oj9ufmxBoikUN0y39q0IdFiB/6y4eIXe1p61FVtKaFXn+RZWARRlFW3lTSOaLqn
16 | plRuqMGtw75M9y1c1k+I0oheMA0GCSqGSIb3DQEBBQUAA4GBAGAK0H3OrcCe5K7c
17 | yz9B/FBk40kjXhy0u2acVxaWh3+1avanzabo2NgbKv3q+Bkg6kNNzFAk5f2gbll2
18 | zT1lHZcaiQcpgHw7z1zuQ2OTrSUcpXYZXEqoJufgiBJkRZLOoRXQFodN7I8MLKHU
19 | m9cl0wJGujKuydSiZJ20h/ecpwuS
20 | -----END CERTIFICATE-----
21 |
--------------------------------------------------------------------------------
/docker/certs/client.truststore.jks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SOHU-Co/kafka-node/fcc8aefc7798f6e4b79d9b5c31f047c1933329e6/docker/certs/client.truststore.jks
--------------------------------------------------------------------------------
/docker/certs/server.keystore.jks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SOHU-Co/kafka-node/fcc8aefc7798f6e4b79d9b5c31f047c1933329e6/docker/certs/server.keystore.jks
--------------------------------------------------------------------------------
/docker/certs/server.truststore.jks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SOHU-Co/kafka-node/fcc8aefc7798f6e4b79d9b5c31f047c1933329e6/docker/certs/server.truststore.jks
--------------------------------------------------------------------------------
/docker/createTopic.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const execa = require('execa');
4 | const assert = require('assert');
5 |
6 | function createTopic (topicName, partitions, replicas, config = '') {
7 | assert(topicName);
8 | assert(partitions && partitions > 0);
9 | assert(replicas && replicas > 0);
10 |
11 | const args = ['exec', '-T', 'kafka', 'bash', '-c'];
12 |
13 | if (process.env.KAFKA_VERSION === '0.9') {
14 | const topic = `${topicName}:${partitions}:${replicas}`;
15 | args.push(`KAFKA_CREATE_TOPICS=${topic} KAFKA_PORT=9092 /usr/bin/create-topics.sh`);
16 | } else {
17 | if (config) {
18 | config = ` --config ${config}`;
19 | }
20 | args.push(
21 | `/opt/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper:2181 --topic ${topicName} --partitions ${partitions} --replication-factor ${replicas} ${config}`
22 | );
23 | }
24 |
25 | const createResult = execa('docker-compose', args);
26 | // createResult.stdout.pipe(process.stdout);
27 | return createResult;
28 | }
29 |
30 | module.exports = createTopic;
31 |
--------------------------------------------------------------------------------
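The `createTopic` helper above shells out to the Kafka container with `docker-compose exec`, so it only works while the compose stack defined in `docker-compose.yml` is running. A minimal usage sketch (the topic name and the retention config are made-up values for illustration):

```js
// Sketch: create a three-partition topic on the dockerized broker.
// Assumes the compose stack is up and KAFKA_VERSION matches the running image.
const createTopic = require('./createTopic');

createTopic('ExampleSketchTopic', 3, 1, 'retention.ms=60000')
  .then(result => console.log('topic created:', result.stdout))
  .catch(error => console.error('topic creation failed:', error));
```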
/docker/docker-compose.0.10.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | kafka:
4 | image: wurstmeister/kafka:0.10.2.1
5 | environment:
6 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_PLAINTEXT://:9094"
7 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9092,SSL://${KAFKA_ADVERTISED_HOST_NAME}:9093,SASL_PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9094"
8 | KAFKA_SASL_ENABLED_MECHANISMS: "PLAIN"
9 | KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "PLAINTEXT"
10 | KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
11 | KAFKA_SUPER_USERS: "User:admin,User:broker"
12 | KAFKA_OPTS: "-Djava.security.auth.login.config=/var/private/sasl/sasl.conf"
13 |
--------------------------------------------------------------------------------
/docker/docker-compose.0.11.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | kafka:
4 | image: wurstmeister/kafka:0.11.0.0
5 | environment:
6 | KAFKA_LOG_MESSAGE_FORMAT_VERSION: "0.10.2"
7 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_PLAINTEXT://:9094"
8 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9092,SSL://${KAFKA_ADVERTISED_HOST_NAME}:9093,SASL_PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9094"
9 | KAFKA_SASL_ENABLED_MECHANISMS: "PLAIN"
10 | KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "PLAINTEXT"
11 | KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
12 | KAFKA_SUPER_USERS: "User:admin,User:broker"
13 | KAFKA_OPTS: "-Djava.security.auth.login.config=/var/private/sasl/sasl.conf"
14 | volumes:
15 | - ./docker/start-kafka.sh:/usr/bin/start-kafka.sh
16 |
17 |
--------------------------------------------------------------------------------
/docker/docker-compose.0.9.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | kafka:
4 | image: wurstmeister/kafka:0.9.0.1
5 |
--------------------------------------------------------------------------------
/docker/docker-compose.1.0.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | kafka:
4 | image: wurstmeister/kafka:1.0.0
5 | environment:
6 | KAFKA_LOG_MESSAGE_FORMAT_VERSION: "0.10.2"
7 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_PLAINTEXT://:9094"
8 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9092,SSL://${KAFKA_ADVERTISED_HOST_NAME}:9093,SASL_PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9094"
9 | KAFKA_SASL_ENABLED_MECHANISMS: "PLAIN"
10 | KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "PLAINTEXT"
11 | KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
12 | KAFKA_SUPER_USERS: "User:admin,User:broker"
13 | KAFKA_OPTS: "-Djava.security.auth.login.config=/var/private/sasl/sasl.conf"
14 | volumes:
15 | - ./docker/start-kafka.sh:/usr/bin/start-kafka.sh
16 |
17 |
--------------------------------------------------------------------------------
/docker/docker-compose.1.1.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | kafka:
4 | image: wurstmeister/kafka:1.1.0
5 | environment:
6 | KAFKA_LOG_MESSAGE_FORMAT_VERSION: "0.10.2"
7 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_PLAINTEXT://:9094"
8 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9092,SSL://${KAFKA_ADVERTISED_HOST_NAME}:9093,SASL_PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9094"
9 | KAFKA_SASL_ENABLED_MECHANISMS: "PLAIN"
10 | KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "PLAINTEXT"
11 | KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
12 | KAFKA_SUPER_USERS: "User:admin,User:broker"
13 | KAFKA_OPTS: "-Djava.security.auth.login.config=/var/private/sasl/sasl.conf"
14 | volumes:
15 | - ./docker/start-kafka.sh:/usr/bin/start-kafka.sh
16 |
17 |
--------------------------------------------------------------------------------
/docker/docker-compose.2.0.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | kafka:
4 | image: wurstmeister/kafka:2.11-2.0.0
5 | environment:
6 | KAFKA_LOG_MESSAGE_FORMAT_VERSION: "0.10.2"
7 | KAFKA_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_PLAINTEXT://:9094"
8 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9092,SSL://${KAFKA_ADVERTISED_HOST_NAME}:9093,SASL_PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME}:9094"
9 | KAFKA_SASL_ENABLED_MECHANISMS: "PLAIN"
10 | KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "PLAINTEXT"
11 | KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
12 | KAFKA_SUPER_USERS: "User:admin,User:broker"
13 | KAFKA_OPTS: "-Djava.security.auth.login.config=/var/private/sasl/sasl.conf"
14 | volumes:
15 | - ./docker/start-kafka.sh:/usr/bin/start-kafka.sh
16 |
17 |
--------------------------------------------------------------------------------
/docker/sasl/sasl.conf:
--------------------------------------------------------------------------------
1 | KafkaServer {
2 | org.apache.kafka.common.security.plain.PlainLoginModule required
3 |
4 | username="broker"
5 | password="broker"
6 | user_kafkanode="kafkanode"
7 | user_admin="admin"
8 | user_broker="broker";
9 | };
10 |
--------------------------------------------------------------------------------
/docker/start-kafka.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #
4 | # FIXME KAFKA_OPTS with the SASL config causes a 'bad substitution' error in
5 | # the sed commands below. Only seems to impact the 1.x images, not entirely
6 | # sure why.
7 | #
8 | # Swapping KAFKA_OPTS out temporarily lets us work around the issue.
9 | #
10 | if [[ -n "$KAFKA_OPTS" ]]; then
11 | TEMP_KAFKA_OPTS="$KAFKA_OPTS"
12 | unset KAFKA_OPTS
13 | fi
14 |
15 | if [[ -z "$KAFKA_PORT" ]]; then
16 | export KAFKA_PORT=9092
17 | fi
18 | if [[ -z "$KAFKA_ADVERTISED_PORT" && \
19 | -z "$KAFKA_LISTENERS" && \
20 | -z "$KAFKA_ADVERTISED_LISTENERS" && \
21 | -S /var/run/docker.sock ]]; then
22 | export KAFKA_ADVERTISED_PORT=$(docker port `hostname` $KAFKA_PORT | sed -r "s/.*:(.*)/\1/g")
23 | fi
24 | if [[ -z "$KAFKA_BROKER_ID" ]]; then
25 | if [[ -n "$BROKER_ID_COMMAND" ]]; then
26 | export KAFKA_BROKER_ID=$(eval $BROKER_ID_COMMAND)
27 | else
28 | # By default auto allocate broker ID
29 | export KAFKA_BROKER_ID=-1
30 | fi
31 | fi
32 | if [[ -z "$KAFKA_LOG_DIRS" ]]; then
33 | export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME"
34 | fi
35 | if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
36 | export KAFKA_ZOOKEEPER_CONNECT=$(env | grep ZK.*PORT_2181_TCP= | sed -e 's|.*tcp://||' | paste -sd ,)
37 | fi
38 |
39 | if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
40 | sed -r -i "s/(export KAFKA_HEAP_OPTS)=\"(.*)\"/\1=\"$KAFKA_HEAP_OPTS\"/g" $KAFKA_HOME/bin/kafka-server-start.sh
41 | unset KAFKA_HEAP_OPTS
42 | fi
43 |
44 | if [[ -z "$KAFKA_ADVERTISED_HOST_NAME" && -n "$HOSTNAME_COMMAND" ]]; then
45 | export KAFKA_ADVERTISED_HOST_NAME=$(eval $HOSTNAME_COMMAND)
46 | fi
47 |
48 | if [[ -n "$RACK_COMMAND" && -z "$KAFKA_BROKER_RACK" ]]; then
49 | export KAFKA_BROKER_RACK=$(eval $RACK_COMMAND)
50 | fi
51 |
52 | #Issue newline to config file in case there is not one already
53 | echo -e "\n" >> $KAFKA_HOME/config/server.properties
54 |
55 | for VAR in `env`
56 | do
57 | if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME && \
58 | ! $VAR =~ ^KAFKA_CREATE_TOPICS ]]; then
59 | kafka_name=`echo "$VAR" | sed -r "s/KAFKA_(.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .`
60 | env_var=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"`
61 | if egrep -q "(^|^#)$kafka_name=" $KAFKA_HOME/config/server.properties; then
62 | sed -r -i "s@(^|^#)($kafka_name)=(.*)@\2=${!env_var}@g" $KAFKA_HOME/config/server.properties #note that no config values may contain an '@' char
63 | else
64 | echo "$kafka_name=${!env_var}" >> $KAFKA_HOME/config/server.properties
65 | fi
66 | fi
67 | done
68 |
69 | if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then
70 | eval $CUSTOM_INIT_SCRIPT
71 | fi
72 |
73 | create-topics.sh &
74 | KAFKA_OPTS="$TEMP_KAFKA_OPTS" exec $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
75 |
--------------------------------------------------------------------------------
/example/consumer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var Consumer = kafka.Consumer;
5 | var Offset = kafka.Offset;
6 | var Client = kafka.KafkaClient;
7 | var argv = require('optimist').argv;
8 | var topic = argv.topic || 'topic1';
9 |
10 | var client = new Client({ kafkaHost: 'localhost:9092' });
11 | var topics = [{ topic: topic, partition: 1 }, { topic: topic, partition: 0 }];
12 | var options = { autoCommit: false, fetchMaxWaitMs: 1000, fetchMaxBytes: 1024 * 1024 };
13 |
14 | var consumer = new Consumer(client, topics, options);
15 | var offset = new Offset(client);
16 |
17 | consumer.on('message', function (message) {
18 | console.log(message);
19 | });
20 |
21 | consumer.on('error', function (err) {
22 | console.log('error', err);
23 | });
24 |
25 | /*
26 | * If the consumer gets an `offsetOutOfRange` event, fetch data from the smallest (oldest) offset
27 | */
28 | consumer.on('offsetOutOfRange', function (topic) {
29 | topic.maxNum = 2;
30 | offset.fetch([topic], function (err, offsets) {
31 | if (err) {
32 | return console.error(err);
33 | }
34 | var min = Math.min.apply(null, offsets[topic.topic][topic.partition]);
35 | consumer.setOffset(topic.topic, topic.partition, min);
36 | });
37 | });
38 |
--------------------------------------------------------------------------------
/example/consumerGroupMember.js:
--------------------------------------------------------------------------------
1 | var async = require('async');
2 | var ConsumerGroup = require('..').ConsumerGroup;
3 |
4 | var consumerOptions = {
5 | kafkaHost: '127.0.0.1:9092',
6 | groupId: 'ExampleTestGroup',
7 | sessionTimeout: 15000,
8 | protocol: ['roundrobin'],
9 | fromOffset: 'earliest' // equivalent of auto.offset.reset; valid values are 'none', 'latest', 'earliest'
10 | };
11 |
12 | var topics = ['RebalanceTopic', 'RebalanceTest'];
13 |
14 | var consumerGroup = new ConsumerGroup(Object.assign({ id: 'consumer1' }, consumerOptions), topics);
15 | consumerGroup.on('error', onError);
16 | consumerGroup.on('message', onMessage);
17 |
18 | var consumerGroup2 = new ConsumerGroup(Object.assign({ id: 'consumer2' }, consumerOptions), topics);
19 | consumerGroup2.on('error', onError);
20 | consumerGroup2.on('message', onMessage);
21 | consumerGroup2.on('connect', function () {
22 | setTimeout(function () {
23 | consumerGroup2.close(true, function (error) {
24 | console.log('consumer2 closed', error);
25 | });
26 | }, 25000);
27 | });
28 |
29 | var consumerGroup3 = new ConsumerGroup(Object.assign({ id: 'consumer3' }, consumerOptions), topics);
30 | consumerGroup3.on('error', onError);
31 | consumerGroup3.on('message', onMessage);
32 |
33 | function onError (error) {
34 | console.error(error);
35 | console.error(error.stack);
36 | }
37 |
38 | function onMessage (message) {
39 | console.log(
40 | '%s read msg Topic="%s" Partition=%s Offset=%d',
41 | this.client.clientId,
42 | message.topic,
43 | message.partition,
44 | message.offset
45 | );
46 | }
47 |
48 | process.once('SIGINT', function () {
49 | async.each([consumerGroup, consumerGroup2, consumerGroup3], function (consumer, callback) {
50 | consumer.close(true, callback);
51 | });
52 | });
53 |
--------------------------------------------------------------------------------
/example/high-level-producer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var HighLevelProducer = kafka.HighLevelProducer;
5 | var Client = kafka.KafkaClient;
6 | var client = new Client();
7 | var argv = require('optimist').argv;
8 | var topic = argv.topic || 'topic1';
9 | var count = 10;
10 | var rets = 0;
11 | var producer = new HighLevelProducer(client);
12 |
13 | producer.on('ready', function () {
14 | setInterval(send, 1000);
15 | });
16 |
17 | producer.on('error', function (err) {
18 | console.log('error', err);
19 | });
20 |
21 | function send () {
22 | var message = new Date().toString();
23 | producer.send([{ topic: topic, messages: [message] }], function (err, data) {
24 | if (err) console.log(err);
25 | else console.log('send %d messages', ++rets);
26 | if (rets === count) process.exit();
27 | });
28 | }
29 |
--------------------------------------------------------------------------------
/example/offset.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var Client = kafka.KafkaClient;
5 | var Offset = kafka.Offset;
6 | var offset = new Offset(new Client());
7 | var topic = 'topic1';
8 |
9 | // Fetch available offsets
10 | offset.fetch([{ topic: topic, partition: 0, maxNum: 2 }, { topic: topic, partition: 1 }], function (err, offsets) {
11 | console.log(err || offsets);
12 | });
13 |
14 | // Commit offset for a consumer group
15 | offset.commit('kafka-node-group', [{ topic: topic, partition: 0 }], function (err, result) {
16 | console.log(err || result);
17 | });
18 |
--------------------------------------------------------------------------------
/example/producer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var Producer = kafka.Producer;
5 | var KeyedMessage = kafka.KeyedMessage;
6 | var Client = kafka.KafkaClient;
7 | var client = new Client();
8 | var argv = require('optimist').argv;
9 | var topic = argv.topic || 'topic1';
10 | var p = argv.p || 0;
11 | var a = argv.a || 0;
12 | var producer = new Producer(client, { requireAcks: 1 });
13 |
14 | producer.on('ready', function () {
15 | var message = 'a message';
16 | var keyedMessage = new KeyedMessage('keyed', 'a keyed message');
17 |
18 | producer.send([{ topic: topic, partition: p, messages: [message, keyedMessage], attributes: a }], function (
19 | err,
20 | result
21 | ) {
22 | console.log(err || result);
23 | process.exit();
24 | });
25 | });
26 |
27 | producer.on('error', function (err) {
28 | console.log('error', err);
29 | });
30 |
--------------------------------------------------------------------------------
/example/streaming.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const Transform = require('stream').Transform;
4 | const ProducerStream = require('..').ProducerStream;
5 | const _ = require('lodash');
6 |
7 | const producer = new ProducerStream();
8 |
9 | const stdinTransform = new Transform({
10 | objectMode: true,
11 | decodeStrings: true,
12 | transform (text, encoding, callback) {
13 | text = _.trim(text);
14 | console.log(`pushing message ${text} to ExampleTopic`);
15 | callback(null, {
16 | topic: 'ExampleTopic',
17 | messages: text
18 | });
19 | }
20 | });
21 |
22 | process.stdin.setEncoding('utf8');
23 | process.stdin.pipe(stdinTransform).pipe(producer);
24 |
25 | const ConsumerGroupStream = require('..').ConsumerGroupStream;
26 | const resultProducer = new ProducerStream();
27 |
28 | const consumerOptions = {
29 | kafkaHost: '127.0.0.1:9092',
30 | groupId: 'ExampleTestGroup',
31 | sessionTimeout: 15000,
32 | protocol: ['roundrobin'],
33 | asyncPush: false,
34 | id: 'consumer1',
35 | fromOffset: 'latest'
36 | };
37 |
38 | const consumerGroup = new ConsumerGroupStream(consumerOptions, 'ExampleTopic');
39 |
40 | const messageTransform = new Transform({
41 | objectMode: true,
42 | decodeStrings: true,
43 | transform (message, encoding, callback) {
44 | console.log(`Received message ${message.value} transforming input`);
45 | callback(null, {
46 | topic: 'RebalanceTopic',
47 | messages: `You have been (${message.value}) made an example of`
48 | });
49 | }
50 | });
51 |
52 | consumerGroup.pipe(messageTransform).pipe(resultProducer);
53 |
--------------------------------------------------------------------------------
/kafka.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | exports.HighLevelProducer = require('./lib/highLevelProducer');
4 | exports.ProducerStream = require('./lib/producerStream');
5 | exports.ConsumerGroup = require('./lib/consumerGroup');
6 | exports.ConsumerGroupStream = require('./lib/consumerGroupStream');
7 | exports.Consumer = require('./lib/consumer');
8 | exports.ConsumerStream = require('./lib/consumerStream');
9 | exports.Producer = require('./lib/producer');
10 | exports.KafkaClient = require('./lib/kafkaClient');
11 | exports.Offset = require('./lib/offset');
12 | exports.Admin = require('./lib/admin');
13 | exports.KeyedMessage = require('./lib/protocol').KeyedMessage;
14 | exports.DefaultPartitioner = require('./lib/partitioner').DefaultPartitioner;
15 | exports.CyclicPartitioner = require('./lib/partitioner').CyclicPartitioner;
16 | exports.RandomPartitioner = require('./lib/partitioner').RandomPartitioner;
17 | exports.KeyedPartitioner = require('./lib/partitioner').KeyedPartitioner;
18 | exports.CustomPartitioner = require('./lib/partitioner').CustomPartitioner;
19 |
--------------------------------------------------------------------------------
/lib/admin.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const KafkaClient = require('./kafkaClient');
4 | const resources = require('./resources');
5 | const util = require('util');
6 | const EventEmitter = require('events');
7 |
8 | function Admin (kafkaClient) {
9 | EventEmitter.call(this);
10 | if (!(kafkaClient instanceof KafkaClient)) {
11 | throw new Error("'Admin' only accepts 'KafkaClient' for its kafka client.");
12 | }
13 |
14 | var self = this;
15 | this.client = kafkaClient;
16 | this.RESOURCE_TYPES = resources.RESOURCE_TYPES;
17 | this.ready = this.client.ready;
18 | this.client.on('ready', function () {
19 | self.ready = true;
20 | self.emit('ready');
21 | });
22 | this.client.once('connect', function () {
23 | self.emit('connect');
24 | });
25 | this.client.on('error', function (err) {
26 | self.emit('error', err);
27 | });
28 | }
29 | util.inherits(Admin, EventEmitter);
30 |
31 | Admin.prototype.listGroups = function (cb) {
32 | if (!this.ready) {
33 | this.once('ready', () => this.listGroups(cb));
34 | return;
35 | }
36 | this.client.getListGroups(cb);
37 | };
38 |
39 | Admin.prototype.listTopics = function (cb) {
40 | if (!this.ready) {
41 | this.once('ready', () => this.listTopics(cb));
42 | return;
43 | }
44 | this.client.loadMetadataForTopics([], cb);
45 | };
46 |
47 | Admin.prototype.describeGroups = function (consumerGroups, cb) {
48 | if (!this.ready) {
49 | this.once('ready', () => this.describeGroups(consumerGroups, cb));
50 | return;
51 | }
52 | this.client.getDescribeGroups(consumerGroups, cb);
53 | };
54 |
55 | Admin.prototype.createTopics = function (topics, cb) {
56 | if (!this.ready) {
57 | this.once('ready', () => this.client.createTopics(topics, cb));
58 | return;
59 | }
60 | this.client.createTopics(topics, cb);
61 | };
62 |
63 | Admin.prototype.describeConfigs = function (payload, cb) {
64 | if (!this.ready) {
65 | this.once('ready', () => this.describeConfigs(payload, cb));
66 | return;
67 | }
68 | this.client.describeConfigs(payload, cb);
69 | };
70 |
71 | module.exports = Admin;
72 |
--------------------------------------------------------------------------------
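Each method on the `Admin` wrapper above defers itself until the underlying client emits `ready`, so calls can be issued immediately after construction. A minimal usage sketch (the broker address is an assumption for illustration):

```js
// Sketch: list topics and consumer groups through the Admin wrapper.
const kafka = require('kafka-node');

const client = new kafka.KafkaClient({ kafkaHost: 'localhost:9092' }); // assumed broker address
const admin = new kafka.Admin(client); // only a KafkaClient instance is accepted

admin.listTopics((error, res) => {
  if (error) return console.error(error);
  console.log('topic metadata:', JSON.stringify(res, null, 2));
});

admin.listGroups((error, groups) => {
  if (error) return console.error(error);
  console.log('consumer groups:', groups);
});
```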
/lib/assignment/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | module.exports = {
4 | 'roundrobin': require('./roundrobin'),
5 | 'range': require('./range')
6 | };
7 |
--------------------------------------------------------------------------------
/lib/assignment/range.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const logger = require('../logging')('kafka-node:Range');
4 | const VERSION = 0;
5 | const _ = require('lodash');
6 | const groupPartitionsByTopic = require('../utils').groupPartitionsByTopic;
7 |
8 | function assignRange (topicPartition, groupMembers, callback) {
9 | logger.debug('topicPartition: %j', topicPartition);
10 | var assignment = _(groupMembers).map('id').reduce(function (obj, id) {
11 | obj[id] = [];
12 | return obj;
13 | }, {});
14 |
15 | const topicMemberMap = topicToMemberMap(groupMembers);
16 | for (var topic in topicMemberMap) {
17 | if (!topicMemberMap.hasOwnProperty(topic)) {
18 | continue;
19 | }
20 | logger.debug('For topic %s', topic);
21 | topicMemberMap[topic].sort();
22 | logger.debug(' members: ', topicMemberMap[topic]);
23 |
24 | var numberOfPartitionsForTopic = topicPartition[topic].length;
25 | logger.debug(' numberOfPartitionsForTopic', numberOfPartitionsForTopic);
26 |
27 | var numberOfMembersForTopic = topicMemberMap[topic].length;
28 | logger.debug(' numberOfMembersForTopic', numberOfMembersForTopic);
29 |
30 | var numberPartitionsPerMember = Math.floor(numberOfPartitionsForTopic / numberOfMembersForTopic);
31 | logger.debug(' numberPartitionsPerMember', numberPartitionsPerMember);
32 |
33 | var membersWithExtraPartition = numberOfPartitionsForTopic % numberOfMembersForTopic;
34 | var topicPartitionList = createTopicPartitionArray(topic, numberOfPartitionsForTopic);
35 |
36 | for (var i = 0, n = numberOfMembersForTopic; i < n; i++) {
37 | var start = numberPartitionsPerMember * i + Math.min(i, membersWithExtraPartition);
38 | var length = numberPartitionsPerMember + (i + 1 > membersWithExtraPartition ? 0 : 1);
39 | var assignedTopicPartitions = assignment[topicMemberMap[topic][i]];
40 | assignedTopicPartitions.push.apply(assignedTopicPartitions, topicPartitionList.slice(start, start + length));
41 | }
42 | }
43 |
44 | logger.debug(assignment);
45 |
46 | callback(null, convertToAssignmentList(assignment, VERSION));
47 | }
48 |
49 | function convertToAssignmentList (assignment, version) {
50 | return _.map(assignment, function (value, key) {
51 | return {
52 | memberId: key,
53 | topicPartitions: groupPartitionsByTopic(value),
54 | version: version
55 | };
56 | });
57 | }
58 |
59 | function createTopicPartitionArray (topic, numberOfPartitions) {
60 | return _.times(numberOfPartitions, function (n) {
61 | return {
62 | topic: topic,
63 | partition: n
64 | };
65 | });
66 | }
67 |
68 | function topicToMemberMap (groupMembers) {
69 | return groupMembers.reduce(function (result, member) {
70 | member.subscription.forEach(function (topic) {
71 | if (topic in result) {
72 | result[topic].push(member.id);
73 | } else {
74 | result[topic] = [member.id];
75 | }
76 | });
77 | return result;
78 | }, {});
79 | }
80 |
81 | module.exports = {
82 | assign: assignRange,
83 | name: 'range',
84 | version: VERSION
85 | };
86 |
--------------------------------------------------------------------------------
/lib/assignment/roundrobin.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var _ = require('lodash');
4 | var groupPartitionsByTopic = require('../utils').groupPartitionsByTopic;
5 | var logger = require('../logging')('kafka-node:Roundrobin');
6 | var VERSION = 0;
7 |
8 | function assignRoundRobin (topicPartition, groupMembers, callback) {
9 | logger.debug('topicPartition: %j', topicPartition);
10 | logger.debug('groupMembers: %j', groupMembers);
11 | var _members = _(groupMembers).map('id');
12 | var members = _members.value().sort();
13 | logger.debug('members', members);
14 | var assignment = _members.reduce(function (obj, id) {
15 | obj[id] = [];
16 | return obj;
17 | }, {});
18 |
19 | var subscriberMap = groupMembers.reduce(function (subscribers, member) {
20 | subscribers[member.id] = member.subscription;
21 | return subscribers;
22 | }, {});
23 |
24 | logger.debug('subscribers', subscriberMap);
25 |
26 | // layout topic/partitions pairs into a list
27 | var topicPartitionList = _(topicPartition).map(function (partitions, topic) {
28 | return partitions.map(function (partition) {
29 | return {
30 | topic: topic,
31 | partition: partition
32 | };
33 | });
34 | }).flatten().value();
35 | logger.debug('round robin on topic partition pairs: ', topicPartitionList);
36 |
37 | var assigner = cycle(members);
38 |
39 | topicPartitionList.forEach(function (tp) {
40 | var topic = tp.topic;
41 | while (!_.includes(subscriberMap[assigner.peek()], topic)) {
42 | assigner.next();
43 | }
44 | assignment[assigner.next()].push(tp);
45 | });
46 |
47 | var ret = _.map(assignment, function (value, key) {
48 | var ret = {};
49 | ret.memberId = key;
50 | ret.topicPartitions = groupPartitionsByTopic(value);
51 | ret.version = VERSION;
52 | return ret;
53 | });
54 |
55 | callback(null, ret);
56 | }
57 |
58 | function cycle (arr) {
59 | var index = -1;
60 | var len = arr.length;
61 | return {
62 | peek: function () {
63 | return arr[(index + 1) % len];
64 | },
65 | next: function () {
66 | index = ++index % len;
67 | return arr[index];
68 | }
69 | };
70 | }
71 |
72 | module.exports = {
73 | assign: assignRoundRobin,
74 | name: 'roundrobin',
75 | version: VERSION
76 | };
77 |
--------------------------------------------------------------------------------
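The assigner above can be exercised directly with a partition map and a member list shaped like the ones the consumer group hands it. A small sketch (the topic name and member ids are made up for illustration):

```js
// Sketch: two members subscribed to one three-partition topic.
const roundrobin = require('./roundrobin');

const topicPartitions = { RebalanceTopic: [0, 1, 2] };
const groupMembers = [
  { id: 'consumer1', subscription: ['RebalanceTopic'] },
  { id: 'consumer2', subscription: ['RebalanceTopic'] }
];

roundrobin.assign(topicPartitions, groupMembers, function (error, assignment) {
  if (error) throw error;
  // Each entry carries memberId, topicPartitions (partitions grouped by topic) and version;
  // here consumer1 ends up with partitions 0 and 2, consumer2 with partition 1.
  console.log(JSON.stringify(assignment, null, 2));
});
```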
/lib/baseProducer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var assert = require('assert');
4 | var util = require('util');
5 | var EventEmitter = require('events');
6 | var _ = require('lodash');
7 | var protocol = require('./protocol');
8 | var Message = protocol.Message;
9 | var KeyedMessage = protocol.KeyedMessage;
10 | var ProduceRequest = protocol.ProduceRequest;
11 | var partitioner = require('./partitioner');
12 | var DefaultPartitioner = partitioner.DefaultPartitioner;
13 | var RandomPartitioner = partitioner.RandomPartitioner;
14 | var CyclicPartitioner = partitioner.CyclicPartitioner;
15 | var KeyedPartitioner = partitioner.KeyedPartitioner;
16 | var CustomPartitioner = partitioner.CustomPartitioner;
17 |
18 | var PARTITIONER_TYPES = {
19 | default: 0,
20 | random: 1,
21 | cyclic: 2,
22 | keyed: 3,
23 | custom: 4
24 | };
25 |
26 | var PARTITIONER_MAP = {
27 | 0: DefaultPartitioner,
28 | 1: RandomPartitioner,
29 | 2: CyclicPartitioner,
30 | 3: KeyedPartitioner,
31 | 4: CustomPartitioner
32 | };
33 |
34 | var DEFAULTS = {
35 | requireAcks: 1,
36 | ackTimeoutMs: 100
37 | };
38 |
39 | /**
40 | * Provides common functionality for a kafka producer
41 | *
42 | * @param {Client} client A kafka client object to use for the producer
43 | * @param {Object} [options] An object containing configuration options
44 | * @param {Number} [options.requireAcks=1] Configuration for when to consider a message as acknowledged.
45 | *  0 = No ack required
46 | *  1 = Leader ack required
47 | * -1 = All in sync replicas ack required
48 | *
49 | * @param {Number} [options.ackTimeoutMs=100] The amount of time in milliseconds to wait for all acks before considering
50 | * the message as errored
51 | * @param {Number} [defaultPartitionerType] The default partitioner type
52 | * @param {Object} [customPartitioner] A custom partitioner to use, of the form: function (partitions, key)
53 | * @constructor
54 | */
55 | function BaseProducer (client, options, defaultPartitionerType, customPartitioner) {
56 | EventEmitter.call(this);
57 | options = options || {};
58 |
59 | this.ready = false;
60 | this.client = client;
61 |
62 | this.requireAcks = options.requireAcks === undefined ? DEFAULTS.requireAcks : options.requireAcks;
63 | this.ackTimeoutMs = options.ackTimeoutMs === undefined ? DEFAULTS.ackTimeoutMs : options.ackTimeoutMs;
64 |
65 | if (customPartitioner !== undefined && options.partitionerType !== PARTITIONER_TYPES.custom) {
66 | throw new Error('Partitioner Type must be custom if providing a customPartitioner.');
67 | } else if (customPartitioner === undefined && options.partitionerType === PARTITIONER_TYPES.custom) {
68 | throw new Error('No custom partitioner defined');
69 | }
70 |
71 | var partitionerType = PARTITIONER_MAP[options.partitionerType] || PARTITIONER_MAP[defaultPartitionerType];
72 |
73 | // eslint-disable-next-line
74 | this.partitioner = new partitionerType(customPartitioner);
75 |
76 | this.connect();
77 | }
78 |
79 | util.inherits(BaseProducer, EventEmitter);
80 |
81 | BaseProducer.prototype.connect = function () {
82 | // propagate client lifecycle events (ready, brokersChanged, error) to this producer
83 | var self = this;
84 | this.ready = this.client.ready;
85 | if (this.ready) {
86 | // Emit the ready event in next tick to give consumers a chance to set up
87 | // a `ready` listener
88 | setImmediate(function () {
89 | self.emit('ready');
90 | });
91 | }
92 | this.client.on('ready', function () {
93 | if (!self.ready) {
94 | self.ready = true;
95 | self.emit('ready');
96 | }
97 | });
98 | this.client.on('brokersChanged', function () {
99 | let topics = Object.keys(this.topicMetadata);
100 | this.refreshMetadata(topics, function (error) {
101 | if (error) {
102 | self.emit('error', error);
103 | }
104 | });
105 | });
106 | this.client.on('error', function (err) {
107 | self.emit('error', err);
108 | });
109 | this.client.on('close', function () {});
110 | };
111 |
112 | /**
113 | * Sends a new message or array of messages to a topic/partition
114 | * This will use the
115 | *
116 | * @see Client#sendProduceRequest for a more low level way to send messages to kafka
117 | *
118 | * @param {Array.} payloads An array of topic payloads
119 | * @param {BaseProducer~sendCallback} cb A function to call once the send has completed
120 | */
121 | BaseProducer.prototype.send = function (payloads, cb) {
122 | var client = this.client;
123 | var requireAcks = this.requireAcks;
124 | var ackTimeoutMs = this.ackTimeoutMs;
125 |
126 | client.sendProduceRequest(this.buildPayloads(payloads, client.topicMetadata), requireAcks, ackTimeoutMs, cb);
127 | };
128 |
129 | BaseProducer.prototype.buildPayloads = function (payloads, topicMetadata) {
130 | const topicPartitionRequests = Object.create(null);
131 | payloads.forEach(p => {
132 | p.partition = p.hasOwnProperty('partition')
133 | ? p.partition
134 | : this.partitioner.getPartition(_.map(topicMetadata[p.topic], 'partition'), p.key);
135 | p.attributes = p.hasOwnProperty('attributes') ? p.attributes : 0;
136 | let messages = _.isArray(p.messages) ? p.messages : [p.messages];
137 |
138 | messages = messages.map(function (message) {
139 | if (message instanceof KeyedMessage) {
140 | return message;
141 | }
142 | return new Message(0, 0, p.key, message, p.timestamp || Date.now());
143 | });
144 |
145 | let key = p.topic + p.partition;
146 | let request = topicPartitionRequests[key];
147 |
148 | if (request == null) {
149 | topicPartitionRequests[key] = new ProduceRequest(p.topic, p.partition, messages, p.attributes);
150 | } else {
151 | assert(request.attributes === p.attributes);
152 | Array.prototype.push.apply(request.messages, messages);
153 | }
154 | });
155 | return _.values(topicPartitionRequests);
156 | };
157 |
158 | BaseProducer.prototype.createTopics = function (topics, async, cb) {
159 | if (!this.ready) {
160 | return cb(new Error('Producer not ready!'));
161 | }
162 |
163 | this.client.createTopics(topics, async, cb);
164 | };
165 |
166 | BaseProducer.prototype.close = function (cb) {
167 | this.client.close(cb);
168 | };
169 |
170 | BaseProducer.PARTITIONER_TYPES = PARTITIONER_TYPES;
171 |
172 | module.exports = BaseProducer;
173 |
--------------------------------------------------------------------------------
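The constructor options documented above surface through the exported `Producer` (example/producer.js earlier in this dump already passes `requireAcks`). A sketch combining the ack and partitioner settings (the broker address and topic are assumptions for illustration):

```js
// Sketch: a producer that waits for all in-sync replicas (-1) and uses the
// keyed partitioner so messages with the same key land on the same partition.
const kafka = require('kafka-node');

const client = new kafka.KafkaClient({ kafkaHost: 'localhost:9092' }); // assumed broker
const producer = new kafka.Producer(client, {
  requireAcks: -1,   // 0 = no ack, 1 = leader ack (default), -1 = all in-sync replicas
  ackTimeoutMs: 500,
  partitionerType: 3 // PARTITIONER_TYPES.keyed
});

producer.on('ready', function () {
  producer.send([{ topic: 'ExampleTopic', messages: ['hello'], key: 'user-42' }], function (error, result) {
    console.log(error || result);
    producer.close(() => process.exit());
  });
});

producer.on('error', console.error);
```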
/lib/batch/KafkaBuffer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var KafkaBuffer = function (batchSize, batchAge) {
4 | this._batch_size = batchSize;
5 | this._batch_age = batchAge;
6 | this._batch_age_timer = null;
7 | this._buffer = null;
8 | };
9 |
10 | KafkaBuffer.prototype.addChunk = function (buffer, callback) {
11 | if (this._buffer == null) {
12 | this._buffer = Buffer.from(buffer);
13 | } else {
14 | this._buffer = Buffer.concat([this._buffer, buffer]);
15 | }
16 |
17 | if (typeof callback !== 'undefined' && callback != null) {
18 | if (
19 | this._batch_size == null ||
20 | this._batch_age == null ||
21 | (this._buffer && this._buffer.length > this._batch_size)
22 | ) {
23 | callback();
24 | } else {
25 | this._setupTimer(callback);
26 | }
27 | }
28 | };
29 |
30 | KafkaBuffer.prototype._setupTimer = function (callback) {
31 | var self = this;
32 |
33 | if (this._batch_age_timer != null) {
34 | clearTimeout(this._batch_age_timer);
35 | }
36 |
37 | this._batch_age_timer = setTimeout(function () {
38 | if (self._buffer && self._buffer.length > 0) {
39 | callback();
40 | }
41 | }, this._batch_age);
42 | };
43 |
44 | KafkaBuffer.prototype.getBatch = function () {
45 | return this._buffer;
46 | };
47 |
48 | KafkaBuffer.prototype.truncateBatch = function () {
49 | this._buffer = null;
50 | };
51 |
52 | module.exports = KafkaBuffer;
53 |
--------------------------------------------------------------------------------
/lib/codec/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var zlib = require('zlib');
4 | var snappyCodec = require('./snappy');
5 |
6 | var gzipCodec = {
7 | encode: zlib.gzip,
8 | decode: zlib.gunzip
9 | };
10 |
11 | var codecs = [
12 | null,
13 | gzipCodec,
14 | snappyCodec
15 | ];
16 |
17 | function getCodec (attributes) {
18 | return codecs[attributes & 3] || null;
19 | }
20 |
21 | module.exports = getCodec;
22 |
--------------------------------------------------------------------------------
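The lower two bits of a message's `attributes` field pick the codec, so 0 maps to no compression, 1 to gzip and 2 to snappy. A small illustrative sketch of resolving and round-tripping the gzip codec:

```js
// Sketch: attributes value 1 selects the gzip codec; 0 yields null (uncompressed).
const getCodec = require('./index');

const gzip = getCodec(1);
gzip.encode(Buffer.from('hello kafka'), function (error, compressed) {
  if (error) throw error;
  gzip.decode(compressed, function (error, original) {
    if (error) throw error;
    console.log(original.toString()); // 'hello kafka'
  });
});

console.log(getCodec(0)); // null: message payloads are used as-is
```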
/lib/codec/snappy.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var optional = require('optional');
4 | var async = require('async');
5 | var snappy = optional('snappy');
6 |
7 | if (snappy == null) {
8 | var unavailableCodec = function unavailableCodec () {
9 | throw new Error('Snappy codec is not installed');
10 | };
11 | module.exports = {
12 | encode: unavailableCodec,
13 | decode: unavailableCodec
14 | };
15 | } else {
16 | var SNAPPY_MAGIC_BYTES = [130, 83, 78, 65, 80, 80, 89, 0];
17 | var SNAPPY_MAGIC_BYTES_LEN = SNAPPY_MAGIC_BYTES.length;
18 | var SNAPPY_MAGIC = Buffer.from(SNAPPY_MAGIC_BYTES).toString('hex');
19 |
20 | exports.encode = snappy.compress;
21 | exports.decode = decodeSnappy;
22 | }
23 |
24 | function isChunked (buffer) {
25 | var prefix = buffer.toString('hex', 0, SNAPPY_MAGIC_BYTES_LEN);
26 | return prefix === SNAPPY_MAGIC;
27 | }
28 |
29 | // Ported from:
30 | // https://github.com/Shopify/sarama/blob/a3e2437d6d26cda6b2dc501dbdab4d3f6befa295/snappy.go
31 | function decodeSnappy (buffer, cb) {
32 | if (isChunked(buffer)) {
33 | var pos = 16;
34 | var max = buffer.length;
35 | var encoded = [];
36 | var size;
37 |
38 | while (pos < max) {
39 | size = buffer.readUInt32BE(pos);
40 | pos += 4;
41 | encoded.push(buffer.slice(pos, pos + size));
42 | pos += size;
43 | }
44 | return async.mapSeries(encoded, snappy.uncompress, function (err, decodedChunks) {
45 | if (err) return cb(err);
46 | return cb(null, Buffer.concat(decodedChunks));
47 | });
48 | }
49 | return snappy.uncompress(buffer, cb);
50 | }
51 |
--------------------------------------------------------------------------------
/lib/commitStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const stream = require('stream');
4 | const Transform = stream.Transform;
5 |
6 | const _ = require('lodash');
7 |
8 | var DEFAULTS = {
9 | // Auto commit config
10 | autoCommit: true,
11 | autoCommitIntervalMs: 5000,
12 | autoCommitMsgCount: 100,
13 | // Whether to act as a transform stream and re-emit the messages we observe.
14 | // If we pushed all data while nothing was reading from us, this stream would fill
15 | // its buffer and then apply backpressure, preventing our continued reading.
16 | passthrough: false
17 | };
18 |
19 | class CommitStream extends Transform {
20 | constructor (client, topics, groupId, options) {
21 | options = options || {};
22 | let parentOptions = _.defaults({ highWaterMark: options.highWaterMark }, { objectMode: true });
23 | super(parentOptions);
24 |
25 | this.options = _.defaults(options || {}, DEFAULTS);
26 | this.client = client;
27 | this.topicPartionOffsets = this.buildTopicData(_.cloneDeep(topics));
28 |
29 | this.committing = false;
30 | this.groupId = groupId;
31 |
32 | this.autoCommit = options.autoCommit;
33 | this.autoCommitMsgCount = options.autoCommitMsgCount;
34 | this.autoCommitIntervalMs = options.autoCommitIntervalMs;
35 |
36 | this.autoCommitIntervalTimer = null;
37 |
38 | if (this.autoCommit && this.autoCommitIntervalMs) {
39 | this.autoCommitIntervalTimer = setInterval(
40 | function () {
41 | this.commit();
42 | }.bind(this),
43 | this.autoCommitIntervalMs
44 | );
45 | }
46 |
47 | this.messageCount = 0;
48 | }
49 |
50 | /**
51 | * Extend Transform::on() to act as a pipe if someone consumes data from us.
52 | */
53 | on (eventName) {
54 | if (eventName === 'data') {
55 | this.options.passthrough = true;
56 | }
57 | super.on.apply(this, arguments);
58 | }
59 |
60 | /**
61 | * Extend Transform::pipe() to act as a pipe if someone consumes data from us.
62 | */
63 | pipe () {
64 | this.options.passthrough = true;
65 | super.pipe.apply(this, arguments);
66 | }
67 |
68 | _transform (data, encoding, done) {
69 | let topicUpdate = {};
70 | let self = this;
71 | topicUpdate[data.topic] = {};
72 | topicUpdate[data.topic][data.partition] = data.offset;
73 | self.updateOffsets(topicUpdate);
74 | self.messageCount++;
75 | const doneWrapper = function () {
76 | // We need to act as a through stream if we are not
77 | // purely a terminal write stream.
78 | if (self.options.passthrough) {
79 | return done(null, data);
80 | }
81 | done();
82 | };
83 | if (self.autoCommit && self.messageCount === self.autoCommitMsgCount) {
84 | self.messageCount = 0;
85 | return self.commit(doneWrapper);
86 | }
87 | doneWrapper();
88 | }
89 |
90 | buildTopicData (topicPartions) {
91 | return topicPartions.map(function (partion) {
92 | if (typeof partion !== 'object') partion = { topic: partion };
93 | partion.partition = partion.partition || 0;
94 | partion.offset = partion.offset || 0;
95 | // Metadata can be arbitrary
96 | partion.metadata = 'm';
97 | return partion;
98 | });
99 | }
100 |
101 | /**
102 | * @param {Object} topics - An object containing topic offset data keyed by
103 | * topic, with keys for partition containing the offset last seen.
104 | */
105 | updateOffsets (topics, initing) {
106 | this.topicPartionOffsets.forEach(function (p) {
107 | if (!_.isEmpty(topics[p.topic]) && topics[p.topic][p.partition] !== undefined) {
108 | var offset = topics[p.topic][p.partition];
109 | if (offset === -1) offset = 0;
110 | // Note, we track the offset of the next message we want to see,
111 | // not the most recent message we have seen.
112 | if (!initing) p.offset = offset + 1;
113 | else p.offset = offset;
114 | }
115 | });
116 | }
117 |
118 | /**
119 | * Clear the autocommit interval of this commitStream if set.
120 | */
121 | clearInterval () {
122 | clearInterval(this.autoCommitIntervalTimer);
123 | }
124 |
125 | commit (cb) {
126 | let self = this;
127 |
128 | if (!cb) {
129 | cb = function noop () {};
130 | }
131 |
132 | if (self.committing) {
133 | return cb(null, 'Commit in progress');
134 | }
135 |
136 | let topicPartionOffsets = self.topicPartionOffsets;
137 |
138 | let commits = topicPartionOffsets.filter(function (partition) {
139 | return partition.offset !== 0;
140 | });
141 |
142 | if (commits.length) {
143 | self.committing = true;
144 | self.client.sendOffsetCommitRequest(self.groupId, commits, function () {
145 | self.emit('commitComplete', { group: self.groupId, commits });
146 | self.committing = false;
147 | cb.apply(this, arguments);
148 | });
149 | } else {
150 | cb(null, 'Nothing to be committed');
151 | }
152 | }
153 | }
154 |
155 | module.exports = CommitStream;
156 |
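A minimal usage sketch, assuming a broker at localhost:9092 and placeholder topic/group names; any object-mode stream of { topic, partition, offset } messages, such as a ConsumerStream, can be piped into the commit stream:

    const { Readable } = require('stream');
    const KafkaClient = require('./lib/kafkaClient');
    const CommitStream = require('./lib/commitStream');

    const client = new KafkaClient({ kafkaHost: 'localhost:9092' });
    const commitStream = new CommitStream(client, ['example-topic'], 'example-group', {
      autoCommitMsgCount: 50
    });

    commitStream.on('commitComplete', ({ group, commits }) => {
      console.log('committed offsets for group %s', group, commits);
    });

    // Stand-in for a real consumer stream: a single observed message on partition 0.
    // Offsets are committed every autoCommitMsgCount messages or every autoCommitIntervalMs.
    Readable.from([{ topic: 'example-topic', partition: 0, offset: 41 }]).pipe(commitStream);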
--------------------------------------------------------------------------------
/lib/consumerGroupHeartbeat.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const HeartbeatTimeoutError = require('./errors/HeartbeatTimeoutError');
4 | const logger = require('./logging')('kafka-node:ConsumerGroupHeartbeat');
5 |
6 | module.exports = class Heartbeat {
7 | constructor (client, handler) {
8 | this.client = client;
9 | this.handler = handler;
10 | this.pending = true;
11 | }
12 |
13 | send (groupId, generationId, memberId) {
14 | this.client.sendHeartbeatRequest(groupId, generationId, memberId, (error) => {
15 | if (this.canceled) {
16 | logger.debug('heartbeat yielded after being canceled', error);
17 | return;
18 | }
19 | this.pending = false;
20 | this.handler(error);
21 | });
22 | }
23 |
24 | verifyResolved () {
25 | if (this.pending) {
26 | this.canceled = true;
27 | this.pending = false;
28 | this.handler(new HeartbeatTimeoutError('Heartbeat timed out'));
29 | return false;
30 | }
31 | return true;
32 | }
33 | };
34 |
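One way to drive this helper, as a sketch; the client object, ids and handler are assumptions, and the client only needs to expose sendHeartbeatRequest as the consumer group's client does:

    const Heartbeat = require('./lib/consumerGroupHeartbeat');

    function sendHeartbeat (client, groupId, generationId, memberId) {
      const heartbeat = new Heartbeat(client, function (error) {
        if (error) {
          console.error('heartbeat failed', error);
        }
      });
      heartbeat.send(groupId, generationId, memberId);
      return heartbeat;
    }

    // Before scheduling the next heartbeat, check that the previous one resolved;
    // verifyResolved() invokes the handler with a HeartbeatTimeoutError if it did not:
    //   if (!pendingHeartbeat.verifyResolved()) { /* previous heartbeat timed out */ }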
--------------------------------------------------------------------------------
/lib/consumerGroupRecovery.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const retry = require('retry');
4 | const logger = require('./logging')('kafka-node:ConsumerGroupRecovery');
5 | const assert = require('assert');
6 | const _ = require('lodash');
7 |
8 | const GroupCoordinatorNotAvailable = require('./errors/GroupCoordinatorNotAvailableError');
9 | const NotCoordinatorForGroup = require('./errors/NotCoordinatorForGroupError');
10 | const IllegalGeneration = require('./errors/IllegalGenerationError');
11 | const GroupLoadInProgress = require('./errors/GroupLoadInProgressError');
12 | const UnknownMemberId = require('./errors/UnknownMemberIdError');
13 | const RebalanceInProgress = require('./errors/RebalanceInProgressError');
14 | const HeartbeatTimeout = require('./errors/HeartbeatTimeoutError');
15 | const TimeoutError = require('./errors/TimeoutError');
16 | const BrokerNotAvailableError = require('./errors').BrokerNotAvailableError;
17 |
18 | const NETWORK_ERROR_CODES = [
19 | 'ETIMEDOUT',
20 | 'ECONNRESET',
21 | 'ESOCKETTIMEDOUT',
22 | 'ECONNREFUSED',
23 | 'EHOSTUNREACH',
24 | 'EADDRNOTAVAIL'
25 | ];
26 |
27 | const recoverableErrors = [
28 | {
29 | errors: [
30 | GroupCoordinatorNotAvailable,
31 | IllegalGeneration,
32 | GroupLoadInProgress,
33 | RebalanceInProgress,
34 | HeartbeatTimeout,
35 | TimeoutError
36 | ]
37 | },
38 | {
39 | errors: [NotCoordinatorForGroup, BrokerNotAvailableError],
40 | handler: function () {
41 | delete this.client.coordinatorId;
42 | }
43 | },
44 | {
45 | errors: [UnknownMemberId],
46 | handler: function () {
47 | this.memberId = null;
48 | }
49 | }
50 | ];
51 |
52 | function isErrorInstanceOf (error, errors) {
53 | return errors.some(function (errorClass) {
54 | return error instanceof errorClass;
55 | });
56 | }
57 |
58 | function ConsumerGroupRecovery (consumerGroup) {
59 | this.consumerGroup = consumerGroup;
60 | this.options = consumerGroup.options;
61 | }
62 |
63 | function isNetworkError (error) {
64 | if (error && error.code && error.errno) {
65 | return _.includes(NETWORK_ERROR_CODES, error.code);
66 | }
67 | return false;
68 | }
69 |
70 | ConsumerGroupRecovery.prototype.tryToRecoverFrom = function (error, source) {
71 | logger.debug('tryToRecoverFrom', source, error);
72 | this.consumerGroup.ready = false;
73 | this.consumerGroup.stopHeartbeats();
74 |
75 |   var retryTimeout = false;
76 |   var shouldRetry =
77 |     isNetworkError(error) ||
78 |     recoverableErrors.some(function (recoverableItem) {
79 |       if (isErrorInstanceOf(error, recoverableItem.errors)) {
80 |         recoverableItem.handler && recoverableItem.handler.call(this.consumerGroup, error);
81 |         return true;
82 |       }
83 |       return false;
84 |     }, this);
85 |
86 |   if (shouldRetry) {
87 |     retryTimeout = this.getRetryTimeout(error);
88 |   }
89 |
90 |   if (shouldRetry && retryTimeout) {
91 | logger.debug(
92 | 'RECOVERY from %s: %s retrying in %s ms',
93 | source,
94 | this.consumerGroup.client.clientId,
95 | retryTimeout,
96 | error
97 | );
98 | this.consumerGroup.scheduleReconnect(retryTimeout);
99 | } else {
100 | this.consumerGroup.emit('error', error);
101 | }
102 | this.lastError = error;
103 | };
104 |
105 | ConsumerGroupRecovery.prototype.clearError = function () {
106 | this.lastError = null;
107 | };
108 |
109 | ConsumerGroupRecovery.prototype.getRetryTimeout = function (error) {
110 | assert(error);
111 | if (!this._timeouts) {
112 | this._timeouts = retry.timeouts({
113 | retries: this.options.retries,
114 | factor: this.options.retryFactor,
115 | minTimeout: this.options.retryMinTimeout
116 | });
117 | }
118 |
119 | if (this._retryIndex == null || this.lastError == null || error.errorCode !== this.lastError.errorCode) {
120 | this._retryIndex = 0;
121 | }
122 |
123 | var index = this._retryIndex++;
124 | if (index >= this._timeouts.length) {
125 | return false;
126 | }
127 | return this._timeouts[index];
128 | };
129 |
130 | module.exports = ConsumerGroupRecovery;
131 |
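The back-off schedule comes from the retry package, shaped by the three consumer-group options read in getRetryTimeout above. A small sketch of the schedule it builds (the option values here are arbitrary):

    const retry = require('retry');

    // Mirrors what getRetryTimeout() builds on first use.
    const timeouts = retry.timeouts({
      retries: 10,      // options.retries
      factor: 1.8,      // options.retryFactor
      minTimeout: 1000  // options.retryMinTimeout
    });

    // An array of millisecond delays; each consecutive occurrence of the same
    // recoverable error advances one step, and recovery gives up (emitting the
    // error) once the array is exhausted, since getRetryTimeout returns false.
    console.log(timeouts);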
--------------------------------------------------------------------------------
/lib/consumerGroupStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const Readable = require('stream').Readable;
4 | const ConsumerGroup = require('./consumerGroup');
5 | const _ = require('lodash');
6 | const logger = require('./logging')('kafka-node:ConsumerGroupStream');
7 | const async = require('async');
8 | const DEFAULT_HIGH_WATER_MARK = 100;
9 | const Denque = require('denque');
10 |
11 | const DEFAULTS = {
12 | autoCommit: true
13 | };
14 |
15 | function convertToCommitPayload (messages) {
16 | const ret = [];
17 | _.forEach(messages, function (partitionOffset, topic) {
18 | _.forEach(partitionOffset, function (offset, partition) {
19 | if (offset != null) {
20 | ret.push({
21 | topic: topic,
22 | partition: partition,
23 | offset: offset,
24 | metadata: 'm'
25 | });
26 | }
27 | });
28 | });
29 | return ret;
30 | }
31 |
32 | class ConsumerGroupStream extends Readable {
33 | constructor (options, topics) {
34 | super({ objectMode: true, highWaterMark: options.highWaterMark || DEFAULT_HIGH_WATER_MARK });
35 |
36 | _.defaultsDeep(options || {}, DEFAULTS);
37 | const self = this;
38 |
39 | this.autoCommit = options.autoCommit;
40 |
41 | options.connectOnReady = false;
42 | options.autoCommit = false;
43 | const originalOnRebalance = options.onRebalance;
44 | options.onRebalance = function (isAlreadyMember, callback) {
45 | const autoCommit = _.once(function (err) {
46 | if (err) {
47 | callback(err);
48 | } else {
49 | self.commit(null, true, callback);
50 | }
51 | });
52 | if (typeof originalOnRebalance === 'function') {
53 | try {
54 | originalOnRebalance(isAlreadyMember, autoCommit);
55 | } catch (e) {
56 | autoCommit(e);
57 | }
58 | } else {
59 | autoCommit();
60 | }
61 | };
62 |
63 | this.consumerGroup = new ConsumerGroup(options, topics);
64 |
65 | this.messageBuffer = new Denque();
66 | this.commitQueue = {};
67 |
68 | this.consumerGroup.on('error', error => this.emit('error', error));
69 | this.consumerGroup.on('connect', () => this.emit('connect'));
70 | this.consumerGroup.on('message', message => {
71 | this.messageBuffer.push(message);
72 | this.consumerGroup.pause();
73 | });
74 | this.consumerGroup.on('done', message => {
75 | setImmediate(() => this.transmitMessages());
76 | });
77 | }
78 |
79 | emit (event, value) {
80 | if (event === 'data' && this.autoCommit && !_.isEmpty(value)) {
81 | setImmediate(() => this.commit(value));
82 | }
83 | super.emit.apply(this, arguments);
84 | }
85 |
86 | _read () {
87 | logger.debug('_read called');
88 | if (!this.consumerGroup.ready) {
89 | logger.debug('consumerGroup is not ready, calling consumerGroup.connect');
90 | this.consumerGroup.connect();
91 | }
92 | this._reading = true;
93 | this.transmitMessages();
94 | }
95 |
96 | commit (message, force, callback) {
97 | if (message != null && message.offset !== -1) {
98 | _.set(this.commitQueue, [message.topic, message.partition], message.offset + 1);
99 | }
100 |
101 | if (this.committing && !force) {
102 | logger.debug('skipping committing');
103 | return callback && callback(null);
104 | }
105 |
106 | const commits = convertToCommitPayload(this.commitQueue);
107 | this.commitQueued(commits, force, callback);
108 | }
109 |
110 | commitQueued (commits, force, callback) {
111 | if (!force) {
112 | this.committing = true;
113 |
114 | this.autoCommitTimer = setTimeout(() => {
115 | logger.debug('setting committing to false');
116 | this.committing = false;
117 |
118 | const queuedCommits = convertToCommitPayload(this.commitQueue);
119 | if (!_.isEmpty(queuedCommits)) this.commitQueued(queuedCommits);
120 | }, this.consumerGroup.options.autoCommitIntervalMs);
121 | }
122 |
123 | if (_.isEmpty(commits)) {
124 | logger.debug('commit ignored. no commits to make.');
125 | return callback && callback(null);
126 | }
127 |
128 | logger.debug('committing', commits);
129 |
130 | this.consumerGroup.sendOffsetCommitRequest(commits, error => {
131 | if (error) {
132 | logger.error('commit request failed', error);
133 | if (callback) {
134 | return callback(error);
135 | }
136 | this.emit('error', error);
137 | return;
138 | }
139 | for (let tp of commits) {
140 | if (_.get(this.commitQueue, [tp.topic, tp.partition]) === tp.offset) {
141 | this.commitQueue[tp.topic][tp.partition] = null;
142 | }
143 | }
144 | callback && callback(null);
145 | });
146 | }
147 |
148 | transmitMessages () {
149 | while (this._reading && !this.messageBuffer.isEmpty()) {
150 | this._reading = this.push(this.messageBuffer.shift());
151 | }
152 | if (this.messageBuffer.isEmpty() && this._reading) {
153 | this.consumerGroup.resume();
154 | }
155 | }
156 |
157 | close (callback) {
158 | clearTimeout(this.autoCommitTimer);
159 | async.series(
160 | [
161 | callback => {
162 | if (this.autoCommit) {
163 | this.commit(null, true, callback);
164 | } else {
165 | callback(null);
166 | }
167 | },
168 | callback => {
169 | this.consumerGroup.close(false, () => {
170 | callback();
171 | this.emit('close');
172 | });
173 | }
174 | ],
175 | callback || _.noop
176 | );
177 | }
178 |
179 |   _destroy (err, callback) {
180 |     this.close(callback);
181 |   }
182 | }
183 |
184 | module.exports = ConsumerGroupStream;
185 |
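A minimal usage sketch; the broker address, group id and topic name are placeholders, and the option names follow the underlying ConsumerGroup's configuration:

    const ConsumerGroupStream = require('./lib/consumerGroupStream');

    const consumerGroupStream = new ConsumerGroupStream(
      {
        kafkaHost: 'localhost:9092',
        groupId: 'example-group',
        autoCommit: true // offsets are committed automatically as 'data' is emitted
      },
      ['example-topic']
    );

    consumerGroupStream.on('error', err => console.error(err));
    consumerGroupStream.on('data', message => {
      console.log('%s/%d offset %d: %s', message.topic, message.partition, message.offset, message.value);
    });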
--------------------------------------------------------------------------------
/lib/consumerStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | var util = require('util');
3 | var _ = require('lodash');
4 | var Readable = require('stream').Readable;
5 | var logger = require('./logging')('kafka-node:ConsumerStream');
6 | const Denque = require('denque');
7 | var CommitStream = require('./commitStream');
8 |
9 | var protocol = require('./protocol');
10 |
11 | var DEFAULTS = {
12 | groupId: 'kafka-node-group',
13 | // Auto commit config
14 | autoCommit: true,
15 | autoCommitMsgCount: 100,
16 | autoCommitIntervalMs: 5000,
17 | // Fetch message config
18 | fetchMaxWaitMs: 100,
19 | fetchMinBytes: 1,
20 | fetchMaxBytes: 1024 * 1024,
21 | bufferRefetchThreshold: 10,
22 | fromOffset: false,
23 | encoding: 'utf8'
24 | };
25 |
26 | var ConsumerStream = function (client, topics, options) {
27 | options.objectMode = true;
28 | this.highWaterMark = options.highWaterMark = options.highWaterMark || 100;
29 | Readable.call(this, options);
30 | if (_.isEmpty(topics)) {
31 | throw new Error('You must specify topics to subscribe to.');
32 | }
33 | // Whether we have sent a fetch request for which we have not yet received
34 | // all messages.
35 | this.fetchInFlight = false;
36 | this.fetchCount = 0;
37 | this.client = client;
38 | this.options = _.defaults(options || {}, DEFAULTS);
39 | this.ready = false;
40 | this.payloads = this.buildPayloads(topics);
41 | this.connect();
42 | this.encoding = this.options.encoding;
43 | this.emittedMessages = 0;
44 | this.messageBuffer = new Denque();
45 | this._reading = false;
46 | this.close = this.close.bind(this);
47 | };
48 | util.inherits(ConsumerStream, Readable);
49 |
50 | // The older non-stream based consumer emitted `message` events rather
51 | // than data events. This provides a backward compatibility layer for
52 | // receiving message events instead.
53 | ConsumerStream.prototype._emit = ConsumerStream.prototype.emit;
54 | ConsumerStream.prototype.emit = function () {
55 | if (arguments[0] === 'data') {
56 | this._emit('message', arguments[1]);
57 | }
58 | this._emit.apply(this, arguments);
59 | };
60 |
61 | /**
62 | * Implements the abstract Readable::_read() method.
63 | */
64 | ConsumerStream.prototype._read = function () {
65 | this._reading = true;
66 | this.transmitMessages();
67 | };
68 |
69 | /**
70 | * Buffers the received message then checks to see if we should send.
71 | *
72 |  * Messages are fetched from Kafka with a size limit rather than a message
73 |  * count, while node.js object streams are limited by object count. As
74 |  * a result we maintain an internal buffer (this.messageBuffer) from
75 |  * which we push messages onto the stream as appropriate in
76 |  * this.transmitMessages().
77 |  *
78 |  * @param {Object} message - A Kafka message object.
79 | */
80 | ConsumerStream.prototype.handleMessage = function (message) {
81 | this.messageBuffer.push(message);
82 | this.transmitMessages();
83 | };
84 |
85 | ConsumerStream.prototype.transmitMessages = function () {
86 | while (this._reading && !this.messageBuffer.isEmpty()) {
87 | this._reading = this.push(this.messageBuffer.shift());
88 | }
89 | if (this.messageBuffer.isEmpty() && this._reading) {
90 | this.fetch();
91 | }
92 | };
93 |
94 | /**
95 | * Fetch messages from kafka if appropriate.
96 | */
97 | ConsumerStream.prototype.fetch = function () {
98 | var self = this;
99 | if (self.ready && !self.fetchInFlight) {
100 | self.fetchInFlight = true;
101 |     var encoder = protocol.encodeFetchRequest(self.options.fetchMaxWaitMs, self.options.fetchMinBytes);
102 | var decoder = protocol.decodeFetchResponse(self.decodeCallback.bind(self), self.maxTickMessages);
103 | self.client.send(self.payloads, encoder, decoder, function (err) {
104 | if (err) {
105 | Array.prototype.unshift.call(arguments, 'error');
106 | self.emit.apply(self, arguments);
107 | }
108 | // If the buffer is below the configured threshold attempt a fetch.
109 | if (self.messageBuffer.length < self.options.bufferRefetchThreshold) {
110 | setImmediate(function () {
111 | self.fetch();
112 | });
113 | }
114 | });
115 | }
116 | };
117 |
118 | /**
119 | * The decode callback is invoked as data is decoded from the response.
120 | */
121 | ConsumerStream.prototype.decodeCallback = function (err, type, message) {
122 | if (err) {
123 | switch (err.message) {
124 | case 'OffsetOutOfRange':
125 | return this.emit('offsetOutOfRange', err);
126 | case 'NotLeaderForPartition':
127 | return this.emit('brokersChanged');
128 | default:
129 | return this.emit('error', err);
130 | }
131 | }
132 |
133 | var encoding = this.options.encoding;
134 |
135 | if (type === 'message') {
136 | if (encoding !== 'buffer' && message.value) {
137 | message.value = message.value.toString(encoding);
138 | }
139 | this.handleMessage(message);
140 | } else if (type === 'done') {
141 | // If we had neither error nor message, this is the end of a fetch,
142 | // and we should update the offset for the next fetch.
143 | this.updateOffsets(message);
144 | this.fetchInFlight = false;
145 | }
146 | };
147 |
148 | ConsumerStream.prototype.connect = function () {
149 | var self = this;
150 |
151 | // Client already exists
152 | if (this.client.ready) {
153 | this.init();
154 | }
155 |
156 | this.client.on('ready', function () {
157 | logger.debug('consumer ready');
158 | if (!self.ready) self.init();
159 | });
160 |
161 | this.client.on('error', function (err) {
162 | logger.debug('client error %s', err.message);
163 | self.emit('error', err);
164 | });
165 |
166 | this.client.on('close', function () {
167 | logger.debug('connection closed');
168 | });
169 |
170 | this.client.on('brokersChanged', function () {
171 | var topicNames = self.payloads.map(function (p) {
172 | return p.topic;
173 | });
174 |
175 | this.refreshMetadata(topicNames, function (err) {
176 | if (err) return self.emit('error', err);
177 | });
178 | });
179 | };
180 |
181 | ConsumerStream.prototype.updateOffsets = function (topics, initing) {
182 | this.payloads.forEach(function (p) {
183 | if (!_.isEmpty(topics[p.topic]) && topics[p.topic][p.partition] !== undefined) {
184 | var offset = topics[p.topic][p.partition];
185 | // Note, we track the offset of the next message we want to see,
186 | // not the most recent message we have seen.
187 | if (offset === -1) offset = 0;
188 | if (!initing) p.offset = offset + 1;
189 | else p.offset = offset;
190 | }
191 | });
192 | };
193 |
194 | ConsumerStream.prototype.close = function (force, cb) {
195 | if (typeof force === 'function') {
196 | cb = force;
197 | force = false;
198 | }
199 | let self = this;
200 |
201 | if (force) {
202 | self.commit(function (err) {
203 |       if (err) self.emit('error', err);
204 | self.client.close(cb);
205 | });
206 | } else {
207 | self.client.close(cb);
208 | }
209 | this.ready = false;
210 | };
211 |
212 | ConsumerStream.prototype.init = function () {
213 | if (!this.payloads.length) {
214 | return;
215 | }
216 |
217 | var self = this;
218 | var topics = self.payloads.map(function (p) {
219 | return p.topic;
220 | });
221 |
222 | self.client.topicExists(topics, function (err) {
223 | if (err) {
224 | return self.emit('error', err);
225 | }
226 |
227 | var start = function () {
228 | self.emit('readable');
229 | self.ready = true;
230 |
231 | // If this consumer was piped immediately then read may have been called
232 | // before readable was emitted so we should trigger a fetch.
233 | if (self._reading) {
234 | setImmediate(function () {
235 | self.fetch();
236 | });
237 | }
238 | };
239 |
240 | if (self.options.fromOffset) {
241 | return start();
242 | }
243 |
244 | self.client.sendOffsetFetchRequest(self.options.groupId, self.payloads, function (err, topics) {
245 | if (err) {
246 | return self.emit('error', err);
247 | }
248 |
249 | self.updateOffsets(topics, true);
250 | start();
251 | });
252 | });
253 | };
254 |
255 | ConsumerStream.prototype.buildPayloads = function (payloads) {
256 | var self = this;
257 | return payloads.map(function (p) {
258 | if (typeof p !== 'object') p = { topic: p };
259 | p.partition = p.partition || 0;
260 | p.offset = p.offset || 0;
261 | p.maxBytes = self.options.fetchMaxBytes;
262 | p.metadata = 'm'; // metadata can be arbitrary
263 | return p;
264 | });
265 | };
266 |
267 | ConsumerStream.prototype.createCommitStream = function (options) {
268 |   // Fill in any options not supplied from the consumer's own options.
269 |   options = _.defaults(options || {}, this.options);
270 | return new CommitStream(this.client, this.payloads, this.options.groupId, options);
271 | };
272 |
273 | module.exports = ConsumerStream;
274 |
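A minimal usage sketch with placeholder broker, topic and group values; note that offsets are only committed when the stream is piped into a commit stream:

    const KafkaClient = require('./lib/kafkaClient');
    const ConsumerStream = require('./lib/consumerStream');

    const client = new KafkaClient({ kafkaHost: 'localhost:9092' });
    const consumerStream = new ConsumerStream(client, ['example-topic'], { groupId: 'example-group' });

    // 'data' fires once per Kafka message; 'message' is emitted as well for
    // backward compatibility with the non-stream consumer.
    consumerStream.on('data', message => console.log(message.value));
    consumerStream.on('error', err => console.error(err));

    // Commit observed offsets using the consumer's own client, payloads and groupId.
    consumerStream.pipe(consumerStream.createCommitStream());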
--------------------------------------------------------------------------------
/lib/errors/ApiNotSupportedError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | /**
4 | * The broker did not support the requested API.
5 | *
6 | *
7 | * @constructor
8 | */
9 | var ApiNotSupportedError = function () {
10 | Error.captureStackTrace(this, this);
11 | this.message = 'The API is not supported by the receiving broker';
12 | };
13 |
14 | util.inherits(ApiNotSupportedError, Error);
15 | ApiNotSupportedError.prototype.name = 'ApiNotSupportedError';
16 |
17 | module.exports = ApiNotSupportedError;
18 |
--------------------------------------------------------------------------------
/lib/errors/BrokerNotAvailableError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | /**
4 | * A broker/leader was not available or discoverable for the action requested
5 | *
6 | * @param {String} message A message describing the issue with the broker
7 | *
8 | * @constructor
9 | */
10 | var BrokerNotAvailableError = function (message) {
11 | Error.captureStackTrace(this, this);
12 | this.message = message;
13 | };
14 |
15 | util.inherits(BrokerNotAvailableError, Error);
16 | BrokerNotAvailableError.prototype.name = 'BrokerNotAvailableError';
17 |
18 | module.exports = BrokerNotAvailableError;
19 |
--------------------------------------------------------------------------------
/lib/errors/FailedToRebalanceConsumerError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | /**
4 | * Failed to rebalance the consumer
5 | *
6 | * @param {String} message A message describing the error during rebalancing of the consumer
7 | *
8 | * @constructor
9 | */
10 | var FailedToRebalanceConsumerError = function (message) {
11 | Error.captureStackTrace(this, this);
12 | this.message = message;
13 | };
14 |
15 | util.inherits(FailedToRebalanceConsumerError, Error);
16 | FailedToRebalanceConsumerError.prototype.name = 'FailedToRebalanceConsumerError';
17 |
18 | module.exports = FailedToRebalanceConsumerError;
19 |
--------------------------------------------------------------------------------
/lib/errors/FailedToRegisterConsumerError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 | var NestedError = require('nested-error-stacks');
3 |
4 | /**
5 | * Failed to register the consumer
6 | *
7 | * @param {String} message A message describing the problem with the registration of the consumer
8 | * @param {Error} error An error related to the registration of the consumer
9 | *
10 | * @constructor
11 | */
12 | var FailedToRegisterConsumerError = function (message, nested) {
13 | NestedError.call(this, message, nested);
14 | this.message = message;
15 | };
16 |
17 | util.inherits(FailedToRegisterConsumerError, NestedError);
18 | FailedToRegisterConsumerError.prototype.name = 'FailedToRegisterConsumerError';
19 |
20 | module.exports = FailedToRegisterConsumerError;
21 |
--------------------------------------------------------------------------------
/lib/errors/GroupCoordinatorNotAvailableError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var GroupCoordinatorNotAvailable = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(GroupCoordinatorNotAvailable, Error);
9 | GroupCoordinatorNotAvailable.prototype.name = 'GroupCoordinatorNotAvailable';
10 |
11 | module.exports = GroupCoordinatorNotAvailable;
12 |
--------------------------------------------------------------------------------
/lib/errors/GroupLoadInProgressError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var GroupLoadInProgress = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(GroupLoadInProgress, Error);
9 | GroupLoadInProgress.prototype.name = 'GroupLoadInProgress';
10 |
11 | module.exports = GroupLoadInProgress;
12 |
--------------------------------------------------------------------------------
/lib/errors/HeartbeatTimeoutError.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 |
5 | var HeartbeatTimeout = function (message) {
6 | Error.captureStackTrace(this, this);
7 | this.message = message;
8 | };
9 |
10 | util.inherits(HeartbeatTimeout, Error);
11 | HeartbeatTimeout.prototype.name = 'HeartbeatTimeout';
12 |
13 | module.exports = HeartbeatTimeout;
14 |
--------------------------------------------------------------------------------
/lib/errors/IllegalGenerationError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var IllegalGeneration = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(IllegalGeneration, Error);
9 | IllegalGeneration.prototype.name = 'IllegalGeneration';
10 |
11 | module.exports = IllegalGeneration;
12 |
--------------------------------------------------------------------------------
/lib/errors/InvalidConfigError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var InvalidConfigError = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(InvalidConfigError, Error);
9 | InvalidConfigError.prototype.name = 'InvalidConfigError';
10 |
11 | module.exports = InvalidConfigError;
12 |
--------------------------------------------------------------------------------
/lib/errors/InvalidConsumerOffsetError.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const util = require('util');
4 | const NestedError = require('nested-error-stacks');
5 |
6 | /**
7 |  * The offset for the consumer is invalid
8 | *
9 | * @param {String} message A message describing the problem with the fetching of offsets for the consumer
10 | *
11 | * @constructor
12 | */
13 | const InvalidConsumerOffsetError = function (message, nested) {
14 | NestedError.apply(this, arguments);
15 | };
16 |
17 | util.inherits(InvalidConsumerOffsetError, NestedError);
18 | InvalidConsumerOffsetError.prototype.name = 'InvalidConsumerOffsetError';
19 |
20 | module.exports = InvalidConsumerOffsetError;
21 |
--------------------------------------------------------------------------------
/lib/errors/InvalidRequestError.js:
--------------------------------------------------------------------------------
1 | const util = require('util');
2 |
3 | /**
4 | * The request was invalid for a specific reason.
5 | *
6 | * @param {*} message A message describing the issue.
7 | *
8 | * @constructor
9 | */
10 | const InvalidRequest = function (message) {
11 | Error.captureStackTrace(this, this);
12 | this.message = message;
13 | };
14 |
15 | util.inherits(InvalidRequest, Error);
16 | InvalidRequest.prototype.name = 'InvalidRequest';
17 |
18 | module.exports = InvalidRequest;
19 |
--------------------------------------------------------------------------------
/lib/errors/MessageSizeTooLargeError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var MessageSizeTooLarge = function (vars) {
4 | Error.captureStackTrace(this, this);
5 | if (typeof vars === 'object') {
6 | this.message = `Found a message larger than the maximum fetch size of this consumer on topic ${vars.topic} partition ${vars.partition} at fetch offset ${vars.offset}. Increase the fetch size, or decrease the maximum message size the broker will allow.`;
7 | } else {
8 | this.message = vars;
9 | }
10 | };
11 |
12 | util.inherits(MessageSizeTooLarge, Error);
13 | MessageSizeTooLarge.prototype.name = 'MessageSizeTooLarge';
14 |
15 | module.exports = MessageSizeTooLarge;
16 |
--------------------------------------------------------------------------------
/lib/errors/NotControllerError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | /**
4 | * The request was sent to a broker that was not the controller.
5 | *
6 | * @param {*} message A message describing the issue.
7 | *
8 | * @constructor
9 | */
10 | var NotController = function (message) {
11 | Error.captureStackTrace(this, this);
12 | this.message = message;
13 | };
14 |
15 | util.inherits(NotController, Error);
16 | NotController.prototype.name = 'NotController';
17 |
18 | module.exports = NotController;
19 |
--------------------------------------------------------------------------------
/lib/errors/NotCoordinatorForGroupError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var NotCoordinatorForGroup = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(NotCoordinatorForGroup, Error);
9 | NotCoordinatorForGroup.prototype.name = 'NotCoordinatorForGroup';
10 |
11 | module.exports = NotCoordinatorForGroup;
12 |
--------------------------------------------------------------------------------
/lib/errors/RebalanceInProgressError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var RebalanceInProgress = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(RebalanceInProgress, Error);
9 | RebalanceInProgress.prototype.name = 'RebalanceInProgress';
10 |
11 | module.exports = RebalanceInProgress;
12 |
--------------------------------------------------------------------------------
/lib/errors/SaslAuthenticationError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | /**
4 | * Thrown when SASL authentication fails for any reason.
5 | *
6 | * @param {Number} errorCode the error code that caused the error.
7 | * @param {String} message A message describing the authentication problem.
8 | *
9 | * @constructor
10 | */
11 | var SaslAuthenticationError = function (errorCode, message) {
12 | Error.captureStackTrace(this, this);
13 | this.errorCode = errorCode;
14 | this.message = message;
15 | };
16 |
17 | util.inherits(SaslAuthenticationError, Error);
18 | SaslAuthenticationError.prototype.name = 'SaslAuthenticationError';
19 |
20 | module.exports = SaslAuthenticationError;
21 |
--------------------------------------------------------------------------------
/lib/errors/TimeoutError.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 |
5 | var TimeoutError = function (message) {
6 | Error.captureStackTrace(this, this);
7 | this.message = message;
8 | };
9 |
10 | util.inherits(TimeoutError, Error);
11 | TimeoutError.prototype.name = 'TimeoutError';
12 |
13 | module.exports = TimeoutError;
14 |
--------------------------------------------------------------------------------
/lib/errors/TopicsNotExistError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | /**
4 | * One or more topics did not exist for the requested action
5 | *
6 | * @param {String|String[]} topics Either an array or single topic name
7 | *
8 | * @constructor
9 | */
10 | var TopicsNotExistError = function (topics) {
11 | Error.captureStackTrace(this, this);
12 | this.topics = topics;
13 | this.message = 'The topic(s) ' + topics.toString() + ' do not exist';
14 | };
15 |
16 | util.inherits(TopicsNotExistError, Error);
17 | TopicsNotExistError.prototype.name = 'TopicsNotExistError';
18 |
19 | module.exports = TopicsNotExistError;
20 |
--------------------------------------------------------------------------------
/lib/errors/UnknownMemberIdError.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 |
3 | var UnknownMemberId = function (message) {
4 | Error.captureStackTrace(this, this);
5 | this.message = message;
6 | };
7 |
8 | util.inherits(UnknownMemberId, Error);
9 | UnknownMemberId.prototype.name = 'UnknownMemberId';
10 |
11 | module.exports = UnknownMemberId;
12 |
--------------------------------------------------------------------------------
/lib/errors/index.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | ApiNotSupportedError: require('./ApiNotSupportedError'),
3 | BrokerNotAvailableError: require('./BrokerNotAvailableError'),
4 | TopicsNotExistError: require('./TopicsNotExistError'),
5 | FailedToRegisterConsumerError: require('./FailedToRegisterConsumerError'),
6 | InvalidConsumerOffsetError: require('./InvalidConsumerOffsetError'),
7 | FailedToRebalanceConsumerError: require('./FailedToRebalanceConsumerError'),
8 | InvalidConfigError: require('./InvalidConfigError'),
9 | SaslAuthenticationError: require('./SaslAuthenticationError'),
10 | InvalidRequestError: require('./InvalidRequestError'),
11 | ConsumerGroupErrors: [
12 | require('./GroupCoordinatorNotAvailableError'),
13 | require('./GroupLoadInProgressError'),
14 | require('./HeartbeatTimeoutError'),
15 | require('./IllegalGenerationError'),
16 | require('./NotCoordinatorForGroupError'),
17 | require('./RebalanceInProgressError'),
18 | require('./UnknownMemberIdError')
19 | ]
20 | };
21 |
--------------------------------------------------------------------------------
/lib/highLevelProducer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 | var BaseProducer = require('./baseProducer');
5 |
6 | /** @inheritdoc */
7 | function HighLevelProducer (client, options, customPartitioner) {
8 | BaseProducer.call(this, client, options, BaseProducer.PARTITIONER_TYPES.cyclic, customPartitioner);
9 | }
10 |
11 | util.inherits(HighLevelProducer, BaseProducer);
12 |
13 | HighLevelProducer.PARTITIONER_TYPES = BaseProducer.PARTITIONER_TYPES;
14 |
15 | module.exports = HighLevelProducer;
16 |
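A minimal usage sketch with placeholder broker and topic values; the send() payload fields follow the ProduceRequest struct in lib/protocol/protocol_struct.js, and the partition is chosen by the cyclic partitioner when omitted:

    const KafkaClient = require('./lib/kafkaClient');
    const HighLevelProducer = require('./lib/highLevelProducer');

    const client = new KafkaClient({ kafkaHost: 'localhost:9092' });
    const producer = new HighLevelProducer(client);

    producer.on('error', err => console.error(err));
    producer.on('ready', () => {
      producer.send([{ topic: 'example-topic', messages: ['hello', 'world'] }], (err, result) => {
        if (err) return console.error(err);
        console.log(result);
      });
    });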
--------------------------------------------------------------------------------
/lib/logging.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const debug = require('debug');
4 |
5 | let loggerProvider = debugLoggerProvider;
6 |
7 | module.exports = exports = function getLogger (name) {
8 | return loggerProvider(name);
9 | };
10 |
11 | exports.setLoggerProvider = function setLoggerProvider (provider) {
12 | loggerProvider = provider;
13 | };
14 |
15 | function debugLoggerProvider (name) {
16 | let logger = debug(name);
17 | logger = logger.bind(logger);
18 |
19 | return {
20 | debug: logger,
21 | info: logger,
22 | warn: logger,
23 | error: logger
24 | };
25 | }
26 |
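A sketch of replacing the default debug-based provider with one backed by console; any object exposing debug/info/warn/error works. The provider must be installed before the other kafka-node modules are required, since each module captures its logger at require time:

    const logging = require('./lib/logging');

    logging.setLoggerProvider(function (name) {
      return {
        debug: (...args) => console.debug(name, ...args),
        info: (...args) => console.info(name, ...args),
        warn: (...args) => console.warn(name, ...args),
        error: (...args) => console.error(name, ...args)
      };
    });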
--------------------------------------------------------------------------------
/lib/offset.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 | var async = require('async');
5 | var EventEmitter = require('events');
6 |
7 | function Offset (client) {
8 | EventEmitter.call(this);
9 | var self = this;
10 | this.client = client;
11 | this.ready = this.client.ready;
12 | this.client.on('ready', function () {
13 | self.ready = true;
14 | self.emit('ready');
15 | });
16 | this.client.once('connect', function () {
17 | self.emit('connect');
18 | });
19 | this.client.on('error', function (err) {
20 | self.emit('error', err);
21 | });
22 | }
23 | util.inherits(Offset, EventEmitter);
24 |
25 | Offset.prototype.fetch = function (payloads, cb) {
26 | if (!this.ready) {
27 | this.once('ready', () => this.fetch(payloads, cb));
28 | return;
29 | }
30 | this.client.sendOffsetRequest(this.buildPayloads(payloads), cb);
31 | };
32 |
33 | Offset.prototype.buildPayloads = function (payloads) {
34 | return payloads.map(function (p) {
35 | p.partition = p.partition || 0;
36 | p.time = p.time || Date.now();
37 | p.maxNum = p.maxNum || 1;
38 | p.metadata = 'm'; // metadata can be arbitrary
39 | return p;
40 | });
41 | };
42 |
43 | Offset.prototype.buildOffsetFetchV1Payloads = function (payloads) {
44 | return payloads.reduce(function (out, p) {
45 | out[p.topic] = out[p.topic] || [];
46 | out[p.topic].push(p.partition || 0);
47 | return out;
48 | }, {});
49 | };
50 |
51 | Offset.prototype.commit = function (groupId, payloads, cb) {
52 | if (!this.ready) {
53 | this.once('ready', () => this.commit(groupId, payloads, cb));
54 | return;
55 | }
56 | this.client.sendOffsetCommitRequest(groupId, this.buildPayloads(payloads), cb);
57 | };
58 |
59 | Offset.prototype.fetchCommits = Offset.prototype.fetchCommitsV1 = function (groupId, payloads, cb) {
60 | if (!this.ready) {
61 | this.once('ready', () => this.fetchCommitsV1(groupId, payloads, cb));
62 | return;
63 | }
64 | this.client.setCoordinatorIdAndSendOffsetFetchV1Request(groupId, this.buildOffsetFetchV1Payloads(payloads), cb);
65 | };
66 |
67 | Offset.prototype.fetchLatestOffsets = function (topics, cb) {
68 | fetchOffsets(this, topics, cb, -1);
69 | };
70 |
71 | Offset.prototype.fetchEarliestOffsets = function (topics, cb) {
72 | fetchOffsets(this, topics, cb, -2);
73 | };
74 |
75 | // private helper
76 | function fetchOffsets (offset, topics, cb, when) {
77 | if (!offset.ready) {
78 | if (when === -1) {
79 | offset.once('ready', () => offset.fetchLatestOffsets(topics, cb));
80 | } else if (when === -2) {
81 | offset.once('ready', () => offset.fetchEarliestOffsets(topics, cb));
82 | }
83 | return;
84 | }
85 | async.waterfall(
86 | [
87 | callback => {
88 | offset.client.loadMetadataForTopics(topics, callback);
89 | },
90 | (topicsMetaData, callback) => {
91 | var payloads = [];
92 | var metaDatas = topicsMetaData[1].metadata;
93 | Object.keys(metaDatas).forEach(function (topicName) {
94 | var topic = metaDatas[topicName];
95 | Object.keys(topic).forEach(function (partition) {
96 | payloads.push({
97 | topic: topicName,
98 | partition: partition,
99 | time: when
100 | });
101 | });
102 | });
103 |
104 | if (payloads.length === 0) {
105 |           return callback(new Error('Topic(s) do not exist'));
106 | }
107 |
108 | offset.fetch(payloads, callback);
109 | },
110 | function (results, callback) {
111 | Object.keys(results).forEach(function (topicName) {
112 | var topic = results[topicName];
113 |
114 | Object.keys(topic).forEach(function (partitionName) {
115 | topic[partitionName] = topic[partitionName][0];
116 | });
117 | });
118 | callback(null, results);
119 | }
120 | ],
121 | cb
122 | );
123 | }
124 |
125 | module.exports = Offset;
126 |
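A minimal usage sketch with placeholder broker and topic values, covering the latest/earliest helpers (which use fetch times of -1 and -2 respectively):

    const KafkaClient = require('./lib/kafkaClient');
    const Offset = require('./lib/offset');

    const client = new KafkaClient({ kafkaHost: 'localhost:9092' });
    const offset = new Offset(client);

    // Resolves to one offset per partition, e.g. { 'example-topic': { '0': 42 } }.
    offset.fetchLatestOffsets(['example-topic'], (err, offsets) => {
      if (err) return console.error(err);
      console.log(offsets);
    });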
--------------------------------------------------------------------------------
/lib/partitioner.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const util = require('util');
4 | const _ = require('lodash');
5 |
6 | function Partitioner () { }
7 |
8 | function DefaultPartitioner () {
9 | Partitioner.call(this);
10 | }
11 | util.inherits(DefaultPartitioner, Partitioner);
12 |
13 | DefaultPartitioner.prototype.getPartition = function (partitions) {
14 | if (partitions && _.isArray(partitions) && partitions.length > 0) {
15 | return partitions[0];
16 | } else {
17 | return 0;
18 | }
19 | };
20 |
21 | function CyclicPartitioner () {
22 | Partitioner.call(this);
23 | this.c = 0;
24 | }
25 | util.inherits(CyclicPartitioner, Partitioner);
26 |
27 | CyclicPartitioner.prototype.getPartition = function (partitions) {
28 | if (_.isEmpty(partitions)) return 0;
29 | return partitions[this.c++ % partitions.length];
30 | };
31 |
32 | function RandomPartitioner () {
33 | Partitioner.call(this);
34 | }
35 | util.inherits(RandomPartitioner, Partitioner);
36 |
37 | RandomPartitioner.prototype.getPartition = function (partitions) {
38 | return partitions[Math.floor(Math.random() * partitions.length)];
39 | };
40 |
41 | function KeyedPartitioner () {
42 | Partitioner.call(this);
43 | }
44 | util.inherits(KeyedPartitioner, Partitioner);
45 |
46 | // Taken from oid package (Dan Bornstein)
47 | // Copyright The Obvious Corporation.
48 | KeyedPartitioner.prototype.hashCode = function (stringOrBuffer) {
49 | let hash = 0;
50 | if (stringOrBuffer) {
51 | const string = stringOrBuffer.toString();
52 | const length = string.length;
53 |
54 | for (let i = 0; i < length; i++) {
55 | hash = ((hash * 31) + string.charCodeAt(i)) & 0x7fffffff;
56 | }
57 | }
58 |
59 | return (hash === 0) ? 1 : hash;
60 | };
61 |
62 | KeyedPartitioner.prototype.getPartition = function (partitions, key) {
63 | key = key || '';
64 |
65 | const index = this.hashCode(key) % partitions.length;
66 | return partitions[index];
67 | };
68 |
69 | function CustomPartitioner (partitioner) {
70 | Partitioner.call(this);
71 | this.getPartition = partitioner;
72 | }
73 | util.inherits(CustomPartitioner, Partitioner);
74 |
75 | module.exports.DefaultPartitioner = DefaultPartitioner;
76 | module.exports.CyclicPartitioner = CyclicPartitioner;
77 | module.exports.RandomPartitioner = RandomPartitioner;
78 | module.exports.KeyedPartitioner = KeyedPartitioner;
79 | module.exports.CustomPartitioner = CustomPartitioner;
80 |
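A small sketch exercising two of the partitioners above; the partition list and key are arbitrary:

    const { KeyedPartitioner, CyclicPartitioner } = require('./lib/partitioner');

    const partitions = [0, 1, 2];

    // The same key always hashes to the same partition.
    const keyed = new KeyedPartitioner();
    console.log(keyed.getPartition(partitions, 'user-42'));

    // Successive calls walk through the partitions in order: 0, 1, 2, 0, ...
    const cyclic = new CyclicPartitioner();
    console.log(cyclic.getPartition(partitions), cyclic.getPartition(partitions));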
--------------------------------------------------------------------------------
/lib/producer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 | var BaseProducer = require('./baseProducer');
5 |
6 | /** @inheritdoc */
7 | function Producer (client, options, customPartitioner) {
8 | BaseProducer.call(this, client, options, BaseProducer.PARTITIONER_TYPES.default, customPartitioner);
9 | }
10 |
11 | util.inherits(Producer, BaseProducer);
12 |
13 | Producer.PARTITIONER_TYPES = BaseProducer.PARTITIONER_TYPES;
14 |
15 | module.exports = Producer;
16 |
--------------------------------------------------------------------------------
/lib/producerStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const Writable = require('stream').Writable;
4 | const KafkaClient = require('./kafkaClient');
5 | const HighLevelProducer = require('./highLevelProducer');
6 | const logger = require('./logging')('kafka-node:ProducerStream');
7 | const _ = require('lodash');
8 |
9 | const DEFAULTS = {
10 | kafkaClient: {
11 | kafkaHost: '127.0.0.1:9092'
12 | },
13 | producer: {
14 | partitionerType: 3
15 | }
16 | };
17 |
18 | const DEFAULT_HIGH_WATER_MARK = 100;
19 |
20 | class ProducerStream extends Writable {
21 | constructor (options) {
22 | if (options == null) {
23 | options = {};
24 | }
25 |
26 | super({ objectMode: true, decodeStrings: false, highWaterMark: options.highWaterMark || DEFAULT_HIGH_WATER_MARK });
27 |
28 | _.defaultsDeep(options, DEFAULTS);
29 |
30 | this.client = new KafkaClient(options.kafkaClient);
31 | this.producer = new HighLevelProducer(this.client, options.producer, options.producer.customPartitioner);
32 | this.producer.on('error', error => this.emit('error', error));
33 | }
34 |
35 | sendPayload (payload, callback) {
36 | if (!_.isArray(payload)) {
37 | payload = [payload];
38 | }
39 | if (!this.producer.ready) {
40 | this.producer.once('ready', () => this.producer.send(payload, callback));
41 | } else {
42 | this.producer.send(payload, callback);
43 | }
44 | }
45 |
46 | close (callback) {
47 | this.producer.close(callback);
48 | }
49 |
50 | _write (message, encoding, callback) {
51 | logger.debug('_write');
52 | this.sendPayload(message, callback);
53 | }
54 |
55 | _writev (chunks, callback) {
56 | logger.debug('_writev');
57 | const payload = _.map(chunks, 'chunk');
58 | this.sendPayload(payload, callback);
59 | }
60 | }
61 |
62 | module.exports = ProducerStream;
63 |
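A minimal usage sketch; with no options the stream connects to 127.0.0.1:9092 and uses partitionerType 3, per the DEFAULTS above. The topic name and message are placeholders:

    const ProducerStream = require('./lib/producerStream');

    const producerStream = new ProducerStream({ highWaterMark: 50 });
    producerStream.on('error', err => console.error(err));

    // Each written object is handed to HighLevelProducer#send; the payload
    // fields follow the ProduceRequest struct (topic, partition, messages, attributes).
    producerStream.write({ topic: 'example-topic', messages: 'hello from a stream' });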
--------------------------------------------------------------------------------
/lib/protocol/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var _ = require('lodash');
4 | var struct = require('./protocol_struct');
5 | var protocol = require('./protocol');
6 |
7 | exports = _.extend(exports, struct, protocol);
8 |
--------------------------------------------------------------------------------
/lib/protocol/protocolVersions.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const p = require('./protocol');
4 | const _ = require('lodash');
5 |
6 | const API_MAP = {
7 | produce: [
8 | [p.encodeProduceRequest, p.decodeProduceResponse],
9 | [p.encodeProduceV1Request, p.decodeProduceV1Response],
10 | [p.encodeProduceV2Request, p.decodeProduceV2Response]
11 | ],
12 | fetch: [
13 | [p.encodeFetchRequest, p.decodeFetchResponse],
14 | [p.encodeFetchRequestV1, p.decodeFetchResponseV1],
15 | [p.encodeFetchRequestV2, p.decodeFetchResponseV1]
16 | ],
17 | offset: [[p.encodeOffsetRequest, p.decodeOffsetResponse]],
18 | metadata: [
19 | [p.encodeMetadataRequest, p.decodeMetadataResponse],
20 | [p.encodeMetadataV1Request, p.decodeMetadataV1Response]
21 | ],
22 | leader: null,
23 | stopReplica: null,
24 | updateMetadata: null,
25 | controlledShutdown: null,
26 | offsetCommit: [
27 | // decode response should be the same for versions 0-2
28 | [p.encodeOffsetCommitRequest, p.decodeOffsetCommitResponse],
29 | [p.encodeOffsetCommitV1Request, p.decodeOffsetCommitResponse],
30 | [p.encodeOffsetCommitV2Request, p.decodeOffsetCommitResponse]
31 | ],
32 | offsetFetch: [
33 | [p.encodeOffsetFetchRequest, p.decodeOffsetFetchResponse],
34 | [p.encodeOffsetFetchV1Request, p.decodeOffsetFetchV1Response]
35 | ],
36 | groupCoordinator: [[p.encodeGroupCoordinatorRequest, p.decodeGroupCoordinatorResponse]],
37 | joinGroup: [[p.encodeJoinGroupRequest, p.decodeJoinGroupResponse]],
38 | heartbeat: [[p.encodeGroupHeartbeatRequest, p.decodeGroupHeartbeatResponse]],
39 | leaveGroup: [[p.encodeLeaveGroupRequest, p.decodeLeaveGroupResponse]],
40 |   syncGroup: [[p.encodeSyncGroupRequest, p.decodeSyncGroupResponse]],
41 | describeGroups: [[p.encodeDescribeGroups, p.decodeDescribeGroups]],
42 | listGroups: [[p.encodeListGroups, p.decodeListGroups]],
43 | saslHandshake: [
44 | [p.encodeSaslHandshakeRequest, p.decodeSaslHandshakeResponse],
45 | [p.encodeSaslHandshakeRequest, p.decodeSaslHandshakeResponse]
46 | ],
47 | apiVersions: [[p.encodeVersionsRequest, p.decodeVersionsResponse]],
48 | createTopics: [
49 | [p.encodeCreateTopicRequest, p.decodeCreateTopicResponse],
50 | [p.encodeCreateTopicRequestV1, p.decodeCreateTopicResponseV1]
51 | ],
52 | deleteTopics: null,
53 | describeConfigs: [
54 | [p.encodeDescribeConfigsRequest, p.decodeDescribeConfigsResponse],
55 | [p.encodeDescribeConfigsRequestV1, p.decodeDescribeConfigsResponseV1],
56 | [p.encodeDescribeConfigsRequestV2, p.decodeDescribeConfigsResponseV2]
57 | ],
58 | saslAuthenticate: [[p.encodeSaslAuthenticationRequest, p.decodeSaslAuthenticationResponse]]
59 | };
60 |
61 | // Since versions API isn't around until 0.10 we need to hardcode the supported API versions for 0.9 here
62 | const API_SUPPORTED_IN_KAFKA_0_9 = {
63 | fetch: {
64 | min: 0,
65 | max: 1,
66 | usable: 1
67 | },
68 | produce: {
69 | min: 0,
70 | max: 1,
71 | usable: 1
72 | },
73 | offsetCommit: {
74 | min: 0,
75 | max: 2,
76 | usable: 2
77 | },
78 | offsetFetch: {
79 | min: 0,
80 | max: 1,
81 | usable: 1
82 | }
83 | };
84 |
85 | module.exports = {
86 | apiMap: API_MAP,
87 | maxSupport: _.mapValues(API_MAP, function (api) {
88 | return api != null ? api.length - 1 : null;
89 | }),
90 | baseSupport: Object.assign(
91 | _.mapValues(API_MAP, function (api) {
92 | return api != null ? { min: 0, max: 0, usable: 0 } : null;
93 | }),
94 | API_SUPPORTED_IN_KAFKA_0_9
95 | )
96 | };
97 |
--------------------------------------------------------------------------------
/lib/protocol/protocol_struct.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | function createStruct () {
4 | var args = arguments[0];
5 | return function () {
6 | for (var i = 0; i < args.length; i++) {
7 | this[args[i]] = arguments[i];
8 | }
9 | };
10 | }
11 |
12 | var KEYS = {
13 | FetchRequest: ['topic', 'partition', 'offset', 'maxBytes'],
14 | FetchResponse: ['topic', 'fetchPartitions'],
15 | OffsetCommitRequest: ['topic', 'partition', 'offset', 'metadata', 'committing', 'autoCommitIntervalMs'],
16 | OffsetCommitResponse: [],
17 | TopicAndPartition: ['topic', 'partition'],
18 | PartitionMetadata: ['topic', 'partition', 'leader', 'replicas', 'isr'],
19 | Message: ['magic', 'attributes', 'key', 'value', 'timestamp'],
20 | ProduceRequest: ['topic', 'partition', 'messages', 'attributes'],
21 | Request: ['payloads', 'encoder', 'decoder', 'callback']
22 | };
23 |
24 | var ERROR_CODE = {
25 | '0': 'NoError',
26 | '-1': 'Unknown',
27 | '1': 'OffsetOutOfRange',
28 | '2': 'InvalidMessage',
29 | '3': 'UnknownTopicOrPartition',
30 | '4': 'InvalidMessageSize',
31 | '5': 'LeaderNotAvailable',
32 | '6': 'NotLeaderForPartition',
33 | '7': 'RequestTimedOut',
34 | '8': 'BrokerNotAvailable',
35 | '9': 'ReplicaNotAvailable',
36 | '10': 'MessageSizeTooLarge',
37 | '11': 'StaleControllerEpochCode',
38 | '12': 'OffsetMetadataTooLargeCode',
39 | '14': 'GroupLoadInProgress',
40 | '15': 'GroupCoordinatorNotAvailable',
41 | '16': 'NotCoordinatorForGroup',
42 | '17': 'InvalidTopic',
43 | '18': 'RecordListTooLarge',
44 | '19': 'NotEnoughReplicas',
45 | '20': 'NotEnoughReplicasAfterAppend',
46 | '21': 'InvalidRequiredAcks',
47 | '22': 'IllegalGeneration',
48 | '23': 'InconsistentGroupProtocol',
49 | '25': 'UnknownMemberId',
50 | '26': 'InvalidSessionTimeout',
51 | '27': 'RebalanceInProgress',
52 | '28': 'InvalidCommitOffsetSize',
53 | '29': 'TopicAuthorizationFailed',
54 | '30': 'GroupAuthorizationFailed',
55 | '31': 'ClusterAuthorizationFailed',
56 | '41': 'NotController',
57 | '42': 'InvalidRequest'
58 | };
59 |
60 | var GROUP_ERROR = {
61 | GroupCoordinatorNotAvailable: require('../errors/GroupCoordinatorNotAvailableError'),
62 | IllegalGeneration: require('../errors/IllegalGenerationError'),
63 | NotCoordinatorForGroup: require('../errors/NotCoordinatorForGroupError'),
64 | GroupLoadInProgress: require('../errors/GroupLoadInProgressError'),
65 | UnknownMemberId: require('../errors/UnknownMemberIdError'),
66 | RebalanceInProgress: require('../errors/RebalanceInProgressError'),
67 | NotController: require('../errors/NotControllerError')
68 | };
69 |
70 | var REQUEST_TYPE = {
71 | produce: 0,
72 | fetch: 1,
73 | offset: 2,
74 | metadata: 3,
75 | leader: 4,
76 | stopReplica: 5,
77 | updateMetadata: 6,
78 | controlledShutdown: 7,
79 | offsetCommit: 8,
80 | offsetFetch: 9,
81 | groupCoordinator: 10,
82 | joinGroup: 11,
83 | heartbeat: 12,
84 | leaveGroup: 13,
85 | syncGroup: 14,
86 | describeGroups: 15,
87 | listGroups: 16,
88 | saslHandshake: 17,
89 | apiVersions: 18,
90 | createTopics: 19,
91 | deleteTopics: 20,
92 | describeConfigs: 32,
93 | saslAuthenticate: 36
94 | };
95 |
96 | Object.keys(KEYS).forEach(function (o) {
97 | exports[o] = createStruct(KEYS[o]);
98 | });
99 | exports.KEYS = KEYS;
100 | exports.ERROR_CODE = ERROR_CODE;
101 | exports.GROUP_ERROR = GROUP_ERROR;
102 | exports.REQUEST_TYPE = REQUEST_TYPE;
103 | exports.KeyedMessage = function KeyedMessage (key, value) {
104 | exports.Message.call(this, 0, 0, key, value, Date.now());
105 | };
106 |
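A small sketch of the KeyedMessage helper exported above; the producer call in the comment is a hypothetical illustration:

    const { KeyedMessage } = require('./lib/protocol');

    // A Message struct with magic 0, attributes 0 and the current timestamp;
    // the key can be used by the keyed partitioner when producing.
    const km = new KeyedMessage('user-42', 'payload for user 42');

    // e.g. producer.send([{ topic: 'example-topic', messages: [km] }], callback);
    console.log(km.key, km.value, km.timestamp);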
--------------------------------------------------------------------------------
/lib/resources/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const RESOURCE_TYPES = {
4 | topic: 'topic',
5 | broker: 'broker'
6 | };
7 |
8 | const resourceTypeMap = {
9 | topic: 2,
10 | broker: 4
11 | };
12 |
13 | module.exports = {
14 | RESOURCE_TYPES,
15 | resourceTypeMap
16 | };
17 |
--------------------------------------------------------------------------------
/lib/utils.js:
--------------------------------------------------------------------------------
1 | var assert = require('assert');
2 | var InvalidConfigError = require('./errors/InvalidConfigError');
3 | var legalChars = new RegExp('^[a-zA-Z0-9._-]*$');
4 | const allowedTopicLength = 249;
5 |
6 | function validateConfig (property, value) {
7 | if (!legalChars.test(value)) {
8 | throw new InvalidConfigError([property, value, "is illegal, contains a character other than ASCII alphanumerics, '.', '_' and '-'"].join(' '));
9 | }
10 | }
11 |
12 | function validateTopicNames (topics) {
13 |   // Apply the same validations the Apache Kafka broker applies to topic names,
14 |   // iterating over every topic.
15 | topics.some(function (topic) {
16 | if (topic.length <= 0) {
17 | throw new InvalidConfigError('topic name is illegal, cannot be empty');
18 | }
19 | if (topic === '.' || topic === '..') {
20 | throw new InvalidConfigError('topic name cannot be . or ..');
21 | }
22 | if (topic.length > allowedTopicLength) {
23 | throw new InvalidConfigError(`topic name is illegal, cannot be longer than ${allowedTopicLength} characters`);
24 | }
25 | if (!legalChars.test(topic)) {
26 | throw new InvalidConfigError(`topic name ${topic} is illegal, contains a character other than ASCII alphanumerics .,_ and -`);
27 | }
28 | });
29 | return true;
30 | }
31 |
32 | function validateTopics (topics) {
33 | if (topics.some(function (topic) {
34 | if ('partition' in topic) {
35 | return typeof topic.partition !== 'number';
36 | }
37 | return false;
38 | })) {
39 |     throw new InvalidConfigError('Partition must be a number');
40 | }
41 | }
42 |
43 | /*
44 | Converts:
45 |
46 | [
47 | {topic: 'test', partition: 0},
48 | {topic: 'test', partition: 1},
49 | {topic: 'Bob', partition: 0}
50 | ]
51 |
52 | Into:
53 |
54 | {
55 | test: [0, 1],
56 |   Bob: [0]
57 | }
58 |
59 | */
60 | function groupPartitionsByTopic (topicPartitions) {
61 | assert(Array.isArray(topicPartitions));
62 | return topicPartitions.reduce(function (result, tp) {
63 | if (!(tp.topic in result)) {
64 | result[tp.topic] = [tp.partition];
65 | } else {
66 | result[tp.topic].push(tp.partition);
67 | }
68 | return result;
69 | }, {});
70 | }
71 |
72 | /*
73 | Converts:
74 | {
75 | test: [0, 1],
76 |   Bob: [0]
77 | }
78 |
79 | Into a topic partition payload:
80 | [
81 | {topic: 'test', partition: 0},
82 | {topic: 'test', partition: 1},
83 | {topic: 'Bob', partition: 0}
84 | ]
85 | */
86 | function createTopicPartitionList (topicPartitions) {
87 | var tpList = [];
88 | for (var topic in topicPartitions) {
89 | if (!topicPartitions.hasOwnProperty(topic)) {
90 | continue;
91 | }
92 | topicPartitions[topic].forEach(function (partition) {
93 | tpList.push({
94 | topic: topic,
95 | partition: partition
96 | });
97 | });
98 | }
99 | return tpList;
100 | }
101 |
102 | module.exports = {
103 | validateConfig: validateConfig,
104 | validateTopics: validateTopics,
105 | groupPartitionsByTopic: groupPartitionsByTopic,
106 | createTopicPartitionList: createTopicPartitionList,
107 | validateTopicNames: validateTopicNames
108 | };
109 |
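A small sketch of the two conversion helpers round-tripping the shapes described in the comments above:

    const { groupPartitionsByTopic, createTopicPartitionList } = require('./lib/utils');

    const grouped = groupPartitionsByTopic([
      { topic: 'test', partition: 0 },
      { topic: 'test', partition: 1 },
      { topic: 'Bob', partition: 0 }
    ]);
    // => { test: [0, 1], Bob: [0] }

    const roundTripped = createTopicPartitionList(grouped);
    // => [{ topic: 'test', partition: 0 }, { topic: 'test', partition: 1 }, { topic: 'Bob', partition: 0 }]
    console.log(grouped, roundTripped);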
--------------------------------------------------------------------------------
/lib/wrapper/BrokerReadable.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 | var Readable = require('stream').Readable;
5 |
6 | var BrokerReadable = function (options) {
7 | Readable.call(this, options);
8 | };
9 |
10 | util.inherits(BrokerReadable, Readable);
11 |
12 | BrokerReadable.prototype._read = function (size) {};
13 |
14 | module.exports = BrokerReadable;
15 |
--------------------------------------------------------------------------------
/lib/wrapper/BrokerTransform.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 | var Transform = require('stream').Transform;
5 | var KafkaBuffer = require('../batch/KafkaBuffer');
6 |
7 | var BrokerTransform = function (options) {
8 | Transform.call(this, options);
9 | this.noAckBatchSize = options ? options.noAckBatchSize : null;
10 | this.noAckBatchAge = options ? options.noAckBatchAge : null;
11 | this._KafkaBuffer = new KafkaBuffer(this.noAckBatchSize, this.noAckBatchAge);
12 | };
13 |
14 | util.inherits(BrokerTransform, Transform);
15 |
16 | BrokerTransform.prototype._transform = function (chunk, enc, done) {
17 | this._KafkaBuffer.addChunk(chunk, this._transformNext.bind(this));
18 | done();
19 | };
20 |
21 | BrokerTransform.prototype._transformNext = function () {
22 | this.push(this._KafkaBuffer.getBatch());
23 | this._KafkaBuffer.truncateBatch();
24 | };
25 |
26 | module.exports = BrokerTransform;
27 |
--------------------------------------------------------------------------------
/lib/wrapper/BrokerWrapper.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var BrokerReadable = require('./BrokerReadable');
4 | var BrokerTransform = require('./BrokerTransform');
5 |
6 | const util = require('util');
7 | const EventEmitter = require('events');
8 |
9 | var BrokerWrapper = function (socket, noAckBatchOptions, idleConnectionMs, needAuthentication) {
10 | EventEmitter.call(this);
11 | this.socket = socket;
12 | this.idleConnectionMs = idleConnectionMs;
13 | this.needAuthentication = !!needAuthentication;
14 | this.authenticated = false;
15 |
16 | var self = this;
17 | var readable = new BrokerReadable();
18 | var transform = new BrokerTransform(noAckBatchOptions);
19 |
20 | readable.pipe(transform);
21 |
22 | transform.on('readable', function () {
23 | var bulkMessage = null;
24 | self._lastWrite = Date.now();
25 |     // eslint-disable-next-line no-cond-assign
26 |     while ((bulkMessage = transform.read())) {
27 | self.socket.write(bulkMessage);
28 | }
29 | });
30 |
31 | this.readableSocket = readable;
32 | };
33 |
34 | util.inherits(BrokerWrapper, EventEmitter);
35 |
36 | BrokerWrapper.prototype.getReadyEventName = function () {
37 | const lp = this.socket.longpolling ? '-longpolling' : '';
38 | return `${this.socket.addr}${lp}-ready`;
39 | };
40 |
41 | BrokerWrapper.prototype.isConnected = function () {
42 | return !this.socket.destroyed && !this.socket.closing && !this.socket.error;
43 | };
44 |
45 | BrokerWrapper.prototype.isReady = function () {
46 | return this.apiSupport != null && (!this.needAuthentication || this.authenticated);
47 | };
48 |
49 | BrokerWrapper.prototype.isIdle = function () {
50 | return Date.now() - this._lastWrite >= this.idleConnectionMs;
51 | };
52 |
53 | BrokerWrapper.prototype.write = function (buffer) {
54 | this._lastWrite = Date.now();
55 | this.socket.write(buffer);
56 | };
57 |
58 | BrokerWrapper.prototype.writeAsync = function (buffer) {
59 | this.readableSocket.push(buffer);
60 | };
61 |
62 | BrokerWrapper.prototype.toString = function () {
63 | return `[${this.constructor.name} ${
64 | this.socket.addr
65 | } (connected: ${this.isConnected()}) (ready: ${
66 | this.isReady()
67 | }) (idle: ${
68 | this.isIdle()
69 | }) (needAuthentication: ${
70 | this.needAuthentication
71 | }) (authenticated: ${
72 | this.authenticated
73 | })]`;
74 | };
75 |
76 | module.exports = BrokerWrapper;
77 |
--------------------------------------------------------------------------------
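BrokerWrapper exposes two write paths: write() stamps _lastWrite and sends the buffer straight to the socket, while writeAsync() pushes it through the BrokerReadable → BrokerTransform pipeline so no-ack payloads are batched by KafkaBuffer (noAckBatchSize / noAckBatchAge) before reaching the socket. A minimal sketch of constructing a wrapper directly, with illustrative option values and assuming a plain net.Socket (kafkaClient.js normally does this internally):

// Hypothetical sketch only; option values are illustrative.
const net = require('net');
const BrokerWrapper = require('./lib/wrapper/BrokerWrapper');

const socket = net.connect(9092, 'localhost');
socket.addr = 'localhost:9092'; // getReadyEventName() reads socket.addr

const broker = new BrokerWrapper(socket, { noAckBatchSize: 1024, noAckBatchAge: 100 }, 5 * 60 * 1000);

broker.write(Buffer.from('sent immediately'));         // direct socket write
broker.writeAsync(Buffer.from('queued for batching')); // buffered until a size/age threshold is hit
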
/logging.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const logging = require('./lib/logging');
4 |
5 | exports.setLoggerProvider = logging.setLoggerProvider;
6 |
--------------------------------------------------------------------------------
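The root logging.js only re-exports setLoggerProvider so applications can replace the default debug-based loggers. A minimal sketch, assuming a provider that takes the logger name and returns an object with debug/info/warn/error methods; set it before the rest of kafka-node is required so the per-module loggers are created through it:

// Sketch of a console-backed logger provider.
const kafkaLogging = require('kafka-node/logging');

function consoleLoggerProvider (name) {
  return {
    debug: console.log.bind(console, name),
    info: console.log.bind(console, name),
    warn: console.warn.bind(console, name),
    error: console.error.bind(console, name)
  };
}

kafkaLogging.setLoggerProvider(consoleLoggerProvider);

const kafka = require('kafka-node'); // loggers created after this point use the provider
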
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kafka-node",
3 | "description": "Client for Apache Kafka v0.9.x, v0.10.x and v0.11.x",
4 | "keywords": [
5 | "kafka",
6 | "consumer",
7 | "producer",
8 | "broker"
9 | ],
10 | "files": [
11 | "kafka.js",
12 | "logging.js",
13 | "lib",
14 | "types"
15 | ],
16 | "bugs": "https://github.com/SOHU-co/kafka-node/issues",
17 | "version": "5.0.0",
18 | "main": "kafka.js",
19 | "types": "types/index.d.ts",
20 | "license": "MIT",
21 | "dependencies": {
22 | "async": "^2.6.2",
23 | "binary": "~0.3.0",
24 | "bl": "^2.2.0",
25 | "buffer-crc32": "~0.2.5",
26 | "buffermaker": "~1.2.0",
27 | "debug": "^2.1.3",
28 | "denque": "^1.3.0",
29 | "lodash": "^4.17.4",
30 | "minimatch": "^3.0.2",
31 | "nested-error-stacks": "^2.0.0",
32 | "optional": "^0.1.3",
33 | "retry": "^0.10.1",
34 | "uuid": "^3.0.0"
35 | },
36 | "engines": {
37 | "node": ">=8.5.1"
38 | },
39 | "optionalDependencies": {
40 | "snappy": "^6.0.1"
41 | },
42 | "devDependencies": {
43 | "@types/node": "^10.12.27",
44 | "coveralls": "^2.11.12",
45 | "doctoc": "^1.2.0",
46 | "eslint": "^5.14.1",
47 | "eslint-config-semistandard": "^13.0.0",
48 | "eslint-config-standard": "^12.0.0",
49 | "eslint-plugin-dependencies": "^2.2.0",
50 | "eslint-plugin-import": "^2.16.0",
51 | "eslint-plugin-node": "^8.0.1",
52 | "eslint-plugin-promise": "^4.0.1",
53 | "eslint-plugin-standard": "^4.0.0",
54 | "execa": "^0.6.1",
55 | "istanbul": "^0.4.4",
56 | "mocha": "^3.1.0",
57 | "optimist": "^0.6.1",
58 | "proxyquire": "^1.7.10",
59 | "should": "^6.0.0",
60 | "sinon": "^2.0.0",
61 | "through2": "^2.0.3",
62 | "tslint": "^5.13.0",
63 | "tslint-config-semistandard": "^7.0.0",
64 | "typescript": "^2.8.3"
65 | },
66 | "repository": {
67 | "type": "git",
68 | "url": "https://github.com/SOHU-Co/kafka-node.git"
69 | },
70 | "scripts": {
71 | "test:ts": "tslint --project ./types/tsconfig.json --config ./types/tslint.json && tsc --project types",
72 | "test": "eslint . && npm run test:ts && ./run-tests.sh",
73 | "startDocker": "./start-docker.sh",
74 | "stopDocker": "docker-compose down",
75 | "updateToc": "doctoc README.md --maxlevel 2 --notitle"
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/run-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | source start-docker.sh
4 | export KAFKA_TEST_HOST=$DOCKER_VM_IP
5 | echo "KAFKA_TEST_HOST: $KAFKA_TEST_HOST"
6 | ./node_modules/.bin/istanbul cover _mocha -- -t 20000 test/**/test.*js test/test.*js
7 |
--------------------------------------------------------------------------------
/start-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$?" != "0" ]; then
4 | DOCKER_VM_IP=''
5 | fi
6 |
7 | if [ -z "$TRAVIS" ]; then
8 | if docker info | grep Alpine > /dev/null; then
9 | echo "Looks like docker for mac is running"
10 | DOCKER_VM_IP='127.0.0.1'
11 | elif docker info | grep dlite > /dev/null; then
12 | echo "Looks like docker based on dlite is running"
13 | DOCKER_VM_IP=`dlite ip`
14 | fi
15 |
16 | DOCKER_VM_IP=${DOCKER_VM_IP:-127.0.0.1}
17 | export KAFKA_ADVERTISED_HOST_NAME=$DOCKER_VM_IP
18 | docker-compose down
19 |
20 | if [ -z "$KAFKA_VERSION" ]; then
21 | docker-compose up -d
22 | else
23 | echo "Using Kafka Version: $KAFKA_VERSION"
24 | docker-compose -f docker-compose.yml -f docker/docker-compose.${KAFKA_VERSION}.yml up -d
25 | fi
26 | else
27 | DOCKER_VM_IP='127.0.0.1'
28 | fi
29 |
--------------------------------------------------------------------------------
/test/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "mocha": true
4 | },
5 | "rules": {
6 | "no-unused-expressions": "off"
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/test/assignment/test.range.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const range = require('../../lib/assignment/range');
4 | const _ = require('lodash');
5 | const should = require('should');
6 |
7 | describe('Range Assignment', function () {
8 | const topicPartition = {
9 | RebalanceTopic: ['0', '1', '2'],
10 | RebalanceTest: ['0', '1', '2']
11 | };
12 |
13 | const groupMembers = [
14 | {
15 | subscription: ['RebalanceTopic', 'RebalanceTest'],
16 | version: 0,
17 | id: 'consumer1'
18 | },
19 | {
20 | subscription: ['RebalanceTopic', 'RebalanceTest'],
21 | version: 0,
22 | id: 'consumer2'
23 | }
24 | ];
25 |
26 | it('should have required fields', function () {
27 | range.should.have.property('assign').which.is.a.Function;
28 | range.name.should.be.eql('range');
29 | range.version.should.be.eql(0);
30 | });
31 |
32 | it('should partition two topics of three partitions between two consumers', function (done) {
33 | range.assign(topicPartition, groupMembers, function (error, result) {
34 | should(error).be.empty;
35 | const consumer1 = _.head(result);
36 | consumer1.memberId.should.eql('consumer1');
37 | Object.keys(consumer1.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
38 | consumer1.topicPartitions['RebalanceTest'].should.eql([0, 1]);
39 | consumer1.topicPartitions['RebalanceTopic'].should.eql([0, 1]);
40 |
41 | const consumer2 = _.last(result);
42 | consumer2.memberId.should.eql('consumer2');
43 | Object.keys(consumer2.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
44 | consumer2.topicPartitions['RebalanceTest'].should.eql([2]);
45 | consumer2.topicPartitions['RebalanceTopic'].should.eql([2]);
46 |
47 | done();
48 | });
49 | });
50 |
51 | it('should partition two topics of three partitions between three consumers', function (done) {
52 | const gm = groupMembers.slice(0);
53 | gm.push({
54 | subscription: ['RebalanceTopic', 'RebalanceTest'],
55 | version: 0,
56 | id: 'consumer3'
57 | });
58 |
59 | range.assign(topicPartition, gm, function (error, result) {
60 | should(error).be.empty;
61 | const consumer1 = _.head(result);
62 | consumer1.memberId.should.eql('consumer1');
63 | Object.keys(consumer1.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
64 | consumer1.topicPartitions['RebalanceTest'].should.eql([0]);
65 | consumer1.topicPartitions['RebalanceTopic'].should.eql([0]);
66 |
67 | const consumer2 = result[1];
68 | consumer2.memberId.should.eql('consumer2');
69 | Object.keys(consumer2.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
70 | consumer2.topicPartitions['RebalanceTest'].should.eql([1]);
71 | consumer2.topicPartitions['RebalanceTopic'].should.eql([1]);
72 |
73 | const consumer3 = _.last(result);
74 | consumer3.memberId.should.eql('consumer3');
75 | Object.keys(consumer3.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
76 | consumer3.topicPartitions['RebalanceTest'].should.eql([2]);
77 | consumer3.topicPartitions['RebalanceTopic'].should.eql([2]);
78 |
79 | done();
80 | });
81 | });
82 |
83 | it('should partition two topics of three partitions between four consumers', function (done) {
84 | const gm = groupMembers.slice(0);
85 | gm.push(
86 | {
87 | subscription: ['RebalanceTopic', 'RebalanceTest'],
88 | version: 0,
89 | id: 'consumer3'
90 | },
91 | {
92 | subscription: ['RebalanceTopic', 'RebalanceTest'],
93 | version: 0,
94 | id: 'consumer4'
95 | }
96 | );
97 |
98 | range.assign(topicPartition, gm, function (error, result) {
99 | should(error).be.empty;
100 | const consumer1 = _.head(result);
101 | consumer1.memberId.should.eql('consumer1');
102 | Object.keys(consumer1.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
103 | consumer1.topicPartitions['RebalanceTest'].should.eql([0]);
104 | consumer1.topicPartitions['RebalanceTopic'].should.eql([0]);
105 |
106 | const consumer2 = result[1];
107 | consumer2.memberId.should.eql('consumer2');
108 | Object.keys(consumer2.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
109 | consumer2.topicPartitions['RebalanceTest'].should.eql([1]);
110 | consumer2.topicPartitions['RebalanceTopic'].should.eql([1]);
111 |
112 | const consumer4 = _.last(result);
113 | consumer4.memberId.should.eql('consumer4');
114 | Object.keys(consumer4.topicPartitions).should.eql([]);
115 | done();
116 | });
117 | });
118 | });
119 |
--------------------------------------------------------------------------------
/test/assignment/test.roundrobin.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const roundRobin = require('../../lib/assignment/roundrobin');
4 | const _ = require('lodash');
5 | const should = require('should');
6 |
7 | describe('Round Robin Assignment', function () {
8 | const topicPartition = {
9 | 'RebalanceTopic': [
10 | '0',
11 | '1',
12 | '2'
13 | ],
14 | 'RebalanceTest': [
15 | '0',
16 | '1',
17 | '2'
18 | ]
19 | };
20 |
21 | it('should have required fields', function () {
22 | roundRobin.should.have.property('assign').which.is.a.Function;
23 | roundRobin.name.should.be.eql('roundrobin');
24 | roundRobin.version.should.be.eql(0);
25 | });
26 |
27 |   it('should distribute two topics of three partitions to two consumers', function (done) {
28 | const groupMembers = [
29 | {
30 | 'subscription': [
31 | 'RebalanceTopic',
32 | 'RebalanceTest'
33 | ],
34 | 'version': 0,
35 | 'id': 'consumer1'
36 | },
37 | {
38 | 'subscription': [
39 | 'RebalanceTopic',
40 | 'RebalanceTest'
41 | ],
42 | 'version': 0,
43 | 'id': 'consumer2'
44 | }
45 | ];
46 |
47 | roundRobin.assign(topicPartition, groupMembers, function (error, assignment) {
48 | should(error).be.empty;
49 | const consumer1 = _.head(assignment);
50 | consumer1.memberId.should.eql('consumer1');
51 | Object.keys(consumer1.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
52 | consumer1.topicPartitions['RebalanceTest'].should.eql(['1']);
53 | consumer1.topicPartitions['RebalanceTopic'].should.eql(['0', '2']);
54 |
55 | const consumer2 = _.last(assignment);
56 | consumer2.memberId.should.eql('consumer2');
57 | Object.keys(consumer2.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
58 | consumer2.topicPartitions['RebalanceTest'].should.eql(['0', '2']);
59 | consumer2.topicPartitions['RebalanceTopic'].should.eql(['1']);
60 | done();
61 | });
62 | });
63 |
64 |   it('should distribute two topics of three partitions to three consumers', function (done) {
65 | const groupMembers = [
66 | {
67 | 'subscription': [
68 | 'RebalanceTopic',
69 | 'RebalanceTest'
70 | ],
71 | 'version': 0,
72 | 'id': 'consumer1'
73 | },
74 | {
75 | 'subscription': [
76 | 'RebalanceTopic',
77 | 'RebalanceTest'
78 | ],
79 | 'version': 0,
80 | 'id': 'consumer3'
81 | },
82 | {
83 | 'subscription': [
84 | 'RebalanceTopic',
85 | 'RebalanceTest'
86 | ],
87 | 'version': 0,
88 | 'id': 'consumer2'
89 | }
90 | ];
91 |
92 | roundRobin.assign(topicPartition, groupMembers, function (error, assignment) {
93 | should(error).be.empty;
94 | assignment = _.sortBy(assignment, 'memberId');
95 |
96 | const consumer1 = _.head(assignment);
97 | consumer1.memberId.should.eql('consumer1');
98 | Object.keys(consumer1.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
99 | consumer1.topicPartitions['RebalanceTest'].should.eql(['0']);
100 | consumer1.topicPartitions['RebalanceTopic'].should.eql(['0']);
101 |
102 | const consumer2 = assignment[1];
103 | consumer2.memberId.should.eql('consumer2');
104 | Object.keys(consumer2.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
105 | consumer2.topicPartitions['RebalanceTest'].should.eql(['1']);
106 | consumer2.topicPartitions['RebalanceTopic'].should.eql(['1']);
107 |
108 | const consumer3 = _.last(assignment);
109 | consumer3.memberId.should.eql('consumer3');
110 | Object.keys(consumer3.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
111 | consumer3.topicPartitions['RebalanceTest'].should.eql(['2']);
112 | consumer3.topicPartitions['RebalanceTopic'].should.eql(['2']);
113 | done();
114 | });
115 | });
116 |
117 |   it('should distribute two topics of three partitions to four consumers', function (done) {
118 | const groupMembers = [
119 | {
120 | 'subscription': [
121 | 'RebalanceTopic',
122 | 'RebalanceTest'
123 | ],
124 | 'version': 0,
125 | 'id': 'consumer1'
126 | },
127 | {
128 | 'subscription': [
129 | 'RebalanceTopic',
130 | 'RebalanceTest'
131 | ],
132 | 'version': 0,
133 | 'id': 'consumer3'
134 | },
135 | {
136 | 'subscription': [
137 | 'RebalanceTopic',
138 | 'RebalanceTest'
139 | ],
140 | 'version': 0,
141 | 'id': 'consumer2'
142 | },
143 | {
144 | 'subscription': [
145 | 'RebalanceTopic',
146 | 'RebalanceTest'
147 | ],
148 | 'version': 0,
149 | 'id': 'consumer4'
150 | }
151 | ];
152 |
153 | roundRobin.assign(topicPartition, groupMembers, function (error, assignment) {
154 | should(error).be.empty;
155 | assignment = _.sortBy(assignment, 'memberId');
156 |
157 | const consumer1 = _.head(assignment);
158 | consumer1.memberId.should.eql('consumer1');
159 | Object.keys(consumer1.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
160 | consumer1.topicPartitions['RebalanceTest'].should.eql(['1']);
161 | consumer1.topicPartitions['RebalanceTopic'].should.eql(['0']);
162 |
163 | const consumer2 = assignment[1];
164 | consumer2.memberId.should.eql('consumer2');
165 | Object.keys(consumer2.topicPartitions).should.eql(['RebalanceTopic', 'RebalanceTest']);
166 | consumer2.topicPartitions['RebalanceTest'].should.eql(['2']);
167 | consumer2.topicPartitions['RebalanceTopic'].should.eql(['1']);
168 |
169 | const consumer3 = assignment[2];
170 | consumer3.memberId.should.eql('consumer3');
171 | Object.keys(consumer3.topicPartitions).should.eql(['RebalanceTopic']);
172 | consumer3.topicPartitions['RebalanceTopic'].should.eql(['2']);
173 |
174 | const consumer4 = _.last(assignment);
175 | consumer4.memberId.should.eql('consumer4');
176 | Object.keys(consumer4.topicPartitions).should.eql(['RebalanceTest']);
177 | done();
178 | });
179 | });
180 | });
181 |
--------------------------------------------------------------------------------
/test/helpers/Childrearer.js:
--------------------------------------------------------------------------------
1 | var EventEmitter = require('events').EventEmitter;
2 | var util = require('util');
3 | var debug = require('debug')('kafka-node:Test-Childrearer');
4 | var fork = require('child_process').fork;
5 | var async = require('async');
6 | var _ = require('lodash');
7 |
8 | function Childrearer (forkPath) {
9 | EventEmitter.call(this);
10 | this.children = [];
11 | this.id = 0;
12 | this.forkPath = forkPath || 'test/helpers/child-hlc';
13 | }
14 |
15 | util.inherits(Childrearer, EventEmitter);
16 |
17 | Childrearer.prototype.setVerifier = function (topic, groupId, verify) {
18 | this.topic = topic;
19 | this.groupId = groupId;
20 | this.verify = verify;
21 | };
22 |
23 | Childrearer.prototype.nextId = function () {
24 | return ++this.id;
25 | };
26 |
27 | Childrearer.prototype.closeAll = function (callback) {
28 | async.each(this.children, function (child, callback) {
29 | child.once('exit', function () {
30 | callback(null);
31 | });
32 | child.kill();
33 | }, callback);
34 | };
35 |
36 | Childrearer.prototype.kill = function (numberOfChildren, callback) {
37 | var children = _.sampleSize(this.children, numberOfChildren);
38 | this._killEachChild(children, callback);
39 | };
40 |
41 | Childrearer.prototype.killLast = function (callback) {
42 | var child = _.last(this.children);
43 | this._killEachChild([child], callback);
44 | };
45 |
46 | Childrearer.prototype.killFirst = function (callback) {
47 | var child = _.head(this.children);
48 | this._killEachChild([child], callback);
49 | };
50 |
51 | Childrearer.prototype._killEachChild = function (children, callback) {
52 | var self = this;
53 | async.each(children, function (child, callback) {
54 | child.once('exit', function (code, signal) {
55 | debug('child %s killed %d %s', this._childNum, code, signal);
56 | _.pull(self.children, this);
57 | callback();
58 | });
59 | child.kill();
60 | }, callback);
61 | };
62 |
63 | Childrearer.prototype.raise = function (children, callback, waitTime) {
64 | var newChildren = _.times(children, _.bind(this._raiseChild, this));
65 |
66 | this.children = this.children.concat(newChildren);
67 |
68 | if (callback) {
69 | async.series([
70 | function (callback) {
71 | async.each(newChildren, function (child, callback) {
72 | child.once('message', function (data) {
73 | if (data.event === 'registered') {
74 | callback(null);
75 | } else {
76 | callback(new Error('unregistered event: ' + data.event));
77 | }
78 | });
79 | }, callback);
80 | },
81 |
82 | function (callback) {
83 | if (waitTime) {
84 | setTimeout(callback, waitTime);
85 | } else {
86 | callback();
87 | }
88 | }], callback
89 | );
90 | }
91 | };
92 |
93 | Childrearer.prototype._raiseChild = function () {
94 | var self = this;
95 | var childNumber = this.nextId();
96 | debug('forking child %d', childNumber);
97 | var child = fork(this.forkPath, ['--groupId=' + this.groupId, '--topic=' + this.topic, '--consumerId=' + 'child_' + childNumber]);
98 | child._childNum = childNumber;
99 | child.on('message', function (data) {
100 | if (data.message) {
101 | self.verify.call(this, data);
102 | }
103 | });
104 | return child;
105 | };
106 |
107 | module.exports = Childrearer;
108 |
--------------------------------------------------------------------------------
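Childrearer drives the multi-process rebalance tests: setVerifier records the topic, group id and a per-message verify callback, raise forks N child consumers (resolving its callback once every child reports a { event: 'registered' } message, plus an optional extra waitTime), and kill/killFirst/killLast terminate children to force rebalances. A minimal sketch of how a test might use it, assuming a child script that reports 'registered' once it has joined the group:

// Hypothetical test sketch; topic and group names are illustrative.
const Childrearer = require('./test/helpers/Childrearer');

const rearer = new Childrearer('test/helpers/child-cg-kafka-client');
rearer.setVerifier('RebalanceTopic', 'rebalance-test-group', function (data) {
  // `this` is the forked child process that consumed the message
  console.log('child %d consumed offset %d', this._childNum, data.message.offset);
});

rearer.raise(2, function (error) {      // fork two children, wait 500ms after both register
  if (error) throw error;
  rearer.killLast(function () {         // trigger a rebalance onto the remaining child
    rearer.closeAll(function () {});    // clean up
  });
}, 500);
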
/test/helpers/EventCounter.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | class EventCounter {
4 | constructor () {
5 | this.events = {};
6 | }
7 |
8 | /**
9 |    * Creates an event counter with an optional expected count and callback.
10 |    *
11 |    * @param {String} eventName - The name of the event counter.
12 |    * @param {Number} limit - The number of events after which the callback
13 |    * should be called.
14 |    * @param {Function} callback - The callback to invoke when the expected
15 |    * number is reached.
16 |    * @returns {Function} - A function that can be called to increment the
17 |    * counter and collect the result.
18 | */
19 | createEventCounter (eventName, limit, callback) {
20 | if (!limit) {
21 | limit = Number.POSITIVE_INFINITY;
22 | }
23 | this.events[eventName] = {
24 | count: 0,
25 | events: []
26 | };
27 | return function () {
28 | this.events[eventName].count++;
29 | this.events[eventName].events.push(arguments);
30 | if (this.events[eventName].count === limit) {
31 | if (callback) {
32 | callback(null, this.events[eventName]);
33 | }
34 | }
35 | }.bind(this);
36 | }
37 | }
38 |
39 | module.exports = EventCounter;
40 |
--------------------------------------------------------------------------------
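A short sketch of using EventCounter in a test to wait for a fixed number of events; the emitter and event name here are illustrative:

// Hypothetical usage sketch for the helper above.
const EventEmitter = require('events');
const EventCounter = require('./test/helpers/EventCounter');

const counter = new EventCounter();
const emitter = new EventEmitter();

// After three 'message' events the callback receives { count, events }.
const onMessage = counter.createEventCounter('messages', 3, function (error, result) {
  console.log('saw %d messages', result.count); // => saw 3 messages
});

emitter.on('message', onMessage);
['a', 'b', 'c'].forEach(value => emitter.emit('message', value));
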
/test/helpers/child-cg-kafka-client.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('../../');
4 | var ConsumerGroup = kafka.ConsumerGroup;
5 | var argv = require('optimist').argv;
6 | var topic = argv.topic || 'topic1';
7 | var options = {
8 | kafkaHost: '127.0.0.1:9092',
9 | autoCommit: true,
10 | fetchMaxWaitMs: 1000,
11 | fetchMaxBytes: 1024 * 1024,
12 | sessionTimeout: 8000,
13 | heartbeatInterval: 250,
14 | retryMinTimeout: 250,
15 | versions: {
16 | requestTimeout: 150
17 | }
18 | };
19 | var debug = require('debug')('kafka-node:Child-ConsumerGroup');
20 |
21 | if (argv.groupId) {
22 | options.groupId = argv.groupId;
23 | }
24 |
25 | if (argv.consumerId) {
26 | options.id = argv.consumerId;
27 | }
28 |
29 | var consumer = new ConsumerGroup(options, [topic]);
30 |
31 | consumer.on('message', function (message) {
32 | var out = {
33 | id: consumer.client.clientId,
34 | message: message
35 | };
36 | process.send(out);
37 | });
38 |
39 | consumer.on('error', function (err) {
40 | debug('error', err);
41 | });
42 |
43 | consumer.on('rebalanced', function () {
44 | debug('%s rebalanced!', consumer.client.clientId);
45 | sendEvent('rebalanced');
46 | });
47 |
48 | consumer.on('rebalancing', function () {
49 | debug('%s is rebalancing', consumer.client.clientId);
50 | });
51 |
52 | function sendEvent (event) {
53 | process.send({
54 | id: consumer.client.clientId,
55 | event: event
56 | });
57 | }
58 |
59 | function close (signal) {
60 | return function () {
61 | debug('closing the consumer (%s) [%s].', signal, consumer.client.clientId);
62 | consumer.close(true, function () {
63 | process.exit();
64 | });
65 | };
66 | }
67 |
68 | process.once('SIGINT', close('SIGINT'));
69 | process.once('SIGTERM', close('SIGTERM'));
70 | process.once('SIGABRT', close('SIGABRT'));
71 | process.once('disconnect', close('disconnect'));
72 |
--------------------------------------------------------------------------------
/test/helpers/sendMessage.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const KafkaClient = require('../../lib/kafkaClient');
4 | const HighLevelProducer = require('../../lib/highLevelProducer');
5 |
6 | function sendMessage (message, topic, done) {
7 | var client = new KafkaClient({ kafkaHost: '127.0.0.1:9092' });
8 | var producer = new HighLevelProducer(client, { requireAcks: 1 });
9 |
10 | client.on('connect', function () {
11 | producer.send([{ topic: topic, messages: message, attributes: 0 }], function (error) {
12 | if (error) {
13 | done(error);
14 | } else {
15 | done(null);
16 | }
17 | producer.close(function () {});
18 | });
19 | });
20 | }
21 |
22 | module.exports = sendMessage;
23 |
--------------------------------------------------------------------------------
/test/helpers/sendMessageEach.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const KafkaClient = require('../../lib/kafkaClient');
4 | const HighLevelProducer = require('../../lib/highLevelProducer');
5 | const async = require('async');
6 | const uuid = require('uuid');
7 |
8 | function sendMessage (message, topic, done, attributes = 0) {
9 | var client = new KafkaClient({ kafkaHost: '127.0.0.1:9092' });
10 | var producer = new HighLevelProducer(client, { requireAcks: 1 });
11 |
12 | client.on('connect', function () {
13 | async.each(
14 | message,
15 | function (message, callback) {
16 | producer.send([{ topic: topic, messages: message, key: uuid.v4(), attributes, timestamp: Date.now() }], callback);
17 | },
18 | function (error) {
19 | if (error) {
20 | done(error);
21 | } else {
22 | producer.close(function () {
23 | done(null);
24 | });
25 | }
26 | }
27 | );
28 | });
29 | }
30 |
31 | module.exports = sendMessage;
32 |
--------------------------------------------------------------------------------
/test/manual.gracefulexit.js:
--------------------------------------------------------------------------------
1 | // Run this test with:
2 | // mocha test/manual.gracefulexit.js --no-exit
3 |
4 | var Client = require('../lib/kafkaClient'); // lib/client.js no longer exists; KafkaClient emits 'ready'
5 |
6 | describe('Client', function () {
7 | describe('#close', function () {
8 | it('should close gracefully', function (done) {
9 | var client = new Client();
10 | client.on('ready', function () {
11 | client.close(done);
12 | });
13 | });
14 | });
15 | });
16 |
--------------------------------------------------------------------------------
/test/mocha.opts:
--------------------------------------------------------------------------------
1 | --require should
2 | --reporter spec
3 | --ui bdd
4 | --slow 2s
5 | --timeout 4s
6 | --recursive
7 |
--------------------------------------------------------------------------------
/test/mocks/mockClient.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var util = require('util');
4 | var EventEmitter = require('events').EventEmitter;
5 |
6 | function FakeClient () {
7 | EventEmitter.call(this);
8 |
9 | this.topicExists = function (topics, cb) {
10 | setImmediate(cb);
11 | };
12 | this.refreshMetadata = function (topicNames, cb) {
13 | setImmediate(cb);
14 | };
15 | this.sendOffsetCommitRequest = function (groupId, commits, cb) {
16 | setImmediate(cb);
17 | };
18 | this.sendFetchRequest = function (consumer, payloads, fetchMaxWaitMs, fetchMinBytes, maxTickMessages) {};
19 | this.sendOffsetFetchRequest = function (groupId, payloads, cb) {
20 | setImmediate(cb);
21 | };
22 | this.sendOffsetRequest = function (payloads, cb) {
23 | setImmediate(cb);
24 | };
25 | this.addTopics = function (topics, cb) {
26 | setImmediate(cb);
27 | };
28 | this.removeTopicMetadata = function (topics, cb) {
29 | setImmediate(cb);
30 | };
31 | this.close = function (cb) {
32 | setImmediate(cb);
33 | };
34 | }
35 | util.inherits(FakeClient, EventEmitter);
36 |
37 | module.exports = FakeClient;
38 |
--------------------------------------------------------------------------------
/test/mocks/mockSocket.js:
--------------------------------------------------------------------------------
1 | var util = require('util');
2 | var EventEmitter = require('events').EventEmitter;
3 |
4 | function FakeSocket () {
5 | EventEmitter.call(this);
6 |
7 | this.unref = function () {};
8 |
9 | this.end = function () {
10 | var self = this;
11 | setImmediate(function () {
12 | self.emit('end');
13 | });
14 | };
15 | this.close = function () {};
16 | this.setKeepAlive = function () {};
17 | this.destroy = function () {};
18 | this.write = function () {};
19 | }
20 |
21 | util.inherits(FakeSocket, EventEmitter);
22 |
23 | module.exports = FakeSocket;
24 |
--------------------------------------------------------------------------------
/test/test.admin.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const Admin = require('../lib/admin');
4 | const ConsumerGroup = require('../lib/consumerGroup');
5 | const KafkaClient = require('../lib/kafkaClient');
6 | const uuid = require('uuid');
7 | const should = require('should');
8 |
9 | describe('Admin', function () {
10 | describe('#listGroups', function () {
11 | const createTopic = require('../docker/createTopic');
12 | let admin, consumer;
13 | const topic = uuid.v4();
14 | const groupId = 'test-group-id';
15 |
16 | before(function (done) {
17 | createTopic(topic, 1, 1).then(function () {
18 | consumer = new ConsumerGroup(
19 | {
20 | kafkaHost: 'localhost:9092',
21 | groupId: groupId
22 | },
23 | topic
24 | );
25 | consumer.once('connect', function () {
26 | done();
27 | });
28 | });
29 | });
30 |
31 | after(function (done) {
32 | consumer.close(done);
33 | });
34 |
35 | it('should return a list of consumer groups', function (done) {
36 | admin = new Admin(consumer.client);
37 | admin.listGroups(function (error, res) {
38 | res.should.have.keys(groupId);
39 | res[groupId].should.eql('consumer');
40 | done(error);
41 | });
42 | });
43 | });
44 |
45 | describe('#describeGroups', function () {
46 | const createTopic = require('../docker/createTopic');
47 | let admin, consumer;
48 | const topic = uuid.v4();
49 | const groupId = 'test-group-id';
50 |
51 | before(function (done) {
52 | createTopic(topic, 1, 1).then(function () {
53 | consumer = new ConsumerGroup(
54 | {
55 | kafkaHost: 'localhost:9092',
56 | groupId: groupId
57 | },
58 | topic
59 | );
60 | consumer.once('connect', function () {
61 | done();
62 | });
63 | });
64 | });
65 |
66 | after(function (done) {
67 | consumer.close(done);
68 | });
69 |
70 | it('should describe a list of consumer groups', function (done) {
71 | admin = new Admin(consumer.client);
72 | admin.describeGroups([groupId], function (error, res) {
73 | res.should.have.keys(groupId);
74 | res[groupId].should.have.property('members').with.lengthOf(1);
75 | res[groupId].should.have.property('state', 'Stable');
76 | done(error);
77 | });
78 | });
79 |
80 |     it('should return empty members if consumer group doesn\'t exist', function (done) {
81 | admin = new Admin(consumer.client);
82 | const nonExistentGroup = 'non-existent-group';
83 | admin.describeGroups([nonExistentGroup], function (error, res) {
84 | res.should.have.keys(nonExistentGroup);
85 | res[nonExistentGroup].should.have.property('members').with.lengthOf(0);
86 | res[nonExistentGroup].should.have.property('state', 'Dead');
87 | done(error);
88 | });
89 | });
90 | });
91 |
92 | describe('#describeConfigs', function () {
93 | const createTopic = require('../docker/createTopic');
94 | let admin, client;
95 | const topicName = uuid.v4();
96 |
97 | before(function (done) {
98 | if (['0.9', '0.10'].includes(process.env.KAFKA_VERSION)) {
99 | this.skip();
100 | }
101 |
102 | createTopic(topicName, 1, 1).then(function () {
103 | client = new KafkaClient({ kafkaHost: 'localhost:9092' });
104 | admin = new Admin(client);
105 | admin.once('ready', done);
106 | });
107 | });
108 |
109 | after(function (done) {
110 | if (client) {
111 | client.close(done);
112 | } else {
113 | done();
114 | }
115 | });
116 |
117 | it('should describe a list of topic configs', function (done) {
118 | const request = {
119 | resourceType: admin.RESOURCE_TYPES.topic,
120 | resourceName: topicName,
121 | configNames: []
122 | };
123 | const payload = {
124 | includeSynonyms: false,
125 | resources: [request]
126 | };
127 | admin.describeConfigs(payload, function (error, res) {
128 | res.should.be.instanceof(Array);
129 | res.length.should.be.exactly(1);
130 | const entries = res[0];
131 | entries.should.have.property('resourceType').and.exactly('2');
132 | entries.should.have.property('resourceName').and.exactly(topicName);
133 | entries.should.have.property('configEntries');
134 | entries.configEntries.length.should.be.greaterThan(1);
135 | done(error);
136 | });
137 | });
138 |
139 | it('should describe a list of broker configs for a specific broker id', function (done) {
140 | const brokerName = '1001';
141 | const request = {
142 | resourceType: admin.RESOURCE_TYPES.broker,
143 | resourceName: brokerName,
144 | configNames: []
145 | };
146 | const payload = {
147 | includeSynonyms: false,
148 | resources: [request]
149 | };
150 | admin.describeConfigs(payload, function (error, res) {
151 | res.should.be.instanceof(Array);
152 | res.length.should.be.exactly(1);
153 | const entries = res[0];
154 | entries.should.have.property('resourceType').and.exactly('4');
155 | entries.should.have.property('resourceName').and.exactly(brokerName);
156 | entries.should.have.property('configEntries');
157 | entries.configEntries.length.should.be.greaterThan(1);
158 | done(error);
159 | });
160 | });
161 |
162 |     it('should return an error if the resource (topic) doesn\'t exist', function (done) {
163 | const request = {
164 | resourceType: admin.RESOURCE_TYPES.topic,
165 | resourceName: '',
166 | configNames: []
167 | };
168 | const payload = {
169 | includeSynonyms: false,
170 | resources: [request]
171 | };
172 | admin.describeConfigs(payload, function (error, res) {
173 | error.should.have.property('message').and.containEql('InvalidTopic');
174 | done();
175 | });
176 | });
177 |
178 |     it('should return an error if the resource (broker) doesn\'t exist', function (done) {
179 | const brokerId = '9999';
180 | const request = {
181 | resourceType: admin.RESOURCE_TYPES.broker,
182 | resourceName: brokerId,
183 | configNames: []
184 | };
185 | const payload = {
186 | includeSynonyms: false,
187 | resources: [request]
188 | };
189 | admin.describeConfigs(payload, function (error, res) {
190 | should.not.exist(res);
191 | error.should.have.property('message').and.containEql('No broker with id ' + brokerId);
192 | done();
193 | });
194 | });
195 |
196 | it('should return error for invalid resource type', function (done) {
197 | const request = {
198 | resourceType: 25,
199 | resourceName: topicName,
200 | configNames: []
201 | };
202 | const payload = {
203 | includeSynonyms: false,
204 | resources: [request]
205 | };
206 | admin.describeConfigs(payload, function (error, res) {
207 | should.not.exist(res);
208 | error.should.have.property('message').and.equal(`Unexpected resource type 25 for resource ${topicName}`);
209 | done();
210 | });
211 | });
212 | });
213 | });
214 |
--------------------------------------------------------------------------------
/test/test.baseProducer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const BaseProducer = require('../lib/baseProducer');
4 | const ConsumerGroup = require('../lib/consumerGroup');
5 | const KafkaClient = require('../lib/kafkaClient');
6 | const Client = require('./mocks/mockClient');
7 | const uuid = require('uuid');
8 | const sinon = require('sinon');
9 | const async = require('async');
10 | const should = require('should');
11 |
12 | describe('BaseProducer', function () {
13 | describe('ready event', function () {
14 | const KAFKA_HOST = 'localhost:9092';
15 | let client;
16 | before(function () {
17 | client = new KafkaClient({
18 | kafkaHost: KAFKA_HOST
19 | });
20 | });
21 |
22 | it('can listen on the ready event before the client is connected', function (done) {
23 | const producer = new BaseProducer(client, {}, BaseProducer.PARTITIONER_TYPES.default);
24 | producer.once('ready', function () {
25 | should(producer.ready).be.true;
26 | done();
27 | });
28 | });
29 |
30 | it('can listen on the ready event after the client is connected', function (done) {
31 | should(client.ready).be.true;
32 | const producer = new BaseProducer(client, {}, BaseProducer.PARTITIONER_TYPES.default);
33 | producer.once('ready', function () {
34 | should(producer.ready).be.true;
35 | done();
36 | });
37 | });
38 | });
39 |
40 | describe('encoding and decoding key attribute', function () {
41 | const KAFKA_HOST = 'localhost:9092';
42 | let consumerGroup, topic, producer;
43 | beforeEach(function (done) {
44 | topic = uuid.v4();
45 |
46 | const createTopic = require('../docker/createTopic');
47 |
48 | async.series(
49 | [
50 | function (callback) {
51 | createTopic(topic, 1, 1)
52 | .then(function () {
53 | callback(null);
54 | })
55 | .catch(error => callback(error));
56 | },
57 | function (callback) {
58 | const client = new KafkaClient({
59 | kafkaHost: KAFKA_HOST
60 | });
61 |
62 | producer = new BaseProducer(client, {}, BaseProducer.PARTITIONER_TYPES.default);
63 | producer.once('ready', function () {
64 | callback(null);
65 | });
66 | },
67 | function (callback) {
68 | consumerGroup = new ConsumerGroup(
69 | {
70 | kafkaHost: KAFKA_HOST,
71 | groupId: uuid.v4()
72 | },
73 | topic
74 | );
75 | consumerGroup.once('connect', function () {
76 | callback(null);
77 | });
78 | }
79 | ],
80 | done
81 | );
82 | });
83 |
84 | describe('gzip compression', function () {
85 | afterEach(function () {
86 | consumerGroup.on('error', function () {});
87 | });
88 |
89 | it('kafkaClient', function (done) {
90 | const messageValue = uuid.v4();
91 | const time = Date.now();
92 | producer.send(
93 | [
94 | {
95 | topic: topic,
96 | messages: messageValue,
97 | key: 'myKeyIsHere',
98 | attributes: 1,
99 | timestamp: time
100 | }
101 | ],
102 | function (error) {
103 | if (error) {
104 | done(error);
105 | }
106 | }
107 | );
108 |
109 | consumerGroup.on('message', function (message) {
110 | console.log(message);
111 | message.key.should.be.exactly('myKeyIsHere');
112 | message.value.should.be.exactly(messageValue);
113 | should(message.timestamp.getTime()).be.exactly(time);
114 | done();
115 | });
116 | });
117 | });
118 |
119 | afterEach(function (done) {
120 | producer.close();
121 | consumerGroup.close(done);
122 | });
123 |
124 | it('verify key string value makes it into the message', function (done) {
125 | producer.send(
126 | [
127 | {
128 | topic: topic,
129 | messages: 'this is my message',
130 | key: 'myKeyIsHere'
131 | }
132 | ],
133 | function (error) {
134 | if (error) {
135 | done(error);
136 | }
137 | }
138 | );
139 |
140 | consumerGroup.on('message', function (message) {
141 | message.key.should.be.exactly('myKeyIsHere');
142 | done();
143 | });
144 | });
145 |
146 | it('verify empty key string value makes it into the message', function (done) {
147 | producer.send(
148 | [
149 | {
150 | topic: topic,
151 | messages: 'this is my message',
152 | key: ''
153 | }
154 | ],
155 | function (error) {
156 | if (error) {
157 | done(error);
158 | }
159 | }
160 | );
161 |
162 | consumerGroup.on('message', function (message) {
163 | message.key.should.be.exactly('');
164 | done();
165 | });
166 | });
167 |
168 | it('verify key value of 0 makes it into the message', function (done) {
169 | producer.send(
170 | [
171 | {
172 | topic: topic,
173 | messages: 'this is my message',
174 | key: 0
175 | }
176 | ],
177 | function (error) {
178 | if (error) {
179 | done(error);
180 | }
181 | }
182 | );
183 |
184 | consumerGroup.on('message', function (message) {
185 | message.key.should.be.exactly('0');
186 | done();
187 | });
188 | });
189 |
190 | it('verify key value of null makes it into the message as null', function (done) {
191 | producer.send(
192 | [
193 | {
194 | topic: topic,
195 | messages: 'this is my message',
196 | key: null
197 | }
198 | ],
199 | function (error) {
200 | if (error) {
201 | done(error);
202 | }
203 | }
204 | );
205 |
206 | consumerGroup.on('message', function (message) {
207 | should(message.key).be.null;
208 | done();
209 | });
210 | });
211 |
212 | it('verify key value of undefined makes it into the message as null', function (done) {
213 | producer.send(
214 | [
215 | {
216 | topic: topic,
217 | messages: 'this is my message',
218 | key: undefined
219 | }
220 | ],
221 | function (error) {
222 | if (error) {
223 | done(error);
224 | }
225 | }
226 | );
227 |
228 | consumerGroup.on('message', function (message) {
229 | should(message.key).be.null;
230 | done();
231 | });
232 | });
233 |
234 | it('verify key value of buffer makes it into the message as untouched buffer', function (done) {
235 | const keyBuffer = Buffer.from('testing123');
236 | producer.send(
237 | [
238 | {
239 | topic: topic,
240 | messages: 'this is my message',
241 | key: keyBuffer
242 | }
243 | ],
244 | function (error) {
245 | if (error) {
246 | done(error);
247 | }
248 | }
249 | );
250 |
251 | consumerGroup.options.keyEncoding = 'buffer';
252 |
253 | consumerGroup.on('message', function (message) {
254 | should(message.key).not.be.empty;
255 | keyBuffer.equals(message.key).should.be.true;
256 | done();
257 | });
258 | });
259 | });
260 |
261 | describe('On Brokers Changed', function () {
262 | it('should emit error when refreshMetadata fails', function (done) {
263 | const fakeClient = new Client();
264 | fakeClient.topicMetadata = {};
265 |
266 | const producer = new BaseProducer(fakeClient, {}, BaseProducer.PARTITIONER_TYPES.default);
267 |
268 | producer.once('error', function (error) {
269 | error.should.be.an.instanceOf(Error);
270 | error.message.should.be.exactly('boo');
271 | done();
272 | });
273 |
274 | const myError = new Error('boo');
275 | const refreshMetadataStub = sinon.stub(fakeClient, 'refreshMetadata').yields(myError);
276 |
277 | fakeClient.emit('brokersChanged');
278 |
279 | sinon.assert.calledWith(refreshMetadataStub, []);
280 | });
281 |
282 | it('should call client.refreshMetadata when brokerChanges', function (done) {
283 | const fakeClient = new Client();
284 |
285 | fakeClient.topicMetadata = {
286 | MyTopic: [0],
287 | YourTopic: [0, 1, 2]
288 | };
289 |
290 | const producer = new BaseProducer(fakeClient, {}, BaseProducer.PARTITIONER_TYPES.default);
291 |
292 | producer.once('error', done);
293 |
294 | const refreshMetadataStub = sinon.stub(fakeClient, 'refreshMetadata').yields(null);
295 |
296 | fakeClient.emit('brokersChanged');
297 |
298 | fakeClient.topicMetadata.should.have.property('MyTopic');
299 | fakeClient.topicMetadata.should.have.property('YourTopic');
300 | sinon.assert.calledWith(refreshMetadataStub, ['MyTopic', 'YourTopic']);
301 | done();
302 | });
303 | });
304 | });
305 |
--------------------------------------------------------------------------------
/test/test.consumerGroupHeartbeat.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const sinon = require('sinon');
4 | const should = require('should');
5 | const Heartbeat = require('../lib/consumerGroupHeartbeat');
6 | const HeartbeatTimeout = require('../lib/errors/HeartbeatTimeoutError');
7 |
8 | describe('Consumer Group Heartbeat', function () {
9 | let sandbox;
10 |
11 | beforeEach(function () {
12 | sandbox = sinon.sandbox.create();
13 | });
14 |
15 | afterEach(function () {
16 | sandbox.restore();
17 | });
18 |
19 | it('should call heartbeat handler if heartbeat yields error', function (done) {
20 | const mockClient = {
21 | sendHeartbeatRequest: sandbox.stub().yieldsAsync(new Error('busted'))
22 | };
23 |
24 | const heartbeat = new Heartbeat(mockClient, function (error) {
25 | error.message.should.eql('busted');
26 | sinon.assert.calledWithExactly(mockClient.sendHeartbeatRequest, 'groupId', 1, 'fake-member-id', sinon.match.func);
27 | heartbeat.pending.should.be.false;
28 | setImmediate(done);
29 | });
30 |
31 | heartbeat.pending.should.be.true;
32 | heartbeat.send('groupId', 1, 'fake-member-id');
33 | setImmediate(() => heartbeat.verifyResolved().should.be.true);
34 | });
35 |
36 | it('should call heartbeat handler if heartbeat yields null', function (done) {
37 | const mockClient = {
38 | sendHeartbeatRequest: sandbox.stub().yieldsAsync(null)
39 | };
40 |
41 | const heartbeat = new Heartbeat(mockClient, function (error) {
42 | should(error).be.null;
43 | sinon.assert.calledWithExactly(mockClient.sendHeartbeatRequest, 'groupId', 1, 'fake-member-id', sinon.match.func);
44 | heartbeat.pending.should.be.false;
45 | setImmediate(done);
46 | });
47 |
48 | heartbeat.pending.should.be.true;
49 | heartbeat.send('groupId', 1, 'fake-member-id');
50 | setImmediate(() => heartbeat.verifyResolved().should.be.true);
51 | });
52 |
53 | it('should call heartbeat handler with instance of TimeoutError if heartbeat timed out', function (done) {
54 | const mockClient = {
55 | sendHeartbeatRequest: sandbox.stub()
56 | };
57 |
58 | const heartbeat = new Heartbeat(mockClient, function (error) {
59 | error.should.be.an.instanceOf(HeartbeatTimeout);
60 | sinon.assert.calledWithExactly(mockClient.sendHeartbeatRequest, 'groupId', 1, 'fake-member-id', sinon.match.func);
61 | heartbeat.pending.should.be.false;
62 | heartbeat.canceled.should.be.true;
63 | setImmediate(done);
64 | });
65 |
66 | heartbeat.pending.should.be.true;
67 | heartbeat.send('groupId', 1, 'fake-member-id');
68 | setImmediate(function () {
69 | heartbeat.verifyResolved().should.be.false;
70 | });
71 | });
72 | });
73 |
--------------------------------------------------------------------------------
/test/test.consumerGroupRecovery.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const should = require('should');
4 | const _ = require('lodash');
5 | const sinon = require('sinon');
6 | const ConsumerGroupRecovery = require('../lib/consumerGroupRecovery');
7 | const GroupCoordinatorNotAvailable = require('../lib/errors/GroupCoordinatorNotAvailableError');
8 | const GroupLoadInProgress = require('../lib/errors/GroupLoadInProgressError');
9 | const HeartbeatTimeout = require('../lib/errors/HeartbeatTimeoutError');
10 | const TimeoutError = require('../lib/errors/TimeoutError');
11 | const BrokerNotAvailableError = require('../lib/errors').BrokerNotAvailableError;
12 | const EventEmitter = require('events');
13 |
14 | describe('ConsumerGroupRecovery', function () {
15 | var consumerGroupRecovery, fakeConsumerGroup;
16 |
17 | beforeEach(function () {
18 | fakeConsumerGroup = new EventEmitter();
19 | fakeConsumerGroup.client = new EventEmitter();
20 | fakeConsumerGroup.scheduleReconnect = () => {
21 | throw new Error('should be stubbed!');
22 | };
23 | Object.assign(fakeConsumerGroup, {
24 | stopHeartbeats: sinon.stub(),
25 | options: {
26 | retries: 10,
27 | retryFactor: 1.8,
28 | retryMinTimeout: 1000
29 | }
30 | });
31 | consumerGroupRecovery = new ConsumerGroupRecovery(fakeConsumerGroup);
32 | });
33 |
34 | describe('#tryToRecoverFrom', function () {
35 |     it('should emit an error on the consumer group when trying to recover from an unknown error', function (done) {
36 | var testError = new Error('My test error');
37 |
38 | fakeConsumerGroup.once('error', function (error) {
39 | error.should.be.eql(testError);
40 | done();
41 | });
42 |
43 | consumerGroupRecovery.tryToRecoverFrom(testError, 'test');
44 |
45 | sinon.assert.calledOnce(fakeConsumerGroup.stopHeartbeats);
46 | fakeConsumerGroup.ready.should.be.false;
47 | consumerGroupRecovery.lastError.should.be.eql(testError);
48 | });
49 |
50 | it('should try to recover from a BrokerNotAvailableError', function () {
51 | const brokerNotAvailableError = new BrokerNotAvailableError('test error');
52 |
53 | fakeConsumerGroup.client.coordinatorId = 1234;
54 |
55 | fakeConsumerGroup.once('error', function (error) {
56 | error.should.not.be.eql(brokerNotAvailableError);
57 | });
58 |
59 | sinon.stub(fakeConsumerGroup, 'scheduleReconnect');
60 |
61 | consumerGroupRecovery.tryToRecoverFrom(brokerNotAvailableError, 'test');
62 |
63 | sinon.assert.calledOnce(fakeConsumerGroup.stopHeartbeats);
64 | fakeConsumerGroup.ready.should.be.false;
65 | consumerGroupRecovery.lastError.should.be.eql(brokerNotAvailableError);
66 |
67 | sinon.assert.calledOnce(fakeConsumerGroup.scheduleReconnect);
68 | should(fakeConsumerGroup.client.coordinatorId).be.undefined;
69 | });
70 |
71 | it('should try to recover from a temporary network error', function () {
72 | const fakeNetworkError = new Error('read ETIMEDOUT');
73 | fakeNetworkError.code = fakeNetworkError.errno = 'ETIMEDOUT';
74 |
75 | fakeConsumerGroup.once('error', function (error) {
76 | error.should.not.be.eql(fakeNetworkError);
77 | });
78 |
79 | sinon.stub(fakeConsumerGroup, 'scheduleReconnect');
80 |
81 | consumerGroupRecovery.tryToRecoverFrom(fakeNetworkError, 'test');
82 |
83 | sinon.assert.calledOnce(fakeConsumerGroup.stopHeartbeats);
84 | fakeConsumerGroup.ready.should.be.false;
85 | consumerGroupRecovery.lastError.should.be.eql(fakeNetworkError);
86 |
87 | sinon.assert.calledOnce(fakeConsumerGroup.scheduleReconnect);
88 | });
89 |
90 | it('should try to recover from a request timeout', function () {
91 | const fakeNetworkError = new TimeoutError('request timeout');
92 |
93 | fakeConsumerGroup.once('error', function (error) {
94 | error.should.not.be.eql(fakeNetworkError);
95 | });
96 |
97 | sinon.stub(fakeConsumerGroup, 'scheduleReconnect');
98 |
99 | consumerGroupRecovery.tryToRecoverFrom(fakeNetworkError, 'test');
100 |
101 | sinon.assert.calledOnce(fakeConsumerGroup.stopHeartbeats);
102 | fakeConsumerGroup.ready.should.be.false;
103 | consumerGroupRecovery.lastError.should.be.eql(fakeNetworkError);
104 |
105 | sinon.assert.calledOnce(fakeConsumerGroup.scheduleReconnect);
106 | });
107 |
108 | it('should try to recover from a HeartbeatTimeout', function () {
109 | const heartbeatTimeout = new HeartbeatTimeout('test error');
110 |
111 | fakeConsumerGroup.once('error', function (error) {
112 | error.should.not.be.eql(heartbeatTimeout);
113 | });
114 |
115 | sinon.stub(fakeConsumerGroup, 'scheduleReconnect');
116 |
117 | consumerGroupRecovery.tryToRecoverFrom(heartbeatTimeout, 'test');
118 |
119 | sinon.assert.calledOnce(fakeConsumerGroup.stopHeartbeats);
120 | fakeConsumerGroup.ready.should.be.false;
121 | consumerGroupRecovery.lastError.should.be.eql(heartbeatTimeout);
122 |
123 | sinon.assert.calledOnce(fakeConsumerGroup.scheduleReconnect);
124 | });
125 | });
126 |
127 | describe('#getRetryTimeout', function () {
128 | it('should reset backoff timeouts when calling with different error', function () {
129 | var error = new GroupCoordinatorNotAvailable();
130 | error.errorCode = 15;
131 |
132 | var differentError = new GroupLoadInProgress();
133 | differentError.errorCode = 14;
134 |
135 | consumerGroupRecovery.options.retries = 5;
136 |
137 | var results = _.times(3, function () {
138 | var ret = consumerGroupRecovery.getRetryTimeout(error);
139 | consumerGroupRecovery.lastError = error;
140 | return ret;
141 | });
142 |
143 | consumerGroupRecovery._timeouts.should.have.length(5);
144 |
145 | results.should.be.length(3);
146 | results.forEach(function (result, index) {
147 | result.should.eql(consumerGroupRecovery._timeouts[index]);
148 | });
149 |
150 | results = _.times(5, function () {
151 | var ret = consumerGroupRecovery.getRetryTimeout(differentError);
152 | consumerGroupRecovery.lastError = differentError;
153 | return ret;
154 | });
155 |
156 | consumerGroupRecovery._timeouts.should.have.length(5);
157 |
158 | results.should.be.length(5);
159 | results.forEach(function (result, index) {
160 | result.should.eql(consumerGroupRecovery._timeouts[index]);
161 | });
162 | });
163 |
164 | it('should return backoff timeouts when calling same error', function () {
165 | var error = new GroupCoordinatorNotAvailable();
166 | error.errorCode = 15;
167 |
168 | consumerGroupRecovery.options.retries = 5;
169 |
170 | var results = _.times(6, function () {
171 | var ret = consumerGroupRecovery.getRetryTimeout(error);
172 | consumerGroupRecovery.lastError = error;
173 | return ret;
174 | });
175 |
176 | consumerGroupRecovery._timeouts.should.have.length(5);
177 | results.pop().should.be.false;
178 | results.forEach(function (result, index) {
179 | result.should.eql(consumerGroupRecovery._timeouts[index]);
180 | });
181 | });
182 |
183 |     it('should initialize the timeout array when invoked', function () {
184 | var error = new GroupCoordinatorNotAvailable();
185 | error.errorCode = 15;
186 |
187 | should(consumerGroupRecovery._timeouts).be.undefined;
188 |
189 | var retryTime = consumerGroupRecovery.getRetryTimeout(error);
190 |
191 | consumerGroupRecovery._timeouts.should.have.length(10);
192 | retryTime.should.be.eql(consumerGroupRecovery._timeouts[0]);
193 | });
194 |
195 | it('should throw if empty arguments', function () {
196 | should.throws(function () {
197 | consumerGroupRecovery.getRetryTimeout();
198 | });
199 |
200 | should.throws(function () {
201 | consumerGroupRecovery.getRetryTimeout(null);
202 | });
203 | });
204 | });
205 | });
206 |
--------------------------------------------------------------------------------
/test/test.consumerGroupStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const uuid = require('uuid');
4 | const createTopic = require('../docker/createTopic');
5 | const sendMessage = require('./helpers/sendMessage');
6 | const async = require('async');
7 | const _ = require('lodash');
8 | const sinon = require('sinon');
9 | const proxyquire = require('proxyquire');
10 | const EventEmitter = require('events');
11 |
12 | const ConsumerGroupStream = require('../lib/consumerGroupStream');
13 |
14 | function createConsumerGroupStream (topic, overrides) {
15 | const consumerOptions = _.defaultsDeep(overrides || {}, {
16 | kafkaHost: '127.0.0.1:9092',
17 | groupId: 'ExampleTestGroup',
18 | sessionTimeout: 15000,
19 | protocol: ['roundrobin'],
20 | asyncPush: false,
21 | id: 'consumer1',
22 | autoCommit: false,
23 | fromOffset: 'earliest'
24 | });
25 | return new ConsumerGroupStream(consumerOptions, topic);
26 | }
27 |
28 | describe('ConsumerGroupStream', function () {
29 | let topic, groupId, sandbox;
30 |
31 | context('autoCommit', function () {
32 | let consumerGroupStream, ConsumerGroupStream, fakeConsumerGroup;
33 |
34 | before(function () {
35 | sandbox = sinon.sandbox.create({
36 | useFakeTimers: true
37 | });
38 | fakeConsumerGroup = new EventEmitter();
39 | _.set(fakeConsumerGroup, 'options.autoCommitIntervalMs', 500);
40 | ConsumerGroupStream = proxyquire('../lib/consumerGroupStream', {
41 | './consumerGroup': function () {
42 | return fakeConsumerGroup;
43 | }
44 | });
45 | });
46 |
47 | beforeEach(function () {
48 | consumerGroupStream = new ConsumerGroupStream({});
49 | sandbox.reset();
50 | });
51 |
52 | after(function () {
53 | sandbox.restore();
54 | });
55 |
56 |     it('should continue to work after a commit with force set to true', function (done) {
57 | fakeConsumerGroup.sendOffsetCommitRequest = sinon.stub().yieldsAsync(null);
58 | async.series(
59 | [
60 | function (callback) {
61 | consumerGroupStream.commit(
62 | {
63 | topic: 'MyTestTopic',
64 | partition: 0,
65 | offset: 1
66 | },
67 | true,
68 | callback
69 | );
70 | },
71 | function (callback) {
72 | consumerGroupStream.commit(
73 | {
74 | topic: 'MyTestTopic',
75 | partition: 0,
76 | offset: 5
77 | },
78 | false,
79 | callback
80 | );
81 | }
82 | ],
83 | function (error) {
84 | sinon.assert.calledTwice(fakeConsumerGroup.sendOffsetCommitRequest);
85 | sinon.assert.calledWith(fakeConsumerGroup.sendOffsetCommitRequest, [
86 | {
87 | metadata: 'm',
88 | offset: 2,
89 | partition: 0,
90 | topic: 'MyTestTopic'
91 | }
92 | ]);
93 |
94 | sinon.assert.calledWith(fakeConsumerGroup.sendOffsetCommitRequest, [
95 | {
96 | metadata: 'm',
97 | offset: 6,
98 | partition: 0,
99 | topic: 'MyTestTopic'
100 | }
101 | ]);
102 |
103 | consumerGroupStream.committing.should.be.true;
104 | sandbox.clock.tick(500);
105 | consumerGroupStream.committing.should.be.false;
106 | done(error);
107 | }
108 | );
109 | });
110 | });
111 |
112 | context('with topic', function () {
113 | beforeEach(function () {
114 | topic = uuid.v4();
115 | groupId = uuid.v4();
116 | return createTopic(topic, 1, 1);
117 | });
118 |
119 | describe('Auto Commit', function () {
120 | it('should resume at the next offset after new stream starts', function (done) {
121 | let messages;
122 | let consumerGroupStream;
123 | let lastReadOffset;
124 | async.series(
125 | [
126 | function (callback) {
127 | messages = _.times(3, uuid.v4);
128 | sendMessage(messages, topic, callback);
129 | },
130 |
131 | function (callback) {
132 | callback = _.once(callback);
133 | const messagesToRead = _.clone(messages);
134 | consumerGroupStream = createConsumerGroupStream(topic, { autoCommit: true, groupId: groupId });
135 | consumerGroupStream.on('data', function (message) {
136 | _.pull(messagesToRead, message.value, message.offset);
137 | if (messagesToRead.length === 0) {
138 | lastReadOffset = message.offset;
139 | callback(null);
140 | }
141 | });
142 | },
143 |
144 | function (callback) {
145 | setImmediate(function () {
146 | consumerGroupStream.close(callback);
147 | consumerGroupStream = null;
148 | });
149 | },
150 |
151 | function (callback) {
152 | setTimeout(callback, 100);
153 | },
154 |
155 | function (callback) {
156 | sendMessage([uuid.v4()], topic, callback);
157 | },
158 |
159 | function (callback) {
160 | consumerGroupStream = createConsumerGroupStream(topic, { autoCommit: true, groupId: groupId });
161 | consumerGroupStream.on('readable', function () {
162 | const message = consumerGroupStream.read();
163 | message.offset.should.be.equal(lastReadOffset + 1);
164 | consumerGroupStream.close(callback);
165 | });
166 | }
167 | ],
168 | done
169 | );
170 | });
171 | });
172 |
173 | describe('#close', function () {
174 | it('should call consumerGroup close without the force option', function (done) {
175 | const consumerGroupStream = createConsumerGroupStream(topic);
176 |
177 | const closeSpy = sinon.spy(consumerGroupStream.consumerGroup, 'close');
178 |
179 | consumerGroupStream.close(function () {
180 | sinon.assert.calledOnce(closeSpy);
181 | sinon.assert.calledWithExactly(closeSpy, false, sinon.match.func);
182 | done();
183 | });
184 | });
185 |
186 | it('autoCommit false should close the consumer without committing offsets', function (done) {
187 | const messages = _.times(3, uuid.v4);
188 | let consumerGroupStream;
189 |
190 | async.series(
191 | [
192 | function (callback) {
193 | sendMessage(messages, topic, callback);
194 | },
195 | function (callback) {
196 | const messagesToRead = _.clone(messages);
197 | consumerGroupStream = createConsumerGroupStream(topic, { groupId: groupId });
198 | consumerGroupStream.on('data', function (message) {
199 | _.pull(messagesToRead, message.value);
200 | if (messagesToRead.length === 0) {
201 | callback(null);
202 | }
203 | });
204 | },
205 | function (callback) {
206 | consumerGroupStream.close(callback);
207 | },
208 | function (callback) {
209 | const messagesToRead = _.clone(messages);
210 | consumerGroupStream = createConsumerGroupStream(topic, { groupId: groupId });
211 | consumerGroupStream.on('data', function (message) {
212 | _.pull(messagesToRead, message.value);
213 | if (messagesToRead.length === 0) {
214 | callback(null);
215 | }
216 | });
217 | },
218 | function (callback) {
219 | consumerGroupStream.close(callback);
220 | }
221 | ],
222 | done
223 | );
224 | });
225 | });
226 | });
227 | });
228 |
--------------------------------------------------------------------------------
/test/test.consumerStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | var should = require('should');
3 |
4 | var through2 = require('through2');
5 |
6 | var libPath = process.env['KAFKA_COV'] ? '../lib-cov/' : '../lib/';
7 | var ConsumerStream = require(libPath + 'consumerStream');
8 | var Producer = require(libPath + 'producer');
9 | var Client = require(libPath + 'kafkaClient');
10 | var EventCounter = require('./helpers/EventCounter');
11 |
12 | const TOPIC_POSTFIX = '_test_' + Date.now();
13 | const EXISTS_TOPIC_1 = '_exists_1' + TOPIC_POSTFIX;
14 | const APPEND_TOPIC_1 = '_append_1' + TOPIC_POSTFIX;
15 | const COMMIT_STREAM_TOPIC_1 = '_commit_stream_1' + TOPIC_POSTFIX;
16 | const COMMIT_STREAM_TOPIC_2 = '_commit_stream_2' + TOPIC_POSTFIX;
17 | const COMMIT_STREAM_TOPIC_3 = '_commit_stream_3' + TOPIC_POSTFIX;
18 |
19 | function createTopicAndProduceMessages (producer, topic, numberOfMessages, done) {
20 | if (!done) {
21 | done = function () {};
22 | }
23 | producer.createTopics([topic], function () {
24 | var messages = [];
25 | for (var i = 1; i <= numberOfMessages; i++) {
26 | messages.push('stream message ' + i);
27 | }
28 | producer.send([{ topic: topic, messages }], done);
29 | });
30 | }
31 |
32 | describe('ConsumerStream', function () {
33 | it("should emit both a 'message' and a 'data' event for each message", function (done) {
34 | var client = new Client();
35 | var producer = new Producer(client);
36 | producer.once('ready', function () {
37 | createTopicAndProduceMessages(producer, EXISTS_TOPIC_1, 100, function () {
38 | var topics = [{ topic: EXISTS_TOPIC_1 }];
39 | // Set fetchMaxBytes low enough to force multiple fetches; the default of
40 | // 1024 * 1024 would retrieve all of the messages in a single fetch.
41 | var options = { autoCommit: false, groupId: '_groupId_1_test', fetchMaxBytes: 512 };
42 | var consumer = new ConsumerStream(client, topics, options);
43 | var eventCounter = new EventCounter();
44 | consumer.on('message', eventCounter.createEventCounter('message'));
45 | consumer.on('data', eventCounter.createEventCounter('data'));
46 | var incrementPipeCount = eventCounter.createEventCounter('pipe', 100, function () {
47 | should.exist(eventCounter.events.message.events);
48 | var events = eventCounter.events;
49 | should.exist(events.message);
50 | events.message.events.length.should.equal(100);
51 | events.data.events.length.should.equal(100);
52 | events.pipe.events.length.should.equal(100);
53 | consumer.close(done);
54 | });
55 | consumer.pipe(
56 | through2.obj({ highWaterMark: 8 }, function (data, enc, cb) {
57 | incrementPipeCount(data);
58 | cb(null);
59 | })
60 | );
61 | });
62 | });
63 | });
64 | it('should continue polling for new messages appended after consuming all existing messages', function (done) {
65 | var client = new Client();
66 | var producer = new Producer(client);
67 | producer.once('ready', function () {
68 | createTopicAndProduceMessages(producer, APPEND_TOPIC_1, 20, function () {
69 | var topics = [{ topic: APPEND_TOPIC_1 }];
70 | var options = { autoCommit: false, groupId: '_groupId_2_test' };
71 | var consumer = new ConsumerStream(client, topics, options);
72 | var eventCounter = new EventCounter();
73 | var increment1 = eventCounter.createEventCounter('first', 20, function (error, firstEvents) {
74 | should.not.exist(error);
75 | firstEvents.events.length.should.equal(20);
76 | firstEvents.events[0][0].value.should.equal('stream message 1');
77 | firstEvents.events[0][0].offset.should.equal(0);
78 | firstEvents.events[19][0].value.should.equal('stream message 20');
79 | firstEvents.events[19][0].offset.should.equal(19);
80 | var increment2 = eventCounter.createEventCounter('second', 20, function (error, secondEvents) {
81 | should.not.exist(error);
82 | secondEvents.count.should.equal(20);
83 | secondEvents.events[0][0].value.should.equal('stream message 1');
84 | secondEvents.events[0][0].offset.should.equal(20);
85 | secondEvents.events[19][0].value.should.equal('stream message 20');
86 | secondEvents.events[19][0].offset.should.equal(39);
87 | consumer.close(done);
88 | });
89 | consumer.on('data', increment2);
90 | createTopicAndProduceMessages(producer, APPEND_TOPIC_1, 20);
91 | });
92 | consumer.pipe(
93 | through2.obj(function (data, enc, cb) {
94 | increment1(data);
95 | cb(null);
96 | })
97 | );
98 | });
99 | });
100 | });
101 | describe('CommitStream', function () {
102 | it('should instantiate a consumer stream and increment commit manually', function (done) {
103 | const groupId = '_commitStream_1_test';
104 | const topic = COMMIT_STREAM_TOPIC_1;
105 | var client = new Client();
106 | var producer = new Producer(client);
107 | producer.once('ready', function () {
108 | createTopicAndProduceMessages(producer, topic, 20, function () {
109 | var options = { autoCommit: false, groupId };
110 | var consumer = new ConsumerStream(client, [topic], options);
111 | var eventCounter = new EventCounter();
112 | let commitStream = consumer.createCommitStream({});
113 | var increment = eventCounter.createEventCounter('first', 20, function (error, events) {
114 | if (error) {
115 | throw error;
116 | }
117 | setImmediate(function () {
118 | commitStream.commit(function () {
119 | client.sendOffsetFetchRequest(groupId, commitStream.topicPartionOffsets, function (error, data) {
120 | if (error) {
121 | throw error;
122 | }
123 | data[topic][0].should.equal(20);
124 | consumer.close(done);
125 | });
126 | });
127 | });
128 | });
129 | consumer
130 | .pipe(
131 | through2.obj(function (data, enc, cb) {
132 | increment();
133 | cb(null, data);
134 | })
135 | )
136 | .pipe(commitStream);
137 | });
138 | });
139 | });
140 | xit('should commit when the autocommit message count is reached', function (done) {
141 | const groupId = '_commitStream_2_test';
142 | const topic = COMMIT_STREAM_TOPIC_2;
143 | var client = new Client();
144 | var producer = new Producer(client);
145 | producer.once('ready', function () {
146 | createTopicAndProduceMessages(producer, topic, 20, function () {
147 | var options = { autoCommit: true, autoCommitIntervalMs: false, autoCommitMsgCount: 18, groupId };
148 | var consumer = new ConsumerStream(client, [topic], options);
149 | let commitStream = consumer.createCommitStream();
150 | commitStream.once('commitComplete', function (data) {
151 | client.sendOffsetFetchRequest(groupId, commitStream.topicPartionOffsets, function (error, data) {
152 | if (error) {
153 | throw error;
154 | }
155 | data[topic][0].should.equal(18);
156 | consumer.close(done);
157 | });
158 | });
159 | consumer
160 | .pipe(
161 | through2.obj(function (data, enc, cb) {
162 | cb(null, data);
163 | })
164 | )
165 | .pipe(commitStream);
166 | });
167 | });
168 | });
169 | xit('should autocommit after a given interval in milliseconds', function (done) {
170 | const groupId = '_commitStream_3_test';
171 | const topic = COMMIT_STREAM_TOPIC_3;
172 | var client = new Client();
173 | var producer = new Producer(client);
174 | producer.once('ready', function () {
175 | createTopicAndProduceMessages(producer, topic, 20, function () {
176 | var options = { autoCommit: true, autoCommitIntervalMs: 5, groupId };
177 | var consumer = new ConsumerStream(client, [topic], options);
178 | let commitStream = consumer.createCommitStream();
179 | commitStream.once('commitComplete', function (data) {
180 | client.sendOffsetFetchRequest(groupId, commitStream.topicPartionOffsets, function (error, data) {
181 | if (error) {
182 | throw error;
183 | }
184 | data[topic][0].should.equal(20);
185 | commitStream.clearInterval();
186 | consumer.close(done);
187 | });
188 | });
189 | consumer
190 | .pipe(
191 | through2.obj(function (data, enc, cb) {
192 | cb(null, data);
193 | })
194 | )
195 | .pipe(commitStream);
196 | });
197 | });
198 | });
199 | });
200 | });
201 |
--------------------------------------------------------------------------------
/test/test.errors.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const errors = require('../lib/errors');
4 |
5 | describe('Test Errors', function () {
6 | it('should have right number of consumer group errors', function () {
7 | errors.ConsumerGroupErrors.length.should.be.eql(7);
8 | });
9 | });
10 |
--------------------------------------------------------------------------------
/test/test.kafka-node.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var Client = kafka.KafkaClient;
5 |
6 | const host = '127.0.0.1:9092';
7 |
8 | var client = null;
9 |
10 | before(function (done) {
11 | client = new Client({ kafkaHost: host });
12 | client.once('ready', done);
13 | client.once('error', done);
14 | });
15 |
--------------------------------------------------------------------------------
/test/test.logging.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const createLogger = require('../lib/logging');
4 | const logging = require('../logging');
5 | const sinon = require('sinon');
6 |
7 | describe('logging', function () {
8 | it('should expose a setLoggerProvider function', function () {
9 | logging.setLoggerProvider.should.be.instanceOf(Function);
10 | });
11 |
12 | it('should create logger via custom logger provider', function () {
13 | const provider = sinon.stub();
14 | const loggerName = 'kafka-node:consumer';
15 | const loggerImpl = {};
16 | provider.withArgs(loggerName).returns(loggerImpl);
17 | logging.setLoggerProvider(provider);
18 |
19 | const logger = createLogger(loggerName);
20 |
21 | logger.should.equal(loggerImpl);
22 | });
23 | });
24 |
--------------------------------------------------------------------------------
/test/test.offset.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var libPath = process.env['KAFKA_COV'] ? '../lib-cov/' : '../lib/';
4 | var Producer = require(libPath + 'producer');
5 | var Offset = require(libPath + 'offset');
6 | var Client = require(libPath + 'kafkaClient');
7 | var ConsumerGroup = require(libPath + 'consumerGroup');
8 | const uuid = require('uuid');
9 |
10 | var client, producer, offset;
11 |
12 | describe('Offset', function () {
13 | before(function (done) {
14 | client = new Client();
15 | producer = new Producer(client);
16 | producer.on('ready', function () {
17 | producer.createTopics(['_exist_topic_3_test'], true, function (err) {
18 | done(err);
19 | });
20 | });
21 |
22 | offset = new Offset(client);
23 | });
24 |
25 | after(function (done) {
26 | producer.close(done);
27 | });
28 |
29 | describe('#fetch', function () {
30 | it('should return offset of the topics', function (done) {
31 | var topic = '_exist_topic_3_test';
32 | var topics = [{ topic: topic }];
33 | offset.fetch(topics, function (err, data) {
34 | var offsets = data[topic][0];
35 | offsets.should.be.an.instanceOf(Array);
36 | offsets.length.should.equal(1);
37 | done(err);
38 | });
39 | });
40 |
41 | it('should return earliest offset of the topics', function (done) {
42 | var topic = '_exist_topic_3_test';
43 | var topics = [{ topic: topic, time: -2 }];
44 | offset.fetch(topics, function (err, data) {
45 | var offsets = data[topic][0];
46 | offsets.should.be.an.instanceOf(Array);
47 | offsets.length.should.above(0);
48 | done(err);
49 | });
50 | });
51 |
52 | it('should return latest offset of the topics', function (done) {
53 | var topic = '_exist_topic_3_test';
54 | var topics = [{ topic: topic, time: -1 }];
55 | offset.fetch(topics, function (err, data) {
56 | var offsets = data[topic][0];
57 | offsets.should.be.an.instanceOf(Array);
58 | offsets.length.should.above(0);
59 | done(err);
60 | });
61 | });
62 |
63 | it('should keep calling fetch until offset is ready', function (done) {
64 | var topic = '_exist_topic_3_test';
65 | var topics = [{ topic: topic }];
66 | offset.fetch(topics, done);
67 | });
68 | });
69 |
70 | describe('#commit', function () {
71 | it('should commit successfully', function (done) {
72 | var topic = '_exist_topic_3_test';
73 | var topics = [{ topic: topic, offset: 10 }];
74 | offset.commit('_groupId_commit_test', topics, function (err, data) {
75 | data.should.be.ok;
76 | Object.keys(data)[0].should.equal(topic);
77 | done(err);
78 | });
79 | });
80 |
81 | it('should keep calling commit until offset is ready', function (done) {
82 | var topic = '_exist_topic_3_test';
83 | var topics = [{ topic: topic, offset: 10 }];
84 | offset.commit('_groupId_commit_test', topics, done);
85 | });
86 | });
87 |
88 | describe('#fetchCommits', function () {
89 | it('should get last committed offset of the consumer group', function (done) {
90 | var topic = '_exist_topic_3_test';
91 | var topics = [{ topic: topic, offset: 10 }];
92 | offset.fetchCommits('_groupId_commit_1_test', topics, function (err, data) {
93 | data.should.be.ok;
94 | Object.keys(data)[0].should.equal(topic);
95 | data[topic][0].should.equal(-1);
96 | done(err);
97 | });
98 | });
99 |
100 | it('should keep calling fetchCommits until offset is ready', function (done) {
101 | var topic = '_exist_topic_3_test';
102 | var topics = [{ topic: topic, offset: 10 }];
103 | offset.fetchCommits('_groupId_commit_1_test', topics, done);
104 | });
105 | });
106 |
107 | describe('#fetchCommitsV1', function () {
108 | var topic, topics, groupId, expectedCommittedOffset;
109 | topic = `_exist_topic_3_test`;
110 | topics = [{ topic: topic, partition: 0 }];
111 | groupId = `_groupId_commit_v1_test`;
112 | before(function (done) {
113 | producer.send([{ topic, messages: ['firstMessage'] }], error => {
114 | if (error) return done(error);
115 | createCGandCommitToLatestOffset(groupId, topic, (err, highWaterOffset) => {
116 | expectedCommittedOffset = highWaterOffset;
117 | done(err);
118 | });
119 | });
120 | });
121 |
122 | it('should return -1 when the consumer group has no commits on the broker', function (done) {
123 | var groupIdNoCommits = groupId + '2';
124 | offset.fetchCommitsV1(groupIdNoCommits, topics, function (err, data) {
125 | data.should.be.ok;
126 | Object.keys(data)[0].should.equal(topic);
127 | data[topic][0].should.equal(-1);
128 | done(err);
129 | });
130 | });
131 |
132 | it('should get the last committed offset of the consumer group on the broker', function (done) {
133 | offset.fetchCommitsV1(groupId, topics, function (err, data) {
134 | data.should.be.ok;
135 | Object.keys(data)[0].should.equal(topic);
136 | data[topic][0].should.equal(expectedCommittedOffset);
137 | done(err);
138 | });
139 | });
140 |
141 | it('should keep calling fetchCommitsV1 until offset is ready', function (done) {
142 | var topic = '_exist_topic_3_test';
143 | var topics = [{ topic: topic, offset: 10 }];
144 | offset.fetchCommitsV1('_groupId_commit_1_test', topics, done);
145 | });
146 | });
147 |
148 | describe('#fetchEarliestOffsets', function () {
149 | it('should callback with error if topic does not exist', function (done) {
150 | offset.fetchEarliestOffsets([uuid.v4()], function (error) {
151 | error.should.be.an.instanceOf(Error);
152 | error.message.should.be.exactly('Topic(s) does not exist');
153 | done();
154 | });
155 | });
156 | });
157 |
158 | describe('#fetchLatestOffsets', function () {
159 | it('should callback with error if topic does not exist', function (done) {
160 | offset.fetchLatestOffsets([uuid.v4()], function (error) {
161 | error.should.be.an.instanceOf(Error);
162 | error.message.should.be.exactly('Topic(s) does not exist');
163 | done();
164 | });
165 | });
166 |
167 | it('should get latest kafka offsets for all topics passed in', function (done) {
168 | var topic = '_exist_topic_3_test';
169 | var topics = [topic];
170 | var partition = 0;
171 | offset.fetch([{ topic: topic, time: -1 }], function (err, results) {
172 | if (err) return done(err);
173 | var latestOffset = results[topic][partition][0];
174 | offset.fetchLatestOffsets(topics, function (err, offsets) {
175 | if (err) return done(err);
176 | offsets[topic][partition].should.equal(latestOffset);
177 | done();
178 | });
179 | });
180 | });
181 |
182 | it('should keep calling fetchLatestOffsets until offset is ready', function (done) {
183 | var topic = '_exist_topic_3_test';
184 | var topics = [topic];
185 | offset.fetchLatestOffsets(topics, done);
186 | });
187 | });
188 | });
189 |
190 | const createCGandCommitToLatestOffset = (groupId, topic, cb) => {
191 | try {
192 | var consumerGroupOptions = {
193 | groupId: groupId,
194 | fromOffset: 'earliest',
195 | kafkaHost: 'localhost:9092',
196 | autoCommitIntervalMs: 1,
197 | autoCommit: true
198 | };
199 | var consumerGroup = new ConsumerGroup(consumerGroupOptions, topic);
200 | consumerGroup.on('message', message => {
201 | if (message.offset === message.highWaterOffset - 1) {
202 | setTimeout(() => {
203 | consumerGroup.close(true, () => {
204 | return cb(null, message.highWaterOffset);
205 | });
206 | }, 0);
207 | }
208 | });
209 | consumerGroup.on('error', err => {
210 | return cb(err);
211 | });
212 | } catch (e) {
213 | return cb(e);
214 | }
215 | };
216 |
--------------------------------------------------------------------------------
/test/test.partitioner.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var _ = require('lodash');
4 | var kafka = require('..');
5 | var DefaultPartitioner = kafka.DefaultPartitioner;
6 | var RandomPartitioner = kafka.RandomPartitioner;
7 | var CyclicPartitioner = kafka.CyclicPartitioner;
8 | var KeyedPartitioner = kafka.KeyedPartitioner;
9 | var CustomPartitioner = kafka.CustomPartitioner;
10 |
11 | function getPartitions (partitioner, partitions, count) {
12 | var arr = [];
13 | for (var i = 0; i < count; i++) {
14 | arr.push(partitioner.getPartition(partitions));
15 | }
16 | return arr;
17 | }
18 |
19 | describe('Partitioner', function () {
20 | describe('DefaultPartitioner', function () {
21 | var partitioner = new DefaultPartitioner();
22 |
23 | describe('#getPartition', function () {
24 | it('should always return the first partition', function () {
25 | var partitions = _.uniq(getPartitions(partitioner, [0, 1], 100));
26 | partitions.should.have.length(1);
27 | partitions.should.containEql(0);
28 | });
29 | });
30 | });
31 |
32 | describe('RandomPartitioner', function () {
33 | var partitioner = new RandomPartitioner();
34 |
35 | describe('#getPartition', function () {
36 | it('should return partitions within the existing ones', function () {
37 | var partitions = _.uniq(getPartitions(partitioner, [0, 1], 100));
38 | partitions.should.have.length(2);
39 | partitions.should.containEql(0);
40 | partitions.should.containEql(1);
41 | });
42 | });
43 | });
44 |
45 | describe('CyclicPartitioner', function () {
46 | var partitioner = new CyclicPartitioner();
47 |
48 | describe('#getPartition', function () {
49 | it('should return partitions cycling through the existing ones', function () {
50 | var partitions = getPartitions(partitioner, [0, 1, 2], 6);
51 | partitions.should.have.length(6);
52 | partitions[0].should.equal(0);
53 | partitions[1].should.equal(1);
54 | partitions[2].should.equal(2);
55 | partitions[3].should.equal(0);
56 | partitions[4].should.equal(1);
57 | partitions[5].should.equal(2);
58 | });
59 |
60 | it('should not modify different partitioners', function () {
61 | var partitioner2 = new CyclicPartitioner();
62 | var partitions1 = getPartitions(partitioner, [0, 1, 2], 3);
63 | var partitions2 = getPartitions(partitioner2, [0, 1, 2], 3);
64 | partitions1.should.have.length(3);
65 | partitions2.should.have.length(3);
66 | partitions1[0].should.equal(0);
67 | partitions2[0].should.equal(0);
68 | });
69 | });
70 | });
71 |
72 | describe('KeyedPartitioner', function () {
73 | var partitioner = new KeyedPartitioner();
74 |
75 | describe('#getPartition', function () {
76 | it('should return partitions based on a given key', function () {
77 | var partitions = [partitioner.getPartition([0, 1], '12345'), partitioner.getPartition([0, 1], '123')];
78 | partitions.should.have.length(2);
79 | partitions[0].should.equal(1);
80 | partitions[1].should.equal(0);
81 | });
82 |
83 | it('should return partitions based on a given buffer', function () {
84 | var partitions = [partitioner.getPartition([0, 1, 2], Buffer.from([5, 4, 3, 2])), partitioner.getPartition([0, 1, 2], Buffer.from([3, 2, 1, 0]))];
85 | partitions.should.have.length(2);
86 | partitions[0].should.equal(2);
87 | partitions[1].should.equal(0);
88 | });
89 | });
90 | });
91 |
92 | describe('CustomPartitioner', function () {
93 | function getPartition (partitions, key) {
94 | return partitions[partitions.length - 1];
95 | }
96 |
97 | var partitioner = new CustomPartitioner(getPartition);
98 |
99 | describe('#getPartition', function () {
100 | it('should always return the last partition', function () {
101 | var partitions = _.uniq(getPartitions(partitioner, [0, 1, 2, 3], 100));
102 | partitions.should.have.length(1);
103 | partitions.should.containEql(3);
104 | });
105 | });
106 | });
107 | });
108 |
--------------------------------------------------------------------------------
/test/test.producer.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var Producer = kafka.Producer;
5 | var uuid = require('uuid');
6 | var Client = kafka.KafkaClient;
7 | var KeyedMessage = kafka.KeyedMessage;
8 | const async = require('async');
9 |
10 | var client, producer, noAckProducer, producerKeyed;
11 |
12 | [
13 | {
14 | name: 'PLAINTEXT Producer'
15 | },
16 | {
17 | name: 'SSL Producer',
18 | sslOptions: {
19 | rejectUnauthorized: false
20 | },
21 | suiteTimeout: 30000
22 | }
23 | ].forEach(function (testParameters) {
24 | var TOPIC_POSTFIX = '_test_' + Date.now();
25 | var EXISTS_TOPIC_3 = '_exists_3' + TOPIC_POSTFIX;
26 |
27 | var sslOptions = testParameters.sslOptions;
28 | var suiteTimeout = testParameters.suiteTimeout;
29 | var suiteName = testParameters.name;
30 |
31 | const kafkaHost = '127.0.0.1:' + (sslOptions == null ? '9092' : '9093');
32 |
33 | describe(suiteName, function () {
34 | before(function (done) {
35 | if (suiteTimeout) {
36 | this.timeout(suiteTimeout);
37 | }
38 | client = new Client({ kafkaHost, sslOptions });
39 | producer = new Producer(client);
40 | noAckProducer = new Producer(client, { requireAcks: 0 });
41 | producerKeyed = new Producer(client, { partitionerType: Producer.PARTITIONER_TYPES.keyed });
42 |
43 | async.series(
44 | [
45 | function (callback) {
46 | producer.once('ready', callback);
47 | },
48 | function (callback) {
49 | producer.createTopics([EXISTS_TOPIC_3], true, callback);
50 | }
51 | ],
52 | done
53 | );
54 | });
55 |
56 | after(function (done) {
57 | producer.close(done);
58 | });
59 |
60 | describe('#send', function () {
61 | before(function (done) {
62 | // Ensure that first message gets the `0`
63 | producer.send([{ topic: EXISTS_TOPIC_3, messages: '_initial' }], function (err, message) {
64 | message.should.be.ok;
65 | message[EXISTS_TOPIC_3].should.have.property('0', 0);
66 | done(err);
67 | });
68 | });
69 |
70 | it('should send message successfully', function (done) {
71 | producer.send([{ topic: EXISTS_TOPIC_3, messages: 'hello kafka' }], function (err, message) {
72 | message.should.be.ok;
73 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
74 | done(err);
75 | });
76 | });
77 |
78 | it('should send buffer message successfully', function (done) {
79 | var message = Buffer.from('hello kafka');
80 | producer.send([{ topic: EXISTS_TOPIC_3, messages: message }], function (err, message) {
81 | message.should.be.ok;
82 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
83 | done(err);
84 | });
85 | });
86 |
87 | it('should send null message successfully', function (done) {
88 | var message = null;
89 | producer.send([{ topic: EXISTS_TOPIC_3, messages: message }], function (err, message) {
90 | message.should.be.ok;
91 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
92 | done(err);
93 | });
94 | });
95 |
96 | it('should convert non-buffer message to string', function (done) {
97 | var message = -1;
98 | producer.send([{ topic: EXISTS_TOPIC_3, messages: message }], function (err, message) {
99 | message.should.be.ok;
100 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
101 | done(err);
102 | });
103 | });
104 |
105 | it('should send Message struct successfully', function (done) {
106 | var message = new KeyedMessage('test-key', 'test-message');
107 | producer.send([{ topic: EXISTS_TOPIC_3, messages: message }], function (err, message) {
108 | message.should.be.ok;
109 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
110 | done(err);
111 | });
112 | });
113 |
114 | it('should support multi messages in one topic', function (done) {
115 | producer.send([{ topic: EXISTS_TOPIC_3, messages: ['hello kafka', 'hello kafka'] }], function (err, message) {
116 | message.should.be.ok;
117 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
118 | done(err);
119 | });
120 | });
121 |
122 | it('should support snappy compression', function (done) {
123 | producer.send(
124 | [
125 | {
126 | topic: EXISTS_TOPIC_3,
127 | messages: ['hello kafka', 'hello kafka'],
128 | attributes: 2
129 | }
130 | ],
131 | function (err, message) {
132 | if (err) return done(err);
133 | message.should.be.ok;
134 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
135 | done();
136 | }
137 | );
138 | });
139 |
140 | it('should support gzip compression', function (done) {
141 | producer.send(
142 | [
143 | {
144 | topic: EXISTS_TOPIC_3,
145 | messages: ['hello kafka', 'hello kafka'],
146 | attributes: 1
147 | }
148 | ],
149 | function (err, message) {
150 | if (err) return done(err);
151 | message.should.be.ok;
152 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
153 | done();
154 | }
155 | );
156 | });
157 |
158 | it('should send message without ack', function (done) {
159 | noAckProducer.send(
160 | [
161 | {
162 | topic: EXISTS_TOPIC_3,
163 | messages: 'hello kafka'
164 | }
165 | ],
166 | function (err, message) {
167 | if (err) return done(err);
168 | message.result.should.equal('no ack');
169 | done();
170 | }
171 | );
172 | });
173 |
174 | it('should send message to specified partition even when producer configured with keyed partitioner', function (done) {
175 | producerKeyed.send([{ key: '12345', partition: 0, topic: EXISTS_TOPIC_3, messages: 'hello kafka' }], function (
176 | err,
177 | message
178 | ) {
179 | message.should.be.ok;
180 | message[EXISTS_TOPIC_3]['0'].should.be.above(0);
181 | done(err);
182 | });
183 | });
184 |
185 | describe('Keyed Partitioner', function () {
186 | const createTopic = require('../docker/createTopic');
187 | const topicWithTwoPartitions = uuid.v4();
188 | let client, keyed;
189 |
190 | before(function () {
191 | return createTopic(topicWithTwoPartitions, 2, 1).then(function () {
192 | return new Promise(function (resolve, reject) {
193 | client = new Client({ kafkaHost, sslOptions });
194 | keyed = new Producer(client, { partitionerType: Producer.PARTITIONER_TYPES.keyed });
195 | client.once('ready', function () {
196 | client.refreshMetadata([topicWithTwoPartitions], function (error) {
197 | if (error) {
198 | return reject(error);
199 | }
200 | resolve();
201 | });
202 | });
203 | });
204 | });
205 | });
206 |
207 | after(function (done) {
208 | keyed.close(done);
209 | });
210 |
211 | it('should send message to partition determined by keyed partitioner', function (done) {
212 | keyed.send([{ key: '12345', topic: topicWithTwoPartitions, messages: 'hello kafka' }], function (
213 | err,
214 | message
215 | ) {
216 | message.should.be.ok;
217 | message[topicWithTwoPartitions].should.have.property('1', 0);
218 | done(err);
219 | });
220 | });
221 | });
222 | });
223 |
224 | describe('#close', function () {
225 | var client, producer;
226 |
227 | before(function (done) {
228 | client = new Client();
229 | producer = new Producer(client);
230 | producer.on('ready', done);
231 | });
232 |
233 | after(function (done) {
234 | producer.close(done);
235 | });
236 |
237 | it('should close successfully', function (done) {
238 | producer.close(done);
239 | });
240 | });
241 | });
242 | });
243 |
--------------------------------------------------------------------------------
/test/test.producerBatch.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var sinon = require('sinon');
4 | var kafka = require('..');
5 | var Producer = kafka.Producer;
6 | var Client = kafka.KafkaClient;
7 | var async = require('async');
8 |
9 | var client, producer, batchClient, batchProducer, noAckProducer;
10 |
11 | var TOPIC_POSTFIX = '_test_' + Date.now();
12 | var EXISTS_TOPIC_4 = '_exists_4' + TOPIC_POSTFIX;
13 | var BATCH_SIZE = 500;
14 | var BATCH_AGE = 300;
15 |
16 | var broker = null;
17 |
18 | // Intermittently fails
19 |
20 | xdescribe('No Ack Producer', function () {
21 | before(function (done) {
22 | async.series(
23 | {
24 | setupClient: function (callback) {
25 | client = new Client();
26 | batchClient = new Client({ noAckBatchOptions: { noAckBatchSize: BATCH_SIZE, noAckBatchAge: BATCH_AGE } });
27 | producer = new Producer(client);
28 | batchProducer = new Producer(batchClient);
29 | producer.on('ready', function () {
30 | producer.createTopics([EXISTS_TOPIC_4], true, function (err) {
31 | if (err) return callback(err);
32 | callback();
33 | });
34 | broker = Object.keys(client.brokers)[0];
35 | });
36 | },
37 | producerSend: function (callback) {
38 | producer.send([{ topic: EXISTS_TOPIC_4, messages: '_initial 1' }], function (err, message) {
39 | if (err) return callback(err);
40 | message.should.be.ok;
41 | message[EXISTS_TOPIC_4].should.have.property('0', 0);
42 | batchProducer.send([{ topic: EXISTS_TOPIC_4, messages: '_initial 2' }], function (err, message) {
43 | message.should.be.ok;
44 | message[EXISTS_TOPIC_4].should.have.property('0', 1);
45 | callback(err);
46 | });
47 | });
48 | // Ensure that first message gets the `0`
49 | }
50 | },
51 | done
52 | );
53 | });
54 |
55 | after(function (done) {
56 | async.each(
57 | [producer, batchProducer],
58 | function (producer, callback) {
59 | producer.close(callback);
60 | },
61 | done
62 | );
63 | });
64 |
65 | describe('with no batch client', function () {
66 | before(function (done) {
67 | noAckProducer = new Producer(client, { requireAcks: 0 });
68 | done();
69 | });
70 |
71 | beforeEach(function () {
72 | this.sendSpy = sinon.spy(client.brokers[broker].socket, 'write');
73 | });
74 |
75 | afterEach(function () {
76 | this.sendSpy.restore();
77 | });
78 |
79 | it('should send message directly', function (done) {
80 | var self = this;
81 | noAckProducer.send(
82 | [
83 | {
84 | topic: EXISTS_TOPIC_4,
85 | messages: 'hello kafka no batch'
86 | }
87 | ],
88 | function (err, message) {
89 | if (err) return done(err);
90 | setImmediate(function () {
91 | message.result.should.equal('no ack');
92 | self.sendSpy.args.length.should.be.equal(1);
93 | self.sendSpy.args[0].toString().should.containEql('hello kafka no batch');
94 | done();
95 | });
96 | }
97 | );
98 | });
99 | });
100 |
101 | describe('with batch client', function () {
102 | before(function (done) {
103 | noAckProducer = new Producer(batchClient, { requireAcks: 0 });
104 | done();
105 | });
106 |
107 | beforeEach(function () {
108 | this.sendSpy = sinon.spy(batchClient.brokers[broker].socket, 'write');
109 | this.clock = sinon.useFakeTimers(0, 'setTimeout');
110 | });
111 |
112 | afterEach(function () {
113 | this.sendSpy.restore();
114 | this.clock.restore();
115 | });
116 |
117 | it('should wait BATCH_AGE ms before sending the message', function (done) {
118 | var self = this;
119 | noAckProducer.send(
120 | [
121 | {
122 | topic: EXISTS_TOPIC_4,
123 | messages: 'hello kafka with batch'
124 | }
125 | ],
126 | function (err, message) {
127 | if (err) return done(err);
128 | setImmediate(function () {
129 | message.result.should.equal('no ack');
130 | self.sendSpy.args.length.should.be.equal(0);
131 | self.clock.tick(BATCH_AGE - 5);
132 | self.sendSpy.args.length.should.be.equal(0);
133 | self.clock.tick(10);
134 | setImmediate(function () {
135 | self.sendSpy.args.length.should.be.equal(1);
136 | self.sendSpy.args[0].toString().should.containEql('hello kafka with batch');
137 | done();
138 | });
139 | });
140 | }
141 | );
142 | });
143 |
144 | it('should send message once the batch max size is reached', function (done) {
145 | var self = this;
146 | var foo = '';
147 | for (var i = 0; i < BATCH_SIZE; i++) foo += 'X';
148 | foo += 'end of message';
149 | noAckProducer.send(
150 | [
151 | {
152 | topic: EXISTS_TOPIC_4,
153 | messages: 'hello kafka with batch'
154 | }
155 | ],
156 | function (err, message) {
157 | if (err) return done(err);
158 | message.result.should.equal('no ack');
159 | self.sendSpy.args.length.should.be.equal(0);
160 | noAckProducer.send(
161 | [
162 | {
163 | topic: EXISTS_TOPIC_4,
164 | messages: foo
165 | }
166 | ],
167 | function (err, message) {
168 | if (err) return done(err);
169 | setImmediate(function () {
170 | message.result.should.equal('no ack');
171 | self.sendSpy.args.length.should.be.equal(1);
172 | self.sendSpy.args[0].toString().should.containEql('hello kafka with batch');
173 | self.sendSpy.args[0].toString().should.containEql('end of message');
174 | done();
175 | });
176 | }
177 | );
178 | }
179 | );
180 | });
181 | });
182 | });
183 |
--------------------------------------------------------------------------------
/test/test.producerStream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const ProducerStream = require('../lib/producerStream');
4 | const ConsumerGroup = require('../lib/consumerGroup');
5 | const uuid = require('uuid');
6 | const createTopic = require('../docker/createTopic');
7 | const _ = require('lodash');
8 | const async = require('async');
9 |
10 | describe('Producer Stream', function () {
11 | let topic;
12 |
13 | before(function () {
14 | topic = uuid.v4();
15 | return createTopic(topic, 1, 1);
16 | });
17 |
18 | it('should stream to a topic and verify data was streamed to that topic', function (done) {
19 | const producerStream = new ProducerStream();
20 |
21 | const messages = [uuid.v4(), uuid.v4()];
22 |
23 | const consumer = new ConsumerGroup(
24 | {
25 | kafkaHost: '127.0.0.1:9092',
26 | groupId: uuid.v4(),
27 | fromOffset: 'earliest'
28 | },
29 | topic
30 | );
31 |
32 | consumer.on('message', function (message) {
33 | _.pull(messages, message.value);
34 | if (_.isEmpty(messages)) {
35 | async.parallel([callback => producerStream.close(callback), callback => consumer.close(callback)], done);
36 | }
37 | });
38 |
39 | producerStream.write({
40 | topic: topic,
41 | messages: messages
42 | });
43 | });
44 | });
45 |
--------------------------------------------------------------------------------
/test/test.protocol.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const versionSupport = require('../lib/protocol/protocolVersions');
4 | const protocolStruct = require('../lib/protocol/protocol_struct');
5 | const _ = require('lodash');
6 |
7 | describe('Protocol', function () {
8 | it('exports correct properties', function () {
9 | versionSupport.should.have.property('apiMap');
10 | versionSupport.should.have.property('maxSupport');
11 | versionSupport.should.have.property('baseSupport');
12 | });
13 |
14 | describe('verify API map keys', function () {
15 | it('should contain the same keys as request type', function () {
16 | Object.keys(protocolStruct.REQUEST_TYPE).should.be.eql(Object.keys(versionSupport.apiMap));
17 | Object.keys(protocolStruct.REQUEST_TYPE).should.be.eql(Object.keys(versionSupport.maxSupport));
18 | Object.keys(protocolStruct.REQUEST_TYPE).should.be.eql(Object.keys(versionSupport.baseSupport));
19 | });
20 |
21 | it('should contain different versions of encode/decode functions', function () {
22 | _.forOwn(versionSupport.apiMap, function (value) {
23 | if (value === null) {
24 | return;
25 | }
26 | value.should.be.an.instanceOf(Array);
27 | value.length.should.be.above(0);
28 | for (let requestResponse of value) {
29 | requestResponse.should.have.a.lengthOf(2);
30 | // let encoder = requestResponse[0];
31 | // encoder.name.should.startWith('encode');
32 | // encoder.name.should.not.startWith('decode');
33 |
34 | // encoder.name.should.endWith('Request');
35 | // encoder.name.should.not.endWith('Response');
36 | }
37 | });
38 | });
39 | });
40 | });
41 |
--------------------------------------------------------------------------------
/test/test.rebalance.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kafka = require('..');
4 | var Client = kafka.KafkaClient;
5 | var Producer = kafka.Producer;
6 | var async = require('async');
7 | var debug = require('debug')('kafka-node:Test-Rebalance');
8 | var Childrearer = require('./helpers/Childrearer');
9 | var uuid = require('uuid');
10 | var _ = require('lodash');
11 |
12 | describe('Integrated Rebalance', function () {
13 | describe('ConsumerGroup using Kafka Client', function () {
14 | testRebalance('test/helpers/child-cg-kafka-client');
15 | });
16 | });
17 |
18 | function testRebalance (forkPath) {
19 | var producer;
20 | var topic = 'RebalanceTopic';
21 | var rearer;
22 | var groupId = 'rebal_group';
23 |
24 | before(function (done) {
25 | if (process.env.TRAVIS) {
26 | return this.skip();
27 | }
28 | var client = new Client();
29 | producer = new Producer(client);
30 | client.on('ready', function () {
31 | client.refreshMetadata([topic], function (data) {
32 | // client.topicPartitions[topic].should.be.length(3);
33 | done();
34 | });
35 | });
36 | });
37 |
38 | beforeEach(function (done) {
39 | rearer = new Childrearer(forkPath);
40 | done();
41 | });
42 |
43 | afterEach(function (done) {
44 | debug('killChildren');
45 | rearer.closeAll(done);
46 | });
47 |
48 | function sendMessages (messages, done) {
49 | const payload = distributeMessages(messages);
50 | debug('Sending', payload);
51 | producer.send(payload, function (error) {
52 | if (error) {
53 | return done(error);
54 | }
55 | debug('all messages sent');
56 | });
57 | }
58 |
59 | function distributeMessages (messages) {
60 | const partitions = [0, 1, 2];
61 | var index = 0;
62 | var len = partitions.length;
63 |
64 | var partitionBuckets = partitions.map(function (partition) {
65 | return {
66 | topic: topic,
67 | messages: [],
68 | partition: partition
69 | };
70 | });
71 |
72 | messages.forEach(function (message) {
73 | partitionBuckets[index++ % len].messages.push(message);
74 | });
75 |
76 | return partitionBuckets;
77 | }
78 |
79 | function getConsumerVerifier (messages, expectedPartitionsConsumed, expectedConsumersConsuming, done) {
80 | var processedMessages = 0;
81 | var consumedByConsumer = {};
82 | var verified = _.once(done);
83 |
84 | return function onData (data) {
85 | debug('From child %d %j', this._childNum, data);
86 | topic.should.be.eql(data.message.topic);
87 | if (~messages.indexOf(data.message.value)) {
88 | processedMessages++;
89 | consumedByConsumer[data.id] = true;
90 | }
91 | if (processedMessages >= messages.length) {
92 | var consumedBy = Object.keys(consumedByConsumer);
93 | if (consumedBy.length >= expectedConsumersConsuming) {
94 | verified();
95 | } else {
96 | verified(
97 | new Error(
98 | 'Received messages but not by the expected ' +
99 | expectedConsumersConsuming +
100 | ' consumers: ' +
101 | JSON.stringify(consumedBy)
102 | )
103 | );
104 | }
105 | }
106 | };
107 | }
108 |
109 | function generateMessages (numberOfMessages, prefix) {
110 | return _.times(numberOfMessages, function () {
111 | return prefix + '-' + uuid.v4();
112 | });
113 | }
114 |
115 | it('verify two consumers consuming messages on all partitions', function (done) {
116 | var messages = generateMessages(3, 'verify 2 c');
117 | var numberOfConsumers = 2;
118 |
119 | var verify = getConsumerVerifier(messages, 3, numberOfConsumers, done);
120 |
121 | rearer.setVerifier(topic, groupId, verify);
122 | rearer.raise(numberOfConsumers, function () {
123 | sendMessages(messages, done);
124 | });
125 | });
126 |
127 | it('verify three consumers consuming messages on all partitions', function (done) {
128 | var messages = generateMessages(3, 'verify 3 c');
129 | var numberOfConsumers = 3;
130 |
131 | var verify = getConsumerVerifier(messages, 3, numberOfConsumers, done);
132 |
133 | rearer.setVerifier(topic, groupId, verify);
134 | rearer.raise(numberOfConsumers);
135 |
136 | sendMessages(messages, done);
137 | });
138 |
139 | it('verify three of four consumers are consuming messages on all partitions', function (done) {
140 | var messages = generateMessages(3, 'verify 4 c');
141 |
142 | var verify = getConsumerVerifier(messages, 3, 3, done);
143 | rearer.setVerifier(topic, groupId, verify);
144 | rearer.raise(4);
145 |
146 | sendMessages(messages, done);
147 | });
148 |
149 | it('verify one consumer consumes all messages on all partitions after one of the two consumers is killed', function (done) {
150 | var messages = generateMessages(4, 'verify 1 c 1 killed');
151 | var verify = getConsumerVerifier(messages, 3, 1, done);
152 |
153 | rearer.setVerifier(topic, groupId, verify);
154 | rearer.raise(
155 | 2,
156 | function () {
157 | rearer.kill(1, function () {
158 | sendMessages(messages, done);
159 | });
160 | },
161 | 500
162 | );
163 | });
164 |
165 | it('verify two consumers consume all messages on all partitions after two of the four consumers are killed right away', function (done) {
166 | var messages = generateMessages(3, 'verify 4 c 2 killed');
167 | var verify = getConsumerVerifier(messages, 3, 2, done);
168 |
169 | rearer.setVerifier(topic, groupId, verify);
170 | rearer.raise(4, function () {
171 | rearer.kill(2, function () {
172 | sendMessages(messages, done);
173 | });
174 | });
175 | });
176 |
177 | it('verify three consumers consume all messages on all partitions after one that is unassigned is killed', function (done) {
178 | var messages = generateMessages(3, 'verify 2 c 2 killed');
179 | var verify = getConsumerVerifier(messages, 3, 2, done);
180 |
181 | rearer.setVerifier(topic, groupId, verify);
182 |
183 | async.series(
184 | [
185 | function (callback) {
186 | rearer.raise(3, callback);
187 | },
188 | function (callback) {
189 | setTimeout(callback, 1000);
190 | },
191 | function (callback) {
192 | rearer.raise(1, callback);
193 | },
194 | function (callback) {
195 | setTimeout(callback, 1000);
196 | },
197 | function (callback) {
198 | rearer.killFirst(callback);
199 | }
200 | ],
201 | function () {
202 | sendMessages(messages, done);
203 | }
204 | );
205 | });
206 |
207 | it('verify two consumers consume all messages on all partitions after two of the four consumers are killed', function (done) {
208 | var messages = generateMessages(3, 'verify 2 c 2 killed');
209 | var verify = getConsumerVerifier(messages, 3, 2, done);
210 |
211 | rearer.setVerifier(topic, groupId, verify);
212 | rearer.raise(
213 | 4,
214 | function () {
215 | rearer.kill(2, function () {
216 | sendMessages(messages, done);
217 | });
218 | },
219 | 500
220 | );
221 | });
222 |
223 | it('verify three consumers consume all messages on all partitions after three of the six consumers are killed', function (done) {
224 | var messages = generateMessages(3, 'verify 3 c 3 killed');
225 | var verify = getConsumerVerifier(messages, 3, 2, done);
226 |
227 | rearer.setVerifier(topic, groupId, verify);
228 | rearer.raise(
229 | 6,
230 | function () {
231 | rearer.kill(3, function () {
232 | sendMessages(messages, done);
233 | });
234 | },
235 | 1000
236 | );
237 | });
238 | }
239 |
--------------------------------------------------------------------------------
/types/kafka-node-tests.ts:
--------------------------------------------------------------------------------
1 | import * as kafka from '..';
2 |
3 | /**
4 | * KAFKA CLIENT
5 | */
6 |
7 | const basicKafkaClient = new kafka.KafkaClient();
8 |
9 | const optionsKafkaClient = new kafka.KafkaClient({
10 | kafkaHost: 'localhost:9092',
11 | connectTimeout: 1000,
12 | requestTimeout: 1000,
13 | autoConnect: true,
14 | sslOptions: {},
15 | clientId: 'client id',
16 | connectRetryOptions: {
17 | retries: 5, factor: 0, minTimeout: 1000, maxTimeout: 1000, randomize: true
18 | }
19 | });
20 |
21 | optionsKafkaClient.connect();
22 |
23 | /**
24 | * KAFKA PRODUCER
25 | */
26 | const optionsProducer = new kafka.Producer(basicKafkaClient, { requireAcks: 0, ackTimeoutMs: 0, partitionerType: 0 });
27 |
28 | const producer = new kafka.Producer(basicKafkaClient);
29 | producer.on('error', (error: Error) => { });
30 | producer.on('ready', () => {
31 | const messages = [
32 | { topic: 'topicName', messages: ['message body'], partition: 0, attributes: 2 },
33 | { topic: 'topicName', messages: ['message body'], partition: 0 },
34 | { topic: 'topicName', messages: ['message body'], attributes: 0 },
35 | { topic: 'topicName', messages: ['message body'] },
36 | { topic: 'topicName', messages: [new kafka.KeyedMessage('key', 'message')] }
37 | ];
38 |
39 | producer.send(messages, (err: Error) => { });
40 | producer.send(messages, (err: Error, data: any) => { });
41 |
42 | producer.createTopics(['t'], true, (err: Error, data: any) => { });
43 | producer.createTopics(['t'], (err: Error, data: any) => { });
44 | producer.createTopics(['t'], false, () => { });
45 | producer.close();
46 | });
47 |
48 | /**
49 | * KAFKA HIGH LEVEL PRODUCER
50 | */
51 | const highLevelProducer = new kafka.HighLevelProducer(basicKafkaClient);
52 |
53 | highLevelProducer.on('error', (error: Error) => { });
54 | highLevelProducer.on('ready', () => {
55 | const messages = [
56 | { topic: 'topicName', messages: ['message body'], attributes: 2 },
57 | { topic: 'topicName', messages: ['message body'], partition: 0 },
58 | { topic: 'topicName', messages: ['message body'], attributes: 0 },
59 | { topic: 'topicName', messages: ['message body'] },
60 | { topic: 'topicName', messages: [new kafka.KeyedMessage('key', 'message')] }
61 | ];
62 |
63 | highLevelProducer.send(messages, (err: Error) => { });
64 | highLevelProducer.send(messages, (err: Error, data: any) => { });
65 |
66 | highLevelProducer.createTopics(['t'], true, (err: Error, data: any) => { });
67 | highLevelProducer.createTopics(['t'], (err: Error, data: any) => { });
68 | highLevelProducer.createTopics(['t'], false, () => { });
69 | highLevelProducer.close();
70 | });
71 |
72 | /**
73 | * KAFKA CONSUMER
74 | */
75 | const fetchRequests = [{ topic: 'awesome' }];
76 | const consumer = new kafka.Consumer(basicKafkaClient, fetchRequests, { groupId: 'abcde', autoCommit: true });
77 |
78 | consumer.on('error', (error: Error) => { });
79 | consumer.on('offsetOutOfRange', (error: Error) => { });
80 | consumer.on('message', (message: kafka.Message) => {
81 | const topic = message.topic;
82 | const value = message.value;
83 | const offset = message.offset;
84 | const partition = message.partition;
85 | const highWaterOffset = message.highWaterOffset;
86 | const key = message.key;
87 | });
88 |
89 | consumer.addTopics(['t1', 't2'], (err: any, added: any) => { });
90 | consumer.addTopics([{ topic: 't1', offset: 10 }], (err: any, added: any) => { }, true);
91 |
92 | consumer.removeTopics(['t1', 't2'], (err: any, removed: number) => { });
93 | consumer.removeTopics('t2', (err: any, removed: number) => { });
94 |
95 | consumer.commit((err: any, data: any) => { });
96 | consumer.commit(true, (err: any, data: any) => { });
97 |
98 | consumer.setOffset('topic', 0, 0);
99 |
100 | consumer.pause();
101 | consumer.resume();
102 | consumer.pauseTopics(['topic1', { topic: 'topic2', partition: 0 }]);
103 | consumer.resumeTopics(['topic1', { topic: 'topic2', partition: 0 }]);
104 |
105 | consumer.close(true, () => { });
106 | consumer.close((err: any) => { });
107 |
108 | /**
109 | * KAFKA CONSUMER GROUP
110 | */
111 | const ackBatchOptions = { noAckBatchSize: 1024, noAckBatchAge: 10 };
112 | const cgOptions: kafka.ConsumerGroupOptions = {
113 | kafkaHost: 'localhost:9092',
114 | batch: ackBatchOptions,
115 | groupId: 'groupID',
116 | id: 'consumerID',
117 | encoding: 'buffer',
118 | keyEncoding: 'buffer',
119 | sessionTimeout: 15000,
120 | protocol: ['roundrobin'],
121 | fromOffset: 'latest',
122 | migrateHLC: false,
123 | migrateRolling: true
124 | };
125 |
126 | const consumerGroup = new kafka.ConsumerGroup(cgOptions, ['topic1']);
127 | consumerGroup.on('error', (err) => { });
128 | consumerGroup.on('connect', () => { });
129 | consumerGroup.on('message', (msg) => { });
130 | consumerGroup.close(true, (err: Error) => { });
131 |
132 | const offset = new kafka.Offset(basicKafkaClient);
133 |
134 | offset.on('ready', () => { });
135 |
136 | offset.fetch([{ topic: 't', partition: 0, time: Date.now(), maxNum: 1 }, { topic: 't' }], (err: any, data: any) => { });
137 |
138 | offset.commit('groupId', [{ topic: 't', partition: 0, offset: 10 }], (err, data) => { });
139 |
140 | offset.fetchCommits('groupId', [{ topic: 't', partition: 0 }], (err, data) => { });
141 |
142 | offset.fetchLatestOffsets(['t'], (err, offsets) => { });
143 | offset.fetchEarliestOffsets(['t'], (err, offsets) => { });
144 |
--------------------------------------------------------------------------------
/types/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "lib": [
5 | "es6"
6 | ],
7 | "noImplicitAny": true,
8 | "noImplicitThis": true,
9 | "strictNullChecks": true,
10 | "strictFunctionTypes": true,
11 | "strict": true,
12 | "noEmit": true,
13 | "forceConsistentCasingInFileNames": true
14 | }
15 | }
--------------------------------------------------------------------------------
/types/tslint.json:
--------------------------------------------------------------------------------
1 | {
2 | "defaultSeverity": "error",
3 | "extends": "tslint-config-semistandard",
4 | "jsRules": {},
5 | "rules": {
6 | "no-empty": false,
7 | "handle-callback-err": false,
8 | "no-unused-variable": false
9 | },
10 | "rulesDirectory": []
11 | }
12 |
--------------------------------------------------------------------------------