├── .analysis_options ├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── DEVELOPMENT.md ├── LICENSE ├── README.md ├── dart_test.yaml ├── lib ├── common.dart ├── kafka.dart ├── protocol.dart └── src │ ├── common │ ├── errors.dart │ ├── messages.dart │ ├── metadata.dart │ └── offsets.dart │ ├── consumer.dart │ ├── consumer_group.dart │ ├── fetcher.dart │ ├── offset_master.dart │ ├── producer.dart │ ├── protocol │ ├── bytes_builder.dart │ ├── bytes_reader.dart │ ├── common.dart │ ├── consumer_metadata_api.dart │ ├── fetch_api.dart │ ├── group_membership_api.dart │ ├── messages.dart │ ├── metadata_api.dart │ ├── offset_api.dart │ ├── offset_commit_api.dart │ ├── offset_fetch_api.dart │ └── produce_api.dart │ ├── session.dart │ └── util │ └── crc32.dart ├── pubspec.yaml ├── test ├── all.dart ├── common │ ├── errors_test.dart │ └── messages_test.dart ├── consumer_group_test.dart ├── consumer_test.dart ├── fetcher_test.dart ├── producer_test.dart ├── protocol │ ├── bytes_builder_test.dart │ ├── bytes_reader_test.dart │ ├── fetch_test.dart │ ├── offset_commit_test.dart │ ├── offset_fetch_test.dart │ ├── offset_test.dart │ └── produce_test.dart ├── session_test.dart ├── setup.dart └── util │ └── crc32_test.dart └── tool └── kafka-cluster ├── Dockerfile ├── scripts └── start-kafka.sh └── supervisor ├── kafka1.conf ├── kafka2.conf └── zookeeper.conf /.analysis_options: -------------------------------------------------------------------------------- 1 | analyzer: 2 | strong-mode: true 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | pubspec.lock 2 | .pub/ 3 | bin/ 4 | doc/api/ 5 | packages 6 | .packages 7 | notes.md 8 | coverage/ 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | 3 | dist: trusty 4 | 5 | language: dart 6 | 7 | services: 8 | - docker 9 | 10 | dart: 11 | - stable 12 | 13 | before_install: 14 | - docker build -t kafka-cluster tool/kafka-cluster/ 15 | - docker run -d --name kafka-cluster -p 2181:2181 -p 9092:9092 -p 9093:9093 --env ADVERTISED_HOST=127.0.0.1 kafka-cluster 16 | - docker ps -a 17 | - sleep 5 18 | - docker exec kafka-cluster bash -c '$KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper=localhost:2181 --topic dartKafkaTest --partitions 3 --replication-factor 2' 19 | - docker exec kafka-cluster bash -c '$KAFKA_HOME/bin/kafka-topics.sh --list --zookeeper=localhost:2181' 20 | 21 | script: 22 | - pub run test -r expanded test/all.dart 23 | - pub global activate coverage 24 | - dart --observe=8111 test/all.dart & 25 | - sleep 20 26 | - pub global run coverage:collect_coverage --port=8111 -o coverage.json --resume-isolates 27 | - pub global run coverage:format_coverage --package-root=packages --report-on lib --in coverage.json --out lcov.info --lcov 28 | 29 | after_success: 30 | - bash <(curl -s https://codecov.io/bash) 31 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | All notable changes to this project will be documented in this file. 3 | This project adheres to [Semantic Versioning](http://semver.org/). 
4 | 5 | ## [Unreleased] 6 | -------------------------------------------------------------------------------- /DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Running tests 2 | 3 | Requirements: 4 | 5 | * Docker Toolbox (OS X) 6 | 7 | ## Starting Kafka container locally 8 | 9 | ``` 10 | docker build -t kafka-cluster tool/kafka-cluster/ 11 | docker run -d --name kafka-cluster -p 2181:2181 -p 9092:9092 -p 9093:9093 --env ADVERTISED_HOST=192.168.99.100 kafka-cluster 12 | ``` 13 | 14 | Sometimes zookeeper needs a kick: 15 | 16 | ``` 17 | docker exec kafka-cluster bash -c '$KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper=localhost:2181 --topic dartKafkaTest --partitions 3 --replication-factor 2' 18 | ``` 19 | 20 | Now you should be able to run tests with: 21 | 22 | ``` 23 | pub run test -j 1 24 | ``` 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Anatoly Pulyaevskiy 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 22 | TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 | THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dart Kafka 2 | 3 | [![Build Status](https://travis-ci.org/pulyaevskiy/dart-kafka.svg?branch=master)](https://travis-ci.org/pulyaevskiy/dart-kafka) 4 | [![Coverage](https://codecov.io/gh/pulyaevskiy/dart-kafka/branch/master/graph/badge.svg)](https://codecov.io/gh/pulyaevskiy/dart-kafka) 5 | [![License](https://img.shields.io/badge/license-BSD--2-blue.svg)](https://raw.githubusercontent.com/pulyaevskiy/dart-kafka/master/LICENSE) 6 | 7 | Kafka client library written in Dart. 8 | 9 | ### Current status 10 | 11 | This library is a work-in-progress. 12 | Currently all the updates are happening in `kafka-0.10` branch: 13 | 14 | * Support for Kafka 0.10 APIs including Group Membership API 15 | * Implementation of `HighLevelConsumer` capable of automatic load-balancing 16 | and re-distribution of topics/partitions in case of failures. 17 | * Better testing framework. 
18 | * Isolate-based distribution of consumer group members for better utilization
19 |   of system resources.
20 | 
21 | Master branch currently targets 0.8.x versions of Kafka server.
22 | 
23 | ### Things that are not supported yet
24 | 
25 | * Snappy compression.
26 | 
27 | ## Installation
28 | 
29 | There is no Pub package yet, but it will be published as soon as the APIs are
30 | stable enough.
31 | 
32 | For now you can use a git dependency in your `pubspec.yaml`:
33 | 
34 | ```yaml
35 | dependencies:
36 |   kafka:
37 |     git: https://github.com/dart-drivers/kafka.git
38 | ```
39 | 
40 | And then import it as usual:
41 | 
42 | ```dart
43 | import 'package:kafka/kafka.dart';
44 | ```
45 | 
46 | ## Features
47 | 
48 | This library provides several high-level API objects to interact with Kafka:
49 | 
50 | * __KafkaSession__ - responsible for managing connections to Kafka brokers and
51 |   coordinating all requests. Also provides access to metadata information.
52 | * __Producer__ - publishes messages to Kafka topics.
53 | * __Consumer__ - consumes messages from Kafka topics and stores its state (current
54 |   offsets). Leverages the ConsumerMetadata API via ConsumerGroup.
55 | * __Fetcher__ - consumes messages from Kafka without storing state.
56 | * __OffsetMaster__ - provides a convenience layer on top of the Offset API to easily
57 |   retrieve the earliest and latest offsets of particular topic-partitions.
58 | * __ConsumerGroup__ - provides a convenience layer on top of the Consumer Metadata API to easily
59 |   fetch or commit consumer offsets.
60 | 
61 | ## Producer
62 | 
63 | A simple implementation of a Kafka producer. It auto-detects leaders for
64 | topic-partitions and creates a separate `ProduceRequest` for each broker.
65 | Requests are sent in parallel and all responses are aggregated in a special
66 | `ProduceResult` object.
67 | 
68 | ```dart
69 | // file:produce.dart
70 | import 'dart:io';
71 | import 'package:kafka/kafka.dart';
72 | 
73 | main(List<String> arguments) async {
74 |   var host = new ContactPoint('127.0.0.1', 9092);
75 |   var session = new KafkaSession([host]);
76 | 
77 |   var producer = new Producer(session, 1, 1000);
78 |   var result = await producer.produce([
79 |     new ProduceEnvelope('topicName', 0, [new Message('msgForPartition0'.codeUnits)]),
80 |     new ProduceEnvelope('topicName', 1, [new Message('msgForPartition1'.codeUnits)])
81 |   ]);
82 |   print(result.hasErrors);
83 |   print(result.offsets);
84 |   session.close(); // make sure to always close the session when the work is done.
85 | }
86 | ```
87 | 
88 | Result:
89 | 
90 | ```shell
91 | $ dart produce.dart
92 | false
93 | {dartKafkaTest: {0: 213075, 1: 201680}}
94 | ```
95 | 
96 | ## Consumer
97 | 
98 | High-level implementation of a Kafka consumer which stores its state using
99 | Kafka's ConsumerMetadata API.
100 | 
101 | > If you don't want to keep the state of consumed offsets, take a look at `Fetcher`,
102 | > which was designed specifically for this use case.
103 | 
104 | Consumer returns messages as a `Stream`, so all standard stream operations
105 | should be applicable. However, Kafka topics are ordered streams of messages
106 | with sequential offsets. The Consumer implementation preserves the order of
107 | messages received from the server. For this purpose all messages are wrapped in
108 | a special `MessageEnvelope` object with the following methods:
109 | 
110 | ```
111 | /// Signals to consumer that message has been processed and its offset can
112 | /// be committed.
113 | void commit(String metadata);
114 | 
115 | /// Signals that message has been processed and we are ready for
116 | /// the next one. Offset of this message will **not** be committed.
117 | void ack();
118 | 
119 | /// Signals to consumer to cancel any further deliveries and close the stream.
120 | void cancel();
121 | ```
122 | 
123 | One must call `commit()` or `ack()` for each processed message, otherwise the
124 | Consumer won't send the next message to the stream.
125 | 
126 | The simplest example of a consumer:
127 | 
128 | ```dart
129 | import 'dart:io';
130 | import 'dart:async';
131 | import 'package:kafka/kafka.dart';
132 | 
133 | void main(List<String> arguments) async {
134 |   var host = new ContactPoint('127.0.0.1', 9092);
135 |   var session = new KafkaSession([host]);
136 |   var group = new ConsumerGroup(session, 'consumerGroupName');
137 |   var topics = {
138 |     'topicName': [0, 1] // list of partitions to consume from.
139 |   };
140 | 
141 |   var consumer = new Consumer(session, group, topics, 100, 1);
142 |   await for (MessageEnvelope envelope in consumer.consume(limit: 3)) {
143 |     // Assuming that messages were produced by the Producer from the previous example.
144 |     var value = new String.fromCharCodes(envelope.message.value);
145 |     print('Got message: ${envelope.offset}, ${value}');
146 |     envelope.commit('metadata'); // Important.
147 |   }
148 |   session.close(); // make sure to always close the session when the work is done.
149 | }
150 | ```
151 | 
152 | It is also possible to consume messages in batches for improved efficiency:
153 | 
154 | ```dart
155 | import 'dart:io';
156 | import 'dart:async';
157 | import 'package:kafka/kafka.dart';
158 | 
159 | void main(List<String> arguments) async {
160 |   var host = new ContactPoint('127.0.0.1', 9092);
161 |   var session = new KafkaSession([host]);
162 |   var group = new ConsumerGroup(session, 'consumerGroupName');
163 |   var topics = {
164 |     'topicName': [0, 1] // list of partitions to consume from.
165 |   };
166 | 
167 |   var consumer = new Consumer(session, group, topics, 100, 1);
168 |   await for (BatchEnvelope batch in consumer.batchConsume(20)) {
169 |     batch.items.forEach((MessageEnvelope envelope) {
170 |       // use envelope as usual
171 |     });
172 |     batch.commit('metadata'); // use batch control methods instead of individual messages.
173 |   }
174 |   session.close(); // make sure to always close the session when the work is done.
175 | }
176 | ```
177 | 
178 | ### Consumer offset reset strategy
179 | 
180 | Because Kafka topics can be configured to delete old messages periodically,
181 | it is possible that your consumer offset becomes invalid (simply because
182 | the message at that offset no longer exists in the topic).
183 | 
184 | In such cases `Consumer` provides a configurable strategy with the following options:
185 | 
186 | * `OffsetOutOfRangeBehavior.throwError`
187 | * `OffsetOutOfRangeBehavior.resetToEarliest` (default)
188 | * `OffsetOutOfRangeBehavior.resetToLatest`
189 | 
190 | By default, if it gets an `OffsetOutOfRange` server error it will reset its offsets
191 | to the earliest available in the consumed topics and partitions, which essentially
192 | means consuming all available messages from the beginning.
193 | 
194 | To modify this behavior, simply set the `onOffsetOutOfRange` property of the consumer to
195 | one of the above values:
196 | 
197 | ```
198 | var consumer = new Consumer(session, group, topics, 100, 1);
199 | consumer.onOffsetOutOfRange = OffsetOutOfRangeBehavior.throwError;
200 | ```
201 | 
202 | ## Supported protocol versions
203 | 
204 | The current version targets version `0.8.2` of the Kafka protocol. There are no plans
205 | to support earlier versions.
206 | 
207 | ## License
208 | 
209 | BSD-2
210 | 
--------------------------------------------------------------------------------
/dart_test.yaml:
--------------------------------------------------------------------------------
1 | reporter: expanded
2 | 
--------------------------------------------------------------------------------
/lib/common.dart:
--------------------------------------------------------------------------------
1 | /// Common dependencies for other Kafka libraries within this package.
2 | library kafka.common;
3 | 
4 | import 'package:logging/logging.dart';
5 | 
6 | part 'src/common/errors.dart';
7 | part 'src/common/messages.dart';
8 | part 'src/common/metadata.dart';
9 | part 'src/common/offsets.dart';
10 | 
11 | /// String identifier passed to the Kafka server in API calls.
12 | const String dartKafkaId = 'dart_kafka';
13 | 
14 | /// Logger for this library.
15 | ///
16 | /// Doesn't do anything by default. You should set a log level and add a handler
17 | /// in order to get logs.
18 | final Logger kafkaLogger = new Logger('Kafka');
19 | 
--------------------------------------------------------------------------------
/lib/kafka.dart:
--------------------------------------------------------------------------------
1 | /// ## Apache Kafka client library for Dartlang
2 | ///
3 | /// This library implements the Kafka binary protocol and provides
4 | /// high-level abstractions for producing and consuming messages.
5 | library kafka;
6 | 
7 | import 'dart:async';
8 | import 'dart:collection';
9 | import 'dart:io';
10 | 
11 | import 'package:quiver/collection.dart';
12 | 
13 | import 'common.dart';
14 | 
15 | import 'protocol.dart';
16 | 
17 | export 'common.dart' hide groupBy, kafkaLogger;
18 | export 'protocol.dart' show TopicMetadata;
19 | 
20 | part 'src/consumer.dart';
21 | part 'src/consumer_group.dart';
22 | part 'src/fetcher.dart';
23 | part 'src/offset_master.dart';
24 | part 'src/producer.dart';
25 | part 'src/session.dart';
26 | 
--------------------------------------------------------------------------------
/lib/protocol.dart:
--------------------------------------------------------------------------------
1 | /// Subpackage with the implementation of the Kafka protocol.
2 | ///
3 | /// Users of this package are not supposed to import this library directly and
4 | /// should use the main 'kafka' package instead.
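///
/// If you do need the low-level request classes (for example in tests), a
/// minimal sketch of sending a raw request looks roughly like this (assumes
/// an existing [KafkaSession] and [Broker]; the topic name, partition and
/// offset values are illustrative):
///
///     import 'package:kafka/protocol.dart';
///
///     Future fetchRaw(KafkaSession session, Broker broker) async {
///       var request = new FetchRequest(100, 1) // maxWaitTime: 100ms, minBytes: 1
///         ..add('topicName', 0, 0);            // topic, partition, start offset
///       FetchResponse response = await session.send(broker, request);
///       return response;
///     }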
5 | library kafka.protocol; 6 | 7 | import 'dart:collection'; 8 | import 'dart:convert'; 9 | import 'dart:io'; 10 | import 'dart:math'; 11 | import 'dart:typed_data'; 12 | 13 | import 'common.dart'; 14 | 15 | part 'src/protocol/bytes_builder.dart'; 16 | part 'src/protocol/bytes_reader.dart'; 17 | part 'src/protocol/common.dart'; 18 | part 'src/protocol/consumer_metadata_api.dart'; 19 | part 'src/protocol/fetch_api.dart'; 20 | part 'src/protocol/group_membership_api.dart'; 21 | part 'src/protocol/messages.dart'; 22 | part 'src/protocol/metadata_api.dart'; 23 | part 'src/protocol/offset_api.dart'; 24 | part 'src/protocol/offset_commit_api.dart'; 25 | part 'src/protocol/offset_fetch_api.dart'; 26 | part 'src/protocol/produce_api.dart'; 27 | part 'src/util/crc32.dart'; 28 | -------------------------------------------------------------------------------- /lib/src/common/errors.dart: -------------------------------------------------------------------------------- 1 | part of kafka.common; 2 | 3 | /// Used to indicate there is a mismatch in CRC sum of a message (message is 4 | /// corrupted). 5 | class MessageCrcMismatchError extends StateError { 6 | MessageCrcMismatchError(String message) : super(message); 7 | } 8 | 9 | /// Represents error returned by Kafka server. 10 | class KafkaServerError { 11 | static const int NoError = 0; 12 | static const int Unknown = -1; 13 | static const int OffsetOutOfRange = 1; 14 | static const int InvalidMessage = 2; 15 | static const int UnknownTopicOrPartition = 3; 16 | static const int InvalidMessageSize = 4; 17 | static const int LeaderNotAvailable = 5; 18 | static const int NotLeaderForPartition = 6; 19 | static const int RequestTimedOut = 7; 20 | static const int BrokerNotAvailable = 8; 21 | static const int ReplicaNotAvailable = 9; 22 | static const int MessageSizeTooLarge = 10; 23 | static const int StaleControllerEpoch = 11; 24 | static const int OffsetMetadataTooLarge = 12; 25 | static const int OffsetsLoadInProgress = 14; 26 | static const int ConsumerCoordinatorNotAvailable = 15; 27 | static const int NotCoordinatorForConsumer = 16; 28 | static const int InvalidTopicCode = 17; 29 | static const int RecordListTooLargeCode = 18; 30 | static const int NotEnoughReplicasCode = 19; 31 | static const int NotEnoughReplicasAfterAppendCode = 20; 32 | static const int InvalidRequiredAcksCode = 21; 33 | static const int IllegalGenerationCode = 22; 34 | static const int InconsistentGroupProtocolCode = 23; 35 | static const int InvalidGroupIdCode = 24; 36 | static const int UnknownMemberIdCode = 25; 37 | static const int InvalidSessionTimeoutCode = 26; 38 | static const int RebalanceInProgressCode = 27; 39 | static const int InvalidCommitOffsetSizeCode = 28; 40 | static const int TopicAuthorizationFailedCode = 29; 41 | static const int GroupAuthorizationFailedCode = 30; 42 | static const int ClusterAuthorizationFailedCode = 31; 43 | static const int InvalidTimestamp = 32; 44 | static const int UnsupportedSaslMechanism = 33; 45 | static const int IllegalSaslState = 34; 46 | static const int UnsupportedVersion = 35; 47 | 48 | /// Numeric code of this server error. 
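  ///
  /// A usage sketch (the error code value below is illustrative):
  ///
  ///     var error = new KafkaServerError(16);
  ///     print(error.message);                     // NotCoordinatorForConsumer
  ///     print(error.isNotCoordinatorForConsumer); // true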
49 | final int code; 50 | 51 | static final Map _instances = new Map(); 52 | 53 | static const Map _errorTexts = const { 54 | 0: 'NoError', 55 | -1: 'Unknown', 56 | 1: 'OffsetOutOfRange', 57 | 2: 'InvalidMessage', 58 | 3: 'UnknownTopicOrPartition', 59 | 4: 'InvalidMessageSize', 60 | 5: 'LeaderNotAvailable', 61 | 6: 'NotLeaderForPartition', 62 | 7: 'RequestTimedOut', 63 | 8: 'BrokerNotAvailable', 64 | 9: 'ReplicaNotAvailable', 65 | 10: 'MessageSizeTooLarge', 66 | 11: 'StaleControllerEpoch', 67 | 12: 'OffsetMetadataTooLarge', 68 | 14: 'OffsetsLoadInProgress', 69 | 15: 'ConsumerCoordinatorNotAvailable', 70 | 16: 'NotCoordinatorForConsumer', 71 | 17: 'InvalidTopicCode', 72 | 18: 'RecordListTooLargeCode', 73 | 19: 'NotEnoughReplicasCode', 74 | 20: 'NotEnoughReplicasAfterAppendCode', 75 | 21: 'InvalidRequiredAcksCode', 76 | 22: 'IllegalGenerationCode', 77 | 23: 'InconsistentGroupProtocolCode', 78 | 24: 'InvalidGroupIdCode', 79 | 25: 'UnknownMemberIdCode', 80 | 26: 'InvalidSessionTimeoutCode', 81 | 27: 'RebalanceInProgressCode', 82 | 28: 'InvalidCommitOffsetSizeCode', 83 | 29: 'TopicAuthorizationFailedCode', 84 | 30: 'GroupAuthorizationFailedCode', 85 | 31: 'ClusterAuthorizationFailedCode', 86 | 32: 'InvalidTimestamp', 87 | 33: 'UnsupportedSaslMechanism', 88 | 34: 'IllegalSaslState', 89 | 35: 'UnsupportedVersion', 90 | }; 91 | 92 | /// String representation of this server error. 93 | String get message => _errorTexts[code]; 94 | 95 | KafkaServerError._(this.code); 96 | 97 | /// Creates instance of KafkaServerError from numeric error code. 98 | factory KafkaServerError(int code) { 99 | if (!_instances.containsKey(code)) { 100 | _instances[code] = new KafkaServerError._(code); 101 | } 102 | 103 | return _instances[code]; 104 | } 105 | 106 | @override 107 | String toString() => 'KafkaServerError: ${message}(${code})'; 108 | 109 | bool get isError => code != NoError; 110 | bool get isNoError => code == NoError; 111 | bool get isUnknown => code == Unknown; 112 | bool get isOffsetOutOfRange => code == OffsetOutOfRange; 113 | bool get isInvalidMessage => code == InvalidMessage; 114 | bool get isUnknownTopicOrPartition => code == UnknownTopicOrPartition; 115 | bool get isInvalidMessageSize => code == InvalidMessageSize; 116 | bool get isLeaderNotAvailable => code == LeaderNotAvailable; 117 | bool get isNotLeaderForPartition => code == NotLeaderForPartition; 118 | bool get isRequestTimedOut => code == RequestTimedOut; 119 | bool get isBrokerNotAvailable => code == BrokerNotAvailable; 120 | bool get isReplicaNotAvailable => code == ReplicaNotAvailable; 121 | bool get isMessageSizeTooLarge => code == MessageSizeTooLarge; 122 | bool get isStaleControllerEpoch => code == StaleControllerEpoch; 123 | bool get isOffsetMetadataTooLarge => code == OffsetMetadataTooLarge; 124 | bool get isOffsetsLoadInProgress => code == OffsetsLoadInProgress; 125 | bool get isConsumerCoordinatorNotAvailable => 126 | code == ConsumerCoordinatorNotAvailable; 127 | bool get isNotCoordinatorForConsumer => code == NotCoordinatorForConsumer; 128 | } 129 | -------------------------------------------------------------------------------- /lib/src/common/messages.dart: -------------------------------------------------------------------------------- 1 | part of kafka.common; 2 | 3 | /// Compression types supported by Kafka. 4 | enum KafkaCompression { none, gzip, snappy } 5 | 6 | /// Kafka Message Attributes. Only [KafkaCompression] is supported by the 7 | /// server at the moment. 8 | class MessageAttributes { 9 | /// Compression codec. 
10 | final KafkaCompression compression; 11 | 12 | /// Creates new instance of MessageAttributes. 13 | MessageAttributes([this.compression = KafkaCompression.none]); 14 | 15 | /// Creates MessageAttributes from the raw byte. 16 | MessageAttributes.fromByte(int byte) : compression = getCompression(byte); 17 | 18 | static KafkaCompression getCompression(int byte) { 19 | var c = byte & 3; 20 | var map = { 21 | 0: KafkaCompression.none, 22 | 1: KafkaCompression.gzip, 23 | 2: KafkaCompression.snappy, 24 | }; 25 | return map[c]; 26 | } 27 | 28 | /// Converts this attributes into byte. 29 | int toInt() { 30 | return _compressionToInt(); 31 | } 32 | 33 | int _compressionToInt() { 34 | switch (this.compression) { 35 | case KafkaCompression.none: 36 | return 0; 37 | case KafkaCompression.gzip: 38 | return 1; 39 | case KafkaCompression.snappy: 40 | return 2; 41 | } 42 | } 43 | } 44 | 45 | /// Kafka Message as defined in the protocol. 46 | class Message { 47 | /// Metadata attributes about this message. 48 | final MessageAttributes attributes; 49 | 50 | /// Actual message contents. 51 | final List value; 52 | 53 | /// Optional message key that was used for partition assignment. 54 | /// The key can be `null`. 55 | final List key; 56 | 57 | /// Default internal constructor. 58 | Message._(this.attributes, this.key, this.value); 59 | 60 | /// Creates new [Message]. 61 | factory Message(List value, 62 | {MessageAttributes attributes, List key}) { 63 | attributes ??= new MessageAttributes(); 64 | return new Message._(attributes, key, value); 65 | } 66 | } 67 | 68 | /// Envelope used for publishing messages to Kafka. 69 | class ProduceEnvelope { 70 | /// Name of the topic. 71 | final String topicName; 72 | 73 | /// Partition ID. 74 | final int partitionId; 75 | 76 | /// List of messages to publish. 77 | final List messages; 78 | 79 | /// Compression codec to be used. 80 | final KafkaCompression compression; 81 | 82 | /// Creates new envelope containing list of messages. 83 | /// 84 | /// You can optionally set [compression] codec which will be used to encode 85 | /// messages. 86 | ProduceEnvelope(this.topicName, this.partitionId, this.messages, 87 | {this.compression: KafkaCompression.none}) { 88 | messages.forEach((m) { 89 | if (m.attributes.compression != KafkaCompression.none) { 90 | throw new StateError( 91 | 'ProduceEnvelope: compression can not be set on individual messages in ProduceEnvelope, use ProduceEnvelope.compression instead.'); 92 | } 93 | }); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /lib/src/common/metadata.dart: -------------------------------------------------------------------------------- 1 | part of kafka.common; 2 | 3 | /// Represents single node in a Kafka cluster. 4 | class Broker { 5 | /// Unique ID of this broker within cluster. 6 | final int id; 7 | 8 | /// Host name or IP address of this broker. 9 | final String host; 10 | 11 | /// Port number of this broker. 12 | final int port; 13 | 14 | static final Map _instances = new Map(); 15 | 16 | /// Creates new instance of Kafka broker. 
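  ///
  /// Instances are cached by `host:port`, so repeated calls with the same
  /// address return the same object. A usage sketch (the address below is
  /// illustrative):
  ///
  ///     var broker = new Broker(1, '127.0.0.1', 9092);
  ///     print(broker); // KafkaBroker: 127.0.0.1:9092 (id: 1)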
17 | factory Broker(int id, String host, int port) { 18 | var key = '${host}:${port}'; 19 | if (!_instances.containsKey(key)) { 20 | _instances[key] = new Broker._(id, host, port); 21 | } else { 22 | if (_instances[key].id != id) throw new StateError('Broker ID mismatch.'); 23 | } 24 | 25 | return _instances[key]; 26 | } 27 | 28 | Broker._(this.id, this.host, this.port); 29 | 30 | @override 31 | toString() => 'KafkaBroker: ${host}:${port} (id: ${id})'; 32 | } 33 | 34 | class TopicPartition { 35 | final String topicName; 36 | final int partitionId; 37 | 38 | static final Map _cache = new Map(); 39 | 40 | TopicPartition._(this.topicName, this.partitionId); 41 | 42 | factory TopicPartition(String topicName, int partitionId) { 43 | var key = topicName + partitionId.toString(); 44 | if (!_cache.containsKey(key)) { 45 | _cache[key] = new TopicPartition._(topicName, partitionId); 46 | } 47 | 48 | return _cache[key]; 49 | } 50 | 51 | @override 52 | bool operator ==(other) { 53 | return (other.topicName == topicName && other.partitionId == partitionId); 54 | } 55 | 56 | @override 57 | int get hashCode => (topicName + partitionId.toString()).hashCode; 58 | } 59 | -------------------------------------------------------------------------------- /lib/src/common/offsets.dart: -------------------------------------------------------------------------------- 1 | part of kafka.common; 2 | 3 | Map groupBy(Iterable list, f(element)) { 4 | var grouped = new Map(); 5 | for (var e in list) { 6 | var key = f(e); 7 | if (!grouped.containsKey(key)) { 8 | grouped[key] = new List(); 9 | } 10 | grouped[key].add(e); 11 | } 12 | 13 | return grouped; 14 | } 15 | 16 | /// Data structure representing consumer offset. 17 | class ConsumerOffset { 18 | final String topicName; 19 | final int partitionId; 20 | final int offset; 21 | final String metadata; 22 | final int errorCode; 23 | 24 | ConsumerOffset(this.topicName, this.partitionId, this.offset, this.metadata, 25 | [this.errorCode]); 26 | } 27 | -------------------------------------------------------------------------------- /lib/src/consumer.dart: -------------------------------------------------------------------------------- 1 | part of kafka; 2 | 3 | /// Determines behavior of [Consumer] when it receives `OffsetOutOfRange` API 4 | /// error. 5 | enum OffsetOutOfRangeBehavior { 6 | /// Consumer will throw [KafkaServerError] with error code `1`. 7 | throwError, 8 | 9 | /// Consumer will reset it's offsets to the earliest available for particular 10 | /// topic-partition. 11 | resetToEarliest, 12 | 13 | /// Consumer will reset it's offsets to the latest available for particular 14 | /// topic-partition. 15 | resetToLatest 16 | } 17 | 18 | /// High-level Kafka consumer class. 19 | /// 20 | /// Provides convenience layer on top of Kafka's low-level APIs. 21 | class Consumer { 22 | /// Instance of [KafkaSession] used to send requests. 23 | final KafkaSession session; 24 | 25 | /// Consumer group this consumer belongs to. 26 | final ConsumerGroup consumerGroup; 27 | 28 | /// Topics and partitions to consume. 29 | final Map> topicPartitions; 30 | 31 | /// Maximum amount of time in milliseconds to block waiting if insufficient 32 | /// data is available at the time the request is issued. 33 | final int maxWaitTime; 34 | 35 | /// Minimum number of bytes of messages that must be available 36 | /// to give a response. 37 | final int minBytes; 38 | 39 | /// Determines this consumer's strategy of handling `OffsetOutOfRange` API 40 | /// errors. 
41 | /// 42 | /// Default value is `resetToEarliest` which will automatically reset offset 43 | /// of ConsumerGroup for particular topic-partition to the earliest offset 44 | /// available. 45 | /// 46 | /// See [OffsetOutOfRangeBehavior] for details on each value. 47 | OffsetOutOfRangeBehavior onOffsetOutOfRange = 48 | OffsetOutOfRangeBehavior.resetToEarliest; 49 | 50 | /// Creates new consumer identified by [consumerGroup]. 51 | Consumer(this.session, this.consumerGroup, this.topicPartitions, 52 | this.maxWaitTime, this.minBytes); 53 | 54 | /// Consumes messages from Kafka. If [limit] is specified consuming 55 | /// will stop after exactly [limit] messages have been retrieved. If no 56 | /// specific limit is set it'll default to `-1` and will consume all incoming 57 | /// messages continuously. 58 | Stream consume({int limit: -1}) { 59 | var controller = new _MessageStreamController(limit); 60 | 61 | Future> list = _buildWorkers(); 62 | list.then((workers) { 63 | if (workers.isEmpty) { 64 | controller.close(); 65 | return; 66 | } 67 | var remaining = workers.length; 68 | var futures = workers.map((w) => w.run(controller)).toList(); 69 | futures.forEach((Future f) { 70 | f.then((_) { 71 | remaining--; 72 | if (remaining == 0) { 73 | kafkaLogger 74 | ?.info('Consumer: All workers are done. Closing stream.'); 75 | controller.close(); 76 | } 77 | }, onError: (error, stackTrace) { 78 | controller.addError(error, stackTrace); 79 | }); 80 | }); 81 | }, onError: (error, stackTrace) { 82 | controller.addError(error, stackTrace); 83 | }); 84 | 85 | return controller.stream; 86 | } 87 | 88 | /// Consume messages in batches. 89 | /// 90 | /// This will create a stream of [BatchEnvelope] objects. Each batch 91 | /// will contain up to [maxBatchSize] of `MessageEnvelope`s. 92 | /// 93 | /// Note that calling `commit`, `ack`, or `cancel` on individual message 94 | /// envelope will take no effect. Instead one should use corresponding methods 95 | /// on the BatchEnvelope itself. 96 | /// 97 | /// Currently batches are formed on per broker basis, meaning each batch will 98 | /// always contain messages from one particular broker. 99 | Stream batchConsume(int maxBatchSize) { 100 | var controller = new _BatchStreamController(); 101 | 102 | Future> list = _buildWorkers(); 103 | list.then((workers) { 104 | if (workers.isEmpty) { 105 | controller.close(); 106 | return; 107 | } 108 | var remaining = workers.length; 109 | var futures = 110 | workers.map((w) => w.runBatched(controller, maxBatchSize)).toList(); 111 | futures.forEach((Future f) { 112 | f.then((_) { 113 | kafkaLogger.info('Consumer: worker finished.'); 114 | remaining--; 115 | if (remaining == 0) { 116 | kafkaLogger 117 | ?.info('Consumer: All workers are done. 
Closing stream.'); 118 | controller.close(); 119 | } 120 | }, onError: (error, stackTrace) { 121 | controller.addError(error, stackTrace); 122 | }); 123 | }); 124 | }, onError: (error, stackTrace) { 125 | controller.addError(error, stackTrace); 126 | }); 127 | 128 | return controller.stream; 129 | } 130 | 131 | Future> _buildWorkers() async { 132 | var meta = await session.getMetadata(topicPartitions.keys.toSet()); 133 | var topicsByBroker = new Map>>(); 134 | 135 | topicPartitions.forEach((topic, partitions) { 136 | partitions.forEach((p) { 137 | var leader = meta.getTopicMetadata(topic).getPartition(p).leader; 138 | var broker = meta.getBroker(leader); 139 | if (topicsByBroker.containsKey(broker) == false) { 140 | topicsByBroker[broker] = new Map>(); 141 | } 142 | if (topicsByBroker[broker].containsKey(topic) == false) { 143 | topicsByBroker[broker][topic] = new Set(); 144 | } 145 | topicsByBroker[broker][topic].add(p); 146 | }); 147 | }); 148 | 149 | var workers = new List<_ConsumerWorker>(); 150 | topicsByBroker.forEach((host, topics) { 151 | var worker = new _ConsumerWorker( 152 | session, host, topics, maxWaitTime, minBytes, 153 | group: consumerGroup); 154 | worker.onOffsetOutOfRange = onOffsetOutOfRange; 155 | workers.add(worker); 156 | }); 157 | 158 | return workers; 159 | } 160 | } 161 | 162 | class _MessageStreamController { 163 | final int limit; 164 | final StreamController _controller = 165 | new StreamController(); 166 | int _added = 0; 167 | bool _cancelled = false; 168 | 169 | _MessageStreamController(this.limit); 170 | 171 | bool get canAdd => 172 | (_cancelled == false && ((limit == -1) || (_added < limit))); 173 | Stream get stream => _controller.stream; 174 | 175 | /// Attempts to add [event] to the stream. 176 | /// Returns true if adding event succeeded, false otherwise. 177 | bool add(MessageEnvelope event) { 178 | if (canAdd) { 179 | _controller.add(event); 180 | _added++; 181 | return true; 182 | } 183 | return false; 184 | } 185 | 186 | void addError(Object error, [StackTrace stackTrace]) { 187 | _controller.addError(error, stackTrace); 188 | } 189 | 190 | void cancel() { 191 | _cancelled = true; 192 | } 193 | 194 | void close() { 195 | _controller.close(); 196 | } 197 | } 198 | 199 | /// Worker responsible for fetching messages from one particular Kafka broker. 200 | class _ConsumerWorker { 201 | final KafkaSession session; 202 | final Broker host; 203 | final ConsumerGroup group; 204 | final Map> topicPartitions; 205 | final int maxWaitTime; 206 | final int minBytes; 207 | 208 | OffsetOutOfRangeBehavior onOffsetOutOfRange = 209 | OffsetOutOfRangeBehavior.resetToEarliest; 210 | 211 | _ConsumerWorker(this.session, this.host, this.topicPartitions, 212 | this.maxWaitTime, this.minBytes, 213 | {this.group}); 214 | 215 | Future run(_MessageStreamController controller) async { 216 | kafkaLogger 217 | ?.info('Consumer: Running worker on host ${host.host}:${host.port}'); 218 | 219 | while (controller.canAdd) { 220 | var request = await _createRequest(); 221 | kafkaLogger?.fine('Consumer: Sending fetch request to ${host}.'); 222 | FetchResponse response = await session.send(host, request); 223 | var didReset = await _checkOffsets(response); 224 | if (didReset) { 225 | kafkaLogger?.warning( 226 | 'Offsets were reset to ${onOffsetOutOfRange}. 
Forcing re-fetch.'); 227 | continue; 228 | } 229 | for (var item in response.results) { 230 | for (var offset in item.messageSet.messages.keys) { 231 | var message = item.messageSet.messages[offset]; 232 | var envelope = new MessageEnvelope( 233 | item.topicName, item.partitionId, offset, message); 234 | if (!controller.add(envelope)) { 235 | return; 236 | } else { 237 | var result = await envelope.result; 238 | if (result.status == _ProcessingStatus.commit) { 239 | var offsets = [ 240 | new ConsumerOffset(item.topicName, item.partitionId, offset, 241 | result.commitMetadata) 242 | ]; 243 | await group.commitOffsets(offsets, -1, ''); 244 | } else if (result.status == _ProcessingStatus.cancel) { 245 | controller.cancel(); 246 | return; 247 | } 248 | } 249 | } 250 | } 251 | } 252 | } 253 | 254 | Future runBatched(_BatchStreamController controller, int maxBatchSize) async { 255 | kafkaLogger?.info( 256 | 'Consumer: Running batch worker on host ${host.host}:${host.port}'); 257 | 258 | while (controller.canAdd) { 259 | var request = await _createRequest(); 260 | FetchResponse response = await session.send(host, request); 261 | var didReset = await _checkOffsets(response); 262 | if (didReset) { 263 | kafkaLogger?.warning( 264 | 'Offsets were reset to ${onOffsetOutOfRange}. Forcing re-fetch.'); 265 | continue; 266 | } 267 | 268 | for (var batch in responseToBatches(response, maxBatchSize)) { 269 | if (!controller.add(batch)) return; 270 | var result = await batch.result; 271 | if (result.status == _ProcessingStatus.commit) { 272 | await group.commitOffsets(batch.offsetsToCommit, -1, ''); 273 | } else if (result.status == _ProcessingStatus.cancel) { 274 | controller.cancel(); 275 | return; 276 | } 277 | } 278 | } 279 | } 280 | 281 | Iterable responseToBatches( 282 | FetchResponse response, int maxBatchSize) sync* { 283 | BatchEnvelope batch; 284 | for (var item in response.results) { 285 | for (var offset in item.messageSet.messages.keys) { 286 | var message = item.messageSet.messages[offset]; 287 | var envelope = new MessageEnvelope( 288 | item.topicName, item.partitionId, offset, message); 289 | 290 | if (batch == null) batch = new BatchEnvelope(); 291 | if (batch.items.length < maxBatchSize) { 292 | batch.items.add(envelope); 293 | } 294 | if (batch.items.length == maxBatchSize) { 295 | yield batch; 296 | batch = null; 297 | } 298 | } 299 | } 300 | if (batch is BatchEnvelope && batch.items.isNotEmpty) { 301 | yield batch; 302 | batch = null; 303 | } 304 | } 305 | 306 | Future _checkOffsets(FetchResponse response) async { 307 | var topicsToReset = new Map>(); 308 | for (var result in response.results) { 309 | if (result.errorCode == KafkaServerError.OffsetOutOfRange) { 310 | kafkaLogger?.warning( 311 | 'Consumer: received API error 1 for topic ${result.topicName}:${result.partitionId}'); 312 | if (!topicsToReset.containsKey(result.topicName)) { 313 | topicsToReset[result.topicName] = new Set(); 314 | } 315 | topicsToReset[result.topicName].add(result.partitionId); 316 | kafkaLogger?.info('Topics to reset: ${topicsToReset}'); 317 | } 318 | } 319 | 320 | if (topicsToReset.isNotEmpty) { 321 | switch (onOffsetOutOfRange) { 322 | case OffsetOutOfRangeBehavior.throwError: 323 | throw new KafkaServerError(1); 324 | case OffsetOutOfRangeBehavior.resetToEarliest: 325 | await group.resetOffsetsToEarliest(topicsToReset); 326 | break; 327 | case OffsetOutOfRangeBehavior.resetToLatest: 328 | await group.resetOffsetsToLatest(topicsToReset); 329 | break; 330 | } 331 | return true; 332 | } else { 333 | return 
false; 334 | } 335 | } 336 | 337 | Future _createRequest() async { 338 | var offsets = await group.fetchOffsets(topicPartitions); 339 | var request = new FetchRequest(maxWaitTime, minBytes); 340 | for (var o in offsets) { 341 | request.add(o.topicName, o.partitionId, o.offset + 1); 342 | } 343 | 344 | return request; 345 | } 346 | } 347 | 348 | enum _ProcessingStatus { commit, ack, cancel } 349 | 350 | class _ProcessingResult { 351 | final _ProcessingStatus status; 352 | final String commitMetadata; 353 | 354 | _ProcessingResult.commit(String metadata) 355 | : status = _ProcessingStatus.commit, 356 | commitMetadata = metadata; 357 | _ProcessingResult.ack() 358 | : status = _ProcessingStatus.ack, 359 | commitMetadata = ''; 360 | _ProcessingResult.cancel() 361 | : status = _ProcessingStatus.cancel, 362 | commitMetadata = ''; 363 | } 364 | 365 | /// Envelope for a [Message] used by high-level consumer. 366 | class MessageEnvelope { 367 | /// Topic name of this message. 368 | final String topicName; 369 | 370 | /// Partition ID of this message. 371 | final int partitionId; 372 | 373 | /// This message's offset 374 | final int offset; 375 | 376 | /// Actual message received from Kafka broker. 377 | final Message message; 378 | 379 | Completer<_ProcessingResult> _completer = new Completer<_ProcessingResult>(); 380 | 381 | /// Creates new envelope. 382 | MessageEnvelope(this.topicName, this.partitionId, this.offset, this.message); 383 | 384 | Future<_ProcessingResult> get result => _completer.future; 385 | 386 | /// Signals that message has been processed and it's offset can 387 | /// be committed (in case of high-level [Consumer] implementation). In case if 388 | /// consumerGroup functionality is not used (like in the [Fetcher]) then 389 | /// this method's behaviour will be the same as in [ack] method. 390 | void commit(String metadata) { 391 | _completer.complete(new _ProcessingResult.commit(metadata)); 392 | } 393 | 394 | /// Signals that message has been processed and we are ready for 395 | /// the next one. This method will **not** trigger offset commit if this 396 | /// envelope has been created by a high-level [Consumer]. 397 | void ack() { 398 | _completer.complete(new _ProcessingResult.ack()); 399 | } 400 | 401 | /// Signals to consumer to cancel any further deliveries and close the stream. 402 | void cancel() { 403 | _completer.complete(new _ProcessingResult.cancel()); 404 | } 405 | } 406 | 407 | /// StreamController for batch consuming of messages. 408 | class _BatchStreamController { 409 | final StreamController _controller = 410 | new StreamController(); 411 | bool _cancelled = false; 412 | 413 | bool get canAdd => (_cancelled == false); 414 | Stream get stream => _controller.stream; 415 | 416 | /// Attempts to add [batch] to the stream. 417 | /// Returns true if adding event succeeded, false otherwise. 418 | bool add(BatchEnvelope batch) { 419 | if (canAdd) { 420 | _controller.add(batch); 421 | return true; 422 | } 423 | return false; 424 | } 425 | 426 | void addError(Object error, [StackTrace stackTrace]) { 427 | _controller.addError(error, stackTrace); 428 | } 429 | 430 | void cancel() { 431 | _cancelled = true; 432 | } 433 | 434 | void close() { 435 | _controller.close(); 436 | } 437 | } 438 | 439 | /// Envelope for message batches used by `Consumer.batchConsume`. 
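///
/// A usage sketch (assumes a configured [Consumer]; the batch size of 20 and
/// the `handle` function are illustrative):
///
///     await for (BatchEnvelope batch in consumer.batchConsume(20)) {
///       batch.items.forEach((envelope) => handle(envelope.message));
///       batch.commit('metadata'); // or batch.ack() / batch.cancel()
///     }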
440 | class BatchEnvelope { 441 | final List items = new List(); 442 | 443 | Completer<_ProcessingResult> _completer = new Completer<_ProcessingResult>(); 444 | Future<_ProcessingResult> get result => _completer.future; 445 | 446 | String commitMetadata; 447 | 448 | /// Signals that batch has been processed and it's offsets can 449 | /// be committed. In case if 450 | /// consumerGroup functionality is not used (like in the [Fetcher]) then 451 | /// this method's behaviour will be the same as in [ack] method. 452 | void commit(String metadata) { 453 | commitMetadata = metadata; 454 | _completer.complete(new _ProcessingResult.commit(metadata)); 455 | } 456 | 457 | /// Signals that batch has been processed and we are ready for 458 | /// the next one. This method will **not** trigger offset commit if this 459 | /// envelope has been created by a high-level [Consumer]. 460 | void ack() { 461 | _completer.complete(new _ProcessingResult.ack()); 462 | } 463 | 464 | /// Signals to consumer to cancel any further deliveries and close the stream. 465 | void cancel() { 466 | _completer.complete(new _ProcessingResult.cancel()); 467 | } 468 | 469 | Iterable get offsetsToCommit { 470 | var grouped = new Map(); 471 | for (var envelope in items) { 472 | var key = new TopicPartition(envelope.topicName, envelope.partitionId); 473 | if (!grouped.containsKey(key)) { 474 | grouped[key] = envelope.offset; 475 | } else if (grouped[key] < envelope.offset) { 476 | grouped[key] = envelope.offset; 477 | } 478 | } 479 | 480 | List offsets = []; 481 | for (var key in grouped.keys) { 482 | offsets.add(new ConsumerOffset( 483 | key.topicName, key.partitionId, grouped[key], commitMetadata)); 484 | } 485 | 486 | return offsets; 487 | } 488 | } 489 | -------------------------------------------------------------------------------- /lib/src/consumer_group.dart: -------------------------------------------------------------------------------- 1 | part of kafka; 2 | 3 | class ConsumerGroup { 4 | final KafkaSession session; 5 | final String name; 6 | 7 | Broker _coordinatorHost; 8 | 9 | ConsumerGroup(this.session, this.name); 10 | 11 | /// Retrieves offsets of this consumer group from the server. 12 | /// 13 | /// Keys in [topicPartitions] map are topic names and values are corresponding 14 | /// partition IDs. 15 | Future> fetchOffsets( 16 | Map> topicPartitions) async { 17 | return _fetchOffsets(topicPartitions, retries: 3); 18 | } 19 | 20 | /// Internal method for fetching offsets with retries. 21 | Future> _fetchOffsets( 22 | Map> topicPartitions, 23 | {int retries: 0, 24 | bool refresh: false}) async { 25 | var host = await _getCoordinator(refresh: refresh); 26 | var request = new OffsetFetchRequest(name, topicPartitions); 27 | var response = await session.send(host, request); 28 | var offsets = new List.from(response.offsets); 29 | 30 | for (var offset in offsets) { 31 | var error = new KafkaServerError(offset.errorCode); 32 | if (error.isNotCoordinatorForConsumer && retries > 1) { 33 | // Re-fetch coordinator metadata and try again 34 | kafkaLogger?.info( 35 | 'ConsumerGroup(${name}): encountered API error 16 (NotCoordinatorForConsumerCode) when fetching offsets. Scheduling retry with metadata refresh.'); 36 | return _fetchOffsets(topicPartitions, 37 | retries: retries - 1, refresh: true); 38 | } else if (error.isOffsetsLoadInProgress && retries > 1) { 39 | // Wait a little and try again. 40 | kafkaLogger?.info( 41 | 'ConsumerGroup(${name}): encountered API error 14 (OffsetsLoadInProgressCode) when fetching offsets. 
Scheduling retry after delay.'); 42 | return new Future>.delayed( 43 | const Duration(seconds: 1), () async { 44 | return _fetchOffsets(topicPartitions, retries: retries - 1); 45 | }); 46 | } else if (error.isError) { 47 | kafkaLogger?.info( 48 | 'ConsumerGroup(${name}): fetchOffsets failed. Error code: ${offset.errorCode} for partition ${offset.partitionId} of ${offset.topicName}.'); 49 | throw error; 50 | } 51 | } 52 | 53 | return offsets; 54 | } 55 | 56 | /// Commits provided [offsets] to the server for this consumer group. 57 | Future commitOffsets(List offsets, int consumerGenerationId, 58 | String consumerId) async { 59 | return _commitOffsets(offsets, consumerGenerationId, consumerId, 60 | retries: 3); 61 | } 62 | 63 | /// Internal method for commiting offsets with retries. 64 | Future _commitOffsets( 65 | List offsets, int consumerGenerationId, String consumerId, 66 | {int retries: 0, bool refresh: false}) async { 67 | var host = await _getCoordinator(refresh: refresh); 68 | var request = new OffsetCommitRequest(name, offsets, consumerGenerationId, 69 | consumerId, -1); // TODO: allow to customize retention time. 70 | OffsetCommitResponse response = await session.send(host, request); 71 | for (var offset in response.offsets) { 72 | var error = new KafkaServerError(offset.errorCode); 73 | if (error.isNotCoordinatorForConsumer && retries > 1) { 74 | // Re-fetch coordinator metadata and try again 75 | kafkaLogger?.info( 76 | 'ConsumerGroup(${name}): encountered API error 16 (NotCoordinatorForConsumerCode) when commiting offsets. Scheduling retry with metadata refresh.'); 77 | return _commitOffsets(offsets, consumerGenerationId, consumerId, 78 | retries: retries - 1, refresh: true); 79 | } else if (error.isError) { 80 | kafkaLogger?.info( 81 | 'ConsumerGroup(${name}): commitOffsets failed. Error code: ${offset.errorCode} for partition ${offset.partitionId} of ${offset.topicName}.'); 82 | throw error; 83 | } 84 | } 85 | 86 | return null; 87 | } 88 | 89 | Future resetOffsetsToEarliest(Map> topicPartitions) async { 90 | var offsetMaster = new OffsetMaster(session); 91 | var earliestOffsets = await offsetMaster.fetchEarliest(topicPartitions); 92 | var offsets = new List(); 93 | for (var earliest in earliestOffsets) { 94 | // When consuming we always pass `currentOffset + 1` to fetch next 95 | // message so here we need to substract 1 from earliest offset, otherwise 96 | // we'll end up in an infinite loop of "InvalidOffset" errors. 97 | var actualOffset = earliest.offset - 1; 98 | offsets.add(new ConsumerOffset(earliest.topicName, earliest.partitionId, 99 | actualOffset, 'resetToEarliest')); 100 | } 101 | 102 | return commitOffsets(offsets, -1, ''); 103 | } 104 | 105 | Future resetOffsetsToLatest(Map> topicPartitions) async { 106 | var offsetMaster = new OffsetMaster(session); 107 | var latestOffsets = await offsetMaster.fetchLatest(topicPartitions); 108 | var offsets = new List(); 109 | for (var latest in latestOffsets) { 110 | var actualOffset = latest.offset - 1; 111 | offsets.add(new ConsumerOffset(latest.topicName, latest.partitionId, 112 | actualOffset, 'resetToEarliest')); 113 | } 114 | 115 | return commitOffsets(offsets, -1, ''); 116 | } 117 | 118 | /// Returns instance of coordinator host for this consumer group. 
119 | Future _getCoordinator({bool refresh: false}) async { 120 | if (refresh) { 121 | _coordinatorHost = null; 122 | } 123 | 124 | if (_coordinatorHost == null) { 125 | var metadata = await session.getConsumerMetadata(name); 126 | _coordinatorHost = new Broker(metadata.coordinatorId, 127 | metadata.coordinatorHost, metadata.coordinatorPort); 128 | } 129 | 130 | return _coordinatorHost; 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /lib/src/fetcher.dart: -------------------------------------------------------------------------------- 1 | part of kafka; 2 | 3 | /// Message Fetcher. 4 | /// 5 | /// Main difference to [Consumer] is that this class does not store it's state 6 | /// in consumer metadata. 7 | /// 8 | /// It will fetch all messages starting from specified [topicOffsets]. If no 9 | /// limit is set it will run forever consuming all incoming messages. 10 | class Fetcher { 11 | /// Instance of Kafka session. 12 | final KafkaSession session; 13 | 14 | /// Offsets to start from. 15 | final List topicOffsets; 16 | 17 | Fetcher(this.session, this.topicOffsets); 18 | 19 | /// Consumes messages from Kafka topics. 20 | /// 21 | /// It will start from specified [topicOffsets]. If no [limit] is set it will 22 | /// run continuously consuming all incoming messages. 23 | Stream fetch({int limit: -1}) { 24 | var controller = new _MessageStreamController(limit); 25 | 26 | Future> list = _buildWorkers(controller); 27 | list.then((workers) { 28 | if (workers.isEmpty) { 29 | controller.close(); 30 | return; 31 | } 32 | var remaining = workers.length; 33 | var futures = workers.map((w) => w.run()).toList(); 34 | futures.forEach((Future f) { 35 | f.then((_) { 36 | remaining--; 37 | if (remaining == 0) { 38 | kafkaLogger 39 | ?.info('Fetcher: All workers are done. 
Closing the stream.'); 40 | controller.close(); 41 | } 42 | }); 43 | }); 44 | }); 45 | 46 | return controller.stream; 47 | } 48 | 49 | Future> _buildWorkers( 50 | _MessageStreamController controller) async { 51 | var topicNames = new Set.from(topicOffsets.map((_) => _.topicName)); 52 | var meta = await session.getMetadata(topicNames); 53 | var offsetsByBroker = new Map>(); 54 | 55 | topicOffsets.forEach((offset) { 56 | var leader = meta 57 | .getTopicMetadata(offset.topicName) 58 | .getPartition(offset.partitionId) 59 | .leader; 60 | var broker = meta.getBroker(leader); 61 | if (offsetsByBroker.containsKey(broker) == false) { 62 | offsetsByBroker[broker] = new List(); 63 | } 64 | offsetsByBroker[broker].add(offset); 65 | }); 66 | 67 | var workers = new List<_FetcherWorker>(); 68 | offsetsByBroker.forEach((host, offsets) { 69 | workers 70 | .add(new _FetcherWorker(session, host, controller, offsets, 100, 1)); 71 | }); 72 | 73 | return workers; 74 | } 75 | } 76 | 77 | class _FetcherWorker { 78 | final KafkaSession session; 79 | final Broker broker; 80 | final _MessageStreamController controller; 81 | final List startFromOffsets; 82 | final int maxWaitTime; 83 | final int minBytes; 84 | 85 | _FetcherWorker(this.session, this.broker, this.controller, 86 | this.startFromOffsets, this.maxWaitTime, this.minBytes); 87 | 88 | Future run() async { 89 | kafkaLogger?.info( 90 | 'Fetcher: Running worker on broker ${broker.host}:${broker.port}'); 91 | var offsets = startFromOffsets.toList(); 92 | 93 | while (controller.canAdd) { 94 | var request = await _createRequest(offsets); 95 | FetchResponse response = await session.send(broker, request); 96 | _checkResponseForErrors(response); 97 | 98 | for (var item in response.results) { 99 | for (var offset in item.messageSet.messages.keys) { 100 | var message = item.messageSet.messages[offset]; 101 | var envelope = new MessageEnvelope( 102 | item.topicName, item.partitionId, offset, message); 103 | if (!controller.add(envelope)) { 104 | return; 105 | } else { 106 | var result = await envelope.result; 107 | if (result.status == _ProcessingStatus.cancel) { 108 | controller.cancel(); 109 | return; 110 | } 111 | } 112 | } 113 | if (item.messageSet.messages.isNotEmpty) { 114 | var nextOffset = new TopicOffset(item.topicName, item.partitionId, 115 | item.messageSet.messages.keys.last + 1); 116 | var previousOffset = offsets.firstWhere((o) => 117 | o.topicName == item.topicName && 118 | o.partitionId == item.partitionId); 119 | offsets.remove(previousOffset); 120 | offsets.add(nextOffset); 121 | } 122 | } 123 | } 124 | } 125 | 126 | Future _createRequest(List offsets) async { 127 | var offsetMaster = new OffsetMaster(session); 128 | var request = new FetchRequest(maxWaitTime, minBytes); 129 | for (var o in offsets) { 130 | if (o.isEarliest) { 131 | var result = await offsetMaster.fetchEarliest({ 132 | o.topicName: [o.partitionId].toSet() 133 | }); 134 | request.add(result.first.topicName, result.first.partitionId, 135 | result.first.offset); 136 | } else { 137 | request.add(o.topicName, o.partitionId, o.offset); 138 | } 139 | } 140 | 141 | return request; 142 | } 143 | 144 | _checkResponseForErrors(FetchResponse response) { 145 | if (!response.hasErrors) return; 146 | 147 | for (var result in response.results) { 148 | if (result.errorCode != KafkaServerError.NoError) { 149 | throw new KafkaServerError(result.errorCode); 150 | } 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /lib/src/offset_master.dart: 
-------------------------------------------------------------------------------- 1 | part of kafka; 2 | 3 | /// Master of Offsets. 4 | /// 5 | /// Encapsulates auto-discovery logic for fetching topic offsets. 6 | class OffsetMaster { 7 | /// Instance of KafkaSession. 8 | final KafkaSession session; 9 | 10 | /// Creates new OffsetMaster. 11 | OffsetMaster(this.session); 12 | 13 | /// Returns earliest offsets for specified topics and partitions. 14 | Future> fetchEarliest( 15 | Map> topicPartitions) { 16 | return _fetch(topicPartitions, -2); 17 | } 18 | 19 | /// Returns latest offsets (that is the offset of next incoming message) 20 | /// for specified topics and partitions. 21 | /// 22 | /// These offsets are also called 'highWatermark' offsets in Kafka docs. 23 | Future> fetchLatest(Map> topicPartitions) { 24 | return _fetch(topicPartitions, -1); 25 | } 26 | 27 | Future> _fetch( 28 | Map> topicPartitions, int time, 29 | {refreshMetadata: false}) async { 30 | var meta = await session.getMetadata(topicPartitions.keys.toSet(), 31 | invalidateCache: refreshMetadata); 32 | var requests = new Map(); 33 | for (var topic in topicPartitions.keys) { 34 | var partitions = topicPartitions[topic]; 35 | for (var p in partitions) { 36 | var leader = meta.getTopicMetadata(topic).getPartition(p).leader; 37 | var host = meta.getBroker(leader); 38 | if (!requests.containsKey(host)) { 39 | requests[host] = new OffsetRequest(leader); 40 | } 41 | requests[host].addTopicPartition(topic, p, time, 1); 42 | } 43 | } 44 | 45 | var offsets = new List(); 46 | for (var host in requests.keys) { 47 | var request = requests[host]; 48 | OffsetResponse response = await session.send(host, request); 49 | for (var o in response.offsets) { 50 | var error = new KafkaServerError(o.errorCode); 51 | if (error.isNotLeaderForPartition && refreshMetadata == false) { 52 | // Refresh metadata and try again. 53 | return _fetch(topicPartitions, time, refreshMetadata: true); 54 | } 55 | 56 | if (error.isError) throw error; 57 | offsets 58 | .add(new TopicOffset(o.topicName, o.partitionId, o.offsets.first)); 59 | } 60 | } 61 | 62 | return offsets; 63 | } 64 | } 65 | 66 | /// Represents an offset of particular topic and partition. 67 | class TopicOffset { 68 | final String topicName; 69 | final int partitionId; 70 | final int offset; 71 | 72 | TopicOffset(this.topicName, this.partitionId, this.offset); 73 | 74 | /// Creates pseudo-offset which refers to earliest offset in this topic 75 | /// and partition. 76 | TopicOffset.earliest(this.topicName, this.partitionId) : offset = -2; 77 | 78 | /// Creates pseudo-offset which refers to latest offset in this topic and 79 | /// partition. 80 | TopicOffset.latest(this.topicName, this.partitionId) : offset = -1; 81 | 82 | /// Indicates whether this is an earliest pseudo-offset. 83 | bool get isEarliest => offset == -2; 84 | 85 | /// Indicates whether this is a latest pseudo-offset. 86 | bool get isLatest => offset == -1; 87 | } 88 | -------------------------------------------------------------------------------- /lib/src/producer.dart: -------------------------------------------------------------------------------- 1 | part of kafka; 2 | 3 | /// High-level Producer for Kafka. 4 | /// 5 | /// Producer encapsulates logic for broker discovery when publishing messages to 6 | /// multiple topic-partitions. It will send as many ProduceRequests as needed 7 | /// based on leader assignment for corresponding topic-partitions. 
8 | /// 9 | /// Requests will be sent in parallel and results will be aggregated in 10 | /// [ProduceResult]. 11 | class Producer { 12 | /// Instance of [KafkaSession] which is used to send requests to Kafka brokers. 13 | final KafkaSession session; 14 | 15 | /// How many acknowledgements the servers should receive before responding to the request. 16 | /// 17 | /// * If it is 0 the server will not send any response. 18 | /// * If it is 1, the server will wait until the data is written to the local log before sending a response. 19 | /// * If it is -1 the server will block until the message is committed by all in sync replicas before sending a response. 20 | /// * For any number > 1 the server will block waiting for this number of acknowledgements to occur. 21 | final int requiredAcks; 22 | 23 | /// Maximum time in milliseconds the server can await the receipt of the 24 | /// number of acknowledgements in [requiredAcks]. 25 | final int timeout; 26 | 27 | /// Creates new instance of [Producer]. 28 | /// 29 | /// [requiredAcks] specifies how many acknowledgements the servers should 30 | /// receive before responding to the request. 31 | /// 32 | /// [timeout] specifies maximum time in milliseconds the server can await 33 | /// the receipt of the number of acknowledgements in [requiredAcks]. 34 | Producer(this.session, this.requiredAcks, this.timeout); 35 | 36 | /// Sends messages to Kafka with "at least once" guarantee. 37 | /// 38 | /// Producer will attempt to retry requests when the Kafka server returns any of 39 | /// the retriable errors. See [ProduceResult.hasRetriableErrors] for details. 40 | /// 41 | /// In case of such errors the producer will attempt to re-send **all the messages** 42 | /// and this may lead to duplicate records in the stream (therefore 43 | /// "at least once" guarantee). 44 | /// 45 | /// If the server returns errors which cannot be retried then the returned future will 46 | /// be completed with [ProduceError]. One can still access `ProduceResult` from 47 | /// it. 48 | /// 49 | /// In case of any non-protocol errors the returned future will complete with the actual 50 | /// error that was thrown. 51 | Future produce(List messages) { 52 | return _produce(messages); 53 | } 54 | 55 | Future _produce(List messages, 56 | {bool refreshMetadata: false, 57 | int retryTimes: 3, 58 | Duration retryInterval: const Duration(seconds: 1)}) async { 59 | var topicNames = new Set.from(messages.map((_) => _.topicName)); 60 | var meta = 61 | await session.getMetadata(topicNames, invalidateCache: refreshMetadata); 62 | 63 | var byBroker = new ListMultimap.fromIterable( 64 | messages, key: (ProduceEnvelope _) { 65 | var leaderId = 66 | meta.getTopicMetadata(_.topicName).getPartition(_.partitionId).leader; 67 | return meta.getBroker(leaderId); 68 | }); 69 | kafkaLogger.fine('Producer: sending ProduceRequests'); 70 | 71 | Iterable futures = new List.from(byBroker.keys.map( 72 | (broker) => session.send(broker, 73 | new ProduceRequest(requiredAcks, timeout, byBroker[broker])))); 74 | 75 | var result = await Future.wait(futures).then((responses) => 76 | new ProduceResult.fromResponses( 77 | new List.from(responses))); 78 | 79 | if (!result.hasErrors) return result; 80 | if (retryTimes <= 0) return result; 81 | 82 | if (result.hasRetriableErrors) { 83 | kafkaLogger.warning( 84 | 'Producer: server returned errors which can be retried.
All returned errors are: ${result.errors}'); 85 | kafkaLogger.info( 86 | 'Producer: will retry after ${retryInterval.inSeconds} seconds.'); 87 | var retriesLeft = retryTimes - 1; 88 | var newInterval = new Duration(seconds: retryInterval.inSeconds * 2); 89 | return new Future.delayed( 90 | retryInterval, 91 | () => _produce(messages, 92 | refreshMetadata: true, 93 | retryTimes: retriesLeft, 94 | retryInterval: newInterval)); 95 | } else if (result.hasErrors) { 96 | throw new ProduceError(result); 97 | } else { 98 | return result; 99 | } 100 | } 101 | } 102 | 103 | /// Exception thrown in case when server returned errors in response to 104 | /// `Producer.produce()`. 105 | class ProduceError implements Exception { 106 | final ProduceResult result; 107 | 108 | ProduceError(this.result); 109 | 110 | @override 111 | toString() => 'ProduceError: ${result.errors}'; 112 | } 113 | 114 | /// Result of producing messages with [Producer]. 115 | class ProduceResult { 116 | /// List of actual ProduceResponse objects returned by the server. 117 | final List responses; 118 | 119 | /// Indicates whether any of server responses contain errors. 120 | final bool hasErrors; 121 | 122 | /// Collection of all unique errors returned by the server. 123 | final Iterable errors; 124 | 125 | /// Offsets for latest messages for each topic-partition assigned by the server. 126 | final Map> offsets; 127 | 128 | ProduceResult._(this.responses, Set errors, this.offsets) 129 | : hasErrors = errors.isNotEmpty, 130 | errors = new UnmodifiableListView(errors); 131 | 132 | factory ProduceResult.fromResponses(Iterable responses) { 133 | var errors = new Set(); 134 | var offsets = new Map>(); 135 | for (var r in responses) { 136 | var er = r.results 137 | .where((_) => _.errorCode != KafkaServerError.NoError) 138 | .map((result) => new KafkaServerError(result.errorCode)); 139 | errors.addAll(new Set.from(er)); 140 | r.results.forEach((result) { 141 | offsets.putIfAbsent(result.topicName, () => new Map()); 142 | offsets[result.topicName][result.partitionId] = result.offset; 143 | }); 144 | } 145 | 146 | return new ProduceResult._(responses, errors, offsets); 147 | } 148 | 149 | /// Returns `true` if this result contains server error with specified [code]. 150 | bool hasError(int code) => errors.contains(new KafkaServerError(code)); 151 | 152 | /// Returns `true` if at least one server error in this result can be retried. 153 | bool get hasRetriableErrors { 154 | return hasError(KafkaServerError.LeaderNotAvailable) || 155 | hasError(KafkaServerError.NotLeaderForPartition) || 156 | hasError(KafkaServerError.RequestTimedOut) || 157 | hasError(KafkaServerError.NotEnoughReplicasCode) || 158 | hasError(KafkaServerError.NotEnoughReplicasAfterAppendCode); 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /lib/src/protocol/bytes_builder.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | enum KafkaType { int8, int16, int32, int64, string, bytes, object } 4 | 5 | /// Bytes builder specific to Kafka protocol. 6 | /// 7 | /// Provides convenient methods for writing all Kafka data types (and some more): 8 | /// int8, int16, int32, string, bytes, array. 9 | class KafkaBytesBuilder { 10 | BytesBuilder _builder = new BytesBuilder(); 11 | 12 | int get length => _builder.length; 13 | 14 | /// Creates new builder with empty buffer. 
15 | KafkaBytesBuilder(); 16 | 17 | /// Creates new builder and initializes buffer with proper request header. 18 | KafkaBytesBuilder.withRequestHeader( 19 | int apiKey, int apiVersion, int correlationId) { 20 | addInt16(apiKey); 21 | addInt16(apiVersion); 22 | addInt32(correlationId); 23 | addString(dartKafkaId); 24 | } 25 | 26 | /// Adds 8 bit integer to this buffer. 27 | void addInt8(int value) { 28 | ByteData bdata = new ByteData(1); 29 | bdata.setInt8(0, value); 30 | _add(bdata); 31 | } 32 | 33 | /// Adds 16 bit integer to this buffer. 34 | void addInt16(int value) { 35 | ByteData bdata = new ByteData(2); 36 | bdata.setInt16(0, value); 37 | _add(bdata); 38 | } 39 | 40 | /// Adds 32 bit integer to this buffer. 41 | void addInt32(int value) { 42 | ByteData bdata = new ByteData(4); 43 | bdata.setInt32(0, value); 44 | _add(bdata); 45 | } 46 | 47 | /// Adds 64 bit integer to this buffer. 48 | void addInt64(int value) { 49 | ByteData bdata = new ByteData(8); 50 | bdata.setInt64(0, value); 51 | _add(bdata); 52 | } 53 | 54 | /// Adds Kafka string to this bytes builder. 55 | /// 56 | /// Kafka string type starts with int16 indicating size of the string 57 | /// followed by the actual string value. 58 | void addString(String value) { 59 | List data = UTF8.encode(value); 60 | addInt16(data.length); 61 | _builder.add(data); 62 | } 63 | 64 | /// Adds Kafka array to this bytes builder. 65 | /// 66 | /// Kafka array starts with int32 indicating size of the array followed by 67 | /// the array items encoded according to their [KafkaType] 68 | void addArray(Iterable items, KafkaType itemType) { 69 | addInt32(items.length); 70 | for (var item in items) { 71 | switch (itemType) { 72 | case KafkaType.int8: 73 | addInt8(item); 74 | break; 75 | case KafkaType.int16: 76 | addInt16(item); 77 | break; 78 | case KafkaType.int32: 79 | addInt32(item); 80 | break; 81 | case KafkaType.int64: 82 | addInt64(item); 83 | break; 84 | case KafkaType.string: 85 | addString(item); 86 | break; 87 | case KafkaType.bytes: 88 | addBytes(new List.from(item)); 89 | break; 90 | case KafkaType.object: 91 | throw new StateError('Objects are not supported yet'); 92 | break; 93 | } 94 | } 95 | } 96 | 97 | /// Adds value of Kafka-specific Bytes type to this builder. 98 | /// 99 | /// Kafka Bytes type starts with int32 indicating size of the value following 100 | /// by actual value bytes. 101 | void addBytes(List value) { 102 | if (value == null) { 103 | addInt32(-1); 104 | } else { 105 | addInt32(value.length); 106 | _builder.add(value); 107 | } 108 | } 109 | 110 | /// Adds arbitrary data to this buffer. 111 | void addRaw(List data) { 112 | _builder.add(data); 113 | } 114 | 115 | void _add(ByteData data) { 116 | _builder.add(data.buffer.asInt8List().toList(growable: false)); 117 | } 118 | 119 | List takeBytes() => _builder.takeBytes(); 120 | 121 | List toBytes() => _builder.toBytes(); 122 | } 123 | -------------------------------------------------------------------------------- /lib/src/protocol/bytes_reader.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Provides convenience methods read Kafka specific data types from a stream of bytes. 4 | class KafkaBytesReader { 5 | Int8List _data; 6 | int _offset = 0; 7 | 8 | /// Current position in this buffer. 9 | int get offset => _offset; 10 | 11 | /// Size of this byte buffer. 12 | int get length => _data.length; 13 | 14 | /// Whether this bytes buffer has been fully read. 
15 | bool get isEOF => _data.length == _offset; 16 | 17 | /// Whether there are still unread bytes left in this buffer. 18 | bool get isNotEOF => !isEOF; 19 | 20 | /// Creates reader from a list of bytes. 21 | KafkaBytesReader.fromBytes(List data) { 22 | this._data = new Int8List.fromList(data); 23 | } 24 | 25 | // Reads int8 from the data and returns it. 26 | int readInt8() { 27 | var data = new ByteData.view(_data.buffer, _offset, 1); 28 | var value = data.getInt8(0); 29 | _offset += 1; 30 | 31 | return value; 32 | } 33 | 34 | /// Reads 16-bit integer from the current position of this buffer. 35 | int readInt16() { 36 | var data = new ByteData.view(_data.buffer, _offset, 2); 37 | var value = data.getInt16(0); 38 | _offset += 2; 39 | 40 | return value; 41 | } 42 | 43 | /// Reads 32-bit integer from the current position of this buffer. 44 | int readInt32() { 45 | var data = new ByteData.view(_data.buffer, _offset, 4); 46 | var value = data.getInt32(0); 47 | _offset += 4; 48 | 49 | return value; 50 | } 51 | 52 | /// Reads 64-bit integer from the current position of this buffer. 53 | int readInt64() { 54 | var data = new ByteData.view(_data.buffer, _offset, 8); 55 | var value = data.getInt64(0); 56 | _offset += 8; 57 | 58 | return value; 59 | } 60 | 61 | String readString() { 62 | var length = readInt16(); 63 | var value = _data.buffer.asInt8List(_offset, length).toList(); 64 | var valueAsString = UTF8.decode(value); 65 | _offset += length; 66 | 67 | return valueAsString; 68 | } 69 | 70 | List readBytes() { 71 | var length = readInt32(); 72 | if (length == -1) { 73 | return null; 74 | } else { 75 | var value = _data.buffer.asInt8List(_offset, length).toList(); 76 | _offset += length; 77 | return value; 78 | } 79 | } 80 | 81 | List readArray(KafkaType itemType, 82 | [dynamic objectReadHandler(KafkaBytesReader reader)]) { 83 | var length = readInt32(); 84 | var items = new List(); 85 | for (var i = 0; i < length; i++) { 86 | switch (itemType) { 87 | case KafkaType.int8: 88 | items.add(readInt8()); 89 | break; 90 | case KafkaType.int16: 91 | items.add(readInt16()); 92 | break; 93 | case KafkaType.int32: 94 | items.add(readInt32()); 95 | break; 96 | case KafkaType.int64: 97 | items.add(readInt64()); 98 | break; 99 | case KafkaType.string: 100 | items.add(readString()); 101 | break; 102 | case KafkaType.bytes: 103 | items.add(readBytes()); 104 | break; 105 | case KafkaType.object: 106 | if (objectReadHandler == null) { 107 | throw new StateError('ObjectReadHandler must be provided'); 108 | } 109 | items.add(objectReadHandler(this)); 110 | break; 111 | } 112 | } 113 | 114 | return items; 115 | } 116 | 117 | /// Reads raw bytes from this buffer. 118 | List readRaw(int length) { 119 | var value = _data.buffer.asInt8List(_offset, length).toList(); 120 | _offset += length; 121 | 122 | return value; 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /lib/src/protocol/common.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Base interface for all Kafka API requests. 
4 | abstract class KafkaRequest { 5 | static final _random = new Random(); 6 | 7 | final int correlationId; 8 | 9 | KafkaRequest() : correlationId = _random.nextInt(65536); 10 | 11 | List toBytes(); 12 | 13 | dynamic createResponse(List data); 14 | } 15 | -------------------------------------------------------------------------------- /lib/src/protocol/consumer_metadata_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka ConsumerMetadataRequest. 4 | class GroupCoordinatorRequest extends KafkaRequest { 5 | final int apiKey = 10; 6 | final int apiVersion = 0; 7 | final String consumerGroup; 8 | 9 | /// Creates new instance of ConsumerMetadataRequest. 10 | GroupCoordinatorRequest(this.consumerGroup) : super(); 11 | 12 | /// Converts this request into byte list 13 | @override 14 | List toBytes() { 15 | var builder = new KafkaBytesBuilder.withRequestHeader( 16 | apiKey, apiVersion, correlationId); 17 | 18 | builder.addString(consumerGroup); 19 | 20 | var body = builder.takeBytes(); 21 | builder.addBytes(body); 22 | 23 | return builder.takeBytes(); 24 | } 25 | 26 | @override 27 | createResponse(List data) { 28 | return new GroupCoordinatorResponse.fromBytes(data); 29 | } 30 | } 31 | 32 | /// Response for [GroupCoordinatorRequest]. 33 | class GroupCoordinatorResponse { 34 | final int errorCode; 35 | final int coordinatorId; 36 | final String coordinatorHost; 37 | final int coordinatorPort; 38 | 39 | Broker get coordinator => 40 | new Broker(coordinatorId, coordinatorHost, coordinatorPort); 41 | 42 | /// Creates new instance of ConsumerMetadataResponse. 43 | GroupCoordinatorResponse(this.errorCode, this.coordinatorId, 44 | this.coordinatorHost, this.coordinatorPort); 45 | 46 | /// Creates response from provided data. 47 | factory GroupCoordinatorResponse.fromBytes(List data) { 48 | var reader = new KafkaBytesReader.fromBytes(data); 49 | var size = reader.readInt32(); 50 | assert(size == data.length - 4); 51 | 52 | reader.readInt32(); // correlationId 53 | var errorCode = reader.readInt16(); 54 | var id = reader.readInt32(); 55 | var host = reader.readString(); 56 | var port = reader.readInt32(); 57 | 58 | return new GroupCoordinatorResponse(errorCode, id, host, port); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /lib/src/protocol/fetch_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka FetchRequest. 4 | class FetchRequest extends KafkaRequest { 5 | /// API key of [FetchRequest] 6 | final int apiKey = 1; 7 | 8 | /// API version of [FetchRequest] 9 | final int apiVersion = 0; 10 | 11 | /// The replica id indicates the node id of the replica initiating this request. 12 | /// Normal consumers should always specify this as -1 as they have no node id. 13 | final int _replicaId = -1; 14 | 15 | /// Maximum amount of time in milliseconds to block waiting if insufficient 16 | /// data is available at the time the request is issued. 17 | final int maxWaitTime; 18 | 19 | /// Minimum number of bytes of messages that must be available 20 | /// to give a response. 21 | final int minBytes; 22 | 23 | Map> _topics = new Map(); 24 | 25 | /// Creates new instance of FetchRequest. 
26 | FetchRequest(this.maxWaitTime, this.minBytes) : super(); 27 | 28 | @override 29 | toString() => 'FetchRequest(${maxWaitTime}, ${minBytes}, ${_topics})'; 30 | 31 | /// Adds [topicName] with [paritionId] to this FetchRequest. [fetchOffset] 32 | /// defines the offset to begin this fetch from. 33 | void add(String topicName, int partitionId, int fetchOffset, 34 | [int maxBytes = 65536]) { 35 | // 36 | if (!_topics.containsKey(topicName)) { 37 | _topics[topicName] = new List(); 38 | } 39 | _topics[topicName] 40 | .add(new _FetchPartitionInfo(partitionId, fetchOffset, maxBytes)); 41 | } 42 | 43 | @override 44 | List toBytes() { 45 | var builder = new KafkaBytesBuilder.withRequestHeader( 46 | apiKey, apiVersion, correlationId); 47 | 48 | builder.addInt32(_replicaId); 49 | builder.addInt32(maxWaitTime); 50 | builder.addInt32(minBytes); 51 | 52 | builder.addInt32(_topics.length); 53 | _topics.forEach((topicName, partitions) { 54 | builder.addString(topicName); 55 | builder.addInt32(partitions.length); 56 | partitions.forEach((p) { 57 | builder.addInt32(p.partitionId); 58 | builder.addInt64(p.fetchOffset); 59 | builder.addInt32(p.maxBytes); 60 | }); 61 | }); 62 | 63 | var body = builder.takeBytes(); 64 | builder.addBytes(body); 65 | 66 | return builder.takeBytes(); 67 | } 68 | 69 | @override 70 | createResponse(List data) { 71 | return new FetchResponse.fromBytes(data); 72 | } 73 | } 74 | 75 | class _FetchPartitionInfo { 76 | int partitionId; 77 | int fetchOffset; 78 | int maxBytes; 79 | _FetchPartitionInfo(this.partitionId, this.fetchOffset, this.maxBytes); 80 | } 81 | 82 | /// Kafka FetchResponse. 83 | class FetchResponse { 84 | /// List of [FetchResult]s for each topic-partition. 85 | final List results; 86 | 87 | /// Indicates if server returned any errors in this response. 88 | /// 89 | /// Actual errors can be found in the result object for particular 90 | /// topic-partition. 91 | final bool hasErrors; 92 | 93 | FetchResponse._(this.results, this.hasErrors); 94 | 95 | /// Creates new instance of FetchResponse from binary data. 96 | factory FetchResponse.fromBytes(List data) { 97 | var reader = new KafkaBytesReader.fromBytes(data); 98 | var size = reader.readInt32(); 99 | assert(size == data.length - 4); 100 | 101 | reader.readInt32(); // correlationId 102 | var count = reader.readInt32(); 103 | var results = new List(); 104 | var hasErrors = false; 105 | while (count > 0) { 106 | var topicName = reader.readString(); 107 | var partitionCount = reader.readInt32(); 108 | while (partitionCount > 0) { 109 | var partitionId = reader.readInt32(); 110 | var errorCode = reader.readInt16(); 111 | var highwaterMarkOffset = reader.readInt64(); 112 | var messageSetSize = reader.readInt32(); 113 | var data = reader.readRaw(messageSetSize); 114 | var messageReader = new KafkaBytesReader.fromBytes(data); 115 | var messageSet = new MessageSet.fromBytes(messageReader); 116 | if (errorCode != KafkaServerError.NoError) hasErrors = true; 117 | 118 | results.add(new FetchResult(topicName, partitionId, errorCode, 119 | highwaterMarkOffset, messageSet)); 120 | partitionCount--; 121 | } 122 | count--; 123 | } 124 | 125 | return new FetchResponse._(results, hasErrors); 126 | } 127 | } 128 | 129 | /// Data structure representing result of fetching messages for particular 130 | /// topic-partition. 
131 | class FetchResult { 132 | final String topicName; 133 | final int partitionId; 134 | final int errorCode; 135 | final int highwaterMarkOffset; 136 | final MessageSet messageSet; 137 | 138 | FetchResult(this.topicName, this.partitionId, this.errorCode, 139 | this.highwaterMarkOffset, this.messageSet); 140 | } 141 | -------------------------------------------------------------------------------- /lib/src/protocol/group_membership_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | -------------------------------------------------------------------------------- /lib/src/protocol/messages.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka MessageSet type. 4 | class MessageSet { 5 | /// Collection of messages. Keys in the map are message offsets. 6 | final Map _messages; 7 | 8 | /// Map of message offsets to corresponding messages. 9 | Map get messages => new UnmodifiableMapView(_messages); 10 | 11 | /// Number of messages in this set. 12 | int get length => _messages.length; 13 | 14 | MessageSet._(this._messages); 15 | 16 | /// Builds new message set for publishing. 17 | factory MessageSet.build(ProduceEnvelope envelope) { 18 | if (envelope.compression == KafkaCompression.none) { 19 | return new MessageSet._(envelope.messages.asMap()); 20 | } else { 21 | if (envelope.compression == KafkaCompression.snappy) 22 | throw new ArgumentError( 23 | 'Snappy compression is not supported yet by the client.'); 24 | 25 | var codec = new GZipCodec(); 26 | var innerEnvelope = new ProduceEnvelope( 27 | envelope.topicName, envelope.partitionId, envelope.messages); 28 | var innerMessageSet = new MessageSet.build(innerEnvelope); 29 | var value = codec.encode(innerMessageSet.toBytes()); 30 | var attrs = new MessageAttributes(KafkaCompression.gzip); 31 | 32 | return new MessageSet._({0: new Message(value, attributes: attrs)}); 33 | } 34 | } 35 | 36 | /// Creates new MessageSet from provided data. 37 | factory MessageSet.fromBytes(KafkaBytesReader reader) { 38 | int messageSize = -1; 39 | var messages = new Map(); 40 | while (reader.isNotEOF) { 41 | try { 42 | int offset = reader.readInt64(); 43 | messageSize = reader.readInt32(); 44 | var crc = reader.readInt32(); 45 | 46 | var data = reader.readRaw(messageSize - 4); 47 | var actualCrc = Crc32.signed(data); 48 | if (actualCrc != crc) { 49 | kafkaLogger?.warning( 50 | 'Message CRC sum mismatch. Expected crc: ${crc}, actual: ${actualCrc}'); 51 | throw new MessageCrcMismatchError( 52 | 'Expected crc: ${crc}, actual: ${actualCrc}'); 53 | } 54 | var messageReader = new KafkaBytesReader.fromBytes(data); 55 | var message = _readMessage(messageReader); 56 | if (message.attributes.compression == KafkaCompression.none) { 57 | messages[offset] = message; 58 | } else { 59 | if (message.attributes.compression == KafkaCompression.snappy) 60 | throw new ArgumentError( 61 | 'Snappy compression is not supported yet by the client.'); 62 | 63 | var codec = new GZipCodec(); 64 | var innerReader = 65 | new KafkaBytesReader.fromBytes(codec.decode(message.value)); 66 | var innerMessageSet = new MessageSet.fromBytes(innerReader); 67 | for (var innerOffset in innerMessageSet.messages.keys) { 68 | messages[innerOffset] = innerMessageSet.messages[innerOffset]; 69 | } 70 | } 71 | } on RangeError { 72 | // According to spec server is allowed to return partial 73 | // messages, so we just ignore it here and exit the loop. 
74 | var remaining = reader.length - reader.offset; 75 | kafkaLogger?.info( 76 | 'Encountered partial message. Expected message size: ${messageSize}, bytes left in buffer: ${remaining}, total buffer size ${reader.length}'); 77 | break; 78 | } 79 | } 80 | 81 | return new MessageSet._(messages); 82 | } 83 | 84 | static Message _readMessage(KafkaBytesReader reader) { 85 | reader.readInt8(); // magicByte 86 | var attributes = new MessageAttributes.fromByte(reader.readInt8()); 87 | var key = reader.readBytes(); 88 | var value = reader.readBytes(); 89 | 90 | return new Message(value, attributes: attributes, key: key); 91 | } 92 | 93 | /// Converts this MessageSet into sequence of bytes according to Kafka 94 | /// protocol. 95 | List toBytes() { 96 | var builder = new KafkaBytesBuilder(); 97 | _messages.forEach((offset, message) { 98 | var messageData = _messageToBytes(message); 99 | builder.addInt64(offset); 100 | builder.addInt32(messageData.length); 101 | builder.addRaw(messageData); 102 | }); 103 | 104 | return builder.toBytes(); 105 | } 106 | 107 | List _messageToBytes(Message message) { 108 | var builder = new KafkaBytesBuilder(); 109 | builder.addInt8(0); // magicByte 110 | builder.addInt8(message.attributes.toInt()); 111 | builder.addBytes(message.key); 112 | builder.addBytes(message.value); 113 | 114 | var data = builder.takeBytes(); 115 | int crc = Crc32.signed(data); 116 | builder.addInt32(crc); 117 | builder.addRaw(data); 118 | 119 | return builder.toBytes(); 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /lib/src/protocol/metadata_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka MetadataRequest. 4 | class MetadataRequest extends KafkaRequest { 5 | /// API key of [MetadataRequest] 6 | final int apiKey = 3; 7 | 8 | /// API version of [MetadataRequest] 9 | final int apiVersion = 0; 10 | 11 | /// List of topic names to fetch metadata for. If set to null or empty 12 | /// this request will fetch metadata for all topics. 13 | final Set topicNames; 14 | 15 | /// Creats new instance of Kafka MetadataRequest. 16 | /// 17 | /// If [topicNames] is omitted or empty then metadata for all existing topics 18 | /// will be returned. 19 | MetadataRequest([this.topicNames]) : super(); 20 | 21 | @override 22 | List toBytes() { 23 | var builder = new KafkaBytesBuilder.withRequestHeader( 24 | apiKey, apiVersion, correlationId); 25 | Set list = (this.topicNames is Set) ? this.topicNames : new Set(); 26 | builder.addArray(list, KafkaType.string); 27 | 28 | var body = builder.takeBytes(); 29 | builder.addBytes(body); 30 | 31 | return builder.takeBytes(); 32 | } 33 | 34 | @override 35 | createResponse(List data) { 36 | return new MetadataResponse.fromBytes(data); 37 | } 38 | } 39 | 40 | /// Kafka MetadataResponse. 41 | class MetadataResponse { 42 | /// List of brokers in the cluster. 43 | final List brokers; 44 | 45 | /// List with metadata for each topic. 46 | final List topics; 47 | 48 | MetadataResponse._(this.brokers, this.topics); 49 | 50 | /// Creates response from binary data. 
51 | factory MetadataResponse.fromBytes(List data) { 52 | var reader = new KafkaBytesReader.fromBytes(data); 53 | var size = reader.readInt32(); 54 | assert(size == data.length - 4); 55 | 56 | reader.readInt32(); // correlationId 57 | 58 | var brokers = reader.readArray(KafkaType.object, (reader) { 59 | return new Broker( 60 | reader.readInt32(), reader.readString(), reader.readInt32()); 61 | }); 62 | 63 | var topicMetadata = reader.readArray( 64 | KafkaType.object, (reader) => new TopicMetadata._readFrom(reader)); 65 | return new MetadataResponse._(new List.from(brokers), 66 | new List.from(topicMetadata)); 67 | } 68 | } 69 | 70 | /// Represents Kafka TopicMetadata data structure returned in MetadataResponse. 71 | class TopicMetadata { 72 | final int errorCode; 73 | final String topicName; 74 | final List partitions; 75 | 76 | TopicMetadata._(this.errorCode, this.topicName, this.partitions); 77 | 78 | factory TopicMetadata._readFrom(KafkaBytesReader reader) { 79 | var errorCode = reader.readInt16(); 80 | var topicName = reader.readString(); 81 | List partitions = reader.readArray( 82 | KafkaType.object, (reader) => new PartitionMetadata._readFrom(reader)); 83 | // ignore: STRONG_MODE_DOWN_CAST_COMPOSITE 84 | return new TopicMetadata._(errorCode, topicName, partitions); 85 | } 86 | 87 | PartitionMetadata getPartition(int partitionId) => 88 | partitions.firstWhere((p) => p.partitionId == partitionId); 89 | 90 | @override 91 | String toString() => 92 | "TopicMetadata(errorCode: ${errorCode}, name: ${topicName}, partitions: ${partitions.length})"; 93 | } 94 | 95 | /// Data structure representing partition metadata returned in MetadataResponse. 96 | class PartitionMetadata { 97 | final int partitionErrorCode; 98 | final int partitionId; 99 | final int leader; 100 | final List replicas; 101 | final List inSyncReplicas; 102 | 103 | PartitionMetadata._(this.partitionErrorCode, this.partitionId, this.leader, 104 | this.replicas, this.inSyncReplicas); 105 | 106 | factory PartitionMetadata._readFrom(KafkaBytesReader reader) { 107 | var errorCode = reader.readInt16(); 108 | var partitionId = reader.readInt32(); 109 | var leader = reader.readInt32(); 110 | var replicas = reader.readArray(KafkaType.int32); 111 | var inSyncReplicas = reader.readArray(KafkaType.int32); 112 | 113 | return new PartitionMetadata._( 114 | errorCode, 115 | partitionId, 116 | leader, 117 | replicas, // ignore: STRONG_MODE_DOWN_CAST_COMPOSITE 118 | inSyncReplicas); 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /lib/src/protocol/offset_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka OffsetRequest. 4 | class OffsetRequest extends KafkaRequest { 5 | /// API key of [OffsetRequest]. 6 | final int apiKey = 2; 7 | 8 | /// API version of [OffsetRequest]. 9 | final int apiVersion = 0; 10 | 11 | /// Unique ID assigned to the [host] within Kafka cluster. 12 | final int replicaId; 13 | 14 | Map> _topics = new Map(); 15 | 16 | /// Creates new instance of OffsetRequest. 17 | /// 18 | /// The [replicaId] argument indicates unique ID assigned to the [host] within 19 | /// Kafka cluster. One can obtain this information via [MetadataRequest]. 20 | OffsetRequest(this.replicaId) : super(); 21 | 22 | /// Adds topic and partition to this requests. 23 | /// 24 | /// [time] is used to ask for all messages before a certain time (ms). 
25 | /// There are two special values: 26 | /// * Specify -1 to receive the latest offset (that is the offset of the next coming message). 27 | /// * Specify -2 to receive the earliest available offset. 28 | /// 29 | /// [maxNumberOfOffsets] indicates max number of offsets to return. 30 | void addTopicPartition( 31 | String topicName, int partitionId, int time, int maxNumberOfOffsets) { 32 | if (_topics.containsKey(topicName) == false) { 33 | _topics[topicName] = new List(); 34 | } 35 | 36 | _topics[topicName].add( 37 | new _PartitionOffsetRequestInfo(partitionId, time, maxNumberOfOffsets)); 38 | } 39 | 40 | /// Converts this request into a binary representation according to Kafka 41 | /// protocol. 42 | @override 43 | List toBytes() { 44 | var builder = new KafkaBytesBuilder.withRequestHeader( 45 | apiKey, apiVersion, correlationId); 46 | builder.addInt32(replicaId); 47 | 48 | builder.addInt32(_topics.length); 49 | _topics.forEach((topicName, partitions) { 50 | builder.addString(topicName); 51 | builder.addInt32(partitions.length); 52 | partitions.forEach((p) { 53 | builder.addInt32(p.partitionId); 54 | builder.addInt64(p.time); 55 | builder.addInt32(p.maxNumberOfOffsets); 56 | }); 57 | }); 58 | 59 | var body = builder.takeBytes(); 60 | builder.addBytes(body); 61 | 62 | return builder.takeBytes(); 63 | } 64 | 65 | @override 66 | createResponse(List data) { 67 | return new OffsetResponse.fromBytes(data); 68 | } 69 | } 70 | 71 | /// Value object holding information about partition offsets to be fetched 72 | /// by [OffsetRequest]. 73 | class _PartitionOffsetRequestInfo { 74 | /// The ID of this partition. 75 | final int partitionId; 76 | 77 | /// Used to ask for all messages before a certain time (ms). 78 | /// 79 | /// There are two special values: 80 | /// * Specify -1 to receive the latest offset (that is the offset of the next coming message). 81 | /// * Specify -2 to receive the earliest available offset. 82 | final int time; 83 | 84 | /// How many offsets to return. 85 | final int maxNumberOfOffsets; 86 | _PartitionOffsetRequestInfo( 87 | this.partitionId, this.time, this.maxNumberOfOffsets); 88 | } 89 | 90 | /// Kafka OffsetResponse. 91 | class OffsetResponse { 92 | /// Map of topics and list of partitions with offset details. 93 | final List offsets; 94 | 95 | OffsetResponse._(this.offsets); 96 | 97 | /// Creates OffsetResponse from the provided binary data. 98 | factory OffsetResponse.fromBytes(List data) { 99 | var reader = new KafkaBytesReader.fromBytes(data); 100 | var size = reader.readInt32(); 101 | assert(size == data.length - 4); 102 | 103 | reader.readInt32(); // correlationId 104 | var count = reader.readInt32(); 105 | var offsets = new List(); 106 | while (count > 0) { 107 | var topicName = reader.readString(); 108 | var partitionCount = reader.readInt32(); 109 | while (partitionCount > 0) { 110 | var partitionId = reader.readInt32(); 111 | var errorCode = reader.readInt16(); 112 | var partitionOffsets = reader.readArray(KafkaType.int64); 113 | offsets.add(new TopicOffsets._(topicName, partitionId, errorCode, 114 | partitionOffsets)); // ignore: STRONG_MODE_DOWN_CAST_COMPOSITE 115 | partitionCount--; 116 | } 117 | count--; 118 | } 119 | 120 | return new OffsetResponse._(offsets); 121 | } 122 | } 123 | 124 | /// Data structure representing offsets of particular topic-partition returned 125 | /// by [OffsetRequest]. 
126 | class TopicOffsets { 127 | final String topicName; 128 | final int partitionId; 129 | final int errorCode; 130 | final List offsets; 131 | 132 | TopicOffsets._( 133 | this.topicName, this.partitionId, this.errorCode, this.offsets); 134 | } 135 | -------------------------------------------------------------------------------- /lib/src/protocol/offset_commit_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka OffsetCommitRequest. 4 | class OffsetCommitRequest extends KafkaRequest { 5 | /// API key of [OffsetCommitRequest]. 6 | final int apiKey = 8; 7 | 8 | /// API version of [OffsetCommitRequest]. 9 | final int apiVersion = 1; 10 | 11 | /// Name of the consumer group. 12 | final String consumerGroup; 13 | 14 | /// Generation ID of the consumer group. 15 | final int consumerGroupGenerationId; 16 | 17 | /// ID of the consumer. 18 | final String consumerId; 19 | 20 | /// Time period in msec to retain the offset. 21 | final int retentionTime; 22 | 23 | /// List of consumer offsets to be committed. 24 | final List offsets; 25 | 26 | /// Creates new instance of [OffsetCommitRequest]. 27 | /// 28 | /// [host] must be current coordinator broker for [consumerGroup]. 29 | OffsetCommitRequest(this.consumerGroup, this.offsets, 30 | this.consumerGroupGenerationId, this.consumerId, this.retentionTime) 31 | : super(); 32 | 33 | @override 34 | List toBytes() { 35 | var builder = new KafkaBytesBuilder.withRequestHeader( 36 | apiKey, apiVersion, correlationId); 37 | 38 | // TODO: replace groupBy with ListMultimap 39 | // ignore: STRONG_MODE_DOWN_CAST_COMPOSITE 40 | Map> groupedByTopic = groupBy( 41 | offsets, (o) => o.topicName); // ignore: STRONG_MODE_DOWN_CAST_COMPOSITE 42 | var timestamp = new DateTime.now().millisecondsSinceEpoch; 43 | builder.addString(consumerGroup); 44 | builder.addInt32(consumerGroupGenerationId); 45 | builder.addString(consumerId); 46 | builder.addInt32(groupedByTopic.length); 47 | groupedByTopic.forEach((topicName, partitionOffsets) { 48 | builder.addString(topicName); 49 | builder.addInt32(partitionOffsets.length); 50 | partitionOffsets.forEach((p) { 51 | builder.addInt32(p.partitionId); 52 | builder.addInt64(p.offset); 53 | builder.addInt64(timestamp); 54 | builder.addString(p.metadata); 55 | }); 56 | }); 57 | 58 | var body = builder.takeBytes(); 59 | builder.addBytes(body); 60 | 61 | return builder.takeBytes(); 62 | } 63 | 64 | @override 65 | createResponse(List data) { 66 | return new OffsetCommitResponse.fromData(data); 67 | } 68 | } 69 | 70 | /// Kafka OffsetCommitResponse. 71 | class OffsetCommitResponse { 72 | final List offsets; 73 | 74 | OffsetCommitResponse._(this.offsets); 75 | 76 | factory OffsetCommitResponse.fromData(List data) { 77 | List offsets = []; 78 | var reader = new KafkaBytesReader.fromBytes(data); 79 | var size = reader.readInt32(); 80 | assert(size == data.length - 4); 81 | 82 | reader.readInt32(); // correlationId 83 | var count = reader.readInt32(); 84 | while (count > 0) { 85 | var topicName = reader.readString(); 86 | var partitionCount = reader.readInt32(); 87 | while (partitionCount > 0) { 88 | var partitionId = reader.readInt32(); 89 | var errorCode = reader.readInt16(); 90 | offsets.add(new OffsetCommitResult(topicName, partitionId, errorCode)); 91 | partitionCount--; 92 | } 93 | count--; 94 | } 95 | 96 | return new OffsetCommitResponse._(offsets); 97 | } 98 | } 99 | 100 | /// Data structure representing result of commiting of consumer offset. 
101 | class OffsetCommitResult { 102 | final String topicName; 103 | final int partitionId; 104 | final int errorCode; 105 | 106 | OffsetCommitResult(this.topicName, this.partitionId, this.errorCode); 107 | } 108 | -------------------------------------------------------------------------------- /lib/src/protocol/offset_fetch_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka OffsetFetchRequest. 4 | class OffsetFetchRequest extends KafkaRequest { 5 | /// API key of [OffsetFetchRequest] 6 | final int apiKey = 9; 7 | 8 | /// API version of [OffsetFetchRequest]. 9 | final int apiVersion = 1; 10 | 11 | /// Name of consumer group. 12 | final String consumerGroup; 13 | 14 | /// Map of topic names and partition IDs. 15 | final Map> topics; 16 | 17 | /// Creates new instance of [OffsetFetchRequest]. 18 | OffsetFetchRequest(this.consumerGroup, this.topics) : super(); 19 | 20 | @override 21 | List toBytes() { 22 | var builder = new KafkaBytesBuilder.withRequestHeader( 23 | apiKey, apiVersion, correlationId); 24 | 25 | builder.addString(consumerGroup); 26 | builder.addInt32(topics.length); 27 | topics.forEach((topicName, partitions) { 28 | builder.addString(topicName); 29 | builder.addArray(partitions, KafkaType.int32); 30 | }); 31 | 32 | var body = builder.takeBytes(); 33 | builder.addBytes(body); 34 | 35 | return builder.takeBytes(); 36 | } 37 | 38 | @override 39 | createResponse(List data) { 40 | return new OffsetFetchResponse.fromData(data); 41 | } 42 | } 43 | 44 | /// Kafka OffsetFetchResponse. 45 | class OffsetFetchResponse { 46 | final List offsets; 47 | 48 | OffsetFetchResponse._(this.offsets); 49 | 50 | factory OffsetFetchResponse.fromOffsets(List offsets) { 51 | return new OffsetFetchResponse._(new List.from(offsets)); 52 | } 53 | 54 | factory OffsetFetchResponse.fromData(List data) { 55 | List offsets = []; 56 | var reader = new KafkaBytesReader.fromBytes(data); 57 | var size = reader.readInt32(); 58 | assert(size == data.length - 4); 59 | 60 | reader.readInt32(); // correlationId 61 | var count = reader.readInt32(); 62 | while (count > 0) { 63 | var topicName = reader.readString(); 64 | var partitionCount = reader.readInt32(); 65 | while (partitionCount > 0) { 66 | var id = reader.readInt32(); 67 | var offset = reader.readInt64(); 68 | var metadata = reader.readString(); 69 | var errorCode = reader.readInt16(); 70 | offsets.add( 71 | new ConsumerOffset(topicName, id, offset, metadata, errorCode)); 72 | partitionCount--; 73 | } 74 | count--; 75 | } 76 | 77 | return new OffsetFetchResponse._(offsets); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /lib/src/protocol/produce_api.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// Kafka ProduceRequest. 4 | class ProduceRequest extends KafkaRequest { 5 | /// API key of [ProduceRequest] 6 | final int apiKey = 0; 7 | 8 | /// API version of [ProduceRequest] 9 | final int apiVersion = 0; 10 | 11 | /// Indicates how many acknowledgements the servers 12 | /// should receive before responding to the request. 13 | final int requiredAcks; 14 | 15 | /// Provides a maximum time in milliseconds the server 16 | /// can await the receipt of the number of acknowledgements in [requiredAcks]. 17 | final int timeout; 18 | 19 | /// List of produce envelopes containing messages to be published. 
20 | final List messages; 21 | 22 | /// Creates Kafka [ProduceRequest]. 23 | /// 24 | /// The [requiredAcks] field indicates how many acknowledgements the servers 25 | /// should receive before responding to the request. 26 | /// The [timeout] field provides a maximum time in milliseconds the server 27 | /// can await the receipt of the number of acknowledgements in [requiredAcks]. 28 | ProduceRequest(this.requiredAcks, this.timeout, this.messages) : super(); 29 | 30 | @override 31 | List toBytes() { 32 | var builder = new KafkaBytesBuilder.withRequestHeader( 33 | apiKey, apiVersion, correlationId); 34 | builder.addInt16(requiredAcks); 35 | builder.addInt32(timeout); 36 | 37 | Map> messageSets = new Map(); 38 | for (var envelope in messages) { 39 | if (!messageSets.containsKey(envelope.topicName)) { 40 | messageSets[envelope.topicName] = new Map(); 41 | } 42 | messageSets[envelope.topicName][envelope.partitionId] = 43 | new MessageSet.build(envelope); 44 | } 45 | 46 | builder.addInt32(messageSets.length); 47 | messageSets.forEach((topicName, partitions) { 48 | builder.addString(topicName); 49 | builder.addInt32(partitions.length); 50 | partitions.forEach((partitionId, messageSet) { 51 | builder.addInt32(partitionId); 52 | var messageData = messageSet.toBytes(); 53 | builder.addInt32(messageData.length); 54 | builder.addRaw(messageData); 55 | }); 56 | }); 57 | 58 | var body = builder.takeBytes(); 59 | builder.addBytes(body); 60 | 61 | return builder.takeBytes(); 62 | } 63 | 64 | @override 65 | createResponse(List data) { 66 | return new ProduceResponse.fromBytes(data); 67 | } 68 | } 69 | 70 | /// Kafka ProduceResponse. 71 | class ProduceResponse { 72 | /// List of produce results for each topic-partition. 73 | final List results; 74 | 75 | ProduceResponse._(this.results); 76 | 77 | /// Creates response from the provided bytes [data]. 78 | factory ProduceResponse.fromBytes(List data) { 79 | var reader = new KafkaBytesReader.fromBytes(data); 80 | var size = reader.readInt32(); 81 | assert(size == data.length - 4); 82 | 83 | reader.readInt32(); // correlationId 84 | var results = new List(); 85 | var topicCount = reader.readInt32(); 86 | while (topicCount > 0) { 87 | var topicName = reader.readString(); 88 | var partitionCount = reader.readInt32(); 89 | while (partitionCount > 0) { 90 | var partitionId = reader.readInt32(); 91 | var errorCode = reader.readInt16(); 92 | var offset = reader.readInt64(); 93 | results.add(new TopicProduceResult._( 94 | topicName, partitionId, errorCode, offset)); 95 | partitionCount--; 96 | } 97 | topicCount--; 98 | } 99 | return new ProduceResponse._(results); 100 | } 101 | } 102 | 103 | /// Data structure representing result of producing messages with 104 | /// [ProduceRequest]. 105 | class TopicProduceResult { 106 | /// Name of the topic. 107 | final String topicName; 108 | 109 | /// ID of the partition. 110 | final int partitionId; 111 | 112 | /// Error code returned by the server. 113 | final int errorCode; 114 | 115 | /// Offset of the first message. 
116 | final int offset; 117 | 118 | TopicProduceResult._( 119 | this.topicName, this.partitionId, this.errorCode, this.offset); 120 | 121 | @override 122 | String toString() => 123 | 'Topic: ${topicName}, partition: ${partitionId}, errorCode: ${errorCode}, offset: ${offset}'; 124 | } 125 | -------------------------------------------------------------------------------- /lib/src/session.dart: -------------------------------------------------------------------------------- 1 | part of kafka; 2 | 3 | /// Initial contact point with a Kafka cluster. 4 | class ContactPoint { 5 | final String host; 6 | final int port; 7 | 8 | ContactPoint(this.host, this.port); 9 | } 10 | 11 | /// Session responsible for communication with a Kafka cluster. 12 | /// 13 | /// In order to create a new Session you need to pass a list of [ContactPoint]s to 14 | /// the constructor. Each ContactPoint is defined by a host and a port of one 15 | /// of the Kafka brokers. At least one ContactPoint is required to connect to 16 | /// the cluster; all the remaining members of the cluster will be automatically 17 | /// detected by the Session. 18 | /// 19 | /// For production deployments it is recommended to provide more than one 20 | /// ContactPoint since this will enable "failover" in case one of the instances 21 | /// is temporarily unavailable. 22 | class KafkaSession { 23 | /// List of Kafka brokers which are used as initial contact points. 24 | final Queue contactPoints; 25 | 26 | Map> _sockets = new Map(); 27 | Map _subscriptions = new Map(); 28 | Map> _buffers = new Map(); 29 | Map _sizes = new Map(); 30 | Map _inflightRequests = new Map(); 31 | Map _flushFutures = new Map(); 32 | 33 | // Cluster Metadata 34 | Future> _brokers; 35 | Map> _topicsMetadata = new Map(); 36 | 37 | /// Creates new session. 38 | /// 39 | /// [contactPoints] will be used to fetch Kafka metadata information. At least 40 | /// one is required. However, for production consider having more than one. 41 | /// In case one of the hosts is temporarily unavailable the session will 42 | /// rotate them until a successful response is returned. An error will be thrown 43 | /// when all of the default hosts are unavailable. 44 | KafkaSession(List contactPoints) 45 | : contactPoints = new Queue.from(contactPoints); 46 | 47 | /// Returns names of all existing topics in the Kafka cluster. 48 | Future> listTopics() async { 49 | // TODO: actually rotate default hosts on failure. 50 | var contactPoint = _getCurrentContactPoint(); 51 | var request = new MetadataRequest(); 52 | MetadataResponse response = 53 | await _send(contactPoint.host, contactPoint.port, request); 54 | 55 | return response.topics.map((_) => _.topicName).toSet(); 56 | } 57 | 58 | /// Fetches Kafka cluster metadata. If [topicNames] is null then metadata for 59 | /// all topics will be returned. 60 | /// 61 | /// Please note that requests to fetch __all__ topics cannot be cached by 62 | /// the client, so it may not be as performant as requesting topics 63 | /// explicitly. 64 | /// 65 | /// Also, if the Kafka server is configured to auto-create topics you must 66 | /// explicitly specify the topic name in the metadata request, otherwise the topic 67 | /// will not be created.
68 | Future getMetadata(Set topicNames, 69 | {bool invalidateCache: false}) async { 70 | if (topicNames.isEmpty) 71 | throw new ArgumentError.value( 72 | topicNames, 'topicNames', 'List of topic names can not be empty'); 73 | 74 | if (invalidateCache) { 75 | _brokers = null; 76 | _topicsMetadata = new Map(); 77 | } 78 | // TODO: actually rotate default hosts on failure. 79 | var contactPoint = _getCurrentContactPoint(); 80 | 81 | var topicsToFetch = 82 | topicNames.where((t) => !_topicsMetadata.keys.contains(t)); 83 | if (topicsToFetch.length > 0) { 84 | Future responseFuture = _sendMetadataRequest( 85 | topicsToFetch.toSet(), contactPoint.host, contactPoint.port); 86 | for (var name in topicsToFetch) { 87 | _topicsMetadata[name] = responseFuture.then((response) { 88 | return response.topics.firstWhere((_) => _.topicName == name); 89 | }); 90 | } 91 | 92 | _brokers = responseFuture.then((response) => response.brokers); 93 | } 94 | List allMetadata = await Future.wait(_topicsMetadata.values); 95 | var metadata = allMetadata.where((_) => topicNames.contains(_.topicName)); 96 | var brokers = await _brokers; 97 | 98 | return new ClusterMetadata(brokers, new List.unmodifiable(metadata)); 99 | } 100 | 101 | Future _sendMetadataRequest( 102 | Set topics, String host, int port) async { 103 | var request = new MetadataRequest(topics); 104 | MetadataResponse response = await _send(host, port, request); 105 | 106 | var topicWithError = response.topics.firstWhere( 107 | (_) => _.errorCode != KafkaServerError.NoError, 108 | orElse: () => null); 109 | 110 | if (topicWithError is TopicMetadata) { 111 | var retries = 1; 112 | var error = new KafkaServerError(topicWithError.errorCode); 113 | while (error.isLeaderNotAvailable && retries < 5) { 114 | var future = new Future.delayed( 115 | new Duration(seconds: retries), () => _send(host, port, request)); 116 | 117 | response = await future; 118 | topicWithError = response.topics.firstWhere( 119 | (_) => _.errorCode != KafkaServerError.NoError, 120 | orElse: () => null); 121 | var errorCode = 122 | (topicWithError is TopicMetadata) ? topicWithError.errorCode : 0; 123 | error = new KafkaServerError(errorCode); 124 | retries++; 125 | } 126 | 127 | if (error.isError) throw error; 128 | } 129 | 130 | return response; 131 | } 132 | 133 | /// Fetches metadata for specified [consumerGroup]. 134 | /// 135 | /// It handles `ConsumerCoordinatorNotAvailableCode(15)` API error which Kafka 136 | /// returns in case [GroupCoordinatorRequest] is sent for the very first time 137 | /// to this particular broker (when special topic to store consumer offsets 138 | /// does not exist yet). 139 | /// 140 | /// It will attempt up to 5 retries (with linear delay) in order to fetch 141 | /// metadata. 142 | Future getConsumerMetadata( 143 | String consumerGroup) async { 144 | // TODO: rotate default hosts. 
145 | var contactPoint = _getCurrentContactPoint(); 146 | var request = new GroupCoordinatorRequest(consumerGroup); 147 | 148 | GroupCoordinatorResponse response = 149 | await _send(contactPoint.host, contactPoint.port, request); 150 | var retries = 1; 151 | var error = new KafkaServerError(response.errorCode); 152 | while (error.isConsumerCoordinatorNotAvailable && retries < 5) { 153 | var future = new Future.delayed(new Duration(seconds: retries), 154 | () => _send(contactPoint.host, contactPoint.port, request)); 155 | 156 | response = await future; 157 | error = new KafkaServerError(response.errorCode); 158 | retries++; 159 | } 160 | 161 | if (error.isError) throw error; 162 | 163 | return response; 164 | } 165 | 166 | /// Sends request to specified [Broker]. 167 | Future send(Broker broker, KafkaRequest request) { 168 | return _send(broker.host, broker.port, request); 169 | } 170 | 171 | Future _send(String host, int port, KafkaRequest request) async { 172 | kafkaLogger.finer('Session: Sending request ${request} to ${host}:${port}'); 173 | var socket = await _getSocket(host, port); 174 | Completer completer = new Completer(); 175 | _inflightRequests[request] = completer; 176 | 177 | /// Writing to socket is synchronous, so we need to remember future 178 | /// returned by last call to `flush` and only write this request after 179 | /// previous one has been flushed. 180 | var flushFuture = _flushFutures[socket]; 181 | _flushFutures[socket] = flushFuture.then((_) { 182 | socket.add(request.toBytes()); 183 | return socket.flush().catchError((error) { 184 | _inflightRequests.remove(request); 185 | completer.completeError(error); 186 | return new Future.value(); 187 | }); 188 | }); 189 | 190 | return completer.future; 191 | } 192 | 193 | /// Closes this session and terminates all open socket connections. 194 | /// 195 | /// After session has been closed it can't be used or re-opened. 
196 | Future close() async { 197 | for (var h in _sockets.keys) { 198 | await _subscriptions[h].cancel(); 199 | (await _sockets[h]).destroy(); 200 | } 201 | _sockets.clear(); 202 | } 203 | 204 | void _handleData(String hostPort, List d) { 205 | var buffer = _buffers[hostPort]; 206 | 207 | buffer.addAll(d); 208 | if (buffer.length >= 4 && _sizes[hostPort] == -1) { 209 | var sizeBytes = buffer.sublist(0, 4); 210 | var reader = new KafkaBytesReader.fromBytes(sizeBytes); 211 | _sizes[hostPort] = reader.readInt32(); 212 | } 213 | 214 | List extra; 215 | if (buffer.length > _sizes[hostPort] + 4) { 216 | extra = buffer.sublist(_sizes[hostPort] + 4); 217 | buffer.removeRange(_sizes[hostPort] + 4, buffer.length); 218 | } 219 | 220 | if (buffer.length == _sizes[hostPort] + 4) { 221 | var header = buffer.sublist(4, 8); 222 | var reader = new KafkaBytesReader.fromBytes(header); 223 | var correlationId = reader.readInt32(); 224 | var request = _inflightRequests.keys 225 | .firstWhere((r) => r.correlationId == correlationId); 226 | var completer = _inflightRequests[request]; 227 | var response = request.createResponse(buffer); 228 | _inflightRequests.remove(request); 229 | buffer.clear(); 230 | _sizes[hostPort] = -1; 231 | 232 | completer.complete(response); 233 | if (extra is List && extra.isNotEmpty) { 234 | _handleData(hostPort, extra); 235 | } 236 | } 237 | } 238 | 239 | ContactPoint _getCurrentContactPoint() { 240 | return contactPoints.first; 241 | } 242 | 243 | // void _rotateDefaultHosts() { 244 | // var current = defaultHosts.removeFirst(); 245 | // defaultHosts.addLast(current); 246 | // } 247 | 248 | Future _getSocket(String host, int port) { 249 | var key = '${host}:${port}'; 250 | if (!_sockets.containsKey(key)) { 251 | _sockets[key] = Socket.connect(host, port); 252 | _sockets[key].then((socket) { 253 | socket.setOption(SocketOption.TCP_NODELAY, true); 254 | _buffers[key] = new List(); 255 | _sizes[key] = -1; 256 | _subscriptions[key] = socket.listen((d) => _handleData(key, d)); 257 | _flushFutures[socket] = new Future.value(); 258 | }, onError: (error) { 259 | _sockets.remove(key); 260 | }); 261 | } 262 | 263 | return _sockets[key]; 264 | } 265 | } 266 | 267 | /// Stores metadata information about cluster including available brokers 268 | /// and topics. 269 | class ClusterMetadata { 270 | /// List of brokers in the cluster. 271 | final List brokers; 272 | 273 | /// List with metadata for each topic. 274 | final List topics; 275 | 276 | /// Creates new instance of cluster metadata. 277 | ClusterMetadata(this.brokers, this.topics); 278 | 279 | /// Returns [Broker] by specified [nodeId]. 280 | Broker getBroker(int nodeId) { 281 | return brokers.firstWhere((b) => b.id == nodeId); 282 | } 283 | 284 | /// Returns [TopicMetadata] for specified [topicName]. 285 | /// 286 | /// If no topic is found will throw `StateError`. 287 | TopicMetadata getTopicMetadata(String topicName) { 288 | return topics.firstWhere((topic) => topic.topicName == topicName, 289 | orElse: () => 290 | throw new StateError('No topic ${topicName} found in metadata.')); 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /lib/src/util/crc32.dart: -------------------------------------------------------------------------------- 1 | part of kafka.protocol; 2 | 3 | /// CRC32 checksum calculator. 4 | /// 5 | // TODO: extract in it's own package (?) 
6 | class Crc32 { 7 | static final List _table = const [ 8 | 0x00000000, 9 | 0x77073096, 10 | 0xee0e612c, 11 | 0x990951ba, 12 | 0x076dc419, 13 | 0x706af48f, 14 | 0xe963a535, 15 | 0x9e6495a3, 16 | 0x0edb8832, 17 | 0x79dcb8a4, 18 | 0xe0d5e91e, 19 | 0x97d2d988, 20 | 0x09b64c2b, 21 | 0x7eb17cbd, 22 | 0xe7b82d07, 23 | 0x90bf1d91, 24 | 0x1db71064, 25 | 0x6ab020f2, 26 | 0xf3b97148, 27 | 0x84be41de, 28 | 0x1adad47d, 29 | 0x6ddde4eb, 30 | 0xf4d4b551, 31 | 0x83d385c7, 32 | 0x136c9856, 33 | 0x646ba8c0, 34 | 0xfd62f97a, 35 | 0x8a65c9ec, 36 | 0x14015c4f, 37 | 0x63066cd9, 38 | 0xfa0f3d63, 39 | 0x8d080df5, 40 | 0x3b6e20c8, 41 | 0x4c69105e, 42 | 0xd56041e4, 43 | 0xa2677172, 44 | 0x3c03e4d1, 45 | 0x4b04d447, 46 | 0xd20d85fd, 47 | 0xa50ab56b, 48 | 0x35b5a8fa, 49 | 0x42b2986c, 50 | 0xdbbbc9d6, 51 | 0xacbcf940, 52 | 0x32d86ce3, 53 | 0x45df5c75, 54 | 0xdcd60dcf, 55 | 0xabd13d59, 56 | 0x26d930ac, 57 | 0x51de003a, 58 | 0xc8d75180, 59 | 0xbfd06116, 60 | 0x21b4f4b5, 61 | 0x56b3c423, 62 | 0xcfba9599, 63 | 0xb8bda50f, 64 | 0x2802b89e, 65 | 0x5f058808, 66 | 0xc60cd9b2, 67 | 0xb10be924, 68 | 0x2f6f7c87, 69 | 0x58684c11, 70 | 0xc1611dab, 71 | 0xb6662d3d, 72 | 0x76dc4190, 73 | 0x01db7106, 74 | 0x98d220bc, 75 | 0xefd5102a, 76 | 0x71b18589, 77 | 0x06b6b51f, 78 | 0x9fbfe4a5, 79 | 0xe8b8d433, 80 | 0x7807c9a2, 81 | 0x0f00f934, 82 | 0x9609a88e, 83 | 0xe10e9818, 84 | 0x7f6a0dbb, 85 | 0x086d3d2d, 86 | 0x91646c97, 87 | 0xe6635c01, 88 | 0x6b6b51f4, 89 | 0x1c6c6162, 90 | 0x856530d8, 91 | 0xf262004e, 92 | 0x6c0695ed, 93 | 0x1b01a57b, 94 | 0x8208f4c1, 95 | 0xf50fc457, 96 | 0x65b0d9c6, 97 | 0x12b7e950, 98 | 0x8bbeb8ea, 99 | 0xfcb9887c, 100 | 0x62dd1ddf, 101 | 0x15da2d49, 102 | 0x8cd37cf3, 103 | 0xfbd44c65, 104 | 0x4db26158, 105 | 0x3ab551ce, 106 | 0xa3bc0074, 107 | 0xd4bb30e2, 108 | 0x4adfa541, 109 | 0x3dd895d7, 110 | 0xa4d1c46d, 111 | 0xd3d6f4fb, 112 | 0x4369e96a, 113 | 0x346ed9fc, 114 | 0xad678846, 115 | 0xda60b8d0, 116 | 0x44042d73, 117 | 0x33031de5, 118 | 0xaa0a4c5f, 119 | 0xdd0d7cc9, 120 | 0x5005713c, 121 | 0x270241aa, 122 | 0xbe0b1010, 123 | 0xc90c2086, 124 | 0x5768b525, 125 | 0x206f85b3, 126 | 0xb966d409, 127 | 0xce61e49f, 128 | 0x5edef90e, 129 | 0x29d9c998, 130 | 0xb0d09822, 131 | 0xc7d7a8b4, 132 | 0x59b33d17, 133 | 0x2eb40d81, 134 | 0xb7bd5c3b, 135 | 0xc0ba6cad, 136 | 0xedb88320, 137 | 0x9abfb3b6, 138 | 0x03b6e20c, 139 | 0x74b1d29a, 140 | 0xead54739, 141 | 0x9dd277af, 142 | 0x04db2615, 143 | 0x73dc1683, 144 | 0xe3630b12, 145 | 0x94643b84, 146 | 0x0d6d6a3e, 147 | 0x7a6a5aa8, 148 | 0xe40ecf0b, 149 | 0x9309ff9d, 150 | 0x0a00ae27, 151 | 0x7d079eb1, 152 | 0xf00f9344, 153 | 0x8708a3d2, 154 | 0x1e01f268, 155 | 0x6906c2fe, 156 | 0xf762575d, 157 | 0x806567cb, 158 | 0x196c3671, 159 | 0x6e6b06e7, 160 | 0xfed41b76, 161 | 0x89d32be0, 162 | 0x10da7a5a, 163 | 0x67dd4acc, 164 | 0xf9b9df6f, 165 | 0x8ebeeff9, 166 | 0x17b7be43, 167 | 0x60b08ed5, 168 | 0xd6d6a3e8, 169 | 0xa1d1937e, 170 | 0x38d8c2c4, 171 | 0x4fdff252, 172 | 0xd1bb67f1, 173 | 0xa6bc5767, 174 | 0x3fb506dd, 175 | 0x48b2364b, 176 | 0xd80d2bda, 177 | 0xaf0a1b4c, 178 | 0x36034af6, 179 | 0x41047a60, 180 | 0xdf60efc3, 181 | 0xa867df55, 182 | 0x316e8eef, 183 | 0x4669be79, 184 | 0xcb61b38c, 185 | 0xbc66831a, 186 | 0x256fd2a0, 187 | 0x5268e236, 188 | 0xcc0c7795, 189 | 0xbb0b4703, 190 | 0x220216b9, 191 | 0x5505262f, 192 | 0xc5ba3bbe, 193 | 0xb2bd0b28, 194 | 0x2bb45a92, 195 | 0x5cb36a04, 196 | 0xc2d7ffa7, 197 | 0xb5d0cf31, 198 | 0x2cd99e8b, 199 | 0x5bdeae1d, 200 | 0x9b64c2b0, 201 | 0xec63f226, 202 | 0x756aa39c, 203 | 0x026d930a, 204 | 0x9c0906a9, 205 | 0xeb0e363f, 206 | 0x72076785, 207 | 
0x05005713, 208 | 0x95bf4a82, 209 | 0xe2b87a14, 210 | 0x7bb12bae, 211 | 0x0cb61b38, 212 | 0x92d28e9b, 213 | 0xe5d5be0d, 214 | 0x7cdcefb7, 215 | 0x0bdbdf21, 216 | 0x86d3d2d4, 217 | 0xf1d4e242, 218 | 0x68ddb3f8, 219 | 0x1fda836e, 220 | 0x81be16cd, 221 | 0xf6b9265b, 222 | 0x6fb077e1, 223 | 0x18b74777, 224 | 0x88085ae6, 225 | 0xff0f6a70, 226 | 0x66063bca, 227 | 0x11010b5c, 228 | 0x8f659eff, 229 | 0xf862ae69, 230 | 0x616bffd3, 231 | 0x166ccf45, 232 | 0xa00ae278, 233 | 0xd70dd2ee, 234 | 0x4e048354, 235 | 0x3903b3c2, 236 | 0xa7672661, 237 | 0xd06016f7, 238 | 0x4969474d, 239 | 0x3e6e77db, 240 | 0xaed16a4a, 241 | 0xd9d65adc, 242 | 0x40df0b66, 243 | 0x37d83bf0, 244 | 0xa9bcae53, 245 | 0xdebb9ec5, 246 | 0x47b2cf7f, 247 | 0x30b5ffe9, 248 | 0xbdbdf21c, 249 | 0xcabac28a, 250 | 0x53b39330, 251 | 0x24b4a3a6, 252 | 0xbad03605, 253 | 0xcdd70693, 254 | 0x54de5729, 255 | 0x23d967bf, 256 | 0xb3667a2e, 257 | 0xc4614ab8, 258 | 0x5d681b02, 259 | 0x2a6f2b94, 260 | 0xb40bbe37, 261 | 0xc30c8ea1, 262 | 0x5a05df1b, 263 | 0x2d02ef8d 264 | ]; 265 | 266 | /// Computes a CRC32 value for the given input. 267 | /// 268 | /// The return value is a signed 32-bit integer. 269 | static int signed(List input, [int crc = 0]) { 270 | var value = unsigned(input, crc); 271 | if (value > 2147483647) { 272 | value -= 4294967296; 273 | } 274 | return value; 275 | } 276 | 277 | /// Computes a CRC32 value for the given input. 278 | /// 279 | /// The return value is an unsigned 32-bit integer. 280 | static int unsigned(List input, [int crc = 0]) { 281 | var value = _compute(input, crc); 282 | return value & 0xffffffff; 283 | } 284 | 285 | static int _compute(List input, [int crc = 0]) { 286 | crc = crc ^ (-1); 287 | input.forEach((i) { 288 | var x = _table[(crc ^ i) & 0xFF]; 289 | crc = (crc & 0xffffffff) >> 8; // crc >>> 8 (32-bit unsigned integer) 290 | 291 | crc ^= x; 292 | }); 293 | 294 | return crc ^ (-1); 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /pubspec.yaml: -------------------------------------------------------------------------------- 1 | name: 'kafka' 2 | version: 0.1.0 3 | description: Kafka Client library for Dartlang 4 | homepage: https://github.com/pulyaevskiy/dart-kafka 5 | author: Anatoly Pulyaevskiy 6 | 7 | environment: 8 | sdk: '>=1.12.0 <2.0.0' 9 | 10 | dependencies: 11 | quiver: ^0.22.0 12 | logging: "^0.11.1+1" 13 | 14 | dev_dependencies: 15 | test: "^0.12.4+7" 16 | mockito: "^0.11.0" 17 | dart_coveralls: "^0.4.0" 18 | -------------------------------------------------------------------------------- /test/all.dart: -------------------------------------------------------------------------------- 1 | library kafka.all_tests; 2 | 3 | import 'common/errors_test.dart' as errors_test; 4 | import 'common/messages_test.dart' as messages_test; 5 | import 'util/crc32_test.dart' as crc32_test; 6 | import 'protocol/bytes_builder_test.dart' as bytes_builder_test; 7 | import 'protocol/bytes_reader_test.dart' as bytes_reader_test; 8 | import 'protocol/fetch_test.dart' as fetch_test; 9 | import 'protocol/offset_commit_test.dart' as offset_commit_test; 10 | import 'protocol/offset_fetch_test.dart' as offset_fetch_test; 11 | import 'protocol/offset_test.dart' as offset_test; 12 | import 'protocol/produce_test.dart' as produce_test; 13 | import 'session_test.dart' as session_test; 14 | import 'consumer_group_test.dart' as consumer_group_test; 15 | import 'producer_test.dart' as producer_test; 16 | import 'consumer_test.dart' as consumer_test; 17 | import 'fetcher_test.dart' 
as fetcher_test; 18 | 19 | void main() { 20 | errors_test.main(); 21 | messages_test.main(); 22 | bytes_builder_test.main(); 23 | bytes_reader_test.main(); 24 | crc32_test.main(); 25 | session_test.main(); 26 | fetch_test.main(); 27 | offset_commit_test.main(); 28 | offset_fetch_test.main(); 29 | offset_test.main(); 30 | produce_test.main(); 31 | consumer_group_test.main(); 32 | producer_test.main(); 33 | consumer_test.main(); 34 | fetcher_test.main(); 35 | } 36 | -------------------------------------------------------------------------------- /test/common/errors_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.common.errors.test; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/common.dart'; 5 | 6 | void main() { 7 | group('KafkaServerError:', () { 8 | test('it handles error codes correctly', () { 9 | expect(new KafkaServerError(0).isNoError, isTrue); 10 | expect(new KafkaServerError(-1).isUnknown, isTrue); 11 | expect(new KafkaServerError(-1).isError, isTrue); 12 | expect(new KafkaServerError(1).isOffsetOutOfRange, isTrue); 13 | expect(new KafkaServerError(2).isInvalidMessage, isTrue); 14 | expect(new KafkaServerError(3).isUnknownTopicOrPartition, isTrue); 15 | expect(new KafkaServerError(4).isInvalidMessageSize, isTrue); 16 | expect(new KafkaServerError(5).isLeaderNotAvailable, isTrue); 17 | expect(new KafkaServerError(6).isNotLeaderForPartition, isTrue); 18 | expect(new KafkaServerError(7).isRequestTimedOut, isTrue); 19 | expect(new KafkaServerError(8).isBrokerNotAvailable, isTrue); 20 | expect(new KafkaServerError(9).isReplicaNotAvailable, isTrue); 21 | expect(new KafkaServerError(10).isMessageSizeTooLarge, isTrue); 22 | expect(new KafkaServerError(11).isStaleControllerEpoch, isTrue); 23 | expect(new KafkaServerError(12).isOffsetMetadataTooLarge, isTrue); 24 | expect(new KafkaServerError(14).isOffsetsLoadInProgress, isTrue); 25 | expect( 26 | new KafkaServerError(15).isConsumerCoordinatorNotAvailable, isTrue); 27 | expect(new KafkaServerError(16).isNotCoordinatorForConsumer, isTrue); 28 | }); 29 | 30 | test('it can be converted to string', () { 31 | expect( 32 | new KafkaServerError(0).toString(), 'KafkaServerError: NoError(0)'); 33 | }); 34 | 35 | test('it provides error message', () { 36 | expect(new KafkaServerError(1).message, 'OffsetOutOfRange'); 37 | }); 38 | }); 39 | } 40 | -------------------------------------------------------------------------------- /test/common/messages_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.common.messages.test; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/common.dart'; 5 | 6 | void main() { 7 | group('Messages:', () { 8 | test( 9 | 'compression can not be set on individual messages in produce envelope', 10 | () { 11 | expect(() { 12 | new ProduceEnvelope('test', 0, [ 13 | new Message([1], 14 | attributes: new MessageAttributes(KafkaCompression.gzip)) 15 | ]); 16 | }, throwsStateError); 17 | }); 18 | }); 19 | 20 | group('MessageAttributes:', () { 21 | test('get compression from int', () { 22 | expect(KafkaCompression.none, MessageAttributes.getCompression(0)); 23 | expect(KafkaCompression.gzip, MessageAttributes.getCompression(1)); 24 | expect(KafkaCompression.snappy, MessageAttributes.getCompression(2)); 25 | }); 26 | 27 | test('convert to int', () { 28 | expect(new MessageAttributes(KafkaCompression.none).toInt(), equals(0)); 29 | expect(new 
MessageAttributes(KafkaCompression.gzip).toInt(), equals(1)); 30 | expect(new MessageAttributes(KafkaCompression.snappy).toInt(), equals(2)); 31 | }); 32 | }); 33 | } 34 | -------------------------------------------------------------------------------- /test/consumer_group_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.consumer_group; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:mockito/mockito.dart'; 5 | import 'package:kafka/kafka.dart'; 6 | import 'package:kafka/protocol.dart'; 7 | import 'setup.dart'; 8 | 9 | void main() { 10 | group('ConsumerGroup:', () { 11 | KafkaSession _session; 12 | String _topicName = 'dartKafkaTest'; 13 | Broker _coordinator; 14 | Broker _badCoordinator; 15 | 16 | setUp(() async { 17 | var host = await getDefaultHost(); 18 | var session = new KafkaSession([new ContactPoint(host, 9092)]); 19 | var brokersMetadata = await session.getMetadata([_topicName].toSet()); 20 | 21 | var metadata = await session.getConsumerMetadata('testGroup'); 22 | _coordinator = metadata.coordinator; 23 | _badCoordinator = 24 | brokersMetadata.brokers.firstWhere((b) => b.id != _coordinator.id); 25 | _session = spy(new KafkaSessionMock(), session); 26 | }); 27 | 28 | tearDown(() async { 29 | await _session.close(); 30 | }); 31 | 32 | test('it fetches offsets', () async { 33 | var group = new ConsumerGroup(_session, 'testGroup'); 34 | var offsets = await group.fetchOffsets({ 35 | _topicName: [0, 1, 2].toSet() 36 | }); 37 | expect(offsets.length, equals(3)); 38 | offsets.forEach((o) { 39 | expect(o.errorCode, 0); 40 | }); 41 | }); 42 | 43 | test('it tries to refresh coordinator host 3 times on fetchOffsets', 44 | () async { 45 | when(_session.getConsumerMetadata('testGroup')).thenReturn( 46 | new GroupCoordinatorResponse(0, _badCoordinator.id, 47 | _badCoordinator.host, _badCoordinator.port)); 48 | 49 | var group = new ConsumerGroup(_session, 'testGroup'); 50 | // Can't use expect(throws) here since it's async, so `verify` check below 51 | // fails. 52 | try { 53 | await group.fetchOffsets({ 54 | _topicName: [0, 1, 2].toSet() 55 | }); 56 | } catch (e) { 57 | expect(e, new isInstanceOf()); 58 | expect(e.code, equals(16)); 59 | } 60 | verify(_session.getConsumerMetadata('testGroup')).called(3); 61 | }); 62 | 63 | test( 64 | 'it retries to fetchOffsets 3 times if it gets OffsetLoadInProgress error', 65 | () async { 66 | var badOffsets = [ 67 | new ConsumerOffset(_topicName, 0, -1, '', 14), 68 | new ConsumerOffset(_topicName, 1, -1, '', 14), 69 | new ConsumerOffset(_topicName, 2, -1, '', 14) 70 | ]; 71 | when(_session.send(argThat(new isInstanceOf()), 72 | argThat(new isInstanceOf()))) 73 | .thenReturn(new OffsetFetchResponse.fromOffsets(badOffsets)); 74 | 75 | var group = new ConsumerGroup(_session, 'testGroup'); 76 | // Can't use expect(throws) here since it's async, so `verify` check below 77 | // fails. 
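// NOTE (editor's annotation, not part of the original test): the timestamp captured
// below lets the catch block assert that the three fetchOffsets attempts really did
// back off (at least two seconds elapse) before the OffsetsLoadInProgress error
// (code 14) reaches the caller; the `verify` call afterwards confirms three sends.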
78 | var now = new DateTime.now(); 79 | try { 80 | await group.fetchOffsets({ 81 | _topicName: [0, 1, 2].toSet() 82 | }); 83 | fail('fetchOffsets must throw an error.'); 84 | } catch (e) { 85 | var diff = now.difference(new DateTime.now()); 86 | expect(diff.abs().inSeconds, greaterThanOrEqualTo(2)); 87 | 88 | expect(e, new isInstanceOf()); 89 | expect(e.code, equals(14)); 90 | } 91 | verify(_session.send(argThat(new isInstanceOf()), 92 | argThat(new isInstanceOf()))) 93 | .called(3); 94 | }); 95 | 96 | test('it tries to refresh coordinator host 3 times on commitOffsets', 97 | () async { 98 | when(_session.getConsumerMetadata('testGroup')).thenReturn( 99 | new GroupCoordinatorResponse(0, _badCoordinator.id, 100 | _badCoordinator.host, _badCoordinator.port)); 101 | 102 | var group = new ConsumerGroup(_session, 'testGroup'); 103 | var offsets = [new ConsumerOffset(_topicName, 0, 3, '')]; 104 | 105 | try { 106 | await group.commitOffsets(offsets, -1, ''); 107 | } catch (e) { 108 | expect(e, new isInstanceOf()); 109 | expect(e.code, equals(16)); 110 | } 111 | verify(_session.getConsumerMetadata('testGroup')).called(3); 112 | }); 113 | 114 | test('it can reset offsets to earliest', () async { 115 | var offsetMaster = new OffsetMaster(_session); 116 | var earliestOffsets = await offsetMaster.fetchEarliest({ 117 | _topicName: [0, 1, 2].toSet() 118 | }); 119 | 120 | var group = new ConsumerGroup(_session, 'testGroup'); 121 | await group.resetOffsetsToEarliest({ 122 | _topicName: [0, 1, 2].toSet() 123 | }); 124 | 125 | var offsets = await group.fetchOffsets({ 126 | _topicName: [0, 1, 2].toSet() 127 | }); 128 | expect(offsets, hasLength(3)); 129 | 130 | for (var o in offsets) { 131 | var earliest = 132 | earliestOffsets.firstWhere((to) => to.partitionId == o.partitionId); 133 | expect(o.offset, equals(earliest.offset - 1)); 134 | } 135 | }); 136 | }); 137 | } 138 | 139 | class KafkaSessionMock extends Mock implements KafkaSession {} 140 | -------------------------------------------------------------------------------- /test/consumer_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.consumer; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'setup.dart'; 6 | 7 | void main() { 8 | group('Consumer:', () { 9 | KafkaSession _session; 10 | String _topicName = 'dartKafkaTest'; 11 | Map _expectedOffsets = new Map(); 12 | 13 | setUp(() async { 14 | var date = new DateTime.now().millisecondsSinceEpoch; 15 | _topicName = 'testTopic-${date}'; 16 | var host = await getDefaultHost(); 17 | _session = new KafkaSession([new ContactPoint(host, 9092)]); 18 | var producer = new Producer(_session, 1, 100); 19 | var result = await producer.produce([ 20 | new ProduceEnvelope(_topicName, 0, [new Message('msg1'.codeUnits)]), 21 | new ProduceEnvelope(_topicName, 1, [new Message('msg2'.codeUnits)]), 22 | new ProduceEnvelope(_topicName, 2, [new Message('msg3'.codeUnits)]), 23 | ]); 24 | if (result.hasErrors) { 25 | throw new StateError( 26 | 'Consumer test: setUp failed to produce messages.'); 27 | } 28 | _expectedOffsets = result.offsets[_topicName]; 29 | }); 30 | 31 | tearDown(() async { 32 | await _session.close(); 33 | }); 34 | 35 | test('it can consume messages from multiple brokers and commit offsets', 36 | () async { 37 | var topics = { 38 | _topicName: [0, 1, 2].toSet() 39 | }; 40 | var consumer = new Consumer( 41 | _session, new ConsumerGroup(_session, 'cg'), topics, 100, 1); 42 | var consumedOffsets = new Map(); 
43 | await for (MessageEnvelope envelope in consumer.consume(limit: 3)) { 44 | consumedOffsets[envelope.partitionId] = envelope.offset; 45 | expect(envelope.offset, _expectedOffsets[envelope.partitionId]); 46 | envelope.commit(''); 47 | } 48 | expect(consumedOffsets.length, _expectedOffsets.length); 49 | }); 50 | 51 | test( 52 | 'it can consume messages from multiple brokers without commiting offsets', 53 | () async { 54 | var topics = { 55 | _topicName: [0, 1, 2].toSet() 56 | }; 57 | var consumer = new Consumer( 58 | _session, new ConsumerGroup(_session, 'cg'), topics, 100, 1); 59 | var consumedOffsets = new Map(); 60 | await for (MessageEnvelope envelope in consumer.consume(limit: 3)) { 61 | consumedOffsets[envelope.partitionId] = envelope.offset; 62 | expect(envelope.offset, _expectedOffsets[envelope.partitionId]); 63 | envelope.ack(); 64 | } 65 | expect(consumedOffsets, _expectedOffsets); 66 | 67 | var group = new ConsumerGroup(_session, 'cg'); 68 | var offsets = await group.fetchOffsets(topics); 69 | expect(offsets, hasLength(3)); 70 | for (var o in offsets) { 71 | expect(-1, o.offset); 72 | } 73 | }); 74 | 75 | test('it can handle cancelation request', () async { 76 | var topics = { 77 | _topicName: [0, 1, 2].toSet() 78 | }; 79 | var consumer = new Consumer( 80 | _session, new ConsumerGroup(_session, 'cg'), topics, 100, 1); 81 | var consumedOffsets = new Map(); 82 | await for (MessageEnvelope envelope in consumer.consume(limit: 3)) { 83 | consumedOffsets[envelope.partitionId] = envelope.offset; 84 | expect(envelope.offset, _expectedOffsets[envelope.partitionId]); 85 | envelope.cancel(); 86 | } 87 | expect(consumedOffsets.length, equals(1)); 88 | }); 89 | 90 | test('it propagates worker errors via stream controller', () async { 91 | var topics = { 92 | 'someTopic': 93 | [0, 1, 2, 3].toSet() // request partition which does not exist. 
94 | }; 95 | 96 | var consume = () async { 97 | try { 98 | var consumer = new Consumer( 99 | _session, new ConsumerGroup(_session, 'cg'), topics, 100, 1); 100 | var consumedOffsets = new Map(); 101 | await for (MessageEnvelope envelope in consumer.consume(limit: 3)) { 102 | envelope.ack(); 103 | } 104 | return false; 105 | } catch (e) { 106 | return true; 107 | } 108 | }; 109 | 110 | var result = await consume(); 111 | 112 | expect(result, isTrue); 113 | }); 114 | 115 | test('it can consume batches of messages from multiple brokers', () async { 116 | var topics = { 117 | _topicName: [0, 1, 2].toSet() 118 | }; 119 | var consumer = new Consumer( 120 | _session, new ConsumerGroup(_session, 'cg'), topics, 100, 1); 121 | var consumedOffsets = new Map(); 122 | 123 | var first, last; 124 | await for (var batch in consumer.batchConsume(3)) { 125 | if (first == null) { 126 | first = batch; 127 | first.ack(); 128 | } else if (last == null) { 129 | last = batch; 130 | last.cancel(); 131 | } 132 | } 133 | 134 | expect(first.items.length + last.items.length, 3); 135 | 136 | for (var i in first.items) { 137 | consumedOffsets[i.partitionId] = i.offset; 138 | expect(i.offset, _expectedOffsets[i.partitionId]); 139 | } 140 | for (var i in last.items) { 141 | consumedOffsets[i.partitionId] = i.offset; 142 | expect(i.offset, _expectedOffsets[i.partitionId]); 143 | } 144 | 145 | expect(consumedOffsets.length, _expectedOffsets.length); 146 | }); 147 | }); 148 | } 149 | -------------------------------------------------------------------------------- /test/fetcher_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.fetcher; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'setup.dart'; 6 | 7 | void main() { 8 | group('Fetcher:', () { 9 | KafkaSession _session; 10 | String _topicName = 'dartKafkaTest'; 11 | Map _expectedOffsets = new Map(); 12 | List _initialOffsets = new List(); 13 | 14 | setUp(() async { 15 | var host = await getDefaultHost(); 16 | _session = new KafkaSession([new ContactPoint(host, 9092)]); 17 | var producer = new Producer(_session, 1, 100); 18 | var result = await producer.produce([ 19 | new ProduceEnvelope(_topicName, 0, [new Message('msg1'.codeUnits)]), 20 | new ProduceEnvelope(_topicName, 1, [new Message('msg2'.codeUnits)]), 21 | new ProduceEnvelope(_topicName, 2, [new Message('msg3'.codeUnits)]), 22 | ]); 23 | _expectedOffsets = result.offsets[_topicName]; 24 | result.offsets[_topicName].forEach((p, o) { 25 | _initialOffsets.add(new TopicOffset(_topicName, p, o)); 26 | }); 27 | }); 28 | 29 | tearDown(() async { 30 | await _session.close(); 31 | }); 32 | 33 | test('it can consume exact number of messages from multiple brokers', 34 | () async { 35 | var fetcher = new Fetcher(_session, _initialOffsets); 36 | var fetchedCount = 0; 37 | await for (MessageEnvelope envelope in fetcher.fetch(limit: 3)) { 38 | expect(envelope.offset, _expectedOffsets[envelope.partitionId]); 39 | envelope.commit(''); 40 | fetchedCount++; 41 | } 42 | expect(fetchedCount, equals(3)); 43 | }); 44 | 45 | test('it can handle cancelation request', () async { 46 | var fetcher = new Fetcher(_session, _initialOffsets); 47 | var fetchedCount = 0; 48 | await for (MessageEnvelope envelope in fetcher.fetch(limit: 3)) { 49 | expect(envelope.offset, _expectedOffsets[envelope.partitionId]); 50 | envelope.cancel(); 51 | fetchedCount++; 52 | } 53 | expect(fetchedCount, equals(1)); 54 | }); 55 | 56 | test('it can resolve earliest 
offset', () async { 57 | var startOffsets = [new TopicOffset.earliest(_topicName, 0)]; 58 | var fetcher = new Fetcher(_session, startOffsets); 59 | 60 | await for (MessageEnvelope envelope in fetcher.fetch(limit: 1)) { 61 | envelope.ack(); 62 | } 63 | }); 64 | }); 65 | } 66 | -------------------------------------------------------------------------------- /test/producer_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.producer; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'setup.dart'; 6 | 7 | main() { 8 | group('Producer:', () { 9 | KafkaSession _session; 10 | String _topicName = 'dartKafkaTest'; 11 | 12 | setUp(() async { 13 | var host = await getDefaultHost(); 14 | _session = new KafkaSession([new ContactPoint(host, 9092)]); 15 | }); 16 | 17 | tearDown(() async { 18 | await _session.close(); 19 | }); 20 | 21 | test('it can produce messages to multiple brokers', () async { 22 | var producer = new Producer(_session, 1, 100); 23 | var result = await producer.produce([ 24 | new ProduceEnvelope(_topicName, 0, [new Message('test1'.codeUnits)]), 25 | new ProduceEnvelope(_topicName, 1, [new Message('test2'.codeUnits)]), 26 | new ProduceEnvelope(_topicName, 2, [new Message('test3'.codeUnits)]), 27 | ]); 28 | expect(result.hasErrors, isFalse); 29 | expect(result.offsets[_topicName][0], greaterThanOrEqualTo(0)); 30 | expect(result.offsets[_topicName][1], greaterThanOrEqualTo(0)); 31 | expect(result.offsets[_topicName][2], greaterThanOrEqualTo(0)); 32 | }); 33 | }); 34 | } 35 | -------------------------------------------------------------------------------- /test/protocol/bytes_builder_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.protocol.test.bytes_builder; 2 | 3 | import 'dart:async'; 4 | import 'dart:convert'; 5 | import 'package:test/test.dart'; 6 | import 'package:kafka/protocol.dart'; 7 | 8 | void main() { 9 | group('BytesBuilder:', () { 10 | KafkaBytesBuilder _builder; 11 | 12 | setUp(() { 13 | _builder = new KafkaBytesBuilder(); 14 | }); 15 | 16 | test('it adds Int8 values', () { 17 | _builder.addInt8(35); 18 | expect(_builder.length, equals(1)); 19 | List result = _builder.toBytes(); 20 | expect(result, hasLength(equals(1))); 21 | expect(result[0], equals(35)); 22 | }); 23 | 24 | test('it adds Int16 values', () { 25 | _builder.addInt16(341); 26 | expect(_builder.length, equals(2)); 27 | List result = _builder.toBytes(); 28 | expect(result, hasLength(equals(2))); 29 | expect(result, equals([1, 85])); 30 | }); 31 | 32 | test('it adds Int32 values', () { 33 | _builder.addInt32(1635765); 34 | expect(_builder.length, equals(4)); 35 | var result = _builder.toBytes(); 36 | expect(result, hasLength(equals(4))); 37 | expect(result, equals([0, 24, 245, 181])); 38 | }); 39 | 40 | test('it adds string values', () { 41 | _builder.addString('dart-kafka'); 42 | var result = _builder.toBytes(); 43 | expect(result, hasLength(equals(12))); // 2 bytes = size, 10 bytes = value 44 | var encodedString = result.getRange(2, 12).toList(); 45 | var value = UTF8.decode(encodedString); 46 | expect(value, equals('dart-kafka')); 47 | }); 48 | 49 | test('it adds array values of Int8', () { 50 | _builder.addArray([34, 45, 12], KafkaType.int8); 51 | var result = _builder.toBytes(); 52 | expect(result, hasLength(equals(7))); // 4 bytes = size, 3 bytes = values 53 | }); 54 | 55 | test('it adds array values of Int16', () { 56 | _builder.addArray([234, 
523, 332], KafkaType.int16); 57 | var result = _builder.toBytes(); 58 | expect(result, hasLength(equals(10))); // 4 bytes = size, 6 bytes = values 59 | }); 60 | 61 | test('it adds array values of Int64', () { 62 | _builder.addArray([234, 523, 332], KafkaType.int64); 63 | var result = _builder.toBytes(); 64 | expect( 65 | result, hasLength(equals(28))); // 4 bytes = size, 24 bytes = values 66 | }); 67 | 68 | test('it adds array values of bytes', () { 69 | _builder.addArray([ 70 | [123], 71 | [32] 72 | ], KafkaType.bytes); 73 | var result = _builder.toBytes(); 74 | expect(result, hasLength(equals(14))); // 4 + 4 + 1 + 4 + 1 75 | }); 76 | 77 | test('it does not support objects in array values', () { 78 | expect( 79 | new Future(() { 80 | _builder.addArray(['foo'], KafkaType.object); 81 | }), 82 | throwsStateError); 83 | }); 84 | 85 | test('it supports null for bytes type', () { 86 | _builder.addBytes(null); 87 | var result = _builder.toBytes(); 88 | expect(result, hasLength(4)); 89 | expect(result, equals([255, 255, 255, 255])); 90 | }); 91 | }); 92 | } 93 | -------------------------------------------------------------------------------- /test/protocol/bytes_reader_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.protocol.test.bytes_reader; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/protocol.dart'; 5 | 6 | void main() { 7 | group('BytesReader:', () { 8 | KafkaBytesReader _reader; 9 | List _data; 10 | 11 | setUp(() { 12 | var builder = new KafkaBytesBuilder(); 13 | builder 14 | ..addInt8(53) 15 | ..addInt16(3541) 16 | ..addInt32(162534612) 17 | ..addString('dart-kafka') 18 | ..addBytes([12, 43, 83]) 19 | ..addArray(['one', 'two'], KafkaType.string); 20 | _data = builder.takeBytes(); 21 | _reader = new KafkaBytesReader.fromBytes(_data); 22 | }); 23 | 24 | test('it indicates end of buffer', () { 25 | var builder = new KafkaBytesBuilder(); 26 | builder.addInt8(53); 27 | _reader = new KafkaBytesReader.fromBytes(builder.takeBytes()); 28 | expect(_reader.length, equals(1)); 29 | expect(_reader.isEOF, isFalse); 30 | expect(_reader.isNotEOF, isTrue); 31 | _reader.readInt8(); 32 | expect(_reader.isEOF, isTrue); 33 | expect(_reader.isNotEOF, isFalse); 34 | }); 35 | 36 | test('it reads all Kafka types', () { 37 | expect(_reader.readInt8(), equals(53)); 38 | expect(_reader.readInt16(), equals(3541)); 39 | expect(_reader.readInt32(), equals(162534612)); 40 | expect(_reader.readString(), equals('dart-kafka')); 41 | expect(_reader.readBytes(), equals([12, 43, 83])); 42 | expect(_reader.readArray(KafkaType.string), equals(['one', 'two'])); 43 | }); 44 | 45 | test('it supports null for bytes type', () { 46 | var builder = new KafkaBytesBuilder(); 47 | builder.addBytes(null); 48 | var reader = new KafkaBytesReader.fromBytes(builder.takeBytes()); 49 | expect(reader.readBytes(), equals(null)); 50 | }); 51 | }); 52 | } 53 | -------------------------------------------------------------------------------- /test/protocol/fetch_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.api.fetch; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'package:kafka/protocol.dart'; 6 | import '../setup.dart'; 7 | 8 | void main() { 9 | group('FetchApi:', () { 10 | String _topicName = 'dartKafkaTest'; 11 | Broker _host; 12 | KafkaSession _session; 13 | FetchRequest _request; 14 | String _message; 15 | int _offset; 16 | 17 | setUp(() async { 18 | 
var ip = await getDefaultHost(); 19 | _session = new KafkaSession([new ContactPoint(ip, 9092)]); 20 | var metadata = await _session.getMetadata([_topicName].toSet()); 21 | var leaderId = 22 | metadata.getTopicMetadata(_topicName).getPartition(0).leader; 23 | _host = metadata.getBroker(leaderId); 24 | }); 25 | 26 | tearDown(() async { 27 | await _session.close(); 28 | }); 29 | 30 | test('it fetches messages from Kafka topic', () async { 31 | var now = new DateTime.now(); 32 | _message = 'test:' + now.toIso8601String(); 33 | ProduceRequest produce = new ProduceRequest(1, 1000, [ 34 | new ProduceEnvelope(_topicName, 0, [new Message(_message.codeUnits)]) 35 | ]); 36 | 37 | ProduceResponse produceResponse = await _session.send(_host, produce); 38 | _offset = produceResponse.results.first.offset; 39 | _request = new FetchRequest(100, 1); 40 | _request.add(_topicName, 0, _offset); 41 | FetchResponse response = await _session.send(_host, _request); 42 | 43 | expect(response.results, hasLength(1)); 44 | expect(response.results.first.messageSet, 45 | hasLength(greaterThanOrEqualTo(1))); 46 | var value = response.results.first.messageSet.messages[_offset].value; 47 | var text = new String.fromCharCodes(value); 48 | expect(text, equals(_message)); 49 | }); 50 | 51 | test('it fetches GZip encoded messages from Kafka topic', () async { 52 | var now = new DateTime.now(); 53 | _message = 'test:' + now.toIso8601String(); 54 | ProduceRequest produce = new ProduceRequest(1, 1000, [ 55 | new ProduceEnvelope( 56 | _topicName, 57 | 0, 58 | [ 59 | new Message('hello world'.codeUnits), 60 | new Message('peace and love'.codeUnits) 61 | ], 62 | compression: KafkaCompression.gzip) 63 | ]); 64 | 65 | ProduceResponse produceResponse = await _session.send(_host, produce); 66 | _offset = produceResponse.results.first.offset; 67 | _request = new FetchRequest(100, 1); 68 | _request.add(_topicName, 0, _offset); 69 | FetchResponse response = await _session.send(_host, _request); 70 | 71 | expect(response.results, hasLength(1)); 72 | expect(response.results.first.messageSet, hasLength(equals(2))); 73 | var value = response.results.first.messageSet.messages[_offset].value; 74 | var text = new String.fromCharCodes(value); 75 | expect(text, equals('hello world')); 76 | 77 | value = response.results.first.messageSet.messages[_offset + 1].value; 78 | text = new String.fromCharCodes(value); 79 | expect(text, equals('peace and love')); 80 | }); 81 | }); 82 | } 83 | -------------------------------------------------------------------------------- /test/protocol/offset_commit_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.api.offset_commit; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'package:kafka/protocol.dart'; 6 | import '../setup.dart'; 7 | 8 | void main() { 9 | group('OffsetCommitApi:', () { 10 | String _topicName = 'dartKafkaTest'; 11 | KafkaSession _session; 12 | Broker _host; 13 | Broker _coordinator; 14 | int _offset; 15 | String _testGroup; 16 | 17 | setUp(() async { 18 | var ip = await getDefaultHost(); 19 | _session = new KafkaSession([new ContactPoint(ip, 9092)]); 20 | var meta = await _session.getMetadata([_topicName].toSet()); 21 | var leaderId = meta.getTopicMetadata(_topicName).getPartition(0).leader; 22 | _host = meta.getBroker(leaderId); 23 | 24 | var now = new DateTime.now(); 25 | var message = 'test:' + now.toIso8601String(); 26 | ProduceRequest produce = new ProduceRequest(1, 1000, [ 27 | new 
ProduceEnvelope(_topicName, 0, [new Message(message.codeUnits)]) 28 | ]); 29 | ProduceResponse response = await _session.send(_host, produce); 30 | _offset = response.results.first.offset; 31 | 32 | _testGroup = 'group:' + now.millisecondsSinceEpoch.toString(); 33 | var metadata = await _session.getConsumerMetadata(_testGroup); 34 | _coordinator = metadata.coordinator; 35 | }); 36 | 37 | tearDown(() async { 38 | await _session.close(); 39 | }); 40 | 41 | test('it commits consumer offsets', () async { 42 | var offsets = [ 43 | new ConsumerOffset('dartKafkaTest', 0, _offset, 'helloworld') 44 | ]; 45 | 46 | var request = new OffsetCommitRequest(_testGroup, offsets, -1, '', -1); 47 | 48 | OffsetCommitResponse response = 49 | await _session.send(_coordinator, request); 50 | expect(response.offsets, hasLength(equals(1))); 51 | expect(response.offsets.first.topicName, equals('dartKafkaTest')); 52 | expect(response.offsets.first.errorCode, equals(0)); 53 | 54 | var fetch = new OffsetFetchRequest(_testGroup, { 55 | _topicName: new Set.from([0]) 56 | }); 57 | 58 | OffsetFetchResponse fetchResponse = 59 | await _session.send(_coordinator, fetch); 60 | var offset = fetchResponse.offsets.first; 61 | expect(offset.errorCode, equals(0)); 62 | expect(offset.offset, equals(_offset)); 63 | expect(offset.metadata, equals('helloworld')); 64 | }); 65 | }); 66 | } 67 | -------------------------------------------------------------------------------- /test/protocol/offset_fetch_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.api.offset_fetch; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'package:kafka/protocol.dart'; 6 | import '../setup.dart'; 7 | 8 | void main() { 9 | group('OffsetFetchApi:', () { 10 | KafkaSession _session; 11 | OffsetFetchRequest _request; 12 | Broker _coordinator; 13 | String _testGroup; 14 | 15 | setUp(() async { 16 | var ip = await getDefaultHost(); 17 | _session = new KafkaSession([new ContactPoint(ip, 9092)]); 18 | var now = new DateTime.now(); 19 | _testGroup = 'group:' + now.millisecondsSinceEpoch.toString(); 20 | var metadata = await _session.getConsumerMetadata(_testGroup); 21 | _coordinator = metadata.coordinator; 22 | _request = new OffsetFetchRequest(_testGroup, { 23 | 'dartKafkaTest': new Set.from([0]) 24 | }); 25 | }); 26 | 27 | tearDown(() async { 28 | await _session.close(); 29 | }); 30 | 31 | test('it fetches consumer offsets', () async { 32 | OffsetFetchResponse response = 33 | await _session.send(_coordinator, _request); 34 | expect(response.offsets, hasLength(equals(1))); 35 | expect(response.offsets.first.topicName, equals('dartKafkaTest')); 36 | expect(response.offsets.first.partitionId, equals(0)); 37 | expect(response.offsets.first.errorCode, equals(0)); 38 | }); 39 | }); 40 | } 41 | -------------------------------------------------------------------------------- /test/protocol/offset_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.api.offset; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'package:kafka/protocol.dart'; 6 | import '../setup.dart'; 7 | 8 | void main() { 9 | group('OffsetApi:', () { 10 | String _topicName = 'dartKafkaTest'; 11 | Broker _broker; 12 | KafkaSession _session; 13 | OffsetRequest _request; 14 | int _offset; 15 | 16 | setUp(() async { 17 | var ip = await getDefaultHost(); 18 | _session = new KafkaSession([new 
ContactPoint(ip, 9092)]); 19 | var metadata = await _session.getMetadata([_topicName].toSet()); 20 | var leaderId = 21 | metadata.getTopicMetadata(_topicName).getPartition(0).leader; 22 | _broker = metadata.getBroker(leaderId); 23 | 24 | var now = new DateTime.now(); 25 | var _message = 'test:' + now.toIso8601String(); 26 | ProduceRequest produce = new ProduceRequest(1, 1000, [ 27 | new ProduceEnvelope(_topicName, 0, [new Message(_message.codeUnits)]) 28 | ]); 29 | 30 | ProduceResponse response = await _session.send(_broker, produce); 31 | _offset = response.results.first.offset; 32 | _request = new OffsetRequest(leaderId); 33 | }); 34 | 35 | tearDown(() async { 36 | await _session.close(); 37 | }); 38 | 39 | test('it fetches offset info', () async { 40 | _request.addTopicPartition(_topicName, 0, -1, 1); 41 | OffsetResponse response = await _session.send(_broker, _request); 42 | 43 | expect(response.offsets, hasLength(1)); 44 | var offset = response.offsets.first; 45 | expect(offset.errorCode, equals(0)); 46 | expect(offset.offsets, hasLength(1)); 47 | expect(offset.offsets.first, equals(_offset + 1)); 48 | }); 49 | }); 50 | } 51 | -------------------------------------------------------------------------------- /test/protocol/produce_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.api.produce; 2 | 3 | import 'package:test/test.dart'; 4 | import 'package:kafka/kafka.dart'; 5 | import 'package:kafka/protocol.dart'; 6 | import '../setup.dart'; 7 | 8 | void main() { 9 | group('ProduceApi:', () { 10 | String _topicName = 'dartKafkaTest'; 11 | Broker _broker; 12 | KafkaSession _session; 13 | 14 | setUp(() async { 15 | var ip = await getDefaultHost(); 16 | _session = new KafkaSession([new ContactPoint(ip, 9092)]); 17 | var metadata = await _session.getMetadata([_topicName].toSet()); 18 | var leaderId = 19 | metadata.getTopicMetadata(_topicName).getPartition(0).leader; 20 | _broker = metadata.getBroker(leaderId); 21 | }); 22 | 23 | tearDown(() async { 24 | await _session.close(); 25 | }); 26 | 27 | test('it publishes messages to Kafka topic', () async { 28 | var request = new ProduceRequest(1, 1000, [ 29 | new ProduceEnvelope( 30 | _topicName, 0, [new Message('hello world'.codeUnits)]) 31 | ]); 32 | ProduceResponse response = await _session.send(_broker, request); 33 | expect(response.results, hasLength(1)); 34 | expect(response.results.first.topicName, equals(_topicName)); 35 | expect(response.results.first.errorCode, equals(0)); 36 | expect(response.results.first.offset, greaterThanOrEqualTo(0)); 37 | }); 38 | 39 | test('it publishes GZip encoded messages to Kafka topic', () async { 40 | var request = new ProduceRequest(1, 1000, [ 41 | new ProduceEnvelope( 42 | _topicName, 43 | 0, 44 | [ 45 | new Message('hello world'.codeUnits), 46 | new Message('peace and love'.codeUnits) 47 | ], 48 | compression: KafkaCompression.gzip) 49 | ]); 50 | ProduceResponse response = await _session.send(_broker, request); 51 | expect(response.results, hasLength(1)); 52 | expect(response.results.first.topicName, equals(_topicName)); 53 | expect(response.results.first.errorCode, equals(0)); 54 | expect(response.results.first.offset, greaterThanOrEqualTo(0)); 55 | }); 56 | }); 57 | } 58 | -------------------------------------------------------------------------------- /test/session_test.dart: -------------------------------------------------------------------------------- 1 | library kafka.test.session; 2 | 3 | import 'package:test/test.dart'; 4 | import 
'package:kafka/kafka.dart'; 5 | import 'setup.dart'; 6 | 7 | void main() { 8 | group('Session:', () { 9 | KafkaSession _session; 10 | String _topicName = 'dartKafkaTest'; 11 | 12 | setUp(() async { 13 | var host = await getDefaultHost(); 14 | _session = new KafkaSession([new ContactPoint(host, 9092)]); 15 | }); 16 | 17 | tearDown(() async { 18 | await _session.close(); 19 | }); 20 | 21 | test('it can list existing topics', () async { 22 | var topics = await _session.listTopics(); 23 | expect(topics, new isInstanceOf()); 24 | expect(topics, isNotEmpty); 25 | expect(topics, contains(_topicName)); 26 | }); 27 | 28 | test('it can fetch topic metadata', () async { 29 | var response = await _session.getMetadata([_topicName].toSet()); 30 | expect(response, new isInstanceOf()); 31 | expect(response.brokers, isNotEmpty); 32 | var topic = response.getTopicMetadata(_topicName); 33 | expect(topic, new isInstanceOf()); 34 | response = await _session.getMetadata([_topicName].toSet()); 35 | var newTopic = response.getTopicMetadata(_topicName); 36 | expect(newTopic, same(topic)); 37 | }); 38 | 39 | test('it invalidates topic metadata', () async { 40 | var response = await _session.getMetadata([_topicName].toSet()); 41 | var topic = response.getTopicMetadata(_topicName); 42 | response = await _session.getMetadata([_topicName].toSet(), 43 | invalidateCache: true); 44 | var newTopic = response.getTopicMetadata(_topicName); 45 | expect(newTopic, isNot(same(topic))); 46 | }); 47 | 48 | test('it fetches topic metadata for auto-created topics', () async { 49 | var date = new DateTime.now().millisecondsSinceEpoch; 50 | var topicName = 'testTopic-${date}'; 51 | var response = await _session.getMetadata([topicName].toSet()); 52 | var topic = response.getTopicMetadata(topicName); 53 | expect(topic.errorCode, equals(KafkaServerError.NoError)); 54 | expect(topic.partitions, isNotEmpty); 55 | }); 56 | 57 | test('it can fetch consumer metadata', () async { 58 | var response = await _session.getConsumerMetadata('testGroup'); 59 | expect(response.errorCode, equals(0)); 60 | expect(response.coordinatorId, isNotNull); 61 | expect(response.coordinatorHost, isNotNull); 62 | expect(response.coordinatorPort, isNotNull); 63 | }); 64 | }); 65 | } 66 | -------------------------------------------------------------------------------- /test/setup.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | import 'dart:async'; 3 | import 'package:logging/logging.dart'; 4 | 5 | /// Returns default host's IP address depending on current environment. 6 | /// 7 | /// For running tests locally on developer machine we assume you're using 8 | /// Docker Toolbox and OS X (sorry). The IP of `default` docker-machine will 9 | /// be used. 
10 | Future<String> getDefaultHost() async {
11 | if (Platform.environment.containsKey('TRAVIS')) {
12 | return '127.0.0.1';
13 | } else {
14 | var res = await Process.run('docker-machine', ['ip', 'default']);
15 | return res.stdout.toString().trim();
16 | }
17 | }
18 | 
19 | void enableLogs() {
20 | Logger.root.onRecord.listen(print);
21 | }
22 | 
--------------------------------------------------------------------------------
/test/util/crc32_test.dart:
--------------------------------------------------------------------------------
1 | library kafka.protocol.crc32.test;
2 | 
3 | import 'package:test/test.dart';
4 | import 'package:kafka/protocol.dart';
5 | 
6 | void main() {
7 | group('Crc32:', () {
8 | test('it produces valid CRC32 checksums (unsigned)', () {
9 | _dataProvider().forEach((input, expected) {
10 | var result = Crc32.unsigned(input);
11 | expect(result, equals(expected));
12 | });
13 | });
14 | 
15 | test('it produces valid CRC32 checksums for string inputs', () {
16 | _stringDataProvider().forEach((input, expected) {
17 | var result = Crc32.unsigned(input);
18 | expect(result, equals(expected));
19 | });
20 | });
21 | 
22 | test('it can produce signed checksum', () {
23 | var result = Crc32.signed('Lammert'.codeUnits);
24 | expect(result, equals(0x71FC2734));
25 | });
26 | });
27 | }
28 | 
29 | /// Test cases generated using: http://www.lammertbies.nl/comm/info/crc-calculation.html
30 | Map<List<int>, int> _dataProvider() {
31 | return {
32 | [0]: 0xD202EF8D,
33 | [1]: 0xA505DF1B,
34 | [113, 38, 83, 70]: 0xC02EC885
35 | };
36 | }
37 | 
38 | /// Test cases generated using: http://www.lammertbies.nl/comm/info/crc-calculation.html
39 | Map<List<int>, int> _stringDataProvider() {
40 | return {'Lammert'.codeUnits: 0x71FC2734,};
41 | }
42 | 
--------------------------------------------------------------------------------
/tool/kafka-cluster/Dockerfile:
--------------------------------------------------------------------------------
1 | # Two-node Kafka Cluster and Zookeeper
2 | 
3 | FROM spotify/kafka
4 | 
5 | RUN ["rm", "-f", "/usr/bin/start-kafka.sh"]
6 | ADD scripts/start-kafka.sh /usr/bin/start-kafka.sh
7 | RUN ["chmod", "+x", "/usr/bin/start-kafka.sh"]
8 | 
9 | # Supervisor config
10 | RUN ["rm", "-f", "/etc/supervisor/conf.d/kafka.conf"]
11 | ADD supervisor/kafka1.conf supervisor/kafka2.conf /etc/supervisor/conf.d/
12 | 
13 | # 2181 is zookeeper; 9092 and 9093 are the two kafka brokers
14 | EXPOSE 2181 9092 9093
15 | 
16 | CMD ["supervisord", "-n"]
17 | 
--------------------------------------------------------------------------------
/tool/kafka-cluster/scripts/start-kafka.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | 
3 | # Optional ENV variables:
4 | # * ADVERTISED_HOST: the external ip for the container, e.g. `boot2docker ip`
5 | # * ADVERTISED_PORT: the external port for Kafka, e.g. 9092
6 | # * ZK_CHROOT: the zookeeper chroot that's used by Kafka (without / prefix), e.g. "kafka"
7 | # * LOG_RETENTION_HOURS: the minimum age of a log file in hours to be eligible for deletion (default is 168, for 1 week)
8 | # * LOG_RETENTION_BYTES: configure the size at which segments are pruned from the log (default is 1073741824, for 1GB)
9 | # * NUM_PARTITIONS: configure the default number of log partitions per topic
10 | 
11 | BROKER_ID=$1
12 | BROKER_PORT=$2
13 | 
14 | cp $KAFKA_HOME/config/server.properties $KAFKA_HOME/config/server$BROKER_ID.properties
15 | 
16 | # Configure advertised host/port if we run in helios
17 | if [ !
-z "$HELIOS_PORT_kafka" ]; then 18 | ADVERTISED_HOST=`echo $HELIOS_PORT_kafka | cut -d':' -f 1 | xargs -n 1 dig +short | tail -n 1` 19 | ADVERTISED_PORT=`echo $HELIOS_PORT_kafka | cut -d':' -f 2` 20 | fi 21 | 22 | # Set the external host and port 23 | if [ ! -z "$ADVERTISED_HOST" ]; then 24 | echo "advertised host: $ADVERTISED_HOST" 25 | sed -r -i "s/#(advertised.host.name)=(.*)/\1=$ADVERTISED_HOST/g" $KAFKA_HOME/config/server$BROKER_ID.properties 26 | fi 27 | if [ ! -z "$ADVERTISED_PORT" ]; then 28 | echo "advertised port: $ADVERTISED_PORT" 29 | sed -r -i "s/#(advertised.port)=(.*)/\1=$ADVERTISED_PORT/g" $KAFKA_HOME/config/server$BROKER_ID.properties 30 | fi 31 | 32 | sed -r -i "s/log.retention.hours=168/log.retention.minutes=1/g" $KAFKA_HOME/config/server$BROKER_ID.properties 33 | sed -r -i "s/log.retention.check.interval.ms=300000/log.retention.check.interval.ms=10000/g" $KAFKA_HOME/config/server$BROKER_ID.properties 34 | sed -r -i "s/num.partitions=1/num.partitions=3/g" $KAFKA_HOME/config/server$BROKER_ID.properties 35 | 36 | # Set the zookeeper chroot 37 | if [ ! -z "$ZK_CHROOT" ]; then 38 | # wait for zookeeper to start up 39 | until /usr/share/zookeeper/bin/zkServer.sh status; do 40 | sleep 0.1 41 | done 42 | 43 | # create the chroot node 44 | echo "create /$ZK_CHROOT \"\"" | /usr/share/zookeeper/bin/zkCli.sh || { 45 | echo "can't create chroot in zookeeper, exit" 46 | exit 1 47 | } 48 | 49 | # configure kafka 50 | sed -r -i "s/(zookeeper.connect)=(.*)/\1=localhost:2181\/$ZK_CHROOT/g" $KAFKA_HOME/config/server$BROKER_ID.properties 51 | fi 52 | 53 | # Allow specification of log retention policies 54 | if [ ! -z "$LOG_RETENTION_HOURS" ]; then 55 | echo "log retention hours: $LOG_RETENTION_HOURS" 56 | sed -r -i "s/(log.retention.hours)=(.*)/\1=$LOG_RETENTION_HOURS/g" $KAFKA_HOME/config/server$BROKER_ID.properties 57 | fi 58 | if [ ! -z "$LOG_RETENTION_BYTES" ]; then 59 | echo "log retention bytes: $LOG_RETENTION_BYTES" 60 | sed -r -i "s/#(log.retention.bytes)=(.*)/\1=$LOG_RETENTION_BYTES/g" $KAFKA_HOME/config/server$BROKER_ID.properties 61 | fi 62 | 63 | # Configure the default number of log partitions per topic 64 | if [ ! 
-z "$NUM_PARTITIONS" ]; then 65 | echo "default number of partition: $NUM_PARTITIONS" 66 | sed -r -i "s/(num.partitions)=(.*)/\1=$NUM_PARTITIONS/g" $KAFKA_HOME/config/server$BROKER_ID.properties 67 | fi 68 | 69 | # Configure broker ID 70 | sed -r -i "s/^(broker\.id)=(.*)/\1=$BROKER_ID/g" $KAFKA_HOME/config/server$BROKER_ID.properties 71 | # Configure log directory 72 | sed -r -i "s/(log.dirs)=(.*)/\1=\/tmp\/kafka-logs-$BROKER_ID/g" $KAFKA_HOME/config/server$BROKER_ID.properties 73 | # Configure port 74 | sed -r -i "s/^(port)=(.*)/\1=$BROKER_PORT/g" $KAFKA_HOME/config/server$BROKER_ID.properties 75 | 76 | # Run Kafka 77 | $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server$BROKER_ID.properties 78 | -------------------------------------------------------------------------------- /tool/kafka-cluster/supervisor/kafka1.conf: -------------------------------------------------------------------------------- 1 | [program:kafka1] 2 | command=/usr/bin/start-kafka.sh 1 9092 3 | autostart=true 4 | autorestart=true 5 | -------------------------------------------------------------------------------- /tool/kafka-cluster/supervisor/kafka2.conf: -------------------------------------------------------------------------------- 1 | [program:kafka2] 2 | command=/usr/bin/start-kafka.sh 2 9093 3 | autostart=true 4 | autorestart=true 5 | -------------------------------------------------------------------------------- /tool/kafka-cluster/supervisor/zookeeper.conf: -------------------------------------------------------------------------------- 1 | [program:zookeeper] 2 | command=/usr/share/zookeeper/bin/zkServer.sh start-foreground 3 | autostart=true 4 | autorestart=true --------------------------------------------------------------------------------