├── .gitignore ├── .travis.yml ├── CMakeLists.txt ├── LICENSE ├── README.md ├── cmake └── Modules │ └── FindSnappy.cmake ├── docs ├── build_configuration.md ├── connection.md ├── connection_configuration.md ├── consumer_metadata_request.md ├── consumer_metadata_response.md ├── extra.css ├── fetch_request.md ├── fetch_response.md ├── index.html ├── message.md ├── message_and_offset.md ├── message_set.md ├── metadata_request.md ├── metadata_response.md ├── offset_commit_request.md ├── offset_commit_response.md ├── offset_fetch_request.md ├── offset_fetch_response.md ├── offset_request.md ├── offset_response.md ├── primitives.md ├── produce_request.md └── produce_response.md ├── examples ├── CMakeLists.txt ├── README.md ├── cpp03 │ ├── CMakeLists.txt │ ├── fetch.cpp │ ├── metadata.cpp │ ├── offset.cpp │ ├── offset_fetch.cpp │ └── produce.cpp └── cpp11 │ ├── CMakeLists.txt │ ├── fetch.cpp │ ├── metadata.cpp │ ├── offset.cpp │ ├── offset_fetch.cpp │ └── produce.cpp ├── lib └── libkafka_asio │ ├── connection.h │ ├── connection_configuration.h │ ├── constants.h │ ├── consumer_metadata_request.h │ ├── consumer_metadata_response.h │ ├── detail │ ├── basic_connection.h │ ├── bytes_streambuf.h │ ├── compression.h │ ├── compression_gz.h │ ├── compression_snappy.h │ ├── connection_service.h │ ├── endian.h │ ├── fetch_response_iterator.h │ ├── functional.h │ ├── impl │ │ ├── bytes_streambuf.h │ │ ├── compression.h │ │ ├── compression_gz.h │ │ ├── compression_snappy.h │ │ ├── connection_service.h │ │ ├── consumer_metadata_request_write.h │ │ ├── consumer_metadata_response_read.h │ │ ├── fetch_request_write.h │ │ ├── fetch_response_read.h │ │ ├── message_read.h │ │ ├── message_write.h │ │ ├── metadata_request_write.h │ │ ├── metadata_response_read.h │ │ ├── offset_commit_request_write.h │ │ ├── offset_commit_response_read.h │ │ ├── offset_fetch_request_write.h │ │ ├── offset_fetch_response_read.h │ │ ├── offset_request_write.h │ │ ├── offset_response_read.h │ │ ├── produce_request_write.h │ │ ├── produce_response_read.h │ │ ├── recursive_messageset_iterator.h │ │ ├── request_write.h │ │ └── response_read.h │ ├── recursive_messageset_iterator.h │ ├── request_write.h │ ├── response_read.h │ ├── topics_partitions.h │ └── weak_impl_handler.h │ ├── error.h │ ├── fetch_request.h │ ├── fetch_response.h │ ├── impl │ ├── connection_configuration.h │ ├── consumer_metadata_request.h │ ├── consumer_metadata_response.h │ ├── fetch_request.h │ ├── fetch_response.h │ ├── message.h │ ├── metadata_request.h │ ├── metadata_response.h │ ├── offset_commit_request.h │ ├── offset_commit_response.h │ ├── offset_fetch_request.h │ ├── offset_fetch_response.h │ ├── offset_request.h │ ├── offset_response.h │ ├── produce_request.h │ └── produce_response.h │ ├── libkafka_asio.h │ ├── message.h │ ├── message_fwd.h │ ├── metadata_request.h │ ├── metadata_response.h │ ├── offset_commit_request.h │ ├── offset_commit_response.h │ ├── offset_fetch_request.h │ ├── offset_fetch_response.h │ ├── offset_request.h │ ├── offset_response.h │ ├── primitives.h │ ├── produce_request.h │ ├── produce_response.h │ ├── request.h │ └── response.h ├── mkdocs.yml └── test ├── CMakeLists.txt └── src ├── StreamTest.h ├── connection_configuration_test.cpp ├── detail ├── compression_gz_test.cpp ├── compression_snappy_test.cpp ├── consumer_metadata_request_write_test.cpp ├── fetch_request_write_test.cpp ├── fetch_response_iterator_test.cpp ├── functional_test.cpp ├── metadata_request_write_test.cpp ├── offset_commit_request_write_test.cpp ├── 
offset_fetch_request_write_test.cpp ├── offset_request_write_test.cpp ├── produce_request_write_test.cpp ├── recursive_messageset_iterator_test.cpp ├── request_write_test.cpp └── response_read_test.cpp ├── error_test.cpp ├── fetch_request_test.cpp ├── libkafka_asio_test.cpp ├── message_test.cpp └── metadata_response_test.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | *.obj 6 | 7 | # Precompiled Headers 8 | *.gch 9 | *.pch 10 | 11 | # Compiled Dynamic libraries 12 | *.so 13 | *.dylib 14 | *.dll 15 | 16 | # Fortran module files 17 | *.mod 18 | 19 | # Compiled Static libraries 20 | *.lai 21 | *.la 22 | *.a 23 | *.lib 24 | 25 | # Executables 26 | *.exe 27 | *.out 28 | *.app 29 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | compiler: 3 | - gcc 4 | - clang 5 | before_install: 6 | - sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y 7 | - sudo apt-get update -qq 8 | install: 9 | - sudo apt-get install g++-4.9 boost1.55 libgtest-dev libsnappy-dev 10 | - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.9 90 11 | - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.9 90 12 | - "cd /usr/src/gtest && sudo cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ . && sudo cmake --build . && sudo mv libg* /usr/local/lib/ ; cd -" 13 | before_script: 14 | - mkdir build 15 | - cd build 16 | - cmake -DNO_CXX11=ON -DCMAKE_C_COMPILER=$CC -DCMAKE_CXX_COMPILER=$CXX .. 17 | script: 18 | - cmake --build . 19 | - test/libkafka_asio_test 20 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.6) 2 | project(libkafka_asio) 3 | 4 | enable_testing() 5 | 6 | add_subdirectory("${PROJECT_SOURCE_DIR}/examples") 7 | add_subdirectory("${PROJECT_SOURCE_DIR}/test") 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Daniel Joos 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # libkafka-asio 2 | C++ Kafka Client Library using Boost Asio 3 | 4 | [![Build Status](https://travis-ci.org/danieljoos/libkafka-asio.svg?branch=master)](https://travis-ci.org/danieljoos/libkafka-asio) 5 | [![Documentation Status](https://readthedocs.org/projects/libkafka-asio/badge/?version=latest)](https://readthedocs.org/projects/libkafka-asio/?badge=latest) 6 | 7 | ## Introduction 8 | 9 | `libkafka-asio` is a C++ header-only library, implementing the Kafka client 10 | protocol. All Kafka APIs, including offset commit/fetch, are implemented: 11 | 12 | * [Metadata](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-MetadataAPI) 13 | * [Produce](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ProduceAPI) 14 | * [Fetch](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-FetchAPI) 15 | * [Offset](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI) 16 | * [Offset Commit/Fetch](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI) 17 | 18 | It supports GZIP and Snappy message compression and therefore relies on the [zlib](http://www.zlib.net/) and [Snappy](https://code.google.com/p/snappy/) libraries. 19 | Message compression can optionally be turned off. 20 | 21 | The library was tested on Linux (GCC 4.9, clang 3.5.1) and Windows (MSVC 10, 11, 12). There is a set of unit tests available under [`test`](test/). 22 | 23 | ## Usage 24 | 25 | Add the [`lib`](lib/) directory to your include paths and: 26 | ``` 27 | #include 28 | ``` 29 | Please see the [`examples`](examples/) directory for some examples on how to use the library. 30 | 31 | Also consult the documentation: [libkafka-asio Reference](http://libkafka-asio.rtfd.org/) 32 | 33 | ## Dependencies 34 | 35 | `libkafka-asio` depends on the [Boost C++ libraries](http://www.boost.org/) -- specially on Boost Asio. The following Boost sub-libraries are explicitly used in the project: 36 | 37 | * [boost asio](http://www.boost.org/doc/libs/release/doc/html/boost_asio.html) 38 | * [boost bind](http://www.boost.org/doc/libs/release/libs/bind/bind.html) 39 | * [boost crc](http://www.boost.org/doc/libs/release/libs/crc/) 40 | * [boost foreach](http://www.boost.org/doc/libs/release/doc/html/foreach.html) 41 | * [boost integer](http://www.boost.org/doc/libs/release/libs/integer/doc/html/index.html) 42 | * [boost iterator](http://www.boost.org/doc/libs/release/libs/iterator/doc/index.html) 43 | * [boost optional](http://www.boost.org/doc/libs/release/libs/optional/doc/html/index.html) 44 | * [boost smart ptr](http://www.boost.org/doc/libs/release/libs/smart_ptr/smart_ptr.htm) 45 | * [boost system](http://www.boost.org/doc/libs/release/libs/system/doc/index.html) 46 | 47 | You need to link against `boost_thread` and `boost_system`. 48 | 49 | So installing the boost library package on your distribution should do the trick (e.g. `apt-get install libboost-dev` on Ubuntu, or `pacman -S boost` on Arch). 
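Putting it together, a minimal program that sends a single request looks roughly like the sketch below. It follows the bundled examples; the broker address and topic name are placeholders, and the include path assumes the [`lib`](lib/) directory is on your include paths as described above.

```cpp
#include <iostream>
#include <boost/asio.hpp>
#include <libkafka_asio/libkafka_asio.h>

using libkafka_asio::Connection;
using libkafka_asio::MetadataRequest;
using libkafka_asio::MetadataResponse;

// Handler, called once the metadata response arrived (or an error occurred)
void HandleMetadata(const Connection::ErrorCodeType& err,
                    const MetadataResponse::OptionalType& response)
{
  if (err || !response)
  {
    std::cerr
      << "Error: " << boost::system::system_error(err).what() << std::endl;
    return;
  }
  std::cout << "Received metadata response" << std::endl;
}

int main()
{
  Connection::Configuration configuration;
  configuration.auto_connect = true;
  configuration.client_id = "example_client";            // placeholder
  configuration.SetBrokerFromString("localhost:9092");   // placeholder

  boost::asio::io_service ios;
  Connection connection(ios, configuration);

  MetadataRequest request;
  request.AddTopicName("mytopic");                       // placeholder topic
  connection.AsyncRequest(request, &HandleMetadata);

  ios.run();  // The handler is invoked from within the io_service loop
  return 0;
}
```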
50 | -------------------------------------------------------------------------------- /cmake/Modules/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | find_path( 2 | SNAPPY_INCLUDE_DIR 3 | NAMES snappy.h 4 | HINTS ${SNAPPY_ROOT_DIR}/include) 5 | 6 | find_library( 7 | SNAPPY_LIBRARIES 8 | NAMES snappy 9 | HINTS ${SNAPPY_ROOT_DIR}/lib) 10 | 11 | include(FindPackageHandleStandardArgs) 12 | 13 | find_package_handle_standard_args( 14 | Snappy DEFAULT_MSG 15 | SNAPPY_LIBRARIES 16 | SNAPPY_INCLUDE_DIR) 17 | 18 | mark_as_advanced( 19 | SNAPPY_ROOT_DIR 20 | SNAPPY_LIBRARIES 21 | SNAPPY_INCLUDE_DIR) 22 | -------------------------------------------------------------------------------- /docs/build_configuration.md: -------------------------------------------------------------------------------- 1 | 2 | # Build Configuration Options 3 | 4 | ## Compression Options 5 | 6 | For handling compressed messages, _libkafka-asio_ relies on common compression 7 | libraries: 8 | [zlib](http://www.zlib.net/) and 9 | [snappy](https://code.google.com/p/snappy/). 10 | Of course, this means that an application that uses _libkafka-asio_ must link 11 | against those compression libraries. If, for some reason, you do not 12 | want to do this, there are several build configuration options available to 13 | either turn off specific compression algorithms or compression handling at all. 14 | 15 | ### Turn off handling of compressed messages 16 | 17 | ```cpp 18 | #define LIBKAFKAASIO_NO_COMPRESSION 19 | ``` 20 | 21 | ### Turn off GZIP compression algorithm 22 | 23 | ```cpp 24 | #define LIBKAFKAASIO_NO_COMPRESSION_GZIP 25 | ``` 26 | 27 | ### Turn off Snappy compression algorithm 28 | 29 | ```cpp 30 | #define LIBKAFKAASIO_NO_COMPRESSION_SNAPPY 31 | ``` 32 | -------------------------------------------------------------------------------- /docs/connection_configuration.md: -------------------------------------------------------------------------------- 1 | 2 | # struct `ConnectionConfiguration` 3 | 4 | The data structure, used for configuring a connection object. 5 | 6 | ## Member Fields 7 | 8 | ### message_max_bytes 9 | 10 | ```cpp 11 | Int32 message_max_bytes 12 | ``` 13 | 14 | Maximum number of bytes to transmit for messages. The default value is 4 MB. 15 | 16 | ### socket_timeout 17 | 18 | ```cpp 19 | unsigned int socket_timeout 20 | ``` 21 | 22 | Timeout in milliseconds for socket operations. The default value is 1 minute. 23 | 24 | ### client_id 25 | 26 | ```cpp 27 | String client_id 28 | ``` 29 | 30 | Client identification string. The default value is `libkafka_asio`. 31 | 32 | ### broker_address 33 | 34 | ```cpp 35 | BrokerAddress::OptionalType broker_address 36 | ``` 37 | 38 | The optional broker address, used in case `auto_connect` is enabled. 39 | 40 | ### auto_connect 41 | 42 | ```cpp 43 | bool auto_connect 44 | ``` 45 | 46 | If set to `true`, the connection will try to automatically connect to one of the 47 | known Kafka servers. 48 | 49 | ## Member Functions 50 | 51 | ### SetBrokerFromString 52 | 53 | ```cpp 54 | void SetBrokerFromString(const std::string& string) 55 | ``` 56 | 57 | Set the auto-connect broker address from string. If the string contains a 58 | colon, the part before the colon is interpreted as hostname and the part 59 | after that character is interpreted as service name. Example: `localhost:9092`. 
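A configuration is typically filled in like in the following sketch; the bundled examples use the same fields through `Connection::Configuration`. The client id and broker address are placeholders.

```cpp
ConnectionConfiguration configuration;
configuration.auto_connect = true;                    // connect on first request
configuration.client_id = "my_client";                // placeholder client id
configuration.socket_timeout = 10000;                 // milliseconds
configuration.message_max_bytes = 4 * 1024 * 1024;    // 4 MB (the default)
configuration.SetBrokerFromString("localhost:9092");  // hostname:service
```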
60 | 61 | ## Types 62 | 63 | ### BrokerAddress 64 | 65 | ```cpp 66 | struct BrokerAddress 67 | ``` 68 | 69 | * `hostname`: 70 | Broker hostname 71 | * `service`: 72 | Broker service (e.g. the port number). 73 | 74 | ### BrokerAddress::OptionalType 75 | 76 | ```cpp 77 | typedef boost::optional OptionalType 78 | ``` 79 | 80 | Optional broker address. 81 | -------------------------------------------------------------------------------- /docs/consumer_metadata_request.md: -------------------------------------------------------------------------------- 1 | 2 | # class `ConsumerMetadataRequest` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka ConsumerMetadataRequest as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ConsumerMetadataRequest). 10 | ConsumerMetadata requests are used to discover the current offset coordinator 11 | of a Kafka consumer group. 12 | 13 | ## Member Functions 14 | 15 | ### set_consumer_group 16 | 17 | ```cpp 18 | void set_consumer_group(const String& consumer_group) 19 | ``` 20 | 21 | The request will try to discover the offset coordinator for the consumer group 22 | specified. 23 | 24 | ## Types 25 | 26 | ### ResponseType 27 | 28 | ```cpp 29 | typedef ConsumerMetadataResponse ResponseType 30 | ``` 31 | 32 | Type of the response object of a ConsumerMetadata request. 33 | 34 | ### MutableResponseType 35 | 36 | ```cpp 37 | typedef MutableConsumerMetadataResponse MutableResponseType 38 | ``` 39 | 40 | Type of a mutable response object for a consumer metadata request. This type is 41 | used by the library at when reading-in the response from a Kafka server. 42 | -------------------------------------------------------------------------------- /docs/consumer_metadata_response.md: -------------------------------------------------------------------------------- 1 | 2 | # class `ConsumerMetadataResponse` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka ConsumerMetadataResponse as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ConsumerMetadataResponse). 10 | An object of this type will be given as response object to the handler function 11 | when invoking a ConsumerMetadata request. 12 | 13 | ## Member Functions 14 | 15 | ### error_code 16 | 17 | ```cpp 18 | Int16 error_code() const 19 | ``` 20 | 21 | Returns the error code, received from the Kafka broker. The error code is zero 22 | in case of no error. 23 | 24 | ### coordinator_id 25 | 26 | ```cpp 27 | Int32 coordinator_id() const 28 | ``` 29 | 30 | Broker ID of the coordinator for the requested consumer group. 31 | 32 | ### coordinator_host 33 | 34 | ```cpp 35 | const String& coordinator_host() const 36 | ``` 37 | 38 | Hostname of the coordinator for the requested consumer group. 39 | 40 | ### coordinator_port 41 | 42 | ```cpp 43 | Int32 coordinator_port() const 44 | ``` 45 | 46 | Port number of the coordinator for the requested consumer group. 47 | 48 | ## Types 49 | 50 | ### OptionalType 51 | 52 | ```cpp 53 | typedef boost::optional OptionalType 54 | ``` 55 | 56 | A consumer metadata response object wrapped using _Boost optional_. Such an 57 | object will be used for consumer metadata request handler functions. 
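For example, a handler might extract the coordinator address like in the sketch below. Following the convention of the other samples, `response` is assumed to be the handler argument of a ConsumerMetadata request.

```cpp
// Assume the response is an argument of the request handler function
ConsumerMetadataResponse::OptionalType response;

if (response && response->error_code() == 0)
{
  // The discovered coordinator can now be used as broker address for
  // offset commit/fetch requests of this consumer group.
  std::cout
    << "Offset coordinator: "
    << response->coordinator_host() << ":" << response->coordinator_port()
    << " (broker id " << response->coordinator_id() << ")"
    << std::endl;
}
```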
58 | -------------------------------------------------------------------------------- /docs/extra.css: -------------------------------------------------------------------------------- 1 | .rst-content h2 { 2 | padding-top: 10px; 3 | border-top: 1px solid #e1e4e5; 4 | } 5 | 6 | .rst-content h3 { 7 | margin-top: 40px; 8 | margin-bottom: 5px; 9 | } 10 | 11 | .rst-content h3+pre { 12 | margin-top: 2px; 13 | margin-bottom: 5px; 14 | } 15 | -------------------------------------------------------------------------------- /docs/fetch_response.md: -------------------------------------------------------------------------------- 1 | 2 | # class `FetchResponse` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka FetchResponse as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-FetchResponse). 10 | An object of this type will be given as response object to the handler function 11 | when invoking a fetch request. 12 | 13 | 17 | 18 | ## Member Functions 19 | 20 | ### begin 21 | 22 | ```cpp 23 | const_iterator begin() const 24 | ``` 25 | 26 | Creates and returns an iterator object that can be used for iterating over all 27 | received messages. This function returns the start iterator object. 28 | 29 | ```cpp 30 | // Assume the response is an argument of the request handler function 31 | FetchResponse::OptionalType response; 32 | 33 | // C++11 range-based for loop 34 | for (auto message : *response) 35 | { 36 | // Do something with the message... 37 | } 38 | ``` 39 | 40 | ```cpp 41 | // STL for_each: Call a function for each received Message 42 | std::for_each(response->begin(), response->end(), &PrintMessage); 43 | ``` 44 | 45 | ```cpp 46 | // 'traditional' iteration 47 | FetchResponse::const_iterator iter = response->begin(); 48 | FetchResponse::const_iterator end_iter = response->end(); 49 | for (; iter != end_iter; ++iter) 50 | { 51 | // Again, do something... 52 | } 53 | ``` 54 | 55 | ### end 56 | 57 | ```cpp 58 | const_iterator end() const 59 | ``` 60 | 61 | End iterator (see start iterator description above). Similar to default 62 | construction of the `const_iterator` type. 63 | 64 | ### topics 65 | 66 | ```cpp 67 | const Topics& topics() const 68 | ``` 69 | 70 | Returns a reference to the set of topics, messages have been received for. 71 | 72 | ## Types 73 | 74 | ### Topic 75 | 76 | ```cpp 77 | struct Topic { 78 | Partitions partitions; 79 | } 80 | ``` 81 | 82 | * `partitions`: 83 | Map of partitions of this topic for which message data has been received. 84 | 85 | ### Partition 86 | 87 | ```cpp 88 | struct Partition { 89 | Int16 error_code; 90 | Int64 highwater_mark_offset; 91 | MessageSet messages; 92 | } 93 | ``` 94 | 95 | * `error_code`: 96 | Kafka error for this topic partition. 97 | * `highwater_mark_offset`: 98 | Offset at the end of the log for this partition on the server. 99 | * `messages`: 100 | The fetched messages. 101 | 102 | ### Topics 103 | 104 | ```cpp 105 | typedef std::map Topics 106 | ``` 107 | 108 | Map that associates a `Topic` object to the topic name. 109 | 110 | ### Partitions 111 | 112 | ```cpp 113 | typedef std::map Partitions 114 | ``` 115 | 116 | Map that associates a `Partition` object to the partition id. 117 | 118 | ### const_iterator 119 | 120 | ```cpp 121 | typedef defail::FetchResponseIterator const_iterator 122 | ``` 123 | 124 | Constant iterator type, used for iterating over all messages of a fetch 125 | response object. 
See `FetchResponseIterator` class template for details. 126 | 127 | ### OptionalType 128 | 129 | ```cpp 130 | typedef boost::optional OptionalType 131 | ``` 132 | 133 | A fetch response object wrapped using _Boost optional_. Such an object will 134 | be used for fetch request handler functions. 135 | -------------------------------------------------------------------------------- /docs/message.md: -------------------------------------------------------------------------------- 1 | 2 | # class `Message` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | A message communication object as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets). 10 | Messages consist of a key-value pair and some metadata, like CRC for checking 11 | the message integrity and some attributes for handling compression. 12 | 13 | Kafka handles compression by marshalling a complete set of messages, compressing 14 | it using one of the supported compression algorithms and finally putting it 15 | as the value of a single message, which has it's attributes set to the 16 | corresponding compression algorithm flag. Therefore, the `Message` class of 17 | _libkafka-asio_ defines a pointer to a possibly nested message set. 18 | When consuming compressed data from Kafka, the nested message set will hold 19 | the actual uncompressed messages. 20 | 21 |
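The following sketch illustrates this layout by walking a message set by hand and descending into the nested message set of compressed messages. `HandleValue` is a placeholder for application code.

```cpp
// Walk a message set; compressed messages carry their actual payload inside
// their nested message set.
void WalkMessageSet(const MessageSet& messages)
{
  MessageSet::const_iterator iter = messages.begin();
  for (; iter != messages.end(); ++iter)
  {
    if (!iter->nested_message_set().empty())
    {
      // Compressed message: the uncompressed messages are nested inside
      WalkMessageSet(iter->nested_message_set());
    }
    else
    {
      HandleValue(iter->value());  // placeholder for application code
    }
  }
}
```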
28 | 29 | ## Constructor 30 | 31 | ### Message 32 | 33 | ```cpp 34 | Message() 35 | ``` 36 | 37 | Creates a message with an empty key and empty value. 38 | 39 | ### Message (Copy Constructor) 40 | 41 | ```cpp 42 | Message(const Message& orig, bool deep = false) 43 | ``` 44 | 45 | Creates a message by copying the given original message. If the optional `deep` 46 | parameter is set to `true`, the byte arrays for `key` and `value` as well as the 47 | nested message set, will be copied, too. Otherwise, the new message object will 48 | point to the same `key`, `value` and nested message set on the heap. 49 | 50 | ### Message (Assignment Operator) 51 | 52 | ```cpp 53 | Message& operator= (const Message& rhs) 54 | ``` 55 | 56 | Flat-copy the given message. 57 | 58 | ## Member functions 59 | 60 | ### magic_byte 61 | 62 | ```cpp 63 | Int8 magic_byte() const; 64 | ``` 65 | 66 | Always returns zero. 67 | 68 | ### attributes 69 | 70 | ```cpp 71 | Int8 attributes() const; 72 | ``` 73 | 74 | Returns the attribute bitset. The lowest 2 bits indicate the compression 75 | algorithm. 76 | 77 | ### set_attributes 78 | 79 | ```cpp 80 | void set_attributes(Int8 attributes); 81 | ``` 82 | 83 | Sets the attributes byte of this message object. 84 | 85 | ### key 86 | 87 | ```cpp 88 | const Bytes& key() const; 89 | Bytes& mutable_key(); 90 | ``` 91 | 92 | Optional message key. Can be `NULL` (default). 93 | 94 | ### value 95 | 96 | ```cpp 97 | const Bytes& value() const; 98 | Bytes& mutable_value(); 99 | ``` 100 | 101 | Actual message data as byte array 102 | 103 | ### nested_message_set 104 | 105 | ```cpp 106 | const MessageSet& nested_message_set() const; 107 | MessageSet& mutable_nested_message_set(); 108 | ``` 109 | 110 | Compressed messages contain a nested message set (see description above). 111 | 112 | ### compression 113 | 114 | ```cpp 115 | constants::Compression compression() const; 116 | ``` 117 | 118 | Returns the compression algorithm, used for compressing the message value. The 119 | function only evaluates the lowest 2 bits of the attributes field. 120 | -------------------------------------------------------------------------------- /docs/message_and_offset.md: -------------------------------------------------------------------------------- 1 | 2 | # class `MessageAndOffset` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Adds offset information to the [`Message`](message) class. Used for message 9 | consumption from Kafka. 10 | 11 |
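In a fetch handler, this is the type yielded when iterating the response. A typical use of the offset information is remembering where the next fetch request should continue; a small sketch, with topic and partition as placeholders and `response` assumed to be the handler argument as in the other samples:

```cpp
// Assume the response is an argument of the request handler function
FetchResponse::OptionalType response;

Int64 next_offset = 0;
FetchResponse::const_iterator iter = response->begin();
FetchResponse::const_iterator end_iter = response->end();
for (; iter != end_iter; ++iter)
{
  const MessageAndOffset& message = *iter;
  next_offset = message.offset() + 1;  // continue after this message
  // ... handle message.value() ...
}
// A follow-up FetchRequest could now start at 'next_offset':
//   request.FetchTopic("mytopic", 0, next_offset);
```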
18 | 19 | ## Constructor 20 | 21 | ### MessageAndOffset (overload 1 of 2) 22 | 23 | ```cpp 24 | MessageAndOffset() 25 | ``` 26 | 27 | Creates an empty `MessageAndOffset` object. The offset information defaults 28 | to zero. 29 | 30 | ### MessageAndOffset (overload 2 of 2) 31 | 32 | ```cpp 33 | MessageAndOffset(const Message& message, Int64 offset) 34 | ``` 35 | 36 | Creates a `MessageAndOffset` object by copying the given `Message` object (flat 37 | copy) and using the given `offset` information. 38 | 39 | ## Member Functions 40 | 41 | ### offset 42 | 43 | ```cpp 44 | Int64 offset() const 45 | ``` 46 | 47 | Returns the offset information. 48 | 49 | ### set_offset 50 | 51 | ```cpp 52 | void set_offset(Int64 offset) 53 | ``` 54 | 55 | Sets the offset information. 56 | -------------------------------------------------------------------------------- /docs/message_set.md: -------------------------------------------------------------------------------- 1 | 2 | # Type `MessageSet` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | ```cpp 9 | typedef std::vector MessageSet; 10 | ``` 11 | 12 |
19 | -------------------------------------------------------------------------------- /docs/metadata_request.md: -------------------------------------------------------------------------------- 1 | 2 | # class `MetadataRequest` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka TopicMetadata request, as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-TopicMetadataRequest). 10 | Metadata requests can be used to determine information about topics and 11 | partitions on the connected broker/cluster. 12 | The leader for a topic partition can be retrieved via a metadata request. 13 | 14 | ## Member Functions 15 | 16 | ### AddTopicName 17 | 18 | ```cpp 19 | void AddTopicName(const String& topic_name) 20 | ``` 21 | 22 | Adds the given topic to this metadata request. 23 | This results in metadata being fetched for the given topic. 24 | 25 | ```cpp 26 | using libkafka_asio::MetadataRequest; 27 | MetadataRequest req; 28 | req.AddTopicName("foo"); 29 | req.AddTopicName("bar"); 30 | ``` 31 | 32 | ### Clear 33 | 34 | ```cpp 35 | void Clear() 36 | ``` 37 | 38 | Clears the list of topic names, added to this metadata request. 39 | 40 | ### topic_names 41 | 42 | ```cpp 43 | const TopicNameVector& topic_names() const 44 | ``` 45 | 46 | Returns a reference to the list of topic names of this metadata request. This 47 | method is mainly used internally for getting the request data during the 48 | conversion to the Kafka wire format. 49 | 50 | ## Types 51 | 52 | ### ResponseType 53 | 54 | ```cpp 55 | typedef MetadataResponse ResponseType 56 | ``` 57 | 58 | Type of the response object of a metadata request. 59 | 60 | ### MutableResponseType 61 | 62 | ```cpp 63 | typedef MutableMetadataResponse MutableResponseType 64 | ``` 65 | 66 | Type of a mutable response object for a metadata request. This type is used by 67 | the library at the time of reading in the response from a Kafka server. 68 | 69 | ### TopicNameVector 70 | 71 | ```cpp 72 | typedef std::vector TopicNameVector 73 | ``` 74 | 75 | Vector of topic name strings. 76 | -------------------------------------------------------------------------------- /docs/offset_commit_response.md: -------------------------------------------------------------------------------- 1 | 2 | # class `OffsetCommitResponse` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka OffsetCommitResponse as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommitResponse). 10 | An object of this type will be given as response object to the handler function 11 | when invoking an offset commit request. 12 | 13 | 17 | 18 | ## Member Functions 19 | 20 | ### topics 21 | 22 | ```cpp 23 | const Topics& topics() const 24 | ``` 25 | 26 | Returns the data of this API response object, sorted by topic. 27 | 28 | ## Types 29 | 30 | ### Topic 31 | 32 | ```cpp 33 | struct Topic { 34 | Partitions partitions; 35 | } 36 | ``` 37 | 38 | * `partitions`: 39 | The partition objects contained in this topic object. 40 | 41 | ### Partition 42 | 43 | ```cpp 44 | struct Partition { 45 | Int16 error_code; 46 | } 47 | ``` 48 | 49 | * `error_code`: 50 | Kafka error for this topic partition. 
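For example, a handler can walk the `Topics` and `Partitions` maps described below and report every partition whose commit failed. Following the convention of the other samples, `response` is assumed to be the handler argument.

```cpp
// Assume the response is an argument of the request handler function
OffsetCommitResponse::OptionalType response;

// C++11 range-based for loops
for (const auto& topic : response->topics())
{
  for (const auto& partition : topic.second.partitions)
  {
    if (partition.second.error_code != 0)
    {
      std::cerr
        << "Commit failed for " << topic.first
        << " partition " << partition.first
        << " (error " << partition.second.error_code << ")" << std::endl;
    }
  }
}
```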
51 | 52 | ### Topics 53 | 54 | ```cpp 55 | typedef std::map Topics 56 | ``` 57 | 58 | Map that associates the offset commit response part of topics to their topic names. 59 | 60 | ### Partitions 61 | 62 | ```cpp 63 | typedef std::map Partitions 64 | ``` 65 | 66 | Map that associates a `Partition` object to the partition id. 67 | 68 | ### OptionalType 69 | 70 | ```cpp 71 | typedef boost::optional OptionalType 72 | ``` 73 | 74 | A offset-commit response object wrapped using _Boost optional_. Such an object 75 | will be used for offset-commit request handler functions. 76 | -------------------------------------------------------------------------------- /docs/offset_fetch_request.md: -------------------------------------------------------------------------------- 1 | 2 | # class `OffsetFetchRequest` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka OffsetFetchRequest as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchRequest). 10 | Offset fetch requests are used to retrieve an offset value for one or more topic 11 | partitions of a Kafka consumer group. These kinds of requests require Kafka 12 | version 0.8.1.1 or above. Offset fetch requests must be sent to the current 13 | offset coordinator broker, which can be discovered using a ConsumerMetadata 14 | request. 15 | 16 | 20 | 21 | ## Member Functions 22 | 23 | ### FetchOffset 24 | 25 | ```cpp 26 | void FetchOffset(const String& topic_name, 27 | Int32 partition) 28 | ``` 29 | 30 | Fetch offset data for the given topic partition 31 | 32 | ### set_consumer_group 33 | 34 | ```cpp 35 | void set_consumer_group(const String& consumer_group) 36 | ``` 37 | 38 | Set the consumer group to fetch the offset data for. 39 | 40 | ### topics 41 | 42 | ```cpp 43 | const Topics& topics() const 44 | ``` 45 | 46 | Returns a reference to the list of topics of this offset fetch request. This 47 | method is mainly used internally for getting the request data during the 48 | conversion to the Kafka wire format. 49 | 50 | ### consumer_group 51 | 52 | ```cpp 53 | const String& consumer_group() const 54 | ``` 55 | 56 | Returns the consumer group string of this offset-fetch request. 57 | 58 | ## Types 59 | 60 | ### Topic 61 | 62 | ```cpp 63 | struct Topic { 64 | String topic_name; 65 | Partitions partitions; 66 | } 67 | ``` 68 | 69 | * `topic_name`: 70 | Name of the topic to fetch data for. 71 | * `partitions`: 72 | Set of partitions of this topic. 73 | 74 | ### Partition 75 | 76 | ```cpp 77 | struct Partition { 78 | Int32 partition; 79 | } 80 | ``` 81 | 82 | * `partition`: 83 | Number, identifying this topic partition. 84 | 85 | ### Topics 86 | 87 | ```cpp 88 | typedef std::vector Topics 89 | ``` 90 | 91 | Vector of `Topic` objects. 92 | 93 | ### Partitions 94 | 95 | ```cpp 96 | typedef std::vector Partitions 97 | ``` 98 | 99 | Vector of `Partition` objects. 100 | 101 | ### ResponseType 102 | 103 | ```cpp 104 | typedef OffsetFetchResponse ResponseType 105 | ``` 106 | 107 | Type of the response object of an offset fetch request. 108 | 109 | ### MutableResponseType 110 | 111 | ```cpp 112 | typedef MutableOffsetFetchResponse MutableResponseType 113 | ``` 114 | 115 | Type of a mutable response object for a offset fetch request. This type is used 116 | by the library at when reading-in the response from a Kafka server. 
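A request is typically prepared as in the following sketch and then sent, via `Connection::AsyncRequest`, to the offset coordinator discovered through a ConsumerMetadata request. The consumer group, topic and partition are placeholders, as are the connection object and handler named in the comment.

```cpp
OffsetFetchRequest request;
request.set_consumer_group("my_consumer_group");

// Fetch the stored offset of partition 0 of topic 'mytopic'
request.FetchOffset("mytopic", 0);

// Send the request to the offset coordinator of the consumer group
// (see the ConsumerMetadataRequest documentation):
//   coordinator_connection.AsyncRequest(request, &HandleOffsetFetch);
```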
117 | -------------------------------------------------------------------------------- /docs/offset_fetch_response.md: -------------------------------------------------------------------------------- 1 | 2 | # class `OffsetFetchResponse` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka OffsetFetchResponse as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchResponse). 10 | An object of this type will be given as response object to the handler function 11 | when invoking an offset fetch request. 12 | 13 | 17 | 18 | ## Member Functions 19 | 20 | ### topics 21 | 22 | ```cpp 23 | const Topics& topics() const 24 | ``` 25 | 26 | Returns a list of topics of this response 27 | 28 | ## Types 29 | 30 | ### Topic 31 | 32 | ```cpp 33 | struct Topic { 34 | Partitions partitions; 35 | } 36 | ``` 37 | 38 | * `partitions`: 39 | Set of partitions of this topic for which consumer group offset data has been 40 | received. 41 | 42 | ### Partition 43 | 44 | ```cpp 45 | struct Partition { 46 | Int64 offset; 47 | String metadata; 48 | Int16 error_code; 49 | } 50 | ``` 51 | 52 | * `offset`: 53 | The offset data, stored for this topic partition 54 | * `metadata`: 55 | The metadata string, stored for this topic partition 56 | * `error_code`: 57 | Kafka error for this topic partition. 58 | 59 | ### Topics 60 | 61 | ```cpp 62 | typedef std::map Topics 63 | ``` 64 | 65 | Map that associates the offset fetch response part of topics to their topic names. 66 | 67 | ### Partitions 68 | 69 | ```cpp 70 | typedef std::map Partitions 71 | ``` 72 | 73 | Map that associates a `Partition` object to the partition id. 74 | 75 | ### OptionalType 76 | 77 | ```cpp 78 | typedef boost::optional OptionalType 79 | ``` 80 | 81 | A offset-fetch response object wrapped using _Boost optional_. Such an object 82 | will be used for offset-fetch request handler functions. 83 | -------------------------------------------------------------------------------- /docs/offset_request.md: -------------------------------------------------------------------------------- 1 | 2 | # class `OffsetRequest` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka OffsetRequest as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest). 10 | Offset requests are used to get the valid range of offsets for a topic partition 11 | from a Kafka server. They must be send to the Kafka broker, which is currently 12 | acting as leader for that topic partition. 13 | 14 | 18 | 19 | ## Member Functions 20 | 21 | ### FetchTopicOffset 22 | 23 | ```cpp 24 | void FetchTopicOffset(const String& topic_name, 25 | Int32 partition, 26 | Int64 time, 27 | Int32 max_number_of_offsets) 28 | ``` 29 | 30 | Fetches offset information for the given topic partition. The optional `time` 31 | parameter can be used to ask for messages before a certain time in the past. 32 | Time must be specified in milliseconds. Two special values exist for this 33 | parameters: 34 | 35 | * `libkafka_asio::constants::kOffsetTimeLatest` (`-1`) (Default) and 36 | * `libkafka_asio::constants::kOffsetTimeEarliest` (`-2`). 37 | 38 | The maximum number of offsets to fetch can optionally be specified as well. 
39 | 40 | ```cpp 41 | OffsetRequest request; 42 | 43 | // Fetch latest offset for topic 'foo' partition 0 44 | request.FetchOffset("foo", 0); 45 | ``` 46 | 47 | ### Clear 48 | 49 | ```cpp 50 | void Clear() 51 | ``` 52 | 53 | Clears all entries of this request for fetching offsets of topic partitions. 54 | 55 | ### replica_id 56 | 57 | ```cpp 58 | Int32 replica_id() const 59 | ``` 60 | 61 | Always returns `-1`. 62 | 63 | ### topics 64 | 65 | ```cpp 66 | const Topics& topics() const 67 | ``` 68 | 69 | Returns a reference to the list of topics of this offset request. This 70 | method is mainly used internally for getting the request data during the 71 | conversion to the Kafka wire format. 72 | 73 | ## Types 74 | 75 | ### Topic 76 | 77 | ```cpp 78 | struct Topic { 79 | String topic_name; 80 | Partitions partitions; 81 | } 82 | ``` 83 | 84 | * `topic_name`: 85 | Name of the topic to fetch data for. 86 | * `partitions`: 87 | Set of partitions of this topic to fetch offset data for. 88 | 89 | ### Partition 90 | 91 | ```cpp 92 | struct Partition { 93 | Int32 partition; 94 | Int64 time; 95 | Int32 max_number_of_offsets; 96 | } 97 | ``` 98 | 99 | * `partition`: 100 | Number, identifying this topic partition. 101 | * `time`: 102 | Time in milliseconds before current timestamp (see explanation above). 103 | * `max_number_of_offsets`: 104 | The maximum number of offsets to fetch for this topic partition. 105 | 106 | ### Topics 107 | 108 | ```cpp 109 | typedef std::vector Topics 110 | ``` 111 | 112 | Vector of `Topic` objects. 113 | 114 | ### Partitions 115 | 116 | ```cpp 117 | typedef std::vector Partitions 118 | ``` 119 | 120 | Vector of `Partition` objects. 121 | 122 | ### ResponseType 123 | 124 | ```cpp 125 | typedef OffsetResponse ResponseType 126 | ``` 127 | 128 | Type of the response object of an offset request. 129 | 130 | ### MutableResponseType 131 | 132 | ```cpp 133 | typedef MutableOffsetResponse MutableResponseType 134 | ``` 135 | 136 | Type of a mutable response object for a offset request. This type is used by 137 | the library at when reading-in the response from a Kafka server. 138 | -------------------------------------------------------------------------------- /docs/offset_response.md: -------------------------------------------------------------------------------- 1 | 2 | # class `OffsetResponse` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka OffsetResponse as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetResponse). 10 | An object of this type will be given as response object to the handler function 11 | when invoking an offset request. 12 | 13 | 17 | 18 | ## Member Functions 19 | 20 | ### TopicPartitionOffset 21 | 22 | ```cpp 23 | Topic::Partition::OptionalType TopicPartitionOffset(const String& topic_name, 24 | Int32 partition) const 25 | ``` 26 | 27 | Search for offset data for the given topic partition inside this response 28 | object. If no such data can be found, the return value is empty. 29 | 30 | ```cpp 31 | // Assume the response is an argument of the request handler function 32 | OffsetResponse::OptionalType response; 33 | 34 | // Get the offset data for topic 'foo' partition 1 35 | OffsetResponse::Topic::Partition::OptionalType offsets; 36 | offsets = response->TopicPartitionOffset("foo", 1); 37 | if (offsets) 38 | { 39 | // [...] 
40 | } 41 | ``` 42 | 43 | ### topics 44 | 45 | ```cpp 46 | const Topics& topics() const 47 | ``` 48 | 49 | Returns a reference to the set of topics, offsets have been received for. 50 | 51 | ## Types 52 | 53 | ### Topic 54 | 55 | ```cpp 56 | struct Topic { 57 | Partitions partitions; 58 | } 59 | ``` 60 | 61 | * `partitions`: 62 | Set of partitions of this topic for which offset data has been received. 63 | 64 | ### Partition 65 | 66 | ```cpp 67 | struct Partition { 68 | Int16 error_code; 69 | std::vector offsets; 70 | } 71 | ``` 72 | 73 | * `error_code`: 74 | Kafka error for this topic partition. 75 | * `offsets`: 76 | Vector of offsets (`std::vector`) received for this topic partition. 77 | * `partition`: 78 | Number, identifying this topic partition. 79 | 80 | ### Topics 81 | 82 | ```cpp 83 | typedef std::map Topics 84 | ``` 85 | 86 | Map that associates the offset response part of topics to their topic names. 87 | 88 | ### Partitions 89 | 90 | ```cpp 91 | typedef std::map Partitions 92 | ``` 93 | 94 | Map that associates a `Partition` object to the partition id. 95 | 96 | ### OptionalType 97 | 98 | ```cpp 99 | typedef boost::optional OptionalType 100 | ``` 101 | 102 | A offset response object wrapped using _Boost optional_. Such an object will 103 | be used for offset request handler functions. 104 | -------------------------------------------------------------------------------- /docs/primitives.md: -------------------------------------------------------------------------------- 1 | 2 | # Kafka Protocol Primitives 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Find below the type definitions, used by _libkafka-asio_ to represent the 9 | primitive types of the Kafka protocol as described on the 10 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ProtocolPrimitiveTypes). 11 | 12 | ## Types 13 | 14 | ### Int8 15 | 16 | ```cpp 17 | typedef boost::int_t<8>::exact Int8 18 | ``` 19 | 20 | ### Int16 21 | 22 | ```cpp 23 | typedef boost::int_t<16>::exact Int16 24 | ``` 25 | 26 | ### Int32 27 | 28 | ```cpp 29 | typedef boost::int_t<32>::exact Int32 30 | ``` 31 | 32 | ### Int64 33 | 34 | ```cpp 35 | typedef boost::int_t<64>::exact Int64 36 | ``` 37 | 38 | ### Byte 39 | 40 | ```cpp 41 | typedef boost::uint_t<8>::exact Byte 42 | ``` 43 | 44 | ### String 45 | 46 | ```cpp 47 | typedef std::string String 48 | ``` 49 | 50 | ### Bytes 51 | 52 | ```cpp 53 | typedef boost::shared_ptr > Bytes 54 | ``` 55 | -------------------------------------------------------------------------------- /docs/produce_response.md: -------------------------------------------------------------------------------- 1 | 2 | # class `ProduceResponse` 3 | 4 | **Header File:** `` 5 | 6 | **Namespace:** `libkafka_asio` 7 | 8 | Implementation of the Kafka ProduceResponse as described on the 9 | [Kafka wiki](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ProduceResponse). 10 | An object of this type will be given as response object to the handler function 11 | when invoking a produce request and the request expected the server to send a 12 | response (which is not always the case for a ProduceRequest). 13 | 14 | 18 | 19 | ## Member Functions 20 | 21 | ### topics 22 | 23 | ```cpp 24 | const Topics& topics() const 25 | ``` 26 | 27 | Returns a reference to the list of topics, for which messages have been 28 | produced for. See the description of `Topic` type below. 
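For example, a handler can inspect the per-partition error code and the offset assigned to the produced messages, using the `Topics` and `Partitions` maps described below. Following the convention of the other samples, `response` is assumed to be the handler argument.

```cpp
// Assume the response is an argument of the request handler function
ProduceResponse::OptionalType response;

// C++11 range-based for loops
for (const auto& topic : response->topics())
{
  for (const auto& partition : topic.second.partitions)
  {
    if (partition.second.error_code != 0)
    {
      std::cerr
        << topic.first << "/" << partition.first
        << ": error " << partition.second.error_code << std::endl;
      continue;
    }
    std::cout
      << topic.first << "/" << partition.first
      << ": first message stored at offset " << partition.second.offset
      << std::endl;
  }
}
```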
29 | 30 | ## Types 31 | 32 | ### Topic 33 | 34 | ```cpp 35 | struct Topic { 36 | Partitions partitions; 37 | } 38 | ``` 39 | 40 | * `partitions`: 41 | Map of `Partition` objects, for which messages have been produced. 42 | 43 | ### Partition 44 | 45 | ```cpp 46 | struct Partition { 47 | Int16 error_code; 48 | Int64 offset; 49 | } 50 | ``` 51 | 52 | * `error_code`: 53 | Kafka error code for this partition, if any. 54 | * `offset`: 55 | Offset assigned to the first message in the set of messages produced for this 56 | partition. 57 | 58 | ### Topics 59 | 60 | ```cpp 61 | typedef std::map Topics 62 | ``` 63 | 64 | Map that associates the offset response part of topics to their topic names. 65 | 66 | ### Partitions 67 | 68 | ```cpp 69 | typedef std::map Partitions 70 | ``` 71 | 72 | Map that associates a `Partition` object to the partition id. 73 | 74 | ### OptionalType 75 | 76 | ```cpp 77 | typedef boost::optional OptionalType 78 | ``` 79 | 80 | A produce response object wrapped using _Boost optional_. Such an object will 81 | be used for produce request handler functions. 82 | -------------------------------------------------------------------------------- /examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.6) 2 | project(libkafka_asio_examples) 3 | 4 | add_subdirectory("${PROJECT_SOURCE_DIR}/cpp03") 5 | 6 | option(NO_CXX11, "Disable build of C++11 examples") 7 | if (NOT NO_CXX11) 8 | add_subdirectory("${PROJECT_SOURCE_DIR}/cpp11") 9 | endif () 10 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | Here you can find some examples showing how to use `libkafka-asio`. 4 | 5 | * [`cpp03`](cpp03): 6 | Examples using the C++03 language and features. This should also work with 7 | rather old compiler versions. 8 | * [`cpp11`](cpp11): 9 | Showcasing some C++11 language features. Requires newer compilers and at 10 | least CMake version 3.1 11 | 12 | The following examples are available in the above folders: 13 | 14 | - [`fetch.cpp`](cpp03/fetch.cpp): 15 | Use a fetch request to get messages for a topic partition. 16 | ([C++03](cpp03/fetch.cpp), 17 | [C++11](cpp11/fetch.cpp)) 18 | - [`metadata.cpp`](cpp03/metadata.cpp): 19 | Get topic metadata to determine leading brokers. 20 | ([C++03](cpp03/metadata.cpp), 21 | [C++11](cpp11/metadata.cpp)) 22 | - [`offset.cpp`](cpp03/offset.cpp) 23 | Get the current offset number of a topic partition. 24 | ([C++03](cpp03/offset.cpp), 25 | [C++11](cpp11/offset.cpp)) 26 | - [`offset_fetch.cpp`](cpp03/offset_fetch.cpp): 27 | Shows how to get offset data for a topic in a consumer group. Also 28 | illustrates the use of futures and promises. 29 | ([C++03](cpp03/offset_fetch.cpp), 30 | [C++11](cpp11/offset_fetch.cpp)) 31 | - [`produce.cpp`](cpp03/produce.cpp): 32 | Produce a _Hello World_ message on a topic partition. 
33 | ([C++03](cpp03/produce.cpp), 34 | [C++11](cpp11/produce.cpp)) 35 | -------------------------------------------------------------------------------- /examples/cpp03/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.6) 2 | project(libkafka_asio_examples_cxx03) 3 | 4 | add_definitions("-DLIBKAFKAASIO_NO_COMPRESSION") 5 | 6 | find_package(Boost REQUIRED COMPONENTS thread system) 7 | 8 | include_directories( 9 | ${PROJECT_SOURCE_DIR}/../../lib 10 | ${Boost_INCLUDE_DIRS}) 11 | 12 | set(examples_LINK_LIBRARIES ${Boost_LIBRARIES}) 13 | 14 | if (UNIX) 15 | list(APPEND examples_LINK_LIBRARIES pthread) 16 | endif () 17 | 18 | add_executable(produce_cxx03 ${PROJECT_SOURCE_DIR}/produce.cpp) 19 | target_link_libraries(produce_cxx03 ${examples_LINK_LIBRARIES}) 20 | 21 | add_executable(fetch_cxx03 ${PROJECT_SOURCE_DIR}/fetch.cpp) 22 | target_link_libraries(fetch_cxx03 ${examples_LINK_LIBRARIES}) 23 | 24 | add_executable(metadata_cxx03 ${PROJECT_SOURCE_DIR}/metadata.cpp) 25 | target_link_libraries(metadata_cxx03 ${examples_LINK_LIBRARIES}) 26 | 27 | add_executable(offset_cxx03 ${PROJECT_SOURCE_DIR}/offset.cpp) 28 | target_link_libraries(offset_cxx03 ${examples_LINK_LIBRARIES}) 29 | 30 | add_executable(offset_fetch_cxx03 ${PROJECT_SOURCE_DIR}/offset_fetch.cpp) 31 | target_link_libraries(offset_fetch_cxx03 ${examples_LINK_LIBRARIES}) 32 | -------------------------------------------------------------------------------- /examples/cpp03/fetch.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/fetch_cxx03.cpp 3 | // ------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to create a 'FetchRequest' to get messages for a 12 | // specific Topic & partition. On success, all received messages will be print 13 | // to stdout. 
14 | // 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | using libkafka_asio::Connection; 22 | using libkafka_asio::FetchRequest; 23 | using libkafka_asio::FetchResponse; 24 | using libkafka_asio::MessageAndOffset; 25 | 26 | std::string BytesToString(const libkafka_asio::Bytes& bytes) 27 | { 28 | if (!bytes || bytes->empty()) 29 | { 30 | return ""; 31 | } 32 | return std::string((const char*) &(*bytes)[0], bytes->size()); 33 | }; 34 | 35 | void PrintMessage(const MessageAndOffset& message) 36 | { 37 | std::cout << BytesToString(message.value()) << std::endl; 38 | } 39 | 40 | void HandleFetch(const Connection::ErrorCodeType& err, 41 | const FetchResponse::OptionalType& response) 42 | { 43 | if (err) 44 | { 45 | std::cerr 46 | << "Error: " << boost::system::system_error(err).what() 47 | << std::endl; 48 | return; 49 | } 50 | std::for_each(response->begin(), response->end(), &PrintMessage); 51 | } 52 | 53 | int main(int argc, char **argv) 54 | { 55 | Connection::Configuration configuration; 56 | configuration.auto_connect = true; 57 | configuration.client_id = "libkafka_asio_example"; 58 | configuration.socket_timeout = 10000; 59 | configuration.SetBrokerFromString("192.168.15.137:49162"); 60 | 61 | boost::asio::io_service ios; 62 | Connection connection(ios, configuration); 63 | 64 | // Create a 'Fetch' request and try to get data for partition 0 of topic 65 | // 'mytopic', starting with offset 1 66 | FetchRequest request; 67 | request.FetchTopic("mytopic", 0, 1); 68 | 69 | // Send the prepared fetch request. 70 | // The connection will attempt to automatically connect to the broker, 71 | // specified in the configuration. 72 | connection.AsyncRequest(request, &HandleFetch); 73 | 74 | // Let's go! 75 | ios.run(); 76 | return 0; 77 | } 78 | -------------------------------------------------------------------------------- /examples/cpp03/metadata.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/metadata_cxx03.cpp 3 | // --------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to create a 'TopicMetadataRequest'. It can be used 12 | // to determine the leader for a specific topic-partition. 13 | // On success, this example prints the leader host:port to stdout. Errors will 14 | // be printed to stderr. 15 | // 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | using libkafka_asio::Connection; 22 | using libkafka_asio::MetadataRequest; 23 | using libkafka_asio::MetadataResponse; 24 | 25 | void HandleRequest(const Connection::ErrorCodeType& err, 26 | const MetadataResponse::OptionalType& response) 27 | { 28 | if (err || !response) 29 | { 30 | std::cerr 31 | << "Error: " << boost::system::system_error(err).what() 32 | << std::endl; 33 | return; 34 | } 35 | // Find the leader for topic 'mytopic' and partition 1 36 | MetadataResponse::Broker::OptionalType leader = 37 | response->PartitionLeader("mytopic", 1); 38 | if (!leader) 39 | { 40 | std::cerr << "No leader found!" 
<< std::endl; 41 | return; 42 | } 43 | std::cout 44 | << "Found leader: " << leader->host << ":" << leader->port 45 | << std::endl; 46 | } 47 | 48 | int main(int argc, char** argv) 49 | { 50 | Connection::Configuration configuration; 51 | configuration.auto_connect = true; 52 | configuration.client_id = "libkafka_asio_example"; 53 | configuration.socket_timeout = 2000; 54 | configuration.SetBrokerFromString("localhost"); 55 | 56 | boost::asio::io_service ios; 57 | Connection connection(ios, configuration); 58 | 59 | MetadataRequest request; 60 | request.AddTopicName("mytopic"); 61 | 62 | connection.AsyncRequest(request, &HandleRequest); 63 | 64 | ios.run(); 65 | return 0; 66 | } 67 | -------------------------------------------------------------------------------- /examples/cpp03/offset.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/offset_cxx03.cpp 3 | // ------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to create a 'OffsetRequest' to get the information 12 | // about the latest offset of a specific topic-partition 13 | // On success, this example prints the retrieved offset to stdout. Errors will 14 | // be printed to stderr. 15 | // 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | using libkafka_asio::Connection; 22 | using libkafka_asio::OffsetRequest; 23 | using libkafka_asio::OffsetResponse; 24 | 25 | void HandleRequest(const Connection::ErrorCodeType& err, 26 | const OffsetResponse::OptionalType& response) 27 | { 28 | if (err || !response) 29 | { 30 | std::cerr 31 | << "Error: " << boost::system::system_error(err).what() 32 | << std::endl; 33 | return; 34 | } 35 | OffsetResponse::Partition::OptionalType partition = 36 | response->TopicPartitionOffset("mytopic", 1); 37 | if (!partition || partition->offsets.empty()) 38 | { 39 | std::cerr << "Failed to fetch offset!" << std::endl; 40 | return; 41 | } 42 | std::cout 43 | << "Received latest offset: " << partition->offsets[0] 44 | << std::endl; 45 | } 46 | 47 | int main(int argc, char** argv) 48 | { 49 | Connection::Configuration configuration; 50 | configuration.auto_connect = true; 51 | configuration.client_id = "libkafka_asio_example"; 52 | configuration.socket_timeout = 2000; 53 | configuration.SetBrokerFromString("192.168.59.104:49156"); 54 | 55 | boost::asio::io_service ios; 56 | Connection connection(ios, configuration); 57 | 58 | // Request the latest offset for partition 1 of topic 'mytopic' on the 59 | // configured broker. 60 | using libkafka_asio::constants::kOffsetTimeLatest; 61 | OffsetRequest request; 62 | request.FetchTopicOffset("mytopic", 1, kOffsetTimeLatest); 63 | 64 | connection.AsyncRequest(request, &HandleRequest); 65 | 66 | ios.run(); 67 | return 0; 68 | } -------------------------------------------------------------------------------- /examples/cpp03/produce.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/produce_cxx03.cpp 3 | // -------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to prepare a 'Produce' request, connect to a Kafka 12 | // server and send the produce request. Errors will be reported to stderr. 
13 | // 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | using libkafka_asio::Connection; 20 | using libkafka_asio::ProduceRequest; 21 | using libkafka_asio::ProduceResponse; 22 | 23 | void HandleRequest(const Connection::ErrorCodeType& err, 24 | const ProduceResponse::OptionalType& response) 25 | { 26 | if (err) 27 | { 28 | std::cerr 29 | << "Error: " << boost::system::system_error(err).what() 30 | << std::endl; 31 | return; 32 | } 33 | std::cout << "Successfully produced message!" << std::endl; 34 | } 35 | 36 | int main(int argc, char **argv) 37 | { 38 | Connection::Configuration configuration; 39 | configuration.auto_connect = true; 40 | configuration.client_id = "libkafka_asio_example"; 41 | configuration.socket_timeout = 10000; 42 | configuration.SetBrokerFromString("localhost"); 43 | 44 | boost::asio::io_service ios; 45 | Connection connection(ios, configuration); 46 | 47 | // Create a 'Produce' request and add a single message to it. The value of 48 | // that message is set to "Hello World". The message is produced for topic 49 | // "mytopic" and partition 0. 50 | ProduceRequest request; 51 | request.AddValue("Hello World", "mytopic", 0); 52 | 53 | // Send the prepared produce request. 54 | // The connection will attempt to automatically connect to one of the brokers, 55 | // specified in the configuration. 56 | connection.AsyncRequest(request, &HandleRequest); 57 | 58 | // Let's go! 59 | ios.run(); 60 | return 0; 61 | } 62 | -------------------------------------------------------------------------------- /examples/cpp11/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.1) 2 | project(libkafka_asio_examples_cxx11) 3 | 4 | add_definitions("-DLIBKAFKAASIO_NO_COMPRESSION") 5 | 6 | find_package(Boost REQUIRED COMPONENTS thread system) 7 | 8 | include_directories( 9 | ${PROJECT_SOURCE_DIR}/../../lib 10 | ${Boost_INCLUDE_DIRS}) 11 | 12 | set(examples_LINK_LIBRARIES ${Boost_LIBRARIES}) 13 | 14 | if (UNIX) 15 | list(APPEND examples_LINK_LIBRARIES pthread) 16 | endif () 17 | 18 | add_executable(fetch_cxx11 ${PROJECT_SOURCE_DIR}/fetch.cpp) 19 | target_link_libraries(fetch_cxx11 ${examples_LINK_LIBRARIES}) 20 | set_property(TARGET fetch_cxx11 PROPERTY CXX_STANDARD 11) 21 | 22 | add_executable(produce_cxx11 ${PROJECT_SOURCE_DIR}/produce.cpp) 23 | target_link_libraries(produce_cxx11 ${examples_LINK_LIBRARIES}) 24 | set_property(TARGET produce_cxx11 PROPERTY CXX_STANDARD 11) 25 | 26 | add_executable(metadata_cxx11 ${PROJECT_SOURCE_DIR}/metadata.cpp) 27 | target_link_libraries(metadata_cxx11 ${examples_LINK_LIBRARIES}) 28 | set_property(TARGET metadata_cxx11 PROPERTY CXX_STANDARD 11) 29 | 30 | add_executable(offset_cxx11 ${PROJECT_SOURCE_DIR}/offset.cpp) 31 | target_link_libraries(offset_cxx11 ${examples_LINK_LIBRARIES}) 32 | set_property(TARGET offset_cxx11 PROPERTY CXX_STANDARD 11) 33 | 34 | add_executable(offset_fetch_cxx11 ${PROJECT_SOURCE_DIR}/offset_fetch.cpp) 35 | target_link_libraries(offset_fetch_cxx11 ${examples_LINK_LIBRARIES}) 36 | set_property(TARGET offset_fetch_cxx11 PROPERTY CXX_STANDARD 11) 37 | -------------------------------------------------------------------------------- /examples/cpp11/fetch.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/fetch_cxx11.cpp 3 | // ------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to create a 'FetchRequest' to get messages for a 12 | // specific Topic & partition. On success, all received messages will be print 13 | // to stdout. 14 | // Your compiler needs to know about C++11 and respective flags need to be set! 15 | // 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | using libkafka_asio::Connection; 22 | using libkafka_asio::FetchRequest; 23 | using libkafka_asio::FetchResponse; 24 | using libkafka_asio::MessageAndOffset; 25 | 26 | int main(int argc, char **argv) 27 | { 28 | Connection::Configuration configuration; 29 | configuration.auto_connect = true; 30 | configuration.client_id = "libkafka_asio_example"; 31 | configuration.socket_timeout = 10000; 32 | configuration.SetBrokerFromString("192.168.15.137:49162"); 33 | 34 | boost::asio::io_service ios; 35 | Connection connection(ios, configuration); 36 | 37 | // Create a 'Fetch' request and try to get data for partition 0 of topic 38 | // 'mytopic', starting with offset 1 39 | FetchRequest request; 40 | request.FetchTopic("mytopic", 0, 1); 41 | 42 | // Helper to interpret the received bytes as string 43 | auto BytesToString = [](const libkafka_asio::Bytes& bytes) -> std::string 44 | { 45 | if (!bytes || bytes->empty()) 46 | { 47 | return ""; 48 | } 49 | return std::string((const char*) &(*bytes)[0], bytes->size()); 50 | }; 51 | 52 | // Send the prepared fetch request. 53 | // The connection will attempt to automatically connect to the broker, 54 | // specified in the configuration. 55 | connection.AsyncRequest( 56 | request, 57 | [&](const Connection::ErrorCodeType& err, 58 | const FetchResponse::OptionalType& response) 59 | { 60 | if (err) 61 | { 62 | std::cerr 63 | << "Error: " << boost::system::system_error(err).what() 64 | << std::endl; 65 | return; 66 | } 67 | 68 | // Loop through the received messages. 69 | // A range based for loop might also work. 70 | std::for_each(response->begin(), response->end(), 71 | [&](const MessageAndOffset& message) 72 | { 73 | std::cout << BytesToString(message.value()) << std::endl; 74 | }); 75 | }); 76 | 77 | // Let's go! 78 | ios.run(); 79 | return 0; 80 | } 81 | -------------------------------------------------------------------------------- /examples/cpp11/metadata.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/metadata_cxx11.cpp 3 | // --------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to create a 'TopicMetadataRequest'. It can be used 12 | // to determine the leader for a specific topic-partition. 13 | // On success, this example prints the leader host:port to stdout. Errors will 14 | // be printed to stderr. 15 | // Your compiler needs to know about C++11 and respective flags need to be set! 
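// Once a leader has been found, a second connection can be pointed at it.
// A minimal sketch, assuming 'leader' is the broker found further below
// (SetBroker reads the broker's 'host' and 'port' fields):
//
//   Connection::Configuration leader_configuration;
//   leader_configuration.SetBroker(*leader);
//   Connection leader_connection(ios, leader_configuration);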
16 | // 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | using libkafka_asio::Connection; 23 | using libkafka_asio::MetadataRequest; 24 | using libkafka_asio::MetadataResponse; 25 | 26 | int main(int argc, char** argv) 27 | { 28 | Connection::Configuration configuration; 29 | configuration.auto_connect = true; 30 | configuration.client_id = "libkafka_asio_example"; 31 | configuration.socket_timeout = 2000; 32 | configuration.SetBrokerFromString("192.168.59.104:49156"); 33 | 34 | boost::asio::io_service ios; 35 | Connection connection(ios, configuration); 36 | 37 | MetadataRequest request; 38 | request.AddTopicName("mytopic"); 39 | 40 | connection.AsyncRequest( 41 | request, 42 | [&](const Connection::ErrorCodeType& err, 43 | const MetadataResponse::OptionalType& response) 44 | { 45 | if (err || !response) 46 | { 47 | std::cerr 48 | << "Error: " << boost::system::system_error(err).what() 49 | << std::endl; 50 | return; 51 | } 52 | // Find the leader for topic 'mytopic' and partition 0 53 | auto leader = response->PartitionLeader("mytopic", 0); 54 | if (!leader) 55 | { 56 | std::cerr << "No leader found!" << std::endl; 57 | return; 58 | } 59 | std::cout 60 | << "Found leader: " << leader->host << ":" << leader->port 61 | << std::endl; 62 | }); 63 | 64 | ios.run(); 65 | return 0; 66 | } 67 | -------------------------------------------------------------------------------- /examples/cpp11/offset.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/offset_cxx11.cpp 3 | // ------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to create a 'OffsetRequest' to get the information 12 | // about the latest offset of a specific topic-partition 13 | // On success, this example prints the retrieved offset to stdout. Errors will 14 | // be printed to stderr. 15 | // Your compiler needs to know about C++11 and respective flags need to be set! 16 | // 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | using libkafka_asio::Connection; 23 | using libkafka_asio::OffsetRequest; 24 | using libkafka_asio::OffsetResponse; 25 | 26 | int main(int argc, char** argv) 27 | { 28 | Connection::Configuration configuration; 29 | configuration.auto_connect = true; 30 | configuration.client_id = "libkafka_asio_example"; 31 | configuration.socket_timeout = 2000; 32 | configuration.SetBrokerFromString("192.168.59.104:49156"); 33 | 34 | boost::asio::io_service ios; 35 | Connection connection(ios, configuration); 36 | 37 | // Request the latest offset for partition 1 of topic 'mytopic' on the 38 | // configured broker. 39 | using libkafka_asio::constants::kOffsetTimeLatest; 40 | OffsetRequest request; 41 | request.FetchTopicOffset("mytopic", 1, kOffsetTimeLatest); 42 | 43 | connection.AsyncRequest( 44 | request, 45 | [&](const Connection::ErrorCodeType& err, 46 | const OffsetResponse::OptionalType& response) 47 | { 48 | if (err || !response) 49 | { 50 | std::cerr 51 | << "Error: " << boost::system::system_error(err).what() 52 | << std::endl; 53 | return; 54 | } 55 | auto partition = response->TopicPartitionOffset("mytopic", 1); 56 | if (!partition || partition->offsets.empty()) 57 | { 58 | std::cerr << "Failed to fetch offset!" 
<< std::endl; 59 | return; 60 | } 61 | std::cout 62 | << "Received latest offset: " << partition->offsets[0] 63 | << std::endl; 64 | }); 65 | 66 | ios.run(); 67 | return 0; 68 | } -------------------------------------------------------------------------------- /examples/cpp11/produce.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // examples/produce_cxx11.cpp 3 | // -------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | // ---------------------------------- 10 | // 11 | // This example shows how to prepare a 'Produce' request, connect to a Kafka 12 | // server and send the produce request. Errors will be reported to stderr. 13 | // The code uses lambda expressions, which were introduced in C++11. 14 | // Therefore your compiler needs to know about C++11 and respective flags need 15 | // to be set! 16 | // 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | using libkafka_asio::Connection; 23 | using libkafka_asio::ProduceRequest; 24 | using libkafka_asio::ProduceResponse; 25 | 26 | int main(int argc, char **argv) 27 | { 28 | Connection::Configuration configuration; 29 | configuration.auto_connect = true; 30 | configuration.client_id = "libkafka_asio_example"; 31 | configuration.socket_timeout = 10000; 32 | configuration.SetBrokerFromString("192.168.15.137:49162"); 33 | 34 | boost::asio::io_service ios; 35 | Connection connection(ios, configuration); 36 | 37 | // Create a 'Produce' request and add a single message to it. The value of 38 | // that message is set to "Hello World". The message is produced for topic 39 | // "mytopic" and partition 0. 40 | ProduceRequest request; 41 | request.AddValue("Hello World", "mytopic", 0); 42 | 43 | // Send the prepared produce request. 44 | // The connection will attempt to automatically connect to one of the brokers, 45 | // specified in the configuration. 46 | connection.AsyncRequest( 47 | request, 48 | [&](const Connection::ErrorCodeType& err, 49 | const ProduceResponse::OptionalType& response) 50 | { 51 | if (err) 52 | { 53 | std::cerr 54 | << "Error: " << boost::system::system_error(err).what() 55 | << std::endl; 56 | return; 57 | } 58 | std::cout << "Successfully produced message!" << std::endl; 59 | }); 60 | 61 | // Let's go! 62 | ios.run(); 63 | return 0; 64 | } 65 | -------------------------------------------------------------------------------- /lib/libkafka_asio/connection.h: -------------------------------------------------------------------------------- 1 | // 2 | // connection.h 3 | // ------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef CONNECTION_H_229D7905_40B7_49F1_BAC5_910B10FADDBA 11 | #define CONNECTION_H_229D7905_40B7_49F1_BAC5_910B10FADDBA 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | typedef detail::BasicConnection Connection; 20 | 21 | } // namespace libkafka_asio 22 | 23 | #endif // CONNECTION_H_229D7905_40B7_49F1_BAC5_910B10FADDBA 24 | -------------------------------------------------------------------------------- /lib/libkafka_asio/connection_configuration.h: -------------------------------------------------------------------------------- 1 | #ifndef CONNECTION_CONFIGURATION_H_97A5C774_9202_42D5_BD64_05F2293A3ABD 2 | #define CONNECTION_CONFIGURATION_H_97A5C774_9202_42D5_BD64_05F2293A3ABD 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace libkafka_asio 10 | { 11 | 12 | // Connection configuration data structure 13 | struct ConnectionConfiguration 14 | { 15 | // Broker address configuration data structure 16 | struct BrokerAddress 17 | { 18 | typedef boost::optional OptionalType; 19 | std::string hostname; 20 | std::string service; 21 | }; 22 | 23 | // List of broker address configurations 24 | typedef std::vector BrokerList; 25 | 26 | // Maximum number of bytes to transmit for messages. 27 | // Default value is 4194304 (4 MB). 28 | Int32 message_max_bytes; 29 | 30 | // Timeout (in milliseconds) for socket operations. 31 | // Default value is 60000 (1 min). 32 | unsigned int socket_timeout; 33 | 34 | // Connection identification string. 35 | // Default value is 'libkafka_asio'. 36 | String client_id; 37 | 38 | // Automatically connect to one of the known Kafka servers 39 | bool auto_connect; 40 | 41 | // The broker address, used for auto-connect 42 | BrokerAddress::OptionalType broker_address; 43 | 44 | // Construct using default values 45 | ConnectionConfiguration(); 46 | 47 | // Set the broker address using the given string. 48 | // If the string contain a colon, the part before the colon is interpreted 49 | // as hostname and the part after that character is interpreted as service 50 | // name. 51 | // Example: localhost:9092 52 | // 53 | void SetBrokerFromString(const std::string& str); 54 | 55 | // Set the broker address using the given data structure. 56 | // The broker's address is determined by the fields: 57 | // - host 58 | // - port 59 | // A lexical cast is done on both fields. 60 | template 61 | void SetBroker(const T& broker); 62 | 63 | // Set the broker address using the given hostname and service parameter. 64 | // Both will be casted to string using a lexical_cast. 65 | template 66 | void SetBroker(const Tx& hostname, const Ty& service); 67 | 68 | }; 69 | 70 | } // namespace libkafka_asio 71 | 72 | #include 73 | 74 | #endif // CONNECTION_CONFIGURATION_H_97A5C774_9202_42D5_BD64_05F2293A3ABD 75 | -------------------------------------------------------------------------------- /lib/libkafka_asio/constants.h: -------------------------------------------------------------------------------- 1 | // 2 | // constants.h 3 | // ----------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef CONSTANTS_H_D943A30C_46DC_42BD_B60B_B55095C424F1 11 | #define CONSTANTS_H_D943A30C_46DC_42BD_B60B_B55095C424F1 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | namespace constants 18 | { 19 | 20 | enum ApiKeys 21 | { 22 | kApiKeyProduceRequest = 0, 23 | kApiKeyFetchRequest = 1, 24 | kApiKeyOffsetRequest = 2, 25 | kApiKeyMetadataRequest = 3, 26 | kApiKeyOffsetCommitRequest = 8, 27 | kApiKeyOffsetFetchRequest = 9, 28 | kApiKeyConsumerMetadataRequest = 10 29 | }; 30 | 31 | enum Compression 32 | { 33 | kCompressionNone = 0, 34 | kCompressionGZIP = 1, 35 | kCompressionSnappy = 2, 36 | kCompressionLz4 = 3 37 | }; 38 | 39 | enum Defaults 40 | { 41 | kDefaultPartition = 0, 42 | kDefaultProduceRequiredAcks = 1, 43 | kDefaultProduceTimeout = 10000, 44 | kDefaultFetchOffset = 0, 45 | kDefaultFetchMinBytes = 0, 46 | kDefaultFetchMaxBytes = 32768, 47 | kDefaultFetchMaxWaitTime = 0, 48 | kDefaultOffsetMaxNumberOfOffsets = 1, 49 | kDefaultOffsetCommitTimestampNow = -1, 50 | kDefaultCorrelationId = 0, 51 | kDefaultMessageMaxBytes = 4194304, 52 | kDefaultSocketTimeout = 60000 53 | }; 54 | 55 | inline bool DefaultConnectionAutoConnect() 56 | { 57 | return false; 58 | } 59 | 60 | inline const String& DefaultClientId() 61 | { 62 | static String client_id = "libkafka_asio"; 63 | return client_id; 64 | } 65 | 66 | inline const std::string& DefaultKafkaService() 67 | { 68 | static String service = "9092"; 69 | return service; 70 | } 71 | 72 | enum MetadataLeader 73 | { 74 | kMetadataLeaderUndecided = -1 75 | }; 76 | 77 | enum OffsetTime 78 | { 79 | kOffsetTimeLatest = -1, 80 | kOffsetTimeEarliest = -2 81 | }; 82 | 83 | } // namespace constants 84 | } // namespace libkafka_asio 85 | 86 | #endif // CONSTANTS_H_D943A30C_46DC_42BD_B60B_B55095C424F1 87 | -------------------------------------------------------------------------------- /lib/libkafka_asio/consumer_metadata_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // consumer_metadata_request.h 3 | // --------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef CONSUMER_METADATA_REQUEST_H_4C2E5A42_11B2_4982_87DE_CA3F8B82AF76 11 | #define CONSUMER_METADATA_REQUEST_H_4C2E5A42_11B2_4982_87DE_CA3F8B82AF76 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | // Kafka Offset Commit/Fetch API request implementation: 20 | // ConsumerMetadataRequest 21 | class ConsumerMetadataRequest : 22 | public Request 23 | { 24 | friend class Request; 25 | 26 | static Int16 ApiKey(); 27 | 28 | public: 29 | typedef ConsumerMetadataResponse ResponseType; 30 | typedef MutableConsumerMetadataResponse MutableResponseType; 31 | 32 | const String& consumer_group() const; 33 | 34 | // The consumer-metadata-request will fetch the coordinating broker for the 35 | // consumer group specified in the request. 
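  // A minimal usage sketch, given a connected Connection and a suitable
  // handler (the group name is only a placeholder):
  //
  //   ConsumerMetadataRequest request;
  //   request.set_consumer_group("example_group");
  //   connection.AsyncRequest(request, handler);
  //
  // On success, the handler receives a ConsumerMetadataResponse whose
  // coordinator_host() and coordinator_port() identify that broker.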
36 | void set_consumer_group(const String& consumer_group); 37 | 38 | private: 39 | String consumer_group_; 40 | }; 41 | 42 | } // namespace libkafka_asio 43 | 44 | #include 45 | 46 | #endif // CONSUMER_METADATA_REQUEST_H_4C2E5A42_11B2_4982_87DE_CA3F8B82AF76 47 | -------------------------------------------------------------------------------- /lib/libkafka_asio/consumer_metadata_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // consumer_metadata_response.h 3 | // ---------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef CONSUMER_METADATA_RESPONSE_H_426CF245_E113_4A70_A614_247BF83944DE 11 | #define CONSUMER_METADATA_RESPONSE_H_426CF245_E113_4A70_A614_247BF83944DE 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | class MutableConsumerMetadataResponse; 20 | 21 | // Kafka Offset Commit/Fetch API response implementation: 22 | // ConsumerMetadataResponse 23 | class ConsumerMetadataResponse : 24 | public Response 25 | { 26 | friend class MutableConsumerMetadataResponse; 27 | 28 | public: 29 | 30 | Int16 error_code() const; 31 | 32 | Int32 coordinator_id() const; 33 | 34 | const String& coordinator_host() const; 35 | 36 | Int32 coordinator_port() const; 37 | 38 | private: 39 | Int16 error_code_; 40 | Int32 coordinator_id_; 41 | String coordinator_host_; 42 | Int32 coordinator_port_; 43 | }; 44 | 45 | class MutableConsumerMetadataResponse : 46 | public MutableResponse 47 | { 48 | public: 49 | 50 | void set_error_code(Int16 error_code); 51 | 52 | void set_coordinator_id(Int32 coordinator_id); 53 | 54 | void set_coordinator_host(const String& coordinator_host); 55 | 56 | void set_coordinator_port(Int32 coordinator_port); 57 | }; 58 | 59 | } // namespace libkafka_asio 60 | 61 | #include 62 | 63 | #endif // CONSUMER_METADATA_RESPONSE_H_426CF245_E113_4A70_A614_247BF83944DE 64 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/bytes_streambuf.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/bytes_streambuf.h 3 | // ------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef BYTES_STREAMBUF_H_3909DFCD_867A_4A2C_A494_3AAAE9A751E2 11 | #define BYTES_STREAMBUF_H_3909DFCD_867A_4A2C_A494_3AAAE9A751E2 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | class BytesStreambuf : 22 | public std::streambuf 23 | { 24 | public: 25 | explicit BytesStreambuf(Bytes data); 26 | 27 | Bytes data() const; 28 | 29 | private: 30 | Bytes data_; 31 | }; 32 | 33 | } // namespace detail 34 | } // namespace libkafka_asio 35 | 36 | #include 37 | 38 | #endif // BYTES_STREAMBUF_H_3909DFCD_867A_4A2C_A494_3AAAE9A751E2 39 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/compression.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/compression.h 3 | // -------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef COMPRESSION_H_D5F6D8BB_5E7B_4A32_8684_2F69BF8D7AD1 11 | #define COMPRESSION_H_D5F6D8BB_5E7B_4A32_8684_2F69BF8D7AD1 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | namespace detail 20 | { 21 | 22 | // Compress the given bytes with the specified compression algorithm. 23 | // If the something goes wrong or the compression algorithm is not available, 24 | // the error code will be set correspondingly. 25 | Bytes Compress(const Bytes& data, 26 | constants::Compression compression, 27 | boost::system::error_code& ec); 28 | 29 | 30 | // De-compress the given bytes using the specified compression algorithm. 31 | // If there was an error or the compression algorithm is not available, the 32 | // given error code will be set. 33 | Bytes Decompress(const Bytes& data, 34 | constants::Compression compression, 35 | boost::system::error_code& ec); 36 | 37 | 38 | // Fallback implementation, which will be used in case an algorithm was disabled 39 | // and is therefore not available. It simply sets a respective error code and 40 | // returns an empty value. 41 | struct FallbackCompressionAlgorithm 42 | { 43 | static Bytes Compress(const Bytes&, boost::system::error_code& ec); 44 | 45 | static Bytes Decompress(const Bytes&, boost::system::error_code& ec); 46 | }; 47 | 48 | 49 | // Compression algorithm policy template. 50 | // Specific algorithms should specialize this template and define the 51 | // 'Algorithm' type. 52 | template 53 | struct CompressionPolicy 54 | { 55 | typedef FallbackCompressionAlgorithm Algorithm; 56 | }; 57 | 58 | } // namespace detail 59 | } // namespace libkafka_asio 60 | 61 | #include 62 | #include 63 | #include 64 | 65 | #endif // COMPRESSION_H_D5F6D8BB_5E7B_4A32_8684_2F69BF8D7AD1 66 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/compression_gz.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/compression_gz.h 3 | // ----------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef COMPRESSION_GZ_H_0B410507_A88B_468B_A1EC_ABC8B9246F62 11 | #define COMPRESSION_GZ_H_0B410507_A88B_468B_A1EC_ABC8B9246F62 12 | 13 | #if !defined(LIBKAFKAASIO_NO_COMPRESSION) \ 14 | && !defined(LIBKAFKAASIO_NO_COMPRESSION_GZIP) 15 | 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | namespace detail 22 | { 23 | 24 | // Compression/Decompression using the GZIP implementation of `zlib`. 25 | struct GZIPCompressionAlgorithm 26 | { 27 | static Bytes Compress(const Bytes& data, boost::system::error_code& ec); 28 | 29 | static Bytes Decompress(const Bytes& data, boost::system::error_code& ec); 30 | }; 31 | 32 | // Register the GZIP Algorithm 33 | template<> 34 | struct CompressionPolicy 35 | { 36 | typedef GZIPCompressionAlgorithm Algorithm; 37 | }; 38 | 39 | } // namespace detail 40 | } // namespace libkafka_asio 41 | 42 | #include 43 | 44 | #endif // GZIP compression not disabled? 
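// Sketch of round-tripping data through the generic helpers declared in
// detail/compression.h, which dispatch to this algorithm for kCompressionGZIP
// (assuming 'data' holds a Bytes value):
//
//   boost::system::error_code ec;
//   libkafka_asio::Bytes packed = libkafka_asio::detail::Compress(
//       data, libkafka_asio::constants::kCompressionGZIP, ec);
//   libkafka_asio::Bytes unpacked = libkafka_asio::detail::Decompress(
//       packed, libkafka_asio::constants::kCompressionGZIP, ec);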
45 | #endif // COMPRESSION_GZ_H_0B410507_A88B_468B_A1EC_ABC8B9246F62 46 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/compression_snappy.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/compression_snappy.h 3 | // --------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef COMPRESSION_SNAPPY_H_1C60D9AB_C9C7_45B1_8458_A6C55A1AF72F 11 | #define COMPRESSION_SNAPPY_H_1C60D9AB_C9C7_45B1_8458_A6C55A1AF72F 12 | 13 | #if !defined(LIBKAFKAASIO_NO_COMPRESSION) \ 14 | && !defined(LIBKAFKAASIO_NO_COMPRESSION_SNAPPY) 15 | 16 | #include 17 | #include 18 | #include 19 | 20 | namespace libkafka_asio 21 | { 22 | namespace detail 23 | { 24 | 25 | // Kafka message compression and decompression using the Snappy compression 26 | // algorithm. It also implements the decompression of Xerial SnappyOutputStream 27 | // data. 28 | struct SnappyCompressionAlgorithm 29 | { 30 | // Xerial Snappy OutputStream Magic Bytes 31 | static const Bytes& kSnappyStreamMagic(); 32 | 33 | // Compress the given data using Snappy 34 | static Bytes Compress(const Bytes& data, boost::system::error_code& ec); 35 | 36 | // Decompress the given data using Snappy 37 | static Bytes Decompress(const Bytes& data, boost::system::error_code& ec); 38 | 39 | private: 40 | // Decompress the given chunk 41 | static Bytes DecompressChunk(const Bytes& data, 42 | boost::system::error_code& ec); 43 | 44 | // Interprets the given data as Snappy stream (xerial OutputStream format) 45 | static Bytes DecompressStream(const Bytes& data, 46 | boost::system::error_code& ec); 47 | }; 48 | 49 | // Register the algorithm 50 | template<> 51 | struct CompressionPolicy 52 | { 53 | typedef SnappyCompressionAlgorithm Algorithm; 54 | }; 55 | 56 | } // namespace detail 57 | } // namespace libkafka_asio 58 | 59 | #include 60 | 61 | #endif // Snappy compression not disabled? 62 | #endif // COMPRESSION_SNAPPY_H_1C60D9AB_C9C7_45B1_8458_A6C55A1AF72F 63 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/endian.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/endian.h 3 | // --------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef ENDIAN_H_6E481133_65F6_495C_A384_557C81B0C628 11 | #define ENDIAN_H_6E481133_65F6_495C_A384_557C81B0C628 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | namespace detail 20 | { 21 | 22 | // Convert the given 64 bit integer to big endian. 23 | inline Int64 be_to_host_64(Int64 ll) 24 | { 25 | #ifdef BOOST_LITTLE_ENDIAN 26 | ll = (((uint64_t) htonl((uint32_t) ((ll << 32) >> 32))) << 32) | 27 | (uint32_t) htonl((uint32_t) (ll >> 32)); 28 | #endif // LITTLE_ENDIAN 29 | return ll; 30 | } 31 | 32 | // Convert the 64 Bit integer back to node specific endianess, which might be 33 | // little endian. 
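// Both conversions are the same byte swap, so applying one after the other
// restores the original value; on a little-endian host, for example,
// 0x0102030405060708 becomes 0x0807060504030201 and back.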
34 | inline Int64 host_to_be_64(Int64 ll) 35 | { 36 | return be_to_host_64(ll); 37 | } 38 | 39 | } // detail 40 | } // libkafka_asio 41 | 42 | #endif // ENDIAN_H_6E481133_65F6_495C_A384_557C81B0C628 43 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/bytes_streambuf.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/bytes_streambuf.h 3 | // ----------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef BYTES_STREAMBUF_H_F7347ACB_448C_4507_924D_2A0C7A512E79 11 | #define BYTES_STREAMBUF_H_F7347ACB_448C_4507_924D_2A0C7A512E79 12 | 13 | namespace libkafka_asio 14 | { 15 | namespace detail 16 | { 17 | 18 | inline BytesStreambuf::BytesStreambuf(Bytes data) : 19 | data_(data) 20 | { 21 | if (data_ && !data->empty()) 22 | { 23 | char_type *buffer_begin = reinterpret_cast(&(*data_)[0]); 24 | char_type *buffer_end = buffer_begin + data->size(); 25 | setg(buffer_begin, buffer_begin, buffer_end); 26 | } 27 | } 28 | 29 | inline Bytes BytesStreambuf::data() const 30 | { 31 | return data_; 32 | } 33 | 34 | } // namespace detail 35 | } // namespace libkafka_asio 36 | 37 | #endif // BYTES_STREAMBUF_H_F7347ACB_448C_4507_924D_2A0C7A512E79 38 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/compression.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/compression.h 3 | // ------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef COMPRESSION_H_6B15B598_5C1D_447E_AF74_E541088998FA 11 | #define COMPRESSION_H_6B15B598_5C1D_447E_AF74_E541088998FA 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline Bytes Compress(const Bytes& data, 22 | constants::Compression compression, 23 | boost::system::error_code& ec) 24 | { 25 | using namespace libkafka_asio::constants; 26 | 27 | switch (compression) 28 | { 29 | case kCompressionGZIP: 30 | return CompressionPolicy:: 31 | Algorithm::Compress(data, ec); 32 | case kCompressionSnappy: 33 | return CompressionPolicy:: 34 | Algorithm::Compress(data, ec); 35 | case kCompressionLz4: 36 | return CompressionPolicy:: 37 | Algorithm::Compress(data, ec); 38 | case kCompressionNone: 39 | ec = kErrorSuccess; 40 | break; 41 | default: 42 | ec = kErrorCompressionNotAvailable; 43 | break; 44 | } 45 | 46 | return Bytes(); 47 | } 48 | 49 | inline Bytes Decompress(const Bytes& data, 50 | constants::Compression compression, 51 | boost::system::error_code& ec) 52 | { 53 | using namespace libkafka_asio::constants; 54 | 55 | switch (compression) 56 | { 57 | case kCompressionGZIP: 58 | return CompressionPolicy:: 59 | Algorithm::Decompress(data, ec); 60 | case kCompressionSnappy: 61 | return CompressionPolicy:: 62 | Algorithm::Decompress(data, ec); 63 | case kCompressionLz4: 64 | return CompressionPolicy:: 65 | Algorithm::Decompress(data, ec); 66 | case kCompressionNone: 67 | ec = kErrorSuccess; 68 | break; 69 | default: 70 | ec = kErrorCompressionNotAvailable; 71 | break; 72 | } 73 | 74 | return Bytes(); 75 | } 76 | 77 | inline Bytes FallbackCompressionAlgorithm::Compress( 78 | const Bytes&, boost::system::error_code& ec) 79 | { 80 | ec = kErrorCompressionNotAvailable; 81 | return Bytes(); 82 
| } 83 | 84 | inline Bytes FallbackCompressionAlgorithm::Decompress( 85 | const Bytes&, boost::system::error_code& ec) 86 | { 87 | ec = kErrorCompressionNotAvailable; 88 | return Bytes(); 89 | } 90 | 91 | 92 | } // namespace detail 93 | } // namespace libkafka_asio 94 | 95 | #endif // COMPRESSION_H_6B15B598_5C1D_447E_AF74_E541088998FA 96 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/compression_gz.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/compression_gz.h 3 | // ---------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef COMPRESSION_GZ_H_4936268F_C651_4E32_A917_8AC05247B3DB 11 | #define COMPRESSION_GZ_H_4936268F_C651_4E32_A917_8AC05247B3DB 12 | 13 | #include 14 | 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | namespace libkafka_asio 22 | { 23 | namespace detail 24 | { 25 | 26 | inline Bytes GZIPCompressionAlgorithm::Compress( 27 | const Bytes& data, boost::system::error_code& ec) 28 | { 29 | static const size_t kBufferSize = 32 * 1024; 30 | static const int kGZIPWindowBits = 15 + 16; 31 | 32 | if (!data || data->empty()) 33 | { 34 | ec = kErrorCompressionFailed; 35 | return Bytes(); 36 | } 37 | // Initialize a new zlib stream 38 | ::z_stream gz = {}; 39 | ::gz_header header = {}; 40 | if (::deflateInit2(&gz, Z_DEFAULT_COMPRESSION, Z_DEFLATED, kGZIPWindowBits, 41 | 8, Z_DEFAULT_STRATEGY) != Z_OK) 42 | { 43 | ec = kErrorCompressionFailed; 44 | return Bytes(); 45 | } 46 | gz.next_in = reinterpret_cast< ::Bytef *>(&(*data)[0]); 47 | gz.avail_in = (::uInt) data->size(); 48 | Bytes out(new Bytes::element_type()); 49 | int ret = Z_OK; 50 | // Deflate 51 | while (ret == Z_OK) 52 | { 53 | size_t pos = out->size(); 54 | out->resize(out->size() + kBufferSize); 55 | gz.next_out = reinterpret_cast< ::Bytef *>(&(*out)[pos]); 56 | gz.avail_out = kBufferSize; 57 | ret = ::deflate(&gz, Z_FINISH); 58 | } 59 | if (gz.total_out < out->size()) 60 | { 61 | out->resize(gz.total_out); 62 | } 63 | ::deflateEnd(&gz); 64 | if (ret != Z_STREAM_END) 65 | { 66 | ec = kErrorCompressionFailed; 67 | return Bytes(); 68 | } 69 | 70 | ec = kErrorSuccess; 71 | return out; 72 | } 73 | 74 | inline Bytes GZIPCompressionAlgorithm::Decompress( 75 | const Bytes& data, boost::system::error_code& ec) 76 | { 77 | static const size_t kBufferSize = 32 * 1024; 78 | static const int kGZIPWindowBits = 15 + 32; 79 | 80 | if (!data || data->empty()) 81 | { 82 | ec = kErrorCompressionFailed; 83 | return Bytes(); 84 | } 85 | // Initialize a new zlib stream 86 | ::z_stream gz = {}; 87 | ::gz_header header = {}; 88 | if (::inflateInit2(&gz, kGZIPWindowBits) != Z_OK) 89 | { 90 | ec = kErrorCompressionFailed; 91 | return Bytes(); 92 | } 93 | gz.next_in = reinterpret_cast< ::Bytef *>(&(*data)[0]); 94 | gz.avail_in = (::uInt) data->size(); 95 | int ret = Z_OK; 96 | // Inflate gzip header 97 | ret = ::inflateGetHeader(&gz, &header); 98 | Bytes out(new Bytes::element_type()); 99 | // Now, inflate the actual data 100 | while (ret == Z_OK) 101 | { 102 | size_t pos = out->size(); 103 | out->resize(out->size() + kBufferSize); 104 | gz.next_out = reinterpret_cast< ::Bytef *>(&(*out)[pos]); 105 | gz.avail_out = kBufferSize; 106 | ret = ::inflate(&gz, 0); 107 | } 108 | if (gz.total_out < out->size()) 109 | { 110 | out->resize(gz.total_out); 111 | } 112 | ::inflateEnd(&gz); 113 | if (ret != 
Z_STREAM_END) 114 | { 115 | ec = kErrorCompressionFailed; 116 | return Bytes(); 117 | } 118 | 119 | ec = kErrorSuccess; 120 | return out; 121 | } 122 | 123 | } // namespace detail 124 | } // namespace libkafka_asio 125 | 126 | #endif // COMPRESSION_GZ_H_4936268F_C651_4E32_A917_8AC05247B3DB 127 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/consumer_metadata_request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/consumer_metadata_request_write.h 3 | // --------------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef CONSUMER_METADATA_REQUEST_WRITE_H_C58C7485_93D9_4595_9166_34C4CEC79445 11 | #define CONSUMER_METADATA_REQUEST_WRITE_H_C58C7485_93D9_4595_9166_34C4CEC79445 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | namespace detail 18 | { 19 | 20 | inline Int32 RequestMessageWireSize(const ConsumerMetadataRequest& request) 21 | { 22 | return StringWireSize(request.consumer_group()); 23 | } 24 | 25 | inline void WriteRequestMessage(const ConsumerMetadataRequest& request, 26 | std::ostream& os) 27 | { 28 | WriteString(request.consumer_group(), os); 29 | } 30 | 31 | } // namespace detail 32 | } // namespace libkafka_asio 33 | 34 | #endif // CONSUMER_METADATA_REQUEST_WRITE_H_C58C7485_93D9_4595_9166_34C4CEC79445 35 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/consumer_metadata_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/consumer_metadata_response_read.h 3 | // --------------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef CONSUMER_METADATA_RESPONSE_READ_H_68DED213_C18C_4A39_8B5C_B15BB55E9EF8 11 | #define CONSUMER_METADATA_RESPONSE_READ_H_68DED213_C18C_4A39_8B5C_B15BB55E9EF8 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline void ReadResponseMessage(std::istream& is, 22 | MutableConsumerMetadataResponse& response, 23 | boost::system::error_code& ec) 24 | { 25 | response.set_error_code(ReadInt16(is)); 26 | response.set_coordinator_id(ReadInt32(is)); 27 | response.set_coordinator_host(ReadString(is)); 28 | response.set_coordinator_port(ReadInt32(is)); 29 | if (response.response().error_code()) 30 | { 31 | ec = (KafkaError) response.response().error_code(); 32 | } 33 | } 34 | 35 | } // namespace detail 36 | } // namespace libkafka_asio 37 | 38 | #endif // CONSUMER_METADATA_RESPONSE_READ_H_68DED213_C18C_4A39_8B5C_B15BB55E9EF8 39 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/fetch_request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/fetch_request_write.h 3 | // --------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef FETCH_REQUEST_WRITE_H_B441DB98_D6A0_42AF_B49E_B7FEA169DCC6 11 | #define FETCH_REQUEST_WRITE_H_B441DB98_D6A0_42AF_B49E_B7FEA169DCC6 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline Int32 RequestMessageWireSize(const FetchRequest& request) 22 | { 23 | Int32 size = 24 | sizeof(Int32) + // ReplicaId 25 | sizeof(Int32) + // MaxWaitTime 26 | sizeof(Int32); // MinByte 27 | 28 | // Topics Array 29 | size += sizeof(Int32); 30 | BOOST_FOREACH(const FetchRequest::Topic& topic, request.topics()) 31 | { 32 | size += StringWireSize(topic.topic_name); 33 | 34 | // Partitions Array 35 | size += static_cast( 36 | sizeof(Int32) + // Array Length 37 | topic.partitions.size() * ( 38 | sizeof(Int32) + // Partition 39 | sizeof(Int64) + // FetchOffset 40 | sizeof(Int32))); // MaxBytes 41 | } 42 | return size; 43 | } 44 | 45 | inline void WriteRequestMessage(const FetchRequest& request, std::ostream& os) 46 | { 47 | WriteInt32(request.replica_id(), os); 48 | WriteInt32(request.max_wait_time(), os); 49 | WriteInt32(request.min_bytes(), os); 50 | 51 | // Topics Array 52 | WriteInt32(static_cast(request.topics().size()), os); 53 | BOOST_FOREACH(const FetchRequest::Topic& topic, request.topics()) 54 | { 55 | WriteString(topic.topic_name, os); 56 | 57 | // Partitions Array 58 | WriteInt32(static_cast(topic.partitions.size()), os); 59 | BOOST_FOREACH(const FetchRequest::Partition& partition, topic.partitions) 60 | { 61 | WriteInt32(partition.partition, os); 62 | WriteInt64(partition.fetch_offset, os); 63 | WriteInt32(partition.max_bytes, os); 64 | } 65 | } 66 | } 67 | 68 | } // namespace detail 69 | } // namespace libkafka_asio 70 | 71 | #endif // FETCH_REQUEST_WRITE_H_B441DB98_D6A0_42AF_B49E_B7FEA169DCC6 72 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/fetch_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/fetch_response_read.h 3 | // --------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef FETCH_RESPONSE_READ_H_03C2A581_53D0_498E_AF6A_2082227485A3 11 | #define FETCH_RESPONSE_READ_H_03C2A581_53D0_498E_AF6A_2082227485A3 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline void ReadResponseMessage(std::istream& is, 22 | MutableFetchResponse& response, 23 | boost::system::error_code& ec) 24 | { 25 | // Topic array 26 | int topic_count = ReadInt32(is); 27 | for (int t = 0; t < topic_count; ++t) 28 | { 29 | FetchResponse::Topics::key_type key; 30 | FetchResponse::Topics::mapped_type topic; 31 | key = ReadString(is); 32 | 33 | // Partitions array 34 | int partition_count = ReadInt32(is); 35 | for (int p = 0; p < partition_count; ++p) 36 | { 37 | FetchResponse::Partitions::key_type key; 38 | FetchResponse::Partitions::mapped_type partition; 39 | key = ReadInt32(is); 40 | partition.error_code = ReadInt16(is); 41 | partition.highwater_mark_offset = ReadInt64(is); 42 | 43 | if (partition.error_code) 44 | { 45 | ec = (KafkaError) partition.error_code; 46 | return; 47 | } 48 | 49 | // MessageSet 50 | Int32 message_set_size = ReadInt32(is); 51 | ReadMessageSet(is, partition.messages, message_set_size, ec); 52 | if (ec != kErrorSuccess) 53 | { 54 | return; 55 | } 56 | topic.partitions.insert(std::make_pair(key, partition)); 57 | } 58 | response.mutable_topics().insert(std::make_pair(key, topic)); 59 | } 60 | } 61 | 62 | } // namespace detail 63 | } // namespace libkafka_asio 64 | 65 | #endif // FETCH_RESPONSE_READ_H_03C2A581_53D0_498E_AF6A_2082227485A3 66 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/message_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/message_read.h 3 | // -------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef MESSAGE_READ_H_5892DA02_680A_469F_9624_A68F5D3E1FC5 11 | #define MESSAGE_READ_H_5892DA02_680A_469F_9624_A68F5D3E1FC5 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | namespace detail 20 | { 21 | 22 | inline void ReadMessage(std::istream& is, 23 | Message& message, 24 | boost::system::error_code& ec) 25 | { 26 | Int32 crc = ReadInt32(is); 27 | Int8 magic_byte = ReadInt8(is); 28 | // discarding results of above reads 29 | (void)crc; 30 | (void)magic_byte; 31 | 32 | message.set_attributes(ReadInt8(is)); 33 | ReadBytes(is, message.mutable_key()); 34 | ReadBytes(is, message.mutable_value()); 35 | Bytes data = Decompress(message.value(), message.compression(), ec); 36 | if (!ec && data && !data->empty()) 37 | { 38 | BytesStreambuf intermediate_buffer(data); 39 | std::istream intermediate_is(&intermediate_buffer); 40 | ReadMessageSet(intermediate_is, message.mutable_nested_message_set(), 41 | data->size(), ec); 42 | } 43 | } 44 | 45 | inline void ReadMessageSet(std::istream& is, 46 | MessageSet& message_set, 47 | size_t size, 48 | boost::system::error_code& ec) 49 | { 50 | size_t read_count = 0; 51 | while (read_count < size) 52 | { 53 | MessageAndOffset message; 54 | message.set_offset(ReadInt64(is)); 55 | Int32 message_size = ReadInt32(is); 56 | if ((Int32) (size - read_count) < message_size) 57 | { 58 | // Ignore partial messages 59 | is.seekg(size - read_count, std::ios::cur); 60 | read_count = size; 61 | } 62 | else 63 | { 64 | ReadMessage(is, message, ec); 65 | message_set.push_back(message); 66 | read_count += sizeof(Int64) + sizeof(Int32) + message_size; 67 | } 68 | } 69 | } 70 | 71 | } // namespace detail 72 | } // namespace libkafka_asio 73 | 74 | #endif // MESSAGE_READ_H_5892DA02_680A_469F_9624_A68F5D3E1FC5 75 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/message_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/message_write.h 3 | // -------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef MESSAGE_WRITE_H_201F6605_6810_441C_9F25_47D8D669A771 11 | #define MESSAGE_WRITE_H_201F6605_6810_441C_9F25_47D8D669A771 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace libkafka_asio 19 | { 20 | namespace detail 21 | { 22 | 23 | inline Int32 MessageWireSize(const Message& message) 24 | { 25 | return 26 | sizeof(Int32) + // Crc 27 | sizeof(Int8) + // MagicByte 28 | sizeof(Int8) + // Attributes 29 | BytesWireSize( message.key() ) + // Key 30 | BytesWireSize( message.value() ); // Value 31 | } 32 | 33 | inline Int32 MessageSetWireSize(const MessageSet& message_set) 34 | { 35 | Int32 size = 0; 36 | BOOST_FOREACH(const MessageAndOffset &message, message_set) 37 | { 38 | size += 39 | sizeof(Int64) + // Offset 40 | sizeof(Int32) + // MessageSize 41 | MessageWireSize(message); // Message 42 | } 43 | return size; 44 | } 45 | 46 | inline void WriteMessage(const Message& value, std::ostream& os) 47 | { 48 | using boost::asio::buffer_cast; 49 | 50 | // Write everything (except crc) to an intermediate buffer 51 | boost::asio::streambuf intermediate_buffer; 52 | std::ostream intermediate_os(&intermediate_buffer); 53 | WriteInt8(value.magic_byte(), intermediate_os); 54 | WriteInt8(value.attributes(), intermediate_os); 55 | WriteBytes(value.key(), intermediate_os); 56 | WriteBytes(value.value(), intermediate_os); 57 | 58 | size_t size = intermediate_buffer.size(); 59 | intermediate_buffer.commit(size); 60 | 61 | // Calculate crc 62 | boost::crc_32_type crc; 63 | crc.process_bytes(buffer_cast(intermediate_buffer.data()), size); 64 | 65 | // Write to the real stream 66 | WriteInt32(crc.checksum(), os); 67 | os.write(buffer_cast(intermediate_buffer.data()), size); 68 | } 69 | 70 | inline void WriteMessageSet(const MessageSet& value, std::ostream& os) 71 | { 72 | BOOST_FOREACH(const MessageAndOffset &message, value) 73 | { 74 | WriteInt64(message.offset(), os); 75 | WriteInt32(MessageWireSize(message), os); 76 | WriteMessage(message, os); 77 | } 78 | } 79 | 80 | } // namespace detail 81 | } // namespace libkafka_asio 82 | 83 | #endif // MESSAGE_WRITE_H_201F6605_6810_441C_9F25_47D8D669A771 84 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/metadata_request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/metadata_request_write.h 3 | // ------------------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef METADATA_REQUEST_WRITE_H_28BB9709_A65B_44C9_91C3_0A380BD76F7D 11 | #define METADATA_REQUEST_WRITE_H_28BB9709_A65B_44C9_91C3_0A380BD76F7D 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline Int32 RequestMessageWireSize(const MetadataRequest& request) 22 | { 23 | Int32 size = sizeof(Int32); 24 | BOOST_FOREACH(const String& topic_name, request.topic_names()) 25 | { 26 | size += StringWireSize(topic_name); 27 | } 28 | return size; 29 | } 30 | 31 | inline void WriteRequestMessage(const MetadataRequest& request, 32 | std::ostream& os) 33 | { 34 | // Topic Names 35 | WriteInt32(static_cast(request.topic_names().size()), os); 36 | BOOST_FOREACH(const String& topic_name, request.topic_names()) 37 | { 38 | WriteString(topic_name, os); 39 | } 40 | } 41 | 42 | } // namespace detail 43 | } // namespace libkafka_asio 44 | 45 | #endif // METADATA_REQUEST_WRITE_H_28BB9709_A65B_44C9_91C3_0A380BD76F7D 46 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/metadata_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/metadata_response_read.h 3 | // ------------------------------------ 4 | // 5 | // Copyright (c) 2015-2016 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef METADATA_RESPONSE_READ_EBB58854_D8E4_40DE_A2E6_BADBFC9D8DB8 11 | #define METADATA_RESPONSE_READ_EBB58854_D8E4_40DE_A2E6_BADBFC9D8DB8 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | namespace detail 20 | { 21 | 22 | inline void ReadResponseMessage(std::istream& is, 23 | MutableMetadataResponse& response, 24 | boost::system::error_code& ec) 25 | { 26 | // error code not used 27 | (void)ec; 28 | // Brokers 29 | response.mutable_brokers().resize(ReadInt32(is)); 30 | BOOST_FOREACH(MetadataResponse::Broker& broker, response.mutable_brokers()) 31 | { 32 | broker.node_id = ReadInt32(is); 33 | broker.host = ReadString(is); 34 | broker.port = ReadInt32(is); 35 | } 36 | 37 | // Topic Metadata 38 | int topic_count = ReadInt32(is); 39 | for (int t = 0; t < topic_count; ++t) 40 | { 41 | MetadataResponse::Topics::key_type key; 42 | MetadataResponse::Topics::mapped_type topic; 43 | topic.error_code = ReadInt16(is); 44 | key = ReadString(is); 45 | 46 | int partition_count = ReadInt32(is); 47 | for (int p = 0; p < partition_count; ++p) 48 | { 49 | MetadataResponse::Partitions::key_type key; 50 | MetadataResponse::Partitions::mapped_type partition; 51 | partition.error_code = ReadInt16(is); 52 | key = ReadInt32(is); 53 | partition.leader = ReadInt32(is); 54 | 55 | Int32 replicas_size = ReadInt32(is); 56 | for (Int32 k = 0; k < replicas_size; ++k) 57 | { 58 | partition.replicas.push_back(ReadInt32(is)); 59 | } 60 | 61 | Int32 isr_size = ReadInt32(is); 62 | for (Int32 k = 0; k < isr_size; ++k) 63 | { 64 | partition.isr.push_back(ReadInt32(is)); 65 | } 66 | topic.partitions.insert(std::make_pair(key, partition)); 67 | } 68 | response.mutable_topics().insert(std::make_pair(key, topic)); 69 | } 70 | } 71 | 72 | } // namespace detail 73 | } // namespace libkafka_asio 74 | 75 | #endif // METADATA_RESPONSE_READ_H_EBB58854_D8E4_40DE_A2E6_BADBFC9D8DB8 76 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/offset_commit_request_write.h: 
-------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/offset_commit_request_write.h 3 | // --------------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_COMMIT_REQUEST_WRITE_H_8C84B333_19EF_4E81_8E1D_0236C0EA7061 11 | #define OFFSET_COMMIT_REQUEST_WRITE_H_8C84B333_19EF_4E81_8E1D_0236C0EA7061 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline Int32 RequestMessageWireSize(const OffsetCommitRequest& request) 22 | { 23 | Int32 size = StringWireSize(request.consumer_group()); 24 | 25 | // Topics Array 26 | size += sizeof(Int32); 27 | BOOST_FOREACH(const OffsetCommitRequest::Topic& topic, request.topics()) 28 | { 29 | // Partitions Array 30 | size += sizeof(Int32); 31 | BOOST_FOREACH(const OffsetCommitRequest::Partition& partition, 32 | topic.partitions) 33 | { 34 | size += 35 | sizeof(Int32) + // Partition 36 | sizeof(Int64) + // Offset 37 | sizeof(Int64) + // Timestamp 38 | StringWireSize(partition.metadata); // Metadata 39 | } 40 | } 41 | return size; 42 | } 43 | 44 | inline void WriteRequestMessage(const OffsetCommitRequest& request, 45 | std::ostream& os) 46 | { 47 | WriteString(request.consumer_group(), os); 48 | 49 | // Topics Array 50 | WriteInt32(static_cast(request.topics().size()), os); 51 | BOOST_FOREACH(const OffsetCommitRequest::Topic& topic, request.topics()) 52 | { 53 | WriteString(topic.topic_name, os); 54 | 55 | // Partitions Array 56 | WriteInt32(static_cast(topic.partitions.size()), os); 57 | BOOST_FOREACH(const OffsetCommitRequest::Partition& partition, 58 | topic.partitions) 59 | { 60 | WriteInt32(partition.partition, os); 61 | WriteInt64(partition.offset, os); 62 | WriteInt64(partition.timestamp, os); 63 | WriteString(partition.metadata, os); 64 | } 65 | } 66 | } 67 | 68 | } // namespace detail 69 | } // namespace libkafka_asio 70 | 71 | #endif // OFFSET_COMMIT_REQUEST_WRITE_H_8C84B333_19EF_4E81_8E1D_0236C0EA7061 72 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/offset_commit_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/offset_commit_response_read.h 3 | // ----------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_COMMIT_RESPONSE_READ_H_2BC25D29_FD96_4830_AD74_E2495AA55545 11 | #define OFFSET_COMMIT_RESPONSE_READ_H_2BC25D29_FD96_4830_AD74_E2495AA55545 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline void ReadResponseMessage(std::istream& is, 22 | MutableOffsetCommitResponse& response, 23 | boost::system::error_code& ec) 24 | { 25 | // error code not used 26 | (void)ec; 27 | int topic_count = ReadInt32(is); 28 | for (int t = 0; t < topic_count; ++t) 29 | { 30 | OffsetCommitResponse::Topics::key_type key; 31 | OffsetCommitResponse::Topics::mapped_type topic; 32 | key = ReadString(is); 33 | 34 | int partition_count = ReadInt32(is); 35 | for (int p = 0; p < partition_count; ++p) 36 | { 37 | OffsetCommitResponse::Partitions::key_type key; 38 | OffsetCommitResponse::Partitions::mapped_type partition; 39 | key = ReadInt32(is); 40 | partition.error_code = ReadInt16(is); 41 | topic.partitions.insert(std::make_pair(key, partition)); 42 | } 43 | response.mutable_topics().insert(std::make_pair(key, topic)); 44 | } 45 | } 46 | 47 | } // namespace detail 48 | } // namespace libkafka_asio 49 | 50 | #endif // OFFSET_COMMIT_RESPONSE_READ_H_2BC25D29_FD96_4830_AD74_E2495AA55545 51 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/offset_fetch_request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/offset_fetch_request_write.h 3 | // ---------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_FETCH_REQUEST_WRITE_H_508FD276_A6DC_4FC5_B188_1C8ABB9BBDC6 11 | #define OFFSET_FETCH_REQUEST_WRITE_H_508FD276_A6DC_4FC5_B188_1C8ABB9BBDC6 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline Int32 RequestMessageWireSize(const OffsetFetchRequest& request) 22 | { 23 | Int32 size = StringWireSize(request.consumer_group()); 24 | // Topics Array 25 | size += sizeof(Int32); 26 | BOOST_FOREACH(const OffsetFetchRequest::Topic& topic, request.topics()) 27 | { 28 | size += StringWireSize(topic.topic_name); 29 | // Partitions Array 30 | size += sizeof(Int32); 31 | size += static_cast(topic.partitions.size()) * sizeof(Int32); 32 | } 33 | return size; 34 | } 35 | 36 | inline void WriteRequestMessage(const OffsetFetchRequest& request, 37 | std::ostream& os) 38 | { 39 | WriteString(request.consumer_group(), os); 40 | // Topics Array 41 | WriteInt32(static_cast(request.topics().size()), os); 42 | BOOST_FOREACH(const OffsetFetchRequest::Topic& topic, request.topics()) 43 | { 44 | WriteString(topic.topic_name, os); 45 | // Partitions Array 46 | WriteInt32(static_cast(topic.partitions.size()), os); 47 | BOOST_FOREACH(const OffsetFetchRequest::Partition partition, 48 | topic.partitions) 49 | { 50 | WriteInt32(partition.partition, os); 51 | } 52 | } 53 | } 54 | 55 | } // namespace detail 56 | } // namespace libkafka_asio 57 | 58 | #endif // OFFSET_FETCH_REQUEST_WRITE_H_508FD276_A6DC_4FC5_B188_1C8ABB9BBDC6 59 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/offset_fetch_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/offset_fetch_response_read.h 3 | // 
---------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_FETCH_RESPONSE_READ_H_EF46A204_533F_42EF_837F_877019C5989E 11 | #define OFFSET_FETCH_RESPONSE_READ_H_EF46A204_533F_42EF_837F_877019C5989E 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline void ReadResponseMessage(std::istream& is, 22 | MutableOffsetFetchResponse& response, 23 | boost::system::error_code& ec) 24 | { 25 | // error code not used 26 | (void)ec; 27 | int topic_count = ReadInt32(is); 28 | for (int t = 0; t < topic_count; ++t) 29 | { 30 | OffsetFetchResponse::Topics::key_type key; 31 | OffsetFetchResponse::Topics::mapped_type topic; 32 | key = ReadString(is); 33 | 34 | int partition_count = ReadInt32(is); 35 | for (int p = 0; p < partition_count; ++p) 36 | { 37 | OffsetFetchResponse::Partitions::key_type key; 38 | OffsetFetchResponse::Partitions::mapped_type partition; 39 | key = ReadInt32(is); 40 | partition.offset = ReadInt64(is); 41 | partition.metadata = ReadString(is); 42 | partition.error_code = ReadInt16(is); 43 | topic.partitions.insert(std::make_pair(key, partition)); 44 | } 45 | response.mutable_topics().insert(std::make_pair(key, topic)); 46 | } 47 | } 48 | 49 | } // namespace detail 50 | } // namespace libkafka_asio 51 | 52 | #endif // OFFSET_FETCH_RESPONSE_READ_H_EF46A204_533F_42EF_837F_877019C5989E 53 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/offset_request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/offset_request_write.h 3 | // ---------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_REQUEST_WRITE_H_B5A2F2C0_18BC_4AD5_B892_27834F8D87DE 11 | #define OFFSET_REQUEST_WRITE_H_B5A2F2C0_18BC_4AD5_B892_27834F8D87DE 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline Int32 RequestMessageWireSize(const OffsetRequest& request) 22 | { 23 | Int32 size = sizeof(Int32); // ReplicaId 24 | 25 | // Topics array 26 | size += sizeof(Int32); 27 | BOOST_FOREACH(const OffsetRequest::Topic& topic, request.topics()) 28 | { 29 | size += StringWireSize(topic.topic_name); 30 | 31 | // Partitions array 32 | size += sizeof(Int32); 33 | size += static_cast(topic.partitions.size() * ( 34 | sizeof(Int32) + // Partition 35 | sizeof(Int64) + // Time 36 | sizeof(Int32))); // MaxNumberOfOffsets 37 | } 38 | return size; 39 | } 40 | 41 | inline void WriteRequestMessage(const OffsetRequest& request, std::ostream& os) 42 | { 43 | WriteInt32(request.replica_id(), os); 44 | 45 | // Topics array 46 | WriteInt32(static_cast(request.topics().size()), os); 47 | BOOST_FOREACH(const OffsetRequest::Topic& topic, request.topics()) 48 | { 49 | WriteString(topic.topic_name, os); 50 | 51 | // Partitions array 52 | WriteInt32(static_cast(topic.partitions.size()), os); 53 | BOOST_FOREACH(const OffsetRequest::Partition& partition, topic.partitions) 54 | { 55 | WriteInt32(partition.partition, os); 56 | WriteInt64(partition.time, os); 57 | WriteInt32(partition.max_number_of_offsets, os); 58 | } 59 | } 60 | } 61 | 62 | } // namespace detail 63 | } // namespace libkafka_asio 64 | 65 | #endif // OFFSET_REQUEST_WRITE_H_B5A2F2C0_18BC_4AD5_B892_27834F8D87DE 66 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/offset_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/offset_response_read.h 3 | // ---------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_RESPONSE_READ_H_BAFF7FC5_03CA_46CD_B8EC_AD2CDBFF19F0 11 | #define OFFSET_RESPONSE_READ_H_BAFF7FC5_03CA_46CD_B8EC_AD2CDBFF19F0 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | namespace detail 19 | { 20 | 21 | inline void ReadResponseMessage(std::istream& is, 22 | MutableOffsetResponse& response, 23 | boost::system::error_code& ec) 24 | { 25 | int topic_count = ReadInt32(is); 26 | for (int t = 0; t < topic_count; ++t) 27 | { 28 | OffsetResponse::Topics::key_type key; 29 | OffsetResponse::Topics::mapped_type topic; 30 | key = ReadString(is); 31 | 32 | int partition_count = ReadInt32(is); 33 | for (int p = 0; p < partition_count; ++p) 34 | { 35 | OffsetResponse::Partitions::key_type key; 36 | OffsetResponse::Partitions::mapped_type partition; 37 | key = ReadInt32(is); 38 | partition.error_code = ReadInt16(is); 39 | if (partition.error_code) 40 | { 41 | ec = (KafkaError) partition.error_code; 42 | return; 43 | } 44 | Int32 offsets_size = ReadInt32(is); 45 | for (Int32 k = 0; k < offsets_size; ++k) 46 | { 47 | partition.offsets.push_back(ReadInt64(is)); 48 | } 49 | topic.partitions.insert(std::make_pair(key, partition)); 50 | } 51 | response.mutable_topics().insert(std::make_pair(key, topic)); 52 | } 53 | } 54 | 55 | 56 | } // namespace detail 57 | } // namespace libkafka_asio 58 | 59 | #endif // OFFSET_RESPONSE_READ_H_BAFF7FC5_03CA_46CD_B8EC_AD2CDBFF19F0 60 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/produce_request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/produce_request_write.h 3 | // ----------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
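Usage sketch for the reader above: a non-zero per-partition error code is converted to a `KafkaError` and surfaces through the `boost::system::error_code` out-parameter. The buffer is assumed to already hold a complete response (correlation id included); include paths are assumptions.

```cpp
// Sketch only: drive the detail-level reader directly and report success.
#include <sstream>
#include <string>
#include <boost/system/error_code.hpp>
#include <libkafka_asio/libkafka_asio.h>
#include <libkafka_asio/detail/response_read.h>

bool ParseOffsetResponse(const std::string& wire_data,
                         libkafka_asio::MutableOffsetResponse& response)
{
  std::istringstream is(wire_data);
  boost::system::error_code ec;
  libkafka_asio::detail::ReadResponse(is, response, ec);
  return !ec;  // broker errors arrive here as KafkaError values
}
```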
(See file LICENSE) 8 | // 9 | 10 | #ifndef PRODUCE_REQUEST_WRITE_H_E92E8BE2_2AB8_4383_A2DD_698DD8482789 11 | #define PRODUCE_REQUEST_WRITE_H_E92E8BE2_2AB8_4383_A2DD_698DD8482789 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | namespace detail 20 | { 21 | 22 | inline Int32 RequestMessageWireSize(const ProduceRequest& request) 23 | { 24 | Int32 size = 25 | sizeof(Int16) + // RequiredAcks 26 | sizeof(Int32); // Timeout 27 | 28 | // Topic Array 29 | size += sizeof(Int32); 30 | BOOST_FOREACH(const ProduceRequest::Topic& topic, request.topics()) 31 | { 32 | size += StringWireSize(topic.topic_name); 33 | 34 | // Partition array 35 | size += sizeof(Int32); 36 | BOOST_FOREACH(const ProduceRequest::Partition& partition, 37 | topic.partitions) 38 | { 39 | size += 40 | sizeof(Int32) + // Partition 41 | sizeof(Int32) + // MessageSetSize 42 | MessageSetWireSize(partition.messages); // MessageSet 43 | } 44 | } 45 | return size; 46 | } 47 | 48 | inline void WriteRequestMessage(const ProduceRequest& request, std::ostream& os) 49 | { 50 | WriteInt16(request.required_acks(), os); 51 | WriteInt32(request.timeout(), os); 52 | 53 | // Topic Array 54 | WriteInt32(static_cast(request.topics().size()), os); 55 | BOOST_FOREACH(const ProduceRequest::Topic& topic, request.topics()) 56 | { 57 | WriteString(topic.topic_name, os); 58 | 59 | // Partition Array 60 | WriteInt32(static_cast(topic.partitions.size()), os); 61 | BOOST_FOREACH(const ProduceRequest::Partition& partition, 62 | topic.partitions) 63 | { 64 | WriteInt32(partition.partition, os); 65 | WriteInt32(MessageSetWireSize(partition.messages), os); 66 | WriteMessageSet(partition.messages, os); 67 | } 68 | } 69 | } 70 | 71 | } // namespace detail 72 | } // namespace libkafka_asio 73 | 74 | #endif // PRODUCE_REQUEST_WRITE_H_E92E8BE2_2AB8_4383_A2DD_698DD8482789 75 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/produce_response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/produce_response_read.h 3 | // ----------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
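Framing sketch: `WriteRequest` prefixes the serialized request with its own size, so the resulting buffer length equals 4 plus `RequestWireSize(request, client_id)`. The include paths and client id are assumptions.

```cpp
// Sketch only: full request framing via the detail-level WriteRequest.
#include <sstream>
#include <string>
#include <libkafka_asio/libkafka_asio.h>
#include <libkafka_asio/detail/request_write.h>

std::string FrameProduceRequest(const libkafka_asio::ProduceRequest& request)
{
  std::ostringstream os;
  libkafka_asio::detail::WriteRequest(request, "my-client", os);
  // First 4 bytes: big-endian size of everything that follows.
  return os.str();
}
```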
(See file LICENSE) 8 | // 9 | 10 | #ifndef PRODUCE_RESPONSE_READ_H_9046BEF5_332B_4B67_8138_5964E836BF6C 11 | #define PRODUCE_RESPONSE_READ_H_9046BEF5_332B_4B67_8138_5964E836BF6C 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace libkafka_asio 19 | { 20 | namespace detail 21 | { 22 | 23 | inline void ReadResponseMessage(std::istream& is, 24 | MutableProduceResponse& response, 25 | boost::system::error_code& ec) 26 | { 27 | int topic_count = ReadInt32(is); 28 | for (int t = 0; t < topic_count; ++t) 29 | { 30 | ProduceResponse::Topics::key_type key; 31 | ProduceResponse::Topics::mapped_type topic; 32 | key = ReadString(is); 33 | 34 | int partition_count = ReadInt32(is); 35 | for (int p = 0; p < partition_count; ++p) 36 | { 37 | ProduceResponse::Partitions::key_type key; 38 | ProduceResponse::Partitions::mapped_type partition; 39 | key = ReadInt32(is); 40 | partition.error_code = ReadInt16(is); 41 | partition.offset = ReadInt64(is); 42 | 43 | if (partition.error_code) 44 | { 45 | ec = (KafkaError) partition.error_code; 46 | return; 47 | } 48 | topic.partitions.insert(std::make_pair(key, partition)); 49 | } 50 | response.mutable_topics().insert(std::make_pair(key, topic)); 51 | } 52 | } 53 | 54 | } // namespace detail 55 | } // namespace libkafka_asio 56 | 57 | #endif // PRODUCE_RESPONSE_READ_H_9046BEF5_332B_4B67_8138_5964E836BF6C 58 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/request_write.h 3 | // --------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef REQUEST_WRITE_H_5A99E292_8486_40A6_8B1B_90D78026F803 11 | #define REQUEST_WRITE_H_5A99E292_8486_40A6_8B1B_90D78026F803 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | namespace detail 18 | { 19 | 20 | inline Int32 StringWireSize(const String& str) 21 | { 22 | return static_cast(sizeof(Int16) + str.size()); 23 | } 24 | 25 | inline Int32 BytesWireSize(const Bytes& bytes) 26 | { 27 | Int32 size = sizeof(Int32); 28 | if (bytes) 29 | { 30 | size += static_cast(bytes->size()); 31 | } 32 | return size; 33 | } 34 | 35 | template< typename TRequest > 36 | Int32 RequestWireSize(const TRequest& request, const String& client_id) 37 | { 38 | return 39 | sizeof(Int16) + // ApiKey 40 | sizeof(Int16) + // ApiVersion 41 | sizeof(Int32) + // CorrelationId 42 | detail::StringWireSize(client_id) + // ClientId 43 | RequestMessageWireSize(request); // RequestMessage 44 | } 45 | 46 | inline void WriteInt8(Int8 value, std::ostream& os) 47 | { 48 | os.write( reinterpret_cast(&value), sizeof(Int8) ); 49 | } 50 | 51 | inline void WriteInt16(Int16 value, std::ostream& os) 52 | { 53 | value = htons(value); 54 | os.write( reinterpret_cast(&value), sizeof(Int16) ); 55 | } 56 | 57 | inline void WriteInt32(Int32 value, std::ostream& os) 58 | { 59 | value = htonl(value); 60 | os.write( reinterpret_cast(&value), sizeof(Int32) ); 61 | } 62 | 63 | inline void WriteInt64(Int64 value, std::ostream& os) 64 | { 65 | value = host_to_be_64(value); 66 | os.write( reinterpret_cast(&value), sizeof(Int64) ); 67 | } 68 | 69 | inline void WriteString(const String& value, std::ostream& os) 70 | { 71 | WriteInt16(static_cast(value.size()), os); 72 | os.write( value.c_str(), value.size() ); 73 | } 74 | 75 | inline void WriteBytes(const Bytes& value, 
std::ostream& os) 76 | { 77 | if (!value) 78 | { 79 | WriteInt32(-1, os); 80 | return; 81 | } 82 | WriteInt32(static_cast(value->size()), os); 83 | if (!value->empty()) 84 | { 85 | os.write(reinterpret_cast(&(*value)[0]), value->size()); 86 | } 87 | } 88 | 89 | template< typename TRequest > 90 | void WriteRequest(const TRequest& request, const String& client_id, 91 | std::ostream& os) 92 | { 93 | WriteInt32(RequestWireSize(request, client_id), os); 94 | WriteInt16(request.api_key(), os); 95 | WriteInt16(request.api_version(), os); 96 | WriteInt32(request.correlation_id(), os); 97 | WriteString(client_id, os); 98 | WriteRequestMessage(request, os); 99 | } 100 | 101 | } // namespace detail 102 | } // namespace libkafka_asio 103 | 104 | #endif // REQUEST_WRITE_H_5A99E292_8486_40A6_8B1B_90D78026F803 105 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/impl/response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/impl/response_read.h 3 | // --------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef REQUEST_READ_H_2C4DEC07_68A7_48A3_A19E_4ECDE2AF19D9 11 | #define REQUEST_READ_H_2C4DEC07_68A7_48A3_A19E_4ECDE2AF19D9 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | namespace detail 18 | { 19 | 20 | inline Int8 ReadInt8(std::istream& is) 21 | { 22 | Int8 result = 0; 23 | is.read(reinterpret_cast(&result), sizeof(Int8)); 24 | return result; 25 | } 26 | 27 | inline Int16 ReadInt16(std::istream& is) 28 | { 29 | Int16 result = 0; 30 | is.read(reinterpret_cast(&result), sizeof(Int16)); 31 | result = ntohs(result); 32 | return result; 33 | } 34 | 35 | inline Int32 ReadInt32(std::istream& is) 36 | { 37 | Int32 result = 0; 38 | is.read(reinterpret_cast(&result), sizeof(Int32)); 39 | result = ntohl(result); 40 | return result; 41 | } 42 | 43 | inline Int64 ReadInt64(std::istream& is) 44 | { 45 | Int64 result = 0; 46 | is.read(reinterpret_cast(&result), sizeof(Int64)); 47 | result = be_to_host_64(result); 48 | return result; 49 | } 50 | 51 | inline String ReadString(std::istream& is) 52 | { 53 | Int16 length = ReadInt16(is); 54 | if (length > 0) 55 | { 56 | String result(length, '\0'); 57 | is.read(&result[0], length); 58 | return result; 59 | } 60 | return ""; 61 | } 62 | 63 | inline void ReadBytes(std::istream& is, Bytes& bytes) 64 | { 65 | Int32 length = ReadInt32(is); 66 | if (length > 0) 67 | { 68 | bytes.reset(new Bytes::element_type(length, 0)); 69 | is.read(reinterpret_cast(&(*bytes)[0]), length); 70 | } 71 | } 72 | 73 | template 74 | void ReadResponse(std::istream& is, 75 | TMutableResponse& response, 76 | boost::system::error_code& ec) 77 | { 78 | response.set_correlation_id(ReadInt32(is)); 79 | ReadResponseMessage(is, response, ec); 80 | } 81 | 82 | } // namespace detail 83 | } // namespace libkafka_asio 84 | 85 | #endif // REQUEST_READ_H_2C4DEC07_68A7_48A3_A19E_4ECDE2AF19D9 86 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/recursive_messageset_iterator.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/recursive_messageset_iterator.h 3 | // -------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
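Round-trip sketch for the primitive writers and readers above: integers go out in network byte order (`htons`/`htonl`/`host_to_be_64`) and come back in host order; strings carry an Int16 length prefix. Include paths are assumptions.

```cpp
// Sketch only: write primitives into a stream and read them back.
#include <sstream>
#include <cassert>
#include <libkafka_asio/detail/request_write.h>
#include <libkafka_asio/detail/response_read.h>

void PrimitiveRoundTrip()
{
  using namespace libkafka_asio;
  std::stringstream ss;
  detail::WriteInt32(1234567, ss);     // 4 bytes, big-endian
  detail::WriteString("mytopic", ss);  // Int16 length prefix + raw characters
  assert(detail::ReadInt32(ss) == 1234567);
  assert(detail::ReadString(ss) == "mytopic");
}
```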
(See file LICENSE) 8 | // 9 | 10 | #ifndef RECURSIVE_MESSAGESET_ITERATOR_H_FDB00A65_7998_46B4_AF03_CCBB481582E2 11 | #define RECURSIVE_MESSAGESET_ITERATOR_H_FDB00A65_7998_46B4_AF03_CCBB481582E2 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | namespace detail 20 | { 21 | 22 | class RecursiveMessageSetIterator : 23 | public boost::iterator_facade< 24 | RecursiveMessageSetIterator, 25 | const MessageAndOffset, 26 | boost::forward_traversal_tag 27 | > 28 | { 29 | public: 30 | 31 | // Create a new empty and uninitialized iterator. Such objects can be used 32 | // as end-iterator. 33 | RecursiveMessageSetIterator(); 34 | 35 | // Create a new object, recursively iterating over the given MessageSet. 36 | // The MessageSet MUST always have a longer lifetime than the iterator object. 37 | explicit RecursiveMessageSetIterator(const MessageSet& message_set); 38 | 39 | // Copy the given iterator object. The resulting iterator will iterate over 40 | // the same MessageSet. The outer- as well as the inner-iterator will be 41 | // copied as well. So one can continue to work with the original iterator 42 | // object without affecting the new one and vice-versa. 43 | RecursiveMessageSetIterator(const RecursiveMessageSetIterator& orig); 44 | 45 | // Assign the given iterator to this one by copying it. The same rules apply 46 | // as for the copy-constructor above. 47 | RecursiveMessageSetIterator& operator=( 48 | const RecursiveMessageSetIterator& rhs); 49 | 50 | private: 51 | friend class boost::iterator_core_access; 52 | 53 | typedef boost::scoped_ptr InnerIteratorType; 54 | 55 | void increment(); 56 | 57 | bool equal(const RecursiveMessageSetIterator& other) const; 58 | 59 | const MessageAndOffset& dereference() const; 60 | 61 | void Reset(); 62 | 63 | bool IsInnerDone() const; 64 | 65 | // Pointer to the MessageSet we're iterating through 66 | const MessageSet *message_set_; 67 | 68 | // Outer iterators: Normal iterators on the given MessageSet 69 | MessageSet::const_iterator outer_iterator_; 70 | MessageSet::const_iterator outer_end_iterator_; 71 | 72 | // Inner iterator: If the Message, currently pointed to by the outer iterator 73 | // has a nested MessageSet, the inner iterator is used to walk through it. 74 | InnerIteratorType inner_iterator_; 75 | }; 76 | 77 | } // namespace detail 78 | } // namespace libkafka_asio 79 | 80 | #include 81 | 82 | #endif // RECURSIVE_MESSAGESET_ITERATOR_H_FDB00A65_7998_46B4_AF03_CCBB481582E2 83 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/request_write.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/request_write.h 3 | // ---------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
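Usage sketch for `RecursiveMessageSetIterator`: a default-constructed iterator serves as the end marker, and incrementing descends into nested (compressed) message sets. Include paths are assumptions.

```cpp
// Sketch only: count every message in a MessageSet, including nested ones.
#include <cstddef>
#include <libkafka_asio/libkafka_asio.h>
#include <libkafka_asio/detail/recursive_messageset_iterator.h>

std::size_t CountAllMessages(const libkafka_asio::MessageSet& message_set)
{
  typedef libkafka_asio::detail::RecursiveMessageSetIterator Iterator;
  std::size_t count = 0;
  for (Iterator it(message_set), end; it != end; ++it)
  {
    ++count;  // *it dereferences to a const MessageAndOffset&
  }
  return count;
}
```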
(See file LICENSE) 8 | // 9 | 10 | #ifndef REQUEST_WRITE_H_5475991E_0B9F_42A7_97EC_6E5206FB6A6A 11 | #define REQUEST_WRITE_H_5475991E_0B9F_42A7_97EC_6E5206FB6A6A 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | 20 | // 21 | // Forward declarations 22 | // 23 | 24 | class MetadataRequest; 25 | 26 | class ProduceRequest; 27 | 28 | class FetchRequest; 29 | 30 | class OffsetRequest; 31 | 32 | class ConsumerMetadataRequest; 33 | 34 | class OffsetCommitRequest; 35 | 36 | class OffsetFetchRequest; 37 | 38 | namespace detail 39 | { 40 | 41 | Int32 StringWireSize(const String& str); 42 | 43 | Int32 BytesWireSize(const Bytes& bytes); 44 | 45 | Int32 MessageWireSize(const Message& message); 46 | 47 | Int32 MessageSetWireSize(const MessageSet& message_set); 48 | 49 | template 50 | Int32 RequestWireSize(const TRequest& request, const String& client_id); 51 | 52 | Int32 RequestMessageWireSize(const MetadataRequest& request); 53 | 54 | Int32 RequestMessageWireSize(const ProduceRequest& request); 55 | 56 | Int32 RequestMessageWireSize(const FetchRequest& request); 57 | 58 | Int32 RequestMessageWireSize(const OffsetRequest& request); 59 | 60 | Int32 RequestMessageWireSize(const ConsumerMetadataRequest& request); 61 | 62 | Int32 RequestMessageWireSize(const OffsetCommitRequest& request); 63 | 64 | Int32 RequestMessageWireSize(const OffsetFetchRequest& request); 65 | 66 | void WriteInt8(Int8 value, std::ostream& os); 67 | 68 | void WriteInt16(Int16 value, std::ostream& os); 69 | 70 | void WriteInt32(Int32 value, std::ostream& os); 71 | 72 | void WriteInt64(Int64 value, std::ostream& os); 73 | 74 | void WriteString(const String& value, std::ostream& os); 75 | 76 | void WriteBytes(const Bytes& value, std::ostream& os); 77 | 78 | void WriteMessage(const Message& value, std::ostream& os); 79 | 80 | void WriteMessageSet(const MessageSet& value, std::ostream& os); 81 | 82 | template 83 | void WriteRequest(const TRequest& request, const String& client_id, 84 | std::ostream& os); 85 | 86 | void WriteRequestMessage(const MetadataRequest& request, std::ostream& os); 87 | 88 | void WriteRequestMessage(const ProduceRequest& request, std::ostream& os); 89 | 90 | void WriteRequestMessage(const FetchRequest& request, std::ostream& os); 91 | 92 | void WriteRequestMessage(const OffsetRequest& request, std::ostream& os); 93 | 94 | void WriteRequestMessage(const ConsumerMetadataRequest& request, 95 | std::ostream& os); 96 | 97 | void WriteRequestMessage(const OffsetCommitRequest& request, std::ostream& os); 98 | 99 | void WriteRequestMessage(const OffsetFetchRequest& request, std::ostream& os); 100 | 101 | } // namespace detail 102 | } // namespace libkafka_asio 103 | 104 | #include 105 | 106 | #endif // REQUEST_WRITE_H_5475991E_0B9F_42A7_97EC_6E5206FB6A6A 107 | -------------------------------------------------------------------------------- /lib/libkafka_asio/detail/response_read.h: -------------------------------------------------------------------------------- 1 | // 2 | // detail/response_read.h 3 | // ---------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef RESPONSE_READ_H_TODO 11 | #define RESPONSE_READ_H_TODO 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | 22 | // 23 | // Forward declarations 24 | // 25 | 26 | class MutableMetadataResponse; 27 | 28 | class MutableProduceResponse; 29 | 30 | class MutableFetchResponse; 31 | 32 | class MutableOffsetResponse; 33 | 34 | class MutableConsumerMetadataResponse; 35 | 36 | class MutableOffsetCommitResponse; 37 | 38 | class MutableOffsetFetchResponse; 39 | 40 | namespace detail 41 | { 42 | 43 | Int8 ReadInt8(std::istream& is); 44 | 45 | Int16 ReadInt16(std::istream& is); 46 | 47 | Int32 ReadInt32(std::istream& is); 48 | 49 | Int64 ReadInt64(std::istream& is); 50 | 51 | String ReadString(std::istream& is); 52 | 53 | void ReadBytes(std::istream& is, Bytes& bytes); 54 | 55 | void ReadMessage(std::istream& is, 56 | Message& message, 57 | boost::system::error_code& ec); 58 | 59 | void ReadMessageSet(std::istream& is, 60 | MessageSet& message_set, 61 | size_t size, 62 | boost::system::error_code& ec); 63 | 64 | template 65 | void ReadResponse(std::istream& is, 66 | TMutableResponse& response, 67 | boost::system::error_code& ec); 68 | 69 | void ReadResponseMessage(std::istream& is, 70 | MutableMetadataResponse& response, 71 | boost::system::error_code& ec); 72 | 73 | void ReadResponseMessage(std::istream& is, 74 | MutableProduceResponse& response, 75 | boost::system::error_code& ec); 76 | 77 | void ReadResponseMessage(std::istream& is, 78 | MutableFetchResponse& response, 79 | boost::system::error_code& ec); 80 | 81 | void ReadResponseMessage(std::istream& is, 82 | MutableOffsetResponse& response, 83 | boost::system::error_code& ec); 84 | 85 | void ReadResponseMessage(std::istream& is, 86 | MutableConsumerMetadataResponse& response, 87 | boost::system::error_code& ec); 88 | 89 | void ReadResponseMessage(std::istream& is, 90 | MutableOffsetCommitResponse& response, 91 | boost::system::error_code& ec); 92 | 93 | void ReadResponseMessage(std::istream& is, 94 | MutableOffsetFetchResponse& response, 95 | boost::system::error_code& ec); 96 | 97 | } // namespace detail 98 | } // namespace libkafka_asio 99 | 100 | #include 101 | 102 | #endif // RESPONSE_READ_H_TODO 103 | -------------------------------------------------------------------------------- /lib/libkafka_asio/fetch_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // fetch_request.h 3 | // --------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef FETCH_REQUEST_H_BFC4C4C3_2D84_45C2_9FB5_78613B53A352 11 | #define FETCH_REQUEST_H_BFC4C4C3_2D84_45C2_9FB5_78613B53A352 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | namespace libkafka_asio 21 | { 22 | 23 | // Kafka Fetch API request implementation 24 | class FetchRequest : 25 | public Request 26 | { 27 | friend class Request; 28 | 29 | static Int16 ApiKey(); 30 | 31 | struct PartitionProperties 32 | { 33 | Int64 fetch_offset; 34 | Int32 max_bytes; 35 | }; 36 | 37 | typedef detail::TopicsPartitionsVector< 38 | detail::EmptyProperties, 39 | PartitionProperties 40 | > TopicsPartitions; 41 | 42 | public: 43 | typedef FetchResponse ResponseType; 44 | typedef MutableFetchResponse MutableResponseType; 45 | typedef TopicsPartitions::TopicType Topic; 46 | typedef TopicsPartitions::PartitionType Partition; 47 | typedef TopicsPartitions::TopicsType Topics; 48 | typedef TopicsPartitions::PartitionsType Partitions; 49 | 50 | FetchRequest(); 51 | 52 | Int32 replica_id() const; 53 | 54 | Int32 max_wait_time() const; 55 | 56 | Int32 min_bytes() const; 57 | 58 | const Topics& topics() const; 59 | 60 | // Maximum time to wait for message data to become available on the server. 61 | // This option can be used in combination with the `min_bytes` parameter. 62 | // The timeout must be specified in milliseconds. 63 | void set_max_wait_time(Int32 max_wait_time); 64 | 65 | // Set the minimum number of bytes to wait for on the server side. 66 | // If this is set to 0, the server won't wait at all. If set to 1, the server 67 | // waits until 1 byte of the requested topic-partition data is available or 68 | // the specified timeout occurs. 69 | void set_min_bytes(Int32 min_bytes); 70 | 71 | // Fetch data for the specified topic-partition. 72 | // If such entry already exists in this Fetch request, it gets overridden. 73 | // Optionally, the offset to start the Fetch operation from, as well as the 74 | // maximum number of bytes to fetch, can be specified. 75 | void FetchTopic(const String& topic_name, Int32 partition, 76 | Int64 fetch_offset = constants::kDefaultFetchOffset, 77 | Int32 max_bytes = constants::kDefaultFetchMaxBytes); 78 | 79 | // Clears this Fetch request by removing all topic/partition entries. 80 | void Clear(); 81 | 82 | private: 83 | Int32 max_wait_time_; 84 | Int32 min_bytes_; 85 | Topics topics_; 86 | }; 87 | 88 | } // namespace libkafka_asio 89 | 90 | #include 91 | 92 | #endif // FETCH_REQUEST_H_BFC4C4C3_2D84_45C2_9FB5_78613B53A352 93 | -------------------------------------------------------------------------------- /lib/libkafka_asio/fetch_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // fetch_response.h 3 | // ---------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
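Usage sketch for the Fetch API types above: configure the request, then iterate over all received messages with `FetchResponse::const_iterator`. Topic name, partition and start offset are placeholders.

```cpp
// Sketch only: build a fetch request and print received message values.
#include <iostream>
#include <string>
#include <libkafka_asio/libkafka_asio.h>

libkafka_asio::FetchRequest MakeFetchRequest()
{
  libkafka_asio::FetchRequest request;
  request.set_max_wait_time(100);        // wait at most 100 ms on the broker
  request.set_min_bytes(1);              // ... or until at least 1 byte arrived
  request.FetchTopic("mytopic", 0, 42);  // start fetching at offset 42
  return request;
}

void PrintMessages(const libkafka_asio::FetchResponse& response)
{
  libkafka_asio::FetchResponse::const_iterator it = response.begin();
  for (; it != response.end(); ++it)
  {
    const libkafka_asio::Bytes& value = it->value();
    if (value && !value->empty())
    {
      std::cout << std::string(value->begin(), value->end()) << std::endl;
    }
  }
}
```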
(See file LICENSE) 8 | // 9 | 10 | #ifndef FETCH_RESPONSE_H_AEAFD09F_2738_4548_B44B_0843DC1B9BB1 11 | #define FETCH_RESPONSE_H_AEAFD09F_2738_4548_B44B_0843DC1B9BB1 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace libkafka_asio 19 | { 20 | 21 | class MutableFetchResponse; 22 | 23 | // Kafka Fetch API Response Implementation 24 | class FetchResponse : 25 | public Response 26 | { 27 | friend class MutableFetchResponse; 28 | 29 | struct PartitionProperties 30 | { 31 | Int16 error_code; 32 | Int64 highwater_mark_offset; 33 | MessageSet messages; 34 | }; 35 | 36 | typedef detail::TopicsPartitionsMap< 37 | detail::EmptyProperties, 38 | PartitionProperties 39 | > TopicsPartitions; 40 | 41 | public: 42 | typedef TopicsPartitions::TopicType Topic; 43 | typedef TopicsPartitions::PartitionType Partition; 44 | typedef TopicsPartitions::TopicsType Topics; 45 | typedef TopicsPartitions::PartitionsType Partitions; 46 | 47 | typedef detail::FetchResponseIterator const_iterator; 48 | 49 | const Topics& topics() const; 50 | 51 | // Start iterator, used for iterating over all received messages 52 | const_iterator begin() const; 53 | 54 | // End iterator 55 | const_iterator end() const; 56 | 57 | private: 58 | Topics topics_; 59 | }; 60 | 61 | class MutableFetchResponse : 62 | public MutableResponse 63 | { 64 | public: 65 | FetchResponse::Topics& mutable_topics(); 66 | }; 67 | 68 | } // namespace libkafka_asio 69 | 70 | #include 71 | 72 | #endif // FETCH_RESPONSE_H_AEAFD09F_2738_4548_B44B_0843DC1B9BB1 73 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/connection_configuration.h: -------------------------------------------------------------------------------- 1 | #ifndef CONNECTION_CONFIGURATION_H_0435D34D_97AC_4D67_8E3A_DEBAAC218C4D 2 | #define CONNECTION_CONFIGURATION_H_0435D34D_97AC_4D67_8E3A_DEBAAC218C4D 3 | 4 | #include 5 | #include 6 | 7 | namespace libkafka_asio 8 | { 9 | 10 | inline ConnectionConfiguration::ConnectionConfiguration() : 11 | message_max_bytes(constants::kDefaultMessageMaxBytes), 12 | socket_timeout(constants::kDefaultSocketTimeout), 13 | client_id(constants::DefaultClientId()), 14 | auto_connect(constants::DefaultConnectionAutoConnect()) 15 | { 16 | } 17 | 18 | inline void ConnectionConfiguration::SetBrokerFromString(const std::string& str) 19 | { 20 | if (str.empty()) 21 | { 22 | broker_address.reset(); 23 | return; 24 | } 25 | BrokerAddress broker; 26 | std::string::size_type delimiter_position = str.find(':'); 27 | if (delimiter_position != std::string::npos && 28 | delimiter_position > 0 && 29 | delimiter_position < str.size() - 1) 30 | { 31 | broker.hostname = str.substr(0, delimiter_position); 32 | broker.service = str.substr(delimiter_position + 1); 33 | } 34 | else 35 | { 36 | broker.hostname = str.substr(0, delimiter_position); 37 | broker.service = constants::DefaultKafkaService(); 38 | } 39 | if (broker.hostname.empty() || broker.service.empty()) 40 | { 41 | return; 42 | } 43 | broker_address = broker; 44 | } 45 | 46 | template 47 | inline void ConnectionConfiguration::SetBroker(const T& broker) 48 | { 49 | SetBroker(broker.host, broker.port); 50 | } 51 | 52 | template 53 | inline void ConnectionConfiguration::SetBroker(const Tx& hostname, 54 | const Ty& service) 55 | { 56 | BrokerAddress broker; 57 | broker.hostname = boost::lexical_cast(hostname); 58 | broker.service = boost::lexical_cast(service); 59 | broker_address = broker; 60 | } 61 | 62 | } // namespace libkafka_asio 63 | 64 | 
#endif // CONNECTION_CONFIGURATION_H_0435D34D_97AC_4D67_8E3A_DEBAAC218C4D 65 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/consumer_metadata_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/consumer_metadata_request.h 3 | // -------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef CONSUMER_METADATA_REQUEST_H_9A89DB2D_7214_4D88_94EA_8A0E93822A06 11 | #define CONSUMER_METADATA_REQUEST_H_9A89DB2D_7214_4D88_94EA_8A0E93822A06 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | 18 | inline Int16 ConsumerMetadataRequest::ApiKey() 19 | { 20 | return constants::kApiKeyConsumerMetadataRequest; 21 | } 22 | 23 | inline const String& ConsumerMetadataRequest::consumer_group() const 24 | { 25 | return consumer_group_; 26 | } 27 | 28 | inline void ConsumerMetadataRequest::set_consumer_group( 29 | const String& consumer_group) 30 | { 31 | consumer_group_ = consumer_group; 32 | } 33 | 34 | } // namespace libkafka_asio 35 | 36 | #include 37 | 38 | #endif // CONSUMER_METADATA_REQUEST_H_9A89DB2D_7214_4D88_94EA_8A0E93822A06 39 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/consumer_metadata_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/consumer_metadata_response.h 3 | // --------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef CONSUMER_METADATA_RESPONSE_H_9DC3DEAB_1C5B_4B67_A8F5_FBB39CB8F065 11 | #define CONSUMER_METADATA_RESPONSE_H_9DC3DEAB_1C5B_4B67_A8F5_FBB39CB8F065 12 | 13 | namespace libkafka_asio 14 | { 15 | 16 | inline Int16 ConsumerMetadataResponse::error_code() const 17 | { 18 | return error_code_; 19 | } 20 | 21 | inline Int32 ConsumerMetadataResponse::coordinator_id() const 22 | { 23 | return coordinator_id_; 24 | } 25 | 26 | inline const String& ConsumerMetadataResponse::coordinator_host() const 27 | { 28 | return coordinator_host_; 29 | } 30 | 31 | inline Int32 ConsumerMetadataResponse::coordinator_port() const 32 | { 33 | return coordinator_port_; 34 | } 35 | 36 | inline void MutableConsumerMetadataResponse::set_error_code( 37 | Int16 error_code) 38 | { 39 | response_.error_code_ = error_code; 40 | } 41 | 42 | inline void MutableConsumerMetadataResponse::set_coordinator_id( 43 | Int32 coordinator_id) 44 | { 45 | response_.coordinator_id_ = coordinator_id; 46 | } 47 | 48 | inline void MutableConsumerMetadataResponse::set_coordinator_host( 49 | const String& coordinator_host) 50 | { 51 | response_.coordinator_host_ = coordinator_host; 52 | } 53 | 54 | inline void MutableConsumerMetadataResponse::set_coordinator_port( 55 | Int32 coordinator_port) 56 | { 57 | response_.coordinator_port_ = coordinator_port; 58 | } 59 | 60 | } // namespace libkafka_asio 61 | 62 | #include 63 | 64 | #endif // CONSUMER_METADATA_RESPONSE_H_9DC3DEAB_1C5B_4B67_A8F5_FBB39CB8F065 65 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/fetch_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/fetch_request.h 3 | // -------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
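Usage sketch for `ConnectionConfiguration::SetBrokerFromString` shown earlier: the parser accepts "host:port" and falls back to the default Kafka service when the port part is missing. Treating `client_id` and `auto_connect` as public fields is an assumption based on the constructor's initializer list.

```cpp
// Sketch only: build a connection configuration for a local broker.
#include <libkafka_asio/libkafka_asio.h>

libkafka_asio::ConnectionConfiguration MakeConfiguration()
{
  libkafka_asio::ConnectionConfiguration configuration;
  configuration.SetBrokerFromString("localhost:9092");
  configuration.client_id = "my-client";
  configuration.auto_connect = true;
  return configuration;
}
```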
(See file LICENSE) 8 | // 9 | 10 | #ifndef FETCH_REQUEST_H_31F2C3C7_9F99_4B7F_B824_210C071C6045 11 | #define FETCH_REQUEST_H_31F2C3C7_9F99_4B7F_B824_210C071C6045 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | inline Int16 FetchRequest::ApiKey() 20 | { 21 | return constants::kApiKeyFetchRequest; 22 | } 23 | 24 | inline FetchRequest::FetchRequest() : 25 | max_wait_time_(constants::kDefaultFetchMaxWaitTime), 26 | min_bytes_(constants::kDefaultFetchMinBytes) 27 | { 28 | } 29 | 30 | inline Int32 FetchRequest::replica_id() const 31 | { 32 | return -1; 33 | } 34 | 35 | inline Int32 FetchRequest::max_wait_time() const 36 | { 37 | return max_wait_time_; 38 | } 39 | 40 | inline Int32 FetchRequest::min_bytes() const 41 | { 42 | return min_bytes_; 43 | } 44 | 45 | inline const FetchRequest::Topics& FetchRequest::topics() const 46 | { 47 | return topics_; 48 | } 49 | 50 | inline void FetchRequest::set_max_wait_time(Int32 max_wait_time) 51 | { 52 | max_wait_time_ = max_wait_time; 53 | } 54 | 55 | inline void FetchRequest::set_min_bytes(Int32 min_bytes) 56 | { 57 | min_bytes_ = min_bytes; 58 | } 59 | 60 | inline void FetchRequest::FetchTopic(const String& topic_name, 61 | Int32 partition, 62 | Int64 fetch_offset, 63 | Int32 max_bytes) 64 | { 65 | Topics::iterator topic_iter = detail::FindTopicByName(topic_name, topics_); 66 | Partitions::iterator partition_iter = 67 | detail::FindTopicPartitionByNumber(partition, topic_iter->partitions); 68 | partition_iter->fetch_offset = fetch_offset; 69 | partition_iter->max_bytes = max_bytes; 70 | } 71 | 72 | inline void FetchRequest::Clear() 73 | { 74 | topics_.clear(); 75 | } 76 | 77 | } // namespace libkafka_asio 78 | 79 | #include 80 | 81 | #endif // FETCH_REQUEST_H_31F2C3C7_9F99_4B7F_B824_210C071C6045 82 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/fetch_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/fetch_response.h 3 | // --------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef FETCH_RESPONSE_H_DB84F3DB_3BBE_4D08_A577_F6B7C81552D1 11 | #define FETCH_RESPONSE_H_DB84F3DB_3BBE_4D08_A577_F6B7C81552D1 12 | 13 | namespace libkafka_asio 14 | { 15 | 16 | inline const FetchResponse::Topics& FetchResponse::topics() const 17 | { 18 | return topics_; 19 | } 20 | 21 | inline FetchResponse::Topics& MutableFetchResponse::mutable_topics() 22 | { 23 | return response_.topics_; 24 | } 25 | 26 | inline FetchResponse::const_iterator FetchResponse::begin() const 27 | { 28 | return const_iterator(topics_); 29 | } 30 | 31 | inline FetchResponse::const_iterator FetchResponse::end() const 32 | { 33 | return const_iterator(); 34 | } 35 | 36 | } // namespace libkafka_asio 37 | 38 | #include 39 | 40 | #endif // FETCH_RESPONSE_H_DB84F3DB_3BBE_4D08_A577_F6B7C81552D1 41 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/metadata_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/metadata_request.h 3 | // ----------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef METADATA_REQUEST_H_6A4217F2_F071_46BC_9B26_FC84451D03A5 11 | #define METADATA_REQUEST_H_6A4217F2_F071_46BC_9B26_FC84451D03A5 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | 18 | inline Int16 MetadataRequest::ApiKey() 19 | { 20 | return constants::kApiKeyMetadataRequest; 21 | } 22 | 23 | inline const MetadataRequest::TopicNameVector& MetadataRequest::topic_names() 24 | const 25 | { 26 | return topic_names_; 27 | } 28 | 29 | inline void MetadataRequest::AddTopicName(const String& topic_name) 30 | { 31 | topic_names_.push_back(topic_name); 32 | } 33 | 34 | inline void MetadataRequest::Clear() 35 | { 36 | topic_names_.clear(); 37 | } 38 | 39 | } // namespace libkafka_asio 40 | 41 | #include 42 | 43 | #endif // METADATA_REQUEST_H_6A4217F2_F071_46BC_9B26_FC84451D03A5 44 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/metadata_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/metadata_response.h 3 | // ------------------------ 4 | // 5 | // Copyright (c) 2015-2016 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef METADATA_RESPONSE_H_E93D2A75_E1ED_4CEA_97A4_48983B568306 11 | #define METADATA_RESPONSE_H_E93D2A75_E1ED_4CEA_97A4_48983B568306 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | 18 | inline const MetadataResponse::BrokerVector& MetadataResponse::brokers() const 19 | { 20 | return brokers_; 21 | } 22 | 23 | inline const MetadataResponse::Topics& MetadataResponse::topics() const 24 | { 25 | return topics_; 26 | } 27 | 28 | inline MetadataResponse::BrokerVector& 29 | MutableMetadataResponse::mutable_brokers() 30 | { 31 | return response_.brokers_; 32 | } 33 | 34 | inline MetadataResponse::Topics& MutableMetadataResponse::mutable_topics() 35 | { 36 | return response_.topics_; 37 | } 38 | 39 | inline MetadataResponse::Broker::OptionalType 40 | MetadataResponse::PartitionLeader(const String& topic, Int32 partition) const 41 | { 42 | Topics::const_iterator topic_iter = topics_.find(topic); 43 | if (topic_iter == topics_.end()) 44 | { 45 | return Broker::OptionalType(); 46 | } 47 | Partitions::const_iterator partition_iter = 48 | topic_iter->second.partitions.find(partition); 49 | if (partition_iter == topic_iter->second.partitions.end() || 50 | partition_iter->second.leader == constants::kMetadataLeaderUndecided) 51 | { 52 | return Broker::OptionalType(); 53 | } 54 | BrokerVector::const_iterator broker_iter = 55 | detail::FindBrokerById(partition_iter->second.leader, brokers_); 56 | if (broker_iter != brokers_.end()) 57 | { 58 | return *broker_iter; 59 | } 60 | return Broker::OptionalType(); 61 | } 62 | 63 | } // namespace libkafka_asio 64 | 65 | #include 66 | 67 | #endif // METADATA_RESPONSE_H_E93D2A75_E1ED_4CEA_97A4_48983B568306 68 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/offset_commit_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/offset_commit_request.h 3 | // ---------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
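Usage sketch for `MetadataResponse::PartitionLeader` above: it resolves the broker currently leading a topic partition; the optional stays empty while the leader is still undecided.

```cpp
// Sketch only: print the leader broker of partition 0 of "mytopic".
#include <iostream>
#include <libkafka_asio/libkafka_asio.h>

void PrintPartitionLeader(const libkafka_asio::MetadataResponse& response)
{
  libkafka_asio::MetadataResponse::Broker::OptionalType leader =
      response.PartitionLeader("mytopic", 0);
  if (!leader)
  {
    std::cout << "No leader found (election in progress?)" << std::endl;
    return;
  }
  std::cout << "Leader: " << leader->host << ":" << leader->port << std::endl;
}
```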
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_COMMIT_REQUEST_H_391FB5CD_2126_42DC_8CC4_16F5F38FFB82 11 | #define OFFSET_COMMIT_REQUEST_H_391FB5CD_2126_42DC_8CC4_16F5F38FFB82 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | inline Int16 OffsetCommitRequest::ApiKey() 20 | { 21 | return constants::kApiKeyOffsetCommitRequest; 22 | } 23 | 24 | inline const String& OffsetCommitRequest::consumer_group() const 25 | { 26 | return consumer_group_; 27 | } 28 | 29 | inline const OffsetCommitRequest::Topics& OffsetCommitRequest::topics() const 30 | { 31 | return topics_; 32 | } 33 | 34 | inline void OffsetCommitRequest::set_consumer_group( 35 | const String& consumer_group) 36 | { 37 | consumer_group_ = consumer_group; 38 | } 39 | 40 | inline void OffsetCommitRequest::CommitOffset(const String& topic_name, 41 | Int32 partition, 42 | Int64 offset, 43 | Int64 timestamp, 44 | const String& metadata) 45 | { 46 | Topics::iterator topic_iter = detail::FindTopicByName(topic_name, topics_); 47 | Partitions::iterator partition_iter = 48 | detail::FindTopicPartitionByNumber(partition, topic_iter->partitions); 49 | partition_iter->offset = offset; 50 | partition_iter->timestamp = timestamp; 51 | partition_iter->metadata = metadata; 52 | } 53 | 54 | inline void OffsetCommitRequest::Clear() 55 | { 56 | topics_.clear(); 57 | } 58 | 59 | } // namespace libkafka_asio 60 | 61 | #include 62 | 63 | #endif // OFFSET_COMMIT_REQUEST_H_391FB5CD_2126_42DC_8CC4_16F5F38FFB82 64 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/offset_commit_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/offset_commit_response.h 3 | // ----------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_COMMIT_RESPONSE_H_6DDDCEF0_8532_4D06_91EB_EDD87BCB2FB6 11 | #define OFFSET_COMMIT_RESPONSE_H_6DDDCEF0_8532_4D06_91EB_EDD87BCB2FB6 12 | 13 | namespace libkafka_asio 14 | { 15 | 16 | inline const OffsetCommitResponse::Topics& OffsetCommitResponse::topics() const 17 | { 18 | return topics_; 19 | } 20 | 21 | inline OffsetCommitResponse::Topics& 22 | MutableOffsetCommitResponse::mutable_topics() 23 | { 24 | return response_.topics_; 25 | } 26 | 27 | } // namespace libkafka_asio 28 | 29 | #include 30 | 31 | #endif // OFFSET_COMMIT_RESPONSE_H_6DDDCEF0_8532_4D06_91EB_EDD87BCB2FB6 32 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/offset_fetch_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/offset_fetch_request.h 3 | // --------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
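Usage sketch for `OffsetCommitRequest::CommitOffset` above: commit a single offset on behalf of a consumer group, keeping the default timestamp ("now") and empty metadata.

```cpp
// Sketch only: commit offset 42 for partition 0 of "mytopic".
#include <libkafka_asio/libkafka_asio.h>

libkafka_asio::OffsetCommitRequest MakeCommitRequest()
{
  libkafka_asio::OffsetCommitRequest request;
  request.set_consumer_group("my-consumer-group");
  request.CommitOffset("mytopic", 0, 42);
  return request;
}
```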
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_FETCH_REQUEST_H_FCE526A2_0BA6_48F4_9717_4729A46D6D5D 11 | #define OFFSET_FETCH_REQUEST_H_FCE526A2_0BA6_48F4_9717_4729A46D6D5D 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | inline Int16 OffsetFetchRequest::ApiKey() 20 | { 21 | return constants::kApiKeyOffsetFetchRequest; 22 | } 23 | 24 | inline const String& OffsetFetchRequest::consumer_group() const 25 | { 26 | return consumer_group_; 27 | } 28 | 29 | inline const OffsetFetchRequest::Topics& OffsetFetchRequest::topics() const 30 | { 31 | return topics_; 32 | } 33 | 34 | inline void OffsetFetchRequest::set_consumer_group(const String& consumer_group) 35 | { 36 | consumer_group_ = consumer_group; 37 | } 38 | 39 | inline void OffsetFetchRequest::FetchOffset(const String& topic_name, 40 | Int32 partition) 41 | { 42 | Topics::iterator topic_iter = detail::FindTopicByName(topic_name, topics_); 43 | Partitions::iterator partition_iter = 44 | detail::FindTopicPartitionByNumber(partition, topic_iter->partitions); 45 | } 46 | 47 | } // namespace libkafka_asio 48 | 49 | #include 50 | 51 | #endif // OFFSET_FETCH_REQUEST_H_FCE526A2_0BA6_48F4_9717_4729A46D6D5D 52 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/offset_fetch_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/offset_fetch_response.h 3 | // ---------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_FETCH_RESPONSE_H_DAC75554_54A7_424A_984C_56EA3C50749E 11 | #define OFFSET_FETCH_RESPONSE_H_DAC75554_54A7_424A_984C_56EA3C50749E 12 | 13 | namespace libkafka_asio 14 | { 15 | 16 | inline const OffsetFetchResponse::Topics& OffsetFetchResponse::topics() const 17 | { 18 | return topics_; 19 | } 20 | 21 | inline OffsetFetchResponse::Topics& MutableOffsetFetchResponse::mutable_topics() 22 | { 23 | return response_.topics_; 24 | } 25 | 26 | } // namespace libkafka_asio 27 | 28 | #include 29 | 30 | #endif // OFFSET_FETCH_RESPONSE_H_DAC75554_54A7_424A_984C_56EA3C50749E 31 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/offset_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/offset_request.h 3 | // --------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_REQUEST_H_AD3922D4_7FA5_4EF7_A4A9_984D5FF43311 11 | #define OFFSET_REQUEST_H_AD3922D4_7FA5_4EF7_A4A9_984D5FF43311 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | inline Int16 OffsetRequest::ApiKey() 20 | { 21 | return constants::kApiKeyOffsetRequest; 22 | } 23 | 24 | inline Int32 OffsetRequest::replica_id() const 25 | { 26 | return -1; 27 | } 28 | 29 | inline const OffsetRequest::Topics& OffsetRequest::topics() const 30 | { 31 | return topics_; 32 | } 33 | 34 | inline void OffsetRequest::FetchTopicOffset(const String& topic_name, 35 | Int32 partition, 36 | Int64 time, 37 | Int32 max_number_of_offsets) 38 | { 39 | Topics::iterator topic_iter = detail::FindTopicByName(topic_name, topics_); 40 | Partitions::iterator partition_iter = 41 | detail::FindTopicPartitionByNumber(partition, topic_iter->partitions); 42 | partition_iter->time = time; 43 | partition_iter->max_number_of_offsets = max_number_of_offsets; 44 | } 45 | 46 | inline void OffsetRequest::Clear() 47 | { 48 | topics_.clear(); 49 | } 50 | 51 | } // namespace libkafka_asio 52 | 53 | #include 54 | 55 | #endif // OFFSET_REQUEST_H_AD3922D4_7FA5_4EF7_A4A9_984D5FF43311 56 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/offset_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/offset_response.h 3 | // ---------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_RESPONSE_H_59E9CB4F_A0BD_46FE_8650_3F0890A7C0D2 11 | #define OFFSET_RESPONSE_H_59E9CB4F_A0BD_46FE_8650_3F0890A7C0D2 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | 18 | inline const OffsetResponse::Topics& OffsetResponse::topics() const 19 | { 20 | return topics_; 21 | } 22 | 23 | inline OffsetResponse::Partition::OptionalType 24 | OffsetResponse::TopicPartitionOffset(const String& topic_name, 25 | Int32 partition) const 26 | { 27 | Topics::const_iterator topic_iter = topics_.find(topic_name); 28 | if (topic_iter == topics_.end()) 29 | { 30 | return Partition::OptionalType(); 31 | } 32 | Partitions::const_iterator partition_iter = 33 | topic_iter->second.partitions.find(partition); 34 | if (partition_iter == topic_iter->second.partitions.end()) 35 | { 36 | return Partition::OptionalType(); 37 | } 38 | return partition_iter->second; 39 | } 40 | 41 | inline OffsetResponse::Topics& MutableOffsetResponse::mutable_topics() 42 | { 43 | return response_.topics_; 44 | } 45 | 46 | } // namespace libkafka_asio 47 | 48 | #include 49 | 50 | #endif // OFFSET_RESPONSE_H_59E9CB4F_A0BD_46FE_8650_3F0890A7C0D2 51 | -------------------------------------------------------------------------------- /lib/libkafka_asio/impl/produce_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // impl/produce_response.h 3 | // ----------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
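Usage sketch for `OffsetResponse::TopicPartitionOffset` above: pull the first offset reported for a topic partition out of a response.

```cpp
// Sketch only: print the first offset returned for partition 0 of "mytopic".
#include <iostream>
#include <libkafka_asio/libkafka_asio.h>

void PrintFirstOffset(const libkafka_asio::OffsetResponse& response)
{
  libkafka_asio::OffsetResponse::Partition::OptionalType partition =
      response.TopicPartitionOffset("mytopic", 0);
  if (partition && !partition->offsets.empty())
  {
    std::cout << "Offset: " << partition->offsets.front() << std::endl;
  }
}
```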
(See file LICENSE) 8 | // 9 | 10 | #ifndef PRODUCE_RESPONSE_H_C0AC4959_A1B0_4497_9B4C_ED995AA5E3CC 11 | #define PRODUCE_RESPONSE_H_C0AC4959_A1B0_4497_9B4C_ED995AA5E3CC 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | 18 | inline ProduceResponse::Topic::OptionalType ProduceResponse::FindTopic( 19 | const String& topic_name) const 20 | { 21 | Topics::const_iterator iter = topics_.find(topic_name); 22 | if (iter != topics_.end()) 23 | { 24 | return iter->second; 25 | } 26 | return Topic::OptionalType(); 27 | } 28 | 29 | inline ProduceResponse::Partition::OptionalType 30 | ProduceResponse::FindTopicPartition(const String& topic_name, 31 | Int32 partition) const 32 | { 33 | Topic::OptionalType topic = FindTopic(topic_name); 34 | if (topic) 35 | { 36 | Partitions::const_iterator iter = topic->partitions.find(partition); 37 | if (iter != topic->partitions.end()) 38 | { 39 | return iter->second; 40 | } 41 | } 42 | return Partition::OptionalType(); 43 | } 44 | 45 | } // namespace libkafka_asio 46 | 47 | #include 48 | 49 | #endif // PRODUCE_RESPONSE_H_C0AC4959_A1B0_4497_9B4C_ED995AA5E3CC 50 | -------------------------------------------------------------------------------- /lib/libkafka_asio/libkafka_asio.h: -------------------------------------------------------------------------------- 1 | // 2 | // libkafka_asio.h 3 | // --------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef LIBKAFKA_ASIO_H_D2B00651_6C27_47B1_9403_2856E2D065EA 11 | #define LIBKAFKA_ASIO_H_D2B00651_6C27_47B1_9403_2856E2D065EA 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | #include 19 | 20 | // Metadata API 21 | #include 22 | 23 | // Produce API 24 | #include 25 | 26 | // Fetch API 27 | #include 28 | 29 | // Offset API 30 | #include 31 | 32 | // Offset Commit/Fetch API 33 | #include 34 | #include 35 | #include 36 | 37 | #endif // LIBKAFKA_ASIO_H_D2B00651_6C27_47B1_9403_2856E2D065EA 38 | -------------------------------------------------------------------------------- /lib/libkafka_asio/message.h: -------------------------------------------------------------------------------- 1 | // 2 | // message.h 3 | // --------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef MESSAGE_H_7E5FAA53_67B6_4727_BF98_3AECC80F8825 11 | #define MESSAGE_H_7E5FAA53_67B6_4727_BF98_3AECC80F8825 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | 22 | // Kafka Message Data Structure 23 | class Message 24 | { 25 | public: 26 | // Create a new and empty message object 27 | Message(); 28 | 29 | // Copy from the given message object. If the `deep` parameter is set to 30 | // `true`, the byte arrays for `key` and `value` will be copied as well. 31 | Message(const Message& orig, bool deep = false); 32 | 33 | // Copy from the given message object 34 | Message& operator=(const Message& rhs); 35 | 36 | // Magic byte is always zero 37 | Int8 magic_byte() const; 38 | 39 | // Attributes bitset. The lowest 2 bits indicate the used compression 40 | // algorithm. 41 | Int8 attributes() const; 42 | 43 | // Sets the attributes byte of this message 44 | void set_attributes(Int8 attributes); 45 | 46 | // Optional message key. Can be NULL (default). 
47 | const Bytes& key() const; 48 | 49 | Bytes& mutable_key(); 50 | 51 | // Actual message data as byte array 52 | const Bytes& value() const; 53 | 54 | Bytes& mutable_value(); 55 | 56 | // Compressed messages contain a nested message set 57 | const MessageSet& nested_message_set() const; 58 | 59 | MessageSet& mutable_nested_message_set(); 60 | 61 | // Returns the compression algorithm, used for compressing the message value 62 | constants::Compression compression() const; 63 | 64 | private: 65 | Int8 attributes_; 66 | Bytes key_; 67 | Bytes value_; 68 | boost::shared_ptr nested_message_set_; 69 | }; 70 | 71 | // Message data structure with an additional offset 72 | class MessageAndOffset : 73 | public Message 74 | { 75 | public: 76 | MessageAndOffset(); 77 | 78 | MessageAndOffset(const Message& message, Int64 offset); 79 | 80 | Int64 offset() const; 81 | 82 | void set_offset(Int64 offset); 83 | 84 | private: 85 | Int64 offset_; 86 | }; 87 | 88 | // Compresses the given `MessageSet` object using the specified compression 89 | // algorithm and puts the result as value into a new `Message` object. The 90 | // compression attribute of that object will be set respectively. The created 91 | // `Message` object will be returned by this function. 92 | Message CompressMessageSet(const MessageSet& message_set, 93 | constants::Compression compression, 94 | boost::system::error_code& ec); 95 | 96 | } // namespace libkafka_asio 97 | 98 | #include 99 | 100 | #endif // MESSAGE_H_7E5FAA53_67B6_4727_BF98_3AECC80F8825 101 | -------------------------------------------------------------------------------- /lib/libkafka_asio/message_fwd.h: -------------------------------------------------------------------------------- 1 | // 2 | // message_fwd.h 3 | // ------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef MESSAGE_FWD_H_2FB308F0_768F_4F8A_8943_FBD773BAA21D 11 | #define MESSAGE_FWD_H_2FB308F0_768F_4F8A_8943_FBD773BAA21D 12 | 13 | #include 14 | 15 | namespace libkafka_asio 16 | { 17 | 18 | class Message; 19 | 20 | class MessageAndOffset; 21 | 22 | typedef std::vector MessageSet; 23 | 24 | } // namespace libkafka_asio 25 | 26 | #endif // MESSAGE_FWD_H_2FB308F0_768F_4F8A_8943_FBD773BAA21D 27 | -------------------------------------------------------------------------------- /lib/libkafka_asio/metadata_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // metadata_request.h 3 | // ------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef METADATA_REQUEST_H_07C079EE_94FF_41EB_9CA6_618E552E405F 11 | #define METADATA_REQUEST_H_07C079EE_94FF_41EB_9CA6_618E552E405F 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace libkafka_asio 19 | { 20 | 21 | // Kafka Metadata API request implementation. 22 | // The metadata API can be used to determine information about available topics, 23 | // partitions and brokers. 
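Usage sketch for `Message` and `CompressMessageSet` above: put one payload into a `MessageSet` and wrap it into a single compressed message. The exact compression constant name is an assumption (see `constants.h`), and GZIP support requires zlib at build time.

```cpp
// Sketch only: build a one-message set and compress it into a single Message.
// constants::kCompressionGZIP is an assumed constant name.
#include <string>
#include <boost/system/error_code.hpp>
#include <libkafka_asio/libkafka_asio.h>

libkafka_asio::Message CompressedBatch()
{
  using namespace libkafka_asio;
  MessageSet message_set(1);
  std::string payload = "Hello libkafka_asio";
  message_set[0].mutable_value().reset(
      new Bytes::element_type(payload.begin(), payload.end()));
  boost::system::error_code ec;
  Message result =
      CompressMessageSet(message_set, constants::kCompressionGZIP, ec);
  return ec ? Message() : result;
}
```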
24 | class MetadataRequest : 25 | public Request 26 | { 27 | friend class Request; 28 | 29 | static Int16 ApiKey(); 30 | 31 | public: 32 | typedef MetadataResponse ResponseType; 33 | typedef MutableMetadataResponse MutableResponseType; 34 | typedef std::vector TopicNameVector; 35 | 36 | const TopicNameVector& topic_names() const; 37 | 38 | // Add a topic to fetch metadata for 39 | void AddTopicName(const String& topic_name); 40 | 41 | // Removes all topic name entries, added to this request 42 | void Clear(); 43 | 44 | private: 45 | TopicNameVector topic_names_; 46 | }; 47 | 48 | } // namespace libkafka_asio 49 | 50 | #include 51 | 52 | #endif // METADATA_REQUEST_H_07C079EE_94FF_41EB_9CA6_618E552E405F 53 | -------------------------------------------------------------------------------- /lib/libkafka_asio/metadata_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // metadata_response.h 3 | // ------------------- 4 | // 5 | // Copyright (c) 2015-2016 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef METADATA_RESPONSE_H_3EEE6475_7990_4611_B8E6_5CA255FB9791 11 | #define METADATA_RESPONSE_H_3EEE6475_7990_4611_B8E6_5CA255FB9791 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | 22 | class MutableMetadataResponse; 23 | 24 | // Kafka Metadata API response 25 | class MetadataResponse : 26 | public Response 27 | { 28 | friend class MutableMetadataResponse; 29 | 30 | struct PartitionProperties 31 | { 32 | typedef std::vector ReplicasVector; 33 | typedef std::vector IsrVector; 34 | Int16 error_code; 35 | Int32 leader; 36 | ReplicasVector replicas; 37 | IsrVector isr; 38 | }; 39 | 40 | struct TopicProperties 41 | { 42 | Int16 error_code; 43 | }; 44 | 45 | typedef detail::TopicsPartitionsMap< 46 | TopicProperties, 47 | PartitionProperties 48 | > TopicsPartitions; 49 | 50 | public: 51 | struct Broker 52 | { 53 | typedef boost::optional OptionalType; 54 | Int32 node_id; 55 | String host; 56 | Int32 port; 57 | }; 58 | typedef std::vector BrokerVector; 59 | typedef TopicsPartitions::TopicType Topic; 60 | typedef TopicsPartitions::PartitionType Partition; 61 | typedef TopicsPartitions::TopicsType Topics; 62 | typedef TopicsPartitions::PartitionsType Partitions; 63 | 64 | const BrokerVector& brokers() const; 65 | 66 | const Topics& topics() const; 67 | 68 | Broker::OptionalType PartitionLeader(const String& topic, 69 | Int32 partition) const; 70 | 71 | private: 72 | BrokerVector brokers_; 73 | Topics topics_; 74 | }; 75 | 76 | class MutableMetadataResponse : 77 | public MutableResponse 78 | { 79 | public: 80 | MetadataResponse::BrokerVector& mutable_brokers(); 81 | 82 | MetadataResponse::Topics& mutable_topics(); 83 | }; 84 | 85 | } // namespace libkafka_asio 86 | 87 | #include 88 | 89 | #endif // METADATA_RESPONSE_H_3EEE6475_7990_4611_B8E6_5CA255FB9791 90 | -------------------------------------------------------------------------------- /lib/libkafka_asio/offset_commit_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // offset_commit_request.h 3 | // ----------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_COMMIT_REQUEST_H_CC036F24_24FE_4B96_84F6_2164413E458A 11 | #define OFFSET_COMMIT_REQUEST_H_CC036F24_24FE_4B96_84F6_2164413E458A 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | 22 | // Kafka Offset Commit/Fetch API request implementation: 23 | // Offset commit request 24 | class OffsetCommitRequest : 25 | public Request 26 | { 27 | friend class Request; 28 | 29 | static Int16 ApiKey(); 30 | 31 | struct PartitionProperties 32 | { 33 | Int64 offset; 34 | Int64 timestamp; 35 | String metadata; 36 | }; 37 | 38 | typedef detail::TopicsPartitionsVector< 39 | detail::EmptyProperties, 40 | PartitionProperties 41 | > TopicsPartitions; 42 | 43 | public: 44 | typedef OffsetCommitResponse ResponseType; 45 | typedef MutableOffsetCommitResponse MutableResponseType; 46 | typedef TopicsPartitions::TopicType Topic; 47 | typedef TopicsPartitions::PartitionType Partition; 48 | typedef TopicsPartitions::TopicsType Topics; 49 | typedef TopicsPartitions::PartitionsType Partitions; 50 | 51 | const String& consumer_group() const; 52 | 53 | const Topics& topics() const; 54 | 55 | void set_consumer_group(const String& consumer_group); 56 | 57 | void CommitOffset( 58 | const String& topic_name, 59 | Int32 partition, 60 | Int64 offset, 61 | Int64 timestamp = constants::kDefaultOffsetCommitTimestampNow, 62 | const String& metadata = ""); 63 | 64 | void Clear(); 65 | 66 | private: 67 | String consumer_group_; 68 | Topics topics_; 69 | }; 70 | 71 | } // namespace libkafka_asio 72 | 73 | #include 74 | 75 | #endif // OFFSET_COMMIT_REQUEST_H_CC036F24_24FE_4B96_84F6_2164413E458A 76 | -------------------------------------------------------------------------------- /lib/libkafka_asio/offset_commit_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // offset_commit_response.h 3 | // ------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_COMMIT_RESPONSE_H_96791A91_0B01_4306_BB23_70B03D36B8F3 11 | #define OFFSET_COMMIT_RESPONSE_H_96791A91_0B01_4306_BB23_70B03D36B8F3 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | class MutableOffsetCommitResponse; 20 | 21 | // Kafka Offset Commit/Fetch API response implementation: 22 | // Offset commit response 23 | class OffsetCommitResponse : 24 | public Response 25 | { 26 | friend class MutableOffsetCommitResponse; 27 | 28 | struct PartitionProperties 29 | { 30 | Int16 error_code; 31 | }; 32 | 33 | typedef detail::TopicsPartitionsMap< 34 | detail::EmptyProperties, 35 | PartitionProperties 36 | > TopicsPartitions; 37 | 38 | public: 39 | typedef TopicsPartitions::TopicType Topic; 40 | typedef TopicsPartitions::PartitionType Partition; 41 | typedef TopicsPartitions::TopicsType Topics; 42 | typedef TopicsPartitions::PartitionsType Partitions; 43 | 44 | const Topics& topics() const; 45 | 46 | private: 47 | Topics topics_; 48 | }; 49 | 50 | class MutableOffsetCommitResponse : 51 | public MutableResponse 52 | { 53 | public: 54 | OffsetCommitResponse::Topics& mutable_topics(); 55 | }; 56 | 57 | } // namespace libkafka_asio 58 | 59 | #include 60 | 61 | #endif // OFFSET_COMMIT_RESPONSE_H_96791A91_0B01_4306_BB23_70B03D36B8F3 62 | -------------------------------------------------------------------------------- /lib/libkafka_asio/offset_fetch_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // offset_fetch_request.h 3 | // ---------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_FETCH_REQUEST_H_507D083D_1B17_43CF_9D84_48480E9E44F7 11 | #define OFFSET_FETCH_REQUEST_H_507D083D_1B17_43CF_9D84_48480E9E44F7 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | 22 | // Kafka Offset Commit/Fetch API request implementation: 23 | // Offset fetch request 24 | class OffsetFetchRequest : 25 | public Request 26 | { 27 | friend class Request; 28 | 29 | static Int16 ApiKey(); 30 | 31 | typedef detail::TopicsPartitionsVector< 32 | detail::EmptyProperties, 33 | detail::EmptyProperties 34 | > TopicsPartitions; 35 | 36 | public: 37 | typedef OffsetFetchResponse ResponseType; 38 | typedef MutableOffsetFetchResponse MutableResponseType; 39 | typedef TopicsPartitions::TopicType Topic; 40 | typedef TopicsPartitions::PartitionType Partition; 41 | typedef TopicsPartitions::TopicsType Topics; 42 | typedef TopicsPartitions::PartitionsType Partitions; 43 | 44 | const String& consumer_group() const; 45 | 46 | const Topics& topics() const; 47 | 48 | void set_consumer_group(const String& consumer_group); 49 | 50 | void FetchOffset(const String& topic_name, Int32 partition); 51 | 52 | private: 53 | String consumer_group_; 54 | Topics topics_; 55 | }; 56 | 57 | } // namespace libkafka_asio 58 | 59 | #include 60 | 61 | #endif // OFFSET_FETCH_REQUEST_H_507D083D_1B17_43CF_9D84_48480E9E44F7 62 | -------------------------------------------------------------------------------- /lib/libkafka_asio/offset_fetch_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // offset_fetch_response.h 3 | // ----------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_FETCH_RESPONSE_H_0D96F788_105E_410C_8BE6_218FA04045AC 11 | #define OFFSET_FETCH_RESPONSE_H_0D96F788_105E_410C_8BE6_218FA04045AC 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | namespace libkafka_asio 18 | { 19 | 20 | class MutableOffsetFetchResponse; 21 | 22 | // Kafka Offset Commit/Fetch API response implementation: 23 | // Offset fetch response 24 | class OffsetFetchResponse : 25 | public Response 26 | { 27 | friend class MutableOffsetFetchResponse; 28 | 29 | struct PartitionProperties 30 | { 31 | Int64 offset; 32 | String metadata; 33 | Int16 error_code; 34 | }; 35 | 36 | typedef detail::TopicsPartitionsMap< 37 | detail::EmptyProperties, 38 | PartitionProperties 39 | > TopicsPartitions; 40 | 41 | public: 42 | typedef TopicsPartitions::TopicType Topic; 43 | typedef TopicsPartitions::PartitionType Partition; 44 | typedef TopicsPartitions::TopicsType Topics; 45 | typedef TopicsPartitions::PartitionsType Partitions; 46 | 47 | const Topics& topics() const; 48 | 49 | private: 50 | Topics topics_; 51 | }; 52 | 53 | class MutableOffsetFetchResponse : 54 | public MutableResponse 55 | { 56 | public: 57 | OffsetFetchResponse::Topics& mutable_topics(); 58 | }; 59 | 60 | } // namespace libkafka_asio 61 | 62 | #include 63 | 64 | #endif // OFFSET_FETCH_RESPONSE_H_0D96F788_105E_410C_8BE6_218FA04045AC 65 | -------------------------------------------------------------------------------- /lib/libkafka_asio/offset_request.h: -------------------------------------------------------------------------------- 1 | // 2 | // offset_request.h 3 | // ---------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_REQUEST_H_1AF16555_D1BD_466F_8853_BDA1F88D978A 11 | #define OFFSET_REQUEST_H_1AF16555_D1BD_466F_8853_BDA1F88D978A 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | namespace libkafka_asio 21 | { 22 | 23 | // Kafka Offset API request implementation 24 | class OffsetRequest : 25 | public Request 26 | { 27 | friend class Request; 28 | 29 | static Int16 ApiKey(); 30 | 31 | struct PartitionProperties 32 | { 33 | Int64 time; 34 | Int32 max_number_of_offsets; 35 | }; 36 | 37 | typedef detail::TopicsPartitionsVector< 38 | detail::EmptyProperties, 39 | PartitionProperties 40 | > TopicsPartitions; 41 | 42 | public: 43 | typedef OffsetResponse ResponseType; 44 | typedef MutableOffsetResponse MutableResponseType; 45 | typedef TopicsPartitions::TopicType Topic; 46 | typedef TopicsPartitions::PartitionType Partition; 47 | typedef TopicsPartitions::TopicsType Topics; 48 | typedef TopicsPartitions::PartitionsType Partitions; 49 | 50 | Int32 replica_id() const; 51 | 52 | const Topics& topics() const; 53 | 54 | // Fetch offset information for the given topic-partition. 55 | // The (optional) `time` parameter can be used to ask for messages before 56 | // a certain time in the past (in milliseconds). 
Two special values exist for 57 | // this parameter: 58 | // `libkafka_asio::constants::kOffsetTimeLatest` (-1) (Default) 59 | // `libkafka_asio::constants::kOffsetTimeEarliest` (-2) 60 | // 61 | void FetchTopicOffset(const String& topic_name, 62 | Int32 partition, 63 | Int64 time = constants::kOffsetTimeLatest, 64 | Int32 max_number_of_offsets 65 | = constants::kDefaultOffsetMaxNumberOfOffsets); 66 | 67 | // Clears all entries for fetching topic-partitions 68 | void Clear(); 69 | 70 | private: 71 | Topics topics_; 72 | }; 73 | 74 | } // namespace libkafka_asio 75 | 76 | #include 77 | 78 | #endif // OFFSET_REQUEST_H_1AF16555_D1BD_466F_8853_BDA1F88D978A 79 | -------------------------------------------------------------------------------- /lib/libkafka_asio/offset_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // offset_response.h 3 | // ----------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef OFFSET_RESPONSE_H_23EABE67_36C3_4D9E_8C37_4C8E916DE537 11 | #define OFFSET_RESPONSE_H_23EABE67_36C3_4D9E_8C37_4C8E916DE537 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace libkafka_asio 20 | { 21 | 22 | class MutableOffsetResponse; 23 | 24 | // Kafka Offset API response 25 | class OffsetResponse : 26 | public Response 27 | { 28 | friend class MutableOffsetResponse; 29 | 30 | struct PartitionProperties 31 | { 32 | typedef std::vector OffsetVector; 33 | Int16 error_code; 34 | OffsetVector offsets; 35 | }; 36 | 37 | typedef detail::TopicsPartitionsMap< 38 | detail::EmptyProperties, 39 | PartitionProperties 40 | > TopicsPartitions; 41 | 42 | public: 43 | typedef TopicsPartitions::TopicType Topic; 44 | typedef TopicsPartitions::PartitionType Partition; 45 | typedef TopicsPartitions::TopicsType Topics; 46 | typedef TopicsPartitions::PartitionsType Partitions; 47 | 48 | const Topics& topics() const; 49 | 50 | // Search for offset data inside this response object for the given topic 51 | // and partition. If no such data can be found, the return value is empty. 52 | Partition::OptionalType TopicPartitionOffset(const String& topic_name, 53 | Int32 partition) const; 54 | 55 | private: 56 | Topics topics_; 57 | }; 58 | 59 | class MutableOffsetResponse : 60 | public MutableResponse 61 | { 62 | public: 63 | OffsetResponse::Topics& mutable_topics(); 64 | }; 65 | 66 | } // namespace libkafka_asio 67 | 68 | #include 69 | 70 | #endif // OFFSET_RESPONSE_H_23EABE67_36C3_4D9E_8C37_4C8E916DE537 71 | -------------------------------------------------------------------------------- /lib/libkafka_asio/primitives.h: -------------------------------------------------------------------------------- 1 | // 2 | // primitives.h 3 | // ------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef PRIMITIVES_H_2018391E_E5C4_4FB1_8271_CAD5C2C99951 11 | #define PRIMITIVES_H_2018391E_E5C4_4FB1_8271_CAD5C2C99951 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace libkafka_asio 19 | { 20 | 21 | // 22 | // The Kafka protocol primitives, as described in the Kafka wiki. 
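// Usage sketch (illustrative only, under the assumption that the aliases
// declared below are in scope): they typically show up when filling a message
// value buffer, e.g. as exercised in message_test.cpp further down:
//
//   libkafka_asio::String text = "hello kafka";
//   libkafka_asio::Bytes value(
//       new libkafka_asio::Bytes::element_type(text.begin(), text.end()));
//   libkafka_asio::Int64 offset = 0;
//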
23 | // 24 | 25 | typedef boost::int_t<8>::exact Int8; 26 | typedef boost::int_t<16>::exact Int16; 27 | typedef boost::int_t<32>::exact Int32; 28 | typedef boost::int_t<64>::exact Int64; 29 | typedef boost::uint_t<8>::exact Byte; 30 | typedef std::string String; 31 | typedef boost::shared_ptr > Bytes; 32 | 33 | } // namespace libkafka_asio 34 | 35 | #endif // PRIMITIVES_H_2018391E_E5C4_4FB1_8271_CAD5C2C99951 36 | -------------------------------------------------------------------------------- /lib/libkafka_asio/produce_response.h: -------------------------------------------------------------------------------- 1 | // 2 | // produce_response.h 3 | // ------------------ 4 | // 5 | // Copyright (c) 2015-2016 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef PRODUCE_RESPONSE_H_8880F422_07A4_42F6_9B3B_D500A69EF83A 11 | #define PRODUCE_RESPONSE_H_8880F422_07A4_42F6_9B3B_D500A69EF83A 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | namespace libkafka_asio 21 | { 22 | 23 | class MutableProduceResponse; 24 | 25 | // Kafka Produce API response implementation 26 | class ProduceResponse : 27 | public Response 28 | { 29 | friend class MutableProduceResponse; 30 | 31 | struct PartitionProperties 32 | { 33 | Int16 error_code; 34 | Int64 offset; 35 | }; 36 | 37 | typedef detail::TopicsPartitionsMap< 38 | detail::EmptyProperties, 39 | PartitionProperties 40 | > TopicsPartitions; 41 | 42 | public: 43 | typedef TopicsPartitions::TopicType Topic; 44 | typedef TopicsPartitions::PartitionType Partition; 45 | typedef TopicsPartitions::TopicsType Topics; 46 | typedef TopicsPartitions::PartitionsType Partitions; 47 | 48 | const Topics& topics() const 49 | { 50 | return topics_; 51 | } 52 | 53 | Topic::OptionalType FindTopic(const String& topic_name) const; 54 | 55 | Partition::OptionalType FindTopicPartition(const String& topic_name, 56 | Int32 partition) const; 57 | 58 | private: 59 | Topics topics_; 60 | }; 61 | 62 | class MutableProduceResponse : 63 | public MutableResponse 64 | { 65 | public: 66 | ProduceResponse::Topics& mutable_topics() 67 | { 68 | return response_.topics_; 69 | } 70 | }; 71 | 72 | } // namespace libkafka_asio 73 | 74 | #include 75 | 76 | #endif // PRODUCE_RESPONSE_H_8880F422_07A4_42F6_9B3B_D500A69EF83A 77 | -------------------------------------------------------------------------------- /lib/libkafka_asio/request.h: -------------------------------------------------------------------------------- 1 | // 2 | // request.h 3 | // --------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef BASE_REQUEST_H_693E2835_7487_4561_8C4B_590D06336668 11 | #define BASE_REQUEST_H_693E2835_7487_4561_8C4B_590D06336668 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | // Base request template 20 | template 21 | class Request 22 | { 23 | public: 24 | Request() : 25 | correlation_id_(constants::kDefaultCorrelationId) 26 | { 27 | } 28 | 29 | inline Int16 api_key() const 30 | { 31 | return TRequest::ApiKey(); 32 | } 33 | 34 | Int16 api_version() const 35 | { 36 | return 0; 37 | } 38 | 39 | Int32 correlation_id() const 40 | { 41 | return correlation_id_; 42 | } 43 | 44 | // Set the correlation ID. The Kafka server will put this value into the 45 | // corresponding response message. 
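// Illustrative sketch of the round-trip (assuming a concrete request type
// such as MetadataRequest from the listing above, together with its
// MutableMetadataResponse counterpart; the value 42 is only a placeholder):
//
//   libkafka_asio::MetadataRequest request;
//   request.set_correlation_id(42);
//   // ... the request is sent; the broker echoes the ID in its response ...
//   libkafka_asio::MutableMetadataResponse mutable_response;
//   mutable_response.set_correlation_id(request.correlation_id());
//   // mutable_response.response().correlation_id() is now 42 as well
//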
46 | void set_correlation_id(Int32 correlation_id) 47 | { 48 | correlation_id_ = correlation_id; 49 | } 50 | 51 | bool ResponseExpected() const 52 | { 53 | return true; 54 | } 55 | 56 | private: 57 | Int32 correlation_id_; 58 | String client_id_; 59 | }; 60 | 61 | } // namespace libkafka_asio 62 | 63 | #endif // BASE_REQUEST_H_693E2835_7487_4561_8C4B_590D06336668 64 | -------------------------------------------------------------------------------- /lib/libkafka_asio/response.h: -------------------------------------------------------------------------------- 1 | // 2 | // response.h 3 | // ---------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef RESPONSE_H_96E99D03_30D1_4F88_B5E8_002B7CF6A32E 11 | #define RESPONSE_H_96E99D03_30D1_4F88_B5E8_002B7CF6A32E 12 | 13 | #include 14 | #include 15 | 16 | namespace libkafka_asio 17 | { 18 | 19 | template 20 | class MutableResponse; 21 | 22 | // Base response template 23 | template 24 | class Response 25 | { 26 | friend class MutableResponse; 27 | 28 | public: 29 | typedef boost::optional OptionalType; 30 | 31 | Int32 correlation_id() const 32 | { 33 | return correlation_id_; 34 | } 35 | 36 | private: 37 | Int32 correlation_id_; 38 | }; 39 | 40 | template 41 | class MutableResponse 42 | { 43 | public: 44 | void set_correlation_id(Int32 correlation_id) 45 | { 46 | response_.correlation_id_ = correlation_id; 47 | } 48 | 49 | const TResponse& response() const 50 | { 51 | return response_; 52 | } 53 | 54 | protected: 55 | TResponse response_; 56 | }; 57 | 58 | } // namespace libkafka_asio 59 | 60 | #endif // RESPONSE_H_96E99D03_30D1_4F88_B5E8_002B7CF6A32E 61 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | site_name: libkafka-asio 3 | theme: readthedocs 4 | extra_css: [extra.css] 5 | pages: 6 | - ['index.html', 'Reference'] 7 | - ['build_configuration.md', 'Core', 'Build Configuration'] 8 | - ['primitives.md', 'Core', 'Primitive Types'] 9 | - ['connection.md', 'Core', 'Connection'] 10 | - ['connection_configuration.md', 'Core', 'ConnectionConfiguration'] 11 | - ['message.md', 'Core', 'Message'] 12 | - ['message_and_offset.md', 'Core', 'MessageAndOffset'] 13 | - ['message_set.md', 'Core', 'MessageSet'] 14 | - ['metadata_request.md', 'Metadata API', 'MetadataRequest'] 15 | - ['metadata_response.md', 'Metadata API', 'MetadataResponse'] 16 | - ['produce_request.md', 'Produce API', 'ProduceRequest'] 17 | - ['produce_response.md', 'Produce API', 'ProduceResponse'] 18 | - ['fetch_request.md', 'Fetch API', 'FetchRequest'] 19 | - ['fetch_response.md', 'Fetch API', 'FetchResponse'] 20 | - ['offset_request.md', 'Offset API', 'OffsetRequest'] 21 | - ['offset_response.md', 'Offset API', 'OffsetResponse'] 22 | - ['consumer_metadata_request.md', 'Offset Commit/Fetch API', 'ConsumerMetadataRequest'] 23 | - ['consumer_metadata_response.md', 'Offset Commit/Fetch API', 'ConsumerMetadataResponse'] 24 | - ['offset_commit_request.md', 'Offset Commit/Fetch API', 'OffsetCommitRequest'] 25 | - ['offset_commit_response.md', 'Offset Commit/Fetch API', 'OffsetCommitResponse'] 26 | - ['offset_fetch_request.md', 'Offset Commit/Fetch API', 'OffsetFetchRequest'] 27 | - ['offset_fetch_response.md', 'Offset Commit/Fetch API', 'OffsetFetchResponse'] 28 | -------------------------------------------------------------------------------- /test/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.6) 2 | project(libkafka_asio_test) 3 | 4 | list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/../cmake/Modules") 5 | 6 | find_package(GTest REQUIRED) 7 | find_package(Boost REQUIRED COMPONENTS thread system) 8 | find_package(ZLIB) 9 | find_package(Snappy) 10 | 11 | include_directories( 12 | ${PROJECT_SOURCE_DIR}/src 13 | ${PROJECT_SOURCE_DIR}/../lib 14 | ${GTEST_INCLUDE_DIRS} 15 | ${Boost_INCLUDE_DIRS}) 16 | include_directories( 17 | ${ZLIB_INCLUDE_DIRS} 18 | ${SNAPPY_INCLUDE_DIRS}) 19 | 20 | file(GLOB_RECURSE ${PROJECT_NAME}_SOURCES "${PROJECT_SOURCE_DIR}/src/*.cpp") 21 | add_executable(${PROJECT_NAME} ${${PROJECT_NAME}_SOURCES}) 22 | target_link_libraries(${PROJECT_NAME} ${GTEST_LIBRARIES} ${Boost_LIBRARIES}) 23 | target_link_libraries(${PROJECT_NAME} ${ZLIB_LIBRARIES}) 24 | target_link_libraries(${PROJECT_NAME} ${SNAPPY_LIBRARIES}) 25 | if(UNIX) 26 | target_link_libraries(${PROJECT_NAME} pthread) 27 | endif() 28 | 29 | if(COMMAND GTEST_ADD_TESTS) 30 | enable_testing() 31 | GTEST_ADD_TESTS(${PROJECT_NAME} "" ${${PROJECT_NAME}_SOURCES}) 32 | endif() -------------------------------------------------------------------------------- /test/src/StreamTest.h: -------------------------------------------------------------------------------- 1 | // 2 | // StreamTest.h 3 | // ------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #ifndef STREAM_TEST_H_FE8E717B_A2CE_4CA4_8A94_EA5C745A278F 11 | #define STREAM_TEST_H_FE8E717B_A2CE_4CA4_8A94_EA5C745A278F 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | // Little helper for testing stream based operations 19 | class StreamTest 20 | { 21 | protected: 22 | 23 | void ResetStream() 24 | { 25 | streambuf.reset(new boost::asio::streambuf()); 26 | stream.reset(new std::iostream(streambuf.get())); 27 | } 28 | 29 | libkafka_asio::Bytes ReadEverything() 30 | { 31 | using libkafka_asio::Bytes; 32 | Bytes result(new Bytes::element_type()); 33 | while (stream->good()) 34 | { 35 | char c = 0; 36 | stream->get(c); 37 | if (stream->good()) 38 | { 39 | result->push_back(c); 40 | } 41 | } 42 | return result; 43 | } 44 | 45 | typedef boost::shared_ptr StreamBufType; 46 | typedef boost::shared_ptr StreamType; 47 | StreamBufType streambuf; 48 | StreamType stream; 49 | }; 50 | 51 | #endif // STREAM_TEST_H_FE8E717B_A2CE_4CA4_8A94_EA5C745A278F 52 | -------------------------------------------------------------------------------- /test/src/connection_configuration_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // connection_configuration_test.cpp 3 | // ----------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | #include "../../lib/libkafka_asio/connection_configuration.h" 13 | 14 | using namespace libkafka_asio; 15 | 16 | class ConnectionConfigurationTest : 17 | public ::testing::Test 18 | { 19 | protected: 20 | struct TestBrokerStruct 21 | { 22 | std::string host; 23 | unsigned int port; 24 | }; 25 | 26 | protected: 27 | virtual void SetUp() 28 | { 29 | ASSERT_FALSE(configuration.broker_address); 30 | } 31 | 32 | ConnectionConfiguration configuration; 33 | }; 34 | 35 | TEST_F(ConnectionConfigurationTest, SetBrokerFromString_Empty) 36 | { 37 | configuration.SetBrokerFromString(""); 38 | ASSERT_FALSE(configuration.broker_address); 39 | } 40 | 41 | TEST_F(ConnectionConfigurationTest, SetBrokerFromString) 42 | { 43 | configuration.SetBrokerFromString("localhost:1234"); 44 | ASSERT_TRUE(static_cast(configuration.broker_address)); 45 | ASSERT_STREQ("localhost", configuration.broker_address->hostname.c_str()); 46 | ASSERT_STREQ("1234", configuration.broker_address->service.c_str()); 47 | } 48 | 49 | TEST_F(ConnectionConfigurationTest, SetBrokerFromString_NoService) 50 | { 51 | configuration.SetBrokerFromString("localhost"); 52 | ASSERT_TRUE(static_cast(configuration.broker_address)); 53 | ASSERT_STREQ("localhost", configuration.broker_address->hostname.c_str()); 54 | // Default Kafka Service: 55 | ASSERT_STREQ("9092", configuration.broker_address->service.c_str()); 56 | } 57 | 58 | TEST_F(ConnectionConfigurationTest, SetBrokerFromString_Colon) 59 | { 60 | configuration.SetBrokerFromString(":"); 61 | ASSERT_FALSE(static_cast(configuration.broker_address)); 62 | } 63 | 64 | TEST_F(ConnectionConfigurationTest, SetBroker_1) 65 | { 66 | TestBrokerStruct t; 67 | t.host = "example.org"; 68 | t.port = 8888; 69 | configuration.SetBroker(t); 70 | ASSERT_TRUE(static_cast(configuration.broker_address)); 71 | ASSERT_STREQ("example.org", configuration.broker_address->hostname.c_str()); 72 | ASSERT_STREQ("8888", configuration.broker_address->service.c_str()); 73 | } 74 | 75 | TEST_F(ConnectionConfigurationTest, SetBroker_2) 76 | { 77 | configuration.SetBroker("example.org", 1234); 78 | ASSERT_TRUE(static_cast(configuration.broker_address)); 79 | ASSERT_STREQ("example.org", configuration.broker_address->hostname.c_str()); 80 | ASSERT_STREQ("1234", configuration.broker_address->service.c_str()); 81 | } 82 | -------------------------------------------------------------------------------- /test/src/detail/consumer_metadata_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/consumer_metadata_request_write_test.cpp 3 | // ----------------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::ConsumerMetadataRequest; 16 | 17 | class ConsumerMetadataRequestWriteTest : 18 | public ::testing::Test, 19 | public StreamTest 20 | { 21 | protected: 22 | void SetUp() 23 | { 24 | ResetStream(); 25 | } 26 | }; 27 | 28 | TEST_F(ConsumerMetadataRequestWriteTest, WriteRequestMessage) 29 | { 30 | ConsumerMetadataRequest request; 31 | request.set_consumer_group("TestConsumerGroup"); 32 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 33 | 34 | using namespace libkafka_asio::detail; 35 | ASSERT_STREQ("TestConsumerGroup", ReadString(*stream).c_str()); 36 | 37 | // Nothing else ... 
38 | ASSERT_EQ(0, streambuf->size()); 39 | } 40 | 41 | TEST_F(ConsumerMetadataRequestWriteTest, WriteRequestMessage_Empty) 42 | { 43 | ConsumerMetadataRequest request; 44 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 45 | 46 | using namespace libkafka_asio::detail; 47 | ASSERT_STREQ("", ReadString(*stream).c_str()); 48 | 49 | // Nothing else ... 50 | ASSERT_EQ(0, streambuf->size()); 51 | } 52 | -------------------------------------------------------------------------------- /test/src/detail/fetch_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/fetch_request_write_test.cpp 3 | // ----------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::FetchRequest; 16 | 17 | class FetchRequestWriteTest : 18 | public ::testing::Test, 19 | public StreamTest 20 | { 21 | protected: 22 | void SetUp() 23 | { 24 | ResetStream(); 25 | } 26 | }; 27 | 28 | TEST_F(FetchRequestWriteTest, WriteRequestMessage) 29 | { 30 | FetchRequest request; 31 | request.set_max_wait_time(100); 32 | request.set_min_bytes(1); 33 | request.FetchTopic("Topic1", 0, 123); 34 | request.FetchTopic("Topic2", 1, 456, 1024); 35 | 36 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 37 | 38 | using namespace libkafka_asio::detail; 39 | ASSERT_EQ(-1, ReadInt32(*stream)); // ReplicaId 40 | ASSERT_EQ(100, ReadInt32(*stream)); // MaxWaitTime 41 | ASSERT_EQ(1, ReadInt32(*stream)); // MinBytes 42 | ASSERT_EQ(2, ReadInt32(*stream)); // Topic array size 43 | 44 | ASSERT_STREQ("Topic1", ReadString(*stream).c_str()); // TopicName 45 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 46 | ASSERT_EQ(0, ReadInt32(*stream)); // Partition 0 47 | ASSERT_EQ(123, ReadInt64(*stream)); // FetchOffset 123 48 | ASSERT_EQ(libkafka_asio::constants::kDefaultFetchMaxBytes, 49 | ReadInt32(*stream)); // MaxBytes (default) 50 | 51 | ASSERT_STREQ("Topic2", ReadString(*stream).c_str()); // TopicName 52 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 53 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition 1 54 | ASSERT_EQ(456, ReadInt64(*stream)); // FetchOffset 456 55 | ASSERT_EQ(1024, ReadInt32(*stream)); // MaxBytes 1024 56 | 57 | // Nothing else ... 58 | ASSERT_EQ(0, streambuf->size()); 59 | } 60 | 61 | TEST_F(FetchRequestWriteTest, WriteRequestMessage_Empty) 62 | { 63 | FetchRequest request; 64 | 65 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 66 | 67 | using namespace libkafka_asio::detail; 68 | using namespace libkafka_asio::constants; 69 | ASSERT_EQ(-1, ReadInt32(*stream)); // ReplicaId 70 | ASSERT_EQ(kDefaultFetchMaxWaitTime, ReadInt32(*stream)); // MaxWaitTime 71 | ASSERT_EQ(kDefaultFetchMinBytes, ReadInt32(*stream)); // MinBytes 72 | ASSERT_EQ(0, ReadInt32(*stream)); // Topic array size 73 | 74 | // Nothing else ... 75 | ASSERT_EQ(0, streambuf->size()); 76 | } 77 | -------------------------------------------------------------------------------- /test/src/detail/metadata_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/metadata_request_write_test.cpp 3 | // -------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::MetadataRequest; 16 | 17 | class MetadataRequestWriteTest : 18 | public ::testing::Test, 19 | public StreamTest 20 | { 21 | protected: 22 | void SetUp() 23 | { 24 | ResetStream(); 25 | } 26 | }; 27 | 28 | TEST_F(MetadataRequestWriteTest, WriteRequestMessage) 29 | { 30 | MetadataRequest request; 31 | request.AddTopicName("Foo"); 32 | request.AddTopicName("Bar"); 33 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 34 | 35 | using namespace libkafka_asio::detail; 36 | ASSERT_EQ(2, ReadInt32(*stream)); // Topic array size 37 | ASSERT_STREQ("Foo", ReadString(*stream).c_str()); 38 | ASSERT_STREQ("Bar", ReadString(*stream).c_str()); 39 | 40 | // Nothing else ... 41 | ASSERT_EQ(0, streambuf->size()); 42 | } 43 | 44 | TEST_F(MetadataRequestWriteTest, WriteRequestMessage_Empty) 45 | { 46 | MetadataRequest request; 47 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 48 | 49 | using namespace libkafka_asio::detail; 50 | ASSERT_EQ(0, ReadInt32(*stream)); // Topic array size 51 | 52 | // Nothing else ... 53 | ASSERT_EQ(0, streambuf->size()); 54 | } 55 | -------------------------------------------------------------------------------- /test/src/detail/offset_commit_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/offset_commit_request_write_test.cpp 3 | // ------------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::OffsetCommitRequest; 16 | 17 | class OffsetCommitRequestWriteTest : 18 | public ::testing::Test, 19 | public StreamTest 20 | { 21 | protected: 22 | void SetUp() 23 | { 24 | ResetStream(); 25 | } 26 | }; 27 | 28 | TEST_F(OffsetCommitRequestWriteTest, WriteRequestMessage) 29 | { 30 | OffsetCommitRequest request; 31 | request.set_consumer_group("TestConsumerGroup"); 32 | request.CommitOffset("Topic1", 4, 1234, 88888888, "my metadata"); 33 | request.CommitOffset("Topic1", 5, 5678, 99999999); 34 | request.CommitOffset("Topic2", 0, 1234); 35 | 36 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 37 | 38 | using namespace libkafka_asio::detail; 39 | using namespace libkafka_asio::constants; 40 | ASSERT_STREQ("TestConsumerGroup", ReadString(*stream).c_str()); 41 | ASSERT_EQ(2, ReadInt32(*stream)); // Topic array size 42 | 43 | ASSERT_STREQ("Topic1", ReadString(*stream).c_str()); // TopicName 44 | ASSERT_EQ(2, ReadInt32(*stream)); // Partition array size 45 | ASSERT_EQ(4, ReadInt32(*stream)); // Partition 46 | ASSERT_EQ(1234, ReadInt64(*stream)); // Offset 47 | ASSERT_EQ(88888888, ReadInt64(*stream)); // Timestamp 48 | ASSERT_STREQ("my metadata", ReadString(*stream).c_str()); // Metadata 49 | ASSERT_EQ(5, ReadInt32(*stream)); // Partition 50 | ASSERT_EQ(5678, ReadInt64(*stream)); // Offset 51 | ASSERT_EQ(99999999, ReadInt64(*stream)); // Timestamp 52 | ASSERT_STREQ("", ReadString(*stream).c_str()); // Metadata 53 | 54 | ASSERT_STREQ("Topic2", ReadString(*stream).c_str()); // TopicName 55 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 56 | ASSERT_EQ(0, ReadInt32(*stream)); // Partition 57 | ASSERT_EQ(1234, ReadInt64(*stream)); // Offset 58 | ASSERT_EQ(kDefaultOffsetCommitTimestampNow, ReadInt64(*stream)); // Timestamp 59 | ASSERT_STREQ("", 
ReadString(*stream).c_str()); // Metadata 60 | 61 | // Nothing else ... 62 | ASSERT_EQ(0, streambuf->size()); 63 | } 64 | 65 | TEST_F(OffsetCommitRequestWriteTest, WriteRequestMessage_Empty) 66 | { 67 | OffsetCommitRequest request; 68 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 69 | 70 | using namespace libkafka_asio::detail; 71 | ASSERT_STREQ("", ReadString(*stream).c_str()); // ConsumerGroup 72 | ASSERT_EQ(0, ReadInt32(*stream)); // Topic array size 73 | 74 | // Nothing else ... 75 | ASSERT_EQ(0, streambuf->size()); 76 | } 77 | -------------------------------------------------------------------------------- /test/src/detail/offset_fetch_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/offset_fetch_request_write_test.cpp 3 | // ------------------------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::OffsetFetchRequest; 16 | 17 | class OffsetFetchRequestWriteTest : 18 | public ::testing::Test, 19 | public StreamTest 20 | { 21 | protected: 22 | void SetUp() 23 | { 24 | ResetStream(); 25 | } 26 | }; 27 | 28 | TEST_F(OffsetFetchRequestWriteTest, WriteRequestMessage) 29 | { 30 | OffsetFetchRequest request; 31 | request.set_consumer_group("TestGroup"); 32 | request.FetchOffset("Topic1", 0); 33 | request.FetchOffset("Topic1", 1); 34 | request.FetchOffset("Topic2", 1); 35 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 36 | 37 | using namespace libkafka_asio::detail; 38 | ASSERT_STREQ("TestGroup", ReadString(*stream).c_str()); // ConsumerGroup 39 | ASSERT_EQ(2, ReadInt32(*stream)); // Topic array size 40 | ASSERT_STREQ("Topic1", ReadString(*stream).c_str()); // TopicName 41 | ASSERT_EQ(2, ReadInt32(*stream)); // Partition array size 42 | ASSERT_EQ(0, ReadInt32(*stream)); // Partition 43 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition 44 | ASSERT_STREQ("Topic2", ReadString(*stream).c_str()); // TopicName 45 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 46 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition 47 | 48 | // Nothing else ... 49 | ASSERT_EQ(0, streambuf->size()); 50 | } 51 | 52 | TEST_F(OffsetFetchRequestWriteTest, WriteRequestMessage_Empty) 53 | { 54 | OffsetFetchRequest request; 55 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 56 | 57 | using namespace libkafka_asio::detail; 58 | ASSERT_STREQ("", ReadString(*stream).c_str()); // ConsumerGroup 59 | ASSERT_EQ(0, ReadInt32(*stream)); // Topic array size 60 | 61 | // Nothing else ... 62 | ASSERT_EQ(0, streambuf->size()); 63 | } 64 | -------------------------------------------------------------------------------- /test/src/detail/offset_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/offset_request_write_test.cpp 3 | // ------------------------------------ 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::OffsetRequest; 16 | 17 | class OffsetRequestWriteTest : 18 | public ::testing::Test, 19 | public StreamTest 20 | { 21 | protected: 22 | void SetUp() 23 | { 24 | ResetStream(); 25 | } 26 | }; 27 | 28 | TEST_F(OffsetRequestWriteTest, WriteRequestMessage) 29 | { 30 | OffsetRequest request; 31 | request.FetchTopicOffset("Topic1", 1); 32 | request.FetchTopicOffset("Topic2", 3, -2, 5); 33 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 34 | 35 | using namespace libkafka_asio::detail; 36 | using namespace libkafka_asio::constants; 37 | ASSERT_EQ(-1, ReadInt32(*stream)); // ReplicaId 38 | ASSERT_EQ(2, ReadInt32(*stream)); // Topic array size 39 | 40 | ASSERT_STREQ("Topic1", ReadString(*stream).c_str()); // TopicName 41 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 42 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition 43 | ASSERT_EQ(kOffsetTimeLatest, ReadInt64(*stream)); // Time 44 | ASSERT_EQ(kDefaultOffsetMaxNumberOfOffsets, ReadInt32(*stream)); 45 | 46 | ASSERT_STREQ("Topic2", ReadString(*stream).c_str()); // TopicName 47 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 48 | ASSERT_EQ(3, ReadInt32(*stream)); // Partition 49 | ASSERT_EQ(-2, ReadInt64(*stream)); // Time 50 | ASSERT_EQ(5, ReadInt32(*stream)); // MaxNumberOfOffsets 51 | 52 | // Nothing else ... 53 | ASSERT_EQ(0, streambuf->size()); 54 | } 55 | 56 | TEST_F(OffsetRequestWriteTest, WriteRequestMessage_Empty) 57 | { 58 | OffsetRequest request; 59 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 60 | 61 | using namespace libkafka_asio::detail; 62 | using namespace libkafka_asio::constants; 63 | ASSERT_EQ(-1, ReadInt32(*stream)); // ReplicaId 64 | ASSERT_EQ(0, ReadInt32(*stream)); // Topic array size 65 | 66 | // Nothing else ... 67 | ASSERT_EQ(0, streambuf->size()); 68 | } -------------------------------------------------------------------------------- /test/src/detail/produce_request_write_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/produce_request_write_test.cpp 3 | // ------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include "StreamTest.h" 14 | 15 | using libkafka_asio::ProduceRequest; 16 | using libkafka_asio::Int32; 17 | 18 | class ProduceRequestWriteTest : 19 | public ::testing::Test, 20 | public StreamTest 21 | { 22 | protected: 23 | void SetUp() 24 | { 25 | ResetStream(); 26 | } 27 | }; 28 | 29 | TEST_F(ProduceRequestWriteTest, WriteRequestMessage) 30 | { 31 | ProduceRequest request; 32 | request.set_required_acks(1); 33 | request.set_timeout(100); 34 | request.AddValue("Foo Bar", "Topic1", 2); 35 | ASSERT_EQ(1, request.topics().size()); 36 | ASSERT_EQ(1, request.topics()[0].partitions.size()); 37 | 38 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 39 | 40 | using namespace libkafka_asio::detail; 41 | ASSERT_EQ(1, ReadInt16(*stream)); // RequiredAcks 42 | ASSERT_EQ(100, ReadInt32(*stream)); // Timeout 43 | ASSERT_EQ(1, ReadInt32(*stream)); // Topic array size 44 | ASSERT_STREQ("Topic1", ReadString(*stream).c_str()); // TopicName 45 | ASSERT_EQ(1, ReadInt32(*stream)); // Partition array size 46 | ASSERT_EQ(2, ReadInt32(*stream)); // Partition 47 | 48 | // 'MessageSetWireSize' and 'ReadMessageSet' are tested somewhere else, so 49 | // let's just assume they work correctly. 50 | Int32 expected_message_set_size 51 | = MessageSetWireSize(request.topics()[0].partitions[0].messages); 52 | ASSERT_EQ(expected_message_set_size, ReadInt32(*stream)); // MessageSetSize 53 | libkafka_asio::MessageSet message_set; 54 | boost::system::error_code ec; 55 | ReadMessageSet(*stream, message_set, expected_message_set_size, ec); 56 | 57 | // Nothing else ... 58 | ASSERT_EQ(0, streambuf->size()); 59 | } 60 | 61 | TEST_F(ProduceRequestWriteTest, WriteRequestMessage_Empty) 62 | { 63 | ProduceRequest request; 64 | 65 | libkafka_asio::detail::WriteRequestMessage(request, *stream); 66 | 67 | using namespace libkafka_asio::detail; 68 | using namespace libkafka_asio::constants; 69 | ASSERT_EQ(kDefaultProduceRequiredAcks, ReadInt16(*stream)); // RequiredAcks 70 | ASSERT_EQ(kDefaultProduceTimeout, ReadInt32(*stream)); // Timeout 71 | ASSERT_EQ(0, ReadInt32(*stream)); // Topic array size 72 | 73 | // Nothing else ... 74 | ASSERT_EQ(0, streambuf->size()); 75 | } 76 | -------------------------------------------------------------------------------- /test/src/detail/recursive_messageset_iterator_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // detail/recursive_messageset_iterator_test.cpp 3 | // --------------------------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | 19 | using namespace libkafka_asio; 20 | using namespace libkafka_asio::detail; 21 | 22 | class MessageGenerator 23 | { 24 | public: 25 | MessageGenerator() : 26 | default_count_(0), 27 | count_(default_count_) 28 | { 29 | } 30 | 31 | MessageGenerator(Int64& count) : 32 | count_(count) 33 | { 34 | } 35 | 36 | MessageAndOffset operator()() 37 | { 38 | return MessageAndOffset(Message(), count_++); 39 | } 40 | 41 | private: 42 | Int64 default_count_; 43 | Int64& count_; 44 | }; 45 | 46 | TEST(RecursiveMessageSetIteratorTest, Empty) 47 | { 48 | MessageSet empty_set; 49 | RecursiveMessageSetIterator iter(empty_set), end_iter; 50 | ASSERT_EQ(0, std::distance(iter, end_iter)); 51 | ASSERT_EQ(end_iter, iter); 52 | ++iter; 53 | ASSERT_EQ(end_iter, iter); 54 | } 55 | 56 | TEST(RecursiveMessageSetIteratorTest, Flat) 57 | { 58 | MessageSet flat_messages; 59 | std::generate_n(std::back_inserter(flat_messages), 10, MessageGenerator()); 60 | ASSERT_EQ(10, std::distance(RecursiveMessageSetIterator(flat_messages), 61 | RecursiveMessageSetIterator())); 62 | RecursiveMessageSetIterator iter(flat_messages), end_iter; 63 | Int64 count = 0; 64 | for (; iter != end_iter; ++iter) 65 | { 66 | ASSERT_EQ(count, iter->offset()); 67 | ++count; 68 | } 69 | } 70 | 71 | TEST(RecursiveMessageSetIteratorTest, Recurse) 72 | { 73 | // Construct the following hierarchy: 74 | // 75 | // [0] (offset 0) 76 | // |- [0] (offset 5) * 77 | // |- [1] (offset 6) * 78 | // [1] (offset 1) * 79 | // [2] (offset 2) 80 | // |- [0] (offset 7) * 81 | // [3] (offset 3) 82 | // |- [0] (offset 8) * 83 | // |- [1] (offset 9) 84 | // | |- [0] (offset 10) * 85 | // | |- [1] (offset 11) * 86 | // | |- [2] (offset 12) * 87 | // [4] (offset 4) 88 | // |- [0] (offset 13) * 89 | // |- [1] (offset 14) * 90 | // 91 | // All elements with star (*) should be visited by the iterator. 92 | // 93 | Int64 count = 0; 94 | MessageGenerator generator(count); 95 | MessageSet messages; 96 | std::generate_n( 97 | std::back_inserter(messages), 98 | 5, generator); 99 | std::generate_n( 100 | std::back_inserter(messages[0].mutable_nested_message_set()), 101 | 2, generator); 102 | std::generate_n( 103 | std::back_inserter(messages[2].mutable_nested_message_set()), 104 | 1, generator); 105 | std::generate_n( 106 | std::back_inserter(messages[3].mutable_nested_message_set()), 107 | 2, generator); 108 | std::generate_n( 109 | std::back_inserter(messages[3].mutable_nested_message_set() 110 | [1].mutable_nested_message_set()), 111 | 3, generator); 112 | std::generate_n( 113 | std::back_inserter(messages[4].mutable_nested_message_set()), 114 | 2, generator); 115 | 116 | // The expected offset sequence: 117 | // (5, 6), 1, (7), (8, (10, 11, 12)), (13, 14) 118 | Int64 expected_offsets[] = {5, 6, 1, 7, 8, 10, 11, 12, 13, 14}; 119 | RecursiveMessageSetIterator iter(messages), end_iter; 120 | BOOST_FOREACH(Int64 expected_offset, expected_offsets) 121 | { 122 | ASSERT_EQ(expected_offset, iter->offset()); 123 | iter++; 124 | } 125 | ASSERT_EQ(end_iter, iter); 126 | } -------------------------------------------------------------------------------- /test/src/error_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // error_test.cpp 3 | // -------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. 
(See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | using namespace libkafka_asio; 14 | 15 | TEST(ErrorTest, ClientErrorCategory) 16 | { 17 | using boost::system::error_code; 18 | error_code error = error_code(kErrorAlreadyConnected); 19 | ASSERT_STREQ("libkafka_asio::ClientError", error.category().name()); 20 | error = error_code(kErrorNotConnected); 21 | ASSERT_STREQ("libkafka_asio::ClientError", error.category().name()); 22 | error = error_code(kErrorInProgress); 23 | ASSERT_STREQ("libkafka_asio::ClientError", error.category().name()); 24 | error = error_code(kErrorNoBroker); 25 | ASSERT_STREQ("libkafka_asio::ClientError", error.category().name()); 26 | // Boost system should be able to generate an error message: 27 | ASSERT_STREQ("No broker found", boost::system::system_error(error).what()); 28 | } 29 | 30 | TEST(ErrorTest, KafkaErrorCategory) 31 | { 32 | using boost::system::error_code; 33 | // 'Testing' two errors should suffice here 34 | error_code error = error_code(kErrorNoError); 35 | ASSERT_STREQ("libkafka_asio::KafkaError", error.category().name()); 36 | error = error_code(kErrorMessageSizeTooLarge); 37 | ASSERT_STREQ("libkafka_asio::KafkaError", error.category().name()); 38 | // Boost system should be able to generate an error message: 39 | ASSERT_STREQ("Message was too large", 40 | boost::system::system_error(error).what()); 41 | } -------------------------------------------------------------------------------- /test/src/fetch_request_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // fetch_request_test.cpp 3 | // ---------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | class FetchRequestTest : 14 | public ::testing::Test 15 | { 16 | protected: 17 | virtual void SetUp() 18 | { 19 | ASSERT_EQ(0, request.topics().size()); 20 | } 21 | 22 | libkafka_asio::FetchRequest request; 23 | }; 24 | 25 | TEST_F(FetchRequestTest, FetchTopic_New) 26 | { 27 | request.FetchTopic("mytopic", 1, 2); 28 | ASSERT_EQ(1, request.topics().size()); 29 | ASSERT_EQ(1, request.topics()[0].partitions.size()); 30 | ASSERT_STREQ("mytopic", request.topics()[0].topic_name.c_str()); 31 | ASSERT_EQ(1, request.topics()[0].partitions[0].partition); 32 | ASSERT_EQ(2, request.topics()[0].partitions[0].fetch_offset); 33 | ASSERT_EQ(libkafka_asio::constants::kDefaultFetchMaxBytes, 34 | request.topics()[0].partitions[0].max_bytes); 35 | } 36 | 37 | TEST_F(FetchRequestTest, FetchTopic_Override) 38 | { 39 | request.FetchTopic("mytopic", 1, 2); 40 | ASSERT_EQ(1, request.topics().size()); 41 | ASSERT_EQ(1, request.topics()[0].partitions.size()); 42 | ASSERT_EQ(2, request.topics()[0].partitions[0].fetch_offset); 43 | request.FetchTopic("mytopic", 1, 4); 44 | ASSERT_EQ(1, request.topics().size()); 45 | ASSERT_EQ(1, request.topics()[0].partitions.size()); 46 | ASSERT_EQ(4, request.topics()[0].partitions[0].fetch_offset); 47 | } 48 | 49 | TEST_F(FetchRequestTest, FetchTopic_MultiplePartitions) 50 | { 51 | request.FetchTopic("mytopic", 0, 2); 52 | request.FetchTopic("mytopic", 1, 4); 53 | ASSERT_EQ(1, request.topics().size()); 54 | ASSERT_EQ(2, request.topics()[0].partitions.size()); 55 | ASSERT_EQ(2, request.topics()[0].partitions[0].fetch_offset); 56 | ASSERT_EQ(4, request.topics()[0].partitions[1].fetch_offset); 57 | } 58 | 59 | TEST_F(FetchRequestTest, FetchTopic_MultipleTopics) 60 | { 61 | request.FetchTopic("foo", 0, 2); 62 | 
request.FetchTopic("bar", 1, 4); 63 | ASSERT_EQ(2, request.topics().size()); 64 | ASSERT_EQ(1, request.topics()[0].partitions.size()); 65 | ASSERT_EQ(1, request.topics()[1].partitions.size()); 66 | ASSERT_EQ(2, request.topics()[0].partitions[0].fetch_offset); 67 | ASSERT_EQ(4, request.topics()[1].partitions[0].fetch_offset); 68 | } 69 | -------------------------------------------------------------------------------- /test/src/libkafka_asio_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // libkafka_asio_test.cpp 3 | // ---------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | 12 | int main(int argc, char** argv) 13 | { 14 | ::testing::InitGoogleTest(&argc, argv); 15 | return RUN_ALL_TESTS(); 16 | } 17 | -------------------------------------------------------------------------------- /test/src/message_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // message_test.cpp 3 | // ---------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | using libkafka_asio::Message; 15 | using libkafka_asio::MessageAndOffset; 16 | using libkafka_asio::MessageSet; 17 | using libkafka_asio::CompressMessageSet; 18 | using libkafka_asio::Bytes; 19 | 20 | TEST(MessageTest, FlatCopy) 21 | { 22 | Message orig; 23 | std::string test_value = "foo bar"; 24 | orig.mutable_value().reset( 25 | new Bytes::element_type(test_value.begin(), test_value.end())); 26 | Message copy(orig, false); 27 | ASSERT_TRUE(static_cast(orig.value())); 28 | ASSERT_TRUE(static_cast(copy.value())); 29 | // Both messages should point to the same value buffer 30 | ASSERT_EQ(orig.value().get(), copy.value().get()); 31 | } 32 | 33 | TEST(MessageTest, AssignmentOperator) 34 | { 35 | Message orig; 36 | std::string test_value = "foo bar"; 37 | orig.mutable_value().reset( 38 | new Bytes::element_type(test_value.begin(), test_value.end())); 39 | Message copy; 40 | copy = orig; 41 | ASSERT_TRUE(static_cast(orig.value())); 42 | ASSERT_TRUE(static_cast(copy.value())); 43 | // Both messages should point to the same value buffer 44 | ASSERT_EQ(orig.value().get(), copy.value().get()); 45 | } 46 | 47 | TEST(MessageTest, DeepCopy) 48 | { 49 | Message orig; 50 | { 51 | std::string test_value = "foo bar"; 52 | orig.mutable_value().reset( 53 | new Bytes::element_type(test_value.begin(), test_value.end())); 54 | } 55 | Message copy(orig, true); 56 | ASSERT_TRUE(static_cast(orig.value())); 57 | ASSERT_TRUE(static_cast(copy.value())); 58 | // Each message should now have it's own value buffer 59 | ASSERT_NE(orig.value().get(), copy.value().get()); 60 | // But the actual value should be the same (a copy) 61 | ASSERT_FALSE(orig.value()->empty()); 62 | ASSERT_FALSE(copy.value()->empty()); 63 | std::string test_value1((const char*)&(*orig.value())[0], 64 | orig.value()->size()); 65 | std::string test_value2((const char*)&(*copy.value())[0], 66 | copy.value()->size()); 67 | ASSERT_STREQ(test_value1.c_str(), test_value2.c_str()); 68 | } 69 | 70 | TEST(MessageTest, CompressMessageSet) 71 | { 72 | MessageSet message_set(2); 73 | message_set[0].set_offset(1); 74 | message_set[1].set_offset(2); 75 | boost::system::error_code ec; 76 | using namespace libkafka_asio::constants; 77 | Message msg = CompressMessageSet(message_set, kCompressionGZIP, 
ec); 78 | ASSERT_EQ(libkafka_asio::kErrorSuccess, ec); 79 | ASSERT_TRUE(static_cast(msg.value())); 80 | ASSERT_FALSE(msg.value()->empty()); 81 | ASSERT_EQ(kCompressionGZIP, msg.compression()); 82 | } 83 | 84 | TEST(MessageTest, CompressMessageSetNoneCompression) 85 | { 86 | MessageSet message_set(2); 87 | message_set[0].set_offset(1); 88 | message_set[1].set_offset(2); 89 | boost::system::error_code ec; 90 | using namespace libkafka_asio::constants; 91 | Message msg = CompressMessageSet(message_set, kCompressionNone, ec); 92 | ASSERT_EQ(libkafka_asio::kErrorCompressionFailed, ec); 93 | } 94 | -------------------------------------------------------------------------------- /test/src/metadata_response_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // metadata_response_test.cpp 3 | // -------------------------- 4 | // 5 | // Copyright (c) 2015 Daniel Joos 6 | // 7 | // Distributed under MIT license. (See file LICENSE) 8 | // 9 | 10 | #include 11 | #include 12 | 13 | using namespace libkafka_asio; 14 | 15 | class MetadataResponseTest : 16 | public ::testing::Test 17 | { 18 | protected: 19 | void AddBroker(const String& host, Int32 node_id, Int32 port) 20 | { 21 | MetadataResponse::Broker broker; 22 | broker.host = host; 23 | broker.node_id = node_id; 24 | broker.port = port; 25 | response.mutable_brokers().push_back(broker); 26 | } 27 | 28 | MutableMetadataResponse response; 29 | }; 30 | 31 | 32 | TEST_F(MetadataResponseTest, PartitionLeader) 33 | { 34 | AddBroker("localhost", 123, 49152); 35 | AddBroker("example.com", 456, 49152); 36 | ASSERT_EQ(2, response.response().brokers().size()); 37 | MetadataResponse::Topic metadata; 38 | MetadataResponse::Partition test_partition; 39 | test_partition.leader = 456; 40 | metadata.partitions.insert(std::make_pair(1, test_partition)); 41 | response.mutable_topics().insert(std::make_pair("foo", metadata)); 42 | ASSERT_EQ(1, response.response().topics().size()); 43 | 44 | MetadataResponse::Broker::OptionalType leader = 45 | response.response().PartitionLeader("foo", 1); 46 | ASSERT_TRUE(static_cast(leader)); 47 | ASSERT_EQ(456, leader->node_id); 48 | ASSERT_STREQ("example.com", leader->host.c_str()); 49 | } 50 | 51 | TEST_F(MetadataResponseTest, PartitionLeader_InElection) 52 | { 53 | MetadataResponse::Topic metadata; 54 | MetadataResponse::Partition test_partition; 55 | test_partition.leader = -1; 56 | metadata.partitions.insert(std::make_pair(1, test_partition)); 57 | response.mutable_topics().insert(std::make_pair("foo", metadata)); 58 | ASSERT_EQ(1, response.response().topics().size()); 59 | 60 | MetadataResponse::Broker::OptionalType leader = 61 | response.response().PartitionLeader("foo", 1); 62 | ASSERT_FALSE(static_cast(leader)); 63 | } 64 | --------------------------------------------------------------------------------
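The metadata_response_test.cpp listing above fills a response through the MutableMetadataResponse wrapper and then asks it for a partition leader. Collected into one small, self-contained program, the same lookup flow looks roughly like the sketch below; it assumes that <libkafka_asio/metadata_response.h> from the listing further up is on the include path, and the broker address, topic name and partition number are placeholders only:

#include <iostream>
#include <libkafka_asio/metadata_response.h>

int main()
{
  using namespace libkafka_asio;

  // Build a response by hand, the same way metadata_response_test.cpp does.
  MutableMetadataResponse mutable_response;

  MetadataResponse::Broker broker;
  broker.node_id = 1;
  broker.host = "localhost";
  broker.port = 9092;
  mutable_response.mutable_brokers().push_back(broker);

  MetadataResponse::Topic topic;
  MetadataResponse::Partition partition;
  partition.leader = 1;  // node_id of the broker added above
  topic.partitions.insert(std::make_pair(0, partition));
  mutable_response.mutable_topics().insert(std::make_pair("mytopic", topic));

  // Look up the leader of partition 0 of "mytopic". The result is an
  // optional value; it stays empty if the topic or partition is unknown
  // or the partition is currently in leader election (leader == -1).
  MetadataResponse::Broker::OptionalType leader =
    mutable_response.response().PartitionLeader("mytopic", 0);
  if (leader)
  {
    std::cout << leader->host << ":" << leader->port << std::endl;
  }
  return 0;
}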