├── .gitignore ├── .gitmodules ├── .travis.yml ├── CMakeLists.txt ├── LICENSE ├── README.md ├── cmake ├── FindRdKafka.cmake ├── cmake_uninstall.cmake.in ├── config.cmake.in ├── cppkafka.h.in └── cppkafka.pc.in ├── docs ├── Doxyfile.in └── mainpage.dox ├── examples ├── CMakeLists.txt ├── buffered_producer_example.cpp ├── consumer_dispatcher_example.cpp ├── consumer_example.cpp ├── consumers_information_example.cpp ├── metadata_example.cpp └── producer_example.cpp ├── include └── cppkafka │ ├── CMakeLists.txt │ ├── buffer.h │ ├── clonable_ptr.h │ ├── configuration.h │ ├── configuration_base.h │ ├── configuration_option.h │ ├── consumer.h │ ├── cppkafka.h │ ├── detail │ ├── callback_invoker.h │ └── endianness.h │ ├── error.h │ ├── event.h │ ├── exceptions.h │ ├── group_information.h │ ├── header.h │ ├── header_list.h │ ├── header_list_iterator.h │ ├── kafka_handle_base.h │ ├── logging.h │ ├── macros.h │ ├── message.h │ ├── message_builder.h │ ├── message_internal.h │ ├── message_timestamp.h │ ├── metadata.h │ ├── producer.h │ ├── queue.h │ ├── topic.h │ ├── topic_configuration.h │ ├── topic_partition.h │ ├── topic_partition_list.h │ └── utils │ ├── backoff_committer.h │ ├── backoff_performer.h │ ├── buffered_producer.h │ ├── compacted_topic_processor.h │ ├── consumer_dispatcher.h │ ├── poll_interface.h │ ├── poll_strategy_base.h │ └── roundrobin_poll_strategy.h ├── src ├── CMakeLists.txt ├── buffer.cpp ├── configuration.cpp ├── configuration_option.cpp ├── consumer.cpp ├── error.cpp ├── event.cpp ├── exceptions.cpp ├── group_information.cpp ├── kafka_handle_base.cpp ├── message.cpp ├── message_internal.cpp ├── message_timestamp.cpp ├── metadata.cpp ├── producer.cpp ├── queue.cpp ├── topic.cpp ├── topic_configuration.cpp ├── topic_partition.cpp ├── topic_partition_list.cpp └── utils │ ├── backoff_committer.cpp │ ├── backoff_performer.cpp │ ├── poll_strategy_base.cpp │ └── roundrobin_poll_strategy.cpp └── tests ├── CMakeLists.txt ├── buffer_test.cpp ├── 
compacted_topic_processor_test.cpp ├── configuration_test.cpp ├── consumer_test.cpp ├── headers_test.cpp ├── kafka_handle_base_test.cpp ├── producer_test.cpp ├── roundrobin_poll_test.cpp ├── test_main.cpp ├── test_utils.cpp ├── test_utils.h ├── test_utils_impl.h └── topic_partition_list_test.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | include/cppkafka/config.h 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "third_party/Catch2"] 2 | path = third_party/Catch2 3 | url = https://github.com/catchorg/Catch2.git 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | 3 | sudo: required 4 | 5 | compiler: 6 | - gcc 7 | - clang 8 | 9 | env: 10 | - RDKAFKA_VERSION=v0.9.4 11 | - RDKAFKA_VERSION=v0.11.6 12 | 13 | os: 14 | - linux 15 | 16 | addons: 17 | apt: 18 | packages: 19 | - libboost-dev 20 | - libboost-program-options-dev 21 | - zookeeper 22 | - zookeeperd 23 | 24 | before_script: 25 | - KAFKA_VERSION=2.11-2.2.0 26 | - wget https://archive.apache.org/dist/kafka/2.2.0/kafka_$KAFKA_VERSION.tgz 27 | - tar xvzf kafka_$KAFKA_VERSION.tgz 28 | - ./kafka_$KAFKA_VERSION/bin/kafka-server-start.sh ./kafka_$KAFKA_VERSION/config/server.properties > /dev/null 2> /dev/null & 29 | - git clone https://github.com/edenhill/librdkafka.git 30 | - while ! 
echo "asd" | nc localhost 9092; do sleep 1; done 31 | - ./kafka_$KAFKA_VERSION/bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic cppkafka_test1 --partitions 3 --replication-factor 1 32 | - ./kafka_$KAFKA_VERSION/bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic cppkafka_test2 --partitions 3 --replication-factor 1 33 | 34 | script: 35 | - cd librdkafka 36 | - git checkout $RDKAFKA_VERSION 37 | - ./configure --prefix=./install && make libs && make install 38 | - cd .. 39 | - mkdir build && cd build 40 | - cmake .. -DCPPKAFKA_CMAKE_VERBOSE=ON -DRDKAFKA_ROOT=./librdkafka/install -DKAFKA_TEST_INSTANCE=localhost:9092 41 | - make examples 42 | - make tests 43 | - ./tests/cppkafka_tests 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016-2017, Matias Fontanini 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | * Redistributions in binary form must reproduce the above 11 | copyright notice, this list of conditions and the following disclaimer 12 | in the documentation and/or other materials provided with the 13 | distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 19 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cppkafka: high level C++ wrapper for _rdkafka_ 2 | 3 | [![Build status](https://travis-ci.org/mfontanini/cppkafka.svg?branch=master)](https://travis-ci.org/mfontanini/cppkafka) 4 | 5 | _cppkafka_ allows C++ applications to consume and produce messages using the Apache Kafka 6 | protocol. The library is built on top of [_librdkafka_](https://github.com/edenhill/librdkafka), 7 | and provides a high level API that uses modern C++ features to make it easier to write code 8 | while keeping the wrapper's performance overhead to a minimum. 9 | 10 | # Features 11 | 12 | * _cppkafka_ is a high level C++ wrapper for _rdkafka_, aiming to allow using _rdkafka_ in a 13 | simple, less error prone way. 14 | 15 | * _cppkafka_ provides an API to produce messages as well as consuming messages, but the latter is 16 | only supported via the high level consumer API. _cppkafka_ requires **rdkafka >= 0.9.4** in 17 | order to use it. Other wrapped functionalities are also provided, like fetching metadata, 18 | offsets, etc. 19 | 20 | * _cppkafka_ provides message header support. This feature requires **rdkafka >= 0.11.4**. 21 | 22 | * _cppkafka_ tries to add minimal overhead over _librdkafka_. 
A very thin wrapper for _librdkafka_ 23 | messages is used for consumption so there's virtually no overhead at all. 24 | 25 | # It's simple! 26 | 27 | _cppkafka_'s API is simple to use. For example, this code creates a producer that writes a message 28 | into some partition: 29 | 30 | ```c++ 31 | #include 32 | 33 | using namespace std; 34 | using namespace cppkafka; 35 | 36 | int main() { 37 | // Create the config 38 | Configuration config = { 39 | { "metadata.broker.list", "127.0.0.1:9092" } 40 | }; 41 | 42 | // Create the producer 43 | Producer producer(config); 44 | 45 | // Produce a message! 46 | string message = "hey there!"; 47 | producer.produce(MessageBuilder("my_topic").partition(0).payload(message)); 48 | producer.flush(); 49 | } 50 | ``` 51 | 52 | # Compiling 53 | 54 | In order to compile _cppkafka_ you need: 55 | 56 | * _librdkafka >= 0.9.4_ 57 | * _CMake >= 3.9.2_ 58 | * A compiler with good C++11 support (e.g. gcc >= 4.8). This was tested successfully on _g++ 4.8.3_. 59 | * The boost library (for boost::optional) 60 | 61 | Now, in order to build, just run: 62 | 63 | ```Shell 64 | mkdir build 65 | cd build 66 | cmake .. 67 | make 68 | make install 69 | ``` 70 | 71 | ## CMake options 72 | 73 | The following cmake options can be specified: 74 | * `RDKAFKA_ROOT` : Specify a different librdkafka install directory. 75 | * `RDKAFKA_DIR` : Specify a different directory where the RdKafkaConfig.cmake is installed. 76 | * `BOOST_ROOT` : Specify a different Boost install directory. 77 | * `CPPKAFKA_CMAKE_VERBOSE` : Generate verbose output. Default is `OFF`. 78 | * `CPPKAFKA_BUILD_SHARED` : Build cppkafka as a shared library. Default is `ON`. 79 | * `CPPKAFKA_DISABLE_TESTS` : Disable build of cppkafka tests. Default is `OFF`. 80 | * `CPPKAFKA_DISABLE_EXAMPLES` : Disable build of cppkafka examples. Default is `OFF`. 81 | * `CPPKAFKA_BOOST_STATIC_LIBS` : Link with Boost static libraries. Default is `ON`. 
82 | * `CPPKAFKA_BOOST_USE_MULTITHREADED` : Use Boost multi-threaded libraries. Default is `ON`. 83 | * `CPPKAFKA_RDKAFKA_STATIC_LIB` : Link to Rdkafka static library. Default is `OFF`. 84 | * `CPPKAFKA_CONFIG_DIR` : Install location of the cmake configuration files. Default is `lib/cmake/cppkafka`. 85 | * `CPPKAFKA_PKGCONFIG_DIR` : Install location of the .pc file. Default is `share/pkgconfig`. 86 | * `CPPKAFKA_EXPORT_PKGCONFIG` : Generate `cppkafka.pc` file. Default is `ON`. 87 | * `CPPKAFKA_EXPORT_CMAKE_CONFIG` : Generate CMake config, target and version files. Default is `ON`. 88 | 89 | Example: 90 | ```Shell 91 | cmake -DRDKAFKA_ROOT=/some/other/dir -DCPPKAFKA_BUILD_SHARED=OFF ... 92 | ``` 93 | 94 | # Using 95 | 96 | If you want to use _cppkafka_, you'll need to link your application with: 97 | 98 | * _cppkafka_ 99 | * _rdkafka_ 100 | 101 | If using CMake, this is simplified by doing: 102 | ```cmake 103 | find_package(CppKafka REQUIRED) 104 | 105 | target_link_libraries( CppKafka::cppkafka) 106 | ``` 107 | 108 | # Documentation 109 | 110 | You can generate the documentation by running `make docs` inside the build directory. This requires 111 | _Doxygen_ to be installed. The documentation will be written in html format at 112 | `/docs/html/`. 113 | 114 | Make sure to check the [wiki](https://github.com/mfontanini/cppkafka/wiki) which includes 115 | some documentation about the project and some of its features. 116 | -------------------------------------------------------------------------------- /cmake/FindRdKafka.cmake: -------------------------------------------------------------------------------- 1 | # This find module helps find the RdKafka module. It exports the following variables: 2 | # - RdKafka_INCLUDE_DIR : The directory where rdkafka.h is located. 3 | # - RdKafka_LIBNAME : The name of the library, i.e. librdkafka.a, librdkafka.so, etc. 4 | # - RdKafka_LIBRARY_PATH : The full library path i.e. 
/${RdKafka_LIBNAME} 5 | # - RdKafka::rdkafka : Imported library containing all above properties set. 6 | 7 | if (CPPKAFKA_RDKAFKA_STATIC_LIB) 8 | set(RDKAFKA_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX}) 9 | set(RDKAFKA_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX}) 10 | set(RDKAFKA_LIBRARY_TYPE STATIC) 11 | else() 12 | set(RDKAFKA_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX}) 13 | set(RDKAFKA_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX}) 14 | set(RDKAFKA_LIBRARY_TYPE SHARED) 15 | endif() 16 | 17 | set(RdKafka_LIBNAME ${RDKAFKA_PREFIX}rdkafka${RDKAFKA_SUFFIX}) 18 | 19 | find_path(RdKafka_INCLUDE_DIR 20 | NAMES librdkafka/rdkafka.h 21 | HINTS ${RdKafka_ROOT}/include 22 | ) 23 | 24 | find_library(RdKafka_LIBRARY_PATH 25 | NAMES ${RdKafka_LIBNAME} rdkafka 26 | HINTS ${RdKafka_ROOT}/lib ${RdKafka_ROOT}/lib64 27 | ) 28 | 29 | # Check lib paths 30 | if (CPPKAFKA_CMAKE_VERBOSE) 31 | get_property(FIND_LIBRARY_32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB32_PATHS) 32 | get_property(FIND_LIBRARY_64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS) 33 | message(STATUS "RDKAFKA search 32-bit library paths: ${FIND_LIBRARY_32}") 34 | message(STATUS "RDKAFKA search 64-bit library paths: ${FIND_LIBRARY_64}") 35 | message(STATUS "RdKafka_ROOT = ${RdKafka_ROOT}") 36 | message(STATUS "RdKafka_INCLUDE_DIR = ${RdKafka_INCLUDE_DIR}") 37 | message(STATUS "RdKafka_LIBNAME = ${RdKafka_LIBNAME}") 38 | message(STATUS "RdKafka_LIBRARY_PATH = ${RdKafka_LIBRARY_PATH}") 39 | endif() 40 | 41 | include(FindPackageHandleStandardArgs) 42 | find_package_handle_standard_args(RdKafka DEFAULT_MSG 43 | RdKafka_LIBNAME 44 | RdKafka_LIBRARY_PATH 45 | RdKafka_INCLUDE_DIR 46 | ) 47 | 48 | set(CONTENTS "#include \n #if RD_KAFKA_VERSION >= ${RDKAFKA_MIN_VERSION_HEX}\n int main() { }\n #endif") 49 | set(FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/rdkafka_version_test.cpp) 50 | file(WRITE ${FILE_NAME} ${CONTENTS}) 51 | 52 | try_compile(RdKafka_FOUND ${CMAKE_CURRENT_BINARY_DIR} 53 | SOURCES ${FILE_NAME} 54 | CMAKE_FLAGS 
"-DINCLUDE_DIRECTORIES=${RdKafka_INCLUDE_DIR}") 55 | 56 | if (RdKafka_FOUND) 57 | add_library(RdKafka::rdkafka ${RDKAFKA_LIBRARY_TYPE} IMPORTED GLOBAL) 58 | if (UNIX AND NOT APPLE) 59 | set(RDKAFKA_DEPENDENCIES pthread rt ssl crypto dl z) 60 | else() 61 | set(RDKAFKA_DEPENDENCIES pthread ssl crypto dl z) 62 | endif() 63 | set_target_properties(RdKafka::rdkafka PROPERTIES 64 | IMPORTED_NAME RdKafka 65 | IMPORTED_LOCATION "${RdKafka_LIBRARY_PATH}" 66 | INTERFACE_INCLUDE_DIRECTORIES "${RdKafka_INCLUDE_DIR}" 67 | INTERFACE_LINK_LIBRARIES "${RDKAFKA_DEPENDENCIES}") 68 | message(STATUS "Found valid rdkafka version") 69 | mark_as_advanced( 70 | RDKAFKA_LIBRARY 71 | RdKafka_INCLUDE_DIR 72 | RdKafka_LIBRARY_PATH 73 | ) 74 | else() 75 | message(FATAL_ERROR "Failed to find valid rdkafka version") 76 | endif() 77 | -------------------------------------------------------------------------------- /cmake/cmake_uninstall.cmake.in: -------------------------------------------------------------------------------- 1 | # Taken from https://cmake.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F 2 | 3 | if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") 4 | message(FATAL_ERROR "Cannot find install manifest: @CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") 5 | endif(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") 6 | 7 | file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) 8 | string(REGEX REPLACE "\n" ";" files "${files}") 9 | foreach(file ${files}) 10 | message(STATUS "Uninstalling $ENV{DESTDIR}${file}") 11 | if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") 12 | exec_program( 13 | "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" 14 | OUTPUT_VARIABLE rm_out 15 | RETURN_VALUE rm_retval 16 | ) 17 | if(NOT "${rm_retval}" STREQUAL 0) 18 | message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") 19 | endif(NOT "${rm_retval}" STREQUAL 0) 20 | else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS 
"$ENV{DESTDIR}${file}") 21 | message(STATUS "File $ENV{DESTDIR}${file} does not exist.") 22 | endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") 23 | endforeach(file) 24 | -------------------------------------------------------------------------------- /cmake/config.cmake.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_INIT@ 2 | 3 | include(CMakeFindDependencyMacro) 4 | 5 | # Add FindRdKafka.cmake 6 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}") 7 | 8 | set(RDKAFKA_MIN_VERSION_HEX "@RDKAFKA_MIN_VERSION_HEX@") 9 | 10 | # Find boost optional 11 | find_dependency(Boost REQUIRED) 12 | 13 | # Try to find the RdKafka configuration file if present. 14 | # This will search default system locations as well as RdKafka_ROOT and RdKafka_DIR paths if specified. 15 | find_package(RdKafka QUIET CONFIG) 16 | set(RDKAFKA_TARGET_IMPORTS ${RdKafka_FOUND}) 17 | if (NOT RdKafka_FOUND) 18 | find_dependency(RdKafka REQUIRED MODULE) 19 | endif() 20 | 21 | include("${CMAKE_CURRENT_LIST_DIR}/@TARGET_EXPORT_NAME@.cmake") 22 | 23 | # Export 'CppKafka_ROOT' 24 | set_and_check(@PROJECT_NAME@_ROOT "@PACKAGE_CMAKE_INSTALL_PREFIX@") 25 | 26 | # Export 'CppKafka_INSTALL_INCLUDE_DIR' 27 | set_and_check(@PROJECT_NAME@_INSTALL_INCLUDE_DIR "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@") 28 | 29 | # Export 'CppKafka_INSTALL_LIB_DIR' 30 | set_and_check(@PROJECT_NAME@_INSTALL_LIB_DIR "@PACKAGE_CMAKE_INSTALL_LIBDIR@") 31 | 32 | # Validate installed components 33 | check_required_components("@PROJECT_NAME@") 34 | -------------------------------------------------------------------------------- /cmake/cppkafka.h.in: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_H 31 | #define CPPKAFKA_H 32 | 33 | @CPPKAFKA_HEADERS@ 34 | #endif 35 | -------------------------------------------------------------------------------- /cmake/cppkafka.pc.in: -------------------------------------------------------------------------------- 1 | prefix=@CMAKE_INSTALL_PREFIX@ 2 | exec_prefix=${prefix} 3 | libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ 4 | sharedlibdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ 5 | includedir=${prefix}/include 6 | 7 | Name: cppkafka 8 | Url: https://github.com/mfontanini/cppkafka 9 | Description: C++ wrapper library on top of RdKafka 10 | Version: @CPPKAFKA_VERSION@ 11 | Requires: 12 | Requires.private: @RDKAFKA_REQUIRES@ 13 | Libs: -L${libdir} -L${sharedlibdir} -lcppkafka 14 | Cflags: -I${includedir} -I${includedir}/cppkafka -I@Boost_INCLUDE_DIRS@ 15 | -------------------------------------------------------------------------------- /docs/mainpage.dox: -------------------------------------------------------------------------------- 1 | /** 2 | * \mainpage Documentation 3 | * 4 | * \section intro_sec Introduction 5 | * 6 | * cppkafka is a C++11 wrapper for rdkafka, an Apache Kafka client library. 7 | * 8 | * cppkafka provides a high level interface for producing and consuming Kafka 9 | * messages. 
10 | * 11 | */ 12 | -------------------------------------------------------------------------------- /examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include) 2 | 3 | add_custom_target(examples) 4 | macro(create_example example_name) 5 | string(REPLACE "_" "-" sanitized_name ${example_name}) 6 | add_executable(${sanitized_name} EXCLUDE_FROM_ALL "${example_name}_example.cpp") 7 | target_link_libraries(${sanitized_name} cppkafka RdKafka::rdkafka Boost::boost Boost::program_options) 8 | add_dependencies(examples ${sanitized_name}) 9 | endmacro() 10 | 11 | create_example(producer) 12 | create_example(buffered_producer) 13 | create_example(consumer) 14 | create_example(consumer_dispatcher) 15 | create_example(metadata) 16 | create_example(consumers_information) 17 | -------------------------------------------------------------------------------- /examples/buffered_producer_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "cppkafka/utils/buffered_producer.h" 5 | #include "cppkafka/configuration.h" 6 | 7 | using std::string; 8 | using std::exception; 9 | using std::getline; 10 | using std::cin; 11 | using std::cout; 12 | using std::endl; 13 | 14 | using cppkafka::BufferedProducer; 15 | using cppkafka::Configuration; 16 | using cppkafka::Topic; 17 | using cppkafka::MessageBuilder; 18 | using cppkafka::Message; 19 | 20 | namespace po = boost::program_options; 21 | 22 | int main(int argc, char* argv[]) { 23 | string brokers; 24 | string topic_name; 25 | int partition_value = -1; 26 | 27 | po::options_description options("Options"); 28 | options.add_options() 29 | ("help,h", "produce this help message") 30 | ("brokers,b", po::value(&brokers)->required(), 31 | "the kafka broker list") 32 | ("topic,t", po::value(&topic_name)->required(), 33 | "the topic in which to write 
to") 34 | ("partition,p", po::value(&partition_value), 35 | "the partition to write into (unassigned if not provided)") 36 | ; 37 | 38 | po::variables_map vm; 39 | 40 | try { 41 | po::store(po::command_line_parser(argc, argv).options(options).run(), vm); 42 | po::notify(vm); 43 | } 44 | catch (exception& ex) { 45 | cout << "Error parsing options: " << ex.what() << endl; 46 | cout << endl; 47 | cout << options << endl; 48 | return 1; 49 | } 50 | 51 | // Create a message builder for this topic 52 | MessageBuilder builder(topic_name); 53 | 54 | // Get the partition we want to write to. If no partition is provided, this will be 55 | // an unassigned one 56 | if (partition_value != -1) { 57 | builder.partition(partition_value); 58 | } 59 | 60 | // Construct the configuration 61 | Configuration config = { 62 | { "metadata.broker.list", brokers } 63 | }; 64 | 65 | // Create the producer 66 | BufferedProducer producer(config); 67 | 68 | // Set a produce success callback 69 | producer.set_produce_success_callback([](const Message& msg) { 70 | cout << "Successfully produced message with payload " << msg.get_payload() << endl; 71 | }); 72 | // Set a produce failure callback 73 | producer.set_produce_failure_callback([](const Message& msg) { 74 | cout << "Failed to produce message with payload " << msg.get_payload() << endl; 75 | // Return false so we stop trying to produce this message 76 | return false; 77 | }); 78 | 79 | cout << "Producing messages into topic " << topic_name << endl; 80 | 81 | // Now read lines and write them into kafka 82 | string line; 83 | while (getline(cin, line)) { 84 | // Set the payload on this builder 85 | builder.payload(line); 86 | 87 | // Add the message we've built to the buffered producer 88 | producer.add_message(builder); 89 | 90 | // Now flush so we: 91 | // * emit the buffered message 92 | // * poll the producer so we dispatch on delivery report callbacks and 93 | // therefore get the produce failure/success callbacks 94 | 
producer.flush(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /examples/consumer_dispatcher_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "cppkafka/consumer.h" 6 | #include "cppkafka/configuration.h" 7 | #include "cppkafka/utils/consumer_dispatcher.h" 8 | 9 | using std::string; 10 | using std::exception; 11 | using std::cout; 12 | using std::endl; 13 | using std::function; 14 | 15 | using cppkafka::Consumer; 16 | using cppkafka::ConsumerDispatcher; 17 | using cppkafka::Configuration; 18 | using cppkafka::Message; 19 | using cppkafka::TopicPartition; 20 | using cppkafka::TopicPartitionList; 21 | using cppkafka::Error; 22 | 23 | namespace po = boost::program_options; 24 | 25 | function on_signal; 26 | 27 | void signal_handler(int) { 28 | on_signal(); 29 | } 30 | 31 | // This example uses ConsumerDispatcher, a simple synchronous wrapper over a Consumer 32 | // to allow processing messages using pattern matching rather than writing a loop 33 | // and check if there's a message, if there's an error, etc. 
34 | int main(int argc, char* argv[]) { 35 | string brokers; 36 | string topic_name; 37 | string group_id; 38 | 39 | po::options_description options("Options"); 40 | options.add_options() 41 | ("help,h", "produce this help message") 42 | ("brokers,b", po::value(&brokers)->required(), 43 | "the kafka broker list") 44 | ("topic,t", po::value(&topic_name)->required(), 45 | "the topic in which to write to") 46 | ("group-id,g", po::value(&group_id)->required(), 47 | "the consumer group id") 48 | ; 49 | 50 | po::variables_map vm; 51 | 52 | try { 53 | po::store(po::command_line_parser(argc, argv).options(options).run(), vm); 54 | po::notify(vm); 55 | } 56 | catch (exception& ex) { 57 | cout << "Error parsing options: " << ex.what() << endl; 58 | cout << endl; 59 | cout << options << endl; 60 | return 1; 61 | } 62 | 63 | // Construct the configuration 64 | Configuration config = { 65 | { "metadata.broker.list", brokers }, 66 | { "group.id", group_id }, 67 | // Disable auto commit 68 | { "enable.auto.commit", false } 69 | }; 70 | 71 | // Create the consumer 72 | Consumer consumer(config); 73 | 74 | // Print the assigned partitions on assignment 75 | consumer.set_assignment_callback([](const TopicPartitionList& partitions) { 76 | cout << "Got assigned: " << partitions << endl; 77 | }); 78 | 79 | // Print the revoked partitions on revocation 80 | consumer.set_revocation_callback([](const TopicPartitionList& partitions) { 81 | cout << "Got revoked: " << partitions << endl; 82 | }); 83 | 84 | // Subscribe to the topic 85 | consumer.subscribe({ topic_name }); 86 | 87 | cout << "Consuming messages from topic " << topic_name << endl; 88 | 89 | // Create a consumer dispatcher 90 | ConsumerDispatcher dispatcher(consumer); 91 | 92 | // Stop processing on SIGINT 93 | on_signal = [&]() { 94 | dispatcher.stop(); 95 | }; 96 | signal(SIGINT, signal_handler); 97 | 98 | // Now run the dispatcher, providing a callback to handle messages, one to handle 99 | // errors and another one to handle 
EOF on a partition 100 | dispatcher.run( 101 | // Callback executed whenever a new message is consumed 102 | [&](Message msg) { 103 | // Print the key (if any) 104 | if (msg.get_key()) { 105 | cout << msg.get_key() << " -> "; 106 | } 107 | // Print the payload 108 | cout << msg.get_payload() << endl; 109 | // Now commit the message 110 | consumer.commit(msg); 111 | }, 112 | // Whenever there's an error (other than the EOF soft error) 113 | [](Error error) { 114 | cout << "[+] Received error notification: " << error << endl; 115 | }, 116 | // Whenever EOF is reached on a partition, print this 117 | [](ConsumerDispatcher::EndOfFile, const TopicPartition& topic_partition) { 118 | cout << "Reached EOF on partition " << topic_partition << endl; 119 | } 120 | ); 121 | } 122 | -------------------------------------------------------------------------------- /examples/consumer_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "cppkafka/consumer.h" 6 | #include "cppkafka/configuration.h" 7 | 8 | using std::string; 9 | using std::exception; 10 | using std::cout; 11 | using std::endl; 12 | 13 | using cppkafka::Consumer; 14 | using cppkafka::Configuration; 15 | using cppkafka::Message; 16 | using cppkafka::TopicPartitionList; 17 | 18 | namespace po = boost::program_options; 19 | 20 | bool running = true; 21 | 22 | int main(int argc, char* argv[]) { 23 | string brokers; 24 | string topic_name; 25 | string group_id; 26 | 27 | po::options_description options("Options"); 28 | options.add_options() 29 | ("help,h", "produce this help message") 30 | ("brokers,b", po::value(&brokers)->required(), 31 | "the kafka broker list") 32 | ("topic,t", po::value(&topic_name)->required(), 33 | "the topic in which to write to") 34 | ("group-id,g", po::value(&group_id)->required(), 35 | "the consumer group id") 36 | ; 37 | 38 | po::variables_map vm; 39 | 40 | try { 41 | 
po::store(po::command_line_parser(argc, argv).options(options).run(), vm); 42 | po::notify(vm); 43 | } 44 | catch (exception& ex) { 45 | cout << "Error parsing options: " << ex.what() << endl; 46 | cout << endl; 47 | cout << options << endl; 48 | return 1; 49 | } 50 | 51 | // Stop processing on SIGINT 52 | signal(SIGINT, [](int) { running = false; }); 53 | 54 | // Construct the configuration 55 | Configuration config = { 56 | { "metadata.broker.list", brokers }, 57 | { "group.id", group_id }, 58 | // Disable auto commit 59 | { "enable.auto.commit", false } 60 | }; 61 | 62 | // Create the consumer 63 | Consumer consumer(config); 64 | 65 | // Print the assigned partitions on assignment 66 | consumer.set_assignment_callback([](const TopicPartitionList& partitions) { 67 | cout << "Got assigned: " << partitions << endl; 68 | }); 69 | 70 | // Print the revoked partitions on revocation 71 | consumer.set_revocation_callback([](const TopicPartitionList& partitions) { 72 | cout << "Got revoked: " << partitions << endl; 73 | }); 74 | 75 | // Subscribe to the topic 76 | consumer.subscribe({ topic_name }); 77 | 78 | cout << "Consuming messages from topic " << topic_name << endl; 79 | 80 | // Now read lines and write them into kafka 81 | while (running) { 82 | // Try to consume a message 83 | Message msg = consumer.poll(); 84 | if (msg) { 85 | // If we managed to get a message 86 | if (msg.get_error()) { 87 | // Ignore EOF notifications from rdkafka 88 | if (!msg.is_eof()) { 89 | cout << "[+] Received error notification: " << msg.get_error() << endl; 90 | } 91 | } 92 | else { 93 | // Print the key (if any) 94 | if (msg.get_key()) { 95 | cout << msg.get_key() << " -> "; 96 | } 97 | // Print the payload 98 | cout << msg.get_payload() << endl; 99 | // Now commit the message 100 | consumer.commit(msg); 101 | } 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /examples/consumers_information_example.cpp: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "cppkafka/producer.h" 5 | #include "cppkafka/configuration.h" 6 | #include "cppkafka/group_information.h" 7 | #include "cppkafka/topic.h" 8 | 9 | using std::string; 10 | using std::exception; 11 | using std::vector; 12 | using std::cout; 13 | using std::endl; 14 | 15 | using cppkafka::Producer; 16 | using cppkafka::Exception; 17 | using cppkafka::Configuration; 18 | using cppkafka::Topic; 19 | using cppkafka::GroupInformation; 20 | using cppkafka::GroupMemberInformation; 21 | using cppkafka::MemberAssignmentInformation; 22 | 23 | namespace po = boost::program_options; 24 | 25 | int main(int argc, char* argv[]) { 26 | string brokers; 27 | string group_id; 28 | bool show_assignment = false; 29 | 30 | po::options_description options("Options"); 31 | options.add_options() 32 | ("help,h", "produce this help message") 33 | ("brokers,b", po::value(&brokers)->required(), 34 | "the kafka broker list") 35 | ("group-id,g", po::value(&group_id), 36 | "only fetch consumer group information for the specified one") 37 | ("assignment,a", po::value(&show_assignment)->implicit_value(true), 38 | "show topic/partition assignment for each consumer group") 39 | ; 40 | 41 | po::variables_map vm; 42 | 43 | try { 44 | po::store(po::command_line_parser(argc, argv).options(options).run(), vm); 45 | po::notify(vm); 46 | } 47 | catch (exception& ex) { 48 | cout << "Error parsing options: " << ex.what() << endl; 49 | cout << endl; 50 | cout << options << endl; 51 | return 1; 52 | } 53 | 54 | // Construct the configuration 55 | Configuration config = { 56 | { "metadata.broker.list", brokers }, 57 | // Disable auto commit 58 | { "enable.auto.commit", false } 59 | }; 60 | 61 | try { 62 | // Construct a producer 63 | Producer producer(config); 64 | 65 | // Fetch the group information 66 | vector groups = [&]() { 67 | if (!group_id.empty()) { 68 | return 
vector{producer.get_consumer_group(group_id)}; 69 | } 70 | else { 71 | return producer.get_consumer_groups(); 72 | } 73 | }(); 74 | 75 | if (groups.empty()) { 76 | cout << "Found no consumers" << endl; 77 | return 0; 78 | } 79 | cout << "Found the following consumers: " << endl; 80 | for (const GroupInformation& group : groups) { 81 | cout << "* \"" << group.get_name() << "\" having the following (" << 82 | group.get_members().size() << ") members: " << endl; 83 | for (const GroupMemberInformation& info : group.get_members()) { 84 | cout << " - " << info.get_member_id() << " @ " << info.get_client_host(); 85 | if (show_assignment) { 86 | MemberAssignmentInformation assignment(info.get_member_assignment()); 87 | cout << " has assigned: " << assignment.get_topic_partitions(); 88 | } 89 | cout << endl; 90 | } 91 | cout << endl; 92 | } 93 | } 94 | catch (const Exception& ex) { 95 | cout << "Error fetching group information: " << ex.what() << endl; 96 | } 97 | } -------------------------------------------------------------------------------- /examples/metadata_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "cppkafka/producer.h" 5 | #include "cppkafka/configuration.h" 6 | #include "cppkafka/metadata.h" 7 | #include "cppkafka/topic.h" 8 | 9 | using std::string; 10 | using std::exception; 11 | using std::cout; 12 | using std::endl; 13 | 14 | using cppkafka::Producer; 15 | using cppkafka::Exception; 16 | using cppkafka::Configuration; 17 | using cppkafka::Topic; 18 | using cppkafka::Metadata; 19 | using cppkafka::TopicMetadata; 20 | using cppkafka::BrokerMetadata; 21 | 22 | namespace po = boost::program_options; 23 | 24 | int main(int argc, char* argv[]) { 25 | string brokers; 26 | 27 | po::options_description options("Options"); 28 | options.add_options() 29 | ("help,h", "produce this help message") 30 | ("brokers,b", po::value(&brokers)->required(), 31 | "the kafka broker 
list") 32 | ; 33 | 34 | po::variables_map vm; 35 | 36 | try { 37 | po::store(po::command_line_parser(argc, argv).options(options).run(), vm); 38 | po::notify(vm); 39 | } 40 | catch (exception& ex) { 41 | cout << "Error parsing options: " << ex.what() << endl; 42 | cout << endl; 43 | cout << options << endl; 44 | return 1; 45 | } 46 | 47 | // Construct the configuration 48 | Configuration config = { 49 | { "metadata.broker.list", brokers }, 50 | }; 51 | 52 | try { 53 | // Construct a producer 54 | Producer producer(config); 55 | 56 | // Fetch the metadata 57 | Metadata metadata = producer.get_metadata(); 58 | 59 | // Iterate over brokers 60 | cout << "Found the following brokers: " << endl; 61 | for (const BrokerMetadata& broker : metadata.get_brokers()) { 62 | cout << "* " << broker.get_host() << endl; 63 | } 64 | cout << endl; 65 | 66 | // Iterate over topics 67 | cout << "Found the following topics: " << endl; 68 | for (const TopicMetadata& topic : metadata.get_topics()) { 69 | cout << "* " << topic.get_name() << ": " << topic.get_partitions().size() 70 | << " partitions" << endl; 71 | } 72 | } 73 | catch (const Exception& ex) { 74 | cout << "Error fetching metadata: " << ex.what() << endl; 75 | } 76 | } -------------------------------------------------------------------------------- /examples/producer_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "cppkafka/producer.h" 5 | #include "cppkafka/configuration.h" 6 | 7 | using std::string; 8 | using std::exception; 9 | using std::getline; 10 | using std::cin; 11 | using std::cout; 12 | using std::endl; 13 | 14 | using cppkafka::Producer; 15 | using cppkafka::Configuration; 16 | using cppkafka::Topic; 17 | using cppkafka::MessageBuilder; 18 | 19 | namespace po = boost::program_options; 20 | 21 | int main(int argc, char* argv[]) { 22 | string brokers; 23 | string topic_name; 24 | int partition_value = -1; 25 | 26 | 
po::options_description options("Options"); 27 | options.add_options() 28 | ("help,h", "produce this help message") 29 | ("brokers,b", po::value(&brokers)->required(), 30 | "the kafka broker list") 31 | ("topic,t", po::value(&topic_name)->required(), 32 | "the topic in which to write to") 33 | ("partition,p", po::value(&partition_value), 34 | "the partition to write into (unassigned if not provided)") 35 | ; 36 | 37 | po::variables_map vm; 38 | 39 | try { 40 | po::store(po::command_line_parser(argc, argv).options(options).run(), vm); 41 | po::notify(vm); 42 | } 43 | catch (exception& ex) { 44 | cout << "Error parsing options: " << ex.what() << endl; 45 | cout << endl; 46 | cout << options << endl; 47 | return 1; 48 | } 49 | 50 | // Create a message builder for this topic 51 | MessageBuilder builder(topic_name); 52 | 53 | // Get the partition we want to write to. If no partition is provided, this will be 54 | // an unassigned one 55 | if (partition_value != -1) { 56 | builder.partition(partition_value); 57 | } 58 | 59 | // Construct the configuration 60 | Configuration config = { 61 | { "metadata.broker.list", brokers } 62 | }; 63 | 64 | // Create the producer 65 | Producer producer(config); 66 | 67 | cout << "Producing messages into topic " << topic_name << endl; 68 | 69 | // Now read lines and write them into kafka 70 | string line; 71 | while (getline(cin, line)) { 72 | // Set the payload on this builder 73 | builder.payload(line); 74 | 75 | // Actually produce the message we've built 76 | producer.produce(builder); 77 | } 78 | 79 | // Flush all produced messages 80 | producer.flush(); 81 | } 82 | -------------------------------------------------------------------------------- /include/cppkafka/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(CPPKAFKA_HEADER "${CMAKE_CURRENT_BINARY_DIR}/cppkafka.h") 2 | 3 | # Local function to auto-generate main cppkafka.h header file 4 | function(make_cppkafka_header) 5 | 
file(GLOB INCLUDE_HEADERS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h" "utils/*.h") 6 | list(SORT INCLUDE_HEADERS) 7 | foreach(header ${INCLUDE_HEADERS}) 8 | if (NOT ${header} MATCHES "cppkafka.h") 9 | SET(CPPKAFKA_HEADERS "${CPPKAFKA_HEADERS}#include \n") 10 | endif() 11 | endforeach() 12 | 13 | #create file from template 14 | configure_file("${PROJECT_SOURCE_DIR}/cmake/cppkafka.h.in" "${CPPKAFKA_HEADER}" @ONLY) 15 | endfunction() 16 | 17 | # Run file generation function 18 | make_cppkafka_header() 19 | 20 | # Install headers including the auto-generated cppkafka.h 21 | file(GLOB INCLUDE_FILES "*.h") 22 | file(GLOB UTILS_INCLUDE_FILES "utils/*.h") 23 | file(GLOB DETAIL_INCLUDE_FILES "detail/*.h") 24 | install( 25 | FILES ${INCLUDE_FILES} 26 | DESTINATION include/cppkafka/ 27 | COMPONENT Headers 28 | ) 29 | install( 30 | FILES ${UTILS_INCLUDE_FILES} 31 | DESTINATION include/cppkafka/utils/ 32 | COMPONENT Headers 33 | ) 34 | install( 35 | FILES ${DETAIL_INCLUDE_FILES} 36 | DESTINATION include/cppkafka/detail/ 37 | COMPONENT Headers 38 | ) 39 | install( 40 | FILES "${CPPKAFKA_HEADER}" 41 | DESTINATION include/cppkafka/ 42 | COMPONENT Headers 43 | ) 44 | -------------------------------------------------------------------------------- /include/cppkafka/clonable_ptr.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
namespace cppkafka {

/**
 * \brief Smart pointer which allows copying via a clone functor.
 *
 * Wraps a std::unique_ptr<T, Deleter> and adds copy semantics: copying a
 * ClonablePtr invokes the Cloner on the wrapped pointer to produce a new
 * owned object. If the cloner is null (contextually false), copies share
 * the raw pointer — callers must then ensure single ownership themselves.
 */
template <typename T, typename Deleter, typename Cloner>
class ClonablePtr {
public:
    /**
     * \brief Creates an instance
     *
     * \param ptr The pointer to be wrapped
     * \param deleter The deleter functor
     * \param cloner The clone functor
     */
    ClonablePtr(T* ptr, const Deleter& deleter, const Cloner& cloner)
    : handle_(ptr, deleter), cloner_(cloner) {

    }

    /**
     * \brief Copies the given ClonablePtr
     *
     * Cloning will be done by invoking the Cloner type
     *
     * \param rhs The pointer to be copied
     */
    ClonablePtr(const ClonablePtr& rhs)
    : handle_(std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter())),
      cloner_(rhs.get_cloner()) {

    }

    /**
     * \brief Copies and assigns the given pointer
     *
     * \param rhs The pointer to be copied
     */
    ClonablePtr& operator=(const ClonablePtr& rhs) {
        if (this != &rhs) { // self-assignment would clone from a freed pointer
            handle_ = std::unique_ptr<T, Deleter>(rhs.try_clone(), rhs.get_deleter());
            cloner_ = rhs.get_cloner();
        }
        return *this;
    }

    ClonablePtr(ClonablePtr&&) = default;
    ClonablePtr& operator=(ClonablePtr&&) = default;
    ~ClonablePtr() = default;

    /**
     * \brief Getter for the internal pointer
     */
    T* get() const {
        return handle_.get();
    }

    /**
     * \brief Releases ownership of the internal pointer
     */
    T* release() {
        return handle_.release();
    }

    /**
     * \brief Reset the internal pointer to a new one
     */
    void reset(T* ptr) {
        handle_.reset(ptr);
    }

    /**
     * \brief Get the deleter
     */
    const Deleter& get_deleter() const {
        return handle_.get_deleter();
    }

    /**
     * \brief Get the cloner
     */
    const Cloner& get_cloner() const {
        return cloner_;
    }

    /**
     * \brief Indicates whether this ClonablePtr instance is valid (not null)
     */
    explicit operator bool() const {
        return static_cast<bool>(handle_);
    }
private:
    // Clone only if a cloner was supplied; otherwise hand out the same pointer
    T* try_clone() const {
        return cloner_ ? cloner_(get()) : get();
    }

    std::unique_ptr<T, Deleter> handle_;
    Cloner cloner_;
};

} // cppkafka
11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_CONFIGURATION_BASE_H 31 | #define CPPKAFKA_CONFIGURATION_BASE_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include "exceptions.h" 37 | #include "configuration_option.h" 38 | 39 | namespace cppkafka { 40 | 41 | template 42 | class ConfigurationBase { 43 | private: 44 | template 45 | struct Type2Type { }; 46 | public: 47 | /** 48 | * Sets a bool value 49 | */ 50 | Concrete& set(const std::string& name, bool value) { 51 | return proxy_set(name, value ? 
"true" : "false"); 52 | } 53 | 54 | /** 55 | * Sets a value of any integral value 56 | */ 57 | template ::value>::type> 59 | Concrete& set(const std::string& name, T value) { 60 | return proxy_set(name, std::to_string(value)); 61 | } 62 | 63 | /** 64 | * Sets a cstring value 65 | */ 66 | Concrete& set(const std::string& name, const char* value) { 67 | return proxy_set(name, value); 68 | } 69 | 70 | /** 71 | * Sets a list of options 72 | */ 73 | Concrete& set(const std::vector& options) { 74 | for (const auto& option : options) { 75 | proxy_set(option.get_key(), option.get_value()); 76 | } 77 | return static_cast(*this); 78 | } 79 | 80 | /** 81 | * \brief Gets a value, converting it to the given type. 82 | * 83 | * If the configuration option is not found, then ConfigOptionNotFound is thrown. 84 | * 85 | * If the configuration value can't be converted to the given type, then 86 | * InvalidConfigOptionType is thrown. 87 | * 88 | * Valid conversion types: 89 | * * std::string 90 | * * bool 91 | * * int 92 | */ 93 | template 94 | T get(const std::string& name) const { 95 | std::string value = static_cast(*this).get(name); 96 | return convert(value, Type2Type()); 97 | } 98 | protected: 99 | static std::map parse_dump(const char** values, size_t count) { 100 | std::map output; 101 | for (size_t i = 0; i < count; i += 2) { 102 | output[values[i]] = values[i + 1]; 103 | } 104 | return output; 105 | } 106 | private: 107 | Concrete& proxy_set(const std::string& name, const std::string& value) { 108 | return static_cast(*this).set(name, value); 109 | } 110 | 111 | static std::string convert(const std::string& value, Type2Type) { 112 | return value; 113 | } 114 | 115 | static bool convert(const std::string& value, Type2Type) { 116 | if (value == "true") { 117 | return true; 118 | } 119 | else if (value == "false") { 120 | return false; 121 | } 122 | else { 123 | throw InvalidConfigOptionType(value, "bool"); 124 | } 125 | } 126 | 127 | static int convert(const std::string& 
value, Type2Type) { 128 | try { 129 | return std::stoi(value); 130 | } 131 | catch (std::exception&) { 132 | throw InvalidConfigOptionType(value, "int"); 133 | } 134 | } 135 | }; 136 | 137 | } // cppkafka 138 | 139 | #endif // CPPKAFKA_CONFIGURATION_BASE_H 140 | -------------------------------------------------------------------------------- /include/cppkafka/configuration_option.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_CONFIGURATION_OPTION_H 31 | #define CPPKAFKA_CONFIGURATION_OPTION_H 32 | 33 | #include 34 | #include 35 | #include "macros.h" 36 | 37 | namespace cppkafka { 38 | 39 | /** 40 | * Wrapper over a configuration (key, value) pair 41 | */ 42 | class CPPKAFKA_API ConfigurationOption { 43 | public: 44 | /** 45 | * Construct using a std::string value 46 | */ 47 | ConfigurationOption(const std::string& key, const std::string& value); 48 | 49 | /** 50 | * Construct using a const char* value 51 | */ 52 | ConfigurationOption(const std::string& key, const char* value); 53 | 54 | /** 55 | * Construct using a bool value 56 | */ 57 | ConfigurationOption(const std::string& key, bool value); 58 | 59 | /** 60 | * Construct using any integral value 61 | */ 62 | template ::value>::type> 64 | ConfigurationOption(const std::string& key, T value) 65 | : ConfigurationOption(key, std::to_string(value)) { 66 | 67 | } 68 | 69 | /** 70 | * Gets the key 71 | */ 72 | const std::string& get_key() const; 73 | 74 | /** 75 | * Gets the value 76 | */ 77 | const std::string& get_value() const; 78 | private: 79 | std::string key_; 80 | std::string value_; 81 | }; 82 | 83 | } // cppkafka 84 | 85 | #endif // CPPKAFKA_CONFIGURATION_OPTION_H 86 | -------------------------------------------------------------------------------- /include/cppkafka/cppkafka.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 
11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_H 31 | #define CPPKAFKA_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | #include 51 | #include 52 | #include 53 | #include 54 | #include 55 | #include 56 | #include 57 | #include 58 | #include 59 | #include 60 | #include 61 | #include 62 | #include 63 | #include 64 | #include 65 | #include 66 | #include 67 | #include 68 | 69 | #endif 70 | -------------------------------------------------------------------------------- /include/cppkafka/detail/callback_invoker.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_CALLBACK_INVOKER_H 31 | #define CPPKAFKA_CALLBACK_INVOKER_H 32 | 33 | #include 34 | #include 35 | #include "../logging.h" 36 | #include "../kafka_handle_base.h" 37 | 38 | namespace cppkafka { 39 | 40 | // Error values 41 | template 42 | T error_value() { return T{}; } 43 | 44 | template<> inline 45 | void error_value() {}; 46 | 47 | template<> inline 48 | bool error_value() { return false; } 49 | 50 | template<> inline 51 | int error_value() { return -1; } 52 | 53 | /** 54 | * \brief Wraps an std::function object and runs it while preventing all exceptions from escaping 55 | * \tparam Func An std::function object 56 | */ 57 | template 58 | class CallbackInvoker 59 | { 60 | public: 61 | using RetType = typename Func::result_type; 62 | using LogCallback = std::function; 66 | CallbackInvoker(const char* callback_name, 67 | const Func& callback, 68 | KafkaHandleBase* handle) 69 | : callback_name_(callback_name), 70 | callback_(callback), 71 | handle_(handle) { 72 | } 73 | 74 | explicit operator bool() const { 75 | return (bool)callback_; 76 | } 77 | 78 | template 79 | RetType operator()(Args&&... args) const { 80 | static const char* library_name = "cppkafka"; 81 | std::ostringstream error_msg; 82 | try { 83 | if (callback_) { 84 | return callback_(std::forward(args)...); 85 | } 86 | return error_value(); 87 | } 88 | catch (const std::exception& ex) { 89 | if (handle_) { 90 | error_msg << "Caught exception in " << callback_name_ << " callback: " << ex.what(); 91 | } 92 | } 93 | catch (...) { 94 | if (handle_) { 95 | error_msg << "Caught unknown exception in " << callback_name_ << " callback"; 96 | } 97 | } 98 | // Log error 99 | if (handle_) { 100 | if (handle_->get_configuration().get_log_callback()) { 101 | try { 102 | // Log it 103 | handle_->get_configuration().get_log_callback()(*handle_, 104 | static_cast(LogLevel::LogErr), 105 | library_name, 106 | error_msg.str()); 107 | } 108 | catch (...) 
{} // sink everything 109 | } 110 | else { 111 | rd_kafka_log_print(handle_->get_handle(), 112 | static_cast(LogLevel::LogErr), 113 | library_name, 114 | error_msg.str().c_str()); 115 | } 116 | } 117 | return error_value(); 118 | } 119 | private: 120 | const char* callback_name_; 121 | const Func& callback_; 122 | KafkaHandleBase* handle_; 123 | }; 124 | 125 | } 126 | 127 | #endif 128 | -------------------------------------------------------------------------------- /include/cppkafka/detail/endianness.h: -------------------------------------------------------------------------------- 1 | 2 | // "License": Public Domain 3 | // I, Mathias Panzenböck, place this file hereby into the public domain. Use it at your own risk for whatever you like. 4 | // In case there are jurisdictions that don't support putting things in the public domain you can also consider it to 5 | // be "dual licensed" under the BSD, MIT and Apache licenses, if you want to. This code is trivial anyway. Consider it 6 | // an example on how to get the endian conversion functions on different platforms. 
#ifndef CPPKAFKA_PORTABLE_ENDIAN_H
#define CPPKAFKA_PORTABLE_ENDIAN_H

// Provides htobe16/htole16/be16toh/le16toh (and 32/64-bit variants) on every
// supported platform by mapping to each OS's native byte-order facilities.
// NOTE(review): the #include targets were stripped during extraction; they are
// reconstructed here from the standard portable-endian pattern — confirm
// against upstream.

#if (defined(_WIN16) || defined(_WIN32) || defined(_WIN64)) && !defined(__WINDOWS__)

#	define __WINDOWS__

#endif

#if defined(__linux__) || defined(__CYGWIN__) || defined(__sun)

#	include <endian.h>

#elif defined(__APPLE__)

#	include <libkern/OSByteOrder.h>

#	define htobe16(x) OSSwapHostToBigInt16(x)
#	define htole16(x) OSSwapHostToLittleInt16(x)
#	define be16toh(x) OSSwapBigToHostInt16(x)
#	define le16toh(x) OSSwapLittleToHostInt16(x)

#	define htobe32(x) OSSwapHostToBigInt32(x)
#	define htole32(x) OSSwapHostToLittleInt32(x)
#	define be32toh(x) OSSwapBigToHostInt32(x)
#	define le32toh(x) OSSwapLittleToHostInt32(x)

#	define htobe64(x) OSSwapHostToBigInt64(x)
#	define htole64(x) OSSwapHostToLittleInt64(x)
#	define be64toh(x) OSSwapBigToHostInt64(x)
#	define le64toh(x) OSSwapLittleToHostInt64(x)

#	define __BYTE_ORDER    BYTE_ORDER
#	define __BIG_ENDIAN    BIG_ENDIAN
#	define __LITTLE_ENDIAN LITTLE_ENDIAN
#	define __PDP_ENDIAN    PDP_ENDIAN

#elif defined(__OpenBSD__) || defined(__FreeBSD__)

#	include <sys/endian.h>

#elif defined(__NetBSD__) || defined(__DragonFly__)

#	include <sys/endian.h>

#	define be16toh(x) betoh16(x)
#	define le16toh(x) letoh16(x)

#	define be32toh(x) betoh32(x)
#	define le32toh(x) letoh32(x)

#	define be64toh(x) betoh64(x)
#	define le64toh(x) letoh64(x)

#elif defined(__WINDOWS__)

#	include <winsock2.h>

#	if BYTE_ORDER == LITTLE_ENDIAN

#		define htobe16(x) htons(x)
#		define htole16(x) (x)
#		define be16toh(x) ntohs(x)
#		define le16toh(x) (x)

#		define htobe32(x) htonl(x)
#		define htole32(x) (x)
#		define be32toh(x) ntohl(x)
#		define le32toh(x) (x)

#		define htobe64(x) htonll(x)
#		define htole64(x) (x)
#		define be64toh(x) ntohll(x)
#		define le64toh(x) (x)

#	elif BYTE_ORDER == BIG_ENDIAN

		/* that would be xbox 360 */
#		define htobe16(x) (x)
#		define htole16(x) __builtin_bswap16(x)
#		define be16toh(x) (x)
#		define le16toh(x) __builtin_bswap16(x)

#		define htobe32(x) (x)
#		define htole32(x) __builtin_bswap32(x)
#		define be32toh(x) (x)
#		define le32toh(x) __builtin_bswap32(x)

#		define htobe64(x) (x)
#		define htole64(x) __builtin_bswap64(x)
#		define be64toh(x) (x)
#		define le64toh(x) __builtin_bswap64(x)

#	else

#		error byte order not supported

#	endif

#	define __BYTE_ORDER    BYTE_ORDER
#	define __BIG_ENDIAN    BIG_ENDIAN
#	define __LITTLE_ENDIAN LITTLE_ENDIAN
#	define __PDP_ENDIAN    PDP_ENDIAN

#else

#	error platform not supported

#endif

#endif // CPPKAFKA_PORTABLE_ENDIAN_H
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_ERROR_H 31 | #define CPPKAFKA_ERROR_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include "macros.h" 37 | 38 | namespace cppkafka { 39 | 40 | /** 41 | * Abstraction for an rdkafka error 42 | */ 43 | class CPPKAFKA_API Error { 44 | public: 45 | /** 46 | * @brief Constructs an error object with RD_KAFKA_RESP_ERR_NO_ERROR 47 | */ 48 | Error() = default; 49 | /** 50 | * Constructs an error object 51 | */ 52 | Error(rd_kafka_resp_err_t error); 53 | 54 | /** 55 | * Gets the error value 56 | */ 57 | rd_kafka_resp_err_t get_error() const; 58 | 59 | /** 60 | * Gets the error string 61 | */ 62 | std::string to_string() const; 63 | 64 | /** 65 | * Checks whether this error contains an actual error (and not RD_KAFKA_RESP_ERR_NO_ERROR) 66 | */ 67 | explicit operator bool() const; 68 | 69 | /** 70 | * Compares this error for equality 71 | */ 72 | bool operator==(const Error& rhs) const; 73 | 74 | /** 75 | * Compares this error for inequality 76 | */ 77 | bool operator!=(const Error& rhs) const; 78 | 79 | /** 80 | * Writes this error's string representation into a stream 81 | */ 82 | CPPKAFKA_API friend 
std::ostream& operator<<(std::ostream& output, const Error& rhs); 83 | private: 84 | rd_kafka_resp_err_t error_{RD_KAFKA_RESP_ERR_NO_ERROR}; 85 | }; 86 | 87 | } // cppkafka 88 | 89 | #endif // CPPKAFKA_ERROR_H 90 | -------------------------------------------------------------------------------- /include/cppkafka/exceptions.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_EXCEPTIONS_H 31 | #define CPPKAFKA_EXCEPTIONS_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include "macros.h" 37 | #include "error.h" 38 | 39 | namespace cppkafka { 40 | 41 | /** 42 | * Base class for all cppkafka exceptions 43 | */ 44 | class CPPKAFKA_API Exception : public std::exception { 45 | public: 46 | Exception(std::string message); 47 | 48 | const char* what() const noexcept; 49 | private: 50 | std::string message_; 51 | }; 52 | 53 | /** 54 | * A configuration related error 55 | */ 56 | class CPPKAFKA_API ConfigException : public Exception { 57 | public: 58 | ConfigException(const std::string& config_name, const std::string& error); 59 | }; 60 | 61 | /** 62 | * Indicates a configuration option was not set 63 | */ 64 | class CPPKAFKA_API ConfigOptionNotFound : public Exception { 65 | public: 66 | ConfigOptionNotFound(const std::string& config_name); 67 | }; 68 | 69 | /** 70 | * Indicates a configuration option value could not be converted to a specified type 71 | */ 72 | class CPPKAFKA_API InvalidConfigOptionType : public Exception { 73 | public: 74 | InvalidConfigOptionType(const std::string& config_name, const std::string& type); 75 | }; 76 | 77 | /** 78 | * Indicates something that was being looked up failed to be found 79 | */ 80 | class CPPKAFKA_API ElementNotFound : public Exception { 81 | public: 82 | ElementNotFound(const std::string& element_type, const std::string& name); 83 | }; 84 | 85 | /** 86 | * Indicates something that was incorrectly parsed 87 | */ 88 | class CPPKAFKA_API ParseException : public Exception { 89 | public: 90 | ParseException(const std::string& message); 91 | }; 92 | 93 | /** 94 | * Indicates something had an unexpected versiom 95 | */ 96 | class CPPKAFKA_API UnexpectedVersion : public Exception { 97 | public: 98 | UnexpectedVersion(uint32_t version); 99 | }; 100 | 101 | /** 102 | * A generic rdkafka handle error 103 | */ 104 | class CPPKAFKA_API HandleException : public 
Exception { 105 | public: 106 | HandleException(Error error); 107 | 108 | Error get_error() const; 109 | private: 110 | Error error_; 111 | }; 112 | 113 | /** 114 | * Consumer exception 115 | */ 116 | class CPPKAFKA_API ConsumerException : public Exception { 117 | public: 118 | ConsumerException(Error error); 119 | 120 | Error get_error() const; 121 | private: 122 | Error error_; 123 | }; 124 | 125 | /** 126 | * Queue exception for rd_kafka_queue_t errors 127 | */ 128 | class CPPKAFKA_API QueueException : public Exception { 129 | public: 130 | QueueException(Error error); 131 | 132 | Error get_error() const; 133 | private: 134 | Error error_; 135 | }; 136 | 137 | /** 138 | * Backoff performer has no more retries left for a specific action. 139 | */ 140 | class CPPKAFKA_API ActionTerminatedException : public Exception { 141 | public: 142 | ActionTerminatedException(const std::string& error); 143 | }; 144 | 145 | } // cppkafka 146 | 147 | #endif // CPPKAFKA_EXCEPTIONS_H 148 | -------------------------------------------------------------------------------- /include/cppkafka/group_information.h: -------------------------------------------------------------------------------- 1 | #ifndef CPPKAFKA_GROUP_INFORMATION_H 2 | #define CPPKAFKA_GROUP_INFORMATION_H 3 | 4 | #include 5 | #include 6 | #include "macros.h" 7 | #include "metadata.h" 8 | #include "error.h" 9 | #include "topic_partition_list.h" 10 | 11 | namespace cppkafka { 12 | 13 | /** 14 | * \brief Parses the member assignment information 15 | * 16 | * This class parses the data in GroupMemberInformation::get_member_assignment. 
17 | */ 18 | class CPPKAFKA_API MemberAssignmentInformation { 19 | public: 20 | /** 21 | * Constructs an instance 22 | */ 23 | MemberAssignmentInformation(const std::vector& data); 24 | 25 | /** 26 | * Gets the version 27 | */ 28 | uint16_t get_version() const; 29 | 30 | /** 31 | * Gets the topic/partition assignment 32 | */ 33 | const TopicPartitionList& get_topic_partitions() const; 34 | private: 35 | uint16_t version_; 36 | TopicPartitionList topic_partitions_; 37 | }; 38 | 39 | /** 40 | * \brief Represents the information about a specific consumer group member 41 | */ 42 | class CPPKAFKA_API GroupMemberInformation { 43 | public: 44 | /** 45 | * Constructs an instance using the provided information 46 | * 47 | * \param info The information pointer 48 | */ 49 | GroupMemberInformation(const rd_kafka_group_member_info& info); 50 | 51 | /** 52 | * Gets the member id 53 | */ 54 | const std::string& get_member_id() const; 55 | 56 | /** 57 | * Gets the client id 58 | */ 59 | const std::string& get_client_id() const; 60 | 61 | /** 62 | * Gets the client host 63 | */ 64 | const std::string& get_client_host() const; 65 | 66 | /** 67 | * Gets the member metadata 68 | */ 69 | const std::vector& get_member_metadata() const; 70 | 71 | /** 72 | * Gets the member assignment 73 | */ 74 | const std::vector& get_member_assignment() const; 75 | private: 76 | std::string member_id_; 77 | std::string client_id_; 78 | std::string client_host_; 79 | std::vector member_metadata_; 80 | std::vector member_assignment_; 81 | }; 82 | 83 | /** 84 | * \brief Represents the information about a specific consumer group 85 | */ 86 | class CPPKAFKA_API GroupInformation { 87 | public: 88 | /** 89 | * Constructs an instance using the provided information. 
90 | * 91 | * \param info The information pointer 92 | */ 93 | GroupInformation(const rd_kafka_group_info& info); 94 | 95 | /** 96 | * Gets the broker metadata 97 | */ 98 | const BrokerMetadata& get_broker() const; 99 | 100 | /** 101 | * Gets the group name 102 | */ 103 | const std::string& get_name() const; 104 | 105 | /** 106 | * Gets the broker-originated error 107 | */ 108 | Error get_error() const; 109 | 110 | /** 111 | * Gets the group state 112 | */ 113 | const std::string& get_state() const; 114 | 115 | /** 116 | * Gets the group protocol type 117 | */ 118 | const std::string& get_protocol_type() const; 119 | 120 | /** 121 | * Gets the group protocol 122 | */ 123 | const std::string& get_protocol() const; 124 | 125 | /** 126 | * Gets the group members 127 | */ 128 | const std::vector& get_members() const; 129 | private: 130 | BrokerMetadata broker_; 131 | std::string name_; 132 | Error error_; 133 | std::string state_; 134 | std::string protocol_type_; 135 | std::string protocol_; 136 | std::vector members_; 137 | }; 138 | 139 | using GroupInformationList = std::vector; 140 | 141 | } // cppkafka 142 | 143 | #endif // CPPKAFKA_GROUP_INFORMATION_H 144 | -------------------------------------------------------------------------------- /include/cppkafka/logging.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_LOGGING_H 31 | #define CPPKAFKA_LOGGING_H 32 | 33 | namespace cppkafka { 34 | 35 | // Based on syslog.h levels 36 | enum class LogLevel : int { 37 | LogEmerg = 0, /* system is unusable */ 38 | LogAlert = 1, /* action must be taken immediately */ 39 | LogCrit = 2, /* critical conditions */ 40 | LogErr = 3, /* error conditions */ 41 | LogWarning = 4, /* warning conditions */ 42 | LogNotice = 5, /* normal but significant condition */ 43 | LogInfo = 6, /* informational */ 44 | LogDebug = 7 /* debug-level messages */ 45 | }; 46 | 47 | } //cppkafka 48 | 49 | #endif //CPPKAFKA_LOGGING_H 50 | -------------------------------------------------------------------------------- /include/cppkafka/macros.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CPPKAFKA_MACROS_H
#define CPPKAFKA_MACROS_H

// Symbol export/import decoration, only meaningful for Windows shared-library
// builds of cppkafka.
#if defined(_WIN32) && !defined(CPPKAFKA_STATIC)
    // Export symbols while building the library, import them when consuming it
    #ifdef cppkafka_EXPORTS
        #define CPPKAFKA_API __declspec(dllexport)
    #else
        #define CPPKAFKA_API __declspec(dllimport)
    #endif // cppkafka_EXPORTS
#else
    // Everywhere else the decoration expands to nothing
    #define CPPKAFKA_API
#endif // _WIN32 && !CPPKAFKA_STATIC

// Minimum librdkafka versions (hex-encoded) at which certain features or
// bug fixes become available.
// See: https://github.com/edenhill/librdkafka/issues/1792
#define RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION      0x000b0500 //v0.11.5.00
#define RD_KAFKA_HEADERS_SUPPORT_VERSION         0x000b0402 //v0.11.4.02
#define RD_KAFKA_ADMIN_API_SUPPORT_VERSION       0x000b0500 //v0.11.5.00
#define RD_KAFKA_MESSAGE_LATENCY_SUPPORT_VERSION 0x000b0000 //v0.11.0.00
#define RD_KAFKA_EVENT_STATS_SUPPORT_VERSION     0x000b0000 //v0.11.0.00
#define RD_KAFKA_MESSAGE_STATUS_SUPPORT_VERSION  0x01000002 //v1.0.0.02
#define RD_KAFKA_STORE_OFFSETS_SUPPORT_VERSION   0x00090501 //v0.9.5.01
#define RD_KAFKA_DESTROY_FLAGS_SUPPORT_VERSION   0x000b0600 //v0.11.6

#endif // CPPKAFKA_MACROS_H

/* (beginning of include/cppkafka/message_internal.h)
 *
 * Copyright (c) 2017, Matias Fontanini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 */
11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_MESSAGE_INTERNAL_H 31 | #define CPPKAFKA_MESSAGE_INTERNAL_H 32 | 33 | #include 34 | #include "macros.h" 35 | 36 | namespace cppkafka { 37 | 38 | class Message; 39 | 40 | class Internal { 41 | public: 42 | virtual ~Internal() = default; 43 | }; 44 | using InternalPtr = std::shared_ptr; 45 | 46 | /** 47 | * \brief Private message data structure 48 | */ 49 | class CPPKAFKA_API MessageInternal { 50 | public: 51 | MessageInternal(void* user_data, std::shared_ptr internal); 52 | static std::unique_ptr load(Message& message); 53 | void* get_user_data() const; 54 | InternalPtr get_internal() const; 55 | private: 56 | void* user_data_; 57 | InternalPtr internal_; 58 | }; 59 | 60 | template 61 | class MessageInternalGuard { 62 | public: 63 | MessageInternalGuard(BuilderType& builder) 64 | : builder_(builder), 65 | user_data_(builder.user_data()) { 66 | if (builder_.internal()) { 67 | // Swap contents with user_data 68 | ptr_.reset(new MessageInternal(user_data_, builder_.internal())); 69 | builder_.user_data(ptr_.get()); //overwrite user data 70 | } 71 | } 72 | ~MessageInternalGuard() { 73 | //Restore user data 74 | builder_.user_data(user_data_); 75 | } 76 | void release() { 77 | ptr_.release(); 78 | } 79 | private: 80 | BuilderType& builder_; 81 | std::unique_ptr ptr_; 82 | void* user_data_; 83 | }; 84 | 85 | } 86 | 87 | #endif //CPPKAFKA_MESSAGE_INTERNAL_H 88 | -------------------------------------------------------------------------------- /include/cppkafka/message_timestamp.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_MESSAGE_TIMESTAMP_H 31 | #define CPPKAFKA_MESSAGE_TIMESTAMP_H 32 | 33 | #include 34 | #include 35 | #include "macros.h" 36 | 37 | namespace cppkafka { 38 | 39 | /** 40 | * Represents a message's timestamp 41 | */ 42 | class CPPKAFKA_API MessageTimestamp { 43 | friend class Message; 44 | public: 45 | /** 46 | * The timestamp type 47 | */ 48 | enum TimestampType { 49 | CREATE_TIME = RD_KAFKA_TIMESTAMP_CREATE_TIME, 50 | LOG_APPEND_TIME = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME 51 | }; 52 | 53 | /** 54 | * Gets the timestamp value. 
If the timestamp was created with a 'time_point', 55 | * the duration represents the number of milliseconds since epoch. 56 | */ 57 | std::chrono::milliseconds get_timestamp() const; 58 | 59 | /** 60 | * Gets the timestamp type 61 | */ 62 | TimestampType get_type() const; 63 | private: 64 | MessageTimestamp(std::chrono::milliseconds timestamp, TimestampType type); 65 | 66 | std::chrono::milliseconds timestamp_; 67 | TimestampType type_; 68 | }; 69 | 70 | } // cppkafka 71 | 72 | #endif //CPPKAFKA_MESSAGE_TIMESTAMP_H 73 | -------------------------------------------------------------------------------- /include/cppkafka/topic.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_TOPIC_H 31 | #define CPPKAFKA_TOPIC_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include "macros.h" 37 | 38 | namespace cppkafka { 39 | 40 | /** 41 | * \brief Represents a rdkafka topic 42 | * 43 | * This is a simple wrapper over a rd_kafka_topic_t* 44 | */ 45 | class CPPKAFKA_API Topic { 46 | public: 47 | /** 48 | * \brief Creates a Topic object that doesn't take ownership of the handle 49 | * 50 | * \param handle The handle to be used 51 | */ 52 | static Topic make_non_owning(rd_kafka_topic_t* handle); 53 | 54 | /** 55 | * \brief Constructs an empty topic 56 | * 57 | * Note that using any methods except Topic::get_handle on an empty topic is undefined 58 | * behavior 59 | */ 60 | Topic(); 61 | 62 | /** 63 | * \brief Constructs a topic using a handle 64 | * 65 | * This will take ownership of the handle 66 | * 67 | * \param handle The handle to be used 68 | */ 69 | Topic(rd_kafka_topic_t* handle); 70 | 71 | /** 72 | * Returns the topic name 73 | */ 74 | std::string get_name() const; 75 | 76 | /** 77 | * \brief Check if the partition is available 78 | * 79 | * This translates into a call to rd_kafka_topic_partition_available 80 | * 81 | * \param partition The partition to check 82 | */ 83 | bool is_partition_available(int partition) const; 84 | 85 | /** 86 | * Indicates whether this topic is valid (not null) 87 | */ 88 | explicit operator bool() const { 89 | 
return handle_ != nullptr; 90 | } 91 | 92 | /** 93 | * Returns the rdkakfa handle 94 | */ 95 | rd_kafka_topic_t* get_handle() const; 96 | private: 97 | using HandlePtr = std::unique_ptr; 98 | 99 | struct NonOwningTag { }; 100 | 101 | Topic(rd_kafka_topic_t* handle, NonOwningTag); 102 | 103 | HandlePtr handle_; 104 | }; 105 | 106 | } // cppkafka 107 | 108 | #endif // CPPKAFKA_TOPIC_H 109 | -------------------------------------------------------------------------------- /include/cppkafka/topic_configuration.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_TOPIC_CONFIGURATION_H 31 | #define CPPKAFKA_TOPIC_CONFIGURATION_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "clonable_ptr.h" 38 | #include "configuration_base.h" 39 | #include "macros.h" 40 | 41 | namespace cppkafka { 42 | 43 | class Topic; 44 | class Buffer; 45 | 46 | /** 47 | * \brief Represents the topic configuration 48 | * 49 | * ConfigurationBase provides some extra overloads for set 50 | */ 51 | class CPPKAFKA_API TopicConfiguration : public ConfigurationBase { 52 | public: 53 | /** 54 | * \brief Partitioner callback 55 | * 56 | * This has the same requirements as rdkafka's partitioner calback: 57 | * - *Must not* call any rd_kafka_*() functions except: 58 | * rd_kafka_topic_partition_available(). This is done via Topic::is_partition_available 59 | * - *Must not* block or execute for prolonged periods of time. 60 | * - *Must* return a value between 0 and partition_count-1, or the 61 | * special RD_KAFKA_PARTITION_UA value if partitioning 62 | * could not be performed. 
63 | */ 64 | using PartitionerCallback = std::function; 66 | 67 | using ConfigurationBase::set; 68 | using ConfigurationBase::get; 69 | 70 | /** 71 | * Default constructs a topic configuration object 72 | */ 73 | TopicConfiguration(); 74 | 75 | /** 76 | * Constructs a TopicConfiguration object using a list of options 77 | */ 78 | TopicConfiguration(const std::vector& options); 79 | 80 | /** 81 | * Constructs a TopicConfiguration object using a list of options 82 | */ 83 | TopicConfiguration(const std::initializer_list& options); 84 | 85 | /** 86 | * Sets an option 87 | * 88 | * \param name The name of the option 89 | * \param value The value of the option 90 | */ 91 | TopicConfiguration& set(const std::string& name, const std::string& value); 92 | 93 | /** 94 | * \brief Sets the partitioner callback 95 | * 96 | * This translates into a call to rd_kafka_topic_conf_set_partitioner_cb 97 | */ 98 | TopicConfiguration& set_partitioner_callback(PartitionerCallback callback); 99 | 100 | /** 101 | * \brief Sets the "this" pointer as the opaque pointer for this handle 102 | * 103 | * This method will be called by consumers/producers when the topic configuration object 104 | * has been put in a persistent memory location. Users of cppkafka do not need to use this. 
105 | */ 106 | TopicConfiguration& set_as_opaque(); 107 | 108 | /** 109 | * Gets the partitioner callback 110 | */ 111 | const PartitionerCallback& get_partitioner_callback() const; 112 | 113 | /** 114 | * Returns true iff the given property name has been set 115 | */ 116 | bool has_property(const std::string& name) const; 117 | 118 | /** 119 | * Gets an option's value 120 | * 121 | * \param name The option's name 122 | */ 123 | std::string get(const std::string& name) const; 124 | 125 | /** 126 | * Gets all options, including default values which are set by rdkafka 127 | */ 128 | std::map get_all() const; 129 | 130 | /** 131 | * Gets the rdkafka handle 132 | */ 133 | rd_kafka_topic_conf_t* get_handle() const; 134 | private: 135 | using HandlePtr = ClonablePtr; 138 | 139 | TopicConfiguration(rd_kafka_topic_conf_t* ptr); 140 | static HandlePtr make_handle(rd_kafka_topic_conf_t* ptr); 141 | 142 | HandlePtr handle_; 143 | PartitionerCallback partitioner_callback_; 144 | }; 145 | 146 | } // cppkafka 147 | 148 | #endif // CPPKAFKA_TOPIC_CONFIGURATION_H 149 | -------------------------------------------------------------------------------- /include/cppkafka/topic_partition.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_TOPIC_PARTITION_H 31 | #define CPPKAFKA_TOPIC_PARTITION_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include "macros.h" 37 | 38 | namespace cppkafka { 39 | 40 | /** 41 | * Represents a topic/partition 42 | */ 43 | class CPPKAFKA_API TopicPartition { 44 | public: 45 | /** 46 | * Special offsets enum 47 | */ 48 | enum Offset { 49 | OFFSET_BEGINNING = -2, 50 | OFFSET_END = -1, 51 | OFFSET_STORED = -1000, 52 | OFFSET_INVALID = -1001 53 | }; 54 | 55 | /** 56 | * Default constructs a topic/partition 57 | */ 58 | TopicPartition(); 59 | 60 | /** 61 | * \brief Constructs a topic/partition 62 | * 63 | * The partition value will be RD_KAFKA_OFFSET_INVALID 64 | * 65 | * \param topic The topic name 66 | */ 67 | TopicPartition(const char* topic); 68 | 69 | /** 70 | * \brief Constructs a topic/partition 71 | * 72 | * The partition value will be RD_KAFKA_OFFSET_INVALID 73 | * 74 | * \param topic The topic name 75 | */ 76 | TopicPartition(std::string topic); 77 | 78 | /** 79 | * Constructs a topic/partition 80 | * 81 | * \param topic The topic name 82 | * \param partition The partition to be used 83 | */ 84 
| TopicPartition(std::string topic, int partition); 85 | 86 | /** 87 | * Constructs a topic/partition 88 | * 89 | * \param topic The topic name 90 | * \param partition The partition to be used 91 | * \param offset The offset to be used 92 | */ 93 | TopicPartition(std::string topic, int partition, int64_t offset); 94 | 95 | /** 96 | * Gets the topic name 97 | */ 98 | const std::string& get_topic() const; 99 | 100 | /** 101 | * Gets the partition 102 | */ 103 | int get_partition() const; 104 | 105 | /** 106 | * Gets the offset 107 | */ 108 | int64_t get_offset() const; 109 | 110 | /** 111 | * @brief Sets the partition 112 | */ 113 | void set_partition(int partition); 114 | 115 | /** 116 | * Sets the offset 117 | */ 118 | void set_offset(int64_t offset); 119 | 120 | /** 121 | * Compare the (topic, partition) for less-than equality 122 | */ 123 | bool operator<(const TopicPartition& rhs) const; 124 | 125 | /** 126 | * Compare the (topic, partition) for equality 127 | */ 128 | bool operator==(const TopicPartition& rhs) const; 129 | 130 | /** 131 | * Compare the (topic, partition) for in-equality 132 | */ 133 | bool operator!=(const TopicPartition& rhs) const; 134 | 135 | /** 136 | * Print to a stream 137 | */ 138 | CPPKAFKA_API friend std::ostream& operator<<(std::ostream& output, const TopicPartition& rhs); 139 | private: 140 | std::string topic_; 141 | int partition_; 142 | int64_t offset_; 143 | }; 144 | 145 | } // cppkafka 146 | 147 | #endif // CPPKAFKA_TOPIC_PARTITION_H 148 | -------------------------------------------------------------------------------- /include/cppkafka/topic_partition_list.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_TOPIC_PARTITION_LIST_H 31 | #define CPPKAFKA_TOPIC_PARTITION_LIST_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include "macros.h" 40 | 41 | namespace cppkafka { 42 | 43 | class TopicPartition; 44 | class PartitionMetadata; 45 | 46 | using TopicPartitionsListPtr = std::unique_ptr; 48 | /** 49 | * A topic partition list 50 | */ 51 | using TopicPartitionList = std::vector; 52 | 53 | // Conversions between rdkafka handles and TopicPartitionList 54 | CPPKAFKA_API TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions); 55 | CPPKAFKA_API TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions); 56 | CPPKAFKA_API TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions); 57 | CPPKAFKA_API TopicPartitionList convert(const std::string& topic, 58 | const std::vector& partition_metadata); 59 | CPPKAFKA_API TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle); 60 | 61 | // Extracts a partition list subset belonging to the provided topics (case-insensitive) 62 | CPPKAFKA_API TopicPartitionList find_matches(const TopicPartitionList& partitions, 63 | const std::set& topics); 64 | 65 | // Extracts a partition list subset belonging to the provided partition ids 66 | // Note: this assumes that all topic partitions in the original list belong to the same topic 67 | // otherwise the partition ids may not be unique 68 | CPPKAFKA_API TopicPartitionList find_matches(const TopicPartitionList& partitions, 69 | const std::set& ids); 70 | 71 | CPPKAFKA_API std::ostream& operator<<(std::ostream& output, const TopicPartitionList& rhs); 72 | 73 | } // cppkafka 74 | 75 | #endif // CPPKAFKA_TOPIC_PARTITION_LIST_H 76 | -------------------------------------------------------------------------------- /include/cppkafka/utils/backoff_performer.h: -------------------------------------------------------------------------------- 1 | 
/* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_BACKOFF_PERFORMER_H 31 | #define CPPKAFKA_BACKOFF_PERFORMER_H 32 | 33 | #include 34 | #include 35 | #include 36 | #include "../consumer.h" 37 | #include "../exceptions.h" 38 | 39 | namespace cppkafka { 40 | 41 | /** 42 | * 43 | */ 44 | class CPPKAFKA_API BackoffPerformer { 45 | public: 46 | using TimeUnit = std::chrono::milliseconds; 47 | static const TimeUnit DEFAULT_INITIAL_BACKOFF; 48 | static const TimeUnit DEFAULT_BACKOFF_STEP; 49 | static const TimeUnit DEFAULT_MAXIMUM_BACKOFF; 50 | static const size_t DEFAULT_MAXIMUM_RETRIES; 51 | 52 | /** 53 | * The backoff policy to use 54 | */ 55 | enum class BackoffPolicy { 56 | LINEAR, 57 | EXPONENTIAL 58 | }; 59 | 60 | /** 61 | * Constructs an instance of backoff performer 62 | * 63 | * By default, the linear backoff policy is used 64 | */ 65 | BackoffPerformer(); 66 | 67 | /** 68 | * \brief Sets the backoff policy 69 | * 70 | * \param policy The backoff policy to be used 71 | */ 72 | void set_backoff_policy(BackoffPolicy policy); 73 | 74 | /** 75 | * \brief Sets the initial backoff 76 | * 77 | * The first time a commit fails, this will be the delay between the request is sent 78 | * and we re-try doing so 79 | * 80 | * \param value The value to be used 81 | */ 82 | void set_initial_backoff(TimeUnit value); 83 | 84 | /** 85 | * \brief Sets the backoff step 86 | * 87 | * When using the linear backoff policy, this will be the delay between sending a request 88 | * that fails and re-trying it 89 | * 90 | * \param value The value to be used 91 | */ 92 | void set_backoff_step(TimeUnit value); 93 | 94 | /** 95 | * \brief Sets the maximum backoff 96 | * 97 | * The backoff used will never be larger than this number 98 | * 99 | * \param value The value to be used 100 | */ 101 | void set_maximum_backoff(TimeUnit value); 102 | 103 | /** 104 | * \brief Sets the maximum number of retries for the commit operation 105 | * 106 | * \param value The number of retries before giving up 107 | * 108 
| * \remark Setting value to 0 is equivalent to 1, i.e. it will try at least once 109 | */ 110 | void set_maximum_retries(size_t value); 111 | 112 | /** 113 | * \brief Executes an action and backs off if it fails 114 | * 115 | * This will call the functor and will retry in case it returns false 116 | * 117 | * \param callback The action to be executed 118 | */ 119 | template 120 | void perform(const Functor& callback) { 121 | TimeUnit backoff = initial_backoff_; 122 | size_t retries = maximum_retries_; 123 | while (retries--) { 124 | auto start = std::chrono::steady_clock::now(); 125 | // If the callback returns true, we're done 126 | if (callback()) { 127 | return; //success 128 | } 129 | auto end = std::chrono::steady_clock::now(); 130 | auto time_elapsed = end - start; 131 | // If we still have time left, then sleep 132 | if (time_elapsed < backoff) { 133 | std::this_thread::sleep_for(backoff - time_elapsed); 134 | } 135 | // Increase out backoff depending on the policy being used 136 | backoff = increase_backoff(backoff); 137 | } 138 | // No more retries left or we have a terminal error. 139 | throw ActionTerminatedException("Commit failed: no more retries."); 140 | } 141 | private: 142 | TimeUnit increase_backoff(TimeUnit backoff); 143 | 144 | TimeUnit initial_backoff_; 145 | TimeUnit backoff_step_; 146 | TimeUnit maximum_backoff_; 147 | BackoffPolicy policy_; 148 | size_t maximum_retries_; 149 | }; 150 | 151 | } // cppkafka 152 | 153 | #endif // CPPKAFKA_BACKOFF_PERFORMER_H 154 | -------------------------------------------------------------------------------- /include/cppkafka/utils/poll_interface.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #ifndef CPPKAFKA_POLL_INTERFACE_H 31 | #define CPPKAFKA_POLL_INTERFACE_H 32 | 33 | #include "../consumer.h" 34 | 35 | namespace cppkafka { 36 | 37 | /** 38 | * \interface PollInterface 39 | * 40 | * \brief Interface defining polling methods for the Consumer class 41 | */ 42 | struct PollInterface { 43 | virtual ~PollInterface() = default; 44 | 45 | /** 46 | * \brief Get the underlying consumer controlled by this strategy 47 | * 48 | * \return A reference to the consumer instance 49 | */ 50 | virtual Consumer& get_consumer() = 0; 51 | 52 | /** 53 | * \brief Sets the timeout for polling functions 54 | * 55 | * This calls Consumer::set_timeout 56 | * 57 | * \param timeout The timeout to be set 58 | */ 59 | virtual void set_timeout(std::chrono::milliseconds timeout) = 0; 60 | 61 | /** 62 | * \brief Gets the timeout for polling functions 63 | * 64 | * This calls Consumer::get_timeout 65 | * 66 | * \return The timeout 67 | */ 68 | virtual std::chrono::milliseconds get_timeout() = 0; 69 | 70 | /** 71 | * \brief Polls all assigned partitions for new messages in round-robin fashion 72 | * 73 | * Each call to poll() will first consume from the global event queue and if there are 74 | * no pending events, will attempt to consume from all partitions until a valid message is found. 75 | * The timeout used on this call will be the one configured via PollInterface::set_timeout. 76 | * 77 | * \return A message. The returned message *might* be empty. It's necessary to check 78 | * that it's a valid one before using it (see example above). 
79 | * 80 | * \remark You need to call poll() or poll_batch() periodically as a keep alive mechanism, 81 | * otherwise the broker will think this consumer is down and will trigger a rebalance 82 | * (if using dynamic subscription) 83 | */ 84 | virtual Message poll() = 0; 85 | 86 | /** 87 | * \brief Polls for new messages 88 | * 89 | * Same as the other overload of PollInterface::poll but the provided 90 | * timeout will be used instead of the one configured on this Consumer. 91 | * 92 | * \param timeout The timeout to be used on this call 93 | */ 94 | virtual Message poll(std::chrono::milliseconds timeout) = 0; 95 | 96 | /** 97 | * \brief Polls all assigned partitions for a batch of new messages in round-robin fashion 98 | * 99 | * Each call to poll_batch() will first attempt to consume from the global event queue 100 | * and if the maximum batch number has not yet been filled, will attempt to fill it by 101 | * reading the remaining messages from each partition. 102 | * 103 | * \param max_batch_size The maximum amount of messages expected 104 | * 105 | * \return A list of messages 106 | * 107 | * \remark You need to call poll() or poll_batch() periodically as a keep alive mechanism, 108 | * otherwise the broker will think this consumer is down and will trigger a rebalance 109 | * (if using dynamic subscription) 110 | */ 111 | virtual std::vector poll_batch(size_t max_batch_size) = 0; 112 | 113 | /** 114 | * \brief Polls all assigned partitions for a batch of new messages in round-robin fashion 115 | * 116 | * Same as the other overload of PollInterface::poll_batch but the provided 117 | * timeout will be used instead of the one configured on this Consumer. 
118 | * 119 | * \param max_batch_size The maximum amount of messages expected 120 | * 121 | * \param timeout The timeout for this operation 122 | * 123 | * \return A list of messages 124 | */ 125 | virtual std::vector poll_batch(size_t max_batch_size, std::chrono::milliseconds timeout) = 0; 126 | }; 127 | 128 | } //cppkafka 129 | 130 | #endif //CPPKAFKA_POLL_INTERFACE_H 131 | -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SOURCES 2 | configuration.cpp 3 | topic_configuration.cpp 4 | configuration_option.cpp 5 | exceptions.cpp 6 | topic.cpp 7 | buffer.cpp 8 | queue.cpp 9 | message.cpp 10 | message_timestamp.cpp 11 | message_internal.cpp 12 | topic_partition.cpp 13 | topic_partition_list.cpp 14 | metadata.cpp 15 | group_information.cpp 16 | error.cpp 17 | event.cpp 18 | 19 | kafka_handle_base.cpp 20 | producer.cpp 21 | consumer.cpp 22 | 23 | utils/backoff_performer.cpp 24 | utils/backoff_committer.cpp 25 | utils/poll_strategy_base.cpp 26 | utils/roundrobin_poll_strategy.cpp 27 | ) 28 | 29 | set(TARGET_NAME cppkafka) 30 | set(PKG_DIR "${CMAKE_BINARY_DIR}/package") 31 | set(PKG_CONFIG_FILE "${PKG_DIR}/${TARGET_NAME}.pc") 32 | set(CONFIG_FILE "${PKG_DIR}/${PROJECT_NAME}Config.cmake") 33 | set(VERSION_FILE "${PKG_DIR}/${PROJECT_NAME}ConfigVersion.cmake") 34 | set(FIND_RDKAFKA_FILE "${PROJECT_SOURCE_DIR}/cmake/FindRdKafka.cmake") 35 | set(NAMESPACE "${PROJECT_NAME}::") 36 | set(TARGET_EXPORT_NAME ${PROJECT_NAME}Targets) 37 | 38 | add_library(${TARGET_NAME} ${CPPKAFKA_LIBRARY_TYPE} ${SOURCES}) 39 | IF(MSVC) 40 | target_compile_definitions(${TARGET_NAME} PUBLIC NOMINMAX) 41 | ENDIF() 42 | target_compile_features(${TARGET_NAME} PUBLIC cxx_std_11) 43 | target_include_directories(${TARGET_NAME} PUBLIC $) 44 | set_target_properties(${TARGET_NAME} PROPERTIES 45 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}" 46 | ARCHIVE_OUTPUT_NAME 
"${TARGET_NAME}" 47 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_INSTALL_LIBDIR}" 48 | LIBRARY_OUTPUT_NAME "${TARGET_NAME}" 49 | INSTALL_RPATH "${CMAKE_INSTALL_LIBDIR}" 50 | INSTALL_RPATH_USE_LINK_PATH TRUE 51 | VERSION ${CPPKAFKA_VERSION} 52 | SOVERSION ${CPPKAFKA_VERSION}) 53 | # In CMake >= 3.15 Boost::boost == Boost::headers 54 | target_link_libraries(${TARGET_NAME} PUBLIC RdKafka::rdkafka Boost::boost) 55 | if (WIN32) 56 | # On windows ntohs and related are in ws2_32 57 | target_link_libraries(${TARGET_NAME} PUBLIC ws2_32.lib) 58 | endif() 59 | 60 | # Install cppkafka target and specify all properties needed for the exported file 61 | install( 62 | TARGETS ${TARGET_NAME} 63 | EXPORT ${TARGET_EXPORT_NAME} 64 | COMPONENT binaries 65 | LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" 66 | ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" 67 | RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" 68 | INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" 69 | ) 70 | 71 | if (CPPKAFKA_EXPORT_PKGCONFIG) 72 | # Generate and install pkgconfig file 73 | configure_file(${PROJECT_SOURCE_DIR}/cmake/cppkafka.pc.in ${PKG_CONFIG_FILE} @ONLY) 74 | 75 | install( 76 | FILES ${PKG_CONFIG_FILE} 77 | DESTINATION "${CPPKAFKA_PKGCONFIG_DIR}" 78 | COMPONENT pkgconfig 79 | ) 80 | endif() 81 | 82 | if (CPPKAFKA_EXPORT_CMAKE_CONFIG) 83 | # Install the exported file 84 | install( 85 | EXPORT "${TARGET_EXPORT_NAME}" 86 | NAMESPACE "${NAMESPACE}" 87 | COMPONENT config 88 | DESTINATION "${CPPKAFKA_CONFIG_DIR}" 89 | ) 90 | 91 | # Generate CMAKE configuration file and exported targets 92 | configure_package_config_file( 93 | "${PROJECT_SOURCE_DIR}/cmake/config.cmake.in" 94 | "${CONFIG_FILE}" 95 | INSTALL_DESTINATION "${CPPKAFKA_CONFIG_DIR}" 96 | PATH_VARS RDKAFKA_MIN_VERSION_HEX CMAKE_INSTALL_PREFIX CMAKE_INSTALL_INCLUDEDIR CMAKE_INSTALL_LIBDIR 97 | ) 98 | 99 | # Generate version file 100 | write_basic_package_version_file( 101 | "${VERSION_FILE}" 102 | VERSION ${CPPKAFKA_VERSION} 103 | COMPATIBILITY AnyNewerVersion 
104 | ) 105 | 106 | install( 107 | FILES "${CONFIG_FILE}" "${VERSION_FILE}" "${FIND_RDKAFKA_FILE}" 108 | DESTINATION "${CPPKAFKA_CONFIG_DIR}" 109 | COMPONENT config 110 | ) 111 | endif() 112 | -------------------------------------------------------------------------------- /src/buffer.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include 31 | #include 32 | #include 33 | #include "buffer.h" 34 | 35 | using std::string; 36 | using std::equal; 37 | using std::lexicographical_compare; 38 | using std::ostream; 39 | using std::hex; 40 | using std::dec; 41 | 42 | namespace cppkafka { 43 | 44 | Buffer::Buffer() 45 | : data_(nullptr), size_(0) { 46 | 47 | } 48 | 49 | Buffer::Buffer(const string& data) 50 | : Buffer(data.data(), data.size()) { 51 | 52 | } 53 | 54 | const Buffer::DataType* Buffer::get_data() const { 55 | return data_; 56 | } 57 | 58 | size_t Buffer::get_size() const { 59 | return size_; 60 | } 61 | 62 | Buffer::const_iterator Buffer::begin() const { 63 | return data_; 64 | } 65 | 66 | Buffer::const_iterator Buffer::end() const { 67 | return data_ + size_; 68 | } 69 | 70 | Buffer::operator bool() const { 71 | return size_ != 0; 72 | } 73 | 74 | Buffer::operator string() const { 75 | return string(data_, data_ + size_); 76 | } 77 | 78 | ostream& operator<<(ostream& output, const Buffer& rhs) { 79 | for (const uint8_t value : rhs) { 80 | if (value >= 0x20 && value < 0x7f) { 81 | output << value; 82 | } 83 | else { 84 | output << "\\x"; 85 | if (value < 16) { 86 | output << '0'; 87 | } 88 | output << hex << static_cast(value) << dec; 89 | } 90 | } 91 | return output; 92 | } 93 | 94 | bool operator==(const Buffer& lhs, const Buffer& rhs) { 95 | if (lhs.get_size() != rhs.get_size()) { 96 | return false; 97 | } 98 | return equal(lhs.get_data(), lhs.get_data() + lhs.get_size(), rhs.get_data()); 99 | } 100 | 101 | bool operator!=(const Buffer& lhs, const Buffer& rhs) { 102 | return !(lhs == rhs); 103 | } 104 | 105 | bool operator<(const Buffer& lhs, const Buffer& rhs) { 106 | return lexicographical_compare(lhs.get_data(), lhs.get_data() + lhs.get_size(), 107 | rhs.get_data(), rhs.get_data() + rhs.get_size()); 108 | } 109 | 110 | bool operator>(const Buffer& lhs, const Buffer& rhs) { 111 | return lexicographical_compare(rhs.get_data(), rhs.get_data() + rhs.get_size(), 
112 | lhs.get_data(), lhs.get_data() + lhs.get_size()); 113 | } 114 | 115 | bool operator<=(const Buffer& lhs, const Buffer& rhs) { 116 | return !(lhs > rhs); 117 | } 118 | 119 | bool operator>=(const Buffer& lhs, const Buffer& rhs) { 120 | return !(lhs < rhs); 121 | } 122 | 123 | } // cppkafka 124 | -------------------------------------------------------------------------------- /src/configuration_option.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include "configuration_option.h" 31 | 32 | using std::string; 33 | 34 | namespace cppkafka { 35 | 36 | ConfigurationOption::ConfigurationOption(const string& key, const string& value) 37 | : key_(key), value_(value) { 38 | 39 | } 40 | 41 | ConfigurationOption::ConfigurationOption(const string& key, const char* value) 42 | : key_(key), value_(value) { 43 | 44 | } 45 | 46 | ConfigurationOption::ConfigurationOption(const string& key, bool value) 47 | : key_(key), value_(value ? "true" : "false") { 48 | 49 | } 50 | 51 | const string& ConfigurationOption::get_key() const { 52 | return key_; 53 | } 54 | 55 | const string& ConfigurationOption::get_value() const { 56 | return value_; 57 | } 58 | 59 | } // cppkafka 60 | -------------------------------------------------------------------------------- /src/error.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #include 31 | #include "error.h" 32 | 33 | using std::string; 34 | using std::ostream; 35 | 36 | namespace cppkafka { 37 | 38 | Error::Error(rd_kafka_resp_err_t error) 39 | : error_(error) { 40 | 41 | } 42 | 43 | rd_kafka_resp_err_t Error::get_error() const { 44 | return error_; 45 | } 46 | 47 | string Error::to_string() const { 48 | return rd_kafka_err2str(error_); 49 | } 50 | 51 | Error::operator bool() const { 52 | return error_ != RD_KAFKA_RESP_ERR_NO_ERROR; 53 | } 54 | 55 | bool Error::operator==(const Error& rhs) const { 56 | return error_ == rhs.error_; 57 | } 58 | 59 | bool Error::operator!=(const Error& rhs) const { 60 | return !(*this == rhs); 61 | } 62 | 63 | ostream& operator<<(ostream& output, const Error& rhs) { 64 | return output << rhs.to_string(); 65 | } 66 | 67 | } // cppkafka 68 | -------------------------------------------------------------------------------- /src/event.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 
11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #include "event.h" 31 | 32 | using std::allocator; 33 | using std::string; 34 | using std::unique_ptr; 35 | using std::vector; 36 | 37 | namespace cppkafka { 38 | 39 | Event::Event(rd_kafka_event_t* handle) 40 | : handle_(handle, &rd_kafka_event_destroy) { 41 | 42 | } 43 | 44 | string Event::get_name() const { 45 | return rd_kafka_event_name(handle_.get()); 46 | } 47 | 48 | rd_kafka_event_type_t Event::get_type() const { 49 | return rd_kafka_event_type(handle_.get()); 50 | } 51 | 52 | Message Event::get_next_message() const { 53 | // Note: the constness in rd_kafka_event_message_next's return value is not needed and it 54 | // breaks Message's interface. This is dirty but it looks like it should have no side effects. 
55 | const auto message = 56 | const_cast(rd_kafka_event_message_next(handle_.get())); 57 | return Message::make_non_owning(message); 58 | } 59 | 60 | vector Event::get_messages() { 61 | return get_messages(allocator()); 62 | } 63 | 64 | size_t Event::get_message_count() const { 65 | return rd_kafka_event_message_count(handle_.get()); 66 | } 67 | 68 | Error Event::get_error() const { 69 | return rd_kafka_event_error(handle_.get()); 70 | } 71 | 72 | void* Event::get_opaque() const { 73 | return rd_kafka_event_opaque(handle_.get()); 74 | } 75 | 76 | TopicPartition Event::get_topic_partition() const { 77 | using TopparHandle = unique_ptr; 79 | TopparHandle toppar_handle{rd_kafka_event_topic_partition(handle_.get()), 80 | &rd_kafka_topic_partition_destroy}; 81 | return TopicPartition(toppar_handle->topic, toppar_handle->partition, toppar_handle->offset); 82 | } 83 | 84 | TopicPartitionList Event::get_topic_partition_list() const { 85 | auto toppars_handle = rd_kafka_event_topic_partition_list(handle_.get()); 86 | return convert(toppars_handle); 87 | } 88 | 89 | Event::operator bool() const { 90 | return !!handle_; 91 | } 92 | 93 | } // cppkafka 94 | -------------------------------------------------------------------------------- /src/exceptions.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #include "exceptions.h" 31 | 32 | using std::string; 33 | using std::to_string; 34 | 35 | namespace cppkafka { 36 | 37 | // Exception 38 | 39 | Exception::Exception(string message) 40 | : message_(move(message)) { 41 | 42 | } 43 | 44 | const char* Exception::what() const noexcept { 45 | return message_.data(); 46 | } 47 | 48 | // ConfigException 49 | 50 | ConfigException::ConfigException(const string& config_name, const string& error) 51 | : Exception("Failed to set " + config_name + ": " + error) { 52 | 53 | } 54 | 55 | // ConfigOptionNotFound 56 | 57 | ConfigOptionNotFound::ConfigOptionNotFound(const string& config_name) 58 | : Exception(config_name + " not found") { 59 | 60 | } 61 | 62 | // InvalidConfigOptionType 63 | 64 | InvalidConfigOptionType::InvalidConfigOptionType(const string& config_name, const string& type) 65 | : Exception(config_name + " could not be converted to " + type) { 66 | 67 | } 68 | 69 | // ElementNotFound 70 | 71 | ElementNotFound::ElementNotFound(const string& element_type, const string& name) 72 | : Exception("Could not find " + element_type + " for " + name) { 73 | 74 | } 75 | 76 | // 
ParseException 77 | 78 | ParseException::ParseException(const string& message) 79 | : Exception(message) { 80 | 81 | } 82 | 83 | // UnexpectedVersion 84 | 85 | UnexpectedVersion::UnexpectedVersion(uint32_t version) 86 | : Exception("Unexpected version " + to_string(version)) { 87 | } 88 | 89 | // HandleException 90 | 91 | HandleException::HandleException(Error error) 92 | : Exception(error.to_string()), error_(error) { 93 | 94 | } 95 | 96 | Error HandleException::get_error() const { 97 | return error_; 98 | } 99 | 100 | // ConsumerException 101 | 102 | ConsumerException::ConsumerException(Error error) 103 | : Exception(error.to_string()), error_(error) { 104 | 105 | } 106 | 107 | Error ConsumerException::get_error() const { 108 | return error_; 109 | } 110 | 111 | // QueueException 112 | 113 | QueueException::QueueException(Error error) 114 | : Exception(error.to_string()), error_(error) { 115 | 116 | } 117 | 118 | Error QueueException::get_error() const { 119 | return error_; 120 | } 121 | 122 | // ActionTerminatedException 123 | 124 | ActionTerminatedException::ActionTerminatedException(const string& error) 125 | : Exception(error) { 126 | 127 | } 128 | 129 | } // cppkafka 130 | -------------------------------------------------------------------------------- /src/group_information.cpp: -------------------------------------------------------------------------------- 1 | #include "group_information.h" 2 | #include 3 | #include 4 | #include "topic_partition.h" 5 | #include "exceptions.h" 6 | #include "detail/endianness.h" 7 | 8 | using std::string; 9 | using std::vector; 10 | using std::memcpy; 11 | using std::distance; 12 | 13 | namespace cppkafka { 14 | 15 | // MemberAssignmentInformation 16 | MemberAssignmentInformation::MemberAssignmentInformation(const vector& data) { 17 | const char* error_msg = "Message is malformed"; 18 | // Version + topic list size 19 | if (data.size() < sizeof(uint16_t) + sizeof(uint32_t)) { 20 | throw ParseException(error_msg); 21 | } 22 
| const uint8_t* ptr = data.data(); 23 | const uint8_t* end = ptr + data.size(); 24 | memcpy(&version_, ptr, sizeof(version_)); 25 | version_ = be16toh(version_); 26 | ptr += sizeof(version_); 27 | 28 | uint32_t total_topics; 29 | memcpy(&total_topics, ptr, sizeof(total_topics)); 30 | total_topics = be32toh(total_topics); 31 | ptr += sizeof(total_topics); 32 | 33 | for (uint32_t i = 0; i != total_topics; ++i) { 34 | if (ptr + sizeof(uint16_t) > end) { 35 | throw ParseException(error_msg); 36 | } 37 | uint16_t topic_length; 38 | memcpy(&topic_length, ptr, sizeof(topic_length)); 39 | topic_length = be16toh(topic_length); 40 | ptr += sizeof(topic_length); 41 | 42 | // Check for string length + size of partitions list 43 | if (topic_length > distance(ptr, end) + sizeof(uint32_t)) { 44 | throw ParseException(error_msg); 45 | } 46 | string topic_name(ptr, ptr + topic_length); 47 | ptr += topic_length; 48 | 49 | uint32_t total_partitions; 50 | memcpy(&total_partitions, ptr, sizeof(total_partitions)); 51 | total_partitions = be32toh(total_partitions); 52 | ptr += sizeof(total_partitions); 53 | 54 | if (ptr + total_partitions * sizeof(uint32_t) > end) { 55 | throw ParseException(error_msg); 56 | } 57 | for (uint32_t j = 0; j < total_partitions; ++j) { 58 | uint32_t partition; 59 | memcpy(&partition, ptr, sizeof(partition)); 60 | partition = be32toh(partition); 61 | ptr += sizeof(partition); 62 | 63 | topic_partitions_.emplace_back(topic_name, partition); 64 | } 65 | } 66 | } 67 | 68 | uint16_t MemberAssignmentInformation::get_version() const { 69 | return version_; 70 | } 71 | 72 | const TopicPartitionList& MemberAssignmentInformation::get_topic_partitions() const { 73 | return topic_partitions_; 74 | } 75 | 76 | // GroupMemberInformation 77 | 78 | GroupMemberInformation::GroupMemberInformation(const rd_kafka_group_member_info& info) 79 | : member_id_(info.member_id), client_id_(info.client_id), client_host_(info.client_host), 80 | 
member_metadata_((uint8_t*)info.member_metadata, 81 | (uint8_t*)info.member_metadata + info.member_metadata_size), 82 | member_assignment_((uint8_t*)info.member_assignment, 83 | (uint8_t*)info.member_assignment + info.member_assignment_size) { 84 | 85 | } 86 | 87 | const string& GroupMemberInformation::get_member_id() const { 88 | return member_id_; 89 | } 90 | 91 | const string& GroupMemberInformation::get_client_id() const { 92 | return client_id_; 93 | } 94 | 95 | const string& GroupMemberInformation::get_client_host() const { 96 | return client_host_; 97 | } 98 | 99 | const vector& GroupMemberInformation::get_member_metadata() const { 100 | return member_metadata_; 101 | } 102 | 103 | const vector& GroupMemberInformation::get_member_assignment() const { 104 | return member_assignment_; 105 | } 106 | 107 | // GroupInformation 108 | 109 | GroupInformation::GroupInformation(const rd_kafka_group_info& info) 110 | : broker_(info.broker), name_(info.group), error_(info.err), state_(info.state), 111 | protocol_type_(info.protocol_type), protocol_(info.protocol) { 112 | for (int i = 0; i < info.member_cnt; ++i) { 113 | members_.emplace_back(info.members[i]); 114 | } 115 | } 116 | 117 | const BrokerMetadata& GroupInformation::get_broker() const { 118 | return broker_; 119 | } 120 | 121 | const string& GroupInformation::get_name() const { 122 | return name_; 123 | } 124 | 125 | Error GroupInformation::get_error() const { 126 | return error_; 127 | } 128 | 129 | const string& GroupInformation::get_state() const { 130 | return state_; 131 | } 132 | 133 | const string& GroupInformation::get_protocol_type() const { 134 | return protocol_type_; 135 | } 136 | 137 | const string& GroupInformation::get_protocol() const { 138 | return protocol_; 139 | } 140 | 141 | const vector& GroupInformation::get_members() const { 142 | return members_; 143 | } 144 | 145 | } // cppkafka 146 | -------------------------------------------------------------------------------- /src/message.cpp: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include "message.h" 31 | #include "message_internal.h" 32 | 33 | using std::chrono::milliseconds; 34 | 35 | namespace cppkafka { 36 | 37 | void dummy_deleter(rd_kafka_message_t*) { 38 | 39 | } 40 | 41 | Message Message::make_non_owning(rd_kafka_message_t* handle) { 42 | return Message(handle, NonOwningTag()); 43 | } 44 | 45 | Message::Message() 46 | : handle_(nullptr, nullptr), 47 | user_data_(nullptr) { 48 | 49 | } 50 | 51 | Message::Message(rd_kafka_message_t* handle) 52 | : Message(HandlePtr(handle, &rd_kafka_message_destroy)) { 53 | 54 | } 55 | 56 | Message::Message(rd_kafka_message_t* handle, NonOwningTag) 57 | : Message(HandlePtr(handle, &dummy_deleter)) { 58 | 59 | } 60 | 61 | Message::Message(HandlePtr handle) 62 | : handle_(move(handle)), 63 | payload_(handle_ ? Buffer((const Buffer::DataType*)handle_->payload, handle_->len) : Buffer()), 64 | key_(handle_ ? Buffer((const Buffer::DataType*)handle_->key, handle_->key_len) : Buffer()), 65 | user_data_(handle_ ? 
handle_->_private : nullptr) { 66 | #if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION) 67 | // get the header list if any 68 | if (handle_) { 69 | rd_kafka_headers_t* headers_handle; 70 | Error error = rd_kafka_message_headers(handle_.get(), &headers_handle); 71 | if (!error) { 72 | header_list_ = HeaderListType::make_non_owning(headers_handle); 73 | } 74 | } 75 | #endif 76 | } 77 | 78 | Message& Message::load_internal() { 79 | if (user_data_) { 80 | MessageInternal* mi = static_cast(user_data_); 81 | user_data_ = mi->get_user_data(); 82 | internal_ = mi->get_internal(); 83 | } 84 | return *this; 85 | } 86 | 87 | boost::optional Message::get_timestamp() const { 88 | rd_kafka_timestamp_type_t type = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; 89 | int64_t timestamp = rd_kafka_message_timestamp(handle_.get(), &type); 90 | if (timestamp == -1 || type == RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) { 91 | return {}; 92 | } 93 | return MessageTimestamp(std::chrono::milliseconds(timestamp), 94 | static_cast(type)); 95 | } 96 | 97 | } // cppkafka 98 | -------------------------------------------------------------------------------- /src/message_internal.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | #include "message_internal.h" 30 | #include "message.h" 31 | #include "message_builder.h" 32 | 33 | namespace cppkafka { 34 | 35 | // MessageInternal 36 | 37 | MessageInternal::MessageInternal(void* user_data, 38 | std::shared_ptr internal) 39 | : user_data_(user_data), 40 | internal_(internal) { 41 | } 42 | 43 | std::unique_ptr MessageInternal::load(Message& message) { 44 | return std::unique_ptr(message.load_internal().get_handle() ? 45 | static_cast(message.get_handle()->_private) : nullptr); 46 | } 47 | 48 | void* MessageInternal::get_user_data() const { 49 | return user_data_; 50 | } 51 | 52 | InternalPtr MessageInternal::get_internal() const { 53 | return internal_; 54 | } 55 | 56 | } 57 | -------------------------------------------------------------------------------- /src/message_timestamp.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include "message_timestamp.h" 31 | 32 | using std::chrono::milliseconds; 33 | 34 | namespace cppkafka { 35 | 36 | MessageTimestamp::MessageTimestamp(milliseconds timestamp, TimestampType type) 37 | : timestamp_(timestamp), 38 | type_(type) { 39 | 40 | } 41 | 42 | milliseconds MessageTimestamp::get_timestamp() const { 43 | return timestamp_; 44 | } 45 | 46 | MessageTimestamp::TimestampType MessageTimestamp::get_type() const { 47 | return type_; 48 | } 49 | 50 | } // cppkafka 51 | 52 | -------------------------------------------------------------------------------- /src/metadata.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #include 31 | #include "metadata.h" 32 | #include "error.h" 33 | 34 | using std::string; 35 | using std::vector; 36 | using std::unordered_set; 37 | 38 | namespace cppkafka { 39 | 40 | // PartitionMetadata 41 | 42 | PartitionMetadata::PartitionMetadata(const rd_kafka_metadata_partition& partition) 43 | : id_(partition.id), error_(partition.err), leader_(partition.leader) { 44 | for (int i = 0; i < partition.replica_cnt; ++i) { 45 | replicas_.push_back(partition.replicas[i]); 46 | } 47 | for (int i = 0; i < partition.isr_cnt; ++i) { 48 | isrs_.push_back(partition.isrs[i]); 49 | } 50 | } 51 | 52 | uint32_t PartitionMetadata::get_id() const { 53 | return id_; 54 | } 55 | 56 | Error PartitionMetadata::get_error() const { 57 | return error_; 58 | } 59 | 60 | int32_t PartitionMetadata::get_leader() const { 61 | return leader_; 62 | } 63 | 64 | const vector& PartitionMetadata::get_replicas() const { 65 | return replicas_; 66 | } 67 | 68 | const vector& PartitionMetadata::get_in_sync_replica_brokers() const { 69 | return isrs_; 70 | } 71 | 72 | // TopicMetadata 73 | 74 | TopicMetadata::TopicMetadata(const rd_kafka_metadata_topic& topic) 75 | : name_(topic.topic), error_(topic.err) { 76 | for (int i = 0; i < topic.partition_cnt; ++i) { 77 | partitions_.emplace_back(topic.partitions[i]); 78 | } 79 | } 80 | 81 | const string& TopicMetadata::get_name() const { 82 | return name_; 83 | } 84 | 85 | 
Error TopicMetadata::get_error() const { 86 | return error_; 87 | } 88 | 89 | const vector& TopicMetadata::get_partitions() const { 90 | return partitions_; 91 | } 92 | 93 | // BrokerMetadata 94 | 95 | BrokerMetadata::BrokerMetadata(const rd_kafka_metadata_broker_t& broker) 96 | : host_(broker.host), id_(broker.id), port_(static_cast(broker.port)) { 97 | 98 | } 99 | 100 | const string& BrokerMetadata::get_host() const { 101 | return host_; 102 | } 103 | 104 | int32_t BrokerMetadata::get_id() const { 105 | return id_; 106 | } 107 | 108 | uint16_t BrokerMetadata::get_port() const { 109 | return port_; 110 | } 111 | 112 | // Metadata 113 | 114 | void dummy_metadata_destroyer(const rd_kafka_metadata_t*) { 115 | 116 | } 117 | 118 | Metadata Metadata::make_non_owning(const rd_kafka_metadata_t* handle) { 119 | return Metadata(handle, NonOwningTag{}); 120 | } 121 | 122 | Metadata::Metadata() 123 | : handle_(nullptr, nullptr) { 124 | 125 | } 126 | 127 | Metadata::Metadata(const rd_kafka_metadata_t* handle) 128 | : handle_(handle, &rd_kafka_metadata_destroy) { 129 | 130 | } 131 | 132 | Metadata::Metadata(const rd_kafka_metadata_t* handle, NonOwningTag) 133 | : handle_(handle, &dummy_metadata_destroyer) { 134 | 135 | } 136 | 137 | vector Metadata::get_brokers() const { 138 | assert(handle_); 139 | vector output; 140 | for (int i = 0; i < handle_->broker_cnt; ++i) { 141 | const rd_kafka_metadata_broker_t& broker = handle_->brokers[i]; 142 | output.emplace_back(broker); 143 | } 144 | return output; 145 | } 146 | 147 | vector Metadata::get_topics() const { 148 | assert(handle_); 149 | vector output; 150 | for (int i = 0; i < handle_->topic_cnt; ++i) { 151 | const rd_kafka_metadata_topic_t& topic = handle_->topics[i]; 152 | output.emplace_back(topic); 153 | } 154 | return output; 155 | } 156 | 157 | vector Metadata::get_topics(const unordered_set& topics) const { 158 | assert(handle_); 159 | vector output; 160 | for (int i = 0; i < handle_->topic_cnt; ++i) { 161 | const 
rd_kafka_metadata_topic_t& topic = handle_->topics[i]; 162 | if (topics.count(topic.topic)) { 163 | output.emplace_back(topic); 164 | } 165 | } 166 | return output; 167 | } 168 | 169 | vector Metadata::get_topics_prefixed(const string& prefix) const { 170 | assert(handle_); 171 | vector output; 172 | for (int i = 0; i < handle_->topic_cnt; ++i) { 173 | const rd_kafka_metadata_topic_t& topic = handle_->topics[i]; 174 | string topic_name = topic.topic; 175 | if (topic_name.find(prefix) == 0) { 176 | output.emplace_back(topic); 177 | } 178 | } 179 | return output; 180 | } 181 | 182 | 183 | Metadata::operator bool() const { 184 | return handle_ != nullptr; 185 | } 186 | 187 | const rd_kafka_metadata_t* Metadata::get_handle() const { 188 | return handle_.get(); 189 | } 190 | 191 | } // cppkafka 192 | -------------------------------------------------------------------------------- /src/queue.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | #include "queue.h" 30 | #include "exceptions.h" 31 | 32 | using std::vector; 33 | using std::exception; 34 | using std::chrono::milliseconds; 35 | using std::allocator; 36 | 37 | namespace cppkafka { 38 | 39 | void dummy_deleter(rd_kafka_queue_t*) { 40 | 41 | } 42 | 43 | const milliseconds Queue::DEFAULT_TIMEOUT{1000}; 44 | 45 | Queue Queue::make_non_owning(rd_kafka_queue_t* handle) { 46 | return Queue(handle, NonOwningTag{}); 47 | } 48 | 49 | Queue Queue::make_queue(rd_kafka_queue_t* handle) { 50 | if (rd_kafka_version() <= RD_KAFKA_QUEUE_REFCOUNT_BUG_VERSION) { 51 | return Queue::make_non_owning(handle); 52 | } 53 | else { 54 | return Queue(handle); 55 | } 56 | } 57 | 58 | Queue::Queue() 59 | : handle_(nullptr, nullptr), 60 | timeout_ms_(DEFAULT_TIMEOUT) { 61 | 62 | } 63 | 64 | Queue::Queue(rd_kafka_queue_t* handle) 65 | : handle_(handle, &rd_kafka_queue_destroy), 66 | timeout_ms_(DEFAULT_TIMEOUT) { 67 | 68 | } 69 | 70 | Queue::Queue(rd_kafka_queue_t* handle, NonOwningTag) 71 | : handle_(handle, &dummy_deleter) { 72 | 73 | } 74 | 75 | rd_kafka_queue_t* Queue::get_handle() const { 76 | return handle_.get(); 77 | } 78 | 79 | size_t Queue::get_length() const { 80 | return rd_kafka_queue_length(handle_.get()); 81 | } 82 | 83 | void Queue::forward_to_queue(const Queue& forward_queue) const { 84 | return rd_kafka_queue_forward(handle_.get(), forward_queue.handle_.get()); 85 | } 86 | 87 | void 
Queue::disable_queue_forwarding() const { 88 | return rd_kafka_queue_forward(handle_.get(), nullptr); 89 | } 90 | 91 | void Queue::set_timeout(milliseconds timeout) { 92 | timeout_ms_ = timeout; 93 | } 94 | 95 | milliseconds Queue::get_timeout() const { 96 | return timeout_ms_; 97 | } 98 | 99 | Message Queue::consume() const { 100 | return consume(timeout_ms_); 101 | } 102 | 103 | Message Queue::consume(milliseconds timeout) const { 104 | return Message(rd_kafka_consume_queue(handle_.get(), static_cast(timeout.count()))); 105 | } 106 | 107 | vector Queue::consume_batch(size_t max_batch_size) const { 108 | return consume_batch(max_batch_size, timeout_ms_, allocator()); 109 | } 110 | 111 | vector Queue::consume_batch(size_t max_batch_size, milliseconds timeout) const { 112 | return consume_batch(max_batch_size, timeout, allocator()); 113 | } 114 | 115 | Event Queue::next_event() const { 116 | return next_event(timeout_ms_); 117 | } 118 | 119 | Event Queue::next_event(milliseconds timeout) const { 120 | return Event(rd_kafka_queue_poll(handle_.get(), timeout.count())); 121 | } 122 | 123 | } //cppkafka 124 | -------------------------------------------------------------------------------- /src/topic.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #include "topic.h" 31 | 32 | using std::move; 33 | using std::string; 34 | 35 | namespace cppkafka { 36 | 37 | void dummy_deleter(rd_kafka_topic_t*) { 38 | 39 | } 40 | 41 | Topic Topic::make_non_owning(rd_kafka_topic_t* handle) { 42 | return Topic(handle, NonOwningTag{}); 43 | } 44 | 45 | Topic::Topic() 46 | : handle_(nullptr, nullptr) { 47 | 48 | } 49 | 50 | Topic::Topic(rd_kafka_topic_t* handle) 51 | : handle_(handle, &rd_kafka_topic_destroy) { 52 | 53 | } 54 | 55 | Topic::Topic(rd_kafka_topic_t* handle, NonOwningTag) 56 | : handle_(handle, &dummy_deleter) { 57 | 58 | } 59 | 60 | string Topic::get_name() const { 61 | return rd_kafka_topic_name(handle_.get()); 62 | } 63 | 64 | bool Topic::is_partition_available(int partition) const { 65 | return rd_kafka_topic_partition_available(handle_.get(), partition) == 1; 66 | } 67 | 68 | rd_kafka_topic_t* Topic::get_handle() const { 69 | return handle_.get(); 70 | } 71 | 72 | } // cppkafka 73 | -------------------------------------------------------------------------------- /src/topic_configuration.cpp: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include "topic_configuration.h" 31 | #include 32 | #include 33 | #include "exceptions.h" 34 | #include "topic.h" 35 | #include "buffer.h" 36 | #include "detail/callback_invoker.h" 37 | 38 | using std::string; 39 | using std::map; 40 | using std::vector; 41 | using std::initializer_list; 42 | 43 | namespace cppkafka { 44 | 45 | int32_t partitioner_callback_proxy(const rd_kafka_topic_t* handle, const void *key_ptr, 46 | size_t key_size, int32_t partition_count, 47 | void* topic_opaque, void* message_opaque) { 48 | const TopicConfiguration* config = static_cast(topic_opaque); 49 | const auto& callback = config->get_partitioner_callback(); 50 | if (callback) { 51 | Topic topic = Topic::make_non_owning(const_cast(handle)); 52 | Buffer key(static_cast(key_ptr), key_size); 53 | return CallbackInvoker("topic partitioner", callback, nullptr) 54 | (topic, key, partition_count); 55 | } 56 | else { 57 | return rd_kafka_msg_partitioner_consistent_random(handle, key_ptr, key_size, 58 | partition_count, topic_opaque, 59 | message_opaque); 60 | } 61 | } 62 | 63 | TopicConfiguration::TopicConfiguration() 64 | : handle_(make_handle(rd_kafka_topic_conf_new())) { 65 | 66 | } 67 | 68 | TopicConfiguration::TopicConfiguration(const vector& options) 69 | : TopicConfiguration() { 70 | set(options); 71 | } 72 | 73 | TopicConfiguration::TopicConfiguration(const initializer_list& options) 74 | : TopicConfiguration() { 75 | set(options); 76 | } 77 | 78 | TopicConfiguration::TopicConfiguration(rd_kafka_topic_conf_t* ptr) 79 | : handle_(make_handle(ptr)) { 80 | 81 | } 82 | 83 | TopicConfiguration& TopicConfiguration::set(const string& name, const string& value) { 84 | char error_buffer[512]; 85 | rd_kafka_conf_res_t result; 86 | result = rd_kafka_topic_conf_set(handle_.get(), name.data(), value.data(), error_buffer, 87 | sizeof(error_buffer)); 88 | if (result != RD_KAFKA_CONF_OK) { 89 | throw ConfigException(name, error_buffer); 90 | } 91 | return *this; 92 | } 93 | 94 
| TopicConfiguration& TopicConfiguration::set_partitioner_callback(PartitionerCallback callback) { 95 | partitioner_callback_ = move(callback); 96 | rd_kafka_topic_conf_set_partitioner_cb(handle_.get(), &partitioner_callback_proxy); 97 | return *this; 98 | } 99 | 100 | TopicConfiguration& TopicConfiguration::set_as_opaque() { 101 | rd_kafka_topic_conf_set_opaque(handle_.get(), this); 102 | return *this; 103 | } 104 | 105 | const TopicConfiguration::PartitionerCallback& 106 | TopicConfiguration::get_partitioner_callback() const { 107 | return partitioner_callback_; 108 | } 109 | 110 | bool TopicConfiguration::has_property(const string& name) const { 111 | size_t size = 0; 112 | return rd_kafka_topic_conf_get(handle_.get(), name.data(), nullptr, &size) == RD_KAFKA_CONF_OK; 113 | } 114 | 115 | string TopicConfiguration::get(const string& name) const { 116 | size_t size = 0; 117 | auto result = rd_kafka_topic_conf_get(handle_.get(), name.data(), nullptr, &size); 118 | if (result != RD_KAFKA_CONF_OK) { 119 | throw ConfigOptionNotFound(name); 120 | } 121 | vector buffer(size); 122 | rd_kafka_topic_conf_get(handle_.get(), name.data(), buffer.data(), &size); 123 | return string(buffer.data()); 124 | } 125 | 126 | map TopicConfiguration::get_all() const { 127 | size_t count = 0; 128 | const char** all = rd_kafka_topic_conf_dump(handle_.get(), &count); 129 | map output = parse_dump(all, count); 130 | rd_kafka_conf_dump_free(all, count); 131 | return output; 132 | } 133 | 134 | rd_kafka_topic_conf_t* TopicConfiguration::get_handle() const { 135 | return handle_.get(); 136 | } 137 | 138 | TopicConfiguration::HandlePtr TopicConfiguration::make_handle(rd_kafka_topic_conf_t* ptr) { 139 | return HandlePtr(ptr, &rd_kafka_topic_conf_destroy, &rd_kafka_topic_conf_dup); 140 | } 141 | 142 | } // cppkafka 143 | -------------------------------------------------------------------------------- /src/topic_partition.cpp: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include 31 | #include 32 | #include 33 | #include "topic_partition.h" 34 | 35 | using std::string; 36 | using std::to_string; 37 | using std::ostream; 38 | using std::tie; 39 | 40 | namespace cppkafka { 41 | 42 | TopicPartition::TopicPartition() 43 | : TopicPartition("") { 44 | 45 | } 46 | 47 | TopicPartition::TopicPartition(const char* topic) 48 | : TopicPartition(string(topic)) { 49 | 50 | } 51 | 52 | TopicPartition::TopicPartition(string topic) 53 | : TopicPartition(move(topic), RD_KAFKA_PARTITION_UA) { 54 | 55 | } 56 | 57 | TopicPartition::TopicPartition(string topic, int partition) 58 | : TopicPartition(move(topic), partition, RD_KAFKA_OFFSET_INVALID) { 59 | 60 | } 61 | 62 | TopicPartition::TopicPartition(string topic, int partition, int64_t offset) 63 | : topic_(move(topic)), partition_(partition), offset_(offset) { 64 | 65 | } 66 | 67 | const string& TopicPartition::get_topic() const { 68 | return topic_; 69 | } 70 | 71 | int TopicPartition::get_partition() const { 72 | return partition_; 73 | } 74 | 75 | int64_t TopicPartition::get_offset() const { 76 | return offset_; 77 | } 78 | 79 | void TopicPartition::set_partition(int partition) { 80 | partition_ = partition; 81 | } 82 | 83 | void TopicPartition::set_offset(int64_t offset) { 84 | offset_ = offset; 85 | } 86 | 87 | bool TopicPartition::operator<(const TopicPartition& rhs) const { 88 | return tie(topic_, partition_) < tie(rhs.topic_, rhs.partition_); 89 | } 90 | 91 | bool TopicPartition::operator==(const TopicPartition& rhs) const { 92 | return tie(topic_, partition_) == tie(rhs.topic_, rhs.partition_); 93 | } 94 | 95 | bool TopicPartition::operator!=(const TopicPartition& rhs) const { 96 | return !(*this == rhs); 97 | } 98 | 99 | ostream& operator<<(ostream& output, const TopicPartition& rhs) { 100 | return output << rhs.get_topic() << "[" 101 | << rhs.get_partition() << ":" 102 | << (rhs.get_offset() == RD_KAFKA_OFFSET_INVALID ? 
"#" : to_string(rhs.get_offset())) 103 | << "]"; 104 | } 105 | 106 | } // cppkafka 107 | -------------------------------------------------------------------------------- /src/topic_partition_list.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include 31 | #include 32 | #include "topic_partition_list.h" 33 | #include "topic_partition.h" 34 | #include "exceptions.h" 35 | #include "metadata.h" 36 | 37 | using std::vector; 38 | using std::set; 39 | using std::ostream; 40 | using std::string; 41 | using std::equal; 42 | 43 | namespace cppkafka { 44 | 45 | TopicPartitionsListPtr convert(const TopicPartitionList& topic_partitions) { 46 | TopicPartitionsListPtr handle(rd_kafka_topic_partition_list_new(topic_partitions.size()), 47 | &rd_kafka_topic_partition_list_destroy); 48 | for (const auto& item : topic_partitions) { 49 | rd_kafka_topic_partition_t* new_item = rd_kafka_topic_partition_list_add( 50 | handle.get(), 51 | item.get_topic().data(), 52 | item.get_partition() 53 | ); 54 | new_item->offset = item.get_offset(); 55 | } 56 | return handle; 57 | } 58 | 59 | TopicPartitionList convert(const TopicPartitionsListPtr& topic_partitions) { 60 | return convert(topic_partitions.get()); 61 | } 62 | 63 | TopicPartitionList convert(rd_kafka_topic_partition_list_t* topic_partitions) { 64 | TopicPartitionList output; 65 | for (int i = 0; i < topic_partitions->cnt; ++i) { 66 | const auto& elem = topic_partitions->elems[i]; 67 | output.emplace_back(elem.topic, elem.partition, elem.offset); 68 | } 69 | return output; 70 | } 71 | 72 | TopicPartitionList convert(const std::string& topic, 73 | const std::vector& partition_metadata) 74 | { 75 | TopicPartitionList output; 76 | for (const auto& meta : partition_metadata) { 77 | output.emplace_back(topic, meta.get_id()); 78 | } 79 | return output; 80 | } 81 | 82 | TopicPartitionsListPtr make_handle(rd_kafka_topic_partition_list_t* handle) { 83 | return TopicPartitionsListPtr(handle, &rd_kafka_topic_partition_list_destroy); 84 | } 85 | 86 | TopicPartitionList find_matches(const TopicPartitionList& partitions, 87 | const set& topics) { 88 | TopicPartitionList subset; 89 | for (const auto& partition : partitions) { 90 | for (const auto& topic : topics) { 
91 | if (topic.size() == partition.get_topic().size()) { 92 | // compare both strings 93 | bool match = equal(topic.begin(), topic.end(), partition.get_topic().begin(), 94 | [](char c1, char c2)->bool { 95 | return toupper(c1) == toupper(c2); 96 | }); 97 | if (match) { 98 | subset.emplace_back(partition); 99 | } 100 | } 101 | } 102 | } 103 | return subset; 104 | } 105 | 106 | TopicPartitionList find_matches(const TopicPartitionList& partitions, 107 | const set& ids) { 108 | TopicPartitionList subset; 109 | for (const auto& partition : partitions) { 110 | if (ids.count(partition.get_partition()) > 0) { 111 | subset.emplace_back(partition); 112 | } 113 | } 114 | return subset; 115 | } 116 | 117 | ostream& operator<<(ostream& output, const TopicPartitionList& rhs) { 118 | output << "[ "; 119 | for (auto iter = rhs.begin(); iter != rhs.end(); ++iter) { 120 | if (iter != rhs.begin()) { 121 | output << ", "; 122 | } 123 | output << *iter; 124 | } 125 | output << " ]"; 126 | return output; 127 | } 128 | 129 | } // cppkafka 130 | -------------------------------------------------------------------------------- /src/utils/backoff_committer.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | * 28 | */ 29 | 30 | #include 31 | #include "utils/backoff_committer.h" 32 | 33 | using std::min; 34 | 35 | namespace cppkafka { 36 | 37 | BackoffCommitter::BackoffCommitter(Consumer& consumer) 38 | : consumer_(consumer) { 39 | 40 | } 41 | 42 | void BackoffCommitter::set_error_callback(ErrorCallback callback) { 43 | callback_ = move(callback); 44 | } 45 | 46 | void BackoffCommitter::commit() { 47 | perform([&] { 48 | return do_commit(); 49 | }); 50 | } 51 | 52 | void BackoffCommitter::commit(const Message& msg) { 53 | perform([&] { 54 | return do_commit(msg); 55 | }); 56 | } 57 | 58 | void BackoffCommitter::commit(const TopicPartitionList& topic_partitions) { 59 | perform([&] { 60 | return do_commit(topic_partitions); 61 | }); 62 | } 63 | 64 | Consumer& BackoffCommitter::get_consumer() { 65 | return consumer_; 66 | } 67 | 68 | } // cppkafka 69 | -------------------------------------------------------------------------------- /src/utils/backoff_performer.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include 31 | #include 32 | #include "utils/backoff_performer.h" 33 | 34 | using std::min; 35 | using std::numeric_limits; 36 | 37 | namespace cppkafka { 38 | 39 | const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_INITIAL_BACKOFF{100}; 40 | const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_BACKOFF_STEP{50}; 41 | const BackoffPerformer::TimeUnit BackoffPerformer::DEFAULT_MAXIMUM_BACKOFF{1000}; 42 | const size_t BackoffPerformer::DEFAULT_MAXIMUM_RETRIES{numeric_limits::max()}; 43 | 44 | BackoffPerformer::BackoffPerformer() 45 | : initial_backoff_(DEFAULT_INITIAL_BACKOFF), 46 | backoff_step_(DEFAULT_BACKOFF_STEP), maximum_backoff_(DEFAULT_MAXIMUM_BACKOFF), 47 | policy_(BackoffPolicy::LINEAR), maximum_retries_(DEFAULT_MAXIMUM_RETRIES) { 48 | 49 | } 50 | 51 | void BackoffPerformer::set_backoff_policy(BackoffPolicy policy) { 52 | policy_ = policy; 53 | } 54 | 55 | void BackoffPerformer::set_initial_backoff(TimeUnit value) { 56 | initial_backoff_ = value; 57 | } 58 | 59 | void BackoffPerformer::set_backoff_step(TimeUnit value) { 60 | backoff_step_ = value; 61 | } 62 | 63 | void BackoffPerformer::set_maximum_backoff(TimeUnit value) { 64 | maximum_backoff_ = value; 65 | } 66 | 67 | void BackoffPerformer::set_maximum_retries(size_t value) { 68 | maximum_retries_ = value == 0 ? 1 : value; 69 | } 70 | 71 | BackoffPerformer::TimeUnit BackoffPerformer::increase_backoff(TimeUnit backoff) { 72 | if (policy_ == BackoffPolicy::LINEAR) { 73 | backoff = backoff + backoff_step_; 74 | } 75 | else { 76 | backoff = backoff * 2; 77 | } 78 | return min(backoff, maximum_backoff_); 79 | } 80 | 81 | } // cppkafka 82 | -------------------------------------------------------------------------------- /src/utils/poll_strategy_base.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include "utils/poll_strategy_base.h" 31 | #include "consumer.h" 32 | 33 | using std::chrono::milliseconds; 34 | 35 | namespace cppkafka { 36 | 37 | PollStrategyBase::PollStrategyBase(Consumer& consumer) 38 | : consumer_(consumer), 39 | consumer_queue_(QueueData{consumer.get_consumer_queue(), boost::any()}) { 40 | // get all currently active partition assignments 41 | TopicPartitionList assignment = consumer_.get_assignment(); 42 | on_assignment(assignment); 43 | 44 | // take over the assignment callback 45 | assignment_callback_ = consumer.get_assignment_callback(); 46 | consumer_.set_assignment_callback([this](TopicPartitionList& partitions) { 47 | on_assignment(partitions); 48 | }); 49 | // take over the revocation callback 50 | revocation_callback_ = consumer.get_revocation_callback(); 51 | consumer_.set_revocation_callback([this](const TopicPartitionList& partitions) { 52 | on_revocation(partitions); 53 | }); 54 | // take over the rebalance error callback 55 | rebalance_error_callback_ = consumer.get_rebalance_error_callback(); 56 | consumer_.set_rebalance_error_callback([this](Error error) { 57 | on_rebalance_error(error); 58 | }); 59 | } 60 | 61 | PollStrategyBase::~PollStrategyBase() { 62 | //reset the original callbacks 63 | consumer_.set_assignment_callback(assignment_callback_); 64 | consumer_.set_revocation_callback(revocation_callback_); 65 | consumer_.set_rebalance_error_callback(rebalance_error_callback_); 66 | } 67 | 68 | void PollStrategyBase::set_timeout(milliseconds timeout) { 69 | consumer_.set_timeout(timeout); 70 | } 71 | 72 | milliseconds PollStrategyBase::get_timeout() { 73 | return consumer_.get_timeout(); 74 | } 75 | 76 | Consumer& PollStrategyBase::get_consumer() { 77 | return consumer_; 78 | } 79 | 80 | QueueData& PollStrategyBase::get_consumer_queue() { 81 | return consumer_queue_; 82 | } 83 | 84 | PollStrategyBase::QueueMap& PollStrategyBase::get_partition_queues() { 85 | return partition_queues_; 86 | } 87 | 
88 | void PollStrategyBase::reset_state() { 89 | 90 | } 91 | 92 | void PollStrategyBase::assign(TopicPartitionList& partitions) { 93 | // populate partition queues 94 | for (const auto& partition : partitions) { 95 | // get the queue associated with this partition 96 | partition_queues_.emplace(partition, QueueData{consumer_.get_partition_queue(partition), boost::any()}); 97 | } 98 | reset_state(); 99 | } 100 | 101 | void PollStrategyBase::revoke(const TopicPartitionList& partitions) { 102 | for (const auto &partition : partitions) { 103 | partition_queues_.erase(partition); 104 | } 105 | reset_state(); 106 | } 107 | 108 | void PollStrategyBase::revoke() { 109 | partition_queues_.clear(); 110 | reset_state(); 111 | } 112 | 113 | void PollStrategyBase::on_assignment(TopicPartitionList& partitions) { 114 | assign(partitions); 115 | // call original consumer callback if any 116 | if (assignment_callback_) { 117 | assignment_callback_(partitions); 118 | } 119 | } 120 | 121 | void PollStrategyBase::on_revocation(const TopicPartitionList& partitions) { 122 | revoke(partitions); 123 | // call original consumer callback if any 124 | if (revocation_callback_) { 125 | revocation_callback_(partitions); 126 | } 127 | } 128 | 129 | void PollStrategyBase::on_rebalance_error(Error error) { 130 | reset_state(); 131 | // call original consumer callback if any 132 | if (rebalance_error_callback_) { 133 | rebalance_error_callback_(error); 134 | } 135 | } 136 | 137 | } //cppkafka 138 | -------------------------------------------------------------------------------- /src/utils/roundrobin_poll_strategy.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, Matias Fontanini 3 | * All rights reserved. 
4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * * Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above 12 | * copyright notice, this list of conditions and the following disclaimer 13 | * in the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | * 28 | */ 29 | 30 | #include "utils/roundrobin_poll_strategy.h" 31 | 32 | using std::string; 33 | using std::chrono::milliseconds; 34 | using std::make_move_iterator; 35 | using std::allocator; 36 | 37 | namespace cppkafka { 38 | 39 | RoundRobinPollStrategy::RoundRobinPollStrategy(Consumer& consumer) 40 | : PollStrategyBase(consumer) { 41 | reset_state(); 42 | } 43 | 44 | RoundRobinPollStrategy::~RoundRobinPollStrategy() { 45 | restore_forwarding(); 46 | } 47 | 48 | 49 | Message RoundRobinPollStrategy::poll() { 50 | return poll(get_consumer().get_timeout()); 51 | } 52 | 53 | Message RoundRobinPollStrategy::poll(milliseconds timeout) { 54 | // Always give priority to group and global events 55 | Message message = get_consumer_queue().queue.consume(milliseconds(0)); 56 | if (message) { 57 | return message; 58 | } 59 | size_t num_queues = get_partition_queues().size(); 60 | while (num_queues--) { 61 | //consume the next partition (non-blocking) 62 | message = get_next_queue().queue.consume(milliseconds(0)); 63 | if (message) { 64 | return message; 65 | } 66 | } 67 | // We still don't have a valid message so we block on the event queue 68 | return get_consumer_queue().queue.consume(timeout); 69 | } 70 | 71 | std::vector RoundRobinPollStrategy::poll_batch(size_t max_batch_size) { 72 | return poll_batch(max_batch_size, get_consumer().get_timeout(), allocator()); 73 | } 74 | 75 | std::vector RoundRobinPollStrategy::poll_batch(size_t max_batch_size, 76 | milliseconds timeout) { 77 | return poll_batch(max_batch_size, timeout, allocator()); 78 | } 79 | 80 | void RoundRobinPollStrategy::restore_forwarding() { 81 | // forward all partition queues 82 | for (const auto& toppar : get_partition_queues()) { 83 | toppar.second.queue.forward_to_queue(get_consumer_queue().queue); 84 | } 85 | } 86 | 87 | QueueData& RoundRobinPollStrategy::get_next_queue() { 88 | if (get_partition_queues().empty()) { 89 | throw QueueException(RD_KAFKA_RESP_ERR__STATE); 90 | } 91 | if (++queue_iter_ 
== get_partition_queues().end()) { 92 | queue_iter_ = get_partition_queues().begin(); 93 | } 94 | return queue_iter_->second; 95 | } 96 | 97 | void RoundRobinPollStrategy::reset_state() { 98 | queue_iter_ = get_partition_queues().begin(); 99 | } 100 | 101 | } //cppkafka 102 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/) 2 | include_directories(SYSTEM ${CATCH_INCLUDE}) 3 | 4 | if (NOT KAFKA_TEST_INSTANCE) 5 | set(KAFKA_TEST_INSTANCE kafka-vm:9092 6 | CACHE STRING "The kafka instance to which to connect to run tests") 7 | endif() 8 | if (NOT KAFKA_NUM_PARTITIONS) 9 | set(KAFKA_NUM_PARTITIONS 3 CACHE STRING "Kafka Number of partitions") 10 | endif() 11 | if (NOT KAFKA_TOPICS) 12 | set(KAFKA_TOPICS "cppkafka_test1;cppkafka_test2" CACHE STRING "Kafka topics") 13 | endif() 14 | 15 | # Convert list of topics into a C++ initializer list 16 | FOREACH(TOPIC ${KAFKA_TOPICS}) 17 | if (NOT TOPIC_LIST) 18 | set(TOPIC_LIST "\"${TOPIC}\"") 19 | else() 20 | set(TOPIC_LIST "${TOPIC_LIST},\"${TOPIC}\"") 21 | endif() 22 | ENDFOREACH() 23 | 24 | add_custom_target(tests) 25 | 26 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) 27 | add_definitions( 28 | "-DKAFKA_TEST_INSTANCE=\"${KAFKA_TEST_INSTANCE}\"" 29 | -DKAFKA_NUM_PARTITIONS=${KAFKA_NUM_PARTITIONS} 30 | -DKAFKA_TOPIC_NAMES=${TOPIC_LIST} 31 | ) 32 | 33 | add_executable(cppkafka_tests 34 | buffer_test.cpp 35 | compacted_topic_processor_test.cpp 36 | configuration_test.cpp 37 | topic_partition_list_test.cpp 38 | kafka_handle_base_test.cpp 39 | producer_test.cpp 40 | consumer_test.cpp 41 | roundrobin_poll_test.cpp 42 | headers_test.cpp 43 | test_utils.cpp 44 | 45 | # Main file 46 | test_main.cpp 47 | ) 48 | 49 | # In CMake >= 3.15 Boost::boost == Boost::headers 50 | target_link_libraries(cppkafka_tests cppkafka RdKafka::rdkafka 
Boost::boost) 51 | add_dependencies(tests cppkafka_tests) 52 | add_test(cppkafka cppkafka_tests) 53 | -------------------------------------------------------------------------------- /tests/buffer_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "cppkafka/buffer.h" 7 | 8 | using std::string; 9 | using std::vector; 10 | using std::array; 11 | using std::ostringstream; 12 | 13 | using namespace cppkafka; 14 | 15 | TEST_CASE("conversions", "[buffer]") { 16 | const string data = "Hello world!"; 17 | const Buffer buffer(data); 18 | const Buffer empty_buffer; 19 | 20 | SECTION("construction") { 21 | CHECK_THROWS_AS(Buffer((const char*)nullptr, 5), Exception); 22 | } 23 | 24 | SECTION("bool conversion") { 25 | CHECK(!!buffer == true); 26 | CHECK(!!empty_buffer == false); 27 | } 28 | 29 | SECTION("string conversion") { 30 | CHECK(static_cast(buffer) == data); 31 | CHECK(static_cast(empty_buffer).empty()); 32 | } 33 | 34 | SECTION("vector conversion") { 35 | const vector buffer_as_vector = buffer; 36 | CHECK(string(buffer_as_vector.begin(), buffer_as_vector.end()) == data); 37 | } 38 | } 39 | 40 | TEST_CASE("construction", "[buffer]") { 41 | // From string 42 | const string str_data = "Hello world!"; 43 | // From vector 44 | const vector vector_data(str_data.begin(), str_data.end()); 45 | // From array 46 | const array array_data{{'H','e','l','l','o',' ','w','o','r','l','d','!'}}; 47 | // From raw array 48 | const char raw_array[12]{'H','e','l','l','o',' ','w','o','r','l','d','!'}; 49 | 50 | // Build buffers 51 | const Buffer buffer(vector_data); //vector 52 | const Buffer buffer2(vector_data.begin(), vector_data.end()); //iterators 53 | const Buffer buffer3(str_data.data(), str_data.data() + str_data.size()); //char iterators 54 | const Buffer buffer4(array_data); //arrays 55 | const Buffer buffer5(raw_array); //raw arrays 56 | const Buffer 
buffer6(str_data); //string 57 | const Buffer buffer7(str_data.data(), str_data.size()); //type + size 58 | 59 | // Test 60 | CHECK(str_data == buffer); 61 | CHECK(buffer == buffer2); 62 | CHECK(buffer == buffer3); 63 | CHECK(buffer == buffer4); 64 | CHECK(buffer == buffer5); 65 | CHECK(buffer == buffer6); 66 | CHECK(buffer == buffer7); 67 | } 68 | 69 | 70 | TEST_CASE("comparison", "[buffer]") { 71 | const string data = "Hello world!"; 72 | const Buffer buffer1(data); 73 | const Buffer buffer2(data); 74 | const Buffer empty_buffer; 75 | 76 | SECTION("equality") { 77 | CHECK(buffer1 == buffer2); 78 | CHECK(buffer2 == buffer1); 79 | } 80 | 81 | SECTION("inequality") { 82 | CHECK(buffer1 != empty_buffer); 83 | CHECK(empty_buffer != buffer1); 84 | } 85 | } 86 | 87 | TEST_CASE("stream extraction", "[buffer]") { 88 | const string data = "Hello \x7fwor\x03ld!"; 89 | const string pretty_string = "Hello \\x7fwor\\x03ld!"; 90 | const Buffer buffer(data); 91 | 92 | ostringstream output; 93 | output << buffer; 94 | CHECK(output.str() == pretty_string ); 95 | } 96 | -------------------------------------------------------------------------------- /tests/compacted_topic_processor_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "cppkafka/utils/buffered_producer.h" 9 | #include "cppkafka/consumer.h" 10 | #include "cppkafka/utils/compacted_topic_processor.h" 11 | #include "test_utils.h" 12 | 13 | using std::string; 14 | using std::to_string; 15 | using std::stoi; 16 | using std::set; 17 | using std::tie; 18 | using std::vector; 19 | using std::map; 20 | using std::move; 21 | using std::thread; 22 | using std::mutex; 23 | using std::unique_lock; 24 | using std::lock_guard; 25 | using std::condition_variable; 26 | 27 | using std::chrono::system_clock; 28 | using std::chrono::seconds; 29 | using std::chrono::milliseconds; 30 | 31 | using 
namespace cppkafka; 32 | 33 | static Configuration make_producer_config() { 34 | Configuration config; 35 | config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); 36 | return config; 37 | } 38 | 39 | static Configuration make_consumer_config() { 40 | Configuration config; 41 | config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); 42 | config.set("enable.auto.commit", false); 43 | config.set("group.id", "compacted_topic_test"); 44 | return config; 45 | } 46 | 47 | TEST_CASE("consumption", "[consumer][compacted]") { 48 | Consumer consumer(make_consumer_config()); 49 | // We'll use ints as the key, strings as the value 50 | using CompactedConsumer = CompactedTopicProcessor; 51 | using Event = CompactedConsumer::Event; 52 | CompactedConsumer compacted_consumer(consumer); 53 | // Convert the buffer to an int for the key 54 | compacted_consumer.set_key_decoder([](const Buffer& buffer) { 55 | return stoi(buffer); 56 | }); 57 | // We won't use any formats on the value, just convert it to a string 58 | compacted_consumer.set_value_decoder([](int /*key*/, const Buffer& buffer) { 59 | return string(buffer); 60 | }); 61 | 62 | // Every time there's an event, we'll push it into a vector 63 | vector events; 64 | compacted_consumer.set_event_handler([&](const Event& event) { 65 | events.push_back(event); 66 | }); 67 | consumer.subscribe({ KAFKA_TOPICS[0] }); 68 | set eof_partitions; 69 | while (eof_partitions.size() != static_cast(KAFKA_NUM_PARTITIONS)) { 70 | Message msg = consumer.poll(); 71 | if (msg && msg.is_eof()) { 72 | eof_partitions.insert(msg.get_partition()); 73 | } 74 | } 75 | 76 | BufferedProducer producer(make_producer_config()); 77 | 78 | struct ElementType { 79 | string value; 80 | int partition; 81 | }; 82 | map elements = { 83 | {"42", {"hi there", 0}}, 84 | {"1337", {"heh", 1}} 85 | }; 86 | for (const auto& element_pair : elements) { 87 | const ElementType& element = element_pair.second; 88 | MessageBuilder builder(KAFKA_TOPICS[0]); 89 | 
builder.partition(element.partition).key(element_pair.first).payload(element.value); 90 | producer.produce(builder); 91 | } 92 | // Now erase the first element 93 | string deleted_key = "42"; 94 | producer.produce(MessageBuilder(KAFKA_TOPICS[0]).partition(0).key(deleted_key)); 95 | producer.flush(); 96 | 97 | for (size_t i = 0; i < 10; ++i) { 98 | compacted_consumer.process_event(); 99 | } 100 | 101 | size_t set_count = 0; 102 | size_t delete_count = 0; 103 | CHECK(events.empty() == false); 104 | for (const Event& event : events) { 105 | switch (event.get_type()) { 106 | case Event::SET_ELEMENT: 107 | { 108 | auto iter = elements.find(to_string(event.get_key())); 109 | REQUIRE(iter != elements.end()); 110 | CHECK(iter->second.value == event.get_value()); 111 | CHECK(iter->second.partition == event.get_partition()); 112 | set_count++; 113 | } 114 | break; 115 | case Event::DELETE_ELEMENT: 116 | CHECK(event.get_partition() == 0); 117 | CHECK(event.get_key() == 42); 118 | delete_count++; 119 | break; 120 | default: 121 | break; 122 | } 123 | } 124 | CHECK(set_count == 2); 125 | CHECK(delete_count == 1); 126 | } 127 | -------------------------------------------------------------------------------- /tests/configuration_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "cppkafka/configuration.h" 3 | #include "cppkafka/exceptions.h" 4 | 5 | using namespace cppkafka; 6 | 7 | using std::string; 8 | 9 | TEST_CASE("normal config", "[config]") { 10 | Configuration config; 11 | 12 | SECTION("get existing") { 13 | config.set("group.id", "foo").set("metadata.broker.list", "asd:9092"); 14 | CHECK(config.get("group.id") == "foo"); 15 | CHECK(config.get("metadata.broker.list") == "asd:9092"); 16 | CHECK(config.get("group.id") == "foo"); 17 | } 18 | 19 | SECTION("get non existent") { 20 | REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound); 21 | } 22 | 23 | SECTION("set overloads") { 24 | 
config.set("enable.auto.commit", true); 25 | config.set("auto.commit.interval.ms", 100); 26 | 27 | CHECK(config.get("enable.auto.commit") == "true"); 28 | CHECK(config.get("auto.commit.interval.ms") == "100"); 29 | CHECK(config.get("auto.commit.interval.ms") == 100); 30 | } 31 | 32 | SECTION("set multiple") { 33 | config = { 34 | { "group.id", "foo" }, 35 | { "metadata.broker.list", string("asd:9092") }, 36 | { "message.max.bytes", 2000 }, 37 | { "topic.metadata.refresh.sparse", true } 38 | }; 39 | 40 | CHECK(config.get("group.id") == "foo"); 41 | CHECK(config.get("metadata.broker.list") == "asd:9092"); 42 | CHECK(config.get("message.max.bytes") == 2000); 43 | CHECK(config.get("topic.metadata.refresh.sparse") == true); 44 | } 45 | 46 | SECTION("default topic config") { 47 | config.set_default_topic_configuration({{ "request.required.acks", 2 }}); 48 | 49 | const auto& topic_config = config.get_default_topic_configuration(); 50 | CHECK(!!topic_config == true); 51 | CHECK(topic_config->get("request.required.acks") == 2); 52 | } 53 | 54 | SECTION("get all") { 55 | config.set("enable.auto.commit", false); 56 | auto option_map = config.get_all(); 57 | CHECK(option_map.at("enable.auto.commit") == "false"); 58 | } 59 | } 60 | 61 | TEST_CASE("topic config", "[config]") { 62 | TopicConfiguration config; 63 | 64 | SECTION("get existing") { 65 | config.set("auto.commit.enable", true).set("offset.store.method", "broker"); 66 | CHECK(config.get("auto.commit.enable") == "true"); 67 | CHECK(config.get("offset.store.method") == "broker"); 68 | CHECK(config.get("auto.commit.enable") == true); 69 | } 70 | 71 | SECTION("get non existent") { 72 | REQUIRE_THROWS_AS(config.get("asd"), ConfigOptionNotFound); 73 | } 74 | 75 | SECTION("set multiple") { 76 | config = { 77 | { "compression.codec", "none" }, 78 | { "offset.store.method", string("file") }, 79 | { "request.required.acks", 2 }, 80 | { "produce.offset.report", true } 81 | }; 82 | CHECK(config.get("compression.codec") == "none"); 
83 | CHECK(config.get("offset.store.method") == "file"); 84 | CHECK(config.get("request.required.acks") == 2); 85 | CHECK(config.get("produce.offset.report") == true); 86 | } 87 | 88 | SECTION("get all") { 89 | config.set("auto.commit.enable", false); 90 | auto option_map = config.get_all(); 91 | CHECK(option_map.at("auto.commit.enable") == "false"); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /tests/kafka_handle_base_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "cppkafka/consumer.h" 5 | #include "cppkafka/producer.h" 6 | #include "cppkafka/metadata.h" 7 | #include "cppkafka/group_information.h" 8 | #include "test_utils.h" 9 | 10 | using std::vector; 11 | using std::set; 12 | using std::unordered_set; 13 | using std::string; 14 | 15 | using namespace cppkafka; 16 | 17 | Configuration make_config() { 18 | Configuration config; 19 | config.set("metadata.broker.list", KAFKA_TEST_INSTANCE); 20 | return config; 21 | } 22 | 23 | string get_kafka_host() { 24 | string uri = KAFKA_TEST_INSTANCE; 25 | size_t index = uri.find(':'); 26 | if (index == string::npos) { 27 | return uri; 28 | } 29 | else { 30 | return uri.substr(0, index); 31 | } 32 | } 33 | 34 | uint16_t get_kafka_port() { 35 | string uri = KAFKA_TEST_INSTANCE; 36 | size_t index = uri.find(':'); 37 | if (index == string::npos) { 38 | return 9092; 39 | } 40 | else { 41 | return stoul(uri.substr(index + 1)); 42 | } 43 | } 44 | 45 | TEST_CASE("metadata", "[handle_base]") { 46 | if (KAFKA_TOPICS.size() < 2) { 47 | return; //skip test 48 | } 49 | Producer producer({}); 50 | producer.add_brokers(KAFKA_TEST_INSTANCE); 51 | Metadata metadata = producer.get_metadata(); 52 | 53 | SECTION("brokers") { 54 | vector brokers = metadata.get_brokers(); 55 | REQUIRE(brokers.size() == 1); 56 | const auto& broker = brokers[0]; 57 | // TODO: resolve this 58 | 
//REQUIRE(broker.get_host() == get_kafka_host()); 59 | CHECK(broker.get_port() == get_kafka_port()); 60 | } 61 | 62 | SECTION("topics") { 63 | unordered_set topic_names = { KAFKA_TOPICS[0], KAFKA_TOPICS[1] }; 64 | size_t found_topics = 0; 65 | 66 | const vector& topics = metadata.get_topics(); 67 | CHECK(topics.size() >= 2); 68 | 69 | for (const auto& topic : topics) { 70 | if (topic_names.count(topic.get_name()) == 1) { 71 | const vector& partitions = topic.get_partitions(); 72 | REQUIRE(partitions.size() == KAFKA_NUM_PARTITIONS); 73 | set expected_ids; 74 | for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_ids.emplace(i++)); 75 | for (const PartitionMetadata& partition : partitions) { 76 | REQUIRE(expected_ids.erase(partition.get_id()) == 1); 77 | for (int32_t replica : partition.get_replicas()) { 78 | REQUIRE(replica == 0); 79 | } 80 | for (int32_t isr : partition.get_in_sync_replica_brokers()) { 81 | REQUIRE(isr == 0); 82 | } 83 | } 84 | found_topics++; 85 | } 86 | } 87 | CHECK(found_topics == topic_names.size()); 88 | 89 | // Find by names 90 | CHECK(metadata.get_topics(topic_names).size() == topic_names.size()); 91 | // Find by prefix 92 | CHECK(metadata.get_topics_prefixed("cppkafka_").size() == topic_names.size()); 93 | 94 | // Now get the whole metadata only for this topic 95 | Topic topic = producer.get_topic(KAFKA_TOPICS[0]); 96 | CHECK(producer.get_metadata(topic).get_name() == KAFKA_TOPICS[0]); 97 | } 98 | } 99 | 100 | TEST_CASE("consumer groups", "[handle_base]") { 101 | string consumer_group = "kafka_handle_test"; 102 | string client_id = "my_client_id"; 103 | 104 | Configuration config = make_config(); 105 | config.set("group.id", consumer_group); 106 | config.set("client.id", client_id); 107 | config.set("enable.auto.commit", false); 108 | 109 | // Build consumer 110 | Consumer consumer(config); 111 | consumer.subscribe({ KAFKA_TOPICS[0] }); 112 | ConsumerRunner runner(consumer, 0, 3); 113 | runner.try_join(); 114 | 115 | GroupInformation 
information = consumer.get_consumer_group(consumer_group); 116 | CHECK(information.get_name() == consumer_group); 117 | CHECK(information.get_protocol_type() == "consumer"); 118 | CHECK(information.get_members().size() == 1); 119 | 120 | auto member = information.get_members()[0]; 121 | CHECK(member.get_client_id() == client_id); 122 | 123 | MemberAssignmentInformation assignment = member.get_member_assignment(); 124 | CHECK(assignment.get_version() == 0); 125 | TopicPartitionList expected_topic_partitions; 126 | for (int i = 0; i < KAFKA_NUM_PARTITIONS; expected_topic_partitions.emplace_back(KAFKA_TOPICS[0], i++)); 127 | TopicPartitionList topic_partitions = assignment.get_topic_partitions(); 128 | sort(topic_partitions.begin(), topic_partitions.end()); 129 | CHECK(topic_partitions == expected_topic_partitions); 130 | } 131 | -------------------------------------------------------------------------------- /tests/roundrobin_poll_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "cppkafka/cppkafka.h" 12 | #include "test_utils.h" 13 | 14 | using std::vector; 15 | using std::move; 16 | using std::string; 17 | using std::exception; 18 | using std::thread; 19 | using std::set; 20 | using std::mutex; 21 | using std::tie; 22 | using std::condition_variable; 23 | using std::lock_guard; 24 | using std::unique_lock; 25 | using std::unique_ptr; 26 | using std::make_move_iterator; 27 | using std::chrono::seconds; 28 | using std::chrono::milliseconds; 29 | using std::chrono::system_clock; 30 | 31 | using namespace cppkafka; 32 | 33 | #define ENABLE_STRICT_RR_ORDER 0 34 | 35 | //================================================================================== 36 | // Helper functions 37 | //================================================================================== 38 | static 
Configuration make_producer_config() { 39 | Configuration config = { 40 | { "metadata.broker.list", KAFKA_TEST_INSTANCE }, 41 | { "max.in.flight", 1 } 42 | }; 43 | return config; 44 | } 45 | 46 | static Configuration make_consumer_config(const string& group_id = make_consumer_group_id()) { 47 | Configuration config = { 48 | { "metadata.broker.list", KAFKA_TEST_INSTANCE }, 49 | { "enable.auto.commit", false }, 50 | { "group.id", group_id }, 51 | }; 52 | return config; 53 | } 54 | 55 | #if ENABLE_STRICT_RR_ORDER 56 | static vector make_roundrobin_partition_vector(int total_messages) { 57 | vector partition_order; 58 | for (int i = 0, partition = 0; i < total_messages+1; ++i) { 59 | if ((i % KAFKA_NUM_PARTITIONS) == 0) { 60 | partition = 0; 61 | } 62 | partition_order.push_back(partition++); 63 | } 64 | return partition_order; 65 | } 66 | #endif 67 | 68 | //======================================================================== 69 | // TESTS 70 | //======================================================================== 71 | 72 | TEST_CASE("roundrobin consumer test", "[roundrobin consumer]") { 73 | TopicPartitionList assignment; 74 | int messages_per_partition = 3; 75 | int total_messages = KAFKA_NUM_PARTITIONS * messages_per_partition; 76 | 77 | // Create a consumer and subscribe to the topic 78 | PollStrategyAdapter consumer(make_consumer_config()); 79 | consumer.subscribe({ KAFKA_TOPICS[0] }); 80 | consumer.add_polling_strategy(unique_ptr(new RoundRobinPollStrategy(consumer))); 81 | 82 | PollConsumerRunner runner(consumer, total_messages, KAFKA_NUM_PARTITIONS); 83 | 84 | // Produce messages so we stop the consumer 85 | BufferedProducer producer(make_producer_config()); 86 | string payload = "RoundRobin"; 87 | 88 | // push 3 messages in each partition 89 | for (int i = 0; i < total_messages; ++i) { 90 | producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]) 91 | .partition(i % KAFKA_NUM_PARTITIONS) 92 | .payload(payload)); 93 | } 94 | producer.flush(); 95 | 96 | 
runner.try_join(); 97 | 98 | // Check that we have all messages 99 | REQUIRE(runner.get_messages().size() == total_messages); 100 | 101 | #if ENABLE_STRICT_RR_ORDER 102 | // Check that we have one message from each partition in desired order 103 | vector partition_order = make_roundrobin_partition_vector(total_messages+KAFKA_NUM_PARTITIONS); 104 | int partition_idx; 105 | for (int i = 0; i < total_messages; ++i) { 106 | if (i == 0) { 107 | // find first polled partition index 108 | partition_idx = runner.get_messages()[i].get_partition(); 109 | } 110 | CHECK(runner.get_messages()[i].get_partition() == partition_order[i+partition_idx]); 111 | REQUIRE((string)runner.get_messages()[i].get_payload() == payload); 112 | } 113 | 114 | //============ resume original poll strategy =============// 115 | //validate that once the round robin strategy is deleted, normal poll works as before 116 | consumer.delete_polling_strategy(); 117 | 118 | ConsumerRunner serial_runner(consumer, total_messages, KAFKA_NUM_PARTITIONS); 119 | 120 | payload = "SerialPolling"; 121 | // push 3 messages in each partition 122 | for (int i = 0; i < total_messages; ++i) { 123 | producer.sync_produce(MessageBuilder(KAFKA_TOPICS[0]).partition(i%KAFKA_NUM_PARTITIONS).payload(payload)); 124 | } 125 | producer.flush(); 126 | serial_runner.try_join(); 127 | 128 | // Check that we have all messages 129 | REQUIRE(serial_runner.get_messages().size() == total_messages); 130 | 131 | for (int i = 0; i < total_messages; ++i) { 132 | REQUIRE((string)serial_runner.get_messages()[i].get_payload() == payload); 133 | } 134 | #else 135 | // Simple payload check 136 | for (int i = 0; i < total_messages; ++i) { 137 | REQUIRE((string)runner.get_messages()[i].get_payload() == payload); 138 | } 139 | #endif 140 | } 141 | 142 | -------------------------------------------------------------------------------- /tests/test_main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 
#define CATCH_CONFIG_RUNNER 3 | #include 4 | 5 | using std::string; 6 | using std::chrono::steady_clock; 7 | using std::chrono::milliseconds; 8 | using std::chrono::duration_cast; 9 | 10 | using Catch::ConsoleReporter; 11 | using Catch::ReporterConfig; 12 | using Catch::ReporterPreferences; 13 | using Catch::TestCaseInfo; 14 | using Catch::TestCaseStats; 15 | using Catch::Totals; 16 | using Catch::Session; 17 | 18 | std::vector KAFKA_TOPICS = {KAFKA_TOPIC_NAMES}; 19 | 20 | namespace cppkafka { 21 | 22 | class InstantTestReporter : public ConsoleReporter { 23 | public: 24 | using ClockType = steady_clock; 25 | 26 | InstantTestReporter(const ReporterConfig& config) 27 | : ConsoleReporter(config) { 28 | } 29 | 30 | static string getDescription() { 31 | return "Reports the tests' progress as they run"; 32 | } 33 | 34 | ReporterPreferences getPreferences() const override { 35 | ReporterPreferences output; 36 | output.shouldRedirectStdOut = false; 37 | return output; 38 | } 39 | 40 | void testCaseStarting(const TestCaseInfo& info) override { 41 | ConsoleReporter::testCaseStarting(info); 42 | stream << "Running test \"" << info.name << "\" @ " << info.lineInfo << "\n"; 43 | test_start_ts_ = ClockType::now(); 44 | } 45 | 46 | void testCaseEnded(const TestCaseStats& stats) override { 47 | const Totals& totals = stats.totals; 48 | const size_t totalTestCases = totals.assertions.passed + totals.assertions.failed; 49 | const auto elapsed = ClockType::now() - test_start_ts_; 50 | stream << "Done. 
" << totals.assertions.passed << "/" << totalTestCases 51 | << " assertions succeeded in " << duration_cast(elapsed).count() 52 | << "ms\n"; 53 | } 54 | private: 55 | ClockType::time_point test_start_ts_; 56 | }; 57 | 58 | CATCH_REGISTER_REPORTER("instant", InstantTestReporter) 59 | 60 | } // cppkafka 61 | 62 | int main(int argc, char* argv[]) { 63 | Session session; 64 | 65 | int returnCode = session.applyCommandLine( argc, argv ); 66 | if (returnCode != 0) { 67 | return returnCode; 68 | } 69 | if (session.configData().reporterNames.empty()) { 70 | // Set our reporter as the default one 71 | session.configData().reporterNames.emplace_back("instant"); 72 | } 73 | 74 | int numFailed = session.run(); 75 | return numFailed; 76 | } 77 | -------------------------------------------------------------------------------- /tests/test_utils.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "test_utils.h" 7 | 8 | using std::chrono::duration_cast; 9 | using std::chrono::milliseconds; 10 | using std::chrono::seconds; 11 | using std::chrono::system_clock; 12 | using std::hex; 13 | using std::move; 14 | using std::numeric_limits; 15 | using std::ostringstream; 16 | using std::random_device; 17 | using std::string; 18 | using std::uniform_int_distribution; 19 | using std::unique_ptr; 20 | using std::vector; 21 | 22 | //================================================================================== 23 | // PollStrategyAdapter 24 | //================================================================================== 25 | 26 | PollStrategyAdapter::PollStrategyAdapter(Configuration config) 27 | : Consumer(config) { 28 | } 29 | 30 | void PollStrategyAdapter::add_polling_strategy(unique_ptr poll_strategy) { 31 | strategy_ = move(poll_strategy); 32 | } 33 | 34 | void PollStrategyAdapter::delete_polling_strategy() { 35 | strategy_.reset(); 36 | } 37 | 38 | Message 
PollStrategyAdapter::poll() { 39 | if (strategy_) { 40 | return strategy_->poll(); 41 | } 42 | return Consumer::poll(); 43 | } 44 | 45 | Message PollStrategyAdapter::poll(milliseconds timeout) { 46 | if (strategy_) { 47 | return strategy_->poll(timeout); 48 | } 49 | return Consumer::poll(timeout); 50 | } 51 | 52 | vector PollStrategyAdapter::poll_batch(size_t max_batch_size) { 53 | if (strategy_) { 54 | return strategy_->poll_batch(max_batch_size); 55 | } 56 | return Consumer::poll_batch(max_batch_size); 57 | } 58 | 59 | vector PollStrategyAdapter::poll_batch(size_t max_batch_size, milliseconds timeout) { 60 | if (strategy_) { 61 | return strategy_->poll_batch(max_batch_size, timeout); 62 | } 63 | return Consumer::poll_batch(max_batch_size, timeout); 64 | } 65 | 66 | void PollStrategyAdapter::set_timeout(milliseconds timeout) { 67 | if (strategy_) { 68 | strategy_->set_timeout(timeout); 69 | } 70 | else { 71 | Consumer::set_timeout(timeout); 72 | } 73 | } 74 | 75 | milliseconds PollStrategyAdapter::get_timeout() { 76 | if (strategy_) { 77 | return strategy_->get_timeout(); 78 | } 79 | return Consumer::get_timeout(); 80 | } 81 | 82 | // Misc 83 | 84 | string make_consumer_group_id() { 85 | ostringstream output; 86 | output << hex; 87 | 88 | random_device rd; 89 | uniform_int_distribution distribution(0, numeric_limits::max()); 90 | const auto now = duration_cast(system_clock::now().time_since_epoch()); 91 | const auto random_number = distribution(rd); 92 | output << now.count() << random_number; 93 | return output.str(); 94 | } 95 | -------------------------------------------------------------------------------- /tests/test_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef CPPKAFKA_TEST_UTILS_H 2 | #define CPPKAFKA_TEST_UTILS_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include "cppkafka/consumer.h" 8 | #include "cppkafka/utils/roundrobin_poll_strategy.h" 9 | #include "cppkafka/utils/consumer_dispatcher.h" 10 
| 11 | extern const std::vector KAFKA_TOPICS; 12 | 13 | using namespace cppkafka; 14 | 15 | //================================================================================== 16 | // BasicConsumerRunner 17 | //================================================================================== 18 | template 19 | class BasicConsumerRunner { 20 | public: 21 | BasicConsumerRunner(ConsumerType& consumer, 22 | size_t expected, 23 | size_t partitions); 24 | BasicConsumerRunner(const BasicConsumerRunner&) = delete; 25 | BasicConsumerRunner& operator=(const BasicConsumerRunner&) = delete; 26 | ~BasicConsumerRunner(); 27 | 28 | const std::vector& get_messages() const; 29 | 30 | void try_join(); 31 | private: 32 | ConsumerType& consumer_; 33 | std::thread thread_; 34 | std::vector messages_; 35 | }; 36 | 37 | //================================================================================== 38 | // PollStrategyAdapter 39 | //================================================================================== 40 | /** 41 | * \brief Specific implementation which can be used with other 42 | * util classes such as BasicConsumerDispatcher. 
43 | */ 44 | class PollStrategyAdapter : public Consumer { 45 | public: 46 | PollStrategyAdapter(Configuration config); 47 | void add_polling_strategy(std::unique_ptr poll_strategy); 48 | void delete_polling_strategy(); 49 | Message poll(); 50 | Message poll(std::chrono::milliseconds timeout); 51 | std::vector poll_batch(size_t max_batch_size); 52 | std::vector poll_batch(size_t max_batch_size, 53 | std::chrono::milliseconds timeout); 54 | void set_timeout(std::chrono::milliseconds timeout); 55 | std::chrono::milliseconds get_timeout(); 56 | private: 57 | std::unique_ptr strategy_; 58 | }; 59 | 60 | // Misc 61 | 62 | std::string make_consumer_group_id(); 63 | 64 | using PollConsumerRunner = BasicConsumerRunner; 65 | using ConsumerRunner = BasicConsumerRunner; 66 | 67 | 68 | #include "test_utils_impl.h" 69 | 70 | #endif // CPPKAFKA_TEST_UTILS_H 71 | -------------------------------------------------------------------------------- /tests/test_utils_impl.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "cppkafka/utils/consumer_dispatcher.h" 5 | 6 | using std::vector; 7 | using std::move; 8 | using std::thread; 9 | using std::mutex; 10 | using std::lock_guard; 11 | using std::unique_lock; 12 | using std::condition_variable; 13 | using std::chrono::system_clock; 14 | using std::chrono::milliseconds; 15 | using std::chrono::seconds; 16 | 17 | using cppkafka::Consumer; 18 | using cppkafka::BasicConsumerDispatcher; 19 | 20 | using cppkafka::Message; 21 | using cppkafka::TopicPartition; 22 | 23 | //================================================================================== 24 | // BasicConsumerRunner 25 | //================================================================================== 26 | template 27 | BasicConsumerRunner::BasicConsumerRunner(ConsumerType& consumer, 28 | size_t expected, 29 | size_t partitions) 30 | : consumer_(consumer) { 31 | bool booted = false; 32 | mutex 
mtx; 33 | condition_variable cond; 34 | thread_ = thread([&, expected, partitions]() { 35 | consumer_.set_timeout(milliseconds(500)); 36 | size_t number_eofs = 0; 37 | auto start = system_clock::now(); 38 | BasicConsumerDispatcher dispatcher(consumer_); 39 | dispatcher.run( 40 | // Message callback 41 | [&](Message msg) { 42 | if (number_eofs == partitions) { 43 | messages_.push_back(move(msg)); 44 | } 45 | }, 46 | // EOF callback 47 | [&](typename BasicConsumerDispatcher::EndOfFile, 48 | const TopicPartition& topic_partition) { 49 | if (number_eofs != partitions) { 50 | number_eofs++; 51 | if (number_eofs == partitions) { 52 | lock_guard _(mtx); 53 | booted = true; 54 | cond.notify_one(); 55 | } 56 | } 57 | }, 58 | // Every time there's any event callback 59 | [&](typename BasicConsumerDispatcher::Event) { 60 | if (expected > 0 && messages_.size() == expected) { 61 | dispatcher.stop(); 62 | } 63 | if (expected == 0 && number_eofs >= partitions) { 64 | dispatcher.stop(); 65 | } 66 | if (system_clock::now() - start >= seconds(20)) { 67 | dispatcher.stop(); 68 | } 69 | } 70 | ); 71 | // dispatcher has stopped 72 | if (number_eofs < partitions) { 73 | lock_guard _(mtx); 74 | booted = true; 75 | cond.notify_one(); 76 | } 77 | }); 78 | 79 | unique_lock lock(mtx); 80 | while (!booted) { 81 | cond.wait(lock); 82 | } 83 | } 84 | 85 | template 86 | BasicConsumerRunner::~BasicConsumerRunner() { 87 | try_join(); 88 | } 89 | 90 | template 91 | const std::vector& BasicConsumerRunner::get_messages() const { 92 | return messages_; 93 | } 94 | 95 | template 96 | void BasicConsumerRunner::try_join() { 97 | if (thread_.joinable()) { 98 | thread_.join(); 99 | } 100 | } 101 | 102 | 103 | -------------------------------------------------------------------------------- /tests/topic_partition_list_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "cppkafka/topic_partition_list.h" 4 | #include 
"cppkafka/topic_partition.h" 5 | 6 | using std::ostringstream; 7 | using std::set; 8 | using std::string; 9 | 10 | using namespace cppkafka; 11 | 12 | TEST_CASE("rdkafka conversion", "[topic_partition]") { 13 | TopicPartitionList list1; 14 | list1.push_back("foo"); 15 | list1.push_back({ "bar", 2 }); 16 | list1.push_back({ "asd", 2, TopicPartition::OFFSET_BEGINNING }); 17 | 18 | TopicPartitionList list2 = convert(convert(list1)); 19 | 20 | CHECK(list1.size() == list2.size()); 21 | for (size_t i = 0; i < list1.size(); ++i) { 22 | const auto& item1 = list1[i]; 23 | const auto& item2 = list2[i]; 24 | CHECK(item1.get_topic() == item2.get_topic()); 25 | CHECK(item1.get_partition() == item2.get_partition()); 26 | CHECK(item1.get_offset() == item2.get_offset()); 27 | } 28 | } 29 | 30 | TEST_CASE("topic partition to string", "[topic_partition]") { 31 | ostringstream output; 32 | TopicPartition topic_partition("foo", 5); 33 | output << topic_partition; 34 | CHECK(output.str() == "foo[5:#]"); 35 | } 36 | 37 | TEST_CASE("topic partition list to string", "[topic_partition]") { 38 | ostringstream output; 39 | TopicPartitionList list; 40 | list.push_back("foo"); 41 | list.push_back({ "bar", 2 }); 42 | list.push_back({ "foobar", 3, 4 }); 43 | 44 | output << list; 45 | CHECK(output.str() == "[ foo[-1:#], bar[2:#], foobar[3:4] ]"); 46 | } 47 | 48 | TEST_CASE("find matches by topic", "[topic_partition]") { 49 | const TopicPartitionList list = { 50 | { "foo", 0 }, 51 | { "bar", 3 }, 52 | { "fb", 1 }, 53 | { "foo", 1 }, 54 | { "fb", 2 }, 55 | { "other", 1 }, 56 | { "a", 1 } 57 | }; 58 | 59 | const TopicPartitionList expected = { 60 | { "foo", 0 }, 61 | { "fb", 1 }, 62 | { "foo", 1 }, 63 | { "fb", 2 }, 64 | }; 65 | const TopicPartitionList subset = find_matches(list, set{"foo", "fb"}); 66 | CHECK(subset == expected); 67 | } 68 | 69 | TEST_CASE("find matches by id", "[topic_partition]") { 70 | const TopicPartitionList list = { 71 | { "foo", 2 }, 72 | { "foo", 3 }, 73 | { "foo", 4 }, 74 
| { "foo", 5 }, 75 | { "foo", 6 }, 76 | { "foo", 7 }, 77 | { "foo", 8 } 78 | }; 79 | 80 | const TopicPartitionList expected = { 81 | { "foo", 2 }, 82 | { "foo", 5 }, 83 | { "foo", 8 }, 84 | }; 85 | const TopicPartitionList subset = find_matches(list, set{2,5,8}); 86 | CHECK(subset == expected); 87 | } 88 | --------------------------------------------------------------------------------