├── .gitmodules ├── kafka ├── version.lua ├── tnt_kafka.h ├── producer.h ├── CMakeLists.txt ├── queue.h ├── consumer_msg.h ├── consumer.h ├── queue.c ├── common.h ├── tnt_kafka.c ├── consumer_msg.c ├── callbacks.h ├── callbacks.c ├── common.c ├── init.lua └── producer.c ├── tests ├── requirements.txt ├── app.lua ├── producer.lua ├── test_producer.py ├── consumer.lua └── test_consumer.py ├── .gitignore ├── examples ├── static-build │ ├── README.md │ └── Dockerfile ├── producer │ ├── async_producer.lua │ └── sync_producer.lua └── consumer │ ├── auto_offset_store.lua │ └── manual_offset_store.lua ├── patches ├── librdkafka-tarantool-security-36.patch ├── librdkafka-fix-centos7.patch ├── librdkafka-tarantool-security-72.patch ├── librdkafka-tarantool-security-55.patch ├── librdkafka-tarantool-security-71.patch ├── librdkafka-tarantool-security-70.patch ├── librdkafka-tarantool-security-52.patch ├── librdkafka-tarantool-security-47.patch ├── librdkafka-fix-ubsan.patch └── librdkafka-tarantool-security-94.patch ├── docker └── Dockerfile ├── kafka-1.1.0-0.rockspec ├── cmake ├── FindRdKafka.cmake └── FindTarantool.cmake ├── kafka-scm-1.rockspec ├── .github └── workflows │ ├── publish.yml │ ├── fast_testing.yml │ └── asan_testing.yml ├── benchmarks ├── async_producer.lua ├── sync_producer.lua ├── auto_offset_store_consumer.lua └── manual_offset_store_consumer.lua ├── CMakeLists.txt ├── README.md ├── Makefile └── LICENSE /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "librdkafka"] 2 | path = librdkafka 3 | url = https://github.com/confluentinc/librdkafka.git 4 | -------------------------------------------------------------------------------- /kafka/version.lua: -------------------------------------------------------------------------------- 1 | -- Contains the module version. 2 | -- Requires manual update in case of release commit. 3 | 4 | return '1.6.14' 5 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest==8.4.2 2 | pytest-timeout==2.4.0 3 | kafka-python-ng==2.2.3 4 | aiokafka==0.12.0 5 | tarantool==1.2.0 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .rocks 3 | tests/venv 4 | tests/.pytest_cache 5 | tests/__* 6 | cmake-build-debug 7 | build.luarocks/ 8 | *.xlog 9 | *.snap 10 | -------------------------------------------------------------------------------- /examples/static-build/README.md: -------------------------------------------------------------------------------- 1 | Static kafka build 2 | --- 3 | 4 | ```bash 5 | $ docker buildx build --target export --output rocks . 
6 | $ ls rocks 7 | kafka-scm-1.linux-x86_64.rock 8 | ``` 9 | -------------------------------------------------------------------------------- /tests/app.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | local box = require('box') 4 | 5 | box.cfg{ 6 | listen = 3301 7 | } 8 | 9 | box.once('init', function() 10 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 11 | end) 12 | 13 | 14 | rawset(_G, 'consumer', require('tests.consumer')) 15 | rawset(_G, 'producer', require('tests.producer')) 16 | -------------------------------------------------------------------------------- /kafka/tnt_kafka.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_TNT_KAFKA_H 2 | #define TNT_KAFKA_TNT_KAFKA_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | //////////////////////////////////////////////////////////////////////////////////////////////////// 9 | /** 10 | * Entry point 11 | */ 12 | 13 | LUA_API int luaopen_kafka_tntkafka(lua_State *L); 14 | 15 | #endif //TNT_KAFKA_TNT_KAFKA_H 16 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-36.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdhttp.c b/src/rdhttp.c 2 | index dca6c6f8..5a290c5b 100644 3 | --- a/src/rdhttp.c 4 | +++ b/src/rdhttp.c 5 | @@ -345,6 +345,7 @@ rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk, 6 | 7 | /* Retry */ 8 | rd_http_error_destroy(herr); 9 | + herr = 0; 10 | rd_usleep(retry_ms * 1000 * (i + 1), &rk->rk_terminate); 11 | } 12 | 13 | -------------------------------------------------------------------------------- /patches/librdkafka-fix-centos7.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdrand.c b/src/rdrand.c 2 | index 77d02cfc..ad388e2a 100644 3 | --- a/src/rdrand.c 4 | +++ b/src/rdrand.c 5 | @@ -32,10 +32,12 @@ 6 | #include "tinycthread.h" 7 | #include "rdmurmur2.h" 8 | #ifndef _WIN32 9 | +#ifdef HAVE_GETENTROPY 10 | /* getentropy() can be present in one of these two */ 11 | #include 12 | #include 13 | #endif 14 | +#endif 15 | 16 | #ifdef HAVE_OSSL_SECURE_RAND_BYTES 17 | #include 18 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tarantool/tarantool:1.x-centos7 2 | 3 | RUN yum update -y 4 | 5 | RUN yum install -y cmake \ 6 | gcc \ 7 | gcc-c++ \ 8 | cyrus-sasl-lib \ 9 | openssl-libs \ 10 | openssl-devel \ 11 | tarantool-devel 12 | 13 | ADD . /opt/tarantool 14 | 15 | WORKDIR /opt/tarantool 16 | 17 | ENV STATIC_BUILD ON 18 | ENV WITH_OPENSSL_1_1 OFF 19 | 20 | RUN cmake . 
21 | 22 | RUN make 23 | 24 | ENTRYPOINT tarantool /opt/tarantool/tests/app.lua 25 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-72.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_subscription.c b/src/rdkafka_subscription.c 2 | index 08058935..2974d0dc 100644 3 | --- a/src/rdkafka_subscription.c 4 | +++ b/src/rdkafka_subscription.c 5 | @@ -196,8 +196,8 @@ const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk) { 6 | } 7 | 8 | result = rko->rko_u.rebalance_protocol.str; 9 | - 10 | - rd_kafka_op_destroy(rko); 11 | + rd_kafka_op_t *rko_ = rko; 12 | + rd_kafka_op_destroy(rko_); 13 | 14 | return result; 15 | } 16 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-55.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c 2 | index 2d889e09..4e26a40c 100644 3 | --- a/src/rdkafka_partition.c 4 | +++ b/src/rdkafka_partition.c 5 | @@ -1162,7 +1162,7 @@ void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp, 6 | if (rktp->rktp_broker || rkb) 7 | rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb); 8 | 9 | - if (internal_fallback) 10 | + if (internal_fallback && rkb) 11 | rd_kafka_broker_destroy(rkb); 12 | } 13 | 14 | -------------------------------------------------------------------------------- /examples/static-build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.gitlab.com/runfor/envs/centos:7-build as static-kafka 2 | 3 | RUN set -x \ 4 | && git clone --recurse-submodules https://github.com/tarantool/kafka /opt/kafka \ 5 | && wget -P /etc/yum.repos.d/ https://copr.fedorainfracloud.org/coprs/bgstack15/stackrpms/repo/epel-7/bgstack15-stackrpms-epel-7.repo \ 6 | && yum install -y tarantool tarantool-devel openssl110 7 | 8 | WORKDIR /opt/kafka 9 | 10 | RUN tarantoolctl rocks STATIC_BUILD=ON make \ 11 | && tarantoolctl rocks pack kafka 12 | 13 | FROM scratch as export 14 | COPY --from=static-kafka /opt/kafka/kafka-scm-1.linux-x86_64.rock / 15 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-71.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_topic.c b/src/rdkafka_topic.c 2 | index 7f79a2ff..eddd2b5a 100644 3 | --- a/src/rdkafka_topic.c 4 | +++ b/src/rdkafka_topic.c 5 | @@ -854,6 +854,7 @@ static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt, 6 | /* Remove from desp list since the 7 | * partition is now known. 
*/ 8 | rd_kafka_toppar_desired_unlink(rktp); 9 | + rktp = NULL; 10 | } else { 11 | rktp = rd_kafka_toppar_new(rkt, i); 12 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-70.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/lz4.c b/src/lz4.c 2 | index 335e2a03..6299373a 100644 3 | --- a/src/lz4.c 4 | +++ b/src/lz4.c 5 | @@ -1009,7 +1009,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( 6 | } /* too far */ 7 | assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ 8 | 9 | - if (LZ4_read32(match) == LZ4_read32(ip)) { 10 | + if (match != NULL && LZ4_read32(match) == LZ4_read32(ip)) { 11 | if (maybe_extMem) offset = current - matchIndex; 12 | break; /* match found */ 13 | } 14 | -------------------------------------------------------------------------------- /kafka-1.1.0-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "kafka" 2 | version = "1.1.0-0" 3 | source = { 4 | url = "git+https://github.com/tarantool/kafka.git", 5 | branch = 'master', 6 | } 7 | description = { 8 | summary = "Kafka library for Tarantool", 9 | homepage = "https://github.com/tarantool/kafka", 10 | license = "Apache", 11 | } 12 | dependencies = { 13 | "lua >= 5.1" -- actually tarantool > 1.6 14 | } 15 | external_dependencies = { 16 | TARANTOOL = { 17 | header = 'tarantool/module.h' 18 | } 19 | } 20 | build = { 21 | type = 'cmake'; 22 | variables = { 23 | CMAKE_BUILD_TYPE="RelWithDebInfo", 24 | TARANTOOL_DIR="$(TARANTOOL_DIR)", 25 | TARANTOOL_INSTALL_LIBDIR="$(LIBDIR)", 26 | TARANTOOL_INSTALL_LUADIR="$(LUADIR)", 27 | STATIC_BUILD="$(STATIC_BUILD)", 28 | WITH_OPENSSL_1_1="$(WITH_OPENSSL_1_1)" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /cmake/FindRdKafka.cmake: -------------------------------------------------------------------------------- 1 | find_path(RDKAFKA_ROOT_DIR 2 | NAMES include/librdkafka/rdkafka.h 3 | ) 4 | 5 | find_path(RDKAFKA_INCLUDE_DIR 6 | NAMES librdkafka/rdkafka.h 7 | HINTS ${RDKAFKA_ROOT_DIR}/include 8 | ) 9 | 10 | find_library(RDKAFKA_LIBRARY 11 | NAMES ${CMAKE_SHARED_LIBRARY_PREFIX}rdkafka${CMAKE_SHARED_LIBRARY_SUFFIX} rdkafka 12 | HINTS ${RDKAFKA_ROOT_DIR}/lib 13 | ) 14 | 15 | find_library(RDKAFKA_STATIC 16 | NAMES ${CMAKE_STATIC_LIBRARY_PREFIX}rdkafka${CMAKE_STATIC_LIBRARY_SUFFIX} rdkafka 17 | HINTS ${RDKAFKA_ROOT_DIR}/lib 18 | ) 19 | 20 | include(FindPackageHandleStandardArgs) 21 | find_package_handle_standard_args(RDKAFKA DEFAULT_MSG 22 | RDKAFKA_LIBRARY 23 | RDKAFKA_INCLUDE_DIR 24 | ) 25 | 26 | mark_as_advanced( 27 | RDKAFKA_ROOT_DIR 28 | RDKAFKA_INCLUDE_DIR 29 | RDKAFKA_LIBRARY 30 | ) 31 | -------------------------------------------------------------------------------- /kafka/producer.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_PRODUCER_H 2 | #define TNT_KAFKA_PRODUCER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | int 9 | lua_producer_tostring(struct lua_State *L); 10 | 11 | int 12 | lua_producer_msg_delivery_poll(struct lua_State *L); 13 | 14 | int 15 | lua_producer_poll_logs(struct lua_State *L); 16 | 17 | int 18 | lua_producer_poll_stats(struct lua_State *L); 19 | 20 | int 21 | lua_producer_poll_errors(struct lua_State *L); 22 | 23 | int 24 | lua_producer_produce(struct lua_State *L); 25 | 26 | int 27 | lua_producer_close(struct lua_State 
*L); 28 | 29 | int 30 | lua_create_producer(struct lua_State *L); 31 | 32 | int 33 | lua_producer_destroy(struct lua_State *L); 34 | 35 | int 36 | lua_producer_dump_conf(struct lua_State *L); 37 | 38 | int 39 | lua_producer_metadata(struct lua_State *L); 40 | 41 | int 42 | lua_producer_list_groups(struct lua_State *L); 43 | 44 | #endif //TNT_KAFKA_PRODUCER_H 45 | -------------------------------------------------------------------------------- /kafka-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "kafka" 2 | version = "scm-1" 3 | source = { 4 | url = "git+https://github.com/tarantool/kafka.git", 5 | branch = 'master', 6 | } 7 | description = { 8 | summary = "Kafka library for Tarantool", 9 | homepage = "https://github.com/tarantool/kafka", 10 | license = "Apache", 11 | } 12 | dependencies = { 13 | "lua >= 5.1" -- actually tarantool > 1.6 14 | } 15 | external_dependencies = { 16 | TARANTOOL = { 17 | header = 'tarantool/module.h' 18 | } 19 | } 20 | build = { 21 | type = 'cmake'; 22 | variables = { 23 | CMAKE_BUILD_TYPE="RelWithDebInfo", 24 | TARANTOOL_DIR="$(TARANTOOL_DIR)", 25 | TARANTOOL_INSTALL_LIBDIR="$(LIBDIR)", 26 | TARANTOOL_INSTALL_LUADIR="$(LUADIR)", 27 | STATIC_BUILD="$(STATIC_BUILD)", 28 | ENABLE_ASAN="$(ENABLE_ASAN)", 29 | ENABLE_UBSAN="$(ENABLE_UBSAN)", 30 | WITH_OPENSSL_1_1="$(WITH_OPENSSL_1_1)", 31 | WITH_GSSAPI="$(WITH_GSSAPI)", 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-52.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c 2 | index a8a1204b..65fbcbfd 100644 3 | --- a/src/rdkafka_conf.c 4 | +++ b/src/rdkafka_conf.c 5 | @@ -3463,7 +3463,7 @@ rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, 6 | return RD_KAFKA_RESP_ERR__INVALID_ARG; 7 | } 8 | 9 | - vlen = strlen(v); 10 | + vlen = v ? strlen(v) : 0; 11 | if ((confval->u.STR.minlen || confval->u.STR.maxlen) && 12 | (vlen < confval->u.STR.minlen || 13 | vlen > confval->u.STR.maxlen)) { 14 | @@ -3479,7 +3479,7 @@ rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, 15 | if (confval->u.STR.v) 16 | rd_free(confval->u.STR.v); 17 | 18 | - confval->u.STR.v = rd_strdup(v); 19 | + confval->u.STR.v = v ? 
rd_strdup(v) : rd_strdup(""); 20 | } break; 21 | 22 | case RD_KAFKA_CONFVAL_PTR: 23 | -------------------------------------------------------------------------------- /kafka/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${RDKAFKA_INCLUDE_DIR}) 2 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) 3 | 4 | add_library(tntkafka SHARED tnt_kafka.c callbacks.c consumer.c consumer_msg.c producer.c queue.c common.c) 5 | 6 | if (SANITIZER_FLAGS) 7 | separate_arguments(SANITIZER_FLAGS UNIX_COMMAND "${SANITIZER_FLAGS}") 8 | target_compile_options(tntkafka PRIVATE ${SANITIZER_FLAGS}) 9 | target_link_options(tntkafka PRIVATE ${SANITIZER_FLAGS}) 10 | endif() 11 | 12 | if (APPLE) 13 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} \ 14 | -undefined suppress -flat_namespace") 15 | endif(APPLE) 16 | 17 | find_package(Threads REQUIRED) 18 | target_link_libraries(tntkafka ${CMAKE_THREAD_LIBS_INIT}) 19 | set_target_properties(tntkafka PROPERTIES C_VISIBILITY_PRESET hidden) 20 | 21 | target_link_libraries(tntkafka ${RDKAFKA_LIBRARY}) 22 | set_target_properties(tntkafka PROPERTIES PREFIX "" OUTPUT_NAME "tntkafka") 23 | 24 | install(TARGETS tntkafka LIBRARY DESTINATION ${TARANTOOL_INSTALL_LIBDIR}/kafka) 25 | install(FILES init.lua version.lua DESTINATION ${TARANTOOL_INSTALL_LUADIR}/kafka) 26 | -------------------------------------------------------------------------------- /kafka/queue.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_QUEUE_H 2 | #define TNT_KAFKA_QUEUE_H 3 | 4 | #include 5 | 6 | //////////////////////////////////////////////////////////////////////////////////////////////////// 7 | /** 8 | * General thread safe queue based on linked list 9 | */ 10 | 11 | typedef struct queue_node_t { 12 | void *value; 13 | struct queue_node_t *next; 14 | } queue_node_t; 15 | 16 | typedef struct { 17 | pthread_mutex_t lock; 18 | queue_node_t *head; 19 | queue_node_t *tail; 20 | int count; 21 | } queue_t; 22 | 23 | /** 24 | * Pop without locking mutex. 25 | * Caller must lock and unlock queue mutex by itself. 26 | * Use with caution! 27 | * @param queue 28 | * @return 29 | */ 30 | void * 31 | queue_lockfree_pop(queue_t *queue); 32 | 33 | void * 34 | queue_pop(queue_t *queue); 35 | 36 | /** 37 | * Push without locking mutex. 38 | * Caller must lock and unlock queue mutex by itself. 39 | * Use with caution! 
40 | * @param queue 41 | * @param value 42 | * @return 43 | */ 44 | void 45 | queue_lockfree_push(queue_t *queue, void *value); 46 | 47 | int 48 | queue_push(queue_t *queue, void *value); 49 | 50 | queue_t * 51 | new_queue(); 52 | 53 | void destroy_queue(queue_t *queue); 54 | 55 | #endif // TNT_KAFKA_QUEUE_H 56 | -------------------------------------------------------------------------------- /examples/producer/async_producer.lua: -------------------------------------------------------------------------------- 1 | local os = require('os') 2 | local log = require('log') 3 | local tnt_kafka = require('kafka') 4 | 5 | local error_callback = function(err) 6 | log.error("got error: %s", err) 7 | end 8 | local log_callback = function(fac, str, level) 9 | log.info("got log: %d - %s - %s", level, fac, str) 10 | end 11 | 12 | local producer, err = tnt_kafka.Producer.create({ 13 | brokers = "kafka:9092", -- brokers for bootstrap 14 | options = {}, -- options for librdkafka 15 | error_callback = error_callback, -- optional callback for errors 16 | log_callback = log_callback, -- optional callback for logs and debug messages 17 | default_topic_options = { 18 | ["partitioner"] = "murmur2_random", 19 | }, -- optional default topic options 20 | }) 21 | if err ~= nil then 22 | print(err) 23 | os.exit(1) 24 | end 25 | 26 | for i = 1, 1000 do 27 | local err = producer:produce_async({ -- don't wait until the message is delivered to kafka 28 | topic = "test_topic", 29 | key = "test_key", 30 | value = "test_value" -- only strings allowed 31 | }) 32 | if err ~= nil then 33 | print(err) 34 | os.exit(1) 35 | end 36 | end 37 | 38 | producer:close() -- always close the producer to send all pending messages and free all used resources before the app exits 39 | -------------------------------------------------------------------------------- /kafka/consumer_msg.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_CONSUMER_MSG_H 2 | #define TNT_KAFKA_CONSUMER_MSG_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | 10 | //////////////////////////////////////////////////////////////////////////////////////////////////// 11 | /** 12 | * Consumer Message 13 | */ 14 | typedef struct { 15 | rd_kafka_headers_t *headers; 16 | int32_t partition; 17 | char *value; 18 | size_t value_len; 19 | char *key; 20 | size_t key_len; 21 | int64_t offset; 22 | char *topic_name; 23 | } msg_t; 24 | 25 | msg_t *lua_check_consumer_msg(struct lua_State *L, int index); 26 | 27 | msg_t *new_consumer_msg(const rd_kafka_message_t *rd_message); 28 | 29 | void destroy_consumer_msg(msg_t *msg); 30 | 31 | int lua_consumer_msg_topic(struct lua_State *L); 32 | 33 | int lua_consumer_msg_partition(struct lua_State *L); 34 | 35 | int lua_consumer_msg_headers(struct lua_State *L); 36 | 37 | int lua_consumer_msg_offset(struct lua_State *L); 38 | 39 | int lua_consumer_msg_key(struct lua_State *L); 40 | 41 | int lua_consumer_msg_value(struct lua_State *L); 42 | 43 | int lua_consumer_msg_tostring(struct lua_State *L); 44 | 45 | int lua_consumer_msg_gc(struct lua_State *L); 46 | 47 | #endif //TNT_KAFKA_CONSUMER_MSG_H 48 | -------------------------------------------------------------------------------- /kafka/consumer.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_CONSUMER_H 2 | #define TNT_KAFKA_CONSUMER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | int 9 | lua_consumer_subscribe(struct lua_State *L); 10 | 11 | int 12 | 
lua_consumer_unsubscribe(struct lua_State *L); 13 | 14 | int 15 | lua_consumer_tostring(struct lua_State *L); 16 | 17 | int 18 | lua_consumer_poll_msg(struct lua_State *L); 19 | 20 | int 21 | lua_consumer_poll_logs(struct lua_State *L); 22 | 23 | int 24 | lua_consumer_poll_stats(struct lua_State *L); 25 | 26 | int 27 | lua_consumer_poll_errors(struct lua_State *L); 28 | 29 | int 30 | lua_consumer_poll_rebalances(struct lua_State *L); 31 | 32 | int 33 | lua_consumer_store_offset(struct lua_State *L); 34 | 35 | int 36 | lua_consumer_seek_partitions(struct lua_State *L); 37 | 38 | int 39 | lua_consumer_close(struct lua_State *L); 40 | 41 | int 42 | lua_consumer_destroy(struct lua_State *L); 43 | 44 | int 45 | lua_create_consumer(struct lua_State *L); 46 | 47 | int 48 | lua_consumer_dump_conf(struct lua_State *L); 49 | 50 | int 51 | lua_consumer_metadata(struct lua_State *L); 52 | 53 | int 54 | lua_consumer_list_groups(struct lua_State *L); 55 | 56 | int 57 | lua_consumer_pause(struct lua_State *L); 58 | 59 | int 60 | lua_consumer_resume(struct lua_State *L); 61 | 62 | int 63 | lua_consumer_rebalance_protocol(struct lua_State *L); 64 | 65 | int 66 | lua_consumer_offsets_for_times(struct lua_State *L); 67 | 68 | #endif //TNT_KAFKA_CONSUMER_H 69 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-47.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_assignment.c b/src/rdkafka_assignment.c 2 | index 6d1f0191..ee4cea61 100644 3 | --- a/src/rdkafka_assignment.c 4 | +++ b/src/rdkafka_assignment.c 5 | @@ -315,21 +315,22 @@ static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk, 6 | rd_kafka_dbg( 7 | rk, CGRP, "OFFSET", 8 | "Offset fetch error for %d partition(s): %s", 9 | - offsets->cnt, rd_kafka_err2str(err)); 10 | + offsets ? offsets->cnt : -1, rd_kafka_err2str(err)); 11 | rd_kafka_consumer_err( 12 | rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, 13 | NULL, NULL, RD_KAFKA_OFFSET_INVALID, 14 | "Failed to fetch committed offsets for " 15 | "%d partition(s) in group \"%s\": %s", 16 | - offsets->cnt, rk->rk_group_id->str, 17 | + offsets ? 
offsets->cnt : -1, rk->rk_group_id->str, 18 | rd_kafka_err2str(err)); 19 | } 20 | } 21 | 22 | /* Apply the fetched offsets to the assignment */ 23 | - rd_kafka_assignment_apply_offsets(rk, offsets, err); 24 | - 25 | - rd_kafka_topic_partition_list_destroy(offsets); 26 | + if (offsets) { 27 | + rd_kafka_assignment_apply_offsets(rk, offsets, err); 28 | + rd_kafka_topic_partition_list_destroy(offsets); 29 | + } 30 | } 31 | -------------------------------------------------------------------------------- /examples/producer/sync_producer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local os = require('os') 3 | local log = require('log') 4 | local tnt_kafka = require('kafka') 5 | 6 | local error_callback = function(err) 7 | log.error("got error: %s", err) 8 | end 9 | local log_callback = function(fac, str, level) 10 | log.info("got log: %d - %s - %s", level, fac, str) 11 | end 12 | 13 | local producer, err = tnt_kafka.Producer.create({ 14 | brokers = "kafka:9092", -- brokers for bootstrap 15 | options = {}, -- options for librdkafka 16 | error_callback = error_callback, -- optional callback for errors 17 | log_callback = log_callback, -- optional callback for logs and debug messages 18 | default_topic_options = { 19 | ["partitioner"] = "murmur2_random", 20 | }, -- optional default topic options 21 | }) 22 | if err ~= nil then 23 | print(err) 24 | os.exit(1) 25 | end 26 | 27 | for i = 1, 1000 do 28 | fiber.create(function() 29 | local message = "test_value " .. tostring(i) 30 | local err = producer:produce({ -- wait until the message is delivered to kafka (uses a channel under the hood) 31 | topic = "test_topic", 32 | key = "test_key", 33 | value = message -- only strings allowed 34 | }) 35 | if err ~= nil then 36 | print(string.format("got error '%s' while sending value '%s'", err, message)) 37 | else 38 | print(string.format("successfully sent value '%s'", message)) 39 | end 40 | end) 41 | end 42 | 43 | fiber.sleep(10) 44 | 45 | producer:close() -- always close the producer to send all pending messages and free all used resources before the app exits 46 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | # Allows you to run this workflow manually from the Actions tab 5 | workflow_dispatch: 6 | push: 7 | 8 | jobs: 9 | version-check: 10 | # We need this job to run only on pushes with a tag. 
11 | if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }} 12 | runs-on: ubuntu-22.04 13 | steps: 14 | - name: Check module version 15 | uses: tarantool/actions/check-module-version@master 16 | with: 17 | module-name: 'kafka' 18 | rock-make-opts: 'STATIC_BUILD=ON' 19 | 20 | publish-scm-1: 21 | if: github.ref == 'refs/heads/master' 22 | runs-on: ubuntu-22.04 23 | steps: 24 | - uses: actions/checkout@v5 25 | - uses: tarantool/rocks.tarantool.org/github-action@master 26 | with: 27 | auth: ${{ secrets.ROCKS_AUTH }} 28 | files: kafka-scm-1.rockspec 29 | 30 | publish-tag: 31 | if: startsWith(github.ref, 'refs/tags/') 32 | needs: version-check 33 | runs-on: ubuntu-22.04 34 | steps: 35 | - uses: actions/checkout@v5 36 | - uses: tarantool/setup-tarantool@v4 37 | with: 38 | tarantool-version: '2.11' 39 | # Make a release 40 | - run: echo TAG=${GITHUB_REF##*/} >> $GITHUB_ENV 41 | - run: tarantoolctl rocks new_version --tag ${{ env.TAG }} 42 | - run: tarantoolctl rocks pack kafka-${{ env.TAG }}-1.rockspec 43 | 44 | - uses: tarantool/rocks.tarantool.org/github-action@master 45 | with: 46 | auth: ${{ secrets.ROCKS_AUTH }} 47 | files: | 48 | kafka-${{ env.TAG }}-1.rockspec 49 | kafka-${{ env.TAG }}-1.src.rock 50 | -------------------------------------------------------------------------------- /benchmarks/async_producer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local box = require('box') 3 | local os = require('os') 4 | local log = require('log') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{} 9 | 10 | box.once('init', function() 11 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 12 | end) 13 | 14 | local function produce() 15 | local producer, err = tnt_kafka.Producer.create({brokers = "kafka:9092", options = {}}) 16 | if err ~= nil then 17 | print(err) 18 | os.exit(1) 19 | end 20 | 21 | local before = clock.monotonic64() 22 | for i = 1, 10000000 do 23 | while true do 24 | local err = producer:produce_async({ -- don't wait until message will be delivired to kafka 25 | topic = "async_producer_benchmark", 26 | value = "test_value_" .. 
tostring(i) -- only strings allowed 27 | }) 28 | if err ~= nil then 29 | -- print(err) 30 | fiber.sleep(0.1) 31 | else 32 | break 33 | end 34 | end 35 | if i % 1000 == 0 then 36 | -- log.info("done %d", i) 37 | fiber.yield() 38 | end 39 | end 40 | 41 | log.info("stopping") 42 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 43 | if err ~= nil then 44 | print(err) 45 | os.exit(1) 46 | end 47 | 48 | local duration = clock.monotonic64() - before 49 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 50 | end 51 | 52 | log.info("starting benchmark") 53 | 54 | produce() 55 | -------------------------------------------------------------------------------- /.github/workflows/fast_testing.yml: -------------------------------------------------------------------------------- 1 | name: fast_testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | linux: 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | tarantool: 20 | - '2.11' 21 | 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Install tarantool ${{ matrix.tarantool }} 25 | uses: tarantool/setup-tarantool@v4 26 | with: 27 | tarantool-version: ${{ matrix.tarantool }} 28 | 29 | - name: Clone the module 30 | uses: actions/checkout@v5 31 | with: 32 | submodules: true 33 | 34 | - uses: actions/setup-python@v6 35 | with: 36 | python-version: '3.11' 37 | cache: 'pip' 38 | cache-dependency-path: 'tests/requirements.txt' 39 | 40 | - name: Start Kafka 41 | uses: ybyzek/cp-all-in-one-action@v0.2.1 42 | with: 43 | type: cp-all-in-one-community 44 | 45 | - name: Install Python dependencies 46 | run: pip install -r tests/requirements.txt 47 | 48 | - name: Build module 49 | run: | 50 | export MAKEFLAGS=-j8 51 | tarantoolctl rocks STATIC_BUILD=ON make 52 | 53 | - name: Run tarantool application 54 | run: TT_LOG=tarantool.log tarantool tests/app.lua > output.log 2>&1 & 55 | 56 | - name: Run test 57 | run: KAFKA_HOST=localhost:9092 pytest tests 58 | 59 | - name: Print Tarantool logs 60 | if: always() 61 | run: | 62 | cat tarantool.log 63 | cat output.log 64 | -------------------------------------------------------------------------------- /patches/librdkafka-fix-ubsan.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_assignor.c b/src/rdkafka_assignor.c 2 | index 465568c4..124df9de 100644 3 | --- a/src/rdkafka_assignor.c 4 | +++ b/src/rdkafka_assignor.c 5 | @@ -235,7 +235,8 @@ static int rd_kafka_member_subscription_match( 6 | } 7 | 8 | 9 | -static void rd_kafka_assignor_topic_destroy(rd_kafka_assignor_topic_t *at) { 10 | +static void rd_kafka_assignor_topic_destroy(void *rkat) { 11 | + rd_kafka_assignor_topic_t *at = (rd_kafka_assignor_topic_t *)rkat; 12 | rd_list_destroy(&at->members); 13 | rd_free(at); 14 | } 15 | @@ -267,7 +268,7 @@ rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg, 16 | rd_kafka_metadata_get_internal(metadata); 17 | 18 | rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10), 19 | - (void *)rd_kafka_assignor_topic_destroy); 20 | + rd_kafka_assignor_topic_destroy); 21 | 22 | /* For each topic in the cluster, scan through the member list 23 | * to find matching subscriptions. 
*/ 24 | @@ -451,7 +452,8 @@ rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk, 25 | /** 26 | * Destroys an assignor (but does not unlink). 27 | */ 28 | -static void rd_kafka_assignor_destroy(rd_kafka_assignor_t *rkas) { 29 | +static void rd_kafka_assignor_destroy(void *rkasp) { 30 | + rd_kafka_assignor_t *rkas = (rd_kafka_assignor_t *) rkasp; 31 | rd_kafkap_str_destroy(rkas->rkas_protocol_type); 32 | rd_kafkap_str_destroy(rkas->rkas_protocol_name); 33 | rd_free(rkas); 34 | @@ -580,7 +582,7 @@ int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { 35 | int idx = 0; 36 | 37 | rd_list_init(&rk->rk_conf.partition_assignors, 3, 38 | - (void *)rd_kafka_assignor_destroy); 39 | + rd_kafka_assignor_destroy); 40 | 41 | /* Initialize builtin assignors (ignore errors) */ 42 | rd_kafka_range_assignor_init(rk); 43 | -------------------------------------------------------------------------------- /cmake/FindTarantool.cmake: -------------------------------------------------------------------------------- 1 | # Define GNU standard installation directories 2 | include(GNUInstallDirs) 3 | 4 | macro(extract_definition name output input) 5 | string(REGEX MATCH "#define[\t ]+${name}[\t ]+\"([^\"]*)\"" 6 | _t "${input}") 7 | string(REGEX REPLACE "#define[\t ]+${name}[\t ]+\"(.*)\"" "\\1" 8 | ${output} "${_t}") 9 | endmacro() 10 | 11 | find_path(TARANTOOL_INCLUDE_DIR tarantool/module.h 12 | HINTS ${TARANTOOL_DIR} ENV TARANTOOL_DIR 13 | PATH_SUFFIXES include 14 | ) 15 | 16 | if(TARANTOOL_INCLUDE_DIR) 17 | set(_config "-") 18 | file(READ "${TARANTOOL_INCLUDE_DIR}/tarantool/module.h" _config0) 19 | string(REPLACE "\\" "\\\\" _config ${_config0}) 20 | unset(_config0) 21 | extract_definition(PACKAGE_VERSION TARANTOOL_VERSION ${_config}) 22 | extract_definition(INSTALL_PREFIX _install_prefix ${_config}) 23 | unset(_config) 24 | endif() 25 | 26 | include(FindPackageHandleStandardArgs) 27 | find_package_handle_standard_args(Tarantool 28 | REQUIRED_VARS TARANTOOL_INCLUDE_DIR VERSION_VAR TARANTOOL_VERSION) 29 | if(TARANTOOL_FOUND) 30 | set(TARANTOOL_INCLUDE_DIRS "${TARANTOOL_INCLUDE_DIR}" 31 | "${TARANTOOL_INCLUDE_DIR}/tarantool/" 32 | CACHE PATH "Include directories for Tarantool") 33 | set(TARANTOOL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/tarantool" 34 | CACHE PATH "Directory for storing Lua modules written in Lua") 35 | set(TARANTOOL_INSTALL_LUADIR "${CMAKE_INSTALL_DATADIR}/tarantool" 36 | CACHE PATH "Directory for storing Lua modules written in C") 37 | 38 | if (NOT TARANTOOL_FIND_QUIETLY AND NOT FIND_TARANTOOL_DETAILS) 39 | set(FIND_TARANTOOL_DETAILS ON CACHE INTERNAL "Details about TARANTOOL") 40 | message(STATUS "Tarantool LUADIR is ${TARANTOOL_INSTALL_LUADIR}") 41 | message(STATUS "Tarantool LIBDIR is ${TARANTOOL_INSTALL_LIBDIR}") 42 | endif () 43 | endif() 44 | mark_as_advanced(TARANTOOL_INCLUDE_DIRS TARANTOOL_INSTALL_LIBDIR 45 | TARANTOOL_INSTALL_LUADIR) 46 | -------------------------------------------------------------------------------- /.github/workflows/asan_testing.yml: -------------------------------------------------------------------------------- 1 | name: asan_testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | linux: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Clone the module 19 | uses: actions/checkout@v5 20 | with: 21 | submodules: true 22 | 23 | - uses: 
actions/setup-python@v6 24 | with: 25 | python-version: '3.11' 26 | cache: 'pip' 27 | cache-dependency-path: 'tests/requirements.txt' 28 | 29 | - name: Start Kafka 30 | uses: ybyzek/cp-all-in-one-action@v0.2.1 31 | with: 32 | type: cp-all-in-one-community 33 | 34 | - name: Install Python dependencies 35 | run: pip install -r tests/requirements.txt 36 | 37 | - name: Install dependencies 38 | run: sudo apt install -y libreadline-dev 39 | 40 | - name: Build module 41 | run: | 42 | export MAKEFLAGS=-j8 43 | export CC=clang 44 | export CXX=clang++ 45 | git clone https://github.com/tarantool/tarantool 46 | cd tarantool 47 | git checkout release/2.11 48 | export LSAN_OPTIONS=suppressions=${PWD}/asan/lsan.supp 49 | cmake . -DENABLE_ASAN=ON -DENABLE_UB_SANITIZER=ON -DENABLE_DIST=ON 50 | make -j16 51 | sudo make install 52 | cd .. 53 | tarantoolctl rocks STATIC_BUILD=ON ENABLE_ASAN=ON ENABLE_UBSAN=ON make 54 | 55 | - name: Run tarantool application 56 | run: | 57 | export TT_LOG=tarantool.log 58 | export LSAN_OPTIONS=suppressions=${PWD}/tarantool/asan/lsan.supp 59 | tarantool tests/app.lua > output.log 2>&1 & 60 | 61 | - name: Run test 62 | run: KAFKA_HOST=localhost:9092 pytest tests 63 | 64 | - name: Print Tarantool logs 65 | if: always() 66 | run: | 67 | cat tarantool.log 68 | cat output.log 69 | -------------------------------------------------------------------------------- /examples/consumer/auto_offset_store.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local os = require('os') 3 | local log = require('log') 4 | local tnt_kafka = require('kafka') 5 | local json = require('json') 6 | local error_callback = function(err) 7 | log.error("got error: %s", err) 8 | end 9 | local log_callback = function(fac, str, level) 10 | log.info("got log: %d - %s - %s", level, fac, str) 11 | end 12 | local rebalance_callback = function(msg) 13 | log.info("got rebalance msg: %s", json.encode(msg)) 14 | end 15 | 16 | local consumer, err = tnt_kafka.Consumer.create({ 17 | brokers = "localhost:9092", -- brokers for bootstrap 18 | options = { 19 | ["enable.auto.offset.store"] = "true", 20 | ["group.id"] = "example_consumer", 21 | ["auto.offset.reset"] = "earliest", 22 | ["enable.partition.eof"] = "false" 23 | }, -- options for librdkafka 24 | error_callback = error_callback, -- optional callback for errors 25 | log_callback = log_callback, -- optional callback for logs and debug messages 26 | rebalance_callback = rebalance_callback, -- optional callback for rebalance messages 27 | default_topic_options = { 28 | ["auto.offset.reset"] = "earliest", 29 | }, -- optional default topic options 30 | }) 31 | if err ~= nil then 32 | print(err) 33 | os.exit(1) 34 | end 35 | 36 | local err = consumer:subscribe({"test_topic"}) -- array of topics to subscribe 37 | if err ~= nil then 38 | print(err) 39 | os.exit(1) 40 | end 41 | 42 | fiber.create(function() 43 | local out, err = consumer:output() 44 | if err ~= nil then 45 | print(string.format("got fatal error '%s'", err)) 46 | return 47 | end 48 | 49 | while true do 50 | if out:is_closed() then 51 | return 52 | end 53 | 54 | local msg = out:get() 55 | if msg ~= nil then 56 | print(string.format( 57 | "got msg with topic='%s' partition='%s' offset='%s' key='%s' value='%s'", 58 | msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value() 59 | )) 60 | end 61 | end 62 | end) 63 | 64 | fiber.sleep(10) 65 | 66 | local err = consumer:unsubscribe({"test_topic"}) -- array of topics to unsubscribe 67 | if err ~= nil then 68 | 
print(err) 69 | os.exit(1) 70 | end 71 | 72 | consumer:close() -- always stop consumer to commit all pending offsets before app close and free all used resources 73 | -------------------------------------------------------------------------------- /benchmarks/sync_producer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local box = require('box') 3 | local log = require('log') 4 | local os = require('os') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{ 9 | memtx_memory = 524288000, -- 500 MB 10 | } 11 | 12 | box.once('init', function() 13 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 14 | end) 15 | 16 | local function produce() 17 | local producer, err = tnt_kafka.Producer.create({ 18 | brokers = "kafka:9092", 19 | options = { 20 | ["queue.buffering.max.ms"] = "50", 21 | } 22 | }) 23 | if err ~= nil then 24 | print(err) 25 | os.exit(1) 26 | end 27 | 28 | local before = clock.monotonic64() 29 | local input_ch = fiber.channel(); 30 | for i = 1, 10000 do 31 | fiber.create(function() 32 | while true do 33 | if input_ch:is_closed() then 34 | break 35 | end 36 | local value = input_ch:get() 37 | if value ~= nil then 38 | while true do 39 | local err = producer:produce({ 40 | topic = "sync_producer_benchmark", 41 | value = value -- only strings allowed 42 | }) 43 | if err ~= nil then 44 | -- print(err) 45 | fiber.sleep(0.1) 46 | else 47 | -- if value % 10000 == 0 then 48 | -- log.info("done %d", value) 49 | -- end 50 | break 51 | end 52 | end 53 | end 54 | end 55 | end) 56 | end 57 | 58 | for i = 1, 10000000 do 59 | input_ch:put(i) 60 | if i % 10000 == 0 then 61 | fiber.yield() 62 | end 63 | end 64 | 65 | input_ch:close() 66 | 67 | log.info("stopping") 68 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 69 | if err ~= nil then 70 | print(err) 71 | os.exit(1) 72 | end 73 | 74 | local duration = clock.monotonic64() - before 75 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 76 | end 77 | 78 | log.info("starting benchmark") 79 | 80 | produce() 81 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-94.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c 2 | index 2d889e09..cf367d3a 100644 3 | --- a/src/rdkafka_partition.c 4 | +++ b/src/rdkafka_partition.c 5 | @@ -3612,12 +3612,14 @@ reply: 6 | 7 | if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr, 8 | RD_DO_LOCK)) 9 | - rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 10 | - "query timer"); 11 | + if (rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 12 | + "query timer")) 13 | + rko->rko_u.leaders.eonce = NULL; 14 | if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, 15 | RD_DO_LOCK)) 16 | - rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 17 | - "timeout timer"); 18 | + if (rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 19 | + "timeout timer")) 20 | + rko->rko_u.leaders.eonce = NULL; 21 | 22 | if (rko->rko_u.leaders.eonce) { 23 | rd_kafka_enq_once_disable(rko->rko_u.leaders.eonce); 24 | diff --git a/src/rdkafka_queue.h b/src/rdkafka_queue.h 25 | index 0d50f587..04dddbf9 100644 26 | --- a/src/rdkafka_queue.h 27 | +++ b/src/rdkafka_queue.h 28 | @@ -983,7 +983,8 @@ 
rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 29 | 30 | 31 | /** 32 | - * @brief Decrement refcount for source (non-owner), such as a timer. 33 | + * @brief Decrement refcount for source (non-owner), such as a timer 34 | + * and return 1 if eonce was destroyed. 35 | * 36 | * @param srcdesc a human-readable descriptive string of the source. 37 | * May be used for future debugging. 38 | @@ -993,7 +994,7 @@ rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 39 | * This API is used to undo an add_source() from the 40 | * same code. 41 | */ 42 | -static RD_INLINE RD_UNUSED void 43 | +static RD_INLINE RD_UNUSED int 44 | rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 45 | int do_destroy; 46 | 47 | @@ -1006,7 +1007,10 @@ rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 48 | if (do_destroy) { 49 | /* We're the last refcount holder, clean up eonce. */ 50 | rd_kafka_enq_once_destroy0(eonce); 51 | + return 1; 52 | } 53 | + 54 | + return 0; 55 | } 56 | 57 | /** 58 | -------------------------------------------------------------------------------- /examples/consumer/manual_offset_store.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local os = require('os') 3 | local log = require('log') 4 | local tnt_kafka = require('kafka') 5 | local json = require('json') 6 | local error_callback = function(err) 7 | log.error("got error: %s", err) 8 | end 9 | local log_callback = function(fac, str, level) 10 | log.info("got log: %d - %s - %s", level, fac, str) 11 | end 12 | local rebalance_callback = function(msg) 13 | log.info("got rebalance msg: %s", json.encode(msg)) 14 | end 15 | 16 | local consumer, err = tnt_kafka.Consumer.create({ 17 | brokers = "localhost:9092", -- brokers for bootstrap 18 | options = { 19 | ["enable.auto.offset.store"] = "false", 20 | ["group.id"] = "example_consumer", 21 | ["auto.offset.reset"] = "earliest", 22 | ["enable.partition.eof"] = "false" 23 | }, -- options for librdkafka 24 | error_callback = error_callback, -- optional callback for errors 25 | log_callback = log_callback, -- optional callback for logs and debug messages 26 | rebalance_callback = rebalance_callback, -- optional callback for rebalance messages 27 | default_topic_options = { 28 | ["auto.offset.reset"] = "earliest", 29 | }, -- optional default topic options 30 | }) 31 | if err ~= nil then 32 | print(err) 33 | os.exit(1) 34 | end 35 | 36 | local err = consumer:subscribe({"test_topic"}) -- array of topics to subscribe 37 | if err ~= nil then 38 | print(err) 39 | os.exit(1) 40 | end 41 | 42 | for i = 1, 10 do 43 | fiber.create(function() 44 | local out, err = consumer:output() 45 | if err ~= nil then 46 | print(string.format("got fatal error '%s'", err)) 47 | return 48 | end 49 | while true do 50 | if out:is_closed() then 51 | return 52 | end 53 | 54 | local msg = out:get() 55 | if msg ~= nil then 56 | print(string.format( 57 | "got msg with topic='%s' partition='%s' offset='%s' key='%s' value='%s'", 58 | msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value() 59 | )) 60 | 61 | local err = consumer:store_offset(msg) -- don't forget to commit processed messages 62 | if err ~= nil then 63 | print(string.format( 64 | "got error '%s' while committing msg from topic '%s'", 65 | err, msg:topic() 66 | )) 67 | end 68 | end 69 | end 70 | end) 71 | end 72 | 73 | fiber.sleep(10) 74 | 75 | local err = consumer:unsubscribe({"test_topic"}) -- array of 
topics to unsubscribe 76 | if err ~= nil then 77 | print(err) 78 | os.exit(1) 79 | end 80 | 81 | consumer:close() -- always stop consumer to commit all pending offsets before app close and free all used resources 82 | -------------------------------------------------------------------------------- /kafka/queue.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | //////////////////////////////////////////////////////////////////////////////////////////////////// 9 | /** 10 | * General thread safe queue based on linked list 11 | */ 12 | 13 | /** 14 | * Pop without locking mutex. 15 | * Caller must lock and unlock queue mutex by itself. 16 | * Use with caution! 17 | * @param queue 18 | * @return 19 | */ 20 | void * 21 | queue_lockfree_pop(queue_t *queue) { 22 | if (queue == NULL) 23 | return NULL; 24 | 25 | void *output = NULL; 26 | 27 | if (queue->head != NULL) { 28 | output = queue->head->value; 29 | queue_node_t *tmp = queue->head; 30 | queue->head = queue->head->next; 31 | free(tmp); 32 | if (queue->head == NULL) { 33 | queue->tail = NULL; 34 | } 35 | 36 | queue->count -= 1; 37 | } 38 | 39 | return output; 40 | } 41 | 42 | void * 43 | queue_pop(queue_t *queue) { 44 | if (queue == NULL) 45 | return NULL; 46 | 47 | pthread_mutex_lock(&queue->lock); 48 | void *output = queue_lockfree_pop(queue); 49 | pthread_mutex_unlock(&queue->lock); 50 | 51 | return output; 52 | } 53 | 54 | /** 55 | * Push without locking mutex. 56 | * Caller must lock and unlock queue mutex by itself. 57 | * Use with caution! 58 | * @param queue 59 | * @param value 60 | * @return 61 | */ 62 | void 63 | queue_lockfree_push(queue_t *queue, void *value) { 64 | if (queue == NULL) 65 | return; 66 | 67 | queue_node_t *new_node; 68 | new_node = xmalloc(sizeof(queue_node_t)); 69 | new_node->value = value; 70 | new_node->next = NULL; 71 | 72 | if (queue->tail != NULL) 73 | queue->tail->next = new_node; 74 | 75 | queue->tail = new_node; 76 | if (queue->head == NULL) 77 | queue->head = new_node; 78 | 79 | queue->count += 1; 80 | } 81 | 82 | int 83 | queue_push(queue_t *queue, void *value) { 84 | if (value == NULL || queue == NULL) { 85 | return -1; 86 | } 87 | 88 | pthread_mutex_lock(&queue->lock); 89 | queue_lockfree_push(queue, value); 90 | pthread_mutex_unlock(&queue->lock); 91 | 92 | return 0; 93 | } 94 | 95 | queue_t * 96 | new_queue() { 97 | queue_t *queue = xmalloc(sizeof(queue_t)); 98 | XPTHREAD(pthread_mutex_init(&queue->lock, NULL)); 99 | 100 | queue->head = NULL; 101 | queue->tail = NULL; 102 | queue->count = 0; 103 | 104 | return queue; 105 | } 106 | 107 | void 108 | destroy_queue(queue_t *queue) { 109 | if (queue == NULL) 110 | return; 111 | 112 | /* Drain remaining nodes (values are owned by caller) */ 113 | pthread_mutex_lock(&queue->lock); 114 | queue_node_t *n = queue->head; 115 | while (n != NULL) { 116 | queue_node_t *next = n->next; 117 | free(n); 118 | n = next; 119 | } 120 | pthread_mutex_unlock(&queue->lock); 121 | 122 | XPTHREAD(pthread_mutex_destroy(&queue->lock)); 123 | free(queue); 124 | } 125 | -------------------------------------------------------------------------------- /kafka/common.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_COMMON_H 2 | #define TNT_KAFKA_COMMON_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | /** 17 | * An x* 
variant of a memory allocation function calls the original function 18 | * and panics if it fails (i.e. it should never return NULL). 19 | */ 20 | #define xalloc_impl(size, func, args...) \ 21 | ({ \ 22 | void *ret = func(args); \ 23 | if (unlikely(ret == NULL)) { \ 24 | fprintf(stderr, "Can't allocate %zu bytes at %s:%d", \ 25 | (size_t)(size), __FILE__, __LINE__); \ 26 | exit(EXIT_FAILURE); \ 27 | } \ 28 | ret; \ 29 | }) 30 | 31 | #define xmalloc(size) xalloc_impl((size), malloc, (size)) 32 | #define xcalloc(n, size) xalloc_impl((n) * (size), calloc, (n), (size)) 33 | #define xrealloc(ptr, size) xalloc_impl((size), realloc, (ptr), (size)) 34 | #define xrd_kafka_topic_partition_list_new(size) xalloc_impl((size), rd_kafka_topic_partition_list_new, (size)) 35 | #define xrd_kafka_headers_new(size) xalloc_impl((size), rd_kafka_headers_new, (size)) 36 | #define xrd_kafka_topic_conf_new() xalloc_impl(1, rd_kafka_topic_conf_new) 37 | #define xrd_kafka_conf_new() xalloc_impl(1, rd_kafka_conf_new) 38 | 39 | static inline void xpthread_fail(const char *what, int rc, const char *file, int line) { 40 | fprintf(stderr, "%s failed (rc=%d: %s) at %s:%d\n", 41 | what, rc, strerror(rc), file, line); 42 | exit(EXIT_FAILURE); 43 | } 44 | 45 | #define XPTHREAD(call) do { \ 46 | int _rc = (call); \ 47 | if (unlikely(_rc != 0)) \ 48 | xpthread_fail(#call, _rc, __FILE__, __LINE__); \ 49 | } while (0) 50 | 51 | extern const char* const consumer_label; 52 | extern const char* const consumer_msg_label; 53 | extern const char* const producer_label; 54 | 55 | int 56 | lua_librdkafka_version(struct lua_State *L); 57 | 58 | int 59 | lua_librdkafka_dump_conf(struct lua_State *L, rd_kafka_t *rk); 60 | 61 | int 62 | lua_librdkafka_metadata(struct lua_State *L, rd_kafka_t *rk, rd_kafka_topic_t *only_rkt, int timeout_ms); 63 | 64 | int 65 | lua_librdkafka_list_groups(struct lua_State *L, rd_kafka_t *rk, const char *group, int timeout_ms); 66 | 67 | /** 68 | * Push native lua error with code -3 69 | */ 70 | int 71 | lua_push_error(struct lua_State *L); 72 | 73 | void 74 | set_thread_name(const char *name); 75 | 76 | rd_kafka_resp_err_t 77 | kafka_pause(rd_kafka_t *rk); 78 | 79 | rd_kafka_resp_err_t 80 | kafka_resume(rd_kafka_t *rk); 81 | 82 | int 83 | lua_push_kafka_error(struct lua_State *L, rd_kafka_t *rk, rd_kafka_resp_err_t err); 84 | 85 | #endif //TNT_KAFKA_COMMON_H 86 | -------------------------------------------------------------------------------- /benchmarks/auto_offset_store_consumer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local log = require('log') 3 | local box = require('box') 4 | local os = require('os') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{ 9 | memtx_memory = 524288000, -- 500 MB 10 | } 11 | 12 | local TOPIC = "auto_offset_store_consumer_benchmark" 13 | local MSG_COUNT = 10000000 14 | 15 | box.once('init', function() 16 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 17 | end) 18 | 19 | local function produce_initial_data() 20 | local producer, err = tnt_kafka.Producer.create({ brokers = "kafka:9092"}) 21 | if err ~= nil then 22 | print(err) 23 | os.exit(1) 24 | end 25 | 26 | for i = 1, MSG_COUNT do 27 | while true do 28 | local err = producer:produce_async({ -- don't wait until message will be delivired to kafka 29 | topic = TOPIC, 30 | value = "test_value_" .. 
tostring(i) -- only strings allowed 31 | }) 32 | if err ~= nil then 33 | -- print(err) 34 | fiber.sleep(0.1) 35 | else 36 | break 37 | end 38 | end 39 | if i % 1000 == 0 then 40 | fiber.yield() 41 | end 42 | end 43 | 44 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 45 | if err ~= nil then 46 | print(err) 47 | os.exit(1) 48 | end 49 | end 50 | 51 | local function consume() 52 | local consumer, err = tnt_kafka.Consumer.create({ brokers = "kafka:9092", options = { 53 | ["enable.auto.offset.store"] = "true", 54 | ["group.id"] = "test_consumer1", 55 | ["auto.offset.reset"] = "earliest", 56 | ["enable.partition.eof"] = "false", 57 | ["queued.min.messages"] = "100000" 58 | }}) 59 | if err ~= nil then 60 | print(err) 61 | os.exit(1) 62 | end 63 | 64 | local err = consumer:subscribe({TOPIC}) 65 | if err ~= nil then 66 | print(err) 67 | os.exit(1) 68 | end 69 | 70 | local before = clock.monotonic64() 71 | local counter = 0 72 | local out, err = consumer:output() 73 | if err ~= nil then 74 | print(string.format("got fatal error '%s'", err)) 75 | return 76 | end 77 | 78 | while counter < MSG_COUNT do 79 | if out:is_closed() then 80 | return 81 | end 82 | 83 | local msg = out:get() 84 | if msg ~= nil then 85 | counter = counter + 1 86 | -- print(msg:value()) 87 | end 88 | if counter % 10000 == 0 then 89 | log.info("done %d", counter) 90 | fiber.yield() 91 | end 92 | end 93 | 94 | print("closing") 95 | local ok, err = consumer:close() 96 | if err ~= nil then 97 | print(err) 98 | os.exit(1) 99 | end 100 | 101 | local duration = clock.monotonic64() - before 102 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 103 | end 104 | 105 | print("producing initial data") 106 | produce_initial_data() 107 | 108 | print("starting benchmark") 109 | consume() 110 | -------------------------------------------------------------------------------- /benchmarks/manual_offset_store_consumer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local box = require('box') 3 | local os = require('os') 4 | local log = require('log') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{ 9 | memtx_memory = 524288000, 10 | } 11 | 12 | local TOPIC = "manual_offset_store_consumer" 13 | local MSG_COUNT = 10000000 14 | 15 | box.once('init', function() 16 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 17 | end) 18 | 19 | local function produce_initial_data() 20 | local producer, err = tnt_kafka.Producer.create({ brokers = "kafka:9092"}) 21 | if err ~= nil then 22 | print(err) 23 | os.exit(1) 24 | end 25 | 26 | for i = 1, MSG_COUNT do 27 | while true do 28 | local err = producer:produce_async({ -- don't wait until message will be delivired to kafka 29 | topic = TOPIC, 30 | value = "test_value_" .. 
tostring(i) -- only strings allowed 31 | }) 32 | if err ~= nil then 33 | -- print(err) 34 | fiber.sleep(0.1) 35 | else 36 | break 37 | end 38 | end 39 | if i % 1000 == 0 then 40 | fiber.yield() 41 | end 42 | end 43 | 44 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 45 | if err ~= nil then 46 | print(err) 47 | os.exit(1) 48 | end 49 | end 50 | 51 | local function consume() 52 | local consumer, err = tnt_kafka.Consumer.create({ brokers = "kafka:9092", options = { 53 | ["enable.auto.offset.store"] = "false", 54 | ["group.id"] = "test_consumer1", 55 | ["auto.offset.reset"] = "earliest", 56 | ["enable.partition.eof"] = "false", 57 | ["queued.min.messages"] = "100000" 58 | }}) 59 | if err ~= nil then 60 | print(err) 61 | os.exit(1) 62 | end 63 | 64 | local err = consumer:subscribe({TOPIC}) 65 | if err ~= nil then 66 | print(err) 67 | os.exit(1) 68 | end 69 | 70 | local before = clock.monotonic64() 71 | local counter = 0 72 | local out, err = consumer:output() 73 | if err ~= nil then 74 | print(string.format("got fatal error '%s'", err)) 75 | return 76 | end 77 | 78 | while counter < MSG_COUNT do 79 | if out:is_closed() then 80 | return 81 | end 82 | 83 | local msg = out:get() 84 | if msg ~= nil then 85 | counter = counter + 1 86 | err = consumer:store_offset(msg) 87 | if err ~= nil then 88 | print(err) 89 | end 90 | end 91 | if counter % 10000 == 0 then 92 | log.info("done %d", counter) 93 | fiber.yield() 94 | end 95 | end 96 | 97 | print("closing") 98 | local ok, err = consumer:close() 99 | if err ~= nil then 100 | print(err) 101 | os.exit(1) 102 | end 103 | 104 | local duration = clock.monotonic64() - before 105 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 106 | end 107 | 108 | log.info("producing initial data") 109 | produce_initial_data() 110 | 111 | log.info("starting benchmark") 112 | consume() 113 | -------------------------------------------------------------------------------- /kafka/tnt_kafka.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | //////////////////////////////////////////////////////////////////////////////////////////////////// 13 | /** 14 | * Entry point 15 | */ 16 | 17 | LUA_API int __attribute__ ((visibility("default"))) 18 | luaopen_kafka_tntkafka(lua_State *L) { 19 | static const struct luaL_Reg consumer_methods [] = { 20 | {"subscribe", lua_consumer_subscribe}, 21 | {"unsubscribe", lua_consumer_unsubscribe}, 22 | {"poll_msg", lua_consumer_poll_msg}, 23 | {"poll_logs", lua_consumer_poll_logs}, 24 | {"poll_stats", lua_consumer_poll_stats}, 25 | {"poll_errors", lua_consumer_poll_errors}, 26 | {"poll_rebalances", lua_consumer_poll_rebalances}, 27 | {"store_offset", lua_consumer_store_offset}, 28 | {"seek_partitions", lua_consumer_seek_partitions}, 29 | {"dump_conf", lua_consumer_dump_conf}, 30 | {"metadata", lua_consumer_metadata}, 31 | {"list_groups", lua_consumer_list_groups}, 32 | {"pause", lua_consumer_pause}, 33 | {"resume", lua_consumer_resume}, 34 | {"close", lua_consumer_close}, 35 | {"destroy", lua_consumer_destroy}, 36 | {"rebalance_protocol", lua_consumer_rebalance_protocol}, 37 | {"offsets_for_times", lua_consumer_offsets_for_times}, 38 | {"__tostring", lua_consumer_tostring}, 39 | {NULL, NULL} 40 | }; 41 | 42 | luaL_newmetatable(L, consumer_label); 43 | lua_pushvalue(L, -1); 44 | luaL_register(L, 
NULL, consumer_methods); 45 | lua_setfield(L, -2, "__index"); 46 | lua_pushstring(L, consumer_label); 47 | lua_setfield(L, -2, "__metatable"); 48 | lua_pop(L, 1); 49 | 50 | static const struct luaL_Reg consumer_msg_methods [] = { 51 | {"topic", lua_consumer_msg_topic}, 52 | {"partition", lua_consumer_msg_partition}, 53 | {"headers", lua_consumer_msg_headers}, 54 | {"offset", lua_consumer_msg_offset}, 55 | {"key", lua_consumer_msg_key}, 56 | {"value", lua_consumer_msg_value}, 57 | {"__tostring", lua_consumer_msg_tostring}, 58 | {"__gc", lua_consumer_msg_gc}, 59 | {NULL, NULL} 60 | }; 61 | 62 | luaL_newmetatable(L, consumer_msg_label); 63 | lua_pushvalue(L, -1); 64 | luaL_register(L, NULL, consumer_msg_methods); 65 | lua_setfield(L, -2, "__index"); 66 | lua_pushstring(L, consumer_msg_label); 67 | lua_setfield(L, -2, "__metatable"); 68 | lua_pop(L, 1); 69 | 70 | static const struct luaL_Reg producer_methods [] = { 71 | {"produce", lua_producer_produce}, 72 | {"msg_delivery_poll", lua_producer_msg_delivery_poll}, 73 | {"poll_logs", lua_producer_poll_logs}, 74 | {"poll_stats", lua_producer_poll_stats}, 75 | {"poll_errors", lua_producer_poll_errors}, 76 | {"dump_conf", lua_producer_dump_conf}, 77 | {"metadata", lua_producer_metadata}, 78 | {"list_groups", lua_producer_list_groups}, 79 | {"close", lua_producer_close}, 80 | {"destroy", lua_producer_destroy}, 81 | {"__tostring", lua_producer_tostring}, 82 | {NULL, NULL} 83 | }; 84 | 85 | luaL_newmetatable(L, producer_label); 86 | lua_pushvalue(L, -1); 87 | luaL_register(L, NULL, producer_methods); 88 | lua_setfield(L, -2, "__index"); 89 | lua_pushstring(L, producer_label); 90 | lua_setfield(L, -2, "__metatable"); 91 | lua_pop(L, 1); 92 | 93 | lua_newtable(L); 94 | static const struct luaL_Reg meta [] = { 95 | {"create_consumer", lua_create_consumer}, 96 | {"create_producer", lua_create_producer}, 97 | {"librdkafka_version", lua_librdkafka_version}, 98 | {NULL, NULL} 99 | }; 100 | luaL_register(L, NULL, meta); 101 | return 1; 102 | } 103 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.13 FATAL_ERROR) 2 | 3 | project(kafka C) 4 | 5 | set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) 6 | set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE) 7 | 8 | # Set CFLAGS 9 | set(MY_C_FLAGS "-Wall -Wextra -Werror -std=gnu11 -fno-strict-aliasing -Wno-deprecated-declarations") 10 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_C_FLAGS}") 11 | set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${MY_C_FLAGS} -ggdb3") 12 | 13 | find_package(Tarantool REQUIRED) 14 | 15 | set(STATIC_BUILD "OFF" CACHE BOOL "Link dependencies statically?") 16 | set(WITH_OPENSSL_1_1 "OFF" CACHE BOOL "Require openssl version >= 1.1?") 17 | set(WITH_GSSAPI "OFF" CACHE BOOL "Enable Kerberos (GSSAPI) support") 18 | set(ENABLE_ASAN "OFF" CACHE BOOL "Enable ASAN") 19 | set(ENABLE_UBSAN "OFF" CACHE BOOL "Enable UBSAN") 20 | 21 | if (WITH_OPENSSL_1_1) 22 | find_package(OpenSSL 1.1 REQUIRED) 23 | else() 24 | find_package(OpenSSL REQUIRED) 25 | endif() 26 | message("Found OPENSSL version: ${OPENSSL_VERSION}") 27 | 28 | if (ENABLE_ASAN) 29 | list(APPEND SANITIZER_FLAGS -fsanitize=address) 30 | endif() 31 | 32 | if (ENABLE_UBSAN) 33 | list(APPEND SANITIZER_FLAGS -fsanitize=undefined) 34 | endif() 35 | 36 | if (SANITIZER_FLAGS) 37 | list(JOIN SANITIZER_FLAGS " " SANITIZER_FLAGS) 38 | set(LIBRDKAFKA_FLAGS --enable-devel 
--disable-optimization) 39 | set(CMAKE_BUILD_TYPE "Debug") 40 | set(LIBRDKAFKA_CXX_FLAGS "${SANITIZER_FLAGS}") 41 | set(LIBRDKAFKA_C_FLAGS "${SANITIZER_FLAGS}") 42 | set(LIBRDKAFKA_LD_FLAGS "${SANITIZER_FLAGS}") 43 | endif() 44 | 45 | if (APPLE) 46 | set(LIBRDKAFKA_LD_FLAGS "${LIBRDKAFKA_LD_FLAGS} ${CMAKE_C_SYSROOT_FLAG} ${CMAKE_OSX_SYSROOT}") 47 | set(LIBRDKAFKA_CXX_FLAGS "${LIBRDKAFKA_CXX_FLAGS} ${CMAKE_C_SYSROOT_FLAG} ${CMAKE_OSX_SYSROOT}") 48 | set(LIBRDKAFKA_C_FLAGS "${LIBRDKAFKA_C_FLAGS} ${CMAKE_C_SYSROOT_FLAG} ${CMAKE_OSX_SYSROOT}") 49 | endif() 50 | 51 | if (WITH_GSSAPI) 52 | set(LIBRDKAFKA_FLAGS ${LIBRDKAFKA_FLAGS} --enable-gssapi) 53 | endif() 54 | 55 | if(STATIC_BUILD) 56 | include(ExternalProject) 57 | set(PATCHES_DIR "${CMAKE_SOURCE_DIR}/patches") 58 | ExternalProject_Add(librdkafka 59 | SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR}/librdkafka 60 | INSTALL_DIR ${CMAKE_BINARY_DIR}/librdkafka 61 | BUILD_IN_SOURCE 1 62 | CONFIGURE_COMMAND <SOURCE_DIR>/configure 63 | --cc=${CMAKE_C_COMPILER} 64 | --cxx=${CMAKE_CXX_COMPILER} 65 | --CFLAGS=${LIBRDKAFKA_C_FLAGS} 66 | --CPPFLAGS=${LIBRDKAFKA_CXX_FLAGS} 67 | --LDFLAGS=${LIBRDKAFKA_LD_FLAGS} 68 | --prefix=<INSTALL_DIR> 69 | ${LIBRDKAFKA_FLAGS} 70 | 71 | --enable-ssl 72 | --disable-zstd 73 | --disable-lz4 74 | --disable-lz4-ext 75 | --disable-curl 76 | --enable-static 77 | BUILD_COMMAND make -C src -j 78 | INSTALL_COMMAND make -C src install 79 | PATCH_COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-47.patch" 80 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-52.patch" 81 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-55.patch" 82 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-70.patch" 83 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-36.patch" 84 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-71.patch" 85 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-72.patch" 86 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-94.patch" 87 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-fix-ubsan.patch" 88 | COMMAND patch -d <SOURCE_DIR> -p1 -i "${PATCHES_DIR}/librdkafka-fix-centos7.patch" 89 | ) 90 | 91 | add_library(librdkafka_static INTERFACE) 92 | add_dependencies(librdkafka_static librdkafka) 93 | ExternalProject_Get_Property(librdkafka INSTALL_DIR) 94 | target_include_directories(librdkafka_static SYSTEM INTERFACE ${INSTALL_DIR}/include) 95 | target_link_libraries(librdkafka_static INTERFACE ${INSTALL_DIR}/lib/librdkafka.a) 96 | 97 | set(RDKAFKA_LIBRARY ${RDKAFKA_LIBRARY} librdkafka_static) 98 | else() 99 | find_package(RdKafka REQUIRED) 100 | # Link RdKafka transitive dependencies manually 101 | set(RDKAFKA_LIBRARY ${RDKAFKA_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} ${OPENSSL_SSL_LIBRARY}) 102 | endif() 103 | 104 | include_directories(${TARANTOOL_INCLUDE_DIRS}) 105 | 106 | add_subdirectory(kafka) 107 | -------------------------------------------------------------------------------- /tests/producer.lua: -------------------------------------------------------------------------------- 1 | local box = require('box') 2 | local log = require('log') 3 | local json = require('json') 4 | local tnt_kafka = require('kafka') 5 | 6 | local TOPIC_NAME = "test_producer" 7 | 8 | local producer = nil 9 | local errors = {} 10 | local logs = {} 11 | local stats = {} 12 | 13 | local function create(brokers, additional_opts) 14 | local err 15 | errors = {} 16 | logs = {} 17 | stats = {} 18 | local error_callback = 
function(err) 19 | log.error("got error: %s", err) 20 | table.insert(errors, err) 21 | end 22 | local log_callback = function(fac, str, level) 23 | log.info("got log: %d - %s - %s", level, fac, str) 24 | table.insert(logs, string.format("got log: %d - %s - %s", level, fac, str)) 25 | end 26 | local stats_callback = function(json_stats) 27 | log.info("got stats %s", json_stats) 28 | table.insert(stats, json_stats) 29 | end 30 | 31 | local options = { 32 | ["statistics.interval.ms"] = "1000", 33 | } 34 | if additional_opts ~= nil then 35 | for key, value in pairs(additional_opts) do 36 | options[key] = value 37 | end 38 | end 39 | 40 | producer, err = tnt_kafka.Producer.create({ 41 | brokers = brokers, 42 | options = options, 43 | log_callback = log_callback, 44 | stats_callback = stats_callback, 45 | error_callback = error_callback, 46 | default_topic_options = { 47 | ["partitioner"] = "murmur2_random", 48 | }, 49 | }) 50 | if err ~= nil then 51 | log.error("got err %s", err) 52 | box.error{code = 500, reason = err} 53 | end 54 | end 55 | 56 | local function produce(messages) 57 | for _, message in ipairs(messages) do 58 | local err = producer:produce({ 59 | topic = TOPIC_NAME, 60 | key = message.key, 61 | value = message.value, 62 | headers = message.headers, 63 | }) 64 | if err ~= nil then 65 | log.error("got error '%s' while sending value '%s'", err, json.encode(message)) 66 | else 67 | log.error("successfully sent value '%s'", json.encode(message)) 68 | end 69 | end 70 | end 71 | 72 | local function dump_conf() 73 | return producer:dump_conf() 74 | end 75 | 76 | local function get_errors() 77 | return errors 78 | end 79 | 80 | local function get_logs() 81 | return logs 82 | end 83 | 84 | local function get_stats() 85 | return stats 86 | end 87 | 88 | local function metadata(timeout_ms, topic) 89 | return producer:metadata({timeout_ms = timeout_ms, topic = topic}) 90 | end 91 | 92 | local function list_groups(timeout_ms) 93 | local res, err = producer:list_groups({timeout_ms = timeout_ms}) 94 | if err ~= nil then 95 | return nil, err 96 | end 97 | log.info("Groups: %s", json.encode(res)) 98 | -- Some fields can have binary data that won't 99 | -- be correctly processed by connector. 
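-- Strip the 'members' field from every group so the returned table contains only plain-string values.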
100 | for _, group in ipairs(res) do 101 | group['members'] = nil 102 | end 103 | return res 104 | end 105 | 106 | local function close() 107 | local _, err = producer:close() 108 | if err ~= nil then 109 | log.error("got err %s", err) 110 | box.error{code = 500, reason = err} 111 | end 112 | end 113 | 114 | local function test_create_errors() 115 | log.info('Create without config') 116 | local _, err = tnt_kafka.Producer.create() 117 | assert(err == 'config must not be nil') 118 | 119 | log.info('Create with empty config') 120 | local _, err = tnt_kafka.Producer.create({}) 121 | assert(err == 'producer config table must have non nil key \'brokers\' which contains string') 122 | 123 | log.info('Create with empty brokers') 124 | local _, err = tnt_kafka.Producer.create({brokers = ''}) 125 | assert(err == 'No valid brokers specified') 126 | 127 | log.info('Create with invalid default_topic_options keys') 128 | local _, err = tnt_kafka.Producer.create({brokers = '', default_topic_options = {[{}] = 2}}) 129 | assert(err == 'producer config default topic options must contains only string keys and string values') 130 | 131 | log.info('Create with invalid default_topic_options property') 132 | local _, err = tnt_kafka.Producer.create({brokers = '', default_topic_options = {[2] = 2}}) 133 | assert(err == 'No such configuration property: "2"') 134 | 135 | log.info('Create with invalid options keys') 136 | local _, err = tnt_kafka.Producer.create({brokers = '', options = {[{}] = 2}}) 137 | assert(err == 'producer config options must contains only string keys and string values') 138 | 139 | log.info('Create with invalid options property') 140 | local _, err = tnt_kafka.Producer.create({brokers = '', options = {[2] = 2}}) 141 | assert(err == 'No such configuration property: "2"') 142 | 143 | log.info('Create with incompatible properties') 144 | local _, err = tnt_kafka.Producer.create({brokers = '', options = {['reconnect.backoff.max.ms'] = '2', ['reconnect.backoff.ms'] = '1000'}}) 145 | assert(err == '`reconnect.backoff.max.ms` must be >= `reconnect.backoff.ms`') 146 | end 147 | 148 | return { 149 | create = create, 150 | produce = produce, 151 | get_errors = get_errors, 152 | get_logs = get_logs, 153 | get_stats = get_stats, 154 | close = close, 155 | dump_conf = dump_conf, 156 | metadata = metadata, 157 | list_groups = list_groups, 158 | 159 | test_create_errors = test_create_errors, 160 | } 161 | -------------------------------------------------------------------------------- /kafka/consumer_msg.c: -------------------------------------------------------------------------------- 1 | #include "common.h" 2 | #include "consumer_msg.h" 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | static const char null_literal[] = "NULL"; 11 | 12 | //////////////////////////////////////////////////////////////////////////////////////////////////// 13 | /** 14 | * Consumer Message 15 | */ 16 | 17 | msg_t * 18 | lua_check_consumer_msg(struct lua_State *L, int index) { 19 | msg_t **msg_p = (msg_t **)luaL_checkudata(L, index, consumer_msg_label); 20 | if (msg_p == NULL || *msg_p == NULL) 21 | luaL_error(L, "Kafka consumer message fatal error: failed to retrieve message from lua stack!"); 22 | return *msg_p; 23 | } 24 | 25 | int 26 | lua_consumer_msg_topic(struct lua_State *L) { 27 | const msg_t *msg = lua_check_consumer_msg(L, 1); 28 | lua_pushstring(L, msg->topic_name); 29 | return 1; 30 | } 31 | 32 | int 33 | lua_consumer_msg_partition(struct lua_State *L) { 34 | const msg_t *msg = 
lua_check_consumer_msg(L, 1); 35 | 36 | lua_pushnumber(L, (double)msg->partition); 37 | return 1; 38 | } 39 | 40 | int 41 | lua_consumer_msg_offset(struct lua_State *L) { 42 | msg_t *msg = lua_check_consumer_msg(L, 1); 43 | 44 | luaL_pushint64(L, msg->offset); 45 | return 1; 46 | } 47 | 48 | int 49 | lua_consumer_msg_key(struct lua_State *L) { 50 | msg_t *msg = lua_check_consumer_msg(L, 1); 51 | 52 | if (msg->key_len <= 0 || msg->key == NULL) 53 | lua_pushnil(L); 54 | else 55 | lua_pushlstring(L, msg->key, msg->key_len); 56 | return 1; 57 | } 58 | 59 | int 60 | lua_consumer_msg_value(struct lua_State *L) { 61 | const msg_t *msg = lua_check_consumer_msg(L, 1); 62 | 63 | if (msg->value_len <= 0 || msg->value == NULL) 64 | lua_pushnil(L); 65 | else 66 | lua_pushlstring(L, msg->value, msg->value_len); 67 | return 1; 68 | } 69 | 70 | int 71 | lua_consumer_msg_headers(struct lua_State *L) { 72 | const msg_t *msg = lua_check_consumer_msg(L, 1); 73 | if (msg->headers == NULL) 74 | return 0; 75 | 76 | lua_newtable(L); 77 | 78 | size_t idx = 0; 79 | const char *key; 80 | const void *val; 81 | size_t size; 82 | 83 | while (!rd_kafka_header_get_all(msg->headers, idx++, 84 | &key, &val, &size)) { 85 | lua_pushstring(L, key); 86 | if (val != NULL) 87 | lua_pushlstring(L, val, size); 88 | else 89 | *(void **)luaL_pushcdata(L, luaL_ctypeid(L, "void *")) = NULL; 90 | lua_settable(L, -3); 91 | } 92 | return 1; 93 | } 94 | 95 | int 96 | lua_consumer_msg_tostring(struct lua_State *L) { 97 | const msg_t *msg = lua_check_consumer_msg(L, 1); 98 | 99 | size_t key_len = msg->key_len <= 0 ? sizeof(null_literal) : msg->key_len + 1; 100 | char key[key_len]; 101 | 102 | if (msg->key_len <= 0 || msg->key == NULL) { 103 | memcpy(key, null_literal, sizeof(null_literal)); 104 | } else { 105 | strncpy(key, msg->key, msg->key_len); 106 | key[msg->key_len] = '\0'; 107 | } 108 | 109 | size_t value_len = msg->value_len <= 0 ? 
sizeof(null_literal) : msg->value_len + 1; 110 | char value[value_len]; 111 | 112 | if (msg->value_len <= 0 || msg->value == NULL) { 113 | memcpy(value, null_literal, sizeof(null_literal)); 114 | } else { 115 | strncpy(value, msg->value, msg->value_len); 116 | value[msg->value_len] = '\0'; 117 | } 118 | 119 | lua_pushfstring(L, 120 | "Kafka Consumer Message: topic=%s partition=%d offset=%d key=%s value=%s", 121 | msg->topic_name, 122 | msg->partition, 123 | msg->offset, 124 | key, 125 | value); 126 | return 1; 127 | } 128 | 129 | int 130 | lua_consumer_msg_gc(struct lua_State *L) { 131 | msg_t **msg_p = (msg_t **)luaL_checkudata(L, 1, consumer_msg_label); 132 | if (msg_p && *msg_p) { 133 | destroy_consumer_msg(*msg_p); 134 | } 135 | if (msg_p) 136 | *msg_p = NULL; 137 | 138 | return 0; 139 | } 140 | 141 | msg_t * 142 | new_consumer_msg(const rd_kafka_message_t *rd_message) { 143 | const char *topic_name = rd_kafka_topic_name(rd_message->rkt); 144 | if (topic_name == NULL) 145 | topic_name = ""; 146 | 147 | size_t topic_name_len = strlen(topic_name); 148 | size_t message_size = sizeof(msg_t) + rd_message->len + rd_message->key_len + topic_name_len + 1; 149 | msg_t *msg = xcalloc(1, message_size); 150 | msg->partition = rd_message->partition; 151 | msg->value = (char*)msg + sizeof(msg_t); 152 | msg->key = (char*)msg + sizeof(msg_t) + rd_message->len; 153 | msg->topic_name = (char*)msg + sizeof(msg_t) + rd_message->len + rd_message->key_len; 154 | 155 | // headers 156 | rd_kafka_headers_t *hdrsp; 157 | rd_kafka_resp_err_t err = rd_kafka_message_headers(rd_message, &hdrsp); 158 | if (err == RD_KAFKA_RESP_ERR_NO_ERROR) 159 | msg->headers = rd_kafka_headers_copy(hdrsp); 160 | 161 | // value 162 | if (rd_message->len > 0) 163 | memcpy(msg->value, rd_message->payload, rd_message->len); 164 | msg->value_len = rd_message->len; 165 | 166 | // key 167 | if (rd_message->key_len > 0) 168 | memcpy(msg->key, rd_message->key, rd_message->key_len); 169 | msg->key_len = rd_message->key_len; 170 | msg->offset = rd_message->offset; 171 | 172 | // topic name 173 | memcpy(msg->topic_name, topic_name, topic_name_len + 1); 174 | 175 | return msg; 176 | } 177 | 178 | void 179 | destroy_consumer_msg(msg_t *msg) { 180 | if (msg == NULL) 181 | return; 182 | if (msg->headers != NULL) 183 | rd_kafka_headers_destroy(msg->headers); 184 | free(msg); 185 | 186 | return; 187 | } 188 | -------------------------------------------------------------------------------- /tests/test_producer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import json 4 | import asyncio 5 | 6 | from aiokafka import AIOKafkaConsumer 7 | import tarantool 8 | 9 | KAFKA_HOST = os.getenv("KAFKA_HOST", "kafka:9092") 10 | 11 | 12 | def get_server(): 13 | return tarantool.Connection("127.0.0.1", 3301, 14 | user="guest", 15 | password=None, 16 | socket_timeout=10, 17 | connection_timeout=40, 18 | reconnect_max_attempts=3, 19 | reconnect_delay=1, 20 | connect_now=True) 21 | 22 | 23 | def test_producer_should_produce_msgs(): 24 | server = get_server() 25 | 26 | server.call("producer.create", [KAFKA_HOST]) 27 | 28 | messages = [ 29 | {'key': '1', 'value': '1'}, 30 | {'key': '2', 'value': '2'}, 31 | {'key': '3', 'value': '3'}, 32 | {'key': '4', 'value': '4', 'headers': {'header1_key': 'header1_value', 'header2_key': 'header2_value'}}, 33 | ] 34 | server.call("producer.produce", [messages]) 35 | 36 | loop = asyncio.get_event_loop_policy().new_event_loop() 37 | 38 | async def test(): 39 | 
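# Collect everything read back from Kafka so it can be compared with the produced messages.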
kafka_output = [] 40 | 41 | async def consume(): 42 | consumer = AIOKafkaConsumer( 43 | 'test_producer', 44 | group_id="test_group", 45 | bootstrap_servers='localhost:9092', 46 | auto_offset_reset="earliest", 47 | ) 48 | # Get cluster layout 49 | await consumer.start() 50 | 51 | try: 52 | # Consume messages 53 | async for msg in consumer: 54 | kafka_msg = { 55 | 'key': msg.key if msg.key is None else msg.key.decode('utf8'), 56 | 'value': msg.value if msg.value is None else msg.value.decode('utf8') 57 | } 58 | if msg.headers: 59 | kafka_msg['headers'] = {} 60 | for k, v in msg.headers: 61 | kafka_msg['headers'][k] = v.decode('utf8') 62 | kafka_output.append(kafka_msg) 63 | 64 | finally: 65 | # Will leave consumer group; perform autocommit if enabled. 66 | await consumer.stop() 67 | 68 | try: 69 | await asyncio.wait_for(consume(), 10) 70 | except asyncio.TimeoutError: 71 | pass 72 | 73 | assert kafka_output == messages 74 | 75 | loop.run_until_complete(test()) 76 | loop.close() 77 | 78 | server.call("producer.close", []) 79 | 80 | 81 | def test_producer_should_log_errors(): 82 | server = get_server() 83 | 84 | server.call("producer.create", ["kafka:9090"]) 85 | 86 | time.sleep(2) 87 | 88 | response = server.call("producer.get_errors", []) 89 | 90 | assert len(response) > 0 91 | assert len(response[0]) > 0 92 | 93 | server.call("producer.close", []) 94 | 95 | 96 | def test_producer_stats(): 97 | server = get_server() 98 | 99 | server.call("producer.create", ["kafka:9090"]) 100 | 101 | time.sleep(5) 102 | 103 | response = server.call("producer.get_stats", []) 104 | assert len(response) > 0 105 | assert len(response[0]) > 0 106 | stat = json.loads(response[0][0]) 107 | 108 | assert 'rdkafka#producer' in stat['name'] 109 | assert 'kafka:9090/bootstrap' in stat['brokers'] 110 | assert stat['type'] == 'producer' 111 | 112 | server.call("producer.close", []) 113 | 114 | 115 | def test_producer_dump_conf(): 116 | server = get_server() 117 | 118 | server.call("producer.create", ["kafka:9090"]) 119 | 120 | time.sleep(2) 121 | 122 | response = server.call("producer.dump_conf", []) 123 | assert len(response) > 0 124 | assert len(response[0]) > 0 125 | assert 'session.timeout.ms' in response[0] 126 | assert 'socket.max.fails' in response[0] 127 | assert 'compression.codec' in response[0] 128 | 129 | server.call("producer.close", []) 130 | 131 | 132 | def test_producer_metadata(): 133 | server = get_server() 134 | 135 | server.call("producer.create", [KAFKA_HOST]) 136 | 137 | time.sleep(2) 138 | 139 | response = server.call("producer.metadata", []) 140 | assert 'orig_broker_name' in response[0] 141 | assert 'orig_broker_id' in response[0] 142 | assert 'brokers' in response[0] 143 | assert 'topics' in response[0] 144 | assert 'host' in response[0]['brokers'][0] 145 | assert 'port' in response[0]['brokers'][0] 146 | assert 'id' in response[0]['brokers'][0] 147 | 148 | response = server.call("producer.list_groups", []) 149 | assert response[0] is not None 150 | response = server.call("producer.list_groups", [0]) 151 | assert tuple(response) == (None, 'Local: Timed out') 152 | 153 | response = server.call("producer.metadata", [0]) 154 | assert tuple(response) == (None, 'Local: Timed out') 155 | 156 | server.call("producer.close", []) 157 | 158 | server.call("producer.create", ["badhost:8080"]) 159 | response = server.call("producer.metadata", [200]) 160 | assert tuple(response) == (None, 'Local: Broker transport failure') 161 | response = server.call("producer.list_groups", [200]) 162 | assert response[0] is 
None 163 | server.call("producer.close", []) 164 | 165 | 166 | def test_producer_should_log_debug(): 167 | server = get_server() 168 | 169 | server.call("producer.create", [KAFKA_HOST, {"debug": "broker,topic,msg"}]) 170 | 171 | time.sleep(2) 172 | 173 | response = server.call("producer.get_logs", []) 174 | 175 | assert len(response) > 0 176 | assert len(response[0]) > 0 177 | 178 | server.call("producer.close", []) 179 | 180 | 181 | def test_producer_create_errors(): 182 | server = get_server() 183 | server.call("producer.test_create_errors") 184 | -------------------------------------------------------------------------------- /kafka/callbacks.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_CALLBACKS_H 2 | #define TNT_KAFKA_CALLBACKS_H 3 | 4 | #include "common.h" 5 | #include "queue.h" 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include 14 | #include 15 | 16 | //////////////////////////////////////////////////////////////////////////////////////////////////// 17 | /** 18 | * Common callbacks handling 19 | */ 20 | 21 | /** 22 | * Handle logs from RDKafka 23 | */ 24 | 25 | typedef struct { 26 | int level; 27 | char *fac; 28 | char *buf; 29 | } log_msg_t; 30 | 31 | log_msg_t * 32 | new_log_msg(int level, const char *fac, const char *buf); 33 | 34 | void 35 | destroy_log_msg(log_msg_t *msg); 36 | 37 | void 38 | log_callback(const rd_kafka_t *rd_kafka, int level, const char *fac, const char *buf); 39 | 40 | int 41 | push_log_cb_args(struct lua_State *L, const log_msg_t *msg); 42 | 43 | /** 44 | * Handle stats from RDKafka 45 | */ 46 | 47 | int 48 | stats_callback(rd_kafka_t *rd_kafka, char *json, size_t json_len, void *opaque); 49 | 50 | int 51 | push_stats_cb_args(struct lua_State *L, const char *msg); 52 | 53 | /** 54 | * Handle errors from RDKafka 55 | */ 56 | 57 | typedef struct { 58 | int err; 59 | char *reason; 60 | } error_msg_t; 61 | 62 | error_msg_t * 63 | new_error_msg(int err, const char *reason); 64 | 65 | void 66 | destroy_error_msg(error_msg_t *msg); 67 | 68 | void 69 | error_callback(rd_kafka_t *rd_kafka, int err, const char *reason, void *opaque); 70 | 71 | int 72 | push_errors_cb_args(struct lua_State *L, const error_msg_t *msg); 73 | 74 | /** 75 | * Handle message delivery reports from RDKafka 76 | */ 77 | 78 | typedef struct { 79 | int dr_callback; 80 | int err; 81 | } dr_msg_t; 82 | 83 | dr_msg_t * 84 | new_dr_msg(int dr_callback, int err); 85 | 86 | void 87 | destroy_dr_msg(dr_msg_t *dr_msg); 88 | 89 | void 90 | msg_delivery_callback(rd_kafka_t *producer, const rd_kafka_message_t *msg, void *opaque); 91 | 92 | 93 | /** 94 | * Handle rebalance callbacks from RDKafka 95 | */ 96 | 97 | typedef enum { 98 | REB_EVENT_ASSIGN, 99 | REB_EVENT_REVOKE, 100 | REB_EVENT_ERROR, 101 | } rebalance_event_kind_t; 102 | 103 | typedef struct { 104 | rebalance_event_kind_t kind; 105 | rd_kafka_topic_partition_list_t *partitions; 106 | rd_kafka_resp_err_t err; 107 | } rebalance_msg_t; 108 | 109 | rebalance_msg_t *new_rebalance_msg(rebalance_event_kind_t kind, 110 | const rd_kafka_topic_partition_list_t *partitions, 111 | rd_kafka_resp_err_t err); 112 | 113 | void destroy_rebalance_msg(rebalance_msg_t *rebalance_msg); 114 | 115 | void rebalance_callback(rd_kafka_t *consumer, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque); 116 | 117 | /** 118 | * Structure which contains all queues for communication between main TX thread and 119 | * RDKafka callbacks from background 
threads 120 | */ 121 | 122 | enum { 123 | LOG_QUEUE, 124 | STATS_QUEUE, 125 | ERROR_QUEUE, 126 | REBALANCE_QUEUE, 127 | MAX_QUEUE, 128 | }; 129 | 130 | RD_UNUSED 131 | static const char *const queue2str[] = { 132 | [LOG_QUEUE] = "log_callback", 133 | [STATS_QUEUE] = "stats_callback", 134 | [ERROR_QUEUE] = "error_callback", 135 | [REBALANCE_QUEUE] = "rebalance_callback", 136 | }; 137 | 138 | #define LUA_RDKAFKA_POLL_FUNC(rd_type, name, queue_no, destroy_fn, push_args_fn) \ 139 | int \ 140 | lua_##rd_type##_##name(struct lua_State *L) { \ 141 | if (lua_gettop(L) != 2) \ 142 | return luaL_error(L, "Usage: count, err = " #rd_type ":" #name "(limit)"); \ 143 | \ 144 | rd_type##_t *rd = lua_check_##rd_type(L, 1); \ 145 | if (rd->event_queues == NULL || \ 146 | rd->event_queues->queues[queue_no] == NULL || \ 147 | rd->event_queues->cb_refs[queue_no] == LUA_REFNIL) { \ 148 | lua_pushnumber(L, 0); \ 149 | lua_pushliteral(L, #rd_type "." #name " error: callback is not set"); \ 150 | return 2; \ 151 | } \ 152 | \ 153 | int limit = lua_tonumber(L, 2); \ 154 | void *msg = NULL; \ 155 | int count = 0; \ 156 | while (count < limit) { \ 157 | msg = queue_pop(rd->event_queues->queues[queue_no]); \ 158 | if (msg == NULL) \ 159 | break; \ 160 | \ 161 | count++; \ 162 | lua_rawgeti(L, LUA_REGISTRYINDEX, rd->event_queues->cb_refs[queue_no]); \ 163 | int args_count = push_args_fn(L, msg); \ 164 | int rc = lua_pcall(L, args_count, 0, 0); /* call (N arguments, 0 result) */ \ 165 | destroy_fn(msg); \ 166 | if (rc != 0) { \ 167 | lua_pushinteger(L, count); \ 168 | lua_insert(L, -2); \ 169 | return 2; \ 170 | } \ 171 | } \ 172 | \ 173 | lua_pushinteger(L, count); \ 174 | return 1; \ 175 | } 176 | 177 | typedef struct { 178 | queue_t *consume_queue; 179 | queue_t *delivery_queue; 180 | 181 | queue_t *queues[MAX_QUEUE]; 182 | int cb_refs[MAX_QUEUE]; 183 | } event_queues_t; 184 | 185 | event_queues_t *new_event_queues(); 186 | 187 | void destroy_event_queues(struct lua_State *L, event_queues_t *event_queues); 188 | 189 | #endif //TNT_KAFKA_CALLBACKS_H 190 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Tarantool kafka 2 | =============== 3 | Full-featured, high-performance kafka library for Tarantool based on [librdkafka](https://github.com/confluentinc/librdkafka). 4 | 5 | It can produce more than 150k messages per second and consume more than 140k messages per second. 6 | 7 | ## Features 8 | * Kafka producer and consumer implementations. 9 | * Fiber friendly. 10 | * Mostly errorless functions and methods. Error handling in the Tarantool ecosystem is quite a mess: 11 | some libraries throw a native Lua `error` while others throw `box.error` instead. `kafka` returns 12 | non-critical errors as strings, which lets you decide how to handle them. 13 | 14 | ## Requirements 15 | * Tarantool >= 1.10.2 16 | * Tarantool development headers 17 | * librdkafka >= 0.11.5 18 | * librdkafka development headers 19 | * openssl-libs 20 | * openssl development headers 21 | * make 22 | * cmake 23 | * gcc 24 | 25 | ## Installation 26 | ```bash 27 | tt rocks install kafka 28 | ``` 29 | 30 | ### Build module with statically linked librdkafka 31 | 32 | To install the kafka module with a built-in `librdkafka` dependency, use the `STATIC_BUILD` option: 33 | 34 | ```bash 35 | tt rocks STATIC_BUILD=ON install kafka 36 | ``` 37 | 38 | Be aware that this approach doesn't include static openssl. 
39 | Instead, it assumes tarantool has openssl symbols exported. 40 | That means the kafka static build is only usable with a static tarantool build. 41 | 42 | For a successful static build, you need to compile kafka 43 | against the [same version of openssl](https://github.com/tarantool/tarantool/blob/800e5ed617f7cd352ec597ce16973c7e4cad76c8/static-build/CMakeLists.txt#L11) that tarantool does. 44 | 45 | ## Usage 46 | 47 | Consumer 48 | ```lua 49 | local os = require('os') 50 | local log = require('log') 51 | local tnt_kafka = require('kafka') 52 | 53 | local consumer, err = tnt_kafka.Consumer.create({ brokers = "localhost:9092" }) 54 | if err ~= nil then 55 | print(err) 56 | os.exit(1) 57 | end 58 | 59 | local err = consumer:subscribe({ "some_topic" }) 60 | if err ~= nil then 61 | print(err) 62 | os.exit(1) 63 | end 64 | 65 | local out, err = consumer:output() 66 | if err ~= nil then 67 | print(string.format("got fatal error '%s'", err)) 68 | os.exit(1) 69 | end 70 | 71 | while true do 72 | if out:is_closed() then 73 | os.exit(1) 74 | end 75 | 76 | local msg = out:get() 77 | if msg ~= nil then 78 | print(string.format( 79 | "got msg with topic='%s' partition='%s' offset='%s' key='%s' value='%s'", 80 | msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value() 81 | )) 82 | end 83 | end 84 | 85 | -- from another fiber on app shutdown 86 | consumer:close() 87 | ``` 88 | 89 | Producer 90 | ```lua 91 | local os = require('os') 92 | local log = require('log') 93 | local tnt_kafka = require('kafka') 94 | 95 | local producer, err = tnt_kafka.Producer.create({ brokers = "kafka:9092" }) 96 | if err ~= nil then 97 | print(err) 98 | os.exit(1) 99 | end 100 | 101 | for i = 1, 1000 do 102 | local message = "test_value " .. tostring(i) 103 | local err = producer:produce({ 104 | topic = "test_topic", 105 | key = "test_key", 106 | value = message 107 | }) 108 | if err ~= nil then 109 | print(string.format("got error '%s' while sending value '%s'", err, message)) 110 | else 111 | print(string.format("successfully sent value '%s'", message)) 112 | end 113 | end 114 | 115 | producer:close() 116 | ``` 117 | 118 | You can pass additional configuration parameters for librdkafka 119 | (https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md) in the special `options` table on client creation: 120 | ```lua 121 | tnt_kafka.Producer.create({ 122 | options = { 123 | ["some.key"] = "some_value", 124 | }, 125 | }) 126 | 127 | tnt_kafka.Consumer.create({ 128 | options = { 129 | ["some.key"] = "some_value", 130 | }, 131 | }) 132 | ``` 133 | 134 | More examples are available in the `examples` folder. 135 | 136 | ## Using SSL 137 | 138 | Connecting to brokers over SSL is supported by librdkafka itself, so you only need to properly configure the brokers by 139 | following this guide: https://github.com/confluentinc/librdkafka/wiki/Using-SSL-with-librdkafka 140 | 141 | After that, you only need to pass the following configuration parameters on client creation: 142 | ```lua 143 | tnt_kafka.Producer.create({ 144 | brokers = "broker_list", 145 | options = { 146 | ["security.protocol"] = "ssl", 147 | -- CA certificate file for verifying the broker's certificate. 
148 | ["ssl.ca.location"] = "ca-cert", 149 | -- Client's certificate 150 | ["ssl.certificate.location"] = "client_?????_client.pem", 151 | -- Client's key 152 | ["ssl.key.location"] = "client_?????_client.key", 153 | -- Key password, if any 154 | ["ssl.key.password"] = "abcdefgh", 155 | }, 156 | }) 157 | 158 | tnt_kafka.Consumer.create({ 159 | brokers = "broker_list", 160 | options = { 161 | ["security.protocol"] = "ssl", 162 | -- CA certificate file for verifying the broker's certificate. 163 | ["ssl.ca.location"] = "ca-cert", 164 | -- Client's certificate 165 | ["ssl.certificate.location"] = "client_?????_client.pem", 166 | -- Client's key 167 | ["ssl.key.location"] = "client_?????_client.key", 168 | -- Key password, if any 169 | ["ssl.key.password"] = "abcdefgh", 170 | }, 171 | }) 172 | ``` 173 | 174 | ## Known issues 175 | 176 | ## TODO 177 | * Ordered storage for offsets to prevent commits unprocessed messages 178 | * More examples 179 | * Better documentation 180 | 181 | ## Benchmarks 182 | 183 | Before any commands init and updated git submodule 184 | ```bash 185 | git submodule init 186 | git submodule update 187 | ``` 188 | 189 | ### Producer 190 | 191 | #### Async 192 | 193 | Result: over 160000 produced messages per second on macbook pro 2016 194 | 195 | Local run in docker: 196 | ```bash 197 | make docker-run-environment 198 | make docker-create-benchmark-async-producer-topic 199 | make docker-run-benchmark-async-producer-interactive 200 | ``` 201 | 202 | #### Sync 203 | 204 | Result: over 90000 produced messages per second on macbook pro 2016 205 | 206 | Local run in docker: 207 | ```bash 208 | make docker-run-environment 209 | make docker-create-benchmark-sync-producer-topic 210 | make docker-run-benchmark-sync-producer-interactive 211 | ``` 212 | 213 | ### Consumer 214 | 215 | #### Auto offset store enabled 216 | 217 | Result: over 190000 consumed messages per second on macbook pro 2016 218 | 219 | Local run in docker: 220 | ```bash 221 | make docker-run-environment 222 | make docker-create-benchmark-auto-offset-store-consumer-topic 223 | make docker-run-benchmark-auto-offset-store-consumer-interactive 224 | ``` 225 | 226 | #### Manual offset store 227 | 228 | Result: over 190000 consumed messages per second on macbook pro 2016 229 | 230 | Local run in docker: 231 | ```bash 232 | make docker-run-environment 233 | make docker-create-benchmark-manual-commit-consumer-topic 234 | make docker-run-benchmark-manual-commit-consumer-interactive 235 | ``` 236 | 237 | ## Developing 238 | 239 | ### Tests 240 | Before run any test you should add to `/etc/hosts` entry 241 | ``` 242 | 127.0.0.1 kafka 243 | ``` 244 | 245 | You can run docker based integration tests via makefile target 246 | ```bash 247 | make test-run-with-docker 248 | ``` 249 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | NETWORK="tnt-kafka-tests" 2 | 3 | docker-remove-network: 4 | docker network remove ${NETWORK} || true 5 | 6 | docker-create-network: docker-remove-network 7 | docker network create ${NETWORK} 8 | 9 | docker-remove-zoo: 10 | docker rm -f zookeeper || true 11 | 12 | docker-run-zoo: docker-remove-zoo 13 | docker run -d \ 14 | --net=${NETWORK} \ 15 | --name=zookeeper \ 16 | -p 2181:2181 \ 17 | -e ZOOKEEPER_CLIENT_PORT=2181 \ 18 | confluentinc/cp-zookeeper:5.0.0 19 | 20 | docker-remove-kafka: 21 | docker rm -f kafka || true 22 | 23 | docker-pull-kafka: 24 | docker pull 
wurstmeister/kafka 25 | 26 | docker-run-kafka: docker-remove-kafka 27 | docker run -d \ 28 | --net=${NETWORK} \ 29 | --name=kafka \ 30 | -p 9092:9092 \ 31 | -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \ 32 | -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \ 33 | -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092 \ 34 | -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \ 35 | wurstmeister/kafka 36 | 37 | docker-read-topic-data: 38 | docker run \ 39 | --net=${NETWORK} \ 40 | --rm \ 41 | confluentinc/cp-kafka:5.0.0 \ 42 | kafka-console-consumer --bootstrap-server kafka:9092 --topic test_partially_unsubscribe_1 --from-beginning 43 | 44 | APP_NAME = kafka-test 45 | APP_IMAGE = kafka-test-image 46 | 47 | docker-build-app: 48 | docker build -t ${APP_IMAGE} -f ./docker/Dockerfile . 49 | 50 | docker-remove-app: 51 | docker rm -f ${APP_NAME} || true 52 | 53 | docker-run-app: docker-build-app docker-remove-app 54 | docker run -d \ 55 | -p 3301:3301 \ 56 | --net ${NETWORK} \ 57 | --name ${APP_NAME} \ 58 | -e KAFKA_BROKERS=kafka:9092 \ 59 | ${APP_IMAGE} 60 | 61 | docker-run-interactive: docker-build-app docker-remove-app 62 | docker run -it \ 63 | -p 3301:3301 \ 64 | --net ${NETWORK} \ 65 | --name ${APP_NAME} \ 66 | -e KAFKA_BROKERS=kafka:9092 \ 67 | ${APP_IMAGE} 68 | 69 | docker-remove-all: \ 70 | docker-remove-app \ 71 | docker-remove-kafka \ 72 | docker-remove-zoo \ 73 | docker-remove-network 74 | 75 | docker-run-environment: \ 76 | docker-remove-all \ 77 | docker-create-network \ 78 | docker-run-zoo \ 79 | docker-run-kafka 80 | 81 | docker-run-all: \ 82 | docker-run-environment \ 83 | docker-create-network \ 84 | docker-build-app \ 85 | docker-run-app 86 | 87 | ####################################################################### 88 | # Tests 89 | 90 | tests-dep: 91 | cd ./tests && \ 92 | python3 -m venv venv && \ 93 | . venv/bin/activate && \ 94 | pip install -r requirements.txt && \ 95 | deactivate 96 | 97 | tests-run: 98 | cd ./tests && \ 99 | . 
venv/bin/activate && \ 100 | pytest -W ignore -vv && \ 101 | deactivate 102 | 103 | test-sleep: tests-dep docker-run-all 104 | sleep 10 105 | 106 | test-run-with-docker: test-sleep 107 | docker run \ 108 | --net=${NETWORK} \ 109 | --rm confluentinc/cp-kafka:5.0.0 \ 110 | kafka-topics --create --topic test_producer --partitions 1 --replication-factor 1 \ 111 | --if-not-exists --zookeeper zookeeper:2181 112 | 113 | docker run \ 114 | --net=${NETWORK} \ 115 | --rm confluentinc/cp-kafka:5.0.0 \ 116 | kafka-topics --create --topic test_consume --partitions 1 --replication-factor 1 \ 117 | --if-not-exists --zookeeper zookeeper:2181 118 | 119 | docker run \ 120 | --net=${NETWORK} \ 121 | --rm confluentinc/cp-kafka:5.0.0 \ 122 | kafka-topics --create --topic test_unsubscribe --partitions 1 --replication-factor 1 \ 123 | --if-not-exists --zookeeper zookeeper:2181 124 | 125 | docker run \ 126 | --net=${NETWORK} \ 127 | --rm confluentinc/cp-kafka:5.0.0 \ 128 | kafka-topics --create --topic test_unsub_partially_1 --partitions 1 --replication-factor 1 \ 129 | --if-not-exists --zookeeper zookeeper:2181 130 | 131 | docker run \ 132 | --net=${NETWORK} \ 133 | --rm confluentinc/cp-kafka:5.0.0 \ 134 | kafka-topics --create --topic test_unsub_partially_2 --partitions 1 --replication-factor 1 \ 135 | --if-not-exists --zookeeper zookeeper:2181 136 | 137 | docker run \ 138 | --net=${NETWORK} \ 139 | --rm confluentinc/cp-kafka:5.0.0 \ 140 | kafka-topics --create --topic test_multi_consume_1 --partitions 1 --replication-factor 1 \ 141 | --if-not-exists --zookeeper zookeeper:2181 142 | 143 | docker run \ 144 | --net=${NETWORK} \ 145 | --rm confluentinc/cp-kafka:5.0.0 \ 146 | kafka-topics --create --topic test_multi_consume_2 --partitions 1 --replication-factor 1 \ 147 | --if-not-exists --zookeeper zookeeper:2181 148 | 149 | docker run \ 150 | --net=${NETWORK} \ 151 | --rm confluentinc/cp-kafka:5.0.0 \ 152 | kafka-topics --create --topic test_consuming_from_last_committed_offset --partitions 1 --replication-factor 1 \ 153 | --if-not-exists --zookeeper zookeeper:2181 154 | 155 | sleep 5 156 | 157 | cd ./tests && \ 158 | python3 -m venv venv && \ 159 | . venv/bin/activate && \ 160 | pip install -r requirements.txt && \ 161 | deactivate 162 | 163 | cd ./tests && \ 164 | . 
venv/bin/activate && \ 165 | pytest -W ignore -vv && \ 166 | deactivate 167 | 168 | ####################################################################### 169 | # Benchmarks 170 | 171 | docker-create-benchmark-async-producer-topic: 172 | docker run \ 173 | --net=${NETWORK} \ 174 | --rm confluentinc/cp-kafka:5.0.0 \ 175 | kafka-topics --create --topic async_producer_benchmark --partitions 2 --replication-factor 1 \ 176 | --if-not-exists --zookeeper zookeeper:2181 177 | 178 | docker-run-benchmark-async-producer-interactive: docker-build-app docker-remove-app 179 | docker run -it \ 180 | -p 3301:3301 \ 181 | --net ${NETWORK} \ 182 | --name ${APP_NAME} \ 183 | --entrypoint "tarantool" \ 184 | -e KAFKA_BROKERS=kafka:9092 \ 185 | ${APP_IMAGE} \ 186 | /opt/tarantool/benchmarks/async_producer.lua 187 | 188 | docker-read-benchmark-async-producer-topic-data: 189 | docker run \ 190 | --net=${NETWORK} \ 191 | --rm \ 192 | confluentinc/cp-kafka:5.0.0 \ 193 | kafka-console-consumer --bootstrap-server kafka:9092 --topic async_producer_benchmark --from-beginning 194 | 195 | docker-create-benchmark-sync-producer-topic: 196 | docker run \ 197 | --net=${NETWORK} \ 198 | --rm confluentinc/cp-kafka:5.0.0 \ 199 | kafka-topics --create --topic sync_producer_benchmark --partitions 2 --replication-factor 1 \ 200 | --if-not-exists --zookeeper zookeeper:2181 201 | 202 | docker-run-benchmark-sync-producer-interactive: docker-build-app docker-remove-app 203 | docker run -it \ 204 | -p 3301:3301 \ 205 | --net ${NETWORK} \ 206 | --name ${APP_NAME} \ 207 | --entrypoint "tarantool" \ 208 | -e KAFKA_BROKERS=kafka:9092 \ 209 | ${APP_IMAGE} \ 210 | /opt/tarantool/benchmarks/sync_producer.lua 211 | 212 | docker-read-benchmark-sync-producer-topic-data: 213 | docker run \ 214 | --net=${NETWORK} \ 215 | --rm \ 216 | confluentinc/cp-kafka:5.0.0 \ 217 | kafka-console-consumer --bootstrap-server kafka:9092 --topic sync_producer_benchmark --from-beginning 218 | 219 | docker-create-benchmark-auto-offset-store-consumer-topic: 220 | docker run \ 221 | --net=${NETWORK} \ 222 | --rm confluentinc/cp-kafka:5.0.0 \ 223 | kafka-topics --create --topic auto_offset_store_consumer_benchmark --partitions 2 --replication-factor 1 \ 224 | --if-not-exists --zookeeper zookeeper:2181 225 | 226 | docker-run-benchmark-auto-offset-store-consumer-interactive: docker-build-app docker-remove-app 227 | docker run -it \ 228 | -p 3301:3301 \ 229 | --net ${NETWORK} \ 230 | --name ${APP_NAME} \ 231 | --entrypoint "tarantool" \ 232 | -e KAFKA_BROKERS=kafka:9092 \ 233 | ${APP_IMAGE} \ 234 | /opt/tarantool/benchmarks/auto_offset_store_consumer.lua 235 | 236 | docker-read-benchmark-auto-offset-store-consumer-topic-data: 237 | docker run \ 238 | --net=${NETWORK} \ 239 | --rm \ 240 | confluentinc/cp-kafka:5.0.0 \ 241 | kafka-console-consumer --bootstrap-server kafka:9092 --topic auto_offset_store_consumer_benchmark --from-beginning 242 | 243 | docker-create-benchmark-manual-commit-consumer-topic: 244 | docker run \ 245 | --net=${NETWORK} \ 246 | --rm confluentinc/cp-kafka:5.0.0 \ 247 | kafka-topics --create --topic manual_offset_store_consumer --partitions 2 --replication-factor 1 \ 248 | --if-not-exists --zookeeper zookeeper:2181 249 | 250 | docker-run-benchmark-manual-commit-consumer-interactive: docker-build-app docker-remove-app 251 | docker run -it \ 252 | -p 3301:3301 \ 253 | --net ${NETWORK} \ 254 | --name ${APP_NAME} \ 255 | --entrypoint "tarantool" \ 256 | -e KAFKA_BROKERS=kafka:9092 \ 257 | ${APP_IMAGE} \ 258 | 
/opt/tarantool/benchmarks/manual_offset_store_consumer.lua 259 | 260 | docker-read-benchmark-manual-commit-consumer-topic-data: 261 | docker run \ 262 | --net=${NETWORK} \ 263 | --rm \ 264 | confluentinc/cp-kafka:5.0.0 \ 265 | kafka-console-consumer --bootstrap-server kafka:9092 --topic manual_offset_store_consumer --from-beginning 266 | -------------------------------------------------------------------------------- /kafka/callbacks.c: -------------------------------------------------------------------------------- 1 | #include "callbacks.h" 2 | #include "common.h" 3 | #include "consumer_msg.h" 4 | #include "queue.h" 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | //////////////////////////////////////////////////////////////////////////////////////////////////// 18 | /** 19 | * Common callbacks handling 20 | */ 21 | 22 | /** 23 | * Handle logs from RDKafka 24 | */ 25 | 26 | log_msg_t * 27 | new_log_msg(int level, const char *fac, const char *buf) { 28 | log_msg_t *msg = xmalloc(sizeof(log_msg_t)); 29 | msg->level = level; 30 | msg->fac = xmalloc(sizeof(char) * strlen(fac) + 1); 31 | strcpy(msg->fac, fac); 32 | msg->buf = xmalloc(sizeof(char) * strlen(buf) + 1); 33 | strcpy(msg->buf, buf); 34 | return msg; 35 | } 36 | 37 | void 38 | destroy_log_msg(log_msg_t *msg) { 39 | if (msg->fac != NULL) 40 | free(msg->fac); 41 | if (msg->buf != NULL) 42 | free(msg->buf); 43 | free(msg); 44 | } 45 | 46 | void 47 | log_callback(const rd_kafka_t *rd_kafka, int level, const char *fac, const char *buf) { 48 | event_queues_t *event_queues = rd_kafka_opaque(rd_kafka); 49 | if (event_queues != NULL && event_queues->queues[LOG_QUEUE] != NULL) { 50 | log_msg_t *msg = new_log_msg(level, fac, buf); 51 | if (msg != NULL && queue_push(event_queues->queues[LOG_QUEUE], msg) != 0) { 52 | destroy_log_msg(msg); 53 | } 54 | } 55 | } 56 | 57 | int 58 | stats_callback(rd_kafka_t *rd_kafka, char *json, size_t json_len, void *opaque) { 59 | (void)opaque; 60 | (void)json_len; 61 | event_queues_t *event_queues = rd_kafka_opaque(rd_kafka); 62 | if (event_queues != NULL && event_queues->queues[STATS_QUEUE] != NULL) { 63 | if (json != NULL) { 64 | if (queue_push(event_queues->queues[STATS_QUEUE], json) != 0) 65 | return 0; // destroy json after return 66 | return 1; // json should be freed manually 67 | } 68 | } 69 | return 0; 70 | } 71 | 72 | /** 73 | * Handle errors from RDKafka 74 | */ 75 | 76 | error_msg_t * 77 | new_error_msg(int err, const char *reason) { 78 | error_msg_t *msg = xmalloc(sizeof(error_msg_t)); 79 | msg->err = err; 80 | msg->reason = xmalloc(sizeof(char) * strlen(reason) + 1); 81 | strcpy(msg->reason, reason); 82 | return msg; 83 | } 84 | 85 | void 86 | destroy_error_msg(error_msg_t *msg) { 87 | if (msg->reason != NULL) 88 | free(msg->reason); 89 | free(msg); 90 | } 91 | 92 | void 93 | error_callback(rd_kafka_t *rd_kafka, int err, const char *reason, void *opaque) { 94 | (void)rd_kafka; 95 | event_queues_t *event_queues = opaque; 96 | if (event_queues != NULL && event_queues->queues[ERROR_QUEUE] != NULL) { 97 | error_msg_t *msg = new_error_msg(err, reason); 98 | if (msg != NULL && queue_push(event_queues->queues[ERROR_QUEUE], msg) != 0) 99 | destroy_error_msg(msg); 100 | } 101 | } 102 | 103 | int 104 | push_log_cb_args(struct lua_State *L, const log_msg_t *msg) { 105 | lua_pushstring(L, msg->fac); 106 | lua_pushstring(L, msg->buf); 107 | lua_pushinteger(L, msg->level); 108 | return 3; 109 | } 110 | 111 | int 112 | 
push_stats_cb_args(struct lua_State *L, const char *msg) { 113 | lua_pushstring(L, msg); 114 | return 1; 115 | } 116 | 117 | int 118 | push_errors_cb_args(struct lua_State *L, const error_msg_t *msg) { 119 | lua_pushstring(L, msg->reason); 120 | return 1; 121 | } 122 | 123 | /** 124 | * Handle message delivery reports from RDKafka 125 | */ 126 | 127 | dr_msg_t * 128 | new_dr_msg(int dr_callback, int err) { 129 | dr_msg_t *dr_msg; 130 | dr_msg = xmalloc(sizeof(dr_msg_t)); 131 | dr_msg->dr_callback = dr_callback; 132 | dr_msg->err = err; 133 | return dr_msg; 134 | } 135 | 136 | void 137 | destroy_dr_msg(dr_msg_t *dr_msg) { 138 | free(dr_msg); 139 | } 140 | 141 | void 142 | msg_delivery_callback(rd_kafka_t *producer, const rd_kafka_message_t *msg, void *opaque) { 143 | (void)producer; 144 | event_queues_t *event_queues = opaque; 145 | if (msg->_private == NULL || event_queues == NULL || event_queues->delivery_queue == NULL) 146 | return; 147 | 148 | dr_msg_t *dr_msg = msg->_private; 149 | if (dr_msg != NULL) { 150 | if (msg->err != RD_KAFKA_RESP_ERR_NO_ERROR) { 151 | dr_msg->err = msg->err; 152 | } 153 | queue_push(event_queues->delivery_queue, dr_msg); 154 | } 155 | } 156 | 157 | /** 158 | * Handle rebalance callbacks from RDKafka 159 | */ 160 | 161 | rebalance_msg_t * 162 | new_rebalance_msg(rebalance_event_kind_t kind, 163 | const rd_kafka_topic_partition_list_t *partitions, 164 | rd_kafka_resp_err_t err) { 165 | rebalance_msg_t *msg = xcalloc(1, sizeof(*msg)); 166 | msg->kind = kind; 167 | msg->err = err; 168 | 169 | if (partitions != NULL) { 170 | msg->partitions = rd_kafka_topic_partition_list_copy(partitions); 171 | } 172 | return msg; 173 | } 174 | 175 | void 176 | destroy_rebalance_msg(rebalance_msg_t *msg) { 177 | if (msg == NULL) 178 | return; 179 | if (msg->partitions != NULL) 180 | rd_kafka_topic_partition_list_destroy(msg->partitions); 181 | free(msg); 182 | } 183 | 184 | static void 185 | push_rebalance_event_if_needed(event_queues_t *eq, 186 | rebalance_event_kind_t kind, 187 | const rd_kafka_topic_partition_list_t *partitions, 188 | rd_kafka_resp_err_t err) { 189 | if (eq == NULL) 190 | return; 191 | if (eq->queues[REBALANCE_QUEUE] == NULL) 192 | return; 193 | if (eq->cb_refs[REBALANCE_QUEUE] == LUA_REFNIL) 194 | return; 195 | 196 | rebalance_msg_t *msg = new_rebalance_msg(kind, partitions, err); 197 | if (msg == NULL) 198 | return; 199 | 200 | if (queue_push(eq->queues[REBALANCE_QUEUE], msg) != 0) { 201 | destroy_rebalance_msg(msg); 202 | } 203 | } 204 | 205 | void 206 | rebalance_callback(rd_kafka_t *consumer, 207 | rd_kafka_resp_err_t err, 208 | rd_kafka_topic_partition_list_t *partitions, 209 | void *opaque) 210 | { 211 | event_queues_t *eq = opaque; 212 | const char *proto = rd_kafka_rebalance_protocol(consumer); 213 | int cooperative = (proto != NULL) && strcmp(proto, "COOPERATIVE") == 0; 214 | 215 | switch (err) { 216 | case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: 217 | push_rebalance_event_if_needed(eq, REB_EVENT_ASSIGN, partitions, RD_KAFKA_RESP_ERR_NO_ERROR); 218 | if (cooperative) 219 | rd_kafka_incremental_assign(consumer, partitions); 220 | else 221 | rd_kafka_assign(consumer, partitions); 222 | break; 223 | 224 | case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: 225 | rd_kafka_commit(consumer, partitions, 0); 226 | push_rebalance_event_if_needed(eq, REB_EVENT_REVOKE, partitions, RD_KAFKA_RESP_ERR_NO_ERROR); 227 | if (cooperative) 228 | rd_kafka_incremental_unassign(consumer, partitions); 229 | else 230 | rd_kafka_assign(consumer, NULL); 231 | break; 232 | 233 | default: 
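/* Any other event means the rebalance failed: report the error to the Lua rebalance callback (if one is set) and drop the whole assignment, as librdkafka expects the callback to resolve the rebalance. */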
234 | push_rebalance_event_if_needed(eq, REB_EVENT_ERROR, NULL, err); 235 | rd_kafka_assign(consumer, NULL); 236 | break; 237 | } 238 | } 239 | 240 | /** 241 | * Structure which contains all queues for communication between main TX thread and 242 | * RDKafka callbacks from background threads 243 | */ 244 | 245 | event_queues_t * 246 | new_event_queues() { 247 | event_queues_t *event_queues = xcalloc(1, sizeof(event_queues_t)); 248 | for (int i = 0; i < MAX_QUEUE; i++) 249 | event_queues->cb_refs[i] = LUA_REFNIL; 250 | return event_queues; 251 | } 252 | 253 | void 254 | destroy_event_queues(struct lua_State *L, event_queues_t *event_queues) { 255 | if (event_queues == NULL) 256 | return; 257 | if (event_queues->consume_queue != NULL) { 258 | msg_t *msg = NULL; 259 | while (true) { 260 | msg = queue_pop(event_queues->consume_queue); 261 | if (msg == NULL) 262 | break; 263 | destroy_consumer_msg(msg); 264 | } 265 | destroy_queue(event_queues->consume_queue); 266 | } 267 | if (event_queues->delivery_queue != NULL) { 268 | dr_msg_t *msg = NULL; 269 | while (true) { 270 | msg = queue_pop(event_queues->delivery_queue); 271 | if (msg == NULL) 272 | break; 273 | luaL_unref(L, LUA_REGISTRYINDEX, msg->dr_callback); 274 | destroy_dr_msg(msg); 275 | } 276 | destroy_queue(event_queues->delivery_queue); 277 | } 278 | 279 | for (int i = 0; i < MAX_QUEUE; i++) { 280 | if (event_queues->queues[i] == NULL) 281 | continue; 282 | while (true) { 283 | void *msg = queue_pop(event_queues->queues[i]); 284 | if (msg == NULL) 285 | break; 286 | 287 | switch (i) { 288 | case LOG_QUEUE: 289 | destroy_log_msg(msg); 290 | break; 291 | case STATS_QUEUE: 292 | free(msg); 293 | break; 294 | case ERROR_QUEUE: 295 | destroy_error_msg(msg); 296 | break; 297 | case REBALANCE_QUEUE: { 298 | destroy_rebalance_msg(msg); 299 | break; 300 | } 301 | } 302 | } 303 | destroy_queue(event_queues->queues[i]); 304 | } 305 | 306 | for (int i = 0; i < MAX_QUEUE; i++) 307 | luaL_unref(L, LUA_REGISTRYINDEX, event_queues->cb_refs[i]); 308 | 309 | free(event_queues); 310 | } 311 | -------------------------------------------------------------------------------- /tests/consumer.lua: -------------------------------------------------------------------------------- 1 | local box = require("box") 2 | local json = require("json") 3 | local log = require("log") 4 | local fiber = require('fiber') 5 | local tnt_kafka = require('kafka') 6 | 7 | local consumer = nil 8 | local errors = {} 9 | local logs = {} 10 | local stats = {} 11 | local rebalances = {} 12 | 13 | local function create(brokers, additional_opts) 14 | local err 15 | errors = {} 16 | logs = {} 17 | stats = {} 18 | rebalances = {} 19 | local error_callback = function(err) 20 | log.error("got error: %s", err) 21 | table.insert(errors, err) 22 | end 23 | local log_callback = function(fac, str, level) 24 | log.info("got log: %d - %s - %s", level, fac, str) 25 | table.insert(logs, string.format("got log: %d - %s - %s", level, fac, str)) 26 | end 27 | local stats_callback = function(json_stats) 28 | log.info("got stats %s", json_stats) 29 | table.insert(stats, json_stats) 30 | end 31 | local rebalance_callback = function(msg) 32 | log.info("got rebalance msg: %s", json.encode(msg)) 33 | table.insert(rebalances, msg) 34 | end 35 | 36 | local options = { 37 | ["enable.auto.offset.store"] = "false", 38 | ["group.id"] = "test_consumer", 39 | ["auto.offset.reset"] = "earliest", 40 | ["enable.partition.eof"] = "false", 41 | ["log_level"] = "7", 42 | ["statistics.interval.ms"] = "1000", 43 | } 44 | if 
additional_opts ~= nil then 45 | for key, value in pairs(additional_opts) do 46 | if value == nil then 47 | options[key] = nil 48 | else 49 | options[key] = value 50 | end 51 | end 52 | end 53 | consumer, err = tnt_kafka.Consumer.create({ 54 | brokers = brokers, 55 | options = options, 56 | error_callback = error_callback, 57 | log_callback = log_callback, 58 | stats_callback = stats_callback, 59 | rebalance_callback = rebalance_callback, 60 | default_topic_options = { 61 | ["auto.offset.reset"] = "earliest", 62 | }, 63 | }) 64 | if err ~= nil then 65 | log.error("got err %s", err) 66 | box.error{code = 500, reason = err} 67 | end 68 | log.info("consumer created") 69 | end 70 | 71 | local function subscribe(topics) 72 | log.info("consumer subscribing") 73 | log.info(topics) 74 | local err = consumer:subscribe(topics) 75 | if err ~= nil then 76 | log.error("got err %s", err) 77 | box.error{code = 500, reason = err} 78 | end 79 | log.info("consumer subscribed") 80 | end 81 | 82 | local function unsubscribe(topics) 83 | log.info("consumer unsubscribing") 84 | log.info(topics) 85 | local err = consumer:unsubscribe(topics) 86 | if err ~= nil then 87 | log.error("got err %s", err) 88 | box.error{code = 500, reason = err} 89 | end 90 | log.info("consumer unsubscribed") 91 | end 92 | 93 | local function msg_totable(msg) 94 | return { 95 | value = msg:value(), 96 | key = msg:key(), 97 | topic = msg:topic(), 98 | partition = msg:partition(), 99 | offset = msg:offset(), 100 | headers = msg:headers(), 101 | } 102 | end 103 | 104 | local function append_message(t, msg) 105 | table.insert(t, msg_totable(msg)) 106 | end 107 | 108 | local function consume(timeout) 109 | log.info("consume called") 110 | 111 | local consumed = {} 112 | local f = fiber.create(function() 113 | local out = consumer:output() 114 | while true do 115 | if out:is_closed() then 116 | break 117 | end 118 | 119 | local msg = out:get() 120 | if msg ~= nil then 121 | log.info("%s", msg) 122 | log.info("got msg with topic='%s' partition='%d' offset='%d' key='%s' value='%s'", msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value()) 123 | append_message(consumed, msg) 124 | local err = consumer:store_offset(msg) 125 | if err ~= nil then 126 | log.error("got error '%s' while committing msg from topic '%s'", err, msg:topic()) 127 | end 128 | else 129 | fiber.sleep(0.2) 130 | end 131 | end 132 | end) 133 | 134 | log.info("consume wait") 135 | fiber.sleep(timeout) 136 | log.info("consume ends") 137 | 138 | f:cancel() 139 | 140 | return consumed 141 | end 142 | 143 | local function get_errors() 144 | return errors 145 | end 146 | 147 | local function get_logs() 148 | return logs 149 | end 150 | 151 | local function get_stats() 152 | return stats 153 | end 154 | 155 | local function get_rebalances() 156 | return rebalances 157 | end 158 | 159 | local function dump_conf() 160 | return consumer:dump_conf() 161 | end 162 | 163 | local function metadata(timeout_ms) 164 | return consumer:metadata({timeout_ms = timeout_ms}) 165 | end 166 | 167 | local function list_groups(timeout_ms) 168 | local res, err = consumer:list_groups({timeout_ms = timeout_ms}) 169 | if err ~= nil then 170 | return nil, err 171 | end 172 | log.info("Groups: %s", json.encode(res)) 173 | -- Some fields can have binary data that won't 174 | -- be correctly processed by connector. 
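-- Drop the 'members' field from each group before returning the result to the caller.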
175 | for _, group in ipairs(res) do 176 | group['members'] = nil 177 | end 178 | return res 179 | end 180 | 181 | local function pause() 182 | return consumer:pause() 183 | end 184 | 185 | local function resume() 186 | return consumer:resume() 187 | end 188 | 189 | local function close() 190 | log.info("closing consumer") 191 | local _, err = consumer:close() 192 | if err ~= nil then 193 | log.error("got err %s", err) 194 | box.error{code = 500, reason = err} 195 | end 196 | log.info("consumer closed") 197 | end 198 | 199 | local function test_seek_partitions() 200 | log.info('Test seek') 201 | local messages = {} 202 | 203 | local out = consumer:output() 204 | 205 | for _ = 1, 5 do 206 | local msg = out:get(10) 207 | if msg == nil then 208 | error('Message is not delivered') 209 | end 210 | log.info('Get message: %s', json.encode(msg_totable(msg))) 211 | append_message(messages, msg) 212 | consumer:seek_partitions({ 213 | {msg:topic(), msg:partition(), msg:offset()} 214 | }, 1000) 215 | end 216 | 217 | return messages 218 | end 219 | 220 | local function rebalance_protocol() 221 | return consumer:rebalance_protocol() 222 | end 223 | 224 | local function test_create_errors() 225 | log.info('Create without config') 226 | local _, err = tnt_kafka.Consumer.create() 227 | assert(err == 'config must not be nil') 228 | 229 | log.info('Create with empty config') 230 | local _, err = tnt_kafka.Consumer.create({}) 231 | assert(err == 'consumer config table must have non nil key \'brokers\' which contains string') 232 | 233 | log.info('Create with empty brokers') 234 | local _, err = tnt_kafka.Consumer.create({brokers = ''}) 235 | assert(err == 'No valid brokers specified') 236 | 237 | log.info('Create with invalid default_topic_options keys') 238 | local _, err = tnt_kafka.Consumer.create({brokers = '', default_topic_options = {[{}] = 2}}) 239 | assert(err == 'consumer config default topic options must contains only string keys and string values') 240 | 241 | log.info('Create with invalid default_topic_options property') 242 | local _, err = tnt_kafka.Consumer.create({brokers = '', default_topic_options = {[2] = 2}}) 243 | assert(err == 'No such configuration property: "2"') 244 | 245 | log.info('Create with invalid options keys') 246 | local _, err = tnt_kafka.Consumer.create({brokers = '', options = {[{}] = 2}}) 247 | assert(err == 'consumer config options must contains only string keys and string values') 248 | 249 | log.info('Create with invalid options property') 250 | local _, err = tnt_kafka.Consumer.create({brokers = '', options = {[2] = 2}}) 251 | assert(err == 'No such configuration property: "2"') 252 | 253 | log.info('Create with incompatible properties') 254 | local _, err = tnt_kafka.Consumer.create({brokers = '', options = {['reconnect.backoff.max.ms'] = '2', ['reconnect.backoff.ms'] = '1000'}}) 255 | assert(err == '`reconnect.backoff.max.ms` must be >= `reconnect.backoff.ms`') 256 | end 257 | 258 | local function offsets_for_times(...) 259 | return consumer:offsets_for_times(...) 
260 | end 261 | 262 | local function offsets_for_times_topic(topic, ts_ms, timeout_ms) 263 | return consumer:offsets_for_times({{topic, 0, ts_ms}}, timeout_ms or 10000) 264 | end 265 | 266 | local function drain_output(chan, max_seconds) 267 | local deadline = fiber.time() + (max_seconds or 0.5) 268 | local empty_rounds = 0 269 | while fiber.time() < deadline do 270 | local drained = 0 271 | while true do 272 | local msg = chan:get(0) 273 | if msg == nil then 274 | break 275 | end 276 | drained = drained + 1 277 | end 278 | if drained == 0 then 279 | empty_rounds = empty_rounds + 1 280 | if empty_rounds >= 3 then 281 | break 282 | end 283 | else 284 | empty_rounds = 0 285 | end 286 | fiber.sleep(0.01) 287 | end 288 | end 289 | 290 | local function seek_from_time(topic, ts_ms, timeout_ms) 291 | local timeout = timeout_ms or 10000 292 | 293 | consumer:pause() 294 | drain_output(consumer:output(), 0.5) 295 | 296 | local res, err = offsets_for_times_topic(topic, ts_ms, timeout) 297 | if err ~= nil then 298 | consumer:resume() 299 | return nil, err 300 | end 301 | 302 | local seeks = {} 303 | for _, e in ipairs(res) do 304 | local ec = e.error_code or 0 305 | local off = e.offset 306 | if ec == 0 and off ~= -1001 then 307 | table.insert(seeks, {e.topic, e.partition, off}) 308 | end 309 | end 310 | 311 | if #seeks == 0 then 312 | consumer:resume() 313 | return {} 314 | end 315 | 316 | local s_err = consumer:seek_partitions(seeks, timeout) 317 | drain_output(consumer:output(), 0.1) 318 | consumer:resume() 319 | 320 | if s_err ~= nil then 321 | return nil, s_err 322 | end 323 | return seeks 324 | end 325 | 326 | return { 327 | create = create, 328 | subscribe = subscribe, 329 | unsubscribe = unsubscribe, 330 | consume = consume, 331 | close = close, 332 | get_errors = get_errors, 333 | get_logs = get_logs, 334 | get_stats = get_stats, 335 | get_rebalances = get_rebalances, 336 | dump_conf = dump_conf, 337 | metadata = metadata, 338 | list_groups = list_groups, 339 | pause = pause, 340 | resume = resume, 341 | rebalance_protocol = rebalance_protocol, 342 | offsets_for_times_topic = offsets_for_times_topic, 343 | offsets_for_times = offsets_for_times, 344 | seek_from_time = seek_from_time, 345 | 346 | test_seek_partitions = test_seek_partitions, 347 | test_create_errors = test_create_errors, 348 | } 349 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
-------------------------------------------------------------------------------- /kafka/common.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | const char* const consumer_label = "__tnt_kafka_consumer"; 8 | const char* const consumer_msg_label = "__tnt_kafka_consumer_msg"; 9 | const char* const producer_label = "__tnt_kafka_producer"; 10 | 11 | int 12 | lua_push_kafka_error(struct lua_State *L, rd_kafka_t *rk, rd_kafka_resp_err_t err) 13 | { 14 | if (err == RD_KAFKA_RESP_ERR__FATAL) { 15 | char fatal[512]; 16 | rd_kafka_resp_err_t underlying = rd_kafka_fatal_error(rk, fatal, sizeof(fatal)); 17 | lua_pushfstring(L, "%s: %s", rd_kafka_err2str(err), rd_kafka_err2str(underlying)); 18 | } else { 19 | lua_pushstring(L, rd_kafka_err2str(err)); 20 | } 21 | return 1; 22 | } 23 | 24 | /** 25 | * Push native lua error with code -3 26 | */ 27 | int 28 | lua_push_error(struct lua_State *L) { 29 | lua_pushnumber(L, -3); 30 | lua_insert(L, -2); 31 | return 2; 32 | } 33 | 34 | /** 35 | * Push current librdkafka version 36 | */ 37 | int 38 | lua_librdkafka_version(struct lua_State *L) { 39 | const char *version = rd_kafka_version_str(); 40 | lua_pushstring(L, version); 41 | return 1; 42 | } 43 | 44 | int 45 | lua_librdkafka_dump_conf(struct lua_State *L, rd_kafka_t *rk) { 46 | if (rk != NULL) { 47 | const rd_kafka_conf_t *conf = rd_kafka_conf(rk); 48 | if (conf == NULL) 49 | return 0; 50 | 51 | size_t cntp = 0; 52 | const char **confstr = rd_kafka_conf_dump((rd_kafka_conf_t *)conf, &cntp); 53 | if (confstr == NULL) 54 | return 0; 55 | 56 | lua_newtable(L); 57 | for (size_t i = 0; i < cntp; i += 2) { 58 | lua_pushstring(L, confstr[i]); 59 | lua_pushstring(L, confstr[i + 1]); 60 | lua_settable(L, -3); 61 | } 62 | rd_kafka_conf_dump_free(confstr, cntp); 63 | return 1; 64 | } 65 | return 0; 66 | } 67 | 68 | static ssize_t 69 | wait_librdkafka_metadata(va_list args) { 70 | rd_kafka_t *rk = va_arg(args, rd_kafka_t *); 71 | int all_topics = va_arg(args, int); 72 | rd_kafka_topic_t *only_rkt = va_arg(args, rd_kafka_topic_t *); 73 | const struct rd_kafka_metadata **metadatap = va_arg(args, const struct rd_kafka_metadata **); 74 | int timeout_ms = va_arg(args, int); 75 | return rd_kafka_metadata(rk, all_topics, only_rkt, metadatap, timeout_ms); 76 | } 77 | 78 | int 79 | lua_librdkafka_metadata(struct lua_State *L, rd_kafka_t *rk, rd_kafka_topic_t *only_rkt, int timeout_ms) { 80 | assert(rk != NULL); 81 | 82 | int all_topics = 0; 83 | if (only_rkt == NULL) 84 | all_topics = 1; 85 | 86 | const struct rd_kafka_metadata *metadatap; 87 | rd_kafka_resp_err_t err = coio_call(wait_librdkafka_metadata, rk, all_topics, only_rkt, &metadatap, timeout_ms); 88 | 89 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 90 | lua_pushnil(L); 91 | lua_push_kafka_error(L, rk, err); 92 | return 2; 93 | } 94 | 95 | lua_newtable(L); // metadata 96 | 97 | lua_pushliteral(L, "brokers"); // metadata.brokers 98 | lua_createtable(L, metadatap->broker_cnt, 0); 99 | for (int i = 0; i < metadatap->broker_cnt; i++) { 100 | lua_pushnumber(L, i + 1); // metadata.brokers[i] 101 | lua_createtable(L, 0, 3); 102 | 103 | lua_pushliteral(L, "id"); // metadata.brokers[i].id 104 | lua_pushnumber(L, metadatap->brokers[i].id); 105 | lua_settable(L, -3); 106 | 107 | lua_pushliteral(L, "port"); // metadata.brokers[i].port 108 | lua_pushnumber(L, metadatap->brokers[i].port); 109 | lua_settable(L, -3); 110 | 111 | lua_pushliteral(L, "host"); // 
metadata.brokers[i].host 112 | lua_pushstring(L, metadatap->brokers[i].host); 113 | lua_settable(L, -3); 114 | 115 | lua_settable(L, -3); // metadata.brokers[i] 116 | } 117 | 118 | lua_settable(L, -3); // metadata.brokers 119 | 120 | lua_pushliteral(L, "topics"); // metadata.topics 121 | lua_createtable(L, metadatap->topic_cnt, 0); 122 | for (int i = 0; i < metadatap->topic_cnt; i++) { 123 | lua_pushnumber(L, i + 1); // metadata.topics[i] 124 | lua_createtable(L, 0, 4); 125 | 126 | lua_pushliteral(L, "topic"); // metadata.topics[i].topic 127 | lua_pushstring(L, metadatap->topics[i].topic); 128 | lua_settable(L, -3); 129 | 130 | lua_pushliteral(L, "partitions"); // metadata.topics[i].partitions 131 | lua_createtable(L, 0, metadatap->topics[i].partition_cnt); 132 | 133 | for (int j = 0; j < metadatap->topics[i].partition_cnt; j++) { 134 | lua_pushnumber(L, j + 1); // metadata.topics[i].partitions[j] 135 | lua_createtable(L, 0, 8); 136 | 137 | lua_pushliteral(L, "id"); // metadata.topics[i].partitions[j].id 138 | lua_pushnumber(L, metadatap->topics[i].partitions[j].id); 139 | lua_settable(L, -3); 140 | 141 | lua_pushliteral(L, "leader"); // metadata.topics[i].partitions[j].leader 142 | lua_pushnumber(L, metadatap->topics[i].partitions[j].leader); 143 | lua_settable(L, -3); 144 | 145 | if (metadatap->topics[i].partitions[j].err != RD_KAFKA_RESP_ERR_NO_ERROR) { 146 | lua_pushliteral(L, "error_code"); // metadata.topics[i].partitions[j].error_code 147 | lua_pushnumber(L, metadatap->topics[i].partitions[j].err); 148 | lua_settable(L, -3); 149 | 150 | lua_pushliteral(L, "error"); // metadata.topics[i].partitions[j].error 151 | lua_push_kafka_error(L, rk, metadatap->topics[i].partitions[j].err); 152 | lua_settable(L, -3); 153 | } 154 | 155 | lua_pushliteral(L, "isr"); // metadata.topics[i].partitions[j].isr 156 | lua_createtable(L, metadatap->topics[i].partitions[j].isr_cnt, 0); 157 | for (int k = 0; k < metadatap->topics[i].partitions[j].isr_cnt; k++) { 158 | lua_pushnumber(L, k + 1); // metadata.topics[i].partitions[j].isr[k] 159 | lua_pushnumber(L, metadatap->topics[i].partitions[j].isrs[k]); 160 | lua_settable(L, -3); 161 | } 162 | lua_settable(L, -3); // metadata.topics[i].partitions[j].isr 163 | 164 | lua_pushliteral(L, "replicas"); // metadata.topics[i].partitions[j].replicas 165 | lua_createtable(L, metadatap->topics[i].partitions[j].replica_cnt, 0); 166 | for (int k = 0; k < metadatap->topics[i].partitions[j].replica_cnt; k++) { 167 | lua_pushnumber(L, k + 1); // metadata.topics[i].partitions[j].replicas[k] 168 | lua_pushnumber(L, metadatap->topics[i].partitions[j].replicas[k]); 169 | lua_settable(L, -3); 170 | } 171 | lua_settable(L, -3); // metadata.topics[i].partitions[j].replicas 172 | lua_settable(L, -3); // metadata.topics[i].partitions[j] 173 | } 174 | 175 | lua_settable(L, -3); // metadata.topics[i].partitions 176 | 177 | if (metadatap->topics[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) { 178 | lua_pushliteral(L, "error_code"); // metadata.topics[i].error_code 179 | lua_pushnumber(L, metadatap->topics[i].err); 180 | lua_settable(L, -3); 181 | 182 | lua_pushliteral(L, "error"); // metadata.topics[i].error 183 | lua_push_kafka_error(L, rk, metadatap->topics[i].err); 184 | lua_settable(L, -3); 185 | } 186 | 187 | lua_settable(L, -3); // metadata.topics[i] 188 | } 189 | lua_settable(L, -3); // metadata.topics 190 | 191 | lua_pushliteral(L, "orig_broker_id"); // metadata.orig_broker_id 192 | lua_pushinteger(L, metadatap->orig_broker_id); 193 | lua_settable(L, -3); 194 | 195 | 
lua_pushliteral(L, "orig_broker_name"); // metadata.orig_broker_name 196 | lua_pushstring(L, metadatap->orig_broker_name); 197 | lua_settable(L, -3); 198 | 199 | rd_kafka_metadata_destroy(metadatap); 200 | return 1; 201 | } 202 | 203 | static ssize_t 204 | wait_librdkafka_list_groups(va_list args) { 205 | rd_kafka_t *rk = va_arg(args, rd_kafka_t *); 206 | const char *group = va_arg(args, const char *); 207 | const struct rd_kafka_group_list **grplistp = va_arg(args, const struct rd_kafka_group_list **); 208 | int timeout_ms = va_arg(args, int); 209 | return rd_kafka_list_groups(rk, group, grplistp, timeout_ms); 210 | } 211 | 212 | int 213 | lua_librdkafka_list_groups(struct lua_State *L, rd_kafka_t *rk, const char *group, int timeout_ms) { 214 | const struct rd_kafka_group_list *grplistp; 215 | rd_kafka_resp_err_t err = coio_call(wait_librdkafka_list_groups, rk, group, &grplistp, timeout_ms); 216 | 217 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 218 | lua_pushnil(L); 219 | lua_push_kafka_error(L, rk, err); 220 | return 2; 221 | } 222 | 223 | lua_createtable(L, grplistp->group_cnt, 0); 224 | for (int i = 0; i < grplistp->group_cnt; i++) { 225 | lua_pushnumber(L, i + 1); 226 | lua_createtable(L, 0, 8); 227 | 228 | lua_pushliteral(L, "broker"); 229 | lua_createtable(L, 0, 3); 230 | 231 | lua_pushliteral(L, "id"); 232 | lua_pushnumber(L, grplistp->groups[i].broker.id); 233 | lua_settable(L, -3); 234 | 235 | lua_pushliteral(L, "port"); 236 | lua_pushnumber(L, grplistp->groups[i].broker.port); 237 | lua_settable(L, -3); 238 | 239 | lua_pushliteral(L, "host"); 240 | lua_pushstring(L, grplistp->groups[i].broker.host); 241 | lua_settable(L, -3); 242 | 243 | lua_settable(L, -3); 244 | 245 | lua_pushstring(L, "group"); 246 | lua_pushstring(L, grplistp->groups[i].group); 247 | lua_settable(L, -3); 248 | 249 | if (grplistp->groups[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) { 250 | lua_pushliteral(L, "error_code"); 251 | lua_pushnumber(L, grplistp->groups[i].err); 252 | lua_settable(L, -3); 253 | 254 | lua_pushliteral(L, "error"); 255 | lua_push_kafka_error(L, rk, grplistp->groups[i].err); 256 | lua_settable(L, -3); 257 | } 258 | 259 | lua_pushliteral(L, "state"); 260 | lua_pushstring(L, grplistp->groups[i].state); 261 | lua_settable(L, -3); 262 | 263 | lua_pushliteral(L, "protocol_type"); 264 | lua_pushstring(L, grplistp->groups[i].protocol_type); 265 | lua_settable(L, -3); 266 | 267 | lua_pushliteral(L, "protocol"); 268 | lua_pushstring(L, grplistp->groups[i].protocol); 269 | lua_settable(L, -3); 270 | 271 | lua_pushliteral(L, "members"); 272 | lua_createtable(L, grplistp->groups[i].member_cnt, 0); 273 | for (int j = 0; j < grplistp->groups[i].member_cnt; j++) { 274 | lua_pushnumber(L, j + 1); 275 | lua_createtable(L, 0, 8); 276 | 277 | lua_pushliteral(L, "member_id"); 278 | lua_pushstring(L, grplistp->groups[i].members[j].member_id); 279 | lua_settable(L, -3); 280 | 281 | lua_pushliteral(L, "client_id"); 282 | lua_pushstring(L, grplistp->groups[i].members[j].client_id); 283 | lua_settable(L, -3); 284 | 285 | lua_pushliteral(L, "client_host"); 286 | lua_pushstring(L, grplistp->groups[i].members[j].client_host); 287 | lua_settable(L, -3); 288 | 289 | lua_pushliteral(L, "member_metadata"); 290 | lua_pushlstring(L, 291 | grplistp->groups[i].members[j].member_metadata, 292 | grplistp->groups[i].members[j].member_metadata_size); 293 | lua_settable(L, -3); 294 | 295 | lua_pushliteral(L, "member_assignment"); 296 | lua_pushlstring(L, 297 | grplistp->groups[i].members[j].member_assignment, 298 | 
grplistp->groups[i].members[j].member_assignment_size); 299 | lua_settable(L, -3); 300 | 301 | lua_settable(L, -3); 302 | } 303 | lua_settable(L, -3); 304 | 305 | lua_settable(L, -3); 306 | } 307 | 308 | rd_kafka_group_list_destroy(grplistp); 309 | return 1; 310 | } 311 | 312 | void 313 | set_thread_name(const char *name) 314 | #ifdef __linux__ 315 | { 316 | int rc = pthread_setname_np(pthread_self(), name); 317 | (void)rc; 318 | assert(rc == 0); 319 | } 320 | #elif __APPLE__ 321 | { 322 | pthread_setname_np(name); 323 | } 324 | #else 325 | { 326 | (void)name; 327 | } 328 | #endif 329 | 330 | static rd_kafka_resp_err_t 331 | kafka_pause_resume(rd_kafka_t *rk, 332 | rd_kafka_resp_err_t (*fun)(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions)) { 333 | rd_kafka_topic_partition_list_t *partitions = NULL; 334 | rd_kafka_resp_err_t err = rd_kafka_assignment(rk, &partitions); 335 | 336 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) 337 | return err; 338 | 339 | err = fun(rk, partitions); 340 | rd_kafka_topic_partition_list_destroy(partitions); 341 | return err; 342 | } 343 | 344 | rd_kafka_resp_err_t 345 | kafka_pause(rd_kafka_t *rk) { 346 | return kafka_pause_resume(rk, rd_kafka_pause_partitions); 347 | } 348 | 349 | rd_kafka_resp_err_t 350 | kafka_resume(rd_kafka_t *rk) { 351 | return kafka_pause_resume(rk, rd_kafka_resume_partitions); 352 | } 353 | -------------------------------------------------------------------------------- /kafka/init.lua: -------------------------------------------------------------------------------- 1 | local log = require("log") 2 | local fiber = require('fiber') 3 | local tnt_kafka = require("kafka.tntkafka") 4 | 5 | local DEFAULT_TIMEOUT_MS = 2000 6 | 7 | local Consumer = {} 8 | 9 | Consumer.__index = Consumer 10 | 11 | function Consumer.create(config) 12 | if config == nil then 13 | return nil, "config must not be nil" 14 | end 15 | 16 | local consumer, err = tnt_kafka.create_consumer(config) 17 | if err ~= nil then 18 | return nil, err 19 | end 20 | 21 | local new = { 22 | config = config, 23 | _consumer = consumer, 24 | _output_ch = fiber.channel(10000), 25 | } 26 | setmetatable(new, Consumer) 27 | 28 | new._poll_msg_fiber = fiber.create(function() 29 | new:_poll_msg() 30 | end) 31 | new._poll_msg_fiber:name('kafka_msg_poller') 32 | 33 | if config.log_callback ~= nil then 34 | new._poll_logs_fiber = fiber.create(function() 35 | new:_poll_logs() 36 | end) 37 | new._poll_logs_fiber:name('kafka_logs_poller') 38 | end 39 | 40 | if config.stats_callback ~= nil then 41 | new._poll_stats_fiber = fiber.create(function() 42 | new:_poll_stats() 43 | end) 44 | new._poll_stats_fiber:name('kafka_stats_poller') 45 | end 46 | 47 | if config.error_callback ~= nil then 48 | new._poll_errors_fiber = fiber.create(function() 49 | new:_poll_errors() 50 | end) 51 | new._poll_errors_fiber:name('kafka_error_poller') 52 | end 53 | 54 | if config.rebalance_callback ~= nil then 55 | new._poll_rebalances_fiber = fiber.create(function() 56 | new:_poll_rebalances() 57 | end) 58 | new._poll_rebalances_fiber:name('kafka_rebalances_poller') 59 | end 60 | 61 | return new, nil 62 | end 63 | 64 | function Consumer:_poll_msg() 65 | local msgs 66 | while true do 67 | msgs = self._consumer:poll_msg(100) 68 | if #msgs > 0 then 69 | for _, msg in ipairs(msgs) do 70 | self._output_ch:put(msg) 71 | end 72 | fiber.yield() 73 | else 74 | -- throttling poll 75 | fiber.sleep(0.01) 76 | end 77 | end 78 | end 79 | 80 | jit.off(Consumer._poll_msg) 81 | 82 | function Consumer:_poll_logs() 83 | local count, 
err 84 | while true do 85 | count, err = self._consumer:poll_logs(100) 86 | if err ~= nil then 87 | log.error("Consumer poll logs error: %s", err) 88 | -- throttling poll 89 | fiber.sleep(0.1) 90 | elseif count > 0 then 91 | fiber.yield() 92 | else 93 | -- throttling poll 94 | fiber.sleep(1) 95 | end 96 | end 97 | end 98 | 99 | jit.off(Consumer._poll_logs) 100 | 101 | function Consumer:_poll_stats() 102 | local count, err 103 | while true do 104 | count, err = self._consumer:poll_stats(100) 105 | if err ~= nil then 106 | log.error("Consumer poll stats error: %s", err) 107 | -- throttling poll 108 | fiber.sleep(0.1) 109 | elseif count > 0 then 110 | fiber.yield() 111 | else 112 | -- throttling poll 113 | fiber.sleep(1) 114 | end 115 | end 116 | end 117 | 118 | jit.off(Consumer._poll_stats) 119 | 120 | function Consumer:_poll_errors() 121 | local count, err 122 | while true do 123 | count, err = self._consumer:poll_errors(100) 124 | if err ~= nil then 125 | log.error("Consumer poll errors error: %s", err) 126 | -- throttling poll 127 | fiber.sleep(0.1) 128 | elseif count > 0 then 129 | fiber.yield() 130 | else 131 | -- throttling poll 132 | fiber.sleep(1) 133 | end 134 | end 135 | end 136 | 137 | jit.off(Consumer._poll_errors) 138 | 139 | function Consumer:_poll_rebalances() 140 | local count, err 141 | while true do 142 | count, err = self._consumer:poll_rebalances(1) 143 | if err ~= nil then 144 | log.error("Consumer poll rebalances error: %s", err) 145 | -- throttling poll 146 | fiber.sleep(0.1) 147 | elseif count > 0 then 148 | fiber.yield() 149 | else 150 | -- throttling poll 151 | fiber.sleep(0.5) 152 | end 153 | end 154 | end 155 | 156 | jit.off(Consumer._poll_rebalances) 157 | 158 | function Consumer:close() 159 | if self._consumer == nil then 160 | return false 161 | end 162 | 163 | local ok = self._consumer:close() 164 | 165 | self._poll_msg_fiber:cancel() 166 | self._output_ch:close() 167 | 168 | fiber.yield() 169 | 170 | if self._poll_logs_fiber ~= nil then 171 | self._poll_logs_fiber:cancel() 172 | end 173 | if self._poll_stats_fiber ~= nil then 174 | self._poll_stats_fiber:cancel() 175 | end 176 | if self._poll_errors_fiber ~= nil then 177 | self._poll_errors_fiber:cancel() 178 | end 179 | if self._poll_rebalances_fiber ~= nil then 180 | self._poll_rebalances_fiber:cancel() 181 | end 182 | 183 | self._consumer:destroy() 184 | 185 | self._consumer = nil 186 | 187 | return ok 188 | end 189 | 190 | local function get_timeout_from_options(options) 191 | local timeout_ms = DEFAULT_TIMEOUT_MS 192 | if type(options) == 'table' and options.timeout_ms ~= nil then 193 | timeout_ms = options.timeout_ms 194 | end 195 | return timeout_ms 196 | end 197 | 198 | function Consumer:subscribe(topics) 199 | return self._consumer:subscribe(topics) 200 | end 201 | 202 | function Consumer:unsubscribe(topics) 203 | return self._consumer:unsubscribe(topics) 204 | end 205 | 206 | function Consumer:output() 207 | return self._output_ch 208 | end 209 | 210 | function Consumer:store_offset(message) 211 | return self._consumer:store_offset(message) 212 | end 213 | 214 | function Consumer:pause() 215 | return self._consumer:pause() 216 | end 217 | 218 | function Consumer:resume() 219 | return self._consumer:resume() 220 | end 221 | 222 | function Consumer:rebalance_protocol() 223 | return self._consumer:rebalance_protocol() 224 | end 225 | 226 | function Consumer:offsets_for_times(offsets, timeout_ms) 227 | return self._consumer:offsets_for_times(offsets, timeout_ms) 228 | end 229 | 230 | function 
Consumer:seek_partitions(topic_partitions_list, options) 231 | local timeout_ms = get_timeout_from_options(options) 232 | return self._consumer:seek_partitions(topic_partitions_list, timeout_ms) 233 | end 234 | 235 | function Consumer:dump_conf() 236 | if self._consumer == nil then 237 | return 238 | end 239 | return self._consumer:dump_conf() 240 | end 241 | 242 | function Consumer:metadata(options) 243 | if self._consumer == nil then 244 | return 245 | end 246 | 247 | local timeout_ms = get_timeout_from_options(options) 248 | 249 | return self._consumer:metadata(timeout_ms) 250 | end 251 | 252 | function Consumer:list_groups(options) 253 | if self._consumer == nil then 254 | return 255 | end 256 | 257 | local timeout_ms = get_timeout_from_options(options) 258 | 259 | local group 260 | if options ~= nil and options.group ~= nil then 261 | group = options.group 262 | end 263 | 264 | return self._consumer:list_groups(group, timeout_ms) 265 | end 266 | 267 | local Producer = {} 268 | 269 | Producer.__index = Producer 270 | 271 | function Producer.create(config) 272 | if config == nil then 273 | return nil, "config must not be nil" 274 | end 275 | 276 | local producer, err = tnt_kafka.create_producer(config) 277 | if err ~= nil then 278 | return nil, err 279 | end 280 | 281 | local new = { 282 | config = config, 283 | _counter = 0, 284 | _delivery_map = {}, 285 | _producer = producer, 286 | } 287 | setmetatable(new, Producer) 288 | 289 | new._msg_delivery_poll_fiber = fiber.create(function() 290 | new:_msg_delivery_poll() 291 | end) 292 | 293 | if config.log_callback ~= nil then 294 | new._poll_logs_fiber = fiber.create(function() 295 | new:_poll_logs() 296 | end) 297 | end 298 | 299 | if config.stats_callback ~= nil then 300 | new._poll_stats_fiber = fiber.create(function() 301 | new:_poll_stats() 302 | end) 303 | end 304 | 305 | if config.error_callback ~= nil then 306 | new._poll_errors_fiber = fiber.create(function() 307 | new:_poll_errors() 308 | end) 309 | end 310 | 311 | return new, nil 312 | end 313 | 314 | function Producer:_msg_delivery_poll() 315 | while true do 316 | local count, err 317 | while true do 318 | count, err = self._producer:msg_delivery_poll(100) 319 | if err ~= nil then 320 | log.error(err) 321 | -- throttling poll 322 | fiber.sleep(0.01) 323 | elseif count > 0 then 324 | fiber.yield() 325 | else 326 | -- throttling poll 327 | fiber.sleep(0.01) 328 | end 329 | end 330 | end 331 | end 332 | 333 | jit.off(Producer._msg_delivery_poll) 334 | 335 | function Producer:_poll_logs() 336 | local count, err 337 | while true do 338 | count, err = self._producer:poll_logs(100) 339 | if err ~= nil then 340 | log.error("Producer poll logs error: %s", err) 341 | -- throttling poll 342 | fiber.sleep(0.1) 343 | elseif count > 0 then 344 | fiber.yield() 345 | else 346 | -- throttling poll 347 | fiber.sleep(1) 348 | end 349 | end 350 | end 351 | 352 | jit.off(Producer._poll_logs) 353 | 354 | function Producer:_poll_stats() 355 | local count, err 356 | while true do 357 | count, err = self._producer:poll_stats(100) 358 | if err ~= nil then 359 | log.error("Producer poll stats error: %s", err) 360 | -- throttling poll 361 | fiber.sleep(0.1) 362 | elseif count > 0 then 363 | fiber.yield() 364 | else 365 | -- throttling poll 366 | fiber.sleep(1) 367 | end 368 | end 369 | end 370 | 371 | jit.off(Producer._poll_stats) 372 | 373 | function Producer:_poll_errors() 374 | local count, err 375 | while true do 376 | count, err = self._producer:poll_errors(100) 377 | if err ~= nil then 378 | 
log.error("Producer poll errors error: %s", err) 379 | -- throttling poll 380 | fiber.sleep(0.1) 381 | elseif count > 0 then 382 | fiber.yield() 383 | else 384 | -- throttling poll 385 | fiber.sleep(1) 386 | end 387 | end 388 | end 389 | 390 | jit.off(Producer._poll_errors) 391 | 392 | function Producer:produce_async(msg) 393 | local err = self._producer:produce(msg) 394 | return err 395 | end 396 | 397 | local function dr_callback_factory(delivery_chan) 398 | return function(err) 399 | delivery_chan:put(err) 400 | end 401 | end 402 | 403 | function Producer:produce(msg) 404 | local delivery_chan = fiber.channel(1) 405 | 406 | msg.dr_callback = dr_callback_factory(delivery_chan) 407 | 408 | local err = self._producer:produce(msg) 409 | if err == nil then 410 | err = delivery_chan:get() 411 | end 412 | 413 | return err 414 | end 415 | 416 | function Producer:dump_conf() 417 | if self._producer == nil then 418 | return 419 | end 420 | return self._producer:dump_conf() 421 | end 422 | 423 | function Producer:metadata(options) 424 | if self._producer == nil then 425 | return 426 | end 427 | 428 | local timeout_ms = get_timeout_from_options(options) 429 | 430 | local topic 431 | if options ~= nil and options.topic ~= nil then 432 | topic = options.topic 433 | end 434 | 435 | return self._producer:metadata(topic, timeout_ms) 436 | end 437 | 438 | function Producer:list_groups(options) 439 | if self._producer == nil then 440 | return 441 | end 442 | 443 | local timeout_ms = get_timeout_from_options(options) 444 | 445 | local group 446 | if options ~= nil and options.group ~= nil then 447 | group = options.group 448 | end 449 | 450 | return self._producer:list_groups(group, timeout_ms) 451 | end 452 | 453 | function Producer:close() 454 | if self._producer == nil then 455 | return false 456 | end 457 | 458 | local ok = self._producer:close() 459 | 460 | self._msg_delivery_poll_fiber:cancel() 461 | if self._poll_logs_fiber ~= nil then 462 | self._poll_logs_fiber:cancel() 463 | end 464 | if self._poll_stats_fiber ~= nil then 465 | self._poll_stats_fiber:cancel() 466 | end 467 | if self._poll_errors_fiber ~= nil then 468 | self._poll_errors_fiber:cancel() 469 | end 470 | 471 | self._producer:destroy() 472 | 473 | self._producer = nil 474 | 475 | return ok 476 | end 477 | 478 | return { 479 | Consumer = Consumer, 480 | Producer = Producer, 481 | _LIBRDKAFKA = tnt_kafka.librdkafka_version(), 482 | _VERSION = require('kafka.version'), 483 | } 484 | -------------------------------------------------------------------------------- /tests/test_consumer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import json 4 | import asyncio 5 | from contextlib import contextmanager 6 | import random 7 | import string 8 | 9 | import pytest 10 | from aiokafka import AIOKafkaProducer 11 | import tarantool 12 | 13 | KAFKA_HOST = os.getenv("KAFKA_HOST", "kafka:9092") 14 | 15 | 16 | def randomword(length): 17 | letters = string.ascii_lowercase 18 | return ''.join(random.choice(letters) for i in range(length)) 19 | 20 | 21 | def get_message_values(messages): 22 | result = [] 23 | for msg in messages: 24 | if 'value' in msg: 25 | result.append(msg['value']) 26 | return result 27 | 28 | 29 | def get_server(): 30 | return tarantool.Connection("127.0.0.1", 3301, 31 | user="guest", 32 | password=None, 33 | socket_timeout=40, 34 | connection_timeout=40, 35 | reconnect_max_attempts=3, 36 | reconnect_delay=1, 37 | connect_now=True) 38 | 39 | 40 | @contextmanager 
41 | def create_consumer(server, *args): 42 | try: 43 | server.call("consumer.create", args) 44 | yield 45 | 46 | finally: 47 | server.call("consumer.close", []) 48 | 49 | 50 | def write_into_kafka(topic, messages): 51 | loop = asyncio.get_event_loop_policy().new_event_loop() 52 | 53 | async def send(): 54 | producer = AIOKafkaProducer(bootstrap_servers='localhost:9092') 55 | # Get cluster layout and initial topic/partition leadership information 56 | await producer.start() 57 | try: 58 | # Produce message 59 | for msg in messages: 60 | headers = None 61 | if 'headers' in msg: 62 | headers = [] 63 | for k, v in msg['headers'].items(): 64 | headers.append((k, v.encode('utf-8') if v is not None else v)) 65 | await producer.send_and_wait( 66 | topic, 67 | value=msg['value'].encode('utf-8'), 68 | key=msg['key'].encode('utf-8'), 69 | headers=headers, 70 | ) 71 | 72 | finally: 73 | # Wait for all pending messages to be delivered or expire. 74 | await producer.stop() 75 | 76 | loop.run_until_complete(send()) 77 | loop.close() 78 | 79 | 80 | def test_consumer_should_consume_msgs(): 81 | message1 = { 82 | "key": "test1", 83 | "value": "test1", 84 | } 85 | 86 | message2 = { 87 | "key": "test1", 88 | "value": "test2", 89 | } 90 | 91 | message3 = { 92 | "key": "test1", 93 | "value": "test3", 94 | "headers": {"key1": "value1", "key2": "value2", "nullable": None}, 95 | } 96 | 97 | message4 = { 98 | "key": "", 99 | "value": "test4", 100 | } 101 | 102 | message5 = { 103 | "key": "", 104 | "value": "", 105 | } 106 | 107 | write_into_kafka("test_consume", ( 108 | message1, 109 | message2, 110 | message3, 111 | message4, 112 | message5, 113 | )) 114 | 115 | server = get_server() 116 | 117 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_consume_msgs"}): 118 | server.call("consumer.subscribe", [["test_consume"]]) 119 | 120 | response = server.call("consumer.consume", [10])[0] 121 | 122 | assert set(get_message_values(response)) == { 123 | "test1", 124 | "test2", 125 | "test3", 126 | "test4", 127 | } 128 | 129 | for msg in filter(lambda x: 'value' in x, response): 130 | if msg['value'] == 'test1': 131 | assert msg['key'] == 'test1' 132 | elif msg['value'] == 'test3': 133 | assert msg['headers'] == {'key1': 'value1', 'key2': 'value2', 'nullable': None} 134 | 135 | 136 | def test_consumer_seek_partitions(): 137 | key = "test_seek_unique_key" 138 | value = "test_seek_unique_value" 139 | message = { 140 | "key": key, 141 | "value": value, 142 | } 143 | 144 | topic = 'test_consumer_seek' + randomword(15) 145 | write_into_kafka(topic, (message,)) 146 | 147 | server = get_server() 148 | 149 | with create_consumer(server, KAFKA_HOST, {'group.id': 'consumer_seek'}): 150 | server.call('consumer.subscribe', [[topic]]) 151 | 152 | response = server.call("consumer.test_seek_partitions") 153 | assert len(response[0]) == 5 154 | 155 | for item in response[0]: 156 | assert item['key'] == key 157 | assert item['value'] == value 158 | 159 | 160 | def test_consumer_create_errors(): 161 | server = get_server() 162 | server.call("consumer.test_create_errors") 163 | 164 | 165 | def test_consumer_should_consume_msgs_from_multiple_topics(): 166 | message1 = { 167 | "key": "test1", 168 | "value": "test1" 169 | } 170 | 171 | message2 = { 172 | "key": "test1", 173 | "value": "test2" 174 | } 175 | 176 | message3 = { 177 | "key": "test1", 178 | "value": "test33" 179 | } 180 | 181 | write_into_kafka("test_multi_consume_1", (message1, message2)) 182 | write_into_kafka("test_multi_consume_2", (message3, )) 183 | 184 | server = 
get_server() 185 | 186 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_consume_msgs_from_multiple_topics"}): 187 | server.call("consumer.subscribe", [["test_multi_consume_1", "test_multi_consume_2"]]) 188 | 189 | response = server.call("consumer.consume", [10])[0] 190 | 191 | assert set(get_message_values(response)) == { 192 | "test1", 193 | "test2", 194 | "test33" 195 | } 196 | 197 | 198 | def test_consumer_should_completely_unsubscribe_from_topics(): 199 | message1 = { 200 | "key": "test1", 201 | "value": "test1" 202 | } 203 | 204 | message2 = { 205 | "key": "test1", 206 | "value": "test2" 207 | } 208 | 209 | message3 = { 210 | "key": "test1", 211 | "value": "test34" 212 | } 213 | 214 | t = f"test_unsubscribe_{randomword(4)}" 215 | write_into_kafka(t, (message1, message2)) 216 | 217 | server = get_server() 218 | 219 | with create_consumer(server, KAFKA_HOST, { 220 | "group.id": "should_completely_unsubscribe_from_topics", 221 | }): 222 | server.call("consumer.subscribe", [[t]]) 223 | 224 | response = server.call("consumer.consume", [10])[0] 225 | 226 | assert set(get_message_values(response)) == { 227 | "test1", 228 | "test2", 229 | } 230 | 231 | server.call("consumer.unsubscribe", [[t]]) 232 | 233 | write_into_kafka(t, (message3, )) 234 | 235 | response = server.call("consumer.consume", [10]) 236 | 237 | assert set(*response) == set() 238 | 239 | 240 | def test_consumer_should_partially_unsubscribe_from_topics(): 241 | message1 = { 242 | "key": "test1", 243 | "value": "test1" 244 | } 245 | 246 | message2 = { 247 | "key": "test1", 248 | "value": "test2" 249 | } 250 | 251 | message3 = { 252 | "key": "test1", 253 | "value": "test35" 254 | } 255 | 256 | message4 = { 257 | "key": "test1", 258 | "value": "test45" 259 | } 260 | 261 | server = get_server() 262 | 263 | salt = randomword(4) 264 | with create_consumer(server, KAFKA_HOST, { 265 | "group.id": f"should_partially_unsubscribe_from_topics_{salt}", 266 | }): 267 | t1 = f"test_unsub_partially_{salt}_1" 268 | t2 = f"test_unsub_partially_{salt}_2" 269 | 270 | # Ensure topics exist BEFORE subscribe/rebalance (auto-create can lag in CI) 271 | write_into_kafka(t1, (message1, )) 272 | write_into_kafka(t2, (message2, )) 273 | server.call("consumer.subscribe", [[t1, t2]]) 274 | time.sleep(5) # give group join/rebalance time 275 | 276 | # waiting up to 30 seconds 277 | response = server.call("consumer.consume", [30])[0] 278 | 279 | assert set(get_message_values(response)) == { 280 | "test1", 281 | "test2", 282 | } 283 | 284 | server.call("consumer.unsubscribe", [[t1]]) 285 | time.sleep(2) # let revoke/apply subscription update settle 286 | 287 | write_into_kafka(t1, (message3, )) 288 | write_into_kafka(t2, (message4, )) 289 | time.sleep(5) 290 | 291 | response = server.call("consumer.consume", [30])[0] 292 | 293 | assert set(get_message_values(response)) == {"test45"} 294 | 295 | 296 | def test_consumer_should_log_errors(): 297 | server = get_server() 298 | 299 | with create_consumer(server, "kafka:9090"): 300 | time.sleep(5) 301 | 302 | response = server.call("consumer.get_errors", []) 303 | 304 | assert len(response.data[0]) > 0 305 | 306 | 307 | def test_consumer_stats(): 308 | server = get_server() 309 | 310 | with create_consumer(server, "kafka:9090"): 311 | time.sleep(5) 312 | 313 | response = server.call("consumer.get_stats", []) 314 | assert len(response) > 0 315 | found = False 316 | for resp in response: 317 | if len(resp) == 0: 318 | continue 319 | 320 | found = True 321 | stat = json.loads(resp[0]) 322 | 323 | assert 
'rdkafka#consumer' in stat['name'] 324 | assert 'kafka:9090/bootstrap' in stat['brokers'] 325 | assert stat['type'] == 'consumer' 326 | break 327 | 328 | assert found 329 | 330 | 331 | def test_consumer_dump_conf(): 332 | server = get_server() 333 | 334 | with create_consumer(server, "kafka:9090"): 335 | time.sleep(2) 336 | 337 | response = server.call("consumer.dump_conf", []) 338 | assert len(response) > 0 339 | assert len(response[0]) > 0 340 | assert 'session.timeout.ms' in response[0] 341 | assert 'socket.max.fails' in response[0] 342 | assert 'compression.codec' in response[0] 343 | 344 | 345 | def test_consumer_metadata(): 346 | server = get_server() 347 | 348 | with create_consumer(server, KAFKA_HOST): 349 | time.sleep(2) 350 | 351 | response = server.call("consumer.metadata", []) 352 | assert 'orig_broker_name' in response[0] 353 | assert 'orig_broker_id' in response[0] 354 | assert 'brokers' in response[0] 355 | assert 'topics' in response[0] 356 | assert 'host' in response[0]['brokers'][0] 357 | assert 'port' in response[0]['brokers'][0] 358 | assert 'id' in response[0]['brokers'][0] 359 | 360 | response = server.call("consumer.metadata", [0]) 361 | assert tuple(response) == (None, 'Local: Timed out') 362 | 363 | response = server.call("consumer.list_groups", []) 364 | assert response[0] is not None 365 | response = server.call("consumer.list_groups", [0]) 366 | assert tuple(response) == (None, 'Local: Timed out') 367 | 368 | with create_consumer(server, "badhost:9090"): 369 | response = server.call("consumer.metadata", [0]) 370 | assert tuple(response) == (None, 'Local: Broker transport failure') 371 | 372 | response = server.call("consumer.metadata", [0]) 373 | assert tuple(response) == (None, 'Local: Broker transport failure') 374 | 375 | 376 | def test_consumer_should_log_debug(): 377 | server = get_server() 378 | 379 | with create_consumer(server, KAFKA_HOST, {"debug": "consumer,cgrp,topic,fetch"}): 380 | time.sleep(2) 381 | 382 | response = server.call("consumer.get_logs", []) 383 | 384 | assert len(response.data[0]) > 0 385 | 386 | 387 | def test_consumer_should_log_rebalances(): 388 | # Use unique topic and create it before subscribe: in CI topic auto-create can lag, 389 | # and subscribing to a non-existent topic may yield no assignment/rebalance events. 
390 | topic = f"test_rebalances_{randomword(6)}" 391 | write_into_kafka(topic, ({"key": "init", "value": "init"},)) 392 | 393 | server = get_server() 394 | gid = f"g_rebalances_{randomword(6)}" 395 | with create_consumer(server, KAFKA_HOST, {"group.id": gid}): 396 | time.sleep(2) 397 | server.call("consumer.subscribe", [[topic]]) 398 | time.sleep(10) 399 | response = server.call("consumer.get_rebalances", []) 400 | assert len(response.data[0]) > 0 401 | 402 | 403 | def test_consumer_rebalance_protocol(): 404 | server = get_server() 405 | 406 | with create_consumer(server, KAFKA_HOST, {"bootstrap.servers": KAFKA_HOST}): 407 | time.sleep(5) 408 | response = server.call("consumer.rebalance_protocol", []) 409 | assert response[0] == 'NONE' 410 | 411 | # Ensure topic exists before subscribe (auto-create can lag) 412 | topic = f"test_rebalance_proto_{randomword(6)}" 413 | write_into_kafka(topic, ({"key": "init", "value": "init"},)) 414 | 415 | server.call("consumer.subscribe", [[topic]]) 416 | response = server.call("consumer.rebalance_protocol", []) 417 | assert response[0] == 'NONE' 418 | 419 | 420 | def test_consumer_should_continue_consuming_from_last_committed_offset(): 421 | message1 = { 422 | "key": "test1", 423 | "value": "test1" 424 | } 425 | 426 | message2 = { 427 | "key": "test1", 428 | "value": "test2" 429 | } 430 | 431 | message3 = { 432 | "key": "test1", 433 | "value": "test3" 434 | } 435 | 436 | message4 = { 437 | "key": "test1", 438 | "value": "test4" 439 | } 440 | 441 | server = get_server() 442 | 443 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_continue_consuming_from_last_committed_offset"}): 444 | server.call("consumer.subscribe", [["test_consuming_from_last_committed_offset"]]) 445 | 446 | write_into_kafka("test_consuming_from_last_committed_offset", (message1, )) 447 | write_into_kafka("test_consuming_from_last_committed_offset", (message2, )) 448 | 449 | # waiting up to 30 seconds 450 | response = server.call("consumer.consume", [30])[0] 451 | 452 | assert set(get_message_values(response)) == { 453 | "test1", 454 | "test2", 455 | } 456 | 457 | time.sleep(2) 458 | 459 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_continue_consuming_from_last_committed_offset"}): 460 | server.call("consumer.subscribe", [["test_consuming_from_last_committed_offset"]]) 461 | 462 | write_into_kafka("test_consuming_from_last_committed_offset", (message3, )) 463 | write_into_kafka("test_consuming_from_last_committed_offset", (message4, )) 464 | 465 | response = server.call("consumer.consume", [30])[0] 466 | 467 | assert set(get_message_values(response)) == { 468 | "test3", 469 | "test4", 470 | } 471 | 472 | 473 | def test_consumer_pause_resume(): 474 | message_before_pause = { 475 | "key": "message_before_pause", 476 | "value": "message_before_pause", 477 | } 478 | 479 | message_on_pause = { 480 | "key": "message_on_pause", 481 | "value": "message_on_pause", 482 | } 483 | 484 | message_after_pause = { 485 | "key": "message_after_pause", 486 | "value": "message_after_pause", 487 | } 488 | 489 | server = get_server() 490 | 491 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_consume_msgs"}): 492 | topic = "test_resume_pause" 493 | # Ensure topic exists before subscribe/rebalance 494 | write_into_kafka(topic, (message_before_pause,)) 495 | server.call("consumer.subscribe", [[topic]]) 496 | 497 | response = server.call("consumer.consume", [10])[0] 498 | 499 | assert set(get_message_values(response)) == { 500 | "message_before_pause", 501 | } 502 | 
503 | response = server.call("consumer.pause") 504 | assert len(response) == 0 505 | 506 | write_into_kafka("test_resume_pause", (message_on_pause,)) 507 | response = server.call("consumer.consume", [2])[0] 508 | assert len(response) == 0 509 | 510 | response = server.call("consumer.resume") 511 | assert len(response) == 0 512 | write_into_kafka("test_resume_pause", (message_after_pause,)) 513 | 514 | response = server.call("consumer.consume", [2])[0] 515 | assert set(get_message_values(response)) == { 516 | "message_on_pause", 517 | "message_after_pause", 518 | } 519 | 520 | 521 | @pytest.mark.timeout(5) 522 | def test_consumer_should_be_closed(): 523 | server = get_server() 524 | 525 | with create_consumer(server, '127.0.0.1:12345', {"group.id": None}): 526 | pass 527 | 528 | 529 | def test_offsets_for_times_api(): 530 | topic = "test_offsets_for_times_api" 531 | tag = randomword(6) 532 | 533 | batch1 = [{"key": f"b1-{tag}-{i}", "value": f"v1-{tag}-{i}"} for i in range(3)] 534 | write_into_kafka(topic, batch1) 535 | 536 | time.sleep(2) 537 | ts_cut_ms = int(time.time() * 1000) 538 | 539 | batch2 = [{"key": f"b2-{tag}-{i}", "value": f"v2-{tag}-{i}"} for i in range(2)] 540 | write_into_kafka(topic, batch2) 541 | 542 | server = get_server() 543 | group_id = f"g_offsets_for_times_api_{tag}" 544 | 545 | with create_consumer(server, KAFKA_HOST, {"group.id": group_id}): 546 | res = server.call("consumer.offsets_for_times", [[topic, 0, ts_cut_ms], ['invalid', 1000, ts_cut_ms]], 3000) 547 | assert len(res) == 1 and len(res[0]) == 2 548 | 549 | item = res[0][0] 550 | assert item.get("topic") == topic 551 | assert item.get("partition") == 0 552 | assert "error_code" in item, item 553 | assert item["error_code"] == 0 554 | assert "error" not in item 555 | assert isinstance(item.get("offset"), int) and item["offset"] >= 0 556 | 557 | item = res[0][1] 558 | assert item.get("topic") == "invalid" 559 | assert item["error"] == "Broker: Unknown topic or partition" 560 | assert item["error_code"] != 0 561 | 562 | 563 | def test_offsets_for_times_seek_from_cut(): 564 | topic = "test_offsets_for_times" 565 | tag = randomword(6) 566 | 567 | batch1 = [{"key": f"b1-{tag}-{i}", "value": f"v1-{tag}-{i}"} for i in range(5)] 568 | write_into_kafka(topic, batch1) 569 | 570 | time.sleep(2) 571 | ts_cut_ms = int(time.time() * 1000) 572 | 573 | batch2 = [{"key": f"b2-{tag}-{i}", "value": f"v2-{tag}-{i}"} for i in range(5)] 574 | write_into_kafka(topic, batch2) 575 | 576 | server = get_server() 577 | group_id = f"g_seek_from_time_{tag}" 578 | 579 | with create_consumer(server, KAFKA_HOST, {"group.id": group_id}): 580 | server.call("consumer.subscribe", [[topic]]) 581 | 582 | time.sleep(10) 583 | 584 | res = server.call("consumer.seek_from_time", [topic, ts_cut_ms, 5000]) 585 | assert len(res) == 1 586 | 587 | applied = res[0] 588 | assert isinstance(applied, list) and len(applied) >= 1, f"no seeks applied: {applied}" 589 | 590 | for item in applied: 591 | assert isinstance(item, list) and len(item) == 3, item 592 | t, p, o = item 593 | assert t == topic, f"unexpected topic in seek: {item}" 594 | assert isinstance(p, int) 595 | assert isinstance(o, int) and o != -1001, f"invalid offset in seek: {item}" 596 | 597 | msgs = server.call("consumer.consume", [4])[0] 598 | values = set(get_message_values(msgs)) 599 | 600 | want = {m["value"] for m in batch2} 601 | not_want = {m["value"] for m in batch1} 602 | 603 | assert values.issuperset(want), f"missing: {want - values}, got={values}" 604 | assert 
values.isdisjoint(not_want), f"unexpected (batch1) values present: {values & not_want}" 605 | -------------------------------------------------------------------------------- /kafka/producer.c: -------------------------------------------------------------------------------- 1 | #include "producer.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | //////////////////////////////////////////////////////////////////////////////////////////////////// 16 | 17 | /** 18 | * Producer poll thread 19 | */ 20 | 21 | typedef struct producer_poller_t { 22 | rd_kafka_t *rd_producer; 23 | pthread_t thread; 24 | pthread_attr_t attr; 25 | int should_stop; 26 | pthread_mutex_t lock; 27 | } producer_poller_t; 28 | 29 | typedef struct producer_topics_t { 30 | rd_kafka_topic_t **elements; 31 | int32_t count; 32 | int32_t capacity; 33 | } producer_topics_t; 34 | 35 | typedef struct { 36 | rd_kafka_t *rd_producer; 37 | producer_topics_t *topics; 38 | event_queues_t *event_queues; 39 | producer_poller_t *poller; 40 | } producer_t; 41 | 42 | static void * 43 | producer_poll_loop(void *arg) { 44 | set_thread_name("kafka_producer"); 45 | 46 | producer_poller_t *poller = arg; 47 | int count = 0; 48 | int should_stop = 0; 49 | 50 | while (true) { 51 | { 52 | pthread_mutex_lock(&poller->lock); 53 | 54 | should_stop = poller->should_stop; 55 | 56 | pthread_mutex_unlock(&poller->lock); 57 | 58 | if (should_stop) { 59 | break; 60 | } 61 | } 62 | 63 | { 64 | count = rd_kafka_poll(poller->rd_producer, 1000); 65 | if (count == 0) { 66 | // throttling calls with 100ms sleep 67 | usleep(100000); 68 | } 69 | } 70 | } 71 | 72 | pthread_exit(NULL); 73 | } 74 | 75 | static producer_poller_t * 76 | new_producer_poller(rd_kafka_t *rd_producer) { 77 | producer_poller_t *poller = xmalloc(sizeof(producer_poller_t)); 78 | poller->rd_producer = rd_producer; 79 | poller->should_stop = 0; 80 | 81 | XPTHREAD(pthread_mutex_init(&poller->lock, NULL)); 82 | XPTHREAD(pthread_attr_init(&poller->attr)); 83 | XPTHREAD(pthread_attr_setdetachstate(&poller->attr, PTHREAD_CREATE_JOINABLE)); 84 | XPTHREAD(pthread_create(&poller->thread, &poller->attr, producer_poll_loop, (void *)poller)); 85 | 86 | return poller; 87 | } 88 | 89 | static ssize_t 90 | stop_poller(va_list args) { 91 | producer_poller_t *poller = va_arg(args, producer_poller_t *); 92 | pthread_mutex_lock(&poller->lock); 93 | 94 | poller->should_stop = 1; 95 | 96 | XPTHREAD(pthread_mutex_unlock(&poller->lock)); 97 | XPTHREAD(pthread_join(poller->thread, NULL)); 98 | 99 | return 0; 100 | } 101 | 102 | static void 103 | destroy_producer_poller(producer_poller_t *poller) { 104 | // stopping polling thread 105 | coio_call(stop_poller, poller); 106 | 107 | XPTHREAD(pthread_attr_destroy(&poller->attr)); 108 | XPTHREAD(pthread_mutex_destroy(&poller->lock)); 109 | 110 | free(poller); 111 | } 112 | 113 | /** 114 | * Producer 115 | */ 116 | 117 | static producer_topics_t * 118 | new_producer_topics(int32_t capacity) { 119 | rd_kafka_topic_t **elements; 120 | elements = xmalloc(sizeof(rd_kafka_topic_t *) * capacity); 121 | producer_topics_t *topics; 122 | topics = xmalloc(sizeof(producer_topics_t)); 123 | topics->capacity = capacity; 124 | topics->count = 0; 125 | topics->elements = elements; 126 | 127 | return topics; 128 | } 129 | 130 | static void 131 | add_producer_topics(producer_topics_t *topics, rd_kafka_topic_t *element) { 132 | if (topics->count >= topics->capacity) { 133 | rd_kafka_topic_t 
**new_elements = xrealloc(topics->elements, sizeof(rd_kafka_topic_t *) * topics->capacity * 2); 134 | topics->elements = new_elements; 135 | topics->capacity *= 2; 136 | } 137 | topics->elements[topics->count++] = element; 138 | } 139 | 140 | static rd_kafka_topic_t * 141 | find_producer_topic_by_name(producer_topics_t *topics, const char *name) { 142 | rd_kafka_topic_t *topic; 143 | for (int i = 0; i < topics->count; i++) { 144 | topic = topics->elements[i]; 145 | if (strcmp(rd_kafka_topic_name(topic), name) == 0) { 146 | return topic; 147 | } 148 | } 149 | return NULL; 150 | } 151 | 152 | static void 153 | destroy_producer_topics(producer_topics_t *topics) { 154 | rd_kafka_topic_t **topic_p; 155 | rd_kafka_topic_t **end = topics->elements + topics->count; 156 | for (topic_p = topics->elements; topic_p < end; topic_p++) { 157 | rd_kafka_topic_destroy(*topic_p); 158 | } 159 | 160 | free(topics->elements); 161 | free(topics); 162 | } 163 | 164 | static inline producer_t * 165 | lua_check_producer(struct lua_State *L, int index) { 166 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, index, producer_label); 167 | if (producer_p == NULL || *producer_p == NULL) 168 | luaL_error(L, "Kafka producer fatal error: failed to retrieve producer from lua stack!"); 169 | return *producer_p; 170 | } 171 | 172 | int 173 | lua_producer_tostring(struct lua_State *L) { 174 | const producer_t *producer = lua_check_producer(L, 1); 175 | lua_pushfstring(L, "Kafka Producer: %p", producer); 176 | return 1; 177 | } 178 | 179 | int 180 | lua_producer_msg_delivery_poll(struct lua_State *L) { 181 | if (lua_gettop(L) != 2) 182 | luaL_error(L, "Usage: count, err = producer:msg_delivery_poll(events_limit)"); 183 | 184 | producer_t *producer = lua_check_producer(L, 1); 185 | 186 | int events_limit = lua_tonumber(L, 2); 187 | int callbacks_count = 0; 188 | dr_msg_t *dr_msg = NULL; 189 | 190 | while (events_limit > callbacks_count) { 191 | dr_msg = queue_pop(producer->event_queues->delivery_queue); 192 | if (dr_msg == NULL) 193 | break; 194 | callbacks_count += 1; 195 | lua_rawgeti(L, LUA_REGISTRYINDEX, dr_msg->dr_callback); 196 | if (dr_msg->err != RD_KAFKA_RESP_ERR_NO_ERROR) { 197 | lua_push_kafka_error(L, producer->rd_producer, dr_msg->err); 198 | } else { 199 | lua_pushnil(L); 200 | } 201 | /* do the call (1 arguments, 0 result) */ 202 | int rc = lua_pcall(L, 1, 0, 0); 203 | luaL_unref(L, LUA_REGISTRYINDEX, dr_msg->dr_callback); 204 | destroy_dr_msg(dr_msg); 205 | if (rc != 0) { 206 | lua_pushinteger(L, callbacks_count); 207 | lua_insert(L, -2); /* count below error string */ 208 | return 2; 209 | } 210 | } 211 | 212 | lua_pushinteger(L, callbacks_count); 213 | return 1; 214 | } 215 | 216 | LUA_RDKAFKA_POLL_FUNC(producer, poll_logs, LOG_QUEUE, destroy_log_msg, push_log_cb_args) 217 | LUA_RDKAFKA_POLL_FUNC(producer, poll_stats, STATS_QUEUE, free, push_stats_cb_args) 218 | LUA_RDKAFKA_POLL_FUNC(producer, poll_errors, ERROR_QUEUE, destroy_error_msg, push_errors_cb_args) 219 | 220 | int 221 | lua_producer_produce(struct lua_State *L) { 222 | if (lua_gettop(L) != 2 || !lua_istable(L, 2)) 223 | luaL_error(L, "Usage: err = producer:produce(msg)"); 224 | 225 | lua_pushliteral(L, "topic"); 226 | lua_gettable(L, -2); 227 | const char *topic = lua_tostring(L, -1); 228 | lua_pop(L, 1); 229 | if (topic == NULL) { 230 | lua_pushliteral(L, "producer message must contains non nil 'topic' key"); 231 | return 1; 232 | } 233 | 234 | lua_pushliteral(L, "key"); 235 | lua_gettable(L, -2); 236 | size_t key_len; 237 | // rd_kafka 
will copy key so no need to worry about this cast 238 | char *key = (char *)lua_tolstring(L, -1, &key_len); 239 | 240 | lua_pop(L, 1); 241 | 242 | lua_pushliteral(L, "value"); 243 | lua_gettable(L, -2); 244 | size_t value_len; 245 | // rd_kafka will copy value so no need to worry about this cast 246 | char *value = (char *)lua_tolstring(L, -1, &value_len); 247 | 248 | lua_pop(L, 1); 249 | 250 | if (key == NULL && value == NULL) { 251 | lua_pushliteral(L, "producer message must contains non nil key or value"); 252 | return 1; 253 | } 254 | 255 | int32_t partition = RD_KAFKA_PARTITION_UA; 256 | lua_pushliteral(L, "partition"); 257 | lua_gettable(L, -2); 258 | if (lua_isnumber(L, -1)) 259 | partition = lua_tonumber(L, -1); 260 | lua_pop(L, 1); 261 | 262 | dr_msg_t *dr_msg = NULL; 263 | rd_kafka_headers_t *hdrs = NULL; 264 | lua_pushliteral(L, "headers"); 265 | lua_gettable(L, -2); 266 | if (lua_istable(L, -1)) { 267 | hdrs = xrd_kafka_headers_new(8); 268 | 269 | lua_pushnil(L); 270 | while (lua_next(L, -2) != 0) { 271 | size_t hdr_value_len = 0; 272 | const char *hdr_value = lua_tolstring(L, -1, &hdr_value_len); 273 | size_t hdr_key_len = 0; 274 | const char *hdr_key = lua_tolstring(L, -2, &hdr_key_len); 275 | 276 | rd_kafka_resp_err_t err = rd_kafka_header_add( 277 | hdrs, hdr_key, hdr_key_len, hdr_value, hdr_value_len); 278 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 279 | lua_pushliteral(L, "failed to add kafka headers"); 280 | goto error; 281 | } 282 | 283 | lua_pop(L, 1); 284 | } 285 | } 286 | 287 | lua_pop(L, 1); 288 | 289 | // create delivery callback queue if got msg id 290 | lua_pushliteral(L, "dr_callback"); 291 | lua_gettable(L, -2); 292 | if (lua_isfunction(L, -1)) { 293 | dr_msg = new_dr_msg(luaL_ref(L, LUA_REGISTRYINDEX), RD_KAFKA_RESP_ERR_NO_ERROR); 294 | if (dr_msg == NULL) { 295 | lua_pushliteral(L, "failed to create callback message"); 296 | goto error; 297 | } 298 | } else { 299 | lua_pop(L, 1); 300 | } 301 | 302 | // pop msg 303 | lua_pop(L, 1); 304 | 305 | producer_t *producer = lua_check_producer(L, 1); 306 | rd_kafka_topic_t *rd_topic = find_producer_topic_by_name(producer->topics, topic); 307 | if (rd_topic == NULL) { 308 | rd_topic = rd_kafka_topic_new(producer->rd_producer, topic, NULL); 309 | if (rd_topic == NULL) { 310 | lua_push_kafka_error(L, producer->rd_producer, rd_kafka_last_error()); 311 | goto error; 312 | } 313 | add_producer_topics(producer->topics, rd_topic); 314 | } 315 | 316 | rd_kafka_vu_t vus[8]; 317 | size_t n = 0; 318 | 319 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_RKT, .u.rkt = rd_topic }; 320 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_PARTITION, .u.i32 = partition }; 321 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_MSGFLAGS, .u.i = RD_KAFKA_MSG_F_COPY }; 322 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_VALUE, .u.mem = { .ptr = (void*)value, .size = value_len } }; 323 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_KEY, .u.mem = { .ptr = (void*)key, .size = key_len } }; 324 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_OPAQUE, .u.ptr = dr_msg }; 325 | if (hdrs) 326 | vus[n++] = (rd_kafka_vu_t){ .vtype = RD_KAFKA_VTYPE_HEADERS, .u.headers = hdrs }; 327 | 328 | rd_kafka_error_t *e = rd_kafka_produceva(producer->rd_producer, vus, n); 329 | if (e == NULL) 330 | return 0; 331 | 332 | lua_push_kafka_error(L, producer->rd_producer, rd_kafka_error_code(e)); 333 | rd_kafka_error_destroy(e); 334 | 335 | error: 336 | if (hdrs != NULL) 337 | rd_kafka_headers_destroy(hdrs); 338 | if (dr_msg != NULL) { 339 | 
luaL_unref(L, LUA_REGISTRYINDEX, dr_msg->dr_callback); 340 | destroy_dr_msg(dr_msg); 341 | } 342 | return 1; 343 | } 344 | 345 | static ssize_t 346 | producer_flush(va_list args) { 347 | rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; 348 | rd_kafka_t *rd_producer = va_arg(args, rd_kafka_t *); 349 | while (true) { 350 | err = rd_kafka_flush(rd_producer, 1000); 351 | if (err != RD_KAFKA_RESP_ERR__TIMED_OUT) { 352 | break; 353 | } 354 | } 355 | return 0; 356 | } 357 | 358 | static ssize_t 359 | wait_producer_destroy(va_list args) { 360 | rd_kafka_t *rd_kafka = va_arg(args, rd_kafka_t *); 361 | rd_kafka_destroy(rd_kafka); 362 | return 0; 363 | } 364 | 365 | static void 366 | destroy_producer(struct lua_State *L, producer_t *producer) { 367 | if (producer->poller != NULL) { 368 | destroy_producer_poller(producer->poller); 369 | producer->poller = NULL; 370 | } 371 | 372 | if (producer->topics != NULL) { 373 | destroy_producer_topics(producer->topics); 374 | producer->topics = NULL; 375 | } 376 | 377 | /* 378 | * Here we close producer and only then destroys other stuff. 379 | * Otherwise raise condition is possible when e.g. 380 | * event queue is destroyed but producer still receives logs, errors, etc. 381 | * Only topics should be destroyed. 382 | */ 383 | if (producer->rd_producer != NULL) { 384 | /* Destroy handle */ 385 | coio_call(wait_producer_destroy, producer->rd_producer); 386 | producer->rd_producer = NULL; 387 | } 388 | 389 | if (producer->event_queues != NULL) { 390 | destroy_event_queues(L, producer->event_queues); 391 | producer->event_queues = NULL; 392 | } 393 | 394 | free(producer); 395 | } 396 | 397 | int 398 | lua_producer_close(struct lua_State *L) { 399 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 400 | if (producer_p == NULL || *producer_p == NULL) { 401 | lua_pushboolean(L, 0); 402 | return 1; 403 | } 404 | 405 | if ((*producer_p)->rd_producer != NULL) { 406 | coio_call(producer_flush, (*producer_p)->rd_producer); 407 | } 408 | 409 | if ((*producer_p)->poller != NULL) { 410 | destroy_producer_poller((*producer_p)->poller); 411 | (*producer_p)->poller = NULL; 412 | } 413 | 414 | lua_pushboolean(L, 1); 415 | return 1; 416 | } 417 | 418 | int 419 | lua_producer_dump_conf(struct lua_State *L) { 420 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 421 | if (producer_p == NULL || *producer_p == NULL) 422 | return 0; 423 | 424 | if ((*producer_p)->rd_producer != NULL) 425 | return lua_librdkafka_dump_conf(L, (*producer_p)->rd_producer); 426 | return 0; 427 | } 428 | 429 | int 430 | lua_producer_destroy(struct lua_State *L) { 431 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 432 | if (producer_p && *producer_p) { 433 | destroy_producer(L, *producer_p); 434 | } 435 | if (producer_p) 436 | *producer_p = NULL; 437 | return 0; 438 | } 439 | 440 | int 441 | lua_create_producer(struct lua_State *L) { 442 | if (lua_gettop(L) != 1 || !lua_istable(L, 1)) 443 | luaL_error(L, "Usage: producer, err = create_producer(conf)"); 444 | 445 | lua_pushstring(L, "brokers"); 446 | lua_gettable(L, -2); 447 | const char *brokers = lua_tostring(L, -1); 448 | lua_pop(L, 1); 449 | if (brokers == NULL) { 450 | lua_pushnil(L); 451 | lua_pushliteral(L, "producer config table must have non nil key 'brokers' which contains string"); 452 | return 2; 453 | } 454 | 455 | char errstr[512]; 456 | 457 | rd_kafka_topic_conf_t *topic_conf = xrd_kafka_topic_conf_new(); 458 | lua_pushstring(L, 
"default_topic_options"); 459 | lua_gettable(L, -2); 460 | if (lua_istable(L, -1)) { 461 | lua_pushnil(L); 462 | // stack now contains: -1 => nil; -2 => table 463 | while (lua_next(L, -2)) { 464 | // stack now contains: -1 => value; -2 => key; -3 => table 465 | if (!(lua_isstring(L, -1)) || !(lua_isstring(L, -2))) { 466 | lua_pushnil(L); 467 | lua_pushliteral(L, "producer config default topic options must contains only string keys and string values"); 468 | goto topic_error; 469 | } 470 | 471 | const char *value = lua_tostring(L, -1); 472 | const char *key = lua_tostring(L, -2); 473 | if (rd_kafka_topic_conf_set(topic_conf, key, value, errstr, sizeof(errstr))) { 474 | lua_pushnil(L); 475 | lua_pushstring(L, errstr); 476 | goto topic_error; 477 | } 478 | 479 | // pop value, leaving original key 480 | lua_pop(L, 1); 481 | // stack now contains: -1 => key; -2 => table 482 | } 483 | // stack now contains: -1 => table 484 | } 485 | lua_pop(L, 1); 486 | 487 | rd_kafka_conf_t *rd_config = xrd_kafka_conf_new(); 488 | rd_kafka_conf_set_default_topic_conf(rd_config, topic_conf); 489 | 490 | event_queues_t *event_queues = new_event_queues(); 491 | event_queues->delivery_queue = new_queue(); 492 | rd_kafka_conf_set_dr_msg_cb(rd_config, msg_delivery_callback); 493 | 494 | for (int i = 0; i < MAX_QUEUE; i++) { 495 | if (i == REBALANCE_QUEUE) 496 | continue; 497 | 498 | lua_pushstring(L, queue2str[i]); 499 | lua_gettable(L, -2); 500 | if (lua_isfunction(L, -1)) { 501 | event_queues->cb_refs[i] = luaL_ref(L, LUA_REGISTRYINDEX); 502 | event_queues->queues[i] = new_queue(); 503 | switch (i) { 504 | case LOG_QUEUE: 505 | rd_kafka_conf_set_log_cb(rd_config, log_callback); 506 | break; 507 | case ERROR_QUEUE: 508 | rd_kafka_conf_set_error_cb(rd_config, error_callback); 509 | break; 510 | case STATS_QUEUE: 511 | rd_kafka_conf_set_stats_cb(rd_config, stats_callback); 512 | break; 513 | } 514 | } else { 515 | lua_pop(L, 1); 516 | } 517 | } 518 | 519 | rd_kafka_conf_set_opaque(rd_config, event_queues); 520 | 521 | lua_pushstring(L, "options"); 522 | lua_gettable(L, -2); 523 | if (lua_istable(L, -1)) { 524 | lua_pushnil(L); 525 | // stack now contains: -1 => nil; -2 => table 526 | while (lua_next(L, -2)) { 527 | // stack now contains: -1 => value; -2 => key; -3 => table 528 | if (!(lua_isstring(L, -1)) || !(lua_isstring(L, -2))) { 529 | lua_pushnil(L); 530 | lua_pushliteral(L, "producer config options must contains only string keys and string values"); 531 | goto config_error; 532 | } 533 | 534 | const char *value = lua_tostring(L, -1); 535 | const char *key = lua_tostring(L, -2); 536 | if (rd_kafka_conf_set(rd_config, key, value, errstr, sizeof(errstr))) { 537 | lua_pushnil(L); 538 | lua_pushstring(L, errstr); 539 | goto config_error; 540 | } 541 | 542 | // pop value, leaving original key 543 | lua_pop(L, 1); 544 | // stack now contains: -1 => key; -2 => table 545 | } 546 | // stack now contains: -1 => table 547 | } 548 | lua_pop(L, 1); 549 | 550 | rd_kafka_t *rd_producer; 551 | if (!(rd_producer = rd_kafka_new(RD_KAFKA_PRODUCER, rd_config, errstr, sizeof(errstr)))) { 552 | lua_pushnil(L); 553 | lua_pushstring(L, errstr); 554 | goto config_error; 555 | } 556 | 557 | rd_config = NULL; // was freed by rd_kafka_new 558 | if (rd_kafka_brokers_add(rd_producer, brokers) == 0) { 559 | lua_pushnil(L); 560 | lua_pushliteral(L, "No valid brokers specified"); 561 | goto broker_error; 562 | } 563 | 564 | // creating background thread for polling consumer 565 | producer_poller_t *poller = new_producer_poller(rd_producer); 566 
| if (poller == NULL) { 567 | lua_pushnil(L); 568 | lua_pushliteral(L, "Failed to create producer poller thread"); 569 | goto broker_error; 570 | } 571 | 572 | producer_t *producer; 573 | producer = xmalloc(sizeof(producer_t)); 574 | producer->rd_producer = rd_producer; 575 | producer->topics = new_producer_topics(256); 576 | producer->event_queues = event_queues; 577 | producer->poller = poller; 578 | 579 | producer_t **producer_p = (producer_t **)lua_newuserdata(L, sizeof(producer)); 580 | *producer_p = producer; 581 | 582 | luaL_getmetatable(L, producer_label); 583 | lua_setmetatable(L, -2); 584 | return 1; 585 | 586 | broker_error: 587 | rd_kafka_destroy(rd_producer); 588 | config_error: 589 | if (rd_config != NULL) 590 | rd_kafka_conf_destroy(rd_config); 591 | destroy_event_queues(L, event_queues); 592 | return 2; 593 | topic_error: 594 | rd_kafka_topic_conf_destroy(topic_conf); 595 | return 2; 596 | } 597 | 598 | int 599 | lua_producer_metadata(struct lua_State *L) { 600 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 601 | if (producer_p == NULL || *producer_p == NULL) 602 | return 0; 603 | 604 | if ((*producer_p)->rd_producer != NULL) { 605 | rd_kafka_topic_t *topic = NULL; 606 | const char *topic_name = lua_tostring(L, 2); 607 | if (topic_name != NULL) { 608 | topic = find_producer_topic_by_name((*producer_p)->topics, topic_name); 609 | if (topic == NULL) { 610 | lua_pushnil(L); 611 | lua_pushfstring(L, "Topic \"%s\" is not found", topic_name); 612 | return 2; 613 | } 614 | } 615 | 616 | int timeout_ms = lua_tointeger(L, 3); 617 | return lua_librdkafka_metadata(L, (*producer_p)->rd_producer, topic, timeout_ms); 618 | } 619 | return 0; 620 | } 621 | 622 | int 623 | lua_producer_list_groups(struct lua_State *L) { 624 | producer_t **producer_p = luaL_checkudata(L, 1, producer_label); 625 | if (producer_p == NULL || *producer_p == NULL) 626 | return 0; 627 | 628 | if ((*producer_p)->rd_producer != NULL) { 629 | const char *group = lua_tostring(L, 2); 630 | int timeout_ms = lua_tointeger(L, 3); 631 | return lua_librdkafka_list_groups(L, (*producer_p)->rd_producer, group, timeout_ms); 632 | } 633 | return 0; 634 | } 635 | --------------------------------------------------------------------------------
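For reference, a minimal usage sketch of the producer bindings defined in kafka/producer.c above. The require path and the export names are assumptions inferred from the usage strings in the C code (`create_producer`, `produce`, `msg_delivery_poll`); the shipped init.lua wrapper may expose a different, higher-level interface, so treat this as an illustration of the binding contract rather than the public API.

```lua
-- Minimal sketch, assuming the C bindings are reachable on the module table
-- under the names from their usage strings; require('kafka') is an assumption.
local kafka = require('kafka')

-- create_producer(conf): 'brokers' is required; 'options' and
-- 'default_topic_options' are string->string tables forwarded to
-- rd_kafka_conf_set() and rd_kafka_topic_conf_set() respectively.
local producer, err = kafka.create_producer({
    brokers = 'localhost:9092',
    options = {
        ['queue.buffering.max.ms'] = '100',
    },
    default_topic_options = {
        ['partitioner'] = 'murmur2_random',
    },
})
assert(producer ~= nil, err)

-- produce(msg): needs 'topic' and at least one of 'key'/'value';
-- 'partition', 'headers' and a per-message 'dr_callback' are optional.
local perr = producer:produce({
    topic = 'greetings',
    key = 'hello',
    value = 'world',
    headers = { trace_id = 'abc123' },
    dr_callback = function(derr)
        if derr ~= nil then
            print('delivery failed: ' .. tostring(derr))
        end
    end,
})
assert(perr == nil, perr)

-- Delivery callbacks are queued by the C delivery report callback and fired
-- only when msg_delivery_poll(events_limit) runs, so some fiber has to call
-- it periodically.
local fired = producer:msg_delivery_poll(100)
print(('delivery callbacks fired: %d'):format(fired))

-- close() flushes outstanding messages and stops the background poller thread.
producer:close()
```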