├── .github └── workflows │ ├── asan_testing.yml │ ├── fast_testing.yml │ └── publish.yml ├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENSE ├── Makefile ├── README.md ├── benchmarks ├── async_producer.lua ├── auto_offset_store_consumer.lua ├── manual_offset_store_consumer.lua └── sync_producer.lua ├── cmake ├── FindRdKafka.cmake └── FindTarantool.cmake ├── docker └── Dockerfile ├── examples ├── consumer │ ├── auto_offset_store.lua │ └── manual_offset_store.lua ├── producer │ ├── async_producer.lua │ └── sync_producer.lua └── static-build │ ├── Dockerfile │ └── README.md ├── kafka-1.1.0-0.rockspec ├── kafka-scm-1.rockspec ├── kafka ├── CMakeLists.txt ├── callbacks.c ├── callbacks.h ├── common.c ├── common.h ├── consumer.c ├── consumer.h ├── consumer_msg.c ├── consumer_msg.h ├── init.lua ├── producer.c ├── producer.h ├── queue.c ├── queue.h ├── tnt_kafka.c ├── tnt_kafka.h └── version.lua ├── patches ├── librdkafka-tarantool-security-36.patch ├── librdkafka-tarantool-security-47.patch ├── librdkafka-tarantool-security-52.patch ├── librdkafka-tarantool-security-55.patch ├── librdkafka-tarantool-security-70.patch ├── librdkafka-tarantool-security-71.patch ├── librdkafka-tarantool-security-72.patch └── librdkafka-tarantool-security-94.patch └── tests ├── app.lua ├── consumer.lua ├── producer.lua ├── requirements.txt ├── test_consumer.py └── test_producer.py /.github/workflows/asan_testing.yml: -------------------------------------------------------------------------------- 1 | name: asan_testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | linux: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Clone the module 19 | uses: actions/checkout@v4 20 | with: 21 | submodules: true 22 | 23 | - uses: actions/setup-python@v5 24 | with: 25 | python-version: '3.10' 26 | cache: 'pip' 27 | cache-dependency-path: 'tests/requirements.txt' 28 | 29 | - name: Start Kafka 30 | uses: ybyzek/cp-all-in-one-action@v0.2.1 31 | with: 32 | type: cp-all-in-one-community 33 | 34 | - name: Install Python dependencies 35 | run: pip install -r tests/requirements.txt 36 | 37 | - name: Install dependencies 38 | run: sudo apt install -y libreadline-dev 39 | 40 | - name: Build module 41 | run: | 42 | export MAKEFLAGS=-j8 43 | export CC=clang 44 | export CXX=clang++ 45 | git clone https://github.com/tarantool/tarantool 46 | cd tarantool 47 | git checkout release/2.11 48 | export LSAN_OPTIONS=suppressions=${PWD}/asan/lsan.supp 49 | cmake . -DENABLE_ASAN=ON -DENABLE_UB_SANITIZER=ON -DENABLE_DIST=ON 50 | make -j16 51 | sudo make install 52 | cd .. 
53 | tarantoolctl rocks STATIC_BUILD=ON ENABLE_ASAN=ON ENABLE_UBSAN=ON make 54 | 55 | - name: Run tarantool application 56 | run: | 57 | export TT_LOG=tarantool.log 58 | export LSAN_OPTIONS=suppressions=${PWD}/tarantool/asan/lsan.supp 59 | tarantool tests/app.lua > output.log 2>&1 & 60 | 61 | - name: Run test 62 | run: KAFKA_HOST=localhost:9092 pytest tests 63 | 64 | - name: Print Tarantool logs 65 | if: always() 66 | run: | 67 | cat tarantool.log 68 | cat output.log 69 | -------------------------------------------------------------------------------- /.github/workflows/fast_testing.yml: -------------------------------------------------------------------------------- 1 | name: fast_testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | linux: 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | tarantool: 20 | - '2.11' 21 | 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Install tarantool ${{ matrix.tarantool }} 25 | uses: tarantool/setup-tarantool@v3 26 | with: 27 | tarantool-version: ${{ matrix.tarantool }} 28 | 29 | - name: Clone the module 30 | uses: actions/checkout@v4 31 | with: 32 | submodules: true 33 | 34 | - uses: actions/setup-python@v5 35 | with: 36 | python-version: '3.10' 37 | cache: 'pip' 38 | cache-dependency-path: 'tests/requirements.txt' 39 | 40 | - name: Start Kafka 41 | uses: ybyzek/cp-all-in-one-action@v0.2.1 42 | with: 43 | type: cp-all-in-one-community 44 | 45 | - name: Install Python dependencies 46 | run: pip install -r tests/requirements.txt 47 | 48 | - name: Build module 49 | run: | 50 | export MAKEFLAGS=-j8 51 | tarantoolctl rocks STATIC_BUILD=ON make 52 | 53 | - name: Run tarantool application 54 | run: TT_LOG=tarantool.log tarantool tests/app.lua > output.log 2>&1 & 55 | 56 | - name: Run test 57 | run: KAFKA_HOST=localhost:9092 pytest tests 58 | 59 | - name: Print Tarantool logs 60 | if: always() 61 | run: | 62 | cat tarantool.log 63 | cat output.log 64 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | # Allows you to run this workflow manually from the Actions tab 5 | workflow_dispatch: 6 | push: 7 | 8 | jobs: 9 | version-check: 10 | # We need this job to run only on push with tag. 
11 | if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }} 12 | runs-on: ubuntu-22.04 13 | steps: 14 | - name: Check module version 15 | uses: tarantool/actions/check-module-version@master 16 | with: 17 | module-name: 'kafka' 18 | rock-make-opts: 'STATIC_BUILD=ON' 19 | 20 | publish-scm-1: 21 | if: github.ref == 'refs/heads/master' 22 | runs-on: ubuntu-22.04 23 | steps: 24 | - uses: actions/checkout@v4 25 | - uses: tarantool/rocks.tarantool.org/github-action@master 26 | with: 27 | auth: ${{ secrets.ROCKS_AUTH }} 28 | files: kafka-scm-1.rockspec 29 | 30 | publish-tag: 31 | if: startsWith(github.ref, 'refs/tags/') 32 | needs: version-check 33 | runs-on: ubuntu-22.04 34 | steps: 35 | - uses: actions/checkout@v4 36 | - uses: tarantool/setup-tarantool@v3 37 | with: 38 | tarantool-version: '2.11' 39 | # Make a release 40 | - run: echo TAG=${GITHUB_REF##*/} >> $GITHUB_ENV 41 | - run: tarantoolctl rocks new_version --tag ${{ env.TAG }} 42 | - run: tarantoolctl rocks pack kafka-${{ env.TAG }}-1.rockspec 43 | 44 | - uses: tarantool/rocks.tarantool.org/github-action@master 45 | with: 46 | auth: ${{ secrets.ROCKS_AUTH }} 47 | files: | 48 | kafka-${{ env.TAG }}-1.rockspec 49 | kafka-${{ env.TAG }}-1.src.rock 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .rocks 3 | tests/venv 4 | tests/.pytest_cache 5 | tests/__* 6 | cmake-build-debug 7 | build.luarocks/ 8 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "librdkafka"] 2 | path = librdkafka 3 | url = https://github.com/confluentinc/librdkafka.git 4 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.13 FATAL_ERROR) 2 | 3 | project(kafka C) 4 | 5 | set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) 6 | set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE) 7 | 8 | # Set CFLAGS 9 | set(MY_C_FLAGS "-Wall -Wextra -Werror -std=gnu11 -fno-strict-aliasing -Wno-deprecated-declarations") 10 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_C_FLAGS}") 11 | set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${MY_C_FLAGS} -ggdb3") 12 | 13 | find_package(Tarantool REQUIRED) 14 | 15 | set(STATIC_BUILD "OFF" CACHE BOOL "Link dependencies statically?") 16 | set(WITH_OPENSSL_1_1 "OFF" CACHE BOOL "Require openssl version >= 1.1?") 17 | set(WITH_GSSAPI "OFF" CACHE BOOL "Enable Kerberos (GSSAPI) support") 18 | set(ENABLE_ASAN "OFF" CACHE BOOL "Enable ASAN") 19 | set(ENABLE_UBSAN "OFF" CACHE BOOL "Enable UBSAN") 20 | 21 | if (WITH_OPENSSL_1_1) 22 | find_package(OpenSSL 1.1 REQUIRED) 23 | else() 24 | find_package(OpenSSL REQUIRED) 25 | endif() 26 | message("Found OPENSSL version: ${OPENSSL_VERSION}") 27 | 28 | if (ENABLE_ASAN) 29 | list(APPEND SANITIZER_FLAGS -fsanitize=address) 30 | endif() 31 | 32 | if (ENABLE_UBSAN) 33 | list(APPEND SANITIZER_FLAGS -fsanitize=undefined) 34 | endif() 35 | 36 | if (SANITIZER_FLAGS) 37 | list(JOIN SANITIZER_FLAGS " " SANITIZER_FLAGS) 38 | set(LIBRDKAFKA_FLAGS --enable-devel --disable-optimization) 39 | set(CMAKE_BUILD_TYPE "Debug") 40 | set(LIBRDKAFKA_CXX_FLAGS "${SANITIZER_FLAGS}") 41 | set(LIBRDKAFKA_C_FLAGS "${SANITIZER_FLAGS}") 42 | set(LIBRDKAFKA_LD_FLAGS 
"${SANITIZER_FLAGS}") 43 | endif() 44 | 45 | if (APPLE) 46 | set(LIBRDKAFKA_LD_FLAGS "${LIBRDKAFKA_LD_FLAGS} ${CMAKE_C_SYSROOT_FLAG} ${CMAKE_OSX_SYSROOT}") 47 | set(LIBRDKAFKA_CXX_FLAGS "${LIBRDKAFKA_CXX_FLAGS} ${CMAKE_C_SYSROOT_FLAG} ${CMAKE_OSX_SYSROOT}") 48 | set(LIBRDKAFKA_C_FLAGS "${LIBRDKAFKA_C_FLAGS} ${CMAKE_C_SYSROOT_FLAG} ${CMAKE_OSX_SYSROOT}") 49 | endif() 50 | 51 | if (WITH_GSSAPI) 52 | set(LIBRDKAFKA_FLAGS ${LIBRDKAFKA_FLAGS} --enable-gssapi) 53 | endif() 54 | 55 | if(STATIC_BUILD) 56 | include(ExternalProject) 57 | set(PATCHES_DIR "${CMAKE_SOURCE_DIR}/patches") 58 | ExternalProject_Add(librdkafka 59 | SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR}/librdkafka 60 | INSTALL_DIR ${CMAKE_BINARY_DIR}/librdkafka 61 | BUILD_IN_SOURCE 1 62 | CONFIGURE_COMMAND /configure 63 | --cc=${CMAKE_C_COMPILER} 64 | --cxx=${CMAKE_CXX_COMPILER} 65 | --CFLAGS=${LIBRDKAFKA_C_FLAGS} 66 | --CPPFLAGS=${LIBRDKAFKA_CXX_FLAGS} 67 | --LDFLAGS=${LIBRDKAFKA_LD_FLAGS} 68 | --prefix= 69 | ${LIBRDKAFKA_FLAGS} 70 | 71 | --enable-ssl 72 | --disable-zstd 73 | --disable-lz4 74 | --disable-lz4-ext 75 | --enable-static 76 | BUILD_COMMAND make -C src -j 77 | INSTALL_COMMAND make -C src install 78 | PATCH_COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-47.patch" 79 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-52.patch" 80 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-55.patch" 81 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-70.patch" 82 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-36.patch" 83 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-71.patch" 84 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-72.patch" 85 | COMMAND patch -d -p1 -i "${PATCHES_DIR}/librdkafka-tarantool-security-94.patch" 86 | ) 87 | 88 | add_library(librdkafka_static INTERFACE) 89 | add_dependencies(librdkafka_static librdkafka) 90 | ExternalProject_Get_Property(librdkafka INSTALL_DIR) 91 | target_include_directories(librdkafka_static SYSTEM INTERFACE ${INSTALL_DIR}/include) 92 | target_link_libraries(librdkafka_static INTERFACE ${INSTALL_DIR}/lib/librdkafka.a) 93 | 94 | set(RDKAFKA_LIBRARY ${RDKAFKA_LIBRARY} librdkafka_static) 95 | else() 96 | find_package(RdKafka REQUIRED) 97 | # Link RdKafka transitive dependencies manually 98 | set(RDKAFKA_LIBRARY ${RDKAFKA_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} ${OPENSSL_SSL_LIBRARY}) 99 | endif() 100 | 101 | include_directories(${TARANTOOL_INCLUDE_DIRS}) 102 | 103 | add_subdirectory(kafka) 104 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | NETWORK="tnt-kafka-tests" 2 | 3 | docker-remove-network: 4 | docker network remove ${NETWORK} || true 5 | 6 | docker-create-network: docker-remove-network 7 | docker network create ${NETWORK} 8 | 9 | docker-remove-zoo: 10 | docker rm -f zookeeper || true 11 | 12 | docker-run-zoo: docker-remove-zoo 13 | docker run -d \ 14 | --net=${NETWORK} \ 15 | --name=zookeeper \ 16 | -p 2181:2181 \ 17 | -e ZOOKEEPER_CLIENT_PORT=2181 \ 18 | confluentinc/cp-zookeeper:5.0.0 19 | 20 | docker-remove-kafka: 21 | docker rm -f kafka || true 22 | 23 | docker-pull-kafka: 24 | docker pull wurstmeister/kafka 25 | 26 | docker-run-kafka: docker-remove-kafka 27 | docker run -d \ 28 | --net=${NETWORK} \ 29 | --name=kafka \ 30 | -p 9092:9092 \ 31 | -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \ 32 | -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \ 33 | -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092 \ 34 | -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \ 35 | wurstmeister/kafka 36 | 37 | docker-read-topic-data: 38 | docker run \ 39 | --net=${NETWORK} \ 40 | --rm \ 41 | confluentinc/cp-kafka:5.0.0 \ 42 | kafka-console-consumer --bootstrap-server kafka:9092 --topic test_partially_unsubscribe_1 --from-beginning 43 | 44 | APP_NAME = kafka-test 45 | APP_IMAGE = kafka-test-image 46 | 47 | docker-build-app: 48 | docker build -t ${APP_IMAGE} -f ./docker/Dockerfile . 49 | 50 | docker-remove-app: 51 | docker rm -f ${APP_NAME} || true 52 | 53 | docker-run-app: docker-build-app docker-remove-app 54 | docker run -d \ 55 | -p 3301:3301 \ 56 | --net ${NETWORK} \ 57 | --name ${APP_NAME} \ 58 | -e KAFKA_BROKERS=kafka:9092 \ 59 | ${APP_IMAGE} 60 | 61 | docker-run-interactive: docker-build-app docker-remove-app 62 | docker run -it \ 63 | -p 3301:3301 \ 64 | --net ${NETWORK} \ 65 | --name ${APP_NAME} \ 66 | -e KAFKA_BROKERS=kafka:9092 \ 67 | ${APP_IMAGE} 68 | 69 | docker-remove-all: \ 70 | docker-remove-app \ 71 | docker-remove-kafka \ 72 | docker-remove-zoo \ 73 | docker-remove-network 74 | 75 | docker-run-environment: \ 76 | docker-remove-all \ 77 | docker-create-network \ 78 | docker-run-zoo \ 79 | docker-run-kafka 80 | 81 | docker-run-all: \ 82 | docker-run-environment \ 83 | docker-create-network \ 84 | docker-build-app \ 85 | docker-run-app 86 | 87 | ####################################################################### 88 | # Tests 89 | 90 | tests-dep: 91 | cd ./tests && \ 92 | python3 -m venv venv && \ 93 | . venv/bin/activate && \ 94 | pip install -r requirements.txt && \ 95 | deactivate 96 | 97 | tests-run: 98 | cd ./tests && \ 99 | . 
venv/bin/activate && \ 100 | pytest -W ignore -vv && \ 101 | deactivate 102 | 103 | test-sleep: tests-dep docker-run-all 104 | sleep 10 105 | 106 | test-run-with-docker: test-sleep 107 | docker run \ 108 | --net=${NETWORK} \ 109 | --rm confluentinc/cp-kafka:5.0.0 \ 110 | kafka-topics --create --topic test_producer --partitions 1 --replication-factor 1 \ 111 | --if-not-exists --zookeeper zookeeper:2181 112 | 113 | docker run \ 114 | --net=${NETWORK} \ 115 | --rm confluentinc/cp-kafka:5.0.0 \ 116 | kafka-topics --create --topic test_consume --partitions 1 --replication-factor 1 \ 117 | --if-not-exists --zookeeper zookeeper:2181 118 | 119 | docker run \ 120 | --net=${NETWORK} \ 121 | --rm confluentinc/cp-kafka:5.0.0 \ 122 | kafka-topics --create --topic test_unsubscribe --partitions 1 --replication-factor 1 \ 123 | --if-not-exists --zookeeper zookeeper:2181 124 | 125 | docker run \ 126 | --net=${NETWORK} \ 127 | --rm confluentinc/cp-kafka:5.0.0 \ 128 | kafka-topics --create --topic test_unsub_partially_1 --partitions 1 --replication-factor 1 \ 129 | --if-not-exists --zookeeper zookeeper:2181 130 | 131 | docker run \ 132 | --net=${NETWORK} \ 133 | --rm confluentinc/cp-kafka:5.0.0 \ 134 | kafka-topics --create --topic test_unsub_partially_2 --partitions 1 --replication-factor 1 \ 135 | --if-not-exists --zookeeper zookeeper:2181 136 | 137 | docker run \ 138 | --net=${NETWORK} \ 139 | --rm confluentinc/cp-kafka:5.0.0 \ 140 | kafka-topics --create --topic test_multi_consume_1 --partitions 1 --replication-factor 1 \ 141 | --if-not-exists --zookeeper zookeeper:2181 142 | 143 | docker run \ 144 | --net=${NETWORK} \ 145 | --rm confluentinc/cp-kafka:5.0.0 \ 146 | kafka-topics --create --topic test_multi_consume_2 --partitions 1 --replication-factor 1 \ 147 | --if-not-exists --zookeeper zookeeper:2181 148 | 149 | docker run \ 150 | --net=${NETWORK} \ 151 | --rm confluentinc/cp-kafka:5.0.0 \ 152 | kafka-topics --create --topic test_consuming_from_last_committed_offset --partitions 1 --replication-factor 1 \ 153 | --if-not-exists --zookeeper zookeeper:2181 154 | 155 | sleep 5 156 | 157 | cd ./tests && \ 158 | python3 -m venv venv && \ 159 | . venv/bin/activate && \ 160 | pip install -r requirements.txt && \ 161 | deactivate 162 | 163 | cd ./tests && \ 164 | . 
venv/bin/activate && \ 165 | pytest -W ignore -vv && \ 166 | deactivate 167 | 168 | ####################################################################### 169 | # Benchmarks 170 | 171 | docker-create-benchmark-async-producer-topic: 172 | docker run \ 173 | --net=${NETWORK} \ 174 | --rm confluentinc/cp-kafka:5.0.0 \ 175 | kafka-topics --create --topic async_producer_benchmark --partitions 2 --replication-factor 1 \ 176 | --if-not-exists --zookeeper zookeeper:2181 177 | 178 | docker-run-benchmark-async-producer-interactive: docker-build-app docker-remove-app 179 | docker run -it \ 180 | -p 3301:3301 \ 181 | --net ${NETWORK} \ 182 | --name ${APP_NAME} \ 183 | --entrypoint "tarantool" \ 184 | -e KAFKA_BROKERS=kafka:9092 \ 185 | ${APP_IMAGE} \ 186 | /opt/tarantool/benchmarks/async_producer.lua 187 | 188 | docker-read-benchmark-async-producer-topic-data: 189 | docker run \ 190 | --net=${NETWORK} \ 191 | --rm \ 192 | confluentinc/cp-kafka:5.0.0 \ 193 | kafka-console-consumer --bootstrap-server kafka:9092 --topic async_producer_benchmark --from-beginning 194 | 195 | docker-create-benchmark-sync-producer-topic: 196 | docker run \ 197 | --net=${NETWORK} \ 198 | --rm confluentinc/cp-kafka:5.0.0 \ 199 | kafka-topics --create --topic sync_producer_benchmark --partitions 2 --replication-factor 1 \ 200 | --if-not-exists --zookeeper zookeeper:2181 201 | 202 | docker-run-benchmark-sync-producer-interactive: docker-build-app docker-remove-app 203 | docker run -it \ 204 | -p 3301:3301 \ 205 | --net ${NETWORK} \ 206 | --name ${APP_NAME} \ 207 | --entrypoint "tarantool" \ 208 | -e KAFKA_BROKERS=kafka:9092 \ 209 | ${APP_IMAGE} \ 210 | /opt/tarantool/benchmarks/sync_producer.lua 211 | 212 | docker-read-benchmark-sync-producer-topic-data: 213 | docker run \ 214 | --net=${NETWORK} \ 215 | --rm \ 216 | confluentinc/cp-kafka:5.0.0 \ 217 | kafka-console-consumer --bootstrap-server kafka:9092 --topic sync_producer_benchmark --from-beginning 218 | 219 | docker-create-benchmark-auto-offset-store-consumer-topic: 220 | docker run \ 221 | --net=${NETWORK} \ 222 | --rm confluentinc/cp-kafka:5.0.0 \ 223 | kafka-topics --create --topic auto_offset_store_consumer_benchmark --partitions 2 --replication-factor 1 \ 224 | --if-not-exists --zookeeper zookeeper:2181 225 | 226 | docker-run-benchmark-auto-offset-store-consumer-interactive: docker-build-app docker-remove-app 227 | docker run -it \ 228 | -p 3301:3301 \ 229 | --net ${NETWORK} \ 230 | --name ${APP_NAME} \ 231 | --entrypoint "tarantool" \ 232 | -e KAFKA_BROKERS=kafka:9092 \ 233 | ${APP_IMAGE} \ 234 | /opt/tarantool/benchmarks/auto_offset_store_consumer.lua 235 | 236 | docker-read-benchmark-auto-offset-store-consumer-topic-data: 237 | docker run \ 238 | --net=${NETWORK} \ 239 | --rm \ 240 | confluentinc/cp-kafka:5.0.0 \ 241 | kafka-console-consumer --bootstrap-server kafka:9092 --topic auto_offset_store_consumer_benchmark --from-beginning 242 | 243 | docker-create-benchmark-manual-commit-consumer-topic: 244 | docker run \ 245 | --net=${NETWORK} \ 246 | --rm confluentinc/cp-kafka:5.0.0 \ 247 | kafka-topics --create --topic manual_offset_store_consumer --partitions 2 --replication-factor 1 \ 248 | --if-not-exists --zookeeper zookeeper:2181 249 | 250 | docker-run-benchmark-manual-commit-consumer-interactive: docker-build-app docker-remove-app 251 | docker run -it \ 252 | -p 3301:3301 \ 253 | --net ${NETWORK} \ 254 | --name ${APP_NAME} \ 255 | --entrypoint "tarantool" \ 256 | -e KAFKA_BROKERS=kafka:9092 \ 257 | ${APP_IMAGE} \ 258 | 
/opt/tarantool/benchmarks/manual_offset_store_consumer.lua 259 | 260 | docker-read-benchmark-manual-commit-consumer-topic-data: 261 | docker run \ 262 | --net=${NETWORK} \ 263 | --rm \ 264 | confluentinc/cp-kafka:5.0.0 \ 265 | kafka-console-consumer --bootstrap-server kafka:9092 --topic manual_offset_store_consumer --from-beginning 266 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Tarantool kafka 2 | =============== 3 | A full-featured, high-performance Kafka library for Tarantool based on [librdkafka](https://github.com/confluentinc/librdkafka). 4 | 5 | It can produce more than 150k messages per second and consume more than 140k messages per second. 6 | 7 | ## Features 8 | * Kafka producer and consumer implementations. 9 | * Fiber friendly. 10 | * Mostly errorless functions and methods. Error handling in the Tarantool ecosystem is quite a mess: 11 | some libraries throw a native Lua `error`, while others throw `box.error` instead. `kafka` returns 12 | non-critical errors as strings, which lets you decide how to handle them. 13 | 14 | ## Requirements 15 | * Tarantool >= 1.10.2 16 | * Tarantool development headers 17 | * librdkafka >= 0.11.5 18 | * librdkafka development headers 19 | * openssl-libs 20 | * openssl development headers 21 | * make 22 | * cmake 23 | * gcc 24 | 25 | ## Installation 26 | ```bash 27 | tt rocks install kafka 28 | ``` 29 | 30 | ### Build module with statically linked librdkafka 31 | 32 | To install the kafka module with a built-in `librdkafka` dependency, use the `STATIC_BUILD` option: 33 | 34 | ```bash 35 | tt rocks STATIC_BUILD=ON install kafka 36 | ``` 37 | 38 | Be aware that this approach doesn't include static openssl. 39 | Instead, it assumes tarantool has openssl symbols exported. 40 | That means the kafka static build is only usable with a static tarantool build. 41 | 42 | For a successful static build, you need to compile kafka 43 | against the [same version of openssl](https://github.com/tarantool/tarantool/blob/800e5ed617f7cd352ec597ce16973c7e4cad76c8/static-build/CMakeLists.txt#L11) that tarantool does. 44 | 45 | ## Usage 46 | 47 | Consumer 48 | ```lua 49 | local os = require('os') 50 | local log = require('log') 51 | local tnt_kafka = require('kafka') 52 | 53 | local consumer, err = tnt_kafka.Consumer.create({ brokers = "localhost:9092" }) 54 | if err ~= nil then 55 | print(err) 56 | os.exit(1) 57 | end 58 | 59 | local err = consumer:subscribe({ "some_topic" }) 60 | if err ~= nil then 61 | print(err) 62 | os.exit(1) 63 | end 64 | 65 | local out, err = consumer:output() 66 | if err ~= nil then 67 | print(string.format("got fatal error '%s'", err)) 68 | os.exit(1) 69 | end 70 | 71 | while true do 72 | if out:is_closed() then 73 | os.exit(1) 74 | end 75 | 76 | local msg = out:get() 77 | if msg ~= nil then 78 | print(string.format( 79 | "got msg with topic='%s' partition='%s' offset='%s' key='%s' value='%s'", 80 | msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value() 81 | )) 82 | end 83 | end 84 | 85 | -- from another fiber on app shutdown 86 | consumer:close() 87 | ``` 88 | 89 | Producer 90 | ```lua 91 | local os = require('os') 92 | local log = require('log') 93 | local tnt_kafka = require('kafka') 94 | 95 | local producer, err = tnt_kafka.Producer.create({ brokers = "kafka:9092" }) 96 | if err ~= nil then 97 | print(err) 98 | os.exit(1) 99 | end 100 | 101 | for i = 1, 1000 do 102 | local message = "test_value " ..
tostring(i) 103 | local err = producer:produce({ 104 | topic = "test_topic", 105 | key = "test_key", 106 | value = message 107 | }) 108 | if err ~= nil then 109 | print(string.format("got error '%s' while sending value '%s'", err, message)) 110 | else 111 | print(string.format("successfully sent value '%s'", message)) 112 | end 113 | end 114 | 115 | producer:close() 116 | ``` 117 | 118 | You can pass additional configuration parameters for librdkafka 119 | (https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md) in the special `options` table on client creation: 120 | ```lua 121 | tnt_kafka.Producer.create({ 122 | options = { 123 | ["some.key"] = "some_value", 124 | }, 125 | }) 126 | 127 | tnt_kafka.Consumer.create({ 128 | options = { 129 | ["some.key"] = "some_value", 130 | }, 131 | }) 132 | ``` 133 | 134 | More examples are available in the `examples` folder. 135 | 136 | ## Using SSL 137 | 138 | Connecting to brokers over SSL is supported by librdkafka itself, so you only need to properly configure the brokers 139 | using this guide: https://github.com/confluentinc/librdkafka/wiki/Using-SSL-with-librdkafka 140 | 141 | After that, you only need to pass the following configuration parameters on client creation: 142 | ```lua 143 | tnt_kafka.Producer.create({ 144 | brokers = "broker_list", 145 | options = { 146 | ["security.protocol"] = "ssl", 147 | -- CA certificate file for verifying the broker's certificate. 148 | ["ssl.ca.location"] = "ca-cert", 149 | -- Client's certificate 150 | ["ssl.certificate.location"] = "client_?????_client.pem", 151 | -- Client's key 152 | ["ssl.key.location"] = "client_?????_client.key", 153 | -- Key password, if any 154 | ["ssl.key.password"] = "abcdefgh", 155 | }, 156 | }) 157 | 158 | tnt_kafka.Consumer.create({ 159 | brokers = "broker_list", 160 | options = { 161 | ["security.protocol"] = "ssl", 162 | -- CA certificate file for verifying the broker's certificate.
163 | ["ssl.ca.location"] = "ca-cert", 164 | -- Client's certificate 165 | ["ssl.certificate.location"] = "client_?????_client.pem", 166 | -- Client's key 167 | ["ssl.key.location"] = "client_?????_client.key", 168 | -- Key password, if any 169 | ["ssl.key.password"] = "abcdefgh", 170 | }, 171 | }) 172 | ``` 173 | 174 | ## Known issues 175 | 176 | ## TODO 177 | * Ordered storage for offsets to prevent committing unprocessed messages 178 | * More examples 179 | * Better documentation 180 | 181 | ## Benchmarks 182 | 183 | Before running any commands, initialize and update the git submodules: 184 | ```bash 185 | git submodule init 186 | git submodule update 187 | ``` 188 | 189 | ### Producer 190 | 191 | #### Async 192 | 193 | Result: over 160000 produced messages per second on a MacBook Pro 2016 194 | 195 | Local run in Docker: 196 | ```bash 197 | make docker-run-environment 198 | make docker-create-benchmark-async-producer-topic 199 | make docker-run-benchmark-async-producer-interactive 200 | ``` 201 | 202 | #### Sync 203 | 204 | Result: over 90000 produced messages per second on a MacBook Pro 2016 205 | 206 | Local run in Docker: 207 | ```bash 208 | make docker-run-environment 209 | make docker-create-benchmark-sync-producer-topic 210 | make docker-run-benchmark-sync-producer-interactive 211 | ``` 212 | 213 | ### Consumer 214 | 215 | #### Auto offset store enabled 216 | 217 | Result: over 190000 consumed messages per second on a MacBook Pro 2016 218 | 219 | Local run in Docker: 220 | ```bash 221 | make docker-run-environment 222 | make docker-create-benchmark-auto-offset-store-consumer-topic 223 | make docker-run-benchmark-auto-offset-store-consumer-interactive 224 | ``` 225 | 226 | #### Manual offset store 227 | 228 | Result: over 190000 consumed messages per second on a MacBook Pro 2016 229 | 230 | Local run in Docker: 231 | ```bash 232 | make docker-run-environment 233 | make docker-create-benchmark-manual-commit-consumer-topic 234 | make docker-run-benchmark-manual-commit-consumer-interactive 235 | ``` 236 | 237 | ## Developing 238 | 239 | ### Tests 240 | Before running any tests, add the following entry to `/etc/hosts`: 241 | ``` 242 | 127.0.0.1 kafka 243 | ``` 244 | 245 | You can run Docker-based integration tests via the makefile target: 246 | ```bash 247 | make test-run-with-docker 248 | ``` 249 | -------------------------------------------------------------------------------- /benchmarks/async_producer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local box = require('box') 3 | local os = require('os') 4 | local log = require('log') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{} 9 | 10 | box.once('init', function() 11 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 12 | end) 13 | 14 | local function produce() 15 | local producer, err = tnt_kafka.Producer.create({brokers = "kafka:9092", options = {}}) 16 | if err ~= nil then 17 | print(err) 18 | os.exit(1) 19 | end 20 | 21 | local before = clock.monotonic64() 22 | for i = 1, 10000000 do 23 | while true do 24 | local err = producer:produce_async({ -- don't wait until message will be delivered to kafka 25 | topic = "async_producer_benchmark", 26 | value = "test_value_" ..
tostring(i) -- only strings allowed 27 | }) 28 | if err ~= nil then 29 | -- print(err) 30 | fiber.sleep(0.1) 31 | else 32 | break 33 | end 34 | end 35 | if i % 1000 == 0 then 36 | -- log.info("done %d", i) 37 | fiber.yield() 38 | end 39 | end 40 | 41 | log.info("stopping") 42 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 43 | if err ~= nil then 44 | print(err) 45 | os.exit(1) 46 | end 47 | 48 | local duration = clock.monotonic64() - before 49 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 50 | end 51 | 52 | log.info("starting benchmark") 53 | 54 | produce() 55 | -------------------------------------------------------------------------------- /benchmarks/auto_offset_store_consumer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local log = require('log') 3 | local box = require('box') 4 | local os = require('os') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{ 9 | memtx_memory = 524288000, -- 500 MB 10 | } 11 | 12 | local TOPIC = "auto_offset_store_consumer_benchmark" 13 | local MSG_COUNT = 10000000 14 | 15 | box.once('init', function() 16 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 17 | end) 18 | 19 | local function produce_initial_data() 20 | local producer, err = tnt_kafka.Producer.create({ brokers = "kafka:9092"}) 21 | if err ~= nil then 22 | print(err) 23 | os.exit(1) 24 | end 25 | 26 | for i = 1, MSG_COUNT do 27 | while true do 28 | local err = producer:produce_async({ -- don't wait until message will be delivired to kafka 29 | topic = TOPIC, 30 | value = "test_value_" .. tostring(i) -- only strings allowed 31 | }) 32 | if err ~= nil then 33 | -- print(err) 34 | fiber.sleep(0.1) 35 | else 36 | break 37 | end 38 | end 39 | if i % 1000 == 0 then 40 | fiber.yield() 41 | end 42 | end 43 | 44 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 45 | if err ~= nil then 46 | print(err) 47 | os.exit(1) 48 | end 49 | end 50 | 51 | local function consume() 52 | local consumer, err = tnt_kafka.Consumer.create({ brokers = "kafka:9092", options = { 53 | ["enable.auto.offset.store"] = "true", 54 | ["group.id"] = "test_consumer1", 55 | ["auto.offset.reset"] = "earliest", 56 | ["enable.partition.eof"] = "false", 57 | ["queued.min.messages"] = "100000" 58 | }}) 59 | if err ~= nil then 60 | print(err) 61 | os.exit(1) 62 | end 63 | 64 | local err = consumer:subscribe({TOPIC}) 65 | if err ~= nil then 66 | print(err) 67 | os.exit(1) 68 | end 69 | 70 | local before = clock.monotonic64() 71 | local counter = 0 72 | local out, err = consumer:output() 73 | if err ~= nil then 74 | print(string.format("got fatal error '%s'", err)) 75 | return 76 | end 77 | 78 | while counter < MSG_COUNT do 79 | if out:is_closed() then 80 | return 81 | end 82 | 83 | local msg = out:get() 84 | if msg ~= nil then 85 | counter = counter + 1 86 | -- print(msg:value()) 87 | end 88 | if counter % 10000 == 0 then 89 | log.info("done %d", counter) 90 | fiber.yield() 91 | end 92 | end 93 | 94 | print("closing") 95 | local ok, err = consumer:close() 96 | if err ~= nil then 97 | print(err) 98 | os.exit(1) 99 | end 100 | 101 | local duration = clock.monotonic64() - before 102 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 103 | end 104 | 105 | print("producing initial data") 106 | 
produce_initial_data() 107 | 108 | print("starting benchmark") 109 | consume() 110 | -------------------------------------------------------------------------------- /benchmarks/manual_offset_store_consumer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local box = require('box') 3 | local os = require('os') 4 | local log = require('log') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{ 9 | memtx_memory = 524288000, 10 | } 11 | 12 | local TOPIC = "manual_offset_store_consumer" 13 | local MSG_COUNT = 10000000 14 | 15 | box.once('init', function() 16 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 17 | end) 18 | 19 | local function produce_initial_data() 20 | local producer, err = tnt_kafka.Producer.create({ brokers = "kafka:9092"}) 21 | if err ~= nil then 22 | print(err) 23 | os.exit(1) 24 | end 25 | 26 | for i = 1, MSG_COUNT do 27 | while true do 28 | local err = producer:produce_async({ -- don't wait until message will be delivired to kafka 29 | topic = TOPIC, 30 | value = "test_value_" .. tostring(i) -- only strings allowed 31 | }) 32 | if err ~= nil then 33 | -- print(err) 34 | fiber.sleep(0.1) 35 | else 36 | break 37 | end 38 | end 39 | if i % 1000 == 0 then 40 | fiber.yield() 41 | end 42 | end 43 | 44 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 45 | if err ~= nil then 46 | print(err) 47 | os.exit(1) 48 | end 49 | end 50 | 51 | local function consume() 52 | local consumer, err = tnt_kafka.Consumer.create({ brokers = "kafka:9092", options = { 53 | ["enable.auto.offset.store"] = "false", 54 | ["group.id"] = "test_consumer1", 55 | ["auto.offset.reset"] = "earliest", 56 | ["enable.partition.eof"] = "false", 57 | ["queued.min.messages"] = "100000" 58 | }}) 59 | if err ~= nil then 60 | print(err) 61 | os.exit(1) 62 | end 63 | 64 | local err = consumer:subscribe({TOPIC}) 65 | if err ~= nil then 66 | print(err) 67 | os.exit(1) 68 | end 69 | 70 | local before = clock.monotonic64() 71 | local counter = 0 72 | local out, err = consumer:output() 73 | if err ~= nil then 74 | print(string.format("got fatal error '%s'", err)) 75 | return 76 | end 77 | 78 | while counter < MSG_COUNT do 79 | if out:is_closed() then 80 | return 81 | end 82 | 83 | local msg = out:get() 84 | if msg ~= nil then 85 | counter = counter + 1 86 | err = consumer:store_offset(msg) 87 | if err ~= nil then 88 | print(err) 89 | end 90 | end 91 | if counter % 10000 == 0 then 92 | log.info("done %d", counter) 93 | fiber.yield() 94 | end 95 | end 96 | 97 | print("closing") 98 | local ok, err = consumer:close() 99 | if err ~= nil then 100 | print(err) 101 | os.exit(1) 102 | end 103 | 104 | local duration = clock.monotonic64() - before 105 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 106 | end 107 | 108 | log.info("producing initial data") 109 | produce_initial_data() 110 | 111 | log.info("starting benchmark") 112 | consume() 113 | -------------------------------------------------------------------------------- /benchmarks/sync_producer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local box = require('box') 3 | local log = require('log') 4 | local os = require('os') 5 | local clock = require('clock') 6 | local tnt_kafka = require('kafka') 7 | 8 | box.cfg{ 9 | memtx_memory = 524288000, -- 500 MB 10 | } 
11 | 12 | box.once('init', function() 13 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 14 | end) 15 | 16 | local function produce() 17 | local producer, err = tnt_kafka.Producer.create({ 18 | brokers = "kafka:9092", 19 | options = { 20 | ["queue.buffering.max.ms"] = "50", 21 | } 22 | }) 23 | if err ~= nil then 24 | print(err) 25 | os.exit(1) 26 | end 27 | 28 | local before = clock.monotonic64() 29 | local input_ch = fiber.channel(); 30 | for i = 1, 10000 do 31 | fiber.create(function() 32 | while true do 33 | if input_ch:is_closed() then 34 | break 35 | end 36 | local value = input_ch:get() 37 | if value ~= nil then 38 | while true do 39 | local err = producer:produce({ 40 | topic = "sync_producer_benchmark", 41 | value = value -- only strings allowed 42 | }) 43 | if err ~= nil then 44 | -- print(err) 45 | fiber.sleep(0.1) 46 | else 47 | -- if value % 10000 == 0 then 48 | -- log.info("done %d", value) 49 | -- end 50 | break 51 | end 52 | end 53 | end 54 | end 55 | end) 56 | end 57 | 58 | for i = 1, 10000000 do 59 | input_ch:put(i) 60 | if i % 10000 == 0 then 61 | fiber.yield() 62 | end 63 | end 64 | 65 | input_ch:close() 66 | 67 | log.info("stopping") 68 | local ok, err = producer:close() -- always stop consumer to send all pending messages before app close 69 | if err ~= nil then 70 | print(err) 71 | os.exit(1) 72 | end 73 | 74 | local duration = clock.monotonic64() - before 75 | print(string.format("done benchmark for %f seconds", tonumber(duration * 1.0 / (10 ^ 9)))) 76 | end 77 | 78 | log.info("starting benchmark") 79 | 80 | produce() 81 | -------------------------------------------------------------------------------- /cmake/FindRdKafka.cmake: -------------------------------------------------------------------------------- 1 | find_path(RDKAFKA_ROOT_DIR 2 | NAMES include/librdkafka/rdkafka.h 3 | ) 4 | 5 | find_path(RDKAFKA_INCLUDE_DIR 6 | NAMES librdkafka/rdkafka.h 7 | HINTS ${RDKAFKA_ROOT_DIR}/include 8 | ) 9 | 10 | find_library(RDKAFKA_LIBRARY 11 | NAMES ${CMAKE_SHARED_LIBRARY_PREFIX}rdkafka${CMAKE_SHARED_LIBRARY_SUFFIX} rdkafka 12 | HINTS ${RDKAFKA_ROOT_DIR}/lib 13 | ) 14 | 15 | find_library(RDKAFKA_STATIC 16 | NAMES ${CMAKE_STATIC_LIBRARY_PREFIX}rdkafka${CMAKE_STATIC_LIBRARY_SUFFIX} rdkafka 17 | HINTS ${RDKAFKA_ROOT_DIR}/lib 18 | ) 19 | 20 | include(FindPackageHandleStandardArgs) 21 | find_package_handle_standard_args(RDKAFKA DEFAULT_MSG 22 | RDKAFKA_LIBRARY 23 | RDKAFKA_INCLUDE_DIR 24 | ) 25 | 26 | mark_as_advanced( 27 | RDKAFKA_ROOT_DIR 28 | RDKAFKA_INCLUDE_DIR 29 | RDKAFKA_LIBRARY 30 | ) 31 | -------------------------------------------------------------------------------- /cmake/FindTarantool.cmake: -------------------------------------------------------------------------------- 1 | # Define GNU standard installation directories 2 | include(GNUInstallDirs) 3 | 4 | macro(extract_definition name output input) 5 | string(REGEX MATCH "#define[\t ]+${name}[\t ]+\"([^\"]*)\"" 6 | _t "${input}") 7 | string(REGEX REPLACE "#define[\t ]+${name}[\t ]+\"(.*)\"" "\\1" 8 | ${output} "${_t}") 9 | endmacro() 10 | 11 | find_path(TARANTOOL_INCLUDE_DIR tarantool/module.h 12 | HINTS ${TARANTOOL_DIR} ENV TARANTOOL_DIR 13 | PATH_SUFFIXES include 14 | ) 15 | 16 | if(TARANTOOL_INCLUDE_DIR) 17 | set(_config "-") 18 | file(READ "${TARANTOOL_INCLUDE_DIR}/tarantool/module.h" _config0) 19 | string(REPLACE "\\" "\\\\" _config ${_config0}) 20 | unset(_config0) 21 | extract_definition(PACKAGE_VERSION TARANTOOL_VERSION ${_config}) 22 | extract_definition(INSTALL_PREFIX 
_install_prefix ${_config}) 23 | unset(_config) 24 | endif() 25 | 26 | include(FindPackageHandleStandardArgs) 27 | find_package_handle_standard_args(Tarantool 28 | REQUIRED_VARS TARANTOOL_INCLUDE_DIR VERSION_VAR TARANTOOL_VERSION) 29 | if(TARANTOOL_FOUND) 30 | set(TARANTOOL_INCLUDE_DIRS "${TARANTOOL_INCLUDE_DIR}" 31 | "${TARANTOOL_INCLUDE_DIR}/tarantool/" 32 | CACHE PATH "Include directories for Tarantool") 33 | set(TARANTOOL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/tarantool" 34 | CACHE PATH "Directory for storing Lua modules written in Lua") 35 | set(TARANTOOL_INSTALL_LUADIR "${CMAKE_INSTALL_DATADIR}/tarantool" 36 | CACHE PATH "Directory for storing Lua modules written in C") 37 | 38 | if (NOT TARANTOOL_FIND_QUIETLY AND NOT FIND_TARANTOOL_DETAILS) 39 | set(FIND_TARANTOOL_DETAILS ON CACHE INTERNAL "Details about TARANTOOL") 40 | message(STATUS "Tarantool LUADIR is ${TARANTOOL_INSTALL_LUADIR}") 41 | message(STATUS "Tarantool LIBDIR is ${TARANTOOL_INSTALL_LIBDIR}") 42 | endif () 43 | endif() 44 | mark_as_advanced(TARANTOOL_INCLUDE_DIRS TARANTOOL_INSTALL_LIBDIR 45 | TARANTOOL_INSTALL_LUADIR) 46 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tarantool/tarantool:1.x-centos7 2 | 3 | RUN yum update -y 4 | 5 | RUN yum install -y cmake \ 6 | gcc \ 7 | gcc-c++ \ 8 | cyrus-sasl-lib \ 9 | openssl-libs \ 10 | openssl-devel \ 11 | tarantool-devel 12 | 13 | ADD . /opt/tarantool 14 | 15 | WORKDIR /opt/tarantool 16 | 17 | ENV STATIC_BUILD ON 18 | ENV WITH_OPENSSL_1_1 OFF 19 | 20 | RUN cmake . 21 | 22 | RUN make 23 | 24 | ENTRYPOINT tarantool /opt/tarantool/tests/app.lua 25 | -------------------------------------------------------------------------------- /examples/consumer/auto_offset_store.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local os = require('os') 3 | local log = require('log') 4 | local tnt_kafka = require('kafka') 5 | 6 | local error_callback = function(err) 7 | log.error("got error: %s", err) 8 | end 9 | local log_callback = function(fac, str, level) 10 | log.info("got log: %d - %s - %s", level, fac, str) 11 | end 12 | local rebalance_callback = function(msg) 13 | log.info("got rebalance msg: %s", json.encode(msg)) 14 | end 15 | 16 | local consumer, err = tnt_kafka.Consumer.create({ 17 | brokers = "localhost:9092", -- brokers for bootstrap 18 | options = { 19 | ["enable.auto.offset.store"] = "true", 20 | ["group.id"] = "example_consumer", 21 | ["auto.offset.reset"] = "earliest", 22 | ["enable.partition.eof"] = "false" 23 | }, -- options for librdkafka 24 | error_callback = error_callback, -- optional callback for errors 25 | log_callback = log_callback, -- optional callback for logs and debug messages 26 | rebalance_callback = rebalance_callback, -- optional callback for rebalance messages 27 | default_topic_options = { 28 | ["auto.offset.reset"] = "earliest", 29 | }, -- optional default topic options 30 | }) 31 | if err ~= nil then 32 | print(err) 33 | os.exit(1) 34 | end 35 | 36 | local err = consumer:subscribe({"test_topic"}) -- array of topics to subscribe 37 | if err ~= nil then 38 | print(err) 39 | os.exit(1) 40 | end 41 | 42 | fiber.create(function() 43 | local out, err = consumer:output() 44 | if err ~= nil then 45 | print(string.format("got fatal error '%s'", err)) 46 | return 47 | end 48 | 49 | while true do 50 | if out:is_closed() then 51 | return 52 | end 53 
| 54 | local msg = out:get() 55 | if msg ~= nil then 56 | print(string.format( 57 | "got msg with topic='%s' partition='%s' offset='%s' key='%s' value='%s'", 58 | msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value() 59 | )) 60 | end 61 | end 62 | end) 63 | 64 | fiber.sleep(10) 65 | 66 | local err = consumer:unsubscribe({"test_topic"}) -- array of topics to unsubscribe 67 | if err ~= nil then 68 | print(err) 69 | os.exit(1) 70 | end 71 | 72 | consumer:close() -- always stop consumer to commit all pending offsets before app close and free all used resources 73 | -------------------------------------------------------------------------------- /examples/consumer/manual_offset_store.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local os = require('os') 3 | local log = require('log') 4 | local tnt_kafka = require('kafka') 5 | 6 | local error_callback = function(err) 7 | log.error("got error: %s", err) 8 | end 9 | local log_callback = function(fac, str, level) 10 | log.info("got log: %d - %s - %s", level, fac, str) 11 | end 12 | local rebalance_callback = function(msg) 13 | log.info("got rebalance msg: %s", json.encode(msg)) 14 | end 15 | 16 | local consumer, err = tnt_kafka.Consumer.create({ 17 | brokers = "localhost:9092", -- brokers for bootstrap 18 | options = { 19 | ["enable.auto.offset.store"] = "false", 20 | ["group.id"] = "example_consumer", 21 | ["auto.offset.reset"] = "earliest", 22 | ["enable.partition.eof"] = "false" 23 | }, -- options for librdkafka 24 | error_callback = error_callback, -- optional callback for errors 25 | log_callback = log_callback, -- optional callback for logs and debug messages 26 | rebalance_callback = rebalance_callback, -- optional callback for rebalance messages 27 | default_topic_options = { 28 | ["auto.offset.reset"] = "earliest", 29 | }, -- optional default topic options 30 | }) 31 | if err ~= nil then 32 | print(err) 33 | os.exit(1) 34 | end 35 | 36 | local err = consumer:subscribe({"test_topic"}) -- array of topics to subscribe 37 | if err ~= nil then 38 | print(err) 39 | os.exit(1) 40 | end 41 | 42 | for i = 1, 10 do 43 | fiber.create(function() 44 | local out, err = consumer:output() 45 | if err ~= nil then 46 | print(string.format("got fatal error '%s'", err)) 47 | return 48 | end 49 | while true do 50 | if out:is_closed() then 51 | return 52 | end 53 | 54 | local msg = out:get() 55 | if msg ~= nil then 56 | print(string.format( 57 | "got msg with topic='%s' partition='%s' offset='%s' key='%s' value='%s'", 58 | msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value() 59 | )) 60 | 61 | local err = consumer:store_offset(msg) -- don't forget to commit processed messages 62 | if err ~= nil then 63 | print(string.format( 64 | "got error '%s' while commiting msg from topic '%s'", 65 | err, msg:topic() 66 | )) 67 | end 68 | end 69 | end 70 | end) 71 | end 72 | 73 | fiber.sleep(10) 74 | 75 | local err = consumer:unsubscribe({"test_topic"}) -- array of topics to unsubscribe 76 | if err ~= nil then 77 | print(err) 78 | os.exit(1) 79 | end 80 | 81 | consumer:close() -- always stop consumer to commit all pending offsets before app close and free all used resources 82 | -------------------------------------------------------------------------------- /examples/producer/async_producer.lua: -------------------------------------------------------------------------------- 1 | local os = require('os') 2 | local log = require('log') 3 | local tnt_kafka = 
require('kafka') 4 | 5 | local error_callback = function(err) 6 | log.error("got error: %s", err) 7 | end 8 | local log_callback = function(fac, str, level) 9 | log.info("got log: %d - %s - %s", level, fac, str) 10 | end 11 | 12 | local producer, err = tnt_kafka.Producer.create({ 13 | brokers = "kafka:9092", -- brokers for bootstrap 14 | options = {}, -- options for librdkafka 15 | error_callback = error_callback, -- optional callback for errors 16 | log_callback = log_callback, -- optional callback for logs and debug messages 17 | default_topic_options = { 18 | ["partitioner"] = "murmur2_random", 19 | }, -- optional default topic options 20 | }) 21 | if err ~= nil then 22 | print(err) 23 | os.exit(1) 24 | end 25 | 26 | for i = 1, 1000 do 27 | local err = producer:produce_async({ -- don't wait until the message is delivered to kafka 28 | topic = "test_topic", 29 | key = "test_key", 30 | value = "test_value" -- only strings allowed 31 | }) 32 | if err ~= nil then 33 | print(err) 34 | os.exit(1) 35 | end 36 | end 37 | 38 | producer:close() -- always stop the producer to send all pending messages and free all used resources before the app closes 39 | -------------------------------------------------------------------------------- /examples/producer/sync_producer.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local os = require('os') 3 | local log = require('log') 4 | local tnt_kafka = require('kafka') 5 | 6 | local error_callback = function(err) 7 | log.error("got error: %s", err) 8 | end 9 | local log_callback = function(fac, str, level) 10 | log.info("got log: %d - %s - %s", level, fac, str) 11 | end 12 | 13 | local producer, err = tnt_kafka.Producer.create({ 14 | brokers = "kafka:9092", -- brokers for bootstrap 15 | options = {}, -- options for librdkafka 16 | error_callback = error_callback, -- optional callback for errors 17 | log_callback = log_callback, -- optional callback for logs and debug messages 18 | default_topic_options = { 19 | ["partitioner"] = "murmur2_random", 20 | }, -- optional default topic options 21 | }) 22 | if err ~= nil then 23 | print(err) 24 | os.exit(1) 25 | end 26 | 27 | for i = 1, 1000 do 28 | fiber.create(function() 29 | local message = "test_value " .. 
tostring(i) 30 | local err = producer:produce({ -- wait until the message is delivered to kafka (using a channel under the hood) 31 | topic = "test_topic", 32 | key = "test_key", 33 | value = message -- only strings allowed 34 | }) 35 | if err ~= nil then 36 | print(string.format("got error '%s' while sending value '%s'", err, message)) 37 | else 38 | print(string.format("successfully sent value '%s'", message)) 39 | end 40 | end) 41 | end 42 | 43 | fiber.sleep(10) 44 | 45 | producer:close() -- always stop the producer to send all pending messages and free all used resources before the app closes 46 | -------------------------------------------------------------------------------- /examples/static-build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.gitlab.com/runfor/envs/centos:7-build as static-kafka 2 | 3 | RUN set -x \ 4 | && git clone --recurse-submodules https://github.com/tarantool/kafka /opt/kafka \ 5 | && wget -P /etc/yum.repos.d/ https://copr.fedorainfracloud.org/coprs/bgstack15/stackrpms/repo/epel-7/bgstack15-stackrpms-epel-7.repo \ 6 | && yum install -y tarantool tarantool-devel openssl110 7 | 8 | WORKDIR /opt/kafka 9 | 10 | RUN tarantoolctl rocks STATIC_BUILD=ON make \ 11 | && tarantoolctl rocks pack kafka 12 | 13 | FROM scratch as export 14 | COPY --from=static-kafka /opt/kafka/kafka-scm-1.linux-x86_64.rock / 15 | -------------------------------------------------------------------------------- /examples/static-build/README.md: -------------------------------------------------------------------------------- 1 | Static kafka build 2 | --- 3 | 4 | ```bash 5 | $ docker buildx build --target export --output rocks . 6 | $ ls rocks 7 | kafka-scm-1.linux-x86_64.rock 8 | ``` 9 | -------------------------------------------------------------------------------- /kafka-1.1.0-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "kafka" 2 | version = "1.1.0-0" 3 | source = { 4 | url = "git+https://github.com/tarantool/kafka.git", 5 | branch = 'master', 6 | } 7 | description = { 8 | summary = "Kafka library for Tarantool", 9 | homepage = "https://github.com/tarantool/kafka", 10 | license = "Apache", 11 | } 12 | dependencies = { 13 | "lua >= 5.1" -- actually tarantool > 1.6 14 | } 15 | external_dependencies = { 16 | TARANTOOL = { 17 | header = 'tarantool/module.h' 18 | } 19 | } 20 | build = { 21 | type = 'cmake'; 22 | variables = { 23 | CMAKE_BUILD_TYPE="RelWithDebInfo", 24 | TARANTOOL_DIR="$(TARANTOOL_DIR)", 25 | TARANTOOL_INSTALL_LIBDIR="$(LIBDIR)", 26 | TARANTOOL_INSTALL_LUADIR="$(LUADIR)", 27 | STATIC_BUILD="$(STATIC_BUILD)", 28 | WITH_OPENSSL_1_1="$(WITH_OPENSSL_1_1)" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /kafka-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "kafka" 2 | version = "scm-1" 3 | source = { 4 | url = "git+https://github.com/tarantool/kafka.git", 5 | branch = 'master', 6 | } 7 | description = { 8 | summary = "Kafka library for Tarantool", 9 | homepage = "https://github.com/tarantool/kafka", 10 | license = "Apache", 11 | } 12 | dependencies = { 13 | "lua >= 5.1" -- actually tarantool > 1.6 14 | } 15 | external_dependencies = { 16 | TARANTOOL = { 17 | header = 'tarantool/module.h' 18 | } 19 | } 20 | build = { 21 | type = 'cmake'; 22 | variables = { 23 | CMAKE_BUILD_TYPE="RelWithDebInfo", 24 | TARANTOOL_DIR="$(TARANTOOL_DIR)", 25 | 
TARANTOOL_INSTALL_LIBDIR="$(LIBDIR)", 26 | TARANTOOL_INSTALL_LUADIR="$(LUADIR)", 27 | STATIC_BUILD="$(STATIC_BUILD)", 28 | ENABLE_ASAN="$(ENABLE_ASAN)", 29 | ENABLE_UBSAN="$(ENABLE_UBSAN)", 30 | WITH_OPENSSL_1_1="$(WITH_OPENSSL_1_1)", 31 | WITH_GSSAPI="$(WITH_GSSAPI)", 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /kafka/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${RDKAFKA_INCLUDE_DIR}) 2 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) 3 | 4 | add_library(tntkafka SHARED tnt_kafka.c callbacks.c consumer.c consumer_msg.c producer.c queue.c common.c) 5 | 6 | if (SANITIZER_FLAGS) 7 | separate_arguments(SANITIZER_FLAGS UNIX_COMMAND "${SANITIZER_FLAGS}") 8 | target_compile_options(tntkafka PRIVATE ${SANITIZER_FLAGS}) 9 | target_link_options(tntkafka PRIVATE ${SANITIZER_FLAGS}) 10 | endif() 11 | 12 | if (APPLE) 13 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} \ 14 | -undefined suppress -flat_namespace") 15 | endif(APPLE) 16 | 17 | find_package(Threads REQUIRED) 18 | target_link_libraries(tntkafka ${CMAKE_THREAD_LIBS_INIT}) 19 | set_target_properties(tntkafka PROPERTIES C_VISIBILITY_PRESET hidden) 20 | 21 | target_link_libraries(tntkafka ${RDKAFKA_LIBRARY}) 22 | set_target_properties(tntkafka PROPERTIES PREFIX "" OUTPUT_NAME "tntkafka") 23 | 24 | install(TARGETS tntkafka LIBRARY DESTINATION ${TARANTOOL_INSTALL_LIBDIR}/kafka) 25 | install(FILES init.lua version.lua DESTINATION ${TARANTOOL_INSTALL_LUADIR}/kafka) 26 | -------------------------------------------------------------------------------- /kafka/callbacks.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | //////////////////////////////////////////////////////////////////////////////////////////////////// 18 | /** 19 | * Common callbacks handling 20 | */ 21 | 22 | /** 23 | * Handle logs from RDKafka 24 | */ 25 | 26 | log_msg_t * 27 | new_log_msg(int level, const char *fac, const char *buf) { 28 | log_msg_t *msg = xmalloc(sizeof(log_msg_t)); 29 | msg->level = level; 30 | msg->fac = xmalloc(sizeof(char) * strlen(fac) + 1); 31 | strcpy(msg->fac, fac); 32 | msg->buf = xmalloc(sizeof(char) * strlen(buf) + 1); 33 | strcpy(msg->buf, buf); 34 | return msg; 35 | } 36 | 37 | void 38 | destroy_log_msg(log_msg_t *msg) { 39 | if (msg->fac != NULL) 40 | free(msg->fac); 41 | if (msg->buf != NULL) 42 | free(msg->buf); 43 | free(msg); 44 | } 45 | 46 | void 47 | log_callback(const rd_kafka_t *rd_kafka, int level, const char *fac, const char *buf) { 48 | event_queues_t *event_queues = rd_kafka_opaque(rd_kafka); 49 | if (event_queues != NULL && event_queues->queues[LOG_QUEUE] != NULL) { 50 | log_msg_t *msg = new_log_msg(level, fac, buf); 51 | if (msg != NULL && queue_push(event_queues->queues[LOG_QUEUE], msg) != 0) { 52 | destroy_log_msg(msg); 53 | } 54 | } 55 | } 56 | 57 | int 58 | stats_callback(rd_kafka_t *rd_kafka, char *json, size_t json_len, void *opaque) { 59 | (void)opaque; 60 | (void)json_len; 61 | event_queues_t *event_queues = rd_kafka_opaque(rd_kafka); 62 | if (event_queues != NULL && event_queues->queues[STATS_QUEUE] != NULL) { 63 | if (json != NULL) { 64 | if (queue_push(event_queues->queues[STATS_QUEUE], json) != 0) 65 | return 0; // destroy json after return 66 | return 1; // json 
should be freed manually 67 | } 68 | } 69 | return 0; 70 | } 71 | 72 | /** 73 | * Handle errors from RDKafka 74 | */ 75 | 76 | error_msg_t * 77 | new_error_msg(int err, const char *reason) { 78 | error_msg_t *msg = xmalloc(sizeof(error_msg_t)); 79 | msg->err = err; 80 | msg->reason = xmalloc(sizeof(char) * strlen(reason) + 1); 81 | strcpy(msg->reason, reason); 82 | return msg; 83 | } 84 | 85 | void 86 | destroy_error_msg(error_msg_t *msg) { 87 | if (msg->reason != NULL) 88 | free(msg->reason); 89 | free(msg); 90 | } 91 | 92 | void 93 | error_callback(rd_kafka_t *UNUSED(rd_kafka), int err, const char *reason, void *opaque) { 94 | event_queues_t *event_queues = opaque; 95 | if (event_queues != NULL && event_queues->queues[ERROR_QUEUE] != NULL) { 96 | error_msg_t *msg = new_error_msg(err, reason); 97 | if (msg != NULL && queue_push(event_queues->queues[ERROR_QUEUE], msg) != 0) 98 | destroy_error_msg(msg); 99 | } 100 | } 101 | 102 | int 103 | push_log_cb_args(struct lua_State *L, const log_msg_t *msg) 104 | { 105 | lua_pushstring(L, msg->fac); 106 | lua_pushstring(L, msg->buf); 107 | lua_pushinteger(L, msg->level); 108 | return 3; 109 | } 110 | 111 | int 112 | push_stats_cb_args(struct lua_State *L, const char *msg) 113 | { 114 | lua_pushstring(L, msg); 115 | return 1; 116 | } 117 | 118 | int 119 | push_errors_cb_args(struct lua_State *L, const error_msg_t *msg) 120 | { 121 | lua_pushstring(L, msg->reason); 122 | return 1; 123 | } 124 | 125 | /** 126 | * Handle message delivery reports from RDKafka 127 | */ 128 | 129 | dr_msg_t * 130 | new_dr_msg(int dr_callback, int err) { 131 | dr_msg_t *dr_msg; 132 | dr_msg = xmalloc(sizeof(dr_msg_t)); 133 | dr_msg->dr_callback = dr_callback; 134 | dr_msg->err = err; 135 | return dr_msg; 136 | } 137 | 138 | void 139 | destroy_dr_msg(dr_msg_t *dr_msg) { 140 | free(dr_msg); 141 | } 142 | 143 | void 144 | msg_delivery_callback(rd_kafka_t *UNUSED(producer), const rd_kafka_message_t *msg, void *opaque) { 145 | event_queues_t *event_queues = opaque; 146 | if (msg->_private != NULL && event_queues != NULL && event_queues->delivery_queue != NULL) { 147 | dr_msg_t *dr_msg = msg->_private; 148 | if (dr_msg != NULL) { 149 | if (msg->err != RD_KAFKA_RESP_ERR_NO_ERROR) { 150 | dr_msg->err = msg->err; 151 | } 152 | queue_push(event_queues->delivery_queue, dr_msg); 153 | } 154 | } 155 | } 156 | 157 | /** 158 | * Handle rebalance callbacks from RDKafka 159 | */ 160 | 161 | rebalance_msg_t * 162 | new_rebalance_revoke_msg(rd_kafka_topic_partition_list_t *revoked) { 163 | rebalance_msg_t *msg = xmalloc(sizeof(rebalance_msg_t)); 164 | pthread_mutex_t lock; 165 | if (pthread_mutex_init(&lock, NULL) != 0) { 166 | free(msg); 167 | return NULL; 168 | } 169 | 170 | msg->lock = lock; 171 | 172 | pthread_cond_t sync; 173 | if (pthread_cond_init(&sync, NULL) != 0) { 174 | free(msg); 175 | return NULL; 176 | } 177 | 178 | msg->sync = sync; 179 | msg->revoked = revoked; 180 | msg->assigned = NULL; 181 | msg->err = RD_KAFKA_RESP_ERR_NO_ERROR; 182 | return msg; 183 | } 184 | 185 | rebalance_msg_t * 186 | new_rebalance_assign_msg(rd_kafka_topic_partition_list_t *assigned) { 187 | rebalance_msg_t *msg = xmalloc(sizeof(rebalance_msg_t)); 188 | pthread_mutex_t lock; 189 | if (pthread_mutex_init(&lock, NULL) != 0) { 190 | free(msg); 191 | return NULL; 192 | } 193 | 194 | msg->lock = lock; 195 | 196 | pthread_cond_t sync; 197 | if (pthread_cond_init(&sync, NULL) != 0) { 198 | free(msg); 199 | return NULL; 200 | } 201 | 202 | msg->sync = sync; 203 | msg->revoked = NULL; 204 | msg->assigned = 
assigned; 205 | msg->err = RD_KAFKA_RESP_ERR_NO_ERROR; 206 | return msg; 207 | } 208 | 209 | rebalance_msg_t * 210 | new_rebalance_error_msg(rd_kafka_resp_err_t err) { 211 | rebalance_msg_t *msg = xmalloc(sizeof(rebalance_msg_t)); 212 | pthread_mutex_t lock; 213 | if (pthread_mutex_init(&lock, NULL) != 0) { 214 | free(msg); 215 | return NULL; 216 | } 217 | 218 | msg->lock = lock; 219 | 220 | pthread_cond_t sync; 221 | if (pthread_cond_init(&sync, NULL) != 0) { 222 | free(msg); 223 | return NULL; 224 | } 225 | 226 | msg->sync = sync; 227 | msg->revoked = NULL; 228 | msg->assigned = NULL; 229 | msg->err = err; 230 | return msg; 231 | } 232 | 233 | void 234 | destroy_rebalance_msg(rebalance_msg_t *rebalance_msg) { 235 | pthread_mutex_destroy(&rebalance_msg->lock); 236 | pthread_cond_destroy(&rebalance_msg->sync); 237 | free(rebalance_msg); 238 | } 239 | 240 | void 241 | rebalance_callback(rd_kafka_t *consumer, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque) { 242 | event_queues_t *event_queues = opaque; 243 | rebalance_msg_t *msg = NULL; 244 | switch (err) 245 | { 246 | case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: 247 | msg = new_rebalance_assign_msg(partitions); 248 | if (msg != NULL) { 249 | 250 | pthread_mutex_lock(&msg->lock); 251 | 252 | if (queue_push(event_queues->queues[REBALANCE_QUEUE], msg) == 0) { 253 | // waiting while main TX thread invokes rebalance callback 254 | pthread_cond_wait(&msg->sync, &msg->lock); 255 | } 256 | 257 | pthread_mutex_unlock(&msg->lock); 258 | 259 | destroy_rebalance_msg(msg); 260 | } 261 | rd_kafka_assign(consumer, partitions); 262 | break; 263 | 264 | case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: 265 | rd_kafka_commit(consumer, partitions, 0); // sync commit 266 | 267 | msg = new_rebalance_revoke_msg(partitions); 268 | if (msg != NULL) { 269 | 270 | pthread_mutex_lock(&msg->lock); 271 | 272 | if (queue_push(event_queues->queues[REBALANCE_QUEUE], msg) == 0) { 273 | // waiting while main TX thread invokes rebalance callback 274 | pthread_cond_wait(&msg->sync, &msg->lock); 275 | } 276 | 277 | pthread_mutex_unlock(&msg->lock); 278 | 279 | destroy_rebalance_msg(msg); 280 | } 281 | 282 | rd_kafka_assign(consumer, NULL); 283 | break; 284 | 285 | default: 286 | msg = new_rebalance_error_msg(err); 287 | if (msg != NULL) { 288 | 289 | pthread_mutex_lock(&msg->lock); 290 | 291 | if (queue_push(event_queues->queues[REBALANCE_QUEUE], msg) == 0) { 292 | // waiting while main TX thread invokes rebalance callback 293 | pthread_cond_wait(&msg->sync, &msg->lock); 294 | } 295 | 296 | pthread_mutex_unlock(&msg->lock); 297 | 298 | destroy_rebalance_msg(msg); 299 | } 300 | rd_kafka_assign(consumer, NULL); 301 | break; 302 | } 303 | } 304 | 305 | /** 306 | * Structure which contains all queues for communication between main TX thread and 307 | * RDKafka callbacks from background threads 308 | */ 309 | 310 | event_queues_t * 311 | new_event_queues() { 312 | event_queues_t *event_queues = xcalloc(1, sizeof(event_queues_t)); 313 | for (int i = 0; i < MAX_QUEUE; i++) 314 | event_queues->cb_refs[i] = LUA_REFNIL; 315 | return event_queues; 316 | } 317 | 318 | void 319 | destroy_event_queues(struct lua_State *L, event_queues_t *event_queues) { 320 | if (event_queues == NULL) 321 | return; 322 | if (event_queues->consume_queue != NULL) { 323 | msg_t *msg = NULL; 324 | while (true) { 325 | msg = queue_pop(event_queues->consume_queue); 326 | if (msg == NULL) 327 | break; 328 | destroy_consumer_msg(msg); 329 | } 330 | 
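/* any messages still sitting in the consume queue were destroyed above, so the queue itself can now be freed */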
destroy_queue(event_queues->consume_queue); 331 | } 332 | if (event_queues->delivery_queue != NULL) { 333 | dr_msg_t *msg = NULL; 334 | while (true) { 335 | msg = queue_pop(event_queues->delivery_queue); 336 | if (msg == NULL) 337 | break; 338 | luaL_unref(L, LUA_REGISTRYINDEX, msg->dr_callback); 339 | destroy_dr_msg(msg); 340 | } 341 | destroy_queue(event_queues->delivery_queue); 342 | } 343 | 344 | for (int i = 0; i < MAX_QUEUE; i++) { 345 | if (event_queues->queues[i] == NULL) 346 | continue; 347 | while (true) { 348 | void *msg = queue_pop(event_queues->queues[i]); 349 | if (msg == NULL) 350 | break; 351 | 352 | switch (i) { 353 | case LOG_QUEUE: 354 | destroy_log_msg(msg); 355 | break; 356 | case STATS_QUEUE: 357 | break; 358 | case ERROR_QUEUE: 359 | destroy_error_msg(msg); 360 | break; 361 | case REBALANCE_QUEUE: { 362 | rebalance_msg_t *rebalance_msg = msg; 363 | pthread_mutex_lock(&rebalance_msg->lock); 364 | // allowing background thread proceed rebalancing 365 | pthread_cond_signal(&rebalance_msg->sync); 366 | pthread_mutex_unlock(&rebalance_msg->lock); 367 | break; 368 | } 369 | } 370 | } 371 | destroy_queue(event_queues->queues[i]); 372 | } 373 | 374 | for (int i = 0; i < MAX_QUEUE; i++) 375 | luaL_unref(L, LUA_REGISTRYINDEX, event_queues->cb_refs[i]); 376 | 377 | free(event_queues); 378 | } 379 | -------------------------------------------------------------------------------- /kafka/callbacks.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_CALLBACKS_H 2 | #define TNT_KAFKA_CALLBACKS_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include 14 | 15 | //////////////////////////////////////////////////////////////////////////////////////////////////// 16 | /** 17 | * Common callbacks handling 18 | */ 19 | 20 | /** 21 | * Handle logs from RDKafka 22 | */ 23 | 24 | typedef struct { 25 | int level; 26 | char *fac; 27 | char *buf; 28 | } log_msg_t; 29 | 30 | log_msg_t * 31 | new_log_msg(int level, const char *fac, const char *buf); 32 | 33 | void 34 | destroy_log_msg(log_msg_t *msg); 35 | 36 | void 37 | log_callback(const rd_kafka_t *rd_kafka, int level, const char *fac, const char *buf); 38 | 39 | int 40 | push_log_cb_args(struct lua_State *L, const log_msg_t *msg); 41 | 42 | /** 43 | * Handle stats from RDKafka 44 | */ 45 | 46 | int 47 | stats_callback(rd_kafka_t *rd_kafka, char *json, size_t json_len, void *opaque); 48 | 49 | int 50 | push_stats_cb_args(struct lua_State *L, const char *msg); 51 | 52 | /** 53 | * Handle errors from RDKafka 54 | */ 55 | 56 | typedef struct { 57 | int err; 58 | char *reason; 59 | } error_msg_t; 60 | 61 | error_msg_t * 62 | new_error_msg(int err, const char *reason); 63 | 64 | void 65 | destroy_error_msg(error_msg_t *msg); 66 | 67 | void 68 | error_callback(rd_kafka_t *UNUSED(rd_kafka), int err, const char *reason, void *opaque); 69 | 70 | int 71 | push_errors_cb_args(struct lua_State *L, const error_msg_t *msg); 72 | 73 | /** 74 | * Handle message delivery reports from RDKafka 75 | */ 76 | 77 | typedef struct { 78 | int dr_callback; 79 | int err; 80 | } dr_msg_t; 81 | 82 | dr_msg_t * 83 | new_dr_msg(int dr_callback, int err); 84 | 85 | void 86 | destroy_dr_msg(dr_msg_t *dr_msg); 87 | 88 | void 89 | msg_delivery_callback(rd_kafka_t *UNUSED(producer), const rd_kafka_message_t *msg, void *opaque); 90 | 91 | 92 | /** 93 | * Handle rebalance callbacks from RDKafka 94 | */ 95 | 96 | typedef struct { 97 | pthread_mutex_t lock; 98 | 
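/* lock and sync let the librdkafka thread that runs rebalance_callback() block until the TX thread has handled this message and signals sync (see callbacks.c) */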
pthread_cond_t sync; 99 | rd_kafka_topic_partition_list_t *revoked; 100 | rd_kafka_topic_partition_list_t *assigned; 101 | rd_kafka_resp_err_t err; 102 | } rebalance_msg_t; 103 | 104 | rebalance_msg_t *new_rebalance_revoke_msg(rd_kafka_topic_partition_list_t *revoked); 105 | 106 | rebalance_msg_t *new_rebalance_assign_msg(rd_kafka_topic_partition_list_t *assigned); 107 | 108 | rebalance_msg_t *new_rebalance_error_msg(rd_kafka_resp_err_t err); 109 | 110 | void destroy_rebalance_msg(rebalance_msg_t *rebalance_msg); 111 | 112 | void rebalance_callback(rd_kafka_t *consumer, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque); 113 | 114 | /** 115 | * Structure which contains all queues for communication between main TX thread and 116 | * RDKafka callbacks from background threads 117 | */ 118 | 119 | enum { 120 | LOG_QUEUE, 121 | STATS_QUEUE, 122 | ERROR_QUEUE, 123 | REBALANCE_QUEUE, 124 | MAX_QUEUE, 125 | }; 126 | 127 | RD_UNUSED 128 | static const char *const queue2str[] = { 129 | [LOG_QUEUE] = "log_callback", 130 | [STATS_QUEUE] = "stats_callback", 131 | [ERROR_QUEUE] = "error_callback", 132 | [REBALANCE_QUEUE] = "rebalance_callback", 133 | }; 134 | 135 | #define LUA_RDKAFKA_POLL_FUNC(rd_type, name, queue_no, destroy_fn, push_args_fn) \ 136 | int \ 137 | lua_##rd_type##_##name(struct lua_State *L) { \ 138 | if (lua_gettop(L) != 2) \ 139 | return luaL_error(L, "Usage: count, err = " #rd_type ":" #name "(limit)"); \ 140 | \ 141 | rd_type##_t *rd = lua_check_##rd_type(L, 1); \ 142 | if (rd->event_queues == NULL || \ 143 | rd->event_queues->queues[queue_no] == NULL || \ 144 | rd->event_queues->cb_refs[queue_no] == LUA_REFNIL) { \ 145 | lua_pushnumber(L, 0); \ 146 | lua_pushliteral(L, #rd_type "." #name " error: callback is not set"); \ 147 | return 2; \ 148 | } \ 149 | \ 150 | int limit = lua_tonumber(L, 2); \ 151 | void *msg = NULL; \ 152 | int count = 0; \ 153 | char *err_str = NULL; \ 154 | while (count < limit) { \ 155 | msg = queue_pop(rd->event_queues->queues[queue_no]); \ 156 | if (msg == NULL) \ 157 | break; \ 158 | \ 159 | count++; \ 160 | lua_rawgeti(L, LUA_REGISTRYINDEX, rd->event_queues->cb_refs[queue_no]); \ 161 | int args_count = push_args_fn(L, msg); \ 162 | if (lua_pcall(L, args_count, 0, 0) != 0) /* call (N arguments, 0 result) */ \ 163 | err_str = (char *)lua_tostring(L, -1); \ 164 | destroy_fn(msg); \ 165 | \ 166 | if (err_str != NULL) \ 167 | break; \ 168 | } \ 169 | lua_pushinteger(L, count); \ 170 | if (err_str != NULL) \ 171 | lua_pushstring(L, err_str); \ 172 | else \ 173 | lua_pushnil(L); \ 174 | \ 175 | return 2; \ 176 | } 177 | 178 | typedef struct { 179 | queue_t *consume_queue; 180 | queue_t *delivery_queue; 181 | 182 | queue_t *queues[MAX_QUEUE]; 183 | int cb_refs[MAX_QUEUE]; 184 | } event_queues_t; 185 | 186 | event_queues_t *new_event_queues(); 187 | 188 | void destroy_event_queues(struct lua_State *L, event_queues_t *event_queues); 189 | 190 | #endif //TNT_KAFKA_CALLBACKS_H 191 | -------------------------------------------------------------------------------- /kafka/common.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | const char* const consumer_label = "__tnt_kafka_consumer"; 8 | const char* const consumer_msg_label = "__tnt_kafka_consumer_msg"; 9 | const char* const producer_label = "__tnt_kafka_producer"; 10 | 11 | /** 12 | * Push native lua error with code -3 13 | */ 14 | int 15 | lua_push_error(struct lua_State *L) { 
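/* expects the error message on top of the stack: the code -3 is pushed, moved below the message, and both values are returned to the caller */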
16 | lua_pushnumber(L, -3); 17 | lua_insert(L, -2); 18 | return 2; 19 | } 20 | 21 | /** 22 | * Push current librdkafka version 23 | */ 24 | int 25 | lua_librdkafka_version(struct lua_State *L) { 26 | const char *version = rd_kafka_version_str(); 27 | lua_pushstring(L, version); 28 | return 1; 29 | } 30 | 31 | int 32 | lua_librdkafka_dump_conf(struct lua_State *L, rd_kafka_t *rk) { 33 | if (rk != NULL) { 34 | const rd_kafka_conf_t *conf = rd_kafka_conf(rk); 35 | if (conf == NULL) 36 | return 0; 37 | 38 | size_t cntp = 0; 39 | const char **confstr = rd_kafka_conf_dump((rd_kafka_conf_t *)conf, &cntp); 40 | if (confstr == NULL) 41 | return 0; 42 | 43 | lua_newtable(L); 44 | for (size_t i = 0; i < cntp; i += 2) { 45 | lua_pushstring(L, confstr[i]); 46 | lua_pushstring(L, confstr[i + 1]); 47 | lua_settable(L, -3); 48 | } 49 | rd_kafka_conf_dump_free(confstr, cntp); 50 | return 1; 51 | } 52 | return 0; 53 | } 54 | 55 | static ssize_t 56 | wait_librdkafka_metadata(va_list args) { 57 | rd_kafka_t *rk = va_arg(args, rd_kafka_t *); 58 | int all_topics = va_arg(args, int); 59 | rd_kafka_topic_t *only_rkt = va_arg(args, rd_kafka_topic_t *); 60 | const struct rd_kafka_metadata **metadatap = va_arg(args, const struct rd_kafka_metadata **); 61 | int timeout_ms = va_arg(args, int); 62 | return rd_kafka_metadata(rk, all_topics, only_rkt, metadatap, timeout_ms); 63 | } 64 | 65 | int 66 | lua_librdkafka_metadata(struct lua_State *L, rd_kafka_t *rk, rd_kafka_topic_t *only_rkt, int timeout_ms) { 67 | assert(rk != NULL); 68 | 69 | int all_topics = 0; 70 | if (only_rkt == NULL) 71 | all_topics = 1; 72 | 73 | const struct rd_kafka_metadata *metadatap; 74 | rd_kafka_resp_err_t err = coio_call(wait_librdkafka_metadata, rk, all_topics, only_rkt, &metadatap, timeout_ms); 75 | 76 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 77 | lua_pushnil(L); 78 | lua_pushstring(L, rd_kafka_err2str(err)); 79 | return 2; 80 | } 81 | 82 | lua_newtable(L); // metadata 83 | 84 | lua_pushliteral(L, "brokers"); // metadata.brokers 85 | lua_createtable(L, metadatap->broker_cnt, 0); 86 | for (int i = 0; i < metadatap->broker_cnt; i++) { 87 | lua_pushnumber(L, i + 1); // metadata.brokers[i] 88 | lua_createtable(L, 0, 3); 89 | 90 | lua_pushliteral(L, "id"); // metadata.brokers[i].id 91 | lua_pushnumber(L, metadatap->brokers[i].id); 92 | lua_settable(L, -3); 93 | 94 | lua_pushliteral(L, "port"); // metadata.brokers[i].port 95 | lua_pushnumber(L, metadatap->brokers[i].port); 96 | lua_settable(L, -3); 97 | 98 | lua_pushliteral(L, "host"); // metadata.brokers[i].host 99 | lua_pushstring(L, metadatap->brokers[i].host); 100 | lua_settable(L, -3); 101 | 102 | lua_settable(L, -3); // metadata.brokers[i] 103 | } 104 | 105 | lua_settable(L, -3); // metadata.brokers 106 | 107 | lua_pushliteral(L, "topics"); // metadata.topics 108 | lua_createtable(L, metadatap->topic_cnt, 0); 109 | for (int i = 0; i < metadatap->topic_cnt; i++) { 110 | lua_pushnumber(L, i + 1); // metadata.topics[i] 111 | lua_createtable(L, 0, 4); 112 | 113 | lua_pushliteral(L, "topic"); // metadata.topics[i].topic 114 | lua_pushstring(L, metadatap->topics[i].topic); 115 | lua_settable(L, -3); 116 | 117 | lua_pushliteral(L, "partitions"); // metadata.topics[i].partitions 118 | lua_createtable(L, 0, metadatap->topics[i].partition_cnt); 119 | 120 | for (int j = 0; j < metadatap->topics[i].partition_cnt; j++) { 121 | lua_pushnumber(L, j + 1); // metadata.topics[i].partitions[j] 122 | lua_createtable(L, 0, 8); 123 | 124 | lua_pushliteral(L, "id"); // metadata.topics[i].partitions[j].id 125 | 
lua_pushnumber(L, metadatap->topics[i].partitions[j].id); 126 | lua_settable(L, -3); 127 | 128 | lua_pushliteral(L, "leader"); // metadata.topics[i].partitions[j].leader 129 | lua_pushnumber(L, metadatap->topics[i].partitions[j].leader); 130 | lua_settable(L, -3); 131 | 132 | if (metadatap->topics[i].partitions[j].err != RD_KAFKA_RESP_ERR_NO_ERROR) { 133 | lua_pushliteral(L, "error_code"); // metadata.topics[i].partitions[j].error_code 134 | lua_pushnumber(L, metadatap->topics[i].partitions[j].err); 135 | lua_settable(L, -3); 136 | 137 | lua_pushliteral(L, "error"); // metadata.topics[i].partitions[j].error 138 | lua_pushstring(L, rd_kafka_err2str(metadatap->topics[i].partitions[j].err)); 139 | lua_settable(L, -3); 140 | } 141 | 142 | lua_pushliteral(L, "isr"); // metadata.topics[i].partitions[j].isr 143 | lua_createtable(L, metadatap->topics[i].partitions[j].isr_cnt, 0); 144 | for (int k = 0; k < metadatap->topics[i].partitions[j].isr_cnt; k++) { 145 | lua_pushnumber(L, k + 1); // metadata.topics[i].partitions[j].isr[k] 146 | lua_pushnumber(L, metadatap->topics[i].partitions[j].isrs[k]); 147 | lua_settable(L, -3); 148 | } 149 | lua_settable(L, -3); // metadata.topics[i].partitions[j].isr 150 | 151 | lua_pushliteral(L, "replicas"); // metadata.topics[i].partitions[j].replicas 152 | lua_createtable(L, metadatap->topics[i].partitions[j].replica_cnt, 0); 153 | for (int k = 0; k < metadatap->topics[i].partitions[j].replica_cnt; k++) { 154 | lua_pushnumber(L, k + 1); // metadata.topics[i].partitions[j].replicas[k] 155 | lua_pushnumber(L, metadatap->topics[i].partitions[j].replicas[k]); 156 | lua_settable(L, -3); 157 | } 158 | lua_settable(L, -3); // metadata.topics[i].partitions[j].replicas 159 | lua_settable(L, -3); // metadata.topics[i].partitions[j] 160 | } 161 | 162 | lua_settable(L, -3); // metadata.topics[i].partitions 163 | 164 | if (metadatap->topics[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) { 165 | lua_pushliteral(L, "error_code"); // metadata.topics[i].error_code 166 | lua_pushnumber(L, metadatap->topics[i].err); 167 | lua_settable(L, -3); 168 | 169 | lua_pushliteral(L, "error"); // metadata.topics[i].error 170 | lua_pushstring(L, rd_kafka_err2str(metadatap->topics[i].err)); 171 | lua_settable(L, -3); 172 | } 173 | 174 | lua_settable(L, -3); // metadata.topics[i] 175 | } 176 | lua_settable(L, -3); // metadata.topics 177 | 178 | lua_pushliteral(L, "orig_broker_id"); // metadata.orig_broker_id 179 | lua_pushinteger(L, metadatap->orig_broker_id); 180 | lua_settable(L, -3); 181 | 182 | lua_pushliteral(L, "orig_broker_name"); // metadata.orig_broker_name 183 | lua_pushstring(L, metadatap->orig_broker_name); 184 | lua_settable(L, -3); 185 | 186 | rd_kafka_metadata_destroy(metadatap); 187 | return 1; 188 | } 189 | 190 | static ssize_t 191 | wait_librdkafka_list_groups(va_list args) { 192 | rd_kafka_t *rk = va_arg(args, rd_kafka_t *); 193 | const char *group = va_arg(args, const char *); 194 | const struct rd_kafka_group_list **grplistp = va_arg(args, const struct rd_kafka_group_list **); 195 | int timeout_ms = va_arg(args, int); 196 | return rd_kafka_list_groups(rk, group, grplistp, timeout_ms); 197 | } 198 | 199 | int 200 | lua_librdkafka_list_groups(struct lua_State *L, rd_kafka_t *rk, const char *group, int timeout_ms) { 201 | const struct rd_kafka_group_list *grplistp; 202 | rd_kafka_resp_err_t err = coio_call(wait_librdkafka_list_groups, rk, group, &grplistp, timeout_ms); 203 | 204 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 205 | lua_pushnil(L); 206 | lua_pushstring(L, 
rd_kafka_err2str(err)); 207 | return 2; 208 | } 209 | 210 | lua_createtable(L, grplistp->group_cnt, 0); 211 | for (int i = 0; i < grplistp->group_cnt; i++) { 212 | lua_pushnumber(L, i + 1); 213 | lua_createtable(L, 0, 8); 214 | 215 | lua_pushliteral(L, "broker"); 216 | lua_createtable(L, 0, 3); 217 | 218 | lua_pushliteral(L, "id"); 219 | lua_pushnumber(L, grplistp->groups[i].broker.id); 220 | lua_settable(L, -3); 221 | 222 | lua_pushliteral(L, "port"); 223 | lua_pushnumber(L, grplistp->groups[i].broker.port); 224 | lua_settable(L, -3); 225 | 226 | lua_pushliteral(L, "host"); 227 | lua_pushstring(L, grplistp->groups[i].broker.host); 228 | lua_settable(L, -3); 229 | 230 | lua_settable(L, -3); 231 | 232 | lua_pushstring(L, "group"); 233 | lua_pushstring(L, grplistp->groups[i].group); 234 | lua_settable(L, -3); 235 | 236 | if (grplistp->groups[i].err != RD_KAFKA_RESP_ERR_NO_ERROR) { 237 | lua_pushliteral(L, "error_code"); 238 | lua_pushnumber(L, grplistp->groups[i].err); 239 | lua_settable(L, -3); 240 | 241 | lua_pushliteral(L, "error"); 242 | lua_pushstring(L, rd_kafka_err2str(grplistp->groups[i].err)); 243 | lua_settable(L, -3); 244 | } 245 | 246 | lua_pushliteral(L, "state"); 247 | lua_pushstring(L, grplistp->groups[i].state); 248 | lua_settable(L, -3); 249 | 250 | lua_pushliteral(L, "protocol_type"); 251 | lua_pushstring(L, grplistp->groups[i].protocol_type); 252 | lua_settable(L, -3); 253 | 254 | lua_pushliteral(L, "protocol"); 255 | lua_pushstring(L, grplistp->groups[i].protocol); 256 | lua_settable(L, -3); 257 | 258 | lua_pushliteral(L, "members"); 259 | lua_createtable(L, grplistp->groups[i].member_cnt, 0); 260 | for (int j = 0; j < grplistp->groups[i].member_cnt; j++) { 261 | lua_pushnumber(L, j + 1); 262 | lua_createtable(L, 0, 8); 263 | 264 | lua_pushliteral(L, "member_id"); 265 | lua_pushstring(L, grplistp->groups[i].members[j].member_id); 266 | lua_settable(L, -3); 267 | 268 | lua_pushliteral(L, "client_id"); 269 | lua_pushstring(L, grplistp->groups[i].members[j].client_id); 270 | lua_settable(L, -3); 271 | 272 | lua_pushliteral(L, "client_host"); 273 | lua_pushstring(L, grplistp->groups[i].members[j].client_host); 274 | lua_settable(L, -3); 275 | 276 | lua_pushliteral(L, "member_metadata"); 277 | lua_pushlstring(L, 278 | grplistp->groups[i].members[j].member_metadata, 279 | grplistp->groups[i].members[j].member_metadata_size); 280 | lua_settable(L, -3); 281 | 282 | lua_pushliteral(L, "member_assignment"); 283 | lua_pushlstring(L, 284 | grplistp->groups[i].members[j].member_assignment, 285 | grplistp->groups[i].members[j].member_assignment_size); 286 | lua_settable(L, -3); 287 | 288 | lua_settable(L, -3); 289 | } 290 | lua_settable(L, -3); 291 | 292 | lua_settable(L, -3); 293 | } 294 | 295 | rd_kafka_group_list_destroy(grplistp); 296 | return 1; 297 | } 298 | 299 | void 300 | set_thread_name(const char *name) 301 | #ifdef __linux__ 302 | { 303 | int rc = pthread_setname_np(pthread_self(), name); 304 | (void)rc; 305 | assert(rc == 0); 306 | } 307 | #elif __APPLE__ 308 | { 309 | pthread_setname_np(name); 310 | } 311 | #else 312 | { 313 | (void)name; 314 | } 315 | #endif 316 | 317 | static rd_kafka_resp_err_t 318 | kafka_pause_resume(rd_kafka_t *rk, 319 | rd_kafka_resp_err_t (*fun)(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions)) { 320 | rd_kafka_topic_partition_list_t *partitions = NULL; 321 | rd_kafka_resp_err_t err = rd_kafka_assignment(rk, &partitions); 322 | 323 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) 324 | return err; 325 | 326 | err = fun(rk, partitions); 327 | 
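/* the assignment list returned by rd_kafka_assignment() is owned by the caller and must be destroyed after pausing/resuming */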
rd_kafka_topic_partition_list_destroy(partitions); 328 | return err; 329 | } 330 | 331 | rd_kafka_resp_err_t 332 | kafka_pause(rd_kafka_t *rk) { 333 | return kafka_pause_resume(rk, rd_kafka_pause_partitions); 334 | } 335 | 336 | rd_kafka_resp_err_t 337 | kafka_resume(rd_kafka_t *rk) { 338 | return kafka_pause_resume(rk, rd_kafka_resume_partitions); 339 | } 340 | -------------------------------------------------------------------------------- /kafka/common.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_COMMON_H 2 | #define TNT_KAFKA_COMMON_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | 14 | #ifdef UNUSED 15 | #elif defined(__GNUC__) 16 | # define UNUSED(x) UNUSED_ ## x __attribute__((unused)) 17 | #elif defined(__LCLINT__) 18 | # define UNUSED(x) /*@unused@*/ x 19 | #else 20 | # define UNUSED(x) x 21 | #endif 22 | 23 | /** 24 | * You may use likely()/unlikely() to provide the compiler with branch 25 | * prediction information. 26 | * 27 | * @sa https://en.cppreference.com/w/cpp/language/attributes/likely 28 | */ 29 | #if __has_builtin(__builtin_expect) || defined(__GNUC__) 30 | # define likely(x) __builtin_expect(!! (x),1) 31 | # define unlikely(x) __builtin_expect(!! (x),0) 32 | #else 33 | # define likely(x) (x) 34 | # define unlikely(x) (x) 35 | #endif 36 | 37 | /** 38 | * An x* variant of a memory allocation function calls the original function 39 | * and panics if it fails (i.e. it should never return NULL). 40 | */ 41 | #define xalloc_impl(size, func, args...) \ 42 | ({ \ 43 | void *ret = func(args); \ 44 | if (unlikely(ret == NULL)) { \ 45 | fprintf(stderr, "Can't allocate %zu bytes at %s:%d", \ 46 | (size_t)(size), __FILE__, __LINE__); \ 47 | exit(EXIT_FAILURE); \ 48 | } \ 49 | ret; \ 50 | }) 51 | 52 | #define xmalloc(size) xalloc_impl((size), malloc, (size)) 53 | #define xcalloc(n, size) xalloc_impl((n) * (size), calloc, (n), (size)) 54 | #define xrealloc(ptr, size) xalloc_impl((size), realloc, (ptr), (size)) 55 | 56 | extern const char* const consumer_label; 57 | extern const char* const consumer_msg_label; 58 | extern const char* const producer_label; 59 | 60 | int 61 | lua_librdkafka_version(struct lua_State *L); 62 | 63 | int 64 | lua_librdkafka_dump_conf(struct lua_State *L, rd_kafka_t *rk); 65 | 66 | int 67 | lua_librdkafka_metadata(struct lua_State *L, rd_kafka_t *rk, rd_kafka_topic_t *only_rkt, int timeout_ms); 68 | 69 | int 70 | lua_librdkafka_list_groups(struct lua_State *L, rd_kafka_t *rk, const char *group, int timeout_ms); 71 | 72 | /** 73 | * Push native lua error with code -3 74 | */ 75 | int 76 | lua_push_error(struct lua_State *L); 77 | 78 | void 79 | set_thread_name(const char *name); 80 | 81 | rd_kafka_resp_err_t 82 | kafka_pause(rd_kafka_t *rk); 83 | 84 | rd_kafka_resp_err_t 85 | kafka_resume(rd_kafka_t *rk); 86 | 87 | #endif //TNT_KAFKA_COMMON_H 88 | -------------------------------------------------------------------------------- /kafka/consumer.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_CONSUMER_H 2 | #define TNT_KAFKA_CONSUMER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | int 9 | lua_consumer_subscribe(struct lua_State *L); 10 | 11 | int 12 | lua_consumer_unsubscribe(struct lua_State *L); 13 | 14 | int 15 | lua_consumer_tostring(struct lua_State *L); 16 | 17 | int 18 | lua_consumer_poll_msg(struct lua_State *L); 19 | 20 | int 21 | lua_consumer_poll_logs(struct 
lua_State *L); 22 | 23 | int 24 | lua_consumer_poll_stats(struct lua_State *L); 25 | 26 | int 27 | lua_consumer_poll_errors(struct lua_State *L); 28 | 29 | int 30 | lua_consumer_poll_rebalances(struct lua_State *L); 31 | 32 | int 33 | lua_consumer_store_offset(struct lua_State *L); 34 | 35 | int 36 | lua_consumer_seek_partitions(struct lua_State *L); 37 | 38 | int 39 | lua_consumer_close(struct lua_State *L); 40 | 41 | int 42 | lua_consumer_destroy(struct lua_State *L); 43 | 44 | int 45 | lua_create_consumer(struct lua_State *L); 46 | 47 | int 48 | lua_consumer_dump_conf(struct lua_State *L); 49 | 50 | int 51 | lua_consumer_metadata(struct lua_State *L); 52 | 53 | int 54 | lua_consumer_list_groups(struct lua_State *L); 55 | 56 | int 57 | lua_consumer_pause(struct lua_State *L); 58 | 59 | int 60 | lua_consumer_resume(struct lua_State *L); 61 | 62 | int 63 | lua_consumer_rebalance_protocol(struct lua_State *L); 64 | 65 | #endif //TNT_KAFKA_CONSUMER_H 66 | -------------------------------------------------------------------------------- /kafka/consumer_msg.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | 6 | #include 7 | 8 | #include 9 | 10 | static const char null_literal[] = "NULL"; 11 | 12 | //////////////////////////////////////////////////////////////////////////////////////////////////// 13 | /** 14 | * Consumer Message 15 | */ 16 | 17 | msg_t * 18 | lua_check_consumer_msg(struct lua_State *L, int index) { 19 | msg_t **msg_p = (msg_t **)luaL_checkudata(L, index, consumer_msg_label); 20 | if (msg_p == NULL || *msg_p == NULL) 21 | luaL_error(L, "Kafka consumer message fatal error: failed to retrieve message from lua stack!"); 22 | return *msg_p; 23 | } 24 | 25 | int 26 | lua_consumer_msg_topic(struct lua_State *L) { 27 | const msg_t *msg = lua_check_consumer_msg(L, 1); 28 | lua_pushstring(L, rd_kafka_topic_name(msg->topic)); 29 | return 1; 30 | } 31 | 32 | int 33 | lua_consumer_msg_partition(struct lua_State *L) { 34 | const msg_t *msg = lua_check_consumer_msg(L, 1); 35 | 36 | lua_pushnumber(L, (double)msg->partition); 37 | return 1; 38 | } 39 | 40 | int 41 | lua_consumer_msg_offset(struct lua_State *L) { 42 | msg_t *msg = lua_check_consumer_msg(L, 1); 43 | 44 | luaL_pushint64(L, msg->offset); 45 | return 1; 46 | } 47 | 48 | int 49 | lua_consumer_msg_key(struct lua_State *L) { 50 | msg_t *msg = lua_check_consumer_msg(L, 1); 51 | 52 | if (msg->key_len <= 0 || msg->key == NULL) 53 | lua_pushnil(L); 54 | else 55 | lua_pushlstring(L, msg->key, msg->key_len); 56 | return 1; 57 | } 58 | 59 | int 60 | lua_consumer_msg_value(struct lua_State *L) { 61 | const msg_t *msg = lua_check_consumer_msg(L, 1); 62 | 63 | if (msg->value_len <= 0 || msg->value == NULL) 64 | lua_pushnil(L); 65 | else 66 | lua_pushlstring(L, msg->value, msg->value_len); 67 | return 1; 68 | } 69 | 70 | int 71 | lua_consumer_msg_headers(struct lua_State *L) { 72 | const msg_t *msg = lua_check_consumer_msg(L, 1); 73 | if (msg->headers == NULL) 74 | return 0; 75 | 76 | lua_newtable(L); 77 | 78 | size_t idx = 0; 79 | const char *key; 80 | const void *val; 81 | size_t size; 82 | 83 | while (!rd_kafka_header_get_all(msg->headers, idx++, 84 | &key, &val, &size)) { 85 | lua_pushstring(L, key); 86 | if (val != NULL) 87 | lua_pushlstring(L, val, size); 88 | else 89 | *(void **)luaL_pushcdata(L, luaL_ctypeid(L, "void *")) = NULL; 90 | lua_settable(L, -3); 91 | } 92 | return 1; 93 | } 94 | 95 | int 96 | lua_consumer_msg_tostring(struct lua_State *L) { 97 | 
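/* key and value are copied into NUL-terminated stack buffers so they can be formatted with %s; "NULL" is substituted when a part is absent */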
const msg_t *msg = lua_check_consumer_msg(L, 1); 98 | 99 | size_t key_len = msg->key_len <= 0 ? sizeof(null_literal) : msg->key_len + 1; 100 | char key[key_len]; 101 | 102 | if (msg->key_len <= 0 || msg->key == NULL) { 103 | memcpy(key, null_literal, sizeof(null_literal)); 104 | } else { 105 | strncpy(key, msg->key, msg->key_len); 106 | key[msg->key_len] = '\0'; 107 | } 108 | 109 | size_t value_len = msg->value_len <= 0 ? sizeof(null_literal) : msg->value_len + 1; 110 | char value[value_len]; 111 | 112 | if (msg->value_len <= 0 || msg->value == NULL) { 113 | memcpy(value, null_literal, sizeof(null_literal)); 114 | } else { 115 | strncpy(value, msg->value, msg->value_len); 116 | value[msg->value_len] = '\0'; 117 | } 118 | 119 | lua_pushfstring(L, 120 | "Kafka Consumer Message: topic=%s partition=%d offset=%d key=%s value=%s", 121 | rd_kafka_topic_name(msg->topic), 122 | msg->partition, 123 | msg->offset, 124 | key, 125 | value); 126 | return 1; 127 | } 128 | 129 | int 130 | lua_consumer_msg_gc(struct lua_State *L) { 131 | msg_t **msg_p = (msg_t **)luaL_checkudata(L, 1, consumer_msg_label); 132 | if (msg_p && *msg_p) { 133 | destroy_consumer_msg(*msg_p); 134 | } 135 | if (msg_p) 136 | *msg_p = NULL; 137 | 138 | return 0; 139 | } 140 | 141 | msg_t * 142 | new_consumer_msg(rd_kafka_message_t *rd_message) { 143 | size_t message_size = sizeof(msg_t) + rd_message->len + rd_message->key_len; 144 | msg_t *msg = xcalloc(message_size, 1); 145 | msg->topic = rd_message->rkt; 146 | msg->partition = rd_message->partition; 147 | msg->value = (char*)msg + sizeof(msg_t); 148 | msg->key = (char*)msg + sizeof(msg_t) + rd_message->len; 149 | 150 | // headers 151 | rd_kafka_headers_t *hdrsp; 152 | rd_kafka_resp_err_t err = rd_kafka_message_headers(rd_message, &hdrsp); 153 | if (err == RD_KAFKA_RESP_ERR_NO_ERROR) 154 | msg->headers = rd_kafka_headers_copy(hdrsp); 155 | 156 | // value 157 | if (rd_message->len > 0) 158 | memcpy(msg->value, rd_message->payload, rd_message->len); 159 | msg->value_len = rd_message->len; 160 | 161 | // key 162 | if (rd_message->key_len > 0) 163 | memcpy(msg->key, rd_message->key, rd_message->key_len); 164 | msg->key_len = rd_message->key_len; 165 | msg->offset = rd_message->offset; 166 | 167 | return msg; 168 | } 169 | 170 | void 171 | destroy_consumer_msg(msg_t *msg) { 172 | if (msg == NULL) 173 | return; 174 | if (msg->headers != NULL) 175 | rd_kafka_headers_destroy(msg->headers); 176 | free(msg); 177 | 178 | return; 179 | } 180 | -------------------------------------------------------------------------------- /kafka/consumer_msg.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_CONSUMER_MSG_H 2 | #define TNT_KAFKA_CONSUMER_MSG_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | 10 | //////////////////////////////////////////////////////////////////////////////////////////////////// 11 | /** 12 | * Consumer Message 13 | */ 14 | typedef struct { 15 | rd_kafka_topic_t *topic; 16 | rd_kafka_headers_t *headers; 17 | int32_t partition; 18 | char *value; 19 | size_t value_len; 20 | char *key; 21 | size_t key_len; 22 | int64_t offset; 23 | } msg_t; 24 | 25 | msg_t *lua_check_consumer_msg(struct lua_State *L, int index); 26 | 27 | msg_t *new_consumer_msg(rd_kafka_message_t *rd_message); 28 | 29 | void destroy_consumer_msg(msg_t *msg); 30 | 31 | int lua_consumer_msg_topic(struct lua_State *L); 32 | 33 | int lua_consumer_msg_partition(struct lua_State *L); 34 | 35 | int lua_consumer_msg_headers(struct lua_State *L); 36 | 
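/*
 * Rough Lua-side usage sketch (see examples/consumer for complete examples):
 *   local msg = consumer:output():get()
 *   msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value()
 */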
37 | int lua_consumer_msg_offset(struct lua_State *L); 38 | 39 | int lua_consumer_msg_key(struct lua_State *L); 40 | 41 | int lua_consumer_msg_value(struct lua_State *L); 42 | 43 | int lua_consumer_msg_tostring(struct lua_State *L); 44 | 45 | int lua_consumer_msg_gc(struct lua_State *L); 46 | 47 | #endif //TNT_KAFKA_CONSUMER_MSG_H 48 | -------------------------------------------------------------------------------- /kafka/init.lua: -------------------------------------------------------------------------------- 1 | local log = require("log") 2 | local fiber = require('fiber') 3 | local tnt_kafka = require("kafka.tntkafka") 4 | 5 | local DEFAULT_TIMEOUT_MS = 2000 6 | 7 | local Consumer = {} 8 | 9 | Consumer.__index = Consumer 10 | 11 | function Consumer.create(config) 12 | if config == nil then 13 | return nil, "config must not be nil" 14 | end 15 | 16 | local consumer, err = tnt_kafka.create_consumer(config) 17 | if err ~= nil then 18 | return nil, err 19 | end 20 | 21 | local new = { 22 | config = config, 23 | _consumer = consumer, 24 | _output_ch = fiber.channel(10000), 25 | } 26 | setmetatable(new, Consumer) 27 | 28 | new._poll_msg_fiber = fiber.create(function() 29 | new:_poll_msg() 30 | end) 31 | new._poll_msg_fiber:name('kafka_msg_poller') 32 | 33 | if config.log_callback ~= nil then 34 | new._poll_logs_fiber = fiber.create(function() 35 | new:_poll_logs() 36 | end) 37 | new._poll_logs_fiber:name('kafka_logs_poller') 38 | end 39 | 40 | if config.stats_callback ~= nil then 41 | new._poll_stats_fiber = fiber.create(function() 42 | new:_poll_stats() 43 | end) 44 | new._poll_stats_fiber:name('kafka_stats_poller') 45 | end 46 | 47 | if config.error_callback ~= nil then 48 | new._poll_errors_fiber = fiber.create(function() 49 | new:_poll_errors() 50 | end) 51 | new._poll_errors_fiber:name('kafka_error_poller') 52 | end 53 | 54 | if config.rebalance_callback ~= nil then 55 | new._poll_rebalances_fiber = fiber.create(function() 56 | new:_poll_rebalances() 57 | end) 58 | new._poll_rebalances_fiber:name('kafka_rebalances_poller') 59 | end 60 | 61 | return new, nil 62 | end 63 | 64 | function Consumer:_poll_msg() 65 | local msgs 66 | while true do 67 | msgs = self._consumer:poll_msg(100) 68 | if #msgs > 0 then 69 | for _, msg in ipairs(msgs) do 70 | self._output_ch:put(msg) 71 | end 72 | fiber.yield() 73 | else 74 | -- throttling poll 75 | fiber.sleep(0.01) 76 | end 77 | end 78 | end 79 | 80 | jit.off(Consumer._poll_msg) 81 | 82 | function Consumer:_poll_logs() 83 | local count, err 84 | while true do 85 | count, err = self._consumer:poll_logs(100) 86 | if err ~= nil then 87 | log.error("Consumer poll logs error: %s", err) 88 | -- throttling poll 89 | fiber.sleep(0.1) 90 | elseif count > 0 then 91 | fiber.yield() 92 | else 93 | -- throttling poll 94 | fiber.sleep(1) 95 | end 96 | end 97 | end 98 | 99 | jit.off(Consumer._poll_logs) 100 | 101 | function Consumer:_poll_stats() 102 | local count, err 103 | while true do 104 | count, err = self._consumer:poll_stats(100) 105 | if err ~= nil then 106 | log.error("Consumer poll stats error: %s", err) 107 | -- throttling poll 108 | fiber.sleep(0.1) 109 | elseif count > 0 then 110 | fiber.yield() 111 | else 112 | -- throttling poll 113 | fiber.sleep(1) 114 | end 115 | end 116 | end 117 | 118 | jit.off(Consumer._poll_stats) 119 | 120 | function Consumer:_poll_errors() 121 | local count, err 122 | while true do 123 | count, err = self._consumer:poll_errors(100) 124 | if err ~= nil then 125 | log.error("Consumer poll errors error: %s", err) 126 | -- 
throttling poll 127 | fiber.sleep(0.1) 128 | elseif count > 0 then 129 | fiber.yield() 130 | else 131 | -- throttling poll 132 | fiber.sleep(1) 133 | end 134 | end 135 | end 136 | 137 | jit.off(Consumer._poll_errors) 138 | 139 | function Consumer:_poll_rebalances() 140 | local count, err 141 | while true do 142 | count, err = self._consumer:poll_rebalances(1) 143 | if err ~= nil then 144 | log.error("Consumer poll rebalances error: %s", err) 145 | -- throttling poll 146 | fiber.sleep(0.1) 147 | elseif count > 0 then 148 | fiber.yield() 149 | else 150 | -- throttling poll 151 | fiber.sleep(0.5) 152 | end 153 | end 154 | end 155 | 156 | jit.off(Consumer._poll_rebalances) 157 | 158 | function Consumer:close() 159 | if self._consumer == nil then 160 | return false 161 | end 162 | 163 | local ok = self._consumer:close() 164 | 165 | self._poll_msg_fiber:cancel() 166 | self._output_ch:close() 167 | 168 | fiber.yield() 169 | 170 | if self._poll_logs_fiber ~= nil then 171 | self._poll_logs_fiber:cancel() 172 | end 173 | if self._poll_stats_fiber ~= nil then 174 | self._poll_stats_fiber:cancel() 175 | end 176 | if self._poll_errors_fiber ~= nil then 177 | self._poll_errors_fiber:cancel() 178 | end 179 | if self._poll_rebalances_fiber ~= nil then 180 | self._poll_rebalances_fiber:cancel() 181 | end 182 | 183 | self._consumer:destroy() 184 | 185 | self._consumer = nil 186 | 187 | return ok 188 | end 189 | 190 | local function get_timeout_from_options(options) 191 | local timeout_ms = DEFAULT_TIMEOUT_MS 192 | if type(options) == 'table' and options.timeout_ms ~= nil then 193 | timeout_ms = options.timeout_ms 194 | end 195 | return timeout_ms 196 | end 197 | 198 | function Consumer:subscribe(topics) 199 | return self._consumer:subscribe(topics) 200 | end 201 | 202 | function Consumer:unsubscribe(topics) 203 | return self._consumer:unsubscribe(topics) 204 | end 205 | 206 | function Consumer:output() 207 | return self._output_ch 208 | end 209 | 210 | function Consumer:store_offset(message) 211 | return self._consumer:store_offset(message) 212 | end 213 | 214 | function Consumer:pause() 215 | return self._consumer:pause() 216 | end 217 | 218 | function Consumer:resume() 219 | return self._consumer:resume() 220 | end 221 | 222 | function Consumer:rebalance_protocol() 223 | return self._consumer:rebalance_protocol() 224 | end 225 | 226 | function Consumer:seek_partitions(topic_partitions_list, options) 227 | local timeout_ms = get_timeout_from_options(options) 228 | return self._consumer:seek_partitions(topic_partitions_list, timeout_ms) 229 | end 230 | 231 | function Consumer:dump_conf() 232 | if self._consumer == nil then 233 | return 234 | end 235 | return self._consumer:dump_conf() 236 | end 237 | 238 | function Consumer:metadata(options) 239 | if self._consumer == nil then 240 | return 241 | end 242 | 243 | local timeout_ms = get_timeout_from_options(options) 244 | 245 | return self._consumer:metadata(timeout_ms) 246 | end 247 | 248 | function Consumer:list_groups(options) 249 | if self._consumer == nil then 250 | return 251 | end 252 | 253 | local timeout_ms = get_timeout_from_options(options) 254 | 255 | local group 256 | if options ~= nil and options.group ~= nil then 257 | group = options.group 258 | end 259 | 260 | return self._consumer:list_groups(group, timeout_ms) 261 | end 262 | 263 | local Producer = {} 264 | 265 | Producer.__index = Producer 266 | 267 | function Producer.create(config) 268 | if config == nil then 269 | return nil, "config must not be nil" 270 | end 271 | 272 | local producer, 
err = tnt_kafka.create_producer(config) 273 | if err ~= nil then 274 | return nil, err 275 | end 276 | 277 | local new = { 278 | config = config, 279 | _counter = 0, 280 | _delivery_map = {}, 281 | _producer = producer, 282 | } 283 | setmetatable(new, Producer) 284 | 285 | new._msg_delivery_poll_fiber = fiber.create(function() 286 | new:_msg_delivery_poll() 287 | end) 288 | 289 | if config.log_callback ~= nil then 290 | new._poll_logs_fiber = fiber.create(function() 291 | new:_poll_logs() 292 | end) 293 | end 294 | 295 | if config.stats_callback ~= nil then 296 | new._poll_stats_fiber = fiber.create(function() 297 | new:_poll_stats() 298 | end) 299 | end 300 | 301 | if config.error_callback ~= nil then 302 | new._poll_errors_fiber = fiber.create(function() 303 | new:_poll_errors() 304 | end) 305 | end 306 | 307 | return new, nil 308 | end 309 | 310 | function Producer:_msg_delivery_poll() 311 | while true do 312 | local count, err 313 | while true do 314 | count, err = self._producer:msg_delivery_poll(100) 315 | if err ~= nil then 316 | log.error(err) 317 | -- throttling poll 318 | fiber.sleep(0.01) 319 | elseif count > 0 then 320 | fiber.yield() 321 | else 322 | -- throttling poll 323 | fiber.sleep(0.01) 324 | end 325 | end 326 | end 327 | end 328 | 329 | jit.off(Producer._msg_delivery_poll) 330 | 331 | function Producer:_poll_logs() 332 | local count, err 333 | while true do 334 | count, err = self._producer:poll_logs(100) 335 | if err ~= nil then 336 | log.error("Producer poll logs error: %s", err) 337 | -- throttling poll 338 | fiber.sleep(0.1) 339 | elseif count > 0 then 340 | fiber.yield() 341 | else 342 | -- throttling poll 343 | fiber.sleep(1) 344 | end 345 | end 346 | end 347 | 348 | jit.off(Producer._poll_logs) 349 | 350 | function Producer:_poll_stats() 351 | local count, err 352 | while true do 353 | count, err = self._producer:poll_stats(100) 354 | if err ~= nil then 355 | log.error("Producer poll stats error: %s", err) 356 | -- throttling poll 357 | fiber.sleep(0.1) 358 | elseif count > 0 then 359 | fiber.yield() 360 | else 361 | -- throttling poll 362 | fiber.sleep(1) 363 | end 364 | end 365 | end 366 | 367 | jit.off(Producer._poll_stats) 368 | 369 | function Producer:_poll_errors() 370 | local count, err 371 | while true do 372 | count, err = self._producer:poll_errors(100) 373 | if err ~= nil then 374 | log.error("Producer poll errors error: %s", err) 375 | -- throttling poll 376 | fiber.sleep(0.1) 377 | elseif count > 0 then 378 | fiber.yield() 379 | else 380 | -- throttling poll 381 | fiber.sleep(1) 382 | end 383 | end 384 | end 385 | 386 | jit.off(Producer._poll_errors) 387 | 388 | function Producer:produce_async(msg) 389 | local err = self._producer:produce(msg) 390 | return err 391 | end 392 | 393 | local function dr_callback_factory(delivery_chan) 394 | return function(err) 395 | delivery_chan:put(err) 396 | end 397 | end 398 | 399 | function Producer:produce(msg) 400 | local delivery_chan = fiber.channel(1) 401 | 402 | msg.dr_callback = dr_callback_factory(delivery_chan) 403 | 404 | local err = self._producer:produce(msg) 405 | if err == nil then 406 | err = delivery_chan:get() 407 | end 408 | 409 | return err 410 | end 411 | 412 | function Producer:dump_conf() 413 | if self._producer == nil then 414 | return 415 | end 416 | return self._producer:dump_conf() 417 | end 418 | 419 | function Producer:metadata(options) 420 | if self._producer == nil then 421 | return 422 | end 423 | 424 | local timeout_ms = get_timeout_from_options(options) 425 | 426 | local topic 427 | 
if options ~= nil and options.topic ~= nil then 428 | topic = options.topic 429 | end 430 | 431 | return self._producer:metadata(topic, timeout_ms) 432 | end 433 | 434 | function Producer:list_groups(options) 435 | if self._producer == nil then 436 | return 437 | end 438 | 439 | local timeout_ms = get_timeout_from_options(options) 440 | 441 | local group 442 | if options ~= nil and options.group ~= nil then 443 | group = options.group 444 | end 445 | 446 | return self._producer:list_groups(group, timeout_ms) 447 | end 448 | 449 | function Producer:close() 450 | if self._producer == nil then 451 | return false 452 | end 453 | 454 | local ok = self._producer:close() 455 | 456 | self._msg_delivery_poll_fiber:cancel() 457 | if self._poll_logs_fiber ~= nil then 458 | self._poll_logs_fiber:cancel() 459 | end 460 | if self._poll_stats_fiber ~= nil then 461 | self._poll_stats_fiber:cancel() 462 | end 463 | if self._poll_errors_fiber ~= nil then 464 | self._poll_errors_fiber:cancel() 465 | end 466 | 467 | self._producer:destroy() 468 | 469 | self._producer = nil 470 | 471 | return ok 472 | end 473 | 474 | return { 475 | Consumer = Consumer, 476 | Producer = Producer, 477 | _LIBRDKAFKA = tnt_kafka.librdkafka_version(), 478 | _VERSION = require('kafka.version'), 479 | } 480 | -------------------------------------------------------------------------------- /kafka/producer.c: -------------------------------------------------------------------------------- 1 | #include "producer.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | //////////////////////////////////////////////////////////////////////////////////////////////////// 16 | 17 | /** 18 | * Producer poll thread 19 | */ 20 | 21 | typedef struct producer_poller_t { 22 | rd_kafka_t *rd_producer; 23 | pthread_t thread; 24 | pthread_attr_t attr; 25 | int should_stop; 26 | pthread_mutex_t lock; 27 | } producer_poller_t; 28 | 29 | typedef struct producer_topics_t { 30 | rd_kafka_topic_t **elements; 31 | int32_t count; 32 | int32_t capacity; 33 | } producer_topics_t; 34 | 35 | typedef struct { 36 | rd_kafka_t *rd_producer; 37 | producer_topics_t *topics; 38 | event_queues_t *event_queues; 39 | producer_poller_t *poller; 40 | } producer_t; 41 | 42 | static void * 43 | producer_poll_loop(void *arg) { 44 | set_thread_name("kafka_producer"); 45 | 46 | producer_poller_t *poller = arg; 47 | int count = 0; 48 | int should_stop = 0; 49 | 50 | while (true) { 51 | { 52 | pthread_mutex_lock(&poller->lock); 53 | 54 | should_stop = poller->should_stop; 55 | 56 | pthread_mutex_unlock(&poller->lock); 57 | 58 | if (should_stop) { 59 | break; 60 | } 61 | } 62 | 63 | { 64 | count = rd_kafka_poll(poller->rd_producer, 1000); 65 | if (count == 0) { 66 | // throttling calls with 100ms sleep 67 | usleep(100000); 68 | } 69 | } 70 | } 71 | 72 | pthread_exit(NULL); 73 | } 74 | 75 | static producer_poller_t * 76 | new_producer_poller(rd_kafka_t *rd_producer) { 77 | producer_poller_t *poller = xmalloc(sizeof(producer_poller_t)); 78 | poller->rd_producer = rd_producer; 79 | poller->should_stop = 0; 80 | 81 | pthread_mutex_init(&poller->lock, NULL); 82 | pthread_attr_init(&poller->attr); 83 | pthread_attr_setdetachstate(&poller->attr, PTHREAD_CREATE_JOINABLE); 84 | int rc = pthread_create(&poller->thread, &poller->attr, producer_poll_loop, (void *)poller); 85 | if (rc < 0) { 86 | free(poller); 87 | return NULL; 88 | } 89 | 90 | return poller; 91 | } 92 | 93 | static ssize_t 94 | 
stop_poller(va_list args) { 95 | producer_poller_t *poller = va_arg(args, producer_poller_t *); 96 | pthread_mutex_lock(&poller->lock); 97 | 98 | poller->should_stop = 1; 99 | 100 | pthread_mutex_unlock(&poller->lock); 101 | 102 | pthread_join(poller->thread, NULL); 103 | 104 | return 0; 105 | } 106 | 107 | static void 108 | destroy_producer_poller(producer_poller_t *poller) { 109 | // stopping polling thread 110 | coio_call(stop_poller, poller); 111 | 112 | pthread_attr_destroy(&poller->attr); 113 | pthread_mutex_destroy(&poller->lock); 114 | 115 | free(poller); 116 | } 117 | 118 | /** 119 | * Producer 120 | */ 121 | 122 | static producer_topics_t * 123 | new_producer_topics(int32_t capacity) { 124 | rd_kafka_topic_t **elements; 125 | elements = xmalloc(sizeof(rd_kafka_topic_t *) * capacity); 126 | producer_topics_t *topics; 127 | topics = xmalloc(sizeof(producer_topics_t)); 128 | topics->capacity = capacity; 129 | topics->count = 0; 130 | topics->elements = elements; 131 | 132 | return topics; 133 | } 134 | 135 | static void 136 | add_producer_topics(producer_topics_t *topics, rd_kafka_topic_t *element) { 137 | if (topics->count >= topics->capacity) { 138 | rd_kafka_topic_t **new_elements = xrealloc(topics->elements, sizeof(rd_kafka_topic_t *) * topics->capacity * 2); 139 | topics->elements = new_elements; 140 | topics->capacity *= 2; 141 | } 142 | topics->elements[topics->count++] = element; 143 | } 144 | 145 | static rd_kafka_topic_t * 146 | find_producer_topic_by_name(producer_topics_t *topics, const char *name) { 147 | rd_kafka_topic_t *topic; 148 | for (int i = 0; i < topics->count; i++) { 149 | topic = topics->elements[i]; 150 | if (strcmp(rd_kafka_topic_name(topic), name) == 0) { 151 | return topic; 152 | } 153 | } 154 | return NULL; 155 | } 156 | 157 | static void 158 | destroy_producer_topics(producer_topics_t *topics) { 159 | rd_kafka_topic_t **topic_p; 160 | rd_kafka_topic_t **end = topics->elements + topics->count; 161 | for (topic_p = topics->elements; topic_p < end; topic_p++) { 162 | rd_kafka_topic_destroy(*topic_p); 163 | } 164 | 165 | free(topics->elements); 166 | free(topics); 167 | } 168 | 169 | static inline producer_t * 170 | lua_check_producer(struct lua_State *L, int index) { 171 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, index, producer_label); 172 | if (producer_p == NULL || *producer_p == NULL) 173 | luaL_error(L, "Kafka producer fatal error: failed to retrieve producer from lua stack!"); 174 | return *producer_p; 175 | } 176 | 177 | int 178 | lua_producer_tostring(struct lua_State *L) { 179 | const producer_t *producer = lua_check_producer(L, 1); 180 | lua_pushfstring(L, "Kafka Producer: %p", producer); 181 | return 1; 182 | } 183 | 184 | int 185 | lua_producer_msg_delivery_poll(struct lua_State *L) { 186 | if (lua_gettop(L) != 2) 187 | luaL_error(L, "Usage: count, err = producer:msg_delivery_poll(events_limit)"); 188 | 189 | producer_t *producer = lua_check_producer(L, 1); 190 | 191 | int events_limit = lua_tonumber(L, 2); 192 | int callbacks_count = 0; 193 | char *err_str = NULL; 194 | dr_msg_t *dr_msg = NULL; 195 | 196 | pthread_mutex_lock(&producer->event_queues->delivery_queue->lock); 197 | 198 | while (events_limit > callbacks_count) { 199 | dr_msg = queue_lockfree_pop(producer->event_queues->delivery_queue); 200 | if (dr_msg == NULL) 201 | break; 202 | callbacks_count += 1; 203 | lua_rawgeti(L, LUA_REGISTRYINDEX, dr_msg->dr_callback); 204 | if (dr_msg->err != RD_KAFKA_RESP_ERR_NO_ERROR) { 205 | lua_pushstring(L, 
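msg_delivery_poll() drains up to events_limit delivery reports from the C-side queue and calls each message's dr_callback with exactly one argument: nil on success or an rd_kafka error string. The high-level wrapper drives this from its delivery poll fiber, so callers only attach the callback. A sketch, assuming the producer from the earlier sketch (topic and payload are placeholders):

local failed = 0
producer:produce_async({
    topic = 'my_topic',
    value = 'payload',
    dr_callback = function(delivery_err)
        -- nil means the broker acknowledged the message.
        if delivery_err ~= nil then
            failed = failed + 1
        end
    end,
})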
rd_kafka_err2str(dr_msg->err)); 206 | } else { 207 | lua_pushnil(L); 208 | } 209 | /* do the call (1 arguments, 0 result) */ 210 | if (lua_pcall(L, 1, 0, 0) != 0) { 211 | err_str = (char *)lua_tostring(L, -1); 212 | } 213 | luaL_unref(L, LUA_REGISTRYINDEX, dr_msg->dr_callback); 214 | destroy_dr_msg(dr_msg); 215 | if (err_str != NULL) { 216 | break; 217 | } 218 | } 219 | 220 | pthread_mutex_unlock(&producer->event_queues->delivery_queue->lock); 221 | 222 | lua_pushnumber(L, (double)callbacks_count); 223 | if (err_str != NULL) { 224 | lua_pushstring(L, err_str); 225 | } else { 226 | lua_pushnil(L); 227 | } 228 | return 2; 229 | } 230 | 231 | LUA_RDKAFKA_POLL_FUNC(producer, poll_logs, LOG_QUEUE, destroy_log_msg, push_log_cb_args) 232 | LUA_RDKAFKA_POLL_FUNC(producer, poll_stats, STATS_QUEUE, free, push_stats_cb_args) 233 | LUA_RDKAFKA_POLL_FUNC(producer, poll_errors, ERROR_QUEUE, destroy_error_msg, push_errors_cb_args) 234 | 235 | int 236 | lua_producer_produce(struct lua_State *L) { 237 | if (lua_gettop(L) != 2 || !lua_istable(L, 2)) 238 | luaL_error(L, "Usage: err = producer:produce(msg)"); 239 | 240 | lua_pushliteral(L, "topic"); 241 | lua_gettable(L, -2); 242 | const char *topic = lua_tostring(L, -1); 243 | lua_pop(L, 1); 244 | if (topic == NULL) { 245 | lua_pushliteral(L, "producer message must contains non nil 'topic' key"); 246 | return 1; 247 | } 248 | 249 | lua_pushliteral(L, "key"); 250 | lua_gettable(L, -2); 251 | size_t key_len; 252 | // rd_kafka will copy key so no need to worry about this cast 253 | char *key = (char *)lua_tolstring(L, -1, &key_len); 254 | 255 | lua_pop(L, 1); 256 | 257 | lua_pushliteral(L, "value"); 258 | lua_gettable(L, -2); 259 | size_t value_len; 260 | // rd_kafka will copy value so no need to worry about this cast 261 | char *value = (char *)lua_tolstring(L, -1, &value_len); 262 | 263 | lua_pop(L, 1); 264 | 265 | if (key == NULL && value == NULL) { 266 | lua_pushliteral(L, "producer message must contains non nil key or value"); 267 | return 1; 268 | } 269 | 270 | rd_kafka_headers_t *hdrs = NULL; 271 | lua_pushliteral(L, "headers"); 272 | lua_gettable(L, -2); 273 | if (lua_istable(L, -1)) { 274 | hdrs = rd_kafka_headers_new(8); 275 | if (hdrs == NULL) { 276 | lua_pushliteral(L, "failed to allocate kafka headers"); 277 | return 1; 278 | } 279 | 280 | lua_pushnil(L); 281 | while (lua_next(L, -2) != 0) { 282 | size_t hdr_value_len = 0; 283 | const char *hdr_value = lua_tolstring(L, -1, &hdr_value_len); 284 | size_t hdr_key_len = 0; 285 | const char *hdr_key = lua_tolstring(L, -2, &hdr_key_len); 286 | 287 | rd_kafka_resp_err_t err = rd_kafka_header_add( 288 | hdrs, hdr_key, hdr_key_len, hdr_value, hdr_value_len); 289 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 290 | lua_pushliteral(L, "failed to add kafka headers"); 291 | goto error; 292 | } 293 | 294 | lua_pop(L, 1); 295 | } 296 | } 297 | 298 | lua_pop(L, 1); 299 | 300 | // create delivery callback queue if got msg id 301 | dr_msg_t *dr_msg = NULL; 302 | lua_pushliteral(L, "dr_callback"); 303 | lua_gettable(L, -2); 304 | if (lua_isfunction(L, -1)) { 305 | dr_msg = new_dr_msg(luaL_ref(L, LUA_REGISTRYINDEX), RD_KAFKA_RESP_ERR_NO_ERROR); 306 | if (dr_msg == NULL) { 307 | lua_pushliteral(L, "failed to create callback message"); 308 | goto error; 309 | } 310 | } else { 311 | lua_pop(L, 1); 312 | } 313 | 314 | // pop msg 315 | lua_pop(L, 1); 316 | 317 | producer_t *producer = lua_check_producer(L, 1); 318 | rd_kafka_topic_t *rd_topic = find_producer_topic_by_name(producer->topics, topic); 319 | if (rd_topic == NULL) { 
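As the argument parsing here shows, a message is a plain table: 'topic' is mandatory, at least one of 'key'/'value' must be non-nil, 'headers' is an optional string-to-string table, and 'dr_callback' is an optional function. A producing sketch with headers, assuming the producer from the earlier sketch; all names and values are placeholders:

local err = producer:produce({
    topic = 'my_topic',
    key = 'user-42',
    value = 'payload',
    headers = { trace_id = 'abc123', source = 'billing' },
})
if err ~= nil then
    require('log').error('produce failed: %s', err)
end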
320 | rd_topic = rd_kafka_topic_new(producer->rd_producer, topic, NULL); 321 | if (rd_topic == NULL) { 322 | lua_pushstring(L, rd_kafka_err2str(rd_kafka_last_error())); 323 | goto error; 324 | } 325 | add_producer_topics(producer->topics, rd_topic); 326 | } 327 | 328 | rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; 329 | if (hdrs == NULL) { 330 | int rc = rd_kafka_produce(rd_topic, -1, RD_KAFKA_MSG_F_COPY, value, value_len, key, key_len, dr_msg); 331 | if (rc != 0) 332 | err = rd_kafka_last_error(); 333 | } else { 334 | err = rd_kafka_producev( 335 | producer->rd_producer, 336 | RD_KAFKA_V_RKT(rd_topic), 337 | RD_KAFKA_V_PARTITION(-1), 338 | RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), 339 | RD_KAFKA_V_VALUE(value, value_len), 340 | RD_KAFKA_V_KEY(key, key_len), 341 | RD_KAFKA_V_HEADERS(hdrs), 342 | RD_KAFKA_V_OPAQUE(dr_msg), 343 | RD_KAFKA_V_END); 344 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) 345 | rd_kafka_headers_destroy(hdrs); 346 | } 347 | 348 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 349 | lua_pushstring(L, rd_kafka_err2str(err)); 350 | return 1; 351 | } 352 | return 0; 353 | 354 | error: 355 | if (hdrs != NULL) 356 | rd_kafka_headers_destroy(hdrs); 357 | return 1; 358 | } 359 | 360 | static ssize_t 361 | producer_flush(va_list args) { 362 | rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; 363 | rd_kafka_t *rd_producer = va_arg(args, rd_kafka_t *); 364 | while (true) { 365 | err = rd_kafka_flush(rd_producer, 1000); 366 | if (err != RD_KAFKA_RESP_ERR__TIMED_OUT) { 367 | break; 368 | } 369 | } 370 | return 0; 371 | } 372 | 373 | static ssize_t 374 | wait_producer_destroy(va_list args) { 375 | rd_kafka_t *rd_kafka = va_arg(args, rd_kafka_t *); 376 | rd_kafka_destroy(rd_kafka); 377 | return 0; 378 | } 379 | 380 | static void 381 | destroy_producer(struct lua_State *L, producer_t *producer) { 382 | if (producer->topics != NULL) { 383 | destroy_producer_topics(producer->topics); 384 | producer->topics = NULL; 385 | } 386 | 387 | /* 388 | * Here we close producer and only then destroys other stuff. 389 | * Otherwise raise condition is possible when e.g. 390 | * event queue is destroyed but producer still receives logs, errors, etc. 391 | * Only topics should be destroyed. 
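At the Lua level this close-then-destroy ordering is wrapped by Producer:close(), which flushes outstanding deliveries, stops the poll fibers and only then destroys the handle. A sketch of hooking that into instance shutdown, assuming the producer from the earlier sketch; box.ctl.on_shutdown is the standard Tarantool shutdown trigger:

box.ctl.on_shutdown(function()
    -- close() returns false if the producer was already closed.
    local ok = producer:close()
    if not ok then
        require('log').warn('kafka producer already closed')
    end
end)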
392 | */ 393 | if (producer->rd_producer != NULL) { 394 | /* Destroy handle */ 395 | coio_call(wait_producer_destroy, producer->rd_producer); 396 | producer->rd_producer = NULL; 397 | } 398 | 399 | if (producer->poller != NULL) { 400 | destroy_producer_poller(producer->poller); 401 | producer->poller = NULL; 402 | } 403 | 404 | if (producer->event_queues != NULL) { 405 | destroy_event_queues(L, producer->event_queues); 406 | producer->event_queues = NULL; 407 | } 408 | 409 | free(producer); 410 | } 411 | 412 | int 413 | lua_producer_close(struct lua_State *L) { 414 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 415 | if (producer_p == NULL || *producer_p == NULL) { 416 | lua_pushboolean(L, 0); 417 | return 1; 418 | } 419 | 420 | if ((*producer_p)->rd_producer != NULL) { 421 | coio_call(producer_flush, (*producer_p)->rd_producer); 422 | } 423 | 424 | if ((*producer_p)->poller != NULL) { 425 | destroy_producer_poller((*producer_p)->poller); 426 | (*producer_p)->poller = NULL; 427 | } 428 | 429 | lua_pushboolean(L, 1); 430 | return 1; 431 | } 432 | 433 | int 434 | lua_producer_dump_conf(struct lua_State *L) { 435 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 436 | if (producer_p == NULL || *producer_p == NULL) 437 | return 0; 438 | 439 | if ((*producer_p)->rd_producer != NULL) 440 | return lua_librdkafka_dump_conf(L, (*producer_p)->rd_producer); 441 | return 0; 442 | } 443 | 444 | int 445 | lua_producer_destroy(struct lua_State *L) { 446 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 447 | if (producer_p && *producer_p) { 448 | destroy_producer(L, *producer_p); 449 | } 450 | if (producer_p) 451 | *producer_p = NULL; 452 | return 0; 453 | } 454 | 455 | int 456 | lua_create_producer(struct lua_State *L) { 457 | if (lua_gettop(L) != 1 || !lua_istable(L, 1)) 458 | luaL_error(L, "Usage: producer, err = create_producer(conf)"); 459 | 460 | lua_pushstring(L, "brokers"); 461 | lua_gettable(L, -2); 462 | const char *brokers = lua_tostring(L, -1); 463 | lua_pop(L, 1); 464 | if (brokers == NULL) { 465 | lua_pushnil(L); 466 | lua_pushliteral(L, "producer config table must have non nil key 'brokers' which contains string"); 467 | return 2; 468 | } 469 | 470 | char errstr[512]; 471 | 472 | rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new(); 473 | lua_pushstring(L, "default_topic_options"); 474 | lua_gettable(L, -2); 475 | if (lua_istable(L, -1)) { 476 | lua_pushnil(L); 477 | // stack now contains: -1 => nil; -2 => table 478 | while (lua_next(L, -2)) { 479 | // stack now contains: -1 => value; -2 => key; -3 => table 480 | if (!(lua_isstring(L, -1)) || !(lua_isstring(L, -2))) { 481 | lua_pushnil(L); 482 | lua_pushliteral(L, "producer config default topic options must contains only string keys and string values"); 483 | goto topic_error; 484 | } 485 | 486 | const char *value = lua_tostring(L, -1); 487 | const char *key = lua_tostring(L, -2); 488 | if (rd_kafka_topic_conf_set(topic_conf, key, value, errstr, sizeof(errstr))) { 489 | lua_pushnil(L); 490 | lua_pushstring(L, errstr); 491 | goto topic_error; 492 | } 493 | 494 | // pop value, leaving original key 495 | lua_pop(L, 1); 496 | // stack now contains: -1 => key; -2 => table 497 | } 498 | // stack now contains: -1 => table 499 | } 500 | lua_pop(L, 1); 501 | 502 | rd_kafka_conf_t *rd_config = rd_kafka_conf_new(); 503 | rd_kafka_conf_set_default_topic_conf(rd_config, topic_conf); 504 | 505 | event_queues_t *event_queues = new_event_queues(); 
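The configuration table parsed here maps directly onto librdkafka: 'options' become global properties, 'default_topic_options' become the default topic configuration, and the *_callback fields enable the corresponding event queues. A fuller creation sketch; the broker list, option values and callback bodies are illustrative only:

local tnt_kafka = require('kafka')
local log = require('log')

local producer, err = tnt_kafka.Producer.create({
    brokers = 'localhost:9092',
    options = {
        ['queue.buffering.max.ms'] = '100',
        ['statistics.interval.ms'] = '1000',
    },
    default_topic_options = {
        ['partitioner'] = 'murmur2_random',
    },
    error_callback = function(e) log.error('kafka error: %s', e) end,
    log_callback = function(fac, str, level) log.info('kafka [%d] %s: %s', level, fac, str) end,
    stats_callback = function(json_stats) end,  -- raw JSON string from librdkafka
})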
506 | event_queues->delivery_queue = new_queue(); 507 | rd_kafka_conf_set_dr_msg_cb(rd_config, msg_delivery_callback); 508 | 509 | for (int i = 0; i < MAX_QUEUE; i++) { 510 | if (i == REBALANCE_QUEUE) 511 | continue; 512 | 513 | lua_pushstring(L, queue2str[i]); 514 | lua_gettable(L, -2); 515 | if (lua_isfunction(L, -1)) { 516 | event_queues->cb_refs[i] = luaL_ref(L, LUA_REGISTRYINDEX); 517 | event_queues->queues[i] = new_queue(); 518 | switch (i) { 519 | case LOG_QUEUE: 520 | rd_kafka_conf_set_log_cb(rd_config, log_callback); 521 | break; 522 | case ERROR_QUEUE: 523 | rd_kafka_conf_set_error_cb(rd_config, error_callback); 524 | break; 525 | case STATS_QUEUE: 526 | rd_kafka_conf_set_stats_cb(rd_config, stats_callback); 527 | break; 528 | } 529 | } else { 530 | lua_pop(L, 1); 531 | } 532 | } 533 | 534 | rd_kafka_conf_set_opaque(rd_config, event_queues); 535 | 536 | lua_pushstring(L, "options"); 537 | lua_gettable(L, -2); 538 | if (lua_istable(L, -1)) { 539 | lua_pushnil(L); 540 | // stack now contains: -1 => nil; -2 => table 541 | while (lua_next(L, -2)) { 542 | // stack now contains: -1 => value; -2 => key; -3 => table 543 | if (!(lua_isstring(L, -1)) || !(lua_isstring(L, -2))) { 544 | lua_pushnil(L); 545 | lua_pushliteral(L, "producer config options must contains only string keys and string values"); 546 | goto config_error; 547 | } 548 | 549 | const char *value = lua_tostring(L, -1); 550 | const char *key = lua_tostring(L, -2); 551 | if (rd_kafka_conf_set(rd_config, key, value, errstr, sizeof(errstr))) { 552 | lua_pushnil(L); 553 | lua_pushstring(L, errstr); 554 | goto config_error; 555 | } 556 | 557 | // pop value, leaving original key 558 | lua_pop(L, 1); 559 | // stack now contains: -1 => key; -2 => table 560 | } 561 | // stack now contains: -1 => table 562 | } 563 | lua_pop(L, 1); 564 | 565 | rd_kafka_t *rd_producer; 566 | if (!(rd_producer = rd_kafka_new(RD_KAFKA_PRODUCER, rd_config, errstr, sizeof(errstr)))) { 567 | lua_pushnil(L); 568 | lua_pushstring(L, errstr); 569 | goto config_error; 570 | } 571 | 572 | rd_config = NULL; // was freed by rd_kafka_new 573 | if (rd_kafka_brokers_add(rd_producer, brokers) == 0) { 574 | lua_pushnil(L); 575 | lua_pushliteral(L, "No valid brokers specified"); 576 | goto broker_error; 577 | } 578 | 579 | // creating background thread for polling consumer 580 | producer_poller_t *poller = new_producer_poller(rd_producer); 581 | 582 | producer_t *producer; 583 | producer = xmalloc(sizeof(producer_t)); 584 | producer->rd_producer = rd_producer; 585 | producer->topics = new_producer_topics(256); 586 | producer->event_queues = event_queues; 587 | producer->poller = poller; 588 | 589 | producer_t **producer_p = (producer_t **)lua_newuserdata(L, sizeof(producer)); 590 | *producer_p = producer; 591 | 592 | luaL_getmetatable(L, producer_label); 593 | lua_setmetatable(L, -2); 594 | return 1; 595 | 596 | broker_error: 597 | rd_kafka_destroy(rd_producer); 598 | config_error: 599 | if (rd_config != NULL) 600 | rd_kafka_conf_destroy(rd_config); 601 | destroy_event_queues(L, event_queues); 602 | return 2; 603 | topic_error: 604 | rd_kafka_topic_conf_destroy(topic_conf); 605 | return 2; 606 | } 607 | 608 | int 609 | lua_producer_metadata(struct lua_State *L) { 610 | producer_t **producer_p = (producer_t **)luaL_checkudata(L, 1, producer_label); 611 | if (producer_p == NULL || *producer_p == NULL) 612 | return 0; 613 | 614 | if ((*producer_p)->rd_producer != NULL) { 615 | rd_kafka_topic_t *topic = NULL; 616 | const char *topic_name = lua_tostring(L, 2); 617 | if 
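One consequence of the topic cache used by the metadata lookup just below: producer:metadata({topic = ...}) only succeeds for topics this producer has already produced to, otherwise it returns nil and a 'Topic ... is not found' error instead of querying the broker. A sketch, assuming the producer from the earlier sketch (topic name is a placeholder):

-- Produce once so the topic handle is cached, then request its metadata.
producer:produce_async({ topic = 'my_topic', value = 'warm-up' })
local meta, err = producer:metadata({ topic = 'my_topic', timeout_ms = 1000 })
if err ~= nil then
    require('log').error('metadata failed: %s', err)
end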
(topic_name != NULL) { 618 | topic = find_producer_topic_by_name((*producer_p)->topics, topic_name); 619 | if (topic == NULL) { 620 | lua_pushnil(L); 621 | lua_pushfstring(L, "Topic \"%s\" is not found", topic_name); 622 | return 2; 623 | } 624 | } 625 | 626 | int timeout_ms = lua_tointeger(L, 3); 627 | return lua_librdkafka_metadata(L, (*producer_p)->rd_producer, topic, timeout_ms); 628 | } 629 | return 0; 630 | } 631 | 632 | int 633 | lua_producer_list_groups(struct lua_State *L) { 634 | producer_t **producer_p = luaL_checkudata(L, 1, producer_label); 635 | if (producer_p == NULL || *producer_p == NULL) 636 | return 0; 637 | 638 | if ((*producer_p)->rd_producer != NULL) { 639 | const char *group = lua_tostring(L, 2); 640 | int timeout_ms = lua_tointeger(L, 3); 641 | return lua_librdkafka_list_groups(L, (*producer_p)->rd_producer, group, timeout_ms); 642 | } 643 | return 0; 644 | } 645 | -------------------------------------------------------------------------------- /kafka/producer.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_PRODUCER_H 2 | #define TNT_KAFKA_PRODUCER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | int 9 | lua_producer_tostring(struct lua_State *L); 10 | 11 | int 12 | lua_producer_msg_delivery_poll(struct lua_State *L); 13 | 14 | int 15 | lua_producer_poll_logs(struct lua_State *L); 16 | 17 | int 18 | lua_producer_poll_stats(struct lua_State *L); 19 | 20 | int 21 | lua_producer_poll_errors(struct lua_State *L); 22 | 23 | int 24 | lua_producer_produce(struct lua_State *L); 25 | 26 | int 27 | lua_producer_close(struct lua_State *L); 28 | 29 | int 30 | lua_create_producer(struct lua_State *L); 31 | 32 | int 33 | lua_producer_destroy(struct lua_State *L); 34 | 35 | int 36 | lua_producer_dump_conf(struct lua_State *L); 37 | 38 | int 39 | lua_producer_metadata(struct lua_State *L); 40 | 41 | int 42 | lua_producer_list_groups(struct lua_State *L); 43 | 44 | #endif //TNT_KAFKA_PRODUCER_H 45 | -------------------------------------------------------------------------------- /kafka/queue.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | //////////////////////////////////////////////////////////////////////////////////////////////////// 9 | /** 10 | * General thread safe queue based on licked list 11 | */ 12 | 13 | /** 14 | * Pop without locking mutex. 15 | * Caller must lock and unlock queue mutex by itself. 16 | * Use with caution! 17 | * @param queue 18 | * @return 19 | */ 20 | void * 21 | queue_lockfree_pop(queue_t *queue) { 22 | void *output = NULL; 23 | 24 | if (queue->head != NULL) { 25 | output = queue->head->value; 26 | queue_node_t *tmp = queue->head; 27 | queue->head = queue->head->next; 28 | free(tmp); 29 | if (queue->head == NULL) { 30 | queue->tail = NULL; 31 | } 32 | 33 | queue->count -= 1; 34 | } 35 | 36 | return output; 37 | } 38 | 39 | void * 40 | queue_pop(queue_t *queue) { 41 | pthread_mutex_lock(&queue->lock); 42 | 43 | void *output = queue_lockfree_pop(queue); 44 | 45 | pthread_mutex_unlock(&queue->lock); 46 | 47 | return output; 48 | } 49 | 50 | /** 51 | * Push without locking mutex. 52 | * Caller must lock and unlock queue mutex by itself. 53 | * Use with caution! 
54 | * @param queue 55 | * @param value 56 | * @return 57 | */ 58 | void 59 | queue_lockfree_push(queue_t *queue, void *value) { 60 | queue_node_t *new_node; 61 | new_node = xmalloc(sizeof(queue_node_t)); 62 | new_node->value = value; 63 | new_node->next = NULL; 64 | 65 | if (queue->tail != NULL) { 66 | queue->tail->next = new_node; 67 | } 68 | 69 | queue->tail = new_node; 70 | if (queue->head == NULL) { 71 | queue->head = new_node; 72 | } 73 | 74 | queue->count += 1; 75 | } 76 | 77 | int 78 | queue_push(queue_t *queue, void *value) { 79 | if (value == NULL || queue == NULL) { 80 | return -1; 81 | } 82 | 83 | pthread_mutex_lock(&queue->lock); 84 | queue_lockfree_push(queue, value); 85 | pthread_mutex_unlock(&queue->lock); 86 | 87 | return 0; 88 | } 89 | 90 | queue_t * 91 | new_queue() { 92 | queue_t *queue = xmalloc(sizeof(queue_t)); 93 | pthread_mutex_t lock; 94 | if (pthread_mutex_init(&lock, NULL) != 0) { 95 | free(queue); 96 | return NULL; 97 | } 98 | 99 | queue->lock = lock; 100 | queue->head = NULL; 101 | queue->tail = NULL; 102 | queue->count = 0; 103 | 104 | return queue; 105 | } 106 | 107 | void 108 | destroy_queue(queue_t *queue) { 109 | if (queue == NULL) 110 | return; 111 | pthread_mutex_destroy(&queue->lock); 112 | free(queue); 113 | } 114 | -------------------------------------------------------------------------------- /kafka/queue.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_QUEUE_H 2 | #define TNT_KAFKA_QUEUE_H 3 | 4 | #include 5 | 6 | //////////////////////////////////////////////////////////////////////////////////////////////////// 7 | /** 8 | * General thread safe queue based on licked list 9 | */ 10 | 11 | typedef struct queue_node_t { 12 | void *value; 13 | struct queue_node_t *next; 14 | } queue_node_t; 15 | 16 | typedef struct { 17 | pthread_mutex_t lock; 18 | queue_node_t *head; 19 | queue_node_t *tail; 20 | int count; 21 | } queue_t; 22 | 23 | /** 24 | * Pop without locking mutex. 25 | * Caller must lock and unlock queue mutex by itself. 26 | * Use with caution! 27 | * @param queue 28 | * @return 29 | */ 30 | void * 31 | queue_lockfree_pop(queue_t *queue); 32 | 33 | void * 34 | queue_pop(queue_t *queue); 35 | 36 | /** 37 | * Push without locking mutex. 38 | * Caller must lock and unlock queue mutex by itself. 39 | * Use with caution! 
40 | * @param queue 41 | * @param value 42 | * @return 43 | */ 44 | void 45 | queue_lockfree_push(queue_t *queue, void *value); 46 | 47 | int 48 | queue_push(queue_t *queue, void *value); 49 | 50 | queue_t * 51 | new_queue(); 52 | 53 | void destroy_queue(queue_t *queue); 54 | 55 | #endif // TNT_KAFKA_QUEUE_H 56 | -------------------------------------------------------------------------------- /kafka/tnt_kafka.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | //////////////////////////////////////////////////////////////////////////////////////////////////// 13 | /** 14 | * Entry point 15 | */ 16 | 17 | LUA_API int __attribute__ ((visibility("default"))) 18 | luaopen_kafka_tntkafka(lua_State *L) { 19 | static const struct luaL_Reg consumer_methods [] = { 20 | {"subscribe", lua_consumer_subscribe}, 21 | {"unsubscribe", lua_consumer_unsubscribe}, 22 | {"poll_msg", lua_consumer_poll_msg}, 23 | {"poll_logs", lua_consumer_poll_logs}, 24 | {"poll_stats", lua_consumer_poll_stats}, 25 | {"poll_errors", lua_consumer_poll_errors}, 26 | {"poll_rebalances", lua_consumer_poll_rebalances}, 27 | {"store_offset", lua_consumer_store_offset}, 28 | {"seek_partitions", lua_consumer_seek_partitions}, 29 | {"dump_conf", lua_consumer_dump_conf}, 30 | {"metadata", lua_consumer_metadata}, 31 | {"list_groups", lua_consumer_list_groups}, 32 | {"pause", lua_consumer_pause}, 33 | {"resume", lua_consumer_resume}, 34 | {"close", lua_consumer_close}, 35 | {"destroy", lua_consumer_destroy}, 36 | {"rebalance_protocol", lua_consumer_rebalance_protocol}, 37 | {"__tostring", lua_consumer_tostring}, 38 | {NULL, NULL} 39 | }; 40 | 41 | luaL_newmetatable(L, consumer_label); 42 | lua_pushvalue(L, -1); 43 | luaL_register(L, NULL, consumer_methods); 44 | lua_setfield(L, -2, "__index"); 45 | lua_pushstring(L, consumer_label); 46 | lua_setfield(L, -2, "__metatable"); 47 | lua_pop(L, 1); 48 | 49 | static const struct luaL_Reg consumer_msg_methods [] = { 50 | {"topic", lua_consumer_msg_topic}, 51 | {"partition", lua_consumer_msg_partition}, 52 | {"headers", lua_consumer_msg_headers}, 53 | {"offset", lua_consumer_msg_offset}, 54 | {"key", lua_consumer_msg_key}, 55 | {"value", lua_consumer_msg_value}, 56 | {"__tostring", lua_consumer_msg_tostring}, 57 | {"__gc", lua_consumer_msg_gc}, 58 | {NULL, NULL} 59 | }; 60 | 61 | luaL_newmetatable(L, consumer_msg_label); 62 | lua_pushvalue(L, -1); 63 | luaL_register(L, NULL, consumer_msg_methods); 64 | lua_setfield(L, -2, "__index"); 65 | lua_pushstring(L, consumer_msg_label); 66 | lua_setfield(L, -2, "__metatable"); 67 | lua_pop(L, 1); 68 | 69 | static const struct luaL_Reg producer_methods [] = { 70 | {"produce", lua_producer_produce}, 71 | {"msg_delivery_poll", lua_producer_msg_delivery_poll}, 72 | {"poll_logs", lua_producer_poll_logs}, 73 | {"poll_stats", lua_producer_poll_stats}, 74 | {"poll_errors", lua_producer_poll_errors}, 75 | {"dump_conf", lua_producer_dump_conf}, 76 | {"metadata", lua_producer_metadata}, 77 | {"list_groups", lua_producer_list_groups}, 78 | {"close", lua_producer_close}, 79 | {"destroy", lua_producer_destroy}, 80 | {"__tostring", lua_producer_tostring}, 81 | {NULL, NULL} 82 | }; 83 | 84 | luaL_newmetatable(L, producer_label); 85 | lua_pushvalue(L, -1); 86 | luaL_register(L, NULL, producer_methods); 87 | lua_setfield(L, -2, "__index"); 88 | lua_pushstring(L, producer_label); 89 | lua_setfield(L, -2, "__metatable"); 90 
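These metatables back the userdata handles returned by the factory table registered just below. The shared library is loaded as a Lua module whose name is inferred here from the luaopen_kafka_tntkafka symbol, so the exact require path is an assumption; most code should prefer the high-level require('kafka') wrapper. A sketch of poking at the low-level binding directly:

-- Assumed module name, derived from luaopen_kafka_tntkafka.
local tntkafka = require('kafka.tntkafka')

print(tntkafka.librdkafka_version())  -- bundled librdkafka version string

local raw, err = tntkafka.create_producer({ brokers = 'localhost:9092' })  -- placeholder broker
print(tostring(raw))                  -- "Kafka Producer: 0x..."
raw:close()
raw:destroy()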
| lua_pop(L, 1); 91 | 92 | lua_newtable(L); 93 | static const struct luaL_Reg meta [] = { 94 | {"create_consumer", lua_create_consumer}, 95 | {"create_producer", lua_create_producer}, 96 | {"librdkafka_version", lua_librdkafka_version}, 97 | {NULL, NULL} 98 | }; 99 | luaL_register(L, NULL, meta); 100 | return 1; 101 | } 102 | -------------------------------------------------------------------------------- /kafka/tnt_kafka.h: -------------------------------------------------------------------------------- 1 | #ifndef TNT_KAFKA_TNT_KAFKA_H 2 | #define TNT_KAFKA_TNT_KAFKA_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | //////////////////////////////////////////////////////////////////////////////////////////////////// 9 | /** 10 | * Entry point 11 | */ 12 | 13 | LUA_API int luaopen_kafka_tntkafka(lua_State *L); 14 | 15 | #endif //TNT_KAFKA_TNT_KAFKA_H 16 | -------------------------------------------------------------------------------- /kafka/version.lua: -------------------------------------------------------------------------------- 1 | -- Сontains the module version. 2 | -- Requires manual update in case of release commit. 3 | 4 | return '1.6.10' 5 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-36.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdhttp.c b/src/rdhttp.c 2 | index dca6c6f8..5a290c5b 100644 3 | --- a/src/rdhttp.c 4 | +++ b/src/rdhttp.c 5 | @@ -345,6 +345,7 @@ rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk, 6 | 7 | /* Retry */ 8 | rd_http_error_destroy(herr); 9 | + herr = 0; 10 | rd_usleep(retry_ms * 1000 * (i + 1), &rk->rk_terminate); 11 | } 12 | 13 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-47.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_assignment.c b/src/rdkafka_assignment.c 2 | index 6d1f0191..ee4cea61 100644 3 | --- a/src/rdkafka_assignment.c 4 | +++ b/src/rdkafka_assignment.c 5 | @@ -315,21 +315,22 @@ static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk, 6 | rd_kafka_dbg( 7 | rk, CGRP, "OFFSET", 8 | "Offset fetch error for %d partition(s): %s", 9 | - offsets->cnt, rd_kafka_err2str(err)); 10 | + offsets ? offsets->cnt : -1, rd_kafka_err2str(err)); 11 | rd_kafka_consumer_err( 12 | rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, 13 | NULL, NULL, RD_KAFKA_OFFSET_INVALID, 14 | "Failed to fetch committed offsets for " 15 | "%d partition(s) in group \"%s\": %s", 16 | - offsets->cnt, rk->rk_group_id->str, 17 | + offsets ? 
offsets->cnt : -1, rk->rk_group_id->str, 18 | rd_kafka_err2str(err)); 19 | } 20 | } 21 | 22 | /* Apply the fetched offsets to the assignment */ 23 | - rd_kafka_assignment_apply_offsets(rk, offsets, err); 24 | - 25 | - rd_kafka_topic_partition_list_destroy(offsets); 26 | + if (offsets) { 27 | + rd_kafka_assignment_apply_offsets(rk, offsets, err); 28 | + rd_kafka_topic_partition_list_destroy(offsets); 29 | + } 30 | } 31 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-52.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c 2 | index a8a1204b..65fbcbfd 100644 3 | --- a/src/rdkafka_conf.c 4 | +++ b/src/rdkafka_conf.c 5 | @@ -3463,7 +3463,7 @@ rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, 6 | return RD_KAFKA_RESP_ERR__INVALID_ARG; 7 | } 8 | 9 | - vlen = strlen(v); 10 | + vlen = v ? strlen(v) : 0; 11 | if ((confval->u.STR.minlen || confval->u.STR.maxlen) && 12 | (vlen < confval->u.STR.minlen || 13 | vlen > confval->u.STR.maxlen)) { 14 | @@ -3479,7 +3479,7 @@ rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, 15 | if (confval->u.STR.v) 16 | rd_free(confval->u.STR.v); 17 | 18 | - confval->u.STR.v = rd_strdup(v); 19 | + confval->u.STR.v = v ? rd_strdup(v) : rd_strdup(""); 20 | } break; 21 | 22 | case RD_KAFKA_CONFVAL_PTR: 23 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-55.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c 2 | index 2d889e09..4e26a40c 100644 3 | --- a/src/rdkafka_partition.c 4 | +++ b/src/rdkafka_partition.c 5 | @@ -1162,7 +1162,7 @@ void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp, 6 | if (rktp->rktp_broker || rkb) 7 | rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb); 8 | 9 | - if (internal_fallback) 10 | + if (internal_fallback && rkb) 11 | rd_kafka_broker_destroy(rkb); 12 | } 13 | 14 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-70.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/lz4.c b/src/lz4.c 2 | index 335e2a03..6299373a 100644 3 | --- a/src/lz4.c 4 | +++ b/src/lz4.c 5 | @@ -1009,7 +1009,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( 6 | } /* too far */ 7 | assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ 8 | 9 | - if (LZ4_read32(match) == LZ4_read32(ip)) { 10 | + if (match != NULL && LZ4_read32(match) == LZ4_read32(ip)) { 11 | if (maybe_extMem) offset = current - matchIndex; 12 | break; /* match found */ 13 | } 14 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-71.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_topic.c b/src/rdkafka_topic.c 2 | index 7f79a2ff..eddd2b5a 100644 3 | --- a/src/rdkafka_topic.c 4 | +++ b/src/rdkafka_topic.c 5 | @@ -854,6 +854,7 @@ static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt, 6 | /* Remove from desp list since the 7 | * partition is now known. 
*/ 8 | rd_kafka_toppar_desired_unlink(rktp); 9 | + rktp = NULL; 10 | } else { 11 | rktp = rd_kafka_toppar_new(rkt, i); 12 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-72.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_subscription.c b/src/rdkafka_subscription.c 2 | index 08058935..2974d0dc 100644 3 | --- a/src/rdkafka_subscription.c 4 | +++ b/src/rdkafka_subscription.c 5 | @@ -196,8 +196,8 @@ const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk) { 6 | } 7 | 8 | result = rko->rko_u.rebalance_protocol.str; 9 | - 10 | - rd_kafka_op_destroy(rko); 11 | + rd_kafka_op_t *rko_ = rko; 12 | + rd_kafka_op_destroy(rko_); 13 | 14 | return result; 15 | } 16 | -------------------------------------------------------------------------------- /patches/librdkafka-tarantool-security-94.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c 2 | index 2d889e09..cf367d3a 100644 3 | --- a/src/rdkafka_partition.c 4 | +++ b/src/rdkafka_partition.c 5 | @@ -3612,12 +3612,14 @@ reply: 6 | 7 | if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr, 8 | RD_DO_LOCK)) 9 | - rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 10 | - "query timer"); 11 | + if (rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 12 | + "query timer")) 13 | + rko->rko_u.leaders.eonce = NULL; 14 | if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, 15 | RD_DO_LOCK)) 16 | - rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 17 | - "timeout timer"); 18 | + if (rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, 19 | + "timeout timer")) 20 | + rko->rko_u.leaders.eonce = NULL; 21 | 22 | if (rko->rko_u.leaders.eonce) { 23 | rd_kafka_enq_once_disable(rko->rko_u.leaders.eonce); 24 | diff --git a/src/rdkafka_queue.h b/src/rdkafka_queue.h 25 | index 0d50f587..04dddbf9 100644 26 | --- a/src/rdkafka_queue.h 27 | +++ b/src/rdkafka_queue.h 28 | @@ -983,7 +983,8 @@ rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 29 | 30 | 31 | /** 32 | - * @brief Decrement refcount for source (non-owner), such as a timer. 33 | + * @brief Decrement refcount for source (non-owner), such as a timer 34 | + * and return 1 if eonce was destroyed. 35 | * 36 | * @param srcdesc a human-readable descriptive string of the source. 37 | * May be used for future debugging. 38 | @@ -993,7 +994,7 @@ rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 39 | * This API is used to undo an add_source() from the 40 | * same code. 41 | */ 42 | -static RD_INLINE RD_UNUSED void 43 | +static RD_INLINE RD_UNUSED int 44 | rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 45 | int do_destroy; 46 | 47 | @@ -1006,7 +1007,10 @@ rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { 48 | if (do_destroy) { 49 | /* We're the last refcount holder, clean up eonce. 
*/ 50 | rd_kafka_enq_once_destroy0(eonce); 51 | + return 1; 52 | } 53 | + 54 | + return 0; 55 | } 56 | 57 | /** 58 | -------------------------------------------------------------------------------- /tests/app.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | local box = require('box') 4 | 5 | box.cfg{ 6 | listen = 3301 7 | } 8 | 9 | box.once('init', function() 10 | box.schema.user.grant("guest", 'read,write,execute,create,drop', 'universe') 11 | end) 12 | 13 | 14 | rawset(_G, 'consumer', require('tests.consumer')) 15 | rawset(_G, 'producer', require('tests.producer')) 16 | -------------------------------------------------------------------------------- /tests/consumer.lua: -------------------------------------------------------------------------------- 1 | local box = require("box") 2 | local json = require("json") 3 | local log = require("log") 4 | local fiber = require('fiber') 5 | local tnt_kafka = require('kafka') 6 | 7 | local consumer = nil 8 | local errors = {} 9 | local logs = {} 10 | local stats = {} 11 | local rebalances = {} 12 | 13 | local function create(brokers, additional_opts) 14 | local err 15 | errors = {} 16 | logs = {} 17 | stats = {} 18 | rebalances = {} 19 | local error_callback = function(err) 20 | log.error("got error: %s", err) 21 | table.insert(errors, err) 22 | end 23 | local log_callback = function(fac, str, level) 24 | log.info("got log: %d - %s - %s", level, fac, str) 25 | table.insert(logs, string.format("got log: %d - %s - %s", level, fac, str)) 26 | end 27 | local stats_callback = function(json_stats) 28 | log.info("got stats") 29 | table.insert(stats, json_stats) 30 | end 31 | local rebalance_callback = function(msg) 32 | log.info("got rebalance msg: %s", json.encode(msg)) 33 | table.insert(rebalances, msg) 34 | end 35 | 36 | local options = { 37 | ["enable.auto.offset.store"] = "false", 38 | ["group.id"] = "test_consumer", 39 | ["auto.offset.reset"] = "earliest", 40 | ["enable.partition.eof"] = "false", 41 | ["log_level"] = "7", 42 | ["statistics.interval.ms"] = "1000", 43 | } 44 | if additional_opts ~= nil then 45 | for key, value in pairs(additional_opts) do 46 | if value == nil then 47 | options[key] = nil 48 | else 49 | options[key] = value 50 | end 51 | end 52 | end 53 | consumer, err = tnt_kafka.Consumer.create({ 54 | brokers = brokers, 55 | options = options, 56 | error_callback = error_callback, 57 | log_callback = log_callback, 58 | stats_callback = stats_callback, 59 | rebalance_callback = rebalance_callback, 60 | default_topic_options = { 61 | ["auto.offset.reset"] = "earliest", 62 | }, 63 | }) 64 | if err ~= nil then 65 | log.error("got err %s", err) 66 | box.error{code = 500, reason = err} 67 | end 68 | log.info("consumer created") 69 | end 70 | 71 | local function subscribe(topics) 72 | log.info("consumer subscribing") 73 | log.info(topics) 74 | local err = consumer:subscribe(topics) 75 | if err ~= nil then 76 | log.error("got err %s", err) 77 | box.error{code = 500, reason = err} 78 | end 79 | log.info("consumer subscribed") 80 | end 81 | 82 | local function unsubscribe(topics) 83 | log.info("consumer unsubscribing") 84 | log.info(topics) 85 | local err = consumer:unsubscribe(topics) 86 | if err ~= nil then 87 | log.error("got err %s", err) 88 | box.error{code = 500, reason = err} 89 | end 90 | log.info("consumer unsubscribed") 91 | end 92 | 93 | local function msg_totable(msg) 94 | return { 95 | value = msg:value(), 96 | key = msg:key(), 97 | topic = msg:topic(), 98 | 
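Because app.lua publishes these helpers as the globals 'consumer' and 'producer', the pytest suite drives them purely through IPROTO calls; the same can be done from another Tarantool instance with net.box. A sketch (addresses, group id and topic are placeholders):

local netbox = require('net.box')
local conn = netbox.connect('127.0.0.1:3301')

conn:call('consumer.create', { 'localhost:9092', { ['group.id'] = 'console_group' } })
conn:call('consumer.subscribe', { { 'test_consume' } })
local msgs = conn:call('consumer.consume', { 10 })  -- collect messages for up to 10 seconds
conn:call('consumer.close', {})
conn:close()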
partition = msg:partition(), 99 | offset = msg:offset(), 100 | headers = msg:headers(), 101 | } 102 | end 103 | 104 | local function append_message(t, msg) 105 | table.insert(t, msg_totable(msg)) 106 | end 107 | 108 | local function consume(timeout) 109 | log.info("consume called") 110 | 111 | local consumed = {} 112 | local f = fiber.create(function() 113 | local out = consumer:output() 114 | while true do 115 | if out:is_closed() then 116 | break 117 | end 118 | 119 | local msg = out:get() 120 | if msg ~= nil then 121 | log.info("%s", msg) 122 | log.info("got msg with topic='%s' partition='%d' offset='%d' key='%s' value='%s'", msg:topic(), msg:partition(), msg:offset(), msg:key(), msg:value()) 123 | append_message(consumed, msg) 124 | local err = consumer:store_offset(msg) 125 | if err ~= nil then 126 | log.error("got error '%s' while committing msg from topic '%s'", err, msg:topic()) 127 | end 128 | else 129 | fiber.sleep(0.2) 130 | end 131 | end 132 | end) 133 | 134 | log.info("consume wait") 135 | fiber.sleep(timeout) 136 | log.info("consume ends") 137 | 138 | f:cancel() 139 | 140 | return consumed 141 | end 142 | 143 | local function get_errors() 144 | return errors 145 | end 146 | 147 | local function get_logs() 148 | return logs 149 | end 150 | 151 | local function get_stats() 152 | return stats 153 | end 154 | 155 | local function get_rebalances() 156 | return rebalances 157 | end 158 | 159 | local function dump_conf() 160 | return consumer:dump_conf() 161 | end 162 | 163 | local function metadata(timeout_ms) 164 | return consumer:metadata({timeout_ms = timeout_ms}) 165 | end 166 | 167 | local function list_groups(timeout_ms) 168 | local res, err = consumer:list_groups({timeout_ms = timeout_ms}) 169 | if err ~= nil then 170 | return nil, err 171 | end 172 | log.info("Groups: %s", json.encode(res)) 173 | -- Some fields can have binary data that won't 174 | -- be correctly processed by connector. 
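The consume() helper above is the canonical read loop for this module: take messages from consumer:output() (a fiber channel), process them, then store_offset() so that, with enable.auto.offset.store = false, offsets are committed only after processing. A trimmed version of the same loop, assuming the file-local consumer created above:

local log = require('log')

local out = consumer:output()
while not out:is_closed() do
    local msg = out:get(1)  -- block for up to one second
    if msg ~= nil then
        log.info('%s/%d@%d: %s', msg:topic(), msg:partition(), msg:offset(), msg:value())
        local err = consumer:store_offset(msg)
        if err ~= nil then
            log.error('store_offset failed: %s', err)
        end
    end
end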
175 | for _, group in ipairs(res) do 176 | group['members'] = nil 177 | end 178 | return res 179 | end 180 | 181 | local function pause() 182 | return consumer:pause() 183 | end 184 | 185 | local function resume() 186 | return consumer:resume() 187 | end 188 | 189 | local function close() 190 | log.info("closing consumer") 191 | local _, err = consumer:close() 192 | if err ~= nil then 193 | log.error("got err %s", err) 194 | box.error{code = 500, reason = err} 195 | end 196 | log.info("consumer closed") 197 | end 198 | 199 | local function test_seek_partitions() 200 | log.info('Test seek') 201 | local messages = {} 202 | 203 | local out = consumer:output() 204 | 205 | for _ = 1, 5 do 206 | local msg = out:get(3) 207 | if msg == nil then 208 | error('Message is not delivered') 209 | end 210 | log.info('Get message: %s', json.encode(msg_totable(msg))) 211 | append_message(messages, msg) 212 | consumer:seek_partitions({ 213 | {msg:topic(), msg:partition(), msg:offset()} 214 | }, 1000) 215 | end 216 | 217 | return messages 218 | end 219 | 220 | local function rebalance_protocol() 221 | return consumer:rebalance_protocol() 222 | end 223 | 224 | local function test_create_errors() 225 | log.info('Create without config') 226 | local _, err = tnt_kafka.Consumer.create() 227 | assert(err == 'config must not be nil') 228 | 229 | log.info('Create with empty config') 230 | local _, err = tnt_kafka.Consumer.create({}) 231 | assert(err == 'consumer config table must have non nil key \'brokers\' which contains string') 232 | 233 | log.info('Create with empty brokers') 234 | local _, err = tnt_kafka.Consumer.create({brokers = ''}) 235 | assert(err == 'No valid brokers specified') 236 | 237 | log.info('Create with invalid default_topic_options keys') 238 | local _, err = tnt_kafka.Consumer.create({brokers = '', default_topic_options = {[{}] = 2}}) 239 | assert(err == 'consumer config default topic options must contains only string keys and string values') 240 | 241 | log.info('Create with invalid default_topic_options property') 242 | local _, err = tnt_kafka.Consumer.create({brokers = '', default_topic_options = {[2] = 2}}) 243 | assert(err == 'No such configuration property: "2"') 244 | 245 | log.info('Create with invalid options keys') 246 | local _, err = tnt_kafka.Consumer.create({brokers = '', options = {[{}] = 2}}) 247 | assert(err == 'consumer config options must contains only string keys and string values') 248 | 249 | log.info('Create with invalid options property') 250 | local _, err = tnt_kafka.Consumer.create({brokers = '', options = {[2] = 2}}) 251 | assert(err == 'No such configuration property: "2"') 252 | 253 | log.info('Create with incompatible properties') 254 | local _, err = tnt_kafka.Consumer.create({brokers = '', options = {['reconnect.backoff.max.ms'] = '2', ['reconnect.backoff.ms'] = '1000'}}) 255 | assert(err == '`reconnect.backoff.max.ms` must be >= `reconnect.backoff.ms`') 256 | end 257 | 258 | return { 259 | create = create, 260 | subscribe = subscribe, 261 | unsubscribe = unsubscribe, 262 | consume = consume, 263 | close = close, 264 | get_errors = get_errors, 265 | get_logs = get_logs, 266 | get_stats = get_stats, 267 | get_rebalances = get_rebalances, 268 | dump_conf = dump_conf, 269 | metadata = metadata, 270 | list_groups = list_groups, 271 | pause = pause, 272 | resume = resume, 273 | rebalance_protocol = rebalance_protocol, 274 | 275 | test_seek_partitions = test_seek_partitions, 276 | test_create_errors = test_create_errors, 277 | } 278 | 
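test_seek_partitions() above also documents the seek contract: seek_partitions() takes a list of {topic, partition, offset} triples plus a timeout in milliseconds, so re-reading a message is just seeking back to its own coordinates. A reduced sketch, assuming msg is a message taken from consumer:output():

-- Rewind this partition so the same message is delivered again.
consumer:seek_partitions({
    { msg:topic(), msg:partition(), msg:offset() },
}, 1000)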
-------------------------------------------------------------------------------- /tests/producer.lua: -------------------------------------------------------------------------------- 1 | local box = require('box') 2 | local log = require('log') 3 | local json = require('json') 4 | local tnt_kafka = require('kafka') 5 | 6 | local TOPIC_NAME = "test_producer" 7 | 8 | local producer = nil 9 | local errors = {} 10 | local logs = {} 11 | local stats = {} 12 | 13 | local function create(brokers, additional_opts) 14 | local err 15 | errors = {} 16 | logs = {} 17 | stats = {} 18 | local error_callback = function(err) 19 | log.error("got error: %s", err) 20 | table.insert(errors, err) 21 | end 22 | local log_callback = function(fac, str, level) 23 | log.info("got log: %d - %s - %s", level, fac, str) 24 | table.insert(logs, string.format("got log: %d - %s - %s", level, fac, str)) 25 | end 26 | local stats_callback = function(json_stats) 27 | log.info("got stats") 28 | table.insert(stats, json_stats) 29 | end 30 | 31 | local options = { 32 | ["statistics.interval.ms"] = "1000", 33 | } 34 | if additional_opts ~= nil then 35 | for key, value in pairs(additional_opts) do 36 | options[key] = value 37 | end 38 | end 39 | 40 | producer, err = tnt_kafka.Producer.create({ 41 | brokers = brokers, 42 | options = options, 43 | log_callback = log_callback, 44 | stats_callback = stats_callback, 45 | error_callback = error_callback, 46 | default_topic_options = { 47 | ["partitioner"] = "murmur2_random", 48 | }, 49 | }) 50 | if err ~= nil then 51 | log.error("got err %s", err) 52 | box.error{code = 500, reason = err} 53 | end 54 | end 55 | 56 | local function produce(messages) 57 | for _, message in ipairs(messages) do 58 | local err = producer:produce({ 59 | topic = TOPIC_NAME, 60 | key = message.key, 61 | value = message.value, 62 | headers = message.headers, 63 | }) 64 | if err ~= nil then 65 | log.error("got error '%s' while sending value '%s'", err, json.encode(message)) 66 | else 67 | log.error("successfully sent value '%s'", json.encode(message)) 68 | end 69 | end 70 | end 71 | 72 | local function dump_conf() 73 | return producer:dump_conf() 74 | end 75 | 76 | local function get_errors() 77 | return errors 78 | end 79 | 80 | local function get_logs() 81 | return logs 82 | end 83 | 84 | local function get_stats() 85 | return stats 86 | end 87 | 88 | local function metadata(timeout_ms, topic) 89 | return producer:metadata({timeout_ms = timeout_ms, topic = topic}) 90 | end 91 | 92 | local function list_groups(timeout_ms) 93 | local res, err = producer:list_groups({timeout_ms = timeout_ms}) 94 | if err ~= nil then 95 | return nil, err 96 | end 97 | log.info("Groups: %s", json.encode(res)) 98 | -- Some fields can have binary data that won't 99 | -- be correctly processed by connector. 
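The stats_callback registered in this helper receives librdkafka's statistics as a raw JSON string every statistics.interval.ms, so it is usually decoded before inspection. A sketch that keeps only a few top-level fields; field names follow librdkafka's statistics output:

local json = require('json')

local last_stats
local stats_callback = function(json_stats)
    local decoded = json.decode(json_stats)
    last_stats = {
        name = decoded.name,        -- e.g. "rdkafka#producer-1"
        type = decoded.type,        -- "producer" or "consumer"
        brokers = decoded.brokers,  -- per-broker connection and queue statistics
    }
end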
100 | for _, group in ipairs(res) do 101 | group['members'] = nil 102 | end 103 | return res 104 | end 105 | 106 | local function close() 107 | local _, err = producer:close() 108 | if err ~= nil then 109 | log.error("got err %s", err) 110 | box.error{code = 500, reason = err} 111 | end 112 | end 113 | 114 | local function test_create_errors() 115 | log.info('Create without config') 116 | local _, err = tnt_kafka.Producer.create() 117 | assert(err == 'config must not be nil') 118 | 119 | log.info('Create with empty config') 120 | local _, err = tnt_kafka.Producer.create({}) 121 | assert(err == 'producer config table must have non nil key \'brokers\' which contains string') 122 | 123 | log.info('Create with empty brokers') 124 | local _, err = tnt_kafka.Producer.create({brokers = ''}) 125 | assert(err == 'No valid brokers specified') 126 | 127 | log.info('Create with invalid default_topic_options keys') 128 | local _, err = tnt_kafka.Producer.create({brokers = '', default_topic_options = {[{}] = 2}}) 129 | assert(err == 'producer config default topic options must contains only string keys and string values') 130 | 131 | log.info('Create with invalid default_topic_options property') 132 | local _, err = tnt_kafka.Producer.create({brokers = '', default_topic_options = {[2] = 2}}) 133 | assert(err == 'No such configuration property: "2"') 134 | 135 | log.info('Create with invalid options keys') 136 | local _, err = tnt_kafka.Producer.create({brokers = '', options = {[{}] = 2}}) 137 | assert(err == 'producer config options must contains only string keys and string values') 138 | 139 | log.info('Create with invalid options property') 140 | local _, err = tnt_kafka.Producer.create({brokers = '', options = {[2] = 2}}) 141 | assert(err == 'No such configuration property: "2"') 142 | 143 | log.info('Create with incompatible properties') 144 | local _, err = tnt_kafka.Producer.create({brokers = '', options = {['reconnect.backoff.max.ms'] = '2', ['reconnect.backoff.ms'] = '1000'}}) 145 | assert(err == '`reconnect.backoff.max.ms` must be >= `reconnect.backoff.ms`') 146 | end 147 | 148 | return { 149 | create = create, 150 | produce = produce, 151 | get_errors = get_errors, 152 | get_logs = get_logs, 153 | get_stats = get_stats, 154 | close = close, 155 | dump_conf = dump_conf, 156 | metadata = metadata, 157 | list_groups = list_groups, 158 | 159 | test_create_errors = test_create_errors, 160 | } 161 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest==8.2.0 2 | pytest-timeout==2.3.1 3 | kafka-python==2.0.2 4 | aiokafka==0.10.0 5 | tarantool==1.2.0 6 | -------------------------------------------------------------------------------- /tests/test_consumer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import json 4 | import asyncio 5 | from contextlib import contextmanager 6 | import random 7 | import string 8 | 9 | import pytest 10 | from aiokafka import AIOKafkaProducer 11 | import tarantool 12 | 13 | KAFKA_HOST = os.getenv("KAFKA_HOST", "kafka:9092") 14 | 15 | 16 | def randomword(length): 17 | letters = string.ascii_lowercase 18 | return ''.join(random.choice(letters) for i in range(length)) 19 | 20 | 21 | def get_message_values(messages): 22 | result = [] 23 | for msg in messages: 24 | if 'value' in msg: 25 | result.append(msg['value']) 26 | return result 27 | 28 | 29 | def 
get_server(): 30 | return tarantool.Connection("127.0.0.1", 3301, 31 | user="guest", 32 | password=None, 33 | socket_timeout=40, 34 | connection_timeout=40, 35 | reconnect_max_attempts=3, 36 | reconnect_delay=1, 37 | connect_now=True) 38 | 39 | 40 | @contextmanager 41 | def create_consumer(server, *args): 42 | try: 43 | server.call("consumer.create", args) 44 | yield 45 | 46 | finally: 47 | server.call("consumer.close", []) 48 | 49 | 50 | def write_into_kafka(topic, messages): 51 | loop = asyncio.get_event_loop_policy().new_event_loop() 52 | 53 | async def send(): 54 | producer = AIOKafkaProducer(bootstrap_servers='localhost:9092') 55 | # Get cluster layout and initial topic/partition leadership information 56 | await producer.start() 57 | try: 58 | # Produce message 59 | for msg in messages: 60 | headers = None 61 | if 'headers' in msg: 62 | headers = [] 63 | for k, v in msg['headers'].items(): 64 | headers.append((k, v.encode('utf-8') if v is not None else v)) 65 | await producer.send_and_wait( 66 | topic, 67 | value=msg['value'].encode('utf-8'), 68 | key=msg['key'].encode('utf-8'), 69 | headers=headers, 70 | ) 71 | 72 | finally: 73 | # Wait for all pending messages to be delivered or expire. 74 | await producer.stop() 75 | 76 | loop.run_until_complete(send()) 77 | loop.close() 78 | 79 | 80 | def test_consumer_should_consume_msgs(): 81 | message1 = { 82 | "key": "test1", 83 | "value": "test1", 84 | } 85 | 86 | message2 = { 87 | "key": "test1", 88 | "value": "test2", 89 | } 90 | 91 | message3 = { 92 | "key": "test1", 93 | "value": "test3", 94 | "headers": {"key1": "value1", "key2": "value2", "nullable": None}, 95 | } 96 | 97 | message4 = { 98 | "key": "", 99 | "value": "test4", 100 | } 101 | 102 | message5 = { 103 | "key": "", 104 | "value": "", 105 | } 106 | 107 | write_into_kafka("test_consume", ( 108 | message1, 109 | message2, 110 | message3, 111 | message4, 112 | message5, 113 | )) 114 | 115 | server = get_server() 116 | 117 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_consume_msgs"}): 118 | server.call("consumer.subscribe", [["test_consume"]]) 119 | 120 | response = server.call("consumer.consume", [10])[0] 121 | 122 | assert set(get_message_values(response)) == { 123 | "test1", 124 | "test2", 125 | "test3", 126 | "test4", 127 | } 128 | 129 | for msg in filter(lambda x: 'value' in x, response): 130 | if msg['value'] == 'test1': 131 | assert msg['key'] == 'test1' 132 | elif msg['value'] == 'test3': 133 | assert msg['headers'] == {'key1': 'value1', 'key2': 'value2', 'nullable': None} 134 | 135 | 136 | def test_consumer_seek_partitions(): 137 | key = "test_seek_unique_key" 138 | value = "test_seek_unique_value" 139 | message = { 140 | "key": key, 141 | "value": value, 142 | } 143 | 144 | topic = 'test_consumer_seek' + randomword(15) 145 | write_into_kafka(topic, (message,)) 146 | 147 | server = get_server() 148 | 149 | with create_consumer(server, KAFKA_HOST, {'group.id': 'consumer_seek'}): 150 | server.call('consumer.subscribe', [[topic]]) 151 | 152 | response = server.call("consumer.test_seek_partitions") 153 | assert len(response[0]) == 5 154 | 155 | for item in response[0]: 156 | assert item['key'] == key 157 | assert item['value'] == value 158 | 159 | 160 | def test_consumer_create_errors(): 161 | server = get_server() 162 | server.call("consumer.test_create_errors") 163 | 164 | 165 | def test_consumer_should_consume_msgs_from_multiple_topics(): 166 | message1 = { 167 | "key": "test1", 168 | "value": "test1" 169 | } 170 | 171 | message2 = { 172 | "key": "test1", 
173 | "value": "test2" 174 | } 175 | 176 | message3 = { 177 | "key": "test1", 178 | "value": "test33" 179 | } 180 | 181 | write_into_kafka("test_multi_consume_1", (message1, message2)) 182 | write_into_kafka("test_multi_consume_2", (message3, )) 183 | 184 | server = get_server() 185 | 186 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_consume_msgs_from_multiple_topics"}): 187 | server.call("consumer.subscribe", [["test_multi_consume_1", "test_multi_consume_2"]]) 188 | 189 | response = server.call("consumer.consume", [10])[0] 190 | 191 | assert set(get_message_values(response)) == { 192 | "test1", 193 | "test2", 194 | "test33" 195 | } 196 | 197 | 198 | def test_consumer_should_completely_unsubscribe_from_topics(): 199 | message1 = { 200 | "key": "test1", 201 | "value": "test1" 202 | } 203 | 204 | message2 = { 205 | "key": "test1", 206 | "value": "test2" 207 | } 208 | 209 | message3 = { 210 | "key": "test1", 211 | "value": "test34" 212 | } 213 | 214 | write_into_kafka("test_unsubscribe", (message1, message2)) 215 | 216 | server = get_server() 217 | 218 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_completely_unsubscribe_from_topics"}): 219 | server.call("consumer.subscribe", [["test_unsubscribe"]]) 220 | 221 | response = server.call("consumer.consume", [10])[0] 222 | 223 | assert set(get_message_values(response)) == { 224 | "test1", 225 | "test2", 226 | } 227 | 228 | server.call("consumer.unsubscribe", [["test_unsubscribe"]]) 229 | 230 | write_into_kafka("test_unsubscribe", (message3, )) 231 | 232 | response = server.call("consumer.consume", [10]) 233 | 234 | assert set(*response) == set() 235 | 236 | 237 | def test_consumer_should_partially_unsubscribe_from_topics(): 238 | message1 = { 239 | "key": "test1", 240 | "value": "test1" 241 | } 242 | 243 | message2 = { 244 | "key": "test1", 245 | "value": "test2" 246 | } 247 | 248 | message3 = { 249 | "key": "test1", 250 | "value": "test35" 251 | } 252 | 253 | message4 = { 254 | "key": "test1", 255 | "value": "test45" 256 | } 257 | 258 | server = get_server() 259 | 260 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_partially_unsubscribe_from_topics"}): 261 | server.call("consumer.subscribe", [["test_unsub_partially_1", "test_unsub_partially_2"]]) 262 | 263 | write_into_kafka("test_unsub_partially_1", (message1, )) 264 | write_into_kafka("test_unsub_partially_2", (message2, )) 265 | time.sleep(5) 266 | 267 | # waiting up to 30 seconds 268 | response = server.call("consumer.consume", [30])[0] 269 | 270 | assert set(get_message_values(response)) == { 271 | "test1", 272 | "test2", 273 | } 274 | 275 | server.call("consumer.unsubscribe", [["test_unsub_partially_1"]]) 276 | 277 | write_into_kafka("test_unsub_partially_1", (message3, )) 278 | write_into_kafka("test_unsub_partially_2", (message4, )) 279 | time.sleep(5) 280 | 281 | response = server.call("consumer.consume", [30])[0] 282 | 283 | assert set(get_message_values(response)) == {"test45"} 284 | 285 | 286 | def test_consumer_should_log_errors(): 287 | server = get_server() 288 | 289 | with create_consumer(server, "kafka:9090"): 290 | time.sleep(5) 291 | 292 | response = server.call("consumer.get_errors", []) 293 | 294 | assert len(response.data[0]) > 0 295 | 296 | 297 | def test_consumer_stats(): 298 | server = get_server() 299 | 300 | with create_consumer(server, "kafka:9090"): 301 | time.sleep(2) 302 | 303 | response = server.call("consumer.get_stats", []) 304 | assert len(response) > 0 305 | assert len(response[0]) > 0 306 | stat = 
json.loads(response[0][0]) 307 | 308 | assert 'rdkafka#consumer' in stat['name'] 309 | assert 'kafka:9090/bootstrap' in stat['brokers'] 310 | assert stat['type'] == 'consumer' 311 | 312 | 313 | def test_consumer_dump_conf(): 314 | server = get_server() 315 | 316 | with create_consumer(server, "kafka:9090"): 317 | time.sleep(2) 318 | 319 | response = server.call("consumer.dump_conf", []) 320 | assert len(response) > 0 321 | assert len(response[0]) > 0 322 | assert 'session.timeout.ms' in response[0] 323 | assert 'socket.max.fails' in response[0] 324 | assert 'compression.codec' in response[0] 325 | 326 | 327 | def test_consumer_metadata(): 328 | server = get_server() 329 | 330 | with create_consumer(server, KAFKA_HOST): 331 | time.sleep(2) 332 | 333 | response = server.call("consumer.metadata", []) 334 | assert 'orig_broker_name' in response[0] 335 | assert 'orig_broker_id' in response[0] 336 | assert 'brokers' in response[0] 337 | assert 'topics' in response[0] 338 | assert 'host' in response[0]['brokers'][0] 339 | assert 'port' in response[0]['brokers'][0] 340 | assert 'id' in response[0]['brokers'][0] 341 | 342 | response = server.call("consumer.metadata", [0]) 343 | assert tuple(response) == (None, 'Local: Timed out') 344 | 345 | response = server.call("consumer.list_groups", []) 346 | assert response[0] is not None 347 | response = server.call("consumer.list_groups", [0]) 348 | assert tuple(response) == (None, 'Local: Timed out') 349 | 350 | with create_consumer(server, "badhost:9090"): 351 | response = server.call("consumer.metadata", [0]) 352 | assert tuple(response) == (None, 'Local: Broker transport failure') 353 | 354 | response = server.call("consumer.metadata", [0]) 355 | assert tuple(response) == (None, 'Local: Broker transport failure') 356 | 357 | 358 | def test_consumer_should_log_debug(): 359 | server = get_server() 360 | 361 | with create_consumer(server, KAFKA_HOST, {"debug": "consumer,cgrp,topic,fetch"}): 362 | time.sleep(2) 363 | 364 | response = server.call("consumer.get_logs", []) 365 | 366 | assert len(response.data[0]) > 0 367 | 368 | 369 | def test_consumer_should_log_rebalances(): 370 | server = get_server() 371 | 372 | with create_consumer(server, KAFKA_HOST): 373 | time.sleep(5) 374 | 375 | server.call("consumer.subscribe", [["test_unsub_partially_1"]]) 376 | 377 | time.sleep(20) 378 | 379 | response = server.call("consumer.get_rebalances", []) 380 | 381 | assert len(response.data[0]) > 0 382 | 383 | 384 | def test_consumer_rebalance_protocol(): 385 | server = get_server() 386 | 387 | with create_consumer(server, KAFKA_HOST, {"bootstrap.servers": KAFKA_HOST}): 388 | time.sleep(5) 389 | response = server.call("consumer.rebalance_protocol", []) 390 | assert response[0] == 'NONE' 391 | 392 | server.call("consumer.subscribe", [["test_unsub_partially_1"]]) 393 | response = server.call("consumer.rebalance_protocol", []) 394 | assert response[0] == 'NONE' 395 | 396 | 397 | def test_consumer_should_continue_consuming_from_last_committed_offset(): 398 | message1 = { 399 | "key": "test1", 400 | "value": "test1" 401 | } 402 | 403 | message2 = { 404 | "key": "test1", 405 | "value": "test2" 406 | } 407 | 408 | message3 = { 409 | "key": "test1", 410 | "value": "test3" 411 | } 412 | 413 | message4 = { 414 | "key": "test1", 415 | "value": "test4" 416 | } 417 | 418 | server = get_server() 419 | 420 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_continue_consuming_from_last_committed_offset"}): 421 | server.call("consumer.subscribe", 
[["test_consuming_from_last_committed_offset"]]) 422 | 423 | write_into_kafka("test_consuming_from_last_committed_offset", (message1, )) 424 | write_into_kafka("test_consuming_from_last_committed_offset", (message2, )) 425 | 426 | # waiting up to 30 seconds 427 | response = server.call("consumer.consume", [30])[0] 428 | 429 | assert set(get_message_values(response)) == { 430 | "test1", 431 | "test2", 432 | } 433 | 434 | time.sleep(2) 435 | 436 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_continue_consuming_from_last_committed_offset"}): 437 | server.call("consumer.subscribe", [["test_consuming_from_last_committed_offset"]]) 438 | 439 | write_into_kafka("test_consuming_from_last_committed_offset", (message3, )) 440 | write_into_kafka("test_consuming_from_last_committed_offset", (message4, )) 441 | 442 | response = server.call("consumer.consume", [30])[0] 443 | 444 | assert set(get_message_values(response)) == { 445 | "test3", 446 | "test4", 447 | } 448 | 449 | 450 | def test_consumer_pause_resume(): 451 | message_before_pause = { 452 | "key": "message_before_pause", 453 | "value": "message_before_pause", 454 | } 455 | 456 | message_on_pause = { 457 | "key": "message_on_pause", 458 | "value": "message_on_pause", 459 | } 460 | 461 | message_after_pause = { 462 | "key": "message_after_pause", 463 | "value": "message_after_pause", 464 | } 465 | 466 | server = get_server() 467 | 468 | with create_consumer(server, KAFKA_HOST, {"group.id": "should_consume_msgs"}): 469 | server.call("consumer.subscribe", [["test_resume_pause"]]) 470 | 471 | write_into_kafka("test_resume_pause", (message_before_pause,)) 472 | 473 | response = server.call("consumer.consume", [10])[0] 474 | 475 | assert set(get_message_values(response)) == { 476 | "message_before_pause", 477 | } 478 | 479 | response = server.call("consumer.pause") 480 | assert len(response) == 0 481 | 482 | write_into_kafka("test_resume_pause", (message_on_pause,)) 483 | response = server.call("consumer.consume", [2])[0] 484 | assert len(response) == 0 485 | 486 | response = server.call("consumer.resume") 487 | assert len(response) == 0 488 | write_into_kafka("test_resume_pause", (message_after_pause,)) 489 | 490 | response = server.call("consumer.consume", [2])[0] 491 | assert set(get_message_values(response)) == { 492 | "message_on_pause", 493 | "message_after_pause", 494 | } 495 | 496 | 497 | @pytest.mark.timeout(5) 498 | def test_consumer_should_be_closed(): 499 | server = get_server() 500 | 501 | with create_consumer(server, '127.0.0.1:12345', {"group.id": None}): 502 | pass 503 | -------------------------------------------------------------------------------- /tests/test_producer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import json 4 | import asyncio 5 | 6 | from aiokafka import AIOKafkaConsumer 7 | import tarantool 8 | 9 | KAFKA_HOST = os.getenv("KAFKA_HOST", "kafka:9092") 10 | 11 | 12 | def get_server(): 13 | return tarantool.Connection("127.0.0.1", 3301, 14 | user="guest", 15 | password=None, 16 | socket_timeout=10, 17 | connection_timeout=40, 18 | reconnect_max_attempts=3, 19 | reconnect_delay=1, 20 | connect_now=True) 21 | 22 | 23 | def test_producer_should_produce_msgs(): 24 | server = get_server() 25 | 26 | server.call("producer.create", [KAFKA_HOST]) 27 | 28 | messages = [ 29 | {'key': '1', 'value': '1'}, 30 | {'key': '2', 'value': '2'}, 31 | {'key': '3', 'value': '3'}, 32 | {'key': '4', 'value': '4', 'headers': {'header1_key': 
'header1_value', 'header2_key': 'header2_value'}}, 33 | ] 34 | server.call("producer.produce", [messages]) 35 | 36 | loop = asyncio.get_event_loop_policy().new_event_loop() 37 | 38 | async def test(): 39 | kafka_output = [] 40 | 41 | async def consume(): 42 | consumer = AIOKafkaConsumer( 43 | 'test_producer', 44 | group_id="test_group", 45 | bootstrap_servers='localhost:9092', 46 | auto_offset_reset="earliest", 47 | ) 48 | # Get cluster layout 49 | await consumer.start() 50 | 51 | try: 52 | # Consume messages 53 | async for msg in consumer: 54 | kafka_msg = { 55 | 'key': msg.key if msg.key is None else msg.key.decode('utf8'), 56 | 'value': msg.value if msg.value is None else msg.value.decode('utf8') 57 | } 58 | if msg.headers: 59 | kafka_msg['headers'] = {} 60 | for k, v in msg.headers: 61 | kafka_msg['headers'][k] = v.decode('utf8') 62 | kafka_output.append(kafka_msg) 63 | 64 | finally: 65 | # Will leave consumer group; perform autocommit if enabled. 66 | await consumer.stop() 67 | 68 | try: 69 | await asyncio.wait_for(consume(), 10) 70 | except asyncio.TimeoutError: 71 | pass 72 | 73 | assert kafka_output == messages 74 | 75 | loop.run_until_complete(test()) 76 | loop.close() 77 | 78 | server.call("producer.close", []) 79 | 80 | 81 | def test_producer_should_log_errors(): 82 | server = get_server() 83 | 84 | server.call("producer.create", ["kafka:9090"]) 85 | 86 | time.sleep(2) 87 | 88 | response = server.call("producer.get_errors", []) 89 | 90 | assert len(response) > 0 91 | assert len(response[0]) > 0 92 | 93 | server.call("producer.close", []) 94 | 95 | 96 | def test_producer_stats(): 97 | server = get_server() 98 | 99 | server.call("producer.create", ["kafka:9090"]) 100 | 101 | time.sleep(2) 102 | 103 | response = server.call("producer.get_stats", []) 104 | assert len(response) > 0 105 | assert len(response[0]) > 0 106 | stat = json.loads(response[0][0]) 107 | 108 | assert 'rdkafka#producer' in stat['name'] 109 | assert 'kafka:9090/bootstrap' in stat['brokers'] 110 | assert stat['type'] == 'producer' 111 | 112 | server.call("producer.close", []) 113 | 114 | 115 | def test_producer_dump_conf(): 116 | server = get_server() 117 | 118 | server.call("producer.create", ["kafka:9090"]) 119 | 120 | time.sleep(2) 121 | 122 | response = server.call("producer.dump_conf", []) 123 | assert len(response) > 0 124 | assert len(response[0]) > 0 125 | assert 'session.timeout.ms' in response[0] 126 | assert 'socket.max.fails' in response[0] 127 | assert 'compression.codec' in response[0] 128 | 129 | server.call("producer.close", []) 130 | 131 | 132 | def test_producer_metadata(): 133 | server = get_server() 134 | 135 | server.call("producer.create", [KAFKA_HOST]) 136 | 137 | time.sleep(2) 138 | 139 | response = server.call("producer.metadata", []) 140 | assert 'orig_broker_name' in response[0] 141 | assert 'orig_broker_id' in response[0] 142 | assert 'brokers' in response[0] 143 | assert 'topics' in response[0] 144 | assert 'host' in response[0]['brokers'][0] 145 | assert 'port' in response[0]['brokers'][0] 146 | assert 'id' in response[0]['brokers'][0] 147 | 148 | response = server.call("producer.list_groups", []) 149 | assert response[0] is not None 150 | response = server.call("producer.list_groups", [0]) 151 | assert tuple(response) == (None, 'Local: Timed out') 152 | 153 | response = server.call("producer.metadata", [0]) 154 | assert tuple(response) == (None, 'Local: Timed out') 155 | 156 | server.call("producer.close", []) 157 | 158 | server.call("producer.create", ["badhost:8080"]) 159 | 
response = server.call("producer.metadata", [200]) 160 | assert tuple(response) == (None, 'Local: Broker transport failure') 161 | response = server.call("producer.list_groups", [200]) 162 | assert response[0] is None 163 | server.call("producer.close", []) 164 | 165 | 166 | def test_producer_should_log_debug(): 167 | server = get_server() 168 | 169 | server.call("producer.create", [KAFKA_HOST, {"debug": "broker,topic,msg"}]) 170 | 171 | time.sleep(2) 172 | 173 | response = server.call("producer.get_logs", []) 174 | 175 | assert len(response) > 0 176 | assert len(response[0]) > 0 177 | 178 | server.call("producer.close", []) 179 | 180 | 181 | def test_producer_create_errors(): 182 | server = get_server() 183 | server.call("producer.test_create_errors") 184 | --------------------------------------------------------------------------------