├── .github
│   └── workflows
│       └── test.yaml
├── .gitignore
├── .travis.yml
├── LICENSE
├── Makefile
├── README.md
├── dist.ini
├── lib
│   └── resty
│       └── kafka
│           ├── basic-consumer.lua
│           ├── broker.lua
│           ├── client.lua
│           ├── errors.lua
│           ├── producer.lua
│           ├── protocol
│           │   ├── common.lua
│           │   ├── consumer.lua
│           │   └── record.lua
│           ├── request.lua
│           ├── response.lua
│           ├── ringbuffer.lua
│           ├── sasl.lua
│           ├── scramsha.lua
│           ├── sendbuffer.lua
│           └── utils.lua
├── lua-resty-kafka-0.09-0.rockspec
├── lua-resty-kafka-0.20-0.rockspec
├── lua-resty-kafka-0.22-0.rockspec
├── lua-resty-kafka-0.23-0.rockspec
└── t
    ├── basic-consumer.t
    ├── buffer.t
    ├── client.t
    ├── producer.t
    ├── request.t
    ├── ringbuffer.t
    └── sendbuffer.t

/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | name: Test
2 | 
3 | on:
4 |   push:
5 |     branches:
6 |       - master
7 |   pull_request:
8 |     branches:
9 |       - master
10 | 
11 | env:
12 |   KAFKA_VER: 2.4.0
13 |   SCALA_VER: 2.11
14 |   OPENRESTY_VER: 1.19.9.1
15 |   RESTY_OPENSSL_VER: 0.8.8
16 |   JIT_UUID_VER: 0.0.7
17 | 
18 | jobs:
19 |   run-test:
20 |     runs-on: ubuntu-latest
21 |     steps:
22 |       - uses: actions/checkout@v2
23 |         with:
24 |           submodules: recursive
25 | 
26 |       - name: Install Apache Kafka
27 |         run: |
28 |           wget https://archive.apache.org/dist/kafka/$KAFKA_VER/kafka_$SCALA_VER-$KAFKA_VER.tgz
29 |           sudo tar -xzf kafka_$SCALA_VER-$KAFKA_VER.tgz -C /usr/local/
30 |           sudo mv /usr/local/kafka_$SCALA_VER-$KAFKA_VER /usr/local/kafka
31 |           sudo sed -i '$aauthProvider\.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider' /usr/local/kafka/config/zookeeper.properties
32 |           sudo sed -i '$azookeeper\.sasl\.client=true' /usr/local/kafka/config/zookeeper.properties
33 |           sudo sed -i '$arequireClientAuthScheme=sasl' /usr/local/kafka/config/zookeeper.properties
34 |           sudo sed -i '$ajaasLoginRenew=3600000' /usr/local/kafka/config/zookeeper.properties
35 |           sudo touch /usr/local/kafka/config/zk_server_jass.conf
36 |           echo -e '''Server {
37 |           org.apache.kafka.common.security.plain.PlainLoginModule required
38 |           username="admin"
39 |           password="admin-secret"
40 |           user_admin="admin-secret";};''' | sudo tee /usr/local/kafka/config/zk_server_jass.conf
41 |           sudo yes "" | keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
42 |           sudo mv selfsigned.jks /usr/local/selfsigned.jks; chmod 755 /usr/local/selfsigned.jks
43 |           sudo sed -i '$ahost\.name=127.0.0.1' /usr/local/kafka/config/server.properties
44 |           sudo sed -i '$alisteners=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094' /usr/local/kafka/config/server.properties
45 |           sudo sed -i '$aadvertised\.listeners=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094' /usr/local/kafka/config/server.properties
46 |           sudo sed -i '$assl\.keystore\.location = \/usr\/local\/selfsigned.jks' /usr/local/kafka/config/server.properties
47 |           sudo sed -i '$assl\.keystore\.password = changeit' /usr/local/kafka/config/server.properties
48 |           sudo sed -i '$assl\.key\.password = changeit' /usr/local/kafka/config/server.properties
49 |           sudo sed -i '$assl\.key\.password = changeit' /usr/local/kafka/config/server.properties
50 |           sudo sed -i '$asuper\.users=User:admin' /usr/local/kafka/config/server.properties
51 |           sudo sed -i '$asuper\.users=User:admin' /usr/local/kafka/config/server.properties
52 |           sudo sed -i '$asasl\.mechanism\.inter\.broker\.protocol=PLAIN' /usr/local/kafka/config/server.properties
53 |           sudo sed -i '$asasl\.enabled\.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' /usr/local/kafka/config/server.properties
54 |           sudo sed -i '$aallow\.everyone\.if\.no\.acl\.found=true' /usr/local/kafka/config/server.properties
55 |           sudo sed -i '$aauthorizer\.class\.name=kafka.security.auth.SimpleAclAuthorizer' /usr/local/kafka/config/server.properties
56 |           sudo sed -i '$aauto\.create\.topics\.enable=false' /usr/local/kafka/config/server.properties
57 |           sudo cat /usr/local/kafka/config/server.properties
58 | 
59 |           sudo touch /usr/local/kafka/config/kafka_server_jass.conf
60 |           echo -e ''' KafkaServer {
61 |           org.apache.kafka.common.security.scram.ScramLoginModule required
62 |           username="admin"
63 |           password="admin-secret";
64 |           org.apache.kafka.common.security.plain.PlainLoginModule required
65 |           username="admin"
66 |           password="admin-secret"
67 |           user_admin="admin-secret"; };
68 |           Client {
69 |           org.apache.kafka.common.security.plain.PlainLoginModule required
70 |           username="admin"
71 |           password="admin-secret";}; ''' | sudo tee /usr/local/kafka/config/kafka_server_jass.conf
72 |           sudo cat /usr/local/kafka/config/kafka_server_jass.conf
73 |           sudo sed -i '$c\exec $base_dir/kafka-run-class.sh $EXTRA_ARGS -Djava.security.auth.login.config=/usr/local/kafka/config/zk_server_jass.conf org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"' /usr/local/kafka/bin/zookeeper-server-start.sh
74 |           sudo /usr/local/kafka/bin/zookeeper-server-start.sh -daemon /usr/local/kafka/config/zookeeper.properties
75 | 
76 |           sudo sed -i '$c\exec $base_dir/kafka-run-class.sh $EXTRA_ARGS -Djava.security.auth.login.config=/usr/local/kafka/config/kafka_server_jass.conf kafka.Kafka "$@"' /usr/local/kafka/bin/kafka-server-start.sh
77 |           sudo /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
78 | 
79 |           sleep 5
80 |           /usr/local/kafka/bin/kafka-configs.sh --zookeeper localhost:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret],SCRAM-SHA-512=[password=admin-secret]' --entity-type users --entity-name admin
81 |           /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test
82 |           /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test2
83 |           /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test3
84 |           /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test4
85 |           /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test5
86 |           /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test-consumer
87 | 
88 |       - name: Install OpenResty
89 |         run: |
90 |           wget https://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz
91 |           tar -xzf openresty-$OPENRESTY_VER.tar.gz
92 |           cd openresty-$OPENRESTY_VER
93 |           ./configure --prefix=/usr/local/openresty-debug --with-debug > build.log 2>&1 || (cat build.log && exit 1)
94 |           make -j4 > build.log 2>&1 || (cat build.log && exit 1)
95 |           sudo make install > build.log 2>&1 || (cat build.log && exit 1)
96 |           sudo apt install libtest-base-perl libtext-diff-perl libipc-run3-perl liburi-perl libwww-perl libtest-longstring-perl liblist-moreutils-perl libgd-dev > build.log 2>&1 || (cat build.log && exit 1)
97 |           sudo cpan Test::Nginx > build.log 2>&1 || (cat build.log && exit 1)
98 |           sudo mkdir -p /usr/local/{lua-resty-jit-uuid,lua-resty-openssl}
99 |           sudo git clone https://github.com/thibaultcha/lua-resty-jit-uuid.git /usr/local/lua-resty-jit-uuid
100 |           sudo git clone https://github.com/fffonion/lua-resty-openssl.git /usr/local/lua-resty-openssl
101 |           cd /usr/local/lua-resty-jit-uuid
102 |           sudo git tag
103 |           sudo git checkout $JIT_UUID_VER
104 |           sudo cp ./lib/resty/jit-uuid.lua /usr/local/openresty-debug/lualib/resty
105 |           cd /usr/local/lua-resty-openssl
106 |           sudo git tag
107 |           sudo git checkout $RESTY_OPENSSL_VER
108 |           sudo cp -r ./lib/resty/* /usr/local/openresty-debug/lualib/resty
109 | 
110 |       - name: Run Test
111 |         run: |
112 |           make test
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | t/servroot
2 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | dist: xenial
3 | 
4 | os: linux
5 | 
6 | language: c
7 | 
8 | addons:
9 |   apt:
10 |     packages:
11 |       - libtest-base-perl
12 |       - libtext-diff-perl
13 |       - libipc-run3-perl
14 |       - liburi-perl
15 |       - libwww-perl
16 |       - libtest-longstring-perl
17 |       - liblist-moreutils-perl
18 |       - libgd-dev
19 | 
20 | cache:
21 |   directories:
22 |     - download-cache
23 | 
24 | env:
25 |   global:
26 |     - JOBS=3
27 |     - KAFKA_VER=2.4.0
28 |     - SCALA_VER=2.11
29 |     - OPENRESTY_VER=1.15.8.2
30 |     - RESTY_OPENSSL_VER=0.8.8
31 |     - JIT_UUID_VER=0.0.7
32 | 
33 | install:
34 |   - git clone https://github.com/openresty/test-nginx.git ../test-nginx
35 |   - if [ ! -f download-cache/kafka_$SCALA_VER-$KAFKA_VER.tgz ]; then wget -P download-cache https://archive.apache.org/dist/kafka/$KAFKA_VER/kafka_$SCALA_VER-$KAFKA_VER.tgz;fi
36 |   - if [ ! -f download-cache/openresty-$OPENRESTY_VER.tar.gz ]; then wget -P download-cache https://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz;fi
37 |   - git clone https://github.com/thibaultcha/lua-resty-jit-uuid.git ./lua-resty-jit-uuid
38 |   - git clone https://github.com/fffonion/lua-resty-openssl.git ./lua-resty-openssl
39 | 
40 | script:
41 |   - sudo tar -xzf download-cache/kafka_$SCALA_VER-$KAFKA_VER.tgz -C /usr/local/
42 |   - sudo mv /usr/local/kafka_$SCALA_VER-$KAFKA_VER /usr/local/kafka
43 |   - sudo sed -i '$aauthProvider\.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider' /usr/local/kafka/config/zookeeper.properties
44 |   - sudo sed -i '$azookeeper\.sasl\.client=true' /usr/local/kafka/config/zookeeper.properties
45 |   - sudo sed -i '$arequireClientAuthScheme=sasl' /usr/local/kafka/config/zookeeper.properties
46 |   - sudo sed -i '$ajaasLoginRenew=3600000' /usr/local/kafka/config/zookeeper.properties
47 |   - sudo touch /usr/local/kafka/config/zk_server_jass.conf
48 |   - echo -e '''Server {
49 |     org.apache.kafka.common.security.plain.PlainLoginModule required
50 |     username="admin"
51 |     password="admin-secret"
52 |     user_admin="admin-secret";};''' | sudo tee /usr/local/kafka/config/zk_server_jass.conf
53 |   - sudo yes "" | keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
54 |   - sudo mv selfsigned.jks /usr/local/selfsigned.jks; chmod 755 /usr/local/selfsigned.jks
55 |   - sudo sed -i '$ahost\.name=127.0.0.1' /usr/local/kafka/config/server.properties
56 |   - sudo sed -i '$alisteners=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094' /usr/local/kafka/config/server.properties
57 |   - sudo sed -i '$aadvertised\.listeners=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094' /usr/local/kafka/config/server.properties
58 |   - sudo sed -i '$assl\.keystore\.location = \/usr\/local\/selfsigned.jks' /usr/local/kafka/config/server.properties
59 |   - sudo sed -i '$assl\.keystore\.password = changeit' /usr/local/kafka/config/server.properties
60 |   - sudo sed -i '$assl\.key\.password = changeit' /usr/local/kafka/config/server.properties
61 |   - sudo sed -i '$assl\.key\.password = changeit' /usr/local/kafka/config/server.properties
62 |   - sudo sed -i '$asuper\.users=User:admin' /usr/local/kafka/config/server.properties
63 |   - sudo sed -i '$asuper\.users=User:admin' /usr/local/kafka/config/server.properties
64 |   - sudo sed -i '$asasl\.mechanism\.inter\.broker\.protocol=PLAIN' /usr/local/kafka/config/server.properties
65 |   - sudo sed -i '$asasl\.enabled\.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' /usr/local/kafka/config/server.properties
66 |   - sudo sed -i '$aallow\.everyone\.if\.no\.acl\.found=true' /usr/local/kafka/config/server.properties
67 |   - sudo sed -i '$aauthorizer\.class\.name=kafka.security.auth.SimpleAclAuthorizer' /usr/local/kafka/config/server.properties
68 |   - sudo sed -i '$aauto\.create\.topics\.enable=false' /usr/local/kafka/config/server.properties
69 |   - sudo cat /usr/local/kafka/config/server.properties
70 | 
71 |   - sudo touch /usr/local/kafka/config/kafka_server_jass.conf
72 |   - echo -e ''' KafkaServer {
73 |     org.apache.kafka.common.security.scram.ScramLoginModule required
74 |     username="admin"
75 |     password="admin-secret";
76 |     org.apache.kafka.common.security.plain.PlainLoginModule required
77 |     username="admin"
78 |     password="admin-secret"
79 |     user_admin="admin-secret";};
80 |     Client {
81 |     org.apache.kafka.common.security.plain.PlainLoginModule required
82 |     username="admin"
83 |     password="admin-secret";}; ''' | sudo tee /usr/local/kafka/config/kafka_server_jass.conf
84 |   - sudo cat /usr/local/kafka/config/kafka_server_jass.conf
85 |   - sudo sed -i '$c\exec $base_dir/kafka-run-class.sh $EXTRA_ARGS -Djava.security.auth.login.config=/usr/local/kafka/config/zk_server_jass.conf org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"' /usr/local/kafka/bin/zookeeper-server-start.sh
86 |   - sudo /usr/local/kafka/bin/zookeeper-server-start.sh -daemon /usr/local/kafka/config/zookeeper.properties
87 | 
88 |   - sudo sed -i '$c\exec $base_dir/kafka-run-class.sh $EXTRA_ARGS -Djava.security.auth.login.config=/usr/local/kafka/config/kafka_server_jass.conf kafka.Kafka "$@"' /usr/local/kafka/bin/kafka-server-start.sh
89 |   - sudo /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
90 |   - sleep 5
91 |   - /usr/local/kafka/bin/kafka-configs.sh --zookeeper localhost:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret],SCRAM-SHA-512=[password=admin-secret]' --entity-type users --entity-name admin
92 |   - /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test
93 |   - /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test2
94 |   - /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test3
95 |   - /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test4
96 |   - /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test5
97 |   - /usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 2 --topic test-consumer
98 |   - tar -xzf download-cache/openresty-$OPENRESTY_VER.tar.gz
99 |   - cd openresty-$OPENRESTY_VER
100 |   - ./configure --prefix=/usr/local/openresty-debug --with-debug > build.log 2>&1 || (cat build.log && exit 1)
101 |   - make -j$JOBS > build.log 2>&1 || (cat build.log && exit 1)
102 |   - sudo make install > build.log 2>&1 || (cat build.log && exit 1)
103 |   - cd ../lua-resty-jit-uuid
104 |   - git tag
105 |   - git checkout $JIT_UUID_VER
106 |   - sudo cp ./lib/resty/jit-uuid.lua /usr/local/openresty-debug/lualib/resty
107 |   - cd ../lua-resty-openssl
108 |   - git tag
109 |   - git checkout $RESTY_OPENSSL_VER
110 |   - sudo cp -r ./lib/resty/* /usr/local/openresty-debug/lualib/resty
111 |   - cd ..
112 |   - make test
113 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014, doujiang
2 | All rights reserved.
3 | 
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 | 
7 | * Redistributions of source code must retain the above copyright notice, this
8 |   list of conditions and the following disclaimer.
9 | 
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 |   this list of conditions and the following disclaimer in the documentation
12 |   and/or other materials provided with the distribution.
13 | 
14 | * Neither the name of lua-resty-kafka nor the names of its
15 |   contributors may be used to endorse or promote products derived from
16 |   this software without specific prior written permission.
17 | 
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 | 
29 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | OPENRESTY_PREFIX=/usr/local/openresty-debug
2 | 
3 | PREFIX ?= /usr/local
4 | LUA_INCLUDE_DIR ?= $(PREFIX)/include
5 | LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
6 | INSTALL ?= install
7 | 
8 | .PHONY: all test install
9 | 
10 | all: ;
11 | 
12 | install: all
13 | 	$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/kafka
14 | 	$(INSTALL) lib/resty/kafka/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/kafka
15 | 
16 | test: all
17 | 	PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I ./../test-nginx/lib -r t/
18 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Name
2 | =====
3 | 
4 | lua-resty-kafka - Lua kafka client driver for the ngx_lua based on the cosocket API
5 | 
6 | Table of Contents
7 | =================
8 | 
9 | * [Name](#name)
10 | * [Status](#status)
11 | * [Description](#description)
12 | * [Synopsis](#synopsis)
13 | * [Modules](#modules)
14 |     * [resty.kafka.client](#restykafkaclient)
15 |         * [Methods](#methods)
16 |             * [new](#new)
17 |             * [fetch_metadata](#fetch_metadata)
18 |             * [refresh](#refresh)
19 |             * [choose_api_version](#choose_api_version)
20 |     * [resty.kafka.producer](#restykafkaproducer)
21 |         * [Methods](#methods)
22 |             * [new](#new)
23 |             * [send](#send)
24 |             * [offset](#offset)
25 |             * [flush](#flush)
26 |     * [resty.kafka.basic-consumer](#restykafkabasic-consumer)
27 |         * [Methods](#methods)
28 |             * [new](#new)
29 |             * [list_offset](#list_offset)
30 |             * [fetch](#fetch)
31 | * [Errors](#errors)
32 | * [Installation](#installation)
33 | * [TODO](#todo)
34 | * [Author](#author)
35 | * [Copyright and License](#copyright-and-license)
36 | * [See Also](#see-also)
37 | 
38 | Status
39 | ======
40 | 
41 | This library is still under early development and is experimental.
42 | 
43 | Description
44 | ===========
45 | 
46 | This Lua library is a Kafka client driver for the ngx_lua nginx module:
47 | 
48 | http://wiki.nginx.org/HttpLuaModule
49 | 
50 | This Lua library takes advantage of ngx_lua's cosocket API, which ensures
51 | 100% nonblocking behavior.
52 | 
53 | Note that at least [ngx_lua 0.9.3](https://github.com/openresty/lua-nginx-module/tags) or [openresty 1.4.3.7](http://openresty.org/#Download) is required, and unfortunately only LuaJIT is supported (`--with-luajit`).
54 | 
55 | Note that for `ssl` connections at least [ngx_lua 0.9.11](https://github.com/openresty/lua-nginx-module/tags) or [openresty 1.7.4.1](http://openresty.org/#Download) is required, and unfortunately only LuaJIT is supported (`--with-luajit`).
56 | 
57 | Synopsis
58 | ========
59 | 
60 | ```lua
61 | lua_package_path "/path/to/lua-resty-kafka/lib/?.lua;;";
62 | 
63 | server {
64 |     location /test {
65 |         content_by_lua '
66 |             local cjson = require "cjson"
67 |             local client = require "resty.kafka.client"
68 |             local producer = require "resty.kafka.producer"
69 | 
70 |             local broker_list = {
71 |                 {
72 |                     host = "127.0.0.1",
73 |                     port = 9092,
74 | 
75 |                     -- optional auth
76 |                     sasl_config = {
77 |                         mechanism = "PLAIN",
78 |                         user = "USERNAME",
79 |                         password = "PASSWORD",
80 |                     },
81 |                 },
82 |             }
83 | 
84 |             local key = "key"
85 |             local message = "hello world"
86 | 
87 |             -- usually we do not use this library directly
88 |             local cli = client:new(broker_list)
89 |             local brokers, partitions = cli:fetch_metadata("test")
90 |             if not brokers then
91 |                 ngx.say("fetch_metadata failed, err:", partitions)
92 |             end
93 |             ngx.say("brokers: ", cjson.encode(brokers), "; partitions: ", cjson.encode(partitions))
94 | 
95 | 
96 |             -- sync producer_type
97 |             local p = producer:new(broker_list)
98 | 
99 |             local offset, err = p:send("test", key, message)
100 |             if not offset then
101 |                 ngx.say("send err:", err)
102 |                 return
103 |             end
104 |             ngx.say("send success, offset: ", tonumber(offset))
105 | 
106 |             -- this is async producer_type and bp will be reused in the whole nginx worker
107 |             local bp = producer:new(broker_list, { producer_type = "async" })
108 | 
109 |             local ok, err = bp:send("test", key, message)
110 |             if not ok then
111 |                 ngx.say("send err:", err)
112 |                 return
113 |             end
114 | 
115 |             ngx.say("send success, ok:", ok)
116 |         ';
117 |     }
118 | }
119 | ```
120 | 
121 | 
122 | [Back to TOC](#table-of-contents)
123 | 
124 | Modules
125 | =======
126 | 
127 | 
128 | resty.kafka.client
129 | ----------------------
130 | 
131 | To load this module, just do this
132 | 
133 | ```lua
134 | local client = require "resty.kafka.client"
135 | ```
136 | 
137 | [Back to TOC](#table-of-contents)
138 | 
139 | ### Methods
140 | 
141 | #### new
142 | 
143 | `syntax: c = client:new(broker_list, client_config)`
144 | 
145 | The `broker_list` is a list of brokers, like below
146 | 
147 | ```json
148 | [
149 |     {
150 |         "host": "127.0.0.1",
151 |         "port": 9092,
152 | 
153 |         // optional auth
154 |         "sasl_config": {
155 |             // supported mechanisms: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512
156 |             "mechanism": "PLAIN",
157 |             "user": "USERNAME",
158 |             "password": "PASSWORD"
159 |         }
160 |     }
161 | ]
162 | ```
163 | * `sasl_config`
164 | 
165 |     Supported mechanisms: PLAIN, SCRAM-SHA-256 and SCRAM-SHA-512.
166 | 
167 |     Warning: SCRAM-SHA-256 and SCRAM-SHA-512 require lua-resty-jit-uuid and lua-resty-openssl to be installed.
168 | 
169 | An optional `client_config` table can be specified. The following options are supported:
170 | 
171 | client config
172 | 
173 | * `socket_timeout`
174 | 
175 |     Specifies the network timeout threshold in milliseconds. *SHOULD* be larger than the `request_timeout`.
176 | 
177 | * `keepalive_timeout`
178 | 
179 |     Specifies the maximal idle timeout (in milliseconds) for the keepalive connection.
180 | 
181 | * `keepalive_size`
182 | 
183 |     Specifies the maximal number of connections allowed in the connection pool per Nginx worker.
184 | 
185 | * `refresh_interval`
186 | 
187 |     Specifies the interval in milliseconds for automatically refreshing the metadata. If it is nil, the metadata will not be refreshed automatically.
188 | 
189 | * `ssl`
190 | 
191 |     Specifies whether the client should use an SSL connection. Defaults to false. See: https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake
192 | 
193 | * `ssl_verify`
194 | 
195 |     Specifies whether the client should perform SSL verification. Defaults to false. See: https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake
196 | 
197 | * `resolver`
198 | 
199 |     Specifies a function for host resolving, which returns an IP string or `nil`, overriding the system default host resolver. Default `nil`: no resolving is performed. Example: `function(host) if host == "some_host" then return "10.11.12.13" end end`
200 | 
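Putting these options together, below is a minimal sketch of creating a client with a custom `client_config`; the broker host name, the resolved IP and the tuning values are illustrative placeholders:

```lua
local client = require "resty.kafka.client"

-- "kafka.example.com" is a placeholder address
local broker_list = {
    { host = "kafka.example.com", port = 9092 },
}

local cli = client:new(broker_list, {
    socket_timeout = 5000,          -- should be larger than request_timeout
    keepalive_timeout = 600 * 1000, -- 10 min
    keepalive_size = 2,
    refresh_interval = 30 * 1000,   -- auto refresh metadata every 30s
    resolver = function(host)
        -- override the system resolver for this one host only
        if host == "kafka.example.com" then
            return "10.11.12.13"
        end
    end,
})
```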
201 | [Back to TOC](#table-of-contents)
202 | 
203 | #### fetch_metadata
204 | `syntax: brokers, partitions = c:fetch_metadata(topic)`
205 | 
206 | In case of success, returns all brokers and partitions of the `topic`.
207 | In case of errors, returns `nil` with a string describing the error.
208 | 
209 | 
210 | [Back to TOC](#table-of-contents)
211 | 
212 | #### refresh
213 | `syntax: brokers, partitions = c:refresh()`
214 | 
215 | This will refresh the metadata of all topics which have been fetched by `fetch_metadata`.
216 | In case of success, returns all brokers and all partitions of all topics.
217 | In case of errors, returns `nil` with a string describing the error.
218 | 
219 | 
220 | [Back to TOC](#table-of-contents)
221 | 
222 | #### choose_api_version
223 | 
224 | `syntax: api_version = c:choose_api_version(api_key, min_version, max_version)`
225 | 
226 | This helps the client select the correct version of the API identified by `api_key`.
227 | 
228 | When `min_version` and `max_version` are provided, they act as limits: the selected version will not exceed them regardless of which API versions the broker supports. When they are not provided, the range of versions supported by the broker is used.
229 | 
230 | Tip: The version selection strategy is to choose the maximum version within the allowed range.
231 | 
232 | 
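For example, a caller might pin the Produce API to a version range it knows how to encode. A sketch, assuming `cli` was created as above and has fetched metadata at least once (which also fetches the broker's ApiVersions); `0` is the Produce API key in the Kafka protocol, and the bounds `0..2` are illustrative:

```lua
local brokers, partitions = cli:fetch_metadata("test")
if not brokers then
    ngx.say("fetch_metadata failed, err: ", partitions)
    return
end

-- 0 is the Produce API key; constrain the choice to versions 0..2.
-- choose_api_version returns -1 when no version in range is supported.
local api_version = cli:choose_api_version(0, 0, 2)
if api_version == -1 then
    ngx.say("no Produce API version in [0, 2] is supported by the broker")
    return
end
ngx.say("chosen produce api_version: ", api_version)
```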
233 | [Back to TOC](#table-of-contents)
234 | 
235 | resty.kafka.producer
236 | ----------------------
237 | 
238 | To load this module, just do this
239 | 
240 | ```lua
241 | local producer = require "resty.kafka.producer"
242 | ```
243 | 
244 | [Back to TOC](#table-of-contents)
245 | 
246 | ### Methods
247 | 
248 | #### new
249 | 
250 | `syntax: p = producer:new(broker_list, producer_config?, cluster_name?)`
251 | 
252 | It is recommended to use the async producer_type.
253 | 
254 | `broker_list` is the same as in `client`.
255 | 
256 | An optional `producer_config` table can be specified. The following options are supported:
257 | 
258 | `socket_timeout`, `keepalive_timeout`, `keepalive_size`, `refresh_interval`, `ssl`, `ssl_verify` are the same as in `client_config`.
259 | 
260 | producer config (most options correspond to the Kafka producer properties of the same name):
261 | 
262 | * `producer_type`
263 | 
264 |     Specifies the `producer.type`: "async" or "sync".
265 | 
266 | * `request_timeout`
267 | 
268 |     Specifies the `request.timeout.ms`. Default `2000 ms`.
269 | 
270 | * `required_acks`
271 | 
272 |     Specifies the `request.required.acks`, *SHOULD NOT* be zero. Default `1`.
273 | 
274 | * `max_retry`
275 | 
276 |     Specifies the `message.send.max.retries`. Default `3`.
277 | 
278 | * `retry_backoff`
279 | 
280 |     Specifies the `retry.backoff.ms`. Default `100`.
281 | 
282 | * `api_version`
283 | 
284 |     Specifies the produce API version. Default `0`.
285 |     If you use Kafka 0.10.0.0 or higher, `api_version` can be `0`, `1` or `2`.
286 |     If you use Kafka 0.9.x, `api_version` should be `0` or `1`.
287 |     If you use Kafka 0.8.x, `api_version` should be `0`.
288 | 
289 | * `partitioner`
290 | 
291 |     Specifies the partitioner that chooses the partition from the key and the partition number.
292 |     `syntax: partitioner = function (key, partition_num, correlation_id) end`,
293 |     where correlation_id is an auto-incremented id in the producer. The default partitioner is:
294 | 
295 | ```lua
296 | local function default_partitioner(key, num, correlation_id)
297 |     local id = key and crc32(key) or correlation_id
298 |     -- partition ids are continuous and start from 0
299 |     return id % num
300 | end
301 | ```
302 | 
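A custom partitioner with the same signature can be supplied through the `partitioner` option. As a sketch, a hypothetical round-robin partitioner that ignores the key entirely:

```lua
-- spread messages over partitions using the auto-incremented
-- correlation_id instead of the key
local function roundrobin_partitioner(key, partition_num, correlation_id)
    -- partition ids are continuous and start from 0
    return correlation_id % partition_num
end

local p = producer:new(broker_list, { partitioner = roundrobin_partitioner })
```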
303 | buffer config (only takes effect when `producer_type` is "async")
304 | 
305 | * `flush_time`
306 | 
307 |     Specifies the `queue.buffering.max.ms`. Default `1000`.
308 | 
309 | * `batch_num`
310 | 
311 |     Specifies the `batch.num.messages`. Default `200`.
312 | 
313 | * `batch_size`
314 | 
315 |     Specifies the `send.buffer.bytes`. Default `1M` (may reach 2M).
316 |     Be careful: it *SHOULD* be smaller than the `socket.request.max.bytes / 2 - 10k` config in the kafka server.
317 | 
318 | * `max_buffering`
319 | 
320 |     Specifies the `queue.buffering.max.messages`. Default `50,000`.
321 | 
322 | * `error_handle`
323 | 
324 |     Specifies the error handler that processes the data when buffered messages fail to be sent to kafka.
325 |     `syntax: error_handle = function (topic, partition_id, message_queue, index, err, retryable) end`,
326 |     the failed messages in the message_queue are laid out like ```{ key1, msg1, key2, msg2 }```,
327 |     `key` in the message_queue is the empty string `""` even if the original key was `nil`.
328 |     `index` is the message_queue length; do not use `#message_queue`.
329 |     When `retryable` is `true`, the kafka server definitely did not commit these messages, so you can safely retry sending them;
330 |     otherwise the messages may have been committed, and it is recommended to log them somewhere.
331 | 
332 | * `wait_on_buffer_full`
333 | 
334 |     Specifies whether to wait when the buffer queue is full. Default `false`.
335 |     When the buffer queue is full and this option is `true`,
336 |     a semaphore wait is used to block the coroutine until the wait times out or the buffer queue has drained;
337 |     otherwise, a "buffer overflow" error is returned with `false`.
338 |     Notice: it cannot be used in phases that do not support yielding, e.g. the log phase.
339 | 
340 | * `wait_buffer_timeout`
341 | 
342 |     Specifies the max wait time when the buffer is full. Default `5` seconds.
343 | 
344 | Compression is not supported yet.
345 | 
346 | The third optional `cluster_name` specifies the name of the cluster, default `1` (yes, it is a number). You can specify different names when you have two or more kafka clusters. This only works with the `async` producer_type.
347 | 
348 | 
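Putting the async options above together, a minimal sketch of an async producer that logs messages which could not be delivered; the topic name and tuning values are illustrative:

```lua
local producer = require "resty.kafka.producer"

local bp = producer:new(broker_list, {
    producer_type = "async",
    flush_time = 1000,      -- flush the buffer every second
    batch_num = 200,
    max_buffering = 50000,
    error_handle = function (topic, partition_id, message_queue, index, err, retryable)
        -- message_queue is laid out as { key1, msg1, key2, msg2, ... },
        -- and index is its length; do not use #message_queue
        for i = 1, index, 2 do
            ngx.log(ngx.ERR, "send to ", topic, "/", tostring(partition_id),
                    " failed, err: ", err, ", retryable: ", tostring(retryable),
                    ", key: ", message_queue[i], ", msg: ", message_queue[i + 1])
        end
    end,
})

local ok, err = bp:send("test", nil, "some message")
if not ok then
    ngx.log(ngx.ERR, "send err: ", err)
end
```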
349 | [Back to TOC](#table-of-contents)
350 | 
351 | #### send
352 | `syntax: ok, err = p:send(topic, key, message)`
353 | 
354 | 1. In the sync model
355 | 
356 |     In case of success, returns the offset (** cdata: LL **) of the current broker and partition.
357 |     In case of errors, returns `nil` with a string describing the error.
358 | 
359 | 2. In the async model
360 | 
361 |     The `message` will be written to the buffer first.
362 |     It will be sent to the kafka server when the buffer exceeds `batch_num`,
363 |     or when the buffer is flushed every `flush_time`.
364 | 
365 |     In case of success, returns `true`.
366 |     In case of errors, returns `nil` with a string describing the error (`buffer overflow`).
367 | 
368 | 
369 | [Back to TOC](#table-of-contents)
370 | 
371 | #### offset
372 | 
373 | `syntax: sum, details = p:offset()`
374 | 
375 | Returns the sum of all topic-partition offsets (as returned by the ProduceRequest API),
376 | and the details of each topic-partition.
377 | 
378 | 
379 | [Back to TOC](#table-of-contents)
380 | 
381 | #### flush
382 | 
383 | `syntax: ok = p:flush()`
384 | 
385 | Always returns `true`.
386 | 
387 | 
388 | [Back to TOC](#table-of-contents)
389 | 
390 | 
391 | resty.kafka.basic-consumer
392 | ----------------------
393 | 
394 | To load this module, just do this
395 | 
396 | ```lua
397 | local bconsumer = require "resty.kafka.basic-consumer"
398 | ```
399 | 
400 | This module is a minimalist implementation of a consumer, providing the `list_offset` API for querying offsets by time or getting the start and end offsets, and the `fetch` API for getting messages from a topic.
401 | 
402 | In a single call, only the information of a single partition in a single topic can be fetched; batch fetching is not supported for now. The basic consumer does not support the consumer-group related APIs, so you need to fetch messages after getting the offset through the `list_offset` API, or your service can manage the offsets itself.
403 | 
404 | [Back to TOC](#table-of-contents)
405 | 
406 | ### Methods
407 | 
408 | #### new
409 | 
410 | `syntax: c = bconsumer:new(broker_list, client_config)`
411 | 
412 | The `broker_list` is a list of brokers, like below
413 | 
414 | ```json
415 | [
416 |     {
417 |         "host": "127.0.0.1",
418 |         "port": 9092,
419 | 
420 |         // optional auth
421 |         "sasl_config": {
422 |             "mechanism": "PLAIN",
423 |             "user": "USERNAME",
424 |             "password": "PASSWORD"
425 |         }
426 |     }
427 | ]
428 | ```
429 | 
430 | An optional `client_config` table can be specified. The following options are supported:
431 | 
432 | client config
433 | 
434 | * `socket_timeout`
435 | 
436 |     Specifies the network timeout threshold in milliseconds. *SHOULD* be larger than the `request_timeout`.
437 | 
438 | * `keepalive_timeout`
439 | 
440 |     Specifies the maximal idle timeout (in milliseconds) for the keepalive connection.
441 | 
442 | * `keepalive_size`
443 | 
444 |     Specifies the maximal number of connections allowed in the connection pool per Nginx worker.
445 | 
446 | * `refresh_interval`
447 | 
448 |     Specifies the interval in milliseconds for automatically refreshing the metadata. If it is nil, the metadata will not be refreshed automatically.
449 | 
450 | * `ssl`
451 | 
452 |     Specifies whether the client should use an SSL connection. Defaults to false. See: https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake
453 | 
454 | * `ssl_verify`
455 | 
456 |     Specifies whether the client should perform SSL verification. Defaults to false. See: https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake
457 | 
458 | * `isolation_level`
459 |     This setting controls the visibility of transactional records. See: https://kafka.apache.org/protocol.html
460 | 
461 | * `client_rack`
462 | 
463 |     Rack ID of the consumer making this request. See: https://kafka.apache.org/protocol.html
464 | 
465 | [Back to TOC](#table-of-contents)
466 | 
467 | #### list_offset
468 | `syntax: offset, err = c:list_offset(topic, partition, timestamp)`
469 | 
470 | The parameter timestamp can be a UNIX timestamp or a constant defined in `resty.kafka.protocol.consumer` (`LIST_OFFSET_TIMESTAMP_LAST`, `LIST_OFFSET_TIMESTAMP_FIRST`, `LIST_OFFSET_TIMESTAMP_MAX`), used to get the earliest and latest offsets, etc., with the same semantics as the ListOffsets API in Apache Kafka. See: https://kafka.apache.org/protocol.html#The_Messages_ListOffsets
471 | 
472 | In case of success, returns the offset for the specified case.
473 | In case of errors, returns `nil` with a string describing the error.
474 | 
475 | [Back to TOC](#table-of-contents)
476 | 
477 | #### fetch
478 | 
479 | `syntax: result, err = c:fetch(topic, partition, offset)`
480 | 
481 | In case of success, returns the following `result` for the specified case.
482 | In case of errors, returns `nil` with a string describing the error.
483 | 
484 | The `result` will contain more information such as the messages:
485 | 
486 | * `records`
487 | 
488 |     A table containing the message contents.
489 | 
490 | * `errcode`
491 | 
492 |     The error code of the Fetch API. See: https://kafka.apache.org/protocol.html#protocol_error_codes
493 | 
494 | * `high_watermark`
495 | 
496 |     The high watermark of the Fetch API. See: https://kafka.apache.org/protocol.html#The_Messages_Fetch
497 | 
498 | * `last_stable_offset`
499 | 
500 |     The last stable offset of the Fetch API. Content depends on the API version and may be nil (present in responses of API version v4 and above). See: https://kafka.apache.org/protocol.html#The_Messages_Fetch
501 | 
502 | * `log_start_offset`
503 | 
504 |     The log start offset of the Fetch API. Content depends on the API version and may be nil (present in responses of API version v5 and above). See: https://kafka.apache.org/protocol.html#The_Messages_Fetch
505 | 
506 | * `aborted_transactions`
507 | 
508 |     The aborted transactions of the Fetch API. Content depends on the API version and may be nil (present in responses of API version v4 and above). See: https://kafka.apache.org/protocol.html#The_Messages_Fetch
509 | 
510 | * `preferred_read_replica`
511 | 
512 |     The preferred read replica of the Fetch API. Content depends on the API version and may be nil (present in responses of API version v11 and above). See: https://kafka.apache.org/protocol.html#The_Messages_Fetch
513 | 
514 | 
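Tying `list_offset` and `fetch` together, a minimal sketch of reading the earliest messages of one partition; the topic name and partition are illustrative, and the `key`/`value` field names on each record are an assumption here:

```lua
local bconsumer = require "resty.kafka.basic-consumer"
local protocol_consumer = require "resty.kafka.protocol.consumer"

local c = bconsumer:new(broker_list)

-- find the earliest available offset of partition 0
local offset, err = c:list_offset("test-consumer", 0,
                                  protocol_consumer.LIST_OFFSET_TIMESTAMP_FIRST)
if not offset then
    ngx.say("list_offset err: ", err)
    return
end

local result, err = c:fetch("test-consumer", 0, offset)
if not result then
    ngx.say("fetch err: ", err)
    return
end

-- result.records holds the fetched messages
for _, record in ipairs(result.records) do
    ngx.say("key: ", tostring(record.key), ", value: ", tostring(record.value))
end
```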
515 | [Back to TOC](#table-of-contents)
516 | 
517 | 
518 | Errors
519 | ======
520 | 
521 | When you call the modules provided in this library, you may get some errors.
522 | Depending on the source, they can be divided into the following categories.
523 | 
524 | * Network errors: such as connection refused, connection timeout, etc. You need to check the connection status of each service in your environment.
525 | 
526 | * Metadata-related errors: such as the Metadata or ApiVersion data cannot be retrieved properly, or the specified topic or partition does not exist. You need to check the Kafka broker and client configuration.
527 | 
528 | * Errors returned by Kafka: sometimes Kafka includes err_code data in the response. When this happens, the `err` in the return value looks like `OFFSET_OUT_OF_RANGE`: all uppercase characters separated by underscores. This library provides [a list of error mappings](lib/resty/kafka/errors.lua) to the corresponding textual descriptions. To learn more about these errors, see the descriptions in the [Kafka documentation](https://kafka.apache.org/protocol.html#protocol_error_codes). A retry sketch based on these error names follows below.
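For instance, a caller can match on these error strings to decide whether to retry. A sketch, assuming `p` is a sync producer and `cli` a client created earlier; the two error names chosen here are just examples from the mapping table:

```lua
local offset, err = p:send("test", key, message)
if not offset then
    -- err is the uppercase, underscore-separated Kafka error name
    if err == "LEADER_NOT_AVAILABLE" or err == "REQUEST_TIMED_OUT" then
        -- likely transient: refresh the metadata before retrying
        cli:refresh()
    else
        ngx.log(ngx.ERR, "non-retriable kafka error: ", err)
    end
end
```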
529 | 
530 | 
531 | Installation
532 | ============
533 | 
534 | You need to configure
535 | the lua_package_path directive to add the path of your lua-resty-kafka source
536 | tree to ngx_lua's LUA_PATH search path, as in
537 | 
538 | ```nginx
539 | # nginx.conf
540 | http {
541 |     lua_package_path "/path/to/lua-resty-kafka/lib/?.lua;;";
542 |     ...
543 | }
544 | ```
545 | 
546 | Ensure that the system account running your Nginx worker processes has
547 | enough permission to read the `.lua` files.
548 | 
549 | 
550 | [Back to TOC](#table-of-contents)
551 | 
552 | TODO
553 | ====
554 | 
555 | 1. Fetch API
556 | 2. Offset API
557 | 3. Offset Commit/Fetch API
558 | 
559 | 
560 | [Back to TOC](#table-of-contents)
561 | 
562 | Author
563 | ======
564 | 
565 | Dejiang Zhu (doujiang24) .
566 | 
567 | 
568 | [Back to TOC](#table-of-contents)
569 | 
570 | Copyright and License
571 | =====================
572 | 
573 | This module is licensed under the BSD license.
574 | 
575 | Copyright (C) 2014-2020, by Dejiang Zhu (doujiang24) .
576 | 
577 | All rights reserved.
578 | 
579 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
580 | 
581 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
582 | 
583 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
584 | 
585 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
586 | 
587 | 
588 | [Back to TOC](#table-of-contents)
589 | 
590 | See Also
591 | ========
592 | * the ngx_lua module: http://wiki.nginx.org/HttpLuaModule
593 | * the kafka protocol: https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
594 | * the [lua-resty-redis](https://github.com/openresty/lua-resty-redis) library
595 | * the [lua-resty-logger-socket](https://github.com/cloudflare/lua-resty-logger-socket) library
596 | * the [sarama](https://github.com/Shopify/sarama) library
597 | 
598 | [Back to TOC](#table-of-contents)
599 | 
--------------------------------------------------------------------------------
/dist.ini:
--------------------------------------------------------------------------------
1 | name=lua-resty-kafka
2 | abstract=Lua kafka client driver for the Openresty based on the cosocket API
3 | author=doujiang24
4 | is_original=yes
5 | license=2bsd
6 | lib_dir=lib
7 | main_module=lib/resty/kafka/producer.lua
8 | repo_link=https://github.com/doujiang24/lua-resty-kafka
9 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/basic-consumer.lua:
--------------------------------------------------------------------------------
1 | local client = require("resty.kafka.client")
2 | local broker = require("resty.kafka.broker")
3 | local protocol_consumer = require("resty.kafka.protocol.consumer")
4 | local Errors = require("resty.kafka.errors")
5 | 
6 | local ngx_log = ngx.log
7 | local INFO = ngx.INFO
8 | 
9 | 
10 | local _M = { _VERSION = "0.20" }
11 | local mt = { __index = _M }
12 | 
13 | function _M.new(self, broker_list, client_config)
14 |     local opts = client_config or {}
15 | 
16 |     local cli = client:new(broker_list, client_config)
17 |     local p = setmetatable({
18 |         client = cli,
19 |         correlation_id = 1,
20 |         isolation_level = opts.isolation_level or 0,
21 |         client_rack = opts.client_rack or "default",
22 |         socket_config = cli.socket_config,
23 |     }, mt)
24 | 
25 |     return p
26 | end
27 | 
28 | 
29 | --- Get the available offsets of the partition of the specified topic
30 | -- When the request fails, offset will be nil and err will be the error message.
31 | -- @author bzp2010
32 | -- @param self
33 | -- @param topic The name of topic
34 | -- @param partition The partition of topic
35 | -- @param timestamp The starting timestamp of the obtained message offset
36 | -- @return offset The obtained offset value, may be nil
37 | -- @return err The error of the request, may be nil
38 | function _M.list_offset(self, topic, partition, timestamp)
39 |     timestamp = timestamp or protocol_consumer.LIST_OFFSET_TIMESTAMP_FIRST
40 | 
41 |     local cli = self.client
42 |     local broker_conf, err = cli:choose_broker(topic, partition)
43 |     if not broker_conf then
44 |         return nil, err
45 |     end
46 | 
47 |     local bk, err = broker:new(broker_conf.host, broker_conf.port, self.socket_config, broker_conf.sasl_config)
48 |     if not bk then
49 |         return nil, err
50 |     end
51 | 
52 |     local req, err = protocol_consumer.list_offset_encode(self, {
53 |         topic_num = 1,
54 |         topics = {
55 |             [topic] = {
56 |                 partition_num = 1,
57 |                 partitions = {
58 |                     [partition] = {
59 |                         timestamp = timestamp
60 |                     }
61 |                 },
62 |             }
63 |         },
64 |     })
65 |     if not req then
66 |         return nil, err
67 |     end
68 | 
69 |     local resp, err = bk:send_receive(req)
70 |     if not resp then
71 |         return nil, err
72 |     end
73 | 
74 |     local result = protocol_consumer.list_offset_decode(resp)
75 |     local data = result.topics[topic].partitions[partition]
76 | 
77 |     local errcode = data.errcode
78 |     if errcode ~= 0 then
79 |         err = Errors[errcode] or Errors[-1]
80 | 
81 |         ngx_log(INFO, "list offset err: ", err.msg, "(", errcode, "), topic: ", topic,
82 |                 ", partition_id: ", partition)
83 | 
84 |         return nil, err.msg
85 |     end
86 | 
87 |     return data.offset, nil
88 | end
89 | 
90 | 
91 | --- Fetch message
92 | -- The maximum waiting time is 100 ms, and the maximum message response is 100 MiB.
93 | -- @author bzp2010
94 | -- @param self
95 | -- @param topic The name of topic
96 | -- @param partition The partition of topic
97 | -- @param offset The starting offset of the message to get
98 | -- @return messages The obtained messages, in a table; may be nil
99 | -- @return err The error of the request, may be nil
100 | function _M.fetch(self, topic, partition, offset)
101 |     local cli = self.client
102 |     local broker_conf, err = cli:choose_broker(topic, partition)
103 |     if not broker_conf then
104 |         return nil, err
105 |     end
106 | 
107 |     local bk, err = broker:new(broker_conf.host, broker_conf.port, self.socket_config, broker_conf.sasl_config)
108 |     if not bk then
109 |         return nil, err
110 |     end
111 | 
112 |     local req, err = protocol_consumer.fetch_encode(self, {
113 |         topic_num = 1,
114 |         topics = {
115 |             [topic] = {
116 |                 partition_num = 1,
117 |                 partitions = {
118 |                     [partition] = {
119 |                         offset = offset
120 |                     }
121 |                 },
122 |             }
123 |         },
124 |     })
125 |     if not req then
126 |         return nil, err
127 |     end
128 | 
129 |     local resp, err = bk:send_receive(req)
130 |     if not resp then
131 |         return nil, err
132 |     end
133 | 
134 |     local result = protocol_consumer.fetch_decode(resp, offset)
135 |     local data = result.topics[topic].partitions[partition]
136 | 
137 |     local errcode = data.errcode
138 |     if errcode ~= 0 then
139 |         err = Errors[errcode] or Errors[-1]
140 | 
141 |         ngx_log(INFO, "fetch message err: ", err.msg, "(", errcode, "), topic: ", topic,
142 |                 ", partition_id: ", partition)
143 | 
144 |         return nil, err.msg
145 |     end
146 | 
147 |     return data, nil
148 | end
149 | 
150 | 
151 | return _M
152 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/broker.lua:
--------------------------------------------------------------------------------
1 | -- Copyright (C) Dejiang Zhu(doujiang24)
2 | 
3 | 
4 | local response = require "resty.kafka.response"
5 | local request = require "resty.kafka.request"
6 | 
7 | local to_int32 = response.to_int32
8 | local setmetatable = setmetatable
9 | local tcp = ngx.socket.tcp
10 | local pid = ngx.worker.pid
11 | local tostring = tostring
12 | 
13 | local sasl = require "resty.kafka.sasl"
14 | 
15 | local _M = {}
16 | local mt = { __index = _M }
17 | 
18 | 
19 | local function _sock_send_recieve(sock, request)
20 |     local bytes, err = sock:send(request:package())
21 |     if not bytes then
22 |         return nil, err, true
23 |     end
24 | 
25 |     local len, err = sock:receive(4)
26 |     if not len then
27 |         if err == "timeout" then
28 |             sock:close()
29 |             return nil, err
30 |         end
31 |         return nil, err, true
32 |     end
33 | 
34 |     local data, err = sock:receive(to_int32(len))
35 |     if not data then
36 |         if err == "timeout" then
37 |             sock:close()
38 |             return nil, err
39 |         end
40 |         return nil, err, true
41 |     end
42 | 
43 |     return response:new(data, request.api_version), nil, true
44 | end
45 | 
46 | 
47 | local function _sasl_handshake(sock, brk)
48 |     local cli_id = "worker" .. pid()
49 |     local req = request:new(request.SaslHandshakeRequest, 0, cli_id,
50 |                             request.API_VERSION_V1)
51 | 
52 |     req:string(brk.auth.mechanism)
53 | 
54 |     local resp, err = _sock_send_recieve(sock, req, brk.config)
55 |     if not resp then
56 |         return nil, err
57 |     end
58 | 
59 |     local err_code = resp:int16()
60 |     if err_code ~= 0 then
61 |         local error_msg = resp:string()
62 | 
63 |         return nil, error_msg
64 |     end
65 | 
66 |     return true
67 | end
68 | 
69 | 
70 | local function _sasl_auth(sock, brk)
71 |     local cli_id = "worker" .. pid()
72 |     local req = request:new(request.SaslAuthenticateRequest, 0, cli_id,
73 |                             request.API_VERSION_V1)
74 | 
75 |     local ok, msg = sasl.encode(brk.auth.mechanism, nil, brk.auth.user,
76 |                                 brk.auth.password, sock)
77 |     if not ok then
78 |         return nil, msg
79 |     end
80 |     req:bytes(msg)
81 | 
82 |     local resp, err = _sock_send_recieve(sock, req, brk.config)
83 |     if not resp then
84 |         return nil, err
85 |     end
86 | 
87 |     local err_code = resp:int16()
88 |     local error_msg = resp:string()
89 |     local auth_bytes = resp:bytes()
90 | 
91 |     if err_code ~= 0 then
92 |         return nil, error_msg
93 |     end
94 | 
95 |     return true
96 | end
97 | 
98 | 
99 | local function sasl_auth(sock, broker)
100 |     local ok, err = _sasl_handshake(sock, broker)
101 |     if not ok then
102 |         return nil, err
103 |     end
104 | 
105 |     local ok, err = _sasl_auth(sock, broker)
106 |     if not ok then
107 |         return nil, err
108 |     end
109 | 
110 |     return true
111 | end
112 | 
113 | 
114 | function _M.new(self, host, port, socket_config, sasl_config)
115 |     return setmetatable({
116 |         host = host,
117 |         port = port,
118 |         config = socket_config,
119 |         auth = sasl_config,
120 |     }, mt)
121 | end
122 | 
123 | 
124 | function _M.send_receive(self, request)
125 |     local sock, err = tcp()
126 |     if not sock then
127 |         return nil, err, true
128 |     end
129 | 
130 |     sock:settimeout(self.config.socket_timeout)
131 | 
132 |     local ok, err = sock:connect(self.host, self.port)
133 |     if not ok then
134 |         return nil, err, true
135 |     end
136 | 
137 |     local times, err = sock:getreusedtimes()
138 |     if not times then
139 |         return nil, "failed to get reused times: " .. tostring(err), true
140 |     end
141 | 
142 |     if self.config.ssl and times == 0 then
143 |         -- first connected connection
144 |         local ok, err = sock:sslhandshake(false, self.host,
145 |                                           self.config.ssl_verify)
146 |         if not ok then
147 |             return nil, "failed to do SSL handshake with "
148 |                         .. self.host .. ":" .. tostring(self.port) .. ": "
149 |                         .. err, true
150 |         end
151 |     end
152 | 
153 |     if self.auth and times == 0 then  -- SASL AUTH
154 |         local ok, err = sasl_auth(sock, self)
155 |         if not ok then
156 |             return nil, "failed to do " .. self.auth.mechanism .. " auth with "
157 |                         .. self.host .. ":" .. tostring(self.port) .. ": "
158 |                         .. err, true
159 | 
160 |         end
161 |     end
162 | 
163 |     local data, err, retryable = _sock_send_recieve(sock, request)
164 | 
165 |     sock:setkeepalive(self.config.keepalive_timeout, self.config.keepalive_size)
166 | 
167 |     return data, err, retryable
168 | end
169 | 
170 | 
171 | return _M
172 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/client.lua:
--------------------------------------------------------------------------------
1 | -- Copyright (C) Dejiang Zhu(doujiang24)
2 | 
3 | 
4 | local broker = require "resty.kafka.broker"
5 | local request = require "resty.kafka.request"
6 | local Errors = require "resty.kafka.errors"
7 | 
8 | 
9 | local setmetatable = setmetatable
10 | local timer_at = ngx.timer.at
11 | local ngx_log = ngx.log
12 | local ERR = ngx.ERR
13 | local INFO = ngx.INFO
14 | local DEBUG = ngx.DEBUG
15 | local debug = ngx.config.debug
16 | local pid = ngx.worker.pid
17 | local time = ngx.time
18 | local sleep = ngx.sleep
19 | local ceil = math.ceil
20 | local pairs = pairs
21 | 
22 | 
23 | local ok, new_tab = pcall(require, "table.new")
24 | if not ok then
25 |     new_tab = function (narr, nrec) return {} end
26 | end
27 | 
28 | 
29 | local _M = { _VERSION = "0.20" }
30 | local mt = { __index = _M }
31 | 
32 | 
33 | local function _metadata_cache(self, topic)
34 |     if not topic then
35 |         return self.brokers, self.topic_partitions
36 |     end
37 | 
38 |     local partitions = self.topic_partitions[topic]
39 |     if partitions and partitions.num and partitions.num > 0 then
40 |         return self.brokers, partitions
41 |     end
42 | 
43 |     return nil, "not found topic"
44 | end
45 | 
46 | 
47 | local function metadata_encode(client_id, topics, num)
48 |     local id = 0    -- hard code correlation_id
49 |     local req = request:new(request.MetadataRequest, id, client_id, request.API_VERSION_V1)
50 | 
51 |     req:int32(num)
52 | 
53 |     for i = 1, num do
54 |         req:string(topics[i])
55 |     end
56 | 
57 |     return req
58 | end
59 | 
60 | 
61 | local function metadata_decode(resp)
62 |     local bk_num = resp:int32()
63 |     local brokers = new_tab(0, bk_num)
64 | 
65 |     for i = 1, bk_num do
66 |         local nodeid = resp:int32();
67 |         brokers[nodeid] = {
68 |             host = resp:string(),
69 |             port = resp:int32(),
70 |             rack = resp:string(),
71 |         }
72 |     end
73 |     local controller_id = resp:int32()
74 |     local topic_num = resp:int32()
75 |     local topics = new_tab(0, topic_num)
76 |     for i = 1, topic_num do
77 |         local tp_errcode = resp:int16()
78 |         local topic = resp:string()
79 |         local is_internal = resp:int8()
80 |         local partition_num = resp:int32()
81 |         local topic_info = new_tab(partition_num - 1, 3)
82 | 
83 |         topic_info.errcode = tp_errcode
84 |         topic_info.num = partition_num
85 | 
86 |         for j = 1, partition_num do
87 |             local partition_info = new_tab(0, 5)
88 | 
89 |             partition_info.errcode = resp:int16()
90 |             partition_info.id = resp:int32()
91 |             partition_info.leader = resp:int32()
92 | 
93 |             local repl_num = resp:int32()
94 |             local replicas = new_tab(repl_num, 0)
95 |             for m = 1, repl_num do
96 |                 replicas[m] = resp:int32()
97 |             end
98 |             partition_info.replicas = replicas
99 | 
100 |             local isr_num = resp:int32()
101 |             local isr = new_tab(isr_num, 0)
102 |             for m = 1, isr_num do
103 |                 isr[m] = resp:int32()
104 |             end
105 |             partition_info.isr = isr
106 | 
107 |             topic_info[partition_info.id] = partition_info
108 |         end
109 |         topics[topic] = topic_info
110 |     end
111 | 
112 |     return brokers, topics
113 | end
114 | 
115 | 
116 | local function api_versions_encode(client_id)
117 |     local id = 1    -- hard code correlation_id
118 |     return request:new(request.ApiVersionsRequest, id, client_id, request.API_VERSION_V2)
119 | end
120 | 
121 | 
122 | local function api_versions_decode(resp)
123 |     local errcode = resp:int16()
124 | 
125 |     local api_keys_num = resp:int32()
126 |     local api_keys = new_tab(0, api_keys_num)
127 |     for i = 1, api_keys_num do
128 |         local api_key, min_version, max_version = resp:int16(), resp:int16(), resp:int16()
129 |         api_keys[api_key] = {
130 |             min_version = min_version,
131 |             max_version = max_version,
132 |         }
133 |     end
134 | 
135 |     return errcode, api_keys
136 | end
137 | 
138 | 
139 | local function _fetch_api_versions(broker, client_id)
140 |     local resp, err = broker:send_receive(api_versions_encode(client_id))
141 |     if not resp then
142 |         return nil, err
143 |     else
144 |         local errcode, api_versions = api_versions_decode(resp)
145 | 
146 |         if errcode ~= 0 then
147 |             local err = Errors[errcode] or Errors[-1]
148 |             return nil, err.msg
149 |         else
150 |             return api_versions, nil
151 |         end
152 |     end
153 | end
154 | 
155 | 
156 | local function _fetch_metadata(self, new_topic)
157 |     local topics, num = {}, 0
158 |     for tp, _p in pairs(self.topic_partitions) do
159 |         num = num + 1
160 |         topics[num] = tp
161 |     end
162 | 
163 |     if new_topic and not self.topic_partitions[new_topic] then
164 |         num = num + 1
165 |         topics[num] = new_topic
166 |     end
167 | 
168 |     if num == 0 then
169 |         return nil, "not topic"
170 |     end
171 | 
172 |     local broker_list = self.broker_list
173 |     local sc = self.socket_config
174 |     local req = metadata_encode(self.client_id, topics, num)
175 | 
176 |     for i = 1, #broker_list do
177 |         local host, port, sasl_config = broker_list[i].host,
178 |                                         broker_list[i].port,
179 |                                         broker_list[i].sasl_config
180 |         host = sc.resolver and sc.resolver(host) or host
181 |         local bk = broker:new(host, port, sc, sasl_config)
182 | 
183 |         local resp, err = bk:send_receive(req)
184 |         if not resp then
185 |             ngx_log(INFO, "broker fetch metadata failed, err:", err,
186 |                     ", host: ", host, ", port: ", port)
187 |         else
188 |             local brokers, topic_partitions = metadata_decode(resp)
189 |             -- Confluent Cloud needs the SASL auth on all requests, including to brokers
190 |             -- we have been referred to. This injects the SASL auth in.
191 |             for _, b in pairs(brokers) do
192 |                 b.sasl_config = sasl_config
193 |                 b.host = sc.resolver and sc.resolver(b.host) or b.host
194 |             end
195 |             self.brokers, self.topic_partitions = brokers, topic_partitions
196 | 
197 |             -- fetch ApiVersions for compatibility
198 |             local api_versions, err = _fetch_api_versions(bk, self.client_id)
199 |             if not api_versions then
200 |                 ngx_log(INFO, "broker fetch api versions failed, err:", err,
201 |                         ", host: ", host, ", port: ", port)
202 |             else
203 |                 self.api_versions = api_versions
204 | 
205 |                 return brokers, topic_partitions, api_versions
206 |             end
207 |         end
208 |     end
209 | 
210 |     ngx_log(ERR, "all brokers failed in fetch topic metadata")
211 |     return nil, "all brokers failed in fetch topic metadata"
212 | end
213 | 
214 | 
215 | _M.refresh = _fetch_metadata
216 | 
217 | 
218 | local function meta_refresh(premature, self, interval)
219 |     if premature then
220 |         return
221 |     end
222 | 
223 |     _fetch_metadata(self)
224 | 
225 |     local ok, err = timer_at(interval, meta_refresh, self, interval)
226 |     if not ok then
227 |         ngx_log(ERR, "failed to create timer at meta_refresh, err: ", err)
228 |     end
229 | end
230 | 
231 | 
232 | function _M.new(self, broker_list, client_config)
233 |     local opts = client_config or {}
234 |     local socket_config = {
235 |         socket_timeout = opts.socket_timeout or 3000,
236 |         keepalive_timeout = opts.keepalive_timeout or (600 * 1000),   -- 10 min
237 |         keepalive_size = opts.keepalive_size or 2,
238 |         ssl = opts.ssl or false,
239 |         ssl_verify = opts.ssl_verify or false,
240 |         resolver = opts.resolver -- or nil
241 |     }
242 | 
243 |     local cli = setmetatable({
244 |         broker_list = broker_list,
245 |         topic_partitions = {},
246 |         brokers = {},
247 |         api_versions = {},  -- supported API versions on the broker
248 |         client_id = "worker" .. pid(),
249 |         socket_config = socket_config,
250 |     }, mt)
251 | 
252 |     if opts.refresh_interval then
253 |         meta_refresh(nil, cli, opts.refresh_interval / 1000)    -- in ms
254 |     end
255 | 
256 |     return cli
257 | end
258 | 
259 | 
260 | function _M.fetch_metadata(self, topic)
261 |     local brokers, partitions = _metadata_cache(self, topic)
262 |     if brokers then
263 |         return brokers, partitions
264 |     end
265 | 
266 |     _fetch_metadata(self, topic)
267 | 
268 |     return _metadata_cache(self, topic)
269 | end
270 | 
271 | 
272 | function _M.choose_broker(self, topic, partition_id)
273 |     local brokers, partitions = self:fetch_metadata(topic)
274 |     if not brokers then
275 |         return nil, partitions
276 |     end
277 | 
278 |     local partition = partitions[partition_id]
279 |     if not partition then
280 |         return nil, "not found partition"
281 |     end
282 | 
283 |     local config = brokers[partition.leader]
284 |     if not config then
285 |         return nil, "not found broker"
286 |     end
287 | 
288 |     return config
289 | end
290 | 
291 | 
292 | -- select the api version to use, the maximum version will
293 | -- be used within the allowed range
294 | function _M.choose_api_version(self, api_key, min_version, max_version)
295 |     local api_version = self.api_versions[api_key]
296 | 
297 |     if not api_version then
298 |         return -1
299 |     end
300 | 
301 |     local broker_min_version, broker_max_version = api_version.min_version, api_version.max_version
302 | 
303 |     if min_version and max_version then
304 |         if broker_max_version < max_version then
305 |             if broker_max_version < min_version then
306 |                 return -1
307 |             else
308 |                 return broker_max_version
309 |             end
310 |         elseif broker_min_version > max_version then
311 |             return -1
312 |         else
313 |             return max_version
314 |         end
315 |     else
316 |         return broker_max_version
317 |     end
318 | end
319 | 
320 | 
321 | return _M
322 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/errors.lua:
--------------------------------------------------------------------------------
1 | -- Copyright (C) Dejiang Zhu(doujiang24)
2 | 
3 | -- Reference: https://kafka.apache.org/protocol.html#protocol_error_codes
4 | local _M = {
5 |     [0] = {msg = 'NONE', retriable = false},
6 |     [-1] = {msg = 'UNKNOWN_SERVER_ERROR', retriable = false},
7 |     [1] = {msg = 'OFFSET_OUT_OF_RANGE', retriable = false},
8 |     [2] = {msg = 'CORRUPT_MESSAGE', retriable = true},
9 |     [3] = {msg = 'UNKNOWN_TOPIC_OR_PARTITION', retriable = true},
10 |     [4] = {msg = 'INVALID_FETCH_SIZE', retriable = false},
11 |     [5] = {msg = 'LEADER_NOT_AVAILABLE', retriable = true},
12 |     [6] = {msg = 'NOT_LEADER_OR_FOLLOWER', retriable = true},
13 |     [7] = {msg = 'REQUEST_TIMED_OUT', retriable = true},
14 |     [8] = {msg = 'BROKER_NOT_AVAILABLE', retriable = false},
15 |     [9] = {msg = 'REPLICA_NOT_AVAILABLE', retriable = true},
16 |     [10] = {msg = 'MESSAGE_TOO_LARGE', retriable = false},
17 |     [11] = {msg = 'STALE_CONTROLLER_EPOCH', retriable = false},
18 |     [12] = {msg = 'OFFSET_METADATA_TOO_LARGE', retriable = false},
19 |     [13] = {msg = 'NETWORK_EXCEPTION', retriable = true},
20 |     [14] = {msg = 'COORDINATOR_LOAD_IN_PROGRESS', retriable = true},
21 |     [15] = {msg = 'COORDINATOR_NOT_AVAILABLE', retriable = true},
22 |     [16] = {msg = 'NOT_COORDINATOR', retriable = true},
23 |     [17] = {msg = 'INVALID_TOPIC_EXCEPTION', retriable = false},
24 |     [18] = {msg = 'RECORD_LIST_TOO_LARGE', retriable = false},
25 |     [19] = {msg = 'NOT_ENOUGH_REPLICAS', retriable = true},
26 |     [20] = {msg = 'NOT_ENOUGH_REPLICAS_AFTER_APPEND', retriable = true},
'INVALID_REQUIRED_ACKS', retriable = false}, 28 | [22] = {msg = 'ILLEGAL_GENERATION', retriable = false}, 29 | [23] = {msg = 'INCONSISTENT_GROUP_PROTOCOL', retriable = false}, 30 | [24] = {msg = 'INVALID_GROUP_ID', retriable = false}, 31 | [25] = {msg = 'UNKNOWN_MEMBER_ID', retriable = false}, 32 | [26] = {msg = 'INVALID_SESSION_TIMEOUT', retriable = false}, 33 | [27] = {msg = 'REBALANCE_IN_PROGRESS', retriable = false}, 34 | [28] = {msg = 'INVALID_COMMIT_OFFSET_SIZE', retriable = false}, 35 | [29] = {msg = 'TOPIC_AUTHORIZATION_FAILED', retriable = false}, 36 | [30] = {msg = 'GROUP_AUTHORIZATION_FAILED', retriable = false}, 37 | [31] = {msg = 'CLUSTER_AUTHORIZATION_FAILED', retriable = false}, 38 | [32] = {msg = 'INVALID_TIMESTAMP', retriable = false}, 39 | [33] = {msg = 'UNSUPPORTED_SASL_MECHANISM', retriable = false}, 40 | [34] = {msg = 'ILLEGAL_SASL_STATE', retriable = false}, 41 | [35] = {msg = 'UNSUPPORTED_VERSION', retriable = false}, 42 | [36] = {msg = 'TOPIC_ALREADY_EXISTS', retriable = false}, 43 | [37] = {msg = 'INVALID_PARTITIONS', retriable = false}, 44 | [38] = {msg = 'INVALID_REPLICATION_FACTOR', retriable = false}, 45 | [39] = {msg = 'INVALID_REPLICA_ASSIGNMENT', retriable = false}, 46 | [40] = {msg = 'INVALID_CONFIG', retriable = false}, 47 | [41] = {msg = 'NOT_CONTROLLER', retriable = true}, 48 | [42] = {msg = 'INVALID_REQUEST', retriable = false}, 49 | [43] = {msg = 'UNSUPPORTED_FOR_MESSAGE_FORMAT', retriable = false}, 50 | [44] = {msg = 'POLICY_VIOLATION', retriable = false}, 51 | [45] = {msg = 'OUT_OF_ORDER_SEQUENCE_NUMBER', retriable = false}, 52 | [46] = {msg = 'DUPLICATE_SEQUENCE_NUMBER', retriable = false}, 53 | [47] = {msg = 'INVALID_PRODUCER_EPOCH', retriable = false}, 54 | [48] = {msg = 'INVALID_TXN_STATE', retriable = false}, 55 | [49] = {msg = 'INVALID_PRODUCER_ID_MAPPING', retriable = false}, 56 | [50] = {msg = 'INVALID_TRANSACTION_TIMEOUT', retriable = false}, 57 | [51] = {msg = 'CONCURRENT_TRANSACTIONS', retriable = false}, 58 | [52] = {msg = 'TRANSACTION_COORDINATOR_FENCED', retriable = false}, 59 | [53] = {msg = 'TRANSACTIONAL_ID_AUTHORIZATION_FAILED', retriable = false}, 60 | [54] = {msg = 'SECURITY_DISABLED', retriable = false}, 61 | [55] = {msg = 'OPERATION_NOT_ATTEMPTED', retriable = false}, 62 | [56] = {msg = 'KAFKA_STORAGE_ERROR', retriable = true}, 63 | [57] = {msg = 'LOG_DIR_NOT_FOUND', retriable = false}, 64 | [58] = {msg = 'SASL_AUTHENTICATION_FAILED', retriable = false}, 65 | [59] = {msg = 'UNKNOWN_PRODUCER_ID', retriable = false}, 66 | [60] = {msg = 'REASSIGNMENT_IN_PROGRESS', retriable = false}, 67 | [61] = {msg = 'DELEGATION_TOKEN_AUTH_DISABLED', retriable = false}, 68 | [62] = {msg = 'DELEGATION_TOKEN_NOT_FOUND', retriable = false}, 69 | [63] = {msg = 'DELEGATION_TOKEN_OWNER_MISMATCH', retriable = false}, 70 | [64] = {msg = 'DELEGATION_TOKEN_REQUEST_NOT_ALLOWED', retriable = false}, 71 | [65] = {msg = 'DELEGATION_TOKEN_AUTHORIZATION_FAILED', retriable = false}, 72 | [66] = {msg = 'DELEGATION_TOKEN_EXPIRED', retriable = false}, 73 | [67] = {msg = 'INVALID_PRINCIPAL_TYPE', retriable = false}, 74 | [68] = {msg = 'NON_EMPTY_GROUP', retriable = false}, 75 | [69] = {msg = 'GROUP_ID_NOT_FOUND', retriable = false}, 76 | [70] = {msg = 'FETCH_SESSION_ID_NOT_FOUND', retriable = true}, 77 | [71] = {msg = 'INVALID_FETCH_SESSION_EPOCH', retriable = true}, 78 | [72] = {msg = 'LISTENER_NOT_FOUND', retriable = true}, 79 | [73] = {msg = 'TOPIC_DELETION_DISABLED', retriable = false}, 80 | [74] = {msg = 'FENCED_LEADER_EPOCH', retriable = true}, 81 | [75] = {msg 
= 'UNKNOWN_LEADER_EPOCH', retriable = true}, 82 | [76] = {msg = 'UNSUPPORTED_COMPRESSION_TYPE', retriable = false}, 83 | [77] = {msg = 'STALE_BROKER_EPOCH', retriable = false}, 84 | [78] = {msg = 'OFFSET_NOT_AVAILABLE', retriable = true}, 85 | [79] = {msg = 'MEMBER_ID_REQUIRED', retriable = false}, 86 | [80] = {msg = 'PREFERRED_LEADER_NOT_AVAILABLE', retriable = true}, 87 | [81] = {msg = 'GROUP_MAX_SIZE_REACHED', retriable = false}, 88 | [82] = {msg = 'FENCED_INSTANCE_ID', retriable = false}, 89 | [83] = {msg = 'ELIGIBLE_LEADERS_NOT_AVAILABLE', retriable = true}, 90 | [84] = {msg = 'ELECTION_NOT_NEEDED', retriable = true}, 91 | [85] = {msg = 'NO_REASSIGNMENT_IN_PROGRESS', retriable = false}, 92 | [86] = {msg = 'GROUP_SUBSCRIBED_TO_TOPIC', retriable = false}, 93 | [87] = {msg = 'INVALID_RECORD', retriable = false}, 94 | [88] = {msg = 'UNSTABLE_OFFSET_COMMIT', retriable = true}, 95 | [89] = {msg = 'THROTTLING_QUOTA_EXCEEDED', retriable = true}, 96 | [90] = {msg = 'PRODUCER_FENCED', retriable = false}, 97 | [91] = {msg = 'RESOURCE_NOT_FOUND', retriable = false}, 98 | [92] = {msg = 'DUPLICATE_RESOURCE', retriable = false}, 99 | [93] = {msg = 'UNACCEPTABLE_CREDENTIAL', retriable = false}, 100 | [94] = {msg = 'INCONSISTENT_VOTER_SET', retriable = false}, 101 | [95] = {msg = 'INVALID_UPDATE_VERSION', retriable = false}, 102 | [96] = {msg = 'FEATURE_UPDATE_FAILED', retriable = false}, 103 | [97] = {msg = 'PRINCIPAL_DESERIALIZATION_FAILURE', retriable = false}, 104 | [98] = {msg = 'SNAPSHOT_NOT_FOUND', retriable = false}, 105 | [99] = {msg = 'POSITION_OUT_OF_RANGE', retriable = false}, 106 | [100] = {msg = 'UNKNOWN_TOPIC_ID', retriable = true}, 107 | [101] = {msg = 'DUPLICATE_BROKER_REGISTRATION', retriable = false}, 108 | [102] = {msg = 'BROKER_ID_NOT_REGISTERED', retriable = false}, 109 | [103] = {msg = 'INCONSISTENT_TOPIC_ID', retriable = true}, 110 | [104] = {msg = 'INCONSISTENT_CLUSTER_ID', retriable = false}, 111 | [105] = {msg = 'TRANSACTIONAL_ID_NOT_FOUND', retriable = false}, 112 | [106] = {msg = 'FETCH_SESSION_TOPIC_ID_ERROR', retriable = true}, 113 | } 114 | 115 | 116 | return _M 117 | -------------------------------------------------------------------------------- /lib/resty/kafka/producer.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Dejiang Zhu(doujiang24) 2 | 3 | 4 | local response = require "resty.kafka.response" 5 | local request = require "resty.kafka.request" 6 | local broker = require "resty.kafka.broker" 7 | local client = require "resty.kafka.client" 8 | local Errors = require "resty.kafka.errors" 9 | local sendbuffer = require "resty.kafka.sendbuffer" 10 | local ringbuffer = require "resty.kafka.ringbuffer" 11 | 12 | 13 | local setmetatable = setmetatable 14 | local timer_at = ngx.timer.at 15 | local timer_every = ngx.timer.every 16 | local is_exiting = ngx.worker.exiting 17 | local ngx_sleep = ngx.sleep 18 | local ngx_log = ngx.log 19 | local ERR = ngx.ERR 20 | local INFO = ngx.INFO 21 | local DEBUG = ngx.DEBUG 22 | local debug = ngx.config.debug 23 | local crc32 = ngx.crc32_short 24 | local pcall = pcall 25 | local pairs = pairs 26 | 27 | local API_VERSION_V0 = 0 28 | local API_VERSION_V1 = 1 29 | local API_VERSION_V2 = 2 30 | 31 | local ok, new_tab = pcall(require, "table.new") 32 | if not ok then 33 | new_tab = function (narr, nrec) return {} end 34 | end 35 | 36 | 37 | local _M = { _VERSION = "0.20" } 38 | local mt = { __index = _M } 39 | 40 | 41 | -- weak value table is useless here, cause _timer_flush always 
-- refs p
42 | -- so, a weak value table won't work here
43 | local cluster_inited = {}
44 | local DEFAULT_CLUSTER_NAME = 1
45 | 
46 | 
47 | local function default_partitioner(key, num, correlation_id)
48 |     local id = key and crc32(key) or correlation_id
49 | 
50 |     -- partition_id is continuous and starts from 0
51 |     return id % num
52 | end
53 | 
54 | 
55 | local function correlation_id(self)
56 |     local id = (self.correlation_id + 1) % 1073741824 -- 2^30
57 |     self.correlation_id = id
58 | 
59 |     return id
60 | end
61 | 
62 | 
63 | local function produce_encode(self, topic_partitions)
64 |     local req = request:new(request.ProduceRequest,
65 |                             correlation_id(self), self.client.client_id, self.api_version)
66 | 
67 |     req:int16(self.required_acks)
68 |     req:int32(self.request_timeout)
69 |     req:int32(topic_partitions.topic_num)
70 | 
71 |     for topic, partitions in pairs(topic_partitions.topics) do
72 |         req:string(topic)
73 |         req:int32(partitions.partition_num)
74 | 
75 |         for partition_id, buffer in pairs(partitions.partitions) do
76 |             req:int32(partition_id)
77 | 
78 |             -- MessageSetSize and MessageSet
79 |             req:message_set(buffer.queue, buffer.index)
80 |         end
81 |     end
82 | 
83 |     return req
84 | end
85 | 
86 | 
87 | local function produce_decode(resp)
88 |     local topic_num = resp:int32()
89 |     local ret = new_tab(0, topic_num)
90 |     local api_version = resp.api_version
91 | 
92 |     for i = 1, topic_num do
93 |         local topic = resp:string()
94 |         local partition_num = resp:int32()
95 | 
96 |         ret[topic] = {}
97 | 
98 |         -- ignore ThrottleTime
99 |         for j = 1, partition_num do
100 |             local partition = resp:int32()
101 | 
102 |             if api_version == API_VERSION_V0 or api_version == API_VERSION_V1 then
103 |                 ret[topic][partition] = {
104 |                     errcode = resp:int16(),
105 |                     offset = resp:int64(),
106 |                 }
107 | 
108 |             elseif api_version == API_VERSION_V2 then
109 |                 ret[topic][partition] = {
110 |                     errcode = resp:int16(),
111 |                     offset = resp:int64(),
112 |                     timestamp = resp:int64(), -- If CreateTime is used, this field is always -1
113 |                 }
114 |             end
115 |         end
116 |     end
117 | 
118 |     return ret
119 | end
120 | 
121 | 
122 | local function choose_partition(self, topic, key)
123 |     local brokers, partitions = self.client:fetch_metadata(topic)
124 |     if not brokers then
125 |         return nil, partitions
126 |     end
127 | 
128 |     return self.partitioner(key, partitions.num, self.correlation_id)
129 | end
130 | 
131 | 
132 | local function _flush_lock(self)
133 |     if not self.flushing then
134 |         if debug then
135 |             ngx_log(DEBUG, "flush lock acquired")
136 |         end
137 |         self.flushing = true
138 |         return true
139 |     end
140 |     return false
141 | end
142 | 
143 | 
144 | local function _flush_unlock(self)
145 |     if debug then
146 |         ngx_log(DEBUG, "flush lock released")
147 |     end
148 |     self.flushing = false
149 | end
150 | 
151 | 
152 | local function _send(self, broker_conf, topic_partitions)
153 |     local sendbuffer = self.sendbuffer
154 |     local resp, retryable = nil, true
155 |     local bk, err = broker:new(broker_conf.host, broker_conf.port, self.socket_config, broker_conf.sasl_config)
156 |     if bk then
157 |         local req = produce_encode(self, topic_partitions)
158 | 
159 |         resp, err, retryable = bk:send_receive(req)
160 |         if resp then
161 |             local result = produce_decode(resp)
162 | 
163 |             for topic, partitions in pairs(result) do
164 |                 for partition_id, r in pairs(partitions) do
165 |                     local errcode = r.errcode
166 | 
167 |                     if errcode == 0 then
168 |                         sendbuffer:offset(topic, partition_id, r.offset)
169 |                         sendbuffer:clear(topic, partition_id)
170 |                     else
171 |                         err = Errors[errcode] or Errors[-1]
172 | 
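-- e.g. Errors[7] == { msg = 'REQUEST_TIMED_OUT', retriable = true };
-- Errors[-1] (UNKNOWN_SERVER_ERROR) is the fallback for unlisted codes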
173 | -- set retries according to the error list 174 | local retryable0 = retryable or err.retriable 175 | 176 | local index = sendbuffer:err(topic, partition_id, err.msg, retryable0) 177 | 178 | ngx_log(INFO, "retry to send messages to kafka err: ", err.msg, "(", errcode, "), retryable: ", 179 | retryable0, ", topic: ", topic, ", partition_id: ", partition_id, ", length: ", index / 2) 180 | end 181 | end 182 | end 183 | 184 | return 185 | end 186 | end 187 | 188 | -- when broker new failed or send_receive failed 189 | for topic, partitions in pairs(topic_partitions.topics) do 190 | for partition_id, partition in pairs(partitions.partitions) do 191 | sendbuffer:err(topic, partition_id, err, retryable) 192 | end 193 | end 194 | end 195 | 196 | 197 | local function _batch_send(self, sendbuffer) 198 | local try_num = 1 199 | while try_num <= self.max_retry do 200 | -- aggregator 201 | local send_num, sendbroker = sendbuffer:aggregator(self.client) 202 | if send_num == 0 then 203 | break 204 | end 205 | 206 | for i = 1, send_num, 2 do 207 | local broker_conf, topic_partitions = sendbroker[i], sendbroker[i + 1] 208 | 209 | _send(self, broker_conf, topic_partitions) 210 | end 211 | 212 | if sendbuffer:done() then 213 | return true 214 | end 215 | 216 | self.client:refresh() 217 | 218 | try_num = try_num + 1 219 | if try_num < self.max_retry then 220 | ngx_sleep(self.retry_backoff / 1000) -- ms to s 221 | end 222 | end 223 | end 224 | 225 | 226 | local _flush_buffer 227 | 228 | 229 | local function _flush(premature, self) 230 | if not _flush_lock(self) then 231 | if debug then 232 | ngx_log(DEBUG, "previous flush not finished") 233 | end 234 | return 235 | end 236 | 237 | local ringbuffer = self.ringbuffer 238 | local sendbuffer = self.sendbuffer 239 | while true do 240 | local topic, key, msg = ringbuffer:pop() 241 | if not topic then 242 | break 243 | end 244 | 245 | local partition_id, err = choose_partition(self, topic, key) 246 | if not partition_id then 247 | partition_id = -1 248 | end 249 | 250 | local overflow = sendbuffer:add(topic, partition_id, key, msg) 251 | if overflow then -- reached batch_size in one topic-partition 252 | break 253 | end 254 | end 255 | 256 | local all_done = _batch_send(self, sendbuffer) 257 | 258 | if not all_done then 259 | for topic, partition_id, buffer in sendbuffer:loop() do 260 | local queue, index, err, retryable = buffer.queue, buffer.index, buffer.err, buffer.retryable 261 | 262 | if self.error_handle then 263 | local ok, err = pcall(self.error_handle, topic, partition_id, queue, index, err, retryable) 264 | if not ok then 265 | ngx_log(ERR, "failed to callback error_handle: ", err) 266 | end 267 | else 268 | ngx_log(ERR, "buffered messages send to kafka err: ", err, 269 | ", retryable: ", retryable, ", topic: ", topic, 270 | ", partition_id: ", partition_id, ", length: ", index / 2) 271 | end 272 | 273 | sendbuffer:clear(topic, partition_id) 274 | end 275 | end 276 | 277 | _flush_unlock(self) 278 | 279 | -- reset _timer_flushing_buffer after flushing complete 280 | self._timer_flushing_buffer = false 281 | 282 | if ringbuffer:need_send() then 283 | _flush_buffer(self) 284 | 285 | elseif is_exiting() and ringbuffer:left_num() > 0 then 286 | -- still can create 0 timer even exiting 287 | _flush_buffer(self) 288 | end 289 | 290 | return true 291 | end 292 | 293 | 294 | _flush_buffer = function (self) 295 | if self._timer_flushing_buffer then 296 | if debug then 297 | ngx_log(DEBUG, "another timer is flushing buffer, skipping it") 298 | end 299 | 300 | 
return
301 | end
302 | 
303 |     local ok, err = timer_at(0, _flush, self)
304 |     if ok then
305 |         self._timer_flushing_buffer = true
306 |         return
307 |     end
308 | 
309 |     ngx_log(ERR, "failed to create timer_at timer, err: ", err)
310 | end
311 | 
312 | 
313 | local function _timer_flush(premature, self)
314 |     self._timer_flushing_buffer = false
315 |     _flush_buffer(self)
316 | end
317 | 
318 | 
319 | function _M.new(self, broker_list, producer_config, cluster_name)
320 |     local name = cluster_name or DEFAULT_CLUSTER_NAME
321 |     local opts = producer_config or {}
322 |     local async = opts.producer_type == "async"
323 |     if async and cluster_inited[name] then
324 |         return cluster_inited[name]
325 |     end
326 | 
327 |     local cli = client:new(broker_list, producer_config)
328 |     local p = setmetatable({
329 |         client = cli,
330 |         correlation_id = 1,
331 |         request_timeout = opts.request_timeout or 2000,
332 |         retry_backoff = opts.retry_backoff or 100, -- ms
333 |         max_retry = opts.max_retry or 3,
334 |         required_acks = opts.required_acks or 1,
335 |         partitioner = opts.partitioner or default_partitioner,
336 |         error_handle = opts.error_handle,
337 |         api_version = opts.api_version or API_VERSION_V1,
338 |         async = async,
339 |         socket_config = cli.socket_config,
340 |         _timer_flushing_buffer = false,
341 |         ringbuffer = ringbuffer:new(opts.batch_num or 200, opts.max_buffering or 50000,
342 |                                     opts.wait_on_buffer_full or false, opts.wait_buffer_timeout or 5), -- defaults: 200, 50K, false, 5s
343 |         sendbuffer = sendbuffer:new(opts.batch_num or 200, opts.batch_size or 1048576)
344 |         -- defaults: 200 (batch_num), 1M (batch_size)
345 |         -- batch_size should be less than (MaxRequestSize / 2 - 10KiB);
346 |         -- MaxRequestSize is configured on the kafka server, default 100M
347 |     }, mt)
348 | 
349 |     if async then
350 |         cluster_inited[name] = p
351 |         local ok, err = timer_every((opts.flush_time or 1000) / 1000, _timer_flush, p) -- default: 1s
352 |         if not ok then
353 |             ngx_log(ERR, "failed to create timer_every, err: ", err)
354 |         end
355 | 
356 |     end
357 | 
358 |     return p
359 | end
360 | 
361 | 
362 | -- offset is cdata (LL in luajit)
363 | function _M.send(self, topic, key, message)
364 |     if self.async then
365 |         local ringbuffer = self.ringbuffer
366 | 
367 |         local ok, err = ringbuffer:add(topic, key, message)
368 |         if not ok then
369 |             return nil, err
370 |         end
371 | 
372 |         if not self.flushing and (ringbuffer:need_send() or is_exiting()) then
373 |             _flush_buffer(self)
374 |         end
375 | 
376 |         return true
377 |     end
378 | 
379 |     local partition_id, err = choose_partition(self, topic, key)
380 |     if not partition_id then
381 |         return nil, err
382 |     end
383 | 
384 |     local sendbuffer = self.sendbuffer
385 |     sendbuffer:add(topic, partition_id, key, message)
386 | 
387 |     local ok = _batch_send(self, sendbuffer)
388 |     if not ok then
389 |         sendbuffer:clear(topic, partition_id)
390 |         return nil, sendbuffer:err(topic, partition_id)
391 |     end
392 | 
393 |     return sendbuffer:offset(topic, partition_id)
394 | end
395 | 
396 | 
397 | function _M.flush(self)
398 |     return _flush(nil, self)
399 | end
400 | 
401 | 
402 | -- offset is cdata (LL in luajit)
403 | function _M.offset(self)
404 |     local topics = self.sendbuffer.topics
405 |     local sum, details = 0, {}
406 | 
407 |     for topic, partitions in pairs(topics) do
408 |         details[topic] = {}
409 |         for partition_id, buffer in pairs(partitions) do
410 |             sum = sum + buffer.offset
411 |             details[topic][partition_id] = buffer.offset
412 |         end
413 |     end
414 | 
415 |     return sum, details
416 | end
417 | 
418 | 
419 | return _M
420 | 
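A minimal usage sketch for the producer module above (the broker address, topic
name, and message are illustrative; this assumes an OpenResty request or timer
context):

local producer = require "resty.kafka.producer"

-- async mode: send() only appends to the ringbuffer; the timer_every
-- created in _M.new flushes it in batches (flush_time, default 1s)
local broker_list = { { host = "127.0.0.1", port = 9092 } }
local p = producer:new(broker_list, { producer_type = "async" })

local ok, err = p:send("test", nil, "hello from lua-resty-kafka")
if not ok then
    ngx.log(ngx.ERR, "kafka send err: ", err)
end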
-------------------------------------------------------------------------------- /lib/resty/kafka/protocol/common.lua: -------------------------------------------------------------------------------- 1 | local ffi = require("ffi") 2 | local cast = ffi.cast 3 | local bxor = bit.bxor 4 | local bnot = bit.bnot 5 | local band = bit.band 6 | local rshift = bit.rshift 7 | 8 | 9 | local _M = {} 10 | 11 | 12 | -- API versions 13 | _M.API_VERSION_V0 = 0 14 | _M.API_VERSION_V1 = 1 15 | _M.API_VERSION_V2 = 2 16 | _M.API_VERSION_V3 = 3 17 | _M.API_VERSION_V4 = 4 18 | _M.API_VERSION_V5 = 5 19 | _M.API_VERSION_V6 = 6 20 | _M.API_VERSION_V7 = 7 21 | _M.API_VERSION_V8 = 8 22 | _M.API_VERSION_V9 = 9 23 | _M.API_VERSION_V10 = 10 24 | _M.API_VERSION_V11 = 11 25 | _M.API_VERSION_V12 = 12 26 | _M.API_VERSION_V13 = 13 27 | 28 | 29 | -- API keys 30 | _M.ProduceRequest = 0 31 | _M.FetchRequest = 1 32 | _M.OffsetRequest = 2 33 | _M.MetadataRequest = 3 34 | _M.OffsetCommitRequest = 8 35 | _M.OffsetFetchRequest = 9 36 | _M.ConsumerMetadataRequest = 10 37 | _M.SaslHandshakeRequest = 17 38 | _M.ApiVersionsRequest = 18 39 | _M.SaslAuthenticateRequest = 36 40 | 41 | 42 | local crc32_t = ffi.new('const uint32_t[256]', (function() 43 | local function init_lookup_table(crc) 44 | local iteration = crc 45 | 46 | for _=1,8 do 47 | crc = band(crc, 1) == 1 48 | and bxor(rshift(crc, 1), 0x82f63b78) 49 | or rshift(crc, 1) 50 | end 51 | 52 | if iteration < 256 then 53 | return crc, init_lookup_table(iteration + 1) 54 | end 55 | end 56 | 57 | return init_lookup_table(0) 58 | end)()) 59 | 60 | 61 | -- Generate and self-increment correlation IDs 62 | -- The correlated is a table containing the correlation_id attribute 63 | function _M.correlation_id(correlated) 64 | local id = (correlated.correlation_id + 1) % 1073741824 -- 2^30 65 | correlated.correlation_id = id 66 | 67 | return id 68 | end 69 | 70 | 71 | -- The crc32c algorithm is implemented from the following url. 
72 | -- https://gist.github.com/bjne/ab9efaab585563418cb7462bb1254b6e 73 | function _M.crc32c(buf, len, crc) 74 | len = len or #buf 75 | buf, crc = cast('const uint8_t*', buf), crc or 0 76 | 77 | for i=0, len-1 do 78 | crc = bnot(crc) 79 | crc = bnot(bxor(rshift(crc, 8), crc32_t[bxor(crc % 256, buf[i])])) 80 | end 81 | 82 | return crc 83 | end 84 | 85 | 86 | return _M 87 | -------------------------------------------------------------------------------- /lib/resty/kafka/protocol/consumer.lua: -------------------------------------------------------------------------------- 1 | local protocol = require "resty.kafka.protocol.common" 2 | local proto_record = require "resty.kafka.protocol.record" 3 | local request = require "resty.kafka.request" 4 | 5 | local ffi = require "ffi" 6 | local table_insert = table.insert 7 | 8 | local _M = {} 9 | 10 | 11 | _M.LIST_OFFSET_TIMESTAMP_LAST = -1 12 | _M.LIST_OFFSET_TIMESTAMP_FIRST = -2 13 | _M.LIST_OFFSET_TIMESTAMP_MAX = -3 14 | 15 | 16 | local function _list_offset_encode(req, isolation_level, topic_partitions) 17 | req:int32(-1) -- replica_id 18 | 19 | if req.api_version >= protocol.API_VERSION_V2 then 20 | req:int8(isolation_level) -- isolation_level 21 | end 22 | 23 | req:int32(topic_partitions.topic_num) -- [topics] array length 24 | 25 | for topic, partitions in pairs(topic_partitions.topics) do 26 | req:string(topic) -- [topics] name 27 | req:int32(partitions.partition_num) -- [topics] [partitions] array length 28 | 29 | for partition_id, partition_info in pairs(partitions.partitions) do 30 | req:int32(partition_id) -- [topics] [partitions] partition_index 31 | req:int64(partition_info.timestamp) -- [topics] [partitions] timestamp 32 | 33 | if req.api_version == protocol.API_VERSION_V0 then 34 | req:int32(1) -- [topics] [partitions] max_num_offsets 35 | end 36 | end 37 | end 38 | 39 | return req 40 | end 41 | 42 | 43 | local function _fetch_encode(req, isolation_level, topic_partitions, rack_id) 44 | req:int32(-1) -- replica_id 45 | req:int32(100) -- max_wait_ms 46 | req:int32(0) -- min_bytes 47 | 48 | if req.api_version >= protocol.API_VERSION_V3 then 49 | req:int32(10 * 1024 * 1024) -- max_bytes: 10MB 50 | end 51 | 52 | if req.api_version >= protocol.API_VERSION_V4 then 53 | req:int8(isolation_level) -- isolation_level 54 | end 55 | 56 | if req.api_version >= protocol.API_VERSION_V7 then 57 | req:int32(0) -- session_id 58 | req:int32(-1) -- session_epoch 59 | end 60 | 61 | req:int32(topic_partitions.topic_num) -- [topics] array length 62 | 63 | for topic, partitions in pairs(topic_partitions.topics) do 64 | req:string(topic) -- [topics] name 65 | req:int32(partitions.partition_num) -- [topics] [partitions] array length 66 | 67 | for partition_id, partition_info in pairs(partitions.partitions) do 68 | req:int32(partition_id) -- [topics] [partitions] partition 69 | 70 | if req.api_version >= protocol.API_VERSION_V9 then 71 | req:int32(-1) -- [topics] [partitions] current_leader_epoch 72 | end 73 | 74 | req:int64(partition_info.offset) -- [topics] [partitions] fetch_offset 75 | 76 | if req.api_version >= protocol.API_VERSION_V5 then 77 | req:int64(-1) -- [topics] [partitions] log_start_offset 78 | end 79 | 80 | req:int32(10 * 1024 * 1024) -- [topics] [partitions] partition_max_bytes 81 | end 82 | end 83 | 84 | if req.api_version >= protocol.API_VERSION_V7 then 85 | -- ForgottenTopics list add by KIP-227, only brokers use it, consumers do not use it 86 | req:int32(0) -- [forgotten_topics_data] array length 87 | end 88 | 89 | if req.api_version >= 
protocol.API_VERSION_V11 then 90 | req:string(rack_id) -- rack_id 91 | end 92 | 93 | return req 94 | end 95 | 96 | 97 | function _M.list_offset_encode(consumer, topic_partitions, isolation_level) 98 | local client = consumer.client 99 | 100 | isolation_level = isolation_level or 0 101 | 102 | -- determine API version (min: v0; max: v2) 103 | local api_version = client:choose_api_version(protocol.OffsetRequest, 104 | protocol.API_VERSION_V0, 105 | protocol.API_VERSION_V2) 106 | 107 | if api_version < 0 then 108 | return nil, "API version choice failed" 109 | end 110 | 111 | local req = request:new(protocol.OffsetRequest, 112 | protocol.correlation_id(consumer), 113 | client.client_id, api_version) 114 | 115 | return _list_offset_encode(req, isolation_level, topic_partitions) 116 | end 117 | 118 | 119 | function _M.list_offset_decode(resp) 120 | 121 | local api_version = resp.api_version 122 | 123 | local throttle_time_ms -- throttle_time_ms 124 | if api_version >= protocol.API_VERSION_V2 then 125 | throttle_time_ms = resp:int32() 126 | end 127 | 128 | local topic_num = resp:int32() -- [topics] array length 129 | 130 | local topic_partitions = { 131 | topic_num = topic_num, 132 | topics = {}, 133 | } 134 | 135 | for i = 1, topic_num do 136 | local topic = resp:string() -- [topics] name 137 | local partition_num = resp:int32() -- [topics] [partitions] array length 138 | 139 | topic_partitions.topics[topic] = { 140 | partition_num = partition_num, 141 | partitions = {} 142 | } 143 | 144 | for j = 1, partition_num do 145 | local partition = resp:int32() -- [topics] [partitions] partition_index 146 | 147 | if api_version == protocol.API_VERSION_V0 then 148 | topic_partitions.topics[topic].partitions[partition] = { 149 | errcode = resp:int16(), -- [topics] [partitions] error_code 150 | offset = tostring(resp:int64()), -- [topics] [partitions] offset 151 | } 152 | else 153 | topic_partitions.topics[topic].partitions[partition] = { 154 | errcode = resp:int16(), -- [topics] [partitions] error_code 155 | timestamp = tostring(resp:int64()), -- [topics] [partitions] timestamp 156 | offset = tostring(resp:int64()), -- [topics] [partitions] offset 157 | } 158 | end 159 | end 160 | end 161 | 162 | return topic_partitions, throttle_time_ms 163 | end 164 | 165 | 166 | function _M.fetch_encode(consumer, topic_partitions, isolation_level, client_rack) 167 | local client = consumer.client 168 | 169 | isolation_level = isolation_level or 0 170 | client_rack = client_rack or "default" 171 | 172 | -- determine API version (min: v0; max: v11) 173 | local api_version = client:choose_api_version(request.FetchRequest, 174 | protocol.API_VERSION_V0, 175 | protocol.API_VERSION_V11) 176 | 177 | if api_version < 0 then 178 | return nil, "API version choice failed" 179 | end 180 | 181 | local req = request:new(request.FetchRequest, 182 | protocol.correlation_id(consumer), 183 | client.client_id, api_version) 184 | 185 | return _fetch_encode(req, isolation_level, topic_partitions, client_rack) 186 | end 187 | 188 | 189 | function _M.fetch_decode(resp, fetch_offset) 190 | local fetch_info = {} 191 | local api_version = resp.api_version 192 | 193 | if api_version >= protocol.API_VERSION_V1 then 194 | fetch_info.throttle_time_ms = resp:int32() -- throttle_time_ms 195 | end 196 | 197 | if api_version >= protocol.API_VERSION_V7 then 198 | fetch_info.errcode = resp:int16() -- error_code 199 | fetch_info.session_id = resp:int32() -- session_id 200 | end 201 | 202 | local topic_num = resp:int32() -- [responses] array length 203 | 
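-- The decoded structure built below mirrors the request layout, roughly:
-- { topic_num = 1, topics = { ["test"] = { partition_num = 1,
--   partitions = { [0] = { errcode = 0, high_watermark = ..., records = { ... } } } } } }
-- (the topic name and partition id here are illustrative)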
204 |     local topic_partitions = {
205 |         topic_num = topic_num,
206 |         topics = {},
207 |     }
208 | 
209 |     for i = 1, topic_num do
210 |         local topic = resp:string() -- [responses] topic
211 |         local partition_num = resp:int32() -- [responses] [partitions] array length
212 | 
213 |         topic_partitions.topics[topic] = {
214 |             partition_num = partition_num,
215 |             partitions = {}
216 |         }
217 | 
218 |         for j = 1, partition_num do
219 |             local partition = resp:int32() -- [responses] [partitions] partition_index
220 | 
221 |             local partition_ret = {
222 |                 errcode = resp:int16(), -- [responses] [partitions] error_code
223 |                 high_watermark = resp:int64(), -- [responses] [partitions] high_watermark
224 |             }
225 | 
226 |             if api_version >= protocol.API_VERSION_V4 then
227 |                 partition_ret.last_stable_offset = resp:int64() -- [responses] [partitions] last_stable_offset
228 | 
229 |                 if api_version >= protocol.API_VERSION_V5 then
230 |                     partition_ret.log_start_offset = resp:int64() -- [responses] [partitions] log_start_offset
231 |                 end
232 | 
233 |                 local aborted_transactions_num = resp:int32()
234 |                 partition_ret.aborted_transactions = {}
235 |                 for k = 1, aborted_transactions_num do
236 |                     table_insert(partition_ret.aborted_transactions, {
237 |                         producer_id = resp:int64(), -- [responses] [partitions] [aborted_transactions] producer_id
238 |                         first_offset = resp:int64(), -- [responses] [partitions] [aborted_transactions] first_offset
239 |                     })
240 |                 end
241 |             end
242 | 
243 |             if api_version >= protocol.API_VERSION_V11 then
244 |                 partition_ret.preferred_read_replica = resp:int32() -- [responses] [partitions] preferred_read_replica
245 |             end
246 | 
247 |             partition_ret.records = proto_record.message_set_decode(resp, fetch_offset) -- [responses] [partitions] records
248 | 
249 |             topic_partitions.topics[topic].partitions[partition] = partition_ret
250 |         end
251 |     end
252 | 
253 |     return topic_partitions, fetch_info
254 | end
255 | 
256 | 
257 | return _M
258 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/protocol/record.lua:
--------------------------------------------------------------------------------
1 | local protocol = require("resty.kafka.protocol.common")
2 | local bit = require("bit")
3 | local math = require("math")
4 | 
5 | local crc32c = protocol.crc32c
6 | local ngx_log = ngx.log
7 | local ngx_crc32 = ngx.crc32_long
8 | local ERR = ngx.ERR
9 | local INFO = ngx.INFO
10 | local table_insert = table.insert
11 | local band = bit.band
12 | local math_abs = math.abs
13 | 
14 | 
15 | local _M = {}
16 | 
17 | 
18 | --- Decode MessageSet v0 and v1 (the old format) data.
19 | -- Tip: The return value message contains the int64 value, which is of
20 | -- type cdata and cannot be used directly in some scenarios.
21 | -- @author bzp2010
22 | local function _message_set_v0_1_decode(resp, ret)
23 |     local message = {}
24 | 
25 |     message.offset = resp:int64()
26 | 
27 |     -- Low-version protocols sometimes return a lot of useless empty data,
28 |     -- characterized by a -1 offset at the beginning and a 0 offset for
29 |     -- every entry after that; that trailing data is discarded here.
30 |     -- The response will be closed (offset is set to the end),
31 |     -- i.e. it cannot continue reading any data.
32 |     -- Tip: offset uses int64 storage, and it is almost impossible for us
33 |     -- to write so many messages, so the case where offset does have a
34 |     -- negative value is not considered here.
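-- For reference, each MessageSet v0/v1 entry on the wire is laid out as:
--   offset:int64, message_size:int32, crc:int32, magic:int8, attributes:int8,
--   [timestamp:int64, v1 only], key:bytes, value:bytes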
35 | if message.offset < 0 then 36 | resp:close() 37 | return "empty message" -- return error 38 | end 39 | 40 | local message_size = resp:int32() 41 | local crc = resp:int32() 42 | local crc_content = resp:peek_bytes(resp.offset, message_size - 4) 43 | local calc_crc = ngx_crc32(crc_content) 44 | if crc ~= calc_crc and math_abs(crc) + math_abs(calc_crc) ~= 4294967296 then 45 | return "crc checksum error" 46 | end 47 | 48 | local magic_byte = resp:int8() 49 | 50 | -- TODO: support compressed Message Set 51 | local attributes = resp:int8() 52 | 53 | -- message version 1 added timestamp 54 | if magic_byte == protocol.API_VERSION_V1 then 55 | message.timestamp = resp:int64() 56 | else 57 | message.timestamp = 0 58 | end 59 | 60 | message.key = resp:bytes() 61 | message.value = resp:bytes() 62 | 63 | table_insert(ret, message) 64 | 65 | return nil -- error 66 | end 67 | 68 | 69 | --- Decode MessageSet v2 (aka RecordBatch) data. 70 | -- Tip: The return value message contains the int64 value, which is of 71 | -- type cdata and cannot be used directly in some scenarios. 72 | -- @author bzp2010 73 | local function _message_set_v2_decode(resp, ret, fetch_offset) 74 | ret = ret or {} 75 | 76 | -- RecordBatch decoder, refer to this documents 77 | -- https://kafka.apache.org/documentation/#recordbatch 78 | local base_offset = resp:int64() -- baseOffset 79 | local batch_length = resp:int32() -- batchLength 80 | local partition_leader_epoch = resp:int32() -- partitionLeaderEpoch 81 | local magic_byte = resp:int8() -- magic 82 | local crc = resp:int32() -- crc 83 | 84 | -- Get all remaining message bodies by length for crc. The crc content 85 | -- starts with attributes, so here we need to reduce its middle 86 | -- three fields' length, include partition_leader_epoch (4 byte), 87 | -- magic_byte (1 byte) and crc (4 byte). 88 | local crc_content = resp:peek_bytes(resp.offset, batch_length - 4 - 1 - 4) 89 | local calc_crc = crc32c(crc_content) 90 | if crc ~= calc_crc then 91 | return "crc checksum error" 92 | end 93 | 94 | -- TODO: support compressed Message Set 95 | local attributes = resp:int16() -- attributes 96 | 97 | local last_offset_delta = resp:int32() -- lastOffsetDelta 98 | local last_offset = base_offset + last_offset_delta 99 | 100 | -- If the last record's offset is also less than fetch's offset, 101 | -- all outdated records are discarded. 102 | if last_offset < fetch_offset then 103 | resp:close() 104 | return "all records outdated" 105 | end 106 | 107 | -- RecordBatch contains the timestamp starting value and the 108 | -- maximum value of these records. 109 | local first_timestamp = resp:int64() -- firstTimestamp 110 | local max_timestamp = resp:int64() -- maxTimestamp 111 | 112 | -- These fields are intended to support idempotent messages. 113 | -- The features are NYI 114 | local producer_id = resp:int64() -- producerId 115 | local producer_epoch = resp:int16() -- producerEpoch 116 | local base_sequence = resp:int32() -- baseSequence 117 | 118 | local record_num = resp:int32() -- [records] array length 119 | 120 | for i = 1, record_num do 121 | local message = {} 122 | 123 | -- Record decoder, refer to this documents 124 | -- https://kafka.apache.org/documentation/#record 125 | local len = resp:varint() 126 | local message_end = resp.offset + len 127 | 128 | -- According to the protocol, only reserved. 129 | local record_attributes = resp:int8() 130 | 131 | -- Offset of this Record from RecordBatch's base value. 
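-- Both deltas are Zig-Zag varints; the absolute values are rebuilt below
-- as base_offset + offset_delta and first_timestamp + timestamp_delta.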
132 |         local timestamp_delta = resp:varlong()
133 |         local offset_delta = resp:varint()
134 | 
135 |         -- If the isControlBatch flag (bit 5) in attributes is set to 1,
136 |         -- the current MessageSet (RecordBatch) is a ControlBatch.
137 |         -- !! Control Batches are not processed for now, they will be skipped !!
138 |         if band(attributes, 0x20) > 0 then
139 |             resp:varint() -- keyLength skipped
140 |             resp:int16() -- ControlBatch version skipped
141 |             resp:int16() -- ControlBatch type skipped
142 | 
143 |             ngx_log(INFO, "A Control Batch was skipped during the parsing of the message v2")
144 | 
145 |             goto continue
146 |         end
147 | 
148 |         message.offset = base_offset + offset_delta
149 |         message.timestamp = first_timestamp + timestamp_delta
150 |         message.key = resp:varint_bytes()
151 |         message.value = resp:varint_bytes()
152 | 
153 |         table_insert(ret, message)
154 | 
155 |         -- Calculates the length of the header field by the expected end position
156 |         -- of the message and skips the specified number of bytes.
157 |         -- !! Message headers are not parsed for now, they will be skipped !!
158 |         local header_len = message_end - resp.offset
159 |         resp.offset = resp.offset + header_len
160 |         ::continue::
161 |     end
162 | 
163 |     return nil
164 | end
165 | 
166 | 
167 | ---------
168 | -- Decode the message set; the right decoder version is selected
169 | -- automatically according to the MagicByte inside the message
170 | -- @author bzp2010
171 | function _M.message_set_decode(resp, fetch_offset)
172 |     local ret = {}
173 |     local message_set_size = resp:int32()
174 | 
175 |     -- Keep parsing the message until all the data in the
176 |     -- current response is exhausted
177 |     while resp:remain() > 0 do
178 |         -- Peek the 1-byte magic_byte located 16 bytes ahead (past the
179 |         -- offset and length fields); it contains the Message version
180 |         local message_version = resp:peek_int(16, 1)
181 | 
182 |         local err
183 |         if message_version == 0 or message_version == 1 then
184 |             -- old MessageSet v0 or v1
185 |             err = _message_set_v0_1_decode(resp, ret)
186 |         else
187 |             -- MessageSet v2 aka RecordBatch
188 |             err = _message_set_v2_decode(resp, ret, fetch_offset)
189 |         end
190 | 
191 |         if err then
192 |             ngx_log(ERR, "failed to decode message set, err: ", err)
193 |         end
194 |     end
195 | 
196 |     return ret
197 | end
198 | 
199 | 
200 | return _M
201 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/request.lua:
--------------------------------------------------------------------------------
1 | -- Copyright (C) Dejiang Zhu(doujiang24)
2 | local ffi = require "ffi"
3 | 
4 | 
5 | local bit = require "bit"
6 | 
7 | 
8 | local setmetatable = setmetatable
9 | local concat = table.concat
10 | local rshift = bit.rshift
11 | local band = bit.band
12 | local char = string.char
13 | local crc32 = ngx.crc32_long
14 | local ngx_now = ngx.now
15 | local tonumber = tonumber
16 | 
17 | 
18 | local _M = {}
19 | local mt = { __index = _M }
20 | 
21 | local MESSAGE_VERSION_0 = 0
22 | local MESSAGE_VERSION_1 = 1
23 | 
24 | 
25 | local API_VERSION_V0 = 0
26 | local API_VERSION_V1 = 1
27 | local API_VERSION_V2 = 2
28 | local API_VERSION_V3 = 3
29 | 
30 | 
31 | _M.API_VERSION_V0 = 0
32 | _M.API_VERSION_V1 = 1
33 | _M.API_VERSION_V2 = 2
34 | _M.API_VERSION_V3 = 3
35 | 
36 | _M.ProduceRequest = 0
37 | _M.FetchRequest = 1
38 | _M.OffsetRequest = 2
39 | _M.MetadataRequest = 3
40 | _M.OffsetCommitRequest = 8
41 | _M.OffsetFetchRequest = 9
42 | _M.ConsumerMetadataRequest = 10
43 | 
44 | _M.SaslHandshakeRequest = 17
45 | _M.ApiVersionsRequest = 18 46 | _M.SaslAuthenticateRequest = 36 47 | 48 | 49 | local function str_int8(int) 50 | return char(band(int, 0xff)) 51 | end 52 | 53 | 54 | local function str_int16(int) 55 | return char(band(rshift(int, 8), 0xff), 56 | band(int, 0xff)) 57 | end 58 | 59 | 60 | local function str_int32(int) 61 | -- ngx.say(debug.traceback()) 62 | return char(band(rshift(int, 24), 0xff), 63 | band(rshift(int, 16), 0xff), 64 | band(rshift(int, 8), 0xff), 65 | band(int, 0xff)) 66 | end 67 | 68 | 69 | -- XX int can be cdata: LL or lua number 70 | local function str_int64(int) 71 | int = int * 1LL 72 | 73 | return char(tonumber(band(rshift(int, 56), 0xff)), 74 | tonumber(band(rshift(int, 48), 0xff)), 75 | tonumber(band(rshift(int, 40), 0xff)), 76 | tonumber(band(rshift(int, 32), 0xff)), 77 | tonumber(band(rshift(int, 24), 0xff)), 78 | tonumber(band(rshift(int, 16), 0xff)), 79 | tonumber(band(rshift(int, 8), 0xff)), 80 | tonumber(band(int, 0xff))) 81 | end 82 | 83 | 84 | function _M.new(self, apikey, correlation_id, client_id, api_version) 85 | api_version = api_version or API_VERSION_V0 86 | local len = 8 87 | local offset = 5 88 | local req = { 89 | 0, -- request size: int32 90 | str_int16(apikey), 91 | str_int16(api_version), 92 | str_int32(correlation_id), 93 | } 94 | 95 | if api_version > API_VERSION_V0 then 96 | local cid, clen 97 | if not client_id or #client_id == 0 then 98 | cid, clen = str_int16(-1), 2 99 | else 100 | cid, clen = client_id, #client_id 101 | end 102 | 103 | req[5] = str_int16(clen) 104 | req[6] = cid 105 | len = len + 2 + clen 106 | offset = offset + 2 107 | end 108 | 109 | return setmetatable({ 110 | _req = req, 111 | api_key = apikey, 112 | api_version = api_version, 113 | offset = offset, 114 | len = len, 115 | }, mt) 116 | end 117 | 118 | 119 | function _M.int8(self, int) 120 | local req = self._req 121 | local offset = self.offset 122 | 123 | req[offset] = str_int8(int) 124 | 125 | self.offset = offset + 1 126 | self.len = self.len + 1 127 | end 128 | 129 | 130 | function _M.int16(self, int) 131 | local req = self._req 132 | local offset = self.offset 133 | 134 | req[offset] = str_int16(int) 135 | 136 | self.offset = offset + 1 137 | self.len = self.len + 2 138 | end 139 | 140 | 141 | function _M.int32(self, int) 142 | local req = self._req 143 | local offset = self.offset 144 | 145 | req[offset] = str_int32(int) 146 | 147 | self.offset = offset + 1 148 | self.len = self.len + 4 149 | end 150 | 151 | 152 | function _M.int64(self, int) 153 | local req = self._req 154 | local offset = self.offset 155 | 156 | req[offset] = str_int64(int) 157 | 158 | self.offset = offset + 1 159 | self.len = self.len + 8 160 | end 161 | 162 | 163 | function _M.string(self, str) 164 | if not str then 165 | -- -1 mean null 166 | return self:int16(-1) 167 | end 168 | 169 | local req = self._req 170 | local offset = self.offset 171 | local str_len = #str 172 | 173 | req[offset] = str_int16(str_len) 174 | req[offset + 1] = str 175 | 176 | self.offset = offset + 2 177 | self.len = self.len + 2 + str_len 178 | end 179 | 180 | 181 | function _M.bytes(self, str) 182 | local req = self._req 183 | local offset = self.offset 184 | local str_len = #str 185 | 186 | req[offset] = str_int32(str_len) 187 | req[offset + 1] = str 188 | 189 | self.offset = offset + 2 190 | self.len = self.len + 4 + str_len 191 | end 192 | 193 | 194 | local function message_package(key, msg, message_version) 195 | local key = key or "" 196 | local key_len = #key 197 | local len = #msg 198 | 199 | local req 200 
| local head_len 201 | if message_version == MESSAGE_VERSION_1 then 202 | req = { 203 | -- MagicByte 204 | str_int8(1), 205 | -- XX hard code no Compression 206 | str_int8(0), 207 | str_int64(ffi.new("int64_t", (ngx_now() * 1000))), -- timestamp 208 | str_int32(key_len), 209 | key, 210 | str_int32(len), 211 | msg, 212 | } 213 | head_len = 22 214 | 215 | else 216 | req = { 217 | -- MagicByte 218 | str_int8(0), 219 | -- XX hard code no Compression 220 | str_int8(0), 221 | str_int32(key_len), 222 | key, 223 | str_int32(len), 224 | msg, 225 | } 226 | head_len = 14 227 | end 228 | 229 | local str = concat(req) 230 | return crc32(str), str, key_len + len + head_len 231 | end 232 | 233 | 234 | function _M.message_set(self, messages, index) 235 | local req = self._req 236 | local off = self.offset 237 | local msg_set_size = 0 238 | local index = index or #messages 239 | 240 | local message_version = MESSAGE_VERSION_0 241 | if self.api_key == _M.ProduceRequest and self.api_version == API_VERSION_V2 then 242 | message_version = MESSAGE_VERSION_1 243 | end 244 | 245 | for i = 1, index, 2 do 246 | local crc32, str, msg_len = message_package(messages[i], messages[i + 1], message_version) 247 | 248 | req[off + 1] = str_int64(0) -- offset 249 | req[off + 2] = str_int32(msg_len) -- include the crc32 length 250 | 251 | req[off + 3] = str_int32(crc32) 252 | req[off + 4] = str 253 | 254 | off = off + 4 255 | msg_set_size = msg_set_size + msg_len + 12 256 | end 257 | 258 | req[self.offset] = str_int32(msg_set_size) -- MessageSetSize 259 | 260 | self.offset = off + 1 261 | self.len = self.len + 4 + msg_set_size 262 | end 263 | 264 | 265 | function _M.package(self) 266 | local req = self._req 267 | req[1] = str_int32(self.len) 268 | 269 | return req 270 | end 271 | 272 | 273 | return _M 274 | -------------------------------------------------------------------------------- /lib/resty/kafka/response.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Dejiang Zhu(doujiang24) 2 | 3 | 4 | local bit = require("bit") 5 | local request = require("resty.kafka.request") 6 | 7 | 8 | local setmetatable = setmetatable 9 | local byte = string.byte 10 | local sub = string.sub 11 | local band = bit.band 12 | local lshift = bit.lshift 13 | local arshift = bit.arshift 14 | local bor = bit.bor 15 | local bxor = bit.bxor 16 | local strbyte = string.byte 17 | local floor = math.floor 18 | 19 | 20 | local _M = {} 21 | local mt = { __index = _M } 22 | 23 | 24 | function _M.new(self, str, api_version) 25 | local resp = setmetatable({ 26 | str = str, 27 | offset = 1, 28 | correlation_id = 0, 29 | api_version = api_version, 30 | }, mt) 31 | 32 | resp.correlation_id = resp:int32() 33 | 34 | return resp 35 | end 36 | 37 | 38 | local function _int8(str, offset) 39 | return byte(str, offset) 40 | end 41 | 42 | 43 | function _M.int8(self) 44 | local str = self.str 45 | local offset = self.offset 46 | self.offset = offset + 1 47 | return _int8(str, offset) 48 | end 49 | 50 | 51 | local function _int16(str, offset) 52 | local high = byte(str, offset) 53 | -- high padded 54 | return bor((high >= 128) and 0xffff0000 or 0, 55 | lshift(high, 8), 56 | byte(str, offset + 1)) 57 | end 58 | 59 | 60 | function _M.int16(self) 61 | local str = self.str 62 | local offset = self.offset 63 | self.offset = offset + 2 64 | 65 | return _int16(str, offset) 66 | end 67 | 68 | 69 | local function _int32(str, offset) 70 | local offset = offset or 1 71 | local a, b, c, d = strbyte(str, offset, offset + 3) 72 | 
return bor(lshift(a, 24), lshift(b, 16), lshift(c, 8), d)
73 | end
74 | _M.to_int32 = _int32
75 | 
76 | 
77 | function _M.int32(self)
78 |     local str = self.str
79 |     local offset = self.offset
80 |     self.offset = offset + 4
81 | 
82 |     return _int32(str, offset)
83 | end
84 | 
85 | 
86 | local function _int64(str, offset)
87 |     local a, b, c, d, e, f, g, h = strbyte(str, offset, offset + 7)
88 | 
89 |     --[[
90 |     -- only 52 bit accuracy
91 |     local hi = bor(lshift(a, 24), lshift(b, 16), lshift(c, 8), d)
92 |     local lo = bor(lshift(f, 16), lshift(g, 8), h)
93 |     return hi * 4294967296 + 16777216 * e + lo
94 |     --]]
95 | 
96 |     return 4294967296LL * bor(lshift(a, 56), lshift(b, 48), lshift(c, 40), lshift(d, 32))
97 |             + 16777216LL * e
98 |             + bor(lshift(f, 16), lshift(g, 8), h)
99 | end
100 | 
101 | 
102 | -- XX return cdata: LL
103 | function _M.int64(self)
104 |     local str = self.str
105 |     local offset = self.offset
106 |     self.offset = offset + 8
107 | 
108 |     return _int64(str, offset)
109 | end
110 | 
111 | 
112 | -- Get a fixed-length integer from an offset position without
113 | -- modifying the global offset of the response
114 | -- Both peek_offset and length are in bytes
115 | function _M.peek_int(self, peek_offset, length)
116 |     local str = self.str
117 |     local offset = self.offset + peek_offset
118 | 
119 |     if length == 8 then
120 |         return _int64(str, offset)
121 |     elseif length == 4 then
122 |         return _int32(str, offset)
123 |     elseif length == 2 then
124 |         return _int16(str, offset)
125 |     else
126 |         return _int8(str, offset)
127 |     end
128 | end
129 | 
130 | 
131 | function _M.string(self)
132 |     local len = self:int16()
133 |     -- len = -1 means null
134 |     if len < 0 then
135 |         return nil
136 |     end
137 | 
138 |     local offset = self.offset
139 |     self.offset = offset + len
140 | 
141 |     return sub(self.str, offset, offset + len - 1)
142 | end
143 | 
144 | 
145 | function _M.bytes(self)
146 |     local len = self:int32()
147 |     if len < 0 then
148 |         return nil
149 |     end
150 | 
151 |     local offset = self.offset
152 |     self.offset = offset + len
153 | 
154 |     return sub(self.str, offset, offset + len - 1)
155 | end
156 | 
157 | 
158 | function _M.correlation_id(self)
159 |     return self.correlation_id
160 | end
161 | 
162 | 
163 | -- The following varint decoding code is adapted from:
164 | -- https://github.com/Neopallium/lua-pb/blob/master/pb/standard/unpack.lua#L64-L133
165 | local function _uvar64(self, num)
166 |     -- split the low 48 bits accumulated so far into bytes
167 |     local b1 = band(num, 0xFF)
168 |     num = floor(num / 256)
169 |     local b2 = band(num, 0xFF)
170 |     num = floor(num / 256)
171 |     local b3 = band(num, 0xFF)
172 |     num = floor(num / 256)
173 |     local b4 = band(num, 0xFF)
174 |     num = floor(num / 256)
175 |     local b5 = band(num, 0xFF)
176 |     num = floor(num / 256)
177 |     local b6 = band(num, 0xFF)
178 |     num = floor(num / 256)
179 | 
180 |     local seg = self:int8()
181 |     local base_factor = 2 -- still one bit in 'num'
182 |     num = num + (band(seg, 0x7F) * base_factor)
183 |     while seg >= 128 do
184 |         base_factor = base_factor * 128
185 |         seg = self:int8()
186 |         num = num + (band(seg, 0x7F) * base_factor)
187 |     end
188 |     -- the last 16 bits
189 |     local b7 = band(num, 0xFF)
190 |     num = floor(num / 256)
191 |     local b8 = band(num, 0xFF)
192 | 
193 |     return 4294967296LL * bor(lshift(b8, 56), lshift(b7, 48), lshift(b6, 40), lshift(b5, 32))
194 |             + 16777216LL * b4
195 |             + bor(lshift(b3, 16), lshift(b2, 8), b1)
196 | end
197 | 
198 | 
199 | -- Decode bytes as Zig-Zag encoded unsigned integer (32-bit or 64-bit)
200 | local function _uvar(self)
201 |     local seg = self:int8()
202 |     local num = band(seg, 0x7F)
203 | 
204 |     -- In every byte, the first (high) bit marks whether more data follows,
205 |     -- and the remaining 7 bits carry the actual data.
206 |     -- So each byte can express values up to 128, and every time the next
207 |     -- byte is fetched, the factor is multiplied by 128 to place its bits
208 |     -- at the correct position.
209 |     local base_factor = 128
210 | 
211 |     -- A set high bit marks the next byte as still being a segment of this
212 |     -- varint. Keep consuming segments until none remain.
213 |     while seg >= 128 do
214 |         seg = self:int8()
215 | 
216 |         -- When out of range, change to 64-bit parsing mode.
217 |         if base_factor > 128 ^ 6 and seg > 0x1F then
218 |             return _uvar64(self, num)
219 |         end
220 | 
221 |         num = num + (band(seg, 0x7F) * base_factor)
222 |         base_factor = base_factor * 128
223 |     end
224 | 
225 |     return num
226 | end
227 | 
228 | 
229 | -- Decode Zig-Zag encoded unsigned 32-bit integer as 32-bit integer
230 | function _M.varint(self)
231 |     local num = _uvar(self)
232 | 
233 |     -- decode 32-bit integer Zig-Zag
234 |     return bxor(arshift(num, 1), -band(num, 1))
235 | end
236 | 
237 | 
238 | -- Decode Zig-Zag encoded unsigned 64-bit integer as 64-bit integer
239 | function _M.varlong(self)
240 |     local num = _uvar(self)
241 | 
242 |     -- decode 64-bit integer Zig-Zag
243 |     local high_bit = false
244 |     -- we need to work with a positive number
245 |     if num < 0 then
246 |         high_bit = true
247 |         num = 0x8000000000000000 + num
248 |     end
249 |     if num % 2 == 1 then
250 |         num = -(num + 1)
251 |     end
252 |     if high_bit then
253 |         return (num / 2) + 0x4000000000000000
254 |     end
255 |     return num / 2
256 | end
257 | 
258 | 
259 | function _M.peek_bytes(self, offset, len)
260 |     offset = offset or self.offset
261 |     return sub(self.str, offset, offset + len - 1)
262 | end
263 | 
264 | 
267 | -- Decode the bytes used in a Record whose length is indicated by a leading varint.
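-- (Worked Zig-Zag examples for the varint decoders above: a single byte
--  0x06 decodes to 3, i.e. 6/2; 0x05 decodes to -3, i.e. -(5+1)/2.)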
268 | function _M.varint_bytes(self) 269 | local len = self:varint() 270 | 271 | if len < 0 then 272 | return nil 273 | end 274 | 275 | local offset = self.offset 276 | self.offset = offset + len 277 | 278 | return self:peek_bytes(offset, len) 279 | end 280 | 281 | 282 | -- Get the number of data in the response that has not yet been parsed 283 | function _M.remain(self) 284 | return #self.str - self.offset 285 | end 286 | 287 | 288 | -- Forcibly close the response and set the offset to the end so that 289 | -- it can no longer read more data. 290 | function _M.close(self) 291 | self.offset = #self.str 292 | end 293 | 294 | 295 | return _M 296 | -------------------------------------------------------------------------------- /lib/resty/kafka/ringbuffer.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Dejiang Zhu(doujiang24) 2 | 3 | local semaphore = require "ngx.semaphore" 4 | 5 | local setmetatable = setmetatable 6 | local ngx_null = ngx.null 7 | 8 | local ok, new_tab = pcall(require, "table.new") 9 | if not ok then 10 | new_tab = function (narr, nrec) return {} end 11 | end 12 | 13 | 14 | local _M = {} 15 | local mt = { __index = _M } 16 | 17 | function _M.new(self, batch_num, max_buffering, wait_on_buffer_full, wait_buffer_timeout) 18 | local sendbuffer = { 19 | queue = new_tab(max_buffering * 3, 0), 20 | batch_num = batch_num, 21 | size = max_buffering * 3, 22 | start = 1, 23 | num = 0, 24 | wait_on_buffer_full = wait_on_buffer_full, 25 | wait_buffer_timeout = wait_buffer_timeout, 26 | } 27 | 28 | if wait_on_buffer_full then 29 | sendbuffer.sema = semaphore.new() 30 | end 31 | 32 | return setmetatable(sendbuffer, mt) 33 | end 34 | 35 | 36 | function _M.add(self, topic, key, message, wait_timeout, depth) 37 | local num = self.num 38 | local size = self.size 39 | 40 | if num >= size then 41 | if not self.wait_on_buffer_full then 42 | return nil, "buffer overflow" 43 | end 44 | 45 | depth = depth or 1 46 | if depth > 10 then 47 | return nil, "buffer overflow and over max depth" 48 | end 49 | 50 | local timeout = wait_timeout or self.wait_buffer_timeout 51 | if timeout <= 0 then 52 | return nil, "buffer overflow and timeout" 53 | end 54 | 55 | local start_time = ngx.now() 56 | local ok, err = self.sema:wait(timeout) 57 | if not ok then 58 | return nil, "buffer overflow " .. 
err
59 |     end
60 |         timeout = timeout - (ngx.now() - start_time)
61 | 
62 |         -- sema:post to sema:wait is async, so we need to check again that the ringbuffer has room
63 |         return self:add(topic, key, message, timeout, depth + 1)
64 |     end
65 | 
66 |     local index = (self.start + num) % size
67 |     local queue = self.queue
68 | 
69 |     queue[index] = topic
70 |     queue[index + 1] = key
71 |     queue[index + 2] = message
72 | 
73 |     self.num = num + 3
74 | 
75 |     return true
76 | end
77 | 
78 | 
79 | function _M.release_buffer_wait(self)
80 |     if not self.wait_on_buffer_full then
81 |         return
82 |     end
83 | 
84 |     -- It is enough to release a single waiter, as only one message pops up
85 |     if self.sema:count() < 0 then
86 |         self.sema:post(1)
87 |     end
88 | end
89 | 
90 | 
91 | function _M.pop(self)
92 |     local num = self.num
93 |     if num <= 0 then
94 |         return nil, "empty buffer"
95 |     end
96 | 
97 |     self.num = num - 3
98 | 
99 |     local start = self.start
100 |     local queue = self.queue
101 | 
102 |     self.start = (start + 3) % self.size
103 | 
104 |     local topic, key, message = queue[start], queue[start + 1], queue[start + 2]
105 | 
106 |     queue[start], queue[start + 1], queue[start + 2] = ngx_null, ngx_null, ngx_null
107 | 
108 |     self:release_buffer_wait()
109 | 
110 |     return topic, key, message
111 | end
112 | 
113 | 
114 | function _M.left_num(self)
115 |     return self.num / 3
116 | end
117 | 
118 | 
119 | function _M.need_send(self)
120 |     return self.num / 3 >= self.batch_num
121 | end
122 | 
123 | 
124 | return _M
125 | 
--------------------------------------------------------------------------------
/lib/resty/kafka/sasl.lua:
--------------------------------------------------------------------------------
1 | local _M = {}
2 | 
3 | local scramsha = require "resty.kafka.scramsha"
4 | local MECHANISM_PLAINTEXT = "PLAIN"
5 | local MECHANISM_SCRAMSHA256 = "SCRAM-SHA-256" -- to do
6 | local MECHANISM_SCRAMSHA512 = "SCRAM-SHA-512"
7 | local SEP = string.char(0)
8 | 
9 | 
10 | local function _encode_plaintext(authz_id, user, pwd)
11 |     local msg = ""
12 |     if authz_id then
13 |         msg = msg .. authz_id
14 |     end
15 | 
16 |     return (authz_id or "") .. SEP .. user .. SEP ..
pwd 17 | end 18 | 19 | 20 | _M.encode = function(mechanism, authz_id, user, pwd,sock) 21 | if mechanism == MECHANISM_PLAINTEXT then 22 | return true, _encode_plaintext(authz_id, user, pwd) 23 | end 24 | if mechanism == MECHANISM_SCRAMSHA512 or mechanism == MECHANISM_SCRAMSHA256 then 25 | local scramsha_new = scramsha.new(sock,user,pwd) 26 | local ok, client_msg = scramsha_new:scram_sha_auth(mechanism) 27 | return ok, client_msg 28 | end 29 | return true, "" 30 | end 31 | 32 | 33 | return _M 34 | -------------------------------------------------------------------------------- /lib/resty/kafka/scramsha.lua: -------------------------------------------------------------------------------- 1 | local request = require("resty.kafka.request") 2 | local response = require("resty.kafka.response") 3 | local ngx_re = require("ngx.re") 4 | local ngx = ngx 5 | local pid = ngx.worker.pid 6 | local str_gsub = ngx.re.gsub 7 | local to_int32 = response.to_int32 8 | local string = string 9 | local table = table 10 | local ipairs = ipairs 11 | local tonumber = tonumber 12 | local tostring = tostring 13 | local setmetatable = setmetatable 14 | 15 | local scram_sha_type = { 16 | ["SCRAM-SHA-256"]={ 17 | ["name"]="sha256", 18 | ["out_len"]=32 19 | }, 20 | ["SCRAM-SHA-512"]={ 21 | ["name"]="sha512", 22 | ["out_len"]=64 23 | } 24 | } 25 | 26 | local _M = {} 27 | 28 | local mt = { __index = _M } 29 | 30 | function _M.new(sock, user,password) 31 | local self = { 32 | user = user, 33 | password = password, 34 | sock = sock 35 | } 36 | return setmetatable(self, mt) 37 | end 38 | 39 | local rshift, lshift, band, bxor 40 | do 41 | local _obj_0 = require("bit") 42 | rshift, lshift, band = _obj_0.rshift, _obj_0.lshift, _obj_0.band 43 | bxor = _obj_0.bxor 44 | end 45 | 46 | local function pbkdf2_hmac(scram_sha_conf,str, salt, i) 47 | local openssl_kdf = require("resty.openssl.kdf") 48 | 49 | salt = ngx.decode_base64(salt) 50 | 51 | local key, err = openssl_kdf.derive({ 52 | type = openssl_kdf.PBKDF2, 53 | md = scram_sha_conf.name, 54 | salt = salt, 55 | pbkdf2_iter = i, 56 | pass = str, 57 | outlen = scram_sha_conf.out_len -- our H() produces a 64 byte hash value (SHA-512) SHA-256=32 58 | }) 59 | 60 | if not (key) then 61 | return nil, "failed to derive pbkdf2 key: " .. 
tostring(err) 62 | end 63 | return key 64 | end 65 | 66 | local function hmac(scram_sha_type,key, str) 67 | local openssl_hmac = require("resty.openssl.hmac") 68 | local hmac, err = openssl_hmac.new(key, scram_sha_type) 69 | 70 | if not (hmac) then 71 | return nil, tostring(err) 72 | end 73 | 74 | hmac:update(str) 75 | 76 | local final_hmac, err = hmac:final() 77 | 78 | if not (final_hmac) then 79 | return nil, tostring(err) 80 | end 81 | 82 | return final_hmac 83 | end 84 | 85 | local function hash_func(scram_sha_type,str) 86 | local openssl_digest, err = require("resty.openssl.digest").new(scram_sha_type) 87 | 88 | if not (openssl_digest) then 89 | return nil, tostring(err) 90 | end 91 | 92 | openssl_digest:update(str) 93 | 94 | local digest, err = openssl_digest:final() 95 | 96 | if not (digest) then 97 | return nil, tostring(err) 98 | end 99 | 100 | return digest 101 | end 102 | 103 | local function xor(a, b) 104 | local result = {} 105 | for i = 1, #a do 106 | local x = a:byte(i) 107 | local y = b:byte(i) 108 | 109 | if not (x) or not (y) then 110 | return 111 | end 112 | 113 | result[i] = string.char(bxor(x, y)) 114 | end 115 | 116 | return table.concat(result) 117 | end 118 | 119 | function _M.scram_sha_auth(self, msg) 120 | local uuid = require("resty.jit-uuid") 121 | local c_nonce = str_gsub(uuid(),"-","") 122 | local nonce = "r=" .. c_nonce 123 | local sasl_name = self.user 124 | local username = "n=" .. sasl_name 125 | local auth_message = username .. "," .. nonce 126 | 127 | local scram_sha_conf = scram_sha_type[msg] 128 | if not scram_sha_conf then 129 | return nil, "unsupported SCRAM mechanism name: " .. tostring(msg) 130 | end 131 | 132 | local gs2_header = "n,," 133 | local client_first_message = gs2_header .. auth_message 134 | 135 | local t, server_first_message = self:send_first_message(client_first_message) 136 | if not (t) then 137 | return nil, server_first_message 138 | end 139 | 140 | auth_message = auth_message .. ',' .. server_first_message 141 | local pairs = ngx_re.split(server_first_message,",") 142 | if not pairs or #pairs == 0 then 143 | return nil, "server_first_message error,message:" .. server_first_message 144 | end 145 | 146 | local params = {} 147 | for _,v in ipairs(pairs) do 148 | local dict = ngx_re.split(v,"=") 149 | local key = dict[1] 150 | local value = dict[2] 151 | params[key] = value 152 | end 153 | 154 | local server_nonce = params["r"] 155 | local from, _, _ = ngx.re.find(server_nonce, c_nonce, "jo") 156 | if not from then 157 | return nil, "Server nonce, did not start with client nonce!" 158 | end 159 | 160 | auth_message = auth_message .. ',c=biws,r=' .. 
server_nonce 161 | local salt = params['s'] 162 | local iterations = tonumber(params['i']) 163 | if not iterations or iterations < 4096 then 164 | return nil, "the iteration count sent by the server is missing or less than 4096" 165 | end 166 | 167 | local salted_password, err = pbkdf2_hmac(scram_sha_conf, self.password, salt, iterations) 168 | if not (salted_password) then 169 | return nil, tostring(err) 170 | end 171 | local client_key, err = hmac(scram_sha_conf.name, salted_password, "Client Key") 172 | if not (client_key) then 173 | return nil, tostring(err) 174 | end 175 | local stored_key, err = hash_func(scram_sha_conf.name, client_key) 176 | if not (stored_key) then 177 | return nil, tostring(err) 178 | end 179 | local client_signature, err = hmac(scram_sha_conf.name, stored_key, auth_message) 180 | if not (client_signature) then 181 | return nil, tostring(err) 182 | end 183 | local proof = xor(client_key, client_signature) 184 | if not (proof) then 185 | return nil, "failed to generate the client proof" 186 | end 187 | local client_final_message = 'c=biws,r=' .. server_nonce .. ",p=" .. ngx.encode_base64(proof) 188 | return true, client_final_message 189 | end 190 | 191 | function _M.sock_send_receive(self, req) -- req is a resty.kafka.request object; the trailing true in the returns appears to mean the socket was left open 192 | local sock = self.sock 193 | local pkg = req:package() 194 | local bytes, err = sock:send(pkg) 195 | if not bytes then 196 | return nil, err, true 197 | end 198 | local len, err = sock:receive(4) 199 | if not len then 200 | if err == "timeout" then 201 | sock:close() 202 | return nil, err 203 | end 204 | return nil, err, true 205 | end 206 | local data, err = sock:receive(to_int32(len)) 207 | if not data then 208 | if err == "timeout" then 209 | sock:close() 210 | return nil, err 211 | end 212 | return nil, err, true 213 | end 214 | return response:new(data, req.api_version), nil, true 215 | end 216 | 
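-- A minimal usage sketch (this mirrors how sasl.lua drives this module):
--
--   local scramsha = require("resty.kafka.scramsha")
--   local s = scramsha.new(sock, user, password)
--   local ok, client_final_message = s:scram_sha_auth("SCRAM-SHA-512")
--
-- scram_sha_auth() performs the first round trip itself (via
-- send_first_message below) and, on success, hands back the
-- client-final-message for the caller to send to the broker.
217 | function _M.send_first_message(self, msg) 218 | local cli_id = "worker" ..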
pid() 219 | local req = request:new(request.SaslAuthenticateRequest, 0, cli_id, 220 | request.API_VERSION_V1) 221 | req:bytes(msg) 222 | 223 | local resp, err = self:sock_send_receive(req) 224 | if not resp then 225 | return nil, err 226 | end 227 | local err_code = resp:int16() 228 | local error_msg = resp:string() 229 | local auth_bytes = resp:bytes() 230 | if err_code ~= 0 then 231 | return nil, error_msg 232 | end 233 | return true, auth_bytes 234 | end 235 | 236 | return _M -------------------------------------------------------------------------------- /lib/resty/kafka/sendbuffer.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Dejiang Zhu(doujiang24) 2 | 3 | 4 | local setmetatable = setmetatable 5 | local pairs = pairs 6 | local next = next 7 | 8 | 9 | local ok, new_tab = pcall(require, "table.new") 10 | if not ok then 11 | new_tab = function (narr, nrec) return {} end 12 | end 13 | 14 | local MAX_REUSE = 10000 15 | 16 | 17 | local _M = {} 18 | local mt = { __index = _M } 19 | 20 | function _M.new(self, batch_num, batch_size) 21 | local sendbuffer = { 22 | topics = {}, 23 | queue_num = 0, 24 | batch_num = batch_num * 2, 25 | batch_size = batch_size, 26 | } 27 | return setmetatable(sendbuffer, mt) 28 | end 29 | 30 | 31 | function _M.add(self, topic, partition_id, key, msg) 32 | local topics = self.topics 33 | 34 | if not topics[topic] then 35 | topics[topic] = {} 36 | end 37 | 38 | if not topics[topic][partition_id] then 39 | topics[topic][partition_id] = { 40 | queue = new_tab(self.batch_num, 0), 41 | index = 0, 42 | used = 0, 43 | size = 0, 44 | offset = 0, 45 | retryable = true, 46 | err = "", 47 | } 48 | end 49 | 50 | local buffer = topics[topic][partition_id] 51 | local index = buffer.index 52 | local queue = buffer.queue 53 | 54 | if index == 0 then 55 | self.queue_num = self.queue_num + 1 56 | buffer.retryable = true 57 | end 58 | 59 | queue[index + 1] = key 60 | queue[index + 2] = msg 61 | 62 | buffer.index = index + 2 63 | buffer.size = buffer.size + #msg + (key and #key or 0) 64 | 65 | if (buffer.size >= self.batch_size) or (buffer.index >= self.batch_num) then 66 | return true 67 | end 68 | end 69 | 70 | 71 | function _M.offset(self, topic, partition_id, offset) 72 | local buffer = self.topics[topic][partition_id] 73 | 74 | if not offset then 75 | return buffer.offset 76 | end 77 | 78 | buffer.offset = offset + (buffer.index / 2) 79 | end 80 | 81 | 82 | function _M.clear(self, topic, partition_id) 83 | local buffer = self.topics[topic][partition_id] 84 | buffer.index = 0 85 | buffer.size = 0 86 | buffer.used = buffer.used + 1 87 | 88 | if buffer.used >= MAX_REUSE then 89 | buffer.queue = new_tab(self.batch_num, 0) 90 | buffer.used = 0 91 | end 92 | 93 | self.queue_num = self.queue_num - 1 94 | end 95 | 96 | 97 | function _M.done(self) 98 | return self.queue_num == 0 99 | end 100 | 101 | 102 | function _M.err(self, topic, partition_id, err, retryable) 103 | local buffer = self.topics[topic][partition_id] 104 | 105 | if err then 106 | buffer.err = err 107 | buffer.retryable = retryable 108 | return buffer.index 109 | else 110 | return buffer.err, buffer.retryable 111 | end 112 | end 113 | 114 | 115 | function _M.loop(self) 116 | local topics, t, p = self.topics 117 | 118 | return function () 119 | if t then 120 | for partition_id, queue in next, topics[t], p do 121 | p = partition_id 122 | if queue.index > 0 then 123 | return t, partition_id, queue 124 | end 125 | end 126 | end 127 | 128 | 129 | for topic, partitions 
in next, topics, t do 130 | t = topic 131 | p = nil 132 | for partition_id, queue in next, partitions, p do 133 | p = partition_id 134 | if queue.index > 0 then 135 | return topic, partition_id, queue 136 | end 137 | end 138 | end 139 | 140 | return 141 | end 142 | end 143 | 144 | 145 | function _M.aggregator(self, client) 146 | local num = 0 147 | local sendbroker = {} 148 | local brokers = {} 149 | 150 | local i = 1 151 | for topic, partition_id, queue in self:loop() do 152 | if queue.retryable then 153 | local broker_conf, err = client:choose_broker(topic, partition_id) 154 | if not broker_conf then 155 | self:err(topic, partition_id, err, true) 156 | 157 | else 158 | if not brokers[broker_conf] then 159 | brokers[broker_conf] = { 160 | topics = {}, 161 | topic_num = 0, 162 | size = 0, 163 | } 164 | end 165 | 166 | local broker = brokers[broker_conf] 167 | if not broker.topics[topic] then 168 | brokers[broker_conf].topics[topic] = { 169 | partitions = {}, 170 | partition_num = 0, 171 | } 172 | 173 | broker.topic_num = broker.topic_num + 1 174 | end 175 | 176 | local broker_topic = broker.topics[topic] 177 | 178 | broker_topic.partitions[partition_id] = queue 179 | broker_topic.partition_num = broker_topic.partition_num + 1 180 | 181 | broker.size = broker.size + queue.size 182 | 183 | if broker.size >= self.batch_size then 184 | sendbroker[num + 1] = broker_conf 185 | sendbroker[num + 2] = brokers[broker_conf] 186 | 187 | num = num + 2 188 | brokers[broker_conf] = nil 189 | end 190 | end 191 | end 192 | end 193 | 194 | for broker_conf, topic_partitions in pairs(brokers) do 195 | sendbroker[num + 1] = broker_conf 196 | sendbroker[num + 2] = brokers[broker_conf] 197 | num = num + 2 198 | end 199 | 200 | return num, sendbroker 201 | end 202 | 203 | 204 | return _M 205 | -------------------------------------------------------------------------------- /lib/resty/kafka/utils.lua: -------------------------------------------------------------------------------- 1 | local _M = { _VERSION = "0.20" } 2 | 3 | 4 | function _M.correlation_id(index) 5 | return (index + 1) % 1073741824 -- 2^30 6 | end 7 | 8 | 9 | return _M 10 | -------------------------------------------------------------------------------- /lua-resty-kafka-0.09-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "lua-resty-kafka" 2 | version = "0.09-0" 3 | source = { 4 | url = "git://github.com/doujiang24/lua-resty-kafka", 5 | tag = "v0.09" 6 | } 7 | description = { 8 | summary = "Lua Kafka client driver for the ngx_lua based on the cosocket API", 9 | detailed = [[ 10 | This Lua library is a Kafka client driver for the ngx_lua nginx module: 11 | 12 | http://wiki.nginx.org/HttpLuaModule 13 | 14 | This Lua library takes advantage of ngx_lua's cosocket API, which ensures 100% nonblocking behavior. 15 | 16 | Note that at least ngx_lua 0.9.3 or ngx_openresty 1.4.3.7 is required, and unfortunately only LuaJIT supported (--with-luajit). 
17 | ]], 18 | homepage = "https://github.com/doujiang24/lua-resty-kafka", 19 | license = "BSD" 20 | } 21 | dependencies = { 22 | "lua >= 5.1" 23 | } 24 | build = { 25 | type = "builtin", 26 | modules = { 27 | ["resty.kafka.broker"] = "lib/resty/kafka/broker.lua", 28 | ["resty.kafka.client"] = "lib/resty/kafka/client.lua", 29 | ["resty.kafka.errors"] = "lib/resty/kafka/errors.lua", 30 | ["resty.kafka.producer"] = "lib/resty/kafka/producer.lua", 31 | ["resty.kafka.request"] = "lib/resty/kafka/request.lua", 32 | ["resty.kafka.response"] = "lib/resty/kafka/response.lua", 33 | ["resty.kafka.ringbuffer"] = "lib/resty/kafka/ringbuffer.lua", 34 | ["resty.kafka.sendbuffer"] = "lib/resty/kafka/sendbuffer.lua" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /lua-resty-kafka-0.20-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "lua-resty-kafka" 2 | version = "0.20-0" 3 | source = { 4 | url = "git+https://github.com/doujiang24/lua-resty-kafka", 5 | tag = "v0.20" 6 | } 7 | description = { 8 | summary = "Lua Kafka client driver for the ngx_lua based on the cosocket API", 9 | detailed = [[ 10 | This Lua library is a Kafka client driver for the ngx_lua nginx module: 11 | 12 | http://wiki.nginx.org/HttpLuaModule 13 | 14 | This Lua library takes advantage of ngx_lua's cosocket API, which ensures 100% nonblocking behavior. 15 | 16 | Note that at least ngx_lua 0.9.3 or ngx_openresty 1.4.3.7 is required, and unfortunately only LuaJIT supported (--with-luajit). 17 | ]], 18 | homepage = "https://github.com/doujiang24/lua-resty-kafka", 19 | license = "BSD" 20 | } 21 | dependencies = { 22 | "lua >= 5.1" 23 | } 24 | build = { 25 | type = "builtin", 26 | modules = { 27 | ["resty.kafka.broker"] = "lib/resty/kafka/broker.lua", 28 | ["resty.kafka.client"] = "lib/resty/kafka/client.lua", 29 | ["resty.kafka.errors"] = "lib/resty/kafka/errors.lua", 30 | ["resty.kafka.producer"] = "lib/resty/kafka/producer.lua", 31 | ["resty.kafka.request"] = "lib/resty/kafka/request.lua", 32 | ["resty.kafka.response"] = "lib/resty/kafka/response.lua", 33 | ["resty.kafka.ringbuffer"] = "lib/resty/kafka/ringbuffer.lua", 34 | ["resty.kafka.sendbuffer"] = "lib/resty/kafka/sendbuffer.lua", 35 | ["resty.kafka.basic-consumer"] = "lib/resty/kafka/basic-consumer.lua", 36 | ["resty.kafka.sasl"] = "lib/resty/kafka/sasl.lua", 37 | ["resty.kafka.utils"] = "lib/resty/kafka/utils.lua", 38 | ["resty.kafka.protocol.consumer"] = "lib/resty/kafka/protocol/consumer.lua", 39 | ["resty.kafka.protocol.common"] = "lib/resty/kafka/protocol/common.lua", 40 | ["resty.kafka.protocol.record"] = "lib/resty/kafka/protocol/record.lua" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /lua-resty-kafka-0.22-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "lua-resty-kafka" 2 | version = "0.22-0" 3 | source = { 4 | url = "git+https://github.com/doujiang24/lua-resty-kafka", 5 | tag = "v0.22" 6 | } 7 | description = { 8 | summary = "Lua Kafka client driver for the ngx_lua based on the cosocket API", 9 | detailed = [[ 10 | This Lua library is a Kafka client driver for the ngx_lua nginx module: 11 | 12 | http://wiki.nginx.org/HttpLuaModule 13 | 14 | This Lua library takes advantage of ngx_lua's cosocket API, which ensures 100% nonblocking behavior. 
15 | 16 | Note that at least ngx_lua 0.9.3 or ngx_openresty 1.4.3.7 is required, and unfortunately only LuaJIT supported (--with-luajit). 17 | ]], 18 | homepage = "https://github.com/doujiang24/lua-resty-kafka", 19 | license = "BSD" 20 | } 21 | dependencies = { 22 | "lua >= 5.1" 23 | } 24 | build = { 25 | type = "builtin", 26 | modules = { 27 | ["resty.kafka.basic-consumer"] = "lib/resty/kafka/basic-consumer.lua", 28 | ["resty.kafka.broker"] = "lib/resty/kafka/broker.lua", 29 | ["resty.kafka.client"] = "lib/resty/kafka/client.lua", 30 | ["resty.kafka.errors"] = "lib/resty/kafka/errors.lua", 31 | ["resty.kafka.producer"] = "lib/resty/kafka/producer.lua", 32 | ["resty.kafka.protocol.common"] = "lib/resty/kafka/protocol/common.lua", 33 | ["resty.kafka.protocol.consumer"] = "lib/resty/kafka/protocol/consumer.lua", 34 | ["resty.kafka.protocol.record"] = "lib/resty/kafka/protocol/record.lua", 35 | ["resty.kafka.request"] = "lib/resty/kafka/request.lua", 36 | ["resty.kafka.response"] = "lib/resty/kafka/response.lua", 37 | ["resty.kafka.ringbuffer"] = "lib/resty/kafka/ringbuffer.lua", 38 | ["resty.kafka.sasl"] = "lib/resty/kafka/sasl.lua", 39 | ["resty.kafka.scramsha"] = "lib/resty/kafka/scramsha.lua", 40 | ["resty.kafka.sendbuffer"] = "lib/resty/kafka/sendbuffer.lua", 41 | ["resty.kafka.utils"] = "lib/resty/kafka/utils.lua", 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /lua-resty-kafka-0.23-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "lua-resty-kafka" 2 | version = "0.23-0" 3 | source = { 4 | url = "git+https://github.com/doujiang24/lua-resty-kafka", 5 | tag = "v0.23" 6 | } 7 | description = { 8 | summary = "Lua Kafka client driver for the ngx_lua based on the cosocket API", 9 | detailed = [[ 10 | This Lua library is a Kafka client driver for the ngx_lua nginx module: 11 | 12 | http://wiki.nginx.org/HttpLuaModule 13 | 14 | This Lua library takes advantage of ngx_lua's cosocket API, which ensures 100% nonblocking behavior. 15 | 16 | Note that at least ngx_lua 0.9.3 or ngx_openresty 1.4.3.7 is required, and unfortunately only LuaJIT supported (--with-luajit). 
17 | ]], 18 | homepage = "https://github.com/doujiang24/lua-resty-kafka", 19 | license = "BSD" 20 | } 21 | dependencies = { 22 | "lua >= 5.1" 23 | } 24 | build = { 25 | type = "builtin", 26 | modules = { 27 | ["resty.kafka.basic-consumer"] = "lib/resty/kafka/basic-consumer.lua", 28 | ["resty.kafka.broker"] = "lib/resty/kafka/broker.lua", 29 | ["resty.kafka.client"] = "lib/resty/kafka/client.lua", 30 | ["resty.kafka.errors"] = "lib/resty/kafka/errors.lua", 31 | ["resty.kafka.producer"] = "lib/resty/kafka/producer.lua", 32 | ["resty.kafka.protocol.common"] = "lib/resty/kafka/protocol/common.lua", 33 | ["resty.kafka.protocol.consumer"] = "lib/resty/kafka/protocol/consumer.lua", 34 | ["resty.kafka.protocol.record"] = "lib/resty/kafka/protocol/record.lua", 35 | ["resty.kafka.request"] = "lib/resty/kafka/request.lua", 36 | ["resty.kafka.response"] = "lib/resty/kafka/response.lua", 37 | ["resty.kafka.ringbuffer"] = "lib/resty/kafka/ringbuffer.lua", 38 | ["resty.kafka.sasl"] = "lib/resty/kafka/sasl.lua", 39 | ["resty.kafka.scramsha"] = "lib/resty/kafka/scramsha.lua", 40 | ["resty.kafka.sendbuffer"] = "lib/resty/kafka/sendbuffer.lua", 41 | ["resty.kafka.utils"] = "lib/resty/kafka/utils.lua", 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /t/basic-consumer.t: -------------------------------------------------------------------------------- 1 | # vim:set ts=4 sw=4 et: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(1); 7 | 8 | plan tests => repeat_each() * (3 * blocks()); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_package_path "$pwd/lib/?.lua;;"; 14 | lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; 15 | }; 16 | 17 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 18 | $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; 19 | $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; 20 | $ENV{TEST_NGINX_KAFKA_SSL_PORT} = '9093'; 21 | $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; 22 | $ENV{TEST_NGINX_KAFKA_SASL_PORT} = '9094'; 23 | $ENV{TEST_NGINX_KAFKA_SASL_USER} = 'admin'; 24 | $ENV{TEST_NGINX_KAFKA_SASL_PWD} = 'admin-secret'; 25 | 26 | 27 | no_shuffle(); 28 | no_long_string(); 29 | 30 | run_tests(); 31 | 32 | __DATA__ 33 | 34 | === TEST 1: send some test messages 35 | --- http_config eval: $::HttpConfig 36 | --- config 37 | location /t { 38 | content_by_lua_block { 39 | local cjson = require "cjson" 40 | local producer = require "resty.kafka.producer" 41 | 42 | local broker_list = { 43 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 44 | } 45 | 46 | local p = producer:new(broker_list) 47 | 48 | for i = 1, 135 do 49 | local offset, err = p:send("test-consumer", nil, tostring(i)) 50 | if not offset then 51 | ngx.say("send err:", err) 52 | return 53 | end 54 | end 55 | 56 | ngx.say("offset: ", tostring(offset)) 57 | } 58 | } 59 | --- request 60 | GET /t 61 | --- response_body_like 62 | .*offset.* 63 | --- no_error_log 64 | [error] 65 | 66 | 67 | 68 | === TEST 2: list offset (first) 69 | --- http_config eval: $::HttpConfig 70 | --- config 71 | location /t { 72 | content_by_lua_block { 73 | 74 | local cjson = require("cjson") 75 | local bconsumer = require("resty.kafka.basic-consumer") 76 | local protocol_consumer = require("resty.kafka.protocol.consumer") 77 | 78 | local broker_list = { 79 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 80 | } 81 | 82 | local c = bconsumer:new(broker_list) 83 | 84 | local offset0, err = c:list_offset("test-consumer", 
0, protocol_consumer.LIST_OFFSET_TIMESTAMP_FIRST) 85 | if not offset0 then 86 | ngx.say(err) 87 | return 88 | end 89 | 90 | ngx.say("test-consumer: partition 0, offset: ", offset0) 91 | 92 | local offset1, err = c:list_offset("test-consumer", 1, protocol_consumer.LIST_OFFSET_TIMESTAMP_FIRST) 93 | if not offset1 then 94 | ngx.say(err) 95 | return 96 | end 97 | 98 | ngx.say("test-consumer: partition 1, offset: ", offset1) 99 | } 100 | } 101 | --- request 102 | GET /t 103 | --- response_body 104 | test-consumer: partition 0, offset: 0LL 105 | test-consumer: partition 1, offset: 0LL 106 | --- no_error_log 107 | [error] 108 | 109 | 110 | 111 | === TEST 3: list offset (last) 112 | --- http_config eval: $::HttpConfig 113 | --- config 114 | location /t { 115 | content_by_lua_block { 116 | ngx.sleep(1) -- wait 1 second for kafka 117 | local cjson = require("cjson") 118 | local bconsumer = require("resty.kafka.basic-consumer") 119 | local protocol_consumer = require("resty.kafka.protocol.consumer") 120 | 121 | local broker_list = { 122 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 123 | } 124 | 125 | local c = bconsumer:new(broker_list) 126 | 127 | local offset0, err = c:list_offset("test-consumer", 0, protocol_consumer.LIST_OFFSET_TIMESTAMP_LAST) 128 | if not offset0 then 129 | ngx.say(err) 130 | return 131 | end 132 | 133 | ngx.say("test-consumer: partition 0, offset: ", offset0) 134 | 135 | local offset1, err = c:list_offset("test-consumer", 1, protocol_consumer.LIST_OFFSET_TIMESTAMP_LAST) 136 | if not offset1 then 137 | ngx.say(err) 138 | return 139 | end 140 | 141 | ngx.say("test-consumer: partition 1, offset: ", offset1) 142 | } 143 | } 144 | --- request 145 | GET /t 146 | --- response_body 147 | test-consumer: partition 0, offset: 67LL 148 | test-consumer: partition 1, offset: 68LL 149 | --- no_error_log 150 | [error] 151 | 152 | 153 | 154 | === TEST 4: list offset (topic not exist) 155 | --- http_config eval: $::HttpConfig 156 | --- config 157 | location /t { 158 | content_by_lua_block { 159 | ngx.sleep(1) -- wait 1 second for kafka 160 | local cjson = require("cjson") 161 | local bconsumer = require("resty.kafka.basic-consumer") 162 | local protocol_consumer = require("resty.kafka.protocol.consumer") 163 | 164 | local broker_list = { 165 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 166 | } 167 | 168 | local c = bconsumer:new(broker_list) 169 | 170 | -- It will return an error at the "choose broker" step, because the corresponding topic cannot be found 171 | local offset, err = c:list_offset("not-exist-topic", 0, protocol_consumer.LIST_OFFSET_TIMESTAMP_LAST) 172 | if not offset then 173 | ngx.say(err) 174 | return 175 | end 176 | } 177 | } 178 | --- request 179 | GET /t 180 | --- response_body 181 | not found topic 182 | --- no_error_log 183 | [error] 184 | 185 | 186 | 187 | === TEST 5: list offset (partition not exist) 188 | --- http_config eval: $::HttpConfig 189 | --- config 190 | location /t { 191 | content_by_lua_block { 192 | ngx.sleep(1) -- wait 1 second for kafka 193 | local cjson = require("cjson") 194 | local bconsumer = require("resty.kafka.basic-consumer") 195 | local protocol_consumer = require("resty.kafka.protocol.consumer") 196 | 197 | local broker_list = { 198 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 199 | } 200 | 201 | local c = bconsumer:new(broker_list) 202 | 203 | -- It will return an error at the "choose broker" step, because the corresponding partition cannot be found 204 | local offset, err = 
c:list_offset("test-consumer", 999, protocol_consumer.LIST_OFFSET_TIMESTAMP_LAST) 205 | if not offset then 206 | ngx.say(err) 207 | return 208 | end 209 | } 210 | } 211 | --- request 212 | GET /t 213 | --- response_body 214 | not found partition 215 | --- no_error_log 216 | [error] 217 | 218 | 219 | 220 | === TEST 6: fetch message (first) 221 | --- http_config eval: $::HttpConfig 222 | --- config 223 | location /t { 224 | content_by_lua_block { 225 | local cjson = require("cjson") 226 | local bconsumer = require("resty.kafka.basic-consumer") 227 | local protocol_consumer = require("resty.kafka.protocol.consumer") 228 | 229 | local broker_list = { 230 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 231 | } 232 | 233 | local c = bconsumer:new(broker_list) 234 | 235 | local ret0, err = c:fetch("test-consumer", 0, 0) -- partition 0, offset 0 236 | local message0 = "" 237 | for _, record in pairs(ret0.records) do 238 | message0 = message0 .. record.value 239 | end 240 | ngx.say(message0) 241 | 242 | local ret1, err = c:fetch("test-consumer", 1, 0) -- partition 1, offset 0 243 | local message1 = "" 244 | for _, record in pairs(ret1.records) do 245 | message1 = message1 .. record.value 246 | end 247 | ngx.say(message1) 248 | } 249 | } 250 | --- request 251 | GET /t 252 | --- response_body 253 | 2468101214161820222426283032343638404244464850525456586062646668707274767880828486889092949698100102104106108110112114116118120122124126128130132134 254 | 13579111315171921232527293133353739414345474951535557596163656769717375777981838587899193959799101103105107109111113115117119121123125127129131133135 255 | --- no_error_log 256 | [error] 257 | 258 | 259 | 260 | === TEST 7: fetch message (offset) 261 | --- http_config eval: $::HttpConfig 262 | --- config 263 | location /t { 264 | content_by_lua_block { 265 | local cjson = require("cjson") 266 | local bconsumer = require("resty.kafka.basic-consumer") 267 | local protocol_consumer = require("resty.kafka.protocol.consumer") 268 | 269 | local broker_list = { 270 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 271 | } 272 | 273 | local c = bconsumer:new(broker_list) 274 | 275 | local ret0, err = c:fetch("test-consumer", 0, 50) -- partition 0, offset 50 276 | local message0 = "" 277 | for _, record in pairs(ret0.records) do 278 | message0 = message0 .. record.value 279 | end 280 | ngx.say(message0) 281 | 282 | local ret1, err = c:fetch("test-consumer", 1, 50) -- partition 1, offset 50 283 | local message1 = "" 284 | for _, record in pairs(ret1.records) do 285 | message1 = message1 .. 
record.value 286 | end 287 | ngx.say(message1) 288 | } 289 | } 290 | --- request 291 | GET /t 292 | --- response_body 293 | 102104106108110112114116118120122124126128130132134 294 | 101103105107109111113115117119121123125127129131133135 295 | --- no_error_log 296 | [error] 297 | 298 | 299 | 300 | === TEST 8: fetch message (empty) 301 | --- http_config eval: $::HttpConfig 302 | --- config 303 | location /t { 304 | content_by_lua_block { 305 | local cjson = require("cjson") 306 | local bconsumer = require("resty.kafka.basic-consumer") 307 | local protocol_consumer = require("resty.kafka.protocol.consumer") 308 | 309 | local broker_list = { 310 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 311 | } 312 | 313 | local c = bconsumer:new(broker_list) 314 | 315 | local _, err = c:fetch("test-consumer", 0, 200) -- partition 0, offset 200 316 | if err == "OFFSET_OUT_OF_RANGE" then 317 | ngx.say(err.."0") 318 | end 319 | 320 | local _, err = c:fetch("test-consumer", 1, 200) -- partition 1, offset 200 321 | if err == "OFFSET_OUT_OF_RANGE" then 322 | ngx.say(err.."1") 323 | end 324 | } 325 | } 326 | --- request 327 | GET /t 328 | --- response_body 329 | OFFSET_OUT_OF_RANGE0 330 | OFFSET_OUT_OF_RANGE1 331 | --- no_error_log 332 | [error] 333 | 334 | 335 | 336 | === TEST 9: fetch message (topic not exist) 337 | --- http_config eval: $::HttpConfig 338 | --- config 339 | location /t { 340 | content_by_lua_block { 341 | local cjson = require("cjson") 342 | local bconsumer = require("resty.kafka.basic-consumer") 343 | local protocol_consumer = require("resty.kafka.protocol.consumer") 344 | 345 | local broker_list = { 346 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 347 | } 348 | 349 | local c = bconsumer:new(broker_list) 350 | 351 | local ret, err = c:fetch("not-exist-topic", 0, 0) -- partition 0, offset 0 352 | if not ret then 353 | ngx.say(err) 354 | end 355 | } 356 | } 357 | --- request 358 | GET /t 359 | --- response_body 360 | not found topic 361 | --- no_error_log 362 | [error] 363 | 364 | 365 | 366 | === TEST 10: fetch message (partition not exist) 367 | --- http_config eval: $::HttpConfig 368 | --- config 369 | location /t { 370 | content_by_lua_block { 371 | local cjson = require("cjson") 372 | local bconsumer = require("resty.kafka.basic-consumer") 373 | local protocol_consumer = require("resty.kafka.protocol.consumer") 374 | 375 | local broker_list = { 376 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 377 | } 378 | 379 | local c = bconsumer:new(broker_list) 380 | 381 | local ret, err = c:fetch("test-consumer", 999, 0) -- partition 999, offset 0 382 | if not ret then 383 | ngx.say(err) 384 | end 385 | } 386 | } 387 | --- request 388 | GET /t 389 | --- response_body 390 | not found partition 391 | --- no_error_log 392 | [error] 393 | -------------------------------------------------------------------------------- /t/buffer.t: -------------------------------------------------------------------------------- 1 | # vim:set ts=4 sw=4 et: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (3 * blocks()); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_package_path "$pwd/lib/?.lua;;"; 14 | lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; 15 | }; 16 | 17 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 18 | $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; 19 | $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; 20 | 
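# TEST_NGINX_KAFKA_ERR_PORT below is a port with no broker listening
# (an assumption about the test environment), so the error-handling
# tests such as TEST 4 and TEST 5 can exercise the error_handle callback.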
$ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; 21 | 22 | no_long_string(); 23 | #no_diff(); 24 | 25 | run_tests(); 26 | 27 | __DATA__ 28 | 29 | === TEST 1: force flush 30 | --- http_config eval: $::HttpConfig 31 | --- config 32 | location /t { 33 | content_by_lua ' 34 | local cjson = require "cjson" 35 | local producer = require "resty.kafka.producer" 36 | 37 | local broker_list = { 38 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 39 | } 40 | 41 | local key = "key" 42 | local message = "halo world" 43 | 44 | local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000 }) 45 | ngx.sleep(0.1) -- will have an immediately flush by timer_flush 46 | 47 | local ok, err = p:send("test", key, message) 48 | if not ok then 49 | ngx.say("send err:", err) 50 | return 51 | end 52 | ngx.say("send ok:", ok) 53 | 54 | p:flush() 55 | local offset0 = p:offset() 56 | 57 | local ok, err = p:send("test", key, message) 58 | if not ok then 59 | ngx.say("send err:", err) 60 | return 61 | end 62 | ngx.say("send ok:", ok) 63 | 64 | p:flush() 65 | local offset1 = p:offset() 66 | 67 | ngx.say("send num:", tonumber(offset1 - offset0)) 68 | '; 69 | } 70 | --- request 71 | GET /t 72 | --- response_body 73 | send ok:true 74 | send ok:true 75 | send num:1 76 | --- no_error_log 77 | [error] 78 | 79 | 80 | 81 | === TEST 2: timer flush 82 | --- http_config eval: $::HttpConfig 83 | --- config 84 | location /t { 85 | content_by_lua ' 86 | local cjson = require "cjson" 87 | local producer = require "resty.kafka.producer" 88 | 89 | local broker_list = { 90 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 91 | } 92 | 93 | local key = "key" 94 | local message = "halo world" 95 | 96 | local p = producer:new(broker_list, { producer_type = "async", flush_time = 1000 }) 97 | ngx.sleep(0.1) -- will have an immediately flush by timer_flush 98 | 99 | local size, err = p:send("test", key, message) 100 | if not size then 101 | ngx.say("send err:", err) 102 | return 103 | end 104 | 105 | ngx.sleep(1.1) 106 | 107 | local offset = p:offset() 108 | ngx.say("offset bigger than 0: ", tonumber(offset) > 0) 109 | '; 110 | } 111 | --- request 112 | GET /t 113 | --- response_body 114 | offset bigger than 0: true 115 | --- no_error_log 116 | [error] 117 | 118 | 119 | 120 | === TEST 3: buffer flush 121 | --- http_config eval: $::HttpConfig 122 | --- config 123 | location /t { 124 | content_by_lua ' 125 | local cjson = require "cjson" 126 | local producer = require "resty.kafka.producer" 127 | 128 | local broker_list = { 129 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 130 | } 131 | 132 | local key = "key" 133 | local message = "halo world" 134 | 135 | local p = producer:new(broker_list, { producer_type = "async", batch_num = 1, flush_time = 10000}) 136 | ngx.sleep(0.1) -- will have an immediately flush by timer_flush 137 | 138 | local ok, err = p:send("test", nil, message) 139 | if not ok then 140 | ngx.say("send err:", err) 141 | return 142 | end 143 | ngx.say("send ok:", ok) 144 | 145 | ngx.sleep(1) 146 | 147 | local offset0 = p:offset() 148 | local send_num = p:flush() 149 | local offset1 = p:offset() 150 | ngx.say("send num:", tonumber(offset1 - offset0)) 151 | 152 | '; 153 | } 154 | --- request 155 | GET /t 156 | --- response_body 157 | send ok:true 158 | send num:0 159 | --- no_error_log 160 | [error] 161 | 162 | 163 | 164 | === TEST 4: error handle 165 | --- http_config eval: $::HttpConfig 166 | --- config 167 | location /t { 168 | content_by_lua ' 169 | local 
cjson = require "cjson" 170 | local producer = require "resty.kafka.producer" 171 | 172 | local broker_list = { 173 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_ERR_PORT }, 174 | } 175 | 176 | local key = "key" 177 | local message = "halo world" 178 | 179 | local error_handle = function (topic, partition_id, queue, index, err, retryable) 180 | ngx.log(ngx.ERR, "failed to send to kafka, topic: ", topic, "; partition_id: ", partition_id, "; retryable: ", retryable) 181 | end 182 | 183 | local p = producer:new(broker_list, { producer_type = "async", max_retry = 1, batch_num = 1, error_handle = error_handle }) 184 | 185 | local ok, err = p:send("test", key, message) 186 | if not ok then 187 | ngx.say("send err:", err) 188 | return 189 | end 190 | 191 | ngx.say("send ok:", ok) 192 | 193 | p:flush() 194 | 195 | '; 196 | } 197 | --- request 198 | GET /t 199 | --- response_body 200 | send ok:true 201 | --- error_log: failed to send to kafka, topic: test; partition_id: -1; retryable: true 202 | 203 | 204 | 205 | === TEST 5: wrong in error handle 206 | --- http_config eval: $::HttpConfig 207 | --- config 208 | location /t { 209 | content_by_lua ' 210 | local cjson = require "cjson" 211 | local producer = require "resty.kafka.producer" 212 | 213 | local broker_list = { 214 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_ERR_PORT }, 215 | } 216 | 217 | local key = "key" 218 | local message = "halo world" 219 | 220 | local error_handle = function (topic, partition_id, queue, index, err, retryable) 221 | local num = topic + 1 222 | return true 223 | end 224 | ngx.log(ngx.ERR, tostring(error_handle)) 225 | 226 | local p = producer:new(broker_list, { producer_type = "async", max_retry = 1, batch_num = 1, error_handle = error_handle }) 227 | 228 | local ok, err = p:send("test", key, message) 229 | if not ok then 230 | ngx.say("send err:", err) 231 | return 232 | end 233 | 234 | ngx.say("send ok:", ok) 235 | 236 | p:flush() 237 | 238 | '; 239 | } 240 | --- request 241 | GET /t 242 | --- response_body 243 | send ok:true 244 | --- error_log: failed to callback error_handle 245 | 246 | 247 | 248 | === TEST 6: work in log phase 249 | --- http_config eval: $::HttpConfig 250 | --- config 251 | location /t { 252 | content_by_lua ' 253 | ngx.req.read_body(); 254 | local body = ngx.req.get_body_data(); 255 | ngx.say(body); 256 | '; 257 | 258 | log_by_lua ' 259 | local cjson = require "cjson" 260 | local producer = require "resty.kafka.producer" 261 | 262 | local broker_list = { 263 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, 264 | } 265 | 266 | local key = "key" 267 | local message = ngx.var.request_body 268 | 269 | local p = producer:new(broker_list, { producer_type = "async", batch_num = 1, flush_time = 10000}) 270 | -- 1 message 271 | local size, err = p:send("test", key, message) 272 | 273 | '; 274 | } 275 | --- request 276 | POST /t 277 | Hello world 278 | --- response_body 279 | Hello world 280 | --- no_error_log 281 | [error] 282 | 283 | 284 | 285 | === TEST 7: two topic in a batch 286 | --- http_config eval: $::HttpConfig 287 | --- config 288 | location /t { 289 | content_by_lua ' 290 | ngx.req.read_body(); 291 | 292 | local cjson = require "cjson" 293 | local producer = require "resty.kafka.producer" 294 | 295 | local broker_list = { 296 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, 297 | } 298 | 299 | local key = "key" 300 | local message = ngx.req.get_body_data(); 301 | 302 | local p = producer:new(broker_list, { producer_type 
= "async", flush_time = 10000}) 303 | ngx.sleep(0.01) 304 | -- 2 message 305 | local size, err = p:send("test", key, message) 306 | local size, err = p:send("test2", key, message) 307 | p:flush() 308 | local offset0 = p:offset() 309 | 310 | local size, err = p:send("test", key, message) 311 | local size, err = p:send("test2", key, message) 312 | p:flush() 313 | 314 | local offset1 = p:offset() 315 | 316 | ngx.say("send num:", tonumber(offset1 - offset0)) 317 | '; 318 | } 319 | --- request 320 | POST /t 321 | Hello world 322 | --- response_body 323 | send num:2 324 | --- no_error_log 325 | [error] 326 | 327 | 328 | 329 | === TEST 8: unretryable 330 | --- http_config eval: $::HttpConfig 331 | --- config 332 | location /t { 333 | content_by_lua ' 334 | ngx.req.read_body(); 335 | 336 | local cjson = require "cjson" 337 | local producer = require "resty.kafka.producer" 338 | 339 | local broker_list = { 340 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, 341 | } 342 | 343 | local key = "key" 344 | local message = ngx.req.get_body_data(); 345 | 346 | local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000}) 347 | ngx.sleep(0.01) 348 | local size, err = p:send("test", key, message) 349 | p:flush() 350 | local offset0 = p:offset() 351 | 352 | -- XX: just hack for testing 353 | p.sendbuffer.topics.test[1].retryable = false 354 | 355 | local size, err = p:send("test", key, message) 356 | p:flush() 357 | 358 | local offset1 = p:offset() 359 | 360 | ngx.say("send num:", tonumber(offset1 - offset0)) 361 | '; 362 | } 363 | --- request 364 | POST /t 365 | Hello world 366 | --- response_body 367 | send num:1 368 | --- no_error_log 369 | [error] 370 | 371 | 372 | 373 | === TEST 9: two send in a batch 374 | --- http_config eval: $::HttpConfig 375 | --- config 376 | location /t { 377 | content_by_lua ' 378 | ngx.req.read_body(); 379 | 380 | local cjson = require "cjson" 381 | local producer = require "resty.kafka.producer" 382 | 383 | local broker_list = { 384 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT}, 385 | } 386 | 387 | local key = "key" 388 | local message = ngx.req.get_body_data(); 389 | 390 | local p = producer:new(broker_list, { producer_type = "async", flush_time = 10000}) 391 | ngx.sleep(0.01) 392 | -- 2 message 393 | local size, err = p:send("test", key, message) 394 | p:flush() 395 | local offset0 = p:offset() 396 | 397 | local size, err = p:send("test", key, message) 398 | local size, err = p:send("test", key, message) 399 | p:flush() 400 | 401 | local offset1 = p:offset() 402 | 403 | ngx.say("send num:", tonumber(offset1 - offset0)) 404 | '; 405 | } 406 | --- request 407 | POST /t 408 | Hello world 409 | --- response_body 410 | send num:2 411 | --- no_error_log 412 | [error] 413 | -------------------------------------------------------------------------------- /t/client.t: -------------------------------------------------------------------------------- 1 | # vim:set ts=4 sw=4 et: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (3 * blocks()); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_package_path "$pwd/lib/?.lua;;"; 14 | lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; 15 | }; 16 | 17 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 18 | $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; 19 | $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; 20 | $ENV{TEST_NGINX_KAFKA_SSL_PORT} = '9093'; 21 | 
$ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; 22 | $ENV{TEST_NGINX_KAFKA_SASL_PORT} = '9094'; 23 | $ENV{TEST_NGINX_KAFKA_SASL_USER} = 'admin'; 24 | $ENV{TEST_NGINX_KAFKA_SASL_PWD} = 'admin-secret'; 25 | 26 | 27 | no_long_string(); 28 | #no_diff(); 29 | 30 | run_tests(); 31 | 32 | __DATA__ 33 | 34 | === TEST 1: simple fetch 35 | --- http_config eval: $::HttpConfig 36 | --- config 37 | location /t { 38 | content_by_lua ' 39 | 40 | local cjson = require "cjson" 41 | local client = require "resty.kafka.client" 42 | 43 | local broker_list = { 44 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 45 | } 46 | 47 | local messages = { 48 | "halo world", 49 | } 50 | 51 | local cli = client:new(broker_list) 52 | 53 | local brokers, partitions = cli:fetch_metadata("test") 54 | if not brokers then 55 | ngx.say("fetch err:", partitions) 56 | return 57 | end 58 | 59 | ngx.say(cjson.encode(partitions)) 60 | '; 61 | } 62 | --- request 63 | GET /t 64 | --- response_body_like 65 | .*replicas.* 66 | --- no_error_log 67 | [error] 68 | 69 | 70 | 71 | === TEST 2: simple ssl fetch 72 | --- http_config eval: $::HttpConfig 73 | --- config 74 | location /t { 75 | content_by_lua ' 76 | 77 | local cjson = require "cjson" 78 | local client = require "resty.kafka.client" 79 | 80 | local broker_list = { 81 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SSL_PORT }, 82 | } 83 | 84 | local messages = { 85 | "halo world", 86 | } 87 | 88 | local cli = client:new(broker_list, { ssl = true}) 89 | 90 | local brokers, partitions = cli:fetch_metadata("test") 91 | if not brokers then 92 | ngx.say("fetch err:", partitions) 93 | return 94 | end 95 | 96 | ngx.say(cjson.encode(partitions)) 97 | '; 98 | } 99 | --- request 100 | GET /t 101 | --- response_body_like 102 | .*replicas.* 103 | --- no_error_log 104 | [error] 105 | 106 | 107 | 108 | === TEST 3: timer refresh 109 | --- http_config eval: $::HttpConfig 110 | --- config 111 | location /t { 112 | content_by_lua ' 113 | 114 | local cjson = require "cjson" 115 | local client = require "resty.kafka.client" 116 | 117 | local broker_list = { 118 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 119 | } 120 | 121 | local messages = { 122 | "halo world", 123 | } 124 | 125 | local cli = client:new(broker_list, { refresh_interval = 100 }) 126 | -- XXX just hack for test 127 | cli.topic_partitions = { test = {}, test1 = {} } 128 | 129 | ngx.sleep(0.5) 130 | 131 | ngx.say(cjson.encode(cli.topic_partitions)) 132 | '; 133 | } 134 | --- request 135 | GET /t 136 | --- response_body_like 137 | .*replicas.* 138 | --- no_error_log 139 | [error] 140 | 141 | 142 | 143 | === TEST 4: simple fetch sasl 144 | --- http_config eval: $::HttpConfig 145 | --- config 146 | location /t { 147 | content_by_lua ' 148 | 149 | local cjson = require "cjson" 150 | local client = require "resty.kafka.client" 151 | 152 | local broker_list = { 153 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SASL_PORT , 154 | sasl_config = { mechanism="PLAIN", user="$TEST_NGINX_KAFKA_SASL_USER", password = "$TEST_NGINX_KAFKA_SASL_PWD" },}, 155 | } 156 | 157 | local messages = { 158 | "halo world", 159 | } 160 | 161 | local cli = client:new(broker_list) 162 | 163 | local brokers, partitions = cli:fetch_metadata("test") 164 | if not brokers then 165 | ngx.say("fetch err:", partitions) 166 | return 167 | end 168 | 169 | ngx.say(cjson.encode(partitions)) 170 | '; 171 | } 172 | --- request 173 | GET /t 174 | --- response_body_like 175 | .*replicas.* 176 | --- no_error_log 177 | 
[error] 178 | 179 | 180 | 181 | === TEST 5: timer refresh sasl 182 | --- http_config eval: $::HttpConfig 183 | --- config 184 | location /t { 185 | content_by_lua ' 186 | 187 | local cjson = require "cjson" 188 | local client = require "resty.kafka.client" 189 | 190 | local broker_list = { 191 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SASL_PORT , 192 | sasl_config = { mechanism="PLAIN", user="$TEST_NGINX_KAFKA_SASL_USER", password = "$TEST_NGINX_KAFKA_SASL_PWD" },}, 193 | } 194 | 195 | local messages = { 196 | "halo world", 197 | } 198 | 199 | local cli = client:new(broker_list, { refresh_interval = 100 }) 200 | -- XXX just hack for test 201 | cli.topic_partitions = { test = {}, test1 = {} } 202 | 203 | ngx.sleep(0.5) 204 | 205 | ngx.say(cjson.encode(cli.topic_partitions)) 206 | '; 207 | } 208 | --- request 209 | GET /t 210 | --- response_body_like 211 | .*replicas.* 212 | --- no_error_log 213 | [error] 214 | 215 | 216 | 217 | === TEST 6: ApiVersions fetch 218 | --- http_config eval: $::HttpConfig 219 | --- config 220 | location /t { 221 | content_by_lua ' 222 | 223 | local cjson = require "cjson" 224 | local client = require "resty.kafka.client" 225 | 226 | local broker_list = { 227 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 228 | } 229 | 230 | local messages = { 231 | "halo world", 232 | } 233 | 234 | local cli = client:new(broker_list) 235 | 236 | local brokers, partitions = cli:fetch_metadata("test") 237 | 238 | ngx.say(cjson.encode(cli.api_versions)) 239 | '; 240 | } 241 | --- request 242 | GET /t 243 | --- response_body eval 244 | qr/\"max_version\":/ and qr /\"min_version\":/ 245 | --- no_error_log 246 | [error] 247 | 248 | 249 | 250 | === TEST 7: ApiVersions choose 251 | --- http_config eval: $::HttpConfig 252 | --- config 253 | location /t { 254 | content_by_lua ' 255 | 256 | local cjson = require "cjson" 257 | local request = require "resty.kafka.request" 258 | local client = require "resty.kafka.client" 259 | 260 | local broker_list = { 261 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 262 | } 263 | 264 | local messages = { 265 | "halo world", 266 | } 267 | 268 | local cli = client:new(broker_list) 269 | 270 | local brokers, partitions = cli:fetch_metadata("test") 271 | 272 | -- not input version range 273 | ngx.say(cli:choose_api_version(request.FetchRequest)) 274 | 275 | -- not exist api_key 276 | ngx.say(cli:choose_api_version(-1)) 277 | 278 | -- set max version to -1 to break version choose 279 | ngx.say(cli:choose_api_version(request.FetchRequest, 0, -1)) 280 | 281 | -- set lower max version to limit the API version 282 | ngx.say(cli:choose_api_version(request.FetchRequest, 0, 5)) 283 | 284 | -- set higher max version to use the highest API version supported by broker 285 | ngx.say(cli:choose_api_version(request.FetchRequest, 0, 9999)) 286 | '; 287 | } 288 | --- request 289 | GET /t 290 | --- response_body 291 | 11 292 | -1 293 | -1 294 | 5 295 | 11 296 | --- no_error_log 297 | [error] 298 | 299 | 300 | === TEST 8: fetch with resolving 301 | --- http_config eval: $::HttpConfig 302 | --- config 303 | location /t { 304 | content_by_lua ' 305 | local cjson = require "cjson" 306 | local client = require "resty.kafka.client" 307 | 308 | local count = 0 309 | local function resolver(host) 310 | count = count + 1 311 | return "$TEST_NGINX_KAFKA_HOST" 312 | end 313 | local broker_list = { 314 | { host = "toresolve", port = $TEST_NGINX_KAFKA_PORT }, 315 | } 316 | 317 | local cli = client:new(broker_list, { 
resolver = resolver }) 318 | local brokers, partitions = cli:fetch_metadata("test") 319 | ngx.say("result ", count) 320 | '; 321 | } 322 | --- request 323 | GET /t 324 | --- response_body_like 325 | .*result [1-9].* 326 | --- no_error_log 327 | [error] -------------------------------------------------------------------------------- /t/producer.t: -------------------------------------------------------------------------------- 1 | # vim:set ts=4 sw=4 et: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (3 * blocks()); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_package_path "$pwd/lib/?.lua;;"; 14 | lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; 15 | }; 16 | 17 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 18 | $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; 19 | $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; 20 | $ENV{TEST_NGINX_KAFKA_SSL_PORT} = '9093'; 21 | $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; 22 | $ENV{TEST_NGINX_KAFKA_SASL_PORT} = '9094'; 23 | $ENV{TEST_NGINX_KAFKA_SASL_USER} = 'admin'; 24 | $ENV{TEST_NGINX_KAFKA_SASL_PWD} = 'admin-secret'; 25 | 26 | 27 | no_long_string(); 28 | #no_diff(); 29 | 30 | run_tests(); 31 | 32 | __DATA__ 33 | 34 | === TEST 1: simple send 35 | --- http_config eval: $::HttpConfig 36 | --- config 37 | location /t { 38 | content_by_lua ' 39 | 40 | local cjson = require "cjson" 41 | local producer = require "resty.kafka.producer" 42 | 43 | local broker_list = { 44 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 45 | } 46 | 47 | local message = "halo world" 48 | 49 | local p = producer:new(broker_list) 50 | 51 | local offset, err = p:send("test", nil, message) 52 | if not offset then 53 | ngx.say("send err:", err) 54 | return 55 | end 56 | 57 | ngx.say("offset: ", tostring(offset)) 58 | '; 59 | } 60 | --- request 61 | GET /t 62 | --- response_body_like 63 | .*offset.* 64 | --- no_error_log 65 | [error] 66 | 67 | 68 | 69 | === TEST 2: simple ssl send 70 | --- http_config eval: $::HttpConfig 71 | --- config 72 | location /t { 73 | content_by_lua ' 74 | 75 | local cjson = require "cjson" 76 | local producer = require "resty.kafka.producer" 77 | 78 | local broker_list = { 79 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SSL_PORT }, 80 | } 81 | 82 | local message = "halo world" 83 | 84 | local p = producer:new(broker_list, { ssl = true }) 85 | 86 | local offset, err = p:send("test", nil, message) 87 | if not offset then 88 | ngx.say("send err:", err) 89 | return 90 | end 91 | 92 | ngx.say("offset: ", tostring(offset)) 93 | '; 94 | } 95 | --- request 96 | GET /t 97 | --- response_body_like 98 | .*offset.* 99 | --- no_error_log 100 | [error] 101 | 102 | 103 | 104 | === TEST 3: broker list has bad one 105 | --- http_config eval: $::HttpConfig 106 | --- config 107 | location /t { 108 | content_by_lua ' 109 | 110 | local cjson = require "cjson" 111 | local producer = require "resty.kafka.producer" 112 | 113 | local broker_list = { 114 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_ERR_PORT }, 115 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 116 | } 117 | 118 | local message = "halo world" 119 | 120 | local p, err = producer:new(broker_list) 121 | 122 | local offset, err = p:send("test", nil, message) 123 | if not offset then 124 | ngx.say("send err:", err) 125 | return 126 | end 127 | 128 | ngx.say("offset: ", tostring(offset)) 129 | '; 130 | } 131 | --- request 132 | GET 
/t 133 | --- response_body_like 134 | .*offset.* 135 | --- error_log: fetch_metadata 136 | 137 | 138 | 139 | === TEST 4: two send 140 | --- http_config eval: $::HttpConfig 141 | --- config 142 | location /t { 143 | content_by_lua ' 144 | 145 | local cjson = require "cjson" 146 | local producer = require "resty.kafka.producer" 147 | 148 | local broker_list = { 149 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 150 | } 151 | 152 | local key = "key" 153 | local message = "halo world" 154 | 155 | local p = producer:new(broker_list) 156 | 157 | local offset1, err = p:send("test", key, message) 158 | if not offset1 then 159 | ngx.say("send1 err:", err) 160 | return 161 | end 162 | 163 | local offset2, err = p:send("test", key, message) 164 | if not offset2 then 165 | ngx.say("send2 err:", err) 166 | return 167 | end 168 | 169 | ngx.say("offset diff: ", tonumber(offset2 - offset1)) 170 | '; 171 | } 172 | --- request 173 | GET /t 174 | --- response_body 175 | offset diff: 1 176 | --- no_error_log 177 | [error] 178 | 179 | 180 | 181 | === TEST 5: two topics send 182 | --- http_config eval: $::HttpConfig 183 | --- config 184 | location /t { 185 | content_by_lua ' 186 | 187 | local cjson = require "cjson" 188 | local producer = require "resty.kafka.producer" 189 | 190 | local broker_list = { 191 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 192 | } 193 | 194 | local key = "key" 195 | local message = "halo world" 196 | 197 | local p = producer:new(broker_list) 198 | 199 | local offset1, err = p:send("test", key, message) 200 | if not offset1 then 201 | ngx.say("send1 err:", err) 202 | return 203 | end 204 | 205 | local offset2, err = p:send("test2", key, message) 206 | if not offset2 then 207 | ngx.say("send2 err:", err) 208 | return 209 | end 210 | 211 | ngx.say("two topics succeeded!") 212 | '; 213 | } 214 | --- request 215 | GET /t 216 | --- response_body 217 | two topics succeeded! 
218 | --- no_error_log 219 | [error] 220 | 221 | 222 | 223 | === TEST 6: kafka return error 224 | --- http_config eval: $::HttpConfig 225 | --- config 226 | location /t { 227 | content_by_lua ' 228 | 229 | local cjson = require "cjson" 230 | local producer = require "resty.kafka.producer" 231 | 232 | local broker_list = { 233 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 234 | } 235 | 236 | local message = "halo world" 237 | local p, err = producer:new(broker_list) 238 | 239 | local offset, err = p:send("test", "a", message) 240 | if not offset then 241 | ngx.say("send err:", err) 242 | return 243 | end 244 | 245 | -- XX: just hack for testing 246 | p.client.topic_partitions.test = { [2] = { id = 2, leader = 0 }, [1] = { id = 1, leader = 0 }, [0] = { id = 0, leader = 0 }, num = 3 } 247 | 248 | local offset2, err = p:send("test", "b", message) 249 | if not offset2 then 250 | ngx.say("send err:", err) 251 | return 252 | end 253 | ngx.say("offset: ", tostring(offset2 - offset)) 254 | '; 255 | } 256 | --- request 257 | GET /t 258 | --- response_body 259 | send err:not found partition 260 | --- no_error_log 261 | [error] 262 | 263 | 264 | 265 | === TEST 7: add a lot of messages 266 | --- http_config eval: $::HttpConfig 267 | --- config 268 | location /t { 269 | content_by_lua_block { 270 | 271 | local cjson = require "cjson" 272 | local producer = require "resty.kafka.producer" 273 | 274 | local broker_list = { 275 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT }, 276 | } 277 | local topic = "test" 278 | local key = "key" 279 | local message = "halo world" 280 | local p, err = producer:new(broker_list, { producer_type = "async", flush_time = 100}) 281 | -- init offset 282 | p:send(topic, key, message) 283 | p:flush() 284 | local offset,_ = p:offset() 285 | local i = 0 286 | while i < 2000 do 287 | p:send(topic, key, message..tostring(i)) 288 | i = i + 1 289 | end 290 | ngx.sleep(0.2) 291 | local offset2, _ = p:offset() 292 | ngx.say("offset: ", tostring(offset2 - offset)) 293 | } 294 | } 295 | --- request 296 | GET /t 297 | --- response_body 298 | offset: 2000LL 299 | --- no_error_log 300 | [error] 301 | 302 | 303 | 304 | === TEST 8: sasl simple send 305 | --- http_config eval: $::HttpConfig 306 | --- config 307 | location /t { 308 | content_by_lua ' 309 | 310 | local cjson = require "cjson" 311 | local producer = require "resty.kafka.producer" 312 | 313 | local broker_list = { 314 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SASL_PORT , 315 | sasl_config = { mechanism="PLAIN", user="$TEST_NGINX_KAFKA_SASL_USER", password = "$TEST_NGINX_KAFKA_SASL_PWD" },}, 316 | } 317 | 318 | local message = "halo world" 319 | 320 | local p = producer:new(broker_list) 321 | 322 | local offset, err = p:send("test", nil, message) 323 | if not offset then 324 | ngx.say("send err:", err) 325 | return 326 | end 327 | 328 | ngx.say("offset: ", tostring(offset)) 329 | '; 330 | } 331 | --- request 332 | GET /t 333 | --- response_body_like 334 | .*offset.* 335 | --- no_error_log 336 | [error] 337 | 338 | === TEST 9: sasl SCRAM-SHA-256 simple send 339 | --- http_config eval: $::HttpConfig 340 | --- config 341 | location /t { 342 | content_by_lua ' 343 | 344 | local cjson = require "cjson" 345 | local producer = require "resty.kafka.producer" 346 | 347 | local broker_list = { 348 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SASL_PORT , 349 | sasl_config = { mechanism="SCRAM-SHA-256", user="$TEST_NGINX_KAFKA_SASL_USER", password = 
"$TEST_NGINX_KAFKA_SASL_PWD" },}, 350 | } 351 | 352 | local message = "halo world" 353 | 354 | local p = producer:new(broker_list) 355 | 356 | local offset, err = p:send("test", nil, message) 357 | if not offset then 358 | ngx.say("send err:", err) 359 | return 360 | end 361 | 362 | ngx.say("offset: ", tostring(offset)) 363 | '; 364 | } 365 | --- request 366 | GET /t 367 | --- response_body_like 368 | .*offset.* 369 | --- no_error_log 370 | [error] 371 | 372 | === TEST 10: sasl SCRAM-SHA-512 simple send 373 | --- http_config eval: $::HttpConfig 374 | --- config 375 | location /t { 376 | content_by_lua ' 377 | 378 | local cjson = require "cjson" 379 | local producer = require "resty.kafka.producer" 380 | 381 | local broker_list = { 382 | { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_SASL_PORT , 383 | sasl_config = { mechanism="SCRAM-SHA-512", user="$TEST_NGINX_KAFKA_SASL_USER", password = "$TEST_NGINX_KAFKA_SASL_PWD" },}, 384 | } 385 | 386 | local message = "halo world" 387 | 388 | local p = producer:new(broker_list) 389 | 390 | local offset, err = p:send("test", nil, message) 391 | if not offset then 392 | ngx.say("send err:", err) 393 | return 394 | end 395 | 396 | ngx.say("offset: ", tostring(offset)) 397 | '; 398 | } 399 | --- request 400 | GET /t 401 | --- response_body_like 402 | .*offset.* 403 | --- no_error_log 404 | [error] 405 | -------------------------------------------------------------------------------- /t/request.t: -------------------------------------------------------------------------------- 1 | # vim:set ts=4 sw=4 et: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (3 * blocks()); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_package_path "$pwd/lib/?.lua;;"; 14 | lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; 15 | }; 16 | 17 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 18 | $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; 19 | $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; 20 | $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; 21 | 22 | no_long_string(); 23 | #no_diff(); 24 | 25 | run_tests(); 26 | 27 | __DATA__ 28 | 29 | === TEST 1: simple pack 30 | --- http_config eval: $::HttpConfig 31 | --- config 32 | location /t { 33 | content_by_lua ' 34 | 35 | local request = require "resty.kafka.request" 36 | local req = request:new(request.ProduceRequest, 1, "clientid") 37 | 38 | local function printx() 39 | local str = req._req[#req._req] 40 | for i = 1, #str do 41 | ngx.print(bit.tohex(string.byte(str, i), 2)) 42 | end 43 | ngx.say("") 44 | end 45 | 46 | req:int16(-1 * math.pow(2, 15)); printx() 47 | req:int16(math.pow(2, 15) - 1); printx() 48 | req:int16(-1); printx() 49 | req:int32(-1 * math.pow(2, 31)); printx() 50 | req:int32(math.pow(2, 31) - 1); printx() 51 | req:int64(-1LL * math.pow(2, 32) * math.pow(2, 31)); printx() 52 | req:int64(1ULL * math.pow(2, 32) * math.pow(2, 31) - 1); printx() 53 | '; 54 | } 55 | --- request 56 | GET /t 57 | --- response_body 58 | 8000 59 | 7fff 60 | ffff 61 | 80000000 62 | 7fffffff 63 | 8000000000000000 64 | 7fffffffffffffff 65 | --- no_error_log 66 | [error] 67 | 68 | 69 | 70 | === TEST 2: response unpack 71 | --- http_config eval: $::HttpConfig 72 | --- config 73 | location /t { 74 | content_by_lua ' 75 | 76 | local request = require "resty.kafka.request" 77 | local response = require "resty.kafka.response" 78 | 79 | local function compare(func, number) 80 | local req = request:new(request.ProduceRequest, 1, 
"clientid") 81 | req:int32(100) 82 | local correlation_id = req._req[#req._req] 83 | 84 | req[func](req, number) 85 | local str = correlation_id .. req._req[#req._req] 86 | 87 | local resp = response:new(str, req.api_version) 88 | 89 | local cnumber = resp[func](resp) 90 | 91 | ngx.say(func, ": ", tostring(number), ", ", number == cnumber) 92 | end 93 | 94 | compare("int16", 0x7fff) 95 | compare("int16", 0x7fff * -1 - 1) 96 | compare("int32", 0x7fffffff) 97 | compare("int32", 0x7fffffff * -1 - 1) 98 | compare("int64", 1ULL * math.pow(2, 32) * math.pow(2, 31) - 1) 99 | compare("int64", -1LL * math.pow(2, 32) * math.pow(2, 31)) 100 | '; 101 | } 102 | --- request 103 | GET /t 104 | --- response_body 105 | int16: 32767, true 106 | int16: -32768, true 107 | int32: 2147483647, true 108 | int32: -2147483648, true 109 | int64: 9223372036854775807ULL, true 110 | int64: -9223372036854775808LL, true 111 | --- no_error_log 112 | [error] 113 | -------------------------------------------------------------------------------- /t/ringbuffer.t: -------------------------------------------------------------------------------- 1 | # vim:set ts=4 sw=4 et: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (3 * blocks()); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_package_path "$pwd/lib/?.lua;;"; 14 | lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;"; 15 | }; 16 | 17 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 18 | $ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1'; 19 | $ENV{TEST_NGINX_KAFKA_PORT} = '9092'; 20 | $ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091'; 21 | 22 | no_long_string(); 23 | #no_diff(); 24 | 25 | run_tests(); 26 | 27 | __DATA__ 28 | 29 | === TEST 1: add 30 | --- http_config eval: $::HttpConfig 31 | --- config 32 | location /t { 33 | content_by_lua ' 34 | local ringbuffer = require "resty.kafka.ringbuffer" 35 | local buffer = ringbuffer:new(2, 3) 36 | 37 | local topic = "test" 38 | local key = "key" 39 | local message = "halo world" 40 | 41 | local ok, err = buffer:add(topic, key, message) 42 | ngx.say("add ok:", ok, "; batch:", buffer:need_send()) 43 | 44 | local ok, err = buffer:add(topic, key, message) 45 | ngx.say("add ok:", ok, "; batch:", buffer:need_send()) 46 | 47 | local ok, err = buffer:add(topic, key, message) 48 | local ok, err = buffer:add(topic, key, message) 49 | if not ok then 50 | ngx.say("add err:", err) 51 | return 52 | end 53 | ngx.say("add ok:", ok, "; batch:", buffer:need_send()) 54 | '; 55 | } 56 | --- request 57 | GET /t 58 | --- response_body 59 | add ok:true; batch:false 60 | add ok:true; batch:true 61 | add err:buffer overflow 62 | --- no_error_log 63 | [error] 64 | 65 | 66 | 67 | === TEST 2: pop 68 | --- http_config eval: $::HttpConfig 69 | --- config 70 | location /t { 71 | content_by_lua ' 72 | local ringbuffer = require "resty.kafka.ringbuffer" 73 | local buffer = ringbuffer:new(2, 3) 74 | 75 | for i = 1, 2 do 76 | buffer:add("topic1", "key1", "message1") 77 | buffer:add("topic2", "key2", "message2") 78 | 79 | local topic, key, message = buffer:pop() 80 | ngx.say(topic, key, message) 81 | 82 | local topic, key, message = buffer:pop() 83 | ngx.say(topic, key, message) 84 | end 85 | 86 | local topic, key, message = buffer:pop() 87 | ngx.say(topic) 88 | '; 89 | } 90 | --- request 91 | GET /t 92 | --- response_body 93 | topic1key1message1 94 | topic2key2message2 95 | topic1key1message1 96 | topic2key2message2 97 | nil 98 | --- no_error_log 99 | 
=== TEST 3: left_num
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local ringbuffer = require "resty.kafka.ringbuffer"
            local buffer = ringbuffer:new(2, 3)

            buffer:add("topic1", "key1", "message1")
            buffer:add("topic2", "key2", "message2")
            buffer:add("topic2", "key2", "message2")

            local topic, key, message = buffer:pop()
            buffer:add("topic2", "key2", "message2")

            local num = buffer:left_num()
            ngx.say("num:", num)
        ';
    }
--- request
GET /t
--- response_body
num:3
--- no_error_log
[error]



=== TEST 4: wait buffer full
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local ringbuffer = require "resty.kafka.ringbuffer"
            local buffer = ringbuffer:new(1, 2, true, 2)

            local function handler()
                ngx.sleep(1)
                local topic, key, message = buffer:pop()
                ngx.say(topic, key, message)
            end

            buffer:add("topic1", "key1", "message1")
            buffer:add("topic2", "key2", "message2")

            ngx.thread.spawn(handler)

            local start = ngx.now()
            local ok, err = buffer:add("topic3", "key3", "message3")
            if not ok then
                ngx.say("add err:", err)
                return
            end
            -- the add must have blocked until the pop in handler() freed a slot
            assert((ngx.now() - start) >= 1)

            for i = 1, 2 do
                local topic, key, message = buffer:pop()
                ngx.say(topic, key, message)
            end
        ';
    }
--- request
GET /t
--- response_body
topic1key1message1
topic2key2message2
topic3key3message3
--- no_error_log
[error]



=== TEST 5: wait buffer full with timeout
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local ringbuffer = require "resty.kafka.ringbuffer"
            local buffer = ringbuffer:new(1, 1, true, 2)

            local function handler()
                ngx.sleep(2.1)
                local topic, key, message = buffer:pop()
                ngx.say(topic, key, message)
            end

            buffer:add("topic1", "key1", "message1")
            local co = ngx.thread.spawn(handler)

            local start = ngx.now()
            local ok, err = buffer:add("topic3", "key3", "message3")
            if not ok then
                ngx.say("add err:", err)
            end
            -- no pop happens within the 2-second wait limit, so the add times out
            assert((ngx.now() - start) >= 2)

            ngx.thread.wait(co)

            local topic, key, message = buffer:pop()
            ngx.say(topic)
        ';
    }
--- request
GET /t
--- response_body
add err:buffer overflow timeout
topic1key1message1
nil
--- no_error_log
[error]



=== TEST 6: wait buffer full with depth
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local ringbuffer = require "resty.kafka.ringbuffer"
            local buffer = ringbuffer:new(1, 2, true, 2)

            buffer:add("topic1", "key1", "message1")
            buffer:add("topic2", "key2", "message2")

            local ok, err = buffer:add("topic3", "key3", "message3", nil, 11)
            if not ok then
                ngx.say("add err:", err)
            end

            for i = 1, 2 do
                local topic, key, message = buffer:pop()
                ngx.say(topic, key, message)
            end

            local topic, key, message = buffer:pop()
            ngx.say(topic)
        ';
    }
--- request
GET /t
--- response_body
add err:buffer overflow and over max depth
topic1key1message1
topic2key2message2
nil
--- no_error_log
[error]

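Read together, the six ringbuffer tests document the constructor and the blocking behaviour: ringbuffer:new(batch_num, max_buffering) flips need_send() to true once batch_num messages are queued and rejects adds past max_buffering with "buffer overflow", while the extra (true, 2) arguments in TESTs 4-6 evidently enable a wait-on-full mode with a roughly 2-second limit (my reading of the tests; the argument names are not spelled out in this file). A minimal non-blocking sketch, written for an OpenResty handler like the tests themselves:

-- Non-blocking ringbuffer usage, mirroring the TEST 1 configuration.
local ringbuffer = require "resty.kafka.ringbuffer"

local buffer = ringbuffer:new(2, 3)   -- batch of 2, at most 3 messages buffered

buffer:add("test", "key", "message #1")
ngx.say(buffer:need_send())           -- false: batch not yet full
buffer:add("test", "key", "message #2")
ngx.say(buffer:need_send())           -- true: batch_num messages queued

local topic, key, message = buffer:pop()   -- FIFO, returns message #1
ngx.say(topic, " ", key, " ", message)
ngx.say(buffer:left_num())            -- 1: one message still queued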
--------------------------------------------------------------------------------
/t/sendbuffer.t:
--------------------------------------------------------------------------------
# vim:set ts=4 sw=4 et:

use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);

repeat_each(2);

plan tests => repeat_each() * (3 * blocks());

my $pwd = cwd();

our $HttpConfig = qq{
    lua_package_path "$pwd/lib/?.lua;;";
    lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;";
};

$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
$ENV{TEST_NGINX_KAFKA_HOST} = '127.0.0.1';
$ENV{TEST_NGINX_KAFKA_PORT} = '9092';
$ENV{TEST_NGINX_KAFKA_ERR_PORT} = '9091';

no_long_string();
#no_diff();

run_tests();

__DATA__

=== TEST 1: add
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local sendbuffer = require "resty.kafka.sendbuffer"
            local buffer = sendbuffer:new(2, 20)

            local topic = "test"
            local partition_id = 1
            local key = "key"
            local message = "halo world"

            local overflow = buffer:add(topic, partition_id, key, message)
            ngx.say("overflow:", overflow)

            local overflow = buffer:add(topic, partition_id, key, message)
            ngx.say("overflow:", overflow)
        ';
    }
--- request
GET /t
--- response_body
overflow:nil
overflow:true
--- no_error_log
[error]



=== TEST 2: offset
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local sendbuffer = require "resty.kafka.sendbuffer"
            local buffer = sendbuffer:new(2, 20)

            local topic = "test"
            local partition_id = 1
            local key = "key"
            local message = "halo world"

            local overflow = buffer:add(topic, partition_id, key, message)
            ngx.say("overflow:", overflow)

            local offset = buffer:offset(topic, partition_id)
            ngx.say("offset:", offset)

            local offset = buffer:offset(topic, partition_id, 100)

            local offset = buffer:offset(topic, partition_id)
            ngx.say("offset:", offset)
        ';
    }
--- request
GET /t
--- response_body
overflow:nil
offset:0
offset:101
--- no_error_log
[error]



=== TEST 3: clear
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local sendbuffer = require "resty.kafka.sendbuffer"
            local buffer = sendbuffer:new(2, 20)

            local topic = "test"
            local partition_id = 1
            local key = "key"
            local message = "halo world"

            local overflow = buffer:add(topic, partition_id, key, message)
            ngx.say("overflow:", overflow)

            ngx.say("used:", buffer.topics[topic][partition_id].used)

            ngx.say("queue_num:", buffer.queue_num)

            buffer:clear(topic, partition_id)

            ngx.say("done:", buffer:done())

            ngx.say("queue_num:", buffer.queue_num)

            for i = 1, 10000 do
                buffer:clear(topic, partition_id)
            end

            ngx.say("used:", buffer.topics[topic][partition_id].used)
        ';
    }
--- request
GET /t
--- response_body
overflow:nil
used:0
queue_num:1
done:true
queue_num:0
used:1
--- no_error_log
[error]


=== TEST 4: loop
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local sendbuffer = require "resty.kafka.sendbuffer"
            local buffer = sendbuffer:new(2, 20)

            local topic = "test"
            local partition_id = 1
            local key = "key"
            local message = "halo world"

            local overflow = buffer:add(topic, partition_id, key, message)
            local overflow = buffer:add("test2", partition_id, key, message)

            for t, p in buffer:loop() do
                ngx.say("topic:", t, "; partition_id:", p)
            end
        ';
    }
--- request
GET /t
--- response_body eval
qr/topic:test(.*); partition_id:1
topic:test(.*); partition_id:1/
--- no_error_log
[error]



=== TEST 5: aggregator
--- http_config eval: $::HttpConfig
--- config
    location /t {
        content_by_lua '
            local sendbuffer = require "resty.kafka.sendbuffer"
            local client = require "resty.kafka.client"

            local broker_list = {
                { host = "$TEST_NGINX_KAFKA_HOST", port = $TEST_NGINX_KAFKA_PORT },
            }

            local cli = client:new(broker_list)

            local buffer = sendbuffer:new(2, 20)

            local topic = "test"
            local partition_id = 1
            local key = "key"
            local message = "halo world"

            cli:fetch_metadata(topic)
            cli:fetch_metadata("test2")
            cli:fetch_metadata("test3")
            cli:fetch_metadata("test4")
            cli:fetch_metadata("test5")

            local overflow = buffer:add(topic, partition_id, key, message)
            local overflow = buffer:add("test2", partition_id, key, message)
            local overflow = buffer:add("test3", partition_id, key, message)
            local overflow = buffer:add("test4", partition_id, key, message)
            local overflow = buffer:add("test5", partition_id, key, message)

            local num, sendbroker = buffer:aggregator(cli)
            ngx.say("num:", num / 2)

            buffer:err("test5", partition_id, "timeout", false)
            buffer:err("test4", partition_id, "timeout", false)

            local num, sendbroker = buffer:aggregator(cli)
            ngx.say("num:", num / 2)

            buffer:clear("test3", partition_id)
            buffer:clear("test2", partition_id)

            local num, sendbroker = buffer:aggregator(cli)
            ngx.say("num:", num / 2)

            for t, p in buffer:loop() do
                ngx.say("topic:", t, "; partition_id:", p)
            end
        ';
    }
--- request
GET /t
--- response_body eval
qr/num:3
num:2
num:1
topic:test(.*); partition_id:1
topic:test(.*); partition_id:1
topic:test(.*); partition_id:1/
--- no_error_log
[error]
--------------------------------------------------------------------------------
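A closing observation on the sendbuffer tests: TEST 2 fixes the offset() contract. With no third argument it reads the partition's stored offset (initially 0); with one it records the offset returned in a produce response, after which a read yields that value plus one, i.e. the next offset to expect. In sketch form, for an OpenResty handler (constructor arguments copied from the tests; this file never spells out their meaning, though they behave like a message-count bound and a size bound):

-- offset() read/write behaviour as pinned down by sendbuffer.t TEST 2.
local sendbuffer = require "resty.kafka.sendbuffer"

local buffer = sendbuffer:new(2, 20)
buffer:add("test", 1, "key", "halo world")

ngx.say(buffer:offset("test", 1))   -- 0: nothing recorded yet
buffer:offset("test", 1, 100)       -- record the broker's produce offset
ngx.say(buffer:offset("test", 1))   -- 101: the next expected offset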