├── .appveyor.yml ├── .appveyor ├── build.ps1 ├── install.ps1 └── package.ps1 ├── .editorconfig ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE └── workflows │ ├── test.yml │ └── test │ ├── build-librdkafka.sh │ ├── build-php-simple-kafka-client.sh │ ├── build-php.sh │ ├── start-kafka.sh │ └── tests.sh ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── config.m4 ├── config.w32 ├── configuration.c ├── configuration.stub.php ├── configuration_arginfo.h ├── consumer.c ├── consumer.stub.php ├── consumer_arginfo.h ├── functions.c ├── functions.stub.php ├── functions_arginfo.h ├── kafka_exception.c ├── kafka_exception.stub.php ├── kafka_exception_arginfo.h ├── message.c ├── message.stub.php ├── message_arginfo.h ├── metadata.c ├── metadata.stub.php ├── metadata_arginfo.h ├── metadata_broker.c ├── metadata_broker.stub.php ├── metadata_broker_arginfo.h ├── metadata_collection.c ├── metadata_collection.stub.php ├── metadata_collection_arginfo.h ├── metadata_partition.c ├── metadata_partition.stub.php ├── metadata_partition_arginfo.h ├── metadata_topic.c ├── metadata_topic.stub.php ├── metadata_topic_arginfo.h ├── package.xml ├── php_simple_kafka_client_int.h ├── producer.c ├── producer.stub.php ├── producer_arginfo.h ├── simple_kafka_client.c ├── simple_kafka_client.stub.php ├── simple_kafka_client_arginfo.h ├── tests ├── allow_null_payload.phpt ├── allow_null_payload_and_key.phpt ├── conf.phpt ├── conf_callbacks_integration.phpt ├── conf_extend.phpt ├── constants.phpt ├── consumer_topic_destruct.phpt ├── functions.phpt ├── init_transaction_not_configured.phpt ├── integration-tests-check.php ├── kafka_error_exception.phpt ├── message_headers.phpt ├── metadata.phpt ├── oauthbearer_cb.phpt ├── offsets_for_times.phpt ├── produce_consume.phpt ├── produce_consume_transactional.phpt ├── query_watermark_offsets.phpt ├── rd_kafka_get_err_descs.phpt ├── set_oauthbearer_failure.phpt ├── set_oauthbearer_token.phpt ├── test_env.php.sample └── topic_partition.phpt ├── topic.c ├── topic.stub.php ├── topic_arginfo.h ├── topic_partition.c ├── topic_partition.stub.php └── topic_partition_arginfo.h /.appveyor.yml: -------------------------------------------------------------------------------- 1 | # general configuration 2 | version: '{branch}.{build}' 3 | 4 | # environment configuration 5 | image: Visual Studio 2017 6 | clone_folder: C:\projects\php_simple_kafka_client 7 | environment: 8 | BIN_SDK_VER: 2.2.0 9 | DEP: librdkafka-1.6.2 10 | matrix: 11 | - PHP_VER: 7.4 12 | TS: 0 13 | VC: vc15 14 | ARCH: x64 15 | OPCACHE: 0 16 | - PHP_VER: 7.4 17 | TS: 1 18 | VC: vc15 19 | ARCH: x64 20 | OPCACHE: 1 21 | - PHP_VER: 8.0 22 | TS: 0 23 | VC: vs16 24 | ARCH: x64 25 | OPCACHE: 0 26 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 27 | - PHP_VER: 8.0 28 | TS: 1 29 | VC: vs16 30 | ARCH: x64 31 | OPCACHE: 1 32 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 33 | - PHP_VER: 8.1 34 | TS: 0 35 | VC: vs16 36 | ARCH: x64 37 | OPCACHE: 0 38 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 39 | - PHP_VER: 8.1 40 | TS: 1 41 | VC: vs16 42 | ARCH: x64 43 | OPCACHE: 1 44 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 45 | - PHP_VER: 8.2 46 | TS: 0 47 | VC: vs16 48 | ARCH: x64 49 | OPCACHE: 0 50 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 51 | - PHP_VER: 8.2 52 | TS: 1 53 | VC: vs16 54 | ARCH: x64 55 | OPCACHE: 1 56 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 57 | cache: 58 | - C:\build-cache -> .appveyor.yml, .appveyor\install.ps1 59 | install: 60 | - ps: .appveyor\install.ps1 61 | 62 | # build 
configuration 63 | build_script: 64 | - ps: .appveyor\build.ps1 65 | 66 | after_build: 67 | - ps: .appveyor\package.ps1 68 | -------------------------------------------------------------------------------- /.appveyor/build.ps1: -------------------------------------------------------------------------------- 1 | $ErrorActionPreference = "Stop" 2 | 3 | Set-Location 'C:\projects\php_simple_kafka_client' 4 | 5 | $task = New-Item 'task.bat' -Force 6 | Add-Content $task "call phpize 2>&1" 7 | Add-Content $task "call configure --with-php-build=C:\build-cache\deps --with-simple-kafka-client --enable-debug-pack 2>&1" 8 | Add-Content $task "nmake /nologo 2>&1" 9 | Add-Content $task "exit %errorlevel%" 10 | & "C:\build-cache\php-sdk-$env:BIN_SDK_VER\phpsdk-$env:VC-$env:ARCH.bat" -t $task 11 | if (-not $?) { 12 | throw "build failed with errorlevel $LastExitCode" 13 | } 14 | -------------------------------------------------------------------------------- /.appveyor/install.ps1: -------------------------------------------------------------------------------- 1 | $ErrorActionPreference = "Stop" 2 | 3 | if (-not (Test-Path 'C:\build-cache')) { 4 | [void](New-Item 'C:\build-cache' -ItemType 'directory') 5 | } 6 | 7 | $bname = "php-sdk-$env:BIN_SDK_VER.zip" 8 | if (-not (Test-Path "C:\build-cache\$bname")) { 9 | Invoke-WebRequest "https://github.com/Microsoft/php-sdk-binary-tools/archive/$bname" -OutFile "C:\build-cache\$bname" 10 | } 11 | $dname0 = "php-sdk-binary-tools-php-sdk-$env:BIN_SDK_VER" 12 | $dname1 = "php-sdk-$env:BIN_SDK_VER" 13 | if (-not (Test-Path "C:\build-cache\$dname1")) { 14 | Expand-Archive "C:\build-cache\$bname" 'C:\build-cache' 15 | Move-Item "C:\build-cache\$dname0" "C:\build-cache\$dname1" 16 | } 17 | 18 | $gareleases = Invoke-WebRequest "https://windows.php.net/downloads/releases/releases.json" | ConvertFrom-Json 19 | $qareleases = Invoke-WebRequest "https://windows.php.net/downloads/qa/releases.json" | ConvertFrom-Json 20 | $garev = [regex]::split($gareleases.$env:PHP_VER.version, '[^\d]')[2] 21 | $qarev = [regex]::split($qareleases.$env:PHP_VER.version, '[^\d]')[2] 22 | if ($qarev -gt $garev) { 23 | $phpversion = $qareleases.$env:PHP_VER.version 24 | $phprelease = 'QA' 25 | } else { 26 | $phpversion = $gareleases.$env:PHP_VER.version 27 | $phprelease = 'GA' 28 | } 29 | 30 | $ts_part = '' 31 | if ($env:TS -eq '0') { 32 | $ts_part += '-nts' 33 | } 34 | $bname = "php-devel-pack-$phpversion$ts_part-Win32-$env:VC-$env:ARCH.zip" 35 | if (-not (Test-Path "C:\build-cache\$bname")) { 36 | if ($phprelease -eq "GA") { 37 | Invoke-WebRequest "https://windows.php.net/downloads/releases/$bname" -OutFile "C:\build-cache\$bname" 38 | } else { 39 | Invoke-WebRequest "https://windows.php.net/downloads/qa/$bname" -OutFile "C:\build-cache\$bname" 40 | } 41 | } 42 | $dname0 = "php-$phpversion-devel-$env:VC-$env:ARCH" 43 | $dname1 = "php-$phpversion$ts_part-devel-$env:VC-$env:ARCH" 44 | if (-not (Test-Path "C:\build-cache\$dname1")) { 45 | Expand-Archive "C:\build-cache\$bname" 'C:\build-cache' 46 | if ($dname0 -ne $dname1) { 47 | Move-Item "C:\build-cache\$dname0" "C:\build-cache\$dname1" 48 | } 49 | } 50 | $env:PATH = "C:\build-cache\$dname1;$env:PATH" 51 | 52 | $bname = "php-$phpversion$ts_part-Win32-$env:VC-$env:ARCH.zip" 53 | if (-not (Test-Path "C:\build-cache\$bname")) { 54 | if ($phprelease -eq "GA") { 55 | Invoke-WebRequest "https://windows.php.net/downloads/releases/$bname" -OutFile "C:\build-cache\$bname" 56 | } else { 57 | Invoke-WebRequest "https://windows.php.net/downloads/qa/$bname" 
-OutFile "C:\build-cache\$bname" 58 | } 59 | } 60 | $dname = "php-$phpversion$ts_part-$env:VC-$env:ARCH" 61 | if (-not (Test-Path "C:\build-cache\$dname")) { 62 | Expand-Archive "C:\build-cache\$bname" "C:\build-cache\$dname" 63 | } 64 | $env:PATH = "c:\build-cache\$dname;$env:PATH" 65 | 66 | $bname = "$env:DEP-$env:VC-$env:ARCH.zip" 67 | if (-not (Test-Path "C:\build-cache\$bname")) { 68 | Invoke-WebRequest "http://windows.php.net/downloads/pecl/deps/$bname" -OutFile "C:\build-cache\$bname" 69 | Expand-Archive "C:\build-cache\$bname" 'C:\build-cache\deps' 70 | Copy-Item "C:\build-cache\deps\LICENSE" "C:\build-cache\deps\LICENSE.LIBRDKAFKA" 71 | } 72 | -------------------------------------------------------------------------------- /.appveyor/package.ps1: -------------------------------------------------------------------------------- 1 | $ErrorActionPreference = "Stop" 2 | 3 | if ($env:TS -eq '0') { 4 | $ts_part = 'nts' 5 | } else { 6 | $ts_part = 'ts'; 7 | } 8 | 9 | if ($env:APPVEYOR_REPO_TAG -eq "true") { 10 | $bname = "php_simple_kafka_client-$env:APPVEYOR_REPO_TAG_NAME-$env:PHP_VER-$ts_part-$env:VC-$env:ARCH" 11 | } else { 12 | $bname = "php_simple_kafka_client-$($env:APPVEYOR_REPO_COMMIT.substring(0, 8))-$env:PHP_VER-$ts_part-$env:VC-$env:ARCH" 13 | } 14 | $zip_bname = "$bname.zip" 15 | 16 | $dir = 'C:\projects\php_simple_kafka_client\'; 17 | if ($env:ARCH -eq 'x64') { 18 | $dir += 'x64\' 19 | } 20 | $dir += 'Release' 21 | if ($env:TS -eq '1') { 22 | $dir += '_TS' 23 | } 24 | 25 | $files = @( 26 | "$dir\php_simple_kafka_client.dll", 27 | "$dir\php_simple_kafka_client.pdb", 28 | "C:\projects\php_simple_kafka_client\LICENSE", 29 | "C:\projects\php_simple_kafka_client\README.md", 30 | "C:\build-cache\deps\bin\librdkafka.dll", 31 | "C:\build-cache\deps\LICENSE.LIBRDKAFKA" 32 | ) 33 | Compress-Archive $files "C:\$zip_bname" 34 | Push-AppveyorArtifact "C:\$zip_bname" 35 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | insert_final_newline = true 5 | 6 | [*.{c,h}] 7 | indent_style = space 8 | indent_size = 4 9 | 10 | [*.md] 11 | trim_trailing_whitespace = false 12 | 13 | [*.phpt] 14 | trim_trailing_whitespace = true 15 | indent_style = space 16 | indent_size = 4 17 | 18 | [package.xml] 19 | indent_style = space 20 | indent_size = 1 21 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [nick-zh] 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE: -------------------------------------------------------------------------------- 1 | 7 | 8 | * PHP version: 9 | * librdkafka version: 10 | * simple-kafka-client version: 11 | * kafka version: 12 | 13 | Add debug logs, by adjusting your configuration: 14 | ```php 15 | set('log_level', (string) LOG_DEBUG); 19 | $conf->set('debug', 'all'); 20 | ``` 21 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: 'Tests' 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | tests: 11 | name: 'Tests' 12 | strategy: 13 | matrix: 14 | include: 15 | - php: '8.2.5' 16 | 
librdkafka: 'v1.8.2' 17 | memcheck: '1' 18 | - php: '8.2.5' 19 | librdkafka: 'v1.8.2' 20 | - php: '8.1.18' 21 | librdkafka: 'v1.8.2' 22 | - php: '8.0.28' 23 | librdkafka: 'v1.8.2' 24 | - php: '7.4.33' 25 | librdkafka: 'v1.8.2' 26 | - php: '8.2.5' 27 | librdkafka: 'v1.7.0' 28 | - php: '8.1.18' 29 | librdkafka: 'v1.7.0' 30 | - php: '8.0.28' 31 | librdkafka: 'v1.7.0' 32 | - php: '7.4.33' 33 | librdkafka: 'v1.7.0' 34 | - php: '8.2.5' 35 | librdkafka: 'v1.6.2' 36 | - php: '8.1.18' 37 | librdkafka: 'v1.6.2' 38 | - php: '8.0.28' 39 | librdkafka: 'v1.6.2' 40 | - php: '7.4.33' 41 | librdkafka: 'v1.6.2' 42 | - php: '8.2.5' 43 | librdkafka: 'master' 44 | experimental: true 45 | - php: '8.1.0' 46 | librdkafka: 'master' 47 | experimental: true 48 | - php: '8.0.28' 49 | librdkafka: 'master' 50 | experimental: true 51 | - php: '7.4.33' 52 | librdkafka: 'master' 53 | experimental: true 54 | 55 | runs-on: 'ubuntu-20.04' 56 | continue-on-error: ${{ !!matrix.experimental }} 57 | env: 58 | PHP_VERSION: ${{ matrix.php }} 59 | LIBRDKAFKA_VERSION: ${{ matrix.librdkafka }} 60 | MEMORY_CHECK: ${{ matrix.memcheck }} 61 | TEST_KAFKA_BROKERS: kafka:9092 62 | TEST_KAFKA_BROKER_VERSION: 2.6 63 | steps: 64 | - name: 'Check out repository' 65 | uses: 'actions/checkout@v2' 66 | with: 67 | path: 'php-simple-kafka-client' 68 | 69 | - uses: actions/cache@v2 70 | with: 71 | path: ~/build-cache/php 72 | key: ${{ runner.os }}-${{ matrix.php }}-${{ matrix.memcheck }} 73 | 74 | - uses: actions/cache@v2 75 | with: 76 | path: ~/build-cache/librdkafka 77 | key: ${{ runner.os }}-${{ matrix.librdkafka }} 78 | 79 | - name: 'Build librdkafka' 80 | run: './php-simple-kafka-client/.github/workflows/test/build-librdkafka.sh' 81 | 82 | - name: 'Build PHP' 83 | run: './php-simple-kafka-client/.github/workflows/test/build-php.sh' 84 | 85 | - name: 'Build php-simple-kafka-client' 86 | run: './php-simple-kafka-client/.github/workflows/test/build-php-simple-kafka-client.sh' 87 | 88 | - name: 'Start Kafka' 89 | run: './php-simple-kafka-client/.github/workflows/test/start-kafka.sh' 90 | 91 | - name: 'Run tests' 92 | run: './php-simple-kafka-client/.github/workflows/test/tests.sh' 93 | -------------------------------------------------------------------------------- /.github/workflows/test/build-librdkafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | if ! [ -f ~/build-cache/librdkafka/usr/local/include/librdkafka/rdkafka.h ] || ! [ -f ~/build-cache/librdkafka/usr/local/bin/kcat ]; then 6 | echo "librdkafka build is not cached" 7 | 8 | git clone --depth 1 --branch "${LIBRDKAFKA_VERSION:-1.6.0}" "${LIBRDKAFKA_REPOSITORY_URL:-https://github.com/edenhill/librdkafka.git}" 9 | 10 | cd librdkafka 11 | ./configure 12 | make 13 | mkdir -p ~/build-cache/librdkafka 14 | sudo make install DESTDIR=$HOME/build-cache/librdkafka 15 | test -f ~/build-cache/librdkafka/usr/local/include/librdkafka/rdkafka.h || echo "librdkafka build failed" 16 | 17 | sudo rsync -a ~/build-cache/librdkafka/ / 18 | sudo ldconfig 19 | cd .. 
20 | 21 | git clone --depth 1 --branch "1.7.0" "${LIBRDKAFKA_REPOSITORY_URL:-https://github.com/edenhill/kcat.git}" 22 | 23 | cd kcat 24 | ./configure 25 | make 26 | sudo make install DESTDIR=$HOME/build-cache/librdkafka 27 | 28 | else 29 | echo "librdkafka build is cached" 30 | fi 31 | 32 | sudo rsync -av ~/build-cache/librdkafka/ / 33 | sudo ldconfig 34 | -------------------------------------------------------------------------------- /.github/workflows/test/build-php-simple-kafka-client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | echo "Building php-simple-kafka-client with PHP version:" 6 | php --version 7 | 8 | if [ $MEMORY_CHECK -eq 1 ]; then 9 | PHP_SIMPLE_KAFKA_CLIENT_CFLAGS="-Wall -Werror -Wno-deprecated-declarations" 10 | fi 11 | 12 | cd php-simple-kafka-client 13 | phpize 14 | CFLAGS="$PHP_SIMPLE_KAFKA_CLIENT_CFLAGS" ./configure 15 | make 16 | 17 | echo "extension=$(pwd)/modules/simple_kafka_client.so"|sudo tee /usr/local/etc/php/simple_kafka_client.ini >/dev/null 18 | -------------------------------------------------------------------------------- /.github/workflows/test/build-php.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | if [ $MEMORY_CHECK -eq 1 ]; then 6 | sudo bash -c 'apt-get update;apt-get -y install valgrind' 7 | fi 8 | 9 | if ! [ -f ~/build-cache/php/usr/local/bin/php ]; then 10 | echo "PHP build is not cached" 11 | 12 | wget https://secure.php.net/distributions/php-${PHP_VERSION}.tar.bz2 13 | tar xjf php-${PHP_VERSION}.tar.bz2 14 | cd php-${PHP_VERSION} 15 | 16 | PHP_BUILD_FLAGS="--prefix=/usr/local --disable-all --enable-cli --enable-cgi --with-config-file-scan-dir=/usr/local/etc/php --with-zlib" 17 | 18 | if [ $MEMORY_CHECK -eq 1 ]; then 19 | PHP_BUILD_FLAGS="$PHP_BUILD_FLAGS --enable-debug --with-valgrind" 20 | else 21 | case $PHP_VERSION in 22 | 8.*) 23 | PHP_BUILD_FLAGS="$PHP_BUILD_FLAGS --enable-zts" 24 | ;; 25 | 7.*) 26 | PHP_BUILD_FLAGS="$PHP_BUILD_FLAGS --enable-maintainer-zts" 27 | ;; 28 | esac 29 | fi 30 | 31 | ./configure $PHP_BUILD_FLAGS $PHP_BUILD_EXTRA_FLAGS 32 | make -j $(nproc) 33 | mkdir -p ~/build-cache/php 34 | sudo make install INSTALL_ROOT=$HOME/build-cache/php 35 | else 36 | echo "PHP build is cached" 37 | fi 38 | 39 | sudo rsync -av ~/build-cache/php/ / 40 | sudo mkdir -p /usr/local/etc/php 41 | -------------------------------------------------------------------------------- /.github/workflows/test/start-kafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker network create kafka_network 4 | docker pull wurstmeister/zookeeper:3.4.6 5 | docker run -d --network kafka_network --name zookeeper wurstmeister/zookeeper:3.4.6 6 | docker pull wurstmeister/kafka:2.13-2.6.0 7 | docker run -d -p 9092:9092 --network kafka_network -e "KAFKA_AUTO_CREATE_TOPICS_ENABLE=true" -e "KAFKA_CREATE_TOPICS=test-topic:1:1:compact" -e "KAFKA_ADVERTISED_HOST_NAME=kafka" -e "KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181" -e "KAFKA_ADVERTISED_PORT=9092" -e "KAFKA_BROKER_ID=1" --name kafka wurstmeister/kafka:2.13-2.6.0 8 | printf "\n127.0.0.1 kafka\n"|sudo tee -a /etc/hosts >/dev/null 9 | 10 | echo "Waiting for Kafka to be ready" 11 | 12 | for i in $(seq 1 20); do 13 | if kcat -b 127.0.0.1 -L; then 14 | echo "Kafka is ready" 15 | exit 0 16 | fi 17 | done 18 | 19 | echo "Timed out waiting for Kafka to be ready" 20 | exit 1 21 |
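Once the extension built above is loaded and the broker started by `start-kafka.sh` is reachable, the integration tests (`tests/produce_consume.phpt` and friends) exercise a produce/consume round trip roughly like the sketch below. The class names come from this extension's stubs; the exact method signatures (`producev()`, `flush()`, `consume()`) and the `RD_KAFKA_*` constants used here are recalled from the extension's documentation and should be treated as assumptions — the `*.stub.php` files are the authoritative API reference.

```php
<?php
// Minimal, hedged sketch of the round trip the integration tests exercise.
// Assumes the extension is loaded and a broker is reachable at kafka:9092
// (the TEST_KAFKA_BROKERS value used by the workflow above).
use SimpleKafkaClient\Configuration;
use SimpleKafkaClient\Consumer;
use SimpleKafkaClient\Producer;

$conf = new Configuration();
$conf->set('metadata.broker.list', 'kafka:9092');

// Produce one message to the pre-created test topic.
$producer = new Producer($conf);
$topic = $producer->getTopicHandle('test-topic');
$topic->producev(RD_KAFKA_PARTITION_UA, 0, 'payload', 'key'); // signature assumed
$producer->flush(10000);

// Consume it back.
$conf->set('group.id', 'example-group');
$conf->set('auto.offset.reset', 'earliest');
$consumer = new Consumer($conf);
$consumer->subscribe(['test-topic']);
$message = $consumer->consume(10000);

if (RD_KAFKA_RESP_ERR_NO_ERROR === $message->err) {
    var_dump($message->payload, $message->key, $message->headers);
} else {
    echo $message->getErrorString(), PHP_EOL;
}
```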
-------------------------------------------------------------------------------- /.github/workflows/test/tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -xve 4 | 5 | cd php-simple-kafka-client 6 | 7 | if [ $MEMORY_CHECK -eq 1 ]; then 8 | echo "Enabling memory checking" 9 | showmem=--show-mem 10 | checkmem=-m 11 | fi 12 | 13 | cp tests/test_env.php.sample tests/test_env.php 14 | 15 | PHP=$(which php) 16 | REPORT_EXIT_STATUS=1 TEST_PHP_EXECUTABLE="$PHP" "$PHP" run-tests.php -q $checkmem --show-diff $showmem 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.la 2 | *.lo 3 | *.swp 4 | .deps 5 | *.dep 6 | .libs 7 | Makefile 8 | Makefile.fragments 9 | Makefile.global 10 | Makefile.objects 11 | acinclude.m4 12 | aclocal.m4 13 | autom4te.cache 14 | build 15 | config.guess 16 | config.h 17 | config.h.in 18 | config.h.in~ 19 | config.log 20 | config.nice 21 | config.status 22 | config.sub 23 | configure 24 | configure.in 25 | configure.ac 26 | include 27 | install-sh 28 | libtool 29 | ltmain.sh 30 | ltmain.sh.backup 31 | missing 32 | mkinstalldirs 33 | modules 34 | kafka-*.tgz 35 | run-tests.php 36 | tests/*/*.diff 37 | tests/*/*.exp 38 | tests/*/*.log 39 | tests/*/*.out 40 | tests/*/*.php 41 | tests/*/*.sh 42 | tmp-php.ini 43 | .idea 44 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | All contributions to the project are welcome. 3 | Please keep the following in mind when contributing: 4 | 5 | ## Branches 6 | Pull requests should be made against the main branch, which supports both PHP 7 and PHP 8. 7 | 8 | ## Testing 9 | Tests are written in the phpt format and live in the `tests` directory. 10 | 11 | ### Using your own machine for building and testing 12 | Tests can be run by following the compilation and installation procedure 13 | and then executing `make test`. 14 | To run the integration tests, make sure you have a Kafka instance running. 15 | Then rename `test_env.php.sample` to `test_env.php` and adjust the values 16 | to match your Kafka instance. 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2016, Arnaud Le Blanc (Author) 4 | Copyright (c) 2020, Nick Chiu 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright notice, this 11 | list of conditions and the following disclaimer. 12 | 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | 3. Neither the name of the copyright holder nor the names of its 18 | contributors may be used to endorse or promote products derived from 19 | this software without specific prior written permission. 
20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PHP Kafka extension (php-simple-kafka-client) 2 | 3 | [![Supported librdkafka versions: >= 1.6.0](https://img.shields.io/badge/librdkafka-%3E%3D%201.6.0-blue.svg)](https://github.com/edenhill/librdkafka/releases) 4 | [![Supported Kafka versions: >= 0.9](https://img.shields.io/badge/kafka-%3E%3D%200.9-blue.svg)](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#broker-version-compatibility) 5 | ![Supported Redpanda versions: >= 20.x](https://img.shields.io/badge/redpanda-%3E%3D20.x-red) 6 | ![Supported PHP versions: 7.4 .. 8.x](https://img.shields.io/badge/php-7.4%20..%208.x-blue.svg) 7 | [![License: BSD-3](https://img.shields.io/badge/License-BSD--3-green.svg)](https://github.com/php-kafka/php-simple-kafka-client/blob/main/LICENSE) 8 | [![Join the chat at https://gitter.im/php-kafka/php-simple-kafka-client](https://badges.gitter.im/php-kafka/php-simple-kafka-client.svg)](https://gitter.im/php-kafka/php-simple-kafka-client?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 9 | 10 | ## Documentation 11 | Please read the documentation [here](https://php-kafka.github.io/php-simple-kafka-client.github.io/about/) 12 | 13 | ## Libraries 14 | - [php-kafka/php-simple-kafka-lib](https://github.com/php-kafka/php-simple-kafka-lib) a small wrapper library to make usage easier 15 | 16 | ## Support 17 | Join the [Slack Workspace](https://join.slack.com/t/php-kafka/shared_invite/zt-a73huj9v-Nl3n9RjGgjrE8OI4bfsH6Q) or [Gitter](https://gitter.im/php-kafka/php-simple-kafka-client) 18 | 19 | ## Credits 20 | This extension relies on [librdkafka](https://github.com/confluentinc/librdkafka) 21 | This extension is based on [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka) 22 | Many thanks to all [contributors](https://github.com/php-kafka/php-simple-kafka-client/graphs/contributors) :heart: 23 | -------------------------------------------------------------------------------- /config.m4: -------------------------------------------------------------------------------- 1 | dnl $Id$ 2 | dnl config.m4 for extension simple_kafka_client 3 | 4 | PHP_ARG_WITH(simple_kafka_client, for kafka client support, 5 | [ --with-simple-kafka-client Include kafka client support]) 6 | 7 | dnl Check whether the extension is enabled at all 8 | if test "$PHP_SIMPLE_KAFKA_CLIENT" != "no"; then 9 | 10 | SEARCH_PATH="/usr/local /usr" # you might want to change this 11 | SEARCH_FOR="/include/librdkafka/rdkafka.h" # you most likely want to change this 12 | if test -r $PHP_SIMPLE_KAFKA_CLIENT/$SEARCH_FOR; then # path 
given as parameter 13 | RDKAFKA_DIR=$PHP_SIMPLE_KAFKA_CLIENT 14 | else # search default path list 15 | AC_MSG_CHECKING([for librdkafka/rdkafka.h" in default path]) 16 | for i in $SEARCH_PATH ; do 17 | if test -r $i/$SEARCH_FOR; then 18 | RDKAFKA_DIR=$i 19 | AC_MSG_RESULT(found in $i) 20 | fi 21 | done 22 | fi 23 | 24 | if test -z "$RDKAFKA_DIR"; then 25 | AC_MSG_RESULT([not found]) 26 | AC_MSG_ERROR([Please reinstall the rdkafka distribution]) 27 | fi 28 | 29 | PHP_ADD_INCLUDE($RDKAFKA_DIR/include) 30 | 31 | SOURCES="simple_kafka_client.c producer.c metadata.c metadata_broker.c metadata_topic.c metadata_partition.c metadata_collection.c configuration.c topic.c message.c functions.c consumer.c topic_partition.c kafka_exception.c" 32 | 33 | LIBNAME=rdkafka 34 | LIBSYMBOL=rd_kafka_new 35 | 36 | PHP_CHECK_LIBRARY($LIBNAME,$LIBSYMBOL, 37 | [ 38 | PHP_ADD_LIBRARY_WITH_PATH($LIBNAME, $RDKAFKA_DIR/$PHP_LIBDIR, SIMPLE_KAFKA_CLIENT_SHARED_LIBADD) 39 | AC_DEFINE(HAVE_RDKAFKALIB,1,[ ]) 40 | ],[ 41 | AC_MSG_ERROR([wrong rdkafka lib version or lib not found]) 42 | ],[ 43 | -L$RDKAFKA_DIR/$PHP_LIBDIR -lm 44 | ]) 45 | 46 | ORIG_LDFLAGS="$LDFLAGS" 47 | ORIG_CPPFLAGS="$CPPFLAGS" 48 | LDFLAGS="-L$RDKAFKA_DIR/$PHP_LIBDIR -lm" 49 | CPPFLAGS="-I$RDKAFKA_DIR/include" 50 | 51 | AC_MSG_CHECKING([for librdkafka version]) 52 | AC_EGREP_CPP(yes,[ 53 | #include 54 | #if RD_KAFKA_VERSION >= 0x000b0000 55 | yes 56 | #endif 57 | ],[ 58 | AC_MSG_RESULT([>= 1.6.0]) 59 | ],[ 60 | AC_MSG_ERROR([librdkafka version 1.6.0 or greater required.]) 61 | ]) 62 | 63 | LDFLAGS="$ORIG_LDFLAGS" 64 | CPPFLAGS="$ORIG_CPPFLAGS" 65 | 66 | PHP_SUBST(SIMPLE_KAFKA_CLIENT_SHARED_LIBADD) 67 | 68 | PHP_NEW_EXTENSION(simple_kafka_client, $SOURCES, $ext_shared) 69 | fi 70 | -------------------------------------------------------------------------------- /config.w32: -------------------------------------------------------------------------------- 1 | // $Id$ 2 | // vim:ft=javascript 3 | 4 | ARG_WITH("simple-kafka-client", "for kafka support", "no"); 5 | 6 | if (PHP_SIMPLE_KAFKA_CLIENT != "no") { 7 | if (CHECK_LIB("librdkafka.lib", "simple_kafka_client", PHP_SIMPLE_KAFKA_CLIENT) && 8 | CHECK_HEADER_ADD_INCLUDE("librdkafka/rdkafka.h", "CFLAGS_SIMPLE_KAFKA_CLIENT")) { 9 | 10 | EXTENSION("simple_kafka_client", "simple_kafka_client.c producer.c metadata.c metadata_broker.c metadata_topic.c \ 11 | metadata_partition.c metadata_collection.c configuration.c \ 12 | topic.c message.c functions.c consumer.c topic_partition.c kafka_exception.c"); 13 | 14 | AC_DEFINE('HAVE_SIMPLE_KAFKA_CLIENT', 1, ''); 15 | } else { 16 | WARNING("simple_kafka_client not enabled; libraries and headers not found"); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /configuration.stub.php: -------------------------------------------------------------------------------- 1 | code == 0) { 65 | if (seen_zero) { 66 | continue; 67 | } 68 | seen_zero = 1; 69 | } 70 | 71 | ZVAL_NULL(&el); 72 | array_init(&el); 73 | add_assoc_long(&el, "code", desc->code); 74 | if (desc->name) { 75 | add_assoc_string(&el, "name", (char*) desc->name); 76 | } else { 77 | add_assoc_null(&el, "name"); 78 | } 79 | if (desc->desc) { 80 | add_assoc_string(&el, "desc", (char*) desc->desc); 81 | }else { 82 | add_assoc_null(&el, "desc"); 83 | } 84 | add_next_index_zval(return_value, &el); 85 | } 86 | } 87 | /* }}} */ 88 | 89 | /* {{{ proto string kafka_err2name(int $err) 90 | * Returns a human readable representation of a kafka error. 
91 | */ 92 | ZEND_FUNCTION(kafka_err2name) 93 | { 94 | zend_long errorCode; 95 | const char *errname; 96 | 97 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 98 | Z_PARAM_LONG(errorCode) 99 | ZEND_PARSE_PARAMETERS_END(); 100 | 101 | errname = rd_kafka_err2name(errorCode); 102 | 103 | if (errname) { 104 | RETURN_STRING(errname); 105 | } 106 | } 107 | /* }}} */ 108 | 109 | /* {{{ proto string kafka_err2str(int $err) 110 | * Returns a human readable representation of a kafka error. 111 | */ 112 | ZEND_FUNCTION(kafka_err2str) 113 | { 114 | zend_long errorCode; 115 | const char *errstr; 116 | 117 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 118 | Z_PARAM_LONG(errorCode) 119 | ZEND_PARSE_PARAMETERS_END(); 120 | 121 | errstr = rd_kafka_err2str(errorCode); 122 | 123 | if (errstr) { 124 | RETURN_STRING(errstr); 125 | } 126 | } 127 | /* }}} */ 128 | 129 | /* {{{ proto int kafka_thread_cnt() 130 | * Retrieve the current number of threads in use by librdkafka. 131 | */ 132 | ZEND_FUNCTION(kafka_thread_cnt) 133 | { 134 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 135 | ZEND_PARSE_PARAMETERS_END(); 136 | 137 | RETURN_LONG(rd_kafka_thread_cnt()); 138 | } 139 | /* }}} */ 140 | 141 | /* {{{ proto int kafka_offset_tail(int $offset) 142 | * Start consuming `$cnt` messages from topic's current `.._END` offset. 143 | */ 144 | ZEND_FUNCTION(kafka_offset_tail) 145 | { 146 | zend_long offset; 147 | 148 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 149 | Z_PARAM_LONG(offset) 150 | ZEND_PARSE_PARAMETERS_END(); 151 | 152 | RETURN_LONG(RD_KAFKA_OFFSET_TAIL(offset)); 153 | } 154 | 155 | -------------------------------------------------------------------------------- /functions.stub.php: -------------------------------------------------------------------------------- 1 | err); 65 | 66 | if (message->rkt) { 67 | zend_update_property_string(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("topic_name"), rd_kafka_topic_name(message->rkt)); 68 | } 69 | zend_update_property_long(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("partition"), message->partition); 70 | if (message->payload) { 71 | zend_update_property_long(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("timestamp"), timestamp); 72 | zend_update_property_stringl(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("payload"), message->payload, message->len); 73 | zend_update_property_long(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("len"), message->len); 74 | } 75 | if (message->key) { 76 | zend_update_property_stringl(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("key"), message->key, message->key_len); 77 | } 78 | zend_update_property_long(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("offset"), message->offset); 79 | 80 | if (message->err == RD_KAFKA_RESP_ERR_NO_ERROR) { 81 | rd_kafka_message_headers(message, &message_headers); 82 | if (message_headers != NULL) { 83 | array_init(&headers_array); 84 | for (i = 0; i < rd_kafka_header_cnt(message_headers); i++) { 85 | header_response = rd_kafka_header_get_all(message_headers, i, &header_name, &header_value, &header_size); 86 | if (header_response != RD_KAFKA_RESP_ERR_NO_ERROR) { 87 | break; 88 | } 89 | add_assoc_stringl(&headers_array, header_name, (const char*)header_value, header_size); 90 | } 91 | zend_update_property(NULL, Z_KAFKA_PROP_OBJ(return_value), ZEND_STRL("headers"), &headers_array); 92 | zval_ptr_dtor(&headers_array); 93 | } 94 | } 95 | } 96 | 97 | void kafka_message_list_to_array(zval *return_value, rd_kafka_message_t 
**messages, long size) /* {{{ */ 98 | { 99 | rd_kafka_message_t *msg; 100 | zval zmsg; 101 | int i; 102 | 103 | array_init_size(return_value, size); 104 | 105 | for (i = 0; i < size; i++) { 106 | msg = messages[i]; 107 | ZVAL_NULL(&zmsg); 108 | kafka_message_new(&zmsg, msg); 109 | add_next_index_zval(return_value, &zmsg); 110 | } 111 | } /* }}} */ 112 | 113 | /* {{{ proto string SimpleKafkaClient\Message::getErrorString() 114 | * Returns the error string for an errored KrSimpleKafkaClient\Message or NULL if there was no error. 115 | */ 116 | ZEND_METHOD(SimpleKafkaClient_Message, getErrorString) 117 | { 118 | zval *zerr; 119 | zval *zpayload; 120 | const char *errstr; 121 | 122 | if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) { 123 | return; 124 | } 125 | 126 | zerr = kafka_read_property(NULL, Z_KAFKA_PROP_OBJ(getThis()), ZEND_STRL("err"), 0); 127 | 128 | if (!zerr || Z_TYPE_P(zerr) != IS_LONG) { 129 | return; 130 | } 131 | 132 | errstr = rd_kafka_err2str(Z_LVAL_P(zerr)); 133 | 134 | if (errstr) { 135 | RETURN_STRING(errstr); 136 | } 137 | 138 | zpayload = kafka_read_property(NULL, Z_KAFKA_PROP_OBJ(getThis()), ZEND_STRL("payload"), 0); 139 | 140 | if (zpayload && Z_TYPE_P(zpayload) == IS_STRING) { 141 | RETURN_ZVAL(zpayload, 1, 0); 142 | } 143 | } 144 | /* }}} */ 145 | 146 | void kafka_message_init(INIT_FUNC_ARGS) { /* {{{ */ 147 | zend_class_entry ce; 148 | 149 | INIT_NS_CLASS_ENTRY(ce, "SimpleKafkaClient", "Message", class_SimpleKafkaClient_Message_methods); 150 | ce_kafka_message = zend_register_internal_class(&ce); 151 | 152 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("err"), ZEND_ACC_PUBLIC); 153 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("topic_name"), ZEND_ACC_PUBLIC); 154 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("timestamp"), ZEND_ACC_PUBLIC); 155 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("partition"), ZEND_ACC_PUBLIC); 156 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("payload"), ZEND_ACC_PUBLIC); 157 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("len"), ZEND_ACC_PUBLIC); 158 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("key"), ZEND_ACC_PUBLIC); 159 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("offset"), ZEND_ACC_PUBLIC); 160 | zend_declare_property_null(ce_kafka_message, ZEND_STRL("headers"), ZEND_ACC_PUBLIC); 161 | } /* }}} */ 162 | -------------------------------------------------------------------------------- /message.stub.php: -------------------------------------------------------------------------------- 1 | metadata->brokers, intern->metadata->broker_cnt, sizeof(*intern->metadata->brokers), kafka_metadata_broker_ctor); 55 | } 56 | /* }}} */ 57 | 58 | static void topics_collection(zval *return_value, Z_KAFKA_OBJ *parent, object_intern *intern) { /* {{{ */ 59 | kafka_metadata_collection_obj_init(return_value, parent, intern->metadata->topics, intern->metadata->topic_cnt, sizeof(*intern->metadata->topics), kafka_metadata_topic_ctor); 60 | } 61 | /* }}} */ 62 | 63 | static void kafka_metadata_free(zend_object *object) /* {{{ */ 64 | { 65 | object_intern *intern = php_kafka_from_obj(object_intern, object); 66 | 67 | if (intern->metadata) { 68 | rd_kafka_metadata_destroy(intern->metadata); 69 | } 70 | 71 | zend_object_std_dtor(&intern->std); 72 | } 73 | /* }}} */ 74 | 75 | static zend_object *kafka_metadata_new(zend_class_entry *class_type) /* {{{ */ 76 | { 77 | zend_object* retval; 78 | object_intern *intern; 79 | 80 | intern = ecalloc(1, sizeof(object_intern)+ 
zend_object_properties_size(class_type)); 81 | zend_object_std_init(&intern->std, class_type); 82 | object_properties_init(&intern->std, class_type); 83 | 84 | retval = &intern->std; 85 | retval->handlers = &handlers; 86 | 87 | return retval; 88 | } 89 | /* }}} */ 90 | 91 | static object_intern * get_object(zval *zmetadata) 92 | { 93 | object_intern *ometadata = Z_KAFKA_P(object_intern, zmetadata); 94 | 95 | if (!ometadata->metadata) { 96 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Metadata::__construct() has not been called"); 97 | return NULL; 98 | } 99 | 100 | return ometadata; 101 | } 102 | 103 | static HashTable *get_debug_info(Z_KAFKA_OBJ *object, int *is_temp) /* {{{ */ 104 | { 105 | zval ary; 106 | object_intern *intern; 107 | zval brokers; 108 | zval topics; 109 | 110 | *is_temp = 1; 111 | 112 | array_init(&ary); 113 | 114 | intern = kafka_get_debug_object(object_intern, object); 115 | if (!intern) { 116 | return Z_ARRVAL(ary); 117 | } 118 | 119 | ZVAL_NULL(&brokers); 120 | brokers_collection(&brokers, object, intern); 121 | add_assoc_zval(&ary, "brokers", &brokers); 122 | 123 | ZVAL_NULL(&topics); 124 | topics_collection(&topics, object, intern); 125 | add_assoc_zval(&ary, "topics", &topics); 126 | 127 | add_assoc_long(&ary, "orig_broker_id", intern->metadata->orig_broker_id); 128 | add_assoc_string(&ary, "orig_broker_name", intern->metadata->orig_broker_name); 129 | 130 | return Z_ARRVAL(ary); 131 | } 132 | /* }}} */ 133 | 134 | /* {{{ proto long SimpleKafkaClient\Metadata::getOrigBrokerId() 135 | Broker originating this metadata */ 136 | ZEND_METHOD(SimpleKafkaClient_Metadata, getOrigBrokerId) 137 | { 138 | object_intern *intern; 139 | 140 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 141 | ZEND_PARSE_PARAMETERS_END(); 142 | 143 | intern = get_object(getThis()); 144 | if (!intern) { 145 | return; 146 | } 147 | 148 | RETURN_LONG(intern->metadata->orig_broker_id); 149 | } 150 | /* }}} */ 151 | 152 | /* {{{ proto string SimpleKafkaClient\Metadata::getOrigBrokerName() 153 | Name of originating broker */ 154 | ZEND_METHOD(SimpleKafkaClient_Metadata, getOrigBrokerName) 155 | { 156 | object_intern *intern; 157 | 158 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 159 | ZEND_PARSE_PARAMETERS_END(); 160 | 161 | intern = get_object(getThis()); 162 | if (!intern) { 163 | return; 164 | } 165 | 166 | RETURN_STRING(intern->metadata->orig_broker_name); 167 | } 168 | /* }}} */ 169 | 170 | /* {{{ proto SimpleKafkaClient\Metadata\Collection SimpleKafkaClient\Metadata::getBrokers() 171 | Topics */ 172 | ZEND_METHOD(SimpleKafkaClient_Metadata, getBrokers) 173 | { 174 | object_intern *intern; 175 | 176 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 177 | ZEND_PARSE_PARAMETERS_END(); 178 | 179 | intern = get_object(getThis()); 180 | if (!intern) { 181 | return; 182 | } 183 | 184 | brokers_collection(return_value, Z_KAFKA_PROP_OBJ(getThis()), intern); 185 | } 186 | /* }}} */ 187 | 188 | /* {{{ proto SimpleKafkaClient\Metadata\Collection SimpleKafkaClient\Metadata::getTopics() 189 | Topics */ 190 | ZEND_METHOD(SimpleKafkaClient_Metadata, getTopics) 191 | { 192 | object_intern *intern; 193 | 194 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 195 | ZEND_PARSE_PARAMETERS_END(); 196 | 197 | intern = get_object(getThis()); 198 | if (!intern) { 199 | return; 200 | } 201 | 202 | topics_collection(return_value, Z_KAFKA_PROP_OBJ(getThis()), intern); 203 | } 204 | /* }}} */ 205 | 206 | void kafka_metadata_init(INIT_FUNC_ARGS) 207 | { 
208 | zend_class_entry tmpce; 209 | 210 | INIT_NS_CLASS_ENTRY(tmpce, "SimpleKafkaClient", "Metadata", class_SimpleKafkaClient_Metadata_methods); 211 | ce = zend_register_internal_class(&tmpce); 212 | ce->create_object = kafka_metadata_new; 213 | 214 | handlers = kafka_default_object_handlers; 215 | handlers.get_debug_info = get_debug_info; 216 | handlers.free_obj = kafka_metadata_free; 217 | handlers.offset = XtOffsetOf(object_intern, std); 218 | 219 | kafka_metadata_topic_init(INIT_FUNC_ARGS_PASSTHRU); 220 | kafka_metadata_broker_init(INIT_FUNC_ARGS_PASSTHRU); 221 | kafka_metadata_partition_init(INIT_FUNC_ARGS_PASSTHRU); 222 | kafka_metadata_collection_init(INIT_FUNC_ARGS_PASSTHRU); 223 | } 224 | 225 | void kafka_metadata_obj_init(zval *return_value, const rd_kafka_metadata_t *metadata) 226 | { 227 | object_intern *intern; 228 | 229 | if (object_init_ex(return_value, ce) != SUCCESS) { 230 | return; 231 | } 232 | 233 | intern = Z_KAFKA_P(object_intern, return_value); 234 | if (!intern) { 235 | return; 236 | } 237 | 238 | intern->metadata = metadata; 239 | } 240 | -------------------------------------------------------------------------------- /metadata.stub.php: -------------------------------------------------------------------------------- 1 | metadata_broker) { 61 | zval_dtor(&intern->zmetadata); 62 | } 63 | 64 | zend_object_std_dtor(&intern->std); 65 | } 66 | /* }}} */ 67 | 68 | static zend_object *create_object(zend_class_entry *class_type) /* {{{ */ 69 | { 70 | zend_object* retval; 71 | object_intern *intern; 72 | 73 | intern = ecalloc(1, sizeof(object_intern)+ zend_object_properties_size(class_type)); 74 | zend_object_std_init(&intern->std, class_type); 75 | object_properties_init(&intern->std, class_type); 76 | 77 | retval = &intern->std; 78 | retval->handlers = &handlers; 79 | 80 | return retval; 81 | } 82 | /* }}} */ 83 | 84 | static object_intern * get_object(zval *zmt) 85 | { 86 | object_intern *omt = Z_KAFKA_P(object_intern, zmt); 87 | 88 | if (!omt->metadata_broker) { 89 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Metadata\\Broker::__construct() has not been called"); 90 | return NULL; 91 | } 92 | 93 | return omt; 94 | } 95 | 96 | static HashTable *get_debug_info(Z_KAFKA_OBJ *object, int *is_temp) /* {{{ */ 97 | { 98 | zval ary; 99 | object_intern *intern; 100 | 101 | *is_temp = 1; 102 | 103 | array_init(&ary); 104 | 105 | intern = kafka_get_debug_object(object_intern, object); 106 | if (!intern) { 107 | return Z_ARRVAL(ary); 108 | } 109 | 110 | add_assoc_long(&ary, "id", intern->metadata_broker->id); 111 | add_assoc_string(&ary, "host", intern->metadata_broker->host); 112 | add_assoc_long(&ary, "port", intern->metadata_broker->port); 113 | 114 | return Z_ARRVAL(ary); 115 | } 116 | /* }}} */ 117 | 118 | /* {{{ proto int SimpleKafkaClient\Metadata\Broker::getId() 119 | Broker id */ 120 | ZEND_METHOD(SimpleKafkaClient_Metadata_Broker, getId) 121 | { 122 | object_intern *intern; 123 | 124 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 125 | ZEND_PARSE_PARAMETERS_END(); 126 | 127 | intern = get_object(getThis()); 128 | if (!intern) { 129 | return; 130 | } 131 | 132 | RETURN_LONG(intern->metadata_broker->id); 133 | } 134 | /* }}} */ 135 | 136 | /* {{{ proto string SimpleKafkaClient\Metadata\Broker::getHost() 137 | Broker hostname */ 138 | ZEND_METHOD(SimpleKafkaClient_Metadata_Broker, getHost) 139 | { 140 | object_intern *intern; 141 | 142 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 143 | ZEND_PARSE_PARAMETERS_END(); 144 | 145 | 
intern = get_object(getThis()); 146 | if (!intern) { 147 | return; 148 | } 149 | 150 | RETURN_STRING(intern->metadata_broker->host); 151 | } 152 | /* }}} */ 153 | 154 | /* {{{ proto int SimpleKafkaClient\Metadata\Broker::getPort() 155 | Broker port */ 156 | ZEND_METHOD(SimpleKafkaClient_Metadata_Broker, getPort) 157 | { 158 | object_intern *intern; 159 | 160 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 161 | ZEND_PARSE_PARAMETERS_END(); 162 | 163 | intern = get_object(getThis()); 164 | if (!intern) { 165 | return; 166 | } 167 | 168 | RETURN_LONG(intern->metadata_broker->port); 169 | } 170 | /* }}} */ 171 | 172 | void kafka_metadata_broker_init(INIT_FUNC_ARGS) 173 | { 174 | zend_class_entry tmpce; 175 | 176 | INIT_NS_CLASS_ENTRY(tmpce, "SimpleKafkaClient", "Metadata\\Broker", class_SimpleKafkaClient_Metadata_Broker_methods); 177 | ce = zend_register_internal_class(&tmpce); 178 | ce->create_object = create_object; 179 | 180 | handlers = kafka_default_object_handlers; 181 | handlers.get_debug_info = get_debug_info; 182 | handlers.free_obj = free_object; 183 | handlers.offset = XtOffsetOf(object_intern, std); 184 | } 185 | 186 | void kafka_metadata_broker_ctor(zval *return_value, zval *zmetadata, const void *data) 187 | { 188 | rd_kafka_metadata_broker_t *metadata_broker = (rd_kafka_metadata_broker_t*)data; 189 | object_intern *intern; 190 | 191 | if (object_init_ex(return_value, ce) != SUCCESS) { 192 | return; 193 | } 194 | 195 | intern = Z_KAFKA_P(object_intern, return_value); 196 | if (!intern) { 197 | return; 198 | } 199 | 200 | ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0); 201 | intern->metadata_broker = metadata_broker; 202 | } 203 | -------------------------------------------------------------------------------- /metadata_broker.stub.php: -------------------------------------------------------------------------------- 1 | items) { 71 | zval_dtor(&intern->zmetadata); 72 | } 73 | 74 | zend_object_std_dtor(&intern->std); 75 | } 76 | /* }}} */ 77 | 78 | static zend_object *create_object(zend_class_entry *class_type) /* {{{ */ 79 | { 80 | zend_object* retval; 81 | object_intern *intern; 82 | 83 | intern = ecalloc(1, sizeof(object_intern)+ zend_object_properties_size(class_type)); 84 | zend_object_std_init(&intern->std, class_type); 85 | object_properties_init(&intern->std, class_type); 86 | 87 | retval = &intern->std; 88 | retval->handlers = &handlers; 89 | 90 | return retval; 91 | } 92 | /* }}} */ 93 | 94 | static object_intern * get_object(zval *zmti) 95 | { 96 | object_intern *omti = Z_KAFKA_P(object_intern, zmti); 97 | 98 | if (!omti->items) { 99 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Metadata\\Collection::__construct() has not been called"); 100 | return NULL; 101 | } 102 | 103 | return omti; 104 | } 105 | 106 | static HashTable *get_debug_info(Z_KAFKA_OBJ *object, int *is_temp) /* {{{ */ 107 | { 108 | zval ary; 109 | object_intern *intern; 110 | size_t i; 111 | zval item; 112 | 113 | *is_temp = 1; 114 | 115 | array_init(&ary); 116 | 117 | intern = kafka_get_debug_object(object_intern, object); 118 | if (!intern) { 119 | return Z_ARRVAL(ary); 120 | } 121 | 122 | for (i = 0; i < intern->item_cnt; i++) { 123 | ZVAL_NULL(&item); 124 | intern->ctor(&item, &intern->zmetadata, (char *)intern->items + i * intern->item_size); 125 | add_next_index_zval(&ary, &item); 126 | } 127 | 128 | return Z_ARRVAL(ary); 129 | } 130 | /* }}} */ 131 | 132 | /* {{{ proto int SimpleKafkaClient\Metadata\Collection::count() 133 | */ 134 | 
ZEND_METHOD(SimpleKafkaClient_Metadata_Collection, count) 135 | { 136 | object_intern *intern; 137 | 138 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 139 | ZEND_PARSE_PARAMETERS_END(); 140 | 141 | intern = get_object(getThis()); 142 | if (!intern) { 143 | return; 144 | } 145 | 146 | RETURN_LONG(intern->item_cnt); 147 | } 148 | /* }}} */ 149 | 150 | /* {{{ proto void SimpleKafkaClient\Metadata\Collection::rewind() 151 | */ 152 | ZEND_METHOD(SimpleKafkaClient_Metadata_Collection, rewind) 153 | { 154 | object_intern *intern; 155 | 156 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 157 | ZEND_PARSE_PARAMETERS_END(); 158 | 159 | intern = get_object(getThis()); 160 | if (!intern) { 161 | return; 162 | } 163 | 164 | intern->position = 0; 165 | } 166 | /* }}} */ 167 | 168 | /* {{{ proto mixed SimpleKafkaClient\Metadata\Collection::current() 169 | */ 170 | ZEND_METHOD(SimpleKafkaClient_Metadata_Collection, current) 171 | { 172 | object_intern *intern; 173 | 174 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 175 | ZEND_PARSE_PARAMETERS_END(); 176 | 177 | intern = get_object(getThis()); 178 | if (!intern) { 179 | return; 180 | } 181 | 182 | if (intern->position >= intern->item_cnt) { 183 | zend_throw_exception(ce_kafka_exception, "Called current() on invalid iterator", 0); 184 | return; 185 | } 186 | 187 | intern->ctor(return_value, &intern->zmetadata, (char *)intern->items + intern->position * intern->item_size); 188 | } 189 | /* }}} */ 190 | 191 | /* {{{ proto mixed SimpleKafkaClient\Metadata\Collection::key() 192 | */ 193 | ZEND_METHOD(SimpleKafkaClient_Metadata_Collection, key) 194 | { 195 | object_intern *intern; 196 | 197 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 198 | ZEND_PARSE_PARAMETERS_END(); 199 | 200 | intern = get_object(getThis()); 201 | if (!intern) { 202 | return; 203 | } 204 | 205 | if (intern->position >= intern->item_cnt) { 206 | zend_throw_exception(ce_kafka_exception, "Called key() on invalid iterator", 0); 207 | return; 208 | } 209 | 210 | RETURN_LONG(intern->position); 211 | } 212 | /* }}} */ 213 | 214 | /* {{{ proto void SimpleKafkaClient\Metadata\Collection::next() 215 | */ 216 | ZEND_METHOD(SimpleKafkaClient_Metadata_Collection, next) 217 | { 218 | object_intern *intern; 219 | 220 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 221 | ZEND_PARSE_PARAMETERS_END(); 222 | 223 | intern = get_object(getThis()); 224 | if (!intern) { 225 | return; 226 | } 227 | 228 | intern->position++; 229 | } 230 | /* }}} */ 231 | 232 | /* {{{ proto bool SimpleKafkaClient\Metadata\Collection::valid() 233 | */ 234 | ZEND_METHOD(SimpleKafkaClient_Metadata_Collection, valid) 235 | { 236 | object_intern *intern; 237 | 238 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 239 | ZEND_PARSE_PARAMETERS_END(); 240 | 241 | intern = get_object(getThis()); 242 | if (!intern) { 243 | return; 244 | } 245 | 246 | RETURN_BOOL(intern->position < intern->item_cnt); 247 | } 248 | /* }}} */ 249 | 250 | void kafka_metadata_collection_init(INIT_FUNC_ARGS) 251 | { 252 | zend_class_entry tmpce; 253 | 254 | INIT_NS_CLASS_ENTRY(tmpce, "SimpleKafkaClient\\Metadata", "Collection", class_SimpleKafkaClient_Metadata_Collection_methods); 255 | ce = zend_register_internal_class(&tmpce); 256 | ce->create_object = create_object; 257 | zend_class_implements(ce, 2, zend_ce_countable, zend_ce_iterator); 258 | 259 | handlers = kafka_default_object_handlers; 260 | handlers.get_debug_info = get_debug_info; 261 | handlers.free_obj = 
free_object; 262 | handlers.offset = XtOffsetOf(object_intern, std); 263 | } 264 | 265 | void kafka_metadata_collection_obj_init(zval *return_value, Z_KAFKA_OBJ *zmetadata, const void * items, size_t item_cnt, size_t item_size, kafka_metadata_collection_ctor_t ctor) 266 | { 267 | object_intern *intern; 268 | 269 | if (object_init_ex(return_value, ce) != SUCCESS) { 270 | return; 271 | } 272 | 273 | intern = Z_KAFKA_P(object_intern, return_value); 274 | if (!intern) { 275 | return; 276 | } 277 | 278 | #if PHP_MAJOR_VERSION < 8 279 | ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0); 280 | #endif 281 | intern->items = items; 282 | intern->item_cnt = item_cnt; 283 | intern->item_size = item_size; 284 | intern->ctor = ctor; 285 | intern->position = 0; 286 | } 287 | -------------------------------------------------------------------------------- /metadata_collection.stub.php: -------------------------------------------------------------------------------- 1 | metadata_partition) { 61 | zval_dtor(&intern->zmetadata); 62 | } 63 | 64 | zend_object_std_dtor(&intern->std); 65 | } 66 | /* }}} */ 67 | 68 | static zend_object *create_object(zend_class_entry *class_type) /* {{{ */ 69 | { 70 | zend_object* retval; 71 | object_intern *intern; 72 | 73 | intern = ecalloc(1, sizeof(object_intern)+ zend_object_properties_size(class_type)); 74 | zend_object_std_init(&intern->std, class_type); 75 | object_properties_init(&intern->std, class_type); 76 | 77 | retval = &intern->std; 78 | retval->handlers = &handlers; 79 | 80 | return retval; 81 | } 82 | /* }}} */ 83 | 84 | static object_intern * get_object(zval *zmt) 85 | { 86 | object_intern *omt = Z_KAFKA_P(object_intern, zmt); 87 | 88 | if (!omt->metadata_partition) { 89 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Metadata\\Partition::__construct() has not been called"); 90 | return NULL; 91 | } 92 | 93 | return omt; 94 | } 95 | 96 | static HashTable *get_debug_info(Z_KAFKA_OBJ *object, int *is_temp) /* {{{ */ 97 | { 98 | zval ary; 99 | object_intern *intern; 100 | 101 | *is_temp = 1; 102 | 103 | array_init(&ary); 104 | 105 | intern = kafka_get_debug_object(object_intern, object); 106 | if (!intern) { 107 | return Z_ARRVAL(ary); 108 | } 109 | 110 | add_assoc_long(&ary, "id", intern->metadata_partition->id); 111 | add_assoc_long(&ary, "err", intern->metadata_partition->err); 112 | add_assoc_long(&ary, "leader", intern->metadata_partition->leader); 113 | add_assoc_long(&ary, "replica_cnt", intern->metadata_partition->replica_cnt); 114 | add_assoc_long(&ary, "isr_cnt", intern->metadata_partition->isr_cnt); 115 | 116 | return Z_ARRVAL(ary); 117 | } 118 | /* }}} */ 119 | 120 | /* {{{ proto int SimpleKafkaClient\Metadata\Partition::getId() 121 | Partition id */ 122 | ZEND_METHOD(SimpleKafkaClient_Metadata_Partition, getId) 123 | { 124 | object_intern *intern; 125 | 126 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 127 | ZEND_PARSE_PARAMETERS_END(); 128 | 129 | intern = get_object(getThis()); 130 | if (!intern) { 131 | return; 132 | } 133 | 134 | RETURN_LONG(intern->metadata_partition->id); 135 | } 136 | /* }}} */ 137 | 138 | /* {{{ proto int SimpleKafkaClient\Metadata\Partition::getErrorCode() 139 | Partition error reported by broker */ 140 | ZEND_METHOD(SimpleKafkaClient_Metadata_Partition, getErrorCode) 141 | { 142 | object_intern *intern; 143 | 144 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 145 | ZEND_PARSE_PARAMETERS_END(); 146 | 147 | intern = get_object(getThis()); 148 | if (!intern) { 149 | return; 150 | } 151 | 
152 | RETURN_LONG(intern->metadata_partition->err); 153 | } 154 | /* }}} */ 155 | 156 | /* {{{ proto int SimpleKafkaClient\Metadata\Partition::getLeader() 157 | Leader broker */ 158 | ZEND_METHOD(SimpleKafkaClient_Metadata_Partition, getLeader) 159 | { 160 | object_intern *intern; 161 | 162 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 163 | ZEND_PARSE_PARAMETERS_END(); 164 | 165 | intern = get_object(getThis()); 166 | if (!intern) { 167 | return; 168 | } 169 | 170 | RETURN_LONG(intern->metadata_partition->leader); 171 | } 172 | /* }}} */ 173 | 174 | void int32_ctor(zval *return_value, zval *zmetadata, const void *data) { 175 | ZVAL_LONG(return_value, *(int32_t*)data); 176 | } 177 | 178 | /* {{{ proto array SimpleKafkaClient\Metadata\Partition::getReplicas() 179 | Replica broker ids */ 180 | ZEND_METHOD(SimpleKafkaClient_Metadata_Partition, getReplicas) 181 | { 182 | object_intern *intern; 183 | 184 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 185 | ZEND_PARSE_PARAMETERS_END(); 186 | 187 | intern = get_object(getThis()); 188 | if (!intern) { 189 | return; 190 | } 191 | 192 | kafka_metadata_collection_obj_init(return_value, Z_KAFKA_PROP_OBJ(getThis()), intern->metadata_partition->replicas, intern->metadata_partition->replica_cnt, sizeof(*intern->metadata_partition->replicas), int32_ctor); 193 | } 194 | /* }}} */ 195 | 196 | /* {{{ proto array SimpleKafkaClient\Metadata\Partition::getIsrs() 197 | In-Sync-Replica broker ids */ 198 | ZEND_METHOD(SimpleKafkaClient_Metadata_Partition, getIsrs) 199 | { 200 | object_intern *intern; 201 | 202 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 203 | ZEND_PARSE_PARAMETERS_END(); 204 | 205 | intern = get_object(getThis()); 206 | if (!intern) { 207 | return; 208 | } 209 | 210 | kafka_metadata_collection_obj_init(return_value, Z_KAFKA_PROP_OBJ(getThis()), intern->metadata_partition->isrs, intern->metadata_partition->isr_cnt, sizeof(*intern->metadata_partition->isrs), int32_ctor); 211 | } 212 | /* }}} */ 213 | 214 | void kafka_metadata_partition_init(INIT_FUNC_ARGS) 215 | { 216 | zend_class_entry tmpce; 217 | 218 | INIT_NS_CLASS_ENTRY(tmpce, "SimpleKafkaClient", "Metadata\\Partition", class_SimpleKafkaClient_Metadata_Partition_methods); 219 | ce = zend_register_internal_class(&tmpce); 220 | ce->create_object = create_object; 221 | 222 | handlers = kafka_default_object_handlers; 223 | handlers.get_debug_info = get_debug_info; 224 | handlers.free_obj = free_object; 225 | handlers.offset = XtOffsetOf(object_intern, std); 226 | } 227 | 228 | void kafka_metadata_partition_ctor(zval *return_value, zval *zmetadata, const void *data) 229 | { 230 | rd_kafka_metadata_partition_t *metadata_partition = (rd_kafka_metadata_partition_t*)data; 231 | object_intern *intern; 232 | 233 | if (object_init_ex(return_value, ce) != SUCCESS) { 234 | return; 235 | } 236 | 237 | intern = Z_KAFKA_P(object_intern, return_value); 238 | if (!intern) { 239 | return; 240 | } 241 | 242 | ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0); 243 | intern->metadata_partition = metadata_partition; 244 | } 245 | -------------------------------------------------------------------------------- /metadata_partition.stub.php: -------------------------------------------------------------------------------- 1 | metadata_topic->partitions, intern->metadata_topic->partition_cnt, sizeof(*intern->metadata_topic->partitions), kafka_metadata_partition_ctor); 58 | } 59 | /* }}} */ 60 | 61 | static void free_object(zend_object *object) /* {{{ */ 62 | { 63 | 
object_intern *intern = php_kafka_from_obj(object_intern, object); 64 | 65 | if (intern->metadata_topic) { 66 | zval_dtor(&intern->zmetadata); 67 | } 68 | 69 | zend_object_std_dtor(&intern->std); 70 | } 71 | /* }}} */ 72 | 73 | static zend_object *create_object(zend_class_entry *class_type) /* {{{ */ 74 | { 75 | zend_object* retval; 76 | object_intern *intern; 77 | 78 | intern = ecalloc(1, sizeof(object_intern)+ zend_object_properties_size(class_type)); 79 | zend_object_std_init(&intern->std, class_type); 80 | object_properties_init(&intern->std, class_type); 81 | 82 | retval = &intern->std; 83 | retval->handlers = &handlers; 84 | 85 | return retval; 86 | } 87 | /* }}} */ 88 | 89 | static object_intern * get_object(zval *zmt) 90 | { 91 | object_intern *omt = Z_KAFKA_P(object_intern, zmt); 92 | 93 | if (!omt->metadata_topic) { 94 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Metadata\\Topic::__construct() has not been called"); 95 | return NULL; 96 | } 97 | 98 | return omt; 99 | } 100 | 101 | static HashTable *get_debug_info(Z_KAFKA_OBJ *object, int *is_temp) /* {{{ */ 102 | { 103 | zval ary; 104 | object_intern *intern; 105 | zval partitions; 106 | 107 | *is_temp = 1; 108 | 109 | array_init(&ary); 110 | 111 | intern = kafka_get_debug_object(object_intern, object); 112 | if (!intern) { 113 | return Z_ARRVAL(ary); 114 | } 115 | 116 | add_assoc_string(&ary, "topic", intern->metadata_topic->topic); 117 | 118 | ZVAL_NULL(&partitions); 119 | partitions_collection(&partitions, object, intern); 120 | add_assoc_zval(&ary, "partitions", &partitions); 121 | 122 | add_assoc_long(&ary, "err", intern->metadata_topic->err); 123 | 124 | return Z_ARRVAL(ary); 125 | } 126 | /* }}} */ 127 | 128 | /* {{{ proto string SimpleKafkaClient\MetadataTopic::getName() 129 | Topic name */ 130 | ZEND_METHOD(SimpleKafkaClient_Metadata_Topic, getName) 131 | { 132 | object_intern *intern; 133 | 134 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 135 | ZEND_PARSE_PARAMETERS_END(); 136 | 137 | intern = get_object(getThis()); 138 | if (!intern) { 139 | return; 140 | } 141 | 142 | RETURN_STRING(intern->metadata_topic->topic); 143 | } 144 | /* }}} */ 145 | 146 | /* {{{ proto int SimpleKafkaClient\MetadataTopic::getErrorCode() 147 | Error */ 148 | ZEND_METHOD(SimpleKafkaClient_Metadata_Topic, getErrorCode) 149 | { 150 | object_intern *intern; 151 | 152 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 153 | ZEND_PARSE_PARAMETERS_END(); 154 | 155 | intern = get_object(getThis()); 156 | if (!intern) { 157 | return; 158 | } 159 | 160 | RETURN_LONG(intern->metadata_topic->err); 161 | } 162 | /* }}} */ 163 | 164 | 165 | /* {{{ proto SimpleKafkaClient\Metadata\Collection SimpleKafkaClient\Metadata\Topic::getPartitions() 166 | Partitions */ 167 | ZEND_METHOD(SimpleKafkaClient_Metadata_Topic, getPartitions) 168 | { 169 | object_intern *intern; 170 | 171 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 172 | ZEND_PARSE_PARAMETERS_END(); 173 | 174 | intern = get_object(getThis()); 175 | if (!intern) { 176 | return; 177 | } 178 | 179 | partitions_collection(return_value, Z_KAFKA_PROP_OBJ(getThis()), intern); 180 | } 181 | /* }}} */ 182 | 183 | void kafka_metadata_topic_init(INIT_FUNC_ARGS) 184 | { 185 | zend_class_entry tmpce; 186 | 187 | INIT_NS_CLASS_ENTRY(tmpce, "SimpleKafkaClient\\Metadata", "Topic", class_SimpleKafkaClient_Metadata_Topic_methods); 188 | ce = zend_register_internal_class(&tmpce); 189 | ce->create_object = create_object; 190 | 191 | handlers = 
kafka_default_object_handlers; 192 | handlers.get_debug_info = get_debug_info; 193 | handlers.free_obj = free_object; 194 | handlers.offset = XtOffsetOf(object_intern, std); 195 | } 196 | 197 | void kafka_metadata_topic_ctor(zval *return_value, zval *zmetadata, const void *data) 198 | { 199 | rd_kafka_metadata_topic_t *metadata_topic = (rd_kafka_metadata_topic_t*)data; 200 | object_intern *intern; 201 | 202 | if (object_init_ex(return_value, ce) != SUCCESS) { 203 | return; 204 | } 205 | 206 | intern = Z_KAFKA_P(object_intern, return_value); 207 | if (!intern) { 208 | return; 209 | } 210 | 211 | ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0); 212 | intern->metadata_topic = metadata_topic; 213 | } 214 | -------------------------------------------------------------------------------- /metadata_topic.stub.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | simple_kafka_client 4 | pecl.php.net 5 | Kafka client based on librdkafka 6 | This extension is a librdkafka binding providing a working client for Kafka 7 | 8 | Nick Chiu 9 | nickzh 10 | coding.nikazu@gmail.com 11 | yes 12 | 13 | 2021-08-04 14 | 15 | 16 | 0.1.4 17 | 0.1.0 18 | 19 | 20 | stable 21 | stable 22 | 23 | BSD-3 License 24 | 25 | ## Bugfixes 26 | - fix for PHP 8.1 (#54, @remicollet) 27 | ## Internals 28 | - add all sources to package.xml (#54, @remicollet) 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 7.4.0 107 | 8.99.99 108 | 109 | 110 | 1.4.8 111 | 112 | 113 | 114 | simple_kafka_client 115 | 116 | 117 | 118 | 2021-07-01 119 | 120 | 121 | 0.1.3 122 | 0.1.0 123 | 124 | 125 | stable 126 | stable 127 | 128 | BSD-3 License 129 | 130 | ## Features 131 | - support oauthbearer mechanism (#47, #48, #49) 132 | ## Bugfixes 133 | - fix for PHP 8.1 (#52) 134 | ## Internals 135 | - cleanup and refactor (#43, #44, #45, #46) 136 | 137 | 138 | 139 | 2021-04-12 140 | 141 | 142 | 0.1.2 143 | 0.1.0 144 | 145 | 146 | stable 147 | stable 148 | 149 | BSD-3 License 150 | 151 | ## Internals 152 | - add AppVeyor build (#39, @cmb69) 153 | ## Bugfixes 154 | - version and test fixes (#35, #36, @remicollet) 155 | - fix windows build (#38, #40, @cmb69) 156 | 157 | 158 | 159 | 2021-04-11 160 | 161 | 162 | 0.1.1 163 | 0.1.0 164 | 165 | 166 | stable 167 | stable 168 | 169 | BSD-3 License 170 | 171 | ## Bugfixes 172 | - fix Consumer:assign argument type (#33) 173 | - fix Producer:getTopicHandle return type (#33) 174 | 175 | 176 | 177 | 2021-04-10 178 | 179 | 180 | 0.1.0 181 | 0.1.0 182 | 183 | 184 | stable 185 | stable 186 | 187 | BSD-3 License 188 | 189 | Initial release 190 | 191 | 192 | 193 | 194 | -------------------------------------------------------------------------------- /php_simple_kafka_client_int.h: -------------------------------------------------------------------------------- 1 | /** 2 | * BSD 3-Clause License 3 | * 4 | * Copyright (c) 2016, Arnaud Le Blanc (Author) 5 | * Copyright (c) 2020, Nick Chiu 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * 1. 
Redistributions of source code must retain the above copyright notice, this 12 | * list of conditions and the following disclaimer. 13 | * 14 | * 2. Redistributions in binary form must reproduce the above copyright notice, 15 | * this list of conditions and the following disclaimer in the documentation 16 | * and/or other materials provided with the distribution. 17 | * 18 | * 3. Neither the name of the copyright holder nor the names of its 19 | * contributors may be used to endorse or promote products derived from 20 | * this software without specific prior written permission. 21 | * 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 28 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | */ 33 | 34 | #ifndef PHP_KAFKA_INT_H 35 | #define PHP_KAFKA_INT_H 36 | 37 | #include "librdkafka/rdkafka.h" 38 | 39 | typedef struct _kafka_topic_object { 40 | rd_kafka_topic_t *rkt; 41 | zval zrk; 42 | zend_object std; 43 | } kafka_topic_object; 44 | 45 | typedef struct _kafka_conf_callback { 46 | zend_fcall_info fci; 47 | zend_fcall_info_cache fcc; 48 | } kafka_conf_callback; 49 | 50 | typedef struct _kafka_conf_callbacks { 51 | zval zrk; 52 | kafka_conf_callback *error; 53 | kafka_conf_callback *rebalance; 54 | kafka_conf_callback *dr_msg; 55 | kafka_conf_callback *stats; 56 | kafka_conf_callback *consume; 57 | kafka_conf_callback *offset_commit; 58 | kafka_conf_callback *log; 59 | kafka_conf_callback *oauthbearer_refresh; 60 | } kafka_conf_callbacks; 61 | 62 | typedef struct _kafka_conf_object { 63 | rd_kafka_conf_t *conf; 64 | kafka_conf_callbacks cbs; 65 | zend_object std; 66 | } kafka_conf_object; 67 | 68 | typedef struct _kafka_topic_partition_intern { 69 | char *topic; 70 | int32_t partition; 71 | int64_t offset; 72 | zend_object std; 73 | } kafka_topic_partition_intern; 74 | 75 | typedef struct _kafka_object { 76 | rd_kafka_type_t type; 77 | rd_kafka_t *rk; 78 | kafka_conf_callbacks cbs; 79 | HashTable topics; 80 | zend_object std; 81 | } kafka_object; 82 | 83 | typedef void (*kafka_metadata_collection_ctor_t)(zval *renurn_value, zval *zmetadata, const void *object); 84 | 85 | 86 | #if PHP_MAJOR_VERSION >= 8 87 | 88 | #define Z_KAFKA_OBJ zend_object 89 | 90 | #define Z_KAFKA_PROP_OBJ(object) Z_OBJ_P(object) 91 | 92 | #define kafka_get_debug_object(type, object) php_kafka_from_obj(type, object) 93 | 94 | #else // PHP 7 95 | 96 | #define Z_KAFKA_OBJ zval 97 | 98 | #define Z_KAFKA_PROP_OBJ(object) object 99 | 100 | #define kafka_get_debug_object(type, object) get_object(object) 101 | 102 | #define ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(pass_by_ref, name, type_hint, allow_null, default_value) ZEND_ARG_INFO(pass_by_ref, name) 103 | 104 | #define Z_PARAM_ARRAY_HT_OR_NULL(dest) \ 105 | Z_PARAM_ARRAY_HT_EX(dest, 1, 0) 106 | 107 | #define Z_PARAM_LONG_OR_NULL(dest, is_null) \ 108 | 
Z_PARAM_LONG_EX(dest, is_null, 1, 0) 109 | 110 | #define Z_PARAM_OBJECT_OF_CLASS_OR_NULL(dest, _ce) \ 111 | Z_PARAM_OBJECT_OF_CLASS_EX(dest, _ce, 1, 0) 112 | 113 | #define Z_PARAM_STRING_OR_NULL(dest, dest_len) \ 114 | Z_PARAM_STRING_EX(dest, dest_len, 1, 0) 115 | 116 | #endif 117 | 118 | #ifdef PHP_WIN32 119 | # define PHP_SIMPLE_KAFKA_CLIENT_API __declspec(dllexport) 120 | #elif defined(__GNUC__) && __GNUC__ >= 4 121 | # define PHP_SIMPLE_KAFKA_CLIENT_API __attribute__ ((visibility("default"))) 122 | #else 123 | # define PHP_SIMPLE_KAFKA_CLIENT_API 124 | #endif 125 | 126 | extern zend_class_entry * ce_kafka_conf; 127 | extern zend_class_entry * ce_kafka_consumer; 128 | extern zend_class_entry * ce_kafka_error_exception; 129 | extern zend_class_entry * ce_kafka_exception; 130 | extern zend_class_entry * ce_kafka_producer; 131 | extern zend_class_entry * ce_kafka_message; 132 | extern zend_class_entry * ce_kafka_consumer_topic; 133 | extern zend_class_entry * ce_kafka_producer_topic; 134 | extern zend_class_entry * ce_kafka_topic; 135 | extern zend_class_entry * ce_kafka_topic_partition; 136 | extern zend_module_entry simple_kafka_client_module_entry; 137 | extern zend_object_handlers kafka_default_object_handlers; 138 | 139 | #define Z_KAFKA_P(php_kafka_type, zobject) php_kafka_from_obj(php_kafka_type, Z_OBJ_P(zobject)) 140 | 141 | #define php_kafka_from_obj(php_kafka_type, object) \ 142 | ((php_kafka_type*)((char *)(object) - XtOffsetOf(php_kafka_type, std))) 143 | 144 | #define phpext_kafka_ptr &simple_kafka_client_module_entry 145 | 146 | #define PHP_SIMPLE_KAFKA_CLIENT_VERSION "0.1.4" 147 | 148 | 149 | static inline void kafka_call_function(zend_fcall_info *fci, zend_fcall_info_cache *fci_cache, zval *retval, uint32_t param_count, zval params[]) 150 | { 151 | int local_retval; 152 | zval local_retval_zv; 153 | 154 | if (retval) { 155 | local_retval = 0; 156 | } else { 157 | local_retval = 1; 158 | retval = &local_retval_zv; 159 | } 160 | 161 | fci->retval = retval; 162 | fci->params = params; 163 | fci->param_count = param_count; 164 | 165 | zend_call_function(fci, fci_cache); 166 | 167 | if (local_retval) { 168 | zval_ptr_dtor(retval); 169 | } 170 | } 171 | 172 | static inline zval *kafka_read_property(zend_class_entry *scope, Z_KAFKA_OBJ *object, const char *name, size_t name_length, zend_bool silent) 173 | { 174 | zval rv; 175 | return zend_read_property(scope, object, name, name_length, silent, &rv); 176 | } 177 | 178 | 179 | static inline char *kafka_hash_get_current_key_ex(HashTable *ht, HashPosition *pos) 180 | { 181 | zend_string* key; 182 | zend_ulong index; 183 | 184 | if (zend_hash_get_current_key_ex(ht, &key, &index, pos) == HASH_KEY_IS_STRING) { 185 | return key->val; 186 | } 187 | 188 | return NULL; 189 | } 190 | 191 | void kafka_error_init(); 192 | void create_kafka_error(zval *return_value, const rd_kafka_error_t *error); 193 | void kafka_conf_init(INIT_FUNC_ARGS); 194 | void kafka_conf_callbacks_dtor(kafka_conf_callbacks *cbs); 195 | void kafka_conf_callbacks_copy(kafka_conf_callbacks *to, kafka_conf_callbacks *from); 196 | void kafka_message_init(INIT_FUNC_ARGS); 197 | void kafka_message_new(zval *return_value, const rd_kafka_message_t *message); 198 | void kafka_message_list_to_array(zval *return_value, rd_kafka_message_t **messages, long size); 199 | void kafka_metadata_init(INIT_FUNC_ARGS); 200 | void kafka_metadata_obj_init(zval *return_value, const rd_kafka_metadata_t *metadata); 201 | void kafka_metadata_broker_init(INIT_FUNC_ARGS); 202 | void 
kafka_metadata_broker_ctor(zval *return_value, zval *zmetadata, const void *metadata_broker); 203 | void kafka_metadata_collection_init(INIT_FUNC_ARGS); 204 | void kafka_metadata_collection_obj_init(zval *return_value, Z_KAFKA_OBJ *zmetadata, const void * items, size_t item_cnt, size_t item_size, kafka_metadata_collection_ctor_t ctor); 205 | void kafka_metadata_partition_init(INIT_FUNC_ARGS); 206 | void kafka_metadata_partition_ctor(zval *return_value, zval *zmetadata, const void *metadata_partition); 207 | void kafka_metadata_topic_init(INIT_FUNC_ARGS); 208 | void kafka_metadata_topic_ctor(zval *return_value, zval *zmetadata, const void *metadata_topic); 209 | void kafka_topic_init(INIT_FUNC_ARGS); 210 | void kafka_metadata_topic_partition_init(INIT_FUNC_ARGS); 211 | void kafka_topic_partition_init(zval *z, char *topic, int32_t partition, int64_t offset); 212 | void kafka_topic_partition_list_to_array(zval *return_value, rd_kafka_topic_partition_list_t *list); 213 | 214 | kafka_topic_partition_intern * get_topic_partition_object(zval *z); 215 | rd_kafka_topic_partition_list_t * array_arg_to_kafka_topic_partition_list(int argnum, HashTable *ary); 216 | kafka_topic_object * get_kafka_topic_object(zval *zrkt); 217 | kafka_conf_object * get_kafka_conf_object(zval *zconf); 218 | kafka_object * get_kafka_object(zval *zrk); 219 | 220 | ZEND_METHOD(Kafka, __construct); 221 | 222 | #endif /* PHP_KAFKA_INT_H */ 223 | -------------------------------------------------------------------------------- /producer.c: -------------------------------------------------------------------------------- 1 | /** 2 | * BSD 3-Clause License 3 | * 4 | * Copyright (c) 2016, Arnaud Le Blanc (Author) 5 | * Copyright (c) 2020, Nick Chiu 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * 1. Redistributions of source code must retain the above copyright notice, this 12 | * list of conditions and the following disclaimer. 13 | * 14 | * 2. Redistributions in binary form must reproduce the above copyright notice, 15 | * this list of conditions and the following disclaimer in the documentation 16 | * and/or other materials provided with the distribution. 17 | * 18 | * 3. Neither the name of the copyright holder nor the names of its 19 | * contributors may be used to endorse or promote products derived from 20 | * this software without specific prior written permission. 21 | * 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 28 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | */ 33 | 34 | #ifdef HAVE_CONFIG_H 35 | #include "config.h" 36 | #endif 37 | 38 | #include "php.h" 39 | #include "php_simple_kafka_client_int.h" 40 | #include "Zend/zend_exceptions.h" 41 | #include "producer_arginfo.h" 42 | 43 | zend_class_entry * ce_kafka_producer; 44 | 45 | static void kafka_topic_object_pre_free(kafka_topic_object ** pp) { 46 | kafka_topic_object *intern = *pp; 47 | rd_kafka_topic_destroy(intern->rkt); 48 | intern->rkt = NULL; 49 | zval_ptr_dtor(&intern->zrk); 50 | } 51 | 52 | static void kafka_init(zval *this_ptr, rd_kafka_type_t type, zval *zconf) /* {{{ */ 53 | { 54 | char errstr[512]; 55 | rd_kafka_t *rk; 56 | kafka_object *intern; 57 | kafka_conf_object *conf_intern; 58 | rd_kafka_conf_t *conf = NULL; 59 | 60 | intern = Z_KAFKA_P(kafka_object, this_ptr); 61 | intern->type = type; 62 | 63 | if (zconf) { 64 | conf_intern = get_kafka_conf_object(zconf); 65 | if (conf_intern) { 66 | conf = rd_kafka_conf_dup(conf_intern->conf); 67 | kafka_conf_callbacks_copy(&intern->cbs, &conf_intern->cbs); 68 | intern->cbs.zrk = *this_ptr; 69 | rd_kafka_conf_set_opaque(conf, &intern->cbs); 70 | } 71 | } 72 | 73 | rk = rd_kafka_new(type, conf, errstr, sizeof(errstr)); 74 | 75 | if (rk == NULL) { 76 | zend_throw_exception(ce_kafka_exception, errstr, 0); 77 | return; 78 | } 79 | 80 | if (intern->cbs.log) { 81 | //rd_kafka_set_log_queue(rk, NULL); 82 | } 83 | 84 | intern->rk = rk; 85 | 86 | zend_hash_init(&intern->topics, 0, NULL, (dtor_func_t)kafka_topic_object_pre_free, 0); 87 | } 88 | /* }}} */ 89 | 90 | /* {{{ proto SimpleKafkaClient\Producer::__construct([SimpleKafkaClient\Configuration $configuration]) */ 91 | ZEND_METHOD(SimpleKafkaClient_Producer, __construct) 92 | { 93 | zval *zconf = NULL; 94 | 95 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 96 | Z_PARAM_OBJECT_OF_CLASS(zconf, ce_kafka_conf) 97 | ZEND_PARSE_PARAMETERS_END(); 98 | 99 | kafka_init(getThis(), RD_KAFKA_PRODUCER, zconf); 100 | } 101 | /* }}} */ 102 | 103 | /* {{{ proto int SimpleKafkaClient\Producer::flush(int $timeout_ms) 104 | Wait until all outstanding produce requests, et.al, are completed. 
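   Example (a minimal sketch; the topic name is a placeholder):

     $producer = new SimpleKafkaClient\Producer($conf);
     $topic = $producer->getTopicHandle('example-topic');
     $topic->produce(RD_KAFKA_PARTITION_UA, 0, 'payload');
     $producer->poll(0);              // serve delivery report callbacks
     $err = $producer->flush(10000);  // 0 (RD_KAFKA_RESP_ERR_NO_ERROR) once the queue drained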
*/ 105 | ZEND_METHOD(SimpleKafkaClient_Producer, flush) 106 | { 107 | kafka_object *intern; 108 | zend_long timeout_ms; 109 | 110 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 111 | Z_PARAM_LONG(timeout_ms) 112 | ZEND_PARSE_PARAMETERS_END(); 113 | 114 | intern = get_kafka_object(getThis()); 115 | if (!intern) { 116 | return; 117 | } 118 | 119 | RETURN_LONG(rd_kafka_flush(intern->rk, timeout_ms)); 120 | } 121 | /* }}} */ 122 | 123 | /* {{{ proto int SimpleKafkaClient\Producer::poll(int $timeoutMs) 124 | Polls the provided kafka handle for events */ 125 | ZEND_METHOD(SimpleKafkaClient_Producer, poll) 126 | { 127 | kafka_object *intern; 128 | zend_long timeout_ms; 129 | 130 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 131 | Z_PARAM_LONG(timeout_ms) 132 | ZEND_PARSE_PARAMETERS_END(); 133 | 134 | intern = get_kafka_object(getThis()); 135 | if (!intern) { 136 | return; 137 | } 138 | 139 | RETURN_LONG(rd_kafka_poll(intern->rk, timeout_ms)); 140 | } 141 | /* }}} */ 142 | 143 | /* {{{ proto int SimpleKafkaClient\Producer::purge(int $purge_flags) 144 | Purge messages that are in queue or in flight */ 145 | ZEND_METHOD(SimpleKafkaClient_Producer, purge) 146 | { 147 | kafka_object *intern; 148 | zend_long purge_flags; 149 | 150 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 151 | Z_PARAM_LONG(purge_flags) 152 | ZEND_PARSE_PARAMETERS_END(); 153 | 154 | intern = get_kafka_object(getThis()); 155 | if (!intern) { 156 | return; 157 | } 158 | 159 | RETURN_LONG(rd_kafka_purge(intern->rk, purge_flags)); 160 | } 161 | /* }}} */ 162 | 163 | 164 | /* {{{ proto SimpleKafkaClient\ProducerTopic SimpleKafkaClient\Producer::getTopicHandle(string $topic) 165 | Returns an SimpleKafkaClient\ProducerTopic object */ 166 | ZEND_METHOD(SimpleKafkaClient_Producer, getTopicHandle) 167 | { 168 | char *topic; 169 | size_t topic_len; 170 | rd_kafka_topic_t *rkt; 171 | kafka_object *intern; 172 | kafka_topic_object *topic_intern; 173 | 174 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 175 | Z_PARAM_STRING(topic, topic_len) 176 | ZEND_PARSE_PARAMETERS_END(); 177 | 178 | intern = get_kafka_object(getThis()); 179 | if (!intern) { 180 | return; 181 | } 182 | 183 | rkt = rd_kafka_topic_new(intern->rk, topic, NULL); 184 | 185 | if (!rkt) { 186 | return; 187 | } 188 | 189 | if (object_init_ex(return_value, ce_kafka_producer_topic) != SUCCESS) { 190 | return; 191 | } 192 | 193 | topic_intern = Z_KAFKA_P(kafka_topic_object, return_value); 194 | if (!topic_intern) { 195 | return; 196 | } 197 | 198 | topic_intern->rkt = rkt; 199 | topic_intern->zrk = *getThis(); 200 | 201 | Z_ADDREF_P(&topic_intern->zrk); 202 | 203 | zend_hash_index_add_ptr(&intern->topics, (zend_ulong)topic_intern, topic_intern); 204 | } 205 | /* }}} */ 206 | 207 | /* {{{ proto int SimpleKafkaClient\Producer::initTransactions(int timeout_ms) 208 | Initializes transactions, needs to be done before producing and starting a transaction */ 209 | ZEND_METHOD(SimpleKafkaClient_Producer, initTransactions) 210 | { 211 | kafka_object *intern; 212 | zend_long timeout_ms; 213 | const rd_kafka_error_t *error; 214 | 215 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 216 | Z_PARAM_LONG(timeout_ms) 217 | ZEND_PARSE_PARAMETERS_END(); 218 | 219 | intern = get_kafka_object(getThis()); 220 | if (!intern) { 221 | return; 222 | } 223 | 224 | error = rd_kafka_init_transactions(intern->rk, timeout_ms); 225 | 226 | if (NULL == error) { 227 | return; 228 | } 229 | 230 | create_kafka_error(return_value, error); 
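	/*
	 * create_kafka_error() fills return_value with a
	 * SimpleKafkaClient\KafkaErrorException built from the librdkafka error;
	 * throwing that object is how every transactional failure is reported
	 * (the same pattern repeats in begin/commit/abortTransaction below).
	 */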
231 | zend_throw_exception_object(return_value); 232 | } 233 | /* }}} */ 234 | 235 | /* {{{ proto int SimpleKafkaClient\Producer::beginTransaction() 236 | Start a transaction */ 237 | ZEND_METHOD(SimpleKafkaClient_Producer, beginTransaction) 238 | { 239 | kafka_object *intern; 240 | const rd_kafka_error_t *error; 241 | 242 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 243 | ZEND_PARSE_PARAMETERS_END(); 244 | 245 | intern = get_kafka_object(getThis()); 246 | if (!intern) { 247 | return; 248 | } 249 | 250 | error = rd_kafka_begin_transaction(intern->rk); 251 | 252 | if (NULL == error) { 253 | return; 254 | } 255 | 256 | create_kafka_error(return_value, error); 257 | zend_throw_exception_object(return_value); 258 | } 259 | /* }}} */ 260 | 261 | /* {{{ proto int SimpleKafkaClient\Producer::commitTransaction(int timeout_ms) 262 | Commit a transaction */ 263 | ZEND_METHOD(SimpleKafkaClient_Producer, commitTransaction) 264 | { 265 | kafka_object *intern; 266 | zend_long timeout_ms; 267 | const rd_kafka_error_t *error; 268 | 269 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 270 | Z_PARAM_LONG(timeout_ms) 271 | ZEND_PARSE_PARAMETERS_END(); 272 | 273 | intern = get_kafka_object(getThis()); 274 | if (!intern) { 275 | return; 276 | } 277 | 278 | error = rd_kafka_commit_transaction(intern->rk, timeout_ms); 279 | 280 | if (NULL == error) { 281 | return; 282 | } 283 | 284 | create_kafka_error(return_value, error); 285 | zend_throw_exception_object(return_value); 286 | } 287 | /* }}} */ 288 | 289 | /* {{{ proto int SimpleKafkaClient\Producer::abortTransaction(int timeout_ms) 290 | Commit a transaction */ 291 | ZEND_METHOD(SimpleKafkaClient_Producer, abortTransaction) 292 | { 293 | kafka_object *intern; 294 | zend_long timeout_ms; 295 | const rd_kafka_error_t *error; 296 | 297 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 298 | Z_PARAM_LONG(timeout_ms) 299 | ZEND_PARSE_PARAMETERS_END(); 300 | 301 | intern = get_kafka_object(getThis()); 302 | if (!intern) { 303 | return; 304 | } 305 | 306 | error = rd_kafka_abort_transaction(intern->rk, timeout_ms); 307 | 308 | if (NULL == error) { 309 | return; 310 | } 311 | 312 | create_kafka_error(return_value, error); 313 | zend_throw_exception_object(return_value); 314 | } 315 | /* }}} */ 316 | -------------------------------------------------------------------------------- /producer.stub.php: -------------------------------------------------------------------------------- 1 | rk) { 70 | if (RD_KAFKA_CONSUMER == intern->type) { 71 | rd_kafka_resp_err_t err; 72 | 73 | err = rd_kafka_consumer_close(intern->rk); 74 | 75 | if (err) { 76 | php_error(E_WARNING, "rd_kafka_consumer_close failed: %s", rd_kafka_err2str(err)); 77 | } 78 | } else if (RD_KAFKA_PRODUCER == intern->type) { 79 | zend_hash_destroy(&intern->topics); 80 | } 81 | 82 | rd_kafka_destroy(intern->rk); 83 | intern->rk = NULL; 84 | } 85 | 86 | kafka_conf_callbacks_dtor(&intern->cbs); 87 | 88 | zend_object_std_dtor(&intern->std); 89 | } 90 | /* }}} */ 91 | 92 | static zend_object *kafka_new(zend_class_entry *class_type) /* {{{ */ 93 | { 94 | zend_object* retval; 95 | kafka_object *intern; 96 | 97 | intern = ecalloc(1, sizeof(kafka_object)+ zend_object_properties_size(class_type)); 98 | zend_object_std_init(&intern->std, class_type); 99 | object_properties_init(&intern->std, class_type); 100 | 101 | retval = &intern->std; 102 | retval->handlers = &kafka_object_handlers; 103 | 104 | return retval; 105 | } 106 | 107 | kafka_object * get_kafka_object(zval *zrk) 
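/* Returns the backing kafka_object, or throws and returns NULL if __construct() never ran (rk is still NULL). */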
108 | { 109 | kafka_object *ork = Z_KAFKA_P(kafka_object, zrk); 110 | 111 | if (!ork->rk) { 112 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Kafka::__construct() has not been called"); 113 | return NULL; 114 | } 115 | 116 | return ork; 117 | } 118 | 119 | /* {{{ private constructor */ 120 | ZEND_METHOD(Kafka, __construct) 121 | { 122 | zend_throw_exception(NULL, "Private constructor", 0); 123 | return; 124 | } 125 | /* }}} */ 126 | 127 | /* {{{ proto SimpleKafkaClient\Metadata::getMetadata(bool $all_topics, int $timeout_ms, SimpleKafkaClient\Topic $topic) 128 | Request Metadata from broker */ 129 | ZEND_METHOD(SimpleKafkaClient_Kafka, getMetadata) 130 | { 131 | zend_bool all_topics; 132 | zval *only_zrkt = NULL; 133 | zend_long timeout_ms; 134 | rd_kafka_resp_err_t err; 135 | kafka_object *intern; 136 | const rd_kafka_metadata_t *metadata; 137 | kafka_topic_object *only_orkt = NULL; 138 | 139 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 2, 3) 140 | Z_PARAM_BOOL(all_topics) 141 | Z_PARAM_LONG(timeout_ms) 142 | Z_PARAM_OPTIONAL 143 | Z_PARAM_OBJECT_OF_CLASS_OR_NULL(only_zrkt, ce_kafka_topic) 144 | ZEND_PARSE_PARAMETERS_END(); 145 | 146 | intern = get_kafka_object(getThis()); 147 | if (!intern) { 148 | return; 149 | } 150 | 151 | if (only_zrkt) { 152 | only_orkt = get_kafka_topic_object(only_zrkt); 153 | if (!only_orkt) { 154 | return; 155 | } 156 | } 157 | 158 | err = rd_kafka_metadata(intern->rk, all_topics, only_orkt ? only_orkt->rkt : NULL, &metadata, timeout_ms); 159 | 160 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 161 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 162 | return; 163 | } 164 | 165 | kafka_metadata_obj_init(return_value, metadata); 166 | } 167 | /* }}} */ 168 | 169 | /* {{{ proto int SimpleKafkaClient\Kafka::getOutQLen() 170 | Returns the current out queue length */ 171 | ZEND_METHOD(SimpleKafkaClient_Kafka, getOutQLen) 172 | { 173 | kafka_object *intern; 174 | 175 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 176 | ZEND_PARSE_PARAMETERS_END(); 177 | 178 | intern = get_kafka_object(getThis()); 179 | if (!intern) { 180 | return; 181 | } 182 | 183 | RETURN_LONG(rd_kafka_outq_len(intern->rk)); 184 | } 185 | /* }}} */ 186 | 187 | /* {{{ proto void SimpleKafkaClient\Kafka::queryWatermarkOffsets(string $topic, int $partition, int &$low, int &$high, int $timeout_ms) 188 | Query broker for low (oldest/beginning) or high (newest/end) offsets for partition */ 189 | ZEND_METHOD(SimpleKafkaClient_Kafka, queryWatermarkOffsets) 190 | { 191 | kafka_object *intern; 192 | char *topic; 193 | size_t topic_length; 194 | long low, high; 195 | zend_long partition, timeout_ms; 196 | zval *lowResult, *highResult; 197 | rd_kafka_resp_err_t err; 198 | 199 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 5, 5) 200 | Z_PARAM_STRING(topic, topic_length) 201 | Z_PARAM_LONG(partition) 202 | Z_PARAM_ZVAL(lowResult) 203 | Z_PARAM_ZVAL(highResult) 204 | Z_PARAM_LONG(timeout_ms) 205 | ZEND_PARSE_PARAMETERS_END(); 206 | 207 | ZVAL_DEREF(lowResult); 208 | ZVAL_DEREF(highResult); 209 | 210 | intern = get_kafka_object(getThis()); 211 | if (!intern) { 212 | return; 213 | } 214 | 215 | err = rd_kafka_query_watermark_offsets(intern->rk, topic, partition, &low, &high, timeout_ms); 216 | 217 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 218 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 219 | return; 220 | } 221 | 222 | ZVAL_LONG(lowResult, low); 223 | ZVAL_LONG(highResult, high); 224 | } 225 | /* }}} */ 226 | 227 | /* 
{{{ proto void SimpleKafkaClient\Kafka::offsetsForTimes(array $topicPartitions, int $timeout_ms) 228 | Look up the offsets for the given partitions by timestamp. */ 229 | ZEND_METHOD(SimpleKafkaClient_Kafka, offsetsForTimes) 230 | { 231 | HashTable *htopars = NULL; 232 | kafka_object *intern; 233 | rd_kafka_topic_partition_list_t *topicPartitions; 234 | zend_long timeout_ms; 235 | rd_kafka_resp_err_t err; 236 | 237 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 2, 2) 238 | Z_PARAM_ARRAY_HT(htopars) 239 | Z_PARAM_LONG(timeout_ms) 240 | ZEND_PARSE_PARAMETERS_END(); 241 | 242 | intern = get_kafka_object(getThis()); 243 | if (!intern) { 244 | return; 245 | } 246 | 247 | topicPartitions = array_arg_to_kafka_topic_partition_list(1, htopars); 248 | if (!topicPartitions) { 249 | return; 250 | } 251 | 252 | err = rd_kafka_offsets_for_times(intern->rk, topicPartitions, timeout_ms); 253 | 254 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 255 | rd_kafka_topic_partition_list_destroy(topicPartitions); 256 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 257 | return; 258 | } 259 | kafka_topic_partition_list_to_array(return_value, topicPartitions); 260 | rd_kafka_topic_partition_list_destroy(topicPartitions); 261 | } 262 | /* }}} */ 263 | 264 | /* {{{ proto void SimpleKafkaClient\Kafka::setOAuthBearerTokenFailure(string $errorString) 265 | The token refresh callback or event handler should invoke this method upon failure. */ 266 | ZEND_METHOD(SimpleKafkaClient_Kafka, setOAuthBearerTokenFailure) 267 | { 268 | char *error_string; 269 | size_t error_string_len; 270 | kafka_object *intern; 271 | rd_kafka_resp_err_t err; 272 | 273 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 274 | Z_PARAM_STRING(error_string, error_string_len) 275 | ZEND_PARSE_PARAMETERS_END(); 276 | 277 | intern = get_kafka_object(getThis()); 278 | if (!intern) { 279 | return; 280 | } 281 | 282 | err = rd_kafka_oauthbearer_set_token_failure(intern->rk, error_string); 283 | 284 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 285 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 286 | return; 287 | } 288 | } 289 | /* }}} */ 290 | 291 | /* {{{ proto void SimpleKafkaClient\Kafka::setOAuthBearerToken(string $token, int $lifetimeMs, string $principalName, ?array $extensions = null) 292 | Set SASL/OAUTHBEARER token and metadata. 
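   Example (a minimal refresh-callback sketch; token, lifetime and principal
   are placeholders, not values provided by the extension):

     $conf->setOAuthBearerTokenRefreshCb(function ($kafka, $oauthbearerConfig) {
         // fetch a real token from your identity provider here
         $kafka->setOAuthBearerToken(
             'token-value',
             (time() + 3600) * 1000,   // expiry as absolute unix time in milliseconds
             'principal-name',
             ['extensionKey' => 'extensionValue']
         );
     });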
*/ 293 | ZEND_METHOD(SimpleKafkaClient_Kafka, setOAuthBearerToken) 294 | { 295 | zend_long lifetime_ms; 296 | const char **extensions = NULL; 297 | char *header_key, *header_value, *token, *principal_name, *errstr = NULL; 298 | size_t token_len, principal_name_len, errstr_size = 0, extension_size = 0; 299 | kafka_object *intern; 300 | rd_kafka_resp_err_t err; 301 | HashTable *ht_extensions = NULL; 302 | HashPosition extensionsPos; 303 | zval *z_header_value; 304 | 305 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 3, 4) 306 | Z_PARAM_STRING(token, token_len) 307 | Z_PARAM_LONG(lifetime_ms) 308 | Z_PARAM_STRING(principal_name, principal_name_len) 309 | Z_PARAM_OPTIONAL 310 | Z_PARAM_ARRAY_HT_OR_NULL(ht_extensions) 311 | ZEND_PARSE_PARAMETERS_END(); 312 | 313 | intern = get_kafka_object(getThis()); 314 | if (!intern) { 315 | return; 316 | } 317 | 318 | if (ht_extensions) { 319 | for (zend_hash_internal_pointer_reset_ex(ht_extensions, &extensionsPos); 320 | (z_header_value = zend_hash_get_current_data_ex(ht_extensions, &extensionsPos)) != NULL && 321 | (header_key = kafka_hash_get_current_key_ex(ht_extensions, &extensionsPos)) != NULL; 322 | zend_hash_move_forward_ex(ht_extensions, &extensionsPos)) { 323 | convert_to_string_ex(z_header_value); 324 | extensions = realloc(extensions, (extension_size + 1) * sizeof (header_key)); 325 | extensions[extension_size] = header_key; 326 | header_value = Z_STRVAL_P(z_header_value); 327 | extensions = realloc(extensions, (extension_size + 2) * sizeof (header_value)); 328 | extensions[extension_size+1] = Z_STRVAL_P(z_header_value); 329 | extension_size+=2; 330 | } 331 | } 332 | 333 | err = rd_kafka_oauthbearer_set_token(intern->rk, token, lifetime_ms, principal_name, extensions, extension_size, errstr, errstr_size); 334 | 335 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 336 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 337 | return; 338 | } 339 | 340 | free(extensions); 341 | } 342 | /* }}} */ 343 | 344 | #define COPY_CONSTANT(name) \ 345 | REGISTER_LONG_CONSTANT(#name, name, CONST_CS | CONST_PERSISTENT) 346 | 347 | void register_err_constants(INIT_FUNC_ARGS) /* {{{ */ 348 | { 349 | const struct rd_kafka_err_desc *errdescs; 350 | size_t cnt; 351 | size_t i; 352 | char buf[128]; 353 | 354 | rd_kafka_get_err_descs(&errdescs, &cnt); 355 | 356 | for (i = 0; i < cnt; i++) { 357 | const struct rd_kafka_err_desc *desc = &errdescs[i]; 358 | int len; 359 | 360 | if (!desc->name) { 361 | continue; 362 | } 363 | 364 | len = snprintf(buf, sizeof(buf), "RD_KAFKA_RESP_ERR_%s", desc->name); 365 | if ((size_t)len >= sizeof(buf)) { 366 | len = sizeof(buf)-1; 367 | } 368 | 369 | zend_register_long_constant(buf, len, desc->code, CONST_CS | CONST_PERSISTENT, module_number); 370 | } 371 | } /* }}} */ 372 | 373 | /* {{{ PHP_MINIT_FUNCTION 374 | */ 375 | PHP_MINIT_FUNCTION(simple_kafka_client) 376 | { 377 | COPY_CONSTANT(RD_KAFKA_OFFSET_BEGINNING); 378 | COPY_CONSTANT(RD_KAFKA_OFFSET_END); 379 | COPY_CONSTANT(RD_KAFKA_OFFSET_STORED); 380 | COPY_CONSTANT(RD_KAFKA_PARTITION_UA); 381 | COPY_CONSTANT(RD_KAFKA_MSG_F_BLOCK); 382 | COPY_CONSTANT(RD_KAFKA_PURGE_F_QUEUE); 383 | COPY_CONSTANT(RD_KAFKA_PURGE_F_INFLIGHT); 384 | COPY_CONSTANT(RD_KAFKA_PURGE_F_NON_BLOCKING); 385 | REGISTER_LONG_CONSTANT("RD_KAFKA_VERSION", rd_kafka_version(), CONST_CS | CONST_PERSISTENT); 386 | REGISTER_LONG_CONSTANT("RD_KAFKA_BUILD_VERSION", RD_KAFKA_VERSION, CONST_CS | CONST_PERSISTENT); 387 | 388 | register_err_constants(INIT_FUNC_ARGS_PASSTHRU); 389 | 390 | 
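	/* Configuration-result and log constants, exposed to userland as PHP constants. */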
COPY_CONSTANT(RD_KAFKA_CONF_UNKNOWN); 391 | COPY_CONSTANT(RD_KAFKA_CONF_INVALID); 392 | COPY_CONSTANT(RD_KAFKA_CONF_OK); 393 | 394 | REGISTER_LONG_CONSTANT("RD_KAFKA_LOG_PRINT", RD_KAFKA_LOG_PRINT, CONST_CS | CONST_PERSISTENT); 395 | REGISTER_LONG_CONSTANT("RD_KAFKA_LOG_SYSLOG", RD_KAFKA_LOG_SYSLOG, CONST_CS | CONST_PERSISTENT); 396 | REGISTER_LONG_CONSTANT("RD_KAFKA_LOG_SYSLOG_PRINT", RD_KAFKA_LOG_SYSLOG_PRINT, CONST_CS | CONST_PERSISTENT); 397 | zend_class_entry ce; 398 | 399 | memcpy(&kafka_default_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); 400 | kafka_default_object_handlers.clone_obj = NULL; 401 | 402 | kafka_object_handlers = kafka_default_object_handlers; 403 | kafka_object_handlers.free_obj = kafka_free; 404 | kafka_object_handlers.offset = XtOffsetOf(kafka_object, std); 405 | 406 | INIT_CLASS_ENTRY(ce, "SimpleKafkaClient", class_SimpleKafkaClient_Kafka_methods); 407 | ce_kafka = zend_register_internal_class(&ce); 408 | ce_kafka->ce_flags |= ZEND_ACC_EXPLICIT_ABSTRACT_CLASS; 409 | ce_kafka->create_object = kafka_new; 410 | 411 | INIT_NS_CLASS_ENTRY(ce, "SimpleKafkaClient", "Producer", class_SimpleKafkaClient_Producer_methods); 412 | ce_kafka_producer = zend_register_internal_class_ex(&ce, ce_kafka); 413 | 414 | INIT_NS_CLASS_ENTRY(ce, "SimpleKafkaClient", "Consumer", class_SimpleKafkaClient_Consumer_methods); 415 | ce_kafka_consumer = zend_register_internal_class_ex(&ce, ce_kafka); 416 | ce_kafka_consumer->create_object = kafka_new; 417 | 418 | kafka_conf_init(INIT_FUNC_ARGS_PASSTHRU); 419 | kafka_error_init(); 420 | kafka_message_init(INIT_FUNC_ARGS_PASSTHRU); 421 | kafka_metadata_init(INIT_FUNC_ARGS_PASSTHRU); 422 | kafka_metadata_topic_partition_init(INIT_FUNC_ARGS_PASSTHRU); 423 | kafka_topic_init(INIT_FUNC_ARGS_PASSTHRU); 424 | 425 | return SUCCESS; 426 | } 427 | /* }}} */ 428 | 429 | /* {{{ PHP_MINFO_FUNCTION 430 | */ 431 | PHP_MINFO_FUNCTION(simple_kafka_client) 432 | { 433 | char *rd_kafka_version; 434 | 435 | php_info_print_table_start(); 436 | php_info_print_table_row(2, "kafka support", "enabled"); 437 | 438 | php_info_print_table_row(2, "version", PHP_SIMPLE_KAFKA_CLIENT_VERSION); 439 | php_info_print_table_row(2, "build date", __DATE__ " " __TIME__); 440 | 441 | spprintf( 442 | &rd_kafka_version, 443 | 0, 444 | "%u.%u.%u.%u", 445 | (RD_KAFKA_VERSION & 0xFF000000) >> 24, 446 | (RD_KAFKA_VERSION & 0x00FF0000) >> 16, 447 | (RD_KAFKA_VERSION & 0x0000FF00) >> 8, 448 | (RD_KAFKA_VERSION & 0x000000FF) 449 | ); 450 | 451 | php_info_print_table_row(2, "librdkafka version (runtime)", rd_kafka_version_str()); 452 | php_info_print_table_row(2, "librdkafka version (build)", rd_kafka_version); 453 | 454 | 455 | efree(rd_kafka_version); 456 | 457 | php_info_print_table_end(); 458 | } 459 | /* }}} */ 460 | 461 | /* {{{ kafka_client_module_entry 462 | */ 463 | zend_module_entry simple_kafka_client_module_entry = { 464 | STANDARD_MODULE_HEADER, 465 | "simple_kafka_client", 466 | ext_functions, 467 | PHP_MINIT(simple_kafka_client), 468 | NULL, 469 | NULL, 470 | NULL, 471 | PHP_MINFO(simple_kafka_client), 472 | PHP_SIMPLE_KAFKA_CLIENT_VERSION, 473 | STANDARD_MODULE_PROPERTIES 474 | }; 475 | /* }}} */ 476 | 477 | #ifdef COMPILE_DL_SIMPLE_KAFKA_CLIENT 478 | ZEND_GET_MODULE(simple_kafka_client) 479 | #endif 480 | -------------------------------------------------------------------------------- /simple_kafka_client.stub.php: -------------------------------------------------------------------------------- 1 | set('metadata.broker.list', 
getenv('TEST_KAFKA_BROKERS')); 12 | 13 | $topicName = sprintf('test_kafka_%s', uniqid()); 14 | 15 | $producer = new SimpleKafkaClient\Producer($conf); 16 | $topic = $producer->getTopicHandle($topicName); 17 | 18 | $topic->produce(0, 0, NULL, 'message_key_1'); 19 | 20 | $producer->flush(10000); 21 | 22 | $conf->set('group.id','test'); 23 | $conf->set('auto.offset.reset','earliest'); 24 | $conf->setErrorCb(function ($producer, $errorCode, $errstr) { 25 | // non fatal errors are retried by librdkafka 26 | if (RD_KAFKA_RESP_ERR__FATAL !== $errorCode) { 27 | return; 28 | } 29 | 30 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 31 | exit; 32 | }); 33 | $consumer = new SimpleKafkaClient\Consumer($conf); 34 | 35 | $consumer->subscribe([$topicName]); 36 | 37 | while (true) { 38 | $message = $consumer->consume(1000); 39 | if ($message === null) { 40 | continue; 41 | } 42 | 43 | if (RD_KAFKA_RESP_ERR_NO_ERROR === $message->err) { 44 | var_dump($message->payload); 45 | var_dump($message->key); 46 | break; 47 | } 48 | } 49 | 50 | --EXPECTF-- 51 | NULL 52 | string(13) "message_key_1" 53 | -------------------------------------------------------------------------------- /tests/allow_null_payload_and_key.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Allow null payload 3 | --SKIPIF-- 4 | set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 12 | 13 | $topicName = sprintf('test_kafka_%s', uniqid()); 14 | 15 | $producer = new SimpleKafkaClient\Producer($conf); 16 | $topic = $producer->getTopicHandle($topicName); 17 | 18 | $topic->produce(0, 0); 19 | 20 | $producer->flush(10000); 21 | 22 | $conf->set('group.id','test'); 23 | $conf->set('auto.offset.reset','earliest'); 24 | $conf->setErrorCb(function ($producer, $errorCode, $errstr) { 25 | // non fatal errors are retried by librdkafka 26 | if (RD_KAFKA_RESP_ERR__FATAL !== $errorCode) { 27 | return; 28 | } 29 | 30 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 31 | exit; 32 | }); 33 | $consumer = new SimpleKafkaClient\Consumer($conf); 34 | 35 | $consumer->subscribe([$topicName]); 36 | 37 | while (true) { 38 | $message = $consumer->consume(1000); 39 | if ($message === null) { 40 | continue; 41 | } 42 | 43 | if (RD_KAFKA_RESP_ERR_NO_ERROR === $message->err) { 44 | var_dump($message->payload); 45 | var_dump($message->key); 46 | break; 47 | } 48 | } 49 | 50 | --EXPECTF-- 51 | NULL 52 | NULL 53 | -------------------------------------------------------------------------------- /tests/conf.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | SimpleKafkaClient\Configuration 3 | --FILE-- 4 | set("client.id", "acme"); 10 | 11 | echo "Setting an integer property\n"; 12 | $conf->set("message.max.bytes", 1 << 20); 13 | 14 | echo "Setting a boolean property\n"; 15 | $conf->set("topic.metadata.refresh.sparse", "true"); 16 | 17 | echo "Setting a boolean property to an invalid value\n"; 18 | try { 19 | $conf->set("topic.metadata.refresh.sparse", "xx"); 20 | } catch(Exception $e) { 21 | printf("Caught a %s: %s\n", get_class($e), $e->getMessage()); 22 | } 23 | 24 | echo "Setting an invalid property\n"; 25 | try { 26 | $conf->set("invalid", "xx"); 27 | } catch(Exception $e) { 28 | printf("Caught a %s: %s\n", get_class($e), $e->getMessage()); 29 | } 30 | 31 | echo "Setting error callback\n"; 32 | $conf->setErrorCb(function () { }); 33 | $dump = $conf->dump(); 34 | var_dump(isset($dump["error_cb"])); 35 | 36 | echo "Setting dr_msg callback\n"; 
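// Each setter stores the PHP callable and registers the matching librdkafka
// callback on the conf handle; dump() then reports it under a "*_cb" key,
// which is what the isset() checks below assert.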
37 | $conf->setDrMsgCb(function () { }); 38 | $dump = $conf->dump(); 39 | var_dump(isset($dump["dr_msg_cb"])); 40 | 41 | echo "Setting stats callback\n"; 42 | $conf->setStatsCb(function () { }); 43 | $dump = $conf->dump(); 44 | var_dump(isset($dump["stats_cb"])); 45 | 46 | echo "Setting offset_commit callback\n"; 47 | $conf->setOffsetCommitCb(function () { }); 48 | $dump = $conf->dump(); 49 | var_dump(isset($dump["offset_commit_cb"])); 50 | 51 | echo "Setting rebalance callback\n"; 52 | $conf->setRebalanceCb(function () { }); 53 | $dump = $conf->dump(); 54 | var_dump(isset($dump["rebalance_cb"])); 55 | 56 | echo "Setting log callback\n"; 57 | $conf->setLogCb(function () { }); 58 | $dump = $conf->dump(); 59 | var_dump(isset($dump["log_cb"])); 60 | 61 | echo "Dumping conf\n"; 62 | var_dump(array_intersect_key($conf->dump(), array( 63 | "client.id" => true, 64 | "message.max.bytes" => true, 65 | "topic.metadata.refresh.sparse" => true, 66 | ))); 67 | 68 | --EXPECT-- 69 | Setting a string property 70 | Setting an integer property 71 | Setting a boolean property 72 | Setting a boolean property to an invalid value 73 | Caught a SimpleKafkaClient\Exception: Expected bool value for "topic.metadata.refresh.sparse": true or false 74 | Setting an invalid property 75 | Caught a SimpleKafkaClient\Exception: No such configuration property: "invalid" 76 | Setting error callback 77 | bool(true) 78 | Setting dr_msg callback 79 | bool(true) 80 | Setting stats callback 81 | bool(true) 82 | Setting offset_commit callback 83 | bool(true) 84 | Setting rebalance callback 85 | bool(true) 86 | Setting log callback 87 | bool(true) 88 | Dumping conf 89 | array(3) { 90 | ["client.id"]=> 91 | string(4) "acme" 92 | ["message.max.bytes"]=> 93 | string(7) "1048576" 94 | ["topic.metadata.refresh.sparse"]=> 95 | string(4) "true" 96 | } 97 | -------------------------------------------------------------------------------- /tests/conf_callbacks_integration.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | SimpleKafkaClient\Configuration 3 | --SKIPIF-- 4 | set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 12 | 13 | $delivered = 0; 14 | $conf->setDrMsgCb(function (SimpleKafkaClient\Producer $producer, SimpleKafkaClient\Message $message) use (&$delivered) { 15 | if (RD_KAFKA_RESP_ERR_NO_ERROR !== $message->err) { 16 | $errorStr = rd_kafka_err2str($message->err); 17 | 18 | echo sprintf('Message FAILED (%s, %s) to send with payload => %s', $message->err, $errorStr, $message->payload) . 
PHP_EOL; 19 | } else { 20 | // message successfully delivered 21 | ++$delivered; 22 | } 23 | }); 24 | 25 | $producer = new SimpleKafkaClient\Producer($conf); 26 | 27 | $topicName = sprintf("test_kafka_%s", uniqid()); 28 | $topic = $producer->getTopicHandle($topicName); 29 | 30 | for ($i = 0; $i < 10; $i++) { 31 | $topic->produce(0, 0, "message $i"); 32 | } 33 | 34 | $producer->flush(10000); 35 | 36 | $conf = new SimpleKafkaClient\Configuration(); 37 | $conf->set('auto.offset.reset', 'earliest'); 38 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 39 | $conf->set('group.id', sprintf("test_kafka_group_%s", uniqid())); 40 | $conf->set('enable.partition.eof', 'true'); 41 | $conf->set('statistics.interval.ms', 10); 42 | $conf->set('log_level', (string) LOG_DEBUG); 43 | $conf->set('debug', 'all'); 44 | 45 | $offsetCommitCount = 0; 46 | $conf->setOffsetCommitCb(function ($consumer, $error, $topicPartitions) use (&$offsetCommitCount) { 47 | ++$offsetCommitCount; 48 | }); 49 | 50 | $statsCbCalled = false; 51 | $conf->setStatsCb(function ($consumer, $json) use (&$statsCbCalled) { 52 | if ($statsCbCalled) { 53 | return; 54 | } 55 | 56 | $statsCbCalled = true; 57 | }); 58 | 59 | $logCbCalled = false; 60 | $conf->setLogCb(function (SimpleKafkaClient\Consumer $consumer, int $level, string $facility, string $message) use (&$logCbCalled) { 61 | // suppress current bug in librdkafka https://github.com/edenhill/librdkafka/issues/2767 62 | $logCbCalled = true ; 63 | }); 64 | 65 | $conf->setErrorCb(function ($kafka, int $errorCode, string $reason) { 66 | // suppress current bug in librdkafka https://github.com/edenhill/librdkafka/issues/2767 67 | }); 68 | 69 | $topicsAssigned = false; 70 | $conf->setRebalanceCb( 71 | function (SimpleKafkaClient\Consumer $kafka, $err, array $partitions = null) use (&$topicsAssigned){ 72 | switch ($err) { 73 | case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: 74 | $kafka->assign($partitions); 75 | $topicsAssigned = true; 76 | break; 77 | 78 | case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: 79 | $kafka->assign(NULL); 80 | break; 81 | 82 | default: 83 | $kafka->assign(NULL); // sync state 84 | break; 85 | } 86 | } 87 | ); 88 | 89 | $consumer = new SimpleKafkaClient\Consumer($conf); 90 | $consumer->subscribe([$topicName]); 91 | 92 | while (true) { 93 | $msg = $consumer->consume(15000); 94 | 95 | if (RD_KAFKA_RESP_ERR__PARTITION_EOF === $msg->err) { 96 | break; 97 | } 98 | 99 | if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) { 100 | throw new Exception($msg->getErrorString(), $msg->err); 101 | } 102 | 103 | $consumer->commit($msg); 104 | } 105 | 106 | var_dump($offsetCommitCount); 107 | var_dump($statsCbCalled); 108 | var_dump($logCbCalled); 109 | var_dump($topicsAssigned); 110 | var_dump($delivered); 111 | 112 | --EXPECT-- 113 | int(10) 114 | bool(true) 115 | bool(true) 116 | bool(true) 117 | int(10) 118 | -------------------------------------------------------------------------------- /tests/conf_extend.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | SimpleKafkaClient\Configuration 3 | --FILE-- 4 | set('metadata.broker.list', '127.0.0.1'); 16 | 17 | echo "done" . 
PHP_EOL; 18 | --EXPECT-- 19 | done 20 | -------------------------------------------------------------------------------- /tests/constants.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | constants 3 | --FILE-- 4 | set('group.id','test'); 10 | $consumer = new Consumer($conf); 11 | $topic = $consumer->getTopicHandle('test'); 12 | unset($topic); 13 | var_dump(isset($topic)); 14 | --EXPECT-- 15 | bool(false) 16 | -------------------------------------------------------------------------------- /tests/functions.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | constants 3 | --FILE-- 4 | 7 | --FILE-- 8 | set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 14 | 15 | $producer = new SimpleKafkaClient\Producer($conf); 16 | 17 | try { 18 | $producer->initTransactions(10000); 19 | } catch (SimpleKafkaClient\KafkaErrorException $e) { 20 | echo $e->getMessage() . PHP_EOL; 21 | echo $e->getCode() . PHP_EOL; 22 | echo $e->getFile() . PHP_EOL; 23 | echo $e->getLine() . PHP_EOL; 24 | } 25 | 26 | --EXPECTF-- 27 | _NOT_CONFIGURED 28 | -145 29 | %s/tests/init_transaction_not_configured.php 30 | 11 31 | -------------------------------------------------------------------------------- /tests/integration-tests-check.php: -------------------------------------------------------------------------------- 1 | getMessage()) . PHP_EOL; 9 | echo sprintf('Exception code: %d', $e->getCode()) . PHP_EOL; 10 | echo sprintf('Exception description: %s', $e->getErrorString()) . PHP_EOL; 11 | echo sprintf('Exception is fatal: %b', $e->isFatal()) . PHP_EOL; 12 | echo sprintf('Exception is retriable: %b', $e->isRetriable()) . PHP_EOL; 13 | echo sprintf('Exception requires transaction abort: %b', $e->transactionRequiresAbort()) . PHP_EOL; 14 | --EXPECT-- 15 | Exception message: exception message 16 | Exception code: -100 17 | Exception description: exception description 18 | Exception is fatal: 1 19 | Exception is retriable: 0 20 | Exception requires transaction abort: 1 21 | -------------------------------------------------------------------------------- /tests/message_headers.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Message headers 3 | --SKIPIF-- 4 | setErrorCb(function ($producer, $errorCode, $errstr) { 14 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 15 | exit; 16 | }); 17 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 18 | 19 | $conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) { 20 | if ($msg->err) { 21 | throw new Exception("Message delivery failed: " . 
$msg->getErrorString()); 22 | } 23 | $delivered++; 24 | }); 25 | 26 | $producer = new SimpleKafkaClient\Producer($conf); 27 | 28 | $topicName = sprintf("test_kafka_%s", uniqid()); 29 | 30 | $topic = $producer->getTopicHandle($topicName); 31 | 32 | if (!$producer->getMetadata(false, 2*1000, $topic)) { 33 | echo "Failed to get metadata, is broker down?\n"; 34 | } 35 | 36 | $headers = [ 37 | ['key' => 'value'], 38 | [ 39 | 'key1' => 'value1', 40 | 'key2' => 'value2', 41 | 'key3' => 'value3', 42 | ], 43 | ['gzencoded' => gzencode('gzdata')], 44 | [], 45 | null, 46 | ['key'], 47 | ]; 48 | 49 | foreach ($headers as $index => $header) { 50 | $topic->producev(0, 0, "message $index", null, $header); 51 | $producer->poll(0); 52 | } 53 | 54 | $producer->flush(10000); 55 | 56 | printf("%d messages delivered\n", $delivered); 57 | 58 | $conf = new SimpleKafkaClient\Configuration(); 59 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 60 | $conf->set('group.id','test'); 61 | $conf->set('auto.offset.reset','earliest'); 62 | $conf->set('enable.partition.eof', 'true'); 63 | $conf->setErrorCb(function ($producer, $errorCode, $errstr) { 64 | // non fatal errors are retried by librdkafka 65 | if (RD_KAFKA_RESP_ERR__FATAL !== $errorCode) { 66 | return; 67 | } 68 | 69 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 70 | exit; 71 | }); 72 | $consumer = new SimpleKafkaClient\Consumer($conf); 73 | $consumer->subscribe([$topicName]); 74 | 75 | $messages = []; 76 | 77 | while (true) { 78 | $msg = $consumer->consume(10000); 79 | if (!$msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) { 80 | break; 81 | } 82 | 83 | if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) { 84 | throw new Exception($msg->getErrorString(), $msg->err); 85 | } 86 | 87 | $headersString = isset($msg->headers) ? $msg->headers : []; 88 | array_walk($headersString, function(&$value, $key) { 89 | if ('gzencoded' === $key) { 90 | $value = gzdecode($value); 91 | } 92 | $value = "{$key}: {$value}"; 93 | }); 94 | if (empty($headersString)) { 95 | $headersString = "none"; 96 | } else { 97 | $headersString = implode(", ", $headersString); 98 | } 99 | printf("Got message: %s | Headers: %s\n", $msg->payload, $headersString); 100 | } 101 | --EXPECT-- 102 | 6 messages delivered 103 | Got message: message 0 | Headers: key: value 104 | Got message: message 1 | Headers: key1: value1, key2: value2, key3: value3 105 | Got message: message 2 | Headers: gzencoded: gzdata 106 | Got message: message 3 | Headers: none 107 | Got message: message 4 | Headers: none 108 | Got message: message 5 | Headers: none 109 | -------------------------------------------------------------------------------- /tests/metadata.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Metadata 3 | --SKIPIF-- 4 | setErrorCb(function ($producer, $errorCode, $errstr) { 14 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 15 | exit; 16 | }); 17 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 18 | 19 | $conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) { 20 | if ($msg->err) { 21 | throw new Exception("Message delivery failed: " . 
$msg->getErrorString()); 22 | } 23 | $delivered++; 24 | }); 25 | 26 | $producer = new SimpleKafkaClient\Producer($conf); 27 | 28 | $topicName = "test_kafka_metadata"; 29 | 30 | $topic = $producer->getTopicHandle($topicName); 31 | 32 | if (!$producer->getMetadata(false, 2*1000, $topic)) { 33 | echo "Failed to get metadata, is broker down?\n"; 34 | } 35 | 36 | $topic->produce(0, 0, "some test mesage"); 37 | 38 | $producer->flush(10000); 39 | printf("%d messages delivered\n", $delivered); 40 | $metadata = $producer->getMetadata(false, 2*1000, $topic); 41 | echo $metadata->getOrigBrokerName() . PHP_EOL; 42 | 43 | $topics = $metadata->getTopics(); 44 | 45 | while ($topics->valid()) { 46 | echo $topics->current()->getName() . PHP_EOL; 47 | echo $topics->current()->getPartitions()->count() . PHP_EOL; 48 | echo $topics->current()->getPartitions()->current()->getLeader() . PHP_EOL; 49 | $topics->next(); 50 | } 51 | 52 | echo $metadata->getBrokers()->current()->getHost() . PHP_EOL; 53 | echo $metadata->getBrokers()->current()->getPort() . PHP_EOL; 54 | --EXPECT-- 55 | 1 messages delivered 56 | kafka:9092/1 57 | test_kafka_metadata 58 | 1 59 | 1 60 | kafka 61 | 9092 62 | 63 | 64 | -------------------------------------------------------------------------------- /tests/oauthbearer_cb.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Produce, consume 3 | --SKIPIF-- 4 | set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 12 | $conf->set('security.protocol', 'SASL_PLAINTEXT'); 13 | $conf->set('sasl.mechanisms', 'OAUTHBEARER'); 14 | $conf->set('sasl.oauthbearer.config', 'principalClaimName=azp'); 15 | $conf->setOAuthBearerTokenRefreshCb(function($kafka, $oAuthBearerConfig) { 16 | var_dump($oAuthBearerConfig); 17 | }); 18 | 19 | $conf->setErrorCb(function($kafka, $errorCode, $errorString) { 20 | var_dump($errorString); 21 | }); 22 | 23 | $producer = new SimpleKafkaClient\Producer($conf); 24 | $producer->poll(-1); 25 | --EXPECT-- 26 | string(22) "principalClaimName=azp" 27 | -------------------------------------------------------------------------------- /tests/offsets_for_times.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Produce, consume 3 | --SKIPIF-- 4 | set('client.id', 'pure-php-producer'); 12 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 13 | 14 | $producer = new SimpleKafkaClient\Producer($conf); 15 | $topic = $producer->getTopicHandle('pure-php-test-topic-offsets'); 16 | $time = time(); 17 | $topic->producev( 18 | RD_KAFKA_PARTITION_UA, 19 | RD_KAFKA_MSG_F_BLOCK, // will block produce if queue is full 20 | 'special-message', 21 | 'special-key', 22 | [ 23 | 'special-header' => 'awesome' 24 | ] 25 | ); 26 | $result = $producer->flush(20000); 27 | 28 | $topicPartition = new SimpleKafkaClient\TopicPartition('pure-php-test-topic-offsets', 0, $time); 29 | $result = $producer->offsetsForTimes([$topicPartition], 10000); 30 | var_dump($result[0]->getTopicName()); 31 | var_dump($result[0]->getPartition()); 32 | var_dump($result[0]->getOffset()); 33 | --EXPECT-- 34 | string(27) "pure-php-test-topic-offsets" 35 | int(0) 36 | int(0) 37 | -------------------------------------------------------------------------------- /tests/produce_consume.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Produce, consume 3 | --SKIPIF-- 4 | setErrorCb(function ($producer, $errorCode, $errstr) { 14 | printf("%s: %s\n", 
rd_kafka_err2str($errorCode), $errstr); 15 | exit; 16 | }); 17 | 18 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 19 | 20 | $conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) { 21 | if ($msg->err) { 22 | throw new Exception("Message delivery failed: " . $msg->getErrorString()); 23 | } 24 | $delivered++; 25 | }); 26 | 27 | $producer = new SimpleKafkaClient\Producer($conf); 28 | 29 | $topicName = sprintf("test_kafka_%s", uniqid()); 30 | 31 | $topic = $producer->getTopicHandle($topicName); 32 | 33 | if (!$producer->getMetadata(false, 2*1000, $topic)) { 34 | echo "Failed to get metadata, is broker down?\n"; 35 | } 36 | 37 | for ($i = 0; $i < 10; $i++) { 38 | $topic->produce(0, 0, "message $i"); 39 | $producer->poll(0); 40 | } 41 | 42 | $producer->flush(10000); 43 | 44 | printf("%d messages delivered\n", $delivered); 45 | 46 | $conf = new SimpleKafkaClient\Configuration(); 47 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 48 | $conf->set('group.id','test'); 49 | $conf->set('auto.offset.reset','earliest'); 50 | $conf->set('enable.partition.eof', 'true'); 51 | $conf->setErrorCb(function ($producer, $errorCode, $errstr) { 52 | // non fatal errors are retried by librdkafka 53 | if (RD_KAFKA_RESP_ERR__FATAL !== $errorCode) { 54 | return; 55 | } 56 | 57 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 58 | exit; 59 | }); 60 | $consumer = new SimpleKafkaClient\Consumer($conf); 61 | $consumer->subscribe([$topicName]); 62 | 63 | $messages = []; 64 | 65 | while (true) { 66 | $msg = $consumer->consume(10000); 67 | // librdkafka before 1.0 returns message with RD_KAFKA_RESP_ERR__PARTITION_EOF when reaching topic end. 68 | if (!$msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) { 69 | break; 70 | } 71 | 72 | if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) { 73 | throw new Exception($msg->getErrorString(), $msg->err); 74 | } 75 | 76 | printf("Got message: %s\n", $msg->payload); 77 | } 78 | --EXPECT-- 79 | 10 messages delivered 80 | Got message: message 0 81 | Got message: message 1 82 | Got message: message 2 83 | Got message: message 3 84 | Got message: message 4 85 | Got message: message 5 86 | Got message: message 6 87 | Got message: message 7 88 | Got message: message 8 89 | Got message: message 9 90 | -------------------------------------------------------------------------------- /tests/produce_consume_transactional.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | Produce, consume 3 | --SKIPIF-- 4 | set('transactional.id', 'transactional-producer'); 17 | 18 | $conf->setLogCb(function ($kafka, $level, $facility, $message) {}); 19 | $conf->setErrorCb(function ($producer, $errorCode, $errstr) { 20 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 21 | exit; 22 | }); 23 | $conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) { 24 | if ($msg->err) { 25 | throw new Exception("Message delivery failed: " . 
$msg->getErrorString()); 26 | } 27 | $delivered++; 28 | }); 29 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 30 | 31 | $producer = new SimpleKafkaClient\Producer($conf); 32 | 33 | $producer->initTransactions(10000); 34 | $producer->beginTransaction(); 35 | 36 | $topicName = sprintf("test_kafka_%s", uniqid()); 37 | 38 | $topic = $producer->getTopicHandle($topicName); 39 | 40 | if (!$producer->getMetadata(false, 5*1000, $topic)) { 41 | echo "Failed to get metadata, is broker down?\n"; 42 | } 43 | 44 | for ($i = 0; $i < 10; $i++) { 45 | $topic->produce(0, 0, "message $i"); 46 | $producer->poll(0); 47 | } 48 | 49 | $producer->flush(10000); 50 | 51 | $producer->commitTransaction(10000); 52 | 53 | printf("%d messages delivered\n", $delivered); 54 | 55 | $conf = new SimpleKafkaClient\Configuration(); 56 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 57 | $conf->setErrorCb(function ($producer, $errorCode, $errstr) { 58 | // non-fatal errors are retried by librdkafka 59 | if (RD_KAFKA_RESP_ERR__FATAL !== $errorCode) { 60 | return; 61 | } 62 | 63 | printf("%s: %s\n", rd_kafka_err2str($errorCode), $errstr); 64 | exit; 65 | }); 66 | $conf->set('group.id', 'test'); 67 | $conf->set('auto.offset.reset', 'earliest'); 68 | $conf->set('enable.partition.eof', 'true'); 69 | $consumer = new SimpleKafkaClient\Consumer($conf); 70 | $consumer->subscribe([$topicName]); 71 | 72 | $messages = []; 73 | 74 | while (true) { 75 | $msg = $consumer->consume(10000); 76 | // with enable.partition.eof set, consume() returns a message with RD_KAFKA_RESP_ERR__PARTITION_EOF when the end of the partition is reached. 77 | if (!$msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) { 78 | break; 79 | } 80 | 81 | if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) { 82 | throw new Exception($msg->getErrorString(), $msg->err); 83 | } 84 | 85 | printf("Got message: %s\n", $msg->payload); 86 | } 87 | --EXPECT-- 88 | 10 messages delivered 89 | Got message: message 0 90 | Got message: message 1 91 | Got message: message 2 92 | Got message: message 3 93 | Got message: message 4 94 | Got message: message 5 95 | Got message: message 6 96 | Got message: message 7 97 | Got message: message 8 98 | Got message: message 9 99 | -------------------------------------------------------------------------------- /tests/query_watermark_offsets.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | queryWatermarkOffsets() 3 | --SKIPIF-- 4 | set('client.id', 'pure-php-producer'); 12 | $conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 13 | 14 | $producer = new SimpleKafkaClient\Producer($conf); 15 | $topic = $producer->getTopicHandle('pure-php-test-topic-watermark'); 16 | $topic->producev( 17 | RD_KAFKA_PARTITION_UA, 18 | RD_KAFKA_MSG_F_BLOCK, // will block produce if queue is full 19 | 'special-message', 20 | 'special-key', 21 | [ 22 | 'special-header' => 'awesome' 23 | ] 24 | ); 25 | $result = $producer->flush(20000); 26 | $high = 0; 27 | $low = 0; 28 | $result = $producer->queryWatermarkOffsets('pure-php-test-topic-watermark', 0, $low, $high, 10000); 29 | var_dump($low); 30 | var_dump($high); 31 | --EXPECT-- 32 | int(0) 33 | int(1) 34 | -------------------------------------------------------------------------------- /tests/rd_kafka_get_err_descs.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | kafka_get_err_descs() 3 | --FILE-- 4 | 16 | int(-192) 17 | ["name"]=> 18 | string(14) "_MSG_TIMED_OUT" 19 | ["desc"]=> 20 | string(24) "Local: Message 
timed out" 21 | } 22 | -------------------------------------------------------------------------------- /tests/set_oauthbearer_failure.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | setOAuthBearerTokenFailure() 3 | --SKIPIF-- 4 | set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 12 | $conf->set('security.protocol', 'SASL_PLAINTEXT'); 13 | $conf->set('sasl.mechanisms', 'OAUTHBEARER'); 14 | 15 | $conf->setErrorCb(function($kafka, $errorCode, $errorString) { 16 | var_dump($errorString); 17 | }); 18 | 19 | $producer = new SimpleKafkaClient\Producer($conf); 20 | $producer->setOAuthBearerTokenFailure('something'); 21 | $producer->poll(-1); 22 | --EXPECT-- 23 | string(51) "Failed to acquire SASL OAUTHBEARER token: something" 24 | -------------------------------------------------------------------------------- /tests/set_oauthbearer_token.phpt: -------------------------------------------------------------------------------- 1 | --TEST-- 2 | setOAuthBearerToken() 3 | --SKIPIF-- 4 | set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS')); 12 | $conf->set('security.protocol', 'SASL_PLAINTEXT'); 13 | $conf->set('sasl.mechanisms', 'OAUTHBEARER'); 14 | 15 | $conf->setErrorCb(function($kafka, $errorCode, $errorString) { 16 | var_dump($errorString); 17 | }); 18 | 19 | $producer = new SimpleKafkaClient\Producer($conf); 20 | $producer->setOAuthBearerToken('token', 100000 + time() * 1000, 'principal', ['test' => 'key']); 21 | $producer->poll(-1); 22 | echo 'Done'; 23 | --EXPECT-- 24 | Done 25 | -------------------------------------------------------------------------------- /tests/test_env.php.sample: 1 | $topar->getTopicName(), 16 | "partition" => $topar->getPartition(), 17 | "offset" => $topar->getOffset(), 18 | )); 19 | 20 | $topar 21 | ->setTopicName("foo") 22 | ->setPartition(123) 23 | ->setOffset(43); 24 | 25 | var_dump($topar); 26 | --EXPECT-- 27 | object(SimpleKafkaClient\TopicPartition)#1 (3) { 28 | ["topic"]=> 29 | string(4) "test" 30 | ["partition"]=> 31 | int(-1) 32 | ["offset"]=> 33 | int(0) 34 | } 35 | object(SimpleKafkaClient\TopicPartition)#2 (3) { 36 | ["topic"]=> 37 | string(4) "test" 38 | ["partition"]=> 39 | int(-1) 40 | ["offset"]=> 41 | int(42) 42 | } 43 | array(3) { 44 | ["topic"]=> 45 | string(4) "test" 46 | ["partition"]=> 47 | int(-1) 48 | ["offset"]=> 49 | int(42) 50 | } 51 | object(SimpleKafkaClient\TopicPartition)#2 (3) { 52 | ["topic"]=> 53 | string(3) "foo" 54 | ["partition"]=> 55 | int(123) 56 | ["offset"]=> 57 | int(43) 58 | } 59 | -------------------------------------------------------------------------------- /topic.c: -------------------------------------------------------------------------------- 1 | /** 2 | * BSD 3-Clause License 3 | * 4 | * Copyright (c) 2016, Arnaud Le Blanc (Author) 5 | * Copyright (c) 2020, Nick Chiu 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * 1. Redistributions of source code must retain the above copyright notice, this 12 | * list of conditions and the following disclaimer. 13 | * 14 | * 2. Redistributions in binary form must reproduce the above copyright notice, 15 | * this list of conditions and the following disclaimer in the documentation 16 | * and/or other materials provided with the distribution. 17 | * 18 | * 3. 
Neither the name of the copyright holder nor the names of its 19 | * contributors may be used to endorse or promote products derived from 20 | * this software without specific prior written permission. 21 | * 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 28 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | */ 33 | 34 | #ifdef HAVE_CONFIG_H 35 | #include "config.h" 36 | #endif 37 | 38 | #include "php.h" 39 | #include "php_simple_kafka_client_int.h" 40 | #include "ext/spl/spl_iterators.h" 41 | #include "Zend/zend_interfaces.h" 42 | #include "Zend/zend_exceptions.h" 43 | #include "ext/spl/spl_exceptions.h" 44 | #include "topic_arginfo.h" 45 | 46 | static zend_object_handlers object_handlers; 47 | zend_class_entry * ce_kafka_consumer_topic; 48 | zend_class_entry * ce_kafka_producer_topic; 49 | zend_class_entry * ce_kafka_topic; 50 | 51 | typedef struct _php_callback { 52 | zend_fcall_info fci; 53 | zend_fcall_info_cache fcc; 54 | } php_callback; 55 | 56 | static void kafka_topic_free(zend_object *object) /* {{{ */ 57 | { 58 | kafka_topic_object *intern = php_kafka_from_obj(kafka_topic_object, object); 59 | 60 | if (Z_TYPE(intern->zrk) != IS_UNDEF && intern->rkt) { 61 | kafka_object *kafka_intern = get_kafka_object(&intern->zrk); 62 | if (kafka_intern) { 63 | zend_hash_index_del(&kafka_intern->topics, (zend_ulong)intern); 64 | } 65 | } 66 | 67 | zend_object_std_dtor(&intern->std); 68 | } 69 | /* }}} */ 70 | 71 | static zend_object *kafka_topic_new(zend_class_entry *class_type) /* {{{ */ 72 | { 73 | zend_object* retval; 74 | kafka_topic_object *intern; 75 | 76 | intern = ecalloc(1, sizeof(kafka_topic_object)+ zend_object_properties_size(class_type)); 77 | zend_object_std_init(&intern->std, class_type); 78 | object_properties_init(&intern->std, class_type); 79 | 80 | retval = &intern->std; 81 | retval->handlers = &object_handlers; 82 | 83 | return retval; 84 | } 85 | /* }}} */ 86 | 87 | kafka_topic_object * get_kafka_topic_object(zval *zrkt) 88 | { 89 | kafka_topic_object *orkt = Z_KAFKA_P(kafka_topic_object, zrkt); 90 | 91 | if (!orkt->rkt) { 92 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\Topic::__construct() has not been called"); 93 | return NULL; 94 | } 95 | 96 | return orkt; 97 | } 98 | 99 | /* {{{ private constructor */ 100 | ZEND_METHOD(SimpleKafkaClient_ProducerTopic, __construct) {} 101 | /* }}} */ 102 | 103 | /* {{{ proto void SimpleKafkaClient\ProducerTopic::produce(int $partition, int $msgflags[, string $payload, string $key]) 104 | Produce and send a single message to broker. 
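   Illustrative PHP usage (a sketch for clarity; the broker address, topic
   name, payload and key below are examples rather than values taken from
   this repository):

     $conf = new SimpleKafkaClient\Configuration();
     $conf->set('metadata.broker.list', 'localhost:9092');
     $producer = new SimpleKafkaClient\Producer($conf);
     $topic = $producer->getTopicHandle('example-topic');
     $topic->produce(RD_KAFKA_PARTITION_UA, 0, 'some payload', 'optional-key');
     $producer->flush(10000);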
*/ 105 | ZEND_METHOD(SimpleKafkaClient_ProducerTopic, produce) 106 | { 107 | zend_long partition; 108 | zend_long msgflags; 109 | char *payload = NULL; 110 | size_t payload_len = 0; 111 | char *key = NULL; 112 | size_t key_len = 0; 113 | int ret; 114 | rd_kafka_resp_err_t err; 115 | kafka_topic_object *intern; 116 | 117 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 2, 4) 118 | Z_PARAM_LONG(partition) 119 | Z_PARAM_LONG(msgflags) 120 | Z_PARAM_OPTIONAL 121 | Z_PARAM_STRING_OR_NULL(payload, payload_len) 122 | Z_PARAM_STRING_OR_NULL(key, key_len) 123 | ZEND_PARSE_PARAMETERS_END(); 124 | 125 | if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) { 126 | zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition); 127 | return; 128 | } 129 | 130 | if (msgflags != 0 && msgflags != RD_KAFKA_MSG_F_BLOCK) { 131 | zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Invalid value '%ld' for $msgflags", msgflags); 132 | return; 133 | } 134 | 135 | intern = get_kafka_topic_object(getThis()); 136 | if (!intern) { return; /* exception already thrown by the getter */ } 137 | ret = rd_kafka_produce(intern->rkt, partition, msgflags | RD_KAFKA_MSG_F_COPY, payload, payload_len, key, key_len, NULL); 138 | 139 | if (ret == -1) { 140 | err = rd_kafka_last_error(); 141 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 142 | return; 143 | } 144 | } 145 | /* }}} */ 146 | 147 | /* {{{ proto void SimpleKafkaClient\ProducerTopic::producev(int $partition, int $msgflags[, string $payload, string $key, array $headers, int $timestamp_ms]) 148 | Produce and send a single message to broker (optionally with headers and a timestamp). */ 149 | ZEND_METHOD(SimpleKafkaClient_ProducerTopic, producev) 150 | { 151 | zend_long partition; 152 | zend_long msgflags; 153 | char *payload = NULL; 154 | size_t payload_len = 0; 155 | char *key = NULL; 156 | size_t key_len = 0; 157 | rd_kafka_resp_err_t err; 158 | kafka_topic_object *intern; 159 | kafka_object *kafka_intern; 160 | HashTable *headersParam = NULL; 161 | HashPosition headersParamPos; 162 | char *header_key; 163 | zval *header_value; 164 | rd_kafka_headers_t *headers; 165 | zend_long timestamp_ms = 0; 166 | zend_bool timestamp_ms_is_null = 0; 167 | 168 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 2, 6) 169 | Z_PARAM_LONG(partition) 170 | Z_PARAM_LONG(msgflags) 171 | Z_PARAM_OPTIONAL 172 | Z_PARAM_STRING_OR_NULL(payload, payload_len) 173 | Z_PARAM_STRING_OR_NULL(key, key_len) 174 | Z_PARAM_ARRAY_HT_OR_NULL(headersParam) 175 | Z_PARAM_LONG_OR_NULL(timestamp_ms, timestamp_ms_is_null) 176 | ZEND_PARSE_PARAMETERS_END(); 177 | 178 | if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) { 179 | zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition); 180 | return; 181 | } 182 | 183 | if (msgflags != 0 && msgflags != RD_KAFKA_MSG_F_BLOCK) { 184 | zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Invalid value '%ld' for $msgflags", msgflags); 185 | return; 186 | } 187 | 188 | if (timestamp_ms_is_null == 1) { 189 | timestamp_ms = 0; 190 | } 191 | 192 | intern = get_kafka_topic_object(getThis()); 193 | if (!intern) { return; /* exception already thrown by the getter */ } 194 | if (headersParam != NULL && zend_hash_num_elements(headersParam) > 0) { 195 | headers = rd_kafka_headers_new(zend_hash_num_elements(headersParam)); 196 | for (zend_hash_internal_pointer_reset_ex(headersParam, &headersParamPos); 197 | (header_value = zend_hash_get_current_data_ex(headersParam, &headersParamPos)) != NULL 
&& 198 | (header_key = kafka_hash_get_current_key_ex(headersParam, &headersParamPos)) != NULL; 199 | zend_hash_move_forward_ex(headersParam, &headersParamPos)) { 200 | convert_to_string_ex(header_value); 201 | rd_kafka_header_add( 202 | headers, 203 | header_key, 204 | -1, // Auto detect header title length 205 | Z_STRVAL_P(header_value), 206 | Z_STRLEN_P(header_value) 207 | ); 208 | } 209 | } else { 210 | headers = rd_kafka_headers_new(0); 211 | } 212 | 213 | kafka_intern = get_kafka_object(&intern->zrk); 214 | if (!kafka_intern) { 215 | return; 216 | } 217 | 218 | err = rd_kafka_producev( 219 | kafka_intern->rk, 220 | RD_KAFKA_V_RKT(intern->rkt), 221 | RD_KAFKA_V_PARTITION(partition), 222 | RD_KAFKA_V_MSGFLAGS(msgflags | RD_KAFKA_MSG_F_COPY), 223 | RD_KAFKA_V_VALUE(payload, payload_len), 224 | RD_KAFKA_V_KEY(key, key_len), 225 | RD_KAFKA_V_TIMESTAMP(timestamp_ms), 226 | RD_KAFKA_V_HEADERS(headers), 227 | RD_KAFKA_V_END 228 | ); 229 | 230 | if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { 231 | rd_kafka_headers_destroy(headers); 232 | zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err); 233 | return; 234 | } 235 | } 236 | /* }}} */ 237 | 238 | /* {{{ private constructor */ 239 | ZEND_METHOD(SimpleKafkaClient_ConsumerTopic, __construct) {} 240 | /* }}} */ 241 | 242 | /* {{{ proto string SimpleKafkaClient\Topic::getName() */ 243 | ZEND_METHOD(SimpleKafkaClient_Topic, getName) 244 | { 245 | kafka_topic_object *intern; 246 | 247 | if (zend_parse_parameters_none() == FAILURE) { 248 | return; 249 | } 250 | 251 | intern = get_kafka_topic_object(getThis()); 252 | if (!intern) { 253 | return; 254 | } 255 | 256 | RETURN_STRING(rd_kafka_topic_name(intern->rkt)); 257 | } 258 | /* }}} */ 259 | 260 | void kafka_topic_init(INIT_FUNC_ARGS) { /* {{{ */ 261 | 262 | zend_class_entry ce; 263 | 264 | memcpy(&object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); 265 | object_handlers.clone_obj = NULL; 266 | object_handlers.free_obj = kafka_topic_free; 267 | object_handlers.offset = XtOffsetOf(kafka_topic_object, std); 268 | 269 | INIT_NS_CLASS_ENTRY(ce, "SimpleKafkaClient", "Topic", class_SimpleKafkaClient_Topic_methods); 270 | ce_kafka_topic = zend_register_internal_class(&ce); 271 | ce_kafka_topic->ce_flags |= ZEND_ACC_EXPLICIT_ABSTRACT_CLASS; 272 | ce_kafka_topic->create_object = kafka_topic_new; 273 | 274 | INIT_NS_CLASS_ENTRY(ce, "SimpleKafkaClient", "ConsumerTopic", class_SimpleKafkaClient_ConsumerTopic_methods); 275 | ce_kafka_consumer_topic = zend_register_internal_class_ex(&ce, ce_kafka_topic); 276 | 277 | INIT_NS_CLASS_ENTRY(ce, "SimpleKafkaClient", "ProducerTopic", class_SimpleKafkaClient_ProducerTopic_methods); 278 | ce_kafka_producer_topic = zend_register_internal_class_ex(&ce, ce_kafka_topic); 279 | } /* }}} */ 280 | -------------------------------------------------------------------------------- /topic.stub.php: -------------------------------------------------------------------------------- 1 | topic) { 58 | efree(intern->topic); 59 | } 60 | 61 | zend_object_std_dtor(&intern->std); 62 | } 63 | /* }}} */ 64 | 65 | static zend_object *create_object(zend_class_entry *class_type) /* {{{ */ 66 | { 67 | zend_object* retval; 68 | object_intern *intern; 69 | 70 | intern = ecalloc(1, sizeof(*intern)); 71 | zend_object_std_init(&intern->std, class_type); 72 | object_properties_init(&intern->std, class_type); 73 | 74 | retval = &intern->std; 75 | retval->handlers = &handlers; 76 | 77 | return retval; 78 | } 79 | /* }}} */ 80 | 81 | static object_intern * get_object(zval 
*z) /* {{{ */ 82 | { 83 | object_intern *intern = Z_KAFKA_P(object_intern, z); 84 | 85 | if (!intern->topic) { 86 | zend_throw_exception_ex(NULL, 0, "SimpleKafkaClient\\TopicPartition::__construct() has not been called"); 87 | return NULL; 88 | } 89 | 90 | return intern; 91 | } /* }}} */ 92 | 93 | kafka_topic_partition_intern * get_topic_partition_object(zval *z) /* {{{ */ 94 | { 95 | return get_object(z); 96 | } /* }}} */ 97 | 98 | static HashTable *get_debug_info(Z_KAFKA_OBJ *object, int *is_temp) /* {{{ */ 99 | { 100 | zval ary; 101 | object_intern *intern; 102 | 103 | *is_temp = 1; 104 | 105 | array_init(&ary); 106 | 107 | intern = kafka_get_debug_object(object_intern, object); 108 | 109 | if (!intern) { 110 | return Z_ARRVAL(ary); 111 | } 112 | 113 | if (intern->topic) { 114 | add_assoc_string(&ary, "topic", intern->topic); 115 | } else { 116 | add_assoc_null(&ary, "topic"); 117 | } 118 | 119 | add_assoc_long(&ary, "partition", intern->partition); 120 | add_assoc_long(&ary, "offset", intern->offset); 121 | 122 | return Z_ARRVAL(ary); 123 | } 124 | /* }}} */ 125 | 126 | void kafka_topic_partition_init(zval *zobj, char * topic, int32_t partition, int64_t offset) /* {{{ */ 127 | { 128 | object_intern *intern; 129 | 130 | intern = Z_KAFKA_P(object_intern, zobj); 131 | if (!intern) { 132 | return; 133 | } 134 | 135 | if (intern->topic) { 136 | efree(intern->topic); 137 | } 138 | intern->topic = estrdup(topic); 139 | 140 | intern->partition = partition; 141 | intern->offset = offset; 142 | } /* }}} */ 143 | 144 | void kafka_topic_partition_list_to_array(zval *return_value, rd_kafka_topic_partition_list_t *list) /* {{{ */ 145 | { 146 | rd_kafka_topic_partition_t *topar; 147 | zval ztopar; 148 | int i; 149 | 150 | array_init_size(return_value, list->cnt); 151 | 152 | for (i = 0; i < list->cnt; i++) { 153 | topar = &list->elems[i]; 154 | ZVAL_NULL(&ztopar); 155 | object_init_ex(&ztopar, ce_kafka_topic_partition); 156 | kafka_topic_partition_init(&ztopar, topar->topic, topar->partition, topar->offset); 157 | add_next_index_zval(return_value, &ztopar); 158 | } 159 | } /* }}} */ 160 | 161 | rd_kafka_topic_partition_list_t * array_arg_to_kafka_topic_partition_list(int argnum, HashTable *ary) { /* {{{ */ 162 | 163 | HashPosition pos; 164 | rd_kafka_topic_partition_list_t *list; 165 | zval *zv; 166 | 167 | list = rd_kafka_topic_partition_list_new(zend_hash_num_elements(ary)); 168 | 169 | for (zend_hash_internal_pointer_reset_ex(ary, &pos); 170 | (zv = zend_hash_get_current_data_ex(ary, &pos)) != NULL; 171 | zend_hash_move_forward_ex(ary, &pos)) { 172 | kafka_topic_partition_intern *topar_intern; 173 | rd_kafka_topic_partition_t *topar; 174 | 175 | if (Z_TYPE_P(zv) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(zv), ce_kafka_topic_partition)) { 176 | const char *space; 177 | const char *class_name = get_active_class_name(&space); 178 | rd_kafka_topic_partition_list_destroy(list); 179 | php_error(E_ERROR, 180 | "Argument %d passed to %s%s%s() must be an array of SimpleKafkaClient\\TopicPartition, at least one element is a(n) %s", 181 | argnum, 182 | class_name, space, 183 | get_active_function_name(), 184 | zend_zval_type_name(zv)); 185 | return NULL; 186 | } 187 | 188 | topar_intern = get_topic_partition_object(zv); 189 | if (!topar_intern) { 190 | rd_kafka_topic_partition_list_destroy(list); 191 | return NULL; 192 | } 193 | 194 | topar = rd_kafka_topic_partition_list_add(list, topar_intern->topic, topar_intern->partition); 195 | topar->offset = topar_intern->offset; 196 | } 197 | 198 | return list; 
199 | } /* }}} */ 200 | 201 | 202 | /* {{{ proto void SimpleKafkaClient\TopicPartition::__construct(string $topic, int $partition[, int $offset]) 203 | Constructor */ 204 | ZEND_METHOD(SimpleKafkaClient_TopicPartition, __construct) 205 | { 206 | char *topic; 207 | size_t topic_len; 208 | zend_long partition; 209 | zend_long offset = 0; 210 | 211 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 2, 3) 212 | Z_PARAM_STRING(topic, topic_len) 213 | Z_PARAM_LONG(partition) 214 | Z_PARAM_OPTIONAL 215 | Z_PARAM_LONG(offset) 216 | ZEND_PARSE_PARAMETERS_END(); 217 | 218 | kafka_topic_partition_init(getThis(), topic, partition, offset); 219 | } 220 | /* }}} */ 221 | 222 | /* {{{ proto string SimpleKafkaClient\TopicPartition::getTopicName() 223 | Returns topic name */ 224 | ZEND_METHOD(SimpleKafkaClient_TopicPartition, getTopicName) 225 | { 226 | object_intern *intern; 227 | 228 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 229 | ZEND_PARSE_PARAMETERS_END(); 230 | 231 | intern = get_object(getThis()); 232 | if (!intern) { 233 | return; 234 | } 235 | 236 | if (intern->topic) { 237 | RETURN_STRING(intern->topic); 238 | } else { 239 | RETURN_NULL(); 240 | } 241 | } 242 | /* }}} */ 243 | 244 | /* {{{ proto TopicPartition SimpleKafkaClient\TopicPartition::setTopicName($topicName) 245 | Sets topic name */ 246 | ZEND_METHOD(SimpleKafkaClient_TopicPartition, setTopicName) 247 | { 248 | char * topic; 249 | size_t topic_len; 250 | object_intern *intern; 251 | 252 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 253 | Z_PARAM_STRING(topic, topic_len) 254 | ZEND_PARSE_PARAMETERS_END(); 255 | 256 | intern = get_object(getThis()); 257 | if (!intern) { 258 | return; 259 | } 260 | 261 | if (intern->topic) { 262 | efree(intern->topic); 263 | } 264 | 265 | intern->topic = estrdup(topic); 266 | 267 | RETURN_ZVAL(getThis(), 1, 0); 268 | } 269 | /* }}} */ 270 | 271 | /* {{{ proto int SimpleKafkaClient\TopicPartition::getPartition() 272 | Returns partition */ 273 | ZEND_METHOD(SimpleKafkaClient_TopicPartition, getPartition) 274 | { 275 | object_intern *intern; 276 | 277 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 278 | ZEND_PARSE_PARAMETERS_END(); 279 | 280 | intern = get_object(getThis()); 281 | if (!intern) { 282 | return; 283 | } 284 | 285 | RETURN_LONG(intern->partition); 286 | } 287 | /* }}} */ 288 | 289 | /* {{{ proto TopicPartition SimpleKafkaClient\TopicPartition::setPartition($partition) 290 | Sets partition */ 291 | ZEND_METHOD(SimpleKafkaClient_TopicPartition, setPartition) 292 | { 293 | zend_long partition; 294 | object_intern *intern; 295 | 296 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 297 | Z_PARAM_LONG(partition) 298 | ZEND_PARSE_PARAMETERS_END(); 299 | 300 | intern = get_object(getThis()); 301 | if (!intern) { 302 | return; 303 | } 304 | 305 | intern->partition = partition; 306 | 307 | RETURN_ZVAL(getThis(), 1, 0); 308 | } 309 | /* }}} */ 310 | 311 | /* {{{ proto int SimpleKafkaClient\TopicPartition::getOffset() 312 | Returns offset */ 313 | ZEND_METHOD(SimpleKafkaClient_TopicPartition, getOffset) 314 | { 315 | object_intern *intern; 316 | 317 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 0, 0) 318 | ZEND_PARSE_PARAMETERS_END(); 319 | 320 | intern = get_object(getThis()); 321 | if (!intern) { 322 | return; 323 | } 324 | 325 | RETURN_LONG(intern->offset); 326 | } 327 | /* }}} */ 328 | 329 | /* {{{ proto TopicPartition SimpleKafkaClient\TopicPartition::setOffset($offset) 330 | Sets offset */ 331 | 
ZEND_METHOD(SimpleKafkaClient_TopicPartition, setOffset) 332 | { 333 | zend_long offset; 334 | object_intern *intern; 335 | 336 | ZEND_PARSE_PARAMETERS_START_EX(ZEND_PARSE_PARAMS_THROW, 1, 1) 337 | Z_PARAM_LONG(offset) 338 | ZEND_PARSE_PARAMETERS_END(); 339 | 340 | intern = get_object(getThis()); 341 | if (!intern) { 342 | return; 343 | } 344 | 345 | intern->offset = offset; 346 | 347 | RETURN_ZVAL(getThis(), 1, 0); 348 | } 349 | /* }}} */ 350 | 351 | void kafka_metadata_topic_partition_init(INIT_FUNC_ARGS) /* {{{ */ 352 | { 353 | zend_class_entry tmpce; 354 | 355 | INIT_NS_CLASS_ENTRY(tmpce, "SimpleKafkaClient", "TopicPartition", class_SimpleKafkaClient_TopicPartition_methods); 356 | ce_kafka_topic_partition = zend_register_internal_class(&tmpce); 357 | ce_kafka_topic_partition->create_object = create_object; 358 | 359 | handlers = kafka_default_object_handlers; 360 | handlers.get_debug_info = get_debug_info; 361 | handlers.free_obj = free_object; 362 | handlers.offset = XtOffsetOf(object_intern, std); 363 | } /* }}} */ 364 | -------------------------------------------------------------------------------- /topic_partition.stub.php: -------------------------------------------------------------------------------- 1 |
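As a closing illustration of the TopicPartition API implemented in topic_partition.c and exercised in tests/offsets_for_times.phpt, here is a minimal, self-contained PHP sketch; the broker address, topic name, and timeout values are assumptions rather than values from this repository:

    <?php
    $conf = new SimpleKafkaClient\Configuration();
    $conf->set('metadata.broker.list', 'localhost:9092'); // assumed broker address

    $producer = new SimpleKafkaClient\Producer($conf);

    // The third constructor argument (the "offset" field) carries the
    // timestamp in milliseconds that offsetsForTimes() should resolve.
    $partition = new SimpleKafkaClient\TopicPartition('example-topic', 0, time() * 1000);

    foreach ($producer->offsetsForTimes([$partition], 10000) as $result) {
        printf(
            "%s[%d] => offset %d\n",
            $result->getTopicName(),
            $result->getPartition(),
            $result->getOffset()
        );
    }

Each returned element carries the earliest offset whose timestamp is at or after the requested one.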