├── .gitignore ├── .travis.yml ├── Build.PL ├── Changes ├── LICENSE ├── MANIFEST ├── META.json ├── META.yml ├── README ├── README.md ├── cpanfile ├── dist.ini ├── inc └── .gitignore ├── install_kafka.sh ├── lib ├── Kafka.pm └── Kafka │ ├── Connection.pm │ ├── Consumer.pm │ ├── Exceptions.pm │ ├── IO.pm │ ├── IO │ └── Async.pm │ ├── Int64.pm │ ├── Internals.pm │ ├── Message.pm │ ├── Producer.pm │ └── Protocol.pm ├── t ├── 00_api.t ├── 01_bits64.t ├── 02_io.t ├── 03_io_ipv6.t ├── 04_mockio.t ├── 05_protocol.t ├── 06_decode_encode.t ├── 07_cluster.t ├── 08_cluster_start.t ├── 09_connection.t ├── 10_message.t ├── 11_producer.t ├── 12_consumer.t ├── 13_leader_not_found.t ├── 14_sasl.t ├── 20_kafka_usage.t ├── 21_kafka_bench.t ├── 30_cluster_stop.t ├── 40_autocreate_topics.t ├── 41_fork.t ├── 42_nonfatal_errors.t ├── 43_competition.t ├── 44_bad_sending.t ├── 45_compression.t ├── 46_destroy_connection.t ├── 47_kafka_usage_ipv6.t ├── 48_acknowledgement.t ├── 50_debug_level.t ├── 60_sockets.t ├── 90_mock_io.t ├── 91_mock_usage.t ├── 92_mock_bench.t ├── 99_data_cleanup.t ├── data │ └── .gitignore └── lib │ └── Kafka │ ├── Cluster.pm │ ├── MockIO.pm │ ├── MockProtocol.pm │ └── TestInternals.pm ├── tools ├── .gitignore ├── benchmark_consumer.pl ├── benchmark_producer.pl ├── coverage.sh ├── profiling.pl └── profiling.sh ├── verify.sh └── xt ├── author ├── 02_fixme.t ├── 03_vars.t ├── 07_notabs.t ├── 08_PureASCII.t └── perlcritic.rc └── release ├── 01_kwalitee.t ├── 03_distribution.t ├── 04_pod.t └── 05_pod_coverage.t /.gitignore: -------------------------------------------------------------------------------- 1 | /Debian_CPANTS.txt 2 | /Makefile 3 | /blib/ 4 | /pm_to_blib 5 | /Makefile.old 6 | /crash.pl 7 | /Kafka-*.tar.gz 8 | /CPANTS_Analyse.t 9 | /MYMETA.* 10 | /vendor 11 | /kafka 12 | /perl_modules/ 13 | .build/ 14 | Kafka-* 15 | local/ 16 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: perl 2 | 3 | perl: 4 | - "5.22" 5 | 6 | jdk: 7 | - oraclejdk8 8 | 9 | env: 10 | global: 11 | - RELEASE_TESTS=1 12 | - KAFKA_BASE_DIR="${TRAVIS_BUILD_DIR}/kafka" 13 | 14 | cache: 15 | directories: 16 | - vendor 17 | - perl_modules 18 | 19 | before_install: 20 | - ./install_kafka.sh 21 | - cpanm local::lib 22 | - eval "$(perl -Mlocal::lib=${PWD}/perl_modules)" 23 | - cpanm --notest Test::Distribution 24 | - cpanm --notest Test::Kwalitee 25 | - cpanm --notest Test::Kwalitee::Extra 26 | -------------------------------------------------------------------------------- /Build.PL: -------------------------------------------------------------------------------- 1 | # This Build.PL for Kafka was generated by Dist::Zilla::Plugin::ModuleBuildTiny 0.015. 
2 | use strict; 3 | use warnings; 4 | 5 | use 5.010; 6 | use Module::Build::Tiny 0.034; 7 | Build_PL(); 8 | -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | Build.PL 2 | Changes 3 | LICENSE 4 | MANIFEST 5 | META.json 6 | META.yml 7 | README 8 | cpanfile 9 | dist.ini 10 | lib/Kafka.pm 11 | lib/Kafka/Connection.pm 12 | lib/Kafka/Consumer.pm 13 | lib/Kafka/Exceptions.pm 14 | lib/Kafka/IO.pm 15 | lib/Kafka/IO/Async.pm 16 | lib/Kafka/Int64.pm 17 | lib/Kafka/Internals.pm 18 | lib/Kafka/Message.pm 19 | lib/Kafka/Producer.pm 20 | lib/Kafka/Protocol.pm 21 | t/00_api.t 22 | t/01_bits64.t 23 | t/02_io.t 24 | t/03_io_ipv6.t 25 | t/04_mockio.t 26 | t/05_protocol.t 27 | t/06_decode_encode.t 28 | t/07_cluster.t 29 | t/08_cluster_start.t 30 | t/09_connection.t 31 | t/10_message.t 32 | t/11_producer.t 33 | t/12_consumer.t 34 | t/13_leader_not_found.t 35 | t/14_sasl.t 36 | t/20_kafka_usage.t 37 | t/21_kafka_bench.t 38 | t/30_cluster_stop.t 39 | t/40_autocreate_topics.t 40 | t/41_fork.t 41 | t/42_nonfatal_errors.t 42 | t/43_competition.t 43 | t/44_bad_sending.t 44 | t/45_compression.t 45 | t/46_destroy_connection.t 46 | t/47_kafka_usage_ipv6.t 47 | t/48_acknowledgement.t 48 | t/50_debug_level.t 49 | t/60_sockets.t 50 | t/90_mock_io.t 51 | t/91_mock_usage.t 52 | t/92_mock_bench.t 53 | t/99_data_cleanup.t 54 | xt/author/02_fixme.t 55 | xt/author/03_vars.t 56 | xt/author/07_notabs.t 57 | xt/author/08_PureASCII.t 58 | xt/author/perlcritic.rc 59 | t/lib/Kafka/Cluster.pm 60 | t/lib/Kafka/MockIO.pm 61 | t/lib/Kafka/MockProtocol.pm 62 | t/lib/Kafka/TestInternals.pm 63 | xt/release/01_kwalitee.t 64 | xt/release/03_distribution.t 65 | xt/release/04_pod.t 66 | xt/release/05_pod_coverage.t 67 | tools/benchmark_consumer.pl 68 | tools/benchmark_producer.pl 69 | tools/coverage.sh 70 | tools/profiling.pl 71 | tools/profiling.sh 72 | -------------------------------------------------------------------------------- /META.json: -------------------------------------------------------------------------------- 1 | { 2 | "abstract" : "Apache Kafka low-level synchronous API, which does not use Zookeeper.", 3 | "author" : [ 4 | "Sergey Gladkov" 5 | ], 6 | "dynamic_config" : 0, 7 | "generated_by" : "Dist::Milla version v1.0.20, Dist::Zilla version 6.014, CPAN::Meta::Converter version 2.150010", 8 | "license" : [ 9 | "perl_5" 10 | ], 11 | "meta-spec" : { 12 | "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", 13 | "version" : 2 14 | }, 15 | "name" : "Kafka", 16 | "no_index" : { 17 | "directory" : [ 18 | "eg", 19 | "examples", 20 | "inc", 21 | "share", 22 | "t", 23 | "xt" 24 | ] 25 | }, 26 | "prereqs" : { 27 | "configure" : { 28 | "requires" : { 29 | "Module::Build::Tiny" : "0.034", 30 | "perl" : "5.010" 31 | } 32 | }, 33 | "develop" : { 34 | "requires" : { 35 | "Dist::Milla" : "v1.0.20", 36 | "Test::EOL" : "0", 37 | "Test::MinimumVersion" : "0", 38 | "Test::More" : "0.88", 39 | "Test::Perl::Critic" : "0", 40 | "Test::Pod" : "1.41", 41 | "Test::Version" : "1" 42 | } 43 | }, 44 | "runtime" : { 45 | "requires" : { 46 | "AnyEvent" : "7.17", 47 | "Authen::SCRAM" : "0", 48 | "Carp" : "1.08", 49 | "Compress::LZ4Frame" : "0.012001", 50 | "Compress::Snappy" : "0.23", 51 | "Const::Fast" : "0.014", 52 | "Data::Compare" : "1.24", 53 | "Data::HexDump::Range" : "0", 54 | "Data::Validate::Domain" : "0.11", 55 | "Data::Validate::IP" : "0.24", 56 | "Exception::Class" : "1.37", 57 | "Exporter" : "5.62", 58 | 
"Gzip::Faster" : "0.19", 59 | "IO::Select" : "1.17", 60 | "List::Util" : "1.19", 61 | "Params::Util" : "1.07", 62 | "Scalar::Util" : "1.19", 63 | "Scalar::Util::Numeric" : "0.22", 64 | "Socket" : "1.80", 65 | "String::CRC32" : "1.5", 66 | "Sys::SigAction" : "0.21", 67 | "Time::HiRes" : "1.9711", 68 | "Try::Tiny" : "0.21", 69 | "perl" : "5.010" 70 | } 71 | }, 72 | "test" : { 73 | "requires" : { 74 | "Capture::Tiny" : "0.24", 75 | "Carp" : "1.08", 76 | "Clone" : "0.36", 77 | "Config::IniFiles" : "2.83", 78 | "Const::Fast" : "0.014", 79 | "Cwd" : "3.2501", 80 | "Data::Dumper" : "2.121_14", 81 | "Exporter" : "5.62", 82 | "File::HomeDir" : "1.00", 83 | "File::Path" : "2.04", 84 | "File::Spec" : "3.2501", 85 | "File::Spec::Functions" : "3.2501", 86 | "IO::File" : "1.14", 87 | "IO::Socket::INET" : "1.31", 88 | "IO::Socket::IP" : "0.37", 89 | "List::Util" : "1.19", 90 | "Net::EmptyPort" : "0", 91 | "Params::Util" : "1.07", 92 | "Proc::Daemon" : "0.14", 93 | "Proc::ProcessTable" : "0.51", 94 | "Scalar::Util" : "1.19", 95 | "Scalar::Util::Numeric" : "0.22", 96 | "Socket" : "1.80", 97 | "Sub::Install" : "0.927", 98 | "Sys::SigAction" : "0.21", 99 | "Test::Deep" : "0.119", 100 | "Test::Dist::VersionSync" : "0", 101 | "Test::Distribution" : "0", 102 | "Test::EOL" : "0", 103 | "Test::Exception" : "0.32", 104 | "Test::Fixme" : "0", 105 | "Test::Kwalitee" : "0", 106 | "Test::Kwalitee::Extra" : "0", 107 | "Test::MinimumVersion" : "0", 108 | "Test::Mojibake" : "0", 109 | "Test::More" : "0.72", 110 | "Test::NoTabs" : "0", 111 | "Test::NoWarnings" : "1.04", 112 | "Test::Perl::Critic" : "0", 113 | "Test::PureASCII" : "0", 114 | "Test::Spelling" : "0", 115 | "Test::Strict" : "0", 116 | "Test::Synopsis" : "0", 117 | "Test::TCP" : "2.08", 118 | "Test::Vars" : "0", 119 | "Test::Version" : "0", 120 | "Time::HiRes" : "1.9711", 121 | "Try::Tiny" : "0.21", 122 | "perl" : "5.010" 123 | } 124 | } 125 | }, 126 | "provides" : { 127 | "Kafka" : { 128 | "file" : "lib/Kafka.pm", 129 | "version" : "1.08" 130 | }, 131 | "Kafka::Connection" : { 132 | "file" : "lib/Kafka/Connection.pm", 133 | "version" : "1.08" 134 | }, 135 | "Kafka::Consumer" : { 136 | "file" : "lib/Kafka/Consumer.pm", 137 | "version" : "1.08" 138 | }, 139 | "Kafka::Exceptions" : { 140 | "file" : "lib/Kafka/Exceptions.pm", 141 | "version" : "1.08" 142 | }, 143 | "Kafka::IO" : { 144 | "file" : "lib/Kafka/IO.pm", 145 | "version" : "1.08" 146 | }, 147 | "Kafka::IO::Async" : { 148 | "file" : "lib/Kafka/IO/Async.pm", 149 | "version" : "1.08" 150 | }, 151 | "Kafka::Int64" : { 152 | "file" : "lib/Kafka/Int64.pm", 153 | "version" : "1.08" 154 | }, 155 | "Kafka::Internals" : { 156 | "file" : "lib/Kafka/Internals.pm", 157 | "version" : "1.08" 158 | }, 159 | "Kafka::Message" : { 160 | "file" : "lib/Kafka/Message.pm", 161 | "version" : "1.08" 162 | }, 163 | "Kafka::Producer" : { 164 | "file" : "lib/Kafka/Producer.pm", 165 | "version" : "1.08" 166 | }, 167 | "Kafka::Protocol" : { 168 | "file" : "lib/Kafka/Protocol.pm", 169 | "version" : "1.08" 170 | } 171 | }, 172 | "release_status" : "stable", 173 | "resources" : { 174 | "bugtracker" : { 175 | "web" : "https://github.com/TrackingSoft/Kafka/issues" 176 | }, 177 | "homepage" : "https://github.com/TrackingSoft/Kafka", 178 | "repository" : { 179 | "type" : "git", 180 | "url" : "https://github.com/TrackingSoft/Kafka.git", 181 | "web" : "https://github.com/TrackingSoft/Kafka" 182 | } 183 | }, 184 | "version" : "1.08", 185 | "x_contributors" : [ 186 | "Aleksandr Matveev", 187 | "Alexander Kazakov", 188 | "Alex Solovey", 189 | 
"Damien Krotkine", 190 | "Dan Book", 191 | "David Schmidt", 192 | "Filimonov", 193 | "Nikolay Shulyakovskiy", 194 | "Petr Plavjanik", 195 | "Sergey Gladkov", 196 | "Sergiy Zuban", 197 | "Vlad Marchenko" 198 | ], 199 | "x_generated_by_perl" : "v5.28.2", 200 | "x_serialization_backend" : "JSON::PP version 2.97001", 201 | "x_spdx_expression" : "Artistic-1.0-Perl OR GPL-1.0-or-later", 202 | "x_static_install" : 1 203 | } 204 | 205 | -------------------------------------------------------------------------------- /META.yml: -------------------------------------------------------------------------------- 1 | --- 2 | abstract: 'Apache Kafka low-level synchronous API, which does not use Zookeeper.' 3 | author: 4 | - 'Sergey Gladkov' 5 | build_requires: 6 | Capture::Tiny: '0.24' 7 | Carp: '1.08' 8 | Clone: '0.36' 9 | Config::IniFiles: '2.83' 10 | Const::Fast: '0.014' 11 | Cwd: '3.2501' 12 | Data::Dumper: 2.121_14 13 | Exporter: '5.62' 14 | File::HomeDir: '1.00' 15 | File::Path: '2.04' 16 | File::Spec: '3.2501' 17 | File::Spec::Functions: '3.2501' 18 | IO::File: '1.14' 19 | IO::Socket::INET: '1.31' 20 | IO::Socket::IP: '0.37' 21 | List::Util: '1.19' 22 | Net::EmptyPort: '0' 23 | Params::Util: '1.07' 24 | Proc::Daemon: '0.14' 25 | Proc::ProcessTable: '0.51' 26 | Scalar::Util: '1.19' 27 | Scalar::Util::Numeric: '0.22' 28 | Socket: '1.80' 29 | Sub::Install: '0.927' 30 | Sys::SigAction: '0.21' 31 | Test::Deep: '0.119' 32 | Test::Dist::VersionSync: '0' 33 | Test::Distribution: '0' 34 | Test::EOL: '0' 35 | Test::Exception: '0.32' 36 | Test::Fixme: '0' 37 | Test::Kwalitee: '0' 38 | Test::Kwalitee::Extra: '0' 39 | Test::MinimumVersion: '0' 40 | Test::Mojibake: '0' 41 | Test::More: '0.72' 42 | Test::NoTabs: '0' 43 | Test::NoWarnings: '1.04' 44 | Test::Perl::Critic: '0' 45 | Test::PureASCII: '0' 46 | Test::Spelling: '0' 47 | Test::Strict: '0' 48 | Test::Synopsis: '0' 49 | Test::TCP: '2.08' 50 | Test::Vars: '0' 51 | Test::Version: '0' 52 | Time::HiRes: '1.9711' 53 | Try::Tiny: '0.21' 54 | perl: '5.010' 55 | configure_requires: 56 | Module::Build::Tiny: '0.034' 57 | perl: '5.010' 58 | dynamic_config: 0 59 | generated_by: 'Dist::Milla version v1.0.20, Dist::Zilla version 6.015, CPAN::Meta::Converter version 2.150010' 60 | license: perl 61 | meta-spec: 62 | url: http://module-build.sourceforge.net/META-spec-v1.4.html 63 | version: '1.4' 64 | name: Kafka 65 | no_index: 66 | directory: 67 | - eg 68 | - examples 69 | - inc 70 | - share 71 | - t 72 | - xt 73 | provides: 74 | Kafka: 75 | file: lib/Kafka.pm 76 | version: '1.08' 77 | Kafka::Connection: 78 | file: lib/Kafka/Connection.pm 79 | version: '1.08' 80 | Kafka::Consumer: 81 | file: lib/Kafka/Consumer.pm 82 | version: '1.08' 83 | Kafka::Exceptions: 84 | file: lib/Kafka/Exceptions.pm 85 | version: '1.08' 86 | Kafka::IO: 87 | file: lib/Kafka/IO.pm 88 | version: '1.08' 89 | Kafka::IO::Async: 90 | file: lib/Kafka/IO/Async.pm 91 | version: '1.08' 92 | Kafka::Int64: 93 | file: lib/Kafka/Int64.pm 94 | version: '1.08' 95 | Kafka::Internals: 96 | file: lib/Kafka/Internals.pm 97 | version: '1.08' 98 | Kafka::Message: 99 | file: lib/Kafka/Message.pm 100 | version: '1.08' 101 | Kafka::Producer: 102 | file: lib/Kafka/Producer.pm 103 | version: '1.08' 104 | Kafka::Protocol: 105 | file: lib/Kafka/Protocol.pm 106 | version: '1.08' 107 | requires: 108 | AnyEvent: '7.17' 109 | Authen::SCRAM: '0' 110 | Carp: '1.08' 111 | Compress::LZ4Frame: '0.012001' 112 | Compress::Snappy: '0.23' 113 | Const::Fast: '0.014' 114 | Data::Compare: '1.24' 115 | Data::HexDump::Range: '0' 116 | 
Data::Validate::Domain: '0.11'
117 |   Data::Validate::IP: '0.24'
118 |   Exception::Class: '1.37'
119 |   Exporter: '5.62'
120 |   Gzip::Faster: '0.19'
121 |   IO::Select: '1.17'
122 |   List::Util: '1.19'
123 |   Params::Util: '1.07'
124 |   Scalar::Util: '1.19'
125 |   Scalar::Util::Numeric: '0.22'
126 |   Socket: '1.80'
127 |   String::CRC32: '1.5'
128 |   Sys::SigAction: '0.21'
129 |   Time::HiRes: '1.9711'
130 |   Try::Tiny: '0.21'
131 |   perl: '5.010'
132 | resources:
133 |   bugtracker: https://github.com/TrackingSoft/Kafka/issues
134 |   homepage: https://github.com/TrackingSoft/Kafka
135 |   repository: https://github.com/TrackingSoft/Kafka.git
136 | version: '1.08'
137 | 
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | Kafka package version 1.08
2 | ==============================
3 | 
4 | The Kafka package is a set of Perl modules which provides a
5 | simple and consistent application programming interface (API) to
6 | Apache Kafka 0.9+, a high-throughput distributed messaging system.
7 | The main focus of the package is to provide classes
8 | and functions that allow you to write Apache Kafka clients.
9 | 
10 | 
11 | The Kafka package was written, tested, and found working on
12 | recent Linux distributions.
13 | 
14 | INSTALLATION
15 | 
16 | To install this package type the following:
17 | 
18 |    perl Build.PL
19 |    ./Build
20 |    ./Build test
21 |    ./Build install
22 | 
23 | DEPENDENCIES
24 | 
25 | In order to install and use this package you will need Perl version
26 | 5.010 or higher. Some modules within this package depend on other
27 | packages that are distributed separately from Perl. We recommend that
28 | you have the following packages installed before you install
29 | Kafka:
30 | 
31 |    Authen::SCRAM::Client
32 |    Carp
33 |    Compress::Snappy
34 |    Const::Fast
35 |    Data::Compare
36 |    Data::HexDump::Range
37 |    Data::Validate::Domain
38 |    Data::Validate::IP
39 |    Exception::Class
40 |    Exporter
41 |    Gzip::Faster
42 |    IO::Select
43 |    List::Util
44 |    Params::Util
45 |    Scalar::Util
46 |    Scalar::Util::Numeric
47 |    Socket
48 |    String::CRC32
49 |    Sys::SigAction
50 |    Time::HiRes
51 |    Try::Tiny
52 | 
53 | The Kafka package has the following optional dependencies:
54 | 
55 |    Capture::Tiny
56 |    Carp
57 |    Clone
58 |    Config::IniFiles
59 |    Const::Fast
60 |    Cwd
61 |    Data::Dumper
62 |    Exporter
63 |    File::HomeDir
64 |    File::Path
65 |    File::Spec
66 |    File::Spec::Functions
67 |    IO::File
68 |    IO::Socket::INET
69 |    IO::Socket::IP
70 |    List::Util
71 |    Net::EmptyPort
72 |    Params::Util
73 |    Proc::Daemon
74 |    Scalar::Util
75 |    Scalar::Util::Numeric
76 |    Socket
77 |    Sub::Install
78 |    Sys::SigAction
79 |    Test::Deep
80 |    Test::Exception
81 |    Test::Fixme
82 |    Test::More
83 |    Test::NoWarnings
84 |    Test::TCP
85 |    Time::HiRes
86 |    Try::Tiny
87 | 
88 | If the optional modules are missing, some "prereq" tests are skipped.
89 | 
90 | The missing dependencies can be installed either through your OS
91 | package manager or through CPAN (or by downloading the source for
92 | all dependencies and compiling them manually).
93 | 
94 | TESTING
95 | 
96 | The following environment variable changes the way the Kafka benchmark tests behave:
97 | 
98 |    # The root directory of the Kafka 0.9+ installation
99 |    KAFKA_BASE_DIR
100 | 
101 | SIMPLE BENCHMARK
102 | 
103 | Use tools/benchmark_consumer.pl and tools/benchmark_producer.pl to run a simple
104 | benchmark.
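At its core each benchmark script simply times repeated requests against a
running broker. The following stripped-down sketch of the producer side
illustrates the idea (this is not the actual tools/ code; the broker address,
the topic 'mytopic', partition 0 and the message count are assumptions made
for the sketch):

   use 5.010;
   use strict;
   use warnings;

   use Time::HiRes ();
   use Kafka::Connection;
   use Kafka::Producer;

   my $connection = Kafka::Connection->new( host => 'localhost' );
   my $producer   = Kafka::Producer->new( Connection => $connection );

   # send $count messages and report the observed rate
   my $count = 1_000;
   my $start = Time::HiRes::time();
   $producer->send( 'mytopic', 0, "message $_" ) for 1 .. $count;
   printf "%.1f messages/sec\n", $count / ( Time::HiRes::time() - $start );

   $connection->close;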
105 | Run one or more consumer or producer instances at the same time to measure
106 | behaviour under concurrent load.
107 | 
108 | DOCUMENTATION
109 | 
110 | Every Kafka package module has a man page. A simple description of each module is
111 | included below in the SEE ALSO section.
112 | 
113 | SEE ALSO
114 | 
115 | The basic operation of the Kafka package modules:
116 | 
117 |    Kafka - constants and messages used by the Kafka package.
118 |    Kafka::Connection - API to connect to a Kafka cluster.
119 |    Kafka::Producer - interface to the 'producer' client.
120 |    Kafka::Consumer - interface to the 'consumer' client.
121 |    Kafka::Message - interface to the Kafka message properties.
122 |    Kafka::Int64 - functions to work with 64 bit elements of the
123 |       protocol on 32 bit systems.
124 |    Kafka::Protocol - functions to process messages in the
125 |       Apache Kafka's Protocol.
126 |    Kafka::IO - low-level communications with the Apache Kafka server.
127 |    Kafka::Exceptions - module designed to handle Kafka exceptions.
128 |    Kafka::Internals - internal constants and functions used
129 |       by several package modules.
130 | 
131 | COPYRIGHT AND LICENSE
132 | 
133 | Copyright (C) 2012-2017 by TrackingSoft LLC.
134 | 
135 | This package is free software; you can redistribute it and/or modify it under
136 | the same terms as Perl itself. See perlartistic at
137 | http://dev.perl.org/licenses/artistic.html .
138 | 
139 | This program is distributed in the hope that it will be useful,
140 | but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
141 | or FITNESS FOR A PARTICULAR PURPOSE.
142 | 
--------------------------------------------------------------------------------
/cpanfile:
--------------------------------------------------------------------------------
1 | requires 'AnyEvent' => '7.17';
2 | requires 'Authen::SCRAM' => '0';
3 | requires 'Carp' => '1.08';
4 | requires 'Compress::Snappy' => '0.23';
5 | requires 'Compress::LZ4Frame' => '0.012001';
6 | requires 'Const::Fast' => '0.014';
7 | requires 'Data::Compare' => '1.24';
8 | requires 'Data::HexDump::Range' => 0;
9 | requires 'Data::Validate::Domain' => '0.11';
10 | requires 'Data::Validate::IP' => '0.24';
11 | requires 'Exception::Class' => '1.37';
12 | requires 'Exporter' => '5.62';
13 | requires 'Gzip::Faster' => '0.19';
14 | requires 'IO::Select' => '1.17';
15 | requires 'List::Util' => '1.19';
16 | requires 'Params::Util' => '1.07';
17 | requires 'Scalar::Util' => '1.19';
18 | requires 'Scalar::Util::Numeric' => '0.22';
19 | requires 'Socket' => '1.80';
20 | requires 'String::CRC32' => '1.5';
21 | requires 'Sys::SigAction' => '0.21';
22 | requires 'Time::HiRes' => '1.9711';
23 | requires 'Try::Tiny' => '0.21';
24 | 
25 | on test => sub {
26 |     requires 'Capture::Tiny' => '0.24';
27 |     requires 'Carp' => '1.08';
28 |     requires 'Clone' => '0.36';
29 |     requires 'Config::IniFiles' => '2.83';
30 |     requires 'Const::Fast' => '0.014';
31 |     requires 'Cwd' => '3.2501';
32 |     requires 'Data::Dumper' => '2.121_14';
33 |     requires 'Exporter' => '5.62';
34 |     requires 'File::HomeDir' => '1.00';
35 |     requires 'File::Path' => '2.04';
36 |     requires 'File::Spec' => '3.2501';
37 |     requires 'File::Spec::Functions' => '3.2501';
38 |     requires 'IO::File' => '1.14';
39 |     requires 'IO::Socket::INET' => '1.31';
40 |     requires 'IO::Socket::IP' => '0.37';
41 |     requires 'List::Util' => '1.19';
42 |     requires 'Net::EmptyPort' => 0; # has no version, so any version will do
43 |     requires 'Params::Util' => '1.07';
44 |     requires 'Proc::Daemon' => '0.14';
45 | 
requires 'Proc::ProcessTable' => '0.51'; 46 | requires 'Scalar::Util' => '1.19'; 47 | requires 'Scalar::Util::Numeric' => '0.22'; 48 | requires 'Socket' => '1.80'; 49 | requires 'Sub::Install' => '0.927'; 50 | requires 'Sys::SigAction' => '0.21'; 51 | requires 'Test::Deep' => '0.119'; 52 | requires 'Test::Exception' => '0.32'; 53 | requires 'Test::More' => '0.72'; 54 | requires 'Test::NoWarnings' => '1.04'; 55 | requires 'Test::Perl::Critic' => 0; 56 | requires 'Test::Fixme' => 0; 57 | requires 'Test::Vars' => 0; 58 | requires 'Test::Version' => 0; 59 | requires 'Test::Dist::VersionSync' => 0; 60 | requires 'Test::Synopsis' => 0; 61 | requires 'Test::Strict' => 0; 62 | requires 'Test::Spelling' => 0; 63 | requires 'Test::NoTabs' => 0; 64 | requires 'Test::Mojibake' => 0; 65 | requires 'Test::MinimumVersion' => 0; 66 | requires 'Test::EOL' => 0; 67 | requires 'Test::PureASCII' => 0; 68 | requires 'Test::Kwalitee::Extra' => 0; 69 | requires 'Test::Distribution' => 0; 70 | requires 'Test::Kwalitee' => 0; 71 | requires 'Test::TCP' => '2.08'; 72 | requires 'Time::HiRes' => '1.9711'; 73 | requires 'Try::Tiny' => '0.21'; 74 | }; 75 | -------------------------------------------------------------------------------- /dist.ini: -------------------------------------------------------------------------------- 1 | [MinimumPerl] 2 | [MetaProvides::Package] 3 | [Test::Version] 4 | is_strict = 1 5 | has_version = 1 6 | [Test::EOL] 7 | [Test::Perl::Critic] 8 | [Test::MinimumVersion] 9 | [Test::Kwalitee::Extra] 10 | arg = :core 11 | arg = :optional 12 | arg = :experimental 13 | arg = !has_separate_license_file 14 | arg = !prereq_matches_use 15 | arg = !build_prereq_matches_use 16 | [@Milla] 17 | -------------------------------------------------------------------------------- /inc/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrackingSoft/Kafka/e184f27f9900fde8cd6608ba6aec89c7b28ac2a1/inc/.gitignore -------------------------------------------------------------------------------- /install_kafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | MIRROR=https://archive.apache.org/dist/kafka/ 5 | 6 | VERSION=${1:-0.10.2.2} 7 | 8 | if [[ $VERSION == "1.0.0" ]]; then 9 | DIST="kafka_2.11-${VERSION}.tgz" 10 | elif [[ $VERSION == "0.11.0.2" ]]; then 11 | DIST="kafka_2.11-${VERSION}.tgz" 12 | elif [[ $VERSION == "0.10.2.2" ]]; then 13 | DIST="kafka_2.12-${VERSION}.tgz" 14 | elif [[ $VERSION == "0.9.0.1" ]]; then 15 | DIST="kafka_2.11-${VERSION}.tgz" 16 | else 17 | >&2 echo "ERROR: unknown version '${VERSION}'" 18 | exit 1 19 | fi 20 | 21 | if [[ ! -d vendor ]]; then 22 | mkdir -p vendor 23 | fi 24 | 25 | SOURCE="vendor/${DIST}" 26 | 27 | if [[ ! -e $SOURCE ]]; then 28 | if ! 
wget -O $SOURCE "${MIRROR}/${VERSION}/${DIST}" ; then 29 | rm $SOURCE 30 | exit 1 31 | fi 32 | fi 33 | 34 | if [[ -e kafka ]]; then 35 | rm -r kafka 36 | fi 37 | 38 | mkdir -p kafka 39 | tar xzf $SOURCE -C kafka --strip-components 1 40 | 41 | # fix java options issue: https://stackoverflow.com/questions/36970622/kafka-unrecognized-vm-option-printgcdatestamps 42 | find kafka -name \*.sh -exec sed -i'' 's/-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps//g' {} \; 43 | -------------------------------------------------------------------------------- /lib/Kafka/Exceptions.pm: -------------------------------------------------------------------------------- 1 | package Kafka::Exceptions; 2 | 3 | =head1 NAME 4 | 5 | Kafka::Exceptions - Perl Kafka API exception definitions. 6 | 7 | =head1 VERSION 8 | 9 | This documentation refers to C version 1.08 . 10 | 11 | =cut 12 | 13 | 14 | 15 | use 5.010; 16 | use strict; 17 | use warnings; 18 | 19 | 20 | 21 | our $DEBUG = 0; 22 | 23 | our $VERSION = 'v1.08'; 24 | 25 | use Exporter qw( 26 | import 27 | ); 28 | our @EXPORT = qw( 29 | throw_args 30 | ); 31 | 32 | 33 | 34 | use Exception::Class ( 35 | 'Kafka::Exception' => { 36 | fields => [ 'code', 'message' ], 37 | }, 38 | 'Kafka::Exception::Connection' => { 39 | isa => 'Kafka::Exception', 40 | fields => [ 'request', 'response', 'io_error' ], 41 | }, 42 | 'Kafka::Exception::Consumer' => { 43 | isa => 'Kafka::Exception', 44 | }, 45 | 'Kafka::Exception::Int64' => { 46 | isa => 'Kafka::Exception', 47 | }, 48 | 'Kafka::Exception::IO' => { 49 | fields => [ 'errno' ], 50 | isa => 'Kafka::Exception', 51 | }, 52 | 'Kafka::Exception::Producer' => { 53 | isa => 'Kafka::Exception', 54 | }, 55 | 'Kafka::Exception::Protocol' => { 56 | isa => 'Kafka::Exception', 57 | }, 58 | ); 59 | 60 | use Kafka qw( 61 | %ERROR 62 | ); 63 | use Kafka::Internals qw( 64 | format_message 65 | ); 66 | 67 | Kafka::Exception->Trace(1); # include stack traces 68 | 69 | 70 | =head1 SYNOPSIS 71 | 72 | use 5.010; 73 | use strict; 74 | use warnings; 75 | 76 | use Scalar::Util qw( 77 | blessed 78 | ); 79 | use Try::Tiny; 80 | 81 | # A simple example of Kafka::Connection usage: 82 | use Kafka::Connection; 83 | 84 | # connect to local cluster with the defaults 85 | my $connection; 86 | try { 87 | $connection = Kafka::Connection->new( host => 'localhost' ); 88 | } catch { 89 | my $error = $_; 90 | if ( blessed( $error ) && $error->isa( 'Kafka::Exception' ) ) { 91 | if ( $error->isa( 'Kafka::Exception::Connection' ) ) { 92 | # Specific treatment for 'Kafka::Connection' class error 93 | } elsif ( $error->isa( 'Kafka::Exception::IO' ) ) { 94 | # Specific treatment for 'Kafka::IO' class error 95 | } 96 | warn ref( $error ), " error:\n", $error->message, "\n", $error->trace->as_string, "\n"; 97 | exit; 98 | } else { 99 | die $error; 100 | } 101 | }; 102 | 103 | # Closes the connection and cleans up 104 | $connection->close; 105 | undef $connection; 106 | 107 | =head1 DESCRIPTION 108 | 109 | The purpose of the C module is: 110 | 111 | =over 3 112 | 113 | =item * 114 | 115 | Declare a Kafka API exceptions hierarchy. 116 | 117 | =item * 118 | 119 | Provide additional methods for working with exceptions. 120 | 121 | =back 122 | 123 | It is designed to make exception handling structured, simpler and better by encouraging use 124 | of hierarchy of exceptions in application (vs single catch-all exception class). 
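For example, this is how code can raise a typed error using the exported
C<throw_args> helper together with an error code from L<Kafka|Kafka> (a
schematic sketch only - the chosen exception class, error code and
description are placeholders):

    use Kafka qw( $ERROR_MISMATCH_ARGUMENT );
    use Kafka::Exceptions;

    Kafka::Exception::Producer->throw(
        throw_args( $ERROR_MISMATCH_ARGUMENT, 'description of the problem' )
    );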
125 | 
126 | The following additional attributes are available in C<Kafka::Exception> and its subclasses:
127 | 
128 | =over 3
129 | 
130 | =item C<code>
131 | 
132 | An error code that references an error in the C<%Kafka::ERROR> hash.
133 | 
134 | =item C<message>
135 | 
136 | An error message that contains information about the encountered failure.
137 | This message may contain additional details which are not provided by the C<%Kafka::ERROR> hash.
138 | 
139 | =back
140 | 
141 | Exception objects provide accessor methods for these attributes. Attributes are inherited by
142 | subclasses.
143 | 
144 | Various Kafka API modules throw exception objects of a C<Kafka::Exception> subclass specific
145 | to that module:
146 | 
147 | =over 3
148 | 
149 | =item C<Kafka::Exception::Connection>
150 | 
151 | See L<Kafka::Connection|Kafka::Connection> methods.
152 | 
153 | =item C<Kafka::Exception::Consumer>
154 | 
155 | See L<Kafka::Consumer|Kafka::Consumer> methods.
156 | 
157 | =item C<Kafka::Exception::Int64>
158 | 
159 | See L<Kafka::Int64|Kafka::Int64> methods.
160 | 
161 | =item C<Kafka::Exception::IO>
162 | 
163 | See L<Kafka::IO|Kafka::IO> methods.
164 | 
165 | =item C<Kafka::Exception::Producer>
166 | 
167 | See L<Kafka::Producer|Kafka::Producer> methods.
168 | 
169 | =item C<Kafka::Exception::Protocol>
170 | 
171 | See L<Kafka::Protocol|Kafka::Protocol> methods.
172 | 
173 | =back
174 | 
175 | Authors suggest using L<Try::Tiny|Try::Tiny>'s C<try> and C<catch> to handle exceptions while
176 | working with the L<Kafka|Kafka> package.
177 | 
178 | You may also want to review the documentation of L<Exception::Class|Exception::Class>,
179 | which is the default base class for all exception objects created by this module.
180 | 
181 | =cut
182 | 
183 | #-- constructor ----------------------------------------------------------------
184 | 
185 | #-- public attributes ----------------------------------------------------------
186 | 
187 | =head2 FUNCTIONS
188 | 
189 | The following functions are exported by the C<Kafka::Exceptions> module:
190 | 
191 | =cut
192 | 
193 | =head3 C<throw_args( $error_code, $description )>
194 | 
195 | Converts its arguments into the C<Kafka::Exception> constructor attributes C<code> and C<message>.
196 | 
197 | C<throw_args> accepts the following arguments:
198 | 
199 | =over 3
200 | 
201 | =item C<$error_code>
202 | 
203 | The code of the last error.
204 | The code must match the error codes defined in the module L<Kafka|Kafka>.
205 | 
206 | =item C<$description>
207 | 
208 | An additional error description that contains information about the encountered problem.
209 | 
210 | =back
211 | 
212 | =cut
213 | sub throw_args {
214 |     my $error_code  = shift;
215 |     my $description = shift;
216 | 
217 |     return (
218 |         code    => $error_code,
219 |         message => format_message( '%s%s', $ERROR{ $error_code }, $description ? ": $description" : '' ),
220 |         @_,
221 |     );
222 | }
223 | 
224 | #-- private attributes ---------------------------------------------------------
225 | 
226 | #-- private methods ------------------------------------------------------------
227 | 
228 | 
229 | 
230 | 1;
231 | 
232 | __END__
233 | 
234 | =head1 SEE ALSO
235 | 
236 | The basic operation of the Kafka package modules:
237 | 
238 | L<Kafka|Kafka> - constants and messages used by the Kafka package modules.
239 | 
240 | L<Kafka::Connection|Kafka::Connection> - interface to connect to a Kafka cluster.
241 | 
242 | L<Kafka::Producer|Kafka::Producer> - interface for the producing client.
243 | 
244 | L<Kafka::Consumer|Kafka::Consumer> - interface for the consuming client.
245 | 
246 | L<Kafka::Message|Kafka::Message> - interface to access Kafka message
247 | properties.
248 | 
249 | L<Kafka::Int64|Kafka::Int64> - functions to work with 64 bit elements of the
250 | protocol on 32 bit systems.
251 | 
252 | L<Kafka::Protocol|Kafka::Protocol> - functions to process messages in
253 | Apache Kafka's protocol.
254 | 
255 | L<Kafka::IO|Kafka::IO> - low-level interface for communication with the Kafka server.
256 | 
257 | L<Kafka::Exceptions|Kafka::Exceptions> - module designed to handle Kafka exceptions.
258 | 
259 | L<Kafka::Internals|Kafka::Internals> - internal constants and functions used
260 | by several package modules.
261 | 262 | A wealth of detail about the Apache Kafka and the Kafka Protocol: 263 | 264 | Main page at L 265 | 266 | Kafka Protocol at L 267 | 268 | =head1 SOURCE CODE 269 | 270 | Kafka package is hosted on GitHub: 271 | L 272 | 273 | =head1 AUTHOR 274 | 275 | Sergey Gladkov 276 | 277 | Please use GitHub project link above to report problems or contact authors. 278 | 279 | =head1 CONTRIBUTORS 280 | 281 | Alexander Solovey 282 | 283 | Jeremy Jordan 284 | 285 | Sergiy Zuban 286 | 287 | Vlad Marchenko 288 | 289 | =head1 COPYRIGHT AND LICENSE 290 | 291 | Copyright (C) 2012-2017 by TrackingSoft LLC. 292 | 293 | This package is free software; you can redistribute it and/or modify it under 294 | the same terms as Perl itself. See I at 295 | L. 296 | 297 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; 298 | without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 299 | PARTICULAR PURPOSE. 300 | 301 | =cut 302 | -------------------------------------------------------------------------------- /lib/Kafka/Int64.pm: -------------------------------------------------------------------------------- 1 | package Kafka::Int64; 2 | 3 | =head1 NAME 4 | 5 | Kafka::Int64 - Functions to work with 64 bit elements of the 6 | protocol on 32 bit systems. 7 | 8 | =head1 VERSION 9 | 10 | This documentation refers to C version 1.08 . 11 | 12 | =cut 13 | 14 | 15 | 16 | use 5.010; 17 | use strict; 18 | use warnings; 19 | 20 | use bigint; # this allows integers of practially any size at the cost of significant performance drop 21 | 22 | our $VERSION = 'v1.08'; 23 | 24 | use Exporter qw( 25 | import 26 | ); 27 | 28 | our @EXPORT_OK = qw( 29 | intsum 30 | packq 31 | unpackq 32 | ); 33 | 34 | 35 | 36 | use Carp; 37 | 38 | use Kafka qw( 39 | %ERROR 40 | $ERROR_MISMATCH_ARGUMENT 41 | ); 42 | use Kafka::Exceptions; 43 | 44 | 45 | 46 | =head1 SYNOPSIS 47 | 48 | use 5.010; 49 | use strict; 50 | use warnings; 51 | 52 | use Scalar::Util qw( 53 | blessed 54 | ); 55 | use Try::Tiny; 56 | 57 | use Kafka qw( 58 | $BITS64 59 | ); 60 | 61 | try { 62 | 63 | # Apache Kafka Protocol: FetchOffset, Time 64 | 65 | my $offset = 123; 66 | 67 | my $encoded = $BITS64 68 | ? pack( 'q>', $offset ) 69 | : Kafka::Int64::packq( $offset ); 70 | 71 | my $response = chr( 0 ) x 8; 72 | 73 | $offset = $BITS64 74 | ? unpack( 'q>', substr( $response, 0, 8 ) ) 75 | : Kafka::Int64::unpackq( substr( $response, 0, 8 ) ); 76 | 77 | my $next_offset = $BITS64 78 | ? $offset + 1 79 | : Kafka::Int64::intsum( $offset, 1 ); 80 | 81 | } catch { 82 | my $error = $_; 83 | if ( blessed( $error ) && $error->isa( 'Kafka::Exception' ) ) { 84 | warn 'Error: (', $error->code, ') ', $error->message, "\n"; 85 | exit; 86 | } else { 87 | die $error; 88 | } 89 | }; 90 | 91 | =head1 DESCRIPTION 92 | 93 | This module is not intended to be used by end user. 94 | 95 | In order to achieve better performance, functions of this module do not perform 96 | validation of arguments. 97 | 98 | Transparent L support on 32-bit platforms where native 99 | integer type is limited to 32 bits and slow bigint must be used instead. 100 | Use L from this module in such case. 101 | 102 | The main features of the C module are: 103 | 104 | =over 3 105 | 106 | =item * 107 | 108 | Support for working with 64 bit elements of the Kafka protocol 109 | on 32 bit systems. 
110 | 
111 | =back
112 | 
113 | =cut
114 | 
115 | #-- public functions -----------------------------------------------------------
116 | 
117 | =head2 FUNCTIONS
118 | 
119 | The following functions are available in the C<Kafka::Int64> module.
120 | 
121 | =cut
122 | 
123 | =head3 C<intsum( $bint, $int )>
124 | 
125 | Adds two numbers, emulating 64-bit integer addition on 32-bit systems via bigint.
126 | 
127 | Both arguments must be numbers. That is, each is defined and Perl thinks
128 | it's a number. The arguments may be L<Math::BigInt|Math::BigInt>
129 | integers.
130 | 
131 | Returns the value as a L<Math::BigInt|Math::BigInt> integer.
132 | 
133 | =cut
134 | sub intsum {
135 |     my ( $frst, $scnd ) = @_;
136 | 
137 |     my $ret = $frst + $scnd + 0;    # bigint coercion
138 |     Kafka::Exception::Int64->throw( throw_args( $ERROR_MISMATCH_ARGUMENT, 'intsum' ) )
139 |         if $ret->is_nan();
140 | 
141 |     return $ret;
142 | }
143 | 
144 | =head3 C<packq( $bint )>
145 | 
146 | Emulates C<< pack( q{q>}, $bint ) >> on 32-bit systems - assumes decimal string
147 | or integer input.
148 | 
149 | The argument must be a positive number. That is, it is defined and Perl thinks
150 | it's a number. The argument may be a L<Math::BigInt|Math::BigInt> integer.
151 | 
152 | The special values -1, -2 are allowed
153 | (C<$Kafka::RECEIVE_LATEST_OFFSET>, C<$Kafka::RECEIVE_EARLIEST_OFFSETS>).
154 | 
155 | Returns the value as a packed binary string.
156 | 
157 | =cut
158 | sub packq {
159 |     my ( $n ) = @_;
160 | 
161 |     if    ( $n == -1 ) { return pack q{C8}, ( 255 ) x 8; }
162 |     elsif ( $n == -2 ) { return pack q{C8}, ( 255 ) x 7, 254; }
163 |     elsif ( $n < 0 )   { Kafka::Exception::Int64->throw( throw_args( $ERROR_MISMATCH_ARGUMENT, 'packq' ) ); }
164 | 
165 |     return pack q{H16}, substr( '00000000000000000000000000000000'.substr( ( $n + 0 )->as_hex(), 2 ), -16 );
166 | }
167 | 
168 | =head3 C<unpackq( $bstr )>
169 | 
170 | Emulates C<< unpack( q{q>}, $bstr ) >> on 32-bit systems - assumes binary input.
171 | 
172 | The argument must be a binary string 8 bytes long.
173 | 
174 | Returns the value as a L<Math::BigInt|Math::BigInt> integer.
175 | 
176 | =cut
177 | sub unpackq {
178 |     my ( $s ) = @_;
179 | 
180 |     my $ret = Math::BigInt->from_hex( '0x'.unpack( q{H16}, $s ) );
181 |     $ret = -1 if $ret == 18446744073709551615;
182 |     $ret = -2 if $ret == 18446744073709551614;
183 | 
184 |     return $ret;
185 | }
186 | 
187 | #-- private functions ----------------------------------------------------------
188 | 
189 | 1;
190 | 
191 | __END__
192 | 
193 | =head1 DIAGNOSTICS
194 | 
195 | When an error is detected, an exception, represented by an object of the C<Kafka::Exceptions> class,
196 | is thrown (see L<Kafka::Exceptions|Kafka::Exceptions>).
197 | 
198 | Any error raised by C<Kafka::Int64> is FATAL.
199 | FATAL errors will cause the program to halt (C<confess>), since the
200 | problem is so severe that it would be dangerous to continue.
201 | 
202 | The exception's C<code> and its more descriptive C<message> attribute provide
203 | information about the thrown exception. Consult the documentation of L<Kafka::Exceptions|Kafka::Exceptions>
204 | for the list of all available methods.
205 | 
206 | Authors suggest using L<Try::Tiny|Try::Tiny>'s C<try> and C<catch> to handle exceptions while
207 | working with the L<Kafka|Kafka> package.
208 | 
209 | =over 3
210 | 
211 | =item C<Invalid argument>
212 | 
213 | This means that you didn't give the right argument to some of the
214 | functions described above.
215 | 
216 | =back
217 | 
218 | =head1 SEE ALSO
219 | 
220 | The basic operation of the Kafka package modules:
221 | 
222 | L<Kafka|Kafka> - constants and messages used by the Kafka package modules.
223 | 
224 | L<Kafka::Connection|Kafka::Connection> - interface to connect to a Kafka cluster.
225 | 
226 | L<Kafka::Producer|Kafka::Producer> - interface for the producing client.
227 | 
228 | L<Kafka::Consumer|Kafka::Consumer> - interface for the consuming client.
229 | 
230 | L<Kafka::Message|Kafka::Message> - interface to access Kafka message
231 | properties.
232 | 
233 | L<Kafka::Int64|Kafka::Int64> - functions to work with 64 bit elements of the
234 | protocol on 32 bit systems.
235 | 236 | L - functions to process messages in the 237 | Apache Kafka's Protocol. 238 | 239 | L - low-level interface for communication with Kafka server. 240 | 241 | L - module designated to handle Kafka exceptions. 242 | 243 | L - internal constants and functions used 244 | by several package modules. 245 | 246 | A wealth of detail about the Apache Kafka and the Kafka Protocol: 247 | 248 | Main page at L 249 | 250 | Kafka Protocol at L 251 | 252 | =head1 SOURCE CODE 253 | 254 | Kafka package is hosted on GitHub: 255 | L 256 | 257 | =head1 AUTHOR 258 | 259 | Sergey Gladkov 260 | 261 | Please use GitHub project link above to report problems or contact authors. 262 | 263 | =head1 CONTRIBUTORS 264 | 265 | Alexander Solovey 266 | 267 | Jeremy Jordan 268 | 269 | Sergiy Zuban 270 | 271 | Vlad Marchenko 272 | 273 | =head1 COPYRIGHT AND LICENSE 274 | 275 | Copyright (C) 2012-2017 by TrackingSoft LLC. 276 | 277 | This package is free software; you can redistribute it and/or modify it under 278 | the same terms as Perl itself. See I at 279 | L. 280 | 281 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; 282 | without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 283 | PARTICULAR PURPOSE. 284 | 285 | =cut 286 | -------------------------------------------------------------------------------- /lib/Kafka/Message.pm: -------------------------------------------------------------------------------- 1 | package Kafka::Message; 2 | 3 | =head1 NAME 4 | 5 | Kafka::Message - Interface to the Kafka message properties. 6 | 7 | =head1 VERSION 8 | 9 | This documentation refers to C version 1.08 . 10 | 11 | =cut 12 | 13 | 14 | 15 | use 5.010; 16 | use strict; 17 | use warnings; 18 | 19 | our $VERSION = 'v1.08'; 20 | 21 | 22 | 23 | our @_standard_fields = qw( 24 | Attributes 25 | Timestamp 26 | error 27 | HighwaterMarkOffset 28 | key 29 | MagicByte 30 | next_offset 31 | payload 32 | offset 33 | valid 34 | ); 35 | 36 | #-- constructor ---------------------------------------------------------------- 37 | 38 | sub new { 39 | my ( $class, $self ) = @_; 40 | 41 | bless $self, $class; 42 | 43 | return $self; 44 | } 45 | 46 | #-- public attributes ---------------------------------------------------------- 47 | 48 | { 49 | no strict 'refs'; ## no critic 50 | 51 | # getters 52 | foreach my $method ( @_standard_fields ) 53 | { 54 | *{ __PACKAGE__.'::'.$method } = sub { 55 | my ( $self ) = @_; 56 | return $self->{ $method }; 57 | }; 58 | } 59 | } 60 | 61 | #-- public methods ------------------------------------------------------------- 62 | 63 | #-- private attributes --------------------------------------------------------- 64 | 65 | #-- private methods ------------------------------------------------------------ 66 | 67 | 68 | 69 | 1; 70 | 71 | __END__ 72 | 73 | =head1 SYNOPSIS 74 | 75 | use 5.010; 76 | use strict; 77 | use warnings; 78 | 79 | use Kafka qw( 80 | $DEFAULT_MAX_BYTES 81 | ); 82 | use Kafka::Connection; 83 | use Kafka::Consumer; 84 | 85 | #-- Connection 86 | my $connection = Kafka::Connection->new( host => 'localhost' ); 87 | 88 | #-- Consumer 89 | my $consumer = Kafka::Consumer->new( Connection => $connection ); 90 | 91 | # The Kafka consumer response has an ARRAY reference type. 92 | # For the fetch response array has the class name Kafka::Message elements. 
93 | 
94 |     # Consuming messages
95 |     my $messages = $consumer->fetch(
96 |         'mytopic',          # topic
97 |         0,                  # partition
98 |         0,                  # offset
99 |         $DEFAULT_MAX_BYTES  # Maximum size of MESSAGE(s) to receive
100 |     );
101 |     if ( $messages ) {
102 |         foreach my $message ( @$messages ) {
103 |             if ( $message->valid ) {
104 |                 say 'key        : ', $message->key;
105 |                 say 'payload    : ', $message->payload;
106 |                 say 'offset     : ', $message->offset;
107 |                 say 'next_offset: ', $message->next_offset;
108 |             } else {
109 |                 say 'error      : ', $message->error;
110 |             }
111 |         }
112 |     }
113 | 
114 |     # Closes and cleans up
115 |     undef $consumer;
116 |     $connection->close;
117 |     undef $connection;
118 | 
119 | =head1 DESCRIPTION
120 | 
121 | This module is not intended to be used by the end user.
122 | 
123 | The C<Kafka::Message> class implements the API for L<Kafka|Kafka> messages.
124 | 
125 | The C<fetch> method of the L<Kafka::Consumer|Kafka::Consumer> client returns a reference to an array of objects of this class.
126 | 
127 | The main features of the C<Kafka::Message> class are:
128 | 
129 | =over 3
130 | 
131 | =item *
132 | 
133 | Represents the Apache Kafka Message structure. A description of the structure
134 | is available in Apache Kafka's protocol documentation.
135 | 
136 | =back
137 | 
138 | =head2 CONSTRUCTOR
139 | 
140 | =head3 C<new>
141 | 
142 | Creates a new C<Kafka::Message> object.
143 | C<new> takes one argument - a HASH reference with the message attributes corresponding to
144 | the accessor methods below.
145 | 
146 | =head2 METHODS
147 | 
148 | =head3 C<payload>
149 | 
150 | A simple message received from the Apache Kafka server.
151 | 
152 | =head3 C<key>
153 | 
154 | The key is an optional message key that was used for partition assignment.
155 | The key can be an empty string.
156 | 
157 | =head3 C<Timestamp>
158 | 
159 | Integer, or BigInt on 32-bit platforms: the message timestamp (might be -1 if
160 | the message has no timestamp). Requires Kafka version > 0.10.0 and timestamps
161 | enabled in the topic message format.
162 | 
163 | =head3 C<valid>
164 | 
165 | Boolean value: indicates whether the received message is valid or not.
166 | 
167 | =head3 C<error>
168 | 
169 | A description of why the message is invalid.
170 | 
171 | =head3 C<offset>
172 | 
173 | The offset of the message in the Apache Kafka server.
174 | 
175 | =head3 C<next_offset>
176 | 
177 | The offset of the next message in the Apache Kafka server.
178 | 
179 | =head3 C<Attributes>
180 | 
181 | This holds metadata attributes about the message.
182 | The lowest 2 bits contain the compression codec used for the message.
183 | The other bits are currently unused.
184 | 
185 | =head3 C<HighwaterMarkOffset>
186 | 
187 | The offset at the end of the log for this partition.
188 | This can be used by the client to determine how many messages behind the end of the log they are.
189 | 
190 | =head3 C<MagicByte>
191 | 
192 | This is the version id used to allow backwards-compatible evolution of the message binary format.
193 | 
194 | =head1 DIAGNOSTICS
195 | 
196 | In order to achieve better performance, the constructor of this module does not perform validation of
197 | its arguments.
198 | 
199 | =head1 SEE ALSO
200 | 
201 | The basic operation of the Kafka package modules:
202 | 
203 | L<Kafka|Kafka> - constants and messages used by the Kafka package modules.
204 | 
205 | L<Kafka::Connection|Kafka::Connection> - interface to connect to a Kafka cluster.
206 | 
207 | L<Kafka::Producer|Kafka::Producer> - interface for the producing client.
208 | 
209 | L<Kafka::Consumer|Kafka::Consumer> - interface for the consuming client.
210 | 
211 | L<Kafka::Message|Kafka::Message> - interface to access Kafka message
212 | properties.
213 | 
214 | L<Kafka::Int64|Kafka::Int64> - functions to work with 64 bit elements of the
215 | protocol on 32 bit systems.
216 | 
217 | L<Kafka::Protocol|Kafka::Protocol> - functions to process messages in
218 | Apache Kafka's protocol.
219 | 
220 | L<Kafka::IO|Kafka::IO> - low-level interface for communication with the Kafka server.
221 | 
222 | L<Kafka::Exceptions|Kafka::Exceptions> - module designed to handle Kafka exceptions.
223 | 224 | L - internal constants and functions used 225 | by several package modules. 226 | 227 | A wealth of detail about the Apache Kafka and the Kafka Protocol: 228 | 229 | Main page at L 230 | 231 | Kafka Protocol at L 232 | 233 | =head1 SOURCE CODE 234 | 235 | Kafka package is hosted on GitHub: 236 | L 237 | 238 | =head1 AUTHOR 239 | 240 | Sergey Gladkov 241 | 242 | Please use GitHub project link above to report problems or contact authors. 243 | 244 | =head1 CONTRIBUTORS 245 | 246 | Alexander Solovey 247 | 248 | Jeremy Jordan 249 | 250 | Sergiy Zuban 251 | 252 | Vlad Marchenko 253 | 254 | =head1 COPYRIGHT AND LICENSE 255 | 256 | Copyright (C) 2012-2017 by TrackingSoft LLC. 257 | 258 | This package is free software; you can redistribute it and/or modify it under 259 | the same terms as Perl itself. See I at 260 | L. 261 | 262 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; 263 | without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 264 | PARTICULAR PURPOSE. 265 | 266 | =cut 267 | -------------------------------------------------------------------------------- /t/01_bits64.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Kafka qw ( 28 | $BITS64 29 | %ERROR 30 | $ERROR_MISMATCH_ARGUMENT 31 | ); 32 | 33 | SKIP: { 34 | 35 | skip 'You have a 64 bit system', 1, if $BITS64; 36 | 37 | my $error_mismatch_argument = $ERROR{ $ERROR_MISMATCH_ARGUMENT }; 38 | my $qr = qr/$error_mismatch_argument/; 39 | 40 | 41 | eval { my $ret = unpack( 'Q', 0xff x 8 ) }; 42 | if ( !$@ ) { 43 | ok( $BITS64, 'Your system supports 64-bit integer values' ); 44 | } else { 45 | ok( !$BITS64, 'Your system not supports 64-bit integer values' ); 46 | 47 | our $n0; 48 | our $n4; 49 | our $n45; 50 | our $n_neg1; 51 | our $n_neg2; 52 | our $n_neg3; 53 | our $n_neg5; 54 | 55 | our $s = q{}; 56 | our $s4 = '4'; 57 | our $ss = 'abcd'; 58 | 59 | BEGIN { 60 | unless ( $BITS64 ) { 61 | # numbers does not bigint 62 | $n0 = 0; 63 | $n4 = 4; 64 | $n45 = 4.5; 65 | $n_neg1 = -1; 66 | $n_neg2 = -2; 67 | $n_neg3 = -3; 68 | $n_neg5 = -5; 69 | 70 | # after the announcement of the new numbers will be bigint 71 | #use bigint; 72 | require bigint; 73 | import bigint; 74 | 75 | require Kafka::Int64; 76 | } 77 | } 78 | 79 | ok( !ref( $n0 ), 'number does not bigint' ); 80 | 81 | #-- intsum 82 | 83 | foreach my $pair ( 84 | [ 0, $n0, 0 ], 85 | [ 0, $n4, 4 ], 86 | [ 2, $n4, 6 ], 87 | [ 2, $n0, 2 ], 88 | [ 2, $n_neg5, -3 ], 89 | [ $n0, 0, 0 ], 90 | [ $n4, 0, 4 ], 91 | [ $n4, 2, 6 ], 92 | [ $n0, 2, 2 ], 93 | [ $n_neg5, 2, -3 ], 94 | [ $n4, $n4, 8 ], 95 | [ $n0, $n0, 0 ], 96 | [ $n_neg5, $n_neg5, -10 ], 97 | ) { 98 | my $ret; 99 | is( $ret = Kafka::Int64::intsum( $pair->[0], $pair->[1] ), $pair->[2], 100 | $pair->[0].' ('.( ref( $pair->[0] ) eq 'Math::BigInt' ? q{} : 'non ' ).'bigint) + '.$pair->[1].' ('.( ref( $pair->[1] ) eq 'Math::BigInt' ? 
q{} : 'non ' ).'bigint) is bigint 0' ); 101 | isa_ok( $ret, 'Math::BigInt' ); 102 | } 103 | 104 | #-- packq 105 | 106 | foreach my $num ( 107 | $n4, 108 | $n0, 109 | $n_neg1, 110 | $n_neg2, 111 | 4, 112 | 4.5, 113 | 0, 114 | -1, 115 | -2, 116 | ) { 117 | is( length( Kafka::Int64::packq( $num ) ), 8, 'binary string of length 64 bits ('.( ref( $num ) eq 'Math::BigInt' ? q{} : 'non ' )."bigint '$num)" ); 118 | } 119 | 120 | throws_ok { Kafka::Int64::packq( -3 ); } 'Kafka::Exception::Int64', 'error thrown'; 121 | 122 | #-- unpackq 123 | 124 | foreach my $pair ( 125 | [ chr(0) x 8, 0 ], 126 | [ chr(0xff) x 8, -1 ], 127 | [ chr(1) x 8, 72340172838076673 ], 128 | [ chr(0x10) x 8, 1157442765409226768 ], 129 | ) { 130 | my $ret; 131 | is( $ret = Kafka::Int64::unpackq( $pair->[0] ), $pair->[1], 'bigint '.$pair->[1] ); 132 | isa_ok( $ret, "Math::BigInt" ); 133 | } 134 | } 135 | 136 | } # end of SKIP 137 | 138 | -------------------------------------------------------------------------------- /t/02_io.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::TCP'; ## no critic 22 | plan skip_all => "because Test::TCP required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use IO::Socket::INET; 33 | use Net::EmptyPort qw( 34 | empty_port 35 | ); 36 | use POSIX ':signal_h'; 37 | use Socket qw( 38 | AF_INET 39 | AF_INET6 40 | PF_INET 41 | PF_INET6 42 | inet_aton 43 | inet_ntop 44 | ); 45 | use Sub::Install; 46 | use Sys::SigAction qw( 47 | set_sig_handler 48 | ); 49 | use Time::HiRes qw(); 50 | 51 | use Kafka qw( 52 | $IP_V4 53 | $IP_V6 54 | $KAFKA_SERVER_PORT 55 | $REQUEST_TIMEOUT 56 | ); 57 | use Kafka::IO; 58 | use Kafka::TestInternals qw( 59 | @not_posint 60 | @not_posnumber 61 | @not_string 62 | ); 63 | 64 | # See Kafka::IO 65 | use constant DEBUG => 0; 66 | #use constant DEBUG => 1; 67 | #use constant DEBUG => 2; 68 | 69 | Kafka::IO->debug_level( DEBUG ) if DEBUG; 70 | 71 | STDOUT->autoflush; 72 | 73 | my ( $server, $port, $io, $sig_handler, $marker_signal_handling, $original, $timer, $timeout, $sent, $resp, $test_message, $inet_aton, $hostname ); 74 | 75 | $inet_aton = inet_aton( '127.0.0.1' ); # localhost 76 | $hostname = gethostbyaddr( $inet_aton, AF_INET ); 77 | 78 | my $server_code = sub { 79 | my ( $port ) = @_; 80 | 81 | my $sock = IO::Socket::INET->new( 82 | LocalPort => $port, 83 | LocalAddr => $hostname, 84 | Proto => 'tcp', 85 | Listen => 5, 86 | Type => SOCK_STREAM, 87 | ReuseAddr => 1, 88 | ) or die "Cannot open server socket $hostname:$port : $!"; 89 | 90 | $SIG{TERM} = sub { exit }; 91 | 92 | while ( my $remote = $sock->accept ) { 93 | while ( my $line = <$remote> ) { 94 | print { $remote } $line; 95 | } 96 | } 97 | }; 98 | 99 | sub debug_msg { 100 | my ( $message ) = @_; 101 | 102 | return if Kafka::IO->debug_level != 2; 103 | 104 | diag '[ time = ', Time::HiRes::time(), ' ] ', $message; 105 | } 106 | 107 | my $server_port = empty_port( $KAFKA_SERVER_PORT ); 108 | $server = Test::TCP->new( 109 | code => $server_code, 110 | port => $server_port, 111 | ); 112 
| $port = $server->port; 113 | ok $port, "server port = $port"; 114 | wait_port( $port ); 115 | 116 | $test_message = "Test message\n"; 117 | 118 | # NOTE: Kafka::IO->new uses alarm clock internally 119 | 120 | #-- ALRM handler 121 | 122 | debug_msg( 'ALRM handler verification' ); 123 | 124 | # cancel the previous timer 125 | alarm 0; 126 | 127 | $sig_handler = set_sig_handler( SIGALRM ,sub { 128 | ++$marker_signal_handling; 129 | debug_msg( 'SIGALRM: signal handler triggered' ); 130 | } 131 | ); 132 | ok( !defined( $marker_signal_handling ), 'marker signal handling not defined' ); 133 | 134 | throws_ok { 135 | debug_msg( "ALRM handler: host => 'something bad'" ); 136 | $io = Kafka::IO->new( 137 | host => 'something bad', 138 | port => $port, 139 | timeout => $REQUEST_TIMEOUT, 140 | ); 141 | } 'Kafka::Exception::IO', 'error thrown'; 142 | 143 | debug_msg( "ALRM handler: host => $hostname" ); 144 | eval { 145 | $io = Kafka::IO->new( 146 | host => $hostname, 147 | port => $port, 148 | timeout => $REQUEST_TIMEOUT, 149 | ); 150 | }; 151 | SKIP: { 152 | skip "gethostbyname( '$hostname' ) takes too long: $@" if $@; 153 | 154 | isa_ok( $io, 'Kafka::IO' ); 155 | 156 | ok( !defined( $marker_signal_handling ), 'marker signal handling not defined' ); 157 | # signal handler triggered 158 | kill ALRM => $$; 159 | is $marker_signal_handling, 1, 'the signal handler to be reset to the previous value'; 160 | 161 | #-- ALRM timer 162 | 163 | # Kafka::IO->new is badly ended before 'timer' and before 'timeout' 164 | 165 | # cancel the previous timer 166 | alarm 0; 167 | 168 | $SIG{ALRM} = sub { 169 | ++$marker_signal_handling; 170 | debug_msg( 'SIGALRM: signal handler triggered' ); 171 | }; 172 | $timer = 10; 173 | $timeout = $timer; 174 | 175 | debug_msg( "Kafka::IO->new is badly ended before 'timer' and before 'timeout'" ); 176 | debug_msg( "timer = $timer, timeout = $timeout, host => 'something bad'" ); 177 | $marker_signal_handling = 0; 178 | eval { 179 | alarm $timer; 180 | eval { 181 | $io = Kafka::IO->new( 182 | host => 'something bad', 183 | port => $port, 184 | timeout => $timeout, 185 | ); 186 | }; 187 | alarm 0; 188 | }; 189 | ok !$marker_signal_handling, 'signal handler is not triggered'; 190 | 191 | # Kafka::IO->new is correctly ended before 'timer' and before 'timeout' 192 | 193 | # cancel the previous timer 194 | alarm 0; 195 | 196 | $SIG{ALRM} = sub { 197 | ++$marker_signal_handling; 198 | debug_msg( 'SIGALRM: signal handler triggered' ); 199 | }; 200 | $timer = 10; 201 | $timeout = $timer + 5; 202 | 203 | $original = \&Kafka::IO::_gethostbyname; 204 | Sub::Install::reinstall_sub( { 205 | code => sub { 206 | my $self = shift; 207 | 208 | $self->{af} = AF_INET; 209 | $self->{pf} = PF_INET; 210 | $self->{ip} = '127.0.0.1'; 211 | 212 | debug_msg( '_gethostbyname called (without sleep)' ); 213 | 214 | return $self->{ip}; 215 | }, 216 | into => 'Kafka::IO', 217 | as => '_gethostbyname', 218 | } ); 219 | 220 | debug_msg( "Kafka::IO->new is correctly ended before 'timer' and before 'timeout'" ); 221 | debug_msg( "timer = $timer, timeout = $timeout, host => 'something bad'" ); 222 | $marker_signal_handling = 0; 223 | eval { 224 | alarm $timer; 225 | eval { 226 | $io = Kafka::IO->new( 227 | host => 'something bad', 228 | port => $port, 229 | timeout => $timeout, 230 | ); 231 | 232 | ok !$marker_signal_handling, 'signal handler is not triggered yet'; 233 | # 'sleep' to be interrupted by an external signal 234 | sleep $timeout * 2; 235 | }; 236 | alarm 0; 237 | }; 238 | is $marker_signal_handling, 1, 
'signal handler is triggered'; 239 | 240 | Sub::Install::reinstall_sub( { 241 | code => $original, 242 | into => 'Kafka::IO', 243 | as => '_gethostbyname', 244 | } ); 245 | 246 | # Kafka::IO->new is correctly ended after 'timer' and before 'timeout' 247 | 248 | # cancel the previous timer 249 | alarm 0; 250 | 251 | $SIG{ALRM} = sub { 252 | ++$marker_signal_handling; 253 | debug_msg( 'SIGALRM: signal handler triggered' ); 254 | }; 255 | $timer = 10; 256 | $timeout = $timer + 10; 257 | 258 | Sub::Install::reinstall_sub( { 259 | code => sub { 260 | my $self = shift; 261 | 262 | $self->{af} = AF_INET; 263 | $self->{pf} = PF_INET; 264 | $self->{ip} = '127.0.0.1'; 265 | 266 | debug_msg( '_gethostbyname called (sleep ', $timer + 5, ')' ); 267 | # 'sleep' should not be interrupted by an external signal 268 | sleep $timer + 5; 269 | 270 | return $self->{ip}; 271 | }, 272 | into => 'Kafka::IO', 273 | as => '_gethostbyname', 274 | } ); 275 | 276 | debug_msg( "Kafka::IO->new is correctly ended after 'timer' and before 'timeout'" ); 277 | debug_msg( "timer = $timer, timeout = $timeout, host => $hostname" ); 278 | $marker_signal_handling = 0; 279 | eval { 280 | alarm $timer; 281 | eval { 282 | $io = Kafka::IO->new( 283 | host => $hostname, 284 | port => $port, 285 | timeout => $timeout, 286 | ); 287 | }; 288 | alarm 0; 289 | }; 290 | is $marker_signal_handling, 1, 'signal handler is triggered'; 291 | 292 | Sub::Install::reinstall_sub( { 293 | code => $original, 294 | into => 'Kafka::IO', 295 | as => '_gethostbyname', 296 | } ); 297 | 298 | debug_msg( "external 'alarm' tested" ); 299 | 300 | #-- _is_alive 301 | 302 | $io = Kafka::IO->new( 303 | host => $hostname, 304 | port => $port, 305 | timeout => $REQUEST_TIMEOUT, 306 | ); 307 | 308 | ok $io->_is_alive, 'socket alive'; 309 | 310 | #-- close 311 | 312 | ok $io->{socket}, 'socket defined'; 313 | $io->close; 314 | ok !$io->{socket}, 'socket not defined'; 315 | 316 | #-- _is_alive 317 | 318 | ok !$io->_is_alive, 'socket not alive'; 319 | 320 | #-- send 321 | 322 | $io = Kafka::IO->new( 323 | host => $hostname, 324 | port => $port, 325 | timeout => $REQUEST_TIMEOUT, 326 | ); 327 | 328 | lives_ok { $sent = $io->send( $test_message ); } 'expecting to live'; 329 | is $sent, length( $test_message ), 'sent '.length( $test_message ).' 
bytes'; 330 | 331 | #-- receive 332 | 333 | lives_ok { $resp = $io->receive( length( $test_message ) ); } 'expecting to live'; 334 | is( $$resp, $test_message, 'receive OK' ); 335 | 336 | # ip_version 337 | 338 | foreach my $ip_version ( undef, $IP_V4 ) { 339 | $io = Kafka::IO->new( 340 | host => '127.0.0.1', 341 | port => $port, 342 | ip_version => $ip_version, 343 | ); 344 | is $io->{af}, AF_INET, 'af OK'; 345 | is $io->{pf}, PF_INET, 'pf OK'; 346 | is $io->{ip}, '127.0.0.1', 'ip OK'; 347 | } 348 | 349 | foreach my $ip_version ( undef, $IP_V4 ) { 350 | $io = Kafka::IO->new( 351 | host => 'localhost', 352 | port => $port, 353 | ip_version => $ip_version, 354 | ); 355 | is $io->{af}, AF_INET, 'af OK'; 356 | is $io->{pf}, PF_INET, 'pf OK'; 357 | is $io->{ip}, inet_ntop( AF_INET, scalar( gethostbyname( 'localhost' ) ) ), 'ip OK'; 358 | } 359 | 360 | my $host = '127.0.0.1'; 361 | throws_ok { 362 | Kafka::IO->new( 363 | host => $host, 364 | port => $port, 365 | ip_version => $IP_V6, 366 | ); 367 | } 'Kafka::Exception::IO', "bad ip_version for $host"; 368 | 369 | $host = 'localhost'; 370 | dies_ok { 371 | Kafka::IO->new( 372 | host => $host, 373 | port => $port, 374 | ip_version => $IP_V6, 375 | ); 376 | } "bad ip_version for $host"; 377 | 378 | #-- close connection 379 | 380 | undef $server; 381 | ok $io, 'IO exists'; 382 | ok !$io->_is_alive, 'socket not alive'; 383 | # throws_ok { $sent = $io->send( $test_message ); } 'Kafka::Exception::IO', 'error thrown'; 384 | } 385 | 386 | undef $server; 387 | 388 | -------------------------------------------------------------------------------- /t/03_io_ipv6.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::TCP'; ## no critic 22 | plan skip_all => "because Test::TCP required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use Socket qw( 33 | getaddrinfo 34 | AF_INET 35 | AF_INET6 36 | PF_INET 37 | PF_INET6 38 | 39 | SOCK_STREAM 40 | IPPROTO_TCP 41 | 42 | inet_aton 43 | inet_ntop 44 | pack_sockaddr_in 45 | ); 46 | use Net::EmptyPort qw( 47 | can_bind 48 | ); 49 | 50 | use Kafka qw( 51 | $IP_V4 52 | $IP_V6 53 | $REQUEST_TIMEOUT 54 | ); 55 | use Kafka::IO; 56 | 57 | { 58 | package TestServer; 59 | 60 | use 5.010; 61 | use strict; 62 | use warnings; 63 | 64 | use base qw( 65 | Exporter 66 | ); 67 | 68 | our @EXPORT = qw( 69 | new_sock 70 | ); 71 | 72 | use IO::Socket::IP; 73 | 74 | sub new_sock { 75 | my ( $host, $port ) = @_; 76 | 77 | my $sock = IO::Socket::IP->new( 78 | LocalPort => $port, 79 | LocalAddr => $host, 80 | Proto => 'tcp', 81 | Listen => 5, 82 | Type => SOCK_STREAM, 83 | V6Only => 1, 84 | ReuseAddr => 1, 85 | ) or die "Cannot open server socket: $!"; 86 | 87 | return $sock; 88 | } 89 | 90 | sub new { 91 | my ( $class, $host, $port ) = @_; 92 | 93 | my $sock = new_sock( $host, $port ); 94 | 95 | return bless { sock => $sock }, $class; 96 | } 97 | 98 | sub run { 99 | my ( $self, $code ) = @_; 100 | 101 | while ( my $remote = $self->{sock}->accept ) { 102 | while ( my $line = <$remote> ) { 103 | $code->( 
$remote, $line ); 104 | } 105 | } 106 | } 107 | 108 | 1; 109 | } 110 | 111 | sub is_alive_v4 { 112 | my ( $ip, $port ) = @_; 113 | 114 | socket( my $tmp_socket, PF_INET, SOCK_STREAM, IPPROTO_TCP ); 115 | my $is_alive = connect( $tmp_socket, pack_sockaddr_in( $port, inet_aton( $ip ) ) ); 116 | CORE::close( $tmp_socket ); 117 | 118 | return $is_alive; 119 | } 120 | 121 | sub doit { 122 | my ( $host, $af, $pf ) = @_; 123 | 124 | ok 1, 'starting the test'; 125 | test_tcp( 126 | client => sub { 127 | my $port = shift; 128 | 129 | ok $port, 'test case for sharedfork (client)'; 130 | 131 | my $io = Kafka::IO->new( 132 | host => $host, 133 | port => $port, 134 | #timeout => $REQUEST_TIMEOUT, 135 | ); 136 | ok $io->_is_alive, 'socket alive'; 137 | is $io->{af}, $af, 'Address family ok'; 138 | is $io->{pf}, $pf, 'Protocol family ok'; 139 | 140 | my $test_message = "Test message\n"; 141 | my ( $sent, $resp ); 142 | 143 | lives_ok { $sent = $io->send( $test_message ); } 'expecting to live'; 144 | is $sent, length( $test_message ), 'sent '.length( $test_message ).' bytes'; 145 | 146 | lives_ok { $resp = $io->receive( length( $test_message ) ); } 'expecting to live'; 147 | is( $$resp, $test_message, 'receive OK' ); 148 | 149 | foreach my $ip_version ( undef, $IP_V6 ) { 150 | $io = Kafka::IO->new( 151 | host => $host, 152 | port => $port, 153 | ip_version => $ip_version, 154 | ); 155 | is $io->{af}, AF_INET6, 'af OK'; 156 | is $io->{pf}, PF_INET6, 'pf OK'; 157 | is $io->{ip}, $host, 'ip OK'; 158 | } 159 | 160 | foreach my $hostname ( '127.0.0.1', 'localhost' ) { 161 | my $ip_v4; 162 | if ( my $ipaddr = gethostbyname( $hostname ) ) { 163 | $ip_v4 = inet_ntop( AF_INET, $ipaddr ); 164 | } 165 | if ( $ip_v4 && !is_alive_v4( $ip_v4, $port ) ) { 166 | dies_ok { 167 | my $bad_io = Kafka::IO->new( 168 | host => $hostname, 169 | port => $port, 170 | ip_version => $IP_V4, 171 | ); 172 | } "bad ip_version for $hostname"; 173 | } 174 | } 175 | 176 | my $host_v6 = 'ip6-localhost'; 177 | 178 | my ( $err, @addrs ) = getaddrinfo( 179 | $host_v6, 180 | '', # not interested in the service name 181 | { 182 | family => AF_INET6, 183 | socktype => SOCK_STREAM, 184 | protocol => IPPROTO_TCP, 185 | }, 186 | ); 187 | unless ( $err ) { 188 | # /etc/hosts contains: 189 | # ::1 ip6-localhost 190 | my $v6_io = Kafka::IO->new( 191 | host => $host_v6, 192 | port => $port, 193 | ip_version => $IP_V6, 194 | ); 195 | is $v6_io->{af}, AF_INET6, 'af OK'; 196 | is $v6_io->{pf}, PF_INET6, 'pf OK'; 197 | is $v6_io->{ip}, '::1', 'ip OK'; 198 | } 199 | 200 | foreach my $ip_version ( undef, $IP_V4 ) { 201 | dies_ok { 202 | $io = Kafka::IO->new( 203 | host => $host_v6, 204 | port => $port, 205 | ip_version => $ip_version, 206 | ); 207 | } "bad ip_version for host_v6"; 208 | } 209 | 210 | throws_ok { 211 | $io = Kafka::IO->new( 212 | host => '::1', 213 | port => $port, 214 | ip_version => $IP_V4, 215 | ); 216 | } 'Kafka::Exception::IO', "bad ip_version for IPv6"; 217 | 218 | ok $io->close, 'Socket closed'; 219 | }, 220 | server => sub { 221 | my $port = shift; 222 | 223 | ok $port, 'test case for sharedfork (server)'; 224 | TestServer->new( $host, $port )->run( sub { 225 | my ( $remote, $line, $sock ) = @_; 226 | note 'new request'; 227 | print { $remote } $line; 228 | } 229 | ); 230 | }, 231 | host => $host, 232 | ); 233 | } 234 | 235 | 236 | 237 | 238 | 239 | subtest 'v6' => sub { 240 | plan skip_all => 'IPv6 not supported' 241 | unless eval { Socket::IPV6_V6ONLY } && can_bind( '::1' ); 242 | 243 | foreach my $host_name ( 244 | 
'0:0:0:0:0:0:0:1', 245 | '::1', 246 | #TODO: v6 fqdn resolve test 247 | ) { 248 | doit( $host_name, AF_INET6, PF_INET6 ); 249 | } 250 | }; 251 | 252 | -------------------------------------------------------------------------------- /t/04_mockio.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Params::Util qw( 28 | _STRING 29 | ); 30 | 31 | use Kafka qw( 32 | $KAFKA_SERVER_PORT 33 | $REQUEST_TIMEOUT 34 | ); 35 | use Kafka::MockIO; 36 | use Kafka::TestInternals qw( 37 | @not_posint 38 | @not_posnumber 39 | @not_string 40 | ); 41 | 42 | my @IO_API_names = qw( 43 | new 44 | send 45 | receive 46 | close 47 | _is_alive 48 | ); 49 | 50 | #-- Mocking Kafka::IO ---------- 51 | my %original_IO_API = map { $_ => \&{ "Kafka::IO::$_" } } @IO_API_names; 52 | 53 | my $test_message = "Test message\n"; 54 | 55 | my $topic = 'mytopic'; 56 | my $partition = $Kafka::MockIO::PARTITION; 57 | 58 | # description of requests, see t/??_decode_encode.t 59 | my $encoded_produce_request = pack( "H*", '00000049000000000000000400000001000005dc0000000100076d79746f7069630000000100000000000000200000000000000000000000148dc795a20000ffffffff0000000648656c6c6f21' ); 60 | my $encoded_fetch_request = pack( "H*", '0000004d00010000000000000016636f6e736f6c652d636f6e73756d65722d3235353535ffffffff00000064000000010000000100076d79746f7069630000000100000000000000000000000000100000' ); 61 | my $encoded_offset_request = pack( "H*", '0000004500020000000000000016636f6e736f6c652d636f6e73756d65722d3235353535ffffffff0000000100076d79746f7069630000000100000000fffffffffffffffe00000001' ); 62 | my $encoded_metadata_request = pack( "H*", '0000002d00030000000000000016636f6e736f6c652d636f6e73756d65722d32353535350000000100076d79746f706963' ); 63 | 64 | #-- override 65 | 66 | Kafka::MockIO::override(); 67 | ok( \&{ "Kafka::IO::$_" } ne $original_IO_API{ $_ }, "IO API mocked: $_" ) foreach @IO_API_names; 68 | 69 | #-- restore 70 | 71 | Kafka::MockIO::restore(); 72 | ok( \&{ "Kafka::IO::$_" } eq $original_IO_API{ $_ }, "IO API restored: $_" ) foreach @IO_API_names; 73 | 74 | #-- Kafka::MockIO API ---------- 75 | 76 | Kafka::MockIO::override(); 77 | 78 | #-- special_cases 79 | 80 | ok !%{ Kafka::MockIO::special_cases() }, 'There are no special cases'; 81 | 82 | #-- add_special_case 83 | 84 | Kafka::MockIO::add_special_case( { $test_message => $test_message } ); 85 | ok( scalar( keys( %{ Kafka::MockIO::special_cases() } ) ) == 1 && Kafka::MockIO::special_cases()->{ $test_message } eq $test_message, 'The special case added' ); 86 | 87 | #-- del_special_case 88 | 89 | Kafka::MockIO::del_special_case( $test_message ); 90 | ok !%{ Kafka::MockIO::special_cases() }, 'The special case deleted'; 91 | 92 | #-- Kafka::IO API ---------- 93 | 94 | Kafka::MockIO::add_special_case( { $test_message => $test_message } ); 95 | 96 | # NOTE: This partially duplicates the test code of t/02_io.t (see the INSTRUCTIONS section) 97 | 98 | my $io = Kafka::IO->new( 99 | host => 'localhost', 100 | port => $KAFKA_SERVER_PORT, 101 | timeout => $REQUEST_TIMEOUT, 102 | ); 103 | 
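# A minimal, hypothetical sketch of the symbol-table swap whose effects the
# override()/restore() tests above verify; _swap_io_sub() is an illustrative
# helper and is NOT part of Kafka::MockIO's actual API.
sub _swap_io_sub {
    my ( $name, $code_ref ) = @_;

    no strict 'refs';           ## no critic
    no warnings 'redefine';
    # reassigning the glob makes every caller of the named Kafka::IO
    # entry point reach the substituted implementation
    *{ "Kafka::IO::$name" } = $code_ref;

    return;
}
# e.g. _swap_io_sub( 'send', sub { ... } ) to mock send(), then
# _swap_io_sub( 'send', $original_IO_API{send} ) to put the original back.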
isa_ok( $io, 'Kafka::IO' ); 104 | 105 | #-- _is_alive 106 | 107 | ok $io->_is_alive, 'socket alive'; 108 | 109 | #-- close 110 | 111 | ok $io->{socket}, 'socket defined'; 112 | $io->close; 113 | ok !$io->{socket}, 'socket not defined'; 114 | 115 | #-- _is_alive 116 | 117 | ok !$io->_is_alive, 'socket not alive'; 118 | 119 | undef $io; 120 | dies_ok { 121 | $io = Kafka::IO->new( 122 | host => 'incorrect host', 123 | port => 'incorrect port', 124 | timeout => 'incorrect timeout', 125 | ); 126 | } 'expecting to die'; 127 | 128 | #-- new 129 | 130 | # host 131 | 132 | $@ = $test_message; 133 | 134 | foreach my $bad_host ( @not_string ) { 135 | undef $io; 136 | dies_ok { 137 | $io = Kafka::IO->new( 138 | host => $bad_host, 139 | port => $KAFKA_SERVER_PORT, 140 | timeout => $REQUEST_TIMEOUT, 141 | ); 142 | } 'Invalid host'; 143 | } 144 | 145 | # port 146 | 147 | foreach my $bad_port ( @not_posint ) { 148 | undef $io; 149 | dies_ok { 150 | $io = Kafka::IO->new( 151 | host => 'localhost', 152 | port => $bad_port, 153 | timeout => $REQUEST_TIMEOUT, 154 | ); 155 | } 'Invalid port'; 156 | } 157 | 158 | # timeout 159 | 160 | foreach my $bad_timeout ( @not_posnumber ) { 161 | undef $io; 162 | dies_ok { 163 | $io = Kafka::IO->new( 164 | host => 'localhost', 165 | port => $KAFKA_SERVER_PORT, 166 | timeout => $bad_timeout, 167 | ); 168 | } 'Invalid timeout'; 169 | } 170 | 171 | #-- send 172 | 173 | $io = Kafka::IO->new( 174 | host => 'localhost', 175 | port => $KAFKA_SERVER_PORT, 176 | timeout => $REQUEST_TIMEOUT, 177 | ); 178 | 179 | is( $io->send( $test_message ), length( $test_message ), 'sent '.length( $test_message ).' bytes' ); 180 | 181 | #-- receive 182 | 183 | is( ${ $io->receive( length( $test_message ) ) }, $test_message, 'receive OK' ); 184 | 185 | #-- send 186 | 187 | foreach my $bad_message ( @not_string ) { 188 | $io = Kafka::IO->new( 189 | host => 'localhost', 190 | port => $KAFKA_SERVER_PORT, 191 | timeout => $REQUEST_TIMEOUT, 192 | ); 193 | ok $io->_is_alive, 'socket alive'; 194 | 195 | dies_ok { $io->send( $bad_message ); } 'expecting to die'; 196 | } 197 | 198 | #-- receive 199 | 200 | ok $io->_is_alive, 'socket alive'; 201 | 202 | foreach my $bad_len ( @not_posint ) { 203 | dies_ok { $io->receive( $bad_len ); } 'expecting to die'; 204 | } 205 | 206 | #-- Kafka server capabilities ---------- 207 | 208 | #-- APIKEY_PRODUCE 209 | 210 | ok !exists( $Kafka::MockIO::_received_data{ $topic }->{ $partition } ), 'data is not received'; 211 | $io->send( $encoded_produce_request ); 212 | is( scalar( @{ $Kafka::MockIO::_received_data{ $topic }->{ $partition } } ), 1, 'data is received' ); 213 | 214 | #-- all requests 215 | 216 | foreach my $encoded_request ( 217 | $encoded_produce_request, 218 | $encoded_fetch_request, 219 | $encoded_offset_request, 220 | $encoded_metadata_request, 221 | ) { 222 | $io->send( $encoded_request ); 223 | my $encoded_response = ${ $io->receive( 4 ) }; # response length 224 | $encoded_response .= ${ $io->receive( unpack( 'l>', $encoded_response ) ) }; 225 | ok( defined( _STRING( $encoded_response ) ), 'response received' ); 226 | } 227 | 228 | #-- APIKEY_OFFSET 229 | 230 | #-- APIKEY_METADATA 231 | 232 | Kafka::MockIO::restore(); 233 | 234 | -------------------------------------------------------------------------------- /t/05_protocol.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use 
Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::NoWarnings'; ## no critic 17 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 18 | } 19 | 20 | plan 'no_plan'; 21 | 22 | use Kafka qw( 23 | $COMPRESSION_NONE 24 | $DEFAULT_MAX_BYTES 25 | $DEFAULT_MAX_NUMBER_OF_OFFSETS 26 | $DEFAULT_MAX_WAIT_TIME 27 | $MIN_BYTES_RESPOND_IMMEDIATELY 28 | $NOT_SEND_ANY_RESPONSE 29 | $RECEIVE_LATEST_OFFSETS 30 | $REQUEST_TIMEOUT 31 | ); 32 | use Kafka::Internals qw( 33 | $PRODUCER_ANY_OFFSET 34 | ); 35 | use Kafka::Protocol qw( 36 | decode_fetch_response 37 | ); 38 | 39 | # NOTE: We presume that the correctness of the arguments is verified by the user. 40 | 41 | my $request = {}; 42 | 43 | # CorrelationId 44 | $request->{CorrelationId} = 0; 45 | 46 | # ClientId 47 | $request->{ClientId} = q{}; 48 | 49 | # RequiredAcks 50 | $request->{RequiredAcks} = $NOT_SEND_ANY_RESPONSE; 51 | 52 | # Timeout 53 | $request->{Timeout} = $REQUEST_TIMEOUT * 1000; # ms 54 | 55 | # MaxWaitTime 56 | $request->{MaxWaitTime} = $DEFAULT_MAX_WAIT_TIME; 57 | 58 | # MinBytes 59 | $request->{MinBytes} = $MIN_BYTES_RESPOND_IMMEDIATELY; 60 | 61 | # topics 62 | 63 | # TopicName 64 | $request->{topics} = [ { TopicName => 'mytopic' } ]; 65 | 66 | # partitions 67 | 68 | # Partition 69 | $request->{topics}->[0]->{partitions}->[0]->{Partition} = 0; 70 | 71 | # MessageSet 72 | $request->{topics}->[0]->{partitions}->[0]->{MessageSet} = [ {} ]; 73 | 74 | # FetchOffset 75 | $request->{topics}->[0]->{partitions}->[0]->{FetchOffset} = 0; 76 | 77 | # MaxBytes 78 | $request->{topics}->[0]->{partitions}->[0]->{MaxBytes} = $DEFAULT_MAX_BYTES; 79 | 80 | # Time 81 | $request->{topics}->[0]->{partitions}->[0]->{Time} = $RECEIVE_LATEST_OFFSETS; 82 | 83 | # MaxNumberOfOffsets 84 | $request->{topics}->[0]->{partitions}->[0]->{MaxNumberOfOffsets} = $DEFAULT_MAX_NUMBER_OF_OFFSETS; 85 | 86 | #- MessageSet internal 87 | 88 | # Offset 89 | $request->{topics}->[0]->{partitions}->[0]->{MessageSet}->[0]->{Offset} = $PRODUCER_ANY_OFFSET; 90 | 91 | # NOTE: MagicByte, Attributes should not be assigned by the user 92 | $request->{topics}->[0]->{partitions}->[0]->{MessageSet}->[0]->{MagicByte} = 0; 93 | $request->{topics}->[0]->{partitions}->[0]->{MessageSet}->[0]->{Attributes} = $COMPRESSION_NONE; 94 | 95 | # Key, Value 96 | $request->{topics}->[0]->{partitions}->[0]->{MessageSet}->[0]->{Key} = q{}; 97 | $request->{topics}->[0]->{partitions}->[0]->{MessageSet}->[0]->{Value} = q{}; 98 | 99 | #-- decode_fetch_response (_decode_MessageSet_template) 100 | 101 | # According to Apache Kafka documentation: 102 | # As an optimization the server is allowed to return a partial message at the end of the message set. 103 | # Clients should handle this case. 
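# A minimal sketch (an assumption, not the module's actual code) of the
# completeness check this implies, for a buffer positioned at a MessageSet
# entry whose wire layout is: [q] Offset, [l] MessageSize, then MessageSize
# bytes of message data; the helper name below is hypothetical.
sub _message_entry_is_complete {
    my ( $buffer ) = @_;

    # the 12-byte entry header (Offset + MessageSize) must be present first
    return 0 if length( $buffer ) < 12;
    my ( undef, $message_size ) = unpack( 'q> l>', $buffer );
    # the entry is complete only when the advertised body fits in the buffer
    return length( $buffer ) >= 12 + $message_size;
}
# decode_fetch_response() presumably applies the same idea per entry, keeping
# every fully received message and dropping the partial tail, which is what
# the corrupted hex streams below are constructed to verify.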
104 | 105 | # description of responses, see t/??_decode_encode.t 106 | 107 | # the correct encoded fetch response hex stream 108 | my $encoded_response = pack( "H*", '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000470000000000000000000000148dc795a20000ffffffff0000000648656c6c6f2100000000000000010000001b989feb390000ffffffff0000000d48656c6c6f2c20576f726c6421' ); 109 | # the correct decoded fetch response 110 | #$decoded_response = { 111 | # CorrelationId => 0, 112 | # topics => [ 113 | # { 114 | # TopicName => 'mytopic', 115 | # partitions => [ 116 | # { 117 | # Partition => 0, 118 | # ErrorCode => 0, 119 | # HighwaterMarkOffset => 2, 120 | # MessageSet => [ 121 | # { 122 | # Offset => 0, 123 | # MagicByte => 0, 124 | # Attributes => 0, 125 | # Key => q{}, 126 | # Value => 'Hello!', 127 | # }, 128 | # { 129 | # Offset => 1, 130 | # MagicByte => 0, 131 | # Attributes => 0, 132 | # Key => q{}, 133 | # Value => 'Hello, World!', 134 | # }, 135 | # ], 136 | # }, 137 | # ], 138 | # }, 139 | # ], 140 | #}; 141 | 142 | my $decoded = decode_fetch_response( \$encoded_response ); 143 | is scalar( @{ $decoded->{topics}->[0]->{partitions}->[0]->{MessageSet} } ), 2, 'all messages are decoded'; 144 | 145 | foreach my $corrupted ( 146 | # Not the full MessageSet: $MessageSetSize < 22; 147 | # [q] Offset 148 | # [l] MessageSize 149 | # [l] Crc 150 | # [c] MagicByte 151 | # [c] Attributes 152 | # [l] Key length 153 | # [l] Value length 154 | { 155 | hex_stream => '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000150000000000000000000000148dc795a20000ffffffff0000000648656c6c6f2100000000000000010000001b989feb390000ffffffff0000000d48656c6c6f2c20576f726c6421', 156 | received => 0, 157 | description => 'no decoded messages', 158 | }, 159 | { 160 | hex_stream => '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000150000000000000000000000148dc795a20000ffffff', 161 | received => 0, 162 | description => 'no decoded messages', 163 | }, 164 | # Not the full MessageSet: not the full Key or Value length 165 | { 166 | hex_stream => '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000470000000000000000000000148dc795a20000ffffffff000000', 167 | received => 0, 168 | description => 'no decoded messages', 169 | }, 170 | { 171 | hex_stream => '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000470000000000000000000000148dc795a20000ffffffff0000000648656c6c6f2100000000000000010000001b989feb390000ffffffff0000', 172 | received => 1, 173 | description => 'the first message is decoded', 174 | }, 175 | # Not the full MessageSet: not the full Value 176 | { 177 | hex_stream => '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000470000000000000000000000148dc795a20000ffffffff0000000648656c6c6f', 178 | received => 0, 179 | description => 'no decoded messages', 180 | }, 181 | { 182 | hex_stream => '0000006e000000000000000100076d79746f706963000000010000000000000000000000000002000000470000000000000000000000148dc795a20000ffffffff0000000648656c6c6f2100000000000000010000001b989feb390000ffffffff0000000d48656c6c6f2c20576f726c64', 183 | received => 1, 184 | description => 'the first message is decoded', 185 | }, 186 | ) { 187 | $encoded_response = pack( "H*", $corrupted->{hex_stream} ); 188 | $decoded = decode_fetch_response( \$encoded_response ); 189 | is scalar( @{ $decoded->{topics}->[0]->{partitions}->[0]->{MessageSet} } ), 
$corrupted->{received}, $corrupted->{description}; 190 | } 191 | 192 | -------------------------------------------------------------------------------- /t/07_cluster.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Params::Util qw( 28 | _HASH 29 | _SCALAR 30 | ); 31 | 32 | use Kafka::IO; 33 | use Kafka::Protocol qw( 34 | decode_metadata_response 35 | decode_produce_response 36 | ); 37 | 38 | use Kafka::Cluster; 39 | 40 | ok defined( Kafka::Cluster::data_cleanup() ), 'data directory cleaned'; 41 | 42 | #Kafka::IO->debug_level( 1 ); 43 | my $cluster = Kafka::Cluster->new(); 44 | isa_ok( $cluster, 'Kafka::Cluster' ); 45 | 46 | #Kafka::IO->debug_level( 1 ); 47 | $cluster->init; 48 | $cluster->start_all; 49 | 50 | my ( $response, $decode ); 51 | 52 | #-- MetadataRequest 53 | #Kafka::IO->debug_level( 1 ); 54 | $response = $cluster->request( ( $cluster->servers )[0], '000000230003000000000000000C746573742D726571756573740000000100076D79746F706963' ); 55 | ok _SCALAR( $response ), 'correct request'; 56 | #diag( 'Hex Stream: ', unpack( 'H*', $$response ) ); 57 | $decode = decode_metadata_response( $response ); 58 | ok _HASH( $decode ), 'correct decode'; 59 | #diag( Data::Dumper->Dump( [ $decode ], [ 'metadata_response' ] ) ); 60 | 61 | #-- ProduceRequest 62 | #Kafka::IO->debug_level( 1 ); 63 | $response = $cluster->request( ( $cluster->servers )[0], '00000049000000000000000400000001000005dc0000000100076d79746f7069630000000100000000000000200000000000000000000000148dc795a20000ffffffff0000000648656c6c6f21' ); 64 | ok _SCALAR( $response ), 'correct request'; 65 | #diag( 'Hex Stream: ', unpack( 'H*', $$response ) ); 66 | $decode = decode_produce_response( $response ); 67 | ok _HASH( $decode ), 'correct decode'; 68 | #diag( Data::Dumper->Dump( [ $decode ], [ 'produce_response' ] ) ); 69 | 70 | $cluster->close; 71 | 72 | -------------------------------------------------------------------------------- /t/08_cluster_start.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | 21 | BEGIN { 22 | eval 'use Test::NoWarnings'; ## no critic 23 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 24 | } 25 | 26 | plan 'no_plan'; 27 | 28 | use Kafka::Cluster; 29 | 30 | my $cluster = Kafka::Cluster->new(); 31 | isa_ok( $cluster, 'Kafka::Cluster' ); 32 | 33 | -------------------------------------------------------------------------------- /t/10_message.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because 
Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Params::Util qw( 28 | _HASH 29 | ); 30 | 31 | use Kafka::Message; 32 | use Kafka::TestInternals qw( 33 | @not_hash 34 | ); 35 | 36 | my $msg = { 37 | Attributes => 0, 38 | error => 0, 39 | HighwaterMarkOffset => 0, 40 | key => q{}, 41 | MagicByte => 0, 42 | next_offset => 0, 43 | payload => q{}, 44 | offset => 0, 45 | valid => 1, 46 | }; 47 | 48 | # NOTE: We presume that the correctness of the arguments is verified by the user. 49 | 50 | #foreach my $bad_arg ( @not_hash ) { 51 | # dies_ok { $message = Kafka::Message->new( $bad_arg ) } 'exception to die'; 52 | #} 53 | 54 | my $message; 55 | 56 | lives_ok { $message = Kafka::Message->new( $msg ) } 'expecting to live'; 57 | isa_ok( $message, 'Kafka::Message' ); 58 | 59 | foreach my $method ( @Kafka::Message::_standard_fields ) 60 | { 61 | is $message->$method, $msg->{ $method }, "proper operation"; 62 | } 63 | 64 | -------------------------------------------------------------------------------- /t/11_producer.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Const::Fast; 28 | use Params::Util qw( 29 | _ARRAY0 30 | _HASH 31 | ); 32 | use Sub::Install; 33 | 34 | use Kafka qw( 35 | $BLOCK_UNTIL_IS_COMMITTED 36 | $COMPRESSION_GZIP 37 | $COMPRESSION_NONE 38 | $COMPRESSION_SNAPPY 39 | $COMPRESSION_LZ4 40 | $ERROR_MISMATCH_ARGUMENT 41 | $NOT_SEND_ANY_RESPONSE 42 | $REQUEST_TIMEOUT 43 | $RETRY_BACKOFF 44 | $WAIT_WRITTEN_TO_LOCAL_LOG 45 | ); 46 | use Kafka::Cluster; 47 | use Kafka::Connection; 48 | use Kafka::MockIO; 49 | use Kafka::Producer; 50 | use Kafka::TestInternals qw( 51 | @not_empty_string 52 | @not_isint 53 | @not_nonnegint 54 | @not_number 55 | @not_right_object 56 | @not_string_array 57 | $topic 58 | ); 59 | 60 | # WARNING: must match the settings of your system 61 | const my $KAFKA_BASE_DIR => $ENV{KAFKA_BASE_DIR}; 62 | 63 | my ( $port, $connect, $partition, $producer, $response ); 64 | 65 | sub new_ERROR_MISMATCH_ARGUMENT { 66 | my ( $field, @bad_values ) = @_; 67 | 68 | foreach my $bad_value ( @bad_values ) { 69 | undef $producer; 70 | throws_ok { 71 | $producer = Kafka::Producer->new( 72 | Connection => $connect, 73 | ClientId => 'producer', 74 | RequiredAcks => $WAIT_WRITTEN_TO_LOCAL_LOG, 75 | Timeout => $REQUEST_TIMEOUT * 1000, # This provides a maximum time (ms) the server can await the receipt of the number of acknowledgements in RequiredAcks 76 | $field => $bad_value, 77 | ); 78 | } 'Kafka::Exception::Producer', 'error thrown'; 79 | } 80 | } 81 | 82 | sub send_ERROR_MISMATCH_ARGUMENT { 83 | my ( $topic, $partition, $messages, $key, $compression_codec ) = @_; 84 | 85 | $producer = Kafka::Producer->new( 86 | Connection => $connect, 87 | ); 88 | undef $response; 89 | throws_ok { 90 | $response = $producer->send( 91 | $topic, 92 | $partition, 93 | $messages, 94 | 
$key, 95 | $compression_codec, 96 | ); 97 | } 'Kafka::Exception', 'error thrown'; 98 | } 99 | 100 | sub communication_error { 101 | my ( $module, $name ) = @_; 102 | 103 | my $method_name = "${module}::${name}"; 104 | my $method = \&$method_name; 105 | 106 | $connect = Kafka::Connection->new( 107 | host => 'localhost', 108 | port => $port, 109 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 110 | ); 111 | $producer = Kafka::Producer->new( 112 | Connection => $connect, 113 | ); 114 | 115 | Sub::Install::reinstall_sub( { 116 | code => sub { 117 | my ( $self ) = @_; 118 | $self->_error( $ERROR_MISMATCH_ARGUMENT ); 119 | }, 120 | into => $module, 121 | as => $name, 122 | } ); 123 | 124 | undef $response; 125 | throws_ok { 126 | $response = $producer->send( 127 | $topic, 128 | $partition, 129 | 'Single message', 130 | ); 131 | } 'Kafka::Exception', 'error thrown'; 132 | 133 | Sub::Install::reinstall_sub( { 134 | code => $method, 135 | into => $module, 136 | as => $name, 137 | } ); 138 | } 139 | 140 | $partition = $Kafka::MockIO::PARTITION;; 141 | 142 | testing(); 143 | testing( $KAFKA_BASE_DIR ) if $KAFKA_BASE_DIR; 144 | 145 | sub testing { 146 | my ( $kafka_base_dir ) = @_; 147 | 148 | my $no_api_versions = 0; 149 | 150 | if ( $kafka_base_dir ) { 151 | #-- Connecting to the Kafka server port (for example for node_id = 0) 152 | ( $port ) = Kafka::Cluster->new( kafka_dir => $KAFKA_BASE_DIR, reuse_existing => 1 )->servers; 153 | } else { 154 | $port = $Kafka::MockIO::KAFKA_MOCK_SERVER_PORT; 155 | Kafka::MockIO::override(); 156 | $no_api_versions = 1; # no API versions support in Mock protocol 157 | } 158 | 159 | $connect = Kafka::Connection->new( 160 | host => 'localhost', 161 | port => $port, 162 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 163 | dont_load_supported_api_versions => $no_api_versions, 164 | ); 165 | 166 | #-- simple start 167 | 168 | $producer = Kafka::Producer->new( 169 | Connection => $connect, 170 | ); 171 | isa_ok( $producer, 'Kafka::Producer' ); 172 | 173 | undef $producer; 174 | ok !$producer, 'producer object is destroyed'; 175 | 176 | #-- new 177 | 178 | new_ERROR_MISMATCH_ARGUMENT( 'Connection', @not_right_object ); 179 | new_ERROR_MISMATCH_ARGUMENT( 'ClientId', @not_empty_string ); 180 | new_ERROR_MISMATCH_ARGUMENT( 'RequiredAcks', @not_isint ); 181 | new_ERROR_MISMATCH_ARGUMENT( 'Timeout', grep { defined $_ } @not_number ); # undef is allowed and replaced with connection timeout or $REQUEST_TIMEOUT 182 | 183 | #-- send 184 | send_ERROR_MISMATCH_ARGUMENT( $_, $partition, 'Some value', 'Some key' ) 185 | foreach @not_empty_string; 186 | send_ERROR_MISMATCH_ARGUMENT( $topic, $_, 'Some value', 'Some key' ) 187 | foreach @not_isint; 188 | 189 | foreach my $bad_message ( 190 | grep( { !_ARRAY0( $_ ) } @not_empty_string ), 191 | @not_string_array, 192 | ) { 193 | send_ERROR_MISMATCH_ARGUMENT( $topic, $partition, $bad_message, 'Some key' ); 194 | } 195 | 196 | send_ERROR_MISMATCH_ARGUMENT( $topic, $partition, 'Some value', $_ ) 197 | foreach grep { !( _ARRAY0( $_ ) && @$_ == 1 ) } @not_empty_string; 198 | 199 | send_ERROR_MISMATCH_ARGUMENT( $topic, $partition, 'Some value', 'Some key', $_ ) 200 | foreach ( @not_isint, $COMPRESSION_NONE - 1, $COMPRESSION_LZ4 + 1 ); 201 | # Valid values for $compression_codec checked in the test *_consumer.t 202 | 203 | #-- ProduceRequest 204 | 205 | for my $mode ( 206 | $NOT_SEND_ANY_RESPONSE, 207 | $WAIT_WRITTEN_TO_LOCAL_LOG, 208 | $BLOCK_UNTIL_IS_COMMITTED, 209 | ) { 210 | 211 | $producer = Kafka::Producer->new( 212 | Connection => $connect, 213 | 
RequiredAcks => $mode, 214 | ); 215 | isa_ok( $producer, 'Kafka::Producer' ); 216 | 217 | # Sending a single message 218 | $response = $producer->send( 219 | $topic, 220 | $partition, 221 | 'Single message' # message 222 | ); 223 | ok _HASH( $response ), 'response is received'; 224 | 225 | # Sending a series of messages 226 | $response = $producer->send( 227 | $topic, 228 | $partition, 229 | [ # messages 230 | 'The first message', 231 | 'The second message', 232 | 'The third message', 233 | ] 234 | ); 235 | ok _HASH( $response ), 'response is received'; 236 | 237 | # TODO: MockIO supports only v0, for timestamps v2 is used 238 | if ( $kafka_base_dir ) { 239 | # Sending a single message with timestamp 240 | $response = $producer->send( 241 | $topic, 242 | $partition, 243 | 'Single message', # message 244 | undef, 245 | undef, 246 | time()*1000 247 | ); 248 | ok _HASH( $response ), 'response is received'; 249 | 250 | # Sending a series of messages with timestamp 251 | $response = $producer->send( 252 | $topic, 253 | $partition, 254 | [ # messages 255 | 'The first message', 256 | 'The second message', 257 | 'The third message', 258 | ], 259 | undef, 260 | undef, 261 | time()*1000 262 | ); 263 | ok _HASH( $response ), 'response is received'; 264 | 265 | # Sending a series of messages with series of timestamps 266 | $response = $producer->send( 267 | $topic, 268 | $partition, 269 | [ # messages 270 | 'The first message', 271 | 'The second message', 272 | 'The third message', 273 | ], 274 | undef, 275 | undef, 276 | [time()*1000 - 3, time()*1000 - 2, time()*1000 - 1] 277 | ); 278 | ok _HASH( $response ), 'response is received'; 279 | } 280 | 281 | } 282 | 283 | #-- Response to errors in communication modules 284 | 285 | # Kafka::IO 286 | communication_error( 'Kafka::IO', 'send' ); 287 | 288 | # Kafka::Connection 289 | communication_error( 'Kafka::Connection', 'receive_response_to_request' ); 290 | 291 | Kafka::MockIO::restore() 292 | unless $kafka_base_dir; 293 | } 294 | 295 | -------------------------------------------------------------------------------- /t/13_leader_not_found.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::Exception'; ## no critic 22 | plan skip_all => "because Test::Exception required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use Kafka qw( 33 | $RETRY_BACKOFF 34 | ); 35 | use Kafka::Cluster; 36 | use Kafka::Connection; 37 | use Kafka::MockIO; 38 | use Kafka::TestInternals qw( 39 | $topic 40 | ); 41 | 42 | my $partition = $Kafka::MockIO::PARTITION;; 43 | 44 | testing(); 45 | 46 | sub testing { 47 | #-- Connecting to the Kafka server port (for example for node_id = 0) 48 | my $cluster = Kafka::Cluster->new( reuse_existing => 1 ); 49 | my @server_ports = $cluster->servers; 50 | my $port = $server_ports[0]; 51 | 52 | #-- simple start 53 | my $connect = Kafka::Connection->new( 54 | host => 'localhost', 55 | port => $port, 56 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 57 | dont_load_supported_api_versions => 1, 58 | ); 59 | isa_ok( $connect, 'Kafka::Connection' ); 60 
| 61 | #-- stop leader 62 | my ( $leader_server, $leader_port ) = get_leader( $connect ); 63 | ok $connect->_is_server_alive( $leader_server ), 'leader is alive'; 64 | ok $connect->_is_server_connected( $leader_server ), 'leader is connected'; 65 | $cluster->stop( $leader_port ); 66 | ok !$connect->_is_server_alive( $leader_server ), 'leader is not alive'; 67 | ok !$connect->_is_server_connected( $leader_server ), 'leader is not connected'; 68 | my ( $next_leader_server ) = get_leader( $connect ); 69 | ok $connect->_is_server_alive( $next_leader_server ), 'new leader is alive'; 70 | ok $connect->_is_server_connected( $next_leader_server ), 'new leader is connected'; 71 | 72 | #-- start previous leader 73 | $cluster->_remove_log_tree( $leader_port ); 74 | $cluster->start( $leader_port ); 75 | ok $connect->_is_server_alive( $leader_server ), 'leader is alive'; 76 | 77 | #-- close 78 | $connect->close; 79 | my $tmp = 0; 80 | foreach my $server ( $connect->get_known_servers() ) { 81 | ++$tmp if $connect->_is_server_connected( $server ); 82 | } 83 | ok !$tmp, 'no servers are connected'; 84 | } 85 | 86 | sub get_leader { 87 | my ( $connect ) = @_; 88 | 89 | my $metadata = $connect->get_metadata( $topic ); 90 | my $leader_id = $metadata->{ $topic }->{ $partition }->{Leader}; 91 | my $leader_server = $connect->_find_leader_server( $leader_id ); 92 | my ( $leader_port ) = $leader_server =~ /:(\d{1,5})$/; 93 | 94 | return( $leader_server, $leader_port ); 95 | } 96 | 97 | -------------------------------------------------------------------------------- /t/14_sasl.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Const::Fast; 28 | #use Data::Dumper; 29 | use Sub::Install; 30 | 31 | use Kafka qw( 32 | %ERROR 33 | 34 | $ERROR_NO_ERROR 35 | $ERROR_UNKNOWN 36 | $ERROR_OFFSET_OUT_OF_RANGE 37 | $ERROR_INVALID_MESSAGE 38 | $ERROR_UNKNOWN_TOPIC_OR_PARTITION 39 | $ERROR_INVALID_FETCH_SIZE 40 | $ERROR_LEADER_NOT_AVAILABLE 41 | $ERROR_NOT_LEADER_FOR_PARTITION 42 | $ERROR_REQUEST_TIMED_OUT 43 | $ERROR_BROKER_NOT_AVAILABLE 44 | $ERROR_REPLICA_NOT_AVAILABLE 45 | $ERROR_MESSAGE_TOO_LARGE 46 | $ERROR_STALE_CONTROLLER_EPOCH 47 | $ERROR_OFFSET_METADATA_TOO_LARGE 48 | $ERROR_NETWORK_EXCEPTION 49 | $ERROR_GROUP_LOAD_IN_PROGRESS 50 | $ERROR_GROUP_COORDINATOR_NOT_AVAILABLE 51 | $ERROR_NOT_COORDINATOR_FOR_GROUP 52 | $ERROR_INVALID_TOPIC_EXCEPTION 53 | $ERROR_RECORD_LIST_TOO_LARGE 54 | $ERROR_NOT_ENOUGH_REPLICAS 55 | $ERROR_NOT_ENOUGH_REPLICAS_AFTER_APPEND 56 | $ERROR_INVALID_REQUIRED_ACKS 57 | $ERROR_ILLEGAL_GENERATION 58 | $ERROR_INCONSISTENT_GROUP_PROTOCOL 59 | $ERROR_INVALID_GROUP_ID 60 | $ERROR_UNKNOWN_MEMBER_ID 61 | $ERROR_INVALID_SESSION_TIMEOUT 62 | $ERROR_REBALANCE_IN_PROGRESS 63 | $ERROR_INVALID_COMMIT_OFFSET_SIZE 64 | $ERROR_TOPIC_AUTHORIZATION_FAILED 65 | $ERROR_GROUP_AUTHORIZATION_FAILED 66 | $ERROR_CLUSTER_AUTHORIZATION_FAILED 67 | $ERROR_INVALID_TIMESTAMP 68 | $ERROR_UNSUPPORTED_SASL_MECHANISM 69 | $ERROR_ILLEGAL_SASL_STATE 70 | $ERROR_UNSUPPORTED_VERSION 71 | 72 | $ERROR_CANNOT_GET_METADATA 73 | 
$ERROR_LEADER_NOT_FOUND 74 | $ERROR_MISMATCH_ARGUMENT 75 | 76 | $ERROR_CANNOT_BIND 77 | $ERROR_CANNOT_RECV 78 | $ERROR_CANNOT_SEND 79 | $ERROR_SEND_NO_ACK 80 | $ERROR_NO_CONNECTION 81 | $MIN_BYTES_RESPOND_HAS_DATA 82 | $RECEIVE_EARLIEST_OFFSET 83 | $REQUEST_TIMEOUT 84 | $RETRY_BACKOFF 85 | $SEND_MAX_ATTEMPTS 86 | $WAIT_WRITTEN_TO_LOCAL_LOG 87 | ); 88 | use Kafka::Connection qw( 89 | %RETRY_ON_ERRORS 90 | ); 91 | use Kafka::Consumer; 92 | use Kafka::Producer; 93 | 94 | use Kafka::Internals qw( 95 | $APIKEY_OFFSET 96 | $APIKEY_PRODUCE 97 | $PRODUCER_ANY_OFFSET 98 | ); 99 | 100 | use Kafka::MockIO; 101 | 102 | Kafka::MockIO::override(); 103 | #$Kafka::Connection::DEBUG = 1; 104 | 105 | const my $host => $Kafka::MockIO::KAFKA_MOCK_HOSTNAME; 106 | const my $port => $Kafka::MockIO::KAFKA_MOCK_SERVER_PORT; 107 | const my $topic => $Kafka::MockIO::TOPIC; 108 | const my $partition => $Kafka::MockIO::PARTITION; 109 | const my $CorrelationId => 0; 110 | 111 | Sub::Install::reinstall_sub( 112 | { 113 | code => sub { 0 }, 114 | into => 'Kafka::Internals', 115 | as => '_get_CorrelationId', 116 | } 117 | ); 118 | our ( $replaced_method, $skip_calls ); 119 | sub Kafka_IO_error { 120 | my $method_name = shift; 121 | $skip_calls = shift; 122 | my $expected_error_code = shift; 123 | my $expected_nonfatals = shift; 124 | my $decoded_request = shift; 125 | my $throw_error = shift // $ERROR_CANNOT_SEND; 126 | 127 | my $replaced_method_name = 'Kafka::IO::'.$method_name; 128 | $replaced_method = \&$replaced_method_name; 129 | 130 | Sub::Install::reinstall_sub( 131 | { 132 | code => sub { 133 | if ( $main::skip_calls ) { 134 | --$main::skip_calls; 135 | return $main::replaced_method->( @_ ); 136 | } else { 137 | my ( $self ) = @_; 138 | $self->_error( $throw_error ); 139 | } 140 | }, 141 | into => 'Kafka::IO', 142 | as => $method_name, 143 | } 144 | ); 145 | 146 | my $connection = Kafka::Connection->new( 147 | host => $host, 148 | port => $port, 149 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 150 | dont_load_supported_api_versions => 1, 151 | ); 152 | 153 | is scalar( @{ $connection->nonfatal_errors } ), 0, 'no non-fatal errors are recorded'; 154 | eval { $connection->receive_response_to_request( $decoded_request ); }; 155 | my $result_error = $@; 156 | isa_ok( $result_error, 'Kafka::Exception::Connection' ); 157 | is $result_error->code, $expected_error_code, 'non-fatal error: '.$ERROR{ $expected_error_code }; 158 | # the connection is available, but the metadata request cannot be sent 159 | is scalar( @{ $connection->nonfatal_errors } ), $expected_nonfatals, "$expected_nonfatals non-fatal errors are recorded"; 160 | 161 | Sub::Install::reinstall_sub( 162 | { 163 | code => $replaced_method, 164 | into => 'Kafka::IO', 165 | as => $method_name, 166 | } 167 | ); 168 | } 169 | 170 | my ( $connection, $error ); 171 | 172 | #-- Connecting to the Kafka mocked server port 173 | 174 | #-- Connection 175 | 176 | $connection = Kafka::Connection->new( 177 | host => $host, 178 | port => $port, 179 | sasl_username => 'test_user', 180 | sasl_password => 'test_password', 181 | sasl_mechanizm => 'SCRAM-SHA-512', 182 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 183 | dont_load_supported_api_versions => 1, 184 | ); 185 | 186 | my $meta = eval { $connection->get_metadata() }; 187 | ok( !$@, 'no exception on auth' ); 188 | ok(exists $meta->{mytopic}, "Get topic"); 189 | -------------------------------------------------------------------------------- /t/20_kafka_usage.t: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::Exception'; ## no critic 22 | plan skip_all => "because Test::Exception required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use Const::Fast; 33 | use Socket; 34 | 35 | # Usage - Basic functionalities to include a simple Producer and Consumer 36 | # You need to have access to your Kafka instance and be able to connect through TCP 37 | 38 | use Kafka qw( 39 | $DEFAULT_MAX_BYTES 40 | $DEFAULT_MAX_NUMBER_OF_OFFSETS 41 | $KAFKA_SERVER_PORT 42 | $RECEIVE_LATEST_OFFSETS 43 | $RECEIVE_EARLIEST_OFFSET 44 | $REQUEST_TIMEOUT 45 | $RETRY_BACKOFF 46 | ); 47 | use Kafka::Cluster; 48 | use Kafka::Connection; 49 | use Kafka::Consumer; 50 | use Kafka::Producer; 51 | 52 | # If the reader closes the connection, though, the writer will get a SIGPIPE when it next tries to write there. 53 | $SIG{PIPE} = sub { die }; 54 | 55 | # port at which to start the search for the Kafka server 56 | const my $START_PORT => 9094; # Port Number 9094-9099 Unassigned 57 | const my $ITERATIONS => 100; # The maximum number of attempts 58 | 59 | const my $topic => $Kafka::Cluster::DEFAULT_TOPIC; 60 | const my $partition => 0; 61 | 62 | 63 | my ( $connect, $producer, $consumer, $response, $offsets ); 64 | 65 | #-- Connecting to the Kafka server port 66 | my $cluster = Kafka::Cluster->new( reuse_existing => 1 ); 67 | isa_ok( $cluster, 'Kafka::Cluster' ); 68 | 69 | my( $port ) = $cluster->servers; # for example for node_id = 0 70 | 71 | for my $host_name ( 'localhost', '127.0.0.1' ) { 72 | 73 | pass "Host name: $host_name"; 74 | 75 | #-- Connection 76 | 77 | dies_ok { $connect = Kafka::Connection->new( 78 | host => $host_name, 79 | port => $port, 80 | timeout => 'nothing', 81 | dont_load_supported_api_versions => 1, 82 | ) } 'expecting to die'; 83 | 84 | $connect = Kafka::Connection->new( 85 | host => $host_name, 86 | port => $port, 87 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 88 | dont_load_supported_api_versions => 1, 89 | ); 90 | isa_ok( $connect, 'Kafka::Connection'); 91 | 92 | #-- Producer 93 | 94 | dies_ok { $producer = Kafka::Producer->new( 95 | Connection => "nothing", 96 | ) } 'expecting to die'; 97 | 98 | undef $producer; 99 | lives_ok { $producer = Kafka::Producer->new( 100 | Connection => $connect, 101 | ) } 'expecting to live'; 102 | isa_ok( $producer, 'Kafka::Producer'); 103 | 104 | # Sending a single message 105 | if ( !( $response = $producer->send( 106 | $topic, # topic 107 | $partition, # partition 108 | 'Single message', # message 109 | ) ) ) { 110 | fail 'message is not sent'; 111 | } else { 112 | pass 'message is sent'; 113 | } 114 | 115 | # Sending a series of messages 116 | if ( !( $response = $producer->send( 117 | $topic, # topic 118 | $partition, # partition 119 | [ # messages 120 | "The first message", 121 | "The second message", 122 | "The third message", 123 | ], 124 | ) ) ) { 125 | fail 'messages are not sent'; 126 | } else { 127 | pass 'messages sent'; 128 | } 129 | 130 | # Closes the producer connection and cleans up 131 | undef $producer; 132 | ok( !defined( 
$producer ), 'the producer object is destroyed' ); 133 | $connect->close; 134 | 135 | #-- Consumer 136 | 137 | undef $connect; 138 | unless ( $connect = Kafka::Connection->new( 139 | host => $host_name, 140 | port => $port, 141 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 142 | dont_load_supported_api_versions => 1, 143 | ) ) { 144 | fail 'connection is not created'; 145 | } 146 | 147 | dies_ok { $consumer = Kafka::Consumer->new( 148 | Connection => "nothing", 149 | ) } 'expecting to die'; 150 | 151 | lives_ok { $consumer = Kafka::Consumer->new( 152 | Connection => $connect, 153 | ) } 'expecting to live'; 154 | unless ( $consumer ) { 155 | fail 'consumer is not created'; 156 | } 157 | isa_ok( $consumer, 'Kafka::Consumer'); 158 | 159 | # Offsets are monotonically increasing integers unique to a partition. 160 | # Consumers track the maximum offset they have consumed in each partition. 161 | 162 | # Get a list of valid offsets (up to max_number) before the given time. 163 | $offsets = $consumer->offsets( 164 | $topic, # topic 165 | $partition, # partition 166 | $RECEIVE_LATEST_OFFSETS, # time 167 | $DEFAULT_MAX_NUMBER_OF_OFFSETS, # max_number 168 | ); 169 | if ( $offsets ) { 170 | pass 'received offsets'; 171 | foreach my $offset ( @$offsets ) { 172 | note "Received offset: $offset"; 173 | } 174 | } 175 | # may be both physical and logical errors 176 | if ( !$offsets ) { 177 | fail 'offsets are not received'; 178 | } 179 | 180 | # Consuming messages one by one 181 | my $messages = $consumer->fetch( 182 | $topic, # topic 183 | $partition, # partition 184 | 0, # offset 185 | $DEFAULT_MAX_BYTES, # Maximum size of MESSAGE(s) to receive 186 | ); 187 | if ( $messages ) { 188 | pass 'received messages'; 189 | my $cnt = 0; 190 | foreach my $m ( @$messages ) { 191 | if ( $m->valid ) { 192 | # note "Payload : ", $m->payload; 193 | # note "offset : ", $m->offset; 194 | # note "next_offset: ", $m->next_offset; 195 | } else { 196 | diag "Message No $cnt, Error: ", $m->error; 197 | diag 'Payload : ', $m->payload; 198 | diag 'offset : ', $m->offset; 199 | diag 'next_offset: ', $m->next_offset; 200 | } 201 | ++$cnt; 202 | last if $cnt > 100; # enough 203 | } 204 | } 205 | # may be both physical and logical errors 206 | if ( !$messages ) { 207 | fail 'messages are not received'; 208 | } 209 | 210 | # Closes the consumer and cleans up 211 | undef $consumer; 212 | ok( !defined( $consumer ), 'the consumer object is destroyed' ); 213 | $connect->close; 214 | 215 | } 216 | 217 | -------------------------------------------------------------------------------- /t/30_cluster_stop.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Const::Fast; 28 | use File::HomeDir; 29 | use File::Spec; 30 | 31 | use Kafka::Cluster; 32 | 33 | my $cluster = Kafka::Cluster->new( 34 | reuse_existing => 1, 35 | ); 36 | isa_ok( $cluster, 'Kafka::Cluster' ); 37 | 38 | $cluster->close; 39 | 40 | -------------------------------------------------------------------------------- /t/40_autocreate_topics.t: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::Exception'; ## no critic 22 | plan skip_all => "because Test::Exception required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use Const::Fast; 33 | use FindBin qw( 34 | $Bin 35 | ); 36 | use File::Spec::Functions qw( 37 | catdir 38 | catfile 39 | ); 40 | use Params::Util qw( 41 | _ARRAY 42 | _ARRAY0 43 | _HASH 44 | ); 45 | 46 | use Kafka qw( 47 | $BLOCK_UNTIL_IS_COMMITTED 48 | $DEFAULT_MAX_BYTES 49 | $RECEIVE_LATEST_OFFSETS 50 | $RETRY_BACKOFF 51 | ); 52 | use Kafka::Cluster; 53 | use Kafka::Connection; 54 | use Kafka::Consumer; 55 | use Kafka::MockIO; 56 | use Kafka::Producer; 57 | 58 | const my $TOPIC_PATTERN => 'stranger0'; 59 | 60 | my ( $connection, $topic, $partition, $producer, $response, $consumer, $offsets, $messages ); 61 | 62 | sub sending { 63 | return $producer->send( 64 | ++$topic, # unknown topic 65 | $partition, 66 | 'Single message' # message 67 | ); 68 | } 69 | 70 | sub getting_offsets { 71 | return $consumer->offsets( 72 | ++$topic, 73 | $partition, 74 | $RECEIVE_LATEST_OFFSETS, # time 75 | ); 76 | } 77 | 78 | sub fetching { 79 | return $consumer->fetch( 80 | ++$topic, 81 | $partition, 82 | 0, # offset 83 | $DEFAULT_MAX_BYTES # Maximum size of MESSAGE(s) to receive 84 | ); 85 | } 86 | 87 | $partition = $Kafka::MockIO::PARTITION; 88 | $topic = $TOPIC_PATTERN; 89 | 90 | for my $auto_create_topics_enable ( 'true', 'false' ) { 91 | my $cluster = Kafka::Cluster->new( 92 | properties => { 93 | 'auto.create.topics.enable' => $auto_create_topics_enable, 94 | }, 95 | ); 96 | isa_ok( $cluster, 'Kafka::Cluster' ); 97 | 98 | #-- Connecting to the Kafka server port (for example for node_id = 0) 99 | my( $port ) = $cluster->servers; 100 | 101 | for my $AutoCreateTopicsEnable ( 0, 1 ) { 102 | #-- Connecting to the Kafka server port 103 | $connection = Kafka::Connection->new( 104 | host => 'localhost', 105 | port => $port, 106 | AutoCreateTopicsEnable => $AutoCreateTopicsEnable, 107 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 108 | dont_load_supported_api_versions => 1, 109 | ); 110 | $producer = Kafka::Producer->new( 111 | Connection => $connection, 112 | # Require verification of the number of messages sent and recorded 113 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 114 | ); 115 | $consumer = Kafka::Consumer->new( 116 | Connection => $connection, 117 | ); 118 | 119 | # Sending a single message 120 | undef $response; 121 | if ( $auto_create_topics_enable eq 'true' && $AutoCreateTopicsEnable ) { 122 | ok $connection->exists_topic_partition( $topic, $partition ), 'existing topic'; 123 | my $next_topic = $topic; 124 | ++$next_topic; 125 | ok !$connection->exists_topic_partition( $next_topic, $partition ), 'not yet existing topic'; 126 | lives_ok { $response = sending() } 'expecting to live'; 127 | ok _HASH( $response ), 'response is received'; 128 | $connection->get_metadata( $topic ); 129 | ok $connection->exists_topic_partition( $next_topic, $partition ), 'autocreated topic'; 130 | } else { 131 | if ( 
$auto_create_topics_enable ne 'true' ) { 132 | dies_ok { $response = sending() } 'expecting to die'; 133 | ok !defined( $response ), 'response is not received'; 134 | } 135 | } 136 | 137 | # Get a list of valid offsets up to max_number before the given time 138 | undef $offsets; 139 | if ( $auto_create_topics_enable eq 'true' && $AutoCreateTopicsEnable ) { 140 | lives_ok { $offsets = getting_offsets() } 'expecting to live'; 141 | ok _ARRAY( $offsets ), 'offsets are received'; 142 | } else { 143 | if ( $auto_create_topics_enable ne 'true' ) { 144 | dies_ok { $offsets = getting_offsets() } 'expecting to die'; 145 | ok !defined( $offsets ), 'offsets are not received'; 146 | } 147 | } 148 | 149 | # Consuming messages 150 | undef $messages; 151 | if ( $auto_create_topics_enable eq 'true' && $AutoCreateTopicsEnable ) { 152 | lives_ok { $messages = fetching() } 'expecting to live'; 153 | ok _ARRAY0( $messages ), 'messages are received'; 154 | } else { 155 | if ( $auto_create_topics_enable ne 'true' ) { 156 | dies_ok { $messages = fetching() } 'expecting to die'; 157 | ok !defined( $messages ), 'messages are not received'; 158 | } 159 | } 160 | } 161 | 162 | $cluster->close; 163 | } 164 | 165 | -------------------------------------------------------------------------------- /t/41_fork.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::Exception'; ## no critic 22 | plan skip_all => "because Test::Exception required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use Clone qw( 33 | clone 34 | ); 35 | use Const::Fast; 36 | 37 | use Kafka qw( 38 | $BLOCK_UNTIL_IS_COMMITTED 39 | $RECEIVE_LATEST_OFFSETS 40 | $RETRY_BACKOFF 41 | ); 42 | use Kafka::Cluster qw( 43 | $DEFAULT_TOPIC 44 | ); 45 | use Kafka::Connection; 46 | use Kafka::MockIO; 47 | use Kafka::Consumer; 48 | use Kafka::Producer; 49 | 50 | STDOUT->autoflush; 51 | 52 | my $cluster = Kafka::Cluster->new( 53 | replication_factor => 1, 54 | ); 55 | isa_ok( $cluster, 'Kafka::Cluster' ); 56 | 57 | my ( $connection, $topic, $partition, $producer, $response, $consumer, $is_ready, $pid, $ppid, $success, $etalon_messages, $starting_offset ); 58 | 59 | sub random_strings { 60 | my @chars = ( " ", "A" .. "Z", "a" .. "z", 0 .. 9, qw(! @ $ % ^ & *) ); 61 | my $msg_len = 100; 62 | my $number_of_messages = 500; 63 | 64 | note 'generation of messages can take a while'; 65 | my ( @strings, $size ); 66 | $strings[ $number_of_messages - 1 ] = undef; 67 | foreach my $i ( 0 .. ( $number_of_messages - 1 ) ) { 68 | my $len = int( rand( $msg_len ) ) + 1; 69 | $strings[ $i ] = join( q{}, @chars[ map { rand( scalar( @chars ) ) } ( 1 .. 
$len ) ] ); 70 | } 71 | note 'generation of messages completed'; 72 | return \@strings; 73 | } 74 | 75 | sub sending { 76 | my ( $messages ) = @_; 77 | 78 | my $response; 79 | eval { 80 | foreach my $message ( @$messages ) { 81 | undef $response; 82 | $response = $producer->send( 83 | $topic, 84 | $partition, 85 | $message 86 | ); 87 | } 88 | }; 89 | fail "sending error: $@" if $@; 90 | 91 | return $response; 92 | } 93 | 94 | sub next_offset { 95 | my ( $consumer, $topic, $partition ) = @_; 96 | 97 | my $offsets; 98 | eval { 99 | $offsets = $consumer->offsets( 100 | $topic, 101 | $partition, 102 | $RECEIVE_LATEST_OFFSETS, # time 103 | ); 104 | }; 105 | fail "offsets are not received: $@" if $@; 106 | 107 | if ( $offsets ) { 108 | return $offsets->[0]; 109 | } else { 110 | return; 111 | } 112 | } 113 | 114 | sub testing_sending { 115 | my $first_offset; 116 | 117 | return 118 | unless defined( $first_offset = next_offset( $consumer, $topic, $partition ) ); 119 | return 120 | unless sending( $etalon_messages ); 121 | 122 | ++$success; 123 | 124 | return $first_offset; 125 | } 126 | 127 | sub testing_fetching { 128 | my ( $first_offset ) = @_; 129 | 130 | my $messages; 131 | eval { 132 | $messages = $consumer->fetch( $topic, $partition, $first_offset ); 133 | }; 134 | fail "messages are not fetched: $@" if $@; 135 | 136 | if ( $messages ) { 137 | foreach my $i ( 0 .. $#$etalon_messages ) { 138 | my $message = $messages->[ $i ]; 139 | return unless $message->valid && $message->payload eq $etalon_messages->[ $i ]; 140 | } 141 | } else { 142 | return; 143 | } 144 | 145 | ++$success; 146 | 147 | return $messages; 148 | } 149 | 150 | sub wait_until_ready { 151 | my ( $level, $pid ) = @_; 152 | 153 | my $count = 0; 154 | while ( ( $is_ready // 0 ) != $level ) { 155 | if ( ++$count > 5 ) { 156 | kill 'KILL' => $pid; 157 | fail "waited too long for $pid"; 158 | last; 159 | } 160 | sleep 1; 161 | } 162 | } 163 | 164 | $SIG{USR1} = sub { ++$is_ready }; 165 | $SIG{USR2} = sub { ++$success }; 166 | 167 | $partition = $Kafka::MockIO::PARTITION; 168 | $topic = $DEFAULT_TOPIC; 169 | 170 | #-- Connecting to the Kafka server port (for example for node_id = 0) 171 | my( $port ) = $cluster->servers; 172 | 173 | # connecting to the Kafka server port 174 | $connection = Kafka::Connection->new( 175 | host => 'localhost', 176 | port => $port, 177 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 178 | dont_load_supported_api_versions => 1, 179 | ); 180 | $producer = Kafka::Producer->new( 181 | Connection => $connection, 182 | # Ensure that all messages are sent and recorded 183 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 184 | ); 185 | $consumer = Kafka::Consumer->new( 186 | Connection => $connection, 187 | ); 188 | 189 | # simple sending 190 | ok sending( [ 'single message' ] ), 'simple sending ok'; 191 | 192 | my $clone_connection = clone( $connection ); 193 | # the clients are destroyed 194 | undef $producer; 195 | is_deeply( $connection, $clone_connection, 'connection is not destroyed' ); 196 | undef $consumer; 197 | is_deeply( $connection, $clone_connection, 'connection is not destroyed' ); 198 | 199 | # recreating clients 200 | $producer = Kafka::Producer->new( 201 | Connection => $connection, 202 | # Ensure that all messages are sent and recorded 203 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 204 | ); 205 | $consumer = Kafka::Consumer->new( 206 | Connection => $connection, 207 | ); 208 | 209 | $success = 0; 210 | 211 | $etalon_messages = random_strings(); 212 | $starting_offset = testing_sending(); 213 | ok $success == 1, 
'sending OK'; 214 | testing_fetching( $starting_offset ); 215 | ok $success == 2, 'fetching OK'; 216 | 217 | #-- the producer and connection are destroyed in the child 218 | 219 | $is_ready = 0; 220 | 221 | $etalon_messages = random_strings(); 222 | if ( $pid = fork ) { # here, in the parent 223 | $success = 0; 224 | 225 | # producer destroyed in the child 226 | $starting_offset = testing_sending(); 227 | # $success == 1 228 | 229 | wait_until_ready( 1, $pid ); # wait until the child process is ready 230 | 231 | testing_fetching( $starting_offset ); 232 | # $success == 2 233 | kill 'USR1' => $pid; 234 | 235 | wait_until_ready( 2, $pid ); # wait until the child process is ready 236 | 237 | # connection destroyed in the child 238 | $producer = Kafka::Producer->new( 239 | Connection => $connection, # potentially destroyed connection 240 | # Ensure that all messages are sent and recorded 241 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 242 | ); 243 | $consumer = Kafka::Consumer->new( 244 | Connection => $connection, 245 | ); 246 | # $connection ok 247 | 248 | $etalon_messages = random_strings(); 249 | $starting_offset = testing_sending(); 250 | # $success == 3 251 | testing_fetching( $starting_offset ); 252 | # $success == 4 253 | kill 'USR1' => $pid; 254 | 255 | wait; # wait for the child process to complete 256 | } elsif ( defined $pid ) { # here, in the child process 257 | $ppid = getppid(); 258 | 259 | undef $producer; 260 | kill 'USR1' => $ppid; 261 | 262 | wait_until_ready( 1, $ppid ); # wait until the parent process is ready 263 | $connection->close; 264 | undef $connection; 265 | kill 'USR1' => $ppid; 266 | 267 | wait_until_ready( 2, $ppid ); # wait until the parent process is ready 268 | exit; 269 | } else { 270 | fail 'An unexpected error (fork 1)'; 271 | } 272 | 273 | ok $success == 4, 'Testing of destruction in the child'; 274 | 275 | #-- the producer and connection are destroyed in the parent 276 | 277 | $is_ready = 0; 278 | 279 | if ( $pid = fork ) { # here, in the parent 280 | $success = 0; 281 | 282 | wait_until_ready( 1, $pid ); # wait until the child process is ready 283 | undef $producer; 284 | kill 'USR1' => $pid; 285 | 286 | wait_until_ready( 2, $pid ); # wait until the child process is ready 287 | $connection->close; 288 | undef $connection; 289 | kill 'USR1' => $pid; 290 | 291 | wait; # wait for the child process to complete 292 | } elsif ( defined $pid ) { # here, in the child process 293 | $success = 0; 294 | $ppid = getppid(); 295 | 296 | # producer is not destroyed in the parent 297 | $etalon_messages = random_strings(); 298 | $starting_offset = testing_sending(); 299 | # $success == 1 300 | testing_fetching( $starting_offset ); 301 | # $success == 2 302 | 303 | $etalon_messages = random_strings(); 304 | kill 'USR1' => $ppid; 305 | # producer destroyed in the parent 306 | $starting_offset = testing_sending(); 307 | # $success == 3 308 | testing_fetching( $starting_offset ); 309 | # $success == 4 310 | wait_until_ready( 1, $ppid ); # wait until the parent process is ready 311 | 312 | $etalon_messages = random_strings(); 313 | kill 'USR1' => $ppid; 314 | wait_until_ready( 2, $ppid ); # wait until the parent process is ready 315 | 316 | # connection destroyed in the parent 317 | $producer = Kafka::Producer->new( 318 | Connection => $connection, # potentially destroyed connection 319 | # Ensure that all messages are sent and recorded 320 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 321 | ); 322 | $consumer = Kafka::Consumer->new( 323 | Connection => $connection, 324 | ); 325 
| # $connection ok 326 | 327 | $starting_offset = testing_sending(); 328 | # $success == 5 329 | testing_fetching( $starting_offset ); 330 | # $success == 6 331 | 332 | kill 'USR2' => $ppid if $success == 6; # parent $success increment 333 | 334 | exit; 335 | } else { 336 | fail 'An unexpected error (fork 2)'; 337 | } 338 | 339 | ok $success == 1, 'Testing of destruction in the parent'; 340 | 341 | $cluster->close; 342 | 343 | -------------------------------------------------------------------------------- /t/45_compression.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Kafka qw( 28 | $BLOCK_UNTIL_IS_COMMITTED 29 | $COMPRESSION_GZIP 30 | $COMPRESSION_NONE 31 | $COMPRESSION_SNAPPY 32 | $COMPRESSION_LZ4 33 | $DEFAULT_MAX_BYTES 34 | $DEFAULT_MAX_NUMBER_OF_OFFSETS 35 | $RECEIVE_LATEST_OFFSETS 36 | $RETRY_BACKOFF 37 | ); 38 | use Kafka::Cluster; 39 | use Kafka::Connection; 40 | use Kafka::Consumer; 41 | use Kafka::MockIO; 42 | use Kafka::Producer; 43 | use Kafka::TestInternals qw( 44 | $topic 45 | ); 46 | 47 | STDOUT->autoflush; 48 | 49 | my $cluster = Kafka::Cluster->new( 50 | replication_factor => 1, 51 | ); 52 | 53 | my $partition = $Kafka::MockIO::PARTITION; 54 | 55 | #-- Connecting to the Kafka server port (for example for node_id = 0) 56 | my( $port ) = $cluster->servers; 57 | 58 | #-- Connecting to the Kafka server port 59 | 60 | my $connect = Kafka::Connection->new( 61 | host => 'localhost', 62 | port => $port, 63 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 64 | dont_load_supported_api_versions => 1, 65 | ); 66 | 67 | my $producer = Kafka::Producer->new( 68 | Connection => $connect, 69 | # Require verification the number of messages sent and recorded 70 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 71 | ); 72 | 73 | my $consumer = Kafka::Consumer->new( 74 | Connection => $connect, 75 | ApiVersion => 2, 76 | ); 77 | 78 | my $messages_to_send; 79 | push @$messages_to_send, "Message #$_" foreach ( 1..3 ); 80 | 81 | my @compession_codecs = ( 82 | [ $COMPRESSION_NONE, 'NONE' ], 83 | [ $COMPRESSION_GZIP, 'GZIP' ], 84 | [ $COMPRESSION_SNAPPY, 'SNAPPY' ], 85 | [ $COMPRESSION_LZ4, 'LZ4' ], 86 | ); 87 | 88 | foreach my $codec ( @compession_codecs ) 89 | { 90 | # Sending a series of messages 91 | $producer->send( 92 | $topic, 93 | $partition, 94 | $messages_to_send, 95 | undef, 96 | $codec->[0], 97 | ); 98 | } 99 | 100 | # Get a list of valid offsets up max_number before the given time 101 | my $offsets = $consumer->offsets( 102 | $topic, 103 | $partition, 104 | $RECEIVE_LATEST_OFFSETS, # time 105 | $DEFAULT_MAX_NUMBER_OF_OFFSETS, # max_number 106 | ); 107 | if ( $offsets ) { 108 | foreach my $offset ( @$offsets ) { 109 | note "Received offset: $offset"; 110 | } 111 | } 112 | 113 | foreach my $return_all ( 0, 1 ) { 114 | foreach my $start_offset ( $offsets->[1] .. 
( $offsets->[0] - 1 ) ) { 115 | my $compression_codec = int( $start_offset / scalar( @compession_codecs ) ); 116 | 117 | # Consuming messages 118 | my $messages = $consumer->fetch( 119 | $topic, 120 | $partition, 121 | $start_offset, 122 | $DEFAULT_MAX_BYTES, # Maximum size of MESSAGE(s) to receive 123 | $return_all, 124 | ); 125 | if ( $messages ) { 126 | note '--------------------'; 127 | note "Start offset = $start_offset, return_all = $return_all, codec when sending = ".$compession_codecs[ $compression_codec ]->[1]; 128 | foreach my $message ( @$messages ) { 129 | if ( $message->valid ) { 130 | note 'consumed offset: ', $message->offset; 131 | } else { 132 | diag 'error : ', $message->error; 133 | } 134 | 135 | if ( $message->offset == $start_offset ) { 136 | pass 'Starting Message is present'; 137 | } elsif ( $message->offset > $start_offset ) { 138 | pass 'additional message is present'; 139 | } else { # $message->offset < $start_offset 140 | if ( $return_all ) { 141 | if ( $compression_codec != $COMPRESSION_NONE ) { 142 | pass 'returned redundant message'; 143 | } else { 144 | fail 'returned invalid data'; 145 | } 146 | } else { 147 | fail 'returned invalid data'; 148 | } 149 | } 150 | } 151 | } 152 | } 153 | } 154 | 155 | $cluster->close; 156 | 157 | -------------------------------------------------------------------------------- /t/46_destroy_connection.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Kafka qw( 28 | $RETRY_BACKOFF 29 | ); 30 | use Kafka::Cluster; 31 | use Kafka::Connection; 32 | 33 | my $CLUSTER = Kafka::Cluster->new( 34 | replication_factor => 1, 35 | ); 36 | isa_ok( $CLUSTER, 'Kafka::Cluster' ); 37 | 38 | #-- Connecting to the Kafka server port (for example for node_id = 0) 39 | my ( $PORT ) = $CLUSTER->servers; 40 | 41 | my ( $CONNECTION, $HOSTS, $IO_CACHE ); 42 | 43 | sub new_connection { 44 | my ( $port ) = @_; 45 | 46 | # connecting to the Kafka server port 47 | my $connection = Kafka::Connection->new( 48 | host => 'localhost', 49 | port => $port, 50 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 51 | dont_load_supported_api_versions => 1, 52 | ); 53 | isa_ok( $connection, 'Kafka::Connection' ); 54 | 55 | # simple communication 56 | is scalar( $connection->get_known_servers() ), 1, 'Known only one server'; 57 | my ( $server ) = $connection->get_known_servers(); 58 | ok $connection->is_server_known( $server ), 'known server'; 59 | my $metadata = $connection->get_metadata; 60 | ok $metadata, 'communication OK'; 61 | 62 | my $IO_cache = $connection->{_IO_cache}; 63 | my @hosts = keys %$IO_cache; 64 | ok scalar( @hosts ), 'IO cache filled'; 65 | 66 | is_sockets_opened( \@hosts, $IO_cache ); 67 | 68 | return( $connection, \@hosts, $IO_cache ); 69 | } 70 | 71 | sub is_sockets_opened { 72 | my ( $hosts, $IO_cache ) = @_; 73 | 74 | foreach my $host_port ( @$hosts ) { 75 | my $io = $IO_cache->{ $host_port }->{IO}; 76 | is ref( $io ), 'Kafka::IO', 'Kafka::IO'; 77 | my $socket = $io->{socket}; 78 | ok defined( $socket ), 'socket exists'; 79 | my $fn = fileno $socket; 80 | ok 
defined( $fn ), 'socket opened'; 81 | } 82 | } 83 | 84 | sub is_sockets_closed { 85 | my ( $hosts, $IO_cache ) = @_; 86 | 87 | foreach my $host_port ( @$hosts ) { 88 | my $io = $IO_cache->{ $host_port }->{IO}; 89 | ok !defined( $io ), 'IO (socket) closed'; 90 | } 91 | } 92 | 93 | 94 | 95 | ( $CONNECTION, $HOSTS, $IO_CACHE ) = new_connection( $PORT ); 96 | 97 | undef $CONNECTION; 98 | is_sockets_opened( $HOSTS, $IO_CACHE ); 99 | 100 | ( $CONNECTION, $HOSTS, $IO_CACHE ) = new_connection( $PORT ); 101 | 102 | $CONNECTION->close; 103 | is_sockets_closed( $HOSTS, $IO_CACHE ); 104 | 105 | $CLUSTER->close; 106 | 107 | -------------------------------------------------------------------------------- /t/47_kafka_usage_ipv6.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::Exception'; ## no critic 22 | plan skip_all => "because Test::Exception required for testing" if $@; 23 | } 24 | 25 | 26 | 27 | BEGIN { 28 | eval 'use Test::TCP'; ## no critic 29 | plan skip_all => "because Test::TCP required for testing" if $@; 30 | } 31 | 32 | BEGIN { 33 | eval 'use Test::NoWarnings'; ## no critic 34 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 35 | } 36 | 37 | plan 'no_plan'; 38 | 39 | use Const::Fast; 40 | use Net::EmptyPort qw( 41 | can_bind 42 | ); 43 | use Socket; 44 | 45 | use Kafka qw( 46 | $DEFAULT_MAX_BYTES 47 | $DEFAULT_MAX_NUMBER_OF_OFFSETS 48 | $RECEIVE_LATEST_OFFSETS 49 | $RETRY_BACKOFF 50 | ); 51 | use Kafka::Cluster; 52 | use Kafka::Connection; 53 | use Kafka::Consumer; 54 | use Kafka::Producer; 55 | 56 | const my $IPV6_HOST => '::1'; 57 | 58 | SKIP: { 59 | skip "'IPv6 not supported'" unless eval { Socket::IPV6_V6ONLY } && can_bind( $IPV6_HOST ); 60 | 61 | ok 1, 'starting IPv6 test'; 62 | 63 | my $CLUSTER = Kafka::Cluster->new( 64 | replication_factor => 1, 65 | host => $IPV6_HOST, 66 | ); 67 | isa_ok( $CLUSTER, 'Kafka::Cluster' ); 68 | 69 | #-- Connecting to the Kafka server port (for example for node_id = 0) 70 | my ( $PORT ) = $CLUSTER->servers; 71 | 72 | my ( $connect, $producer, $consumer, $response, $offsets ); 73 | 74 | const my $topic => $Kafka::Cluster::DEFAULT_TOPIC; 75 | const my $partition => 0; 76 | 77 | $connect = Kafka::Connection->new( 78 | host => $IPV6_HOST, 79 | port => $PORT, 80 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 81 | dont_load_supported_api_versions => 1, 82 | ); 83 | isa_ok( $connect, 'Kafka::Connection'); 84 | 85 | lives_ok { $producer = Kafka::Producer->new( 86 | Connection => $connect, 87 | ) } 'expecting to live'; 88 | isa_ok( $producer, 'Kafka::Producer'); 89 | 90 | # Sending a single message 91 | if ( !( $response = $producer->send( 92 | $topic, # topic 93 | $partition, # partition 94 | 'Single message', # message 95 | ) ) ) { 96 | fail 'message is not sent'; 97 | } else { 98 | pass 'message is sent'; 99 | } 100 | 101 | # Closes the connection 102 | $connect->close; 103 | 104 | #-- Consumer 105 | 106 | lives_ok { $consumer = Kafka::Consumer->new( 107 | Connection => $connect, 108 | ) } 'expecting to live'; 109 | unless ( $consumer ) { 110 | fail 'consumer is not created'; 111 | } 112 | isa_ok( $consumer, 'Kafka::Consumer'); 113 | 114 | # Get a list of valid offsets (up max_number) before 
the given time. 115 | $offsets = $consumer->offsets( 116 | $topic, # topic 117 | $partition, # partition 118 | $RECEIVE_LATEST_OFFSETS, # time 119 | $DEFAULT_MAX_NUMBER_OF_OFFSETS, # max_number 120 | ); 121 | if ( $offsets ) { 122 | pass 'received offsets'; 123 | foreach my $offset ( @$offsets ) { 124 | note "Received offset: $offset"; 125 | } 126 | } 127 | # may be both physical and logical errors 128 | if ( !$offsets ) { 129 | fail 'offsets are not received'; 130 | } 131 | 132 | # Consuming messages one by one 133 | my $messages = $consumer->fetch( 134 | $topic, # topic 135 | $partition, # partition 136 | 0, # offset 137 | $DEFAULT_MAX_BYTES, # Maximum size of MESSAGE(s) to receive 138 | ); 139 | if ( $messages ) { 140 | pass 'received messages'; 141 | my $cnt = 0; 142 | foreach my $m ( @$messages ) { 143 | if( $m->valid ) { 144 | # note "Payload : ", $m->payload; 145 | # note "offset : ", $m->offset; 146 | # note "next_offset: ", $m->next_offset; 147 | } else { 148 | diag "Message No $cnt, Error: ", $m->error; 149 | diag 'Payload : ', $m->payload; 150 | diag 'offset : ', $m->offset; 151 | diag 'next_offset: ', $m->next_offset; 152 | } 153 | ++$cnt; 154 | last if $cnt > 100; # enough 155 | } 156 | } 157 | # may be both physical and logical errors 158 | if ( !$messages ) { 159 | fail 'messages are not received'; 160 | } 161 | 162 | # Closes the connection 163 | $connect->close; 164 | 165 | $CLUSTER->close; 166 | 167 | } # end of SKIP 168 | 169 | -------------------------------------------------------------------------------- /t/48_acknowledgement.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::Exception'; ## no critic 22 | plan skip_all => "because Test::Exception required for testing" if $@; 23 | } 24 | 25 | BEGIN { 26 | eval 'use Test::NoWarnings'; ## no critic 27 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 28 | } 29 | 30 | plan 'no_plan'; 31 | 32 | use Const::Fast; 33 | use Data::Dumper; 34 | use Time::HiRes (); 35 | use Try::Tiny; 36 | 37 | use Kafka qw( 38 | $BLOCK_UNTIL_IS_COMMITTED 39 | %ERROR 40 | $ERROR_SEND_NO_ACK 41 | $RECEIVE_LATEST_OFFSETS 42 | $REQUEST_TIMEOUT 43 | $RETRY_BACKOFF 44 | $SEND_MAX_ATTEMPTS 45 | ); 46 | use Kafka::Cluster qw( 47 | $DEFAULT_TOPIC 48 | ); 49 | use Kafka::Connection; 50 | use Kafka::Consumer; 51 | use Kafka::MockIO; 52 | use Kafka::Producer; 53 | 54 | STDOUT->autoflush; 55 | 56 | my $cluster = Kafka::Cluster->new( 57 | replication_factor => 3, 58 | ); 59 | 60 | const my $PARTITION => $Kafka::MockIO::PARTITION; 61 | const my $TOPIC => $DEFAULT_TOPIC; 62 | const my $MESSAGE => '*' x 200; 63 | const my $SEND_NO_ACK_REPEATS => 20; 64 | const my $SEND_NO_ACK_ERROR => $ERROR{ $ERROR_SEND_NO_ACK }; 65 | 66 | const my $TIMEOUT_DIVIDER => 2; 67 | const my $RETRIES => 2; 68 | 69 | my ( $CONNECTION, $PRODUCER, $CONSUMER, $TIMEOUT ); 70 | my ( $port, $response, $previous_offset, $next_offset, $success_sendings ); 71 | 72 | $TIMEOUT = $REQUEST_TIMEOUT; # normal timeout 73 | $success_sendings = 0; 74 | 75 | # report variables 76 | my $TOTAL_SENDINGS = 0; 77 | my $send_with_NO_ACK_errors = 0; 78 | my $NO_ACK_message_stored = 0; 79 | my $NO_ACK_message_not_stored = 0; 80 | my 
$send_with_other_errors = 0; 81 | my $other_message_stored = 0; 82 | my $other_message_not_stored = 0; 83 | my $not_stored_without_error = 0; 84 | my %found_ERRORS; 85 | 86 | sub sending { 87 | my $response; 88 | my $error; 89 | ++$TOTAL_SENDINGS; 90 | try { 91 | $response = $PRODUCER->send( 92 | $TOPIC, 93 | $PARTITION, 94 | $MESSAGE, 95 | ); 96 | } catch { 97 | $error = $_; 98 | }; 99 | 100 | # control fetching stored messages 101 | my $prev_timeout = $TIMEOUT; 102 | $TIMEOUT = $REQUEST_TIMEOUT; # restore normal timeout 103 | my $stored_messages; 104 | my $retries = $RETRIES; 105 | while ( $retries-- ) { 106 | Time::HiRes::sleep 0.5; 107 | get_new_objects(); 108 | # last if $stored_messages = fetching(); 109 | last if $stored_messages = next_offset(); 110 | } 111 | BAIL_OUT( 'sending - Cannot fetch messages' ) unless $stored_messages; 112 | 113 | # my $stored = scalar @$stored_messages; 114 | my $stored = $stored_messages; 115 | my $prev_success_sendings = $success_sendings; 116 | $success_sendings = $stored; 117 | 118 | unless ( $error ) { 119 | if ( $stored == $prev_success_sendings + 1 ) { 120 | return 1; 121 | } else { 122 | ++$not_stored_without_error; 123 | diag( sprintf( "\n%s WARN: data not stored without error! Sending %d, expected %d but got %d stored records. Timeout %.5f", 124 | localtime().'', 125 | $TOTAL_SENDINGS, 126 | $prev_success_sendings + 1, 127 | $stored, 128 | $prev_timeout, 129 | ) 130 | ); 131 | return -1; 132 | } 133 | } 134 | 135 | ++$found_ERRORS{ $error }->{total}; 136 | 137 | if ( $error->message =~ /$SEND_NO_ACK_ERROR/ ) { 138 | ++$send_with_NO_ACK_errors; 139 | diag( sprintf( "\r[%d/%d] %s: stored %d, not stored without error %d, timeout %.5f\r", 140 | $send_with_NO_ACK_errors, 141 | $SEND_NO_ACK_REPEATS, 142 | localtime().'', 143 | $success_sendings, 144 | $not_stored_without_error, 145 | $prev_timeout, 146 | ) 147 | ); 148 | 149 | if ( $stored == $prev_success_sendings ) { 150 | ++$NO_ACK_message_not_stored; 151 | pass 'possible not stored on SEND_NO_ACK_ERROR'; 152 | ++$found_ERRORS{ $error }->{not_stored}; 153 | $found_ERRORS{ $error }->{max_not_stored_timeout} = $prev_timeout if !exists( $found_ERRORS{ $error }->{max_not_stored_timeout} ) || $prev_timeout > $found_ERRORS{ $error }->{max_not_stored_timeout}; 154 | } elsif ( $stored == $prev_success_sendings + 1 ) { 155 | ++$NO_ACK_message_stored; 156 | pass 'success stored on SEND_NO_ACK_ERROR'; 157 | ++$found_ERRORS{ $error }->{stored}; 158 | $found_ERRORS{ $error }->{max_stored_timeout} = $prev_timeout if !exists( $found_ERRORS{ $error }->{max_stored_timeout} ) || $prev_timeout > $found_ERRORS{ $error }->{max_stored_timeout}; 159 | } else { 160 | fail "unexpected stored on SEND_NO_ACK_ERROR: fetched $stored, prev_success_sendings $prev_success_sendings"; 161 | } 162 | } else { 163 | ++$send_with_other_errors; 164 | # diag "sending - ignore possible not SEND_NO_ACK_ERROR error: '$error'"; 165 | 166 | if ( $stored == $prev_success_sendings ) { 167 | ++$other_message_not_stored; 168 | pass 'possible not stored on error'; 169 | ++$found_ERRORS{ $error }->{not_stored}; 170 | $found_ERRORS{ $error }->{max_not_stored_timeout} = $prev_timeout if !exists( $found_ERRORS{ $error }->{max_not_stored_timeout} ) || $prev_timeout > $found_ERRORS{ $error }->{max_not_stored_timeout}; 171 | } elsif ( $stored == $prev_success_sendings + 1 ) { 172 | pass 'possible stored on error'; 173 | ++$other_message_stored; 174 | ++$found_ERRORS{ $error }->{stored}; 175 | $found_ERRORS{ $error }->{max_stored_timeout} = $prev_timeout 
if !exists( $found_ERRORS{ $error }->{max_stored_timeout} ) || $prev_timeout > $found_ERRORS{ $error }->{max_stored_timeout}; 176 | } else { 177 | fail "unexpected stored on error: fetched $stored, prev_success_sendings $prev_success_sendings"; 178 | } 179 | } 180 | 181 | return; 182 | } 183 | 184 | sub next_offset { 185 | $TIMEOUT = $REQUEST_TIMEOUT; # restore normal timeout 186 | my ( $error, $offsets ); 187 | my $retries = $RETRIES; 188 | while ( $retries-- ) { 189 | get_new_objects(); 190 | try { 191 | $offsets = $CONSUMER->offsets( 192 | $TOPIC, 193 | $PARTITION, 194 | $RECEIVE_LATEST_OFFSETS, 195 | ); 196 | } catch { 197 | $error = $_; 198 | }; 199 | last if $offsets && @$offsets; 200 | sleep 1; 201 | } 202 | BAIL_OUT( 'next_offset - offsets are not received' ) unless $offsets && @$offsets; 203 | 204 | return $offsets->[0]; 205 | } 206 | 207 | #sub fetching { 208 | # my $messages; 209 | # my $error; 210 | # try { 211 | # $messages = $CONSUMER->fetch( $TOPIC, $PARTITION, 0 ); 212 | # } catch { 213 | # $error = $_; 214 | # }; 215 | # fail "fetching - messages are not fetched: '$error'" if $error; 216 | # 217 | # return unless @$messages; 218 | # 219 | # foreach my $i ( 0 .. $#$messages ) { 220 | # my $message = $messages->[ $i ]; 221 | # unless ( $message->valid && $message->payload ) { 222 | # fail "fetching - not valid message: message error '".$message->error."'"; 223 | # return; 224 | # } 225 | # } 226 | # 227 | # return $messages; 228 | #} 229 | 230 | sub get_new_objects { 231 | pass "get_new_objects - TIMEOUT = ".sprintf( "%.6f", $TIMEOUT ); 232 | 233 | $CONNECTION->close if $CONNECTION; 234 | undef $CONSUMER; 235 | undef $PRODUCER; 236 | undef $CONNECTION; 237 | 238 | lives_ok { 239 | $CONNECTION = Kafka::Connection->new( 240 | host => 'localhost', 241 | port => $port, 242 | timeout => $TIMEOUT, 243 | dont_load_supported_api_versions => 1, 244 | ); 245 | } 'Expecting to live new CONNECTION'; 246 | lives_ok { 247 | $PRODUCER = Kafka::Producer->new( 248 | Connection => $CONNECTION, 249 | # Ensure that all messages sent and recorded 250 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 251 | Timeout => $TIMEOUT, 252 | ); 253 | } 'Expecting to live new PRODUCER'; 254 | lives_ok { 255 | $CONSUMER = Kafka::Consumer->new( 256 | Connection => $CONNECTION, 257 | ); 258 | } 'Expecting to live new CONSUMER'; 259 | } 260 | 261 | 262 | 263 | #-- Connecting to the Kafka server port (for example for node_id = 0) 264 | ( $port ) = $cluster->servers; 265 | 266 | diag 'Started at '.localtime()."\n"; 267 | my $stored_messages; 268 | my $work_timeout = $TIMEOUT; 269 | my $max_error_timeout = 0; 270 | while ( $send_with_NO_ACK_errors < $SEND_NO_ACK_REPEATS ) { 271 | my $prev_success_sendings = $success_sendings; 272 | 273 | $TIMEOUT = $work_timeout; 274 | get_new_objects(); 275 | my $success_sending = sending(); 276 | 277 | if ( $success_sending ) { 278 | last if $success_sending == -1; 279 | $work_timeout /= $TIMEOUT_DIVIDER; 280 | $work_timeout = 0.001 if $work_timeout < 0.001; # minimum timeout is 1ms 281 | } else { 282 | $max_error_timeout = $work_timeout if $work_timeout > $max_error_timeout; 283 | $work_timeout = $TIMEOUT; # return to normal timeout 284 | } 285 | } 286 | diag "\nFinished at ".localtime(); 287 | 288 | ok $success_sendings, 'messages stored'; 289 | is $TOTAL_SENDINGS, 290 | $success_sendings 291 | - $not_stored_without_error 292 | + $NO_ACK_message_not_stored 293 | + $other_message_not_stored, 294 | 'all sendings accounted'; 295 | is $send_with_other_errors, 296 | 
$other_message_stored 297 | + $other_message_not_stored, 298 | 'all other errors accounted'; 299 | is $send_with_NO_ACK_errors, 300 | $NO_ACK_message_stored 301 | + $NO_ACK_message_not_stored, 302 | 'all NO_ACK_ERROR sendings accounted'; 303 | 304 | # report 305 | diag "total sendings $TOTAL_SENDINGS"; 306 | diag "stored messages $success_sendings"; 307 | #fail( "NOT STORED WITHOUT ERROR $not_stored_without_error" ) if $not_stored_without_error; 308 | diag( "NOT STORED WITHOUT ERROR $not_stored_without_error" ) if $not_stored_without_error; 309 | diag "max error timeout $max_error_timeout"; 310 | diag "sendings with NO_ACK_ERROR $send_with_NO_ACK_errors"; 311 | diag "sendings with NO_ACK_ERROR stored $NO_ACK_message_stored"; 312 | diag "sendings with NO_ACK_ERROR not stored $NO_ACK_message_not_stored"; 313 | diag "sendings with other errors $send_with_other_errors"; 314 | diag "sendings with other errors stored $other_message_stored"; 315 | diag "sendings with other errors not stored $other_message_not_stored"; 316 | 317 | $Data::Dumper::Sortkeys = 1; 318 | diag( Data::Dumper->Dump( [ \%found_ERRORS ], [ 'found_ERRORS' ] ) ); 319 | 320 | $cluster->close; 321 | 322 | -------------------------------------------------------------------------------- /t/50_debug_level.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::NoWarnings'; ## no critic 17 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 18 | } 19 | 20 | plan 'no_plan'; 21 | 22 | use Kafka::Connection; 23 | use Kafka::IO; 24 | use Kafka::Internals; 25 | 26 | my $PERL_KAFKA_DEBUG = $ENV{PERL_KAFKA_DEBUG}; 27 | my $DEBUG_Connection = $Kafka::Connection::DEBUG; 28 | my $DEBUG_IO = $Kafka::IO::DEBUG; 29 | 30 | delete $ENV{PERL_KAFKA_DEBUG}; 31 | %Kafka::Internals::_debug_levels = (); 32 | 33 | package Kafka::TestDebugLevel; 34 | 35 | use 5.010; 36 | use strict; 37 | use warnings; 38 | 39 | use Kafka::Internals qw( 40 | debug_level 41 | ); 42 | 43 | our $DEBUG = 0; 44 | 45 | sub new { 46 | my ( $class ) = @_; 47 | 48 | my $self = bless {}, $class; 49 | 50 | return $self; 51 | } 52 | 53 | package main; 54 | 55 | #-- direct control 56 | 57 | $Kafka::TestDebugLevel::DEBUG = 0; 58 | is( $Kafka::TestDebugLevel::DEBUG, 0, 'debug level not set' ); 59 | $Kafka::TestDebugLevel::DEBUG = 1; 60 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 61 | 62 | #----- control through a method/function 63 | 64 | #-- establish a simple value 65 | 66 | $Kafka::TestDebugLevel::DEBUG = 0; 67 | is( Kafka::TestDebugLevel->debug_level(), 0, 'debug level not set' ); 68 | is( Kafka::TestDebugLevel->debug_level( 1 ), 1, 'debug level set' ); 69 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 70 | is( Kafka::TestDebugLevel->debug_level(), 1, 'debug level set' ); 71 | is( Kafka::TestDebugLevel->debug_level( undef ), 1, 'debug level set' ); 72 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 73 | 74 | #-- set for the "correct" module 75 | 76 | $Kafka::TestDebugLevel::DEBUG = 0; 77 | is( Kafka::TestDebugLevel->debug_level( 'TestDebugLevel:1' ), 1, 'debug level set' ); 78 | is( Kafka::TestDebugLevel->debug_level(), 1, 'debug level set' ); 79 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 80 | 81 | #-- set for the "another" module 82 | 83 | $Kafka::Connection::DEBUG = 0; 84 | 
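# The specification accepted by debug_level() (and by the PERL_KAFKA_DEBUG environment variable exercised below) is a comma-separated list of "Module:level" pairs; a bare "level" with no module prefix applies to the calling module itself. For example, as the assertions below demonstrate:
#
#   Kafka::TestDebugLevel->debug_level( 'IO:1' );    # $Kafka::IO::DEBUG becomes 1, the caller stays at 0
#   Kafka::TestDebugLevel->debug_level( '1,IO:2' );  # the caller becomes 1, Kafka::IO becomes 2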
$Kafka::IO::DEBUG = 0; 85 | $Kafka::TestDebugLevel::DEBUG = 0; 86 | 87 | is( Kafka::TestDebugLevel->debug_level( 'IO:1' ), 0, 'debug level not set' ); 88 | 89 | is( Kafka::IO->debug_level(), 1, 'debug level set' ); 90 | is( $Kafka::IO::DEBUG, 1, 'debug level set' ); 91 | 92 | is( Kafka::TestDebugLevel->debug_level(), 0, 'debug level not set' ); 93 | is( $Kafka::TestDebugLevel::DEBUG, 0, 'debug level not set' ); 94 | 95 | is( Kafka::Connection->debug_level(), 0, 'debug level not set' ); 96 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 97 | 98 | $Kafka::IO::DEBUG = 0; 99 | 100 | #-- set for the "incorrect" module 101 | 102 | is( Kafka::TestDebugLevel->debug_level( 'SomethingBad:1' ), 0, 'debug level not set' ); 103 | 104 | is( $Kafka::IO::DEBUG, 0, 'debug level not set' ); 105 | is( $Kafka::TestDebugLevel::DEBUG, 0, 'debug level not set' ); 106 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 107 | 108 | #----- control through an environment variable 109 | 110 | $ENV{PERL_KAFKA_DEBUG} = 1; 111 | %Kafka::Internals::_debug_levels = (); 112 | 113 | is( Kafka::TestDebugLevel->debug_level(), 1, 'debug level set' ); 114 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 115 | 116 | is( $Kafka::IO::DEBUG, 0, 'debug level not set' ); 117 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 118 | 119 | $Kafka::TestDebugLevel::DEBUG = 0; 120 | 121 | $ENV{PERL_KAFKA_DEBUG} = 'IO:1'; 122 | %Kafka::Internals::_debug_levels = (); 123 | 124 | is( Kafka::TestDebugLevel->debug_level(), 0, 'debug level not set' ); 125 | 126 | is( $Kafka::IO::DEBUG, 1, 'debug level set' ); 127 | is( $Kafka::TestDebugLevel::DEBUG, 0, 'debug level not set' ); 128 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 129 | 130 | $Kafka::IO::DEBUG = 0; 131 | 132 | #----- several specifications 133 | 134 | is( Kafka::TestDebugLevel->debug_level( '1,IO:2' ), 1, 'debug level set' ); 135 | 136 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 137 | is( $Kafka::IO::DEBUG, 2, 'debug level set' ); 138 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 139 | 140 | $Kafka::TestDebugLevel::DEBUG = 0; 141 | $Kafka::IO::DEBUG = 0; 142 | 143 | is( Kafka::TestDebugLevel->debug_level( 'IO:1,2' ), 2, 'debug level set' ); 144 | 145 | is( $Kafka::TestDebugLevel::DEBUG, 2, 'debug level set' ); 146 | is( $Kafka::IO::DEBUG, 1, 'debug level set' ); 147 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 148 | 149 | $Kafka::TestDebugLevel::DEBUG = 0; 150 | $Kafka::IO::DEBUG = 0; 151 | 152 | #----- control via an object method 153 | 154 | delete $ENV{PERL_KAFKA_DEBUG}; 155 | %Kafka::Internals::_debug_levels = (); 156 | 157 | my $obj = Kafka::TestDebugLevel->new; 158 | 159 | is( $obj->debug_level(), 0, 'debug level not set' ); 160 | is( $obj->debug_level( 'TestDebugLevel:1,IO:2' ), 1, 'debug level set' ); 161 | is( $obj->debug_level(), 1, 'debug level set' ); 162 | 163 | is( $Kafka::TestDebugLevel::DEBUG, 1, 'debug level set' ); 164 | is( $Kafka::IO::DEBUG, 2, 'debug level set' ); 165 | is( $Kafka::Connection::DEBUG, 0, 'debug level not set' ); 166 | 167 | $Kafka::Connection::DEBUG = $DEBUG_Connection; 168 | $Kafka::IO::DEBUG = $DEBUG_IO; 169 | $ENV{PERL_KAFKA_DEBUG} = $PERL_KAFKA_DEBUG; 170 | 171 | -------------------------------------------------------------------------------- /t/60_sockets.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | 
lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | plan 'no_plan'; 16 | 17 | BEGIN { 18 | plan skip_all => 'Unknown base directory of Kafka server' 19 | unless $ENV{KAFKA_BASE_DIR}; 20 | } 21 | 22 | BEGIN { 23 | eval 'use Test::NoWarnings'; ## no critic 24 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 25 | } 26 | 27 | 28 | use Const::Fast; 29 | use Kafka qw( 30 | $BLOCK_UNTIL_IS_COMMITTED 31 | $RETRY_BACKOFF 32 | ); 33 | use Kafka::Cluster; 34 | use Kafka::Connection; 35 | use Kafka::Producer; 36 | use Kafka::TestInternals qw( 37 | $topic 38 | ); 39 | 40 | ok defined( Kafka::Cluster::data_cleanup() ), 'data directory cleaned'; 41 | 42 | const my $HOST => 'localhost'; # use only 'localhost' for test 43 | const my $PARTITIONS => 5; 44 | const my $SENDINGS => 1_000; 45 | 46 | my $CLUSTER = Kafka::Cluster->new( 47 | replication_factor => 1, 48 | partition => $PARTITIONS, 49 | ); 50 | 51 | my ( $PORT ) = $CLUSTER->servers; 52 | 53 | my $connection = get_new_connection(); 54 | 55 | for ( my $i = 0; $i < $SENDINGS; ++$i ) { 56 | send_beacon( $connection, "Some beacon #$i" ); 57 | } 58 | 59 | # the same connection but new producer 60 | for ( my $i = 0; $i < $SENDINGS; ++$i ) { 61 | send_beacon( $connection, "Other beacon #$i" ); 62 | } 63 | 64 | my @first_used_sockets = get_used_socket_ids( $connection ); 65 | 66 | $connection->close; 67 | undef $connection; 68 | 69 | # renew connection 70 | $connection = get_new_connection(); 71 | 72 | # the new connection 73 | for ( my $i = 0; $i < $SENDINGS; ++$i ) { 74 | send_beacon( $connection, "Next beacon #$i" ); 75 | } 76 | 77 | my @second_used_sockets = get_used_socket_ids( $connection ); 78 | 79 | is scalar( @first_used_sockets ), scalar( @second_used_sockets ), "used socket number not changed"; 80 | is scalar( @second_used_sockets ), 1, 'only one socket used'; 81 | ok "@first_used_sockets" ne "@second_used_sockets", 'the new socket used'; 82 | 83 | 84 | 85 | $CLUSTER->close; 86 | 87 | exit; 88 | 89 | sub get_new_connection { 90 | return Kafka::Connection->new( 91 | host => $HOST, 92 | port => $PORT, 93 | AutoCreateTopicsEnable => 1, 94 | RETRY_BACKOFF => $RETRY_BACKOFF * 2, 95 | dont_load_supported_api_versions => 1, 96 | ); 97 | } 98 | 99 | sub get_used_socket_ids { 100 | my $connection = shift; 101 | 102 | my @sockets; 103 | foreach my $server ( keys %{ $connection->{_IO_cache} } ) { 104 | if ( my $io = $connection->{_IO_cache}->{ $server }->{IO} ) { 105 | my $socket = $io->{socket}; 106 | push @sockets, ''.$socket; 107 | } 108 | } 109 | 110 | return( sort @sockets ); 111 | } 112 | 113 | sub send_beacon { 114 | my $connection = shift; 115 | my @beacons = @_; 116 | 117 | my $producer = Kafka::Producer->new( 118 | Connection => $connection, 119 | RequiredAcks => $BLOCK_UNTIL_IS_COMMITTED, 120 | ); 121 | 122 | my @array = ( 0 .. 
$PARTITIONS - 1 ); 123 | 124 | my $random_partition = $array[ rand scalar @array ]; 125 | 126 | my @start_used_sockets = get_used_socket_ids( $connection ); 127 | 128 | ok $producer->send( 129 | $topic, # topic 130 | $random_partition, # partition 131 | [ # message 132 | @beacons, 133 | ] 134 | ), "sent OK: @beacons"; 135 | 136 | my @finish_used_sockets = get_used_socket_ids( $connection ); 137 | 138 | if ( @start_used_sockets ) { 139 | is scalar( @start_used_sockets ), scalar( @finish_used_sockets ), "used socket number not changed"; 140 | is scalar( @start_used_sockets ), 1, 'only one socket used'; 141 | is "@start_used_sockets", "@finish_used_sockets", "the same sockets used: @start_used_sockets"; 142 | } 143 | 144 | undef $producer; 145 | } 146 | 147 | -------------------------------------------------------------------------------- /t/90_mock_io.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::NoWarnings'; ## no critic 17 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 18 | } 19 | 20 | plan 'no_plan'; 21 | 22 | use Const::Fast; 23 | #use Data::Dumper; 24 | use Params::Util qw( 25 | _STRING 26 | ); 27 | 28 | use Kafka qw( 29 | $BLOCK_UNTIL_IS_COMMITTED 30 | $COMPRESSION_NONE 31 | $DEFAULT_MAX_BYTES 32 | $DEFAULT_MAX_WAIT_TIME 33 | $KAFKA_SERVER_PORT 34 | $DEFAULT_MAX_NUMBER_OF_OFFSETS 35 | $MIN_BYTES_RESPOND_HAS_DATA 36 | $NOT_SEND_ANY_RESPONSE 37 | $RECEIVE_EARLIEST_OFFSET 38 | $RECEIVE_LATEST_OFFSETS 39 | $REQUEST_TIMEOUT 40 | $WAIT_WRITTEN_TO_LOCAL_LOG 41 | ); 42 | use Kafka::Internals qw( 43 | $APIKEY_PRODUCE 44 | $APIKEY_FETCH 45 | $APIKEY_OFFSET 46 | $APIKEY_METADATA 47 | $PRODUCER_ANY_OFFSET 48 | ); 49 | use Kafka::MockIO; 50 | use Kafka::MockProtocol qw( 51 | encode_fetch_response 52 | ); 53 | use Kafka::Protocol qw( 54 | decode_fetch_response 55 | decode_metadata_response 56 | decode_offset_response 57 | decode_produce_response 58 | encode_fetch_request 59 | encode_metadata_request 60 | encode_offset_request 61 | encode_produce_request 62 | ); 63 | 64 | const my $TOPIC => 'mytopic'; 65 | # Use Kafka::MockIO only with the following information: 66 | const my $PARTITION => $Kafka::MockIO::PARTITION; 67 | 68 | my ( $io, $decoded_request, $encoded_request, $decoded_response, $encoded_response, $len ); 69 | 70 | sub fulfill_request { 71 | $len = $io->send( $encoded_request ); 72 | is $len, length $encoded_request, 'request sent correctly'; 73 | $len = 4; 74 | undef $encoded_response; 75 | $encoded_response = $io->receive( $len ); 76 | $len = unpack( 'l>', $$encoded_response ); 77 | $$encoded_response .= ${ $io->receive( $len ) }; 78 | ok _STRING( $$encoded_response ), 'response received'; 79 | } 80 | 81 | Kafka::MockIO::override(); 82 | 83 | $io = Kafka::IO->new( 84 | host => 'localhost', 85 | port => $KAFKA_SERVER_PORT, 86 | timeout => $REQUEST_TIMEOUT, 87 | ); 88 | isa_ok( $io, 'Kafka::IO' ); 89 | 90 | #Kafka::IO->debug_level( 1 ); 91 | 92 | #-- Special cases -------------------------------------------------------------- 93 | 94 | # a decoded fetch request 95 | $decoded_request = { 96 | ApiKey => $APIKEY_FETCH, 97 | CorrelationId => 0, 98 | ClientId => 'console-consumer-25555', 99 | MaxWaitTime => 100, 100 | MinBytes => $MIN_BYTES_RESPOND_HAS_DATA, 101 | topics => [ 102 | { 103 | TopicName => 'mytopic', 104 | partitions => [ 
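# A decoded request is a plain hash mirroring the Kafka protocol layout: a fixed header (ApiKey, CorrelationId, ClientId) followed by per-topic and, inside each topic, per-partition bodies. Here the FetchRequest asks partition 0 of 'mytopic' for up to MaxBytes of messages starting at FetchOffset, and the matching response below carries the MessageSet for the same topic/partition pair.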
105 | { 106 | Partition => 0, 107 | FetchOffset => 0, 108 | MaxBytes => 1_048_576, 109 | }, 110 | ], 111 | }, 112 | ], 113 | }; 114 | $encoded_request = encode_fetch_request( $decoded_request ); 115 | 116 | # a decoded fetch response 117 | $decoded_response = { 118 | CorrelationId => 0, 119 | topics => [ 120 | { 121 | TopicName => 'mytopic', 122 | partitions => [ 123 | { 124 | Partition => 0, 125 | ErrorCode => 0, 126 | HighwaterMarkOffset => 2, 127 | MessageSet => [ 128 | { 129 | Offset => 0, 130 | MagicByte => 0, 131 | Attributes => 0, 132 | Key => q{}, 133 | Value => 'Hello!', 134 | }, 135 | { 136 | Offset => 1, 137 | MagicByte => 0, 138 | Attributes => 0, 139 | Key => q{}, 140 | Value => 'Hello, World!', 141 | }, 142 | ], 143 | }, 144 | ], 145 | }, 146 | ], 147 | }; 148 | $encoded_response = encode_fetch_response( $decoded_response ); 149 | 150 | is scalar( keys %{ Kafka::MockIO::special_cases() } ), 0, 'special case not present'; 151 | Kafka::MockIO::add_special_case( { $encoded_request => $encoded_response } ); 152 | is scalar( keys %{ Kafka::MockIO::special_cases() } ), 1, 'special case present'; 153 | 154 | fulfill_request(); 155 | is_deeply( decode_fetch_response( $encoded_response ), $decoded_response, 'decoded correctly' ); 156 | 157 | Kafka::MockIO::del_special_case( $encoded_request ); 158 | is scalar( keys %{ Kafka::MockIO::special_cases() } ), 0, 'special case deleted'; 159 | 160 | #-- MetadataRequest ------------------------------------------------------------ 161 | 162 | # a decoded metadata request 163 | $decoded_request = { 164 | ApiKey => $APIKEY_METADATA, 165 | CorrelationId => 0, 166 | ClientId => 'console-consumer-25555', 167 | topics => [ 168 | $TOPIC, 169 | ], 170 | }; 171 | 172 | $encoded_request = encode_metadata_request( $decoded_request ); 173 | fulfill_request(); 174 | $decoded_response = decode_metadata_response( $encoded_response ); 175 | 176 | #-- ProduceRequest -------------------------------------------------------------- 177 | 178 | # a decoded produce request 179 | $decoded_request = { 180 | ApiKey => $APIKEY_PRODUCE, 181 | CorrelationId => 4, 182 | ClientId => q{}, 183 | RequiredAcks => $NOT_SEND_ANY_RESPONSE, 184 | Timeout => int( $REQUEST_TIMEOUT * 1000 ), 185 | topics => [ 186 | { 187 | TopicName => $TOPIC, 188 | partitions => [ 189 | { 190 | Partition => $PARTITION, 191 | MessageSet => [ 192 | { 193 | Offset => $PRODUCER_ANY_OFFSET, 194 | MagicByte => 0, 195 | Attributes => $COMPRESSION_NONE, 196 | Key => q{}, 197 | Value => 'Hello!', 198 | }, 199 | ], 200 | }, 201 | ], 202 | }, 203 | ], 204 | }; 205 | 206 | $encoded_request = encode_produce_request( $decoded_request ); 207 | fulfill_request(); 208 | $decoded_response = decode_produce_response( $encoded_response ); 209 | 210 | #-- FetchRequest --------------------------------------------------------------- 211 | 212 | # a decoded fetch request 213 | $decoded_request = { 214 | ApiKey => $APIKEY_FETCH, 215 | CorrelationId => 0, 216 | ClientId => 'console-consumer-25555', 217 | MaxWaitTime => int( $DEFAULT_MAX_WAIT_TIME * 1000 ), 218 | MinBytes => $MIN_BYTES_RESPOND_HAS_DATA, 219 | topics => [ 220 | { 221 | TopicName => $TOPIC, 222 | partitions => [ 223 | { 224 | Partition => $PARTITION, 225 | FetchOffset => 0, 226 | MaxBytes => $DEFAULT_MAX_BYTES, 227 | }, 228 | ], 229 | }, 230 | ], 231 | }; 232 | 233 | $encoded_request = encode_fetch_request( $decoded_request ); 234 | fulfill_request(); 235 | $decoded_response = decode_fetch_response( $encoded_response ); 236 | 237 | #-- OffsetRequest 
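# Every request and response on the wire is framed by a 4-byte big-endian length prefix, which is why fulfill_request() above first reads 4 bytes, unpacks them with 'l>' (signed 32-bit, network byte order), and then reads exactly that many bytes of payload. A minimal sketch of that framing (variable names are illustrative; receive() returns a reference to the bytes read):
#
#   my $raw  = $io->receive( 4 );              # the length prefix
#   my $size = unpack( 'l>', $$raw );          # size of the body that follows
#   $$raw   .= ${ $io->receive( $size ) };     # append the body itself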
-------------------------------------------------------------- 238 | 239 | # a decoded offset request 240 | $decoded_request = { 241 | ApiKey => $APIKEY_OFFSET, 242 | CorrelationId => 0, 243 | ClientId => 'console-consumer-25555', 244 | topics => [ 245 | { 246 | TopicName => $TOPIC, 247 | partitions => [ 248 | { 249 | Partition => $PARTITION, 250 | # Time => $RECEIVE_EARLIEST_OFFSET, 251 | Time => $RECEIVE_LATEST_OFFSETS, 252 | MaxNumberOfOffsets => $DEFAULT_MAX_NUMBER_OF_OFFSETS, 253 | }, 254 | ], 255 | }, 256 | ], 257 | }; 258 | 259 | $encoded_request = encode_offset_request( $decoded_request ); 260 | fulfill_request(); 261 | $decoded_response = decode_offset_response( $encoded_response ); 262 | 263 | $io->close; 264 | 265 | Kafka::MockIO::restore(); 266 | 267 | -------------------------------------------------------------------------------- /t/91_mock_usage.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | eval 'use Test::Exception'; ## no critic 17 | plan skip_all => "because Test::Exception required for testing" if $@; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Const::Fast; 28 | #use Data::Dumper; 29 | 30 | use Kafka qw( 31 | $DEFAULT_MAX_BYTES 32 | $DEFAULT_MAX_NUMBER_OF_OFFSETS 33 | $KAFKA_SERVER_PORT 34 | $RECEIVE_LATEST_OFFSETS 35 | $RECEIVE_EARLIEST_OFFSET 36 | $REQUEST_TIMEOUT 37 | ); 38 | use Kafka::Connection; 39 | use Kafka::Consumer; 40 | use Kafka::Producer; 41 | 42 | use Kafka::MockIO; 43 | 44 | const my $topic => 'mytopic'; 45 | # Use Kafka::MockIO only with the following information: 46 | const my $partition => $Kafka::MockIO::PARTITION; 47 | 48 | my ( $connect, $producer, $consumer, $response, $offsets ); 49 | 50 | #-- Connecting to the Kafka mocked server port 51 | my $port = $KAFKA_SERVER_PORT; 52 | 53 | Kafka::MockIO::override(); 54 | 55 | #-- Connection 56 | 57 | dies_ok { $connect = Kafka::Connection->new( 58 | host => 'localhost', 59 | port => $port, 60 | timeout => 'nothing', 61 | dont_load_supported_api_versions => 1, 62 | ) } 'expecting to die'; 63 | 64 | $connect = Kafka::Connection->new( 65 | host => 'localhost', 66 | port => $port, 67 | dont_load_supported_api_versions => 1, 68 | ); 69 | isa_ok( $connect, 'Kafka::Connection'); 70 | 71 | #-- Producer 72 | 73 | dies_ok { $producer = Kafka::Producer->new( 74 | Connection => "nothing", 75 | ) } 'expecting to die'; 76 | 77 | undef $producer; 78 | lives_ok { $producer = Kafka::Producer->new( 79 | Connection => $connect, 80 | ) } 'expecting to live'; 81 | unless ( $producer ) { 82 | BAIL_OUT 'producer is not created'; 83 | } 84 | isa_ok( $producer, 'Kafka::Producer'); 85 | 86 | # Sending a single message 87 | if ( !( $response = $producer->send( 88 | $topic, # topic 89 | $partition, # partition 90 | 'Single message', # message 91 | ) ) ) { 92 | BAIL_OUT 'response is not received'; 93 | } 94 | else { 95 | pass 'message is sent'; 96 | } 97 | 98 | # Sending a series of messages 99 | if ( !( $response = $producer->send( 100 | $topic, # topic 101 | $partition, # partition 102 | [ # messages 103 | "The first message", 104 | "The second message", 105 | "The third message", 106 | ], 107 | ) ) ) { 108 | BAIL_OUT 'producer is not created'; 109 | } 110 | else { 
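# Note: send() is called positionally: topic, partition, then either a single message or an ARRAY reference of messages, as above. t/45_compression.t passes two further optional positional arguments after the messages (undef, then a compression codec constant); reading that undef slot as a message key is an assumption here, not something this test relies on.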
111 | pass 'messages sent'; 112 | } 113 | 114 | # Closes the producer and cleans up 115 | undef $producer; 116 | ok( !defined( $producer ), 'the producer object is destroyed' ); 117 | $connect->close; 118 | 119 | #-- Consumer 120 | 121 | $connect->close; 122 | undef $connect; 123 | unless ( $connect = Kafka::Connection->new( 124 | host => 'localhost', 125 | port => $port, 126 | dont_load_supported_api_versions => 1, 127 | ) ) { 128 | BAIL_OUT 'connection is not created'; 129 | } 130 | 131 | dies_ok { $consumer = Kafka::Consumer->new( 132 | Connection => "nothing", 133 | ) } 'expecting to die'; 134 | 135 | lives_ok { $consumer = Kafka::Consumer->new( 136 | Connection => $connect, 137 | ) } 'expecting to live'; 138 | unless ( $consumer ) { 139 | BAIL_OUT 'consumer is not created'; 140 | } 141 | isa_ok( $consumer, 'Kafka::Consumer'); 142 | 143 | # Offsets are monotonically increasing integers unique to a partition. 144 | # Consumers track the maximum offset they have consumed in each partition. 145 | 146 | # Get a list of valid offsets (up to max_number) before the given time. 147 | $offsets = $consumer->offsets( 148 | $topic, # topic 149 | $partition, # partition 150 | $RECEIVE_LATEST_OFFSETS, # time 151 | $DEFAULT_MAX_NUMBER_OF_OFFSETS, # max_number 152 | ); 153 | if ( $offsets ) { 154 | pass 'received offsets'; 155 | foreach my $offset ( @$offsets ) { 156 | note "Received offset: $offset"; 157 | } 158 | } 159 | # there may be both physical and logical errors 160 | if ( !$offsets ) { 161 | fail 'offsets are not received'; 162 | } 163 | 164 | # Consuming messages one by one 165 | my $messages = $consumer->fetch( 166 | $topic, # topic 167 | $partition, # partition 168 | 0, # offset 169 | $DEFAULT_MAX_BYTES, # Maximum size of MESSAGE(s) to receive 170 | ); 171 | if ( $messages ) { 172 | pass 'received messages'; 173 | my $cnt = 0; 174 | foreach my $m ( @$messages ) { 175 | if ( $m->valid ) { 176 | # note "Payload : ", $m->payload; 177 | # note "offset : ", $m->offset; 178 | # note "next_offset: ", $m->next_offset; 179 | } else { 180 | diag "Message No $cnt, Error: ", $m->error; 181 | diag 'Payload : ', $m->payload; 182 | diag 'offset : ', $m->offset; 183 | diag 'next_offset: ', $m->next_offset; 184 | } 185 | ++$cnt; 186 | last if $cnt > 100; # enough 187 | } 188 | } 189 | # there may be both physical and logical errors 190 | if ( !$messages ) { 191 | fail 'messages are not received'; 192 | } 193 | 194 | # Closes the consumer and cleans up 195 | undef $consumer; 196 | ok( !defined( $consumer ), 'the consumer object is destroyed' ); 197 | $connect->close; 198 | 199 | -------------------------------------------------------------------------------- /t/99_data_cleanup.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | use 5.010; 4 | use strict; 5 | use warnings; 6 | 7 | use lib qw( 8 | lib 9 | t/lib 10 | ../lib 11 | ); 12 | 13 | use Test::More; 14 | 15 | BEGIN { 16 | plan skip_all => 'Unknown base directory of Kafka server' 17 | unless $ENV{KAFKA_BASE_DIR}; 18 | } 19 | 20 | BEGIN { 21 | eval 'use Test::NoWarnings'; ## no critic 22 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 23 | } 24 | 25 | plan 'no_plan'; 26 | 27 | use Kafka::Cluster; 28 | 29 | ok defined( Kafka::Cluster::data_cleanup() ), 'data directory cleaned'; 30 | 31 | -------------------------------------------------------------------------------- /t/data/.gitignore: -------------------------------------------------------------------------------- 1 | # 
Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore 5 | -------------------------------------------------------------------------------- /t/lib/Kafka/TestInternals.pm: -------------------------------------------------------------------------------- 1 | package Kafka::TestInternals; 2 | 3 | =head1 NAME 4 | 5 | Kafka::TestInternals - Constants and functions used in the tests. 6 | 7 | =head1 VERSION 8 | 9 | This documentation refers to C<Kafka::TestInternals> version 1.08. 10 | 11 | =cut 12 | 13 | use 5.010; 14 | use strict; 15 | use warnings; 16 | 17 | our $VERSION = '1.08'; 18 | 19 | use Exporter qw( 20 | import 21 | ); 22 | 23 | our @EXPORT_OK = qw( 24 | _is_suitable_int 25 | @not_array 26 | @not_array0 27 | @not_empty_string 28 | @not_hash 29 | @not_is_like_server_list 30 | @not_isint 31 | @not_nonnegint 32 | @not_number 33 | @not_posint 34 | @not_posnumber 35 | @not_right_object 36 | @not_string 37 | @not_string_array 38 | @not_topics_array 39 | $topic 40 | ); 41 | 42 | 43 | 44 | use Const::Fast; 45 | use Scalar::Util::Numeric qw( 46 | isbig 47 | isint 48 | ); 49 | 50 | use Kafka::Cluster qw( 51 | $DEFAULT_TOPIC 52 | ); 53 | use Kafka::Internals qw( 54 | $APIKEY_FETCH 55 | $APIKEY_PRODUCE 56 | $APIKEY_OFFSET 57 | ); 58 | 59 | 60 | 61 | =head1 DESCRIPTION 62 | 63 | This module is not a user module. 64 | 65 | In order to achieve better performance, 66 | functions of this module do not perform argument validation. 67 | 68 | =head2 EXPORT 69 | 70 | The following constants are available for export 71 | 72 | =cut 73 | 74 | # options for testing arguments: 75 | # "\x{20ac}", 76 | # undef, 77 | # 0, 78 | # 0.5, 79 | # 1, 80 | # -1, 81 | # -3, 82 | # q{}, 83 | # '0', 84 | # '0.5', 85 | # '1', 86 | # 9999999999999999, 87 | # \1, 88 | # [], 89 | # {}, 90 | # [ 'something' ], 91 | # { foo => 'bar' }, 92 | # bless( {}, 'FakeName' ), 93 | # 'simple string', 94 | 95 | =head3 C<$topic> 96 | 97 | Name of the topic used in the tests. 98 | 99 | =cut 100 | const our $topic => $DEFAULT_TOPIC; 101 | 102 | =head3 C<@not_right_object> 103 | 104 | The values do not correspond to the object type you want. 105 | 106 | =cut 107 | const our @not_right_object => ( 108 | "\x{20ac}", 109 | undef, 110 | 0, 111 | 0.5, 112 | 1, 113 | -1, 114 | -3, 115 | q{}, 116 | '0', 117 | '0.5', 118 | '1', 119 | 9999999999999999, 120 | \1, 121 | [], 122 | {}, 123 | [ 'something' ], 124 | { foo => 'bar' }, 125 | bless( {}, 'FakeName' ), 126 | 'simple string', 127 | ); 128 | 129 | =head3 C<@not_nonnegint> 130 | 131 | The values do not correspond to non-negative integers. 132 | 133 | =cut 134 | const our @not_nonnegint => ( 135 | "\x{20ac}", 136 | undef, 137 | 0.5, 138 | -1, 139 | -3, 140 | q{}, 141 | '0.5', 142 | \'scalar', 143 | [], 144 | {}, 145 | [ 'something' ], 146 | { foo => 'bar' }, 147 | bless( {}, 'FakeName' ), 148 | ); 149 | 150 | =head3 C<@not_empty_string> 151 | 152 | The values do not correspond to a non-empty string. 153 | 154 | =cut 155 | const our @not_empty_string => ( 156 | "\x{20ac}", 157 | \1, 158 | [], 159 | {}, 160 | [ 'something' ], 161 | { foo => 'bar' }, 162 | \'string', 163 | bless( {}, 'FakeName' ), 164 | ); 165 | 166 | =head3 C<@not_string> 167 | 168 | The values do not correspond to any string. 169 | 170 | =cut 171 | const our @not_string => ( 172 | @not_empty_string, 173 | undef, 174 | q{}, 175 | ); 176 | 177 | =head3 C<@not_posint> 178 | 179 | The values do not correspond to positive integers.
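A typical use of these arrays in the test suite is to drive negative tests. A hypothetical sketch (C<dies_ok> comes from L<Test::Exception>; the constructor and its arguments are illustrative only):

    foreach my $bad_port ( @not_posint ) {
        dies_ok {
            Kafka::Connection->new(
                host => 'localhost',
                port => $bad_port,
            );
        } 'invalid port is rejected';
    }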
180 | 181 | =cut 182 | const our @not_posint => ( 183 | "\x{20ac}", 184 | undef, 185 | 0, 186 | 0.5, 187 | -1, 188 | -3, 189 | q{}, 190 | '0', 191 | '0.5', 192 | \'scalar', 193 | [], 194 | {}, 195 | [ 'something' ], 196 | { foo => 'bar' }, 197 | bless( {}, 'FakeName' ), 198 | ); 199 | 200 | =head3 C<@not_number> 201 | 202 | The values do not correspond to any number. 203 | 204 | =cut 205 | const our @not_number => ( 206 | "\x{20ac}", 207 | undef, 208 | q{}, 209 | \'scalar', 210 | [], 211 | {}, 212 | [ 'something' ], 213 | { foo => 'bar' }, 214 | bless( {}, 'FakeName' ), 215 | 'incorrect number,' 216 | ); 217 | 218 | =head3 C<@not_posnumber> 219 | 220 | The values do not correspond to a positive number. 221 | 222 | =cut 223 | const our @not_posnumber => ( 224 | @not_number, 225 | 0, 226 | -1, 227 | ); 228 | 229 | =head3 C<@not_isint> 230 | 231 | The values do not correspond to any integers. 232 | 233 | =cut 234 | const our @not_isint => ( 235 | "\x{20ac}", 236 | # undef, 237 | 0.5, 238 | q{}, 239 | '0.5', 240 | \'scalar', 241 | [], 242 | {}, 243 | [ 'something' ], 244 | { foo => 'bar' }, 245 | bless( {}, 'FakeName' ), 246 | ); 247 | 248 | =head3 C<@not_array0> 249 | 250 | The values do not correspond to a raw and unblessed ARRAY reference. 251 | 252 | =cut 253 | const our @not_array0 => ( 254 | "\x{20ac}", 255 | undef, 256 | 0, 257 | 0.5, 258 | 1, 259 | -1, 260 | -3, 261 | q{}, 262 | '0', 263 | '0.5', 264 | '1', 265 | 9999999999999999, 266 | \'scalar', 267 | {}, 268 | { foo => 'bar' }, 269 | bless( {}, 'FakeName' ), 270 | ); 271 | 272 | =head3 C<@not_array> 273 | 274 | The values do not correspond to a raw and unblessed ARRAY reference containing at least one element of any kind. 275 | 276 | =cut 277 | const our @not_array => ( 278 | @not_array0, 279 | [], 280 | ); 281 | 282 | =head3 C<@not_topics_array> 283 | 284 | The values do not correspond to a 'topics' ARRAY reference. 285 | For 'topics' ARRAY examples see C. 286 | 287 | =cut 288 | our @not_topics_array; 289 | foreach my $bad_value ( 290 | @not_array0, 291 | [], 292 | 'scalar', 293 | ) { 294 | push @not_topics_array, 295 | { 296 | ApiKey => $APIKEY_PRODUCE, 297 | topics => $bad_value, 298 | }, 299 | { 300 | ApiKey => $APIKEY_PRODUCE, 301 | topics => [ $bad_value ], 302 | }, 303 | { 304 | ApiKey => $APIKEY_PRODUCE, 305 | topics => [ 306 | { 307 | TopicName => $topic, 308 | partitions => $bad_value, 309 | }, 310 | ], 311 | }, 312 | { 313 | ApiKey => $APIKEY_PRODUCE, 314 | topics => [ 315 | { 316 | TopicName => $topic, 317 | partitions => [ $bad_value ], 318 | }, 319 | ], 320 | }, 321 | ; 322 | } 323 | 324 | =head3 C<@not_is_like_server_list> 325 | 326 | The values do not correspond to a reference to an array of server names. 327 | 328 | =cut 329 | const our @not_is_like_server_list => ( 330 | [ "\x{20ac}" ], 331 | [ undef ], 332 | [ 0 ], 333 | [ 0.5 ], 334 | [ 1 ], 335 | [ -1 ], 336 | [ -3 ], 337 | [ q{} ], 338 | [ '0' ], 339 | [ '0.5' ], 340 | [ '1' ], 341 | [ 9999999999999999 ], 342 | [ \'scalar' ], 343 | [ [] ], 344 | [ {} ], 345 | [ [ 'something' ] ], 346 | [ { foo => 'bar' } ], 347 | [ bless( {}, 'FakeName' ) ], 348 | [ 'string' ], 349 | ); 350 | 351 | =head3 C<@not_string_array> 352 | 353 | The values do not correspond to a reference to an array of any strings.
354 | 355 | =cut 356 | const our @not_string_array => ( 357 | [ "\x{20ac}" ], 358 | [ undef ], 359 | [ \1 ], 360 | [ [] ], 361 | [ {} ], 362 | [ [ 'something' ] ], 363 | [ { foo => 'bar' } ], 364 | [ bless( {}, 'FakeName' ) ], 365 | ); 366 | 367 | =head3 C<@not_hash> 368 | 369 | The values do not correspond to a raw and unblessed HASH reference with at least one entry. 370 | 371 | =cut 372 | const our @not_hash => ( 373 | "\x{20ac}", 374 | undef, 375 | 0, 376 | 0.5, 377 | 1, 378 | -1, 379 | -3, 380 | q{}, 381 | '0', 382 | '0.5', 383 | '1', 384 | 9999999999999999, 385 | # \'scalar', 386 | [], 387 | {}, 388 | [ 'something' ], 389 | bless( {}, 'FakeName' ), 390 | 'simple string', 391 | ); 392 | 393 | #-- public functions ----------------------------------------------------------- 394 | 395 | #-- private functions ---------------------------------------------------------- 396 | 397 | # Verifying whether the argument is a simple int or a bigint 398 | sub _is_suitable_int { 399 | my ( $n ) = @_; 400 | 401 | $n // return; 402 | return isint( $n ) || isbig( $n ); 403 | } 404 | 405 | #-- public attributes ---------------------------------------------------------- 406 | 407 | #-- public methods ------------------------------------------------------------- 408 | 409 | #-- private attributes --------------------------------------------------------- 410 | 411 | #-- private methods ------------------------------------------------------------ 412 | 413 | 1; 414 | 415 | __END__ 416 | 417 | =head1 SEE ALSO 418 | 419 | The basic operation of the Kafka package modules: 420 | 421 | L<Kafka> - constants and messages used by the Kafka package modules. 422 | 423 | L<Kafka::Connection> - interface to connect to a Kafka cluster. 424 | 425 | L<Kafka::Producer> - interface for producing client. 426 | 427 | L<Kafka::Consumer> - interface for consuming client. 428 | 429 | L<Kafka::Message> - interface to access Kafka message 430 | properties. 431 | 432 | L<Kafka::Int64> - functions to work with 64 bit elements of the 433 | protocol on 32 bit systems. 434 | 435 | L<Kafka::Protocol> - functions to process messages in 436 | Apache Kafka's Protocol. 437 | 438 | L<Kafka::IO> - low-level interface for communication with Kafka server. 439 | 440 | L<Kafka::Exceptions> - module designated to handle Kafka exceptions. 441 | 442 | L<Kafka::Internals> - internal constants and functions used 443 | by several package modules. 444 | 445 | A wealth of detail about Apache Kafka and the Kafka Protocol: 446 | 447 | Main page at L<http://kafka.apache.org/> 448 | 449 | Kafka Protocol at L<https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol> 450 | 451 | =head1 SOURCE CODE 452 | 453 | Kafka package is hosted on GitHub: 454 | L<https://github.com/TrackingSoft/Kafka> 455 | 456 | =head1 AUTHOR 457 | 458 | Sergey Gladkov 459 | 460 | =head1 CONTRIBUTORS 461 | 462 | Alexander Solovey 463 | 464 | Jeremy Jordan 465 | 466 | Sergiy Zuban 467 | 468 | Vlad Marchenko 469 | 470 | =head1 COPYRIGHT AND LICENSE 471 | 472 | Copyright (C) 2012-2016 by TrackingSoft LLC. 473 | 474 | This package is free software; you can redistribute it and/or modify it under 475 | the same terms as Perl itself. See I<perlartistic> at 476 | L<http://dev.perl.org/licenses/artistic.html>. 477 | 478 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; 479 | without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 480 | PARTICULAR PURPOSE.
481 | 482 | =cut 483 | -------------------------------------------------------------------------------- /tools/.gitignore: -------------------------------------------------------------------------------- 1 | /nytprof/ 2 | nytprof.out 3 | -------------------------------------------------------------------------------- /tools/benchmark_consumer.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | # NAME: Consuming messages 4 | 5 | 6 | 7 | use 5.010; 8 | use strict; 9 | use warnings; 10 | 11 | use lib qw( 12 | lib 13 | ../lib 14 | ); 15 | 16 | 17 | 18 | 19 | 20 | use Getopt::Long; 21 | use Scalar::Util qw( 22 | blessed 23 | ); 24 | use Time::HiRes qw( 25 | gettimeofday 26 | ); 27 | use Try::Tiny; 28 | 29 | use Kafka qw( 30 | $MESSAGE_SIZE_OVERHEAD 31 | $RECEIVE_EARLIEST_OFFSET 32 | $RECEIVE_LATEST_OFFSETS 33 | ); 34 | use Kafka::Connection; 35 | use Kafka::Consumer; 36 | 37 | 38 | 39 | my $host = 'localhost', 40 | my $port = undef; 41 | my $topic = 'mytopic'; 42 | my $partition = 0; 43 | my $msg_len = 200; 44 | my $number_of_messages = 20_000; 45 | my $re_read = 0; 46 | my $no_infinite = 0; 47 | 48 | my ( $ret, $help ); 49 | 50 | $ret = GetOptions( 51 | 'host=s' => \$host, 52 | 'port=i' => \$port, 53 | 'topic=s' => \$topic, 54 | 'partition=i' => \$partition, 55 | 'package=i' => \$number_of_messages, 56 | 'length=i' => \$msg_len, 57 | 're_read' => \$re_read, 58 | 'no_infinite' => \$no_infinite, 59 | 'help|?' => \$help, 60 | ); 61 | 62 | if ( !$ret || $help || !$host || !$port || !$topic || !$msg_len || !$number_of_messages ) { 63 | print <isa( 'Kafka::Exception' ) ) { 110 | $message = $error; 111 | } else { 112 | $message = $_->message; 113 | } 114 | say STDERR $message; 115 | exit 1; 116 | } 117 | 118 | sub fetch_messages { 119 | my ( $offset, $max_size ) = @_; 120 | 121 | my ( $messages, $time_before, $time_after ); 122 | $time_before = gettimeofday(); 123 | try { 124 | $messages = $consumer->fetch( $topic, $partition, $offset, $max_size ); 125 | } catch { 126 | exit_on_error( $_ ); 127 | }; 128 | $time_after = gettimeofday(); 129 | 130 | my $cnt = 0; 131 | foreach my $m ( @$messages ) { 132 | unless ( $m->valid ) { 133 | say STDERR "Message No $cnt, Error: ", $m->error; 134 | say STDERR 'Payload : ', length( $m->payload ) > 100 ? substr( $m->payload, 0, 100 ).'...' : $m->payload; 135 | say STDERR 'offset : ', $m->offset; 136 | say STDERR 'next_offset: ', $m->next_offset; 137 | } 138 | ++$cnt; 139 | } 140 | 141 | return $messages, $time_after - $time_before; 142 | } 143 | 144 | 145 | 146 | try { 147 | $connect = Kafka::Connection->new( host => $host, port => $port ); 148 | $consumer = Kafka::Consumer->new( Connection => $connect ); 149 | } catch { 150 | exit_on_error( $_ ); 151 | }; 152 | 153 | $desired_size = ( $MESSAGE_SIZE_OVERHEAD + $msg_len ) * $number_of_messages; 154 | 155 | 156 | 157 | $fetch = []; 158 | $messages_recv = 0; 159 | $dispatch_time = 0; 160 | my $cnt = 0; 161 | 162 | INFINITE: # an infinite loop 163 | { 164 | $first_offset = ( $re_read || !@$fetch ) ? 0 : $fetch->[ @$fetch - 1 ]->next_offset; 165 | $fetch = []; 166 | 167 | CONSUMPTION: 168 | while (1) { 169 | # until all messages 170 | FETCH: 171 | { 172 | last INFINITE if $ctrl_c; 173 | 174 | # useful work 175 | my ( $fetched, $to_bench ) = fetch_messages( 176 | @$fetch ? 
175 | my ( $fetched, $to_bench ) = fetch_messages( 176 | @$fetch ? $fetch->[ @$fetch - 1 ]->next_offset : $first_offset, 177 | $desired_size, 178 | ); 179 | last FETCH unless @$fetched; # all messages fetched 180 | 181 | $dispatch_time += $to_bench; 182 | $messages_recv += scalar @$fetched; 183 | push @$fetch, @$fetched; 184 | 185 | # decoration 186 | $mbs = ( $messages_recv * $msg_len ) / ( 1024 * 1024 ); 187 | print( STDERR 188 | sprintf( '[%s] Received %d messages (%.3f MB) %s messages/sec (%s MB/sec)', 189 | scalar localtime, 190 | $messages_recv, 191 | $mbs, 192 | $dispatch_time ? sprintf( '%d', int( $messages_recv / $dispatch_time ) ) : 'N/A', 193 | $dispatch_time ? sprintf( '%.3f', $mbs / $dispatch_time ) : 'N/A', 194 | ), 195 | ' ' x 10, 196 | ); 197 | if ( ( $cnt += scalar @$fetched ) < 200_000 ) { 198 | print STDERR "\r"; 199 | } else { 200 | print STDERR "\n"; 201 | $cnt = 0; 202 | } 203 | 204 | redo FETCH; # there may still be unread messages 205 | } 206 | last CONSUMPTION if $dispatch_time; # achieved significant time 207 | } 208 | redo INFINITE unless $no_infinite; 209 | } 210 | 211 | 212 | 213 | # Closes and cleans up 214 | 215 | undef $consumer; 216 | $connect->close; 217 | undef $connect; 218 | 219 | # Statistics 220 | 221 | $mbs = ( $messages_recv * $msg_len ) / ( 1024 * 1024 ); 222 | say( STDERR sprintf( '[%s] Total: Received %d messages (%.3f MB), %s messages/sec (%s MB/sec)', 223 | scalar localtime, 224 | $messages_recv, 225 | $mbs, 226 | $dispatch_time ? sprintf( '%d', int( $messages_recv / $dispatch_time ) ) : 'N/A', 227 | $dispatch_time ? sprintf( '%.3f', $mbs / $dispatch_time ) : 'N/A', 228 | ), 229 | ); 230 | -------------------------------------------------------------------------------- /tools/benchmark_producer.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | # NAME: Producer test 4 | 5 | 6 | 7 | use 5.010; 8 | use strict; 9 | use warnings; 10 | 11 | use lib qw( 12 | lib 13 | ../lib 14 | ); 15 | 16 | 17 | 18 | 19 | 20 | use Getopt::Long; 21 | use Scalar::Util qw( 22 | blessed 23 | ); 24 | use Time::HiRes qw( 25 | gettimeofday 26 | ); 27 | use Try::Tiny; 28 | 29 | use Kafka::Connection; 30 | use Kafka::Producer; 31 | 32 | 33 | 34 | my $host = 'localhost'; 35 | my $port = undef; 36 | my $topic = 'mytopic'; 37 | my $partitions = 1; 38 | my $msg_len = 200; 39 | my $number_of_messages = 10_000; 40 | 41 | my ( $ret, $help ); 42 | 43 | $ret = GetOptions( 44 | 'host=s' => \$host, 45 | 'port=i' => \$port, 46 | 'topic=s' => \$topic, 47 | 'partitions=i' => \$partitions, 48 | 'messages=i' => \$number_of_messages, 49 | 'length=i' => \$msg_len, 50 | 'help|?' => \$help, 51 | ); 52 | 53 | if ( !$ret || $help || !$host || !$port || !$topic || !$partitions || !$number_of_messages || !$msg_len ) { 54 | print <<HELP; 55 | Usage: $0 [--host="..."] [--port=...] [--topic="..."] [--partitions=...] [--messages=...] [--length=...] 56 | 57 | Send messages to a Kafka server and report the throughput. 58 | 59 | Options: 60 | --host="..." 61 | Kafka server host name or IP address (default: localhost) 62 | --port=... 63 | Kafka server port (required) 64 | --topic="..." 65 | topic name (default: mytopic) 66 | --partitions=... 67 | number of partitions to spread the messages over (default: 1) 68 | --messages=... 69 | number of messages to send (default: 10_000) 70 | --length=... 71 | length of a single message (default: 200) 72 | --help 73 | display this help and exit 74 | HELP 75 | exit 1; 76 | } 77 | 78 | #-- declarations 79 | 80 | my ( $connect, $producer, $messages, $messages_sent, $dispatch_time, $mbs ); 81 | 82 | 83 | 84 | sub exit_on_error { 85 | my ( $error ) = @_; 86 | 87 | my $message; 88 | if ( !( blessed( $error ) && $error->isa( 'Kafka::Exception' ) ) ) { 89 | $message = $error; 90 | } else { 91 | $message = $error->message; 92 | } 93 | say STDERR $message; 94 | exit 1; 95 | } 96 | 97 | sub random_strings { 98 | my @chars = ( ' ', 'A'..'Z', 'a'..'z', 0..9, qw( !
@ $ % ^ & * ) ); 99 | 100 | print STDERR 'generation of messages can take a while'; 101 | my @strings; 102 | $strings[ $number_of_messages - 1 ] = undef; 103 | foreach my $i ( 0..( $number_of_messages - 1 ) ) { 104 | $strings[ $i ] = join( q{}, @chars[ map { rand @chars } ( 1..$msg_len ) ] ); 105 | } 106 | 107 | return \@strings; 108 | } 109 | 110 | sub send_message { 111 | my ( $partition, $message ) = @_; 112 | 113 | my ( $ret, $time_before, $time_after ); 114 | $time_before = gettimeofday(); 115 | try { 116 | $ret = $producer->send( $topic, $partition, $message ); 117 | } catch { 118 | exit_on_error( $_ ); 119 | }; 120 | $time_after = gettimeofday(); 121 | 122 | return $time_after - $time_before; 123 | } 124 | 125 | 126 | 127 | try { 128 | $connect = Kafka::Connection->new( host => $host, port => $port ); 129 | $producer = Kafka::Producer->new( Connection => $connect ); 130 | } catch { 131 | exit_on_error( $_ ); 132 | }; 133 | 134 | 135 | 136 | $messages = random_strings(); 137 | $messages_sent = 0; 138 | $dispatch_time = 0; 139 | 140 | while (1) { 141 | print STDERR "\rmessage sending one by one, please wait...\r"; 142 | foreach my $idx ( 0..( $number_of_messages - 1 ) ) { 143 | $dispatch_time += send_message( int( rand( $partitions ) ), $messages->[ $idx ] ); 144 | 145 | # decoration 146 | unless ( ( my $num = $idx + 1 ) % 1000 ) { 147 | $mbs = ( $num * $msg_len ) / ( 1024 * 1024 ); 148 | print( STDERR 149 | sprintf( '[%s] Sent %d messages (%.3f MB) %s messages/sec (%s MB/sec)', 150 | scalar localtime, 151 | $num, 152 | $mbs, 153 | $dispatch_time ? sprintf( '%d', int( $num / $dispatch_time ) ) : 'N/A', 154 | $dispatch_time ? sprintf( '%.3f', $mbs / $dispatch_time ) : 'N/A' 155 | ), 156 | ' ' x 10, 157 | "\r", 158 | ); 159 | } 160 | } 161 | $messages_sent += $number_of_messages; 162 | 163 | last if $dispatch_time; # achieved significant time 164 | } 165 | 166 | 167 | 168 | # Closes and cleans up 169 | 170 | undef $producer; 171 | $connect->close; 172 | undef $connect; 173 | 174 | # Statistics 175 | 176 | $mbs = ( $messages_sent * $msg_len ) / ( 1024 * 1024 ); 177 | say( STDERR sprintf( '[%s] Total: Sent %d messages (%.3f MB), %d messages/sec (%.3f MB/sec)', 178 | scalar localtime, 179 | $messages_sent, 180 | $mbs, 181 | int( $messages_sent / $dispatch_time ), 182 | $mbs / $dispatch_time, 183 | ), 184 | ); 185 | -------------------------------------------------------------------------------- /tools/coverage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cd .. 
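# Remove any coverage database left over from a previous run; every test is
# then executed under Devel::Cover, and the final "cover" call merges the
# accumulated results into a single report.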
4 | cover -delete 5 | 6 | #find t -name \*.t -print -exec perl -MDevel::Cover {} \; 7 | 8 | perl -MDevel::Cover t/00_api.t 9 | perl -MDevel::Cover t/01_bits64.t 10 | perl -MDevel::Cover t/02_io.t 11 | perl -MDevel::Cover t/03_io_ipv6.t 12 | perl -MDevel::Cover t/04_mockio.t 13 | perl -MDevel::Cover t/05_protocol.t 14 | perl -MDevel::Cover t/06_decode_encode.t 15 | perl -MDevel::Cover t/07_cluster.t 16 | perl -MDevel::Cover t/08_cluster_start.t 17 | perl -MDevel::Cover t/09_connection.t 18 | perl -MDevel::Cover t/10_message.t 19 | perl -MDevel::Cover t/11_producer.t 20 | perl -MDevel::Cover t/12_consumer.t 21 | perl -MDevel::Cover t/13_leader_not_found.t 22 | perl -MDevel::Cover t/20_kafka_usage.t 23 | perl -MDevel::Cover t/21_kafka_bench.t 24 | perl -MDevel::Cover t/30_cluster_stop.t 25 | perl -MDevel::Cover t/40_autocreate_topics.t 26 | perl -MDevel::Cover t/41_fork.t 27 | perl -MDevel::Cover t/42_nonfatal_errors.t 28 | perl -MDevel::Cover t/43_competition.t 29 | perl -MDevel::Cover t/44_bad_sending.t 30 | perl -MDevel::Cover t/45_compression.t 31 | perl -MDevel::Cover t/46_destroy_connection.t 32 | perl -MDevel::Cover t/47_kafka_usage_ipv6.t 33 | perl -MDevel::Cover t/50_debug_level.t 34 | perl -MDevel::Cover t/90_mock_io.t 35 | perl -MDevel::Cover t/91_mock_usage.t 36 | perl -MDevel::Cover t/92_mock_bench.t 37 | perl -MDevel::Cover t/99_data_cleanup.t 38 | 39 | cover 40 | -------------------------------------------------------------------------------- /tools/profiling.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | # WARNING: Ensure kafka cluster is started before executing this program (see t/??_cluster_start.t for examples) 4 | 5 | 6 | 7 | use 5.010; 8 | use strict; 9 | use warnings; 10 | 11 | use lib qw( 12 | lib 13 | t/lib 14 | ../lib 15 | ../t/lib 16 | ); 17 | 18 | 19 | 20 | defined( $ENV{KAFKA_BASE_DIR} ) or exit_on_error( 'Unknown base directory of Kafka server' ); 21 | 22 | 23 | 24 | use Const::Fast; 25 | #use File::HomeDir; 26 | use Cwd; 27 | use File::Spec::Functions qw( 28 | catdir 29 | ); 30 | use Getopt::Long; 31 | use Scalar::Util qw( 32 | blessed 33 | ); 34 | use Try::Tiny; 35 | 36 | use Kafka qw ( 37 | $MESSAGE_SIZE_OVERHEAD 38 | $RECEIVE_EARLIEST_OFFSET 39 | $RECEIVE_LATEST_OFFSETS 40 | ); 41 | use Kafka::Cluster; 42 | use Kafka::Connection; 43 | use Kafka::Consumer; 44 | use Kafka::MockIO; 45 | use Kafka::Producer; 46 | use Kafka::TestInternals; 47 | 48 | 49 | 50 | 51 | 52 | const my @T_DIRS => ( 't', catdir( '..', 't' ) ); 53 | const my @TEST_OFFSETS => ( $RECEIVE_LATEST_OFFSETS, $RECEIVE_EARLIEST_OFFSET ); 54 | 55 | my ( $ret, $help, $base_dir, $topic, $partition, $msg_len, $number_of_messages ); 56 | 57 | $base_dir = $ENV{KAFKA_BASE_DIR}; # WARNING: must match the settings of your system 58 | $topic = $Kafka::TestInternals::topic; 59 | $partition = $Kafka::MockIO::PARTITION; 60 | $msg_len = 200; 61 | $number_of_messages = 20_000; 62 | 63 | $ret = GetOptions( 64 | 'kafka=s' => \$base_dir, 65 | 'topic=s' => \$topic, 66 | 'partition=i' => \$partition, 67 | 'length=i' => \$msg_len, 68 | 'messages=i' => \$number_of_messages, 69 | 'help|?' => \$help, 70 | ); 71 | 72 | if ( !$ret || $help ) 73 | { 74 | print <<HELP; 75 | Usage: $0 [--kafka="..."] [--topic="..."] [--partition=...] [--length=...] [--messages=...] 76 | 77 | Profile the Kafka package modules against a running Kafka cluster. 78 | 79 | Options: 80 | --kafka="..." 81 | base directory of the Kafka server (default: the KAFKA_BASE_DIR environment variable) 82 | --topic="..." 83 | topic name 84 | --partition=... 85 | partition number 86 | --length=... 87 | length of a single message (default: 200) 88 | --messages=... 89 | number of messages to send (default: 20_000) 90 | --help 91 | display this help and exit 92 | HELP 93 | exit 1; 94 | } 95 | 96 | #-- declarations 97 | 98 | my ( $port, $connect, $producer, $consumer, $offsets, $strings, $messages ); 99 | 100 | sub exit_on_error { 101 | my ( $error ) = @_; 102 | my $message; 103 | if ( !( blessed( $error ) && $error->isa( 'Kafka::Exception' ) ) ) { 104 | $message = $error; 105 | } else { 106 | $message = $error->message; 107 | } 108 | say STDERR $message; 109 | exit 1; 110 | } 111 | 112 | sub random_strings { 113 | my ( $msg_len, $number_of ) = @_; 114 |
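# Build $number_of random strings, each $msg_len printable characters long;
# returns the list of strings and the total payload size in bytes.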
115 | my @chars = ( " ", "A" .. "Z", "a" .. "z", 0 .. 9, qw(! @ $ % ^ & *) ); 116 | 117 | my @strings; 118 | $strings[ $number_of - 1 ] = undef; 119 | foreach my $i ( 0 .. ( $number_of - 1 ) ) 120 | { 121 | $strings[ $i ] = join( q{}, @chars[ map { rand @chars } ( 1..$msg_len ) ] ); 122 | } 123 | return \@strings, $number_of * $msg_len; 124 | } 125 | 126 | 127 | 128 | foreach my $t_dir ( @T_DIRS ) { 129 | if ( -d $t_dir ) { 130 | my $cwd = getcwd(); 131 | chdir $t_dir; 132 | #-- the Kafka server port (for example for node_id = 0) 133 | try { 134 | ( $port ) = Kafka::Cluster->new( 135 | kafka_dir => $base_dir, 136 | reuse_existing => 1, 137 | t_dir => $t_dir, 138 | )->servers; 139 | } catch { 140 | exit_on_error( "Running Kafka server not found: $_" ); 141 | }; 142 | chdir $cwd; 143 | last; 144 | } 145 | } 146 | 147 | try { 148 | $connect = Kafka::Connection->new( 149 | host => 'localhost', 150 | port => $port, 151 | ); 152 | $producer = Kafka::Producer->new( Connection => $connect ); 153 | $consumer = Kafka::Consumer->new( Connection => $connect ); 154 | } catch { 155 | exit_on_error( $_ ); 156 | }; 157 | 158 | 159 | 160 | try { 161 | $offsets = $consumer->offsets( $topic, $partition, $RECEIVE_LATEST_OFFSETS ); 162 | } catch { 163 | exit_on_error( $_ ); 164 | }; 165 | my $start_offset = $offsets->[0]; 166 | 167 | say STDERR 'generation of messages can take a while ...'; 168 | ( $strings ) = random_strings( $msg_len, $number_of_messages ); 169 | 170 | say STDERR 'send messages (one by one):'; 171 | foreach my $num ( 1..$number_of_messages ) 172 | { 173 | try { 174 | $producer->send( $topic, $partition, $strings->[ $num - 1 ] ); 175 | } catch { 176 | exit_on_error( $_ ); 177 | }; 178 | print STDERR '.' unless $num % 1000; 179 | } 180 | print STDERR "\n"; 181 | 182 | say STDERR 'consume offsets (for a set of statistics):'; 183 | foreach my $num ( 1..$number_of_messages ) 184 | { 185 | try { 186 | $offsets = $consumer->offsets( $topic, $partition, $TEST_OFFSETS[ int( rand 2 ) ] ); 187 | } catch { 188 | exit_on_error( $_ ); 189 | }; 190 | scalar( @$offsets ) or exit_on_error( 'no offsets' ); 191 | print STDERR '.' unless $num % 1000; 192 | } 193 | print STDERR "\n"; 194 | 195 | say STDERR 'fetch messages (one by one):'; 196 | foreach my $num ( 1..$number_of_messages ) 197 | { 198 | try { 199 | $messages = $consumer->fetch( $topic, $partition, $start_offset + $num - 1, $msg_len + $MESSAGE_SIZE_OVERHEAD ); 200 | } catch { 201 | exit_on_error( $_ ); 202 | }; 203 | $messages->[0]->payload eq $strings->[ $num - 1 ] or exit_on_error( 'the received message does not match the original' ); 204 | print STDERR '.' unless $num % 1000; 205 | } 206 | print STDERR "\n"; 207 | 208 | 209 | 210 | $connect->close; 211 | $connect = $consumer = $producer = undef; 212 | -------------------------------------------------------------------------------- /tools/profiling.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cd ../t 4 | perl 08_cluster_start.t 5 | cd ../tools 6 | perl -d:NYTProf profiling.pl "$@" 7 | cd ../t 8 | perl 30_cluster_stop.t 9 | cd ../tools 10 | nytprofhtml --open 11 | -------------------------------------------------------------------------------- /verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # install dependencies locally (requires local::lib and cpanm to be installed already) 4 | eval "$(perl -Mlocal::lib)" 5 | cpanm --installdeps .
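# Modules exercised only by the xt/ release tests; --notest skips their own
# test suites to speed up installation.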
6 | cpanm --notest Test::Distribution 7 | cpanm --notest Test::Kwalitee 8 | cpanm --notest Test::Kwalitee::Extra 9 | 10 | export KAFKA_BASE_DIR="$( dirname $0 )/kafka" 11 | 12 | export RELEASE_TESTS=1 13 | perl Build.PL && \ 14 | ./Build build && \ 15 | ./Build test 16 | 17 | -------------------------------------------------------------------------------- /xt/author/02_fixme.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | 10 | 11 | use Test::More; 12 | 13 | 14 | 15 | 16 | 17 | use Cwd qw( 18 | abs_path 19 | ); 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | #-- Test::Fixme - check code for FIXMEs. 30 | eval 'use Test::Fixme'; ## no critic 31 | plan skip_all => 'because Test::Fixme required for testing' if $@; 32 | 33 | run_tests( 34 | where => abs_path( 'lib' ), 35 | match => qr/^#(?:TODO|FIXME|BUG)/, 36 | ); 37 | 38 | 39 | 40 | done_testing(); 41 | -------------------------------------------------------------------------------- /xt/author/03_vars.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | use lib qw( 10 | lib 11 | ); 12 | 13 | 14 | 15 | use Test::More; 16 | 17 | 18 | 19 | 20 | 21 | use Cwd qw( 22 | abs_path 23 | ); 24 | use File::Find; 25 | use File::Spec::Functions qw( 26 | catdir 27 | ); 28 | 29 | 30 | 31 | 32 | 33 | my ( $modules_dir, $t_modules_dir, @modules, @t_modules ); 34 | 35 | 36 | 37 | $modules_dir = abs_path( 'lib' ); 38 | $t_modules_dir = abs_path( catdir( 't', 'lib' ) ); 39 | 40 | foreach my $config ( 41 | { dir => $modules_dir, re => qr(\.pm$), array => \@modules, }, 42 | { dir => $t_modules_dir, re => qr(\.pm$), array => \@t_modules, }, 43 | ) { 44 | find( 45 | { 46 | wanted => sub { 47 | my $file = $File::Find::name; 48 | push( @{ $config->{array} }, abs_path( $file ) ) if -f $file && $file =~ $config->{re}; 49 | }, 50 | preprocess => sub { sort @_; }, 51 | }, 52 | $config->{dir}, 53 | ); 54 | } 55 | 56 | 57 | 58 | eval 'use Test::Vars;'; ## no critic 59 | plan( skip_all => "Test::Vars not installed: $@; skipping" ) if $@; 60 | 61 | vars_ok( $_ ) foreach @modules, @t_modules; 62 | 63 | 64 | 65 | done_testing(); 66 | -------------------------------------------------------------------------------- /xt/author/07_notabs.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | 10 | 11 | use Test::More; 12 | 13 | 14 | 15 | BEGIN { 16 | eval 'use Test::NoWarnings'; ## no critic 17 | plan skip_all => 'because Test::NoWarnings required for testing' if $@; 18 | } 19 | 20 | 21 | 22 | use Cwd qw( 23 | abs_path 24 | ); 25 | use File::Spec::Functions qw( 26 | catdir 27 | ); 28 | 29 | 30 | 31 | 32 | 33 | my ( $modules_dir, $t_dir, $author_dir, $release_dir, $t_modules_dir, $tools_dir ); 34 | 35 | 36 | 37 | $modules_dir = abs_path( 'lib' ); 38 | $t_dir = abs_path( 't' ); 39 | $author_dir = abs_path( catdir( 'xt', 'author' ) ); 40 | $release_dir = abs_path( catdir( 'xt', 'release' ) ); 41 | $t_modules_dir = catdir( $t_dir, 'lib' ); 42 | $tools_dir = abs_path( 'tools' ); 43 | 44 | 45 | 46 | eval 'use Test::NoTabs;'; ## no critic 47 | plan skip_all => 'because Test::NoTabs required for testing' if $@; 48 | 49 | all_perl_files_ok( 50 | grep {defined $_} 51 | $modules_dir, 52 | $t_dir, 53 | $author_dir, 54 | $release_dir, 55 |
$t_modules_dir, 56 | $tools_dir, 57 | ); 58 | 59 | 60 | -------------------------------------------------------------------------------- /xt/author/08_PureASCII.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | use lib qw( 10 | lib 11 | ); 12 | 13 | 14 | 15 | use Test::More; 16 | 17 | 18 | 19 | 20 | 21 | use Cwd qw( 22 | abs_path 23 | ); 24 | use File::Spec::Functions qw( 25 | catdir 26 | ); 27 | 28 | 29 | 30 | 31 | 32 | my ( $modules_dir, $t_dir, $author_dir, $release_dir, $t_modules_dir, $tools_dir ); 33 | 34 | 35 | 36 | $modules_dir = abs_path( 'lib' ); 37 | $t_dir = abs_path( 't' ); 38 | $author_dir = abs_path( catdir( 'xt', 'author' ) ); 39 | $release_dir = abs_path( catdir( 'xt', 'release' ) ); 40 | $t_modules_dir = catdir( $t_dir, 'lib' ); 41 | $tools_dir = abs_path( 'tools' ); 42 | 43 | 44 | 45 | eval 'use Test::PureASCII;'; ## no critic 46 | plan skip_all => 'because Test::PureASCII required for testing' if $@; 47 | 48 | all_perl_files_are_pure_ascii( 49 | {}, 50 | grep {defined $_} 51 | $modules_dir, 52 | $t_dir, 53 | $author_dir, 54 | $release_dir, 55 | $t_modules_dir, 56 | $tools_dir, 57 | ); 58 | 59 | 60 | 61 | done_testing(); 62 | -------------------------------------------------------------------------------- /xt/author/perlcritic.rc: -------------------------------------------------------------------------------- 1 | # PerlCritic Profile 2 | 3 | [CodeLayout::ProhibitQuotedWordLists] 4 | severity = 5 5 | min_elements = 5 6 | strict = 1 7 | 8 | [CodeLayout::ProhibitHardTabs] 9 | severity = 5 10 | allow_leading_tabs = 0 11 | 12 | [CodeLayout::ProhibitTrailingWhitespace] 13 | severity = 5 14 | 15 | [ValuesAndExpressions::ProhibitMixedBooleanOperators] 16 | severity = 5 17 | 18 | [ValuesAndExpressions::ProhibitMismatchedOperators] 19 | severity = 5 20 | 21 | [Documentation::RequirePackageMatchesPodName] 22 | severity = 5 23 | 24 | -------------------------------------------------------------------------------- /xt/release/01_kwalitee.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | 10 | 11 | use Test::More; 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | eval 'use Test::Kwalitee;'; ## no critic 26 | plan skip_all => 'because Test::Kwalitee required for testing' if $@; 27 | 28 | 29 | 30 | #done_testing(); 31 | -------------------------------------------------------------------------------- /xt/release/03_distribution.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | 10 | 11 | use Test::More; 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | # 'prereq' falsely triggered by: 26 | # Const::Fast 27 | # String::CRC32 28 | # 'prereq' verification test is performed correctly by ??_kwalitee.t 29 | eval "use Test::Distribution not => 'prereq';"; ## no critic 30 | plan skip_all => 'because Test::Distribution required for testing' if $@; 31 | 32 | 33 | -------------------------------------------------------------------------------- /xt/release/04_pod.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | 10 | 11 | use Test::More; 12 | 13 | 14 | 15 | 16 | 17 | 
use Cwd qw( 18 | abs_path 19 | ); 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | my $modules_dir = abs_path( 'lib' ); 28 | 29 | 30 | 31 | eval 'use Test::Pod;'; ## no critic 32 | plan skip_all => 'because Test::Pod required for testing' if $@; 33 | 34 | all_pod_files_ok( $modules_dir ); 35 | 36 | 37 | -------------------------------------------------------------------------------- /xt/release/05_pod_coverage.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | 3 | 4 | 5 | use 5.010; 6 | use strict; 7 | use warnings; 8 | 9 | use lib qw( 10 | lib 11 | ); 12 | 13 | 14 | 15 | use Test::More; 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | eval 'use Test::Pod::Coverage;'; ## no critic 30 | plan skip_all => 'because Test::Pod::Coverage required for testing' if $@; 31 | 32 | all_pod_coverage_ok(); 33 | 34 | 35 | --------------------------------------------------------------------------------