├── .gitignore ├── rdkafka-sys ├── readme_template ├── bindings.h ├── tests │ └── version_check.rs ├── CONTRIBUTING.md ├── update-bindings.sh ├── changelog.md ├── simple_producer.rs ├── Cargo.toml ├── README.md ├── src │ ├── lib.rs │ └── helpers.rs └── build.rs ├── .gitmodules ├── .gitattributes ├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── rdkafka.suppressions ├── src ├── log.rs ├── groups.rs ├── metadata.rs ├── lib.rs ├── config.rs └── error.rs ├── Dockerfile ├── tests ├── test_topic_partition_list.rs ├── test_metadata.rs ├── test_high_producers.rs ├── test_transactions.rs └── utils.rs ├── docker-compose.yaml ├── readme_template ├── LICENSE ├── examples ├── example_utils.rs ├── mocking.rs ├── simple_producer.rs ├── runtime_async_std.rs ├── roundtrip.rs ├── runtime_smol.rs ├── metadata.rs ├── simple_consumer.rs ├── at_least_once.rs └── asynchronous_processing.rs ├── CONTRIBUTING.md ├── generate_readme.py ├── test_suite.sh ├── coverage.sh ├── Cargo.toml └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | src/*.bk 3 | .idea 4 | -------------------------------------------------------------------------------- /rdkafka-sys/readme_template: -------------------------------------------------------------------------------- 1 | # rdkafka-sys 2 | 3 | __INCLUDE_RUST_DOC__$src/lib.rs 4 | -------------------------------------------------------------------------------- /rdkafka-sys/bindings.h: -------------------------------------------------------------------------------- 1 | #include "librdkafka/src/rdkafka.h" 2 | #include "librdkafka/src/rdkafka_mock.h" 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rdkafka-sys/librdkafka"] 2 | path = rdkafka-sys/librdkafka 3 | url = https://github.com/edenhill/librdkafka 4 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | rdkafka-sys/src/bindings/linux.rs linguist-generated=true 2 | rdkafka-sys/src/bindings/macos.rs linguist-generated=true 3 | rdkafka-sys/src/bindings/linux_64.rs linguist-generated=true 4 | rdkafka-sys/src/bindings/macos_64.rs linguist-generated=true 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: / 5 | schedule: { interval: weekly } 6 | open-pull-requests-limit: 10 7 | ignore: 8 | - dependency-name: "*" 9 | update-types: [version-update:semver-minor, version-update:semver-patch] 10 | -------------------------------------------------------------------------------- /rdkafka.suppressions: -------------------------------------------------------------------------------- 1 | # Valgrind suppression file. 2 | 3 | # Spurious statx complaints: https://github.com/rust-lang/rust/issues/68979. 4 | # TODO(benesch): remove when CI upgrades to Valgrind 3.16. 5 | { 6 | 7 | Memcheck:Param 8 | statx(file_name) 9 | fun:statx 10 | fun:statx 11 | fun:_ZN3std3sys4unix2fs9try_statx* 12 | ... 13 | } 14 | { 15 | 16 | Memcheck:Param 17 | statx(buf) 18 | fun:statx 19 | fun:statx 20 | fun:_ZN3std3sys4unix2fs9try_statx* 21 | ... 
22 | } 23 | -------------------------------------------------------------------------------- /rdkafka-sys/tests/version_check.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CStr; 2 | 3 | #[test] 4 | fn check_version() { 5 |     let cargo_version = match env!("CARGO_PKG_VERSION") 6 |         .split('+') 7 |         .collect::<Vec<_>>() 8 |         .as_slice() 9 |     { 10 |         [_rdsys_version, librdkafka_version] => *librdkafka_version, 11 |         _ => panic!("Version format is not valid"), 12 |     }; 13 | 14 |     let librdkafka_version = 15 |         unsafe { CStr::from_ptr(rdkafka_sys::rd_kafka_version_str()).to_string_lossy() }; 16 |     println!("librdkafka version: {}", librdkafka_version); 17 | 18 |     assert_eq!(cargo_version, &librdkafka_version); 19 | } 20 | -------------------------------------------------------------------------------- /rdkafka-sys/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Maintainer and contributor instructions 2 | 3 | ## Upgrading librdkafka 4 | 5 | To update to a new version of librdkafka: 6 | 7 | ``` bash 8 | git submodule update --init 9 | cd rdkafka-sys/librdkafka 10 | git checkout $DESIRED_VERSION 11 | cargo install bindgen-cli 12 | ./update-bindings.sh 13 | ``` 14 | 15 | Then: 16 | 17 | * Add a changelog entry to rdkafka-sys/changelog.md. 18 | * Update src/lib.rs with the new version. 19 | 20 | ## Releasing 21 | 22 | * Check out master and pull the latest changes. 23 | * Ensure the changelog is up to date (i.e. no unreleased changes). 24 | * Run `cd rdkafka-sys && ../generate_readme.py > README.md`. 25 | * Bump the version in Cargo.toml and commit locally. 26 | * Run `cargo publish`. 27 | * Push the commit. 28 | -------------------------------------------------------------------------------- /src/log.rs: -------------------------------------------------------------------------------- 1 | //! A wrapper module to export logging functionality from 2 | //! [`log`] or [`tracing`] depending on the `tracing` feature. 3 | //! 4 | //! [`log`]: https://docs.rs/log 5 | //!
[`tracing`]: https://docs.rs/tracing 6 | 7 | #[cfg(not(feature = "tracing"))] 8 | pub use log::Level::{Debug as DEBUG, Info as INFO, Warn as WARN}; 9 | #[cfg(not(feature = "tracing"))] 10 | pub use log::{debug, error, info, log_enabled, trace, warn}; 11 | 12 | #[cfg(feature = "tracing")] 13 | pub use tracing::{debug, enabled as log_enabled, error, info, trace, warn}; 14 | #[cfg(feature = "tracing")] 15 | pub const DEBUG: tracing::Level = tracing::Level::DEBUG; 16 | #[cfg(feature = "tracing")] 17 | pub const INFO: tracing::Level = tracing::Level::INFO; 18 | #[cfg(feature = "tracing")] 19 | pub const WARN: tracing::Level = tracing::Level::WARN; 20 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.10 2 | 3 | RUN apt-get update && apt-get install -y build-essential \ 4 | curl \ 5 | openssl libssl-dev \ 6 | pkg-config \ 7 | python \ 8 | valgrind \ 9 | zlib1g-dev 10 | 11 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.74 12 | ENV PATH=/root/.cargo/bin/:$PATH 13 | 14 | # # Create dummy project for rdkafka 15 | # COPY Cargo.toml /rdkafka/ 16 | # RUN mkdir -p /rdkafka/src && echo "fn main() {}" > /rdkafka/src/main.rs 17 | # 18 | # # Create dummy project for rdkafka 19 | # RUN mkdir /rdkafka/rdkafka-sys 20 | # COPY rdkafka-sys/Cargo.toml /rdkafka/rdkafka-sys 21 | # RUN mkdir -p /rdkafka/rdkafka-sys/src && touch /rdkafka/rdkafka-sys/src/lib.rs 22 | # RUN echo "fn main() {}" > /rdkafka/rdkafka-sys/build.rs 23 | # 24 | # RUN cd /rdkafka && test --no-run 25 | 26 | COPY docker/run_tests.sh /rdkafka/ 27 | 28 | ENV KAFKA_HOST=kafka:9092 29 | 30 | WORKDIR /rdkafka 31 | -------------------------------------------------------------------------------- /tests/test_topic_partition_list.rs: -------------------------------------------------------------------------------- 1 | use rdkafka::{Offset, TopicPartitionList}; 2 | 3 | /// Test topic partition list API and wrappers. 
4 | 5 | #[test] 6 | fn test_fmt_debug() { 7 | { 8 | let tpl = TopicPartitionList::new(); 9 | assert_eq!(format!("{tpl:?}"), "[]"); 10 | } 11 | 12 | { 13 | let mut tpl = TopicPartitionList::new(); 14 | tpl.add_topic_unassigned("foo"); 15 | tpl.add_partition("bar", 8); 16 | tpl.add_partition_offset("bar", 7, Offset::Offset(42)) 17 | .unwrap(); 18 | assert_eq!( 19 | format!("{tpl:?}"), 20 | "[TopicPartitionListElem { topic: \"foo\", partition: -1, offset: Invalid, metadata: \"\", error: Ok(()) }, \ 21 | TopicPartitionListElem { topic: \"bar\", partition: 8, offset: Invalid, metadata: \"\", error: Ok(()) }, \ 22 | TopicPartitionListElem { topic: \"bar\", partition: 7, offset: Offset(42), metadata: \"\", error: Ok(()) }]"); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka: 3 | image: bitnamilegacy/kafka:${KAFKA_VERSION:-4.0} 4 | environment: 5 | # Enable KRaft mode (combined broker and controller) 6 | - KAFKA_CFG_NODE_ID=0 7 | - KAFKA_CFG_BROKER_ID=0 # In KRaft, this should be the same as the node ID 8 | - KAFKA_CFG_PROCESS_ROLES=broker,controller 9 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 10 | - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093 11 | - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 12 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT 13 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@localhost:9093 14 | 15 | # Bitnami defaults 16 | - KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR=1 17 | - KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1 18 | - KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR=1 19 | - KAFKA_CFG_NUM_PARTITIONS=3 20 | 21 | # This is a Bitnami-specific variable to disable ZooKeeper 22 | - KAFKA_KRAFT_ENABLED=true 23 | ports: ["9092:9092"] 24 | -------------------------------------------------------------------------------- /readme_template: -------------------------------------------------------------------------------- 1 | # rust-rdkafka 2 | 3 | [![crates.io](https://img.shields.io/crates/v/rdkafka.svg)](https://crates.io/crates/rdkafka) 4 | [![docs.rs](https://docs.rs/rdkafka/badge.svg)](https://docs.rs/rdkafka/) 5 | [![Build Status](https://travis-ci.org/fede1024/rust-rdkafka.svg?branch=master)](https://travis-ci.org/fede1024/rust-rdkafka) 6 | [![coverate](https://codecov.io/gh/fede1024/rust-rdkafka/graphs/badge.svg?branch=master)](https://codecov.io/gh/fede1024/rust-rdkafka/) 7 | [![Join the chat at https://gitter.im/rust-rdkafka/Lobby](https://badges.gitter.im/rust-rdkafka/Lobby.svg)](https://gitter.im/rust-rdkafka/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 8 | 9 | __INCLUDE_RUST_DOC__$src/lib.rs 10 | 11 | ## rdkafka-sys 12 | 13 | See [rdkafka-sys](https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys). 14 | 15 | ## Contributors 16 | 17 | Thanks to: 18 | * Thijs Cadier - [thijsc](https://github.com/thijsc) 19 | 20 | ## Alternatives 21 | 22 | * [kafka-rust]: a pure Rust implementation of the Kafka client. 
23 | 24 | [kafka-rust]: https://github.com/spicavigo/kafka-rust 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Federico Giraud 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/example_utils.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | use std::thread; 3 | 4 | use chrono::prelude::*; 5 | use env_logger::fmt::Formatter; 6 | use env_logger::Builder; 7 | use log::{LevelFilter, Record}; 8 | 9 | pub fn setup_logger(log_thread: bool, rust_log: Option<&String>) { 10 |     let output_format = move |formatter: &mut Formatter, record: &Record| { 11 |         let thread_name = if log_thread { 12 |             format!("(t: {}) ", thread::current().name().unwrap_or("unknown")) 13 |         } else { 14 |             "".to_string() 15 |         }; 16 | 17 |         let local_time: DateTime<Local> = Local::now(); 18 |         let time_str = local_time.format("%H:%M:%S%.3f").to_string(); 19 |         writeln!( 20 |             formatter, 21 |             "{} {}{} - {} - {}", 22 |             time_str, 23 |             thread_name, 24 |             record.level(), 25 |             record.target(), 26 |             record.args() 27 |         ) 28 |     }; 29 | 30 |     let mut builder = Builder::new(); 31 |     builder 32 |         .format(output_format) 33 |         .filter(None, LevelFilter::Info); 34 | 35 |     rust_log.map(|conf| builder.parse_filters(conf)); 36 | 37 |     builder.init(); 38 | } 39 | 40 | #[allow(dead_code)] 41 | fn main() { 42 |     println!("This is not an example"); 43 | } 44 | -------------------------------------------------------------------------------- /rdkafka-sys/update-bindings.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # rd_kafka_conf_set_open_cb/rd_kafka_conf_set_resolve_cb are blocklisted 4 | # because they are not compiled on Windows due to their usage of the Unix-only 5 | # `mode_t` type. With a bit of elbow grease we could include them if not 6 | # targeting Windows, but it doesn't seem worthwhile at the moment.
7 | 8 | bindgen \ 9 | --no-doc-comments \ 10 | --no-layout-tests \ 11 | --rustified-enum ".*" \ 12 | --allowlist-function "rd_kafka.*" \ 13 | --allowlist-type "rd_kafka.*" \ 14 | --allowlist-var "rd_kafka.*|RD_KAFKA_.*" \ 15 | --no-recursive-allowlist \ 16 | --blocklist-function "rd_kafka_conf_set_open_cb" \ 17 | --blocklist-function "rd_kafka_conf_set_resolve_cb" \ 18 | --raw-line "use libc::{FILE, sockaddr, c_int, c_void, c_char};" \ 19 | --raw-line "use num_enum::TryFromPrimitive;" \ 20 | --default-macro-constant-type "signed" \ 21 | --rust-edition 2021 \ 22 | --rust-target 1.74 \ 23 | "bindings.h" -o "src/bindings.rs" 24 | 25 | # Derive TryFromPrimitive for rd_kafka_resp_err_t. 26 | perl -i -p0e 's/#\[derive\((.*)\)\]\npub enum rd_kafka_resp_err_t/#\[derive($1, TryFromPrimitive)\]\npub enum rd_kafka_resp_err_t/s' src/bindings.rs 27 | 28 | # Clean up the bindings a bit. 29 | 30 | sed \ 31 | -e 's/::std::option::Option/Option/' \ 32 | -e 's/::std::os::raw::c_int/c_int/' \ 33 | -e 's/::std::os::raw::c_void/c_void/' \ 34 | -e 's/::std::os::raw::c_char/c_char/' \ 35 | src/bindings.rs > src/bindings.rs.new 36 | 37 | mv src/bindings.rs{.new,} 38 | 39 | rustfmt src/bindings.rs 40 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Maintainer and contributor instructions 2 | 3 | ## Compiling from source 4 | 5 | To compile from source, you'll have to initialize the submodule containing 6 | librdkafka: 7 | 8 | ```bash 9 | git submodule update --init 10 | ``` 11 | 12 | and then compile using `cargo`, selecting the features that you want. 13 | Example: 14 | 15 | ```bash 16 | cargo build --features "ssl gssapi" 17 | ``` 18 | 19 | ## Tests 20 | 21 | ### Unit tests 22 | 23 | The unit tests can run without a Kafka broker present: 24 | 25 | ```bash 26 | cargo test --lib 27 | ``` 28 | 29 | ### Automatic testing 30 | 31 | rust-rdkafka contains a suite of tests which is automatically executed by travis in 32 | docker-compose. Given the interaction with C code that rust-rdkafka has to do, tests 33 | are executed in valgrind to check eventual memory errors and leaks. 34 | 35 | To run the full suite using docker-compose: 36 | 37 | ```bash 38 | ./test_suite.sh 39 | ``` 40 | 41 | To run locally, instead: 42 | 43 | ```bash 44 | KAFKA_HOST="kafka_server:9092" cargo test 45 | ``` 46 | 47 | In this case there is a broker expected to be running on `KAFKA_HOST`. 48 | The broker must be configured with default partition number 3 and topic 49 | autocreation in order for the tests to succeed. 50 | 51 | ## Releasing 52 | 53 | * Checkout into master and pull the latest changes. 54 | * Ensure `rdkafka-sys` has no unreleased changes. If it does, release `rdkafka-sys` first. 55 | * Ensure the changelog is up to date (i.e not Unreleased changes). 56 | * Run `./generate_readme.py > README.md`. 57 | * Bump the version in Cargo.toml and commit locally. 58 | * Run `cargo publish`. 59 | * Run `git tag -am $VERSION $VERSION`. 60 | * Run `git push`. 61 | * Run `git push origin $VERSION`. 
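The integration tests read the broker address from the `KAFKA_HOST` environment variable described in the Tests section above. A minimal sketch of a helper that resolves it, assuming a `localhost:9092` fallback (the actual default used by `tests/utils.rs` is not shown here):

```rust
use std::env;

/// Broker list for the integration tests. The fallback value is illustrative;
/// the real test helper may use a different default.
fn kafka_host() -> String {
    env::var("KAFKA_HOST").unwrap_or_else(|_| "localhost:9092".to_string())
}

fn main() {
    println!("Using brokers at {}", kafka_host());
}
```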
62 | -------------------------------------------------------------------------------- /generate_readme.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from collections import namedtuple 4 | import sys 5 | 6 | 7 | INCLUDE_MARKER = "__INCLUDE_RUST_DOC__" 8 | 9 | Template = namedtuple("Template", ["header", "footer", "doc_path", "start"]) 10 | 11 | 12 | def read_rust_doc_lines(path): 13 | with open(path, "r") as rust_doc: 14 | for line in rust_doc: 15 | if line.startswith('//! '): 16 | yield line[4:] 17 | elif line.startswith('//!'): 18 | yield line[3:] 19 | else: 20 | break 21 | 22 | 23 | def parse_template_file(path): 24 | content = [line for line in open(path, "r")] 25 | try: 26 | marker_position = [n for (n, line) in enumerate(content) 27 | if line.startswith(INCLUDE_MARKER)][0] 28 | except IndexError: 29 | raise Exception("Missing include marker") 30 | include_info = content[marker_position].strip().split('$') 31 | doc_path = include_info[1] 32 | start = None 33 | if len(include_info) > 2: 34 | start = include_info[2] 35 | return Template( 36 | header=content[0:marker_position], footer=content[marker_position+1:], 37 | doc_path=include_info[1], start=start, 38 | ) 39 | 40 | 41 | template = parse_template_file("readme_template") 42 | doc = read_rust_doc_lines(template.doc_path) 43 | 44 | output = sys.stdout 45 | 46 | for line in template.header: 47 | output.write(line) 48 | 49 | if template.start: 50 | for line in doc: 51 | if line.startswith(template.start): 52 | output.write(line) 53 | break 54 | 55 | for line in doc: 56 | output.write(line) 57 | 58 | for line in template.footer: 59 | output.write(line) 60 | -------------------------------------------------------------------------------- /test_suite.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | GREEN='\033[0;32m' 4 | RED='\033[0;31m' 5 | NC='\033[0m' # No Color 6 | 7 | set -euo pipefail 8 | 9 | echo_good() { 10 | tput setaf 2 11 | echo "$@" 12 | tput sgr0 13 | } 14 | 15 | echo_bad() { 16 | tput setaf 1 17 | echo "$@" 18 | tput sgr0 19 | } 20 | 21 | run_with_valgrind() { 22 | if ! valgrind --error-exitcode=100 --suppressions=rdkafka.suppressions --gen-suppressions=all --leak-check=full "$1" --nocapture --test-threads=1 23 | then 24 | echo_bad "*** Failure in $1 ***" 25 | exit 1 26 | fi 27 | } 28 | 29 | # Initialize. 30 | 31 | git submodule update --init 32 | docker compose up --wait 33 | 34 | # Run integration tests 35 | export RUST_LOG=${RUST_LOG:-off} 36 | RUST_BACKTRACE=1 cargo test "$@" 37 | 38 | 39 | # Run unit tests. 40 | 41 | #echo_good "*** Run unit tests ***" 42 | #for test_file in target/debug/deps/rdkafka-* 43 | #do 44 | # if [[ -x "$test_file" ]] 45 | # then 46 | # echo_good "Executing "$test_file"" 47 | # run_with_valgrind "$test_file" 48 | # fi 49 | #done 50 | #echo_good "*** Unit tests succeeded ***" 51 | # 52 | ## Run integration tests. 53 | # 54 | #echo_good "*** Run integration tests ***" 55 | #for test_file in target/debug/deps/test_* 56 | #do 57 | # if [[ -x "$test_file" ]] 58 | # then 59 | # #echo_good "*** Restarting kafka/zk ***" 60 | # #docker-compose restart --timeout 30 61 | # echo_good "Executing "$test_file"" 62 | # run_with_valgrind "$test_file" 63 | # fi 64 | #done 65 | #echo_good "*** Integration tests succeeded ***" 66 | 67 | # Run smol runtime example. 
68 | 69 | echo_good "*** Run runtime_smol example ***" 70 | cargo run --example runtime_smol --no-default-features --features cmake-build -- --topic smol 71 | echo_good "*** runtime_smol example succeeded ***" 72 | 73 | # Run async-std runtime example. 74 | 75 | echo_good "*** Run runtime_async_std example ***" 76 | cargo run --example runtime_async_std --no-default-features --features cmake-build -- --topic async-std 77 | echo_good "*** runtime_async_std example succeeded ***" 78 | -------------------------------------------------------------------------------- /coverage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GREEN='\033[0;32m' 4 | RED='\033[0;31m' 5 | NC='\033[0m' # No Color 6 | 7 | INCLUDE="/src" 8 | EXCLUDE="/.cargo,rdkafka-sys/librdkafka,rdkafka-sys/src/bindings" 9 | TARGET="target/cov" 10 | 11 | KCOV_ARGS="--include-pattern=$INCLUDE --exclude-pattern=$EXCLUDE --verify $TARGET" 12 | 13 | RDKAFKA_UNIT_TESTS="target/debug/rdkafka-" 14 | RDKAFKASYS_UNIT_TESTS="rdkafka-sys/target/debug/rdkafka_sys-" 15 | INTEGRATION_TESTS="target/debug/test_" 16 | 17 | export RUSTFLAGS="-C link-dead-code" 18 | 19 | echo -e "${GREEN}*** Clean previous coverage results and executables ***${NC}" 20 | rm -rf "$TARGET" 21 | rm -f "$RDKAFKA_UNIT_TESTS"* 22 | rm -f "$RDKAFKASYS_UNIT_TESTS"* 23 | rm -f "$INTEGRATION_TESTS"* 24 | 25 | echo -e "${GREEN}*** Rebuilding tests ***${NC}" 26 | cargo test --no-run 27 | pushd rdkafka-sys && cargo test --no-run && popd 28 | 29 | echo -e "${GREEN}*** Run coverage on rdkafka unit tests ***${NC}" 30 | for test_file in `ls "$RDKAFKA_UNIT_TESTS"*` 31 | do 32 | if [[ ! -x "$test_file" ]]; then 33 | continue 34 | fi 35 | kcov $KCOV_ARGS "$test_file" 36 | if [ "$?" != "0" ]; then 37 | echo -e "${RED}*** Failure during unit test converage ***${NC}" 38 | exit 1 39 | fi 40 | done 41 | 42 | echo -e "${GREEN}*** Run coverage on rdkafka-sys unit tests ***${NC}" 43 | for test_file in `ls "$RDKAFKASYS_UNIT_TESTS"*` 44 | do 45 | if [[ ! -x "$test_file" ]]; then 46 | continue 47 | fi 48 | kcov $KCOV_ARGS "$test_file" 49 | if [ "$?" != "0" ]; then 50 | echo -e "${RED}*** Failure during rdkafka-sys unit test converage ***${NC}" 51 | exit 1 52 | fi 53 | done 54 | 55 | echo -e "${GREEN}*** Run coverage on rdkafka integration tests ***${NC}" 56 | for test_file in `ls "$INTEGRATION_TESTS"*` 57 | do 58 | if [[ ! -x "$test_file" ]]; then 59 | continue 60 | fi 61 | echo -e "${GREEN}Executing "$test_file"${NC}" 62 | kcov $KCOV_ARGS "$test_file" 63 | if [ "$?" != "0" ]; then 64 | echo -e "${RED}*** Failure during integration converage ***${NC}" 65 | exit 1 66 | fi 67 | done 68 | 69 | echo -e "${GREEN}*** Coverage completed successfully ***${NC}" 70 | -------------------------------------------------------------------------------- /rdkafka-sys/changelog.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## vNEXT+2.12.1 (2025-11-05) 4 | * Bump librdkafka to v2.12.1. 5 | 6 | ## v4.9.0+2.10.0 7 | * Add cargo enforcement of MSRV of 1.74. 8 | * Bump librdkafka to v2.10.0. 9 | 10 | ## v4.7.0+2.2.0 (2023-11-07) 11 | 12 | * Upgrade to librdkafka v2.3.0. 13 | * Add cargo enforcement of MSRV of 1.61. 14 | 15 | ## v4.6.0+2.2.0 (2023-08-25) 16 | 17 | * Upgrade to librdkafka v2.2.0. 18 | 19 | ## v4.5.0+1.9.2 (2023-06-09) 20 | 21 | * Add support for the cluster mock API. 
22 | 23 | ## v4.4.0+1.9.2 (2023-05-12) 24 | 25 | * Add the `RDKafkaDeleteGroup` and `RDKafkaGroupResult` type aliases to the 26 | `types` module. 27 | 28 | ## v4.3.0+1.9.2 (2022-10-29) 29 | 30 | * Upgrade to librdkafka v1.9.2. 31 | 32 | * Support building outside of Cargo, where the `CARGO_MAKEFLAGS` environment 33 | variable is unlikely to be set. The build script would previously panic if 34 | `CARGO_MAKEFLAGS` was not set. 35 | 36 | ## v4.2.0+1.8.2 (2021-11-27) 37 | 38 | * Upgrade to librdkafka v1.8.2. 39 | 40 | ## v4.1.0+1.7.0 (2021-10-16) 41 | 42 | * Upgrade to librdkafka v1.7.0. 43 | 44 | ## v4.0.0+1.6.1 (2021-03-16) 45 | 46 | * **Breaking change.** Mark the `RDKafkaErrorCode` enum as [non-exhaustive], so 47 | that future additions to the enum will not be considered breaking changes. 48 | 49 | * Upgrade to librdkafka v1.6.1. 50 | 51 | ## v3.0.0+1.6.0 (2021-01-30) 52 | 53 | * **Breaking change.** Rename `RDKafkaError` to `RDKafkaErrorCode`. This makes 54 | space for the new `RDKafkaError` type, which mirrors the `rd_kafka_error_t` 55 | type added to librdkafka in v1.4.0. 56 | 57 | This change was made to reduce long-term confusion by ensuring the types in 58 | rust-rdkafka map to types in librdkafka as directly as possible. The 59 | maintainers apologize for the difficulty in upgrading through this change. 60 | 61 | * Upgrade to librdkafka v1.6.0. 62 | 63 | * Enforce a minimum zstd-sys version of 1.4.19. This bumps the vendored version 64 | of libzstd to at least v1.4.8, which avoids a bug in libzstd v1.4.5 that could 65 | cause decompression failures ([edenhill/librdkafka#2672]). 66 | 67 | ## v2.1.1+1.5.3 (2021-01-05) 68 | 69 | * Yanked due to an inadvertent breaking change. 70 | 71 | ## v2.1.0+1.5.0 (2020-08-02) 72 | 73 | * Upgrade to librdkafka v1.5.0. 74 | 75 | ## v2.0.0+1.4.2 (2020-07-08) 76 | 77 | * Start separate changelog for rdkafka-sys. 78 | 79 | * Upgrade to librdkafka v1.4.2. 80 | 81 | * Correct several references to `usize` in the generated bindings to `size_t`. 
82 | 83 | [edenhill/librdkafka#2672]: https://github.com/edenhill/librdkafka/issues/2672 84 | [edenhill/librdkafka#3249]: https://github.com/edenhill/librdkafka/issues/3249 85 | [non-exhaustive]: https://doc.rust-lang.org/reference/attributes/type_system.html#the-non_exhaustive-attribute 86 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rdkafka" 3 | version = "0.38.0" 4 | authors = ["Federico Giraud "] 5 | repository = "https://github.com/fede1024/rust-rdkafka" 6 | readme = "README.md" 7 | license = "MIT" 8 | description = "Rust wrapper for librdkafka" 9 | keywords = ["kafka", "rdkafka"] 10 | categories = ["api-bindings"] 11 | edition = "2021" 12 | exclude = ["Cargo.lock"] 13 | rust-version = "1.74" 14 | 15 | [workspace] 16 | members = ["rdkafka-sys"] 17 | 18 | [dependencies] 19 | rdkafka-sys = { path = "rdkafka-sys", version = "4.9.0", default-features = false } 20 | futures-channel = "0.3.31" 21 | futures-executor = { version = "0.3.31", optional = true } 22 | futures-util = { version = "0.3.31", default-features = false } 23 | libc = "0.2.172" 24 | log = "0.4.27" 25 | serde = { version = "1.0.219", features = ["derive"] } 26 | serde_derive = "1.0.219" 27 | serde_json = "1.0.140" 28 | slab = "0.4" 29 | tokio = { version = "1.45.0", features = ["rt", "time"], optional = true } 30 | tracing = { version = "0.1.41", optional = true } 31 | 32 | [dev-dependencies] 33 | async-std = { version = "1.13.1", features = ["attributes"] } 34 | backon = { version = "1.5.0", default-features = false, features = ["std-blocking-sleep"] } 35 | chrono = "0.4.41" 36 | clap = "4.5.38" 37 | env_logger = "0.11.8" 38 | futures = "0.3.31" 39 | hdrhistogram = "7.0.0" 40 | maplit = "1.0.2" 41 | rand = "0.9.1" 42 | regex = "1.11.1" 43 | smol = "2.0.2" 44 | tokio = { version = "1.18", features = ["macros", "rt-multi-thread", "time"] } 45 | 46 | # These features are re-exports of the features that the rdkafka-sys crate 47 | # provides. See the rdkafka-sys documentation for details. 48 | [features] 49 | default = ["libz", "tokio"] 50 | naive-runtime = ["futures-executor"] 51 | cmake-build = ["rdkafka-sys/cmake-build"] 52 | cmake_build = ["rdkafka-sys/cmake_build"] 53 | dynamic-linking = ["rdkafka-sys/dynamic-linking"] 54 | dynamic_linking = ["rdkafka-sys/dynamic_linking"] 55 | ssl = ["rdkafka-sys/ssl"] 56 | ssl-vendored = ["rdkafka-sys/ssl-vendored"] 57 | gssapi = ["rdkafka-sys/gssapi"] 58 | gssapi-vendored = ["rdkafka-sys/gssapi-vendored"] 59 | sasl = ["rdkafka-sys/sasl"] 60 | libz = ["rdkafka-sys/libz"] 61 | libz-static = ["rdkafka-sys/libz-static"] 62 | curl = ["rdkafka-sys/curl"] 63 | curl-static = ["rdkafka-sys/curl-static"] 64 | zstd = ["rdkafka-sys/zstd"] 65 | zstd-pkg-config = ["rdkafka-sys/zstd-pkg-config"] 66 | external-lz4 = ["rdkafka-sys/external-lz4"] 67 | external_lz4 = ["rdkafka-sys/external_lz4"] 68 | static-linking = ["rdkafka-sys/static-linking"] 69 | 70 | [package.metadata.docs.rs] 71 | # docs.rs doesn't allow writing to ~/.cargo/registry (reasonably), so we have to 72 | # use the CMake build for a proper out-of-tree build. 73 | features = ["cmake-build", "naive-runtime", "tracing", "tokio"] 74 | rustdoc-args = ["--cfg", "docsrs"] 75 | -------------------------------------------------------------------------------- /examples/mocking.rs: -------------------------------------------------------------------------------- 1 | //! 
This example is similar to the roundtrip one but uses the mock API. 2 | 3 | use std::convert::TryInto; 4 | use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; 5 | 6 | use hdrhistogram::Histogram; 7 | 8 | use rdkafka::config::ClientConfig; 9 | use rdkafka::consumer::{Consumer, StreamConsumer}; 10 | use rdkafka::message::Message; 11 | use rdkafka::mocking::MockCluster; 12 | use rdkafka::producer::{FutureProducer, FutureRecord}; 13 | 14 | #[tokio::main] 15 | async fn main() { 16 | const TOPIC: &str = "test_topic"; 17 | let mock_cluster = MockCluster::new(3).unwrap(); 18 | mock_cluster 19 | .create_topic(TOPIC, 32, 3) 20 | .expect("Failed to create topic"); 21 | 22 | let producer: FutureProducer = ClientConfig::new() 23 | .set("bootstrap.servers", mock_cluster.bootstrap_servers()) 24 | .create() 25 | .expect("Producer creation error"); 26 | 27 | let consumer: StreamConsumer = ClientConfig::new() 28 | .set("bootstrap.servers", mock_cluster.bootstrap_servers()) 29 | .set("group.id", "rust-rdkafka-mock-example") 30 | .create() 31 | .expect("Consumer creation failed"); 32 | consumer.subscribe(&[TOPIC]).unwrap(); 33 | 34 | tokio::spawn(async move { 35 | let mut i = 0_usize; 36 | loop { 37 | producer 38 | .send_result( 39 | FutureRecord::to(TOPIC) 40 | .key(&i.to_string()) 41 | .payload("dummy") 42 | .timestamp(now()), 43 | ) 44 | .unwrap() 45 | .await 46 | .unwrap() 47 | .unwrap(); 48 | i += 1; 49 | } 50 | }); 51 | 52 | let start = Instant::now(); 53 | let mut latencies = Histogram::::new(5).unwrap(); 54 | println!("Warming up for 10s..."); 55 | loop { 56 | let message = consumer.recv().await.unwrap(); 57 | let then = message.timestamp().to_millis().unwrap(); 58 | if start.elapsed() < Duration::from_secs(10) { 59 | // Warming up. 60 | } else if start.elapsed() < Duration::from_secs(20) { 61 | if latencies.is_empty() { 62 | println!("Recording for 10s..."); 63 | } 64 | latencies += (now() - then) as u64; 65 | } else { 66 | break; 67 | } 68 | } 69 | 70 | println!("measurements: {}", latencies.len()); 71 | println!("mean latency: {}ms", latencies.mean()); 72 | println!("p50 latency: {}ms", latencies.value_at_quantile(0.50)); 73 | println!("p90 latency: {}ms", latencies.value_at_quantile(0.90)); 74 | println!("p99 latency: {}ms", latencies.value_at_quantile(0.99)); 75 | } 76 | 77 | fn now() -> i64 { 78 | SystemTime::now() 79 | .duration_since(UNIX_EPOCH) 80 | .unwrap() 81 | .as_millis() 82 | .try_into() 83 | .unwrap() 84 | } 85 | -------------------------------------------------------------------------------- /rdkafka-sys/simple_producer.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use clap::{Arg, Command}; 4 | use log::info; 5 | 6 | use rdkafka::config::ClientConfig; 7 | use rdkafka::message::OwnedHeaders; 8 | use rdkafka::producer::{FutureProducer, FutureRecord}; 9 | use rdkafka::util::get_rdkafka_version; 10 | 11 | use crate::example_utils::setup_logger; 12 | 13 | mod example_utils; 14 | 15 | async fn produce(brokers: &str, topic_name: &str) { 16 | let producer: &FutureProducer = &ClientConfig::new() 17 | .set("bootstrap.servers", brokers) 18 | .set("message.timeout.ms", "5000") 19 | .create() 20 | .expect("Producer creation error"); 21 | 22 | // This loop is non blocking: all messages will be sent one after the other, without waiting 23 | // for the results. 
24 | let futures = (0..5) 25 | .map(|i| async move { 26 | // The send operation on the topic returns a future, which will be 27 | // completed once the result or failure from Kafka is received. 28 | let delivery_status = producer 29 | .send( 30 | FutureRecord::to(topic_name) 31 | .payload(&format!("Message {}", i)) 32 | .key(&format!("Key {}", i)) 33 | .headers(OwnedHeaders::new().add("header_key", "header_value")), 34 | Duration::from_secs(0), 35 | ) 36 | .await; 37 | 38 | // This will be executed when the result is received. 39 | info!("Delivery status for message {} received", i); 40 | delivery_status 41 | }) 42 | .collect::>(); 43 | 44 | // This loop will wait until all delivery statuses have been received. 45 | for future in futures { 46 | info!("Future completed. Result: {:?}", future.await); 47 | } 48 | } 49 | 50 | #[tokio::main] 51 | async fn main() { 52 | let matches = Command::new("producer example") 53 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 54 | .about("Simple command line producer") 55 | .arg( 56 | Arg::new("brokers") 57 | .short('b') 58 | .long("brokers") 59 | .help("Broker list in kafka format") 60 | .default_value("localhost:9092"), 61 | ) 62 | .arg( 63 | Arg::new("log-conf") 64 | .long("log-conf") 65 | .help("Configure the logging format (example: 'rdkafka=trace')"), 66 | ) 67 | .arg( 68 | Arg::new("topic") 69 | .short('t') 70 | .long("topic") 71 | .help("Destination topic") 72 | .required(true), 73 | ) 74 | .get_matches(); 75 | 76 | setup_logger(true, *matches.get_one("log-conf").unwrap()); 77 | 78 | let (version_n, version_s) = get_rdkafka_version(); 79 | info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s); 80 | 81 | let topic = matches.get_one::("topic").unwrap(); 82 | let brokers = matches.get_one::("brokers").unwrap(); 83 | 84 | produce(brokers, topic).await; 85 | } 86 | -------------------------------------------------------------------------------- /examples/simple_producer.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use clap::{Arg, Command}; 4 | use log::info; 5 | 6 | use rdkafka::config::ClientConfig; 7 | use rdkafka::message::{Header, OwnedHeaders}; 8 | use rdkafka::producer::{FutureProducer, FutureRecord}; 9 | use rdkafka::util::get_rdkafka_version; 10 | 11 | use crate::example_utils::setup_logger; 12 | 13 | mod example_utils; 14 | 15 | async fn produce(brokers: &str, topic_name: &str) { 16 | let producer: &FutureProducer = &ClientConfig::new() 17 | .set("bootstrap.servers", brokers) 18 | .set("message.timeout.ms", "5000") 19 | .create() 20 | .expect("Producer creation error"); 21 | 22 | // This loop is non blocking: all messages will be sent one after the other, without waiting 23 | // for the results. 24 | let futures = (0..5) 25 | .map(|i| async move { 26 | // The send operation on the topic returns a future, which will be 27 | // completed once the result or failure from Kafka is received. 28 | let delivery_status = producer 29 | .send( 30 | FutureRecord::to(topic_name) 31 | .payload(&format!("Message {}", i)) 32 | .key(&format!("Key {}", i)) 33 | .headers(OwnedHeaders::new().insert(Header { 34 | key: "header_key", 35 | value: Some("header_value"), 36 | })), 37 | Duration::from_secs(0), 38 | ) 39 | .await; 40 | 41 | // This will be executed when the result is received. 42 | info!("Delivery status for message {} received", i); 43 | delivery_status 44 | }) 45 | .collect::>(); 46 | 47 | // This loop will wait until all delivery statuses have been received. 
48 | for future in futures { 49 | info!("Future completed. Result: {:?}", future.await); 50 | } 51 | } 52 | 53 | #[tokio::main] 54 | async fn main() { 55 | let matches = Command::new("producer example") 56 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 57 | .about("Simple command line producer") 58 | .arg( 59 | Arg::new("brokers") 60 | .short('b') 61 | .long("brokers") 62 | .help("Broker list in kafka format") 63 | .default_value("localhost:9092"), 64 | ) 65 | .arg( 66 | Arg::new("log-conf") 67 | .long("log-conf") 68 | .help("Configure the logging format (example: 'rdkafka=trace')"), 69 | ) 70 | .arg( 71 | Arg::new("topic") 72 | .short('t') 73 | .long("topic") 74 | .help("Destination topic") 75 | .required(true), 76 | ) 77 | .get_matches(); 78 | 79 | setup_logger(true, matches.get_one("log-conf")); 80 | 81 | let (version_n, version_s) = get_rdkafka_version(); 82 | info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s); 83 | 84 | let topic = matches.get_one::("topic").unwrap(); 85 | let brokers = matches.get_one::("brokers").unwrap(); 86 | 87 | produce(brokers, topic).await; 88 | } 89 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | env: 10 | rust_version: 1.85 11 | 12 | jobs: 13 | lint: 14 | runs-on: ubuntu-24.04 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: dtolnay/rust-toolchain@stable 18 | with: 19 | toolchain: ${{ env.rust_version }} 20 | components: rustfmt, clippy 21 | - run: cargo fmt -- --check 22 | - run: cargo clippy -- -Dwarnings 23 | - run: cargo clippy --tests -- -Dwarnings 24 | - run: cargo test --doc 25 | 26 | check: 27 | strategy: 28 | matrix: 29 | include: 30 | - os: macos-14.0 31 | - os: windows-2025 32 | features: cmake-build,libz-static,curl-static 33 | rdkafka-sys-features: cmake-build,libz-static,curl-static 34 | - os: ubuntu-24.04 35 | features: tracing 36 | - os: ubuntu-24.04 37 | features: cmake-build,ssl-vendored,gssapi-vendored,libz-static,curl-static,zstd 38 | rdkafka-sys-features: cmake-build,ssl-vendored,gssapi-vendored,libz-static,curl-static,zstd 39 | runs-on: ${{ matrix.os }} 40 | steps: 41 | - uses: actions/checkout@v4 42 | - uses: lukka/get-cmake@latest 43 | - uses: dtolnay/rust-toolchain@stable 44 | with: 45 | toolchain: ${{ env.rust_version }} 46 | - run: cargo build --all-targets --verbose --features "${{ matrix.features }}" 47 | - run: cd rdkafka-sys && cargo test --features "${{ matrix.rdkafka-sys-features }}" 48 | 49 | # Use the `minimal-versions` resolver to ensure we're not claiming to support 50 | # an older version of a dependency than we actually do. 51 | check-minimal-versions: 52 | runs-on: ubuntu-24.04 53 | steps: 54 | - uses: actions/checkout@v4 55 | - uses: dtolnay/rust-toolchain@stable 56 | with: 57 | # The version of this toolchain doesn't matter much. It's only used to 58 | # generate the minimal-versions lockfile, not to actually run `cargo 59 | # check`. 60 | toolchain: nightly 61 | components: rustfmt, clippy 62 | - uses: dtolnay/rust-toolchain@stable 63 | with: 64 | toolchain: ${{ env.rust_version }} 65 | - run: rustup default ${{ env.rust_version }} 66 | - run: cargo +nightly -Z minimal-versions generate-lockfile 67 | # Default features and features that require optional dependencies should be 68 | # explicitly checked. 
69 | - run: cargo check --features libz,tokio,tracing 70 | 71 | test: 72 | strategy: 73 | fail-fast: false 74 | # The test suite doesn't support concurrent runs. 75 | max-parallel: 1 76 | matrix: 77 | include: 78 | - kafka-version: "4.0" 79 | - kafka-version: "3.9" 80 | - kafka-version: "3.8" 81 | - kafka-version: "3.7" 82 | runs-on: ubuntu-24.04 83 | steps: 84 | - uses: actions/checkout@v4 85 | - uses: lukka/get-cmake@latest 86 | - uses: dtolnay/rust-toolchain@stable 87 | with: 88 | toolchain: ${{ env.rust_version }} 89 | - run: sudo apt-get update 90 | - run: sudo apt-get install -y libcurl4-openssl-dev 91 | # - run: sudo apt-get install -qy valgrind # Valgrind currently disabled in testing 92 | - run: ./test_suite.sh 93 | env: 94 | KAFKA_VERSION: ${{ matrix.kafka-version }} 95 | TERM: xterm-256color 96 | -------------------------------------------------------------------------------- /rdkafka-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rdkafka-sys" 3 | version = "4.9.0+2.12.1" 4 | authors = ["Federico Giraud "] 5 | build = "build.rs" 6 | links = "rdkafka" 7 | repository = "https://github.com/fede1024/rust-rdkafka" 8 | license = "MIT" 9 | description = "Native bindings to the librdkafka library" 10 | keywords = ["kafka", "rdkafka"] 11 | categories = ["external-ffi-bindings"] 12 | edition = "2021" 13 | rust-version = "1.74" 14 | 15 | [dependencies] 16 | num_enum = "0.7.3" 17 | libc = "0.2.172" 18 | openssl-sys = { version = "0.9.108", optional = true } 19 | libz-sys = { version = "1.1.22", optional = true } 20 | curl-sys = { version = "0.4.80", optional = true } 21 | zstd-sys = { version = "2.0.15", optional = true } 22 | lz4-sys = { version = "1.11.1", optional = true } 23 | sasl2-sys = { version = "0.1.22", optional = true } 24 | 25 | [build-dependencies] 26 | pkg-config = "0.3.32" 27 | cmake = { version = "0.1.54", optional = true } 28 | 29 | [lib] 30 | name = "rdkafka_sys" 31 | path = "src/lib.rs" 32 | 33 | [features] 34 | default = ["libz"] 35 | 36 | # Build librdkafka with its CMake build system, rather than its default "mklove" 37 | # build system. This feature requires that the system has CMake installed. 38 | cmake-build = ["cmake"] 39 | 40 | # Deprecated alias for the `cmake-build` feature. 41 | cmake_build = ["cmake-build"] 42 | 43 | # Dynamically link the system's librdkafka, rather than building and linking the 44 | # bundled version statically. This feature requires that the system has 45 | # librdkafka installed somewhere where pkg-config can find it. 46 | dynamic-linking = [] 47 | 48 | # Deprecated alias for the `dynamic-linking` feature. 49 | dynamic_linking = ["dynamic-linking"] 50 | 51 | # Enable SSL support. 52 | ssl = ["openssl-sys"] 53 | 54 | # Build and link against the version of OpenSSL bundled with the openssl-sys 55 | # crate. 56 | ssl-vendored = ["ssl", "openssl-sys/vendored"] 57 | 58 | # Enable SASL GSSAPI support with Cyrus libsasl2. 59 | gssapi = ["ssl", "sasl2-sys"] 60 | 61 | # Build and link against the libsasl2 bundled with the sasl2-sys crate. 62 | gssapi-vendored = ["gssapi", "sasl2-sys/gssapi-vendored"] 63 | 64 | # Deprecated alias for the `gssapi` feature. 65 | sasl = ["gssapi"] 66 | 67 | # Enable support for libz compression. 68 | libz = ["libz-sys"] 69 | 70 | # Link against the version of libz bundled with the libz-sys crate, rather than 71 | # the system's version. 72 | libz-static = ["libz", "libz-sys/static"] 73 | 74 | # Enable support for HTTP client via curl. 
75 | curl = ["curl-sys"] 76 | 77 | # Link against the version of curl bundled with the curl-sys crate, rather than 78 | # the system's version. 79 | curl-static = ["curl-sys/static-curl"] 80 | 81 | # Enable support for zstd compression. 82 | zstd = ["zstd-sys"] 83 | 84 | # Link against the system's version of libzstd, rather than the version bundled 85 | # with the zstd-sys crate. 86 | zstd-pkg-config = ["zstd", "zstd-sys/pkg-config"] 87 | 88 | # Link against the lz4 compression library that is bundled with the lz4-sys 89 | # crate. By default, librdkafka builds and statically links against its own 90 | # bundled copy of lz4. 91 | external-lz4 = ["lz4-sys"] 92 | 93 | # Deprecated alias for the `external-lz4` feature. 94 | external_lz4 = ["external-lz4"] 95 | 96 | # Link against precompiled static build of librdkafka 97 | static-linking = [] 98 | 99 | [package.metadata.docs.rs] 100 | # docs.rs doesn't allow writing to ~/.cargo/registry (reasonably), so we have to 101 | # use the CMake build for a proper out-of-tree build. 102 | features = ["cmake_build"] 103 | -------------------------------------------------------------------------------- /examples/runtime_async_std.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::process; 4 | use std::time::Duration; 5 | 6 | use clap::{Arg, Command}; 7 | use futures::stream::StreamExt; 8 | 9 | use rdkafka::config::ClientConfig; 10 | use rdkafka::consumer::{Consumer, StreamConsumer}; 11 | use rdkafka::message::Message; 12 | use rdkafka::producer::{FutureProducer, FutureRecord}; 13 | use rdkafka::util::AsyncRuntime; 14 | 15 | use crate::example_utils::setup_logger; 16 | 17 | mod example_utils; 18 | 19 | pub struct AsyncStdRuntime; 20 | 21 | impl AsyncRuntime for AsyncStdRuntime { 22 | type Delay = Pin + Send>>; 23 | 24 | fn spawn(task: T) 25 | where 26 | T: Future + Send + 'static, 27 | { 28 | async_std::task::spawn(task); 29 | } 30 | 31 | fn delay_for(duration: Duration) -> Self::Delay { 32 | Box::pin(async_std::task::sleep(duration)) 33 | } 34 | } 35 | 36 | #[async_std::main] 37 | async fn main() { 38 | let matches = Command::new("smol runtime example") 39 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 40 | .about("Demonstrates using rust-rdkafka with a custom async runtime") 41 | .arg( 42 | Arg::new("brokers") 43 | .short('b') 44 | .long("brokers") 45 | .help("Broker list in kafka format") 46 | .default_value("localhost:9092"), 47 | ) 48 | .arg(Arg::new("topic").long("topic").help("topic").required(true)) 49 | .arg( 50 | Arg::new("log-conf") 51 | .long("log-conf") 52 | .help("Configure the logging format (example: 'rdkafka=trace')"), 53 | ) 54 | .get_matches(); 55 | 56 | setup_logger(true, matches.get_one("log-conf")); 57 | 58 | let brokers = matches.get_one::("brokers").unwrap(); 59 | let topic = matches.get_one::("topic").unwrap().to_owned(); 60 | 61 | let producer: FutureProducer<_, AsyncStdRuntime> = ClientConfig::new() 62 | .set("bootstrap.servers", brokers) 63 | .set("message.timeout.ms", "5000") 64 | .create() 65 | .expect("Producer creation error"); 66 | 67 | let delivery_status = producer 68 | .send::, _, _>( 69 | FutureRecord::to(&topic).payload("hello from async-std"), 70 | Duration::from_secs(0), 71 | ) 72 | .await; 73 | if let Err((e, _)) = delivery_status { 74 | eprintln!("unable to send message: {}", e); 75 | process::exit(1); 76 | } 77 | 78 | let consumer: StreamConsumer<_, AsyncStdRuntime> = ClientConfig::new() 79 | 
.set("bootstrap.servers", brokers) 80 | .set("session.timeout.ms", "6000") 81 | .set("enable.auto.commit", "false") 82 | .set("auto.offset.reset", "earliest") 83 | .set("group.id", "rust-rdkafka-smol-runtime-example") 84 | .create() 85 | .expect("Consumer creation failed"); 86 | consumer.subscribe(&[&topic]).unwrap(); 87 | 88 | let mut stream = consumer.stream(); 89 | let message = stream.next().await; 90 | match message { 91 | Some(Ok(message)) => println!( 92 | "Received message: {}", 93 | match message.payload_view::() { 94 | None => "", 95 | Some(Ok(s)) => s, 96 | Some(Err(_)) => "", 97 | } 98 | ), 99 | Some(Err(e)) => { 100 | eprintln!("Error receiving message: {}", e); 101 | process::exit(1); 102 | } 103 | None => { 104 | eprintln!("Consumer unexpectedly returned no messages"); 105 | process::exit(1); 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /examples/roundtrip.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryInto; 2 | use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; 3 | 4 | use clap::{Arg, Command}; 5 | use hdrhistogram::Histogram; 6 | 7 | use rdkafka::config::ClientConfig; 8 | use rdkafka::consumer::{Consumer, StreamConsumer}; 9 | use rdkafka::message::Message; 10 | use rdkafka::producer::{FutureProducer, FutureRecord}; 11 | 12 | use crate::example_utils::setup_logger; 13 | 14 | mod example_utils; 15 | 16 | #[tokio::main] 17 | async fn main() { 18 | let matches = Command::new("Roundtrip example") 19 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 20 | .about("Measures latency between producer and consumer") 21 | .arg( 22 | Arg::new("brokers") 23 | .short('b') 24 | .long("brokers") 25 | .help("Broker list in kafka format") 26 | .default_value("localhost:9092"), 27 | ) 28 | .arg(Arg::new("topic").long("topic").help("topic").required(true)) 29 | .arg( 30 | Arg::new("log-conf") 31 | .long("log-conf") 32 | .help("Configure the logging format (example: 'rdkafka=trace')"), 33 | ) 34 | .get_matches(); 35 | 36 | setup_logger(true, matches.get_one("log-conf")); 37 | 38 | let brokers = matches.get_one::("brokers").unwrap(); 39 | let topic = matches.get_one::("topic").unwrap().to_owned(); 40 | 41 | let producer: FutureProducer = ClientConfig::new() 42 | .set("bootstrap.servers", brokers) 43 | .set("message.timeout.ms", "5000") 44 | .create() 45 | .expect("Producer creation error"); 46 | 47 | let consumer: StreamConsumer = ClientConfig::new() 48 | .set("bootstrap.servers", brokers) 49 | .set("session.timeout.ms", "6000") 50 | .set("enable.auto.commit", "false") 51 | .set("group.id", "rust-rdkafka-roundtrip-example") 52 | .create() 53 | .expect("Consumer creation failed"); 54 | consumer.subscribe(&[&topic]).unwrap(); 55 | 56 | tokio::spawn(async move { 57 | let mut i = 0_usize; 58 | loop { 59 | producer 60 | .send_result( 61 | FutureRecord::to(&topic) 62 | .key(&i.to_string()) 63 | .payload("dummy") 64 | .timestamp(now()), 65 | ) 66 | .unwrap() 67 | .await 68 | .unwrap() 69 | .unwrap(); 70 | i += 1; 71 | } 72 | }); 73 | 74 | let start = Instant::now(); 75 | let mut latencies = Histogram::::new(5).unwrap(); 76 | println!("Warming up for 10s..."); 77 | loop { 78 | let message = consumer.recv().await.unwrap(); 79 | let then = message.timestamp().to_millis().unwrap(); 80 | if start.elapsed() < Duration::from_secs(10) { 81 | // Warming up. 
82 | } else if start.elapsed() < Duration::from_secs(20) { 83 | if latencies.is_empty() { 84 | println!("Recording for 10s..."); 85 | } 86 | latencies += (now() - then) as u64; 87 | } else { 88 | break; 89 | } 90 | } 91 | 92 | println!("measurements: {}", latencies.len()); 93 | println!("mean latency: {}ms", latencies.mean()); 94 | println!("p50 latency: {}ms", latencies.value_at_quantile(0.50)); 95 | println!("p90 latency: {}ms", latencies.value_at_quantile(0.90)); 96 | println!("p99 latency: {}ms", latencies.value_at_quantile(0.99)); 97 | } 98 | 99 | fn now() -> i64 { 100 | SystemTime::now() 101 | .duration_since(UNIX_EPOCH) 102 | .unwrap() 103 | .as_millis() 104 | .try_into() 105 | .unwrap() 106 | } 107 | -------------------------------------------------------------------------------- /examples/runtime_smol.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::process; 3 | use std::time::{Duration, Instant}; 4 | 5 | use clap::{Arg, Command}; 6 | use futures::future::{self, FutureExt}; 7 | use futures::stream::StreamExt; 8 | 9 | use rdkafka::config::ClientConfig; 10 | use rdkafka::consumer::{Consumer, StreamConsumer}; 11 | use rdkafka::message::Message; 12 | use rdkafka::producer::{FutureProducer, FutureRecord}; 13 | use rdkafka::util::AsyncRuntime; 14 | 15 | use crate::example_utils::setup_logger; 16 | 17 | mod example_utils; 18 | 19 | pub struct SmolRuntime; 20 | 21 | impl AsyncRuntime for SmolRuntime { 22 | type Delay = future::Map; 23 | 24 | fn spawn(task: T) 25 | where 26 | T: Future + Send + 'static, 27 | { 28 | smol::spawn(task).detach() 29 | } 30 | 31 | fn delay_for(duration: Duration) -> Self::Delay { 32 | FutureExt::map(smol::Timer::after(duration), |_| ()) 33 | } 34 | } 35 | 36 | fn main() { 37 | let matches = Command::new("smol runtime example") 38 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 39 | .about("Demonstrates using rust-rdkafka with a custom async runtime") 40 | .arg( 41 | Arg::new("brokers") 42 | .short('b') 43 | .long("brokers") 44 | .help("Broker list in kafka format") 45 | .default_value("localhost:9092"), 46 | ) 47 | .arg(Arg::new("topic").long("topic").help("topic").required(true)) 48 | .arg( 49 | Arg::new("log-conf") 50 | .long("log-conf") 51 | .help("Configure the logging format (example: 'rdkafka=trace')"), 52 | ) 53 | .get_matches(); 54 | 55 | setup_logger(true, matches.get_one("log-conf")); 56 | 57 | let brokers = matches.get_one::("brokers").unwrap(); 58 | let topic = matches.get_one::("topic").unwrap().to_owned(); 59 | 60 | smol::block_on(async { 61 | let producer: FutureProducer<_, SmolRuntime> = ClientConfig::new() 62 | .set("bootstrap.servers", brokers) 63 | .set("message.timeout.ms", "5000") 64 | .create() 65 | .expect("Producer creation error"); 66 | 67 | let delivery_status = producer 68 | .send::, _, _>( 69 | FutureRecord::to(&topic).payload("hello from smol"), 70 | Duration::from_secs(0), 71 | ) 72 | .await; 73 | if let Err((e, _)) = delivery_status { 74 | eprintln!("unable to send message: {}", e); 75 | process::exit(1); 76 | } 77 | 78 | let consumer: StreamConsumer<_, SmolRuntime> = ClientConfig::new() 79 | .set("bootstrap.servers", brokers) 80 | .set("session.timeout.ms", "6000") 81 | .set("enable.auto.commit", "false") 82 | .set("auto.offset.reset", "earliest") 83 | .set("group.id", "rust-rdkafka-smol-runtime-example") 84 | .create() 85 | .expect("Consumer creation failed"); 86 | consumer.subscribe(&[&topic]).unwrap(); 87 | 88 | let mut stream = 
consumer.stream(); 89 | let message = stream.next().await; 90 | match message { 91 | Some(Ok(message)) => println!( 92 | "Received message: {}", 93 | match message.payload_view::() { 94 | None => "", 95 | Some(Ok(s)) => s, 96 | Some(Err(_)) => "", 97 | } 98 | ), 99 | Some(Err(e)) => { 100 | eprintln!("Error receiving message: {}", e); 101 | process::exit(1); 102 | } 103 | None => { 104 | eprintln!("Consumer unexpectedly returned no messages"); 105 | process::exit(1); 106 | } 107 | } 108 | }) 109 | } 110 | -------------------------------------------------------------------------------- /examples/metadata.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use clap::{Arg, Command}; 4 | use log::trace; 5 | 6 | use rdkafka::config::ClientConfig; 7 | use rdkafka::consumer::{BaseConsumer, Consumer}; 8 | 9 | use crate::example_utils::setup_logger; 10 | 11 | mod example_utils; 12 | 13 | fn print_metadata( 14 | brokers: &String, 15 | topic: Option<&String>, 16 | timeout: Duration, 17 | fetch_offsets: bool, 18 | ) { 19 | let consumer: BaseConsumer = ClientConfig::new() 20 | .set("bootstrap.servers", brokers) 21 | .create() 22 | .expect("Consumer creation failed"); 23 | 24 | trace!("Consumer created"); 25 | 26 | let metadata = consumer 27 | .fetch_metadata(topic.map(|x| x.as_str()), timeout) 28 | .expect("Failed to fetch metadata"); 29 | 30 | let mut message_count = 0; 31 | 32 | println!("Cluster information:"); 33 | println!(" Broker count: {}", metadata.brokers().len()); 34 | println!(" Topics count: {}", metadata.topics().len()); 35 | println!(" Metadata broker name: {}", metadata.orig_broker_name()); 36 | println!(" Metadata broker id: {}\n", metadata.orig_broker_id()); 37 | 38 | println!("Brokers:"); 39 | for broker in metadata.brokers() { 40 | println!( 41 | " Id: {} Host: {}:{} ", 42 | broker.id(), 43 | broker.host(), 44 | broker.port() 45 | ); 46 | } 47 | 48 | println!("\nTopics:"); 49 | for topic in metadata.topics() { 50 | println!(" Topic: {} Err: {:?}", topic.name(), topic.error()); 51 | for partition in topic.partitions() { 52 | println!( 53 | " Partition: {} Leader: {} Replicas: {:?} ISR: {:?} Err: {:?}", 54 | partition.id(), 55 | partition.leader(), 56 | partition.replicas(), 57 | partition.isr(), 58 | partition.error() 59 | ); 60 | if fetch_offsets { 61 | let (low, high) = consumer 62 | .fetch_watermarks(topic.name(), partition.id(), Duration::from_secs(1)) 63 | .unwrap_or((-1, -1)); 64 | println!( 65 | " Low watermark: {} High watermark: {} (difference: {})", 66 | low, 67 | high, 68 | high - low 69 | ); 70 | message_count += high - low; 71 | } 72 | } 73 | if fetch_offsets { 74 | println!(" Total message count: {}", message_count); 75 | } 76 | } 77 | } 78 | 79 | fn main() { 80 | let matches = Command::new("metadata fetch example") 81 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 82 | .about("Fetch and print the cluster metadata") 83 | .arg( 84 | Arg::new("brokers") 85 | .short('b') 86 | .long("brokers") 87 | .help("Broker list in kafka format") 88 | .num_args(1) 89 | .default_value("localhost:9092"), 90 | ) 91 | .arg( 92 | Arg::new("offsets") 93 | .long("offsets") 94 | .help("Enables offset fetching"), 95 | ) 96 | .arg( 97 | Arg::new("topic") 98 | .long("topic") 99 | .help("Only fetch the metadata of the specified topic") 100 | .num_args(1), 101 | ) 102 | .arg( 103 | Arg::new("log-conf") 104 | .long("log-conf") 105 | .default_value("trace") 106 | .help("Configure the logging format (example: 
'rdkafka=trace')") 107 | .num_args(1), 108 | ) 109 | .arg( 110 | Arg::new("timeout") 111 | .long("timeout") 112 | .value_parser(clap::value_parser!(u64)) 113 | .help("Metadata fetch timeout in milliseconds") 114 | .num_args(1) 115 | .default_value("60000"), 116 | ) 117 | .get_matches(); 118 | 119 | setup_logger(true, matches.get_one::("log-conf")); 120 | 121 | let brokers = matches.get_one("brokers").unwrap(); 122 | let timeout = matches.get_one::("timeout").unwrap(); 123 | let topic = matches.get_one::("topic"); 124 | let fetch_offsets = matches.contains_id("offsets"); 125 | 126 | print_metadata( 127 | brokers, 128 | topic, 129 | Duration::from_millis(*timeout), 130 | fetch_offsets, 131 | ); 132 | } 133 | -------------------------------------------------------------------------------- /src/groups.rs: -------------------------------------------------------------------------------- 1 | //! Group membership API. 2 | 3 | use std::ffi::CStr; 4 | use std::fmt; 5 | use std::slice; 6 | 7 | use rdkafka_sys as rdsys; 8 | use rdkafka_sys::types::*; 9 | 10 | use crate::util::{KafkaDrop, NativePtr}; 11 | 12 | /// Group member information container. 13 | pub struct GroupMemberInfo(RDKafkaGroupMemberInfo); 14 | 15 | impl GroupMemberInfo { 16 | /// Returns the ID of the member. 17 | pub fn id(&self) -> &str { 18 | unsafe { 19 | CStr::from_ptr(self.0.member_id) 20 | .to_str() 21 | .expect("Member id is not a valid UTF-8 string") 22 | } 23 | } 24 | 25 | /// Returns the client ID of the member. 26 | pub fn client_id(&self) -> &str { 27 | unsafe { 28 | CStr::from_ptr(self.0.client_id) 29 | .to_str() 30 | .expect("Client id is not a valid UTF-8 string") 31 | } 32 | } 33 | 34 | /// Return the client host of the member. 35 | pub fn client_host(&self) -> &str { 36 | unsafe { 37 | CStr::from_ptr(self.0.client_host) 38 | .to_str() 39 | .expect("Member host is not a valid UTF-8 string") 40 | } 41 | } 42 | 43 | /// Return the metadata of the member. 44 | pub fn metadata(&self) -> Option<&[u8]> { 45 | unsafe { 46 | if self.0.member_metadata.is_null() { 47 | None 48 | } else { 49 | Some(slice::from_raw_parts::( 50 | self.0.member_metadata as *const u8, 51 | self.0.member_metadata_size as usize, 52 | )) 53 | } 54 | } 55 | } 56 | 57 | /// Return the partition assignment of the member. 58 | pub fn assignment(&self) -> Option<&[u8]> { 59 | unsafe { 60 | if self.0.member_assignment.is_null() { 61 | None 62 | } else { 63 | Some(slice::from_raw_parts::( 64 | self.0.member_assignment as *const u8, 65 | self.0.member_assignment_size as usize, 66 | )) 67 | } 68 | } 69 | } 70 | } 71 | 72 | /// Group information container. 73 | pub struct GroupInfo(RDKafkaGroupInfo); 74 | 75 | impl GroupInfo { 76 | /// Returns the name of the group. 77 | pub fn name(&self) -> &str { 78 | unsafe { 79 | CStr::from_ptr(self.0.group) 80 | .to_str() 81 | .expect("Group name is not a valid UTF-8 string") 82 | } 83 | } 84 | 85 | /// Returns the members of the group. 86 | pub fn members(&self) -> &[GroupMemberInfo] { 87 | if self.0.members.is_null() { 88 | return &[]; 89 | } 90 | unsafe { 91 | slice::from_raw_parts( 92 | self.0.members as *const GroupMemberInfo, 93 | self.0.member_cnt as usize, 94 | ) 95 | } 96 | } 97 | 98 | /// Returns the state of the group. 99 | pub fn state(&self) -> &str { 100 | unsafe { 101 | CStr::from_ptr(self.0.state) 102 | .to_str() 103 | .expect("State is not a valid UTF-8 string") 104 | } 105 | } 106 | 107 | /// Returns the protocol of the group. 
108 | pub fn protocol(&self) -> &str { 109 | unsafe { 110 | CStr::from_ptr(self.0.protocol) 111 | .to_str() 112 | .expect("Protocol name is not a valid UTF-8 string") 113 | } 114 | } 115 | 116 | /// Returns the protocol type of the group. 117 | pub fn protocol_type(&self) -> &str { 118 | unsafe { 119 | CStr::from_ptr(self.0.protocol_type) 120 | .to_str() 121 | .expect("Protocol type is not a valid UTF-8 string") 122 | } 123 | } 124 | } 125 | 126 | impl fmt::Debug for GroupInfo { 127 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 128 | write!(f, "{}", self.name()) 129 | } 130 | } 131 | 132 | /// List of groups. 133 | /// 134 | /// This structure wraps the pointer returned by rdkafka-sys, and deallocates 135 | /// all the native resources when dropped. 136 | pub struct GroupList(NativePtr); 137 | 138 | unsafe impl KafkaDrop for RDKafkaGroupList { 139 | const TYPE: &'static str = "group"; 140 | const DROP: unsafe extern "C" fn(*mut Self) = drop_group_list; 141 | } 142 | 143 | unsafe extern "C" fn drop_group_list(ptr: *mut RDKafkaGroupList) { 144 | rdsys::rd_kafka_group_list_destroy(ptr as *const _) 145 | } 146 | 147 | impl GroupList { 148 | /// Creates a new group list given a pointer to the native rdkafka-sys group list structure. 149 | pub(crate) unsafe fn from_ptr(ptr: *const RDKafkaGroupList) -> GroupList { 150 | GroupList(NativePtr::from_ptr(ptr as *mut _).unwrap()) 151 | } 152 | 153 | /// Returns all the groups in the list. 154 | pub fn groups(&self) -> &[GroupInfo] { 155 | unsafe { 156 | slice::from_raw_parts(self.0.groups as *const GroupInfo, self.0.group_cnt as usize) 157 | } 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /rdkafka-sys/README.md: -------------------------------------------------------------------------------- 1 | # rdkafka-sys 2 | 3 | Low level bindings to [librdkafka](https://github.com/edenhill/librdkafka), 4 | a C library for the [Apache Kafka] protocol with producer, consumer, and 5 | admin clients. 6 | 7 | For a safe wrapper, see the [rdkafka] crate. 8 | 9 | ## Version 10 | 11 | The rdkafka-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where 12 | `X.Y.Z` is the version of this crate and follows SemVer conventions, while 13 | `RX.RY.RZ` is the version of the bundled librdkafka. 14 | 15 | Note that versions before v2.0.0+1.4.2 did not follow this convention, and 16 | instead directly correspond to the bundled librdkafka version. 17 | 18 | ## Build 19 | 20 | ### Known issues 21 | 22 | * When any of librdkafka's optional dependencies are enabled, like libz or 23 | OpenSSL, if you have multiple versions of that library installed upon your 24 | system, librdkafka's build system may disagree with Cargo about which 25 | version of the library to use! **This can result in subtly broken 26 | builds,** if librdkafka compiles against the headers for one version but 27 | Cargo links against a different version. For complete confidence when 28 | building release binaries, use an environment like a Docker container or a 29 | chroot jail where you can guarantee that only one version of each 30 | dependency is present. The current design of Cargo unfortunately makes 31 | this nearly impossible to fix. 32 | 33 | * Windows is only supported when using the CMake build system via the 34 | `cmake-build` Cargo feature. 35 | 36 | ### Features 37 | 38 | By default a submodule with the librdkafka sources will be used to compile 39 | and statically link the library. 
40 | 41 | The **`dynamic-linking`** feature can be used to link rdkafka to a locally 42 | installed version of librdkafka: if the feature is enabled, the build script 43 | will use `pkg-config` to check the version of the library installed in the 44 | system, and it will configure the compiler to dynamically link against it. 45 | The system version of librdkafka must exactly match the version of 46 | librdkafka bundled with this crate. 47 | 48 | The **`static-linking`** feature can be used to link rdkafka to a locally 49 | built version of librdkafka: if the feature is enabled, the build script 50 | will look for the `DEP_LIBRDKAFKA_STATIC_ROOT` environment variable 51 | and statically link against the library it points to. 52 | 53 | The **`cmake-build`** feature builds librdkafka with its [CMake] build 54 | system, rather than its default [mklove]-based build system. This feature 55 | requires that CMake is installed on the build machine. 56 | 57 | The following features directly correspond to librdkafka features (i.e., 58 | flags you would pass to `configure` if you were compiling manually). 59 | 60 | * The **`ssl`** feature enables SSL support. By default, the system's 61 | OpenSSL library is dynamically linked, but static linking of the version 62 | bundled with the [openssl-sys] crate can be requested with the 63 | `ssl-vendored` feature. 64 | * The **`gssapi`** feature enables SASL GSSAPI support with Cyrus 65 | libsasl2. By default, the system's libsasl2 is dynamically linked, but 66 | static linking of the version bundled with the [sasl2-sys] crate can be 67 | requested with the `gssapi-vendored` feature. 68 | * The **`libz`** feature enables support for zlib compression. This 69 | feature is enabled by default. By default, the system's libz is 70 | dynamically linked, but static linking of the version bundled with the 71 | [libz-sys] crate can be requested with the `libz-static` feature. 72 | * The **`curl`** feature enables the HTTP client via curl. By default, the 73 | system's curl is dynamically linked, but static linking of the version 74 | bundled with the [curl-sys] crate can be requested with the 75 | `curl-static` feature. 76 | * The **`zstd`** feature enables support for ZSTD compression. By default, 77 | this builds and statically links the version bundled with the [zstd-sys] 78 | crate, but dynamic linking of the system's version can be requested with 79 | the `zstd-pkg-config` feature. 80 | * The **`external-lz4`** feature statically links against the copy of 81 | liblz4 bundled with the [lz4-sys] crate. By default, librdkafka 82 | statically links against its own bundled version of liblz4. Due to 83 | limitations with lz4-sys, it is not yet possible to dynamically link 84 | against the system's version of liblz4. 85 | 86 | All features are disabled by default unless noted otherwise above. The build 87 | process is defined in [`build.rs`].
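Because `dynamic-linking` requires the system librdkafka to match the bundled version exactly, it can help to confirm at runtime which librdkafka build was actually linked. A minimal sketch, using `get_rdkafka_version` from the higher-level [rdkafka] wrapper (the same call used by the examples in this repository):

```rust
use rdkafka::util::get_rdkafka_version;

fn main() {
    // Reports the librdkafka version linked at build time; under
    // `dynamic-linking` this must match the system library.
    let (version_int, version_str) = get_rdkafka_version();
    println!("linked librdkafka: {} (0x{:08x})", version_str, version_int);
}
```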
88 | 89 | [`build.rs`]: https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys/build.rs 90 | [Apache Kafka]: https://kafka.apache.org 91 | [CMake]: https://cmake.org 92 | [libz-sys]: https://crates.io/crates/libz-sys 93 | [curl-sys]: https://crates.io/crates/curl-sys 94 | [lz4-sys]: https://crates.io/crates/lz4-sys 95 | [mklove]: https://github.com/edenhill/mklove 96 | [openssl-sys]: https://crates.io/crates/openssl-sys 97 | [rdkafka]: https://docs.rs/rdkafka 98 | [sasl2-sys]: https://docs.rs/sasl2-sys 99 | [zstd-sys]: https://crates.io/crates/zstd-sys 100 | -------------------------------------------------------------------------------- /examples/simple_consumer.rs: -------------------------------------------------------------------------------- 1 | use clap::{Arg, Command}; 2 | use log::{info, warn}; 3 | 4 | use rdkafka::client::ClientContext; 5 | use rdkafka::config::{ClientConfig, RDKafkaLogLevel}; 6 | use rdkafka::consumer::stream_consumer::StreamConsumer; 7 | use rdkafka::consumer::{BaseConsumer, CommitMode, Consumer, ConsumerContext, Rebalance}; 8 | use rdkafka::error::KafkaResult; 9 | use rdkafka::message::{Headers, Message}; 10 | use rdkafka::topic_partition_list::TopicPartitionList; 11 | use rdkafka::util::get_rdkafka_version; 12 | 13 | use crate::example_utils::setup_logger; 14 | 15 | mod example_utils; 16 | 17 | // A context can be used to change the behavior of producers and consumers by adding callbacks 18 | // that will be executed by librdkafka. 19 | // This particular context sets up custom callbacks to log rebalancing events. 20 | struct CustomContext; 21 | 22 | impl ClientContext for CustomContext {} 23 | 24 | impl ConsumerContext for CustomContext { 25 | fn pre_rebalance(&self, _: &BaseConsumer, rebalance: &Rebalance) { 26 | info!("Pre rebalance {:?}", rebalance); 27 | } 28 | 29 | fn post_rebalance(&self, _: &BaseConsumer, rebalance: &Rebalance) { 30 | info!("Post rebalance {:?}", rebalance); 31 | } 32 | 33 | fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) { 34 | info!("Committing offsets: {:?}", result); 35 | } 36 | } 37 | 38 | // A type alias with your custom consumer can be created for convenience. 
39 | type LoggingConsumer = StreamConsumer<CustomContext>; 40 | 41 | async fn consume_and_print( 42 | brokers: &str, 43 | group_id: &str, 44 | topics: &[&str], 45 | assignor: Option<&String>, 46 | ) { 47 | let context = CustomContext; 48 | 49 | let mut config = ClientConfig::new(); 50 | 51 | config 52 | .set("group.id", group_id) 53 | .set("bootstrap.servers", brokers) 54 | .set("enable.partition.eof", "false") 55 | .set("session.timeout.ms", "6000") 56 | .set("enable.auto.commit", "true") 57 | //.set("statistics.interval.ms", "30000") 58 | //.set("auto.offset.reset", "smallest") 59 | .set_log_level(RDKafkaLogLevel::Debug); 60 | 61 | if let Some(assignor) = assignor { 62 | config 63 | .set("group.remote.assignor", assignor) 64 | .set("group.protocol", "consumer") 65 | .remove("session.timeout.ms"); 66 | } 67 | 68 | let consumer: LoggingConsumer = config 69 | .create_with_context(context) 70 | .expect("Consumer creation failed"); 71 | 72 | consumer 73 | .subscribe(topics) 74 | .expect("Can't subscribe to specified topics"); 75 | 76 | loop { 77 | match consumer.recv().await { 78 | Err(e) => warn!("Kafka error: {}", e), 79 | Ok(m) => { 80 | let payload = match m.payload_view::<str>() { 81 | None => "", 82 | Some(Ok(s)) => s, 83 | Some(Err(e)) => { 84 | warn!("Error while deserializing message payload: {:?}", e); 85 | "" 86 | } 87 | }; 88 | info!( 89 | "key: '{:?}', payload: '{}', topic: {}, partition: {}, offset: {}, timestamp: {:?}", 90 | m.key(), 91 | payload, 92 | m.topic(), 93 | m.partition(), 94 | m.offset(), 95 | m.timestamp() 96 | ); 97 | if let Some(headers) = m.headers() { 98 | for header in headers.iter() { 99 | info!(" Header {:#?}: {:?}", header.key, header.value); 100 | } 101 | } 102 | consumer.commit_message(&m, CommitMode::Async).unwrap(); 103 | } 104 | }; 105 | } 106 | } 107 | 108 | #[tokio::main] 109 | async fn main() { 110 | let matches = Command::new("consumer example") 111 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 112 | .about("Simple command line consumer") 113 | .arg( 114 | Arg::new("brokers") 115 | .short('b') 116 | .long("brokers") 117 | .help("Broker list in kafka format") 118 | .default_value("localhost:9092"), 119 | ) 120 | .arg( 121 | Arg::new("group-id") 122 | .short('g') 123 | .long("group-id") 124 | .help("Consumer group id") 125 | .default_value("example_consumer_group_id"), 126 | ) 127 | .arg( 128 | Arg::new("log-conf") 129 | .long("log-conf") 130 | .help("Configure the logging format (example: 'rdkafka=trace')"), 131 | ) 132 | .arg( 133 | Arg::new("topics") 134 | .short('t') 135 | .long("topics") 136 | .help("Topic list") 137 | .num_args(0..)
138 | .required(true), 139 | ) 140 | .arg( 141 | Arg::new("assignor") 142 | .short('a') 143 | .long("assignor") 144 | .help("Partition assignor"), 145 | ) 146 | .get_matches(); 147 | 148 | setup_logger(true, matches.get_one("log-conf")); 149 | 150 | let (version_n, version_s) = get_rdkafka_version(); 151 | info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s); 152 | 153 | let topics = matches 154 | .get_many::("topics") 155 | .into_iter() 156 | .flatten() 157 | .map(|s| s.as_str()) 158 | .collect::>(); 159 | 160 | let brokers = matches.get_one::("brokers").unwrap(); 161 | let group_id = matches.get_one::("group-id").unwrap(); 162 | let assignor = matches.get_one::("assignor"); 163 | 164 | consume_and_print(brokers, group_id, &topics, assignor).await 165 | } 166 | -------------------------------------------------------------------------------- /rdkafka-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Low level bindings to [librdkafka](https://github.com/edenhill/librdkafka), 2 | //! a C library for the [Apache Kafka] protocol with producer, consumer, and 3 | //! admin clients. 4 | //! 5 | //! For a safe wrapper, see the [rdkafka] crate. 6 | //! 7 | //! ## Version 8 | //! 9 | //! The rdkafka-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where 10 | //! `X.Y.Z` is the version of this crate and follows SemVer conventions, while 11 | //! `RX.RY.RZ` is the version of the bundled librdkafka. 12 | //! 13 | //! Note that versions before v2.0.0+1.4.2 did not follow this convention, and 14 | //! instead directly correspond to the bundled librdkafka version. 15 | //! 16 | //! ## Build 17 | //! 18 | //! ### Known issues 19 | //! 20 | //! * When any of librdkafka's optional dependencies are enabled, like libz or 21 | //! OpenSSL, if you have multiple versions of that library installed upon your 22 | //! system, librdkafka's build system may disagree with Cargo about which 23 | //! version of the library to use! **This can result in subtly broken 24 | //! builds,** if librdkafka compiles against the headers for one version but 25 | //! Cargo links against a different version. For complete confidence when 26 | //! building release binaries, use an environment like a Docker container or a 27 | //! chroot jail where you can guarantee that only one version of each 28 | //! dependency is present. The current design of Cargo unfortunately makes 29 | //! this nearly impossible to fix. 30 | //! 31 | //! * Windows is only supported when using the CMake build system via the 32 | //! `cmake-build` Cargo feature. 33 | //! 34 | //! ### Features 35 | //! 36 | //! By default a submodule with the librdkafka sources will be used to compile 37 | //! and statically link the library. 38 | //! 39 | //! The **`dynamic-linking`** feature can be used to link rdkafka to a locally 40 | //! installed version of librdkafka: if the feature is enabled, the build script 41 | //! will use `pkg-config` to check the version of the library installed in the 42 | //! system, and it will configure the compiler to dynamically link against it. 43 | //! The system version of librdkafka must exactly match the version of 44 | //! librdkafka bundled with this crate. 45 | //! 46 | //! The **`cmake-build`** feature builds librdkafka with its [CMake] build 47 | //! system, rather than its default [mklove]-based build system. This feature 48 | //! requires that CMake is installed on the build machine. 49 | //! 50 | //! 
The following features directly correspond to librdkafka features (i.e., 51 | //! flags you would pass to `configure` if you were compiling manually). 52 | //! 53 | //! * The **`ssl`** feature enables SSL support. By default, the system's 54 | //! OpenSSL library is dynamically linked, but static linking of the version 55 | //! bundled with the [openssl-sys] crate can be requested with the 56 | //! `ssl-vendored` feature. 57 | //! * The **`gssapi`** feature enables SASL GSSAPI support with Cyrus 58 | //! libsasl2. By default the system's libsasl2 is dynamically linked, but 59 | //! static linking of the version bundled with the [sasl2-sys] crate can be 60 | //! requested with the `gssapi-vendored` feature. 61 | //! * The **`libz`** feature enables support for zlib compression. This 62 | //! feature is enabled by default. By default, the system's libz is 63 | //! dynamically linked, but static linking of the version bundled with the 64 | //! [libz-sys] crate can be requested with the `libz-static` feature. 65 | //! * The **`curl`** feature enables the HTTP client via curl. By default, the 66 | //! system's curl is dynamically linked, but static linking of the version 67 | //! bundled with the [curl-sys] create can be requested with the 68 | //! `curl-static` feature. 69 | //! * The **`zstd`** feature enables support for ZSTD compression. By default, 70 | //! this builds and statically links the version bundled with the [zstd-sys] 71 | //! crate, but dynamic linking of the system's version can be requested with 72 | //! the `zstd-pkg-config` feature. 73 | //! * The **`external-lz4`** feature statically links against the copy of 74 | //! liblz4 bundled with the [lz4-sys] crate. By default, librdkafka 75 | //! statically links against its own bundled version of liblz4. Due to 76 | //! limitations with lz4-sys, it is not yet possible to dynamically link 77 | //! against the system's version of liblz4. 78 | //! 79 | //! All features are disabled by default unless noted otherwise above. The build 80 | //! process is defined in [`build.rs`]. 81 | //! 82 | //! [`build.rs`]: https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys/build.rs 83 | //! [Apache Kafka]: https://kafka.apache.org 84 | //! [CMake]: https://cmake.org 85 | //! [libz-sys]: https://crates.io/crates/libz-sys 86 | //! [curl-sys]: https://crates.io/crates/curl-sys 87 | //! [lz4-sys]: https://crates.io/crates/lz4-sys 88 | //! [mklove]: https://github.com/edenhill/mklove 89 | //! [openssl-sys]: https://crates.io/crates/openssl-sys 90 | //! [rdkafka]: https://docs.rs/rdkafka 91 | //! [sasl2-sys]: https://docs.rs/sasl2-sys 92 | //! [zstd-sys]: https://crates.io/crates/zstd-sys 93 | 94 | #[cfg(feature = "openssl-sys")] 95 | extern crate openssl_sys; 96 | 97 | #[cfg(feature = "sasl2-sys")] 98 | extern crate sasl2_sys; 99 | 100 | #[cfg(feature = "libz-sys")] 101 | extern crate libz_sys; 102 | 103 | #[cfg(any(feature = "curl-sys", feature = "curl-static"))] 104 | extern crate curl_sys; 105 | 106 | #[cfg(feature = "zstd-sys")] 107 | extern crate zstd_sys; 108 | 109 | #[cfg(feature = "lz4-sys")] 110 | extern crate lz4_sys; 111 | 112 | /// FFI bindings. 113 | /// 114 | /// These bindings are automatically generated 115 | /// with [bindgen](https://github.com/rust-lang/rust-bindgen). 
116 | #[allow( 117 | non_camel_case_types, 118 | non_upper_case_globals, 119 | non_snake_case, 120 | clippy::all 121 | )] 122 | pub mod bindings; 123 | pub mod helpers; 124 | pub mod types; 125 | 126 | pub use bindings::*; 127 | pub use helpers::*; 128 | pub use types::*; 129 | -------------------------------------------------------------------------------- /examples/at_least_once.rs: -------------------------------------------------------------------------------- 1 | //! This example shows how to achieve at-least-once message delivery semantics. This stream 2 | //! processing code will simply read from an input topic, and duplicate the content to any number of 3 | //! output topics. In case of failure (client or server side), messages might be duplicated, 4 | //! but they won't be lost. 5 | //! 6 | //! The key point is committing the offset only once the message has been fully processed. 7 | //! Note that this technique only works when messages are processed in order. If a message with 8 | //! offset `i+n` is processed and committed before message `i`, in case of failure messages in 9 | //! the interval `[i, i+n)` might be lost. 10 | //! 11 | //! For a simpler example of consumers and producers, check the `simple_consumer` and 12 | //! `simple_producer` files in the example folder. 13 | 14 | use std::time::Duration; 15 | 16 | use clap::{Arg, Command}; 17 | use futures::future; 18 | use log::{info, warn}; 19 | 20 | use rdkafka::client::ClientContext; 21 | use rdkafka::config::{ClientConfig, RDKafkaLogLevel}; 22 | use rdkafka::consumer::stream_consumer::StreamConsumer; 23 | use rdkafka::consumer::{Consumer, ConsumerContext}; 24 | use rdkafka::error::KafkaResult; 25 | use rdkafka::producer::{FutureProducer, FutureRecord}; 26 | use rdkafka::topic_partition_list::TopicPartitionList; 27 | use rdkafka::util::get_rdkafka_version; 28 | use rdkafka::Message; 29 | 30 | use crate::example_utils::setup_logger; 31 | 32 | mod example_utils; 33 | 34 | // A simple context to customize the consumer behavior and print a log line every time 35 | // offsets are committed 36 | struct LoggingConsumerContext; 37 | 38 | impl ClientContext for LoggingConsumerContext {} 39 | 40 | impl ConsumerContext for LoggingConsumerContext { 41 | fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) { 42 | match result { 43 | Ok(_) => info!("Offsets committed successfully"), 44 | Err(e) => warn!("Error while committing offsets: {}", e), 45 | }; 46 | } 47 | } 48 | 49 | // Define a new type for convenience 50 | type LoggingConsumer = StreamConsumer; 51 | 52 | fn create_consumer(brokers: &str, group_id: &str, topic: &str) -> LoggingConsumer { 53 | let context = LoggingConsumerContext; 54 | 55 | let consumer: LoggingConsumer = ClientConfig::new() 56 | .set("group.id", group_id) 57 | .set("bootstrap.servers", brokers) 58 | .set("enable.partition.eof", "false") 59 | .set("session.timeout.ms", "6000") 60 | // Commit automatically every 5 seconds. 61 | .set("enable.auto.commit", "true") 62 | .set("auto.commit.interval.ms", "5000") 63 | // but only commit the offsets explicitly stored via `consumer.store_offset`. 
64 | .set("enable.auto.offset.store", "false") 65 | .set_log_level(RDKafkaLogLevel::Debug) 66 | .create_with_context(context) 67 | .expect("Consumer creation failed"); 68 | 69 | consumer 70 | .subscribe(&[topic]) 71 | .expect("Can't subscribe to specified topic"); 72 | 73 | consumer 74 | } 75 | 76 | fn create_producer(brokers: &str) -> FutureProducer { 77 | ClientConfig::new() 78 | .set("bootstrap.servers", brokers) 79 | .set("queue.buffering.max.ms", "0") // Do not buffer 80 | .create() 81 | .expect("Producer creation failed") 82 | } 83 | 84 | #[tokio::main] 85 | async fn main() { 86 | let matches = Command::new("at-least-once") 87 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 88 | .about("At-least-once delivery example") 89 | .arg( 90 | Arg::new("brokers") 91 | .short('b') 92 | .long("brokers") 93 | .help("Broker list in kafka format") 94 | .default_value("localhost:9092"), 95 | ) 96 | .arg( 97 | Arg::new("group-id") 98 | .short('g') 99 | .long("group-id") 100 | .help("Consumer group id") 101 | .default_value("example_consumer_group_id"), 102 | ) 103 | .arg( 104 | Arg::new("log-conf") 105 | .long("log-conf") 106 | .help("Configure the logging format (example: 'rdkafka=trace')"), 107 | ) 108 | .arg( 109 | Arg::new("input-topic") 110 | .long("input-topic") 111 | .help("Input topic name") 112 | .required(true), 113 | ) 114 | .arg( 115 | Arg::new("output-topics") 116 | .long("output-topics") 117 | .help("Output topics names") 118 | .num_args(0..) 119 | .required(true), 120 | ) 121 | .get_matches(); 122 | 123 | setup_logger(true, matches.get_one("log-conf")); 124 | 125 | let (_, version) = get_rdkafka_version(); 126 | info!("rd_kafka_version: {}", version); 127 | 128 | let input_topic = matches.get_one::("input-topic").unwrap(); 129 | let output_topics = matches 130 | .get_many::("output-topics") 131 | .into_iter() 132 | .flatten() 133 | .collect::>(); 134 | let brokers = matches.get_one::("brokers").unwrap(); 135 | let group_id = matches.get_one::("group-id").unwrap(); 136 | 137 | let consumer = create_consumer(brokers, group_id, input_topic); 138 | let producer = create_producer(brokers); 139 | 140 | loop { 141 | match consumer.recv().await { 142 | Err(e) => { 143 | warn!("Kafka error: {}", e); 144 | } 145 | Ok(m) => { 146 | // Send a copy to the message to every output topic in parallel, and wait for the 147 | // delivery report to be received. 148 | future::try_join_all(output_topics.iter().map(|output_topic| { 149 | let mut record = FutureRecord::to(output_topic); 150 | if let Some(p) = m.payload() { 151 | record = record.payload(p); 152 | } 153 | if let Some(k) = m.key() { 154 | record = record.key(k); 155 | } 156 | producer.send(record, Duration::from_secs(1)) 157 | })) 158 | .await 159 | .expect("Message delivery failed for some topic"); 160 | // Now that the message is completely processed, add it's position to the offset 161 | // store. The actual offset will be committed every 5 seconds. 162 | if let Err(e) = consumer.store_offset_from_message(&m) { 163 | warn!("Error while storing offset: {}", e); 164 | } 165 | } 166 | } 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /tests/test_metadata.rs: -------------------------------------------------------------------------------- 1 | //! Test metadata fetch, group membership, consumer metadata. 
2 | 3 | use std::time::Duration; 4 | 5 | use rdkafka::config::ClientConfig; 6 | use rdkafka::consumer::{Consumer, StreamConsumer}; 7 | use rdkafka::error::KafkaError; 8 | use rdkafka::topic_partition_list::TopicPartitionList; 9 | 10 | use rdkafka_sys::types::RDKafkaConfRes; 11 | 12 | use crate::utils::*; 13 | 14 | mod utils; 15 | 16 | fn create_consumer(group_id: &str) -> StreamConsumer { 17 | ClientConfig::new() 18 | .set("group.id", group_id) 19 | .set("enable.partition.eof", "true") 20 | .set("client.id", "rdkafka_integration_test_client") 21 | .set("bootstrap.servers", get_bootstrap_server().as_str()) 22 | .set("session.timeout.ms", "6000") 23 | .set("debug", "all") 24 | .set("auto.offset.reset", "earliest") 25 | .create() 26 | .expect("Failed to create StreamConsumer") 27 | } 28 | 29 | #[tokio::test] 30 | async fn test_metadata() { 31 | let _r = env_logger::try_init(); 32 | 33 | let topic_name = rand_test_topic("test_metadata"); 34 | populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(0), None).await; 35 | populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(1), None).await; 36 | populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(2), None).await; 37 | let consumer = create_consumer(&rand_test_group()); 38 | 39 | let metadata = consumer 40 | .fetch_metadata(None, Duration::from_secs(5)) 41 | .unwrap(); 42 | let orig_broker_id = metadata.orig_broker_id(); 43 | // The orig_broker_id may be -1 if librdkafka's bootstrap "broker" handles 44 | // the request. 45 | if orig_broker_id != -1 && orig_broker_id != 0 { 46 | panic!( 47 | "metadata.orig_broker_id = {}, not 0 or 1 as expected", 48 | orig_broker_id 49 | ) 50 | } 51 | assert!(!metadata.orig_broker_name().is_empty()); 52 | 53 | let broker_metadata = metadata.brokers(); 54 | assert_eq!(broker_metadata.len(), 1); 55 | assert_eq!(broker_metadata[0].id(), 0); 56 | assert!(!broker_metadata[0].host().is_empty()); 57 | assert_eq!(broker_metadata[0].port(), 9092); 58 | 59 | let topic_metadata = metadata 60 | .topics() 61 | .iter() 62 | .find(|m| m.name() == topic_name) 63 | .unwrap(); 64 | 65 | let mut ids = topic_metadata 66 | .partitions() 67 | .iter() 68 | .map(|p| { 69 | assert_eq!(p.error(), None); 70 | p.id() 71 | }) 72 | .collect::>(); 73 | ids.sort(); 74 | 75 | assert_eq!(ids, vec![0, 1, 2]); 76 | assert_eq!(topic_metadata.error(), None); 77 | assert_eq!(topic_metadata.partitions().len(), 3); 78 | assert_eq!(topic_metadata.partitions()[0].leader(), 0); 79 | assert_eq!(topic_metadata.partitions()[1].leader(), 0); 80 | assert_eq!(topic_metadata.partitions()[2].leader(), 0); 81 | assert_eq!(topic_metadata.partitions()[0].replicas(), &[0]); 82 | assert_eq!(topic_metadata.partitions()[0].isr(), &[0]); 83 | 84 | let metadata_one_topic = consumer 85 | .fetch_metadata(Some(&topic_name), Duration::from_secs(5)) 86 | .unwrap(); 87 | assert_eq!(metadata_one_topic.topics().len(), 1); 88 | } 89 | 90 | #[tokio::test] 91 | async fn test_subscription() { 92 | let _r = env_logger::try_init(); 93 | 94 | let topic_name = rand_test_topic("test_subscription"); 95 | populate_topic(&topic_name, 10, &value_fn, &key_fn, None, None).await; 96 | let consumer = create_consumer(&rand_test_group()); 97 | consumer.subscribe(&[topic_name.as_str()]).unwrap(); 98 | 99 | // Make sure the consumer joins the group. 
100 | let _consumer_future = consumer.recv().await; 101 | 102 | let mut tpl = TopicPartitionList::new(); 103 | tpl.add_topic_unassigned(&topic_name); 104 | assert_eq!(tpl, consumer.subscription().unwrap()); 105 | } 106 | 107 | #[tokio::test] 108 | async fn test_group_membership() { 109 | let _r = env_logger::try_init(); 110 | 111 | let topic_name = rand_test_topic("test_group_membership"); 112 | let group_name = rand_test_group(); 113 | populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(0), None).await; 114 | populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(1), None).await; 115 | populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(2), None).await; 116 | let consumer = create_consumer(&group_name); 117 | consumer.subscribe(&[topic_name.as_str()]).unwrap(); 118 | 119 | // Make sure the consumer joins the group. 120 | let _consumer_future = consumer.recv().await; 121 | 122 | let group_list = consumer 123 | .fetch_group_list(None, Duration::from_secs(5)) 124 | .unwrap(); 125 | 126 | // Print all the data, valgrind will check memory access 127 | for group in group_list.groups().iter() { 128 | println!( 129 | "{} {} {} {}", 130 | group.name(), 131 | group.state(), 132 | group.protocol(), 133 | group.protocol_type() 134 | ); 135 | for member in group.members() { 136 | println!( 137 | " {} {} {}", 138 | member.id(), 139 | member.client_id(), 140 | member.client_host() 141 | ); 142 | } 143 | } 144 | 145 | let group_list2 = consumer 146 | .fetch_group_list(Some(&group_name), Duration::from_secs(5)) 147 | .unwrap(); 148 | assert_eq!(group_list2.groups().len(), 1); 149 | 150 | let consumer_group = group_list2 151 | .groups() 152 | .iter() 153 | .find(|&g| g.name() == group_name) 154 | .unwrap(); 155 | assert_eq!(consumer_group.members().len(), 1); 156 | 157 | let consumer_member = &consumer_group.members()[0]; 158 | assert_eq!( 159 | consumer_member.client_id(), 160 | "rdkafka_integration_test_client" 161 | ); 162 | } 163 | 164 | #[tokio::test] 165 | async fn test_client_config() { 166 | // If not overridden, `NativeConfig::get` should get the default value for 167 | // a valid parameter. 168 | let config = ClientConfig::new().create_native_config().unwrap(); 169 | assert_eq!(config.get("session.timeout.ms").unwrap(), "45000"); 170 | 171 | // But if the parameter is overridden, `NativeConfig::get` should reflect 172 | // the overridden value. 173 | let config = ClientConfig::new() 174 | .set("session.timeout.ms", "42") 175 | .create_native_config() 176 | .unwrap(); 177 | assert_eq!(config.get("session.timeout.ms").unwrap(), "42"); 178 | 179 | // Getting an invalid parameter should produce a nice error message. 180 | assert_eq!( 181 | config.get("noexist"), 182 | Err(KafkaError::ClientConfig( 183 | RDKafkaConfRes::RD_KAFKA_CONF_UNKNOWN, 184 | "Unknown configuration name".into(), 185 | "noexist".into(), 186 | "".into(), 187 | )) 188 | ); 189 | } 190 | -------------------------------------------------------------------------------- /src/metadata.rs: -------------------------------------------------------------------------------- 1 | //! Cluster metadata. 2 | 3 | use std::ffi::CStr; 4 | use std::fmt; 5 | use std::slice; 6 | 7 | use rdkafka_sys as rdsys; 8 | use rdkafka_sys::types::*; 9 | 10 | use crate::error::IsError; 11 | use crate::util::{KafkaDrop, NativePtr}; 12 | 13 | /// Broker metadata information. 14 | pub struct MetadataBroker(RDKafkaMetadataBroker); 15 | 16 | impl MetadataBroker { 17 | /// Returns the id of the broker. 
18 | pub fn id(&self) -> i32 { 19 | self.0.id 20 | } 21 | 22 | /// Returns the host name of the broker. 23 | pub fn host(&self) -> &str { 24 | unsafe { 25 | CStr::from_ptr(self.0.host) 26 | .to_str() 27 | .expect("Broker host is not a valid UTF-8 string") 28 | } 29 | } 30 | 31 | /// Returns the port of the broker. 32 | pub fn port(&self) -> i32 { 33 | self.0.port 34 | } 35 | } 36 | 37 | impl fmt::Debug for MetadataBroker { 38 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 39 | f.debug_struct("MetadataBroker") 40 | .field("id", &self.id()) 41 | .field("host", &self.host()) 42 | .field("port", &self.port()) 43 | .finish() 44 | } 45 | } 46 | 47 | /// Partition metadata information. 48 | pub struct MetadataPartition(RDKafkaMetadataPartition); 49 | 50 | impl MetadataPartition { 51 | /// Returns the id of the partition. 52 | pub fn id(&self) -> i32 { 53 | self.0.id 54 | } 55 | 56 | /// Returns the broker id of the leader broker for the partition. 57 | pub fn leader(&self) -> i32 { 58 | self.0.leader 59 | } 60 | 61 | // TODO: return result? 62 | /// Returns the metadata error for the partition, or `None` if there is no 63 | /// error. 64 | pub fn error(&self) -> Option { 65 | if self.0.err.is_error() { 66 | Some(self.0.err) 67 | } else { 68 | None 69 | } 70 | } 71 | 72 | /// Returns the broker IDs of the replicas. 73 | pub fn replicas(&self) -> &[i32] { 74 | unsafe { slice::from_raw_parts(self.0.replicas, self.0.replica_cnt as usize) } 75 | } 76 | 77 | /// Returns the broker IDs of the in-sync replicas. 78 | pub fn isr(&self) -> &[i32] { 79 | unsafe { slice::from_raw_parts(self.0.isrs, self.0.isr_cnt as usize) } 80 | } 81 | } 82 | 83 | impl fmt::Debug for MetadataPartition { 84 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 85 | let mut debug_struct = f.debug_struct("MetadataPartition"); 86 | debug_struct.field("id", &self.id()); 87 | if let Some(err) = self.error() { 88 | debug_struct.field("error", &err); 89 | } 90 | debug_struct 91 | .field("leader", &self.leader()) 92 | .field("replicas", &self.replicas()) 93 | .field("isr", &self.isr()) // In-Sync Replicas 94 | .finish() 95 | } 96 | } 97 | 98 | /// Topic metadata information. 99 | pub struct MetadataTopic(RDKafkaMetadataTopic); 100 | 101 | impl MetadataTopic { 102 | /// Returns the name of the topic. 103 | pub fn name(&self) -> &str { 104 | unsafe { 105 | CStr::from_ptr(self.0.topic) 106 | .to_str() 107 | .expect("Topic name is not a valid UTF-8 string") 108 | } 109 | } 110 | 111 | /// Returns the partition metadata information for all the partitions. 112 | pub fn partitions(&self) -> &[MetadataPartition] { 113 | unsafe { 114 | slice::from_raw_parts( 115 | self.0.partitions as *const MetadataPartition, 116 | self.0.partition_cnt as usize, 117 | ) 118 | } 119 | } 120 | 121 | /// Returns the metadata error for the topic, or `None` if there was no 122 | /// error. 123 | pub fn error(&self) -> Option { 124 | if self.0.err.is_error() { 125 | Some(self.0.err) 126 | } else { 127 | None 128 | } 129 | } 130 | } 131 | 132 | impl fmt::Debug for MetadataTopic { 133 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 134 | let mut debug_struct = f.debug_struct("MetadataTopic"); 135 | debug_struct.field("name", &self.name()); 136 | if let Some(err) = self.error() { 137 | debug_struct.field("error", &err); 138 | } 139 | debug_struct.field("partitions", &self.partitions()); 140 | debug_struct.finish() 141 | } 142 | } 143 | 144 | /// Metadata container. 
145 | /// 146 | /// This structure wraps the metadata pointer returned by rdkafka-sys, and 147 | /// deallocates all the native resources when dropped. 148 | pub struct Metadata(NativePtr); 149 | 150 | unsafe impl KafkaDrop for RDKafkaMetadata { 151 | const TYPE: &'static str = "metadata"; 152 | const DROP: unsafe extern "C" fn(*mut Self) = drop_metadata; 153 | } 154 | 155 | unsafe extern "C" fn drop_metadata(ptr: *mut RDKafkaMetadata) { 156 | rdsys::rd_kafka_metadata_destroy(ptr as *const _) 157 | } 158 | 159 | impl Metadata { 160 | /// Creates a new Metadata container given a pointer to the native rdkafka-sys metadata. 161 | pub(crate) unsafe fn from_ptr(ptr: *const RDKafkaMetadata) -> Metadata { 162 | Metadata(NativePtr::from_ptr(ptr as *mut _).unwrap()) 163 | } 164 | 165 | /// Returns the ID of the broker originating this metadata. 166 | pub fn orig_broker_id(&self) -> i32 { 167 | self.0.orig_broker_id 168 | } 169 | 170 | /// Returns the hostname of the broker originating this metadata. 171 | pub fn orig_broker_name(&self) -> &str { 172 | unsafe { 173 | CStr::from_ptr(self.0.orig_broker_name) 174 | .to_str() 175 | .expect("Broker name is not a valid UTF-8 string") 176 | } 177 | } 178 | 179 | /// Returns the metadata information for all the brokers in the cluster. 180 | pub fn brokers(&self) -> &[MetadataBroker] { 181 | unsafe { 182 | slice::from_raw_parts( 183 | self.0.brokers as *const MetadataBroker, 184 | self.0.broker_cnt as usize, 185 | ) 186 | } 187 | } 188 | 189 | /// Returns the metadata information for all the topics in the cluster. 190 | pub fn topics(&self) -> &[MetadataTopic] { 191 | unsafe { 192 | slice::from_raw_parts( 193 | self.0.topics as *const MetadataTopic, 194 | self.0.topic_cnt as usize, 195 | ) 196 | } 197 | } 198 | } 199 | 200 | impl fmt::Debug for Metadata { 201 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 202 | f.debug_struct("Metadata") 203 | .field("orig_broker_name", &self.orig_broker_name()) 204 | .field("orig_broker_id", &self.orig_broker_id()) 205 | .field("brokers", &self.brokers()) 206 | .field("topics", &self.topics()) 207 | .finish() 208 | } 209 | } 210 | 211 | unsafe impl Send for Metadata {} 212 | unsafe impl Sync for Metadata {} 213 | -------------------------------------------------------------------------------- /tests/test_high_producers.rs: -------------------------------------------------------------------------------- 1 | //! Test data production using high level producers. 
2 | 3 | use std::collections::HashMap; 4 | use std::time::{Duration, Instant}; 5 | 6 | use futures::stream::{FuturesUnordered, StreamExt}; 7 | 8 | use rdkafka::client::DefaultClientContext; 9 | use rdkafka::config::ClientConfig; 10 | use rdkafka::error::{KafkaError, RDKafkaErrorCode}; 11 | use rdkafka::message::{Header, Headers, Message, OwnedHeaders}; 12 | use rdkafka::producer::{FutureProducer, FutureRecord, Producer}; 13 | use rdkafka::util::Timeout; 14 | use rdkafka::Timestamp; 15 | 16 | use crate::utils::*; 17 | 18 | mod utils; 19 | 20 | fn future_producer(config_overrides: HashMap<&str, &str>) -> FutureProducer { 21 | let mut config = ClientConfig::new(); 22 | config 23 | .set("bootstrap.servers", "localhost") 24 | .set("message.timeout.ms", "5000"); 25 | for (key, value) in config_overrides { 26 | config.set(key, value); 27 | } 28 | config.create().expect("Failed to create producer") 29 | } 30 | 31 | #[tokio::test] 32 | async fn test_future_producer_send() { 33 | let producer = future_producer(HashMap::new()); 34 | let topic_name = rand_test_topic("test_future_producer_send"); 35 | 36 | let results: FuturesUnordered<_> = (0..10) 37 | .map(|_| { 38 | producer.send( 39 | FutureRecord::to(&topic_name).payload("A").key("B"), 40 | Duration::from_secs(0), 41 | ) 42 | }) 43 | .collect(); 44 | 45 | let results: Vec<_> = results.collect().await; 46 | assert!(results.len() == 10); 47 | for (i, result) in results.into_iter().enumerate() { 48 | let delivered = result.unwrap(); 49 | assert_eq!(delivered.partition, 1); 50 | assert_eq!(delivered.offset, i as i64); 51 | assert!(delivered.timestamp < Timestamp::now()); 52 | } 53 | } 54 | 55 | #[tokio::test] 56 | async fn test_future_producer_send_full() { 57 | // Connect to a nonexistent Kafka broker with a long message timeout and a 58 | // tiny producer queue, so we can fill up the queue for a while by sending a 59 | // single message. 60 | let mut config = HashMap::new(); 61 | config.insert("bootstrap.servers", ""); 62 | config.insert("message.timeout.ms", "5000"); 63 | config.insert("queue.buffering.max.messages", "1"); 64 | let producer = &future_producer(config); 65 | let topic_name = &rand_test_topic("test_future_producer_send_full"); 66 | 67 | // Fill up the queue. 68 | producer 69 | .send_result(FutureRecord::to(topic_name).payload("A").key("B")) 70 | .unwrap(); 71 | 72 | let send_message = |timeout| async move { 73 | let start = Instant::now(); 74 | let res = producer 75 | .send(FutureRecord::to(topic_name).payload("A").key("B"), timeout) 76 | .await; 77 | match res { 78 | Ok(_) => panic!("send unexpectedly succeeded"), 79 | Err((KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull), _)) => start.elapsed(), 80 | Err((e, _)) => panic!("got incorrect error: {}", e), 81 | } 82 | }; 83 | 84 | // Sending a message with no timeout should return a `QueueFull` error 85 | // approximately immediately. 86 | let elapsed = send_message(Duration::from_secs(0)).await; 87 | assert!(elapsed < Duration::from_millis(20)); 88 | 89 | // Sending a message with a 1s timeout should return a `QueueFull` error 90 | // in about 1s. 
91 | let elapsed = send_message(Duration::from_secs(1)).await; 92 | assert!(elapsed > Duration::from_millis(800)); 93 | assert!(elapsed < Duration::from_millis(1200)); 94 | 95 | producer.flush(Timeout::Never).unwrap(); 96 | } 97 | 98 | #[tokio::test] 99 | async fn test_future_producer_send_fail() { 100 | let producer = future_producer(HashMap::new()); 101 | 102 | let future = producer.send( 103 | FutureRecord::to("topic") 104 | .payload("payload") 105 | .key("key") 106 | .partition(100) // Fail 107 | .headers( 108 | OwnedHeaders::new() 109 | .insert(Header { 110 | key: "0", 111 | value: Some("A"), 112 | }) 113 | .insert(Header { 114 | key: "1", 115 | value: Some("B"), 116 | }) 117 | .insert(Header { 118 | key: "2", 119 | value: Some("C"), 120 | }), 121 | ), 122 | Duration::from_secs(10), 123 | ); 124 | 125 | match future.await { 126 | Err((kafka_error, owned_message)) => { 127 | assert_eq!( 128 | kafka_error.to_string(), 129 | "Message production error: UnknownPartition (Local: Unknown partition)" 130 | ); 131 | assert_eq!(owned_message.topic(), "topic"); 132 | let headers = owned_message.headers().unwrap(); 133 | assert_eq!(headers.count(), 3); 134 | assert_eq!( 135 | headers.get_as::(0), 136 | Ok(Header { 137 | key: "0", 138 | value: Some("A") 139 | }) 140 | ); 141 | assert_eq!( 142 | headers.get_as::(1), 143 | Ok(Header { 144 | key: "1", 145 | value: Some("B") 146 | }) 147 | ); 148 | assert_eq!( 149 | headers.get_as::(2), 150 | Ok(Header { 151 | key: "2", 152 | value: Some("C") 153 | }) 154 | ); 155 | } 156 | e => { 157 | panic!("Unexpected return value: {:?}", e); 158 | } 159 | } 160 | } 161 | 162 | #[tokio::test] 163 | async fn test_future_undelivered() { 164 | let delivery_future = { 165 | let mut config = ClientConfig::new(); 166 | // There's no server running there 167 | config 168 | .set("bootstrap.servers", "localhost:47021") 169 | .set("message.timeout.ms", "1"); 170 | let producer: FutureProducer = config.create().expect("Failed to create producer"); 171 | 172 | producer 173 | .send_result( 174 | FutureRecord::to("topic") 175 | .payload("payload") 176 | .key("key") 177 | .partition(100), 178 | ) 179 | .expect("Failed to queue message") 180 | 181 | // drop producer. This should resolve the future as per purge API (couldn't be delivered) 182 | }; 183 | 184 | match delivery_future.await { 185 | Ok(Err((kafka_error, owned_message))) => { 186 | assert_eq!( 187 | kafka_error.to_string(), 188 | "Message production error: PurgeQueue (Local: Purged in queue)" 189 | ); 190 | assert_eq!(owned_message.topic(), "topic"); 191 | assert_eq!(owned_message.key(), Some(b"key" as _)); 192 | assert_eq!(owned_message.payload(), Some(b"payload" as _)); 193 | } 194 | v => { 195 | panic!("Unexpected return value: {:?}", v); 196 | } 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /tests/test_transactions.rs: -------------------------------------------------------------------------------- 1 | //! Test transactions using the base consumer and producer. 
2 | 3 | use std::collections::HashMap; 4 | use std::error::Error; 5 | use std::time::Duration; 6 | 7 | use log::info; 8 | use maplit::hashmap; 9 | 10 | use rdkafka::config::ClientConfig; 11 | use rdkafka::config::RDKafkaLogLevel; 12 | use rdkafka::consumer::{BaseConsumer, CommitMode, Consumer}; 13 | use rdkafka::error::KafkaError; 14 | use rdkafka::producer::{BaseProducer, BaseRecord, Producer}; 15 | use rdkafka::topic_partition_list::{Offset, TopicPartitionList}; 16 | use rdkafka::util::Timeout; 17 | 18 | use utils::*; 19 | 20 | mod utils; 21 | 22 | fn create_consumer( 23 | config_overrides: Option>, 24 | ) -> Result { 25 | configure_logging_for_tests(); 26 | consumer_config(&rand_test_group(), config_overrides).create() 27 | } 28 | 29 | fn create_producer() -> Result { 30 | configure_logging_for_tests(); 31 | let mut config = ClientConfig::new(); 32 | config 33 | .set("bootstrap.servers", get_bootstrap_server()) 34 | .set("message.timeout.ms", "5000") 35 | .set("enable.idempotence", "true") 36 | .set("transactional.id", rand_test_transactional_id()) 37 | .set("debug", "eos"); 38 | config.set_log_level(RDKafkaLogLevel::Debug); 39 | config.create() 40 | } 41 | 42 | enum IsolationLevel { 43 | ReadUncommitted, 44 | ReadCommitted, 45 | } 46 | 47 | fn count_records(topic: &str, iso: IsolationLevel) -> Result { 48 | let consumer = create_consumer(Some(hashmap! { 49 | "isolation.level" => match iso { 50 | IsolationLevel::ReadUncommitted => "read_uncommitted", 51 | IsolationLevel::ReadCommitted => "read_committed", 52 | }, 53 | "enable.partition.eof" => "true" 54 | }))?; 55 | let mut tpl = TopicPartitionList::new(); 56 | tpl.add_partition(topic, 0); 57 | consumer.assign(&tpl)?; 58 | let mut count = 0; 59 | for message in consumer.iter() { 60 | match message { 61 | Ok(_) => count += 1, 62 | Err(KafkaError::PartitionEOF(_)) => break, 63 | Err(e) => return Err(e), 64 | } 65 | } 66 | Ok(count) 67 | } 68 | 69 | #[tokio::test] 70 | async fn test_transaction_abort() -> Result<(), Box> { 71 | let consume_topic = rand_test_topic("test_transaction_abort"); 72 | let produce_topic = rand_test_topic("test_transaction_abort"); 73 | 74 | populate_topic(&consume_topic, 30, &value_fn, &key_fn, Some(0), None).await; 75 | 76 | // Create consumer and subscribe to `consume_topic`. 77 | let consumer = create_consumer(None)?; 78 | consumer.subscribe(&[&consume_topic])?; 79 | consumer.poll(Timeout::Never).unwrap()?; 80 | 81 | // Commit the first 10 messages. 82 | let mut commit_tpl = TopicPartitionList::new(); 83 | commit_tpl.add_partition_offset(&consume_topic, 0, Offset::Offset(10))?; 84 | consumer.commit(&commit_tpl, CommitMode::Sync).unwrap(); 85 | 86 | // Create a producer and start a transaction. 87 | let producer = create_producer()?; 88 | producer.init_transactions(Timeout::Never)?; 89 | producer.begin_transaction()?; 90 | 91 | // Tie the commit of offset 20 to the transaction. 92 | let cgm = consumer.group_metadata().unwrap(); 93 | let mut txn_tpl = TopicPartitionList::new(); 94 | txn_tpl.add_partition_offset(&consume_topic, 0, Offset::Offset(20))?; 95 | producer.send_offsets_to_transaction(&txn_tpl, &cgm, Timeout::Never)?; 96 | 97 | // Produce 10 records in the transaction. 98 | for _ in 0..10 { 99 | producer 100 | .send( 101 | BaseRecord::to(&produce_topic) 102 | .payload("A") 103 | .key("B") 104 | .partition(0), 105 | ) 106 | .unwrap(); 107 | } 108 | 109 | // Abort the transaction, but only after producing all messages. 
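// Flushing first waits for every queued record to reach the broker as part of the
// still-open transaction, so the read_uncommitted count checked further down reliably
// sees all 10 records; aborting while records were still queued locally would purge
// them before they ever reached the partition log.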
110 | info!("BEFORE FLUSH"); 111 | producer.flush(Duration::from_secs(20))?; 112 | info!("AFTER FLUSH"); 113 | producer.abort_transaction(Duration::from_secs(20))?; 114 | info!("AFTER ABORT"); 115 | 116 | // Check that no records were produced in read committed mode, but that 117 | // the records are visible in read uncommitted mode. 118 | assert_eq!( 119 | count_records(&produce_topic, IsolationLevel::ReadCommitted)?, 120 | 0, 121 | ); 122 | assert_eq!( 123 | count_records(&produce_topic, IsolationLevel::ReadUncommitted)?, 124 | 10, 125 | ); 126 | 127 | // Check that the consumer's committed offset is still 10. 128 | let committed = consumer.committed(Timeout::Never)?; 129 | assert_eq!( 130 | committed 131 | .find_partition(&consume_topic, 0) 132 | .unwrap() 133 | .offset(), 134 | Offset::Offset(10) 135 | ); 136 | 137 | Ok(()) 138 | } 139 | 140 | #[tokio::test] 141 | async fn test_transaction_commit() -> Result<(), Box> { 142 | let consume_topic = rand_test_topic("test_transaction_commit"); 143 | let produce_topic = rand_test_topic("test_transaction_commit"); 144 | 145 | populate_topic(&consume_topic, 30, &value_fn, &key_fn, Some(0), None).await; 146 | 147 | // Create consumer and subscribe to `consume_topic`. 148 | let consumer = create_consumer(None)?; 149 | consumer.subscribe(&[&consume_topic])?; 150 | consumer.poll(Timeout::Never).unwrap()?; 151 | 152 | // Commit the first 10 messages. 153 | let mut commit_tpl = TopicPartitionList::new(); 154 | commit_tpl.add_partition_offset(&consume_topic, 0, Offset::Offset(10))?; 155 | consumer.commit(&commit_tpl, CommitMode::Sync).unwrap(); 156 | 157 | // Create a producer and start a transaction. 158 | let producer = create_producer()?; 159 | producer.init_transactions(Timeout::Never)?; 160 | producer.begin_transaction()?; 161 | 162 | // Tie the commit of offset 20 to the transaction. 163 | let cgm = consumer.group_metadata().unwrap(); 164 | let mut txn_tpl = TopicPartitionList::new(); 165 | txn_tpl.add_partition_offset(&consume_topic, 0, Offset::Offset(20))?; 166 | producer.send_offsets_to_transaction(&txn_tpl, &cgm, Timeout::Never)?; 167 | 168 | // Produce 10 records in the transaction. 169 | for _ in 0..10 { 170 | producer 171 | .send( 172 | BaseRecord::to(&produce_topic) 173 | .payload("A") 174 | .key("B") 175 | .partition(0), 176 | ) 177 | .unwrap(); 178 | } 179 | 180 | // Commit the transaction. 181 | producer.commit_transaction(Timeout::Never)?; 182 | 183 | // Check that 10 records were produced. 184 | assert_eq!( 185 | count_records(&produce_topic, IsolationLevel::ReadUncommitted)?, 186 | 10, 187 | ); 188 | assert_eq!( 189 | count_records(&produce_topic, IsolationLevel::ReadCommitted)?, 190 | 10, 191 | ); 192 | 193 | // Check that the consumer's committed offset is now 20. 
194 | let committed = consumer.committed(Timeout::Never)?; 195 | assert_eq!( 196 | committed 197 | .find_partition(&consume_topic, 0) 198 | .unwrap() 199 | .offset(), 200 | Offset::Offset(20) 201 | ); 202 | 203 | Ok(()) 204 | } 205 | -------------------------------------------------------------------------------- /examples/asynchronous_processing.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::time::Duration; 3 | 4 | use clap::{Arg, Command}; 5 | use futures::stream::FuturesUnordered; 6 | use futures::{StreamExt, TryStreamExt}; 7 | use log::info; 8 | 9 | use rdkafka::config::ClientConfig; 10 | use rdkafka::consumer::stream_consumer::StreamConsumer; 11 | use rdkafka::consumer::Consumer; 12 | use rdkafka::message::{BorrowedMessage, OwnedMessage}; 13 | use rdkafka::producer::{FutureProducer, FutureRecord}; 14 | use rdkafka::Message; 15 | 16 | use crate::example_utils::setup_logger; 17 | 18 | mod example_utils; 19 | 20 | async fn record_borrowed_message_receipt(msg: &BorrowedMessage<'_>) { 21 | // Simulate some work that must be done in the same order as messages are 22 | // received; i.e., before truly parallel processing can begin. 23 | info!("Message received: {}", msg.offset()); 24 | } 25 | 26 | async fn record_owned_message_receipt(_msg: &OwnedMessage) { 27 | // Like `record_borrowed_message_receipt`, but takes an `OwnedMessage` 28 | // instead, as in a real-world use case an `OwnedMessage` might be more 29 | // convenient than a `BorrowedMessage`. 30 | } 31 | 32 | // Emulates an expensive, synchronous computation. 33 | fn expensive_computation(msg: OwnedMessage) -> String { 34 | info!("Starting expensive computation on message {}", msg.offset()); 35 | thread::sleep(Duration::from_millis(rand::random::() % 5000)); 36 | info!( 37 | "Expensive computation completed on message {}", 38 | msg.offset() 39 | ); 40 | match msg.payload_view::() { 41 | Some(Ok(payload)) => format!("Payload len for {} is {}", payload, payload.len()), 42 | Some(Err(_)) => "Message payload is not a string".to_owned(), 43 | None => "No payload".to_owned(), 44 | } 45 | } 46 | 47 | // Creates all the resources and runs the event loop. The event loop will: 48 | // 1) receive a stream of messages from the `StreamConsumer`. 49 | // 2) filter out eventual Kafka errors. 50 | // 3) send the message to a thread pool for processing. 51 | // 4) produce the result to the output topic. 52 | // `tokio::spawn` is used to handle IO-bound tasks in parallel (e.g., producing 53 | // the messages), while `tokio::task::spawn_blocking` is used to handle the 54 | // simulated CPU-bound task. 55 | async fn run_async_processor( 56 | brokers: String, 57 | group_id: String, 58 | input_topic: String, 59 | output_topic: String, 60 | ) { 61 | // Create the `StreamConsumer`, to receive the messages from the topic in form of a `Stream`. 62 | let consumer: StreamConsumer = ClientConfig::new() 63 | .set("group.id", &group_id) 64 | .set("bootstrap.servers", &brokers) 65 | .set("enable.partition.eof", "false") 66 | .set("session.timeout.ms", "6000") 67 | .set("enable.auto.commit", "false") 68 | .create() 69 | .expect("Consumer creation failed"); 70 | 71 | consumer 72 | .subscribe(&[&input_topic]) 73 | .expect("Can't subscribe to specified topic"); 74 | 75 | // Create the `FutureProducer` to produce asynchronously. 
76 | let producer: FutureProducer = ClientConfig::new() 77 | .set("bootstrap.servers", &brokers) 78 | .set("message.timeout.ms", "5000") 79 | .create() 80 | .expect("Producer creation error"); 81 | 82 | // Create the outer pipeline on the message stream. 83 | let stream_processor = consumer.stream().try_for_each(|borrowed_message| { 84 | let producer = producer.clone(); 85 | let output_topic = output_topic.to_string(); 86 | async move { 87 | // Process each message 88 | record_borrowed_message_receipt(&borrowed_message).await; 89 | // Borrowed messages can't outlive the consumer they are received from, so they need to 90 | // be owned in order to be sent to a separate thread. 91 | let owned_message = borrowed_message.detach(); 92 | record_owned_message_receipt(&owned_message).await; 93 | tokio::spawn(async move { 94 | // The body of this block will be executed on the main thread pool, 95 | // but we perform `expensive_computation` on a separate thread pool 96 | // for CPU-intensive tasks via `tokio::task::spawn_blocking`. 97 | let computation_result = 98 | tokio::task::spawn_blocking(|| expensive_computation(owned_message)) 99 | .await 100 | .expect("failed to wait for expensive computation"); 101 | let produce_future = producer.send( 102 | FutureRecord::to(&output_topic) 103 | .key("some key") 104 | .payload(&computation_result), 105 | Duration::from_secs(0), 106 | ); 107 | match produce_future.await { 108 | Ok(delivery) => println!("Sent: {:?}", delivery), 109 | Err((e, _)) => println!("Error: {:?}", e), 110 | } 111 | }); 112 | Ok(()) 113 | } 114 | }); 115 | 116 | info!("Starting event loop"); 117 | stream_processor.await.expect("stream processing failed"); 118 | info!("Stream processing terminated"); 119 | } 120 | 121 | #[tokio::main] 122 | async fn main() { 123 | let matches = Command::new("Async example") 124 | .version(option_env!("CARGO_PKG_VERSION").unwrap_or("")) 125 | .about("Asynchronous computation example") 126 | .arg( 127 | Arg::new("brokers") 128 | .short('b') 129 | .long("brokers") 130 | .help("Broker list in kafka format") 131 | .default_value("localhost:9092"), 132 | ) 133 | .arg( 134 | Arg::new("group-id") 135 | .short('g') 136 | .long("group-id") 137 | .help("Consumer group id") 138 | .default_value("example_consumer_group_id"), 139 | ) 140 | .arg( 141 | Arg::new("log-conf") 142 | .long("log-conf") 143 | .help("Configure the logging format (example: 'rdkafka=trace')"), 144 | ) 145 | .arg( 146 | Arg::new("input-topic") 147 | .long("input-topic") 148 | .help("Input topic") 149 | .required(true), 150 | ) 151 | .arg( 152 | Arg::new("output-topic") 153 | .long("output-topic") 154 | .help("Output topic") 155 | .required(true), 156 | ) 157 | .arg( 158 | Arg::new("num-workers") 159 | .long("num-workers") 160 | .help("Number of workers") 161 | .value_parser(clap::value_parser!(usize)) 162 | .default_value("1"), 163 | ) 164 | .get_matches(); 165 | 166 | setup_logger(true, matches.get_one("log-conf")); 167 | 168 | let brokers = matches.get_one::("brokers").unwrap(); 169 | let group_id = matches.get_one::("group-id").unwrap(); 170 | let input_topic = matches.get_one::("input-topic").unwrap(); 171 | let output_topic = matches.get_one::("output-topic").unwrap(); 172 | let num_workers = *matches.get_one::("num-workers").unwrap(); 173 | 174 | (0..num_workers) 175 | .map(|_| { 176 | tokio::spawn(run_async_processor( 177 | brokers.to_owned(), 178 | group_id.to_owned(), 179 | input_topic.to_owned(), 180 | output_topic.to_owned(), 181 | )) 182 | }) 183 | .collect::>() 184 | .for_each(|_| 
async {}) 185 | .await 186 | } 187 | -------------------------------------------------------------------------------- /tests/utils.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::collections::HashMap; 4 | use std::env::{self, VarError}; 5 | use std::sync::Once; 6 | use std::time::Duration; 7 | 8 | use rand::distr::{Alphanumeric, SampleString}; 9 | use regex::Regex; 10 | 11 | use rdkafka::admin::{AdminClient, AdminOptions, NewTopic, TopicReplication}; 12 | use rdkafka::client::ClientContext; 13 | use rdkafka::config::ClientConfig; 14 | use rdkafka::consumer::ConsumerContext; 15 | use rdkafka::error::KafkaResult; 16 | use rdkafka::message::ToBytes; 17 | use rdkafka::producer::{FutureProducer, FutureRecord}; 18 | use rdkafka::statistics::Statistics; 19 | use rdkafka::TopicPartitionList; 20 | 21 | pub fn rand_test_topic(test_name: &str) -> String { 22 | let id = Alphanumeric.sample_string(&mut rand::rng(), 10); 23 | format!("__{}_{}", test_name, id) 24 | } 25 | 26 | pub fn rand_test_group() -> String { 27 | let id = Alphanumeric.sample_string(&mut rand::rng(), 10); 28 | format!("__test_{}", id) 29 | } 30 | 31 | pub fn rand_test_transactional_id() -> String { 32 | let id = Alphanumeric.sample_string(&mut rand::rng(), 10); 33 | format!("__test_{}", id) 34 | } 35 | 36 | pub fn get_bootstrap_server() -> String { 37 | env::var("KAFKA_HOST").unwrap_or_else(|_| "localhost:9092".to_owned()) 38 | } 39 | 40 | pub fn get_broker_version() -> KafkaVersion { 41 | // librdkafka doesn't expose this directly, sadly. 42 | match env::var("KAFKA_VERSION") { 43 | Ok(v) => { 44 | let regex = Regex::new(r"^(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?$").unwrap(); 45 | match regex.captures(&v) { 46 | Some(captures) => { 47 | let extract = |i| { 48 | captures 49 | .get(i) 50 | .map(|m| m.as_str().parse().unwrap()) 51 | .unwrap_or(0) 52 | }; 53 | KafkaVersion(extract(1), extract(2), extract(3), extract(4)) 54 | } 55 | None => panic!("KAFKA_VERSION env var was not in expected [n[.n[.n[.n]]]] format"), 56 | } 57 | } 58 | Err(VarError::NotUnicode(_)) => { 59 | panic!("KAFKA_VERSION env var contained non-unicode characters") 60 | } 61 | // If the environment variable is unset, assume we're running the latest version. 62 | Err(VarError::NotPresent) => KafkaVersion(u32::MAX, u32::MAX, u32::MAX, u32::MAX), 63 | } 64 | } 65 | 66 | #[derive(Debug, Eq, PartialEq, Ord, PartialOrd)] 67 | pub struct KafkaVersion(pub u32, pub u32, pub u32, pub u32); 68 | 69 | pub struct ProducerTestContext { 70 | _some_data: i64, // Add some data so that valgrind can check proper allocation 71 | } 72 | 73 | impl ClientContext for ProducerTestContext { 74 | fn stats(&self, _: Statistics) {} // Don't print stats 75 | } 76 | 77 | pub async fn create_topic(name: &str, partitions: i32) { 78 | let client: AdminClient<_> = consumer_config("create_topic", None).create().unwrap(); 79 | client 80 | .create_topics( 81 | &[NewTopic::new(name, partitions, TopicReplication::Fixed(1))], 82 | &AdminOptions::new(), 83 | ) 84 | .await 85 | .unwrap(); 86 | } 87 | 88 | /// Produce the specified count of messages to the topic and partition specified. A map 89 | /// of (partition, offset) -> message id will be returned. It panics if any error is encountered 90 | /// while populating the topic. 
91 | pub async fn populate_topic( 92 | topic_name: &str, 93 | count: i32, 94 | value_fn: &P, 95 | key_fn: &K, 96 | partition: Option, 97 | timestamp: Option, 98 | ) -> HashMap<(i32, i64), i32> 99 | where 100 | P: Fn(i32) -> J, 101 | K: Fn(i32) -> Q, 102 | J: ToBytes, 103 | Q: ToBytes, 104 | { 105 | let prod_context = ProducerTestContext { _some_data: 1234 }; 106 | 107 | // Produce some messages 108 | let producer = &ClientConfig::new() 109 | .set("bootstrap.servers", get_bootstrap_server().as_str()) 110 | .set("statistics.interval.ms", "500") 111 | .set("debug", "all") 112 | .set("message.timeout.ms", "30000") 113 | .create_with_context::>(prod_context) 114 | .expect("Producer creation error"); 115 | 116 | let futures = (0..count) 117 | .map(|id| { 118 | let future = async move { 119 | producer 120 | .send( 121 | FutureRecord { 122 | topic: topic_name, 123 | payload: Some(&value_fn(id)), 124 | key: Some(&key_fn(id)), 125 | partition, 126 | timestamp, 127 | headers: None, 128 | }, 129 | Duration::from_secs(1), 130 | ) 131 | .await 132 | }; 133 | (id, future) 134 | }) 135 | .collect::>(); 136 | 137 | let mut message_map = HashMap::new(); 138 | for (id, future) in futures { 139 | match future.await { 140 | Ok(delivered) => message_map.insert((delivered.partition, delivered.offset), id), 141 | Err((kafka_error, _message)) => panic!("Delivery failed: {}", kafka_error), 142 | }; 143 | } 144 | 145 | message_map 146 | } 147 | 148 | pub fn value_fn(id: i32) -> String { 149 | format!("Message {}", id) 150 | } 151 | 152 | pub fn key_fn(id: i32) -> String { 153 | format!("Key {}", id) 154 | } 155 | 156 | pub struct ConsumerTestContext { 157 | pub _n: i64, // Add data for memory access validation 158 | } 159 | 160 | impl ClientContext for ConsumerTestContext { 161 | // Access stats 162 | fn stats(&self, stats: Statistics) { 163 | let stats_str = format!("{:?}", stats); 164 | println!("Stats received: {} bytes", stats_str.len()); 165 | } 166 | } 167 | 168 | impl ConsumerContext for ConsumerTestContext { 169 | fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) { 170 | println!("Committing offsets: {:?}", result); 171 | } 172 | } 173 | 174 | pub fn consumer_config( 175 | group_id: &str, 176 | config_overrides: Option>, 177 | ) -> ClientConfig { 178 | let mut config = ClientConfig::new(); 179 | 180 | config.set("group.id", group_id); 181 | config.set("client.id", "rdkafka_integration_test_client"); 182 | config.set("bootstrap.servers", get_bootstrap_server().as_str()); 183 | config.set("enable.partition.eof", "false"); 184 | config.set("session.timeout.ms", "6000"); 185 | config.set("enable.auto.commit", "false"); 186 | config.set("debug", "all"); 187 | config.set("auto.offset.reset", "earliest"); 188 | 189 | if let Some(overrides) = config_overrides { 190 | for (key, value) in overrides { 191 | config.set(key, value); 192 | } 193 | } 194 | 195 | config 196 | } 197 | 198 | static INIT: Once = Once::new(); 199 | 200 | pub fn configure_logging_for_tests() { 201 | INIT.call_once(|| { 202 | env_logger::try_init().expect("Failed to initialize env_logger"); 203 | }); 204 | } 205 | 206 | #[cfg(test)] 207 | mod tests { 208 | use super::*; 209 | 210 | #[tokio::test] 211 | async fn test_populate_topic() { 212 | let topic_name = rand_test_topic("test_populate_topic"); 213 | let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, Some(0), None).await; 214 | 215 | let total_messages = message_map 216 | .iter() 217 | .filter(|&(&(partition, _), _)| partition == 0) 218 | 
.count(); 219 | assert_eq!(total_messages, 100); 220 | 221 | let mut ids = message_map.values().copied().collect::>(); 222 | ids.sort(); 223 | assert_eq!(ids, (0..100).collect::>()); 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /rdkafka-sys/src/helpers.rs: -------------------------------------------------------------------------------- 1 | //! Utility functions. 2 | 3 | use crate::types::RDKafkaErrorCode; 4 | use crate::types::RDKafkaErrorCode::*; 5 | use crate::types::RDKafkaRespErr; 6 | use crate::types::RDKafkaRespErr::*; 7 | 8 | pub fn rd_kafka_resp_err_t_to_rdkafka_error(err: RDKafkaRespErr) -> RDKafkaErrorCode { 9 | match err { 10 | RD_KAFKA_RESP_ERR__BEGIN => Begin, 11 | RD_KAFKA_RESP_ERR__BAD_MSG => BadMessage, 12 | RD_KAFKA_RESP_ERR__BAD_COMPRESSION => BadCompression, 13 | RD_KAFKA_RESP_ERR__DESTROY => BrokerDestroy, 14 | RD_KAFKA_RESP_ERR__FAIL => Fail, 15 | RD_KAFKA_RESP_ERR__TRANSPORT => BrokerTransportFailure, 16 | RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE => CriticalSystemResource, 17 | RD_KAFKA_RESP_ERR__RESOLVE => Resolve, 18 | RD_KAFKA_RESP_ERR__MSG_TIMED_OUT => MessageTimedOut, 19 | RD_KAFKA_RESP_ERR__PARTITION_EOF => PartitionEOF, 20 | RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION => UnknownPartition, 21 | RD_KAFKA_RESP_ERR__FS => FileSystem, 22 | RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC => UnknownTopic, 23 | RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN => AllBrokersDown, 24 | RD_KAFKA_RESP_ERR__INVALID_ARG => InvalidArgument, 25 | RD_KAFKA_RESP_ERR__TIMED_OUT => OperationTimedOut, 26 | RD_KAFKA_RESP_ERR__QUEUE_FULL => QueueFull, 27 | RD_KAFKA_RESP_ERR__ISR_INSUFF => ISRInsufficient, 28 | RD_KAFKA_RESP_ERR__NODE_UPDATE => NodeUpdate, 29 | RD_KAFKA_RESP_ERR__SSL => SSL, 30 | RD_KAFKA_RESP_ERR__WAIT_COORD => WaitingForCoordinator, 31 | RD_KAFKA_RESP_ERR__UNKNOWN_GROUP => UnknownGroup, 32 | RD_KAFKA_RESP_ERR__IN_PROGRESS => InProgress, 33 | RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS => PreviousInProgress, 34 | RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION => ExistingSubscription, 35 | RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => AssignPartitions, 36 | RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS => RevokePartitions, 37 | RD_KAFKA_RESP_ERR__CONFLICT => Conflict, 38 | RD_KAFKA_RESP_ERR__STATE => State, 39 | RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL => UnknownProtocol, 40 | RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED => NotImplemented, 41 | RD_KAFKA_RESP_ERR__AUTHENTICATION => Authentication, 42 | RD_KAFKA_RESP_ERR__NO_OFFSET => NoOffset, 43 | RD_KAFKA_RESP_ERR__OUTDATED => Outdated, 44 | RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE => TimedOutQueue, 45 | RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE => UnsupportedFeature, 46 | RD_KAFKA_RESP_ERR__WAIT_CACHE => WaitCache, 47 | RD_KAFKA_RESP_ERR__INTR => Interrupted, 48 | RD_KAFKA_RESP_ERR__KEY_SERIALIZATION => KeySerialization, 49 | RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION => ValueSerialization, 50 | RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION => KeyDeserialization, 51 | RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION => ValueDeserialization, 52 | RD_KAFKA_RESP_ERR__PARTIAL => Partial, 53 | RD_KAFKA_RESP_ERR__READ_ONLY => ReadOnly, 54 | RD_KAFKA_RESP_ERR__NOENT => NoEnt, 55 | RD_KAFKA_RESP_ERR__UNDERFLOW => Underflow, 56 | RD_KAFKA_RESP_ERR__INVALID_TYPE => InvalidType, 57 | RD_KAFKA_RESP_ERR__RETRY => Retry, 58 | RD_KAFKA_RESP_ERR__PURGE_QUEUE => PurgeQueue, 59 | RD_KAFKA_RESP_ERR__PURGE_INFLIGHT => PurgeInflight, 60 | RD_KAFKA_RESP_ERR__FATAL => Fatal, 61 | RD_KAFKA_RESP_ERR__INCONSISTENT => Inconsistent, 62 | RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE => GaplessGuarantee, 
63 | RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED => PollExceeded, 64 | RD_KAFKA_RESP_ERR__UNKNOWN_BROKER => UnknownBroker, 65 | RD_KAFKA_RESP_ERR__NOT_CONFIGURED => NotConfigured, 66 | RD_KAFKA_RESP_ERR__FENCED => Fenced, 67 | RD_KAFKA_RESP_ERR__APPLICATION => Application, 68 | RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST => AssignmentLost, 69 | RD_KAFKA_RESP_ERR__NOOP => Noop, 70 | RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET => AutoOffsetReset, 71 | RD_KAFKA_RESP_ERR__END => End, 72 | RD_KAFKA_RESP_ERR_UNKNOWN => Unknown, 73 | RD_KAFKA_RESP_ERR_NO_ERROR => NoError, 74 | RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE => OffsetOutOfRange, 75 | RD_KAFKA_RESP_ERR_INVALID_MSG => InvalidMessage, 76 | RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART => UnknownTopicOrPartition, 77 | RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE => InvalidMessageSize, 78 | RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE => LeaderNotAvailable, 79 | RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION => NotLeaderForPartition, 80 | RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT => RequestTimedOut, 81 | RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE => BrokerNotAvailable, 82 | RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE => ReplicaNotAvailable, 83 | RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE => MessageSizeTooLarge, 84 | RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH => StaleControllerEpoch, 85 | RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE => OffsetMetadataTooLarge, 86 | RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION => NetworkException, 87 | RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS => CoordinatorLoadInProgress, 88 | RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE => CoordinatorNotAvailable, 89 | RD_KAFKA_RESP_ERR_NOT_COORDINATOR => NotCoordinator, 90 | RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION => InvalidTopic, 91 | RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE => MessageBatchTooLarge, 92 | RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS => NotEnoughReplicas, 93 | RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND => NotEnoughReplicasAfterAppend, 94 | RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS => InvalidRequiredAcks, 95 | RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION => IllegalGeneration, 96 | RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL => InconsistentGroupProtocol, 97 | RD_KAFKA_RESP_ERR_INVALID_GROUP_ID => InvalidGroupId, 98 | RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID => UnknownMemberId, 99 | RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT => InvalidSessionTimeout, 100 | RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS => RebalanceInProgress, 101 | RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE => InvalidCommitOffsetSize, 102 | RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED => TopicAuthorizationFailed, 103 | RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED => GroupAuthorizationFailed, 104 | RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED => ClusterAuthorizationFailed, 105 | RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP => InvalidTimestamp, 106 | RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM => UnsupportedSASLMechanism, 107 | RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE => IllegalSASLState, 108 | RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION => UnsupportedVersion, 109 | RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS => TopicAlreadyExists, 110 | RD_KAFKA_RESP_ERR_INVALID_PARTITIONS => InvalidPartitions, 111 | RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR => InvalidReplicationFactor, 112 | RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT => InvalidReplicaAssignment, 113 | RD_KAFKA_RESP_ERR_INVALID_CONFIG => InvalidConfig, 114 | RD_KAFKA_RESP_ERR_NOT_CONTROLLER => NotController, 115 | RD_KAFKA_RESP_ERR_INVALID_REQUEST => InvalidRequest, 116 | RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT => UnsupportedForMessageFormat, 117 | 
RD_KAFKA_RESP_ERR_POLICY_VIOLATION => PolicyViolation, 118 | RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER => OutOfOrderSequenceNumber, 119 | RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER => DuplicateSequenceNumber, 120 | RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH => InvalidProducerEpoch, 121 | RD_KAFKA_RESP_ERR_INVALID_TXN_STATE => InvalidTransactionalState, 122 | RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING => InvalidProducerIdMapping, 123 | RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT => InvalidTransactionTimeout, 124 | RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS => ConcurrentTransactions, 125 | RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED => TransactionCoordinatorFenced, 126 | RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED => { 127 | TransactionalIdAuthorizationFailed 128 | } 129 | RD_KAFKA_RESP_ERR_SECURITY_DISABLED => SecurityDisabled, 130 | RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED => OperationNotAttempted, 131 | RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR => KafkaStorageError, 132 | RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND => LogDirNotFound, 133 | RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED => SaslAuthenticationFailed, 134 | RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID => UnknownProducerId, 135 | RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS => ReassignmentInProgress, 136 | RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED => DelegationTokenAuthDisabled, 137 | RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND => DelegationTokenNotFound, 138 | RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH => DelegationTokenOwnerMismatch, 139 | RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED => DelegationTokenRequestNotAllowed, 140 | RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED => { 141 | DelegationTokenAuthorizationFailed 142 | } 143 | RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED => DelegationTokenExpired, 144 | RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE => InvalidPrincipalType, 145 | RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP => NonEmptyGroup, 146 | RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND => GroupIdNotFound, 147 | RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND => FetchSessionIdNotFound, 148 | RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH => InvalidFetchSessionEpoch, 149 | RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND => ListenerNotFound, 150 | RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED => TopicDeletionDisabled, 151 | RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH => FencedLeaderEpoch, 152 | RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH => UnknownLeaderEpoch, 153 | RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE => UnsupportedCompressionType, 154 | RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH => StaleBrokerEpoch, 155 | RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE => OffsetNotAvailable, 156 | RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED => MemberIdRequired, 157 | RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE => PreferredLeaderNotAvailable, 158 | RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED => GroupMaxSizeReached, 159 | RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID => FencedInstanceId, 160 | RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE => EligibleLeadersNotAvailable, 161 | RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED => ElectionNotNeeded, 162 | RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS => NoReassignmentInProgress, 163 | RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC => GroupSubscribedToTopic, 164 | RD_KAFKA_RESP_ERR_INVALID_RECORD => InvalidRecord, 165 | RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT => UnstableOffsetCommit, 166 | RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED => ThrottlingQuotaExceeded, 167 | RD_KAFKA_RESP_ERR_PRODUCER_FENCED => ProducerFenced, 168 | 
RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND => ResourceNotFound, 169 | RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE => DuplicateResource, 170 | RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL => UnacceptableCredential, 171 | RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET => InconsistentVoterSet, 172 | RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION => InvalidUpdateVersion, 173 | RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED => FeatureUpdateFailed, 174 | RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE => PrincipalDeserializationFailure, 175 | RD_KAFKA_RESP_ERR_END_ALL => EndAll, 176 | RD_KAFKA_RESP_ERR__LOG_TRUNCATION => LogTruncation, 177 | RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD => InvalidDifferentRecord, 178 | RD_KAFKA_RESP_ERR__DESTROY_BROKER => DestroyBroker, 179 | RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID => UnknownTopicId, 180 | RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH => FencedMemberEpoch, 181 | RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID => UnreleasedInstanceId, 182 | RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR => UnsupportedAssignor, 183 | RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH => StaleMemberEpoch, 184 | RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID => UnknownSubscriptionId, 185 | RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE => TelemetryTooLarge, 186 | RD_KAFKA_RESP_ERR_REBOOTSTRAP_REQUIRED => RebootstrapRequired, 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /rdkafka-sys/build.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | use std::env; 3 | use std::ffi::OsStr; 4 | #[cfg(feature = "cmake-build")] 5 | use std::fs; 6 | use std::path::{Path, PathBuf}; 7 | use std::process::{self, Command}; 8 | 9 | fn run_command_or_fail(dir: &str, cmd: P, args: &[S]) 10 | where 11 | P: AsRef, 12 | S: Borrow + AsRef, 13 | { 14 | let cmd = cmd.as_ref(); 15 | let cmd = if cmd.components().count() > 1 && cmd.is_relative() { 16 | // If `cmd` is a relative path (and not a bare command that should be 17 | // looked up in PATH), absolutize it relative to `dir`, as otherwise the 18 | // behavior of std::process::Command is undefined. 
19 | // https://github.com/rust-lang/rust/issues/37868 20 | PathBuf::from(dir) 21 | .join(cmd) 22 | .canonicalize() 23 | .expect("canonicalization failed") 24 | } else { 25 | PathBuf::from(cmd) 26 | }; 27 | eprintln!( 28 | "Running command: \"{} {}\" in dir: {}", 29 | cmd.display(), 30 | args.join(" "), 31 | dir 32 | ); 33 | let ret = Command::new(cmd).current_dir(dir).args(args).status(); 34 | match ret.map(|status| (status.success(), status.code())) { 35 | Ok((true, _)) => (), 36 | Ok((false, Some(c))) => panic!("Command failed with error code {}", c), 37 | Ok((false, None)) => panic!("Command got killed"), 38 | Err(e) => panic!("Command failed with error: {}", e), 39 | } 40 | } 41 | 42 | fn main() { 43 | if env::var("CARGO_FEATURE_DYNAMIC_LINKING").is_ok() { 44 | eprintln!("librdkafka will be linked dynamically"); 45 | 46 | let librdkafka_version = match env!("CARGO_PKG_VERSION") 47 | .split('+') 48 | .collect::>() 49 | .as_slice() 50 | { 51 | [_rdsys_version, librdkafka_version] => *librdkafka_version, 52 | _ => panic!("Version format is not valid"), 53 | }; 54 | 55 | let pkg_probe = pkg_config::Config::new() 56 | .cargo_metadata(true) 57 | .atleast_version(librdkafka_version) 58 | .probe("rdkafka"); 59 | 60 | match pkg_probe { 61 | Ok(library) => { 62 | eprintln!("librdkafka found on the system:"); 63 | eprintln!(" Name: {:?}", library.libs); 64 | eprintln!(" Path: {:?}", library.link_paths); 65 | eprintln!(" Version: {}", library.version); 66 | } 67 | Err(err) => { 68 | eprintln!( 69 | "librdkafka {} cannot be found on the system: {}", 70 | librdkafka_version, err 71 | ); 72 | eprintln!("Dynamic linking failed. Exiting."); 73 | process::exit(1); 74 | } 75 | } 76 | } else if env::var("CARGO_FEATURE_STATIC_EXTERNAL").is_ok() { 77 | if let Ok(rdkafka_dir) = env::var("DEP_LIBRDKAFKA_STATIC_ROOT") { 78 | println!("cargo:rustc-link-search=native={}/src", rdkafka_dir); 79 | println!("cargo:rustc-link-lib=static=rdkafka"); 80 | println!("cargo:root={}", rdkafka_dir); 81 | } else { 82 | eprintln!( 83 | "Path to DEP_LIBRDKAFKA_STATIC_ROOT not set. Static linking failed. Exiting." 
84 | ); 85 | process::exit(1); 86 | } 87 | eprintln!("librdkafka will be linked statically using prebuilt binaries"); 88 | } else { 89 | // Ensure that we are in the right directory 90 | let rdkafkasys_root = Path::new("rdkafka-sys"); 91 | if rdkafkasys_root.exists() { 92 | assert!(env::set_current_dir(rdkafkasys_root).is_ok()); 93 | } 94 | if !Path::new("librdkafka/LICENSE").exists() { 95 | eprintln!("Setting up submodules"); 96 | run_command_or_fail("../", "git", &["submodule", "update", "--init"]); 97 | } 98 | eprintln!("Building and linking librdkafka statically"); 99 | build_librdkafka(); 100 | } 101 | } 102 | 103 | fn needs_curl() -> bool { 104 | env::var("CARGO_FEATURE_CURL").is_ok() || env::var("CARGO_FEATURE_CURL_STATIC").is_ok() 105 | } 106 | 107 | #[cfg(not(feature = "cmake-build"))] 108 | fn build_librdkafka() { 109 | let mut configure_flags: Vec = Vec::new(); 110 | 111 | let mut cflags = Vec::new(); 112 | if let Ok(var) = env::var("CFLAGS") { 113 | cflags.push(var); 114 | } 115 | 116 | let mut ldflags = Vec::new(); 117 | if let Ok(var) = env::var("LDFLAGS") { 118 | ldflags.push(var); 119 | } 120 | 121 | if env::var("CARGO_FEATURE_SSL").is_ok() { 122 | configure_flags.push("--enable-ssl".into()); 123 | if let Ok(openssl_root) = env::var("DEP_OPENSSL_ROOT") { 124 | cflags.push(format!("-I{}/include", openssl_root)); 125 | ldflags.push(format!("-L{}/lib", openssl_root)); 126 | } 127 | } else { 128 | configure_flags.push("--disable-ssl".into()); 129 | } 130 | 131 | if env::var("CARGO_FEATURE_GSSAPI").is_ok() { 132 | configure_flags.push("--enable-gssapi".into()); 133 | if let Ok(sasl2_root) = env::var("DEP_SASL2_ROOT") { 134 | cflags.push(format!("-I{}/include", sasl2_root)); 135 | ldflags.push(format!("-L{}/build", sasl2_root)); 136 | } 137 | } else { 138 | configure_flags.push("--disable-gssapi".into()); 139 | } 140 | 141 | if env::var("CARGO_FEATURE_LIBZ").is_ok() { 142 | // There is no --enable-zlib option, but it is enabled by default. 143 | if let Ok(z_root) = env::var("DEP_Z_ROOT") { 144 | cflags.push(format!("-I{}/include", z_root)); 145 | ldflags.push(format!("-L{}/build", z_root)); 146 | } 147 | } else { 148 | configure_flags.push("--disable-zlib".into()); 149 | } 150 | 151 | if needs_curl() { 152 | // There is no --enable-curl option, but it is enabled by default. 
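// Note: `DEP_CURL_ROOT`, like the other `DEP_*_ROOT` variables consulted in this
// build script, is only present when the corresponding `-sys` crate declares a
// `links` key and its build script emits `cargo:root=...`; Cargo then forwards
// that value to dependents' build scripts.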
153 | if let Ok(curl_root) = env::var("DEP_CURL_ROOT") { 154 | cflags.push("-DCURLSTATIC_LIB".to_string()); 155 | cflags.push(format!("-I{}/include", curl_root)); 156 | } 157 | } else { 158 | configure_flags.push("--disable-curl".into()); 159 | } 160 | 161 | if env::var("CARGO_FEATURE_ZSTD").is_ok() { 162 | configure_flags.push("--enable-zstd".into()); 163 | if let Ok(zstd_root) = env::var("DEP_ZSTD_ROOT") { 164 | cflags.push(format!("-I{}/include", zstd_root)); 165 | ldflags.push(format!("-L{}", zstd_root)); 166 | } 167 | } else { 168 | configure_flags.push("--disable-zstd".into()); 169 | } 170 | 171 | if env::var("CARGO_FEATURE_EXTERNAL_LZ4").is_ok() { 172 | configure_flags.push("--enable-lz4-ext".into()); 173 | if let Ok(lz4_root) = env::var("DEP_LZ4_ROOT") { 174 | cflags.push(format!("-I{}/include", lz4_root)); 175 | ldflags.push(format!("-L{}", lz4_root)); 176 | } 177 | } else { 178 | configure_flags.push("--disable-lz4-ext".into()); 179 | } 180 | 181 | env::set_var("CFLAGS", cflags.join(" ")); 182 | env::set_var("LDFLAGS", ldflags.join(" ")); 183 | 184 | let out_dir = env::var("OUT_DIR").expect("OUT_DIR missing"); 185 | 186 | if !Path::new(&out_dir).join("LICENSE").exists() { 187 | // We're not allowed to build in-tree directly, as ~/.cargo/registry is 188 | // globally shared. mklove doesn't support out-of-tree builds [0], so we 189 | // work around the issue by creating a clone of librdkafka inside of 190 | // OUT_DIR, and build inside of *that* tree. 191 | // 192 | // https://github.com/edenhill/mklove/issues/17 193 | println!("Cloning librdkafka"); 194 | run_command_or_fail(".", "cp", &["-a", "librdkafka/.", &out_dir]); 195 | } 196 | 197 | println!("Configuring librdkafka"); 198 | run_command_or_fail(&out_dir, "./configure", configure_flags.as_slice()); 199 | 200 | println!("Compiling librdkafka"); 201 | if let Some(makeflags) = env::var_os("CARGO_MAKEFLAGS") { 202 | env::set_var("MAKEFLAGS", makeflags); 203 | } 204 | run_command_or_fail( 205 | &out_dir, 206 | if cfg!(target_os = "freebsd") { 207 | "gmake" 208 | } else { 209 | "make" 210 | }, 211 | &["libs"], 212 | ); 213 | 214 | println!("cargo:rustc-link-search=native={}/src", out_dir); 215 | println!("cargo:rustc-link-lib=static=rdkafka"); 216 | println!("cargo:root={}", out_dir); 217 | } 218 | 219 | #[cfg(feature = "cmake-build")] 220 | fn build_librdkafka() { 221 | let mut config = cmake::Config::new("librdkafka"); 222 | let mut cmake_library_paths = vec![]; 223 | 224 | config 225 | .define("RDKAFKA_BUILD_STATIC", "1") 226 | .define("RDKAFKA_BUILD_TESTS", "0") 227 | .define("RDKAFKA_BUILD_EXAMPLES", "0") 228 | // CMAKE_INSTALL_LIBDIR is inferred as "lib64" on some platforms, but we 229 | // want a stable location that we can add to the linker search path. 230 | // Since we're not actually installing to /usr or /usr/local, there's no 231 | // harm to always using "lib" here. 232 | .define("CMAKE_INSTALL_LIBDIR", "lib") 233 | // CMake 4.0.0 drops support for 3.2 compatibility, which is 234 | // required by librdkafka 2.3.0. 
235 | .define("CMAKE_POLICY_VERSION_MINIMUM", "3.5"); 236 | 237 | if env::var("CARGO_FEATURE_LIBZ").is_ok() { 238 | config.define("WITH_ZLIB", "1"); 239 | config.register_dep("z"); 240 | if let Ok(z_root) = env::var("DEP_Z_ROOT") { 241 | cmake_library_paths.push(format!("{}/build", z_root)); 242 | } 243 | } else { 244 | config.define("WITH_ZLIB", "0"); 245 | } 246 | 247 | if needs_curl() { 248 | config.define("WITH_CURL", "1"); 249 | config.register_dep("curl"); 250 | if let Ok(curl_root) = env::var("DEP_CURL_ROOT") { 251 | config.define("CURL_STATICLIB", "1"); 252 | cmake_library_paths.push(format!("{}/lib", curl_root)); 253 | 254 | config.cflag("-DCURL_STATICLIB"); 255 | config.cxxflag("-DCURL_STATICLIB"); 256 | config.cflag(format!("-I{}/include", curl_root)); 257 | config.cxxflag(format!("-I{}/include", curl_root)); 258 | config.cflag(format!("-L{}/lib", curl_root)); 259 | config.cxxflag(format!("-L{}/lib", curl_root)); 260 | //FIXME: Upstream should be copying this in their build.rs 261 | fs::copy( 262 | format!("{}/build/libcurl.a", curl_root), 263 | format!("{}/lib/libcurl.a", curl_root), 264 | ) 265 | .unwrap(); 266 | } 267 | } else { 268 | config.define("WITH_CURL", "0"); 269 | } 270 | 271 | if env::var("CARGO_FEATURE_SSL").is_ok() { 272 | config.define("WITH_SSL", "1"); 273 | config.define("WITH_SASL_SCRAM", "1"); 274 | config.define("WITH_SASL_OAUTHBEARER", "1"); 275 | config.register_dep("openssl"); 276 | } else { 277 | config.define("WITH_SSL", "0"); 278 | } 279 | 280 | if env::var("CARGO_FEATURE_GSSAPI").is_ok() { 281 | config.define("WITH_SASL", "1"); 282 | config.register_dep("sasl2"); 283 | if let Ok(sasl2_root) = env::var("DEP_SASL2_ROOT") { 284 | config.cflag(format!("-I{}/include", sasl2_root)); 285 | config.cxxflag(format!("-I{}/include", sasl2_root)); 286 | } 287 | } else { 288 | config.define("WITH_SASL", "0"); 289 | } 290 | 291 | if env::var("CARGO_FEATURE_ZSTD").is_ok() { 292 | config.define("WITH_ZSTD", "1"); 293 | config.register_dep("zstd"); 294 | } else { 295 | config.define("WITH_ZSTD", "0"); 296 | } 297 | 298 | if env::var("CARGO_FEATURE_EXTERNAL_LZ4").is_ok() { 299 | config.define("ENABLE_LZ4_EXT", "1"); 300 | config.register_dep("lz4"); 301 | } else { 302 | config.define("ENABLE_LZ4_EXT", "0"); 303 | } 304 | 305 | if let Ok(system_name) = env::var("CMAKE_SYSTEM_NAME") { 306 | config.define("CMAKE_SYSTEM_NAME", system_name); 307 | } 308 | 309 | if let Ok(make_program) = env::var("CMAKE_MAKE_PROGRAM") { 310 | config.define("CMAKE_MAKE_PROGRAM", make_program); 311 | } 312 | 313 | if !cmake_library_paths.is_empty() { 314 | env::set_var("CMAKE_LIBRARY_PATH", cmake_library_paths.join(";")); 315 | } 316 | 317 | println!("Configuring and compiling librdkafka"); 318 | let dst = config.build(); 319 | 320 | println!("cargo:rustc-link-search=native={}/lib", dst.display()); 321 | println!("cargo:rustc-link-lib=static=rdkafka"); 322 | } 323 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-rdkafka 2 | 3 | [![crates.io](https://img.shields.io/crates/v/rdkafka.svg)](https://crates.io/crates/rdkafka) 4 | [![docs.rs](https://docs.rs/rdkafka/badge.svg)](https://docs.rs/rdkafka/) 5 | [![Build Status](https://travis-ci.org/fede1024/rust-rdkafka.svg?branch=master)](https://travis-ci.org/fede1024/rust-rdkafka) 6 | 
[![coverate](https://codecov.io/gh/fede1024/rust-rdkafka/graphs/badge.svg?branch=master)](https://codecov.io/gh/fede1024/rust-rdkafka/) 7 | [![Join the chat at https://gitter.im/rust-rdkafka/Lobby](https://badges.gitter.im/rust-rdkafka/Lobby.svg)](https://gitter.im/rust-rdkafka/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 8 | 9 | A fully asynchronous, [futures]-enabled [Apache Kafka] client 10 | library for Rust based on [librdkafka]. 11 | 12 | ## The library 13 | 14 | `rust-rdkafka` provides a safe Rust interface to librdkafka. This version 15 | is compatible with librdkafka v1.9.2+. 16 | 17 | ### Documentation 18 | 19 | - [Current master branch](https://fede1024.github.io/rust-rdkafka/) 20 | - [Latest release](https://docs.rs/rdkafka/) 21 | - [Changelog](https://github.com/fede1024/rust-rdkafka/blob/master/changelog.md) 22 | 23 | ### Features 24 | 25 | The main features provided at the moment are: 26 | 27 | - Support for all Kafka versions since 0.8.x. For more information about 28 | broker compatibility options, check the [librdkafka 29 | documentation][broker-compat]. 30 | - Consume from single or multiple topics. 31 | - Automatic consumer rebalancing. 32 | - Customizable rebalance, with pre and post rebalance callbacks. 33 | - Synchronous or asynchronous message production. 34 | - Customizable offset commit. 35 | - Create and delete topics and add and edit partitions. 36 | - Alter broker and topic configurations. 37 | - Access to cluster metadata (list of topic-partitions, replicas, active 38 | brokers etc). 39 | - Access to group metadata (list groups, list members of groups, hostnames, 40 | etc.). 41 | - Access to producer and consumer metrics, errors and callbacks. 42 | - Exactly-once semantics (EOS) via idempotent and transactional producers 43 | and read-committed consumers. 44 | 45 | ### One million messages per second 46 | 47 | `rust-rdkafka` is designed to be easy and safe to use thanks to the 48 | abstraction layer written in Rust, while at the same time being extremely 49 | fast thanks to the librdkafka C library. 50 | 51 | Here are some benchmark results using the [`BaseProducer`], 52 | sending data to a single Kafka 0.11 process running in localhost (default 53 | configuration, 3 partitions). Hardware: Dell laptop, with Intel Core 54 | i7-4712HQ @ 2.30GHz. 55 | 56 | - Scenario: produce 5 million messages, 10 bytes each, wait for all of them to be acked 57 | - 1045413 messages/s, 9.970 MB/s (average over 5 runs) 58 | 59 | - Scenario: produce 100000 messages, 10 KB each, wait for all of them to be acked 60 | - 24623 messages/s, 234.826 MB/s (average over 5 runs) 61 | 62 | For more numbers, check out the [kafka-benchmark] project. 63 | 64 | ### Client types 65 | 66 | `rust-rdkafka` provides low level and high level consumers and producers. 67 | 68 | Low level: 69 | 70 | * [`BaseConsumer`]: a simple wrapper around the librdkafka consumer. It 71 | must be periodically `poll()`ed in order to execute callbacks, rebalances 72 | and to receive messages. 73 | * [`BaseProducer`]: a simple wrapper around the librdkafka producer. As in 74 | the consumer case, the user must call `poll()` periodically to execute 75 | delivery callbacks. 76 | * [`ThreadedProducer`]: a `BaseProducer` with a separate thread dedicated to 77 | polling the producer. 78 | 79 | High level: 80 | 81 | * [`StreamConsumer`]: a [`Stream`] of messages that takes care of 82 | polling the consumer automatically. 
83 | * [`FutureProducer`]: a [`Future`] that will be completed once 84 | the message is delivered to Kafka (or failed). 85 | 86 | For more information about consumers and producers, refer to their 87 | module-level documentation. 88 | 89 | *Warning*: the library is under active development and the APIs are likely 90 | to change. 91 | 92 | ### Asynchronous data processing with Tokio 93 | 94 | [Tokio] is a platform for fast processing of asynchronous events in Rust. 95 | The interfaces exposed by the [`StreamConsumer`] and the [`FutureProducer`] 96 | allow rust-rdkafka users to easily integrate Kafka consumers and producers 97 | within the Tokio platform, and write asynchronous message processing code. 98 | Note that rust-rdkafka can be used without Tokio. 99 | 100 | To see rust-rdkafka in action with Tokio, check out the 101 | [asynchronous processing example] in the examples folder. 102 | 103 | ### At-least-once delivery 104 | 105 | At-least-once delivery semantics are common in many streaming applications: 106 | every message is guaranteed to be processed at least once; in case of 107 | temporary failure, the message can be re-processed and/or re-delivered, 108 | but no message will be lost. 109 | 110 | In order to implement at-least-once delivery the stream processing 111 | application has to carefully commit the offset only once the message has 112 | been processed. Committing the offset too early, instead, might cause 113 | message loss, since upon recovery the consumer will start from the next 114 | message, skipping the one where the failure occurred. 115 | 116 | To see how to implement at-least-once delivery with `rdkafka`, check out the 117 | [at-least-once delivery example] in the examples folder. To know more about 118 | delivery semantics, check the [message delivery semantics] chapter in the 119 | Kafka documentation. 120 | 121 | ### Exactly-once semantics 122 | 123 | Exactly-once semantics (EOS) can be achieved using transactional producers, 124 | which allow produced records and consumer offsets to be committed or aborted 125 | atomically. Consumers that set their `isolation.level` to `read_committed` 126 | will only observe committed messages. 127 | 128 | EOS is useful in read-process-write scenarios that require messages to be 129 | processed exactly once. 130 | 131 | To learn more about using transactions in rust-rdkafka, see the 132 | [Transactions][producer-transactions] section of the producer documentation. 133 | 134 | ### Users 135 | 136 | Here are some of the projects using rust-rdkafka: 137 | 138 | - [kafka-view]: a web interface for Kafka clusters. 139 | - [kafka-benchmark]: a high performance benchmarking tool for Kafka. 140 | - [callysto]: Stream processing framework in Rust. 141 | - [bytewax]: Python stream processing framework using Timely Dataflow. 142 | - [kafka-mock-gen]: an easy-to-use mock data producer that allows stress-testing a broker. 143 | 144 | *If you are using rust-rdkafka, please let us know!* 145 | 146 | ## Installation 147 | 148 | Add this to your `Cargo.toml`: 149 | 150 | ```toml 151 | [dependencies] 152 | rdkafka = { version = "0.25", features = ["cmake-build"] } 153 | ``` 154 | 155 | This crate will compile librdkafka from sources and link it statically to 156 | your executable. 
To compile librdkafka you'll need: 157 | 158 | * the GNU toolchain 159 | * GNU `make` 160 | * `pthreads` 161 | * `zlib`: optional, but included by default (feature: `libz`) 162 | * `cmake`: optional, *not* included by default (feature: `cmake-build`) 163 | * `libssl-dev`: optional, *not* included by default (feature: `ssl`) 164 | * `libsasl2-dev`: optional, *not* included by default (feature: `gssapi`) 165 | * `libzstd-dev`: optional, *not* included by default (feature: `zstd-pkg-config`) 166 | 167 | Note that using the CMake build system, via the `cmake-build` feature, is 168 | encouraged if you can take the dependency on CMake. 169 | 170 | By default a submodule with the librdkafka sources pinned to a specific 171 | commit will be used to compile and statically link the library. The 172 | `dynamic-linking` feature can be used to instead dynamically link rdkafka to 173 | the system's version of librdkafka. Example: 174 | 175 | ```toml 176 | [dependencies] 177 | rdkafka = { version = "0.25", features = ["dynamic-linking"] } 178 | ``` 179 | 180 | If you'd like to compile librdkafka statically yourself and link against 181 | that build, you can use the `static-linking` feature while supplying `DEP_LIBRDKAFKA_STATIC_ROOT` 182 | with the path to where librdkafka was built. 183 | 184 | For a full listing of features, consult the [rdkafka-sys crate's 185 | documentation][rdkafka-sys-features]. All of rdkafka-sys features are 186 | re-exported as rdkafka features. 187 | 188 | ### Minimum supported Rust version (MSRV) 189 | 190 | The current minimum supported Rust version (MSRV) is 1.70.0. Note that 191 | bumping the MSRV is not considered a breaking change. Any release of 192 | rust-rdkafka may bump the MSRV. 193 | 194 | ### Asynchronous runtimes 195 | 196 | Some features of the [`StreamConsumer`] and [`FutureProducer`] depend on 197 | Tokio, which can be a heavyweight dependency for users who only intend to 198 | use the low-level consumers and producers. The Tokio integration is 199 | enabled by default, but can be disabled by turning off default features: 200 | 201 | ```toml 202 | [dependencies] 203 | rdkafka = { version = "0.25", default-features = false } 204 | ``` 205 | 206 | If you would like to use an asynchronous runtime besides Tokio, you can 207 | integrate it with rust-rdkafka by providing a shim that implements the 208 | [`AsyncRuntime`] trait. See the following examples for details: 209 | 210 | * [smol][runtime-smol] 211 | * [async-std][runtime-async-std] 212 | 213 | ## Examples 214 | 215 | You can find examples in the [`examples`] folder. To run them: 216 | 217 | ```bash 218 | cargo run --example <example_name> -- <example_args> 219 | ``` 220 | 221 | ## Debugging 222 | 223 | rust-rdkafka uses the [`log`] crate to handle logging. 224 | Optionally, enable the `tracing` feature to emit [`tracing`] 225 | events as opposed to [`log`] records. 226 | 227 | In tests and examples, rust-rdkafka uses the [`env_logger`] crate 228 | to format logs. In those contexts, logging can be enabled 229 | using the `RUST_LOG` environment variable, for example: 230 | 231 | ```bash 232 | RUST_LOG="librdkafka=trace,rdkafka::client=debug" cargo test 233 | ``` 234 | 235 | This will configure the logging level of librdkafka to trace, and the level 236 | of the client module of the Rust client to debug. To actually receive logs 237 | from librdkafka, you also have to set the `debug` option in the producer or 238 | consumer configuration (see librdkafka 239 | [configuration][librdkafka-config]). 
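As a rough sketch of putting the two together (the broker address, group id, and topic below are placeholders, and `env_logger` must be a dependency of your own project), a debug-enabled consumer can be configured like this:

```rust
use rdkafka::config::{ClientConfig, RDKafkaLogLevel};
use rdkafka::consumer::{BaseConsumer, Consumer};

fn main() {
    // Route log records (including librdkafka's) through env_logger,
    // so the RUST_LOG environment variable controls what gets printed.
    env_logger::init();

    let consumer: BaseConsumer = ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092")
        .set("group.id", "debug-example-group")
        // Without this, librdkafka emits no debug-level records at all.
        .set("debug", "all")
        .set_log_level(RDKafkaLogLevel::Debug)
        .create()
        .expect("consumer creation failed");

    // Subscribe so the client generates some activity worth logging.
    consumer
        .subscribe(&["my-topic"])
        .expect("subscription failed");
}
```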
240 | 241 | To enable debugging in your project, make sure you initialize the logger 242 | with `env_logger::init()`, or the equivalent for any `log`-compatible 243 | logging framework. 244 | 245 | [`AsyncRuntime`]: https://docs.rs/rdkafka/*/rdkafka/util/trait.AsyncRuntime.html 246 | [`BaseConsumer`]: https://docs.rs/rdkafka/*/rdkafka/consumer/base_consumer/struct.BaseConsumer.html 247 | [`BaseProducer`]: https://docs.rs/rdkafka/*/rdkafka/producer/base_producer/struct.BaseProducer.html 248 | [`Future`]: https://doc.rust-lang.org/stable/std/future/trait.Future.html 249 | [`FutureProducer`]: https://docs.rs/rdkafka/*/rdkafka/producer/future_producer/struct.FutureProducer.html 250 | [`Stream`]: https://docs.rs/futures/*/futures/stream/trait.Stream.html 251 | [`StreamConsumer`]: https://docs.rs/rdkafka/*/rdkafka/consumer/stream_consumer/struct.StreamConsumer.html 252 | [`ThreadedProducer`]: https://docs.rs/rdkafka/*/rdkafka/producer/base_producer/struct.ThreadedProducer.html 253 | [`log`]: https://docs.rs/log 254 | [`tracing`]: https://docs.rs/tracing 255 | [`env_logger`]: https://docs.rs/env_logger 256 | [Apache Kafka]: https://kafka.apache.org 257 | [asynchronous processing example]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/asynchronous_processing.rs 258 | [at-least-once delivery example]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/at_least_once.rs 259 | [runtime-smol]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/runtime_smol.rs 260 | [runtime-async-std]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/runtime_async_std.rs 261 | [broker-compat]: https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#broker-version-compatibility 262 | [bytewax]: https://github.com/bytewax/bytewax 263 | [callysto]: https://github.com/vertexclique/callysto 264 | [`examples`]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/ 265 | [futures]: https://github.com/rust-lang/futures-rs 266 | [kafka-benchmark]: https://github.com/fede1024/kafka-benchmark 267 | [kafka-view]: https://github.com/fede1024/kafka-view 268 | [librdkafka]: https://github.com/edenhill/librdkafka 269 | [librdkafka-config]: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md 270 | [message delivery semantics]: https://kafka.apache.org/0101/documentation.html#semantics 271 | [producer-transactions]: https://docs.rs/rdkafka/*/rdkafka/producer/#transactions 272 | [rdkafka-sys-features]: https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys/README.md#features 273 | [rdkafka-sys-known-issues]: https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys/README.md#known-issues 274 | [smol]: https://docs.rs/smol 275 | [Tokio]: https://tokio.rs/ 276 | [kafka-mock-gen]: https://github.com/tomaszkubacki/kafka-mock-gen 277 | 278 | ## rdkafka-sys 279 | 280 | See [rdkafka-sys](https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys). 281 | 282 | ## Contributors 283 | 284 | Thanks to: 285 | * Thijs Cadier - [thijsc](https://github.com/thijsc) 286 | 287 | ## Alternatives 288 | 289 | * [kafka-rust]: a pure Rust implementation of the Kafka client. 290 | 291 | [kafka-rust]: https://github.com/spicavigo/kafka-rust 292 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A fully asynchronous, [futures]-enabled [Apache Kafka] client 2 | //! library for Rust based on [librdkafka]. 3 | //! 4 | //! 
## The library 5 | //! 6 | //! `rust-rdkafka` provides a safe Rust interface to librdkafka. This version 7 | //! is compatible with librdkafka v1.9.2+. 8 | //! 9 | //! ### Documentation 10 | //! 11 | //! - [Current master branch](https://fede1024.github.io/rust-rdkafka/) 12 | //! - [Latest release](https://docs.rs/rdkafka/) 13 | //! - [Changelog](https://github.com/fede1024/rust-rdkafka/blob/master/changelog.md) 14 | //! 15 | //! ### Features 16 | //! 17 | //! The main features provided at the moment are: 18 | //! 19 | //! - Support for all Kafka versions since 0.8.x. For more information about 20 | //! broker compatibility options, check the [librdkafka 21 | //! documentation][broker-compat]. 22 | //! - Consume from single or multiple topics. 23 | //! - Automatic consumer rebalancing. 24 | //! - Customizable rebalance, with pre and post rebalance callbacks. 25 | //! - Synchronous or asynchronous message production. 26 | //! - Customizable offset commit. 27 | //! - Create and delete topics and add and edit partitions. 28 | //! - Alter broker and topic configurations. 29 | //! - Access to cluster metadata (list of topic-partitions, replicas, active 30 | //! brokers etc). 31 | //! - Access to group metadata (list groups, list members of groups, hostnames, 32 | //! etc.). 33 | //! - Access to producer and consumer metrics, errors and callbacks. 34 | //! - Exactly-once semantics (EOS) via idempotent and transactional producers 35 | //! and read-committed consumers. 36 | //! 37 | //! ### One million messages per second 38 | //! 39 | //! `rust-rdkafka` is designed to be easy and safe to use thanks to the 40 | //! abstraction layer written in Rust, while at the same time being extremely 41 | //! fast thanks to the librdkafka C library. 42 | //! 43 | //! Here are some benchmark results using the [`BaseProducer`], 44 | //! sending data to a single Kafka 0.11 process running in localhost (default 45 | //! configuration, 3 partitions). Hardware: Dell laptop, with Intel Core 46 | //! i7-4712HQ @ 2.30GHz. 47 | //! 48 | //! - Scenario: produce 5 million messages, 10 bytes each, wait for all of them to be acked 49 | //! - 1045413 messages/s, 9.970 MB/s (average over 5 runs) 50 | //! 51 | //! - Scenario: produce 100000 messages, 10 KB each, wait for all of them to be acked 52 | //! - 24623 messages/s, 234.826 MB/s (average over 5 runs) 53 | //! 54 | //! For more numbers, check out the [kafka-benchmark] project. 55 | //! 56 | //! ### Client types 57 | //! 58 | //! `rust-rdkafka` provides low level and high level consumers and producers. 59 | //! 60 | //! Low level: 61 | //! 62 | //! * [`BaseConsumer`]: a simple wrapper around the librdkafka consumer. It 63 | //! must be periodically `poll()`ed in order to execute callbacks, rebalances 64 | //! and to receive messages. 65 | //! * [`BaseProducer`]: a simple wrapper around the librdkafka producer. As in 66 | //! the consumer case, the user must call `poll()` periodically to execute 67 | //! delivery callbacks. 68 | //! * [`ThreadedProducer`]: a `BaseProducer` with a separate thread dedicated to 69 | //! polling the producer. 70 | //! 71 | //! High level: 72 | //! 73 | //! * [`StreamConsumer`]: a [`Stream`] of messages that takes care of 74 | //! polling the consumer automatically. 75 | //! * [`FutureProducer`]: a [`Future`] that will be completed once 76 | //! the message is delivered to Kafka (or failed). 77 | //! 78 | //! For more information about consumers and producers, refer to their 79 | //! module-level documentation. 80 | //! 81 | //! 
*Warning*: the library is under active development and the APIs are likely 82 | //! to change. 83 | //! 84 | //! ### Asynchronous data processing with Tokio 85 | //! 86 | //! [Tokio] is a platform for fast processing of asynchronous events in Rust. 87 | //! The interfaces exposed by the [`StreamConsumer`] and the [`FutureProducer`] 88 | //! allow rust-rdkafka users to easily integrate Kafka consumers and producers 89 | //! within the Tokio platform, and write asynchronous message processing code. 90 | //! Note that rust-rdkafka can be used without Tokio. 91 | //! 92 | //! To see rust-rdkafka in action with Tokio, check out the 93 | //! [asynchronous processing example] in the examples folder. 94 | //! 95 | //! ### At-least-once delivery 96 | //! 97 | //! At-least-once delivery semantics are common in many streaming applications: 98 | //! every message is guaranteed to be processed at least once; in case of 99 | //! temporary failure, the message can be re-processed and/or re-delivered, 100 | //! but no message will be lost. 101 | //! 102 | //! In order to implement at-least-once delivery the stream processing 103 | //! application has to carefully commit the offset only once the message has 104 | //! been processed. Committing the offset too early, instead, might cause 105 | //! message loss, since upon recovery the consumer will start from the next 106 | //! message, skipping the one where the failure occurred. 107 | //! 108 | //! To see how to implement at-least-once delivery with `rdkafka`, check out the 109 | //! [at-least-once delivery example] in the examples folder. To know more about 110 | //! delivery semantics, check the [message delivery semantics] chapter in the 111 | //! Kafka documentation. 112 | //! 113 | //! ### Exactly-once semantics 114 | //! 115 | //! Exactly-once semantics (EOS) can be achieved using transactional producers, 116 | //! which allow produced records and consumer offsets to be committed or aborted 117 | //! atomically. Consumers that set their `isolation.level` to `read_committed` 118 | //! will only observe committed messages. 119 | //! 120 | //! EOS is useful in read-process-write scenarios that require messages to be 121 | //! processed exactly once. 122 | //! 123 | //! To learn more about using transactions in rust-rdkafka, see the 124 | //! [Transactions][producer-transactions] section of the producer documentation. 125 | //! 126 | //! ### Users 127 | //! 128 | //! Here are some of the projects using rust-rdkafka: 129 | //! 130 | //! - [timely-dataflow]: a distributed data-parallel compute engine. See also 131 | //! the [blog post][timely-blog] announcing its Kafka integration. 132 | //! - [kafka-view]: a web interface for Kafka clusters. 133 | //! - [kafka-benchmark]: a high performance benchmarking tool for Kafka. 134 | //! - [callysto]: Stream processing framework in Rust. 135 | //! - [bytewax]: Python stream processing framework using Timely Dataflow. 136 | //! 137 | //! *If you are using rust-rdkafka, please let us know!* 138 | //! 139 | //! ## Installation 140 | //! 141 | //! Add this to your `Cargo.toml`: 142 | //! 143 | //! ```toml 144 | //! [dependencies] 145 | //! rdkafka = { version = "0.25", features = ["cmake-build"] } 146 | //! ``` 147 | //! 148 | //! This crate will compile librdkafka from sources and link it statically to 149 | //! your executable. To compile librdkafka you'll need: 150 | //! 151 | //! * the GNU toolchain 152 | //! * GNU `make` 153 | //! * `pthreads` 154 | //! 
* `zlib`: optional, but included by default (feature: `libz`) 155 | //! * `cmake`: optional, *not* included by default (feature: `cmake-build`) 156 | //! * `libssl-dev`: optional, *not* included by default (feature: `ssl`) 157 | //! * `libsasl2-dev`: optional, *not* included by default (feature: `gssapi`) 158 | //! * `libzstd-dev`: optional, *not* included by default (feature: `zstd-pkg-config`) 159 | //! 160 | //! Note that using the CMake build system, via the `cmake-build` feature, is 161 | //! encouraged if you can take the dependency on CMake. 162 | //! 163 | //! By default a submodule with the librdkafka sources pinned to a specific 164 | //! commit will be used to compile and statically link the library. The 165 | //! `dynamic-linking` feature can be used to instead dynamically link rdkafka to 166 | //! the system's version of librdkafka. Example: 167 | //! 168 | //! ```toml 169 | //! [dependencies] 170 | //! rdkafka = { version = "0.25", features = ["dynamic-linking"] } 171 | //! ``` 172 | //! 173 | //! For a full listing of features, consult the [rdkafka-sys crate's 174 | //! documentation][rdkafka-sys-features]. All of rdkafka-sys features are 175 | //! re-exported as rdkafka features. 176 | //! 177 | //! ### Minimum supported Rust version (MSRV) 178 | //! 179 | //! The current minimum supported Rust version (MSRV) is 1.70.0. Note that 180 | //! bumping the MSRV is not considered a breaking change. Any release of 181 | //! rust-rdkafka may bump the MSRV. 182 | //! 183 | //! ### Asynchronous runtimes 184 | //! 185 | //! Some features of the [`StreamConsumer`] and [`FutureProducer`] depend on 186 | //! Tokio, which can be a heavyweight dependency for users who only intend to 187 | //! use the low-level consumers and producers. The Tokio integration is 188 | //! enabled by default, but can be disabled by turning off default features: 189 | //! 190 | //! ```toml 191 | //! [dependencies] 192 | //! rdkafka = { version = "0.25", default-features = false } 193 | //! ``` 194 | //! 195 | //! If you would like to use an asynchronous runtime besides Tokio, you can 196 | //! integrate it with rust-rdkafka by providing a shim that implements the 197 | //! [`AsyncRuntime`] trait. See the following examples for details: 198 | //! 199 | //! * [smol][runtime-smol] 200 | //! * [async-std][runtime-async-std] 201 | //! 202 | //! ## Examples 203 | //! 204 | //! You can find examples in the [`examples`] folder. To run them: 205 | //! 206 | //! ```bash 207 | //! cargo run --example -- 208 | //! ``` 209 | //! 210 | //! ## Debugging 211 | //! 212 | //! rust-rdkafka uses the [`log`] crate to handle logging. 213 | //! Optionally, enable the `tracing` feature to emit [`tracing`] 214 | //! events as opposed to [`log`] records. 215 | //! 216 | //! In test and examples, rust-rdkafka uses the [`env_logger`] crate 217 | //! to format logs. In those contexts, logging can be enabled 218 | //! using the `RUST_LOG` environment variable, for example: 219 | //! 220 | //! ```bash 221 | //! RUST_LOG="librdkafka=trace,rdkafka::client=debug" cargo test 222 | //! ``` 223 | //! 224 | //! This will configure the logging level of librdkafka to trace, and the level 225 | //! of the client module of the Rust client to debug. To actually receive logs 226 | //! from librdkafka, you also have to set the `debug` option in the producer or 227 | //! consumer configuration (see librdkafka 228 | //! [configuration][librdkafka-config]). 229 | //! 230 | //! To enable debugging in your project, make sure you initialize the logger 231 | //! 
with `env_logger::init()`, or the equivalent for any `log`-compatible 232 | //! logging framework. 233 | //! 234 | //! [`AsyncRuntime`]: https://docs.rs/rdkafka/*/rdkafka/util/trait.AsyncRuntime.html 235 | //! [`BaseConsumer`]: https://docs.rs/rdkafka/*/rdkafka/consumer/base_consumer/struct.BaseConsumer.html 236 | //! [`BaseProducer`]: https://docs.rs/rdkafka/*/rdkafka/producer/base_producer/struct.BaseProducer.html 237 | //! [`Future`]: https://doc.rust-lang.org/stable/std/future/trait.Future.html 238 | //! [`FutureProducer`]: https://docs.rs/rdkafka/*/rdkafka/producer/future_producer/struct.FutureProducer.html 239 | //! [`Stream`]: https://docs.rs/futures/*/futures/stream/trait.Stream.html 240 | //! [`StreamConsumer`]: https://docs.rs/rdkafka/*/rdkafka/consumer/stream_consumer/struct.StreamConsumer.html 241 | //! [`ThreadedProducer`]: https://docs.rs/rdkafka/*/rdkafka/producer/base_producer/struct.ThreadedProducer.html 242 | //! [`log`]: https://docs.rs/log 243 | //! [`tracing`]: https://docs.rs/tracing 244 | //! [`env_logger`]: https://docs.rs/env_logger 245 | //! [Apache Kafka]: https://kafka.apache.org 246 | //! [asynchronous processing example]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/asynchronous_processing.rs 247 | //! [at-least-once delivery example]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/at_least_once.rs 248 | //! [runtime-smol]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/runtime_smol.rs 249 | //! [runtime-async-std]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/runtime_async_std.rs 250 | //! [broker-compat]: https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#broker-version-compatibility 251 | //! [bytewax]: https://github.com/bytewax/bytewax 252 | //! [callysto]: https://github.com/vertexclique/callysto 253 | //! [`examples`]: https://github.com/fede1024/rust-rdkafka/blob/master/examples/ 254 | //! [futures]: https://github.com/rust-lang/futures-rs 255 | //! [kafka-benchmark]: https://github.com/fede1024/kafka-benchmark 256 | //! [kafka-view]: https://github.com/fede1024/kafka-view 257 | //! [librdkafka]: https://github.com/edenhill/librdkafka 258 | //! [librdkafka-config]: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md 259 | //! [message delivery semantics]: https://kafka.apache.org/0101/documentation.html#semantics 260 | //! [producer-transactions]: https://docs.rs/rdkafka/*/rdkafka/producer/#transactions 261 | //! [rdkafka-sys-features]: https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys/README.md#features 262 | //! [rdkafka-sys-known-issues]: https://github.com/fede1024/rust-rdkafka/tree/master/rdkafka-sys/README.md#known-issues 263 | //! [smol]: https://docs.rs/smol 264 | //! [timely-blog]: https://github.com/frankmcsherry/blog/blob/master/posts/2017-11-08.md 265 | //! [timely-dataflow]: https://github.com/frankmcsherry/timely-dataflow 266 | //! [Tokio]: https://tokio.rs/ 267 | 268 | #![forbid(missing_docs)] 269 | #![deny(rust_2018_idioms)] 270 | #![allow(clippy::type_complexity)] 271 | #![cfg_attr(docsrs, feature(doc_cfg))] 272 | 273 | mod log; 274 | 275 | pub use rdkafka_sys::{bindings, helpers, types}; 276 | 277 | pub mod admin; 278 | pub mod client; 279 | pub mod config; 280 | pub mod consumer; 281 | pub mod error; 282 | pub mod groups; 283 | pub mod message; 284 | pub mod metadata; 285 | pub mod mocking; 286 | pub mod producer; 287 | pub mod statistics; 288 | pub mod topic_partition_list; 289 | pub mod util; 290 | 291 | // Re-exports. 
292 | pub use crate::client::ClientContext;
293 | pub use crate::config::ClientConfig;
294 | pub use crate::message::{Message, Timestamp};
295 | pub use crate::statistics::Statistics;
296 | pub use crate::topic_partition_list::{Offset, TopicPartitionList};
297 | pub use crate::util::IntoOpaque;
298 |
--------------------------------------------------------------------------------
/src/config.rs:
--------------------------------------------------------------------------------
1 | //! Producer and consumer configuration.
2 | //!
3 | //! ## C library configuration
4 | //!
5 | //! The Rust library will forward all the configuration to the C library. The
6 | //! most frequently used parameters are listed here.
7 | //!
8 | //! ### Frequently used parameters
9 | //!
10 | //! For producer-specific and consumer-specific parameters check the producer
11 | //! and consumer modules documentation. The full list of available parameters is
12 | //! available in the [librdkafka documentation][librdkafka-config].
13 | //!
14 | //! - `client.id`: Client identifier. Default: `rdkafka`.
15 | //! - `bootstrap.servers`: Initial list of brokers as a CSV list of broker host
16 | //!   or host:port. Default: empty.
17 | //! - `message.max.bytes`: Maximum message size. Default: 1000000.
18 | //! - `debug`: A comma-separated list of debug contexts to enable. Use 'all' to
19 | //!   print all the debugging information. Default: empty (off).
20 | //! - `statistics.interval.ms`: how often the statistics callback
21 | //!   specified in the [`ClientContext`] will be called. Default: 0 (disabled).
22 | //!
23 | //! [librdkafka-config]: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
24 |
25 | use std::collections::{BTreeMap, HashMap};
26 | use std::ffi::CString;
27 | use std::fmt::Debug;
28 | use std::iter::FromIterator;
29 | use std::os::raw::c_char;
30 | use std::ptr;
31 |
32 | use rdkafka_sys as rdsys;
33 | use rdkafka_sys::types::*;
34 |
35 | use crate::client::ClientContext;
36 | use crate::error::{IsError, KafkaError, KafkaResult};
37 | use crate::log::{log_enabled, DEBUG, INFO, WARN};
38 | use crate::util::{ErrBuf, KafkaDrop, NativePtr};
39 |
40 | const SENSITIVE_CONFIG_KEYS: &[&str] = &[
41 |     "sasl.password",
42 |     "ssl.key.password",
43 |     "ssl.keystore.password",
44 |     "ssl.truststore.password",
45 |     "sasl.oauthbearer.client.secret",
46 | ];
47 |
48 | const SANITIZED_VALUE_PLACEHOLDER: &str = "[sanitized for safety]";
49 |
50 | /// The log levels supported by librdkafka.
51 | #[derive(Copy, Clone, Debug)]
52 | pub enum RDKafkaLogLevel {
53 |     /// Higher priority than [`Level::Error`](log::Level::Error) from the log
54 |     /// crate.
55 |     Emerg = 0,
56 |     /// Higher priority than [`Level::Error`](log::Level::Error) from the log
57 |     /// crate.
58 |     Alert = 1,
59 |     /// Higher priority than [`Level::Error`](log::Level::Error) from the log
60 |     /// crate.
61 |     Critical = 2,
62 |     /// Equivalent to [`Level::Error`](log::Level::Error) from the log crate.
63 |     Error = 3,
64 |     /// Equivalent to [`Level::Warn`](log::Level::Warn) from the log crate.
65 |     Warning = 4,
66 |     /// Higher priority than [`Level::Info`](log::Level::Info) from the log
67 |     /// crate.
68 |     Notice = 5,
69 |     /// Equivalent to [`Level::Info`](log::Level::Info) from the log crate.
70 |     Info = 6,
71 |     /// Equivalent to [`Level::Debug`](log::Level::Debug) from the log crate.
72 |     Debug = 7,
73 | }
74 |
75 | impl RDKafkaLogLevel {
76 |     pub(crate) fn from_int(level: i32) -> RDKafkaLogLevel {
77 |         match level {
78 |             0 => RDKafkaLogLevel::Emerg,
79 |             1 => RDKafkaLogLevel::Alert,
80 |             2 => RDKafkaLogLevel::Critical,
81 |             3 => RDKafkaLogLevel::Error,
82 |             4 => RDKafkaLogLevel::Warning,
83 |             5 => RDKafkaLogLevel::Notice,
84 |             6 => RDKafkaLogLevel::Info,
85 |             _ => RDKafkaLogLevel::Debug,
86 |         }
87 |     }
88 | }
89 |
90 | //
91 | // ********** CLIENT CONFIG **********
92 | //
93 |
94 | /// A native rdkafka-sys client config.
95 | pub struct NativeClientConfig {
96 |     ptr: NativePtr<RDKafkaConf>,
97 | }
98 |
99 | unsafe impl KafkaDrop for RDKafkaConf {
100 |     const TYPE: &'static str = "client config";
101 |     const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_conf_destroy;
102 | }
103 |
104 | impl NativeClientConfig {
105 |     /// Wraps a pointer to an `RDKafkaConf` object and returns a new `NativeClientConfig`.
106 |     pub(crate) unsafe fn from_ptr(ptr: *mut RDKafkaConf) -> NativeClientConfig {
107 |         NativeClientConfig {
108 |             ptr: NativePtr::from_ptr(ptr).unwrap(),
109 |         }
110 |     }
111 |
112 |     /// Returns the pointer to the librdkafka RDKafkaConf structure.
113 |     pub fn ptr(&self) -> *mut RDKafkaConf {
114 |         self.ptr.ptr()
115 |     }
116 |
117 |     /// Gets the value of a parameter in the configuration.
118 |     ///
119 |     /// This method reflects librdkafka's view of the current value of the
120 |     /// parameter. If the parameter was overridden by the user, it returns the
121 |     /// user-specified value. Otherwise, it returns librdkafka's default value
122 |     /// for the parameter.
123 |     pub fn get(&self, key: &str) -> KafkaResult<String> {
124 |         let make_err = |res| {
125 |             KafkaError::ClientConfig(
126 |                 res,
127 |                 match res {
128 |                     RDKafkaConfRes::RD_KAFKA_CONF_UNKNOWN => "Unknown configuration name",
129 |                     RDKafkaConfRes::RD_KAFKA_CONF_INVALID => "Invalid configuration value",
130 |                     RDKafkaConfRes::RD_KAFKA_CONF_OK => "OK",
131 |                 }
132 |                 .into(),
133 |                 key.into(),
134 |                 "".into(),
135 |             )
136 |         };
137 |         let key_c = CString::new(key.to_string())?;
138 |
139 |         // Call with a `NULL` buffer to determine the size of the string.
140 |         let mut size = 0_usize;
141 |         let res = unsafe {
142 |             rdsys::rd_kafka_conf_get(self.ptr(), key_c.as_ptr(), ptr::null_mut(), &mut size)
143 |         };
144 |         if res.is_error() {
145 |             return Err(make_err(res));
146 |         }
147 |
148 |         // Allocate a buffer of that size and call again to get the actual
149 |         // string.
150 |         let mut buf = vec![0_u8; size];
151 |         let res = unsafe {
152 |             rdsys::rd_kafka_conf_get(
153 |                 self.ptr(),
154 |                 key_c.as_ptr(),
155 |                 buf.as_mut_ptr() as *mut c_char,
156 |                 &mut size,
157 |             )
158 |         };
159 |         if res.is_error() {
160 |             return Err(make_err(res));
161 |         }
162 |
163 |         // Convert the C string to a Rust string.
164 |         Ok(String::from_utf8_lossy(&buf)
165 |             .trim_matches(char::from(0))
166 |             .to_string())
167 |     }
168 |
169 |     pub(crate) fn set(&self, key: &str, value: &str) -> KafkaResult<()> {
170 |         let mut err_buf = ErrBuf::new();
171 |         let key_c = CString::new(key)?;
172 |         let value_c = CString::new(value)?;
173 |         let ret = unsafe {
174 |             rdsys::rd_kafka_conf_set(
175 |                 self.ptr(),
176 |                 key_c.as_ptr(),
177 |                 value_c.as_ptr(),
178 |                 err_buf.as_mut_ptr(),
179 |                 err_buf.capacity(),
180 |             )
181 |         };
182 |         if ret.is_error() {
183 |             return Err(KafkaError::ClientConfig(
184 |                 ret,
185 |                 err_buf.to_string(),
186 |                 key.to_string(),
187 |                 value.to_string(),
188 |             ));
189 |         }
190 |         Ok(())
191 |     }
192 | }
193 |
194 | /// Client configuration.
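///
/// A minimal usage sketch (illustrative only; the broker address and client id
/// below are placeholders, not defaults):
///
/// ```no_run
/// use rdkafka::config::ClientConfig;
///
/// let mut config = ClientConfig::new();
/// config
///     .set("bootstrap.servers", "localhost:9092")
///     .set("client.id", "my-client");
/// assert_eq!(config.get("client.id"), Some("my-client"));
/// ```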
195 | #[derive(Clone)]
196 | pub struct ClientConfig {
197 |     conf_map: HashMap<String, String>,
198 |     /// The librdkafka logging level. Refer to [`RDKafkaLogLevel`] for the list
199 |     /// of available levels.
200 |     pub log_level: RDKafkaLogLevel,
201 | }
202 |
203 | impl Debug for ClientConfig {
204 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
205 |         let sanitized: BTreeMap<&str, &str> = self
206 |             .conf_map
207 |             .iter()
208 |             .filter_map(|(key, value)| {
209 |                 if SENSITIVE_CONFIG_KEYS.contains(&key.as_str()) {
210 |                     None
211 |                 } else {
212 |                     Some((key.as_str(), value.as_str()))
213 |                 }
214 |             })
215 |             .collect();
216 |
217 |         let mut debug_struct = f.debug_struct("ClientConfig");
218 |         debug_struct.field("log_level", &self.log_level);
219 |         debug_struct.field("conf_map", &sanitized);
220 |         debug_struct.finish()
221 |     }
222 | }
223 |
224 | impl Default for ClientConfig {
225 |     fn default() -> Self {
226 |         Self::new()
227 |     }
228 | }
229 |
230 | impl ClientConfig {
231 |     /// Creates a new empty configuration.
232 |     pub fn new() -> ClientConfig {
233 |         ClientConfig {
234 |             conf_map: HashMap::new(),
235 |             log_level: log_level_from_global_config(),
236 |         }
237 |     }
238 |
239 |     /// Returns a sanitized view of the underlying config map.
240 |     ///
241 |     /// Sensitive keys have their values replaced with a placeholder string so they never appear in
242 |     /// clear text when inspected.
243 |     pub fn config_map(&self) -> BTreeMap<&str, &str> {
244 |         self.conf_map
245 |             .iter()
246 |             .map(|(key, value)| {
247 |                 if SENSITIVE_CONFIG_KEYS.contains(&key.as_str()) {
248 |                     (key.as_str(), SANITIZED_VALUE_PLACEHOLDER)
249 |                 } else {
250 |                     (key.as_str(), value.as_str())
251 |                 }
252 |             })
253 |             .collect()
254 |     }
255 |
256 |     /// Gets the value of a parameter in the configuration.
257 |     ///
258 |     /// Returns the current value set for `key`, or `None` if no value for `key`
259 |     /// exists.
260 |     ///
261 |     /// Note that this method will only ever return values that were installed
262 |     /// by a call to [`ClientConfig::set`]. To retrieve librdkafka's default
263 |     /// value for a parameter, build a [`NativeClientConfig`] and then call
264 |     /// [`NativeClientConfig::get`] on the resulting object.
265 |     pub fn get(&self, key: &str) -> Option<&str> {
266 |         self.conf_map.get(key).map(|val| val.as_str())
267 |     }
268 |
269 |     /// Sets a parameter in the configuration.
270 |     ///
271 |     /// If there is an existing value for `key` in the configuration, it is
272 |     /// overridden with the new `value`.
273 |     pub fn set<K, V>(&mut self, key: K, value: V) -> &mut ClientConfig
274 |     where
275 |         K: Into<String>,
276 |         V: Into<String>,
277 |     {
278 |         self.conf_map.insert(key.into(), value.into());
279 |         self
280 |     }
281 |
282 |     /// Removes a parameter from the configuration.
283 |     pub fn remove<'a>(&'a mut self, key: &str) -> &'a mut ClientConfig {
284 |         self.conf_map.remove(key);
285 |         self
286 |     }
287 |
288 |     /// Sets the log level of the client. If not specified, the log level will be calculated based
289 |     /// on the global log level of the log crate.
290 |     pub fn set_log_level(&mut self, log_level: RDKafkaLogLevel) -> &mut ClientConfig {
291 |         self.log_level = log_level;
292 |         self
293 |     }
294 |
295 |     /// Builds a native librdkafka configuration.
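    ///
    /// A hypothetical sketch of inspecting a librdkafka default through the
    /// resulting native configuration (the key shown is just an example):
    ///
    /// ```no_run
    /// use rdkafka::config::ClientConfig;
    ///
    /// let native = ClientConfig::new()
    ///     .create_native_config()
    ///     .expect("invalid configuration");
    /// // Unlike `ClientConfig::get`, this reflects librdkafka's own default value.
    /// println!("message.max.bytes = {}", native.get("message.max.bytes").unwrap());
    /// ```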
296 |     pub fn create_native_config(&self) -> KafkaResult<NativeClientConfig> {
297 |         let conf = unsafe { NativeClientConfig::from_ptr(rdsys::rd_kafka_conf_new()) };
298 |         for (key, value) in &self.conf_map {
299 |             conf.set(key, value)?;
300 |         }
301 |         Ok(conf)
302 |     }
303 |
304 |     /// Uses the current configuration to create a new Consumer or Producer.
305 |     pub fn create<T: FromClientConfig>(&self) -> KafkaResult<T> {
306 |         T::from_config(self)
307 |     }
308 |
309 |     /// Uses the current configuration and the provided context to create a new Consumer or Producer.
310 |     pub fn create_with_context<C, T>(&self, context: C) -> KafkaResult<T>
311 |     where
312 |         C: ClientContext,
313 |         T: FromClientConfigAndContext<C>,
314 |     {
315 |         T::from_config_and_context(self, context)
316 |     }
317 | }
318 |
319 | impl FromIterator<(String, String)> for ClientConfig {
320 |     fn from_iter<I>(iter: I) -> ClientConfig
321 |     where
322 |         I: IntoIterator<Item = (String, String)>,
323 |     {
324 |         let mut config = ClientConfig::new();
325 |         config.extend(iter);
326 |         config
327 |     }
328 | }
329 |
330 | impl Extend<(String, String)> for ClientConfig {
331 |     fn extend<I>(&mut self, iter: I)
332 |     where
333 |         I: IntoIterator<Item = (String, String)>,
334 |     {
335 |         self.conf_map.extend(iter)
336 |     }
337 | }
338 |
339 | /// Returns the log level to use, based on the global log level of the `log` crate.
340 | fn log_level_from_global_config() -> RDKafkaLogLevel {
341 |     if log_enabled!(target: "librdkafka", DEBUG) {
342 |         RDKafkaLogLevel::Debug
343 |     } else if log_enabled!(target: "librdkafka", INFO) {
344 |         RDKafkaLogLevel::Info
345 |     } else if log_enabled!(target: "librdkafka", WARN) {
346 |         RDKafkaLogLevel::Warning
347 |     } else {
348 |         RDKafkaLogLevel::Error
349 |     }
350 | }
351 |
352 | /// Create a new client based on the provided configuration.
353 | pub trait FromClientConfig: Sized {
354 |     /// Creates a client from a client configuration. The default client context
355 |     /// will be used.
356 |     fn from_config(_: &ClientConfig) -> KafkaResult<Self>;
357 | }
358 |
359 | /// Create a new client based on the provided configuration and context.
360 | pub trait FromClientConfigAndContext<C: ClientContext>: Sized {
361 |     /// Creates a client from a client configuration and a client context.
362 |     fn from_config_and_context(_: &ClientConfig, _: C) -> KafkaResult<Self>;
363 | }
364 |
365 | #[cfg(test)]
366 | mod tests {
367 |     use super::ClientConfig;
368 |
369 |     #[test]
370 |     fn test_client_config_set_map() {
371 |         let mut config: ClientConfig = vec![("a".into(), "1".into()), ("b".into(), "1".into())]
372 |             .into_iter()
373 |             .collect();
374 |         config.extend([("b".into(), "2".into()), ("c".into(), "3".into())]);
375 |
376 |         assert_eq!(config.get("a").unwrap(), "1");
377 |         assert_eq!(config.get("b").unwrap(), "2");
378 |         assert_eq!(config.get("c").unwrap(), "3");
379 |     }
380 | }
381 |
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | //! Error manipulations.
2 |
3 | use std::error::Error;
4 | use std::ffi::{self, CStr};
5 | use std::fmt;
6 | use std::ptr;
7 | use std::sync::Arc;
8 |
9 | use rdkafka_sys as rdsys;
10 | use rdkafka_sys::types::*;
11 |
12 | use crate::util::{KafkaDrop, NativePtr};
13 |
14 | // Re-export rdkafka error code
15 | pub use rdsys::types::RDKafkaErrorCode;
16 |
17 | /// Kafka result.
18 | pub type KafkaResult<T> = Result<T, KafkaError>;
19 |
20 | /// Verify if the value represents an error condition.
21 | ///
22 | /// Some librdkafka codes are informational, rather than true errors.
23 | pub trait IsError {
24 |     /// Reports whether the value represents an error.
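    ///
    /// For example (an illustrative check, not taken from the upstream docs),
    /// the "OK" configuration result is not reported as an error:
    ///
    /// ```no_run
    /// use rdkafka::error::IsError;
    /// use rdkafka::types::RDKafkaConfRes;
    ///
    /// assert!(!RDKafkaConfRes::RD_KAFKA_CONF_OK.is_error());
    /// ```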
25 |     fn is_error(&self) -> bool;
26 | }
27 |
28 | impl IsError for RDKafkaRespErr {
29 |     fn is_error(&self) -> bool {
30 |         *self != RDKafkaRespErr::RD_KAFKA_RESP_ERR_NO_ERROR
31 |     }
32 | }
33 |
34 | impl IsError for RDKafkaConfRes {
35 |     fn is_error(&self) -> bool {
36 |         *self != RDKafkaConfRes::RD_KAFKA_CONF_OK
37 |     }
38 | }
39 |
40 | impl IsError for RDKafkaError {
41 |     fn is_error(&self) -> bool {
42 |         self.0.is_some()
43 |     }
44 | }
45 |
46 | /// Native rdkafka error.
47 | #[derive(Clone)]
48 | pub struct RDKafkaError(Option<Arc<NativePtr<rdsys::rd_kafka_error_t>>>);
49 |
50 | unsafe impl KafkaDrop for rdsys::rd_kafka_error_t {
51 |     const TYPE: &'static str = "error";
52 |     const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_error_destroy;
53 | }
54 |
55 | unsafe impl Send for RDKafkaError {}
56 | unsafe impl Sync for RDKafkaError {}
57 |
58 | impl RDKafkaError {
59 |     pub(crate) unsafe fn from_ptr(ptr: *mut rdsys::rd_kafka_error_t) -> RDKafkaError {
60 |         RDKafkaError(NativePtr::from_ptr(ptr).map(Arc::new))
61 |     }
62 |
63 |     fn ptr(&self) -> *const rdsys::rd_kafka_error_t {
64 |         match &self.0 {
65 |             None => ptr::null(),
66 |             Some(p) => p.ptr(),
67 |         }
68 |     }
69 |
70 |     /// Returns the error code or [`RDKafkaErrorCode::NoError`] if the error is
71 |     /// null.
72 |     pub fn code(&self) -> RDKafkaErrorCode {
73 |         unsafe { rdsys::rd_kafka_error_code(self.ptr()).into() }
74 |     }
75 |
76 |     /// Returns the error code name, e.g., "ERR_UNKNOWN_MEMBER_ID" or an empty
77 |     /// string if the error is null.
78 |     pub fn name(&self) -> String {
79 |         let cstr = unsafe { rdsys::rd_kafka_error_name(self.ptr()) };
80 |         unsafe { CStr::from_ptr(cstr).to_string_lossy().into_owned() }
81 |     }
82 |
83 |     /// Returns a human readable error string or an empty string if the error is
84 |     /// null.
85 |     pub fn string(&self) -> String {
86 |         let cstr = unsafe { rdsys::rd_kafka_error_string(self.ptr()) };
87 |         unsafe { CStr::from_ptr(cstr).to_string_lossy().into_owned() }
88 |     }
89 |
90 |     /// Reports whether the error is a fatal error.
91 |     ///
92 |     /// A fatal error indicates that the client instance is no longer usable.
93 |     pub fn is_fatal(&self) -> bool {
94 |         unsafe { rdsys::rd_kafka_error_is_fatal(self.ptr()) != 0 }
95 |     }
96 |
97 |     /// Reports whether the operation that encountered the error can be retried.
98 |     pub fn is_retriable(&self) -> bool {
99 |         unsafe { rdsys::rd_kafka_error_is_retriable(self.ptr()) != 0 }
100 |     }
101 |
102 |     /// Reports whether the error is an abortable transaction error.
103 |     pub fn txn_requires_abort(&self) -> bool {
104 |         unsafe { rdsys::rd_kafka_error_txn_requires_abort(self.ptr()) != 0 }
105 |     }
106 | }
107 |
108 | impl PartialEq for RDKafkaError {
109 |     fn eq(&self, other: &RDKafkaError) -> bool {
110 |         self.code() == other.code()
111 |     }
112 | }
113 |
114 | impl Eq for RDKafkaError {}
115 |
116 | impl fmt::Debug for RDKafkaError {
117 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
118 |         write!(f, "RDKafkaError({})", self)
119 |     }
120 | }
121 |
122 | impl fmt::Display for RDKafkaError {
123 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
124 |         f.write_str(&self.string())
125 |     }
126 | }
127 |
128 | impl Error for RDKafkaError {}
129 |
130 | // TODO: consider using macro
131 |
132 | /// Represents all possible Kafka errors.
133 | ///
134 | /// If applicable, check the underlying [`RDKafkaErrorCode`] to get details.
135 | #[derive(Clone, PartialEq, Eq)]
136 | #[non_exhaustive]
137 | pub enum KafkaError {
138 |     /// Creation of admin operation failed.
139 | AdminOpCreation(String), 140 | /// The admin operation itself failed. 141 | AdminOp(RDKafkaErrorCode), 142 | /// The client was dropped before the operation completed. 143 | Canceled, 144 | /// Invalid client configuration. 145 | ClientConfig(RDKafkaConfRes, String, String, String), 146 | /// Client creation failed. 147 | ClientCreation(String), 148 | /// Consumer commit failed. 149 | ConsumerCommit(RDKafkaErrorCode), 150 | /// Consumer queue close failed. 151 | ConsumerQueueClose(RDKafkaErrorCode), 152 | /// Flushing failed 153 | Flush(RDKafkaErrorCode), 154 | /// Global error. 155 | Global(RDKafkaErrorCode), 156 | /// Group list fetch failed. 157 | GroupListFetch(RDKafkaErrorCode), 158 | /// Message consumption failed. 159 | MessageConsumption(RDKafkaErrorCode), 160 | /// Message consumption failed with fatal error. 161 | MessageConsumptionFatal(RDKafkaErrorCode), 162 | /// Message production error. 163 | MessageProduction(RDKafkaErrorCode), 164 | /// Metadata fetch error. 165 | MetadataFetch(RDKafkaErrorCode), 166 | /// No message was received. 167 | NoMessageReceived, 168 | /// Unexpected null pointer 169 | Nul(ffi::NulError), 170 | /// Offset fetch failed. 171 | OffsetFetch(RDKafkaErrorCode), 172 | /// End of partition reached. 173 | PartitionEOF(i32), 174 | /// Pause/Resume failed. 175 | PauseResume(String), 176 | /// Rebalance failed. 177 | Rebalance(RDKafkaErrorCode), 178 | /// Seeking a partition failed. 179 | Seek(String), 180 | /// Setting partition offset failed. 181 | SetPartitionOffset(RDKafkaErrorCode), 182 | /// Offset store failed. 183 | StoreOffset(RDKafkaErrorCode), 184 | /// Subscription creation failed. 185 | Subscription(String), 186 | /// Transaction error. 187 | Transaction(RDKafkaError), 188 | /// Mock Cluster error 189 | MockCluster(RDKafkaErrorCode), 190 | } 191 | 192 | impl fmt::Debug for KafkaError { 193 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 194 | match self { 195 | KafkaError::AdminOp(err) => write!(f, "KafkaError (Admin operation error: {})", err), 196 | KafkaError::AdminOpCreation(err) => { 197 | write!(f, "KafkaError (Admin operation creation error: {})", err) 198 | } 199 | KafkaError::Canceled => write!(f, "KafkaError (Client dropped)"), 200 | KafkaError::ClientConfig(_, desc, key, value) => write!( 201 | f, 202 | "KafkaError (Client config error: {} {} {})", 203 | desc, key, value 204 | ), 205 | KafkaError::ClientCreation(err) => { 206 | write!(f, "KafkaError (Client creation error: {})", err) 207 | } 208 | KafkaError::ConsumerCommit(err) => { 209 | write!(f, "KafkaError (Consumer commit error: {})", err) 210 | } 211 | KafkaError::ConsumerQueueClose(err) => { 212 | write!(f, "KafkaError (Consumer queue close error: {})", err) 213 | } 214 | KafkaError::Flush(err) => write!(f, "KafkaError (Flush error: {})", err), 215 | KafkaError::Global(err) => write!(f, "KafkaError (Global error: {})", err), 216 | KafkaError::GroupListFetch(err) => { 217 | write!(f, "KafkaError (Group list fetch error: {})", err) 218 | } 219 | KafkaError::MessageConsumption(err) => { 220 | write!(f, "KafkaError (Message consumption error: {})", err) 221 | } 222 | KafkaError::MessageConsumptionFatal(err) => { 223 | write!(f, "(Fatal) KafkaError (Message consumption error: {})", err) 224 | } 225 | KafkaError::MessageProduction(err) => { 226 | write!(f, "KafkaError (Message production error: {})", err) 227 | } 228 | KafkaError::MetadataFetch(err) => { 229 | write!(f, "KafkaError (Metadata fetch error: {})", err) 230 | } 231 | KafkaError::NoMessageReceived => { 
232 | write!(f, "No message received within the given poll interval") 233 | } 234 | KafkaError::Nul(_) => write!(f, "FFI null error"), 235 | KafkaError::OffsetFetch(err) => write!(f, "KafkaError (Offset fetch error: {})", err), 236 | KafkaError::PartitionEOF(part_n) => write!(f, "KafkaError (Partition EOF: {})", part_n), 237 | KafkaError::PauseResume(err) => { 238 | write!(f, "KafkaError (Pause/resume error: {})", err) 239 | } 240 | KafkaError::Rebalance(err) => write!(f, "KafkaError (Rebalance error: {})", err), 241 | KafkaError::Seek(err) => write!(f, "KafkaError (Seek error: {})", err), 242 | KafkaError::SetPartitionOffset(err) => { 243 | write!(f, "KafkaError (Set partition offset error: {})", err) 244 | } 245 | KafkaError::StoreOffset(err) => write!(f, "KafkaError (Store offset error: {})", err), 246 | KafkaError::Subscription(err) => { 247 | write!(f, "KafkaError (Subscription error: {})", err) 248 | } 249 | KafkaError::Transaction(err) => write!(f, "KafkaError (Transaction error: {})", err), 250 | KafkaError::MockCluster(err) => write!(f, "KafkaError (Mock cluster error: {})", err), 251 | } 252 | } 253 | } 254 | 255 | impl fmt::Display for KafkaError { 256 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 257 | match self { 258 | KafkaError::AdminOp(err) => write!(f, "Admin operation error: {}", err), 259 | KafkaError::AdminOpCreation(err) => { 260 | write!(f, "Admin operation creation error: {}", err) 261 | } 262 | KafkaError::Canceled => write!(f, "KafkaError (Client dropped)"), 263 | KafkaError::ClientConfig(_, desc, key, value) => { 264 | write!(f, "Client config error: {} {} {}", desc, key, value) 265 | } 266 | KafkaError::ClientCreation(err) => write!(f, "Client creation error: {}", err), 267 | KafkaError::ConsumerCommit(err) => write!(f, "Consumer commit error: {}", err), 268 | KafkaError::ConsumerQueueClose(err) => write!(f, "Consumer queue close error: {}", err), 269 | KafkaError::Flush(err) => write!(f, "Flush error: {}", err), 270 | KafkaError::Global(err) => write!(f, "Global error: {}", err), 271 | KafkaError::GroupListFetch(err) => write!(f, "Group list fetch error: {}", err), 272 | KafkaError::MessageConsumption(err) => write!(f, "Message consumption error: {}", err), 273 | KafkaError::MessageConsumptionFatal(err) => { 274 | write!(f, "(Fatal) Message consumption error: {}", err) 275 | } 276 | KafkaError::MessageProduction(err) => write!(f, "Message production error: {}", err), 277 | KafkaError::MetadataFetch(err) => write!(f, "Meta data fetch error: {}", err), 278 | KafkaError::NoMessageReceived => { 279 | write!(f, "No message received within the given poll interval") 280 | } 281 | KafkaError::Nul(_) => write!(f, "FFI nul error"), 282 | KafkaError::OffsetFetch(err) => write!(f, "Offset fetch error: {}", err), 283 | KafkaError::PartitionEOF(part_n) => write!(f, "Partition EOF: {}", part_n), 284 | KafkaError::PauseResume(err) => write!(f, "Pause/resume error: {}", err), 285 | KafkaError::Rebalance(err) => write!(f, "Rebalance error: {}", err), 286 | KafkaError::Seek(err) => write!(f, "Seek error: {}", err), 287 | KafkaError::SetPartitionOffset(err) => write!(f, "Set partition offset error: {}", err), 288 | KafkaError::StoreOffset(err) => write!(f, "Store offset error: {}", err), 289 | KafkaError::Subscription(err) => write!(f, "Subscription error: {}", err), 290 | KafkaError::Transaction(err) => write!(f, "Transaction error: {}", err), 291 | KafkaError::MockCluster(err) => write!(f, "Mock cluster error: {}", err), 292 | } 293 | } 294 | } 295 | 296 | impl 
Error for KafkaError {
297 |     fn source(&self) -> Option<&(dyn Error + 'static)> {
298 |         match self {
299 |             KafkaError::AdminOp(_) => None,
300 |             KafkaError::AdminOpCreation(_) => None,
301 |             KafkaError::Canceled => None,
302 |             KafkaError::ClientConfig(..) => None,
303 |             KafkaError::ClientCreation(_) => None,
304 |             KafkaError::ConsumerCommit(err) => Some(err),
305 |             KafkaError::ConsumerQueueClose(err) => Some(err),
306 |             KafkaError::Flush(err) => Some(err),
307 |             KafkaError::Global(err) => Some(err),
308 |             KafkaError::GroupListFetch(err) => Some(err),
309 |             KafkaError::MessageConsumption(err) => Some(err),
310 |             KafkaError::MessageConsumptionFatal(err) => Some(err),
311 |             KafkaError::MessageProduction(err) => Some(err),
312 |             KafkaError::MetadataFetch(err) => Some(err),
313 |             KafkaError::NoMessageReceived => None,
314 |             KafkaError::Nul(_) => None,
315 |             KafkaError::OffsetFetch(err) => Some(err),
316 |             KafkaError::PartitionEOF(_) => None,
317 |             KafkaError::PauseResume(_) => None,
318 |             KafkaError::Rebalance(err) => Some(err),
319 |             KafkaError::Seek(_) => None,
320 |             KafkaError::SetPartitionOffset(err) => Some(err),
321 |             KafkaError::StoreOffset(err) => Some(err),
322 |             KafkaError::Subscription(_) => None,
323 |             KafkaError::Transaction(err) => Some(err),
324 |             KafkaError::MockCluster(err) => Some(err),
325 |         }
326 |     }
327 | }
328 |
329 | impl From<ffi::NulError> for KafkaError {
330 |     fn from(err: ffi::NulError) -> KafkaError {
331 |         KafkaError::Nul(err)
332 |     }
333 | }
334 |
335 | impl KafkaError {
336 |     /// Returns the [`RDKafkaErrorCode`] underlying this error, if any.
337 |     #[allow(clippy::match_same_arms)]
338 |     pub fn rdkafka_error_code(&self) -> Option<RDKafkaErrorCode> {
339 |         match self {
340 |             KafkaError::AdminOp(_) => None,
341 |             KafkaError::AdminOpCreation(_) => None,
342 |             KafkaError::Canceled => None,
343 |             KafkaError::ClientConfig(..) => None,
344 |             KafkaError::ClientCreation(_) => None,
345 |             KafkaError::ConsumerCommit(err) => Some(*err),
346 |             KafkaError::ConsumerQueueClose(err) => Some(*err),
347 |             KafkaError::Flush(err) => Some(*err),
348 |             KafkaError::Global(err) => Some(*err),
349 |             KafkaError::GroupListFetch(err) => Some(*err),
350 |             KafkaError::MessageConsumption(err) => Some(*err),
351 |             KafkaError::MessageConsumptionFatal(err) => Some(*err),
352 |             KafkaError::MessageProduction(err) => Some(*err),
353 |             KafkaError::MetadataFetch(err) => Some(*err),
354 |             KafkaError::NoMessageReceived => None,
355 |             KafkaError::Nul(_) => None,
356 |             KafkaError::OffsetFetch(err) => Some(*err),
357 |             KafkaError::PartitionEOF(_) => None,
358 |             KafkaError::PauseResume(_) => None,
359 |             KafkaError::Rebalance(err) => Some(*err),
360 |             KafkaError::Seek(_) => None,
361 |             KafkaError::SetPartitionOffset(err) => Some(*err),
362 |             KafkaError::StoreOffset(err) => Some(*err),
363 |             KafkaError::Subscription(_) => None,
364 |             KafkaError::Transaction(err) => Some(err.code()),
365 |             KafkaError::MockCluster(err) => Some(*err),
366 |         }
367 |     }
368 | }
369 |
--------------------------------------------------------------------------------