├── .gitignore ├── README.md ├── c └── rdkafka.h ├── dub.json └── source └── deimos └── rdkafka.d /.gitignore: -------------------------------------------------------------------------------- 1 | *.a 2 | dub.selections.json 3 | .dub 4 | *.kate-swp 5 | .directory 6 | 7 | *.sublime-project 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # librdkafka 2 | D header for [librdkafka](https://github.com/edenhill/librdkafka) 3 | 4 | See also examples at [https://github.com/dushibaiyu/kfkaConsumer](dushibaiyu/kfkaConsumer). 5 | -------------------------------------------------------------------------------- /c/rdkafka.h: -------------------------------------------------------------------------------- 1 | /* 2 | * librdkafka - Apache Kafka C library 3 | * 4 | * Copyright (c) 2012-2013 Magnus Edenhill 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, 13 | * this list of conditions and the following disclaimer in the documentation 14 | * and/or other materials provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 | * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 | * POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | 29 | /** 30 | * @file rdkafka.h 31 | * @brief Apache Kafka C/C++ consumer and producer client library. 32 | * 33 | * rdkafka.h contains the public API for librdkafka. 34 | * The API is documented in this file as comments prefixing the function, type, 35 | * enum, define, etc. 36 | * 37 | * @sa For the C++ interface see rdkafkacpp.h 38 | * 39 | * @tableofcontents 40 | */ 41 | 42 | 43 | /* @cond NO_DOC */ 44 | #pragma once 45 | 46 | #include 47 | #include 48 | #include 49 | 50 | #ifdef __cplusplus 51 | extern "C" { 52 | #if 0 53 | } /* Restore indent */ 54 | #endif 55 | #endif 56 | 57 | #ifdef _MSC_VER 58 | #include 59 | #ifndef WIN32_MEAN_AND_LEAN 60 | #define WIN32_MEAN_AND_LEAN 61 | #endif 62 | #include /* for sockaddr, .. */ 63 | typedef SSIZE_T ssize_t; 64 | #define RD_UNUSED 65 | #define RD_INLINE __inline 66 | #define RD_DEPRECATED 67 | #undef RD_EXPORT 68 | #ifdef LIBRDKAFKA_STATICLIB 69 | #define RD_EXPORT 70 | #else 71 | #ifdef LIBRDKAFKA_EXPORTS 72 | #define RD_EXPORT __declspec(dllexport) 73 | #else 74 | #define RD_EXPORT __declspec(dllimport) 75 | #endif 76 | #ifndef LIBRDKAFKA_TYPECHECKS 77 | #define LIBRDKAFKA_TYPECHECKS 0 78 | #endif 79 | #endif 80 | 81 | #else 82 | #include /* for sockaddr, .. 
*/ 83 | 84 | #define RD_UNUSED __attribute__((unused)) 85 | #define RD_INLINE inline 86 | #define RD_EXPORT 87 | #define RD_DEPRECATED __attribute__((deprecated)) 88 | 89 | #ifndef LIBRDKAFKA_TYPECHECKS 90 | #define LIBRDKAFKA_TYPECHECKS 1 91 | #endif 92 | #endif 93 | 94 | 95 | /** 96 | * @brief Type-checking macros 97 | * Compile-time checking that \p ARG is of type \p TYPE. 98 | * @returns \p RET 99 | */ 100 | #if LIBRDKAFKA_TYPECHECKS 101 | #define _LRK_TYPECHECK(RET,TYPE,ARG) \ 102 | ({ if (0) { TYPE __t RD_UNUSED = (ARG); } RET; }) 103 | 104 | #define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) \ 105 | ({ \ 106 | if (0) { \ 107 | TYPE __t RD_UNUSED = (ARG); \ 108 | TYPE2 __t2 RD_UNUSED = (ARG2); \ 109 | } \ 110 | RET; }) 111 | #else 112 | #define _LRK_TYPECHECK(RET,TYPE,ARG) (RET) 113 | #define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) (RET) 114 | #endif 115 | 116 | /* @endcond */ 117 | 118 | 119 | /** 120 | * @name librdkafka version 121 | * @{ 122 | * 123 | * 124 | */ 125 | 126 | /** 127 | * @brief librdkafka version 128 | * 129 | * Interpreted as hex \c MM.mm.rr.xx: 130 | * - MM = Major 131 | * - mm = minor 132 | * - rr = revision 133 | * - xx = pre-release id (0xff is the final release) 134 | * 135 | * E.g.: \c 0x000801ff = 0.8.1 136 | * 137 | * @remark This value should only be used during compile time, 138 | * for runtime checks of version use rd_kafka_version() 139 | */ 140 | #define RD_KAFKA_VERSION 0x000904ff 141 | 142 | /** 143 | * @brief Returns the librdkafka version as integer. 144 | * 145 | * @returns Version integer. 146 | * 147 | * @sa See RD_KAFKA_VERSION for how to parse the integer format. 148 | * @sa Use rd_kafka_version_str() to retreive the version as a string. 149 | */ 150 | RD_EXPORT 151 | int rd_kafka_version(void); 152 | 153 | /** 154 | * @brief Returns the librdkafka version as string. 
155 | * 156 | * @returns Version string 157 | */ 158 | RD_EXPORT 159 | const char *rd_kafka_version_str (void); 160 | 161 | /**@}*/ 162 | 163 | 164 | /** 165 | * @name Constants, errors, types 166 | * @{ 167 | * 168 | * 169 | */ 170 | 171 | 172 | /** 173 | * @enum rd_kafka_type_t 174 | * 175 | * @brief rd_kafka_t handle type. 176 | * 177 | * @sa rd_kafka_new() 178 | */ 179 | typedef enum rd_kafka_type_t { 180 | RD_KAFKA_PRODUCER, /**< Producer client */ 181 | RD_KAFKA_CONSUMER /**< Consumer client */ 182 | } rd_kafka_type_t; 183 | 184 | 185 | /** 186 | * @enum Timestamp types 187 | * 188 | * @sa rd_kafka_message_timestamp() 189 | */ 190 | typedef enum rd_kafka_timestamp_type_t { 191 | RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ 192 | RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ 193 | RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ 194 | } rd_kafka_timestamp_type_t; 195 | 196 | 197 | 198 | /** 199 | * @brief Retrieve supported debug contexts for use with the \c \"debug\" 200 | * configuration property. (runtime) 201 | * 202 | * @returns Comma-separated list of available debugging contexts. 203 | */ 204 | RD_EXPORT 205 | const char *rd_kafka_get_debug_contexts(void); 206 | 207 | /** 208 | * @brief Supported debug contexts. (compile time) 209 | * 210 | * @deprecated This compile time value may be outdated at runtime due to 211 | * linking another version of the library. 212 | * Use rd_kafka_get_debug_contexts() instead. 
213 | */ 214 | #define RD_KAFKA_DEBUG_CONTEXTS \ 215 | "all,generic,broker,topic,metadata,queue,msg,protocol,cgrp,security,fetch,feature" 216 | 217 | 218 | /* @cond NO_DOC */ 219 | /* Private types to provide ABI compatibility */ 220 | typedef struct rd_kafka_s rd_kafka_t; 221 | typedef struct rd_kafka_topic_s rd_kafka_topic_t; 222 | typedef struct rd_kafka_conf_s rd_kafka_conf_t; 223 | typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; 224 | typedef struct rd_kafka_queue_s rd_kafka_queue_t; 225 | /* @endcond */ 226 | 227 | 228 | /** 229 | * @enum rd_kafka_resp_err_t 230 | * @brief Error codes. 231 | * 232 | * The negative error codes delimited by two underscores 233 | * (\c RD_KAFKA_RESP_ERR__..) denotes errors internal to librdkafka and are 234 | * displayed as \c \"Local: \\", while the error codes 235 | * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker 236 | * errors and are displayed as \c \"Broker: \\". 237 | * 238 | * @sa Use rd_kafka_err2str() to translate an error code a human readable string 239 | */ 240 | typedef enum { 241 | /* Internal errors to rdkafka: */ 242 | /** Begin internal error codes */ 243 | RD_KAFKA_RESP_ERR__BEGIN = -200, 244 | /** Received message is incorrect */ 245 | RD_KAFKA_RESP_ERR__BAD_MSG = -199, 246 | /** Bad/unknown compression */ 247 | RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, 248 | /** Broker is going away */ 249 | RD_KAFKA_RESP_ERR__DESTROY = -197, 250 | /** Generic failure */ 251 | RD_KAFKA_RESP_ERR__FAIL = -196, 252 | /** Broker transport failure */ 253 | RD_KAFKA_RESP_ERR__TRANSPORT = -195, 254 | /** Critical system resource */ 255 | RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, 256 | /** Failed to resolve broker */ 257 | RD_KAFKA_RESP_ERR__RESOLVE = -193, 258 | /** Produced message timed out*/ 259 | RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, 260 | /** Reached the end of the topic+partition queue on 261 | * the broker. Not really an error. 
*/ 262 | RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, 263 | /** Permanent: Partition does not exist in cluster. */ 264 | RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, 265 | /** File or filesystem error */ 266 | RD_KAFKA_RESP_ERR__FS = -189, 267 | /** Permanent: Topic does not exist in cluster. */ 268 | RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, 269 | /** All broker connections are down. */ 270 | RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, 271 | /** Invalid argument, or invalid configuration */ 272 | RD_KAFKA_RESP_ERR__INVALID_ARG = -186, 273 | /** Operation timed out */ 274 | RD_KAFKA_RESP_ERR__TIMED_OUT = -185, 275 | /** Queue is full */ 276 | RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, 277 | /** ISR count < required.acks */ 278 | RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, 279 | /** Broker node update */ 280 | RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, 281 | /** SSL error */ 282 | RD_KAFKA_RESP_ERR__SSL = -181, 283 | /** Waiting for coordinator to become available. */ 284 | RD_KAFKA_RESP_ERR__WAIT_COORD = -180, 285 | /** Unknown client group */ 286 | RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, 287 | /** Operation in progress */ 288 | RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, 289 | /** Previous operation in progress, wait for it to finish. 
*/ 290 | RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, 291 | /** This operation would interfere with an existing subscription */ 292 | RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, 293 | /** Assigned partitions (rebalance_cb) */ 294 | RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, 295 | /** Revoked partitions (rebalance_cb) */ 296 | RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, 297 | /** Conflicting use */ 298 | RD_KAFKA_RESP_ERR__CONFLICT = -173, 299 | /** Wrong state */ 300 | RD_KAFKA_RESP_ERR__STATE = -172, 301 | /** Unknown protocol */ 302 | RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, 303 | /** Not implemented */ 304 | RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, 305 | /** Authentication failure*/ 306 | RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, 307 | /** No stored offset */ 308 | RD_KAFKA_RESP_ERR__NO_OFFSET = -168, 309 | /** Outdated */ 310 | RD_KAFKA_RESP_ERR__OUTDATED = -167, 311 | /** Timed out in queue */ 312 | RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, 313 | /** Feature not supported by broker */ 314 | RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, 315 | /** Awaiting cache update */ 316 | RD_KAFKA_RESP_ERR__WAIT_CACHE = -164, 317 | 318 | /** End internal error codes */ 319 | RD_KAFKA_RESP_ERR__END = -100, 320 | 321 | /* Kafka broker errors: */ 322 | /** Unknown broker error */ 323 | RD_KAFKA_RESP_ERR_UNKNOWN = -1, 324 | /** Success */ 325 | RD_KAFKA_RESP_ERR_NO_ERROR = 0, 326 | /** Offset out of range */ 327 | RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, 328 | /** Invalid message */ 329 | RD_KAFKA_RESP_ERR_INVALID_MSG = 2, 330 | /** Unknown topic or partition */ 331 | RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, 332 | /** Invalid message size */ 333 | RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, 334 | /** Leader not available */ 335 | RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, 336 | /** Not leader for partition */ 337 | RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, 338 | /** Request timed out */ 339 | RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, 340 | /** Broker not 
available */ 341 | RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, 342 | /** Replica not available */ 343 | RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, 344 | /** Message size too large */ 345 | RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, 346 | /** StaleControllerEpochCode */ 347 | RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, 348 | /** Offset metadata string too large */ 349 | RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, 350 | /** Broker disconnected before response received */ 351 | RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, 352 | /** Group coordinator load in progress */ 353 | RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14, 354 | /** Group coordinator not available */ 355 | RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15, 356 | /** Not coordinator for group */ 357 | RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16, 358 | /** Invalid topic */ 359 | RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, 360 | /** Message batch larger than configured server segment size */ 361 | RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, 362 | /** Not enough in-sync replicas */ 363 | RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, 364 | /** Message(s) written to insufficient number of in-sync replicas */ 365 | RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, 366 | /** Invalid required acks value */ 367 | RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, 368 | /** Specified group generation id is not valid */ 369 | RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, 370 | /** Inconsistent group protocol */ 371 | RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, 372 | /** Invalid group.id */ 373 | RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, 374 | /** Unknown member */ 375 | RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, 376 | /** Invalid session timeout */ 377 | RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, 378 | /** Group rebalance in progress */ 379 | RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, 380 | /** Commit offset data size is not valid */ 381 | RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, 382 
| /** Topic authorization failed */ 383 | RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, 384 | /** Group authorization failed */ 385 | RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, 386 | /** Cluster authorization failed */ 387 | RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, 388 | /** Invalid timestamp */ 389 | RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, 390 | /** Unsupported SASL mechanism */ 391 | RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, 392 | /** Illegal SASL state */ 393 | RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, 394 | /** Unuspported version */ 395 | RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, 396 | /** Topic already exists */ 397 | RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, 398 | /** Invalid number of partitions */ 399 | RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, 400 | /** Invalid replication factor */ 401 | RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, 402 | /** Invalid replica assignment */ 403 | RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, 404 | /** Invalid config */ 405 | RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, 406 | /** Not controller for cluster */ 407 | RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, 408 | /** Invalid request */ 409 | RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, 410 | /** Message format on broker does not support request */ 411 | RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, 412 | 413 | RD_KAFKA_RESP_ERR_END_ALL, 414 | } rd_kafka_resp_err_t; 415 | 416 | 417 | /** 418 | * @brief Error code value, name and description. 419 | * Typically for use with language bindings to automatically expose 420 | * the full set of librdkafka error codes. 421 | */ 422 | struct rd_kafka_err_desc { 423 | rd_kafka_resp_err_t code;/**< Error code */ 424 | const char *name; /**< Error name, same as code enum sans prefix */ 425 | const char *desc; /**< Human readable error description. */ 426 | }; 427 | 428 | 429 | /** 430 | * @brief Returns the full list of error codes. 
431 | */ 432 | RD_EXPORT 433 | void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, 434 | size_t *cntp); 435 | 436 | 437 | 438 | 439 | /** 440 | * @brief Returns a human readable representation of a kafka error. 441 | * 442 | * @param err Error code to translate 443 | */ 444 | RD_EXPORT 445 | const char *rd_kafka_err2str (rd_kafka_resp_err_t err); 446 | 447 | 448 | 449 | /** 450 | * @brief Returns the error code name (enum name). 451 | * 452 | * @param err Error code to translate 453 | */ 454 | RD_EXPORT 455 | const char *rd_kafka_err2name (rd_kafka_resp_err_t err); 456 | 457 | 458 | /** 459 | * @brief Returns the last error code generated by a legacy API call 460 | * in the current thread. 461 | * 462 | * The legacy APIs are the ones using errno to propagate error value, namely: 463 | * - rd_kafka_topic_new() 464 | * - rd_kafka_consume_start() 465 | * - rd_kafka_consume_stop() 466 | * - rd_kafka_consume() 467 | * - rd_kafka_consume_batch() 468 | * - rd_kafka_consume_callback() 469 | * - rd_kafka_consume_queue() 470 | * - rd_kafka_produce() 471 | * 472 | * The main use for this function is to avoid converting system \p errno 473 | * values to rd_kafka_resp_err_t codes for legacy APIs. 474 | * 475 | * @remark The last error is stored per-thread, if multiple rd_kafka_t handles 476 | * are used in the same application thread the developer needs to 477 | * make sure rd_kafka_last_error() is called immediately after 478 | * a failed API call. 
479 | */ 480 | RD_EXPORT 481 | rd_kafka_resp_err_t rd_kafka_last_error (void); 482 | 483 | 484 | /** 485 | * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t 486 | * error code upon failure from the following functions: 487 | * - rd_kafka_topic_new() 488 | * - rd_kafka_consume_start() 489 | * - rd_kafka_consume_stop() 490 | * - rd_kafka_consume() 491 | * - rd_kafka_consume_batch() 492 | * - rd_kafka_consume_callback() 493 | * - rd_kafka_consume_queue() 494 | * - rd_kafka_produce() 495 | * 496 | * @param errnox System errno value to convert 497 | * 498 | * @returns Appropriate error code for \p errnox 499 | * 500 | * @remark A better alternative is to call rd_kafka_last_error() immediately 501 | * after any of the above functions return -1 or NULL. 502 | * 503 | * @sa rd_kafka_last_error() 504 | */ 505 | RD_EXPORT 506 | rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); 507 | 508 | 509 | /** 510 | * @brief Returns the thread-local system errno 511 | * 512 | * On most platforms this is the same as \p errno but in case of different 513 | * runtimes between library and application (e.g., Windows static DLLs) 514 | * this provides a means for expsing the errno librdkafka uses. 515 | * 516 | * @remark The value is local to the current calling thread. 517 | */ 518 | RD_EXPORT 519 | int rd_kafka_errno (void); 520 | 521 | 522 | 523 | /** 524 | * @brief Topic+Partition place holder 525 | * 526 | * Generic place holder for a Topic+Partition and its related information 527 | * used for multiple purposes: 528 | * - consumer offset (see rd_kafka_commit(), et.al.) 529 | * - group rebalancing callback (rd_kafka_conf_set_rebalance_cb()) 530 | * - offset commit result callback (rd_kafka_conf_set_offset_commit_cb()) 531 | */ 532 | 533 | /** 534 | * @brief Generic place holder for a specific Topic+Partition. 
535 | * 536 | * @sa rd_kafka_topic_partition_list_new() 537 | */ 538 | typedef struct rd_kafka_topic_partition_s { 539 | char *topic; /**< Topic name */ 540 | int32_t partition; /**< Partition */ 541 | int64_t offset; /**< Offset */ 542 | void *metadata; /**< Metadata */ 543 | size_t metadata_size; /**< Metadata size */ 544 | void *opaque; /**< Application opaque */ 545 | rd_kafka_resp_err_t err; /**< Error code, depending on use. */ 546 | void *_private; /**< INTERNAL USE ONLY, 547 | * INITIALIZE TO ZERO, DO NOT TOUCH */ 548 | } rd_kafka_topic_partition_t; 549 | 550 | 551 | /** 552 | * @brief Destroy a rd_kafka_topic_partition_t. 553 | * @remark This must not be called for elements in a topic partition list. 554 | */ 555 | RD_EXPORT 556 | void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); 557 | 558 | 559 | /** 560 | * @brief A growable list of Topic+Partitions. 561 | * 562 | */ 563 | typedef struct rd_kafka_topic_partition_list_s { 564 | int cnt; /**< Current number of elements */ 565 | int size; /**< Current allocated size */ 566 | rd_kafka_topic_partition_t *elems; /**< Element array[] */ 567 | } rd_kafka_topic_partition_list_t; 568 | 569 | 570 | /** 571 | * @brief Create a new list/vector Topic+Partition container. 572 | * 573 | * @param size Initial allocated size used when the expected number of 574 | * elements is known or can be estimated. 575 | * Avoids reallocation and possibly relocation of the 576 | * elems array. 577 | * 578 | * @returns A newly allocated Topic+Partition list. 579 | * 580 | * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources 581 | * in use by a list and the list itself. 582 | * @sa rd_kafka_topic_partition_list_add() 583 | */ 584 | RD_EXPORT 585 | rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size); 586 | 587 | 588 | /** 589 | * @brief Free all resources used by the list and the list itself. 
590 | */ 591 | RD_EXPORT 592 | void 593 | rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist); 594 | 595 | /** 596 | * @brief Add topic+partition to list 597 | * 598 | * @param rktparlist List to extend 599 | * @param topic Topic name (copied) 600 | * @param partition Partition id 601 | * 602 | * @returns The object which can be used to fill in additionals fields. 603 | */ 604 | RD_EXPORT 605 | rd_kafka_topic_partition_t * 606 | rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, 607 | const char *topic, int32_t partition); 608 | 609 | 610 | /** 611 | * @brief Add range of partitions from \p start to \p stop inclusive. 612 | * 613 | * @param rktparlist List to extend 614 | * @param topic Topic name (copied) 615 | * @param start Start partition of range 616 | * @param stop Last partition of range (inclusive) 617 | */ 618 | RD_EXPORT 619 | void 620 | rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t 621 | *rktparlist, 622 | const char *topic, 623 | int32_t start, int32_t stop); 624 | 625 | 626 | 627 | /** 628 | * @brief Delete partition from list. 629 | * 630 | * @param rktparlist List to modify 631 | * @param topic Topic name to match 632 | * @param partition Partition to match 633 | * 634 | * @returns 1 if partition was found (and removed), else 0. 635 | * 636 | * @remark Any held indices to elems[] are unusable after this call returns 1. 637 | */ 638 | RD_EXPORT 639 | int 640 | rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, 641 | const char *topic, int32_t partition); 642 | 643 | 644 | /** 645 | * @brief Delete partition from list by elems[] index. 646 | * 647 | * @returns 1 if partition was found (and removed), else 0. 
648 | * 649 | * @sa rd_kafka_topic_partition_list_del() 650 | */ 651 | RD_EXPORT 652 | int 653 | rd_kafka_topic_partition_list_del_by_idx ( 654 | rd_kafka_topic_partition_list_t *rktparlist, 655 | int idx); 656 | 657 | 658 | /** 659 | * @brief Make a copy of an existing list. 660 | * 661 | * @param src The existing list to copy. 662 | * 663 | * @returns A new list fully populated to be identical to \p src 664 | */ 665 | RD_EXPORT 666 | rd_kafka_topic_partition_list_t * 667 | rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); 668 | 669 | 670 | 671 | 672 | /** 673 | * @brief Set offset to \p offset for \p topic and \p partition 674 | * 675 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or 676 | * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found 677 | * in the list. 678 | */ 679 | RD_EXPORT 680 | rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( 681 | rd_kafka_topic_partition_list_t *rktparlist, 682 | const char *topic, int32_t partition, int64_t offset); 683 | 684 | 685 | 686 | /** 687 | * @brief Find element by \p topic and \p partition. 688 | * 689 | * @returns a pointer to the first matching element, or NULL if not found. 690 | */ 691 | RD_EXPORT 692 | rd_kafka_topic_partition_t * 693 | rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist, 694 | const char *topic, int32_t partition); 695 | 696 | 697 | /** 698 | * @brief Sort list using comparator \p cmp. 699 | * 700 | * If \p cmp is NULL the default comparator will be used that 701 | * sorts by ascending topic name and partition. 
702 | * 703 | */ 704 | RD_EXPORT void 705 | rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t *rktparlist, 706 | int (*cmp) (const void *a, const void *b, 707 | void *opaque), 708 | void *opaque); 709 | 710 | 711 | /**@}*/ 712 | 713 | 714 | 715 | /** 716 | * @name Var-arg tag types 717 | * @{ 718 | * 719 | */ 720 | 721 | /** 722 | * @enum rd_kafka_vtype_t 723 | * 724 | * @brief Var-arg tag types 725 | * 726 | * @sa rd_kafka_producev() 727 | */ 728 | typedef enum rd_kafka_vtype_t { 729 | RD_KAFKA_VTYPE_END, /**< va-arg sentinel */ 730 | RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ 731 | RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ 732 | RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ 733 | RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ 734 | RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ 735 | RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Application opaque */ 736 | RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ 737 | RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ 738 | } rd_kafka_vtype_t; 739 | 740 | 741 | /** 742 | * @brief Convenience macros for rd_kafka_vtype_t that takes the 743 | * correct arguments for each vtype. 744 | */ 745 | 746 | /*! 747 | * va-arg end sentinel used to terminate the variable argument list 748 | */ 749 | #define RD_KAFKA_V_END RD_KAFKA_VTYPE_END 750 | 751 | /*! 752 | * Topic name (const char *) 753 | */ 754 | #define RD_KAFKA_V_TOPIC(topic) \ 755 | _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ 756 | (const char *)topic 757 | /*! 758 | * Topic object (rd_kafka_topic_t *) 759 | */ 760 | #define RD_KAFKA_V_RKT(rkt) \ 761 | _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ 762 | (rd_kafka_topic_t *)rkt 763 | /*! 764 | * Partition (int32_t) 765 | */ 766 | #define RD_KAFKA_V_PARTITION(partition) \ 767 | _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ 768 | (int32_t)partition 769 | /*! 
770 | * Message value/payload pointer and length (void *, size_t) 771 | */ 772 | #define RD_KAFKA_V_VALUE(VALUE,LEN) \ 773 | _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ 774 | (void *)VALUE, (size_t)LEN 775 | /*! 776 | * Message key pointer and length (const void *, size_t) 777 | */ 778 | #define RD_KAFKA_V_KEY(KEY,LEN) \ 779 | _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ 780 | (void *)KEY, (size_t)LEN 781 | /*! 782 | * Opaque pointer (void *) 783 | */ 784 | #define RD_KAFKA_V_OPAQUE(opaque) \ 785 | _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, opaque), \ 786 | (void *)opaque 787 | /*! 788 | * Message flags (int) 789 | * @sa RD_KAFKA_MSG_F_COPY, et.al. 790 | */ 791 | #define RD_KAFKA_V_MSGFLAGS(msgflags) \ 792 | _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), \ 793 | (int)msgflags 794 | /*! 795 | * Timestamp (int64_t) 796 | */ 797 | #define RD_KAFKA_V_TIMESTAMP(timestamp) \ 798 | _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ 799 | (int64_t)timestamp 800 | 801 | /**@}*/ 802 | 803 | 804 | /** 805 | * @name Kafka messages 806 | * @{ 807 | * 808 | */ 809 | 810 | 811 | 812 | // FIXME: This doesn't show up in docs for some reason 813 | // "Compound rd_kafka_message_t is not documented." 814 | 815 | /** 816 | * @brief A Kafka message as returned by the \c rd_kafka_consume*() family 817 | * of functions as well as provided to the Producer \c dr_msg_cb(). 818 | * 819 | * For the consumer this object has two purposes: 820 | * - provide the application with a consumed message. (\c err == 0) 821 | * - report per-topic+partition consumer errors (\c err != 0) 822 | * 823 | * The application must check \c err to decide what action to take. 824 | * 825 | * When the application is finished with a message it must call 826 | * rd_kafka_message_destroy() unless otherwise noted. 827 | */ 828 | typedef struct rd_kafka_message_s { 829 | rd_kafka_resp_err_t err; /**< Non-zero for error signaling. 
*/ 830 | rd_kafka_topic_t *rkt; /**< Topic */ 831 | int32_t partition; /**< Partition */ 832 | void *payload; /**< Producer: original message payload. 833 | * Consumer: Depends on the value of \c err : 834 | * - \c err==0: Message payload. 835 | * - \c err!=0: Error string */ 836 | size_t len; /**< Depends on the value of \c err : 837 | * - \c err==0: Message payload length 838 | * - \c err!=0: Error string length */ 839 | void *key; /**< Depends on the value of \c err : 840 | * - \c err==0: Optional message key */ 841 | size_t key_len; /**< Depends on the value of \c err : 842 | * - \c err==0: Optional message key length*/ 843 | int64_t offset; /**< Consume: 844 | * - Message offset (or offset for error 845 | * if \c err!=0 if applicable). 846 | * - dr_msg_cb: 847 | * Message offset assigned by broker. 848 | * If \c produce.offset.report is set then 849 | * each message will have this field set, 850 | * otherwise only the last message in 851 | * each produced internal batch will 852 | * have this field set, otherwise 0. */ 853 | void *_private; /**< Consume: 854 | * - rdkafka private pointer: DO NOT MODIFY 855 | * - dr_msg_cb: 856 | * msg_opaque from produce() call */ 857 | } rd_kafka_message_t; 858 | 859 | 860 | /** 861 | * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka. 862 | */ 863 | RD_EXPORT 864 | void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); 865 | 866 | 867 | 868 | 869 | /** 870 | * @brief Returns the error string for an errored rd_kafka_message_t or NULL if 871 | * there was no error. 872 | */ 873 | static RD_INLINE const char * 874 | RD_UNUSED 875 | rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { 876 | if (!rkmessage->err) 877 | return NULL; 878 | 879 | if (rkmessage->payload) 880 | return (const char *)rkmessage->payload; 881 | 882 | return rd_kafka_err2str(rkmessage->err); 883 | } 884 | 885 | 886 | 887 | /** 888 | * @brief Returns the message timestamp for a consumed message. 
889 | * 890 | * The timestamp is the number of milliseconds since the epoch (UTC). 891 | * 892 | * \p tstype (if not NULL) is updated to indicate the type of timestamp. 893 | * 894 | * @returns message timestamp, or -1 if not available. 895 | * 896 | * @remark Message timestamps require broker version 0.10.0 or later. 897 | */ 898 | RD_EXPORT 899 | int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, 900 | rd_kafka_timestamp_type_t *tstype); 901 | 902 | 903 | 904 | /**@}*/ 905 | 906 | 907 | /** 908 | * @name Configuration interface 909 | * @{ 910 | * 911 | * @brief Main/global configuration property interface 912 | * 913 | */ 914 | 915 | /** 916 | * @enum rd_kafka_conf_res_t 917 | * @brief Configuration result type 918 | */ 919 | typedef enum { 920 | RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ 921 | RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value. */ 922 | RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ 923 | } rd_kafka_conf_res_t; 924 | 925 | 926 | /** 927 | * @brief Create configuration object. 928 | * 929 | * When providing your own configuration to the \c rd_kafka_*_new_*() calls 930 | * the rd_kafka_conf_t objects needs to be created with this function 931 | * which will set up the defaults. 932 | * I.e.: 933 | * @code 934 | * rd_kafka_conf_t *myconf; 935 | * rd_kafka_conf_res_t res; 936 | * 937 | * myconf = rd_kafka_conf_new(); 938 | * res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600", 939 | * errstr, sizeof(errstr)); 940 | * if (res != RD_KAFKA_CONF_OK) 941 | * die("%s\n", errstr); 942 | * 943 | * rk = rd_kafka_new(..., myconf); 944 | * @endcode 945 | * 946 | * Please see CONFIGURATION.md for the default settings or use 947 | * rd_kafka_conf_properties_show() to provide the information at runtime. 948 | * 949 | * The properties are identical to the Apache Kafka configuration properties 950 | * whenever possible. 951 | * 952 | * @returns A new rd_kafka_conf_t object with defaults set. 
953 | * 954 | * @sa rd_kafka_conf_set(), rd_kafka_conf_destroy() 955 | */ 956 | RD_EXPORT 957 | rd_kafka_conf_t *rd_kafka_conf_new(void); 958 | 959 | 960 | /** 961 | * @brief Destroys a conf object. 962 | */ 963 | RD_EXPORT 964 | void rd_kafka_conf_destroy(rd_kafka_conf_t *conf); 965 | 966 | 967 | /** 968 | * @brief Creates a copy/duplicate of configuration object \p conf 969 | */ 970 | RD_EXPORT 971 | rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); 972 | 973 | 974 | /** 975 | * @brief Sets a configuration property. 976 | * 977 | * \p conf must have been previously created with rd_kafka_conf_new(). 978 | * 979 | * Returns \c rd_kafka_conf_res_t to indicate success or failure. 980 | * In case of failure \p errstr is updated to contain a human readable 981 | * error string. 982 | */ 983 | RD_EXPORT 984 | rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, 985 | const char *name, 986 | const char *value, 987 | char *errstr, size_t errstr_size); 988 | 989 | 990 | /** 991 | * @brief Enable event sourcing. 992 | * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable 993 | * for consumption by `rd_kafka_queue_poll()`. 994 | */ 995 | RD_EXPORT 996 | void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); 997 | 998 | 999 | /** 1000 | @deprecated See rd_kafka_conf_set_dr_msg_cb() 1001 | */ 1002 | RD_EXPORT 1003 | void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, 1004 | void (*dr_cb) (rd_kafka_t *rk, 1005 | void *payload, size_t len, 1006 | rd_kafka_resp_err_t err, 1007 | void *opaque, void *msg_opaque)); 1008 | 1009 | /** 1010 | * @brief \b Producer: Set delivery report callback in provided \p conf object. 1011 | * 1012 | * The delivery report callback will be called once for each message 1013 | * accepted by rd_kafka_produce() (et.al) with \p err set to indicate 1014 | * the result of the produce request. 
1015 | * 1016 | * The callback is called when a message is succesfully produced or 1017 | * if librdkafka encountered a permanent failure, or the retry counter for 1018 | * temporary errors has been exhausted. 1019 | * 1020 | * An application must call rd_kafka_poll() at regular intervals to 1021 | * serve queued delivery report callbacks. 1022 | */ 1023 | RD_EXPORT 1024 | void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, 1025 | void (*dr_msg_cb) (rd_kafka_t *rk, 1026 | const rd_kafka_message_t * 1027 | rkmessage, 1028 | void *opaque)); 1029 | 1030 | 1031 | /** 1032 | * @brief \b Consumer: Set consume callback for use with rd_kafka_consumer_poll() 1033 | * 1034 | */ 1035 | RD_EXPORT 1036 | void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, 1037 | void (*consume_cb) (rd_kafka_message_t * 1038 | rkmessage, 1039 | void *opaque)); 1040 | 1041 | /** 1042 | * @brief \b Consumer: Set rebalance callback for use with 1043 | * coordinated consumer group balancing. 1044 | * 1045 | * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS 1046 | * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions' 1047 | * contains the full partition set that was either assigned or revoked. 1048 | * 1049 | * Registering a \p rebalance_cb turns off librdkafka's automatic 1050 | * partition assignment/revocation and instead delegates that responsibility 1051 | * to the application's \p rebalance_cb. 1052 | * 1053 | * The rebalance callback is responsible for updating librdkafka's 1054 | * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS 1055 | * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle 1056 | * arbitrary rebalancing failures where \p err is neither of those. 1057 | * @remark In this latter case (arbitrary error), the application must 1058 | * call rd_kafka_assign(rk, NULL) to synchronize state. 
1059 | * 1060 | * Without a rebalance callback this is done automatically by librdkafka 1061 | * but registering a rebalance callback gives the application flexibility 1062 | * in performing other operations along with the assinging/revocation, 1063 | * such as fetching offsets from an alternate location (on assign) 1064 | * or manually committing offsets (on revoke). 1065 | * 1066 | * @remark The \p partitions list is destroyed by librdkafka on return 1067 | * return from the rebalance_cb and must not be freed or 1068 | * saved by the application. 1069 | * 1070 | * The following example shows the application's responsibilities: 1071 | * @code 1072 | * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, 1073 | * rd_kafka_topic_partition_list_t *partitions, 1074 | * void *opaque) { 1075 | * 1076 | * switch (err) 1077 | * { 1078 | * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: 1079 | * // application may load offets from arbitrary external 1080 | * // storage here and update \p partitions 1081 | * 1082 | * rd_kafka_assign(rk, partitions); 1083 | * break; 1084 | * 1085 | * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: 1086 | * if (manual_commits) // Optional explicit manual commit 1087 | * rd_kafka_commit(rk, partitions, 0); // sync commit 1088 | * 1089 | * rd_kafka_assign(rk, NULL); 1090 | * break; 1091 | * 1092 | * default: 1093 | * handle_unlikely_error(err); 1094 | * rd_kafka_assign(rk, NULL); // sync state 1095 | * break; 1096 | * } 1097 | * } 1098 | * @endcode 1099 | */ 1100 | RD_EXPORT 1101 | void rd_kafka_conf_set_rebalance_cb ( 1102 | rd_kafka_conf_t *conf, 1103 | void (*rebalance_cb) (rd_kafka_t *rk, 1104 | rd_kafka_resp_err_t err, 1105 | rd_kafka_topic_partition_list_t *partitions, 1106 | void *opaque)); 1107 | 1108 | 1109 | 1110 | /** 1111 | * @brief \b Consumer: Set offset commit callback for use with consumer groups. 
1112 | * 1113 | * The results of automatic or manual offset commits will be scheduled 1114 | * for this callback and is served by rd_kafka_consumer_poll(). 1115 | * 1116 | * If no partitions had valid offsets to commit this callback will be called 1117 | * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered 1118 | * an error. 1119 | * 1120 | * The \p offsets list contains per-partition information: 1121 | * - \c offset: committed offset (attempted) 1122 | * - \c err: commit error 1123 | */ 1124 | RD_EXPORT 1125 | void rd_kafka_conf_set_offset_commit_cb ( 1126 | rd_kafka_conf_t *conf, 1127 | void (*offset_commit_cb) (rd_kafka_t *rk, 1128 | rd_kafka_resp_err_t err, 1129 | rd_kafka_topic_partition_list_t *offsets, 1130 | void *opaque)); 1131 | 1132 | 1133 | /** 1134 | * @brief Set error callback in provided conf object. 1135 | * 1136 | * The error callback is used by librdkafka to signal critical errors 1137 | * back to the application. 1138 | * 1139 | * If no \p error_cb is registered then the errors will be logged instead. 1140 | */ 1141 | RD_EXPORT 1142 | void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, 1143 | void (*error_cb) (rd_kafka_t *rk, int err, 1144 | const char *reason, 1145 | void *opaque)); 1146 | 1147 | /** 1148 | * @brief Set throttle callback. 1149 | * 1150 | * The throttle callback is used to forward broker throttle times to the 1151 | * application for Produce and Fetch (consume) requests. 1152 | * 1153 | * Callbacks are triggered whenever a non-zero throttle time is returned by 1154 | * the broker, or when the throttle time drops back to zero. 1155 | * 1156 | * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at 1157 | * regular intervals to serve queued callbacks. 1158 | * 1159 | * @remark Requires broker version 0.9.0 or later. 
1160 | */ 1161 | RD_EXPORT 1162 | void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, 1163 | void (*throttle_cb) ( 1164 | rd_kafka_t *rk, 1165 | const char *broker_name, 1166 | int32_t broker_id, 1167 | int throttle_time_ms, 1168 | void *opaque)); 1169 | 1170 | 1171 | /** 1172 | * @brief Set logger callback. 1173 | * 1174 | * The default is to print to stderr, but a syslog logger is also available, 1175 | * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives. 1176 | * Alternatively the application may provide its own logger callback. 1177 | * Or pass \p func as NULL to disable logging. 1178 | * 1179 | * This is the configuration alternative to the deprecated rd_kafka_set_logger() 1180 | * 1181 | * @remark The log_cb will be called spontaneously from librdkafka's internal 1182 | * threads unless logs have been forwarded to a poll queue through 1183 | * \c rd_kafka_set_log_queue(). 1184 | * An application MUST NOT call any librdkafka APIs or do any prolonged 1185 | * work in a non-forwarded \c log_cb. 1186 | */ 1187 | RD_EXPORT 1188 | void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, 1189 | void (*log_cb) (const rd_kafka_t *rk, int level, 1190 | const char *fac, const char *buf)); 1191 | 1192 | 1193 | /** 1194 | * @brief Set statistics callback in provided conf object. 1195 | * 1196 | * The statistics callback is triggered from rd_kafka_poll() every 1197 | * \c statistics.interval.ms (needs to be configured separately). 1198 | * Function arguments: 1199 | * - \p rk - Kafka handle 1200 | * - \p json - String containing the statistics data in JSON format 1201 | * - \p json_len - Length of \p json string. 1202 | * - \p opaque - Application-provided opaque. 1203 | * 1204 | * If the application wishes to hold on to the \p json pointer and free 1205 | * it at a later time it must return 1 from the \p stats_cb. 1206 | * If the application returns 0 from the \p stats_cb then librdkafka 1207 | * will immediately free the \p json pointer. 
1208 | */ 1209 | RD_EXPORT 1210 | void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, 1211 | int (*stats_cb) (rd_kafka_t *rk, 1212 | char *json, 1213 | size_t json_len, 1214 | void *opaque)); 1215 | 1216 | 1217 | 1218 | /** 1219 | * @brief Set socket callback. 1220 | * 1221 | * The socket callback is responsible for opening a socket 1222 | * according to the supplied \p domain, \p type and \p protocol. 1223 | * The socket shall be created with \c CLOEXEC set in a racefree fashion, if 1224 | * possible. 1225 | * 1226 | * Default: 1227 | * - on linux: racefree CLOEXEC 1228 | * - others : non-racefree CLOEXEC 1229 | * 1230 | * @remark The callback will be called from an internal librdkafka thread. 1231 | */ 1232 | RD_EXPORT 1233 | void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, 1234 | int (*socket_cb) (int domain, int type, 1235 | int protocol, 1236 | void *opaque)); 1237 | 1238 | 1239 | 1240 | /** 1241 | * @brief Set connect callback. 1242 | * 1243 | * The connect callback is responsible for connecting socket \p sockfd 1244 | * to peer address \p addr. 1245 | * The \p id field contains the broker identifier. 1246 | * 1247 | * \p connect_cb shall return 0 on success (socket connected) or an error 1248 | * number (errno) on error. 1249 | * 1250 | * @remark The callback will be called from an internal librdkafka thread. 1251 | */ 1252 | RD_EXPORT void 1253 | rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, 1254 | int (*connect_cb) (int sockfd, 1255 | const struct sockaddr *addr, 1256 | int addrlen, 1257 | const char *id, 1258 | void *opaque)); 1259 | 1260 | /** 1261 | * @brief Set close socket callback. 1262 | * 1263 | * Close a socket (optionally opened with socket_cb()). 1264 | * 1265 | * @remark The callback will be called from an internal librdkafka thread. 
1266 | */ 1267 | RD_EXPORT void 1268 | rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, 1269 | int (*closesocket_cb) (int sockfd, 1270 | void *opaque)); 1271 | 1272 | 1273 | 1274 | #ifndef _MSC_VER 1275 | /** 1276 | * @brief Set open callback. 1277 | * 1278 | * The open callback is responsible for opening the file specified by 1279 | * pathname, flags and mode. 1280 | * The file shall be opened with \c CLOEXEC set in a racefree fashion, if 1281 | * possible. 1282 | * 1283 | * Default: 1284 | * - on linux: racefree CLOEXEC 1285 | * - others : non-racefree CLOEXEC 1286 | * 1287 | * @remark The callback will be called from an internal librdkafka thread. 1288 | */ 1289 | RD_EXPORT 1290 | void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, 1291 | int (*open_cb) (const char *pathname, 1292 | int flags, mode_t mode, 1293 | void *opaque)); 1294 | #endif 1295 | 1296 | /** 1297 | * @brief Sets the application's opaque pointer that will be passed to callbacks 1298 | */ 1299 | RD_EXPORT 1300 | void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque); 1301 | 1302 | /** 1303 | * @brief Retrieves the opaque pointer previously set with rd_kafka_conf_set_opaque() 1304 | */ 1305 | RD_EXPORT 1306 | void *rd_kafka_opaque(const rd_kafka_t *rk); 1307 | 1308 | 1309 | 1310 | /** 1311 | * Sets the default topic configuration to use for automatically 1312 | * subscribed topics (e.g., through pattern-matched topics). 1313 | * The topic config object is not usable after this call. 1314 | */ 1315 | RD_EXPORT 1316 | void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, 1317 | rd_kafka_topic_conf_t *tconf); 1318 | 1319 | 1320 | 1321 | /** 1322 | * @brief Retrieve configuration value for property \p name. 1323 | * 1324 | * If \p dest is non-NULL the value will be written to \p dest with at 1325 | * most \p dest_size. 
1326 | * 1327 | * \p *dest_size is updated to the full length of the value, thus if 1328 | * \p *dest_size initially is smaller than the full length the application 1329 | * may reallocate \p dest to fit the returned \p *dest_size and try again. 1330 | * 1331 | * If \p dest is NULL only the full length of the value is returned. 1332 | * 1333 | * Returns \p RD_KAFKA_CONF_OK if the property name matched, else 1334 | * \p RD_KAFKA_CONF_UNKNOWN. 1335 | */ 1336 | RD_EXPORT 1337 | rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, 1338 | const char *name, 1339 | char *dest, size_t *dest_size); 1340 | 1341 | 1342 | /** 1343 | * @brief Retrieve topic configuration value for property \p name. 1344 | * 1345 | * @sa rd_kafka_conf_get() 1346 | */ 1347 | RD_EXPORT 1348 | rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, 1349 | const char *name, 1350 | char *dest, size_t *dest_size); 1351 | 1352 | 1353 | /** 1354 | * @brief Dump the configuration properties and values of \p conf to an array 1355 | * with \"key\", \"value\" pairs. 1356 | * 1357 | * The number of entries in the array is returned in \p *cntp. 1358 | * 1359 | * The dump must be freed with `rd_kafka_conf_dump_free()`. 1360 | */ 1361 | RD_EXPORT 1362 | const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); 1363 | 1364 | 1365 | /** 1366 | * @brief Dump the topic configuration properties and values of \p conf 1367 | * to an array with \"key\", \"value\" pairs. 1368 | * 1369 | * The number of entries in the array is returned in \p *cntp. 1370 | * 1371 | * The dump must be freed with `rd_kafka_conf_dump_free()`. 1372 | */ 1373 | RD_EXPORT 1374 | const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, 1375 | size_t *cntp); 1376 | 1377 | /** 1378 | * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or 1379 | * `rd_kafka_topic_conf_dump(). 
1380 | */ 1381 | RD_EXPORT 1382 | void rd_kafka_conf_dump_free(const char **arr, size_t cnt); 1383 | 1384 | /** 1385 | * @brief Prints a table to \p fp of all supported configuration properties, 1386 | * their default values as well as a description. 1387 | */ 1388 | RD_EXPORT 1389 | void rd_kafka_conf_properties_show(FILE *fp); 1390 | 1391 | /**@}*/ 1392 | 1393 | 1394 | /** 1395 | * @name Topic configuration 1396 | * @{ 1397 | * 1398 | * @brief Topic configuration property interface 1399 | * 1400 | */ 1401 | 1402 | 1403 | /** 1404 | * @brief Create topic configuration object 1405 | * 1406 | * @sa Same semantics as for rd_kafka_conf_new(). 1407 | */ 1408 | RD_EXPORT 1409 | rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void); 1410 | 1411 | 1412 | /** 1413 | * @brief Creates a copy/duplicate of topic configuration object \p conf. 1414 | */ 1415 | RD_EXPORT 1416 | rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t 1417 | *conf); 1418 | 1419 | 1420 | /** 1421 | * @brief Destroys a topic conf object. 1422 | */ 1423 | RD_EXPORT 1424 | void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); 1425 | 1426 | 1427 | /** 1428 | * @brief Sets a single rd_kafka_topic_conf_t value by property name. 1429 | * 1430 | * \p topic_conf should have been previously set up 1431 | * with `rd_kafka_topic_conf_new()`. 1432 | * 1433 | * @returns rd_kafka_conf_res_t to indicate success or failure. 1434 | */ 1435 | RD_EXPORT 1436 | rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, 1437 | const char *name, 1438 | const char *value, 1439 | char *errstr, size_t errstr_size); 1440 | 1441 | /** 1442 | * @brief Sets the application's opaque pointer that will be passed to all topic 1443 | * callbacks as the \c rkt_opaque argument. 
1444 | */ 1445 | RD_EXPORT 1446 | void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque); 1447 | 1448 | 1449 | /** 1450 | * @brief \b Producer: Set partitioner callback in provided topic conf object. 1451 | * 1452 | * The partitioner may be called in any thread at any time, 1453 | * it may be called multiple times for the same message/key. 1454 | * 1455 | * Partitioner function constraints: 1456 | * - MUST NOT call any rd_kafka_*() functions except: 1457 | * rd_kafka_topic_partition_available() 1458 | * - MUST NOT block or execute for prolonged periods of time. 1459 | * - MUST return a value between 0 and partition_cnt-1, or the 1460 | * special \c RD_KAFKA_PARTITION_UA value if partitioning 1461 | * could not be performed. 1462 | */ 1463 | RD_EXPORT 1464 | void 1465 | rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, 1466 | int32_t (*partitioner) ( 1467 | const rd_kafka_topic_t *rkt, 1468 | const void *keydata, 1469 | size_t keylen, 1470 | int32_t partition_cnt, 1471 | void *rkt_opaque, 1472 | void *msg_opaque)); 1473 | 1474 | /** 1475 | * @brief Check if partition is available (has a leader broker). 1476 | * 1477 | * @returns 1 if the partition is available, else 0. 1478 | * 1479 | * @warning This function must only be called from inside a partitioner function 1480 | */ 1481 | RD_EXPORT 1482 | int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, 1483 | int32_t partition); 1484 | 1485 | 1486 | /******************************************************************* 1487 | * * 1488 | * Partitioners provided by rdkafka * 1489 | * * 1490 | *******************************************************************/ 1491 | 1492 | /** 1493 | * @brief Random partitioner. 1494 | * 1495 | * Will try not to return unavailable partitions. 1496 | * 1497 | * @returns a random partition between 0 and \p partition_cnt - 1. 
1498 | * 1499 | */ 1500 | RD_EXPORT 1501 | int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, 1502 | const void *key, size_t keylen, 1503 | int32_t partition_cnt, 1504 | void *opaque, void *msg_opaque); 1505 | 1506 | /** 1507 | * @brief Consistent partitioner. 1508 | * 1509 | * Uses consistent hashing to map identical keys onto identical partitions. 1510 | * 1511 | * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on 1512 | * the CRC value of the key 1513 | */ 1514 | RD_EXPORT 1515 | int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, 1516 | const void *key, size_t keylen, 1517 | int32_t partition_cnt, 1518 | void *opaque, void *msg_opaque); 1519 | 1520 | /** 1521 | * @brief Consistent-Random partitioner. 1522 | * 1523 | * This is the default partitioner. 1524 | * Uses consistent hashing to map identical keys onto identical partitions, and 1525 | * messages without keys will be assigned via the random partitioner. 1526 | * 1527 | * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on 1528 | * the CRC value of the key (if provided) 1529 | */ 1530 | RD_EXPORT 1531 | int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, 1532 | const void *key, size_t keylen, 1533 | int32_t partition_cnt, 1534 | void *opaque, void *msg_opaque); 1535 | 1536 | 1537 | /**@}*/ 1538 | 1539 | 1540 | 1541 | /** 1542 | * @name Main Kafka and Topic object handles 1543 | * @{ 1544 | * 1545 | * 1546 | */ 1547 | 1548 | 1549 | 1550 | 1551 | /** 1552 | * @brief Creates a new Kafka handle and starts its operation according to the 1553 | * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER). 1554 | * 1555 | * \p conf is an optional struct created with `rd_kafka_conf_new()` that will 1556 | * be used instead of the default configuration. 1557 | * The \p conf object is freed by this function on success and must not be used 1558 | * or destroyed by the application sub-sequently. 
1559 | * See `rd_kafka_conf_set()` et.al for more information. 1560 | * 1561 | * \p errstr must be a pointer to memory of at least size \p errstr_size where 1562 | * `rd_kafka_new()` may write a human readable error message in case the 1563 | * creation of a new handle fails. In which case the function returns NULL. 1564 | * 1565 | * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER 1566 | * rd_kafka_t handle is created it may either operate in the 1567 | * legacy simple consumer mode using the rd_kafka_consume_start() 1568 | * interface, or the High-level KafkaConsumer API. 1569 | * @remark An application must only use one of these groups of APIs on a given 1570 | * rd_kafka_t RD_KAFKA_CONSUMER handle. 1571 | 1572 | * 1573 | * @returns The Kafka handle on success or NULL on error (see \p errstr) 1574 | * 1575 | * @sa To destroy the Kafka handle, use rd_kafka_destroy(). 1576 | */ 1577 | RD_EXPORT 1578 | rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, 1579 | char *errstr, size_t errstr_size); 1580 | 1581 | 1582 | /** 1583 | * @brief Destroy Kafka handle. 1584 | * 1585 | * @remark This is a blocking operation. 1586 | */ 1587 | RD_EXPORT 1588 | void rd_kafka_destroy(rd_kafka_t *rk); 1589 | 1590 | 1591 | 1592 | /** 1593 | * @brief Returns Kafka handle name. 1594 | */ 1595 | RD_EXPORT 1596 | const char *rd_kafka_name(const rd_kafka_t *rk); 1597 | 1598 | 1599 | /** 1600 | * @brief Returns this client's broker-assigned group member id 1601 | * 1602 | * @remark This currently requires the high-level KafkaConsumer 1603 | * 1604 | * @returns An allocated string containing the current broker-assigned group 1605 | * member id, or NULL if not available. 1606 | * The application must free the string with \p free() or 1607 | * rd_kafka_mem_free() 1608 | */ 1609 | RD_EXPORT 1610 | char *rd_kafka_memberid (const rd_kafka_t *rk); 1611 | 1612 | 1613 | /** 1614 | * @brief Creates a new topic handle for topic named \p topic. 
* 1616 | * \p conf is an optional configuration for the topic created with 1617 | * `rd_kafka_topic_conf_new()` that will be used instead of the default 1618 | * topic configuration. 1619 | * The \p conf object is freed by this function and must not be used or 1620 | * destroyed by the application subsequently. 1621 | * See `rd_kafka_topic_conf_set()` et.al for more information. 1622 | * 1623 | * Topic handles are refcounted internally and calling rd_kafka_topic_new() 1624 | * again with the same topic name will return the previous topic handle 1625 | * without updating the original handle's configuration. 1626 | * Applications must eventually call rd_kafka_topic_destroy() for each 1627 | * successful call to rd_kafka_topic_new() to clear up resources. 1628 | * 1629 | * @returns the new topic handle or NULL on error (use rd_kafka_errno2err() 1630 | * to convert system \p errno to an rd_kafka_resp_err_t error code). 1631 | * 1632 | * @sa rd_kafka_topic_destroy() 1633 | */ 1634 | RD_EXPORT 1635 | rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, 1636 | rd_kafka_topic_conf_t *conf); 1637 | 1638 | 1639 | 1640 | /** 1641 | * @brief Lose the application's topic handle refcount as previously created 1642 | * with `rd_kafka_topic_new()`. 1643 | * 1644 | * @remark Since topic objects are refcounted (both internally and for the app) 1645 | * the topic object might not actually be destroyed by this call, 1646 | * but the application must consider the object destroyed. 1647 | */ 1648 | RD_EXPORT 1649 | void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt); 1650 | 1651 | 1652 | /** 1653 | * @brief Returns the topic name. 1654 | */ 1655 | RD_EXPORT 1656 | const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); 1657 | 1658 | 1659 | /** 1660 | * @brief Get the \p rkt_opaque pointer that was set in the topic configuration.
*/ 1662 | RD_EXPORT 1663 | void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); 1664 | 1665 | 1666 | /** 1667 | * @brief Unassigned partition. 1668 | * 1669 | * The unassigned partition is used by the producer API for messages 1670 | * that should be partitioned using the configured or default partitioner. 1671 | */ 1672 | #define RD_KAFKA_PARTITION_UA ((int32_t)-1) 1673 | 1674 | 1675 | /** 1676 | * @brief Polls the provided kafka handle for events. 1677 | * 1678 | * Events will cause application provided callbacks to be called. 1679 | * 1680 | * The \p timeout_ms argument specifies the maximum amount of time 1681 | * (in milliseconds) that the call will block waiting for events. 1682 | * For non-blocking calls, provide 0 as \p timeout_ms. 1683 | * To wait indefinitely for an event, provide -1. 1684 | * 1685 | * @remark An application should make sure to call poll() at regular 1686 | * intervals to serve any queued callbacks waiting to be called. 1687 | * 1688 | * Events: 1689 | * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] 1690 | * - error callbacks (rd_kafka_conf_set_error_cb()) [all] 1691 | * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] 1692 | * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] 1693 | * 1694 | * @returns the number of events served. 1695 | */ 1696 | RD_EXPORT 1697 | int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); 1698 | 1699 | 1700 | /** 1701 | * @brief Cancels the current callback dispatcher (rd_kafka_poll(), 1702 | * rd_kafka_consume_callback(), etc). 1703 | * 1704 | * A callback may use this to force an immediate return to the calling 1705 | * code (caller of e.g. rd_kafka_poll()) without processing any further 1706 | * events. 1707 | * 1708 | * @remark This function MUST ONLY be called from within a librdkafka callback.
1709 | */ 1710 | RD_EXPORT 1711 | void rd_kafka_yield (rd_kafka_t *rk); 1712 | 1713 | 1714 | 1715 | 1716 | /** 1717 | * @brief Pause producing or consumption for the provided list of partitions. 1718 | * 1719 | * Success or error is returned per-partition \p err in the \p partitions list. 1720 | * 1721 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR 1722 | */ 1723 | RD_EXPORT rd_kafka_resp_err_t 1724 | rd_kafka_pause_partitions (rd_kafka_t *rk, 1725 | rd_kafka_topic_partition_list_t *partitions); 1726 | 1727 | 1728 | 1729 | /** 1730 | * @brief Resume producing consumption for the provided list of partitions. 1731 | * 1732 | * Success or error is returned per-partition \p err in the \p partitions list. 1733 | * 1734 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR 1735 | */ 1736 | RD_EXPORT rd_kafka_resp_err_t 1737 | rd_kafka_resume_partitions (rd_kafka_t *rk, 1738 | rd_kafka_topic_partition_list_t *partitions); 1739 | 1740 | 1741 | 1742 | 1743 | /** 1744 | * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets 1745 | * for partition. 1746 | * 1747 | * Offsets are returned in \p *low and \p *high respectively. 1748 | * 1749 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. 1750 | */ 1751 | RD_EXPORT rd_kafka_resp_err_t 1752 | rd_kafka_query_watermark_offsets (rd_kafka_t *rk, 1753 | const char *topic, int32_t partition, 1754 | int64_t *low, int64_t *high, int timeout_ms); 1755 | 1756 | 1757 | /** 1758 | * @brief Get last known low (oldest/beginning) and high (newest/end) offsets 1759 | * for partition. 1760 | * 1761 | * The low offset is updated periodically (if statistics.interval.ms is set) 1762 | * while the high offset is updated on each fetched message set from the broker. 1763 | * 1764 | * If there is no cached offset (either low or high, or both) then 1765 | * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset. 1766 | * 1767 | * Offsets are returned in \p *low and \p *high respectively. 
* 1769 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. 1770 | * 1771 | * @remark Shall only be used with an active consumer instance. 1772 | */ 1773 | RD_EXPORT rd_kafka_resp_err_t 1774 | rd_kafka_get_watermark_offsets (rd_kafka_t *rk, 1775 | const char *topic, int32_t partition, 1776 | int64_t *low, int64_t *high); 1777 | 1778 | 1779 | 1780 | /** 1781 | * @brief Look up the offsets for the given partitions by timestamp. 1782 | * 1783 | * The returned offset for each partition is the earliest offset whose 1784 | * timestamp is greater than or equal to the given timestamp in the 1785 | * corresponding partition. 1786 | * 1787 | * The timestamps to query are represented as \c offset in \p offsets 1788 | * on input, and \c offset will contain the offset on output. 1789 | * 1790 | * The function will block for at most \p timeout_ms milliseconds. 1791 | * 1792 | * @remark Duplicate Topic+Partitions are not supported. 1793 | * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err 1794 | * 1795 | * @returns an error code for general errors, else RD_KAFKA_RESP_ERR_NO_ERROR 1796 | * in which case per-partition errors might be set. 1797 | */ 1798 | RD_EXPORT rd_kafka_resp_err_t 1799 | rd_kafka_offsets_for_times (rd_kafka_t *rk, 1800 | rd_kafka_topic_partition_list_t *offsets, 1801 | int timeout_ms); 1802 | 1803 | 1804 | /** 1805 | * @brief Free pointer returned by librdkafka 1806 | * 1807 | * This is typically an abstraction for the free(3) call and makes sure 1808 | * the application can use the same memory allocator as librdkafka for 1809 | * freeing pointers returned by librdkafka. 1810 | * 1811 | * In standard setups it is usually not necessary to use this interface 1812 | * rather than the free(3) function. 1813 | * 1814 | * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs 1815 | * that explicitly mention using this function for freeing.
1816 | */ 1817 | RD_EXPORT 1818 | void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr); 1819 | 1820 | 1821 | /**@}*/ 1822 | 1823 | 1824 | 1825 | 1826 | 1827 | /** 1828 | * @name Queue API 1829 | * @{ 1830 | * 1831 | * Message queues allows the application to re-route consumed messages 1832 | * from multiple topic+partitions into one single queue point. 1833 | * This queue point containing messages from a number of topic+partitions 1834 | * may then be served by a single rd_kafka_consume*_queue() call, 1835 | * rather than one call per topic+partition combination. 1836 | */ 1837 | 1838 | 1839 | /** 1840 | * @brief Create a new message queue. 1841 | * 1842 | * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al. 1843 | */ 1844 | RD_EXPORT 1845 | rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk); 1846 | 1847 | /** 1848 | * Destroy a queue, purging all of its enqueued messages. 1849 | */ 1850 | RD_EXPORT 1851 | void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); 1852 | 1853 | 1854 | /** 1855 | * @returns a reference to the main librdkafka event queue. 1856 | * This is the queue served by rd_kafka_poll(). 1857 | * 1858 | * Use rd_kafka_queue_destroy() to loose the reference. 1859 | */ 1860 | RD_EXPORT 1861 | rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); 1862 | 1863 | 1864 | /** 1865 | * @returns a reference to the librdkafka consumer queue. 1866 | * This is the queue served by rd_kafka_consumer_poll(). 1867 | * 1868 | * Use rd_kafka_queue_destroy() to loose the reference. 1869 | * 1870 | * @remark rd_kafka_queue_destroy() MUST be called on this queue 1871 | * prior to calling rd_kafka_consumer_close(). 1872 | */ 1873 | RD_EXPORT 1874 | rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); 1875 | 1876 | /** 1877 | * @returns a reference to the partition's queue, or NULL if 1878 | * partition is invalid. 1879 | * 1880 | * Use rd_kafka_queue_destroy() to loose the reference. 
1881 | * 1882 | * @remark rd_kafka_queue_destroy() MUST be called on this queue 1883 | * 1884 | * @remark This function only works on consumers. 1885 | */ 1886 | RD_EXPORT 1887 | rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, 1888 | const char *topic, 1889 | int32_t partition); 1890 | 1891 | /** 1892 | * @brief Forward/re-route queue \p src to \p dst. 1893 | * If \p dst is \c NULL the forwarding is removed. 1894 | * 1895 | * The internal refcounts for both queues are increased. 1896 | * 1897 | * @remark Regardless of whether \p dst is NULL or not, after calling this 1898 | * function, \p src will not forward its fetch queue to the consumer 1899 | * queue. 1900 | */ 1901 | RD_EXPORT 1902 | void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); 1903 | 1904 | /** 1905 | * @brief Forward librdkafka logs (and debug) to the specified queue 1906 | * for serving with one of the ..poll() calls. 1907 | * 1908 | * This allows an application to serve log callbacks (\c log_cb) 1909 | * in its thread of choice. 1910 | * 1911 | * @param rkqu Queue to forward logs to. If the value is NULL the logs 1912 | * are forwarded to the main queue. 1913 | * 1914 | * @remark The configuration property \c log.queue MUST also be set to true. 1915 | * 1916 | * @remark librdkafka maintains its own reference to the provided queue. 1917 | * 1918 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. 1919 | */ 1920 | RD_EXPORT 1921 | rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, 1922 | rd_kafka_queue_t *rkqu); 1923 | 1924 | 1925 | /** 1926 | * @returns the current number of elements in queue. 1927 | */ 1928 | RD_EXPORT 1929 | size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); 1930 | 1931 | 1932 | /** 1933 | * @brief Enable IO event triggering for queue. 
1934 | * 1935 | * To ease integration with IO based polling loops this API 1936 | * allows an application to create a separate file-descriptor 1937 | * that librdkafka will write \p payload (of size \p size) to 1938 | * whenever a new element is enqueued on a previously empty queue. 1939 | * 1940 | * To remove event triggering call with \p fd = -1. 1941 | * 1942 | * librdkafka will maintain a copy of the \p payload. 1943 | * 1944 | * @remark When using forwarded queues the IO event must only be enabled 1945 | * on the final forwarded-to (destination) queue. 1946 | */ 1947 | RD_EXPORT 1948 | void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, 1949 | const void *payload, size_t size); 1950 | 1951 | /**@}*/ 1952 | 1953 | /** 1954 | * 1955 | * @name Simple Consumer API (legacy) 1956 | * @{ 1957 | * 1958 | */ 1959 | 1960 | 1961 | #define RD_KAFKA_OFFSET_BEGINNING -2 /**< Start consuming from beginning of 1962 | * kafka partition queue: oldest msg */ 1963 | #define RD_KAFKA_OFFSET_END -1 /**< Start consuming from end of kafka 1964 | * partition queue: next msg */ 1965 | #define RD_KAFKA_OFFSET_STORED -1000 /**< Start consuming from offset retrieved 1966 | * from offset store */ 1967 | #define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */ 1968 | 1969 | 1970 | /** @cond NO_DOC */ 1971 | #define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */ 1972 | /** @endcond */ 1973 | 1974 | /** 1975 | * @brief Start consuming \p CNT messages from topic's current end offset. 1976 | * 1977 | * That is, if current end offset is 12345 and \p CNT is 200, it will start 1978 | * consuming from offset \c 12345-200 = \c 12145. 
*/ 1979 | #define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) 1980 | 1981 | /** 1982 | * @brief Start consuming messages for topic \p rkt and \p partition 1983 | * at offset \p offset which may either be an absolute \c (0..N) 1984 | * or one of the logical offsets: 1985 | * - RD_KAFKA_OFFSET_BEGINNING 1986 | * - RD_KAFKA_OFFSET_END 1987 | * - RD_KAFKA_OFFSET_STORED 1988 | * - RD_KAFKA_OFFSET_TAIL 1989 | * 1990 | * rdkafka will attempt to keep \c queued.min.messages (config property) 1991 | * messages in the local queue by repeatedly fetching batches of messages 1992 | * from the broker until the threshold is reached. 1993 | * 1994 | * The application shall use one of the `rd_kafka_consume*()` functions 1995 | * to consume messages from the local queue, each kafka message being 1996 | * represented as a `rd_kafka_message_t *` object. 1997 | * 1998 | * `rd_kafka_consume_start()` must not be called multiple times for the same 1999 | * topic and partition without stopping consumption first with 2000 | * `rd_kafka_consume_stop()`. 2001 | * 2002 | * @returns 0 on success or -1 on error in which case errno is set accordingly: 2003 | * - EBUSY - Conflicts with an existing or previous subscription 2004 | * (RD_KAFKA_RESP_ERR__CONFLICT) 2005 | * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id) 2006 | * (RD_KAFKA_RESP_ERR__INVALID_ARG) 2007 | * - ESRCH - requested \p partition is invalid. 2008 | * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) 2009 | * - ENOENT - topic is unknown in the Kafka cluster. 
2010 | * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) 2011 | * 2012 | * Use `rd_kafka_errno2err()` to convert system \c errno to `rd_kafka_resp_err_t` 2013 | */ 2014 | RD_EXPORT 2015 | int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, 2016 | int64_t offset); 2017 | 2018 | /** 2019 | * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to 2020 | * the provided queue \p rkqu (which must have been previously allocated 2021 | * with `rd_kafka_queue_new()`). 2022 | * 2023 | * The application must use one of the `rd_kafka_consume_*_queue()` functions 2024 | * to receive fetched messages. 2025 | * 2026 | * `rd_kafka_consume_start_queue()` must not be called multiple times for the 2027 | * same topic and partition without stopping consumption first with 2028 | * `rd_kafka_consume_stop()`. 2029 | * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not 2030 | * be combined for the same topic and partition. 2031 | */ 2032 | RD_EXPORT 2033 | int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, 2034 | int64_t offset, rd_kafka_queue_t *rkqu); 2035 | 2036 | /** 2037 | * @brief Stop consuming messages for topic \p rkt and \p partition, purging 2038 | * all messages currently in the local queue. 2039 | * 2040 | * NOTE: To enforce synchronisation this call will block until the internal 2041 | * fetcher has terminated and offsets are committed to configured 2042 | * storage method. 2043 | * 2044 | * The application needs to stop all consumers before calling 2045 | * `rd_kafka_destroy()` on the main object handle. 2046 | * 2047 | * @returns 0 on success or -1 on error (see `errno`). 2048 | */ 2049 | RD_EXPORT 2050 | int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); 2051 | 2052 | 2053 | 2054 | /** 2055 | * @brief Seek consumer for topic+partition to \p offset which is either an 2056 | * absolute or logical offset. 
2057 | * 2058 | * If \p timeout_ms is not 0 the call will wait this long for the 2059 | * seek to be performed. If the timeout is reached the internal state 2060 | * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. 2061 | * If \p timeout_ms is 0 it will initiate the seek but return 2062 | * immediately without any error reporting (e.g., async). 2063 | * 2064 | * This call triggers a fetch queue barrier flush. 2065 | * 2066 | * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code. 2067 | */ 2068 | RD_EXPORT 2069 | rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, 2070 | int32_t partition, 2071 | int64_t offset, 2072 | int timeout_ms); 2073 | 2074 | 2075 | /** 2076 | * @brief Consume a single message from topic \p rkt and \p partition 2077 | * 2078 | * \p timeout_ms is maximum amount of time to wait for a message to be received. 2079 | * Consumer must have been previously started with `rd_kafka_consume_start()`. 2080 | * 2081 | * Returns a message object on success or \c NULL on error. 2082 | * The message object must be destroyed with `rd_kafka_message_destroy()` 2083 | * when the application is done with it. 2084 | * 2085 | * Errors (when returning NULL): 2086 | * - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched. 2087 | * - ENOENT - \p rkt + \p partition is unknown. 2088 | * (no prior `rd_kafka_consume_start()` call) 2089 | * 2090 | * NOTE: The returned message's \c ..->err must be checked for errors. 2091 | * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the 2092 | * end of the partition has been reached, which should typically not be 2093 | * considered an error. The application should handle this case 2094 | * (e.g., ignore). 
2095 | */ 2096 | RD_EXPORT 2097 | rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, 2098 | int timeout_ms); 2099 | 2100 | 2101 | 2102 | /** 2103 | * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition 2104 | * putting a pointer to each message in the application provided 2105 | * array \p rkmessages (of size \p rkmessages_size entries). 2106 | * 2107 | * `rd_kafka_consume_batch()` provides higher throughput performance 2108 | * than `rd_kafka_consume()`. 2109 | * 2110 | * \p timeout_ms is the maximum amount of time to wait for all of 2111 | * \p rkmessages_size messages to be put into \p rkmessages. 2112 | * If no messages were available within the timeout period this function 2113 | * returns 0 and \p rkmessages remains untouched. 2114 | * This differs somewhat from `rd_kafka_consume()`. 2115 | * 2116 | * The message objects must be destroyed with `rd_kafka_message_destroy()` 2117 | * when the application is done with it. 2118 | * 2119 | * @returns the number of rkmessages added in \p rkmessages, 2120 | * or -1 on error (same error codes as for `rd_kafka_consume()`). 2121 | * 2122 | * @sa rd_kafka_consume() 2123 | */ 2124 | RD_EXPORT 2125 | ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, 2126 | int timeout_ms, 2127 | rd_kafka_message_t **rkmessages, 2128 | size_t rkmessages_size); 2129 | 2130 | 2131 | 2132 | /** 2133 | * @brief Consumes messages from topic \p rkt and \p partition, calling 2134 | * the provided callback for each consumed message. 2135 | * 2136 | * `rd_kafka_consume_callback()` provides higher throughput performance 2137 | * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`. 2138 | * 2139 | * \p timeout_ms is the maximum amount of time to wait for one or more messages 2140 | * to arrive. 
2141 | * 2142 | * The provided \p consume_cb function is called for each message, 2143 | * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the 2144 | * provided \p rkmessage. 2145 | * 2146 | * The \p opaque argument is passed to the 'consume_cb' as \p opaque. 2147 | * 2148 | * @returns the number of messages processed or -1 on error. 2149 | * 2150 | * @sa rd_kafka_consume() 2151 | */ 2152 | RD_EXPORT 2153 | int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, 2154 | int timeout_ms, 2155 | void (*consume_cb) (rd_kafka_message_t 2156 | *rkmessage, 2157 | void *opaque), 2158 | void *opaque); 2159 | 2160 | 2161 | /** 2162 | * @name Simple Consumer API (legacy): Queue consumers 2163 | * @{ 2164 | * 2165 | * The following `..._queue()` functions are analogous to the functions above 2166 | * but read messages from the provided queue \p rkqu instead. 2167 | * \p rkqu must have been previously created with `rd_kafka_queue_new()` 2168 | * and the topic consumer must have been started with 2169 | * `rd_kafka_consume_start_queue()` utilising the same queue. 
2170 | */ 2171 | 2172 | /** 2173 | * @brief Consume from queue 2174 | * 2175 | * @sa rd_kafka_consume() 2176 | */ 2177 | RD_EXPORT 2178 | rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, 2179 | int timeout_ms); 2180 | 2181 | /** 2182 | * @brief Consume batch of messages from queue 2183 | * 2184 | * @sa rd_kafka_consume_batch() 2185 | */ 2186 | RD_EXPORT 2187 | ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, 2188 | int timeout_ms, 2189 | rd_kafka_message_t **rkmessages, 2190 | size_t rkmessages_size); 2191 | 2192 | /** 2193 | * @brief Consume multiple messages from queue with callback 2194 | * 2195 | * @sa rd_kafka_consume_callback() 2196 | */ 2197 | RD_EXPORT 2198 | int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, 2199 | int timeout_ms, 2200 | void (*consume_cb) (rd_kafka_message_t 2201 | *rkmessage, 2202 | void *opaque), 2203 | void *opaque); 2204 | 2205 | 2206 | /**@}*/ 2207 | 2208 | 2209 | 2210 | 2211 | /** 2212 | * @name Simple Consumer API (legacy): Topic+partition offset store. 2213 | * @{ 2214 | * 2215 | * If \c auto.commit.enable is true the offset is stored automatically prior to 2216 | * returning of the message(s) in each of the rd_kafka_consume*() functions 2217 | * above. 2218 | */ 2219 | 2220 | 2221 | /** 2222 | * @brief Store offset \p offset for topic \p rkt partition \p partition. 2223 | * 2224 | * The offset will be committed (written) to the offset store according 2225 | * to \c `auto.commit.interval.ms`. 2226 | * 2227 | * @remark \c `auto.commit.enable` must be set to "false" when using this API. 2228 | * 2229 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. 
2230 | */ 2231 | RD_EXPORT 2232 | rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, 2233 | int32_t partition, int64_t offset); 2234 | /**@}*/ 2235 | 2236 | 2237 | 2238 | 2239 | /** 2240 | * @name KafkaConsumer (C) 2241 | * @{ 2242 | * @brief High-level KafkaConsumer C API 2243 | * 2244 | * 2245 | * 2246 | */ 2247 | 2248 | /** 2249 | * @brief Subscribe to topic set using balanced consumer groups. 2250 | * 2251 | * Wildcard (regex) topics are supported by the librdkafka assignor: 2252 | * any topic name in the \p topics list that is prefixed with \c \"^\" will 2253 | * be regex-matched to the full list of topics in the cluster and matching 2254 | * topics will be added to the subscription list. 2255 | * 2256 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or 2257 | * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid 2258 | * topics or regexes. 2259 | */ 2260 | RD_EXPORT rd_kafka_resp_err_t 2261 | rd_kafka_subscribe (rd_kafka_t *rk, 2262 | const rd_kafka_topic_partition_list_t *topics); 2263 | 2264 | 2265 | /** 2266 | * @brief Unsubscribe from the current subscription set. 2267 | */ 2268 | RD_EXPORT 2269 | rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); 2270 | 2271 | 2272 | /** 2273 | * @brief Returns the current topic subscription 2274 | * 2275 | * @returns An error code on failure, otherwise \p topic is updated 2276 | * to point to a newly allocated topic list (possibly empty). 2277 | * 2278 | * @remark The application is responsible for calling 2279 | * rd_kafka_topic_partition_list_destroy on the returned list. 2280 | */ 2281 | RD_EXPORT rd_kafka_resp_err_t 2282 | rd_kafka_subscription (rd_kafka_t *rk, 2283 | rd_kafka_topic_partition_list_t **topics); 2284 | 2285 | 2286 | 2287 | /** 2288 | * @brief Poll the consumer for messages or events. 2289 | * 2290 | * Will block for at most \p timeout_ms milliseconds. 
2291 | * 2292 | * @remark An application should make sure to call consumer_poll() at regular 2293 | * intervals, even if no messages are expected, to serve any 2294 | * queued callbacks waiting to be called. This is especially 2295 | * important when a rebalance_cb has been registered as it needs 2296 | * to be called and handled properly to synchronize internal 2297 | * consumer state. 2298 | * 2299 | * @returns A message object which is a proper message if \p ->err is 2300 | * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other 2301 | * value. 2302 | * 2303 | * @sa rd_kafka_message_t 2304 | */ 2305 | RD_EXPORT 2306 | rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); 2307 | 2308 | /** 2309 | * @brief Close down the KafkaConsumer. 2310 | * 2311 | * @remark This call will block until the consumer has revoked its assignment, 2312 | * calling the \c rebalance_cb if it is configured, committed offsets 2313 | * to broker, and left the consumer group. 2314 | * The maximum blocking time is roughly limited to session.timeout.ms. 2315 | * 2316 | * @returns An error code indicating if the consumer close was successful 2317 | * or not. 2318 | * 2319 | * @remark The application still needs to call rd_kafka_destroy() after 2320 | * this call finishes to clean up the underlying handle resources. 2321 | * 2322 | */ 2323 | RD_EXPORT 2324 | rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); 2325 | 2326 | 2327 | 2328 | /** 2329 | * @brief Atomic assignment of partitions to consume. 2330 | * 2331 | * The new \p partitions will replace the existing assignment. 2332 | * 2333 | * When used from a rebalance callback the application shall pass the 2334 | * partition list passed to the callback (or a copy of it) (even if the list 2335 | * is empty) rather than NULL to maintain internal join state. 
2336 | 2337 | * A zero-length \p partitions will treat the partitions as a valid, 2338 | * albeit empty, assignment, and maintain internal state, while a \c NULL 2339 | * value for \p partitions will reset and clear the internal state. 2340 | */ 2341 | RD_EXPORT rd_kafka_resp_err_t 2342 | rd_kafka_assign (rd_kafka_t *rk, 2343 | const rd_kafka_topic_partition_list_t *partitions); 2344 | 2345 | /** 2346 | * @brief Returns the current partition assignment 2347 | * 2348 | * @returns An error code on failure, otherwise \p partitions is updated 2349 | * to point to a newly allocated partition list (possibly empty). 2350 | * 2351 | * @remark The application is responsible for calling 2352 | * rd_kafka_topic_partition_list_destroy on the returned list. 2353 | */ 2354 | RD_EXPORT rd_kafka_resp_err_t 2355 | rd_kafka_assignment (rd_kafka_t *rk, 2356 | rd_kafka_topic_partition_list_t **partitions); 2357 | 2358 | 2359 | 2360 | 2361 | /** 2362 | * @brief Commit offsets on broker for the provided list of partitions. 2363 | * 2364 | * \p offsets should contain \c topic, \c partition, \c offset and possibly 2365 | * \c metadata. 2366 | * If \p offsets is NULL the current partition assignment will be used instead. 2367 | * 2368 | * If \p async is false this operation will block until the broker offset commit 2369 | * is done, returning the resulting success or error code. 2370 | * 2371 | * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been 2372 | * configured the callback will be enqueued for a future call to 2373 | * rd_kafka_poll(), rd_kafka_consumer_poll() or similar. 2374 | */ 2375 | RD_EXPORT rd_kafka_resp_err_t 2376 | rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, 2377 | int async); 2378 | 2379 | 2380 | /** 2381 | * @brief Commit message's offset on broker for the message's partition. 
2382 | * 2383 | * @sa rd_kafka_commit 2384 | */ 2385 | RD_EXPORT rd_kafka_resp_err_t 2386 | rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, 2387 | int async); 2388 | 2389 | 2390 | /** 2391 | * @brief Commit offsets on broker for the provided list of partitions. 2392 | * 2393 | * See rd_kafka_commit for \p offsets semantics. 2394 | * 2395 | * The result of the offset commit will be posted on the provided \p rkqu queue. 2396 | * 2397 | * If the application uses one of the poll APIs (rd_kafka_poll(), 2398 | * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue 2399 | * the \p cb callback is required. \p opaque is passed to the callback. 2400 | * 2401 | * If using the event API the callback is ignored and the offset commit result 2402 | * will be returned as an RD_KAFKA_EVENT_COMMIT event. The \p opaque 2403 | * value will be available with rd_kafka_event_opaque() 2404 | * 2405 | * If \p rkqu is NULL a temporary queue will be created and the callback will 2406 | * be served by this call. 2407 | * 2408 | * @sa rd_kafka_commit() 2409 | * @sa rd_kafka_conf_set_offset_commit_cb() 2410 | */ 2411 | RD_EXPORT rd_kafka_resp_err_t 2412 | rd_kafka_commit_queue (rd_kafka_t *rk, 2413 | const rd_kafka_topic_partition_list_t *offsets, 2414 | rd_kafka_queue_t *rkqu, 2415 | void (*cb) (rd_kafka_t *rk, 2416 | rd_kafka_resp_err_t err, 2417 | rd_kafka_topic_partition_list_t *offsets, 2418 | void *opaque), 2419 | void *opaque); 2420 | 2421 | 2422 | /** 2423 | * @brief Retrieve committed offsets for topics+partitions. 2424 | * 2425 | * The \p offset field of each requested partition will either be set to 2426 | * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored 2427 | * offset for that partition. 2428 | * 2429 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the 2430 | * \p offset or \p err field of each \p partitions' element is filled 2431 | * in with the stored offset, or a partition specific error. 
2432 | * Else returns an error code. 2433 | */ 2434 | RD_EXPORT rd_kafka_resp_err_t 2435 | rd_kafka_committed (rd_kafka_t *rk, 2436 | rd_kafka_topic_partition_list_t *partitions, 2437 | int timeout_ms); 2438 | 2439 | 2440 | 2441 | /** 2442 | * @brief Retrieve current positions (offsets) for topics+partitions. 2443 | * 2444 | * The \p offset field of each requested partition will be set to the offset 2445 | * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was 2446 | * no previous message. 2447 | * 2448 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the 2449 | * \p offset or \p err field of each \p partitions' element is filled 2450 | * in with the stored offset, or a partition specific error. 2451 | * Else returns an error code. 2452 | */ 2453 | RD_EXPORT rd_kafka_resp_err_t 2454 | rd_kafka_position (rd_kafka_t *rk, 2455 | rd_kafka_topic_partition_list_t *partitions); 2456 | 2457 | 2458 | /**@}*/ 2459 | 2460 | 2461 | 2462 | /** 2463 | * @name Producer API 2464 | * @{ 2465 | * 2466 | * 2467 | */ 2468 | 2469 | 2470 | /** 2471 | * @brief Producer message flags 2472 | */ 2473 | #define RD_KAFKA_MSG_F_FREE 0x1 /**< Delegate freeing of payload to rdkafka. */ 2474 | #define RD_KAFKA_MSG_F_COPY 0x2 /**< rdkafka will make a copy of the payload. */ 2475 | #define RD_KAFKA_MSG_F_BLOCK 0x4 /**< Block produce*() on message queue full. 2476 | * WARNING: If a delivery report callback 2477 | * is used the application MUST 2478 | * call rd_kafka_poll() (or equiv.) 2479 | * to make sure delivered messages 2480 | * are drained from the internal 2481 | * delivery report queue. 2482 | * Failure to do so will result 2483 | * in indefinitely blocking on 2484 | * the produce() call when the 2485 | * message queue is full. 2486 | */ 2487 | 2488 | 2489 | 2490 | /** 2491 | * @brief Produce and send a single message to broker. 2492 | * 2493 | * \p rkt is the target topic which must have been previously created with 2494 | * `rd_kafka_topic_new()`. 
2495 | * 2496 | * `rd_kafka_produce()` is an asynchronous non-blocking API. 2497 | * 2498 | * \p partition is the target partition, either: 2499 | * - RD_KAFKA_PARTITION_UA (unassigned) for 2500 | * automatic partitioning using the topic's partitioner function, or 2501 | * - a fixed partition (0..N) 2502 | * 2503 | * \p msgflags is zero or more of the following flags OR:ed together: 2504 | * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if 2505 | * \p queue.buffering.max.messages or 2506 | * \p queue.buffering.max.kbytes are exceeded. 2507 | * Messages are considered in-queue from the point they 2508 | * are accepted by produce() until their corresponding 2509 | * delivery report callback/event returns. 2510 | * It is thus a requirement to call 2511 | * rd_kafka_poll() (or equiv.) from a separate 2512 | * thread when F_BLOCK is used. 2513 | * See WARNING on \c RD_KAFKA_MSG_F_BLOCK above. 2514 | * 2515 | * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done 2516 | * with it. 2517 | * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the 2518 | * \p payload pointer will not be used by rdkafka 2519 | * after the call returns. 2520 | * 2521 | * .._F_FREE and .._F_COPY are mutually exclusive. 2522 | * 2523 | * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then 2524 | * the memory associated with the payload is still the caller's 2525 | * responsibility. 2526 | * 2527 | * \p payload is the message payload of size \p len bytes. 2528 | * 2529 | * \p key is an optional message key of size \p keylen bytes, if non-NULL it 2530 | * will be passed to the topic partitioner as well as be sent with the 2531 | * message to the broker and passed on to the consumer. 2532 | * 2533 | * \p msg_opaque is an optional application-provided per-message opaque 2534 | * pointer that will be provided in the delivery report callback (`dr_cb`) for 2535 | * referencing this message. 
2536 | * 2537 | * Returns 0 on success or -1 on error in which case errno is set accordingly: 2538 | * - ENOBUFS - maximum number of outstanding messages has been reached: 2539 | * "queue.buffering.max.messages" 2540 | * (RD_KAFKA_RESP_ERR__QUEUE_FULL) 2541 | * - EMSGSIZE - message is larger than configured max size: 2542 | * "messages.max.bytes". 2543 | * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) 2544 | * - ESRCH - requested \p partition is unknown in the Kafka cluster. 2545 | * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) 2546 | * - ENOENT - topic is unknown in the Kafka cluster. 2547 | * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) 2548 | * 2549 | * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. 2550 | */ 2551 | RD_EXPORT 2552 | int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, 2553 | int msgflags, 2554 | void *payload, size_t len, 2555 | const void *key, size_t keylen, 2556 | void *msg_opaque); 2557 | 2558 | 2559 | /** 2560 | * @brief Produce and send a single message to broker. 2561 | * 2562 | * The message is defined by a va-arg list using \c rd_kafka_vtype_t 2563 | * tag tuples which must be terminated with a single \c RD_KAFKA_V_END. 2564 | * 2565 | * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code. 2566 | * 2567 | * @sa rd_kafka_produce, RD_KAFKA_V_END 2568 | */ 2569 | RD_EXPORT 2570 | rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); 2571 | 2572 | 2573 | /** 2574 | * @brief Produce multiple messages. 2575 | * 2576 | * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will 2577 | * be run for each message (slower), otherwise the messages will be enqueued 2578 | * to the specified partition directly (faster). 2579 | * 2580 | * The messages are provided in the array \p rkmessages of count \p message_cnt 2581 | * elements. 2582 | * The \p partition and \p msgflags are used for all provided messages. 
2583 | * 2584 | * Honoured \p rkmessages[] fields are: 2585 | * - payload,len Message payload and length 2586 | * - key,key_len Optional message key 2587 | * - _private Message opaque pointer (msg_opaque) 2588 | * - err Will be set according to success or failure. 2589 | * Application only needs to check for errors if 2590 | * return value != \p message_cnt. 2591 | * 2592 | * @returns the number of messages successfully enqueued for producing. 2593 | */ 2594 | RD_EXPORT 2595 | int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, 2596 | int msgflags, 2597 | rd_kafka_message_t *rkmessages, int message_cnt); 2598 | 2599 | 2600 | 2601 | 2602 | /** 2603 | * @brief Wait until all outstanding produce requests, et.al, are completed. 2604 | * This should typically be done prior to destroying a producer instance 2605 | * to make sure all queued and in-flight produce requests are completed 2606 | * before terminating. 2607 | * 2608 | * @remark This function will call rd_kafka_poll() and thus trigger callbacks. 
2609 | * 2610 | * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all 2611 | * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR 2612 | */ 2613 | RD_EXPORT 2614 | rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); 2615 | 2616 | 2617 | /**@}*/ 2618 | 2619 | 2620 | /** 2621 | * @name Metadata API 2622 | * @{ 2623 | * 2624 | * 2625 | */ 2626 | 2627 | 2628 | /** 2629 | * @brief Broker information 2630 | */ 2631 | typedef struct rd_kafka_metadata_broker { 2632 | int32_t id; /**< Broker Id */ 2633 | char *host; /**< Broker hostname */ 2634 | int port; /**< Broker listening port */ 2635 | } rd_kafka_metadata_broker_t; 2636 | 2637 | /** 2638 | * @brief Partition information 2639 | */ 2640 | typedef struct rd_kafka_metadata_partition { 2641 | int32_t id; /**< Partition Id */ 2642 | rd_kafka_resp_err_t err; /**< Partition error reported by broker */ 2643 | int32_t leader; /**< Leader broker */ 2644 | int replica_cnt; /**< Number of brokers in \p replicas */ 2645 | int32_t *replicas; /**< Replica brokers */ 2646 | int isr_cnt; /**< Number of ISR brokers in \p isrs */ 2647 | int32_t *isrs; /**< In-Sync-Replica brokers */ 2648 | } rd_kafka_metadata_partition_t; 2649 | 2650 | /** 2651 | * @brief Topic information 2652 | */ 2653 | typedef struct rd_kafka_metadata_topic { 2654 | char *topic; /**< Topic name */ 2655 | int partition_cnt; /**< Number of partitions in \p partitions*/ 2656 | struct rd_kafka_metadata_partition *partitions; /**< Partitions */ 2657 | rd_kafka_resp_err_t err; /**< Topic error reported by broker */ 2658 | } rd_kafka_metadata_topic_t; 2659 | 2660 | 2661 | /** 2662 | * @brief Metadata container 2663 | */ 2664 | typedef struct rd_kafka_metadata { 2665 | int broker_cnt; /**< Number of brokers in \p brokers */ 2666 | struct rd_kafka_metadata_broker *brokers; /**< Brokers */ 2667 | 2668 | int topic_cnt; /**< Number of topics in \p topics */ 2669 | struct rd_kafka_metadata_topic *topics; /**< Topics */ 
2670 | 2671 | int32_t orig_broker_id; /**< Broker originating this metadata */ 2672 | char *orig_broker_name; /**< Name of originating broker */ 2673 | } rd_kafka_metadata_t; 2674 | 2675 | 2676 | /** 2677 | * @brief Request Metadata from broker. 2678 | * 2679 | * Parameters: 2680 | * - \p all_topics if non-zero: request info about all topics in cluster, 2681 | * if zero: only request info about locally known topics. 2682 | * - \p only_rkt only request info about this topic 2683 | * - \p metadatap pointer to hold metadata result. 2684 | * The \p *metadatap pointer must be released 2685 | * with rd_kafka_metadata_destroy(). 2686 | * - \p timeout_ms maximum response time before failing. 2687 | * 2688 | * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap) 2689 | * will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or 2690 | * other error code on error. 2691 | */ 2692 | RD_EXPORT 2693 | rd_kafka_resp_err_t 2694 | rd_kafka_metadata (rd_kafka_t *rk, int all_topics, 2695 | rd_kafka_topic_t *only_rkt, 2696 | const struct rd_kafka_metadata **metadatap, 2697 | int timeout_ms); 2698 | 2699 | /** 2700 | * @brief Release metadata memory. 
2701 | */ 2702 | RD_EXPORT 2703 | void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); 2704 | 2705 | 2706 | /**@}*/ 2707 | 2708 | 2709 | 2710 | /** 2711 | * @name Client group information 2712 | * @{ 2713 | * 2714 | * 2715 | */ 2716 | 2717 | 2718 | /** 2719 | * @brief Group member information 2720 | * 2721 | * For more information on \p member_metadata format, see 2722 | * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI 2723 | * 2724 | */ 2725 | struct rd_kafka_group_member_info { 2726 | char *member_id; /**< Member id (generated by broker) */ 2727 | char *client_id; /**< Client's \p client.id */ 2728 | char *client_host; /**< Client's hostname */ 2729 | void *member_metadata; /**< Member metadata (binary), 2730 | * format depends on \p protocol_type. */ 2731 | int member_metadata_size; /**< Member metadata size in bytes */ 2732 | void *member_assignment; /**< Member assignment (binary), 2733 | * format depends on \p protocol_type. */ 2734 | int member_assignment_size; /**< Member assignment size in bytes */ 2735 | }; 2736 | 2737 | /** 2738 | * @brief Group information 2739 | */ 2740 | struct rd_kafka_group_info { 2741 | struct rd_kafka_metadata_broker broker; /**< Originating broker info */ 2742 | char *group; /**< Group name */ 2743 | rd_kafka_resp_err_t err; /**< Broker-originated error */ 2744 | char *state; /**< Group state */ 2745 | char *protocol_type; /**< Group protocol type */ 2746 | char *protocol; /**< Group protocol */ 2747 | struct rd_kafka_group_member_info *members; /**< Group members */ 2748 | int member_cnt; /**< Group member count */ 2749 | }; 2750 | 2751 | /** 2752 | * @brief List of groups 2753 | * 2754 | * @sa rd_kafka_group_list_destroy() to release list memory. 
2755 | */ 2756 | struct rd_kafka_group_list { 2757 | struct rd_kafka_group_info *groups; /**< Groups */ 2758 | int group_cnt; /**< Group count */ 2759 | }; 2760 | 2761 | 2762 | /** 2763 | * @brief List and describe client groups in cluster. 2764 | * 2765 | * \p group is an optional group name to describe, otherwise (\p NULL) all 2766 | * groups are returned. 2767 | * 2768 | * \p timeout_ms is the (approximate) maximum time to wait for response 2769 | * from brokers and must be a positive value. 2770 | * 2771 | * @returns \p RD_KAFKA_RESP_ERR__NO_ERROR on success and \p grplistp is 2772 | * updated to point to a newly allocated list of groups. 2773 | * Else returns an error code on failure and \p grplistp remains 2774 | * untouched. 2775 | * 2776 | * @sa Use rd_kafka_group_list_destroy() to release list memory. 2777 | */ 2778 | RD_EXPORT 2779 | rd_kafka_resp_err_t 2780 | rd_kafka_list_groups (rd_kafka_t *rk, const char *group, 2781 | const struct rd_kafka_group_list **grplistp, 2782 | int timeout_ms); 2783 | 2784 | /** 2785 | * @brief Release list memory 2786 | */ 2787 | RD_EXPORT 2788 | void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist); 2789 | 2790 | 2791 | /**@}*/ 2792 | 2793 | 2794 | 2795 | /** 2796 | * @name Miscellaneous APIs 2797 | * @{ 2798 | * 2799 | */ 2800 | 2801 | 2802 | /** 2803 | * @brief Adds one or more brokers to the kafka handle's list of initial 2804 | * bootstrap brokers. 2805 | * 2806 | * Additional brokers will be discovered automatically as soon as rdkafka 2807 | * connects to a broker by querying the broker metadata. 2808 | * 2809 | * If a broker name resolves to multiple addresses (and possibly 2810 | * address families) all will be used for connection attempts in 2811 | * round-robin fashion. 2812 | * 2813 | * \p brokerlist is a ,-separated list of brokers in the format: 2814 | * \c \,\,.. 
2815 | * Where each broker is in either the host or URL based format: 2816 | * \c \[:\] 2817 | * \c \://\[:port] 2818 | * \c \ is either \c PLAINTEXT, \c SSL, \c SASL, \c SASL_PLAINTEXT 2819 | * The two formats can be mixed but ultimately the value of the 2820 | * `security.protocol` config property decides what brokers are allowed. 2821 | * 2822 | * Example: 2823 | * brokerlist = "broker1:10000,broker2" 2824 | * brokerlist = "SSL://broker3:9000,ssl://broker2" 2825 | * 2826 | * @returns the number of brokers successfully added. 2827 | * 2828 | * @remark Brokers may also be defined with the \c metadata.broker.list or 2829 | * \c bootstrap.servers configuration property (preferred method). 2830 | */ 2831 | RD_EXPORT 2832 | int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); 2833 | 2834 | 2835 | 2836 | 2837 | /** 2838 | * @brief Set logger function. 2839 | * 2840 | * The default is to print to stderr, but a syslog logger is also available, 2841 | * see rd_kafka_log_(print|syslog) for the builtin alternatives. 2842 | * Alternatively the application may provide its own logger callback. 2843 | * Or pass 'func' as NULL to disable logging. 2844 | * 2845 | * @deprecated Use rd_kafka_conf_set_log_cb() 2846 | * 2847 | * @remark \p rk may be passed as NULL in the callback. 2848 | */ 2849 | RD_EXPORT RD_DEPRECATED 2850 | void rd_kafka_set_logger(rd_kafka_t *rk, 2851 | void (*func) (const rd_kafka_t *rk, int level, 2852 | const char *fac, const char *buf)); 2853 | 2854 | 2855 | /** 2856 | * @brief Specifies the maximum logging level produced by 2857 | * internal kafka logging and debugging. 2858 | * 2859 | * If the \p \"debug\" configuration property is set the level is automatically 2860 | * adjusted to \c LOG_DEBUG (7). 
2861 | */ 2862 | RD_EXPORT 2863 | void rd_kafka_set_log_level(rd_kafka_t *rk, int level); 2864 | 2865 | 2866 | /** 2867 | * @brief Builtin (default) log sink: print to stderr 2868 | */ 2869 | RD_EXPORT 2870 | void rd_kafka_log_print(const rd_kafka_t *rk, int level, 2871 | const char *fac, const char *buf); 2872 | 2873 | 2874 | /** 2875 | * @brief Builtin log sink: print to syslog. 2876 | */ 2877 | RD_EXPORT 2878 | void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, 2879 | const char *fac, const char *buf); 2880 | 2881 | 2882 | /** 2883 | * @brief Returns the current out queue length. 2884 | * 2885 | * The out queue contains messages waiting to be sent to, or acknowledged by, 2886 | * the broker. 2887 | * 2888 | * An application should wait for this queue to reach zero before terminating 2889 | * to make sure outstanding requests (such as offset commits) are fully 2890 | * processed. 2891 | * 2892 | * @returns number of messages in the out queue. 2893 | */ 2894 | RD_EXPORT 2895 | int rd_kafka_outq_len(rd_kafka_t *rk); 2896 | 2897 | 2898 | 2899 | /** 2900 | * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp 2901 | * 2902 | * This is only useful for debugging rdkafka, showing state and statistics 2903 | * for brokers, topics, partitions, etc. 2904 | */ 2905 | RD_EXPORT 2906 | void rd_kafka_dump(FILE *fp, rd_kafka_t *rk); 2907 | 2908 | 2909 | 2910 | /** 2911 | * @brief Retrieve the current number of threads in use by librdkafka. 2912 | * 2913 | * Used by regression tests. 2914 | */ 2915 | RD_EXPORT 2916 | int rd_kafka_thread_cnt(void); 2917 | 2918 | 2919 | /** 2920 | * @brief Wait for all rd_kafka_t objects to be destroyed. 2921 | * 2922 | * Returns 0 if all kafka objects are now destroyed, or -1 if the 2923 | * timeout was reached. 2924 | * Since `rd_kafka_destroy()` is an asynch operation the 2925 | * `rd_kafka_wait_destroyed()` function can be used for applications where 2926 | * a clean shutdown is required. 
2927 | */ 2928 | RD_EXPORT 2929 | int rd_kafka_wait_destroyed(int timeout_ms); 2930 | 2931 | 2932 | /**@}*/ 2933 | 2934 | 2935 | 2936 | 2937 | /** 2938 | * @name Experimental APIs 2939 | * @{ 2940 | */ 2941 | 2942 | /** 2943 | * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's 2944 | * queue (rd_kafka_consumer_poll()). 2945 | * 2946 | * @warning It is not permitted to call rd_kafka_poll() after directing the 2947 | * main queue with rd_kafka_poll_set_consumer(). 2948 | */ 2949 | RD_EXPORT 2950 | rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); 2951 | 2952 | 2953 | /**@}*/ 2954 | 2955 | /** 2956 | * @name Event interface 2957 | * 2958 | * @brief The event API provides an alternative pollable non-callback interface 2959 | * to librdkafka's message and event queues. 2960 | * 2961 | * @{ 2962 | */ 2963 | 2964 | 2965 | /** 2966 | * @brief Event types 2967 | */ 2968 | typedef int rd_kafka_event_type_t; 2969 | #define RD_KAFKA_EVENT_NONE 0x0 2970 | #define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ 2971 | #define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ 2972 | #define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ 2973 | #define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ 2974 | #define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ 2975 | #define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ 2976 | 2977 | 2978 | typedef struct rd_kafka_op_s rd_kafka_event_t; 2979 | 2980 | 2981 | /** 2982 | * @returns the event type for the given event. 2983 | * 2984 | * @remark As a convenience it is okay to pass \p rkev as NULL in which case 2985 | * RD_KAFKA_EVENT_NONE is returned. 2986 | */ 2987 | RD_EXPORT 2988 | rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); 2989 | 2990 | /** 2991 | * @returns the event type's name for the given event. 
2992 | * 2993 | * @remark As a convenience it is okay to pass \p rkev as NULL in which case 2994 | * the name for RD_KAFKA_EVENT_NONE is returned. 2995 | */ 2996 | RD_EXPORT 2997 | const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); 2998 | 2999 | 3000 | /** 3001 | * @brief Destroy an event. 3002 | * 3003 | * @remark Any references to this event, such as extracted messages, 3004 | * will not be usable after this call. 3005 | * 3006 | * @remark As a convenience it is okay to pass \p rkev as NULL in which case 3007 | * no action is performed. 3008 | */ 3009 | RD_EXPORT 3010 | void rd_kafka_event_destroy (rd_kafka_event_t *rkev); 3011 | 3012 | 3013 | /** 3014 | * @returns the next message from an event. 3015 | * 3016 | * Call repeatedly until it returns NULL. 3017 | * 3018 | * Event types: 3019 | * - RD_KAFKA_EVENT_FETCH (1 message) 3020 | * - RD_KAFKA_EVENT_DR (>=1 message(s)) 3021 | * 3022 | * @remark The returned message(s) MUST NOT be 3023 | * freed with rd_kafka_message_destroy(). 3024 | */ 3025 | RD_EXPORT 3026 | const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); 3027 | 3028 | 3029 | /** 3030 | * @brief Extracts \p size message(s) from the event into the 3031 | * pre-allocated array \p rkmessages. 3032 | * 3033 | * Event types: 3034 | * - RD_KAFKA_EVENT_FETCH (1 message) 3035 | * - RD_KAFKA_EVENT_DR (>=1 message(s)) 3036 | * 3037 | * @returns the number of messages extracted. 3038 | */ 3039 | RD_EXPORT 3040 | size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, 3041 | const rd_kafka_message_t **rkmessages, 3042 | size_t size); 3043 | 3044 | 3045 | /** 3046 | * @returns the number of remaining messages in the event. 3047 | * 3048 | * Event types: 3049 | * - RD_KAFKA_EVENT_FETCH (1 message) 3050 | * - RD_KAFKA_EVENT_DR (>=1 message(s)) 3051 | */ 3052 | RD_EXPORT 3053 | size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); 3054 | 3055 | 3056 | /** 3057 | * @returns the error code for the event. 
3058 | * 3059 | * Event types: 3060 | * - all 3061 | */ 3062 | RD_EXPORT 3063 | rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); 3064 | 3065 | 3066 | /** 3067 | * @returns the error string (if any). 3068 | * An application should check that rd_kafka_event_error() returns 3069 | * non-zero before calling this function. 3070 | * 3071 | * Event types: 3072 | * - all 3073 | */ 3074 | RD_EXPORT 3075 | const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); 3076 | 3077 | 3078 | 3079 | /** 3080 | * @returns the user opaque (if any) 3081 | * 3082 | * Event types: 3083 | * - RD_KAFKA_EVENT_OFFSET_COMMIT 3084 | */ 3085 | RD_EXPORT 3086 | void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); 3087 | 3088 | 3089 | /** 3090 | * @brief Extract log message from the event. 3091 | * 3092 | * Event types: 3093 | * - RD_KAFKA_EVENT_LOG 3094 | * 3095 | * @returns 0 on success or -1 if unsupported event type. 3096 | */ 3097 | RD_EXPORT 3098 | int rd_kafka_event_log (rd_kafka_event_t *rkev, 3099 | const char **fac, const char **str, int *level); 3100 | 3101 | 3102 | /** 3103 | * @returns the topic partition list from the event. 3104 | * 3105 | * @remark The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy() 3106 | * 3107 | * Event types: 3108 | * - RD_KAFKA_EVENT_REBALANCE 3109 | * - RD_KAFKA_EVENT_OFFSET_COMMIT 3110 | */ 3111 | RD_EXPORT rd_kafka_topic_partition_list_t * 3112 | rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev); 3113 | 3114 | 3115 | /** 3116 | * @returns a newly allocated topic_partition container, if applicable for the event type, 3117 | * else NULL. 3118 | * 3119 | * @remark The returned pointer MUST be freed with rd_kafka_topic_partition_destroy(). 
3120 | * 3121 | * Event types: 3122 | * RD_KAFKA_EVENT_ERROR (for partition level errors) 3123 | */ 3124 | RD_EXPORT rd_kafka_topic_partition_t * 3125 | rd_kafka_event_topic_partition (rd_kafka_event_t *rkev); 3126 | 3127 | 3128 | /** 3129 | * @brief Poll a queue for an event for max \p timeout_ms. 3130 | * 3131 | * @returns an event, or NULL. 3132 | * 3133 | * @remark Use rd_kafka_event_destroy() to free the event. 3134 | */ 3135 | RD_EXPORT 3136 | rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms); 3137 | 3138 | /** 3139 | * @brief Poll a queue for events served through callbacks for max \p timeout_ms. 3140 | * 3141 | * @returns the number of events served. 3142 | * 3143 | * @remark This API must only be used for queues with callbacks registered 3144 | * for all expected event types. E.g., not a message queue. 3145 | */ 3146 | RD_EXPORT 3147 | int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); 3148 | 3149 | 3150 | /**@}*/ 3151 | 3152 | #ifdef __cplusplus 3153 | } 3154 | #endif 3155 | -------------------------------------------------------------------------------- /dub.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "librdkafka", 3 | "description": "rdkafka C header build in D", 4 | "copyright": "Copyright © 2016 DlangApache", 5 | "homepage": "https://github.com/edenhill/librdkafka", 6 | "license": "Apache", 7 | "authors": [ 8 | "Deimos" 9 | ], 10 | "libs": [ 11 | "rdkafka" 12 | ] 13 | } 14 | 15 | -------------------------------------------------------------------------------- /source/deimos/rdkafka.d: -------------------------------------------------------------------------------- 1 | /* 2 | * librdkafka - Apache Kafka C library 3 | * 4 | * Copyright (c) 2012-2013 Magnus Edenhill 5 | * All rights reserved. 
6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, 13 | * this list of conditions and the following disclaimer in the documentation 14 | * and/or other materials provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 | * POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | 29 | /** 30 | * @file rdkafka.h 31 | * @brief Apache Kafka C/C++ consumer and producer client library. 32 | * 33 | * rdkafka.h contains the public API for librdkafka. 34 | * The API is documented in this file as comments prefixing the function, type, 35 | * enum, define, etc. 
36 | * 37 | * @sa For the C++ interface see rdkafkacpp.h 38 | * 39 | * @tableofcontents 40 | */ 41 | 42 | 43 | /* @cond NO_DOC */ 44 | module deimos.rdkafka; 45 | 46 | import core.stdc.stdio; 47 | import core.stdc.ctype; 48 | import core.stdc.inttypes; 49 | //import core.stdc.sys.types; 50 | import core.sys.posix.sys.types; 51 | //#include 52 | //#include 53 | version(Windows) 54 | { 55 | import core.sys.windows.basetsd: SSIZE_T; 56 | import core.sys.windows.winsock2: sockaddr; 57 | alias ssize_t = SSIZE_T; 58 | } 59 | else 60 | { 61 | import core.sys.posix.sys.socket; 62 | } 63 | 64 | extern(C) nothrow @nogc: 65 | 66 | /* @endcond */ 67 | 68 | 69 | /** 70 | * @brief Returns the librdkafka version as integer. 71 | * 72 | * @returns Version integer. 73 | * 74 | * @sa See RD_KAFKA_VERSION for how to parse the integer format. 75 | * @sa Use rd_kafka_version_str() to retrieve the version as a string. 76 | */ 77 | 78 | int rd_kafka_version(); 79 | 80 | /** 81 | * @brief Returns the librdkafka version as string. 82 | * 83 | * @returns Version string 84 | */ 85 | 86 | const(char) * rd_kafka_version_str (); 87 | 88 | /**@}*/ 89 | 90 | 91 | /** 92 | * @name Constants, errors, types 93 | * @{ 94 | * 95 | * 96 | */ 97 | 98 | 99 | /** 100 | * @enum rd_kafka_type_t 101 | * 102 | * @brief rd_kafka_t handle type. 
103 | * 104 | * @sa rd_kafka_new() 105 | */ 106 | enum rd_kafka_type_t { 107 | RD_KAFKA_PRODUCER, /**< Producer client */ 108 | RD_KAFKA_CONSUMER /**< Consumer client */ 109 | } 110 | //alias rd_kafka_type_t; 111 | 112 | 113 | /** 114 | * @enum Timestamp types 115 | * 116 | * @sa rd_kafka_message_timestamp() 117 | */ 118 | enum rd_kafka_timestamp_type_t { 119 | RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ 120 | RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ 121 | RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ 122 | } 123 | 124 | 125 | /** 126 | * @brief Retrieve supported debug contexts for use with the \c \"debug\" 127 | * configuration property. (runtime) 128 | * 129 | * @returns Comma-separated list of available debugging contexts. 130 | */ 131 | 132 | const(char) *rd_kafka_get_debug_contexts(); 133 | 134 | /** 135 | * @brief Supported debug contexts. (compile time) 136 | * 137 | * @deprecated This compile time value may be outdated at runtime due to 138 | * linking another version of the library. 139 | * Use rd_kafka_get_debug_contexts() instead. 
140 | */ 141 | deprecated("please Use rd_kafka_get_debug_contexts() instead."){ 142 | enum RD_KAFKA_DEBUG_CONTEXTS = "all,generic,broker,topic,metadata,queue,msg,protocol,cgrp,security,fetch,feature"; 143 | } 144 | 145 | 146 | /* @cond NO_DOC */ 147 | /* Private types to provide ABI compatibility */ 148 | 149 | struct rd_kafka_s{} 150 | struct rd_kafka_topic_s{} 151 | struct rd_kafka_conf_s{} 152 | struct rd_kafka_topic_conf_s{} 153 | struct rd_kafka_queue_s{} 154 | 155 | alias rd_kafka_t = rd_kafka_s; 156 | alias rd_kafka_topic_t = rd_kafka_topic_s; 157 | alias rd_kafka_conf_t = rd_kafka_conf_s; 158 | alias rd_kafka_topic_conf_t = rd_kafka_topic_conf_s; 159 | alias rd_kafka_queue_t = rd_kafka_queue_s; 160 | /* 161 | typedef struct rd_kafka_s rd_kafka_t; 162 | typedef struct rd_kafka_topic_s rd_kafka_topic_t; 163 | typedef struct rd_kafka_conf_s rd_kafka_conf_t; 164 | typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; 165 | typedef struct rd_kafka_queue_s rd_kafka_queue_t;*/ 166 | /* @endcond */ 167 | 168 | 169 | /** 170 | * @enum rd_kafka_resp_err_t 171 | * @brief Error codes. 172 | * 173 | * The negative error codes delimited by two underscores 174 | * (\c RD_KAFKA_RESP_ERR__..) denotes errors internal to librdkafka and are 175 | * displayed as \c \"Local: \\", while the error codes 176 | * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker 177 | * errors and are displayed as \c \"Broker: \\". 
178 | * 179 | * @sa Use rd_kafka_err2str() to translate an error code a human readable string 180 | */ 181 | enum rd_kafka_resp_err_t { 182 | /* Internal errors to rdkafka: */ 183 | /** Begin internal error codes */ 184 | RD_KAFKA_RESP_ERR__BEGIN = -200, 185 | /** Received message is incorrect */ 186 | RD_KAFKA_RESP_ERR__BAD_MSG = -199, 187 | /** Bad/unknown compression */ 188 | RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, 189 | /** Broker is going away */ 190 | RD_KAFKA_RESP_ERR__DESTROY = -197, 191 | /** Generic failure */ 192 | RD_KAFKA_RESP_ERR__FAIL = -196, 193 | /** Broker transport failure */ 194 | RD_KAFKA_RESP_ERR__TRANSPORT = -195, 195 | /** Critical system resource */ 196 | RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, 197 | /** Failed to resolve broker */ 198 | RD_KAFKA_RESP_ERR__RESOLVE = -193, 199 | /** Produced message timed out*/ 200 | RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, 201 | /** Reached the end of the topic+partition queue on 202 | * the broker. Not really an error. */ 203 | RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, 204 | /** Permanent: Partition does not exist in cluster. */ 205 | RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, 206 | /** File or filesystem error */ 207 | RD_KAFKA_RESP_ERR__FS = -189, 208 | /** Permanent: Topic does not exist in cluster. */ 209 | RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, 210 | /** All broker connections are down. */ 211 | RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, 212 | /** Invalid argument, or invalid configuration */ 213 | RD_KAFKA_RESP_ERR__INVALID_ARG = -186, 214 | /** Operation timed out */ 215 | RD_KAFKA_RESP_ERR__TIMED_OUT = -185, 216 | /** Queue is full */ 217 | RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, 218 | /** ISR count < required.acks */ 219 | RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, 220 | /** Broker node update */ 221 | RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, 222 | /** SSL error */ 223 | RD_KAFKA_RESP_ERR__SSL = -181, 224 | /** Waiting for coordinator to become available. 
*/ 225 | RD_KAFKA_RESP_ERR__WAIT_COORD = -180, 226 | /** Unknown client group */ 227 | RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, 228 | /** Operation in progress */ 229 | RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, 230 | /** Previous operation in progress, wait for it to finish. */ 231 | RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, 232 | /** This operation would interfere with an existing subscription */ 233 | RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, 234 | /** Assigned partitions (rebalance_cb) */ 235 | RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, 236 | /** Revoked partitions (rebalance_cb) */ 237 | RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, 238 | /** Conflicting use */ 239 | RD_KAFKA_RESP_ERR__CONFLICT = -173, 240 | /** Wrong state */ 241 | RD_KAFKA_RESP_ERR__STATE = -172, 242 | /** Unknown protocol */ 243 | RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, 244 | /** Not implemented */ 245 | RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, 246 | /** Authentication failure*/ 247 | RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, 248 | /** No stored offset */ 249 | RD_KAFKA_RESP_ERR__NO_OFFSET = -168, 250 | /** Outdated */ 251 | RD_KAFKA_RESP_ERR__OUTDATED = -167, 252 | /** Timed out in queue */ 253 | RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, 254 | /** Feature not supported by broker */ 255 | RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, 256 | /** Awaiting cache update */ 257 | RD_KAFKA_RESP_ERR__WAIT_CACHE = -164, 258 | 259 | /** End internal error codes */ 260 | RD_KAFKA_RESP_ERR__END = -100, 261 | 262 | /* Kafka broker errors: */ 263 | /** Unknown broker error */ 264 | RD_KAFKA_RESP_ERR_UNKNOWN = -1, 265 | /** Success */ 266 | RD_KAFKA_RESP_ERR_NO_ERROR = 0, 267 | /** Offset out of range */ 268 | RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, 269 | /** Invalid message */ 270 | RD_KAFKA_RESP_ERR_INVALID_MSG = 2, 271 | /** Unknown topic or partition */ 272 | RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, 273 | /** Invalid message size */ 274 | RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, 
275 | /** Leader not available */ 276 | RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, 277 | /** Not leader for partition */ 278 | RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, 279 | /** Request timed out */ 280 | RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, 281 | /** Broker not available */ 282 | RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, 283 | /** Replica not available */ 284 | RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, 285 | /** Message size too large */ 286 | RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, 287 | /** StaleControllerEpochCode */ 288 | RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, 289 | /** Offset metadata string too large */ 290 | RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, 291 | /** Broker disconnected before response received */ 292 | RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, 293 | /** Group coordinator load in progress */ 294 | RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14, 295 | /** Group coordinator not available */ 296 | RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15, 297 | /** Not coordinator for group */ 298 | RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16, 299 | /** Invalid topic */ 300 | RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, 301 | /** Message batch larger than configured server segment size */ 302 | RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, 303 | /** Not enough in-sync replicas */ 304 | RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, 305 | /** Message(s) written to insufficient number of in-sync replicas */ 306 | RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, 307 | /** Invalid required acks value */ 308 | RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, 309 | /** Specified group generation id is not valid */ 310 | RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, 311 | /** Inconsistent group protocol */ 312 | RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, 313 | /** Invalid group.id */ 314 | RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, 315 | /** Unknown member */ 316 | RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, 317 | /** Invalid 
session timeout */ 318 | RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, 319 | /** Group rebalance in progress */ 320 | RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, 321 | /** Commit offset data size is not valid */ 322 | RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, 323 | /** Topic authorization failed */ 324 | RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, 325 | /** Group authorization failed */ 326 | RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, 327 | /** Cluster authorization failed */ 328 | RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, 329 | /** Invalid timestamp */ 330 | RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, 331 | /** Unsupported SASL mechanism */ 332 | RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, 333 | /** Illegal SASL state */ 334 | RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, 335 | /** Unsupported version */ 336 | RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, 337 | /** Topic already exists */ 338 | RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, 339 | /** Invalid number of partitions */ 340 | RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, 341 | /** Invalid replication factor */ 342 | RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, 343 | /** Invalid replica assignment */ 344 | RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, 345 | /** Invalid config */ 346 | RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, 347 | /** Not controller for cluster */ 348 | RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, 349 | /** Invalid request */ 350 | RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, 351 | /** Message format on broker does not support request */ 352 | RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, 353 | 354 | RD_KAFKA_RESP_ERR_END_ALL, 355 | } 356 | 357 | 358 | /** 359 | * @brief Error code value, name and description. 360 | * Typically for use with language bindings to automatically expose 361 | * the full set of librdkafka error codes. 
362 | */ 363 | struct rd_kafka_err_desc { 364 | rd_kafka_resp_err_t code;/**< Error code */ 365 | const(char) *name; /**< Error name, same as code enum sans prefix */ 366 | const(char) *desc; /**< Human readable error description. */ 367 | }; 368 | 369 | 370 | /** 371 | * @brief Returns the full list of error codes. 372 | */ 373 | 374 | void rd_kafka_get_err_descs (const rd_kafka_err_desc **errdescs,size_t *cntp); 375 | 376 | 377 | 378 | 379 | /** 380 | * @brief Returns a human readable representation of a kafka error. 381 | * 382 | * @param err Error code to translate 383 | */ 384 | 385 | const(char) *rd_kafka_err2str (rd_kafka_resp_err_t err); 386 | 387 | 388 | 389 | /** 390 | * @brief Returns the error code name (enum name). 391 | * 392 | * @param err Error code to translate 393 | */ 394 | 395 | const(char) *rd_kafka_err2name (rd_kafka_resp_err_t err); 396 | 397 | 398 | /** 399 | * @brief Returns the last error code generated by a legacy API call 400 | * in the current thread. 401 | * 402 | * The legacy APIs are the ones using errno to propagate error value, namely: 403 | * - rd_kafka_topic_new() 404 | * - rd_kafka_consume_start() 405 | * - rd_kafka_consume_stop() 406 | * - rd_kafka_consume() 407 | * - rd_kafka_consume_batch() 408 | * - rd_kafka_consume_callback() 409 | * - rd_kafka_consume_queue() 410 | * - rd_kafka_produce() 411 | * 412 | * The main use for this function is to avoid converting system \p errno 413 | * values to rd_kafka_resp_err_t codes for legacy APIs. 414 | * 415 | * @remark The last error is stored per-thread, if multiple rd_kafka_t handles 416 | * are used in the same application thread the developer needs to 417 | * make sure rd_kafka_last_error() is called immediately after 418 | * a failed API call. 
419 | */ 420 | 421 | rd_kafka_resp_err_t rd_kafka_last_error (); 422 | 423 | 424 | /** 425 | * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t 426 | * error code upon failure from the following functions: 427 | * - rd_kafka_topic_new() 428 | * - rd_kafka_consume_start() 429 | * - rd_kafka_consume_stop() 430 | * - rd_kafka_consume() 431 | * - rd_kafka_consume_batch() 432 | * - rd_kafka_consume_callback() 433 | * - rd_kafka_consume_queue() 434 | * - rd_kafka_produce() 435 | * 436 | * @param errnox System errno value to convert 437 | * 438 | * @returns Appropriate error code for \p errnox 439 | * 440 | * @remark A better alternative is to call rd_kafka_last_error() immediately 441 | * after any of the above functions return -1 or NULL. 442 | * 443 | * @sa rd_kafka_last_error() 444 | */ 445 | 446 | rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); 447 | 448 | 449 | /** 450 | * @brief Returns the thread-local system errno 451 | * 452 | * On most platforms this is the same as \p errno but in case of different 453 | * runtimes between library and application (e.g., Windows static DLLs) 454 | * this provides a means for exposing the errno librdkafka uses. 455 | * 456 | * @remark The value is local to the current calling thread. 457 | */ 458 | 459 | int rd_kafka_errno (); 460 | 461 | 462 | 463 | /** 464 | * @brief Topic+Partition place holder 465 | * 466 | * Generic place holder for a Topic+Partition and its related information 467 | * used for multiple purposes: 468 | * - consumer offset (see rd_kafka_commit(), et.al.) 469 | * - group rebalancing callback (rd_kafka_conf_set_rebalance_cb()) 470 | * - offset commit result callback (rd_kafka_conf_set_offset_commit_cb()) 471 | */ 472 | 473 | /** 474 | * @brief Generic place holder for a specific Topic+Partition. 
475 | * 476 | * @sa rd_kafka_topic_partition_list_new() 477 | */ 478 | struct rd_kafka_topic_partition_s { 479 | char *topic; /**< Topic name */ 480 | int32_t partition; /**< Partition */ 481 | int64_t offset; /**< Offset */ 482 | void *metadata; /**< Metadata */ 483 | size_t metadata_size; /**< Metadata size */ 484 | void *opaque; /**< Application opaque */ 485 | rd_kafka_resp_err_t err; /**< Error code, depending on use. */ 486 | void *_private; /**< INTERNAL USE ONLY, 487 | * INITIALIZE TO ZERO, DO NOT TOUCH */ 488 | } ; 489 | 490 | alias rd_kafka_topic_partition_t = rd_kafka_topic_partition_s; 491 | 492 | /** 493 | * @brief Destroy a rd_kafka_topic_partition_t. 494 | * @remark This must not be called for elements in a topic partition list. 495 | */ 496 | void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); 497 | 498 | 499 | /** 500 | * @brief A growable list of Topic+Partitions. 501 | * 502 | */ 503 | struct rd_kafka_topic_partition_list_s { 504 | int cnt; /**< Current number of elements */ 505 | int size; /**< Current allocated size */ 506 | rd_kafka_topic_partition_t *elems; /**< Element array[] */ 507 | } ; 508 | alias rd_kafka_topic_partition_list_t = rd_kafka_topic_partition_list_s; 509 | 510 | /** 511 | * @brief Create a new list/vector Topic+Partition container. 512 | * 513 | * @param size Initial allocated size used when the expected number of 514 | * elements is known or can be estimated. 515 | * Avoids reallocation and possibly relocation of the 516 | * elems array. 517 | * 518 | * @returns A newly allocated Topic+Partition list. 519 | * 520 | * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources 521 | * in use by a list and the list itself. 522 | * @sa rd_kafka_topic_partition_list_add() 523 | */ 524 | 525 | rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size); 526 | 527 | 528 | /** 529 | * @brief Free all resources used by the list and the list itself. 
530 | */ 531 | 532 | void rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist); 533 | 534 | /** 535 | * @brief Add topic+partition to list 536 | * 537 | * @param rktparlist List to extend 538 | * @param topic Topic name (copied) 539 | * @param partition Partition id 540 | * 541 | * @returns The object which can be used to fill in additional fields. 542 | */ 543 | 544 | rd_kafka_topic_partition_t * rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, 545 | const(char) *topic, int32_t partition); 546 | 547 | 548 | /** 549 | * @brief Add range of partitions from \p start to \p stop inclusive. 550 | * 551 | * @param rktparlist List to extend 552 | * @param topic Topic name (copied) 553 | * @param start Start partition of range 554 | * @param stop Last partition of range (inclusive) 555 | */ 556 | 557 | void 558 | rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t 559 | *rktparlist, 560 | const(char) *topic, 561 | int32_t start, int32_t stop); 562 | 563 | /** 564 | * @brief Delete partition from list. 565 | * 566 | * @param rktparlist List to modify 567 | * @param topic Topic name to match 568 | * @param partition Partition to match 569 | * 570 | * @returns 1 if partition was found (and removed), else 0. 571 | * 572 | * @remark Any held indices to elems[] are unusable after this call returns 1. 573 | */ 574 | int 575 | rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, 576 | const (char) *topic, int32_t partition); 577 | 578 | 579 | 580 | /** 581 | * @brief Delete partition from list by elems[] index. 582 | * 583 | * @returns 1 if partition was found (and removed), else 0. 584 | * 585 | * @sa rd_kafka_topic_partition_list_del() 586 | */ 587 | int 588 | rd_kafka_topic_partition_list_del_by_idx ( 589 | rd_kafka_topic_partition_list_t *rktparlist, 590 | int idx); 591 | /** 592 | * @brief Make a copy of an existing list. 
593 | * 594 | * @param src The existing list to copy. 595 | * 596 | * @returns A new list fully populated to be identical to \p src 597 | */ 598 | 599 | rd_kafka_topic_partition_list_t * 600 | rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); 601 | 602 | 603 | 604 | 605 | /** 606 | * @brief Set offset to \p offset for \p topic and \p partition 607 | * 608 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or 609 | * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found 610 | * in the list. 611 | */ 612 | 613 | rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( 614 | rd_kafka_topic_partition_list_t *rktparlist, 615 | const(char) *topic, int32_t partition, int64_t offset); 616 | 617 | 618 | 619 | /** 620 | * @brief Find element by \p topic and \p partition. 621 | * 622 | * @returns a pointer to the first matching element, or NULL if not found. 623 | */ 624 | 625 | rd_kafka_topic_partition_t * 626 | rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist, 627 | const(char) *topic, int32_t partition); 628 | 629 | /**@}*/ 630 | 631 | 632 | /** 633 | * @brief Sort list using comparator \p cmp. 634 | * 635 | * If \p cmp is NULL the default comparator will be used that 636 | * sorts by ascending topic name and partition. 
637 | * 638 | */ 639 | void 640 | rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t* rktparlist, 641 | int function (const void *a, const void *b, 642 | void *opaque) cmp, 643 | void *opaque); 644 | 645 | /** 646 | * @name Var-arg tag types 647 | * @{ 648 | * 649 | */ 650 | 651 | /** 652 | * @enum rd_kafka_vtype_t 653 | * 654 | * @brief Var-arg tag types 655 | * 656 | * @sa rd_kafka_producev() 657 | */ 658 | enum rd_kafka_vtype_t { 659 | RD_KAFKA_VTYPE_END, /**< va-arg sentinel */ 660 | RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ 661 | RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ 662 | RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ 663 | RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ 664 | RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ 665 | RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Application opaque */ 666 | RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ 667 | RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ 668 | } 669 | 670 | /** 671 | * @name Kafka messages 672 | * @{ 673 | * 674 | */ 675 | 676 | 677 | 678 | // FIXME: This doesn't show up in docs for some reason 679 | // "Compound rd_kafka_message_t is not documented." 680 | 681 | /** 682 | * @brief A Kafka message as returned by the \c rd_kafka_consume*() family 683 | * of functions as well as provided to the Producer \c dr_msg_cb(). 684 | * 685 | * For the consumer this object has two purposes: 686 | * - provide the application with a consumed message. (\c err == 0) 687 | * - report per-topic+partition consumer errors (\c err != 0) 688 | * 689 | * The application must check \c err to decide what action to take. 690 | * 691 | * When the application is finished with a message it must call 692 | * rd_kafka_message_destroy() unless otherwise noted. 693 | */ 694 | struct rd_kafka_message_s { 695 | rd_kafka_resp_err_t err; /**< Non-zero for error signaling. 
*/ 696 | rd_kafka_topic_t *rkt; /**< Topic */ 697 | int32_t partition; /**< Partition */ 698 | void *payload; /**< Producer: original message payload. 699 | * Consumer: Depends on the value of \c err : 700 | * - \c err==0: Message payload. 701 | * - \c err!=0: Error string */ 702 | size_t len; /**< Depends on the value of \c err : 703 | * - \c err==0: Message payload length 704 | * - \c err!=0: Error string length */ 705 | void *key; /**< Depends on the value of \c err : 706 | * - \c err==0: Optional message key */ 707 | size_t key_len; /**< Depends on the value of \c err : 708 | * - \c err==0: Optional message key length*/ 709 | int64_t offset; /**< Consume: 710 | * - Message offset (or offset for error 711 | * if \c err!=0 if applicable). 712 | * - dr_msg_cb: 713 | * Message offset assigned by broker. 714 | * If \c produce.offset.report is set then 715 | * each message will have this field set, 716 | * otherwise only the last message in 717 | * each produced internal batch will 718 | * have this field set, otherwise 0. */ 719 | void *_private; /**< Consume: 720 | * - rdkafka private pointer: DO NOT MODIFY 721 | * - dr_msg_cb: 722 | * msg_opaque from produce() call */ 723 | } ; 724 | 725 | alias rd_kafka_message_t = rd_kafka_message_s; 726 | 727 | 728 | /** 729 | * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka. 730 | */ 731 | 732 | void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); 733 | 734 | 735 | 736 | 737 | /** 738 | * @brief Returns the error string for an errored rd_kafka_message_t or NULL if 739 | * there was no error. 740 | */ 741 | const(char) * rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { 742 | if (!rkmessage.err) 743 | return null; 744 | 745 | if (rkmessage.payload) 746 | return cast(const(char) *)(rkmessage.payload); 747 | 748 | return rd_kafka_err2str(rkmessage.err); 749 | } 750 | 751 | 752 | /** 753 | * @brief Returns the message timestamp for a consumed message. 
754 | * 755 | * The timestamp is the number of milliseconds since the epoch (UTC). 756 | * 757 | * \p tstype (if not NULL) is updated to indicate the type of timestamp. 758 | * 759 | * @returns message timestamp, or -1 if not available. 760 | * 761 | * @remark Message timestamps require broker version 0.10.0 or later. 762 | */ 763 | int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, 764 | rd_kafka_timestamp_type_t *tstype); 765 | 766 | 767 | /**@}*/ 768 | 769 | 770 | /** 771 | * @name Configuration interface 772 | * @{ 773 | * 774 | * @brief Main/global configuration property interface 775 | * 776 | */ 777 | 778 | /** 779 | * @enum rd_kafka_conf_res_t 780 | * @brief Configuration result type 781 | */ 782 | enum rd_kafka_conf_res_t { 783 | RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ 784 | RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value. */ 785 | RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ 786 | } ; 787 | 788 | 789 | /** 790 | * @brief Create configuration object. 791 | * 792 | * When providing your own configuration to the \c rd_kafka_*_new_*() calls 793 | * the rd_kafka_conf_t objects needs to be created with this function 794 | * which will set up the defaults. 795 | * I.e.: 796 | * @code 797 | * rd_kafka_conf_t *myconf; 798 | * rd_kafka_conf_res_t res; 799 | * 800 | * myconf = rd_kafka_conf_new(); 801 | * res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600", 802 | * errstr, sizeof(errstr)); 803 | * if (res != RD_KAFKA_CONF_OK) 804 | * die("%s\n", errstr); 805 | * 806 | * rk = rd_kafka_new(..., myconf); 807 | * @endcode 808 | * 809 | * Please see CONFIGURATION.md for the default settings or use 810 | * rd_kafka_conf_properties_show() to provide the information at runtime. 811 | * 812 | * The properties are identical to the Apache Kafka configuration properties 813 | * whenever possible. 814 | * 815 | * @returns A new rd_kafka_conf_t object with defaults set. 
816 | * 817 | * @sa rd_kafka_conf_set(), rd_kafka_conf_destroy() 818 | */ 819 | 820 | rd_kafka_conf_t *rd_kafka_conf_new(); 821 | 822 | 823 | /** 824 | * @brief Destroys a conf object. 825 | */ 826 | 827 | void rd_kafka_conf_destroy(rd_kafka_conf_t *conf); 828 | 829 | 830 | /** 831 | * @brief Creates a copy/duplicate of configuration object \p conf 832 | */ 833 | 834 | rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); 835 | 836 | 837 | /** 838 | * @brief Sets a configuration property. 839 | * 840 | * \p conf must have been previously created with rd_kafka_conf_new(). 841 | * 842 | * Returns \c rd_kafka_conf_res_t to indicate success or failure. 843 | * In case of failure \p errstr is updated to contain a human readable 844 | * error string. 845 | */ 846 | 847 | rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, 848 | const(char) *name, 849 | const(char) *value, 850 | char *errstr, size_t errstr_size); 851 | 852 | /** 853 | * @brief Enable event sourcing. 854 | * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable 855 | * for consumption by `rd_kafka_queue_poll()`. 856 | */ 857 | void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); 858 | 859 | /** 860 | @deprecated See rd_kafka_conf_set_dr_msg_cb() 861 | $(D deprecated) 862 | */ 863 | 864 | deprecated("See rd_kafka_conf_set_dr_msg_cb"){ 865 | alias dr_cb_callback = extern(D) void function(rd_kafka_t *rk,void *payload, size_t len, rd_kafka_resp_err_t err,void *opaque, void *msg_opaque) nothrow @nogc; 866 | void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, dr_cb_callback dr_cb) ; 867 | } 868 | 869 | 870 | /** 871 | * @brief \b Producer: Set delivery report callback in provided \p conf object. 872 | * 873 | * The delivery report callback will be called once for each message 874 | * accepted by rd_kafka_produce() (et.al) with \p err set to indicate 875 | * the result of the produce request. 
The callback is called when a message is successfully produced or
912 | * 913 | * Without a rebalance callback this is done automatically by librdkafka 914 | * but registering a rebalance callback gives the application flexibility 915 | * in performing other operations along with the assinging/revocation, 916 | * such as fetching offsets from an alternate location (on assign) 917 | * or manually committing offsets (on revoke). 918 | * 919 | * @remark The \p partitions list is destroyed by librdkafka on return 920 | * return from the rebalance_cb and must not be freed or 921 | * saved by the application. 922 | * 923 | * The following example shows the application's responsibilities: 924 | * @code 925 | * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, 926 | * rd_kafka_topic_partition_list_t *partitions, 927 | * void *opaque) { 928 | * 929 | * switch (err) 930 | * { 931 | * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: 932 | * // application may load offets from arbitrary external 933 | * // storage here and update \p partitions 934 | * 935 | * rd_kafka_assign(rk, partitions); 936 | * break; 937 | * 938 | * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: 939 | * if (manual_commits) // Optional explicit manual commit 940 | * rd_kafka_commit(rk, partitions, 0); // sync commit 941 | * 942 | * rd_kafka_assign(rk, NULL); 943 | * break; 944 | * 945 | * default: 946 | * handle_unlikely_error(err); 947 | * rd_kafka_assign(rk, NULL); // sync state 948 | * break; 949 | * } 950 | * } 951 | * @endcode 952 | */ 953 | alias rebalance_cb_callback = extern(D) void function (rd_kafka_t *rk,rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions,void *opaque) nothrow @nogc; 954 | void rd_kafka_conf_set_rebalance_cb (rd_kafka_conf_t *conf, rebalance_cb_callback rebalance_cb); 955 | 956 | 957 | 958 | /** 959 | * @brief \b Consumer: Set offset commit callback for use with consumer groups. 
960 | * 961 | * The results of automatic or manual offset commits will be scheduled 962 | * for this callback and is served by rd_kafka_consumer_poll(). 963 | * 964 | * If no partitions had valid offsets to commit this callback will be called 965 | * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered 966 | * an error. 967 | * 968 | * The \p offsets list contains per-partition information: 969 | * - \c offset: committed offset (attempted) 970 | * - \c err: commit error 971 | */ 972 | alias offset_commit_cb_call_back = extern(D) void function(rd_kafka_t *rk,rd_kafka_resp_err_t err,rd_kafka_topic_partition_list_t *offsets,void *opaque); 973 | void rd_kafka_conf_set_offset_commit_cb (rd_kafka_conf_t *conf,offset_commit_cb_call_back offset_commit_cb); 974 | 975 | 976 | /** 977 | * @brief Set error callback in provided conf object. 978 | * 979 | * The error callback is used by librdkafka to signal critical errors 980 | * back to the application. 981 | * 982 | * If no \p error_cb is registered then the errors will be logged instead. 983 | */ 984 | alias error_cb_callback = extern(D) void function (rd_kafka_t *rk, int err,const(char) *reason,void *opaque) nothrow @nogc; 985 | void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,error_cb_callback error_cb); 986 | 987 | /** 988 | * @brief Set throttle callback. 989 | * 990 | * The throttle callback is used to forward broker throttle times to the 991 | * application for Produce and Fetch (consume) requests. 992 | * 993 | * Callbacks are triggered whenever a non-zero throttle time is returned by 994 | * the broker, or when the throttle time drops back to zero. 995 | * 996 | * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at 997 | * regular intervals to serve queued callbacks. 998 | * 999 | * @remark Requires broker version 0.9.0 or later. 
1000 | */ 1001 | alias throttle_cb_callback = extern(D) void function(rd_kafka_t *rk,const(char) *broker_name,int32_t broker_id,int throttle_time_ms,void *opaque) nothrow @nogc; 1002 | void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf,throttle_cb_callback throttle_cb); 1003 | 1004 | 1005 | /** 1006 | * @brief Set logger callback. 1007 | * 1008 | * The default is to print to stderr, but a syslog logger is also available, 1009 | * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives. 1010 | * Alternatively the application may provide its own logger callback. 1011 | * Or pass \p func as NULL to disable logging. 1012 | * 1013 | * This is the configuration alternative to the deprecated rd_kafka_set_logger() 1014 | * 1015 | * @remark The log_cb will be called spontaneously from librdkafka's internal 1016 | * threads unless logs have been forwarded to a poll queue through 1017 | * \c rd_kafka_set_log_queue(). 1018 | * An application MUST NOT call any librdkafka APIs or do any prolonged 1019 | * work in a non-forwarded \c log_cb. 1020 | */ 1021 | 1022 | alias log_cb_callback = extern(D) void function (const rd_kafka_t *rk, int level,const(char) *fac, const(char) *buf) nothrow @nogc; 1023 | void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, log_cb_callback log_cb); 1024 | 1025 | 1026 | /** 1027 | * @brief Set statistics callback in provided conf object. 1028 | * 1029 | * The statistics callback is triggered from rd_kafka_poll() every 1030 | * \c statistics.interval.ms (needs to be configured separately). 1031 | * Function arguments: 1032 | * - \p rk - Kafka handle 1033 | * - \p json - String containing the statistics data in JSON format 1034 | * - \p json_len - Length of \p json string. 1035 | * - \p opaque - Application-provided opaque. 1036 | * 1037 | * If the application wishes to hold on to the \p json pointer and free 1038 | * it at a later time it must return 1 from the \p stats_cb. 
1039 | * If the application returns 0 from the \p stats_cb then librdkafka 1040 | * will immediately free the \p json pointer. 1041 | */ 1042 | alias stats_cb_callback = extern(D) int function (rd_kafka_t *rk, 1043 | char *json, 1044 | size_t json_len, 1045 | void *opaque) nothrow @nogc; 1046 | void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf,stats_cb_callback stats_cb); 1047 | 1048 | 1049 | 1050 | /** 1051 | * @brief Set socket callback. 1052 | * 1053 | * The socket callback is responsible for opening a socket 1054 | * according to the supplied \p domain, \p type and \p protocol. 1055 | * The socket shall be created with \c CLOEXEC set in a racefree fashion, if 1056 | * possible. 1057 | * 1058 | * Default: 1059 | * - on linux: racefree CLOEXEC 1060 | * - others : non-racefree CLOEXEC 1061 | * 1062 | * @remark The callback will be called from an internal librdkafka thread. 1063 | */ 1064 | alias socket_cb_callback = extern(D) int function(int domain, int type,int protocol,void *opaque) nothrow @nogc; 1065 | void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, socket_cb_callback socket_cb); 1066 | 1067 | 1068 | /** 1069 | * @brief Set connect callback. 1070 | * 1071 | * The connect callback is responsible for connecting socket \p sockfd 1072 | * to peer address \p addr. 1073 | * The \p id field contains the broker identifier. 1074 | * 1075 | * \p connect_cb shall return 0 on success (socket connected) or an error 1076 | * number (errno) on error. 1077 | * 1078 | * @remark The callback will be called from an internal librdkafka thread. 1079 | */ 1080 | void 1081 | rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, 1082 | int function(int sockfd, 1083 | const sockaddr *addr, 1084 | int addrlen, 1085 | const char* id, 1086 | void* opaque) connect_cb); 1087 | 1088 | /** 1089 | * @brief Set close socket callback. 1090 | * 1091 | * Close a socket (optionally opened with socket_cb()). 
1092 | * 1093 | * @remark The callback will be called from an internal librdkafka thread. 1094 | */ 1095 | void 1096 | rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t* conf, 1097 | int function(int sockfd, void* opaque) closesocket_cb); 1098 | 1099 | version(Windows) 1100 | { 1101 | } 1102 | else 1103 | { 1104 | /** 1105 | * @brief Set open callback. 1106 | * 1107 | * The open callback is responsible for opening the file specified by 1108 | * pathname, flags and mode. 1109 | * The file shall be opened with \c CLOEXEC set in a racefree fashion, if 1110 | * possible. 1111 | * 1112 | * Default: 1113 | * - on linux: racefree CLOEXEC 1114 | * - others : non-racefree CLOEXEC 1115 | * 1116 | * @remark The callback will be called from an internal librdkafka thread. 1117 | */ 1118 | alias open_cb_callback = extern(D) int function (const(char) *pathname,int flags, mode_t mode,void *opaque) nothrow @nogc; 1119 | void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf,open_cb_callback open_cb); 1120 | } 1121 | 1122 | /** 1123 | * @brief Sets the application's opaque pointer that will be passed to callbacks 1124 | */ 1125 | 1126 | void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque); 1127 | 1128 | /** 1129 | * @brief Retrieves the opaque pointer previously set with rd_kafka_conf_set_opaque() 1130 | */ 1131 | 1132 | void *rd_kafka_opaque(const rd_kafka_t *rk); 1133 | 1134 | 1135 | 1136 | /** 1137 | * Sets the default topic configuration to use for automatically 1138 | * subscribed topics (e.g., through pattern-matched topics). 1139 | * The topic config object is not usable after this call. 1140 | */ 1141 | 1142 | void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf); 1143 | 1144 | 1145 | 1146 | /** 1147 | * @brief Retrieve configuration value for property \p name. 1148 | * 1149 | * If \p dest is non-NULL the value will be written to \p dest with at 1150 | * most \p dest_size. 
1151 | * 1152 | * \p *dest_size is updated to the full length of the value, thus if 1153 | * \p *dest_size initially is smaller than the full length the application 1154 | * may reallocate \p dest to fit the returned \p *dest_size and try again. 1155 | * 1156 | * If \p dest is NULL only the full length of the value is returned. 1157 | * 1158 | * Returns \p RD_KAFKA_CONF_OK if the property name matched, else 1159 | * \p RD_KAFKA_CONF_UNKNOWN. 1160 | */ 1161 | 1162 | rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, const(char) *name, char *dest, size_t *dest_size); 1163 | 1164 | 1165 | /** 1166 | * @brief Retrieve topic configuration value for property \p name. 1167 | * 1168 | * @sa rd_kafka_conf_get() 1169 | */ 1170 | 1171 | rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, const(char) *name, char *dest, size_t *dest_size); 1172 | 1173 | 1174 | /** 1175 | * @brief Dump the configuration properties and values of \p conf to an array 1176 | * with \"key\", \"value\" pairs. 1177 | * 1178 | * The number of entries in the array is returned in \p *cntp. 1179 | * 1180 | * The dump must be freed with `rd_kafka_conf_dump_free()`. 1181 | */ 1182 | 1183 | const(char) **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); 1184 | 1185 | 1186 | /** 1187 | * @brief Dump the topic configuration properties and values of \p conf 1188 | * to an array with \"key\", \"value\" pairs. 1189 | * 1190 | * The number of entries in the array is returned in \p *cntp. 1191 | * 1192 | * The dump must be freed with `rd_kafka_conf_dump_free()`. 1193 | */ 1194 | 1195 | const(char) **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp); 1196 | 1197 | /** 1198 | * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or 1199 | * `rd_kafka_topic_conf_dump(). 
1200 | */ 1201 | 1202 | void rd_kafka_conf_dump_free(const(char) **arr, size_t cnt); 1203 | 1204 | /** 1205 | * @brief Prints a table to \p fp of all supported configuration properties, 1206 | * their default values as well as a description. 1207 | */ 1208 | 1209 | void rd_kafka_conf_properties_show(FILE *fp); 1210 | 1211 | /**@}*/ 1212 | 1213 | 1214 | /** 1215 | * @name Topic configuration 1216 | * @{ 1217 | * 1218 | * @brief Topic configuration property interface 1219 | * 1220 | */ 1221 | 1222 | 1223 | /** 1224 | * @brief Create topic configuration object 1225 | * 1226 | * @sa Same semantics as for rd_kafka_conf_new(). 1227 | */ 1228 | 1229 | rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(); 1230 | 1231 | 1232 | /** 1233 | * @brief Creates a copy/duplicate of topic configuration object \p conf. 1234 | */ 1235 | 1236 | rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t * conf); 1237 | 1238 | 1239 | /** 1240 | * @brief Destroys a topic conf object. 1241 | */ 1242 | 1243 | void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); 1244 | 1245 | 1246 | /** 1247 | * @brief Sets a single rd_kafka_topic_conf_t value by property name. 1248 | * 1249 | * \p topic_conf should have been previously set up 1250 | * with `rd_kafka_topic_conf_new()`. 1251 | * 1252 | * @returns rd_kafka_conf_res_t to indicate success or failure. 1253 | */ 1254 | 1255 | rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const(char) *name,const(char) *value, char *errstr, size_t errstr_size); 1256 | 1257 | /** 1258 | * @brief Sets the application's opaque pointer that will be passed to all topic 1259 | * callbacks as the \c rkt_opaque argument. 1260 | */ 1261 | 1262 | void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque); 1263 | 1264 | 1265 | /** 1266 | * @brief \b Producer: Set partitioner callback in provided topic conf object. 
1267 | * 1268 | * The partitioner may be called in any thread at any time, 1269 | * it may be called multiple times for the same message/key. 1270 | * 1271 | * Partitioner function constraints: 1272 | * - MUST NOT call any rd_kafka_*() functions except: 1273 | * rd_kafka_topic_partition_available() 1274 | * - MUST NOT block or execute for prolonged periods of time. 1275 | * - MUST return a value between 0 and partition_cnt-1, or the 1276 | * special \c RD_KAFKA_PARTITION_UA value if partitioning 1277 | * could not be performed. 1278 | */ 1279 | alias partitioner_callback = extern(D) int32_t function(const rd_kafka_topic_t *rkt,const (void) *keydata,size_t keylen,int32_t partition_cnt,void *rkt_opaque,void *msg_opaque) nothrow @nogc; 1280 | void rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, partitioner_callback partitioner ); 1281 | 1282 | /** 1283 | * @brief Check if partition is available (has a leader broker). 1284 | * 1285 | * @returns 1 if the partition is available, else 0. 1286 | * 1287 | * @warning This function must only be called from inside a partitioner function 1288 | */ 1289 | 1290 | int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition); 1291 | 1292 | 1293 | /******************************************************************* 1294 | * * 1295 | * Partitioners provided by rdkafka * 1296 | * * 1297 | *******************************************************************/ 1298 | 1299 | /** 1300 | * @brief Random partitioner. 1301 | * 1302 | * Will try not to return unavailable partitions. 1303 | * 1304 | * @returns a random partition between 0 and \p partition_cnt - 1. 1305 | * 1306 | */ 1307 | 1308 | int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const (void) *key, size_t keylen, int32_t partition_cnt,void *opaque, void *msg_opaque); 1309 | 1310 | /** 1311 | * @brief Consistent partitioner. 
1312 | * 1313 | * Uses consistent hashing to map identical keys onto identical partitions. 1314 | * 1315 | * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on 1316 | * the CRC value of the key 1317 | */ 1318 | 1319 | int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, const (void) *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque); 1320 | 1321 | /** 1322 | * @brief Consistent-Random partitioner. 1323 | * 1324 | * This is the default partitioner. 1325 | * Uses consistent hashing to map identical keys onto identical partitions, and 1326 | * messages without keys will be assigned via the random partitioner. 1327 | * 1328 | * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on 1329 | * the CRC value of the key (if provided) 1330 | */ 1331 | 1332 | int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, const (void) *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque); 1333 | 1334 | 1335 | /**@}*/ 1336 | 1337 | 1338 | 1339 | /** 1340 | * @name Main Kafka and Topic object handles 1341 | * @{ 1342 | * 1343 | * 1344 | */ 1345 | 1346 | 1347 | 1348 | 1349 | /** 1350 | * @brief Creates a new Kafka handle and starts its operation according to the 1351 | * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER). 1352 | * 1353 | * \p conf is an optional struct created with `rd_kafka_conf_new()` that will 1354 | * be used instead of the default configuration. 1355 | * The \p conf object is freed by this function on success and must not be used 1356 | * or destroyed by the application sub-sequently. 1357 | * See `rd_kafka_conf_set()` et.al for more information. 1358 | * 1359 | * \p errstr must be a pointer to memory of at least size \p errstr_size where 1360 | * `rd_kafka_new()` may write a human readable error message in case the 1361 | * creation of a new handle fails. In which case the function returns NULL. 
1362 | * 1363 | * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER 1364 | * rd_kafka_t handle is created it may either operate in the 1365 | * legacy simple consumer mode using the rd_kafka_consume_start() 1366 | * interface, or the High-level KafkaConsumer API. 1367 | * @remark An application must only use one of these groups of APIs on a given 1368 | * rd_kafka_t RD_KAFKA_CONSUMER handle. 1369 | 1370 | * 1371 | * @returns The Kafka handle on success or NULL on error (see \p errstr) 1372 | * 1373 | * @sa To destroy the Kafka handle, use rd_kafka_destroy(). 1374 | */ 1375 | 1376 | rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size); 1377 | 1378 | 1379 | /** 1380 | * @brief Destroy Kafka handle. 1381 | * 1382 | * @remark This is a blocking operation. 1383 | */ 1384 | 1385 | void rd_kafka_destroy(rd_kafka_t *rk); 1386 | 1387 | 1388 | 1389 | /** 1390 | * @brief Returns Kafka handle name. 1391 | */ 1392 | 1393 | const(char) *rd_kafka_name(const rd_kafka_t *rk); 1394 | 1395 | 1396 | /** 1397 | * @brief Returns this client's broker-assigned group member id 1398 | * 1399 | * @remark This currently requires the high-level KafkaConsumer 1400 | * 1401 | * @returns An allocated string containing the current broker-assigned group 1402 | * member id, or NULL if not available. 1403 | * The application must free the string with \p free() or 1404 | * rd_kafka_mem_free() 1405 | */ 1406 | 1407 | char *rd_kafka_memberid (const rd_kafka_t *rk); 1408 | 1409 | 1410 | /** 1411 | * @brief Creates a new topic handle for topic named \p topic. 1412 | * 1413 | * \p conf is an optional configuration for the topic created with 1414 | * `rd_kafka_topic_conf_new()` that will be used instead of the default 1415 | * topic configuration. 1416 | * The \p conf object is freed by this function and must not be used or 1417 | * destroyed by the application sub-sequently. 1418 | * See `rd_kafka_topic_conf_set()` et.al for more information. 
1419 | * 1420 | * Topic handles are refcounted internally and calling rd_kafka_topic_new() 1421 | * again with the same topic name will return the previous topic handle 1422 | * without updating the original handle's configuration. 1423 | * Applications must eventually call rd_kafka_topic_destroy() for each 1424 | * succesfull call to rd_kafka_topic_new() to clear up resources. 1425 | * 1426 | * @returns the new topic handle or NULL on error (use rd_kafka_errno2err() 1427 | * to convert system \p errno to an rd_kafka_resp_err_t error code. 1428 | * 1429 | * @sa rd_kafka_topic_destroy() 1430 | */ 1431 | 1432 | rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const(char) *topic, rd_kafka_topic_conf_t *conf); 1433 | 1434 | 1435 | 1436 | /** 1437 | * @brief Loose application's topic handle refcount as previously created 1438 | * with `rd_kafka_topic_new()`. 1439 | * 1440 | * @remark Since topic objects are refcounted (both internally and for the app) 1441 | * the topic object might not actually be destroyed by this call, 1442 | * but the application must consider the object destroyed. 1443 | */ 1444 | 1445 | void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt); 1446 | 1447 | 1448 | /** 1449 | * @brief Returns the topic name. 1450 | */ 1451 | 1452 | const(char) *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); 1453 | 1454 | 1455 | /** 1456 | * @brief Get the \p rkt_opaque pointer that was set in the topic configuration. 1457 | */ 1458 | 1459 | void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); 1460 | 1461 | 1462 | /** 1463 | * @brief Unassigned partition. 1464 | * 1465 | * The unassigned partition is used by the producer API for messages 1466 | * that should be partitioned using the configured or default partitioner. 1467 | */ 1468 | enum int RD_KAFKA_PARTITION_UA = -1; 1469 | 1470 | 1471 | /** 1472 | * @brief Polls the provided kafka handle for events. 1473 | * 1474 | * Events will cause application provided callbacks to be called. 
1475 | * 1476 | * The \p timeout_ms argument specifies the maximum amount of time 1477 | * (in milliseconds) that the call will block waiting for events. 1478 | * For non-blocking calls, provide 0 as \p timeout_ms. 1479 | * To wait indefinitely for an event, provide -1. 1480 | * 1481 | * @remark An application should make sure to call poll() at regular 1482 | * intervals to serve any queued callbacks waiting to be called. 1483 | * 1484 | * Events: 1485 | * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] 1486 | * - error callbacks (rd_kafka_conf_set_error_cb()) [all] 1487 | * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] 1488 | * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] 1489 | * 1490 | * @returns the number of events served. 1491 | */ 1492 | 1493 | int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); 1494 | 1495 | 1496 | /** 1497 | * @brief Cancels the current callback dispatcher (rd_kafka_poll(), 1498 | * rd_kafka_consume_callback(), etc). 1499 | * 1500 | * A callback may use this to force an immediate return to the calling 1501 | * code (caller of e.g. rd_kafka_poll()) without processing any further 1502 | * events. 1503 | * 1504 | * @remark This function MUST ONLY be called from within a librdkafka callback. 1505 | */ 1506 | 1507 | void rd_kafka_yield (rd_kafka_t *rk); 1508 | 1509 | 1510 | 1511 | 1512 | /** 1513 | * @brief Pause producing or consumption for the provided list of partitions. 1514 | * 1515 | * Success or error is returned per-partition \p err in the \p partitions list. 1516 | * 1517 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR 1518 | */ 1519 | rd_kafka_resp_err_t rd_kafka_pause_partitions (rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions); 1520 | 1521 | 1522 | 1523 | /** 1524 | * @brief Resume producing or consumption for the provided list of partitions. 1525 | * 1526 | * Success or error is returned per-partition \p err in the \p partitions list. 
1527 | * 1528 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR 1529 | */ 1530 | rd_kafka_resp_err_t rd_kafka_resume_partitions (rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions); 1531 | 1532 | 1533 | 1534 | 1535 | /** 1536 | * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets 1537 | * for partition. 1538 | * 1539 | * Offsets are returned in \p *low and \p *high respectively. 1540 | * 1541 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. 1542 | */ 1543 | rd_kafka_resp_err_t 1544 | rd_kafka_query_watermark_offsets (rd_kafka_t *rk, 1545 | const(char) *topic, int32_t partition, 1546 | int64_t *low, int64_t *high, int timeout_ms); 1547 | 1548 | 1549 | /** 1550 | * @brief Get last known low (oldest/beginning) and high (newest/end) offsets 1551 | * for partition. 1552 | * 1553 | * The low offset is updated periodically (if statistics.interval.ms is set) 1554 | * while the high offset is updated on each fetched message set from the broker. 1555 | * 1556 | * If there is no cached offset (either low or high, or both) then 1557 | * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset. 1558 | * 1559 | * Offsets are returned in \p *low and \p *high respectively. 1560 | * 1561 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. 1562 | * 1563 | * @remark Shall only be used with an active consumer instance. 1564 | */ 1565 | rd_kafka_resp_err_t 1566 | rd_kafka_get_watermark_offsets (rd_kafka_t *rk, 1567 | const(char) *topic, int32_t partition, 1568 | int64_t *low, int64_t *high); 1569 | 1570 | /** 1571 | * @brief Look up the offsets for the given partitions by timestamp. 1572 | * 1573 | * The returned offset for each partition is the earliest offset whose 1574 | * timestamp is greater than or equal to the given timestamp in the 1575 | * corresponding partition. 
1576 | * 1577 | * The timestamps to query are represented as \c offset in \p offsets 1578 | * on input, and \c offset will contain the offset on output. 1579 | * 1580 | * The function will block for at most \p timeout_ms milliseconds. 1581 | * 1582 | * @remark Duplicate Topic+Partitions are not supported. 1583 | * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err 1584 | * 1585 | * @returns an error code for general errors, else RD_KAFKA_RESP_ERR_NO_ERROR 1586 | * in which case per-partition errors might be set. 1587 | */ 1588 | rd_kafka_resp_err_t 1589 | rd_kafka_offsets_for_times (rd_kafka_t* rk, 1590 | rd_kafka_topic_partition_list_t* offsets, 1591 | int timeout_ms); 1592 | 1593 | /** 1594 | * @brief Free pointer returned by librdkafka 1595 | * 1596 | * This is typically an abstraction for the free(3) call and makes sure 1597 | * the application can use the same memory allocator as librdkafka for 1598 | * freeing pointers returned by librdkafka. 1599 | * 1600 | * In standard setups it is usually not necessary to use this interface 1601 | * rather than the free(3) function. 1602 | * 1603 | * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs 1604 | * that explicitly mention using this function for freeing. 1605 | */ 1606 | 1607 | void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr); 1608 | 1609 | 1610 | /**@}*/ 1611 | 1612 | 1613 | 1614 | 1615 | 1616 | /** 1617 | * @name Queue API 1618 | * @{ 1619 | * 1620 | * Message queues allow the application to re-route consumed messages 1621 | * from multiple topic+partitions into one single queue point. 1622 | * This queue point containing messages from a number of topic+partitions 1623 | * may then be served by a single rd_kafka_consume*_queue() call, 1624 | * rather than one call per topic+partition combination. 1625 | */ 1626 | 1627 | 1628 | /** 1629 | * @brief Create a new message queue. 
1630 | * 1631 | * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al. 1632 | */ 1633 | 1634 | rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk); 1635 | 1636 | /** 1637 | * Destroy a queue, purging all of its enqueued messages. 1638 | */ 1639 | 1640 | void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); 1641 | 1642 | /** 1643 | * @returns a reference to the main librdkafka event queue. 1644 | * This is the queue served by rd_kafka_poll(). 1645 | * 1646 | * Use rd_kafka_queue_destroy() to lose the reference. 1647 | */ 1648 | rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); 1649 | 1650 | 1651 | /** 1652 | * @returns a reference to the librdkafka consumer queue. 1653 | * This is the queue served by rd_kafka_consumer_poll(). 1654 | * 1655 | * Use rd_kafka_queue_destroy() to lose the reference. 1656 | * 1657 | * @remark rd_kafka_queue_destroy() MUST be called on this queue 1658 | * prior to calling rd_kafka_consumer_close(). 1659 | */ 1660 | rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); 1661 | 1662 | /** 1663 | * @returns a reference to the partition's queue, or NULL if 1664 | * partition is invalid. 1665 | * 1666 | * Use rd_kafka_queue_destroy() to lose the reference. 1667 | * 1668 | * @remark rd_kafka_queue_destroy() MUST be called on this queue 1669 | * 1670 | * @remark This function only works on consumers. 1671 | */ 1672 | rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t* rk, 1673 | const char* topic, 1674 | int32_t partition); 1675 | /** 1676 | * @brief Forward/re-route queue \p src to \p dst. 1677 | * If \p dst is \c NULL the forwarding is removed. 1678 | * 1679 | * The internal refcounts for both queues are increased. 1680 | * 1681 | * @remark Regardless of whether \p dst is NULL or not, after calling this 1682 | * function, \p src will not forward its fetch queue to the consumer 1683 | * queue. 
1684 | */ 1685 | void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); 1686 | 1687 | /** 1688 | * @brief Forward librdkafka logs (and debug) to the specified queue 1689 | * for serving with one of the ..poll() calls. 1690 | * 1691 | * This allows an application to serve log callbacks (\c log_cb) 1692 | * in its thread of choice. 1693 | * 1694 | * @param rkqu Queue to forward logs to. If the value is NULL the logs 1695 | * are forwarded to the main queue. 1696 | * 1697 | * @remark The configuration property \c log.queue MUST also be set to true. 1698 | * 1699 | * @remark librdkafka maintains its own reference to the provided queue. 1700 | * 1701 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. 1702 | */ 1703 | rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t* rk, 1704 | rd_kafka_queue_t* rkqu); 1705 | 1706 | /** 1707 | * @returns the current number of elements in queue. 1708 | */ 1709 | size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); 1710 | 1711 | 1712 | /** 1713 | * @brief Enable IO event triggering for queue. 1714 | * 1715 | * To ease integration with IO based polling loops this API 1716 | * allows an application to create a separate file-descriptor 1717 | * that librdkafka will write \p payload (of size \p size) to 1718 | * whenever a new element is enqueued on a previously empty queue. 1719 | * 1720 | * To remove event triggering call with \p fd = -1. 1721 | * 1722 | * librdkafka will maintain a copy of the \p payload. 1723 | * 1724 | * @remark When using forwarded queues the IO event must only be enabled 1725 | * on the final forwarded-to (destination) queue. 
1726 | */ 1727 | void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, 1728 | const (void) *payload, size_t size); 1729 | 1730 | 1731 | /**@}*/ 1732 | 1733 | /** 1734 | * 1735 | * @name Simple Consumer API (legacy) 1736 | * @{ 1737 | * 1738 | */ 1739 | 1740 | 1741 | enum RD_KAFKA_OFFSET_BEGINNING = -2; /**< Start consuming from beginning of 1742 | * kafka partition queue: oldest msg */ 1743 | enum RD_KAFKA_OFFSET_END = -1; /**< Start consuming from end of kafka 1744 | * partition queue: next msg */ 1745 | enum RD_KAFKA_OFFSET_STORED = -1000; /**< Start consuming from offset retrieved 1746 | * from offset store */ 1747 | enum RD_KAFKA_OFFSET_INVALID = -1001; /**< Invalid offset */ 1748 | 1749 | 1750 | /** @cond NO_DOC */ 1751 | enum RD_KAFKA_OFFSET_TAIL_BASE = -2000; /* internal: do not use */ 1752 | /** @endcond */ 1753 | 1754 | /** 1755 | * @brief Start consuming \p CNT messages from topic's current end offset. 1756 | * 1757 | * That is, if current end offset is 12345 and \p CNT is 200, it will start 1758 | * consuming from offset \c 12345-200 = \c 12145. */ 1759 | auto RD_KAFKA_OFFSET_TAIL(T)(T CNT) { return (RD_KAFKA_OFFSET_TAIL_BASE - (CNT));} 1760 | 1761 | /** 1762 | * @brief Start consuming messages for topic \p rkt and \p partition 1763 | * at offset \p offset which may either be an absolute \c (0..N) 1764 | * or one of the logical offsets: 1765 | * - RD_KAFKA_OFFSET_BEGINNING 1766 | * - RD_KAFKA_OFFSET_END 1767 | * - RD_KAFKA_OFFSET_STORED 1768 | * - RD_KAFKA_OFFSET_TAIL 1769 | * 1770 | * rdkafka will attempt to keep \c queued.min.messages (config property) 1771 | * messages in the local queue by repeatedly fetching batches of messages 1772 | * from the broker until the threshold is reached. 1773 | * 1774 | * The application shall use one of the `rd_kafka_consume*()` functions 1775 | * to consume messages from the local queue, each kafka message being 1776 | * represented as a `rd_kafka_message_t *` object. 
1777 | * 1778 | * `rd_kafka_consume_start()` must not be called multiple times for the same 1779 | * topic and partition without stopping consumption first with 1780 | * `rd_kafka_consume_stop()`. 1781 | * 1782 | * @returns 0 on success or -1 on error in which case errno is set accordingly: 1783 | * - EBUSY - Conflicts with an existing or previous subscription 1784 | * (RD_KAFKA_RESP_ERR__CONFLICT) 1785 | * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id) 1786 | * (RD_KAFKA_RESP_ERR__INVALID_ARG) 1787 | * - ESRCH - requested \p partition is invalid. 1788 | * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) 1789 | * - ENOENT - topic is unknown in the Kafka cluster. 1790 | * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) 1791 | * 1792 | * Use `rd_kafka_errno2err()` to convert system \c errno to `rd_kafka_resp_err_t` 1793 | */ 1794 | 1795 | int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, 1796 | int64_t offset); 1797 | 1798 | /** 1799 | * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to 1800 | * the provided queue \p rkqu (which must have been previously allocated 1801 | * with `rd_kafka_queue_new()`). 1802 | * 1803 | * The application must use one of the `rd_kafka_consume_*_queue()` functions 1804 | * to receive fetched messages. 1805 | * 1806 | * `rd_kafka_consume_start_queue()` must not be called multiple times for the 1807 | * same topic and partition without stopping consumption first with 1808 | * `rd_kafka_consume_stop()`. 1809 | * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not 1810 | * be combined for the same topic and partition. 1811 | */ 1812 | 1813 | int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, 1814 | int64_t offset, rd_kafka_queue_t *rkqu); 1815 | 1816 | /** 1817 | * @brief Stop consuming messages for topic \p rkt and \p partition, purging 1818 | * all messages currently in the local queue. 
1819 | * 1820 | * NOTE: To enforce synchronisation this call will block until the internal 1821 | * fetcher has terminated and offsets are committed to configured 1822 | * storage method. 1823 | * 1824 | * The application needs to stop all consumers before calling 1825 | * `rd_kafka_destroy()` on the main object handle. 1826 | * 1827 | * @returns 0 on success or -1 on error (see `errno`). 1828 | */ 1829 | 1830 | int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); 1831 | 1832 | 1833 | 1834 | /** 1835 | * @brief Seek consumer for topic+partition to \p offset which is either an 1836 | * absolute or logical offset. 1837 | * 1838 | * If \p timeout_ms is not 0 the call will wait this long for the 1839 | * seek to be performed. If the timeout is reached the internal state 1840 | * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. 1841 | * If \p timeout_ms is 0 it will initiate the seek but return 1842 | * immediately without any error reporting (e.g., async). 1843 | * 1844 | * This call triggers a fetch queue barrier flush. 1845 | * 1846 | * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code. 1847 | */ 1848 | 1849 | rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, 1850 | int32_t partition, 1851 | int64_t offset, 1852 | int timeout_ms); 1853 | 1854 | 1855 | /** 1856 | * @brief Consume a single message from topic \p rkt and \p partition 1857 | * 1858 | * \p timeout_ms is maximum amount of time to wait for a message to be received. 1859 | * Consumer must have been previously started with `rd_kafka_consume_start()`. 1860 | * 1861 | * Returns a message object on success or \c NULL on error. 1862 | * The message object must be destroyed with `rd_kafka_message_destroy()` 1863 | * when the application is done with it. 1864 | * 1865 | * Errors (when returning NULL): 1866 | * - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched. 1867 | * - ENOENT - \p rkt + \p partition is unknown. 
1868 | * (no prior `rd_kafka_consume_start()` call) 1869 | * 1870 | * NOTE: The returned message's \c ..->err must be checked for errors. 1871 | * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the 1872 | * end of the partition has been reached, which should typically not be 1873 | * considered an error. The application should handle this case 1874 | * (e.g., ignore). 1875 | */ 1876 | 1877 | rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, 1878 | int timeout_ms); 1879 | 1880 | 1881 | 1882 | /** 1883 | * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition 1884 | * putting a pointer to each message in the application provided 1885 | * array \p rkmessages (of size \p rkmessages_size entries). 1886 | * 1887 | * `rd_kafka_consume_batch()` provides higher throughput performance 1888 | * than `rd_kafka_consume()`. 1889 | * 1890 | * \p timeout_ms is the maximum amount of time to wait for all of 1891 | * \p rkmessages_size messages to be put into \p rkmessages. 1892 | * If no messages were available within the timeout period this function 1893 | * returns 0 and \p rkmessages remains untouched. 1894 | * This differs somewhat from `rd_kafka_consume()`. 1895 | * 1896 | * The message objects must be destroyed with `rd_kafka_message_destroy()` 1897 | * when the application is done with it. 1898 | * 1899 | * @returns the number of rkmessages added in \p rkmessages, 1900 | * or -1 on error (same error codes as for `rd_kafka_consume()`. 1901 | * 1902 | * @sa rd_kafka_consume() 1903 | */ 1904 | 1905 | ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, 1906 | int timeout_ms, 1907 | rd_kafka_message_t **rkmessages, 1908 | size_t rkmessages_size); 1909 | 1910 | 1911 | 1912 | /** 1913 | * @brief Consumes messages from topic \p rkt and \p partition, calling 1914 | * the provided callback for each consumed messsage. 
1915 | * 1916 | * `rd_kafka_consume_callback()` provides higher throughput performance 1917 | * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`. 1918 | * 1919 | * \p timeout_ms is the maximum amount of time to wait for one or more messages 1920 | * to arrive. 1921 | * 1922 | * The provided \p consume_cb function is called for each message, 1923 | * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the 1924 | * provided \p rkmessage. 1925 | * 1926 | * The \p opaque argument is passed to the 'consume_cb' as \p opaque. 1927 | * 1928 | * @returns the number of messages processed or -1 on error. 1929 | * 1930 | * @sa rd_kafka_consume() 1931 | */ 1932 | alias consume_callback_callback = extern(D) void function(rd_kafka_message_t*rkmessage,void *opaque) nothrow @nogc; 1933 | /// ditto 1934 | int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition,int timeout_ms, consume_callback_callback consume_cb ,void *opaque); 1935 | 1936 | 1937 | /** 1938 | * @name Simple Consumer API (legacy): Queue consumers 1939 | * @{ 1940 | * 1941 | * The following `..._queue()` functions are analogous to the functions above 1942 | * but read messages from the provided queue \p rkqu instead. 1943 | * \p rkqu must have been previously created with `rd_kafka_queue_new()` 1944 | * and the topic consumer must have been started with 1945 | * `rd_kafka_consume_start_queue()` utilising the same queue. 
1946 | */ 1947 | 1948 | /** 1949 | * @brief Consume from queue 1950 | * 1951 | * @sa rd_kafka_consume() 1952 | */ 1953 | 1954 | rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, 1955 | int timeout_ms); 1956 | 1957 | /** 1958 | * @brief Consume batch of messages from queue 1959 | * 1960 | * @sa rd_kafka_consume_batch() 1961 | */ 1962 | 1963 | ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, 1964 | int timeout_ms, 1965 | rd_kafka_message_t **rkmessages, 1966 | size_t rkmessages_size); 1967 | 1968 | /** 1969 | * @brief Consume multiple messages from queue with callback 1970 | * 1971 | * @sa rd_kafka_consume_callback() 1972 | */ 1973 | alias consume_callback_queue_callback = extern(D) void function(rd_kafka_message_t *rkmessage,void *opaque) nothrow @nogc; 1974 | /// ditto 1975 | int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu,int timeout_ms, 1976 | consume_callback_queue_callback consume_cb, 1977 | void *opaque); 1978 | 1979 | 1980 | /**@}*/ 1981 | 1982 | 1983 | 1984 | 1985 | /** 1986 | * @name Simple Consumer API (legacy): Topic+partition offset store. 1987 | * @{ 1988 | * 1989 | * If \c auto.commit.enable is true the offset is stored automatically prior to 1990 | * returning of the message(s) in each of the rd_kafka_consume*() functions 1991 | * above. 1992 | */ 1993 | 1994 | 1995 | /** 1996 | * @brief Store offset \p offset for topic \p rkt partition \p partition. 1997 | * 1998 | * The offset will be committed (written) to the offset store according 1999 | * to \c `auto.commit.interval.ms`. 2000 | * 2001 | * @remark \c `auto.commit.enable` must be set to "false" when using this API. 2002 | * 2003 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. 
2004 | */ 2005 | 2006 | rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, 2007 | int32_t partition, int64_t offset); 2008 | /**@}*/ 2009 | 2010 | 2011 | 2012 | 2013 | /** 2014 | * @name KafkaConsumer (C) 2015 | * @{ 2016 | * @brief High-level KafkaConsumer C API 2017 | * 2018 | * 2019 | * 2020 | */ 2021 | 2022 | /** 2023 | * @brief Subscribe to topic set using balanced consumer groups. 2024 | * 2025 | * Wildcard (regex) topics are supported by the librdkafka assignor: 2026 | * any topic name in the \p topics list that is prefixed with \c \"^\" will 2027 | * be regex-matched to the full list of topics in the cluster and matching 2028 | * topics will be added to the subscription list. 2029 | * 2030 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or 2031 | * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid 2032 | * topics or regexes. 2033 | */ 2034 | rd_kafka_resp_err_t 2035 | rd_kafka_subscribe (rd_kafka_t *rk, 2036 | const rd_kafka_topic_partition_list_t *topics); 2037 | 2038 | 2039 | /** 2040 | * @brief Unsubscribe from the current subscription set. 2041 | */ 2042 | 2043 | rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); 2044 | 2045 | 2046 | /** 2047 | * @brief Returns the current topic subscription 2048 | * 2049 | * @returns An error code on failure, otherwise \p topic is updated 2050 | * to point to a newly allocated topic list (possibly empty). 2051 | * 2052 | * @remark The application is responsible for calling 2053 | * rd_kafka_topic_partition_list_destroy on the returned list. 2054 | */ 2055 | rd_kafka_resp_err_t 2056 | rd_kafka_subscription (rd_kafka_t *rk, 2057 | rd_kafka_topic_partition_list_t **topics); 2058 | 2059 | 2060 | 2061 | /** 2062 | * @brief Poll the consumer for messages or events. 2063 | * 2064 | * Will block for at most \p timeout_ms milliseconds. 
2065 | * 2066 | * @remark An application should make sure to call consumer_poll() at regular 2067 | * intervals, even if no messages are expected, to serve any 2068 | * queued callbacks waiting to be called. This is especially 2069 | * important when a rebalance_cb has been registered as it needs 2070 | * to be called and handled properly to synchronize internal 2071 | * consumer state. 2072 | * 2073 | * @returns A message object which is a proper message if \p ->err is 2074 | * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other 2075 | * value. 2076 | * 2077 | * @sa rd_kafka_message_t 2078 | */ 2079 | 2080 | rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); 2081 | 2082 | /** 2083 | * @brief Close down the KafkaConsumer. 2084 | * 2085 | * @remark This call will block until the consumer has revoked its assignment, 2086 | * calling the \c rebalance_cb if it is configured, committed offsets 2087 | * to broker, and left the consumer group. 2088 | * The maximum blocking time is roughly limited to session.timeout.ms. 2089 | * 2090 | * @returns An error code indicating if the consumer close was succesful 2091 | * or not. 2092 | * 2093 | * @remark The application still needs to call rd_kafka_destroy() after 2094 | * this call finishes to clean up the underlying handle resources. 2095 | * 2096 | * 2097 | */ 2098 | 2099 | rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); 2100 | 2101 | 2102 | 2103 | /** 2104 | * @brief Atomic assignment of partitions to consume. 2105 | * 2106 | * The new \p partitions will replace the existing assignment. 2107 | * 2108 | * When used from a rebalance callback the application shall pass the 2109 | * partition list passed to the callback (or a copy of it) (even if the list 2110 | * is empty) rather than NULL to maintain internal join state. 
2111 | 2112 | * A zero-length \p partitions will treat the partitions as a valid, 2113 | * albeit empty, assignment, and maintain internal state, while a \c NULL 2114 | * value for \p partitions will reset and clear the internal state. 2115 | */ 2116 | rd_kafka_resp_err_t 2117 | rd_kafka_assign (rd_kafka_t *rk, 2118 | const rd_kafka_topic_partition_list_t *partitions); 2119 | 2120 | /** 2121 | * @brief Returns the current partition assignment 2122 | * 2123 | * @returns An error code on failure, otherwise \p partitions is updated 2124 | * to point to a newly allocated partition list (possibly empty). 2125 | * 2126 | * @remark The application is responsible for calling 2127 | * rd_kafka_topic_partition_list_destroy on the returned list. 2128 | */ 2129 | rd_kafka_resp_err_t 2130 | rd_kafka_assignment (rd_kafka_t *rk, 2131 | rd_kafka_topic_partition_list_t **partitions); 2132 | 2133 | 2134 | 2135 | 2136 | /** 2137 | * @brief Commit offsets on broker for the provided list of partitions. 2138 | * 2139 | * \p offsets should contain \c topic, \c partition, \c offset and possibly 2140 | * \c metadata. 2141 | * If \p offsets is NULL the current partition assignment will be used instead. 2142 | * 2143 | * If \p async is false this operation will block until the broker offset commit 2144 | * is done, returning the resulting success or error code. 2145 | * 2146 | * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been 2147 | * configured the callback will be enqueued for a future call to 2148 | * rd_kafka_poll(), rd_kafka_consumer_poll() or similar. 2149 | */ 2150 | rd_kafka_resp_err_t 2151 | rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, 2152 | int async); 2153 | 2154 | 2155 | /** 2156 | * @brief Commit message's offset on broker for the message's partition. 
2157 | * 2158 | * @sa rd_kafka_commit 2159 | */ 2160 | rd_kafka_resp_err_t 2161 | rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, 2162 | int async); 2163 | 2164 | 2165 | /** 2166 | * @brief Commit offsets on broker for the provided list of partitions. 2167 | * 2168 | * See rd_kafka_commit for \p offsets semantics. 2169 | * 2170 | * The result of the offset commit will be posted on the provided \p rkqu queue. 2171 | * 2172 | * If the application uses one of the poll APIs (rd_kafka_poll(), 2173 | * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue 2174 | * the \p cb callback is required. \p opaque is passed to the callback. 2175 | * 2176 | * If using the event API the callback is ignored and the offset commit result 2177 | * will be returned as an RD_KAFKA_EVENT_COMMIT event. The \p opaque 2178 | * value will be available with rd_kafka_event_opaque() 2179 | * 2180 | * If \p rkqu is NULL a temporary queue will be created and the callback will 2181 | * be served by this call. 2182 | * 2183 | * @sa rd_kafka_commit() 2184 | * @sa rd_kafka_conf_set_offset_commit_cb() 2185 | */ 2186 | rd_kafka_resp_err_t 2187 | rd_kafka_commit_queue (rd_kafka_t *rk, 2188 | const rd_kafka_topic_partition_list_t *offsets, 2189 | rd_kafka_queue_t *rkqu, 2190 | void function(rd_kafka_t *rk, 2191 | rd_kafka_resp_err_t err, 2192 | rd_kafka_topic_partition_list_t *offsets, 2193 | void *opaque) nothrow @nogc cb, 2194 | void *opaque); 2195 | 2196 | 2197 | /** 2198 | * @brief Retrieve committed offsets for topics+partitions. 2199 | * 2200 | * The \p offset field of each requested partition will either be set to 2201 | * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored 2202 | * offset for that partition. 2203 | * 2204 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the 2205 | * \p offset or \p err field of each \p partitions' element is filled 2206 | * in with the stored offset, or a partition specific error. 
2207 | * Else returns an error code. 2208 | */ 2209 | rd_kafka_resp_err_t 2210 | rd_kafka_committed (rd_kafka_t *rk, 2211 | rd_kafka_topic_partition_list_t *partitions, 2212 | int timeout_ms); 2213 | 2214 | 2215 | 2216 | /** 2217 | * @brief Retrieve current positions (offsets) for topics+partitions. 2218 | * 2219 | * The \p offset field of each requested partition will be set to the offset 2220 | * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was 2221 | * no previous message. 2222 | * 2223 | * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the 2224 | * \p offset or \p err field of each \p partitions' element is filled 2225 | * in with the stored offset, or a partition specific error. 2226 | * Else returns an error code. 2227 | */ 2228 | rd_kafka_resp_err_t 2229 | rd_kafka_position (rd_kafka_t *rk, 2230 | rd_kafka_topic_partition_list_t *partitions); 2231 | 2232 | /**@}*/ 2233 | 2234 | 2235 | 2236 | /** 2237 | * @name Producer API 2238 | * @{ 2239 | * 2240 | * 2241 | */ 2242 | 2243 | 2244 | /** 2245 | * @brief Producer message flags 2246 | */ 2247 | enum RD_KAFKA_MSG_F_FREE = 0x1; /**< Delegate freeing of payload to rdkafka. */ 2248 | enum RD_KAFKA_MSG_F_COPY = 0x2; /**< rdkafka will make a copy of the payload. */ 2249 | 2250 | 2251 | 2252 | /** 2253 | * @brief Produce and send a single message to broker. 2254 | * 2255 | * \p rkt is the target topic which must have been previously created with 2256 | * `rd_kafka_topic_new()`. 2257 | * 2258 | * `rd_kafka_produce()` is an asynch non-blocking API. 
2259 | * 2260 | * \p partition is the target partition, either: 2261 | * - RD_KAFKA_PARTITION_UA (unassigned) for 2262 | * automatic partitioning using the topic's partitioner function, or 2263 | * - a fixed partition (0..N) 2264 | * 2265 | * \p msgflags is zero or more of the following flags OR:ed together: 2266 | * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if 2267 | * \p queue.buffering.max.messages or 2268 | * \p queue.buffering.max.kbytes are exceeded. 2269 | * Messages are considered in-queue from the point they 2270 | * are accepted by produce() until their corresponding 2271 | * delivery report callback/event returns. 2272 | * It is thus a requirement to call 2273 | * rd_kafka_poll() (or equiv.) from a separate 2274 | * thread when F_BLOCK is used. 2275 | * See WARNING on \c RD_KAFKA_MSG_F_BLOCK above. 2276 | * 2277 | * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done 2278 | * with it. 2279 | * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the 2280 | * \p payload pointer will not be used by rdkafka 2281 | * after the call returns. 2282 | * 2283 | * .._F_FREE and .._F_COPY are mutually exclusive. 2284 | * 2285 | * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then 2286 | * the memory associated with the payload is still the caller's 2287 | * responsibility. 2288 | * 2289 | * \p payload is the message payload of size \p len bytes. 2290 | * 2291 | * \p key is an optional message key of size \p keylen bytes, if non-NULL it 2292 | * will be passed to the topic partitioner as well as be sent with the 2293 | * message to the broker and passed on to the consumer. 2294 | * 2295 | * \p msg_opaque is an optional application-provided per-message opaque 2296 | * pointer that will provided in the delivery report callback (`dr_cb`) for 2297 | * referencing this message. 
2298 | * 2299 | * Returns 0 on success or -1 on error in which case errno is set accordingly: 2300 | * - ENOBUFS - maximum number of outstanding messages has been reached: 2301 | * "queue.buffering.max.messages" 2302 | * (RD_KAFKA_RESP_ERR__QUEUE_FULL) 2303 | * - EMSGSIZE - message is larger than configured max size: 2304 | * "messages.max.bytes". 2305 | * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) 2306 | * - ESRCH - requested \p partition is unknown in the Kafka cluster. 2307 | * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) 2308 | * - ENOENT - topic is unknown in the Kafka cluster. 2309 | * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) 2310 | * 2311 | * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. 2312 | */ 2313 | 2314 | int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, 2315 | int msgflags, 2316 | void *payload, size_t len, 2317 | const (void) *key, size_t keylen, 2318 | void *msg_opaque); 2319 | 2320 | /** 2321 | * @brief Produce and send a single message to broker. 2322 | * 2323 | * The message is defined by a va-arg list using \c rd_kafka_vtype_t 2324 | * tag tuples which must be terminated with a single \c RD_KAFKA_V_END. 2325 | * 2326 | * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code. 2327 | * 2328 | * @sa rd_kafka_produce, RD_KAFKA_V_END 2329 | */ 2330 | rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); 2331 | 2332 | /** 2333 | * @brief Produce multiple messages. 2334 | * 2335 | * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will 2336 | * be run for each message (slower), otherwise the messages will be enqueued 2337 | * to the specified partition directly (faster). 2338 | * 2339 | * The messages are provided in the array \p rkmessages of count \p message_cnt 2340 | * elements. 2341 | * The \p partition and \p msgflags are used for all provided messages. 
2342 | * 2343 | * Honoured \p rkmessages[] fields are: 2344 | * - payload,len Message payload and length 2345 | * - key,key_len Optional message key 2346 | * - _private Message opaque pointer (msg_opaque) 2347 | * - err Will be set according to success or failure. 2348 | * Application only needs to check for errors if 2349 | * return value != \p message_cnt. 2350 | * 2351 | * @returns the number of messages successfully enqueued for producing. 2352 | */ 2353 | 2354 | int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, 2355 | int msgflags, 2356 | rd_kafka_message_t *rkmessages, int message_cnt); 2357 | 2358 | 2359 | /** 2360 | * @brief Wait until all outstanding produce requests, et al., are completed. 2361 | * This should typically be done prior to destroying a producer instance 2362 | * to make sure all queued and in-flight produce requests are completed 2363 | * before terminating. 2364 | * 2365 | * @remark This function will call rd_kafka_poll() and thus trigger callbacks.
2366 | * 2367 | * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all 2368 | * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR 2369 | */ 2370 | rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); 2371 | 2372 | 2373 | /**@}*/ 2374 | 2375 | 2376 | /** 2377 | * @name Metadata API 2378 | * @{ 2379 | * 2380 | * 2381 | */ 2382 | 2383 | 2384 | /** 2385 | * @brief Broker information 2386 | */ 2387 | struct rd_kafka_metadata_broker { 2388 | int32_t id; /**< Broker Id */ 2389 | char *host; /**< Broker hostname */ 2390 | int port; /**< Broker listening port */ 2391 | } ; 2392 | alias rd_kafka_metadata_broker_t = rd_kafka_metadata_broker; 2393 | 2394 | /** 2395 | * @brief Partition information 2396 | */ 2397 | struct rd_kafka_metadata_partition { 2398 | int32_t id; /**< Partition Id */ 2399 | rd_kafka_resp_err_t err; /**< Partition error reported by broker */ 2400 | int32_t leader; /**< Leader broker */ 2401 | int replica_cnt; /**< Number of brokers in \p replicas */ 2402 | int32_t *replicas; /**< Replica brokers */ 2403 | int isr_cnt; /**< Number of ISR brokers in \p isrs */ 2404 | int32_t *isrs; /**< In-Sync-Replica brokers */ 2405 | } ; 2406 | alias rd_kafka_metadata_partition_t = rd_kafka_metadata_partition; 2407 | 2408 | /** 2409 | * @brief Topic information 2410 | */ 2411 | struct rd_kafka_metadata_topic { 2412 | char *topic; /**< Topic name */ 2413 | int partition_cnt; /**< Number of partitions in \p partitions*/ 2414 | rd_kafka_metadata_partition *partitions; /**< Partitions */ 2415 | rd_kafka_resp_err_t err; /**< Topic error reported by broker */ 2416 | } ; 2417 | alias rd_kafka_metadata_topic_t = rd_kafka_metadata_topic; 2418 | 2419 | 2420 | /** 2421 | * @brief Metadata container 2422 | */ 2423 | struct rd_kafka_metadata_t { 2424 | int broker_cnt; /**< Number of brokers in \p brokers */ 2425 | rd_kafka_metadata_broker *brokers; /**< Brokers */ 2426 | 2427 | int topic_cnt; /**< Number of topics in \p 
topics */ 2428 | rd_kafka_metadata_topic *topics; /**< Topics */ 2429 | 2430 | int32_t orig_broker_id; /**< Broker originating this metadata */ 2431 | char *orig_broker_name; /**< Name of originating broker */ 2432 | } ; 2433 | //alias rd_kafka_metadata_t = rd_kafka_metadata; 2434 | 2435 | /** 2436 | * @brief Request Metadata from broker. 2437 | * 2438 | * Parameters: 2439 | * - \p all_topics if non-zero: request info about all topics in cluster, 2440 | * if zero: only request info about locally known topics. 2441 | * - \p only_rkt only request info about this topic 2442 | * - \p metadatap pointer to hold metadata result. 2443 | * The \p *metadatap pointer must be released 2444 | * with rd_kafka_metadata_destroy(). 2445 | * - \p timeout_ms maximum response time before failing. 2446 | * 2447 | * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap) 2448 | * will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or 2449 | * other error code on error. 2450 | */ 2451 | 2452 | rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt,const rd_kafka_metadata_t * * metadatap, int timeout_ms); 2453 | 2454 | /** 2455 | * @brief Release metadata memory. 
2456 | */ 2457 | 2458 | void rd_kafka_metadata_destroy(const rd_kafka_metadata_t *metadata); 2459 | 2460 | 2461 | /**@}*/ 2462 | 2463 | 2464 | 2465 | /** 2466 | * @name Client group information 2467 | * @{ 2468 | * 2469 | * 2470 | */ 2471 | 2472 | 2473 | /** 2474 | * @brief Group member information 2475 | * 2476 | * For more information on \p member_metadata format, see 2477 | * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI 2478 | * 2479 | */ 2480 | struct rd_kafka_group_member_info { 2481 | char *member_id; /**< Member id (generated by broker) */ 2482 | char *client_id; /**< Client's \p client.id */ 2483 | char *client_host; /**< Client's hostname */ 2484 | void *member_metadata; /**< Member metadata (binary), 2485 | * format depends on \p protocol_type. */ 2486 | int member_metadata_size; /**< Member metadata size in bytes */ 2487 | void *member_assignment; /**< Member assignment (binary), 2488 | * format depends on \p protocol_type. */ 2489 | int member_assignment_size; /**< Member assignment size in bytes */ 2490 | }; 2491 | 2492 | /** 2493 | * @brief Group information 2494 | */ 2495 | struct rd_kafka_group_info { 2496 | rd_kafka_metadata_broker broker; /**< Originating broker info */ 2497 | char *group; /**< Group name */ 2498 | rd_kafka_resp_err_t err; /**< Broker-originated error */ 2499 | char *state; /**< Group state */ 2500 | char *protocol_type; /**< Group protocol type */ 2501 | char *protocol; /**< Group protocol */ 2502 | rd_kafka_group_member_info *members; /**< Group members */ 2503 | int member_cnt; /**< Group member count */ 2504 | }; 2505 | 2506 | /** 2507 | * @brief List of groups 2508 | * 2509 | * @sa rd_kafka_group_list_destroy() to release list memory. 
2510 | */ 2511 | struct rd_kafka_group_list { 2512 | rd_kafka_group_info *groups; /**< Groups */ 2513 | int group_cnt; /**< Group count */ 2514 | }; 2515 | 2516 | 2517 | /** 2518 | * @brief List and describe client groups in cluster. 2519 | * 2520 | * \p group is an optional group name to describe, otherwise (\p NULL) all 2521 | * groups are returned. 2522 | * 2523 | * \p timeout_ms is the (approximate) maximum time to wait for response 2524 | * from brokers and must be a positive value. 2525 | * 2526 | * @returns \p RD_KAFKA_RESP_ERR_NO_ERROR on success and \p grplistp is 2527 | * updated to point to a newly allocated list of groups. 2528 | * Else returns an error code on failure and \p grplistp remains 2529 | * untouched. 2530 | * 2531 | * @sa Use rd_kafka_group_list_destroy() to release list memory. 2532 | */ 2533 | 2534 | rd_kafka_resp_err_t 2535 | rd_kafka_list_groups (rd_kafka_t *rk, const(char) *group, 2536 | const rd_kafka_group_list **grplistp, 2537 | int timeout_ms); 2538 | 2539 | /** 2540 | * @brief Release list memory 2541 | */ 2542 | 2543 | void rd_kafka_group_list_destroy (const rd_kafka_group_list *grplist); 2544 | 2545 | 2546 | /**@}*/ 2547 | 2548 | 2549 | 2550 | /** 2551 | * @name Miscellaneous APIs 2552 | * @{ 2553 | * 2554 | */ 2555 | 2556 | 2557 | /** 2558 | * @brief Adds one or more brokers to the kafka handle's list of initial 2559 | * bootstrap brokers. 2560 | * 2561 | * Additional brokers will be discovered automatically as soon as rdkafka 2562 | * connects to a broker by querying the broker metadata. 2563 | * 2564 | * If a broker name resolves to multiple addresses (and possibly 2565 | * address families) all will be used for connection attempts in 2566 | * round-robin fashion. 2567 | * 2568 | * \p brokerlist is a ,-separated list of brokers in the format: 2569 | * \c \,\,..
2570 | * Where each broker is in either the host or URL based format: 2571 | * \c \[:\] 2572 | * \c \://\[:port] 2573 | * \c \ is either \c PLAINTEXT, \c SSL, \c SASL, \c SASL_PLAINTEXT 2574 | * The two formats can be mixed but ultimately the value of the 2575 | * `security.protocol` config property decides what brokers are allowed. 2576 | * 2577 | * Example: 2578 | * brokerlist = "broker1:10000,broker2" 2579 | * brokerlist = "SSL://broker3:9000,ssl://broker2" 2580 | * 2581 | * @returns the number of brokers successfully added. 2582 | * 2583 | * @remark Brokers may also be defined with the \c metadata.broker.list or 2584 | * \c bootstrap.servers configuration property (preferred method). 2585 | */ 2586 | 2587 | int rd_kafka_brokers_add(rd_kafka_t *rk, const(char) *brokerlist); 2588 | 2589 | 2590 | 2591 | 2592 | /** 2593 | * @brief Set logger function. 2594 | * 2595 | * The default is to print to stderr, but a syslog logger is also available, 2596 | * see rd_kafka_log_(print|syslog) for the builtin alternatives. 2597 | * Alternatively the application may provide its own logger callback. 2598 | * Or pass 'func' as NULL to disable logging. 2599 | * 2600 | * @deprecated Use rd_kafka_conf_set_log_cb() 2601 | * 2602 | * @remark \p rk may be passed as NULL in the callback. 2603 | */ 2604 | 2605 | deprecated("please use rd_kafka_conf_set_log_cb"){ 2606 | alias func_callback = void function(const rd_kafka_t *rk, int level,const(char) *fac, const(char) *buf) nothrow @nogc; 2607 | void rd_kafka_set_logger(rd_kafka_t *rk,func_callback func); 2608 | } 2609 | 2610 | 2611 | /** 2612 | * @brief Specifies the maximum logging level produced by 2613 | * internal kafka logging and debugging. 2614 | * 2615 | * If the \p \"debug\" configuration property is set the level is automatically 2616 | * adjusted to \c LOG_DEBUG (7). 
2617 | */ 2618 | 2619 | void rd_kafka_set_log_level(rd_kafka_t *rk, int level); 2620 | 2621 | 2622 | /** 2623 | * @brief Builtin (default) log sink: print to stderr 2624 | */ 2625 | 2626 | void rd_kafka_log_print(const rd_kafka_t *rk, int level, 2627 | const(char) *fac, const(char) *buf); 2628 | 2629 | 2630 | /** 2631 | * @brief Builtin log sink: print to syslog. 2632 | */ 2633 | 2634 | void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, 2635 | const(char) *fac, const(char) *buf); 2636 | 2637 | 2638 | /** 2639 | * @brief Returns the current out queue length. 2640 | * 2641 | * The out queue contains messages waiting to be sent to, or acknowledged by, 2642 | * the broker. 2643 | * 2644 | * An application should wait for this queue to reach zero before terminating 2645 | * to make sure outstanding requests (such as offset commits) are fully 2646 | * processed. 2647 | * 2648 | * @returns number of messages in the out queue. 2649 | */ 2650 | 2651 | int rd_kafka_outq_len(rd_kafka_t *rk); 2652 | 2653 | 2654 | 2655 | /** 2656 | * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp 2657 | * 2658 | * This is only useful for debugging rdkafka, showing state and statistics 2659 | * for brokers, topics, partitions, etc. 2660 | */ 2661 | 2662 | void rd_kafka_dump(FILE *fp, rd_kafka_t *rk); 2663 | 2664 | 2665 | 2666 | /** 2667 | * @brief Retrieve the current number of threads in use by librdkafka. 2668 | * 2669 | * Used by regression tests. 2670 | */ 2671 | 2672 | int rd_kafka_thread_cnt(); 2673 | 2674 | 2675 | /** 2676 | * @brief Wait for all rd_kafka_t objects to be destroyed. 2677 | * 2678 | * Returns 0 if all kafka objects are now destroyed, or -1 if the 2679 | * timeout was reached. 2680 | * Since `rd_kafka_destroy()` is an asynch operation the 2681 | * `rd_kafka_wait_destroyed()` function can be used for applications where 2682 | * a clean shutdown is required. 
2683 | */ 2684 | 2685 | int rd_kafka_wait_destroyed(int timeout_ms); 2686 | 2687 | 2688 | /**@}*/ 2689 | 2690 | 2691 | 2692 | 2693 | /** 2694 | * @name Experimental APIs 2695 | * @{ 2696 | */ 2697 | 2698 | /** 2699 | * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's 2700 | * queue (rd_kafka_consumer_poll()). 2701 | * 2702 | * @warning It is not permitted to call rd_kafka_poll() after directing the 2703 | * main queue with rd_kafka_poll_set_consumer(). 2704 | */ 2705 | 2706 | rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); 2707 | 2708 | 2709 | /**@}*/ 2710 | 2711 | /** 2712 | * @name Event interface 2713 | * 2714 | * @brief The event API provides an alternative pollable non-callback interface 2715 | * to librdkafka's message and event queues. 2716 | * 2717 | * @{ 2718 | */ 2719 | 2720 | 2721 | /** 2722 | * @brief Event types 2723 | */ 2724 | alias int rd_kafka_event_type_t; 2725 | enum RD_KAFKA_EVENT_NONE = 0x0; 2726 | enum RD_KAFKA_EVENT_DR = 0x1; /**< Producer Delivery report batch */ 2727 | enum RD_KAFKA_EVENT_FETCH = 0x2; /**< Fetched message (consumer) */ 2728 | enum RD_KAFKA_EVENT_LOG = 0x4; /**< Log message */ 2729 | enum RD_KAFKA_EVENT_ERROR = 0x8; /**< Error */ 2730 | enum RD_KAFKA_EVENT_REBALANCE = 0x10; /**< Group rebalance (consumer) */ 2731 | enum RD_KAFKA_EVENT_OFFSET_COMMIT = 0x20; /**< Offset commit result */ 2732 | 2733 | struct rd_kafka_op_s; 2734 | alias rd_kafka_op_s rd_kafka_event_t; 2735 | 2736 | 2737 | /** 2738 | * @returns the event type for the given event. 2739 | * 2740 | * @remark As a convenience it is okay to pass \p rkev as NULL in which case 2741 | * RD_KAFKA_EVENT_NONE is returned. 2742 | */ 2743 | rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); 2744 | 2745 | /** 2746 | * @returns the event type's name for the given event. 2747 | * 2748 | * @remark As a convenience it is okay to pass \p rkev as NULL in which case 2749 | * the name for RD_KAFKA_EVENT_NONE is returned. 
2750 | */ 2751 | const (char) *rd_kafka_event_name (const rd_kafka_event_t *rkev); 2752 | 2753 | 2754 | /** 2755 | * @brief Destroy an event. 2756 | * 2757 | * @remark Any references to this event, such as extracted messages, 2758 | * will not be usable after this call. 2759 | * 2760 | * @remark As a convenience it is okay to pass \p rkev as NULL in which case 2761 | * no action is performed. 2762 | */ 2763 | void rd_kafka_event_destroy (rd_kafka_event_t *rkev); 2764 | 2765 | 2766 | /** 2767 | * @returns the next message from an event. 2768 | * 2769 | * Call repeatedly until it returns NULL. 2770 | * 2771 | * Event types: 2772 | * - RD_KAFKA_EVENT_FETCH (1 message) 2773 | * - RD_KAFKA_EVENT_DR (>=1 message(s)) 2774 | * 2775 | * @remark The returned message(s) MUST NOT be 2776 | * freed with rd_kafka_message_destroy(). 2777 | */ 2778 | const (rd_kafka_message_t) *rd_kafka_event_message_next (rd_kafka_event_t *rkev); 2779 | 2780 | 2781 | /** 2782 | * @brief Extracts \p size message(s) from the event into the 2783 | * pre-allocated array \p rkmessages. 2784 | * 2785 | * Event types: 2786 | * - RD_KAFKA_EVENT_FETCH (1 message) 2787 | * - RD_KAFKA_EVENT_DR (>=1 message(s)) 2788 | * 2789 | * @returns the number of messages extracted. 2790 | */ 2791 | size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, 2792 | const rd_kafka_message_t **rkmessages, 2793 | size_t size); 2794 | 2795 | 2796 | /** 2797 | * @returns the number of remaining messages in the event. 2798 | * 2799 | * Event types: 2800 | * - RD_KAFKA_EVENT_FETCH (1 message) 2801 | * - RD_KAFKA_EVENT_DR (>=1 message(s)) 2802 | */ 2803 | size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); 2804 | 2805 | 2806 | /** 2807 | * @returns the error code for the event. 2808 | * 2809 | * Event types: 2810 | * - all 2811 | */ 2812 | rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); 2813 | 2814 | 2815 | /** 2816 | * @returns the error string (if any).
2817 | * An application should check that rd_kafka_event_error() returns 2818 | * non-zero before calling this function. 2819 | * 2820 | * Event types: 2821 | * - all 2822 | */ 2823 | const (char) *rd_kafka_event_error_string (rd_kafka_event_t *rkev); 2824 | 2825 | 2826 | 2827 | /** 2828 | * @returns the user opaque (if any) 2829 | * 2830 | * Event types: 2831 | * - RD_KAFKA_EVENT_OFFSET_COMMIT 2832 | */ 2833 | void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); 2834 | 2835 | 2836 | /** 2837 | * @brief Extract log message from the event. 2838 | * 2839 | * Event types: 2840 | * - RD_KAFKA_EVENT_LOG 2841 | * 2842 | * @returns 0 on success or -1 if unsupported event type. 2843 | */ 2844 | int rd_kafka_event_log (rd_kafka_event_t *rkev, 2845 | const (char) **fac, const (char) **str, int *level); 2846 | 2847 | 2848 | /** 2849 | * @returns the topic partition list from the event. 2850 | * 2851 | * @remark The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy() 2852 | * 2853 | * Event types: 2854 | * - RD_KAFKA_EVENT_REBALANCE 2855 | * - RD_KAFKA_EVENT_OFFSET_COMMIT 2856 | */ 2857 | rd_kafka_topic_partition_list_t * 2858 | rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev); 2859 | 2860 | 2861 | /** 2862 | * @returns a newly allocated topic_partition container, if applicable for the event type, 2863 | * else NULL. 2864 | * 2865 | * @remark The returned pointer MUST be freed with rd_kafka_topic_partition_destroy(). 2866 | * 2867 | * Event types: 2868 | * RD_KAFKA_EVENT_ERROR (for partition level errors) 2869 | */ 2870 | rd_kafka_topic_partition_t * 2871 | rd_kafka_event_topic_partition (rd_kafka_event_t *rkev); 2872 | 2873 | 2874 | /** 2875 | * @brief Poll a queue for an event for max \p timeout_ms. 2876 | * 2877 | * @returns an event, or NULL. 2878 | * 2879 | * @remark Use rd_kafka_event_destroy() to free the event.
2880 | */ 2881 | rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms); 2882 | 2883 | /** 2884 | * @brief Poll a queue for events served through callbacks for max \p timeout_ms. 2885 | * 2886 | * @returns the number of events served. 2887 | * 2888 | * @remark This API must only be used for queues with callbacks registered 2889 | * for all expected event types. E.g., not a message queue. 2890 | */ 2891 | int rd_kafka_queue_poll_callback (rd_kafka_queue_t* rkqu, int timeout_ms); 2892 | 2893 | /**@}*/ 2894 | --------------------------------------------------------------------------------