├── .gitignore
├── CMakeLists.txt
├── LICENSE.txt
├── README.md
├── include
│   ├── .DS_Store
│   └── eosio
│       ├── .DS_Store
│       └── kafka_plugin
│           ├── kafka_plugin.hpp
│           └── kafka_producer.hpp
├── kafka_plugin.cpp
└── kafka_producer.cpp

/.gitignore:
--------------------------------------------------------------------------------
 1 | # maven ignore
 2 | target/
 3 | 
 4 | # eclipse ignore
 5 | .settings/
 6 | .project
 7 | .classpath
 8 | 
 9 | # idea ignore
10 | .idea/
11 | *.ipr
12 | *.iml
13 | *.iws
14 | 
15 | # temp ignore
16 | *.log
17 | *.cache
18 | *.diff
19 | *.patch
20 | *.tmp
21 | 
22 | # py
23 | *.pyc
24 | local/
25 | include/
26 | logs/
27 | 
28 | # system ignore
29 | .DS_Store
30 | Thumbs.db
31 | 
32 | # package ignore (optional)
33 | # *.jar
34 | # *.war
35 | # *.zip
36 | # *.tar
37 | # *.tar.gz
38 | 
39 | ###
40 | dependency-reduced-pom.xml
41 | 
42 | tb.eos_trxsave_task.s
43 | glide.lock
44 | xlsoa/
45 | check_service.sh
46 | monitor_service.sh
47 | s.sh
48 | start_service.sh
49 | stop_service.sh
50 | 
51 | docker-compose.yml
52 | 
53 | cmake-build-debug/
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
 1 | #if(BUILD_KAFKA_PLUGIN)
 2 | file(GLOB HEADERS "include/eosio/kafka_plugin/*.hpp")
 3 | include_directories("/usr/local/include/librdkafka")
 4 | LINK_LIBRARIES("/usr/local/lib/librdkafka.so" "/usr/local/lib/librdkafka.so.1")
 5 | link_directories("/usr/local/lib")
 6 | add_library( kafka_plugin
 7 |              kafka_plugin.cpp kafka_producer.cpp
 8 |              ${HEADERS} )
 9 | target_include_directories(kafka_plugin
10 |         PUBLIC "include"
11 |         )
12 | target_link_libraries(kafka_plugin
13 |         PUBLIC chain_plugin eosio_chain appbase fc
14 |         )
15 | 
16 | message("kafka_plugin will be built.")
17 | #endif()
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
 1 | Copyright 2018 EOS TOKENPOCKET
 2 | 
 3 | Permission is hereby granted, free of charge, to any person obtaining
 4 | a copy of this software and associated documentation files (the
 5 | "Software"), to deal in the Software without restriction, including
 6 | without limitation the rights to use, copy, modify, merge, publish,
 7 | distribute, sublicense, and/or sell copies of the Software, and to
 8 | permit persons to whom the Software is furnished to do so, subject to
 9 | the following conditions:
10 | 
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 | 
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # EOSIO Kafka Plugin
 2 | ## what's the eosio kafka plugin
 3 | The EOSIO Kafka Plugin is used to receive the transaction data from the blockchain and send it out through a Kafka producer. Developers can receive the transaction data through a Kafka consumer in their backend applications.
 4 | 
 5 | ## how does the kafka plugin work
 6 | 
 7 | 1. It runs a task to consume the transactions on chain. There are two types of transactions: "applied transaction" and "accepted transaction".
 8 | 
 9 | 2. It creates two Kafka topics, whose producers store the applied transactions and accepted transactions in the Kafka queue.
10 | 
11 | 3. The DApp developer can get the transaction data through a consumer of the Kafka topics (see the consumer sketch at the end of this README).
12 | 
13 | ## Based on eosio version
14 | EOS-Mainnet/eos mainnet-1.6.1 or later
15 | 
16 | ## Building the plugin [Install on your nodeos server]
17 | 
18 | 1. Install the kafka library
19 | ```
20 | cd /usr/local
21 | git clone https://github.com/edenhill/librdkafka.git
22 | cd librdkafka
23 | ./configure
24 | make
25 | sudo make install
26 | ```
27 | 2. Download the kafka plugin code into the eos source tree
28 | ```
29 | cd /usr/local/eos/plugins/
30 | git clone https://github.com/TP-Lab/kafka_plugin.git
31 | ```
32 | 3. Update the CMakeLists.txt files to compile the kafka plugin
33 | ```
34 | (1) edit /usr/local/eos/plugins/CMakeLists.txt:
35 | add_subdirectory(kafka_plugin)
36 | 
37 | (2) edit /usr/local/eos/programs/nodeos/CMakeLists.txt:
38 | target_link_libraries( nodeos PRIVATE -Wl,${whole_archive_flag} kafka_plugin -Wl,${no_whole_archive_flag} )
39 | ```
40 | 
41 | ## How to set it up on your nodeos
42 | Enable this plugin using the --plugin option to nodeos or in your config.ini. Use nodeos --help to see the options used by this plugin.
43 | 
44 | ## Configuration
45 | Add the following to config.ini to enable the plugin:
46 | ```
47 | # parameters for kafka_plugin
48 | --plugin eosio::kafka_plugin          //add the kafka plugin
49 | --kafka-uri 192.168.31.225:9092       //the kafka service
50 | --accept_trx_topic eos_accept_topic   //the kafka topic for accepted transactions
51 | --applied_trx_topic eos_applied_topic //the kafka topic for applied transactions
52 | --kafka-block-start 100               //the start block number, from which kafka begins to receive transactions
53 | --kafka-queue-size 5000               //the queue size of kafka
54 | ```
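55 | 
56 | ## Consuming the transaction data
57 | As an illustration of step 3 under "how does the kafka plugin work", a backend consumer can be built on the same librdkafka C API the plugin itself uses. The sketch below reads the applied-transaction topic; the broker address and topic name are the example values from the Configuration section above, and the group id is an arbitrary placeholder.
58 | ```
59 | #include <stdio.h>
60 | #include <librdkafka/rdkafka.h>
61 | 
62 | int main() {
63 |     char errstr[512];
64 |     rd_kafka_conf_t *conf = rd_kafka_conf_new();
65 |     rd_kafka_conf_set(conf, "bootstrap.servers", "192.168.31.225:9092", errstr, sizeof(errstr));
66 |     rd_kafka_conf_set(conf, "group.id", "eos_trx_consumer", errstr, sizeof(errstr));
67 |     rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr, sizeof(errstr));
68 | 
69 |     rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
70 |     if (!rk) { fprintf(stderr, "%s\n", errstr); return 1; }
71 |     rd_kafka_poll_set_consumer(rk);
72 | 
73 |     rd_kafka_topic_partition_list_t *topics = rd_kafka_topic_partition_list_new(1);
74 |     rd_kafka_topic_partition_list_add(topics, "eos_applied_topic", RD_KAFKA_PARTITION_UA);
75 |     rd_kafka_subscribe(rk, topics);
76 |     rd_kafka_topic_partition_list_destroy(topics);
77 | 
78 |     while (1) {
79 |         rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 1000);
80 |         if (!msg) continue;
81 |         if (!msg->err)  /* each payload is one transaction JSON string */
82 |             printf("%.*s\n", (int) msg->len, (const char *) msg->payload);
83 |         rd_kafka_message_destroy(msg);
84 |     }
85 | }
86 | ```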
--------------------------------------------------------------------------------
/include/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TP-Lab/kafka_plugin/26aac3e9fe82c43399df5d698d531822dd9ec7b5/include/.DS_Store
--------------------------------------------------------------------------------
/include/eosio/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TP-Lab/kafka_plugin/26aac3e9fe82c43399df5d698d531822dd9ec7b5/include/eosio/.DS_Store
--------------------------------------------------------------------------------
/include/eosio/kafka_plugin/kafka_plugin.hpp:
--------------------------------------------------------------------------------
 1 | /**
 2 |  *
 3 |  *
 4 |  */
 5 | #pragma once
 6 | #include <appbase/application.hpp>
 7 | #include <eosio/chain_plugin/chain_plugin.hpp>
 8 | 
 9 | namespace eosio {
10 | /**
11 |  * Provides persistence to kafka for:
12 |  * transaction_traces
13 |  * transactions
14 |  *
15 |  * See the data dictionary (DB Schema Definition - EOS API) for a description of the message schema.
16 |  *
17 |  * If cmake -DBUILD_KAFKA_PLUGIN=true is not specified then this plugin is not compiled/included.
18 | */ 19 | class kafka_plugin : public plugin { 20 | public: 21 | APPBASE_PLUGIN_REQUIRES((chain_plugin)) 22 | 23 | kafka_plugin(); 24 | 25 | virtual ~kafka_plugin(); 26 | 27 | virtual void set_program_options(options_description &cli, options_description &cfg) override; 28 | 29 | void plugin_initialize(const variables_map &options); 30 | 31 | void plugin_startup(); 32 | 33 | void plugin_shutdown(); 34 | 35 | private: 36 | unique_ptr my; 37 | }; 38 | 39 | } 40 | 41 | -------------------------------------------------------------------------------- /include/eosio/kafka_plugin/kafka_producer.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "rdkafka.h" 4 | 5 | namespace eosio { 6 | #define KAFKA_STATUS_OK 0 7 | #define KAFKA_STATUS_INIT_FAIL 1 8 | #define KAFKA_STATUS_MSG_INVALID 2 9 | #define KAFKA_STATUS_QUEUE_FULL 3 10 | 11 | #define KAFKA_TRX_ACCEPT 0 12 | #define KAFKA_TRX_APPLIED 1 13 | #define KAFKA_TRX_TRANSFER 2 14 | 15 | class kafka_producer { 16 | public: 17 | kafka_producer() { 18 | 19 | accept_rk = NULL; 20 | applied_rk = NULL; 21 | transfer_rk = NULL; 22 | accept_rkt = NULL; 23 | applied_rkt = NULL; 24 | transfer_rkt = NULL; 25 | accept_conf = NULL; 26 | applied_conf = NULL; 27 | transfer_conf = NULL; 28 | }; 29 | 30 | int trx_kafka_init(char *brokers, char *acceptopic,char *compression_code, char *appliedtopic, char *transfertopic); 31 | 32 | int trx_kafka_create_topic(char *brokers, char *topic,char *compression_code, rd_kafka_t **rk, rd_kafka_topic_t **rkt, 33 | rd_kafka_conf_t **conf); 34 | 35 | int trx_kafka_sendmsg(int trxtype, char *msgstr); 36 | 37 | int trx_kafka_destroy(void); 38 | 39 | rd_kafka_topic_t *trx_kafka_get_topic(int trxtype); 40 | 41 | private: 42 | rd_kafka_t *accept_rk; /*Producer instance handle*/ 43 | rd_kafka_t *applied_rk; /*Producer instance handle*/ 44 | rd_kafka_t *transfer_rk; /*Producer instance handle*/ 45 | rd_kafka_topic_t *accept_rkt; /*topic object*/ 46 | rd_kafka_topic_t *applied_rkt; /*topic object*/ 47 | rd_kafka_topic_t *transfer_rkt; /*topic object*/ 48 | rd_kafka_conf_t *accept_conf; /*kafka config*/ 49 | rd_kafka_conf_t *applied_conf; /*kafka config*/ 50 | rd_kafka_conf_t *transfer_conf; /*kafka config*/ 51 | 52 | static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {} 53 | }; 54 | } 55 | 56 | -------------------------------------------------------------------------------- /kafka_plugin.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file 3 | * @copyright defined in eos/LICENSE.txt 4 | */ 5 | // 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | #include 29 | #include 30 | 31 | namespace fc { class variant; } 32 | 33 | namespace eosio { 34 | using chain::name; 35 | using chain::account_name; 36 | using chain::action_name; 37 | using chain::block_id_type; 38 | using chain::permission_name; 39 | using chain::transaction; 40 | using chain::signed_transaction; 41 | using chain::signed_block; 42 | using chain::transaction_id_type; 43 | using chain::packed_transaction; 44 | using chain::permission_level; 45 | static auto _kafka_plugin = application::register_plugin(); 46 | using kafka_producer_ptr = std::shared_ptr; 47 | 48 | class kafka_plugin_impl { 49 | public: 
50 |     kafka_plugin_impl();
51 | 
52 |     ~kafka_plugin_impl();
53 | 
54 |     std::optional<boost::signals2::scoped_connection> accepted_block_connection;
55 |     std::optional<boost::signals2::scoped_connection> irreversible_block_connection;
56 |     std::optional<boost::signals2::scoped_connection> accepted_transaction_connection;
57 |     std::optional<boost::signals2::scoped_connection> applied_transaction_connection;
58 |     chain_plugin *chain_plug;
59 |     struct action_info {
60 |         account_name account;
61 |         action_name name;
62 |         vector<permission_level> authorization;
63 |         string data_json;
64 |     };
65 | 
66 |     struct trasaction_info_st {
67 |         uint64_t block_number;
68 |         fc::time_point block_time;
69 |         std::optional<chain::chain_id_type> chain_id;
70 |         chain::transaction_trace_ptr trace;
71 |         vector<action_info> action_vec;
72 | 
73 |     };
74 | 
75 | 
76 |     void consume_blocks();
77 | 
78 |     void accepted_block(const chain::block_state_ptr &);
79 | 
80 |     void applied_irreversible_block(const chain::block_state_ptr &);
81 | 
82 |     void accepted_transaction(const chain::transaction_metadata_ptr &);
83 | 
84 |     void applied_transaction(const chain::transaction_trace_ptr &);
85 | 
86 |     void process_accepted_transaction(const chain::transaction_metadata_ptr &);
87 | 
88 |     void _process_accepted_transaction(const chain::transaction_metadata_ptr &);
89 | 
90 |     void process_applied_transaction(const trasaction_info_st &);
91 | 
92 |     void _process_applied_transaction(const trasaction_info_st &);
93 | 
94 |     void process_accepted_block(const chain::block_state_ptr &);
95 | 
96 |     void _process_accepted_block(const chain::block_state_ptr &);
97 | 
98 |     void process_irreversible_block(const chain::block_state_ptr &);
99 | 
100 |     void _process_irreversible_block(const chain::block_state_ptr &);
101 | 
102 |     void init();
103 | 
104 |     bool configured{false};
105 | 
106 |     void filter_traction_trace(const chain::transaction_trace_ptr trace, action_name act_name);
107 | 
108 |     void _process_trace(vector<chain::action_trace>::iterator action_trace_ptr, action_name act_name);
109 | 
110 |     template<typename Queue, typename Entry>
111 |     void queue(Queue &queue, const Entry &e);
112 | 
113 |     uint32_t start_block_num = 0;
114 |     bool start_block_reached = false;
115 | 
116 |     size_t max_queue_size = 10000;
117 |     int queue_sleep_time = 0;
118 |     std::deque<chain::transaction_metadata_ptr> transaction_metadata_queue;
119 |     std::deque<chain::transaction_metadata_ptr> transaction_metadata_process_queue;
120 |     std::deque<trasaction_info_st> transaction_trace_queue;
121 |     std::deque<trasaction_info_st> transaction_trace_process_queue;
122 |     std::deque<chain::block_state_ptr> block_state_queue;
123 |     std::deque<chain::block_state_ptr> block_state_process_queue;
124 |     std::deque<chain::block_state_ptr> irreversible_block_state_queue;
125 |     std::deque<chain::block_state_ptr> irreversible_block_state_process_queue;
126 |     std::mutex mtx;
127 |     std::condition_variable condition;
128 |     std::thread consume_thread;
129 |     std::atomic_bool done{false};
130 |     std::atomic_bool startup{true};
131 |     std::optional<chain::chain_id_type> chain_id;
132 |     fc::microseconds abi_serializer_max_time = fc::seconds(10);
133 | 
134 |     static const account_name newaccount;
135 |     static const account_name setabi;
136 | 
137 |     static const std::string block_states_col;
138 |     static const std::string blocks_col;
139 |     static const std::string trans_col;
140 |     static const std::string trans_traces_col;
141 |     static const std::string actions_col;
142 |     static const std::string accounts_col;
143 |     kafka_producer_ptr producer;
144 | };
145 | 
146 | const account_name kafka_plugin_impl::newaccount = chain::newaccount::get_name();
147 | const account_name kafka_plugin_impl::setabi = chain::setabi::get_name();
148 | 
149 | const std::string kafka_plugin_impl::block_states_col = "block_states";
150 | const std::string kafka_plugin_impl::blocks_col = "blocks";
151 | const std::string kafka_plugin_impl::trans_col = "transactions";
152 | const std::string kafka_plugin_impl::trans_traces_col = "transaction_traces";
153 | const std::string kafka_plugin_impl::actions_col = "actions";
154 | const std::string kafka_plugin_impl::accounts_col = "accounts";
155 | 
156 | template<typename Queue, typename Entry>
157 | void kafka_plugin_impl::queue(Queue &queue, const Entry &e) {
158 |     std::unique_lock<std::mutex> lock(mtx);
159 |     auto queue_size = queue.size();
160 |     if (queue_size > max_queue_size) {
161 |         lock.unlock();
162 |         condition.notify_one();
163 |         queue_sleep_time += 10;
164 |         if (queue_sleep_time > 1000)
165 |             wlog("queue size: ${q}", ("q", queue_size));
166 |         std::this_thread::sleep_for(std::chrono::milliseconds(queue_sleep_time));
167 |         lock.lock();
168 |     } else {
169 |         queue_sleep_time -= 10;
170 |         if (queue_sleep_time < 0) queue_sleep_time = 0;
171 |     }
172 |     queue.emplace_back(e);
173 |     lock.unlock();
174 |     condition.notify_one();
175 | }
176 | 
177 | void kafka_plugin_impl::accepted_transaction(const chain::transaction_metadata_ptr &t) {
178 |     try {
179 |         queue(transaction_metadata_queue, t);
180 |     } catch (fc::exception &e) {
181 |         elog("FC Exception while accepted_transaction ${e}", ("e", e.to_string()));
182 |     } catch (std::exception &e) {
183 |         elog("STD Exception while accepted_transaction ${e}", ("e", e.what()));
184 |     } catch (...) {
185 |         elog("Unknown exception while accepted_transaction");
186 |     }
187 | }
188 | 
189 | void kafka_plugin_impl::applied_transaction(const chain::transaction_trace_ptr &t) {
190 |     if (!t->producer_block_id.has_value())
191 |         return;
192 |     try {
193 |         auto &chain = chain_plug->chain();
194 |         // designated initializers must follow the declaration order of trasaction_info_st
195 |         trasaction_info_st transactioninfo = trasaction_info_st{
196 |                 .block_number = t->block_num, //chain.pending_block_state()->block_num,
197 |                 .block_time = chain.pending_block_time(),
198 |                 .chain_id = this->chain_id,
199 |                 .trace = chain::transaction_trace_ptr(t),
200 |         };
201 | 
202 |         trasaction_info_st &info_t = transactioninfo;
203 |         //elog("###trxId = ${e}", ("e", t->id));
204 |         queue(transaction_trace_queue, info_t);
205 |     } catch (fc::exception &e) {
206 |         elog("FC Exception while applied_transaction ${e}", ("e", e.to_string()));
207 |     } catch (std::exception &e) {
208 |         elog("STD Exception while applied_transaction ${e}", ("e", e.what()));
209 |     } catch (...) {
210 |         elog("Unknown exception while applied_transaction");
211 |     }
212 | }
213 | 
214 | void kafka_plugin_impl::applied_irreversible_block(const chain::block_state_ptr &bs) {
215 |     try {
216 |         queue(irreversible_block_state_queue, bs);
217 |     } catch (fc::exception &e) {
218 |         elog("FC Exception while applied_irreversible_block ${e}", ("e", e.to_string()));
219 |     } catch (std::exception &e) {
220 |         elog("STD Exception while applied_irreversible_block ${e}", ("e", e.what()));
221 |     } catch (...) {
222 |         elog("Unknown exception while applied_irreversible_block");
223 |     }
224 | }
225 | 
226 | 
227 | void kafka_plugin_impl::accepted_block(const chain::block_state_ptr &bs) {
228 |     try {
229 |         queue(block_state_queue, bs);
230 |     } catch (fc::exception &e) {
231 |         elog("FC Exception while accepted_block ${e}", ("e", e.to_string()));
232 |     } catch (std::exception &e) {
233 |         elog("STD Exception while accepted_block ${e}", ("e", e.what()));
234 |     } catch (...)
{ 234 | elog("Unknown exception while accepted_block"); 235 | } 236 | } 237 | 238 | void kafka_plugin_impl::consume_blocks() { 239 | try { 240 | 241 | while (true) { 242 | std::unique_lock lock(mtx); 243 | while (transaction_metadata_queue.empty() && 244 | transaction_trace_queue.empty() && 245 | block_state_queue.empty() && 246 | irreversible_block_state_queue.empty() && 247 | !done) { 248 | condition.wait(lock); 249 | } 250 | // capture for processing 251 | size_t transaction_metadata_size = transaction_metadata_queue.size(); 252 | if (transaction_metadata_size > 0) { 253 | transaction_metadata_process_queue = move(transaction_metadata_queue); 254 | transaction_metadata_queue.clear(); 255 | } 256 | size_t transaction_trace_size = transaction_trace_queue.size(); 257 | if (transaction_trace_size > 0) { 258 | transaction_trace_process_queue = move(transaction_trace_queue); 259 | transaction_trace_queue.clear(); 260 | } 261 | 262 | size_t block_state_size = block_state_queue.size(); 263 | if (block_state_size > 0) { 264 | block_state_process_queue = move(block_state_queue); 265 | block_state_queue.clear(); 266 | } 267 | size_t irreversible_block_size = irreversible_block_state_queue.size(); 268 | if (irreversible_block_size > 0) { 269 | irreversible_block_state_process_queue = move(irreversible_block_state_queue); 270 | irreversible_block_state_queue.clear(); 271 | } 272 | 273 | lock.unlock(); 274 | 275 | // warn if queue size greater than 75% 276 | if (transaction_metadata_size > (max_queue_size * 0.75) || 277 | transaction_trace_size > (max_queue_size * 0.75) || 278 | block_state_size > (max_queue_size * 0.75) || 279 | irreversible_block_size > (max_queue_size * 0.75)) { 280 | // wlog("queue size: ${q}", ("q", transaction_metadata_size + transaction_trace_size )); 281 | } else if (done) { 282 | ilog("draining queue, size: ${q}", ("q", transaction_metadata_size + transaction_trace_size)); 283 | } 284 | 285 | // process transactions 286 | while (!transaction_metadata_process_queue.empty()) { 287 | const auto &t = transaction_metadata_process_queue.front(); 288 | process_accepted_transaction(t); 289 | transaction_metadata_process_queue.pop_front(); 290 | } 291 | 292 | while (!transaction_trace_process_queue.empty()) { 293 | const auto &t = transaction_trace_process_queue.front(); 294 | process_applied_transaction(t); 295 | transaction_trace_process_queue.pop_front(); 296 | } 297 | 298 | // process blocks 299 | while (!block_state_process_queue.empty()) { 300 | const auto &bs = block_state_process_queue.front(); 301 | process_accepted_block(bs); 302 | block_state_process_queue.pop_front(); 303 | } 304 | 305 | // process irreversible blocks 306 | while (!irreversible_block_state_process_queue.empty()) { 307 | const auto &bs = irreversible_block_state_process_queue.front(); 308 | process_irreversible_block(bs); 309 | irreversible_block_state_process_queue.pop_front(); 310 | } 311 | 312 | if (transaction_metadata_size == 0 && 313 | transaction_trace_size == 0 && 314 | block_state_size == 0 && 315 | irreversible_block_size == 0 && 316 | done) { 317 | break; 318 | } 319 | } 320 | ilog("kafka_plugin consume thread shutdown gracefully"); 321 | } catch (fc::exception &e) { 322 | elog("FC Exception while consuming block ${e}", ("e", e.to_string())); 323 | } catch (std::exception &e) { 324 | elog("STD Exception while consuming block ${e}", ("e", e.what())); 325 | } catch (...) 
{ 326 | elog("Unknown exception while consuming block"); 327 | } 328 | } 329 | 330 | 331 | void kafka_plugin_impl::process_accepted_transaction(const chain::transaction_metadata_ptr &t) { 332 | try { 333 | // always call since we need to capture setabi on accounts even if not storing transactions 334 | _process_accepted_transaction(t); 335 | } catch (fc::exception &e) { 336 | elog("FC Exception while processing accepted transaction metadata: ${e}", ("e", e.to_detail_string())); 337 | } catch (std::exception &e) { 338 | elog("STD Exception while processing accepted tranasction metadata: ${e}", ("e", e.what())); 339 | } catch (...) { 340 | elog("Unknown exception while processing accepted transaction metadata"); 341 | } 342 | } 343 | 344 | void kafka_plugin_impl::process_applied_transaction(const trasaction_info_st &t) { 345 | try { 346 | if (start_block_reached) { 347 | _process_applied_transaction(t); 348 | } 349 | } catch (fc::exception &e) { 350 | elog("FC Exception while processing applied transaction trace: ${e}", ("e", e.to_detail_string())); 351 | } catch (std::exception &e) { 352 | elog("STD Exception while processing applied transaction trace: ${e}", ("e", e.what())); 353 | } catch (...) { 354 | elog("Unknown exception while processing applied transaction trace"); 355 | } 356 | } 357 | 358 | 359 | void kafka_plugin_impl::process_irreversible_block(const chain::block_state_ptr &bs) { 360 | try { 361 | if (start_block_reached) { 362 | _process_irreversible_block(bs); 363 | } 364 | } catch (fc::exception &e) { 365 | elog("FC Exception while processing irreversible block: ${e}", ("e", e.to_detail_string())); 366 | } catch (std::exception &e) { 367 | elog("STD Exception while processing irreversible block: ${e}", ("e", e.what())); 368 | } catch (...) { 369 | elog("Unknown exception while processing irreversible block"); 370 | } 371 | } 372 | 373 | void kafka_plugin_impl::process_accepted_block(const chain::block_state_ptr &bs) { 374 | try { 375 | if (!start_block_reached) { 376 | if (bs->block_num >= start_block_num) { 377 | start_block_reached = true; 378 | } 379 | } 380 | if (start_block_reached) { 381 | _process_accepted_block(bs); 382 | } 383 | } catch (fc::exception &e) { 384 | elog("FC Exception while processing accepted block trace ${e}", ("e", e.to_string())); 385 | } catch (std::exception &e) { 386 | elog("STD Exception while processing accepted block trace ${e}", ("e", e.what())); 387 | } catch (...) 
{ 388 | elog("Unknown exception while processing accepted block trace"); 389 | } 390 | } 391 | 392 | void kafka_plugin_impl::_process_accepted_transaction(const chain::transaction_metadata_ptr &t) { 393 | const auto &trx = t->packed_trx(); 394 | string trx_json = fc::json::to_string(trx, fc::time_point::maximum()); 395 | //elog("trx_json: ${e}",("e",trx_json)); 396 | producer->trx_kafka_sendmsg(KAFKA_TRX_ACCEPT, (char *) trx_json.c_str()); 397 | } 398 | 399 | void kafka_plugin_impl::_process_applied_transaction(const trasaction_info_st &t) { 400 | uint64_t time = (t.block_time.time_since_epoch().count() / 1000); 401 | //elog("trxId = ${e}", ("e", t.trace->id)); 402 | string transaction_metadata_json = 403 | "{\"block_number\":" + std::to_string(t.block_number) + ",\"block_time\":" + std::to_string(time) + 404 | ",\"chain_id\":\"" + t.chain_id->str() + 405 | "\",\"trace\":" + fc::json::to_string(t.trace, fc::time_point::maximum()).c_str() + "}"; 406 | producer->trx_kafka_sendmsg(KAFKA_TRX_APPLIED, (char *) transaction_metadata_json.c_str()); 407 | // elog("transaction_metadata_json = ${e}",("e",transaction_metadata_json)); 408 | 409 | if (producer->trx_kafka_get_topic(KAFKA_TRX_TRANSFER) != NULL) { 410 | filter_traction_trace(t.trace, name("transfer")); 411 | if (t.trace->action_traces.size() > 0) { 412 | string transfer_json = 413 | "{\"block_number\":" + std::to_string(t.block_number) + ",\"block_time\":" + 414 | std::to_string(time) + 415 | ",\"chain_id\":" + "\"" + t.chain_id->str() + "\"" + 416 | ",\"trace\":" + fc::json::to_string(t.trace, fc::time_point::maximum()).c_str() + "}"; 417 | producer->trx_kafka_sendmsg(KAFKA_TRX_TRANSFER, (char *) transfer_json.c_str()); 418 | //elog("transfer_json = ${e}",("e",transfer_json)); 419 | } 420 | } 421 | } 422 | 423 | void 424 | kafka_plugin_impl::_process_trace(vector::iterator action_trace_ptr, action_name act_name) { 425 | /*auto inline_trace_ptr = action_trace_ptr->inline_traces.begin(); 426 | for(;inline_trace_ptr!=action_trace_ptr->inline_traces.end();inline_trace_ptr++){ 427 | //elog("inline action:"); 428 | _process_trace(inline_trace_ptr,act_name); 429 | }*/ 430 | 431 | if (action_trace_ptr->act.name == act_name) { 432 | auto readonly = chain_plug->get_read_only_api(abi_serializer_max_time); 433 | chain_apis::read_only::get_code_params get_code_params; 434 | get_code_params.account_name = action_trace_ptr->act.account; 435 | 436 | auto get_code_results = readonly.get_code(get_code_params, fc::time_point::now() + abi_serializer_max_time); 437 | if (!get_code_results.abi.has_value()){ 438 | return; 439 | } 440 | auto abi = std::make_shared(std::move(get_code_results.abi.value()), chain::abi_serializer::create_yield_function(abi_serializer_max_time)); 441 | auto data_variant = abi->binary_to_variant(abi->get_action_type(action_trace_ptr->act.name), action_trace_ptr->act.data, chain::abi_serializer::create_yield_function(abi_serializer_max_time)); 442 | string data_str = fc::json::to_string(data_variant, fc::time_point::maximum()); 443 | action_info action_info1 = { 444 | .account = action_trace_ptr->act.account, 445 | .name = action_trace_ptr->act.name, 446 | .authorization = action_trace_ptr->act.authorization, 447 | .data_json = data_str 448 | }; 449 | 450 | action_trace_ptr->act.data.resize(data_str.size()); 451 | action_trace_ptr->act.data.assign(data_str.begin(), data_str.end()); 452 | } 453 | } 454 | 455 | void kafka_plugin_impl::filter_traction_trace(const chain::transaction_trace_ptr trace, action_name act_name) { 456 | 
vector<chain::action_trace>::iterator action_trace_ptr = trace->action_traces.begin();
457 | 
458 |         for (; action_trace_ptr != trace->action_traces.end();) {
459 |             if (action_trace_ptr->act.name == act_name) {
460 |                 _process_trace(action_trace_ptr, act_name);
461 |                 action_trace_ptr++;
462 |                 continue;
463 |             } else {
464 |                 action_trace_ptr = trace->action_traces.erase(action_trace_ptr); // erase() invalidates the iterator and returns the next valid one
465 |             }
466 |         }
467 |     }
468 | 
469 | 
470 |     void kafka_plugin_impl::_process_accepted_block(const chain::block_state_ptr &bs) {
471 |     }
472 | 
473 |     void kafka_plugin_impl::_process_irreversible_block(const chain::block_state_ptr &bs) {
474 |     }
475 | 
476 |     kafka_plugin_impl::kafka_plugin_impl()
477 |             : producer(new kafka_producer) {
478 |     }
479 | 
480 |     kafka_plugin_impl::~kafka_plugin_impl() {
481 |         if (!startup) {
482 |             try {
483 |                 ilog("kafka_plugin shutdown in process; please be patient, this can take a few minutes");
484 |                 done = true;
485 |                 condition.notify_one();
486 | 
487 |                 consume_thread.join();
488 |                 producer->trx_kafka_destroy();
489 |             } catch (std::exception &e) {
490 |                 elog("Exception on kafka_plugin shutdown of consume thread: ${e}", ("e", e.what()));
491 |             }
492 |         }
493 |     }
494 | 
495 |     void kafka_plugin_impl::init() {
496 |         ilog("starting kafka plugin thread");
497 |         consume_thread = std::thread([this] { consume_blocks(); });
498 |         startup = false;
499 |     }
500 | 
501 |     ////////////
502 |     // kafka_plugin
503 |     ////////////
504 | 
505 |     kafka_plugin::kafka_plugin()
506 |             : my(new kafka_plugin_impl) {
507 |     }
508 | 
509 |     kafka_plugin::~kafka_plugin() {
510 |     }
511 | 
512 |     void kafka_plugin::set_program_options(options_description &cli, options_description &cfg) {
513 |         cfg.add_options()
514 |                 ("accept_trx_topic", bpo::value<std::string>(),
515 |                  "The topic for accepted transactions.")
516 |                 ("applied_trx_topic", bpo::value<std::string>(),
517 |                  "The topic for applied transactions.")
518 |                 ("transfer_trx_topic", bpo::value<std::string>(),
519 |                  "The topic for transfer transactions.")
520 |                 ("kafka-uri,k", bpo::value<std::string>(),
521 |                  "the kafka brokers uri, e.g. 192.168.31.225:9092")
522 |                 ("kafka-queue-size", bpo::value<uint32_t>()->default_value(256),
523 |                  "The target queue size between nodeos and the kafka plugin thread.")
524 |                 ("kafka-block-start", bpo::value<uint32_t>()->default_value(256),
525 |                  "If specified then only abi data is pushed to kafka until the specified block is reached.")
526 |                 ("kafka-compression-codec", bpo::value<std::string>(),
527 |                  "Compression codec to use for compressing message sets. This is the default value for all topics and may be overridden by the topic configuration property compression.codec (none, gzip, snappy, lz4).");
528 | 
529 | }
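(Editor's note: the options registered above map one-to-one onto config.ini entries. A sketch for reference — the broker address, topic names, and values are placeholders, not defaults:)
```
plugin = eosio::kafka_plugin
kafka-uri = 192.168.31.225:9092
accept_trx_topic = eos_accept_topic
applied_trx_topic = eos_applied_topic
transfer_trx_topic = eos_transfer_topic
kafka-queue-size = 5000
kafka-block-start = 100
kafka-compression-codec = snappy
```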
530 | 
531 | void kafka_plugin::plugin_initialize(const variables_map &options) {
532 |     char *accept_trx_topic = NULL;
533 |     char *applied_trx_topic = NULL;
534 |     char *transfer_trx_topic = NULL;
535 |     char *brokers_str = NULL;
536 |     char *compression_codec = NULL;
537 |     try {
538 |         if (options.count("kafka-uri")) {
539 |             brokers_str = (char *) (options.at("kafka-uri").as<std::string>().c_str());
540 |             ilog("brokers_str:${j}", ("j", brokers_str));
541 |             if (options.count("accept_trx_topic") != 0) {
542 |                 accept_trx_topic = (char *) (options.at("accept_trx_topic").as<std::string>().c_str());
543 |                 ilog("accept_trx_topic:${j}", ("j", accept_trx_topic));
544 |             }
545 |             if (options.count("applied_trx_topic") != 0) {
546 |                 applied_trx_topic = (char *) (options.at("applied_trx_topic").as<std::string>().c_str());
547 |                 ilog("applied_trx_topic:${j}", ("j", applied_trx_topic));
548 |             }
549 |             if (options.count("transfer_trx_topic") != 0) {
550 |                 transfer_trx_topic = (char *) (options.at("transfer_trx_topic").as<std::string>().c_str());
551 |                 ilog("transfer_trx_topic:${j}", ("j", transfer_trx_topic));
552 |             }
553 |             if (options.count("kafka-compression-codec") != 0) {
554 |                 compression_codec = (char *) (options.at("kafka-compression-codec").as<std::string>().c_str());
555 |                 ilog("kafka-compression-codec:${j}", ("j", compression_codec));
556 |             }
557 | 
558 |             if (0 !=
559 |                 my->producer->trx_kafka_init(brokers_str, accept_trx_topic, compression_codec, applied_trx_topic,
560 |                                              transfer_trx_topic)) {
561 |                 elog("trx_kafka_init fail");
562 |             } else {
563 |                 ilog("trx_kafka_init ok");
564 |             }
565 |         }
566 | 
567 |         if (options.count("kafka-uri")) {
568 |             ilog("initializing kafka_plugin");
569 |             my->configured = true;
570 | 
571 |             if (options.count("kafka-queue-size")) {
572 |                 my->max_queue_size = options.at("kafka-queue-size").as<uint32_t>();
573 |             }
574 |             if (options.count("kafka-block-start")) {
575 |                 my->start_block_num = options.at("kafka-block-start").as<uint32_t>();
576 |             }
577 |             if (my->start_block_num == 0) {
578 |                 my->start_block_reached = true;
579 |             }
580 | 
581 |             // hook up to signals on controller
582 |             //chain_plugin* chain_plug = app().find_plugin<chain_plugin>();
583 |             my->chain_plug = app().find_plugin<chain_plugin>();
584 |             EOS_ASSERT(my->chain_plug, chain::missing_chain_plugin_exception, "");
585 |             auto &chain = my->chain_plug->chain();
586 |             my->chain_id.emplace(chain.get_chain_id());
587 | 
588 |             my->accepted_block_connection.emplace(
589 |                     chain.accepted_block.connect([&](const chain::block_state_ptr &bs) {
590 |                         my->accepted_block(bs);
591 |                     }));
592 | 
593 |             my->irreversible_block_connection.emplace(
594 |                     chain.irreversible_block.connect([&](const chain::block_state_ptr &bs) {
595 |                         my->applied_irreversible_block(bs);
596 |                     }));
597 | 
598 |             my->accepted_transaction_connection.emplace(
599 |                     chain.accepted_transaction.connect([&](const chain::transaction_metadata_ptr &t) {
600 |                         my->accepted_transaction(t);
601 |                     }));
602 |             my->applied_transaction_connection.emplace(
603 |                     chain.applied_transaction.connect(
604 |                             [&](std::tuple<const chain::transaction_trace_ptr &, const std::shared_ptr<const chain::packed_transaction> &> t) {
605 |                                 my->applied_transaction(std::get<0>(t));
606 |                             }));
607 |             my->init();
608 |         } else {
609 |             wlog("eosio::kafka_plugin configured, but no --kafka-uri specified.");
610 |             wlog("kafka_plugin disabled.");
611 |         }
612 | 
613 |     }
614 | 
615 |     FC_LOG_AND_RETHROW()
616 | }
617 | 
618 | void kafka_plugin::plugin_startup() {
619 | }
620 | 
621 | void kafka_plugin::plugin_shutdown() {
622 |     my->accepted_block_connection.reset();
623 |     my->irreversible_block_connection.reset();
624 |     my->accepted_transaction_connection.reset();
625 |     my->applied_transaction_connection.reset();
626 |     my.reset();
627 | }
628 | 
629 | } // namespace eosio
630 | 
631 | 
632 | 
--------------------------------------------------------------------------------
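(Editor's note: kafka_producer.hpp registers dr_msg_cb as the delivery-report callback but leaves its body empty, so failed deliveries are silently ignored. A minimal logging variant — a sketch, not part of the plugin — would look like this:)
```
/* Invoked once per produced message when rd_kafka_poll() is called. */
static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
    if (rkmessage->err)
        fprintf(stderr, "%% Message delivery failed: %s\n",
                rd_kafka_err2str(rkmessage->err));
    /* Successful deliveries are usually left silent to avoid log noise. */
}
```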
/kafka_producer.cpp:
--------------------------------------------------------------------------------
  1 | #include <stdio.h>
  2 | #include <string.h>
  3 | #include <signal.h>
  4 | 
  5 | #include <eosio/kafka_plugin/kafka_producer.hpp>
  6 | 
  7 | /**
  8 |  * Invoked once per message to report whether delivery succeeded (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR)
  9 |  * or failed (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
 10 |  * The callback is triggered by rd_kafka_poll() and runs on the application's thread.
 11 |  */
 12 | namespace eosio {
 13 | 
 14 |     int kafka_producer::trx_kafka_create_topic(char *brokers, char *topic, char *compression_code, rd_kafka_t **rk, rd_kafka_topic_t **rkt,
 15 |                                                rd_kafka_conf_t **conf) {
 16 |         char errstr[512];
 17 |         if (brokers == NULL || topic == NULL) {
 18 |             return KAFKA_STATUS_INIT_FAIL;
 19 |         }
 20 | 
 21 |         *conf = rd_kafka_conf_new();
 22 | 
 23 |         if (compression_code != NULL) {
 24 |             if (rd_kafka_conf_set(*conf, "compression.codec", compression_code, errstr,
 25 |                                   sizeof(errstr)) != RD_KAFKA_CONF_OK) {
 26 |                 fprintf(stderr, "%s\n", errstr);
 27 |                 return KAFKA_STATUS_INIT_FAIL;
 28 |             }
 29 |         }
 30 | 
 31 | 
 32 |         if (rd_kafka_conf_set(*conf, "bootstrap.servers", brokers, errstr,
 33 |                               sizeof(errstr)) != RD_KAFKA_CONF_OK) {
 34 |             fprintf(stderr, "%s\n", errstr);
 35 |             return KAFKA_STATUS_INIT_FAIL;
 36 |         }
 37 | 
 38 |         rd_kafka_conf_set_dr_msg_cb(*conf, dr_msg_cb);
 39 | 
 40 |         *rk = rd_kafka_new(RD_KAFKA_PRODUCER, *conf, errstr, sizeof(errstr)); /* rd_kafka_new() takes ownership of conf */
 41 |         if (!(*rk)) {
 42 |             fprintf(stderr, "%% Failed to create new producer:%s\n", errstr);
 43 |             return KAFKA_STATUS_INIT_FAIL;
 44 |         }
 45 | 
 46 |         *rkt = rd_kafka_topic_new(*rk, topic, NULL);
 47 |         if (!(*rkt)) {
 48 |             fprintf(stderr, "%% Failed to create topic object: %s\n",
 49 |                     rd_kafka_err2str(rd_kafka_last_error()));
 50 |             rd_kafka_destroy(*rk);
 51 |             *rk = NULL;
 52 |             return KAFKA_STATUS_INIT_FAIL;
 53 |         }
 54 | 
 55 |         return KAFKA_STATUS_OK;
 56 | 
 57 |     }
 58 | 
 59 | 
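(Editor's note: trx_kafka_create_topic() sets only compression.codec and bootstrap.servers before creating the producer. Other standard librdkafka properties can be applied at the same point, before rd_kafka_new(); the helper below is a hypothetical sketch, and the values are illustrative examples, not the plugin's defaults:)
```
/* Hypothetical helper: apply extra producer tuning to a conf object
 * before it is handed to rd_kafka_new(). */
static int set_producer_tuning(rd_kafka_conf_t *conf, char *errstr, size_t errstr_size) {
    if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000",
                          errstr, errstr_size) != RD_KAFKA_CONF_OK)
        return KAFKA_STATUS_INIT_FAIL;
    if (rd_kafka_conf_set(conf, "queue.buffering.max.ms", "50",
                          errstr, errstr_size) != RD_KAFKA_CONF_OK)
        return KAFKA_STATUS_INIT_FAIL;
    if (rd_kafka_conf_set(conf, "batch.num.messages", "10000",
                          errstr, errstr_size) != RD_KAFKA_CONF_OK)
        return KAFKA_STATUS_INIT_FAIL;
    return KAFKA_STATUS_OK;
}
```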
 60 |     int kafka_producer::trx_kafka_init(char *brokers, char *acceptopic, char *compression_codec, char *appliedtopic, char *transfertopic) {
 61 | 
 62 |         if (brokers == NULL) {
 63 |             return KAFKA_STATUS_INIT_FAIL;
 64 |         }
 65 | 
 66 |         if (acceptopic != NULL) {
 67 |             if (KAFKA_STATUS_OK != trx_kafka_create_topic(brokers, acceptopic, compression_codec, &accept_rk, &accept_rkt, &accept_conf)) {
 68 |                 return KAFKA_STATUS_INIT_FAIL;
 69 |             }
 70 |         }
 71 | 
 72 |         if (appliedtopic != NULL) {
 73 |             if (KAFKA_STATUS_OK !=
 74 |                 trx_kafka_create_topic(brokers, appliedtopic, compression_codec, &applied_rk, &applied_rkt, &applied_conf)) {
 75 |                 return KAFKA_STATUS_INIT_FAIL;
 76 |             }
 77 |         }
 78 | 
 79 |         if (transfertopic != NULL) {
 80 |             if (KAFKA_STATUS_OK !=
 81 |                 trx_kafka_create_topic(brokers, transfertopic, compression_codec, &transfer_rk, &transfer_rkt, &transfer_conf)) {
 82 |                 return KAFKA_STATUS_INIT_FAIL;
 83 |             }
 84 |         }
 85 | 
 86 |         return KAFKA_STATUS_OK;
 87 |     }
 88 | 
 89 |     int kafka_producer::trx_kafka_sendmsg(int trxtype, char *msgstr) {
 90 |         rd_kafka_t *rk;
 91 |         rd_kafka_topic_t *rkt;
 92 |         if (trxtype == KAFKA_TRX_ACCEPT && accept_rk != NULL && accept_rkt != NULL) {
 93 |             rk = accept_rk;
 94 |             rkt = accept_rkt;
 95 |         } else if (trxtype == KAFKA_TRX_APPLIED && applied_rk != NULL && applied_rkt != NULL) {
 96 |             rk = applied_rk;
 97 |             rkt = applied_rkt;
 98 |         } else if (trxtype == KAFKA_TRX_TRANSFER && transfer_rk != NULL && transfer_rkt != NULL) {
 99 |             rk = transfer_rk;
100 |             rkt = transfer_rkt;
101 |         } else {
102 |             return KAFKA_STATUS_MSG_INVALID;
103 |         }
104 | 
105 |         size_t len = strlen(msgstr);
106 |         if (len == 0) {
107 |             rd_kafka_poll(rk, 0);
108 |             return KAFKA_STATUS_MSG_INVALID;
109 |         }
110 |     retry:
111 |         if (rd_kafka_produce(
112 |                 rkt,
113 |                 RD_KAFKA_PARTITION_UA,
114 |                 RD_KAFKA_MSG_F_COPY,
115 |                 msgstr, len,
116 |                 NULL, 0,
117 |                 NULL) == -1) {
118 |             fprintf(stderr,
119 |                     "%% Failed to produce to topic %s: %s\n",
120 |                     rd_kafka_topic_name(rkt),
121 |                     rd_kafka_err2str(rd_kafka_last_error()));
122 | 
123 |             if (rd_kafka_last_error() == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
124 |                 rd_kafka_poll(rk, 1000);
125 |                 goto retry;
126 |             }
127 |         } else {
128 |             // fprintf(stderr, "%% Enqueued message (%zd bytes) for topic %s\n", len, rd_kafka_topic_name(rkt));
129 |         }
130 | 
131 |         rd_kafka_poll(rk, 0);
132 |         return KAFKA_STATUS_OK;
133 | 
134 |     }
135 | 
136 |     rd_kafka_topic_t *kafka_producer::trx_kafka_get_topic(int trxtype) {
137 | 
138 |         if (trxtype == KAFKA_TRX_ACCEPT) {
139 |             return accept_rkt;
140 |         } else if (trxtype == KAFKA_TRX_APPLIED) {
141 |             return applied_rkt;
142 |         } else if (trxtype == KAFKA_TRX_TRANSFER) {
143 |             return transfer_rkt;
144 |         } else {
145 |             return NULL;
146 |         }
147 | 
148 |     }
149 | 
150 |     int kafka_producer::trx_kafka_destroy(void) {
151 |         fprintf(stderr, "=== trx_kafka_destroy: flushing final messages...\n");
152 |         if (accept_rk != NULL) {
153 |             rd_kafka_flush(accept_rk, 10 * 1000);
154 |             /* Destroy topic object */
155 |             rd_kafka_topic_destroy(accept_rkt);
156 |             /* Destroy the producer instance */
157 |             rd_kafka_destroy(accept_rk);
158 |             accept_rk = NULL;
159 |             accept_rkt = NULL;
160 |         }
161 |         if (applied_rk != NULL) {
162 |             rd_kafka_flush(applied_rk, 10 * 1000);
163 |             /* Destroy topic object */
164 |             rd_kafka_topic_destroy(applied_rkt);
165 |             /* Destroy the producer instance */
166 |             rd_kafka_destroy(applied_rk);
167 |             applied_rk = NULL;
168 |             applied_rkt = NULL;
169 |         }
170 |         if (transfer_rk != NULL) {
171 |             rd_kafka_flush(transfer_rk, 10 * 1000);
172 |             /* Destroy topic object */
173 |             rd_kafka_topic_destroy(transfer_rkt);
174 |             /* Destroy the producer instance */
175 |             rd_kafka_destroy(transfer_rk);
176 |             transfer_rk = NULL;
177 |             transfer_rkt = NULL;
178 |         }
179 | 
180 |         return KAFKA_STATUS_OK;
181 |     }
182 | }
183 | #if 0
184 | int main(int argc, char **argv)
185 | {
186 |     char buf[512];
187 |     int kafkastatus = KAFKA_STATUS_OK;
188 |     trx_kafka_init();
189 | 
190 | 
191 |     fprintf(stderr,
192 |             "%% Type some text and hit enter to produce message\n"
193 |             "%% Or just hit enter to only serve delivery reports\n"
194 |             "%% Press Ctrl-C or Ctrl-D to exit\n");
195 |     while(run && fgets(buf, sizeof(buf), stdin))
196 |     {
197 |         do{
198 |             kafkastatus = trx_kafka_sendmsg(buf);
199 |         }while(kafkastatus == KAFKA_STATUS_QUEUE_FULL);
200 |     }
201 | 
202 |     trx_kafka_destroy();
203 | }
204 | #endif
205 | 
206 | #if 0
207 | int main(int argc, char **argv){
208 |     rd_kafka_t *rk;            /* Producer instance handle */
209 |     rd_kafka_topic_t *rkt;     /* Topic object */
210 |     rd_kafka_conf_t *conf;     /* Temporary configuration object */
211 |     char errstr[512];
212 |     char buf[512];
213 |     const char *brokers;
214 |     const char *topic;
215 | 
216 |     if(argc != 3){
217 |         fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
218 |         return 1;
219 |     }
220 | 
221 |     brokers = argv[1];
222 |     topic = argv[2];
223 | 
224 |     /* Create a kafka configuration placeholder */
225 |     conf = rd_kafka_conf_new();
226 | 
227 |     /* Set up the broker cluster */
228 |     if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
229 |                           sizeof(errstr)) != RD_KAFKA_CONF_OK){
230 |         fprintf(stderr, "%s\n", errstr);
231 |         return 1;
232 |     }
233 | 
234 |     /* Set the delivery-report callback; it is called once for every message
235 |      * accepted by rd_kafka_produce(). The application must call rd_kafka_poll()
236 |      * periodically to serve the queued delivery-report callbacks. */
237 |     rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
238 | 
239 |     /* Create the producer instance.
240 |      * rd_kafka_new() takes ownership of the conf object; the application must
241 |      * not reference it again after this call. */
242 |     rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
243 |     if(!rk){
244 |         fprintf(stderr, "%% Failed to create new producer:%s\n", errstr);
245 |         return 1;
246 |     }
247 | 
248 |     /* Instantiate one or more topics (`rd_kafka_topic_t`) for producing or
249 |      * consuming. The topic object holds topic-specific configuration and is
250 |      * internally populated with all available partitions and leader brokers. */
251 |     rkt = rd_kafka_topic_new(rk, topic, NULL);
252 |     if (!rkt){
253 |         fprintf(stderr, "%% Failed to create topic object: %s\n",
254 |                 rd_kafka_err2str(rd_kafka_last_error()));
255 |         rd_kafka_destroy(rk);
256 |         return 1;
257 |     }
258 | 
259 |     /* Signal handler for clean interruption */
260 |     signal(SIGINT, stop);
261 | 
262 |     fprintf(stderr,
263 |             "%% Type some text and hit enter to produce message\n"
264 |             "%% Or just hit enter to only serve delivery reports\n"
265 |             "%% Press Ctrl-C or Ctrl-D to exit\n");
266 | 
267 |     while(run && fgets(buf, sizeof(buf), stdin)){
268 |         size_t len = strlen(buf);
269 | 
270 |         if(buf[len-1] == '\n')
271 |             buf[--len] = '\0';
272 | 
273 |         if(len == 0){
274 |             /* Poll the kafka handle for events; events cause the
275 |              * application-provided callbacks to be invoked. The second argument
276 |              * is the maximum blocking time; 0 makes the call non-blocking. */
277 |             rd_kafka_poll(rk, 0);
278 |             continue;
279 |         }
280 | 
281 |     retry:
282 |         /* Send/Produce message.
283 |          * This is an asynchronous call; on success it only enqueues the message
284 |          * on the internal producer queue. Actual delivery attempts to the
285 |          * broker are handled by background threads. The previously registered
286 |          * delivery callback (dr_msg_cb) signals back to the application when
287 |          * the message has been delivered (or has failed). */
288 |         if (rd_kafka_produce(
289 |                 /* Topic object */
290 |                 rkt,
291 |                 /* Use the built-in partitioner to select a partition */
292 |                 RD_KAFKA_PARTITION_UA,
293 |                 /* Make a copy of the payload */
294 |                 RD_KAFKA_MSG_F_COPY,
295 |                 /* Message body and length */
296 |                 buf, len,
297 |                 /* Optional key and its length */
298 |                 NULL, 0,
299 |                 NULL) == -1){
300 |             fprintf(stderr,
301 |                     "%% Failed to produce to topic %s: %s\n",
302 |                     rd_kafka_topic_name(rkt),
303 |                     rd_kafka_err2str(rd_kafka_last_error()));
304 | 
305 |             if (rd_kafka_last_error() == RD_KAFKA_RESP_ERR__QUEUE_FULL){
306 |                 /* If the internal queue is full, wait for messages to be
307 |                  * delivered and retry. The internal queue holds both messages
308 |                  * waiting to be sent and messages that have been sent or
309 |                  * failed; it is bounded by queue.buffering.max.messages. */
310 |                 rd_kafka_poll(rk, 1000);
311 |                 goto retry;
312 |             }
313 |         }else{
314 |             fprintf(stderr, "%% Enqueued message (%zd bytes) for topic %s\n",
315 |                     len, rd_kafka_topic_name(rkt));
316 |         }
317 | 
318 |         /* A producer application should keep servicing the delivery-report
319 |          * queue by calling rd_kafka_poll() at frequent intervals. Even while
320 |          * no messages are being produced, make sure rd_kafka_poll() is still
321 |          * called, so that delivery-report callbacks (and any other registered
322 |          * callbacks) for previously produced messages are served. */
323 |         rd_kafka_poll(rk, 0);
324 |     }
325 | 
326 |     fprintf(stderr, "%% Flushing final message.. \n");
327 |     /* rd_kafka_flush() is an abstraction over rd_kafka_poll() that waits for
328 |      * all outstanding produce requests to complete. It is typically called
329 |      * before destroying the producer instance, to ensure all queued and
330 |      * in-flight produce requests complete before teardown. */
331 |     rd_kafka_flush(rk, 10*1000);
332 | 
333 |     /* Destroy topic object */
334 |     rd_kafka_topic_destroy(rkt);
335 | 
336 |     /* Destroy the producer instance */
337 |     rd_kafka_destroy(rk);
338 | 
339 |     return 0;
340 | }
341 | #endif
342 | 
--------------------------------------------------------------------------------
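(Editor's note: the disabled demos above drive the raw librdkafka API directly. For comparison, here is a minimal sketch of a standalone program driving the kafka_producer wrapper instead; the broker and topic strings are placeholders, and inside nodeos this wiring is performed by kafka_plugin::plugin_initialize:)
```
#include <eosio/kafka_plugin/kafka_producer.hpp>

int main() {
    eosio::kafka_producer producer;
    char brokers[] = "127.0.0.1:9092";
    char accept_topic[] = "eos_accept_topic";
    char msg[] = "{\"demo\":true}";

    // Topics passed as NULL are simply skipped by trx_kafka_init.
    if (producer.trx_kafka_init(brokers, accept_topic, NULL, NULL, NULL) != KAFKA_STATUS_OK)
        return 1;

    producer.trx_kafka_sendmsg(KAFKA_TRX_ACCEPT, msg);  // asynchronous enqueue
    producer.trx_kafka_destroy();                       // flush and tear down
    return 0;
}
```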