├── .clippy.toml ├── .gitignore ├── .rustfmt.toml ├── .travis.yml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── appveyor.yml ├── examples ├── console-consumer.rs ├── console-producer.rs └── topic-metadata.rs ├── src ├── client │ ├── builder.rs │ ├── client.rs │ ├── cluster.rs │ ├── config.rs │ ├── metadata.rs │ ├── metrics.rs │ ├── middleware.rs │ ├── mock.rs │ ├── mod.rs │ ├── record.rs │ ├── service.rs │ └── version.rs ├── compression │ ├── gzip.rs │ ├── lz4.rs │ ├── mod.rs │ └── snappy.rs ├── consumer │ ├── assignor.rs │ ├── builder.rs │ ├── config.rs │ ├── consumer.rs │ ├── coordinator.rs │ ├── fetcher.rs │ ├── mod.rs │ ├── protocol.rs │ ├── subscribed.rs │ └── subscriptions.rs ├── errors.rs ├── lib.rs ├── macros.rs ├── network │ ├── codec.rs │ ├── conn.rs │ ├── mod.rs │ ├── pool.rs │ ├── request.rs │ ├── response.rs │ └── stream.rs ├── producer │ ├── accumulator.rs │ ├── batch.rs │ ├── builder.rs │ ├── config.rs │ ├── interceptor.rs │ ├── mod.rs │ ├── partitioner.rs │ ├── producer.rs │ ├── record.rs │ └── sender.rs ├── protocol │ ├── api_key.rs │ ├── api_versions.rs │ ├── code.rs │ ├── encode.rs │ ├── fetch.rs │ ├── group.rs │ ├── header.rs │ ├── list_offset.rs │ ├── message.rs │ ├── metadata.rs │ ├── mod.rs │ ├── offset_commit.rs │ ├── offset_fetch.rs │ ├── parse.rs │ ├── produce.rs │ └── schema.rs └── serialization │ ├── bytes.rs │ ├── encoding.rs │ ├── json.rs │ ├── mod.rs │ ├── noop.rs │ ├── raw.rs │ └── str.rs └── tests ├── common └── mod.rs ├── docker ├── v0.10 │ ├── Dockerfile │ ├── docker-compose.yml │ ├── setup-tests.sh │ └── setup-topics.sh ├── v0.11 │ ├── Dockerfile │ ├── docker-compose.yml │ ├── setup-tests.sh │ └── setup-topics.sh ├── v0.8 │ ├── Dockerfile │ ├── docker-compose.yml │ ├── setup-tests.sh │ └── setup-topics.sh ├── v0.9 │ ├── Dockerfile │ ├── docker-compose.yml │ ├── setup-tests.sh │ └── setup-topics.sh ├── v1.0 │ ├── Dockerfile │ ├── docker-compose.yml │ ├── setup-tests.sh │ └── setup-topics.sh └── v1.1 │ ├── Dockerfile │ ├── docker-compose.yml │ ├── setup-tests.sh │ └── setup-topics.sh ├── integration_test.rs └── integration_test.sh /.clippy.toml: -------------------------------------------------------------------------------- 1 | too-many-arguments-threshold = 10 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | .vscode 4 | .env 5 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 120 2 | reorder_imports = true 3 | reorder_imports_in_group = true 4 | reorder_imported_names = true 5 | reorder_impl_items = true 6 | normalize_comments = true 7 | wrap_comments = true 8 | comment_width = 120 9 | use_field_init_shorthand = true -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | services: 3 | - docker 4 | language: rust 5 | rust: 6 | - stable 7 | - beta 8 | - nightly 9 | matrix: 10 | allow_failures: 11 | - rust: nightly 12 | cache: cargo 13 | os: 14 | - linux 15 | - osx 16 | after_failure: 17 | - tree -h 18 | env: 19 | global: 20 | - RUST_BACKTRACE=full 21 | - RUST_LOG=tokio_kafka=trace 22 | script: 23 | - (test $TRAVIS_RUST_VERSION != "nightly" || cargo build --features clippy) 24 | - (test 
$TRAVIS_RUST_VERSION == "nightly" || cargo build) 25 | - cargo test --verbose 26 | - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ./tests/integration_test.sh all ; fi 27 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tokio-kafka" 3 | version = "0.1.0" 4 | authors = ["Flier Lu "] 5 | 6 | [features] 7 | default = ["snappy", "gzip", "lz4", "encoding", "json"] 8 | snappy = ["snap"] 9 | gzip = ["flate2"] 10 | lz4 = ["lz4-compress"] 11 | json = ["serde_json"] 12 | integration_test = [] 13 | 14 | [dependencies] 15 | log = "0.4" 16 | error-chain = "0.11" 17 | lazy_static = "1.0" 18 | bytes = "0.4" 19 | nom = { version = "3.2", features = ["verbose-errors"] } 20 | crc = "1.4" 21 | twox-hash = "1.1" 22 | time = "0.1" 23 | rand = "0.4" 24 | hexplay = "0.2" 25 | byteorder = "1.0" 26 | serde = "1.0" 27 | serde_derive = "1.0" 28 | prometheus = "0.4" 29 | 30 | futures = "0.1" 31 | futures-cpupool = "0.1" 32 | tokio-core = "0.1" 33 | tokio-io = "0.1" 34 | tokio-proto = "0.1" 35 | tokio-service = "0.1" 36 | tokio-timer = "0.1" 37 | tokio-retry = "0.2" 38 | tokio-tls = { version = "0.1", features = ["tokio-proto"] } 39 | native-tls = "0.1" 40 | abstract-ns = "0.4" 41 | ns-router = "0.1" 42 | ns-std-threaded = "0.3" 43 | 44 | flate2 = { version = "1.0", optional = true } 45 | snap = { version = "0.2", optional = true } 46 | lz4-compress = { version = "0.1", optional = true } 47 | 48 | encoding = { version = "0.2", optional = true } 49 | serde_json = { version = "1.0", optional = true } 50 | 51 | clippy = {version = "*", optional = true} 52 | 53 | [dev-dependencies] 54 | pretty_env_logger = "0.2" 55 | failure = "0.1" 56 | getopts = "0.2" 57 | typemap = "0.3" 58 | 59 | [target.'cfg(unix)'.dev-dependencies] 60 | tokio-file-unix = "0.4" 61 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 Alex Crichton 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | version: 1.0.{build} 2 | 3 | environment: 4 | global: 5 | PROJECT_NAME: tokio-kafka 6 | RUST_BACKTRACE: 1 7 | RUST_LOG: tokio_kafka=trace 8 | 9 | matrix: 10 | # Stable channel 11 | - TARGET: i686-pc-windows-gnu 12 | CHANNEL: stable 13 | - TARGET: i686-pc-windows-msvc 14 | CHANNEL: stable 15 | - TARGET: x86_64-pc-windows-gnu 16 | CHANNEL: stable 17 | - TARGET: x86_64-pc-windows-msvc 18 | CHANNEL: stable 19 | # Beta channel 20 | - TARGET: i686-pc-windows-gnu 21 | CHANNEL: beta 22 | - TARGET: i686-pc-windows-msvc 23 | CHANNEL: beta 24 | - TARGET: x86_64-pc-windows-gnu 25 | CHANNEL: beta 26 | - TARGET: x86_64-pc-windows-msvc 27 | CHANNEL: beta 28 | # Nightly channel 29 | - TARGET: i686-pc-windows-gnu 30 | CHANNEL: nightly 31 | - TARGET: i686-pc-windows-msvc 32 | CHANNEL: nightly 33 | - TARGET: x86_64-pc-windows-gnu 34 | CHANNEL: nightly 35 | - TARGET: x86_64-pc-windows-msvc 36 | CHANNEL: nightly 37 | 38 | build: false 39 | 40 | install: 41 | - curl -sSf -o rustup-init.exe https://win.rustup.rs 42 | - rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y 43 | - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin 44 | - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin 45 | - set PATH=%PATH%;C:\MinGW\bin 46 | - rustc -Vv 47 | - cargo -V 48 | 49 | cache: 50 | - C:\Users\appveyor\.cargo\ -> appveyor.yml 51 | 52 | test_script: 53 | - cargo build --verbose 54 | - cargo test --verbose --lib 55 | 56 | branches: 57 | only: 58 | - master 59 | - develop 60 | # IMPORTANT Regex to match tags. Required, or appveyor may not trigger deploys when a new tag is pushed. 61 | # This regex matches semantic versions like v1.2.3-rc4+2016.02.22 62 | - /^v\d+\.\d+\.\d+.*$/ 63 | -------------------------------------------------------------------------------- /examples/console-consumer.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate error_chain; 3 | #[macro_use] 4 | extern crate log; 5 | extern crate getopts; 6 | extern crate pretty_env_logger; 7 | extern crate rand; 8 | 9 | extern crate futures; 10 | extern crate tokio_core; 11 | extern crate tokio_io; 12 | 13 | extern crate tokio_kafka; 14 | 15 | use std::env; 16 | use std::path::Path; 17 | use std::process; 18 | 19 | use getopts::Options; 20 | use rand::Rng; 21 | 22 | use futures::{Future, Stream}; 23 | use tokio_core::reactor::Core; 24 | 25 | use tokio_kafka::{Consumer, KafkaConsumer, SeekTo, StringDeserializer, Subscribed}; 26 | 27 | const DEFAULT_BROKER: &str = "localhost:9092"; 28 | const DEFAULT_CLIENT_ID: &str = "consumer-1"; 29 | const DEFAULT_TOPIC: &str = "my-topic"; 30 | 31 | error_chain!{ 32 | links { 33 | KafkaError(tokio_kafka::Error, tokio_kafka::ErrorKind); 34 | } 35 | foreign_links { 36 | IoError(::std::io::Error); 37 | ArgError(::getopts::Fail); 38 | } 39 | } 40 | 41 | #[derive(Clone, Debug)] 42 | struct Config { 43 | brokers: Vec, 44 | client_id: String, 45 | topics: Vec, 46 | group_id: String, 47 | offset: SeekTo, 48 | no_commit: bool, 49 | skip_message_on_error: bool, 50 | } 51 | 52 | impl Config { 53 | fn parse_cmdline() -> Result { 54 | let args: Vec = env::args().collect(); 55 | let program = Path::new(&args[0]).file_name().unwrap().to_str().unwrap(); 56 | let mut opts = Options::new(); 57 | 58 | opts.optflag("h", "help", "print this help menu"); 59 | opts.optopt( 60 | "b", 61 | 
"bootstrap-server", 62 | "Bootstrap broker(s) (host[:port], comma separated)", 63 | "HOSTS", 64 | ); 65 | opts.optopt("", "client-id", "Specify the client id.", "ID"); 66 | opts.optopt("g", "group-id", "Specify the consumer group.", "NAME"); 67 | opts.optopt("t", "topics", "The topic id to consume on (comma separated).", "NAMES"); 68 | opts.optopt("o", "offset", "The offset id to consume from (a non-negative number), or 'earliest' which means from beginning, or 'latest' which means from end (default: latest).", "OFFSET"); 69 | opts.optflag("", "from-beginning", "If the consumer does not already have an established offset to consume from, start with the earliest message present in the log rather than the latest message."); 70 | opts.optflag("", "no-commit", "Do not commit group offsets."); 71 | opts.optflag( 72 | "", 73 | "skip-message-on-error", 74 | "If there is an error when processing a message, skip it instead of halt.", 75 | ); 76 | 77 | let m = opts.parse(&args[1..])?; 78 | 79 | if m.opt_present("h") { 80 | let brief = format!("Usage: {} [options]", program); 81 | 82 | print!("{}", opts.usage(&brief)); 83 | 84 | process::exit(0); 85 | } 86 | 87 | Ok(Config { 88 | brokers: m.opt_str("b").map_or_else( 89 | || vec![DEFAULT_BROKER.to_owned()], 90 | |s| s.split(',').map(|s| s.trim().to_owned()).collect(), 91 | ), 92 | client_id: m.opt_str("client-id").unwrap_or(DEFAULT_CLIENT_ID.to_owned()), 93 | topics: m.opt_str("t").map_or_else( 94 | || vec![DEFAULT_TOPIC.to_owned()], 95 | |s| s.split(',').map(|s| s.trim().to_owned()).collect(), 96 | ), 97 | group_id: m.opt_str("g") 98 | .unwrap_or_else(|| format!("console-consumer-{}", rand::thread_rng().gen_range(1, 100000))), 99 | offset: m.opt_str("o").map_or_else( 100 | || { 101 | if m.opt_present("from-beginning") { 102 | SeekTo::Beginning 103 | } else { 104 | SeekTo::End 105 | } 106 | }, 107 | |s| s.parse().unwrap(), 108 | ), 109 | no_commit: m.opt_present("no-commit"), 110 | skip_message_on_error: m.opt_present("skip-message-on-error"), 111 | }) 112 | } 113 | } 114 | 115 | fn main() { 116 | pretty_env_logger::init(); 117 | 118 | let config = Config::parse_cmdline().unwrap(); 119 | 120 | debug!("parsed config: {:?}", config); 121 | 122 | run(config).unwrap(); 123 | } 124 | 125 | fn run(config: Config) -> Result<()> { 126 | let mut core = Core::new()?; 127 | 128 | let handle = core.handle(); 129 | 130 | let builder = KafkaConsumer::with_bootstrap_servers(config.brokers, handle) 131 | .with_client_id(config.client_id) 132 | .with_group_id(config.group_id) 133 | .with_key_deserializer(StringDeserializer::default()) 134 | .with_value_deserializer(StringDeserializer::default()); 135 | 136 | let mut consumer = builder.build()?; 137 | let offset = config.offset; 138 | 139 | let work = consumer 140 | .subscribe(config.topics) 141 | .and_then(move |topics| { 142 | for partition in topics.assigment() { 143 | topics.seek(&partition, offset)?; 144 | } 145 | 146 | Ok(topics) 147 | }) 148 | .and_then(|topics| { 149 | topics 150 | .clone() 151 | .for_each(|record| { 152 | println!( 153 | "{} {} {}", 154 | record.timestamp.map(|ts| ts.to_string()).unwrap_or_default(), 155 | record.key.unwrap_or_default(), 156 | record.value.unwrap_or_default() 157 | ); 158 | 159 | Ok(()) 160 | }) 161 | .and_then(move |_| topics.commit()) 162 | }) 163 | .map(|_| ()) 164 | .from_err(); 165 | 166 | core.run(work) 167 | } 168 | -------------------------------------------------------------------------------- /src/client/builder.rs: 
-------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use std::ops::{Deref, DerefMut}; 3 | use std::time::Duration; 4 | 5 | use tokio_core::reactor::Handle; 6 | 7 | use client::{ClientConfig, KafkaClient, KafkaVersion}; 8 | use errors::{ErrorKind, Result}; 9 | use protocol::ToMilliseconds; 10 | 11 | /// A `KafkaClient` builder easing the process of setting up various 12 | /// configuration settings. 13 | #[derive(Default)] 14 | pub struct ClientBuilder<'a> { 15 | config: ClientConfig, 16 | handle: Option, 17 | phantom: PhantomData<&'a u8>, 18 | } 19 | 20 | impl<'a> Deref for ClientBuilder<'a> { 21 | type Target = ClientConfig; 22 | 23 | fn deref(&self) -> &Self::Target { 24 | &self.config 25 | } 26 | } 27 | 28 | impl<'a> DerefMut for ClientBuilder<'a> { 29 | fn deref_mut(&mut self) -> &mut Self::Target { 30 | &mut self.config 31 | } 32 | } 33 | 34 | impl<'a> ClientBuilder<'a> { 35 | /// Construct a `ClientBuilder` from ClientConfig 36 | pub fn with_config(config: ClientConfig, handle: Handle) -> Self { 37 | ClientBuilder { 38 | config, 39 | handle: Some(handle), 40 | phantom: PhantomData, 41 | } 42 | } 43 | 44 | /// Construct a `ClientBuilder` from bootstrap servers of the Kafka cluster 45 | pub fn with_bootstrap_servers(hosts: I, handle: Handle) -> Self 46 | where 47 | I: IntoIterator, 48 | { 49 | Self::with_config(ClientConfig::with_bootstrap_servers(hosts), handle) 50 | } 51 | 52 | fn with_handle(mut self, handle: Handle) -> Self { 53 | self.handle = Some(handle); 54 | self 55 | } 56 | 57 | /// Sets the id string to pass to the server when making requests. 58 | pub fn with_client_id(mut self, client_id: String) -> Self { 59 | self.config.client_id = Some(client_id); 60 | self 61 | } 62 | 63 | /// Sets the number of milliseconds after this we close idle connections 64 | pub fn with_max_connection_idle(mut self, max_connection_idle: Duration) -> Self { 65 | self.config.max_connection_idle = max_connection_idle.as_millis(); 66 | self 67 | } 68 | 69 | /// Sets the maximum amount of time the client will wait for the response 70 | /// of a request. 71 | pub fn with_request_timeout(mut self, request_timeout: Duration) -> Self { 72 | self.config.request_timeout = request_timeout.as_millis(); 73 | self 74 | } 75 | 76 | /// Sets the request broker's supported API versions to adjust functionality to available 77 | /// protocol features. 
78 | pub fn with_api_version_request(mut self) -> Self { 79 | self.config.api_version_request = true; 80 | self 81 | } 82 | 83 | /// Sets the fallback broker version will be used 84 | pub fn with_broker_version_fallback(mut self, version: KafkaVersion) -> Self { 85 | self.config.broker_version_fallback = version; 86 | self 87 | } 88 | 89 | /// Sets the period of time in milliseconds after which we force a refresh 90 | /// of metadata 91 | pub fn with_metadata_max_age(mut self, metadata_max_age: Duration) -> Self { 92 | self.config.metadata_max_age = metadata_max_age.as_millis(); 93 | self 94 | } 95 | 96 | /// Sets to record metrics for client operations 97 | pub fn with_metrics(mut self) -> Self { 98 | self.config.metrics = true; 99 | self 100 | } 101 | } 102 | 103 | impl<'a> ClientBuilder<'a> 104 | where 105 | Self: 'static, 106 | { 107 | pub fn build(self) -> Result> { 108 | let handle = self.handle.ok_or(ErrorKind::ConfigError("missed handle"))?; 109 | 110 | Ok(KafkaClient::new(self.config, handle)) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/client/cluster.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use network::TopicPartition; 4 | use protocol::{ApiKeys, ApiVersion, NodeId, PartitionId, UsableApiVersions, SUPPORTED_API_VERSIONS}; 5 | 6 | /// A trait for representation of a subset of the nodes, topics, and partitions in the Kafka 7 | /// cluster. 8 | pub trait Cluster { 9 | /// The known set of brokers. 10 | fn brokers(&self) -> &[Broker]; 11 | 12 | /// Get all topic with partition information. 13 | fn topics(&self) -> HashMap<&str, &[PartitionInfo]>; 14 | 15 | /// Get all topic names. 16 | fn topic_names(&self) -> Vec<&str>; 17 | 18 | /// Find the broker by the node id (return `None` if no such node exists) 19 | fn find_broker(&self, broker: BrokerRef) -> Option<&Broker>; 20 | 21 | /// Get the current leader for the given topic-partition (return `None` if no such node 22 | /// exists) 23 | fn leader_for(&self, tp: &TopicPartition) -> Option<&Broker>; 24 | 25 | /// Get the metadata for the specified partition (return `None` if no such 26 | /// partition exists) 27 | fn find_partition(&self, tp: &TopicPartition) -> Option<&PartitionInfo>; 28 | 29 | /// Get the list of partitions for this topic (return `None` if no such 30 | /// topic exists) 31 | fn partitions_for_topic(&self, topic_name: &str) -> Option>; 32 | 33 | /// Get the list of partitions whose leader is this node 34 | fn partitions_for_broker(&self, broker: BrokerRef) -> Vec; 35 | } 36 | 37 | /// Describes a Kafka broker node is communicating with. 38 | #[derive(Clone, Debug)] 39 | pub struct Broker { 40 | /// The identifier of this broker as understood in a Kafka cluster. 41 | node_id: NodeId, 42 | 43 | /// host of this broker. 44 | /// 45 | /// This information is advertised by and originating from Kafka cluster itself. 46 | host: String, 47 | 48 | /// The port for this node 49 | port: u16, 50 | 51 | /// The version ranges of requests supported by the broker. 52 | api_versions: Option, 53 | } 54 | 55 | impl Broker { 56 | pub fn new(id: NodeId, host: &str, port: u16) -> Self { 57 | Broker { 58 | node_id: id, 59 | host: host.to_owned(), 60 | port, 61 | api_versions: None, 62 | } 63 | } 64 | 65 | /// Retrives the node_id of this broker as identified with the 66 | /// remote Kafka cluster. 
67 | pub fn id(&self) -> NodeId { 68 | self.node_id 69 | } 70 | 71 | pub fn as_ref(&self) -> BrokerRef { 72 | BrokerRef::new(self.node_id) 73 | } 74 | 75 | pub fn host(&self) -> &str { 76 | &self.host 77 | } 78 | 79 | pub fn port(&self) -> u16 { 80 | self.port 81 | } 82 | 83 | /// Retrieves the host:port of the this Kafka broker. 84 | pub fn addr(&self) -> (&str, u16) { 85 | (&self.host, self.port) 86 | } 87 | 88 | fn api_versions(&self) -> Option<&UsableApiVersions> { 89 | self.api_versions.as_ref() 90 | } 91 | 92 | pub fn api_version(&self, api_key: ApiKeys) -> Option { 93 | if api_key == ApiKeys::Fetch { 94 | return Some(1); 95 | } 96 | let supported_version: i16 = SUPPORTED_API_VERSIONS 97 | .find(api_key).map(|v| v.max_version).unwrap_or(0); 98 | self.api_versions 99 | .as_ref() 100 | .and_then(|api_versions| 101 | api_versions.find(api_key) 102 | .map(|api_version| { 103 | debug!("Api Key {:?}, current: {}, supported: {}", api_key, api_version.max_version, supported_version); 104 | ::std::cmp::min(api_version.max_version, supported_version) 105 | }) 106 | ) 107 | } 108 | 109 | pub fn with_api_versions(&self, api_versions: Option) -> Self { 110 | Broker { 111 | node_id: self.node_id, 112 | host: self.host.clone(), 113 | port: self.port, 114 | api_versions, 115 | } 116 | } 117 | } 118 | 119 | /// The node index of this broker 120 | pub type BrokerIndex = i32; 121 | 122 | // See `Brokerref` 123 | static UNKNOWN_BROKER_INDEX: BrokerIndex = ::std::i32::MAX; 124 | 125 | /// A custom identifier that used to refer to a broker. 126 | #[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] 127 | pub struct BrokerRef(BrokerIndex); 128 | 129 | impl BrokerRef { 130 | // ~ private constructor on purpose 131 | pub fn new(index: BrokerIndex) -> Self { 132 | BrokerRef(index) 133 | } 134 | 135 | pub fn index(&self) -> BrokerIndex { 136 | self.0 137 | } 138 | 139 | fn set(&mut self, other: BrokerRef) { 140 | if self.0 != other.0 { 141 | self.0 = other.0; 142 | } 143 | } 144 | 145 | fn set_unknown(&mut self) { 146 | self.set(BrokerRef::new(UNKNOWN_BROKER_INDEX)) 147 | } 148 | } 149 | 150 | impl From for BrokerRef { 151 | fn from(index: BrokerIndex) -> Self { 152 | BrokerRef::new(index) 153 | } 154 | } 155 | 156 | /// Information about a topic-partition. 
157 | #[derive(Debug, Clone)] 158 | pub struct PartitionInfo { 159 | /// The partition id 160 | pub partition_id: PartitionId, 161 | /// The node id of the node currently acting as a leader for this partition or null if 162 | /// there is no leader 163 | pub leader: Option, 164 | /// The complete set of replicas for this partition regardless of whether they are alive or 165 | /// up-to-date 166 | pub replicas: Vec, 167 | /// The subset of the replicas that are in sync, that is caught-up to the leader and ready 168 | /// to take over as leader if the leader should fail 169 | pub in_sync_replicas: Vec, 170 | } 171 | 172 | impl<'a> Default for PartitionInfo { 173 | fn default() -> Self { 174 | PartitionInfo { 175 | partition_id: -1, 176 | leader: None, 177 | replicas: Vec::new(), 178 | in_sync_replicas: Vec::new(), 179 | } 180 | } 181 | } 182 | 183 | impl PartitionInfo { 184 | pub fn new(partition: PartitionId) -> Self { 185 | PartitionInfo { 186 | partition_id: partition, 187 | leader: None, 188 | replicas: vec![], 189 | in_sync_replicas: vec![], 190 | } 191 | } 192 | 193 | pub fn with_leader(partition: PartitionId, leader: BrokerRef) -> Self { 194 | PartitionInfo { 195 | partition_id: partition, 196 | leader: Some(leader), 197 | replicas: vec![], 198 | in_sync_replicas: vec![], 199 | } 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/client/config.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use tokio_retry::strategy::{jitter, ExponentialBackoff}; 4 | use tokio_timer::{wheel, Timer}; 5 | 6 | use client::KafkaVersion; 7 | 8 | /// The default milliseconds after which we close the idle connections. 9 | /// 10 | /// Defaults to 5 seconds, see 11 | /// [`ClientConfig::max_connection_idle`](struct.ClientConfig.html#max_connection_idle.v) 12 | pub const DEFAULT_MAX_CONNECTION_IDLE_TIMEOUT_MILLIS: u64 = 5000; 13 | 14 | /// The default milliseconds the client will wait for the response of a request. 15 | /// 16 | /// Defaults to 30 seconds, see 17 | /// [`ClientConfig::request_timeout`](struct.ClientConfig.html#request_timeout.v) 18 | pub const DEFAULT_REQUEST_TIMEOUT_MILLS: u64 = 30_000; 19 | 20 | /// The default milliseconds after which we force a refresh of metadata 21 | /// 22 | /// Defaults to 5 minutes, see 23 | /// [`ClientConfig::metadata_max_age`](struct.ClientConfig.html#metadata_max_age.v) 24 | pub const DEFAULT_METADATA_MAX_AGE_MILLS: u64 = 5 * 60 * 1000; 25 | 26 | /// The default milliseconds of the timer tick duration. 27 | /// 28 | /// Defaults to 100 ms 29 | pub const DEFAULT_TIMER_TICK_MILLS: u64 = 100; 30 | 31 | /// The default time to wait before attempting to retry a failed request to a given topic partition. 32 | /// 33 | /// Defaults to 100 ms, see 34 | /// [`ClientConfig::retry_backoff`](struct.ClientConfig.html#retry_backoff.v) 35 | pub const DEFAULT_RETRY_BACKOFF_MILLIS: u64 = 100; 36 | 37 | /// Configuration for the Kafka Client. 38 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 39 | #[serde(default)] 40 | pub struct ClientConfig { 41 | /// A list of host/port pairs to use for establishing the initial connection to the Kafka 42 | /// cluster. 43 | #[serde(rename = "bootstrap.servers")] 44 | pub hosts: Vec, 45 | 46 | /// An id string to pass to the server when making requests. 
47 | /// 48 | /// The purpose of this is to be able to track the source of requests beyond just ip/port 49 | /// by allowing a logical application name to be included in server-side request logging. 50 | #[serde(rename = "client.id")] 51 | pub client_id: Option, 52 | 53 | /// Close idle connections after the number of milliseconds specified by 54 | /// this config. 55 | #[serde(rename = "connection.max.idle.ms")] 56 | pub max_connection_idle: u64, 57 | 58 | /// The maximum amount of time the client will wait for the response of a 59 | /// request. 60 | #[serde(rename = "request.timeout.ms")] 61 | pub request_timeout: u64, 62 | 63 | /// Request broker's supported API versions to adjust functionality to available protocol 64 | /// features. 65 | #[serde(rename = "api.version.request")] 66 | pub api_version_request: bool, 67 | 68 | /// Older broker versions (<0.10.0) provides no way for a client to query for supported 69 | /// protocol features 70 | /// making it impossible for the client to know what features it may use. 71 | /// As a workaround a user may set this property to the expected broker version and 72 | /// the client will automatically adjust its feature set accordingly if the 73 | /// ApiVersionRequest fails (or is disabled). 74 | /// The fallback broker version will be used for api.version.fallback.ms. Valid values are: 75 | /// 0.9.0, 0.8.2, 0.8.1, 0.8.0. 76 | #[serde(rename = "broker.version.fallback")] 77 | pub broker_version_fallback: KafkaVersion, 78 | 79 | /// The period of time in milliseconds after which we force a refresh of metadata 80 | /// even if we haven't seen any partition leadership changes to proactively discover any 81 | /// new brokers or partitions. 82 | #[serde(rename = "metadata.max.age.ms")] 83 | pub metadata_max_age: u64, 84 | 85 | /// Record metrics for client operations 86 | pub metrics: bool, 87 | 88 | /// Setting a value greater than zero will cause the client to resend any record 89 | /// whose send fails with a potentially transient error. 90 | pub retries: usize, 91 | 92 | /// The amount of time to wait before attempting to retry a failed request to a given topic 93 | /// partition. 94 | /// This avoids repeatedly sending requests in a tight loop under some failure scenarios. 95 | #[serde(rename = "retry.backoff.ms")] 96 | pub retry_backoff: u64, 97 | } 98 | 99 | impl Default for ClientConfig { 100 | fn default() -> Self { 101 | ClientConfig { 102 | hosts: vec![], 103 | client_id: None, 104 | max_connection_idle: DEFAULT_MAX_CONNECTION_IDLE_TIMEOUT_MILLIS, 105 | request_timeout: DEFAULT_REQUEST_TIMEOUT_MILLS, 106 | api_version_request: false, 107 | broker_version_fallback: KafkaVersion::default(), 108 | metadata_max_age: DEFAULT_METADATA_MAX_AGE_MILLS, 109 | metrics: false, 110 | retries: 0, 111 | retry_backoff: DEFAULT_RETRY_BACKOFF_MILLIS, 112 | } 113 | } 114 | } 115 | 116 | impl ClientConfig { 117 | /// Construct a `ClientConfig` from bootstrap servers of the Kafka cluster 118 | pub fn with_bootstrap_servers(hosts: I) -> Self 119 | where 120 | I: IntoIterator, 121 | { 122 | ClientConfig { 123 | hosts: hosts.into_iter().collect(), 124 | ..Default::default() 125 | } 126 | } 127 | 128 | /// Close idle connections after the number of milliseconds specified by 129 | /// this config. 130 | pub fn max_connection_idle(&self) -> Duration { 131 | Duration::from_millis(self.max_connection_idle) 132 | } 133 | 134 | /// The maximum amount of time the client will wait for the response of a 135 | /// request. 
136 | pub fn request_timeout(&self) -> Duration { 137 | Duration::from_millis(self.request_timeout) 138 | } 139 | 140 | /// The period of time in milliseconds after which we force a refresh of metadata 141 | /// even if we haven't seen any partition leadership changes to proactively discover any 142 | /// new brokers or partitions. 143 | pub fn metadata_max_age(&self) -> Duration { 144 | Duration::from_millis(self.metadata_max_age) 145 | } 146 | 147 | /// Construct a `Timer` 148 | pub fn timer(&self) -> Timer { 149 | wheel() 150 | .tick_duration(Duration::from_millis(DEFAULT_TIMER_TICK_MILLS)) 151 | .num_slots((self.request_timeout / DEFAULT_TIMER_TICK_MILLS).next_power_of_two() as usize) 152 | .build() 153 | } 154 | 155 | /// The amount of time to wait before attempting to retry a failed request to a given topic 156 | /// partition. 157 | pub fn retry_backoff(&self) -> Duration { 158 | Duration::from_millis(self.retry_backoff) 159 | } 160 | 161 | /// The retry strategy when request failed 162 | pub fn retry_strategy(&self) -> Vec { 163 | ExponentialBackoff::from_millis(self.retry_backoff) 164 | .map(jitter) 165 | .take(self.retries) 166 | .collect() 167 | } 168 | } 169 | 170 | #[cfg(test)] 171 | mod tests { 172 | extern crate serde_json; 173 | 174 | use super::*; 175 | 176 | #[test] 177 | fn test_properties() { 178 | let config = ClientConfig { 179 | retries: 3, 180 | ..Default::default() 181 | }; 182 | 183 | assert_eq!( 184 | config.max_connection_idle(), 185 | Duration::from_millis(DEFAULT_MAX_CONNECTION_IDLE_TIMEOUT_MILLIS) 186 | ); 187 | assert_eq!( 188 | config.request_timeout(), 189 | Duration::from_millis(DEFAULT_REQUEST_TIMEOUT_MILLS) 190 | ); 191 | assert_eq!( 192 | config.metadata_max_age(), 193 | Duration::from_millis(DEFAULT_METADATA_MAX_AGE_MILLS) 194 | ); 195 | assert_eq!(config.retry_strategy().len(), 3); 196 | } 197 | 198 | #[test] 199 | fn test_serialize() { 200 | let config = ClientConfig { 201 | hosts: vec!["127.0.0.1:9092".parse().unwrap()], 202 | client_id: Some("tokio-kafka".to_owned()), 203 | ..Default::default() 204 | }; 205 | let json = r#"{ 206 | "bootstrap.servers": [ 207 | "127.0.0.1:9092" 208 | ], 209 | "client.id": "tokio-kafka", 210 | "connection.max.idle.ms": 5000, 211 | "request.timeout.ms": 30000, 212 | "api.version.request": false, 213 | "broker.version.fallback": "0.9.0", 214 | "metadata.max.age.ms": 300000, 215 | "metrics": false, 216 | "retries": 0, 217 | "retry.backoff.ms": 100 218 | }"#; 219 | 220 | assert_eq!(serde_json::to_string_pretty(&config).unwrap(), json); 221 | assert_eq!(serde_json::from_str::(json).unwrap(), config); 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /src/client/metrics.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::ops::{Deref, DerefMut}; 3 | 4 | use prometheus::{CounterVec, GaugeVec, Registry}; 5 | 6 | use errors::Result; 7 | use network::{KafkaRequest, KafkaResponse}; 8 | use protocol::ApiKeys; 9 | 10 | pub const NAMESPACE_KAFKA: &str = "kafka"; 11 | pub const SUBSYSTEM_CLIENT: &str = "client"; 12 | 13 | pub struct Metrics { 14 | registry: Registry, 15 | 16 | send_requests: CounterVec, 17 | in_flight_requests: GaugeVec, 18 | received_responses: CounterVec, 19 | } 20 | 21 | impl Deref for Metrics { 22 | type Target = Registry; 23 | 24 | fn deref(&self) -> &Self::Target { 25 | &self.registry 26 | } 27 | } 28 | 29 | impl DerefMut for Metrics { 30 | fn deref_mut(&mut self) -> &mut 
Self::Target { 31 | &mut self.registry 32 | } 33 | } 34 | 35 | impl Metrics { 36 | pub fn new() -> Result { 37 | let registry = Registry::new(); 38 | 39 | let send_requests = CounterVec::new( 40 | opts!("api_requests", "sent API requests") 41 | .namespace(NAMESPACE_KAFKA.to_owned()) 42 | .subsystem(SUBSYSTEM_CLIENT.to_owned()), 43 | &["broker", "api_key", "api_version"], 44 | )?; 45 | 46 | let in_flight_requests = GaugeVec::new( 47 | opts!("in_flight_requests", "In flight API requests") 48 | .namespace(NAMESPACE_KAFKA.to_owned()) 49 | .subsystem(SUBSYSTEM_CLIENT.to_owned()), 50 | &["broker", "api_key"], 51 | )?; 52 | 53 | let received_responses = CounterVec::new( 54 | opts!("received_responses", "recieved API responses") 55 | .namespace(NAMESPACE_KAFKA.to_owned()) 56 | .subsystem(SUBSYSTEM_CLIENT.to_owned()), 57 | &["broker", "api_key"], 58 | )?; 59 | 60 | registry.register(Box::new(send_requests.clone()))?; 61 | registry.register(Box::new(in_flight_requests.clone()))?; 62 | registry.register(Box::new(received_responses.clone()))?; 63 | 64 | Ok(Metrics { 65 | registry, 66 | send_requests, 67 | in_flight_requests, 68 | received_responses, 69 | }) 70 | } 71 | 72 | pub fn send_request(&self, addr: &SocketAddr, request: &KafkaRequest) { 73 | let header = request.header(); 74 | let labels = [ 75 | &addr.to_string(), 76 | ApiKeys::from(header.api_key).name(), 77 | &header.api_version.to_string(), 78 | ]; 79 | 80 | self.send_requests.with_label_values(&labels).inc(); 81 | self.in_flight_requests.with_label_values(&labels[..2]).inc(); 82 | } 83 | 84 | pub fn received_response(&self, addr: &SocketAddr, response: &KafkaResponse) { 85 | let labels = [&addr.to_string(), response.api_key().name()]; 86 | 87 | self.received_responses.with_label_values(&labels).inc(); 88 | self.in_flight_requests.with_label_values(&labels).dec(); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/client/middleware.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::collections::HashMap; 3 | use std::error::Error as StdError; 4 | use std::net::SocketAddr; 5 | use std::rc::Rc; 6 | use std::time::Duration; 7 | use tokio_timer::{self as timer, Timer}; 8 | 9 | use futures::Future; 10 | use tokio_service::Service; 11 | 12 | use client::{StaticBoxFuture, ToStaticBoxFuture}; 13 | 14 | #[derive(Clone)] 15 | pub struct InFlightMiddleware { 16 | upstream: S, 17 | state: Rc>, 18 | } 19 | 20 | struct State { 21 | requests: HashMap, 22 | } 23 | 24 | impl State { 25 | pub fn send_request(&mut self, addr: SocketAddr) { 26 | let requests = self.requests.entry(addr).or_insert(0); 27 | 28 | if let Some(new) = requests.checked_add(1) { 29 | *requests = new; 30 | } 31 | } 32 | 33 | pub fn received_response(&mut self, addr: SocketAddr) { 34 | let requests = self.requests.entry(addr).or_insert(0); 35 | 36 | if let Some(new) = requests.checked_sub(1) { 37 | *requests = new; 38 | } 39 | } 40 | } 41 | 42 | impl InFlightMiddleware { 43 | pub fn new(upstream: S) -> InFlightMiddleware { 44 | InFlightMiddleware { 45 | upstream, 46 | state: Rc::new(RefCell::new(State { 47 | requests: HashMap::new(), 48 | })), 49 | } 50 | } 51 | 52 | pub fn in_flight_requests(&self, addr: &SocketAddr) -> Option { 53 | self.state.borrow().requests.get(addr).cloned() 54 | } 55 | } 56 | 57 | impl Service for InFlightMiddleware 58 | where 59 | Self: 'static, 60 | S: Service, 61 | S::Request: WithAddr, 62 | S::Error: StdError, 63 | { 64 | type 
Request = S::Request; 65 | type Response = S::Response; 66 | type Error = S::Error; 67 | type Future = StaticBoxFuture; 68 | 69 | fn call(&self, request: Self::Request) -> Self::Future { 70 | let addr = request.addr(); 71 | let state = self.state.clone(); 72 | 73 | state.borrow_mut().send_request(addr); 74 | 75 | self.upstream 76 | .call(request) 77 | .then(move |response| { 78 | state.borrow_mut().received_response(addr); 79 | 80 | response 81 | }) 82 | .from_err() 83 | .static_boxed() 84 | } 85 | } 86 | 87 | pub trait WithAddr { 88 | fn addr(&self) -> SocketAddr; 89 | } 90 | 91 | impl WithAddr for (SocketAddr, T) { 92 | fn addr(&self) -> SocketAddr { 93 | self.0 94 | } 95 | } 96 | 97 | /// Abort requests that are taking too long 98 | #[derive(Clone)] 99 | pub struct Timeout { 100 | upstream: S, 101 | timer: Timer, 102 | duration: Duration, 103 | } 104 | 105 | impl Timeout { 106 | /// Crate a new `Timeout` with the given `upstream` service. 107 | /// 108 | /// Requests will be limited to `duration` and aborted once the limit has 109 | /// been reached. 110 | pub fn new(upstream: S, timer: Timer, duration: Duration) -> Timeout { 111 | Timeout { 112 | upstream, 113 | duration, 114 | timer, 115 | } 116 | } 117 | } 118 | 119 | impl Service for Timeout 120 | where 121 | S: Service, 122 | E: From>, 123 | { 124 | type Request = S::Request; 125 | type Response = S::Response; 126 | type Error = S::Error; 127 | type Future = timer::Timeout; 128 | 129 | fn call(&self, request: Self::Request) -> Self::Future { 130 | let resp = self.upstream.call(request); 131 | self.timer.timeout(resp, self.duration) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/client/mock.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused_variables)] 2 | 3 | use std::borrow::Cow; 4 | use std::cell::RefCell; 5 | use std::collections::HashMap; 6 | use std::rc::Rc; 7 | use std::time::Duration; 8 | use std::usize; 9 | 10 | use bytes::Bytes; 11 | use typemap::{Key, TypeMap}; 12 | 13 | use tokio_core::reactor::Handle; 14 | 15 | use client::{Broker, BrokerRef, Client, Cluster, ConsumerGroup, ConsumerGroupAssignment, ConsumerGroupProtocol, 16 | FetchRecords, Generation, GetMetadata, GroupCoordinator, Heartbeat, JoinGroup, LeaveGroup, ListOffsets, 17 | LoadMetadata, Metadata, OffsetCommit, OffsetFetch, PartitionData, ProduceRecords, SyncGroup, 18 | ToStaticBoxFuture}; 19 | use consumer::Assignment; 20 | use errors::{ErrorKind, Result}; 21 | use network::{OffsetAndMetadata, TopicPartition}; 22 | use protocol::{FetchOffset, KafkaCode, MessageSet, RequiredAcks, Schema}; 23 | 24 | #[derive(Clone)] 25 | pub struct MockClient<'a> { 26 | handle: Option, 27 | metadata: Rc, 28 | group_coordinators: HashMap, Broker>, 29 | consumer_groups: HashMap, ConsumerGroup>, 30 | member_assignments: HashMap, Assignment<'a>>, 31 | future_responses: Rc>, 32 | } 33 | 34 | impl<'a> MockClient<'a> { 35 | pub fn new() -> Self { 36 | MockClient::with_metadata(Metadata::default()) 37 | } 38 | 39 | pub fn with_metadata(metadata: Metadata) -> Self { 40 | MockClient { 41 | handle: None, 42 | metadata: Rc::new(metadata), 43 | group_coordinators: HashMap::new(), 44 | consumer_groups: HashMap::new(), 45 | member_assignments: HashMap::new(), 46 | future_responses: Rc::new(RefCell::new(TypeMap::custom())), 47 | } 48 | } 49 | 50 | pub fn with_handle(mut self, handle: Handle) -> Self { 51 | self.handle = Some(handle); 52 | self 53 | } 54 | 55 | pub fn 
with_group_coordinator(mut self, group_id: Cow<'a, str>, broker: Broker) -> Self { 56 | self.group_coordinators.insert(group_id, broker); 57 | self 58 | } 59 | 60 | pub fn with_consumer_group(mut self, consumer_group: ConsumerGroup) -> Self { 61 | let group_id = consumer_group.group_id.clone().into(); 62 | 63 | self.consumer_groups.insert(group_id, consumer_group); 64 | self 65 | } 66 | 67 | pub fn with_group_member_as_leader( 68 | mut self, 69 | member_id: Cow<'a, str>, 70 | partitions: &[TopicPartition<'a>], 71 | user_data: Option>, 72 | ) -> Self { 73 | self.member_assignments.insert( 74 | member_id, 75 | Assignment { 76 | partitions: partitions.to_vec(), 77 | user_data, 78 | }, 79 | ); 80 | self 81 | } 82 | 83 | pub fn with_group_member_as_follower(mut self, member_id: Cow<'a, str>) -> Self { 84 | self.member_assignments.insert(member_id, Assignment::default()); 85 | self 86 | } 87 | 88 | pub fn with_future_response(self, callback: F) -> Self 89 | where 90 | T: Key, 91 | F: 'static, 92 | { 93 | self.future_responses.borrow_mut().insert::(callback); 94 | self 95 | } 96 | } 97 | 98 | impl Key for GroupCoordinator { 99 | type Value = Box Result>; 100 | } 101 | 102 | impl<'a> Client<'a> for MockClient<'a> 103 | where 104 | Self: 'static, 105 | { 106 | fn handle(&self) -> &Handle { 107 | &self.handle.as_ref().expect("should attach event loop with `with_core`") 108 | } 109 | 110 | fn metadata(&self) -> GetMetadata { 111 | GetMetadata::Loaded(self.metadata.clone()) 112 | } 113 | 114 | fn retry_strategy(&self) -> Vec { 115 | unimplemented!() 116 | } 117 | 118 | fn produce_records( 119 | &self, 120 | acks: RequiredAcks, 121 | timeout: Duration, 122 | topic_partition: TopicPartition<'a>, 123 | records: Vec>, 124 | ) -> ProduceRecords { 125 | unimplemented!() 126 | } 127 | 128 | fn fetch_records( 129 | &self, 130 | fetch_max_wait: Duration, 131 | fetch_min_bytes: usize, 132 | fetch_max_bytes: usize, 133 | partitions: Vec<(TopicPartition<'a>, PartitionData)>, 134 | ) -> FetchRecords { 135 | unimplemented!() 136 | } 137 | 138 | fn list_offsets(&self, partitions: I) -> ListOffsets 139 | where 140 | I: IntoIterator, FetchOffset)>, 141 | { 142 | unimplemented!() 143 | } 144 | 145 | fn load_metadata(&mut self) -> LoadMetadata<'a> { 146 | unimplemented!() 147 | } 148 | 149 | fn offset_commit( 150 | &self, 151 | coordinator: Option, 152 | generation: Option, 153 | retention_time: Option, 154 | offsets: I, 155 | ) -> OffsetCommit 156 | where 157 | I: IntoIterator, OffsetAndMetadata)>, 158 | { 159 | unimplemented!() 160 | } 161 | 162 | fn offset_fetch(&self, coordinator: BrokerRef, generation: Generation, partitions: I) -> OffsetFetch 163 | where 164 | I: IntoIterator>, 165 | { 166 | unimplemented!() 167 | } 168 | 169 | fn group_coordinator(&self, group_id: Cow<'a, str>) -> GroupCoordinator { 170 | if let Some(callback) = self.future_responses.borrow_mut().remove::() { 171 | callback(String::from(group_id)).static_boxed() 172 | } else { 173 | let metadata = self.metadata.clone(); 174 | 175 | self.group_coordinators 176 | .get(&group_id) 177 | .cloned() 178 | .ok_or_else(|| ErrorKind::KafkaError(KafkaCode::CoordinatorNotAvailable).into()) 179 | .static_boxed() 180 | } 181 | } 182 | 183 | fn join_group( 184 | &self, 185 | coordinator: BrokerRef, 186 | group_id: Cow<'a, str>, 187 | session_timeout: i32, 188 | rebalance_timeout: i32, 189 | member_id: Cow<'a, str>, 190 | protocol_type: Cow<'a, str>, 191 | group_protocols: Vec>, 192 | ) -> JoinGroup { 193 | if self.metadata.find_broker(coordinator).is_none() { 
194 | Err(ErrorKind::KafkaError(KafkaCode::CoordinatorNotAvailable).into()) 195 | } else if let Some(consumer_group) = self.consumer_groups.get(&group_id) { 196 | if group_protocols 197 | .iter() 198 | .all(|protocol| protocol.protocol_name != consumer_group.protocol) 199 | { 200 | Err(ErrorKind::KafkaError(KafkaCode::InconsistentGroupProtocol).into()) 201 | } else { 202 | Ok(consumer_group.clone()) 203 | } 204 | } else { 205 | Err(ErrorKind::KafkaError(KafkaCode::NotCoordinator).into()) 206 | }.static_boxed() 207 | } 208 | 209 | fn heartbeat(&self, coordinator: BrokerRef, generation: Generation) -> Heartbeat { 210 | unimplemented!() 211 | } 212 | 213 | fn leave_group(&self, coordinator: BrokerRef, generation: Generation) -> LeaveGroup { 214 | unimplemented!() 215 | } 216 | 217 | fn sync_group( 218 | &self, 219 | coordinator: BrokerRef, 220 | generation: Generation, 221 | group_assignment: Option>>, 222 | ) -> SyncGroup { 223 | let group_id: Cow<'a, str> = generation.group_id.clone().into(); 224 | let member_id: Cow<'a, str> = generation.member_id.clone().into(); 225 | 226 | if self.metadata.find_broker(coordinator).is_none() { 227 | Err(ErrorKind::KafkaError(KafkaCode::CoordinatorNotAvailable).into()) 228 | } else if let Some(consumer_group) = self.consumer_groups.get(&group_id) { 229 | if consumer_group.generation_id != generation.generation_id { 230 | Err(ErrorKind::KafkaError(KafkaCode::IllegalGeneration).into()) 231 | } else if let Some(assignment) = self.member_assignments.get(&member_id) { 232 | Ok(Bytes::from(Schema::serialize(assignment).unwrap())) 233 | } else { 234 | Err(ErrorKind::KafkaError(KafkaCode::UnknownMemberId).into()) 235 | } 236 | } else { 237 | Err(ErrorKind::KafkaError(KafkaCode::NotCoordinator).into()) 238 | }.static_boxed() 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /src/client/mod.rs: -------------------------------------------------------------------------------- 1 | mod builder; 2 | mod client; 3 | mod cluster; 4 | mod config; 5 | mod metadata; 6 | mod metrics; 7 | mod middleware; 8 | mod record; 9 | mod service; 10 | mod version; 11 | 12 | #[cfg(test)] 13 | mod mock; 14 | 15 | pub use self::builder::ClientBuilder; 16 | pub use self::client::{Client, ConsumerGroup, ConsumerGroupAssignment, ConsumerGroupMember, ConsumerGroupProtocol, 17 | FetchRecords, FetchedRecords, Generation, GetMetadata, GroupCoordinator, Heartbeat, JoinGroup, 18 | KafkaClient, LeaveGroup, ListOffsets, ListedOffset, LoadMetadata, OffsetCommit, OffsetFetch, 19 | PartitionData, ProduceRecords, StaticBoxFuture, SyncGroup, ToStaticBoxFuture}; 20 | pub use self::cluster::{Broker, BrokerRef, Cluster, PartitionInfo}; 21 | pub use self::config::{ClientConfig, DEFAULT_MAX_CONNECTION_IDLE_TIMEOUT_MILLIS, DEFAULT_METADATA_MAX_AGE_MILLS, 22 | DEFAULT_REQUEST_TIMEOUT_MILLS, DEFAULT_RETRY_BACKOFF_MILLIS}; 23 | pub use self::metadata::{Metadata, TopicPartitions}; 24 | pub use self::metrics::Metrics; 25 | pub use self::middleware::InFlightMiddleware; 26 | pub use self::record::{PartitionRecord, TopicRecord}; 27 | pub use self::service::{FutureResponse, KafkaService}; 28 | pub use self::version::KafkaVersion; 29 | 30 | #[cfg(test)] 31 | pub use self::mock::MockClient; 32 | -------------------------------------------------------------------------------- /src/client/record.rs: -------------------------------------------------------------------------------- 1 | use std::hash::Hash; 2 | 3 | use protocol::{PartitionId, Timestamp}; 4 | 5 | /// A 
key/value pair to be sent to Kafka topic. 6 | /// 7 | /// This consists of an optional partition number, and an optional key and value. 8 | #[derive(Clone, Debug)] 9 | pub struct TopicRecord { 10 | /// The partition to which the record will be sent (or `None` if no partition was specified) 11 | pub partition_id: Option, 12 | /// The key (or `None` if no key is specified) 13 | pub key: Option, 14 | /// The value 15 | pub value: Option, 16 | /// The timestamp 17 | pub timestamp: Option, 18 | } 19 | 20 | impl TopicRecord 21 | where 22 | K: Hash, 23 | { 24 | /// Creates a record to be sent to a specified topic with no value 25 | pub fn from_key(key: K) -> Self { 26 | TopicRecord { 27 | partition_id: None, 28 | key: Some(key), 29 | value: None, 30 | timestamp: None, 31 | } 32 | } 33 | } 34 | 35 | impl TopicRecord<(), V> { 36 | /// Creates a record to be sent to a specified topic with no key 37 | pub fn from_value(value: V) -> Self { 38 | TopicRecord { 39 | partition_id: None, 40 | key: None, 41 | value: Some(value), 42 | timestamp: None, 43 | } 44 | } 45 | } 46 | 47 | impl TopicRecord 48 | where 49 | K: Hash, 50 | { 51 | /// Creates a record to be sent to a specified topic 52 | pub fn from_key_value(key: K, value: V) -> Self { 53 | TopicRecord { 54 | partition_id: None, 55 | key: Some(key), 56 | value: Some(value), 57 | timestamp: None, 58 | } 59 | } 60 | 61 | /// Creates a record with partition to be sent 62 | pub fn with_partition(mut self, partition_id: PartitionId) -> Self { 63 | self.partition_id = Some(partition_id); 64 | self 65 | } 66 | 67 | /// Creates a record with a specified timestamp to be sent 68 | pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { 69 | self.timestamp = Some(timestamp); 70 | self 71 | } 72 | 73 | fn from_partition_record(partition_id: PartitionId, record: PartitionRecord) -> TopicRecord { 74 | TopicRecord { 75 | partition_id: Some(partition_id), 76 | key: record.key, 77 | value: record.value, 78 | timestamp: record.timestamp, 79 | } 80 | } 81 | } 82 | 83 | /// A key/value pair to be sent to Kafka partition. 
84 | #[derive(Clone, Debug)] 85 | pub struct PartitionRecord { 86 | /// The key (or `None` if no key is specified) 87 | pub key: Option, 88 | /// The value 89 | pub value: Option, 90 | /// The timestamp 91 | pub timestamp: Option, 92 | } 93 | 94 | impl PartitionRecord 95 | where 96 | K: Hash, 97 | { 98 | /// Creates a record to be sent to a specified topic and partition with no value 99 | pub fn from_key(key: K) -> Self { 100 | PartitionRecord { 101 | key: Some(key), 102 | value: None, 103 | timestamp: None, 104 | } 105 | } 106 | } 107 | 108 | impl PartitionRecord<(), V> { 109 | /// Creates a record to be sent to a specified topic and partition with no key 110 | pub fn from_value(value: V) -> Self { 111 | PartitionRecord { 112 | key: None, 113 | value: Some(value), 114 | timestamp: None, 115 | } 116 | } 117 | } 118 | 119 | impl PartitionRecord 120 | where 121 | K: Hash, 122 | { 123 | /// Creates a record to be sent to a specified topic and partition 124 | pub fn from_key_value(key: K, value: V) -> Self { 125 | PartitionRecord { 126 | key: Some(key), 127 | value: Some(value), 128 | timestamp: None, 129 | } 130 | } 131 | 132 | /// Creates a record with a specified timestamp to be sent 133 | pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { 134 | self.timestamp = Some(timestamp); 135 | self 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/client/service.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::fmt::Debug; 3 | use std::io; 4 | use std::net::SocketAddr; 5 | use std::rc::Rc; 6 | use std::time::Duration; 7 | 8 | use bytes::BytesMut; 9 | 10 | use futures::future::Future; 11 | use futures::unsync::oneshot; 12 | use futures::{Async, Poll, Stream}; 13 | use tokio_core::reactor::Handle; 14 | use tokio_io::{AsyncRead, AsyncWrite}; 15 | use tokio_proto::BindClient; 16 | use tokio_proto::streaming::pipeline::ClientProto; 17 | use tokio_proto::streaming::{Body, Message}; 18 | use tokio_proto::util::client_proxy::ClientProxy; 19 | use tokio_service::Service; 20 | use ns_router::{AutoName, Router}; 21 | 22 | use client::{Metrics, StaticBoxFuture, ToStaticBoxFuture}; 23 | use errors::Error; 24 | use network::{ConnectionId, KafkaCodec, KafkaConnection, KafkaConnector, KafkaRequest, KafkaResponse, Pool, Pooled}; 25 | 26 | #[derive(Debug, Default)] 27 | struct State { 28 | connection_id: ConnectionId, 29 | } 30 | 31 | impl State { 32 | pub fn next_connection_id(&mut self) -> ConnectionId { 33 | self.connection_id = self.connection_id.wrapping_add(1); 34 | self.connection_id - 1 35 | } 36 | } 37 | 38 | pub struct KafkaService<'a> { 39 | handle: Handle, 40 | pool: Pool>, 41 | connector: KafkaConnector, 42 | metrics: Option>, 43 | state: Rc>, 44 | } 45 | 46 | impl<'a> KafkaService<'a> { 47 | pub fn new( 48 | handle: Handle, 49 | router: Rc, 50 | max_connection_idle: Duration, 51 | metrics: Option>, 52 | ) -> Self { 53 | KafkaService { 54 | handle: handle.clone(), 55 | pool: Pool::new(max_connection_idle), 56 | connector: KafkaConnector::new(handle, router), 57 | metrics, 58 | state: Rc::new(RefCell::new(State::default())), 59 | } 60 | } 61 | } 62 | 63 | impl<'a> Service for KafkaService<'a> 64 | where 65 | Self: 'static, 66 | { 67 | type Request = (SocketAddr, KafkaRequest<'a>); 68 | type Response = KafkaResponse; 69 | type Error = Error; 70 | type Future = FutureResponse; 71 | 72 | fn call(&self, req: Self::Request) -> Self::Future { 73 | let (addr, request) 
= req; 74 | 75 | self.metrics 76 | .as_ref() 77 | .map(|metrics| metrics.send_request(&addr, &request)); 78 | 79 | let checkout = self.pool.checkout(addr); 80 | let connect = { 81 | let handle = self.handle.clone(); 82 | let connection_id = self.state.borrow_mut().next_connection_id(); 83 | let pool = self.pool.clone(); 84 | 85 | self.connector.tcp(AutoName::SocketAddr(addr)).map(move |io| { 86 | let (tx, rx) = oneshot::channel(); 87 | let client = RemoteClient { 88 | connection_id, 89 | client_rx: RefCell::new(Some(rx)), 90 | }.bind_client(&handle, io); 91 | let pooled = pool.pooled(addr, client); 92 | drop(tx.send(pooled.clone())); 93 | pooled 94 | }) 95 | }; 96 | 97 | let race = checkout 98 | .select(connect) 99 | .map(|(client, _work)| client) 100 | .map_err(|(err, _work)| { 101 | warn!("fail to checkout connection, {}", err); 102 | // the Pool Checkout cannot error, so the only error 103 | // is from the Connector 104 | // XXX: should wait on the Checkout? Problem is 105 | // that if the connector is failing, it may be that we 106 | // never had a pooled stream at all 107 | err 108 | }); 109 | 110 | let metrics = self.metrics.clone(); 111 | 112 | race.and_then(move |client| client.call(Message::WithoutBody(request))) 113 | .map(|msg| { 114 | debug!("received message: {:?}", msg); 115 | 116 | match msg { 117 | Message::WithoutBody(res) | Message::WithBody(res, _) => res, 118 | } 119 | }) 120 | .map(move |response| { 121 | metrics.map(|metrics| metrics.received_response(&addr, &response)); 122 | 123 | response 124 | }) 125 | .from_err() 126 | .static_boxed() 127 | } 128 | } 129 | 130 | pub type FutureResponse = StaticBoxFuture; 131 | 132 | type TokioBody = Body; 133 | 134 | pub struct KafkaBody(TokioBody); 135 | 136 | impl Stream for KafkaBody { 137 | type Item = BytesMut; 138 | type Error = io::Error; 139 | 140 | fn poll(&mut self) -> Poll, io::Error> { 141 | self.0.poll() 142 | } 143 | } 144 | 145 | type TokioClient<'a> = ClientProxy, KafkaBody>, Message, io::Error>; 146 | 147 | type PooledClient<'a> = Pooled>; 148 | 149 | struct RemoteClient<'a> { 150 | connection_id: u32, 151 | client_rx: RefCell>>>, 152 | } 153 | 154 | impl<'a, T> ClientProto for RemoteClient<'a> 155 | where 156 | T: AsyncRead + AsyncWrite + Debug + 'static, 157 | Self: 'static, 158 | { 159 | type Request = KafkaRequest<'a>; 160 | type RequestBody = ::Item; 161 | type Response = KafkaResponse; 162 | type ResponseBody = BytesMut; 163 | type Error = io::Error; 164 | type Transport = KafkaConnection<'a, T, PooledClient<'a>>; 165 | type BindTransport = BindingClient<'a, T>; 166 | 167 | fn bind_transport(&self, io: T) -> Self::BindTransport { 168 | trace!("connection #{} bind transport for {:?}", self.connection_id, io); 169 | 170 | BindingClient { 171 | connection_id: self.connection_id, 172 | rx: self.client_rx.borrow_mut().take().expect("client_rx was lost"), 173 | io: Some(io), 174 | } 175 | } 176 | } 177 | 178 | struct BindingClient<'a, T> { 179 | connection_id: u32, 180 | rx: oneshot::Receiver>, 181 | io: Option, 182 | } 183 | 184 | impl<'a, T> Future for BindingClient<'a, T> 185 | where 186 | T: AsyncRead + AsyncWrite + Debug + 'static, 187 | { 188 | type Item = KafkaConnection<'a, T, PooledClient<'a>>; 189 | type Error = io::Error; 190 | 191 | fn poll(&mut self) -> Poll { 192 | match self.rx.poll() { 193 | Ok(Async::Ready(client)) => { 194 | trace!("got connection #{} with {:?}", self.connection_id, client); 195 | 196 | Ok(Async::Ready(KafkaConnection::new( 197 | self.connection_id, 198 | 
self.io.take().expect("binding client io lost"), 199 | KafkaCodec::new(), 200 | client, 201 | ))) 202 | } 203 | Ok(Async::NotReady) => Ok(Async::NotReady), 204 | Err(_canceled) => unreachable!(), 205 | } 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/compression/gzip.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Read, Write}; 2 | 3 | use flate2::Compression; 4 | use flate2::read::GzDecoder; 5 | use flate2::write::GzEncoder; 6 | 7 | use errors::Result; 8 | 9 | pub fn compress(src: &[u8]) -> Result> { 10 | let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); 11 | encoder.write_all(src)?; 12 | Ok(encoder.finish()?) 13 | } 14 | 15 | pub fn uncompress(src: T) -> Result> { 16 | let mut decoder = GzDecoder::new(src); 17 | let mut buffer: Vec = Vec::new(); 18 | decoder.read_to_end(&mut buffer)?; 19 | Ok(buffer) 20 | } 21 | 22 | #[cfg(test)] 23 | mod tests { 24 | use super::*; 25 | 26 | #[test] 27 | fn test_uncompress() { 28 | use std::io::Cursor; 29 | // The vector should uncompress to "test" 30 | let msg: Vec = vec![ 31 | 31, 139, 8, 0, 192, 248, 79, 85, 2, 255, 43, 73, 45, 46, 1, 0, 12, 126, 127, 216, 4, 0, 0, 0 32 | ]; 33 | let uncomp_msg = String::from_utf8(uncompress(Cursor::new(msg)).unwrap()).unwrap(); 34 | assert_eq!(&uncomp_msg[..], "test"); 35 | } 36 | 37 | #[test] 38 | #[should_panic] 39 | fn test_uncompress_panic() { 40 | use std::io::Cursor; 41 | let msg: Vec = vec![12, 42, 84, 104, 105, 115, 32, 105, 115, 32, 116, 101, 115, 116]; 42 | let uncomp_msg = String::from_utf8(uncompress(Cursor::new(msg)).unwrap()).unwrap(); 43 | assert_eq!(&uncomp_msg[..], "This is test"); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/compression/mod.rs: -------------------------------------------------------------------------------- 1 | use std::io::prelude::*; 2 | use std::mem; 3 | use std::str::FromStr; 4 | 5 | use errors::{Error, ErrorKind, Result}; 6 | use protocol::ApiVersion; 7 | 8 | #[cfg(feature = "gzip")] 9 | mod gzip; 10 | 11 | #[cfg(feature = "snappy")] 12 | mod snappy; 13 | 14 | #[cfg(feature = "lz4")] 15 | mod lz4; 16 | 17 | /// The compression type to use 18 | #[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] 19 | #[serde(rename_all = "lowercase")] 20 | #[repr(i8)] 21 | pub enum Compression { 22 | None = 0, 23 | 24 | #[cfg(feature = "gzip")] 25 | GZIP = 1, 26 | 27 | #[cfg(feature = "snappy")] 28 | Snappy = 2, 29 | 30 | #[cfg(feature = "lz4")] 31 | LZ4 = 3, 32 | } 33 | 34 | impl Default for Compression { 35 | fn default() -> Self { 36 | Compression::None 37 | } 38 | } 39 | 40 | impl From for Compression { 41 | fn from(v: i8) -> Self { 42 | unsafe { mem::transmute(v) } 43 | } 44 | } 45 | 46 | impl FromStr for Compression { 47 | type Err = Error; 48 | 49 | fn from_str(s: &str) -> Result { 50 | match s.to_lowercase().as_str() { 51 | "none" => Ok(Compression::None), 52 | 53 | #[cfg(feature = "gzip")] 54 | "gzip" => Ok(Compression::GZIP), 55 | 56 | #[cfg(feature = "snappy")] 57 | "snappy" => Ok(Compression::Snappy), 58 | 59 | #[cfg(feature = "lz4")] 60 | "lz4" => Ok(Compression::LZ4), 61 | 62 | _ => bail!(ErrorKind::ParseError(format!("unknown compression: {}", s))), 63 | } 64 | } 65 | } 66 | 67 | impl Compression { 68 | pub fn compress(&self, api_version: ApiVersion, src: &[u8]) -> Result> { 69 | match *self { 70 | Compression::None => Ok(src.to_vec()), 71 | 72 | #[cfg(feature = "gzip")] 73 | 
Compression::GZIP => gzip::compress(src), 74 | 75 | #[cfg(feature = "snappy")] 76 | Compression::Snappy => snappy::compress(src), 77 | 78 | #[cfg(feature = "lz4")] 79 | Compression::LZ4 => { 80 | let mut compressed = Vec::new(); 81 | { 82 | let mut writer = 83 | lz4::Lz4Writer::new(&mut compressed, api_version < 2, lz4::BLOCKSIZE_64KB, true, false)?; 84 | writer.write_all(src)?; 85 | writer.close()?; 86 | } 87 | Ok(compressed) 88 | } 89 | } 90 | } 91 | 92 | pub fn decompress(&self, src: &[u8]) -> Result>> { 93 | let mut result = Vec::new(); 94 | match *self { 95 | Compression::None => Ok(None), 96 | 97 | #[cfg(feature = "snappy")] 98 | Compression::Snappy => { 99 | snappy::uncompress_framed_to(src, &mut result)?; 100 | Ok(Some(result)) 101 | } 102 | _ => unimplemented!() 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/consumer/consumer.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::cell::RefCell; 3 | use std::hash::Hash; 4 | use std::rc::Rc; 5 | use std::ops::Deref; 6 | 7 | use futures::{Future, Stream}; 8 | use tokio_core::reactor::Handle; 9 | 10 | use client::{Client, Cluster, KafkaClient, StaticBoxFuture, ToStaticBoxFuture}; 11 | use consumer::{ConsumerBuilder, ConsumerConfig, ConsumerCoordinator, Fetcher, SubscribedTopics, Subscriptions}; 12 | use errors::{Error, ErrorKind}; 13 | use protocol::{MessageTimestamp, Offset, PartitionId}; 14 | use serialization::Deserializer; 15 | 16 | /// A trait for consuming records from a Kafka cluster. 17 | pub trait Consumer<'a> { 18 | /// The type of key 19 | type Key: Hash; 20 | /// The type of value 21 | type Value; 22 | /// The type of `Stream` to receive records from topics 23 | type Topics: Stream, Error = Error>; 24 | 25 | /// Subscribe to the given list of topics to get dynamically assigned 26 | /// partitions. 27 | fn subscribe(&mut self, topic_names: I) -> Subscribe 28 | where 29 | I: IntoIterator, 30 | S: Into; 31 | } 32 | 33 | /// A key/value pair to be received from Kafka. 34 | /// 35 | /// This also consists of a topic name and a partition number from which the record is being 36 | /// received, an offset that points to the record in a Kafka partition, and a timestamp as marked 37 | /// by the corresponding `ConsumerRecord`. 38 | #[derive(Clone, Debug)] 39 | pub struct ConsumerRecord<'a, K, V> { 40 | /// The topic this record is received from 41 | pub topic_name: Cow<'a, str>, 42 | /// The partition from which this record is received 43 | pub partition_id: PartitionId, 44 | /// The position of this record in the corresponding Kafka partition. 45 | pub offset: Offset, 46 | /// The key (or None if no key is specified) 47 | pub key: Option, 48 | /// The value 49 | pub value: Option, 50 | /// The timestamp of this record 51 | pub timestamp: Option, 52 | } 53 | 54 | pub type Subscribe = StaticBoxFuture; 55 | 56 | /// A Kafka consumer that consumes records from a Kafka cluster. 
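///
/// A minimal usage sketch (illustrative only): it assumes an already-built `KafkaClient`
/// named `client`, and that `ConsumerConfig` and `StringDeserializer` provide usable
/// `Default` values.
///
/// ```ignore
/// let mut consumer = KafkaConsumer::new(
///     client,
///     ConsumerConfig::default(),
///     StringDeserializer::default(), // key deserializer
///     StringDeserializer::default(), // value deserializer
/// );
///
/// // `subscribe` resolves to a `Stream` of `ConsumerRecord`s for the given topics.
/// let work = consumer.subscribe(vec!["my-topic"]).and_then(|records| {
///     records.for_each(|record| {
///         println!("{}#{} @ {}", record.topic_name, record.partition_id, record.offset);
///         Ok(())
///     })
/// });
/// ```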
57 | #[derive(Clone)] 58 | pub struct KafkaConsumer<'a, K, V> { 59 | inner: Rc>, 60 | } 61 | 62 | struct Inner<'a, K, V> { 63 | client: KafkaClient<'a>, 64 | config: ConsumerConfig, 65 | key_deserializer: K, 66 | value_deserializer: V, 67 | } 68 | 69 | impl<'a, K, V> Deref for KafkaConsumer<'a, K, V> { 70 | type Target = KafkaClient<'a>; 71 | 72 | fn deref(&self) -> &Self::Target { 73 | &self.inner.client 74 | } 75 | } 76 | 77 | impl<'a, K, V> KafkaConsumer<'a, K, V> { 78 | /// Construct a `KafkaConsumer` 79 | pub fn new(client: KafkaClient<'a>, config: ConsumerConfig, key_deserializer: K, value_deserializer: V) -> Self { 80 | KafkaConsumer { 81 | inner: Rc::new(Inner { 82 | client, 83 | config, 84 | key_deserializer, 85 | value_deserializer, 86 | }), 87 | } 88 | } 89 | 90 | /// Construct a `ConsumerBuilder` from ConsumerConfig 91 | pub fn with_config(config: ConsumerConfig, handle: Handle) -> ConsumerBuilder<'a, K, V> { 92 | ConsumerBuilder::with_config(config, handle) 93 | } 94 | 95 | /// Construct a `ConsumerBuilder` from bootstrap servers of the Kafka 96 | /// cluster 97 | pub fn with_bootstrap_servers(hosts: I, handle: Handle) -> ConsumerBuilder<'a, K, V> 98 | where 99 | I: IntoIterator, 100 | { 101 | ConsumerBuilder::with_bootstrap_servers(hosts, handle) 102 | } 103 | 104 | pub fn config(&self) -> &ConsumerConfig { 105 | &self.inner.config 106 | } 107 | } 108 | 109 | impl<'a, K, V> KafkaConsumer<'a, K, V> 110 | where 111 | K: Deserializer + Clone, 112 | { 113 | pub fn key_deserializer(&self) -> K { 114 | self.inner.key_deserializer.clone() 115 | } 116 | } 117 | 118 | impl<'a, K, V> KafkaConsumer<'a, K, V> 119 | where 120 | V: Deserializer + Clone, 121 | { 122 | pub fn value_deserializer(&self) -> V { 123 | self.inner.value_deserializer.clone() 124 | } 125 | } 126 | 127 | impl<'a, K, V> Consumer<'a> for KafkaConsumer<'a, K, V> 128 | where 129 | K: Deserializer + Clone, 130 | K::Item: Hash, 131 | V: Deserializer + Clone, 132 | Self: 'static, 133 | { 134 | type Key = K::Item; 135 | type Value = V::Item; 136 | type Topics = SubscribedTopics<'a, K, V>; 137 | 138 | fn subscribe(&mut self, topic_names: I) -> Subscribe 139 | where 140 | I: IntoIterator, 141 | S: Into, 142 | { 143 | let topic_names: Vec = topic_names.into_iter().map(|s| s.into()).collect(); 144 | let inner = self.inner.clone(); 145 | let default_reset_strategy = self.inner.config.auto_offset_reset; 146 | let group_id = self.inner.config.group_id.clone(); 147 | let session_timeout = self.inner.config.session_timeout(); 148 | let rebalance_timeout = self.inner.config.rebalance_timeout(); 149 | let heartbeat_interval = self.inner.config.heartbeat_interval(); 150 | let fetch_min_bytes = self.inner.config.fetch_min_bytes; 151 | let fetch_max_bytes = self.inner.config.fetch_max_bytes; 152 | let fetch_max_wait = self.inner.config.fetch_max_wait(); 153 | let partition_fetch_bytes = self.inner.config.partition_fetch_bytes; 154 | let auto_commit_interval = self.inner.config.auto_commit_interval(); 155 | let assignors = self.inner 156 | .config 157 | .assignment_strategy 158 | .iter() 159 | .flat_map(|strategy| strategy.assignor()) 160 | .collect(); 161 | let timer = self.inner.client.timer().clone(); 162 | 163 | self.inner 164 | .client 165 | .metadata() 166 | .and_then(move |metadata| { 167 | let topics = metadata.topics(); 168 | 169 | if let Some(not_found) = topic_names 170 | .iter() 171 | .find(|topic_name| !topics.contains_key(topic_name.as_str())) 172 | { 173 | bail!(ErrorKind::TopicNotFound(not_found.clone())) 174 | } 175 | 176 
| let subscriptions = Rc::new(RefCell::new(Subscriptions::with_topics( 177 | topic_names, 178 | default_reset_strategy, 179 | ))); 180 | 181 | let coordinator = group_id.map(|group_id| { 182 | ConsumerCoordinator::new( 183 | inner.client.clone(), 184 | group_id, 185 | subscriptions.clone(), 186 | session_timeout, 187 | rebalance_timeout, 188 | heartbeat_interval, 189 | None, 190 | auto_commit_interval, 191 | assignors, 192 | timer.clone(), 193 | ) 194 | }); 195 | 196 | let fetcher = Rc::new(Fetcher::new( 197 | inner.client.clone(), 198 | subscriptions.clone(), 199 | fetch_min_bytes, 200 | fetch_max_bytes, 201 | fetch_max_wait, 202 | partition_fetch_bytes, 203 | )); 204 | 205 | SubscribedTopics::new(KafkaConsumer { inner }, subscriptions, coordinator, fetcher, timer) 206 | }) 207 | .static_boxed() 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/consumer/mod.rs: -------------------------------------------------------------------------------- 1 | mod assignor; 2 | mod builder; 3 | mod config; 4 | mod consumer; 5 | mod coordinator; 6 | mod fetcher; 7 | mod protocol; 8 | mod subscribed; 9 | mod subscriptions; 10 | 11 | pub use self::assignor::{Assignment, AssignmentStrategy, PartitionAssignor, Subscription}; 12 | pub use self::builder::ConsumerBuilder; 13 | pub use self::config::{ConsumerConfig, DEFAULT_AUTO_COMMIT_INTERVAL_MILLIS, DEFAULT_HEARTBEAT_INTERVAL_MILLIS, 14 | DEFAULT_MAX_POLL_RECORDS, DEFAULT_SESSION_TIMEOUT_MILLIS}; 15 | pub use self::consumer::{Consumer, ConsumerRecord, KafkaConsumer}; 16 | pub use self::coordinator::{CommitOffset, ConsumerCoordinator, Coordinator, JoinGroup, LeaveGroup}; 17 | pub use self::fetcher::{Fetcher, RetrieveOffsets, UpdatePositions}; 18 | pub use self::protocol::{ConsumerProtocol, CONSUMER_PROTOCOL}; 19 | pub use self::subscribed::{Subscribed, SubscribedTopics}; 20 | pub use self::subscriptions::{OffsetResetStrategy, SeekTo, Subscriptions, TopicPartitionState}; 21 | -------------------------------------------------------------------------------- /src/consumer/protocol.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::collections::HashMap; 3 | 4 | use serde::{de, ser}; 5 | 6 | use consumer::{Assignment, Subscription}; 7 | use protocol::Nullable; 8 | 9 | const CONSUMER_PROTOCOL_V0: i16 = 0; 10 | 11 | pub const CONSUMER_PROTOCOL: &str = "consumer"; 12 | 13 | pub struct ConsumerProtocol {} 14 | 15 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 16 | pub struct ConsumerProtocolHeader { 17 | version: i16, 18 | } 19 | 20 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 21 | pub struct SubscriptionSchema { 22 | header: ConsumerProtocolHeader, 23 | topics: Vec, 24 | user_data: Nullable>, 25 | } 26 | 27 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 28 | pub struct TopicAssignment { 29 | topics: String, 30 | partitions: Vec, 31 | } 32 | 33 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 34 | pub struct AssignmentSchema { 35 | header: ConsumerProtocolHeader, 36 | topic_partitions: Vec, 37 | user_data: Nullable>, 38 | } 39 | 40 | impl<'a> ser::Serialize for Subscription<'a> { 41 | fn serialize(&self, serializer: S) -> Result 42 | where 43 | S: ser::Serializer, 44 | { 45 | let mut schema = SubscriptionSchema { 46 | header: ConsumerProtocolHeader { 47 | version: CONSUMER_PROTOCOL_V0, 48 | }, 49 | topics: self.topics 50 | .iter() 51 | .map(|topic_name| 
String::from(topic_name.to_owned())) 52 | .collect(), 53 | user_data: self.user_data.as_ref().map(|user_data| user_data.to_vec()).into(), 54 | }; 55 | 56 | schema.topics.sort(); 57 | 58 | schema.serialize(serializer) 59 | } 60 | } 61 | 62 | impl<'a, 'de> de::Deserialize<'de> for Subscription<'a> { 63 | fn deserialize(deserializer: D) -> Result, D::Error> 64 | where 65 | D: de::Deserializer<'de>, 66 | { 67 | let SubscriptionSchema { 68 | header, 69 | topics, 70 | user_data, 71 | } = SubscriptionSchema::deserialize(deserializer)?; 72 | 73 | if header.version < CONSUMER_PROTOCOL_V0 { 74 | Err(de::Error::custom(format!( 75 | "unsupported subscription version: {}", 76 | header.version 77 | ))) 78 | } else { 79 | Ok(Subscription { 80 | topics: topics.into_iter().map(Cow::Owned).collect(), 81 | user_data: user_data.into_raw().map(Cow::Owned), 82 | }) 83 | } 84 | } 85 | } 86 | 87 | impl<'a> ser::Serialize for Assignment<'a> { 88 | fn serialize(&self, serializer: S) -> Result 89 | where 90 | S: ser::Serializer, 91 | { 92 | let mut topic_partitions = HashMap::new(); 93 | 94 | for tp in &self.partitions { 95 | topic_partitions 96 | .entry(tp.topic_name.to_owned()) 97 | .or_insert_with(Vec::new) 98 | .push(tp.partition_id); 99 | } 100 | 101 | let mut schema = AssignmentSchema { 102 | header: ConsumerProtocolHeader { 103 | version: CONSUMER_PROTOCOL_V0, 104 | }, 105 | topic_partitions: topic_partitions 106 | .into_iter() 107 | .map(|(topic_name, partitions)| TopicAssignment { 108 | topics: String::from(topic_name.to_owned()), 109 | partitions, 110 | }) 111 | .collect(), 112 | user_data: self.user_data.as_ref().map(|user_data| user_data.to_vec()).into(), 113 | }; 114 | 115 | schema.topic_partitions.sort_by(|lhs, rhs| lhs.topics.cmp(&rhs.topics)); 116 | 117 | schema.serialize(serializer) 118 | } 119 | } 120 | 121 | impl<'a, 'de> de::Deserialize<'de> for Assignment<'a> { 122 | fn deserialize(deserializer: D) -> Result, D::Error> 123 | where 124 | D: de::Deserializer<'de>, 125 | { 126 | let AssignmentSchema { 127 | header, 128 | topic_partitions, 129 | user_data, 130 | } = AssignmentSchema::deserialize(deserializer)?; 131 | 132 | if header.version < CONSUMER_PROTOCOL_V0 { 133 | Err(de::Error::custom(format!( 134 | "unsupported assignment version: {}", 135 | header.version 136 | ))) 137 | } else { 138 | let partitions = topic_partitions 139 | .iter() 140 | .flat_map(|assignment| { 141 | let topic_name = assignment.topics.to_owned(); 142 | 143 | assignment 144 | .partitions 145 | .iter() 146 | .map(move |&partition| topic_partition!(topic_name.clone(), partition)) 147 | }) 148 | .collect(); 149 | 150 | Ok(Assignment { 151 | partitions, 152 | user_data: user_data.into_raw().map(Cow::Owned), 153 | }) 154 | } 155 | } 156 | } 157 | 158 | #[cfg(test)] 159 | mod tests { 160 | use std::io::Cursor; 161 | 162 | use super::*; 163 | use protocol::Schema; 164 | 165 | lazy_static! 
{ 166 | static ref TEST_SUBSCRIPTION: Subscription<'static> = Subscription { 167 | topics: vec!["t0".into(), "t1".into()], 168 | user_data: Some(b"data".to_vec().into()), 169 | }; 170 | 171 | static ref TEST_SUBSCRIPTION_DATA: Vec = vec![ 172 | // SubscriptionSchema 173 | // header: ConsumerProtocolHeader 174 | 0, 0, // version 175 | 176 | // topic_partitions: [&str] 177 | 0, 0, 0, 2, 178 | 0, 2, b't', b'0', 179 | 0, 2, b't', b'1', 180 | 181 | // user_data 182 | 0, 0, 0, 4, b'd', b'a', b't', b'a', 183 | ]; 184 | 185 | static ref TEST_ASSIGNMENT: Assignment<'static> = Assignment { 186 | partitions: vec![ 187 | topic_partition!("t0", 0), 188 | topic_partition!("t0", 1), 189 | topic_partition!("t1", 0), 190 | topic_partition!("t1", 1) 191 | ], 192 | user_data: Some(b"data".to_vec().into()), 193 | }; 194 | 195 | static ref TEST_ASSIGNMENT_DATA: Vec = vec![ 196 | // AssignmentSchema 197 | // header: ConsumerProtocolHeader 198 | 0, 0, // version 199 | 200 | // partitions: [TopicAssignment] 201 | 0, 0, 0, 2, 202 | // TopicAssignment 203 | 0, 2, b't', b'0', // topics 204 | 0, 0, 0, 2, // partitions 205 | 0, 0, 0, 0, 206 | 0, 0, 0, 1, 207 | // TopicAssignment 208 | 0, 2, b't', b'1', // topics 209 | 0, 0, 0, 2, // partitions 210 | 0, 0, 0, 0, 211 | 0, 0, 0, 1, 212 | 213 | // user_data 214 | 0, 0, 0, 4, b'd', b'a', b't', b'a', 215 | ]; 216 | } 217 | 218 | #[test] 219 | fn test_subscription_serializer() { 220 | assert_eq!(Schema::serialize(&*TEST_SUBSCRIPTION).unwrap(), *TEST_SUBSCRIPTION_DATA); 221 | } 222 | 223 | #[test] 224 | fn test_subscription_deserializer() { 225 | let subscription: Subscription = Schema::deserialize(Cursor::new(TEST_SUBSCRIPTION_DATA.clone())).unwrap(); 226 | 227 | assert_eq!(subscription, *TEST_SUBSCRIPTION); 228 | } 229 | 230 | #[test] 231 | fn test_assignment_serializer() { 232 | assert_eq!(Schema::serialize(&*TEST_ASSIGNMENT).unwrap(), *TEST_ASSIGNMENT_DATA); 233 | } 234 | 235 | #[test] 236 | fn test_assignment_deserializer() { 237 | let assignment: Assignment = Schema::deserialize(Cursor::new(TEST_ASSIGNMENT_DATA.clone())).unwrap(); 238 | 239 | assert_eq!(assignment, *TEST_ASSIGNMENT); 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::{Borrow, Cow}; 2 | use std::error::Error as StdError; 3 | use std::fmt; 4 | 5 | use serde::{de, ser}; 6 | 7 | use client::BrokerRef; 8 | use protocol::{ApiKeys, KafkaCode, PartitionId}; 9 | 10 | error_chain!{ 11 | foreign_links { 12 | IoError(::std::io::Error); 13 | ParseIntError(::std::num::ParseIntError); 14 | Utf8Error(::std::str::Utf8Error); 15 | TlsError(::native_tls::Error); 16 | MetricsError(::prometheus::Error); 17 | SnappyError(::snap::Error) #[cfg(feature = "snappy")]; 18 | JsonError(::serde_json::Error) #[cfg(feature = "json")]; 19 | TimerError(::tokio_timer::TimerError); 20 | ResolveError(::abstract_ns::Error); 21 | } 22 | 23 | errors { 24 | ConfigError(reason: &'static str) { 25 | description("invalid config") 26 | display("invalid config, {}", reason) 27 | } 28 | LockError(reason: String) { 29 | description("lock failed") 30 | display("lock failed, {}", reason) 31 | } 32 | ParseError(reason: String) { 33 | description("fail to parse") 34 | display("fail to parse, {}", reason) 35 | } 36 | EncodeError(reason: &'static str) { 37 | description("fail to encode") 38 | display("fail to encode, {}", reason) 39 | } 40 | IllegalArgument(reason: String) { 41 | 
description("invalid argument") 42 | display("invalid argument, {}", reason) 43 | } 44 | UnexpectedResponse(api_key: ApiKeys) { 45 | description("unexpected response") 46 | display("unexpected response, {:?}", api_key) 47 | } 48 | Canceled(task: &'static str) { 49 | description("task canceled") 50 | display("task canceled, {}", task) 51 | } 52 | KafkaError(code: KafkaCode) { 53 | description("kafka error") 54 | display("kafka error, {:?}, {}", code, code.reason()) 55 | } 56 | TimeoutError(reason: String) { 57 | description("operation timed out") 58 | display("operation timed out, {}", reason) 59 | } 60 | RetryError(reason: String) { 61 | description("retry failed") 62 | display("retry failed, {}", reason) 63 | } 64 | UnsupportedCompression { 65 | description("Unsupported compression format") 66 | } 67 | UnsupportedAssignmentStrategy(name: String) { 68 | description("unsupported assignment strategy") 69 | display("unsupported assignment strategy, {}", name) 70 | } 71 | UnsupportedOffsetResetStrategy(name: String) { 72 | description("unsupported offset reset strategy") 73 | display("unsupported offset reset strategy, {}", name) 74 | } 75 | NoOffsetForPartition(topic_name: String, partition_id: PartitionId) { 76 | description("Undefined offset with no reset policy for partition") 77 | display("Undefined offset with no reset policy for partition, {}:{}", topic_name, partition_id) 78 | } 79 | UnexpectedEOF { 80 | description("Unexpected EOF") 81 | } 82 | #[cfg(feature = "lz4")] 83 | Lz4Error(reason: String) { 84 | description("LZ4 error") 85 | display("LZ4 error, {}", reason) 86 | } 87 | TopicNotFound(topic_name: String) { 88 | description("topic not found") 89 | display("topic `{}` not found", topic_name) 90 | } 91 | BrokerNotFound(broker: BrokerRef) { 92 | description("broker not found") 93 | display("broker `{}` not found", broker.index()) 94 | } 95 | SchemaError(reason: String) { 96 | description("schema error") 97 | display("schema error, {}", reason) 98 | } 99 | } 100 | } 101 | 102 | unsafe impl Sync for Error {} 103 | unsafe impl Send for Error {} 104 | 105 | impl ser::Error for Error { 106 | fn custom(msg: T) -> Self 107 | where 108 | T: fmt::Display, 109 | { 110 | ErrorKind::Msg(msg.to_string()).into() 111 | } 112 | } 113 | 114 | impl de::Error for Error { 115 | fn custom(msg: T) -> Self 116 | where 117 | T: fmt::Display, 118 | { 119 | ErrorKind::Msg(msg.to_string()).into() 120 | } 121 | } 122 | 123 | impl<'a> From> for Error { 124 | fn from(s: Cow<'a, str>) -> Self { 125 | ErrorKind::Msg(String::from(s.borrow())).into() 126 | } 127 | } 128 | 129 | impl From<::std::sync::PoisonError> for Error { 130 | fn from(err: ::std::sync::PoisonError) -> Self { 131 | ErrorKind::LockError(StdError::description(&err).to_owned()).into() 132 | } 133 | } 134 | 135 | impl

<P> From<::nom::verbose_errors::Err<P>> for Error 136 | where 137 | P: ::std::fmt::Debug, 138 | { 139 | fn from(err: ::nom::verbose_errors::Err<P>
) -> Self { 140 | ErrorKind::ParseError(err.to_string()).into() 141 | } 142 | } 143 | 144 | impl From<::tokio_timer::TimeoutError> for Error { 145 | fn from(err: ::tokio_timer::TimeoutError) -> Self { 146 | ErrorKind::TimeoutError(StdError::description(&err).to_owned()).into() 147 | } 148 | } 149 | 150 | impl From<::tokio_retry::Error> for Error { 151 | fn from(err: ::tokio_retry::Error) -> Self { 152 | ErrorKind::RetryError(StdError::description(&err).to_owned()).into() 153 | } 154 | } 155 | 156 | macro_rules! hexdump { 157 | ($buf: expr) => { 158 | hexdump!($buf, 0) 159 | }; 160 | ($buf: expr, $off: expr) => { 161 | ::hexplay::HexViewBuilder::new($buf) 162 | .codepage(::hexplay::CODEPAGE_ASCII) 163 | .address_offset($off) 164 | .row_width(16) 165 | .finish() 166 | }; 167 | } 168 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![recursion_limit = "256"] 2 | #![cfg_attr(feature = "clippy", feature(plugin))] 3 | #![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../.clippy.toml")))] 4 | #![cfg_attr(feature = "clippy", allow(module_inception, block_in_if_condition_stmt))] 5 | #![allow(dead_code)] 6 | 7 | #[macro_use] 8 | extern crate log; 9 | #[macro_use] 10 | extern crate error_chain; 11 | #[macro_use] 12 | extern crate lazy_static; 13 | extern crate bytes; 14 | #[macro_use] 15 | extern crate nom; 16 | extern crate byteorder; 17 | extern crate crc; 18 | #[cfg(feature = "encoding")] 19 | extern crate encoding; 20 | extern crate hexplay; 21 | extern crate rand; 22 | extern crate serde; 23 | extern crate time; 24 | extern crate twox_hash; 25 | #[macro_use] 26 | extern crate serde_derive; 27 | #[cfg(feature = "json")] 28 | extern crate serde_json; 29 | #[macro_use] 30 | extern crate prometheus; 31 | extern crate abstract_ns; 32 | extern crate ns_router; 33 | extern crate ns_std_threaded; 34 | 35 | #[macro_use] 36 | extern crate futures; 37 | extern crate futures_cpupool; 38 | extern crate native_tls; 39 | extern crate tokio_core; 40 | extern crate tokio_io; 41 | extern crate tokio_proto; 42 | extern crate tokio_retry; 43 | extern crate tokio_service; 44 | extern crate tokio_timer; 45 | extern crate tokio_tls; 46 | 47 | #[cfg(feature = "gzip")] 48 | extern crate flate2; 49 | 50 | #[cfg(feature = "snappy")] 51 | extern crate snap; 52 | 53 | #[cfg(feature = "lz4")] 54 | extern crate lz4_compress; 55 | 56 | #[cfg(test)] 57 | extern crate pretty_env_logger; 58 | #[cfg(test)] 59 | extern crate typemap; 60 | 61 | #[macro_use] 62 | mod errors; 63 | #[macro_use] 64 | mod macros; 65 | mod compression; 66 | #[macro_use] 67 | mod protocol; 68 | mod serialization; 69 | mod network; 70 | mod client; 71 | mod consumer; 72 | mod producer; 73 | 74 | pub use client::{Broker, BrokerRef, Client, ClientBuilder, ClientConfig, Cluster, KafkaClient, KafkaVersion, 75 | ListOffsets, ListedOffset, LoadMetadata, Metadata, PartitionRecord, ProduceRecords, 76 | ToStaticBoxFuture, TopicRecord, DEFAULT_MAX_CONNECTION_IDLE_TIMEOUT_MILLIS, 77 | DEFAULT_METADATA_MAX_AGE_MILLS, DEFAULT_REQUEST_TIMEOUT_MILLS, DEFAULT_RETRY_BACKOFF_MILLIS}; 78 | pub use compression::Compression; 79 | pub use consumer::{Consumer, ConsumerBuilder, KafkaConsumer, OffsetResetStrategy, SeekTo, Subscribed}; 80 | pub use errors::{Error, ErrorKind, Result}; 81 | pub use network::{OffsetAndMetadata, OffsetAndTimestamp, TopicPartition, DEFAULT_PORT}; 82 | pub use producer::{DefaultPartitioner, GetTopic, KafkaProducer, 
Partitioner, Producer, ProducerBuilder, 83 | ProducerConfig, ProducerInterceptor, ProducerPartition, ProducerRecord, ProducerTopic, 84 | RecordMetadata, SendRecord, DEFAULT_ACK_TIMEOUT_MILLIS, DEFAULT_BATCH_SIZE, DEFAULT_LINGER_MILLIS, 85 | DEFAULT_MAX_REQUEST_SIZE}; 86 | pub use protocol::{ApiKey, ApiKeys, ErrorCode, FetchOffset, KafkaCode, Offset, PartitionId, RequiredAcks, Timestamp, 87 | ToMilliseconds, UsableApiVersion, UsableApiVersions}; 88 | pub use serialization::{BytesDeserializer, BytesSerializer, Deserializer, NoopDeserializer, NoopSerializer, 89 | RawDeserializer, RawSerializer, Serializer, StringDeserializer, StringSerializer}; 90 | #[cfg(feature = "json")] 91 | pub use serialization::{JsonDeserializer, JsonSerializer}; 92 | #[cfg(feature = "encoding")] 93 | pub use serialization::{StrEncodingDeserializer, StrEncodingSerializer}; 94 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! topic_partition { 3 | ($topic_name: expr, $partition_id: expr) => { 4 | $crate::TopicPartition { 5 | topic_name: $topic_name.into(), 6 | partition_id: $partition_id, 7 | } 8 | }; 9 | ($topic_name: expr, $partition_id: expr) => { 10 | topic_partition!($topic_name, 0) 11 | }; 12 | } 13 | 14 | #[macro_export] 15 | macro_rules! offset_and_metadata { 16 | ($offset: expr) => { 17 | $crate::OffsetAndMetadata { 18 | offset: $offset, 19 | metadata: None, 20 | } 21 | }; 22 | ($offset: expr, $metadata: expr) => { 23 | $crate::OffsetAndMetadata { 24 | offset: $offset, 25 | metadata: Some($metadata.into()), 26 | } 27 | }; 28 | } 29 | 30 | #[macro_export] 31 | macro_rules! offset_and_timestamp { 32 | ($offset: expr) => { 33 | $crate::network::OffsetAndTimestamp { 34 | offset: $offset, 35 | timestamp: None, 36 | } 37 | }; 38 | ($offset: expr, $timestamp: expr) => { 39 | $crate::network::OffsetAndTimestamp { 40 | offset: $offset, 41 | timestamp: Some($timestamp), 42 | } 43 | }; 44 | } 45 | -------------------------------------------------------------------------------- /src/network/codec.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use std::io; 3 | use std::marker::PhantomData; 4 | use std::mem; 5 | 6 | use bytes::{BigEndian, BufMut, ByteOrder, BytesMut}; 7 | 8 | use tokio_io::codec::{Decoder, Encoder}; 9 | 10 | use network::{KafkaRequest, KafkaResponse}; 11 | use protocol::{ApiKeys, ApiVersion, CorrelationId, Encodable, Record, RequestHeader}; 12 | 13 | #[derive(Debug)] 14 | pub struct KafkaCodec<'a> { 15 | requests: VecDeque<(ApiKeys, ApiVersion, CorrelationId)>, 16 | phantom: PhantomData<&'a u8>, 17 | } 18 | 19 | impl<'a> KafkaCodec<'a> { 20 | pub fn new() -> Self { 21 | KafkaCodec { 22 | requests: VecDeque::new(), 23 | phantom: PhantomData, 24 | } 25 | } 26 | } 27 | 28 | impl<'a> Encoder for KafkaCodec<'a> { 29 | type Item = KafkaRequest<'a>; 30 | type Error = io::Error; 31 | 32 | fn encode(&mut self, request: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { 33 | let off = dst.len(); 34 | 35 | let &RequestHeader { 36 | api_key, 37 | api_version, 38 | correlation_id, 39 | .. 
40 | } = request.header(); 41 | 42 | dst.reserve(mem::size_of::() + request.size(api_version)); 43 | 44 | dst.put_i32::(0); 45 | 46 | request 47 | .encode::(dst) 48 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, format!("invalid request, {}", err)))?; 49 | 50 | let size = dst.len() - off - mem::size_of::(); 51 | 52 | BigEndian::write_i32(&mut dst[off..off + mem::size_of::()], size as i32); 53 | 54 | trace!( 55 | "encoded {} bytes frame:\n{}", 56 | size + mem::size_of::(), 57 | hexdump!(&dst[..]) 58 | ); 59 | 60 | self.requests 61 | .push_back((ApiKeys::from(api_key), api_version, correlation_id)); 62 | 63 | Ok(()) 64 | } 65 | } 66 | 67 | impl<'a> Decoder for KafkaCodec<'a> { 68 | type Item = KafkaResponse; 69 | type Error = io::Error; 70 | 71 | fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { 72 | let size_header_len = mem::size_of::(); 73 | 74 | if src.len() < size_header_len { 75 | Ok(None) 76 | } else { 77 | let size = BigEndian::read_i32(&src[..]) as usize; 78 | 79 | if size_header_len + size > src.len() { 80 | Ok(None) 81 | } else { 82 | trace!("received new frame with {} bytes:\n{}", src.len(), hexdump!(&src[..])); 83 | 84 | let buf = src.split_to(size + size_header_len).split_off(size_header_len).freeze(); 85 | 86 | if let Some((api_key, api_version, correlation_id)) = self.requests.pop_front() { 87 | if BigEndian::read_i32(&buf[..]) != correlation_id { 88 | Err(io::Error::new(io::ErrorKind::InvalidData, "correlation id mismatch")) 89 | } else { 90 | KafkaResponse::parse(&buf[..], api_key, api_version) 91 | } 92 | } else { 93 | Err(io::Error::new(io::ErrorKind::InvalidData, "unexpected response")) 94 | } 95 | } 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/network/conn.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::io::prelude::*; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::time::Instant; 5 | 6 | use bytes::BytesMut; 7 | 8 | use futures::sink::Sink; 9 | use futures::stream::Stream; 10 | use futures::{AsyncSink, Poll, StartSend}; 11 | use tokio_io::codec::Framed; 12 | use tokio_io::{AsyncRead, AsyncWrite}; 13 | use tokio_proto::streaming::pipeline::{Frame, Transport}; 14 | 15 | use network::{ConnectionId, KafkaCodec, KafkaRequest, KafkaResponse}; 16 | 17 | #[derive(Clone, Copy, Debug)] 18 | pub enum Status { 19 | Idle(Instant), 20 | Busy, 21 | Closed, 22 | } 23 | 24 | pub trait KeepAlive { 25 | fn status(&self) -> Status; 26 | fn busy(&mut self); 27 | fn close(&mut self); 28 | fn idle(&mut self); 29 | } 30 | 31 | #[derive(Debug)] 32 | struct State { 33 | keep_alive: K, 34 | } 35 | 36 | #[derive(Debug)] 37 | pub struct KafkaConnection<'a, I, K> { 38 | id: ConnectionId, 39 | stream: Framed>, 40 | state: State, 41 | } 42 | 43 | impl<'a, I, K> Deref for KafkaConnection<'a, I, K> { 44 | type Target = I; 45 | 46 | fn deref(&self) -> &Self::Target { 47 | self.stream.get_ref() 48 | } 49 | } 50 | 51 | impl<'a, I, K> DerefMut for KafkaConnection<'a, I, K> { 52 | fn deref_mut(&mut self) -> &mut Self::Target { 53 | self.stream.get_mut() 54 | } 55 | } 56 | 57 | impl<'a, I, K> Read for KafkaConnection<'a, I, K> 58 | where 59 | I: AsyncRead + AsyncWrite, 60 | { 61 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 62 | match self.stream.get_mut().read(buf) { 63 | Ok(size) => { 64 | trace!("read {} bytes:\n{}", size, hexdump!(&buf[..size])); 65 | 66 | Ok(size) 67 | } 68 | Err(err) => { 69 | trace!("read failed, {}", err); 70 | 
71 | Err(err) 72 | } 73 | } 74 | } 75 | } 76 | 77 | impl<'a, I, K> Write for KafkaConnection<'a, I, K> 78 | where 79 | I: AsyncRead + AsyncWrite, 80 | { 81 | fn write(&mut self, buf: &[u8]) -> io::Result { 82 | trace!("write {} bytes:\n{}", buf.len(), hexdump!(buf)); 83 | 84 | self.stream.get_mut().write(buf) 85 | } 86 | 87 | fn flush(&mut self) -> io::Result<()> { 88 | trace!("flush stream"); 89 | 90 | self.stream.get_mut().flush() 91 | } 92 | } 93 | 94 | impl<'a, I, K> AsyncRead for KafkaConnection<'a, I, K> 95 | where 96 | I: AsyncRead + AsyncWrite, 97 | { 98 | } 99 | 100 | impl<'a, I, K> AsyncWrite for KafkaConnection<'a, I, K> 101 | where 102 | I: AsyncRead + AsyncWrite, 103 | { 104 | fn shutdown(&mut self) -> Poll<(), io::Error> { 105 | trace!("shutdown stream"); 106 | 107 | self.stream.get_mut().shutdown() 108 | } 109 | } 110 | 111 | impl<'a, I, K> Stream for KafkaConnection<'a, I, K> 112 | where 113 | I: AsyncRead + AsyncWrite, 114 | { 115 | type Item = Frame; 116 | type Error = io::Error; 117 | 118 | fn poll(&mut self) -> Poll, Self::Error> { 119 | self.stream.poll().map(|res| { 120 | res.map(|res| { 121 | res.map(|res| Frame::Message { 122 | message: res, 123 | body: false, 124 | }) 125 | }) 126 | }) 127 | } 128 | } 129 | 130 | impl<'a, I, K> Sink for KafkaConnection<'a, I, K> 131 | where 132 | I: AsyncRead + AsyncWrite, 133 | K: KeepAlive, 134 | { 135 | type SinkItem = Frame, BytesMut, io::Error>; 136 | type SinkError = io::Error; 137 | 138 | fn start_send(&mut self, frame: Self::SinkItem) -> StartSend { 139 | trace!("send request: {:?}", frame); 140 | 141 | match frame { 142 | Frame::Message { message: request, body } => self.stream.start_send(request).map(|async| match async { 143 | AsyncSink::Ready => AsyncSink::Ready, 144 | AsyncSink::NotReady(request) => AsyncSink::NotReady(Frame::Message { message: request, body }), 145 | }), 146 | Frame::Body { .. } | Frame::Error { .. 
} => Ok(AsyncSink::Ready), 147 | } 148 | } 149 | 150 | fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { 151 | trace!("poll complete"); 152 | 153 | self.state.keep_alive.idle(); 154 | 155 | self.stream.poll_complete() 156 | } 157 | } 158 | 159 | impl<'a, I, K> Transport for KafkaConnection<'a, I, K> 160 | where 161 | I: AsyncRead + AsyncWrite, 162 | K: KeepAlive, 163 | Self: 'static, 164 | { 165 | } 166 | 167 | impl<'a, I, K> KafkaConnection<'a, I, K> 168 | where 169 | I: AsyncRead + AsyncWrite, 170 | K: KeepAlive, 171 | { 172 | pub fn new(id: ConnectionId, stream: I, codec: KafkaCodec<'a>, keep_alive: K) -> Self { 173 | KafkaConnection { 174 | id, 175 | stream: stream.framed(codec), 176 | state: State { keep_alive }, 177 | } 178 | } 179 | 180 | pub fn id(&self) -> ConnectionId { 181 | self.id 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /src/network/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod request; 3 | mod codec; 4 | mod conn; 5 | mod pool; 6 | mod response; 7 | mod stream; 8 | 9 | pub use self::codec::KafkaCodec; 10 | pub use self::conn::{KafkaConnection, KeepAlive, Status}; 11 | pub use self::pool::{Pool, Pooled}; 12 | pub use self::request::KafkaRequest; 13 | pub use self::response::KafkaResponse; 14 | pub use self::stream::{Connect, KafkaConnector, KafkaStream}; 15 | 16 | use std::borrow::Cow; 17 | use std::fmt; 18 | 19 | use protocol::{Offset, PartitionId, Timestamp}; 20 | 21 | pub const DEFAULT_PORT: u16 = 9092; 22 | 23 | pub type ConnectionId = u32; 24 | 25 | /// A topic name and partition number 26 | #[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] 27 | pub struct TopicPartition<'a> { 28 | pub topic_name: Cow<'a, str>, 29 | pub partition_id: PartitionId, 30 | } 31 | 32 | impl<'a> fmt::Display for TopicPartition<'a> { 33 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 34 | write!(f, "{}#{}", self.topic_name, self.partition_id) 35 | } 36 | } 37 | 38 | /// A container class for offset and metadata 39 | /// 40 | /// The Kafka offset commit API allows users to provide additional metadata (in the form of a 41 | /// string) when an offset is committed. This can be useful (for example) to store information 42 | /// about which node made the commit, what time the commit was made, etc. 43 | #[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] 44 | pub struct OffsetAndMetadata { 45 | /// Message offset to be committed. 46 | pub offset: Offset, 47 | /// Any associated metadata the client wants to keep. 48 | pub metadata: Option, 49 | } 50 | 51 | impl OffsetAndMetadata { 52 | pub fn new(offset: Offset) -> Self { 53 | OffsetAndMetadata { offset, metadata: None } 54 | } 55 | 56 | pub fn with_metadata(offset: Offset, metadata: Option) -> Self { 57 | OffsetAndMetadata { offset, metadata } 58 | } 59 | } 60 | 61 | /// A container class for offset and timestamp 62 | #[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] 63 | pub struct OffsetAndTimestamp { 64 | /// Message offset to be committed. 
65 | pub offset: Offset, 66 | /// Timestamp of the commit 67 | pub timestamp: Option, 68 | } 69 | 70 | impl OffsetAndTimestamp { 71 | pub fn new(offset: Offset) -> Self { 72 | OffsetAndTimestamp { 73 | offset, 74 | timestamp: None, 75 | } 76 | } 77 | 78 | pub fn with_timestamp(offset: Offset, timestamp: Option) -> Self { 79 | OffsetAndTimestamp { offset, timestamp } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/network/response.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use log::Level::Debug; 4 | 5 | use nom::{self, ErrorKind, IResult, Needed}; 6 | 7 | use protocol::{display_parse_error, ApiKeys, ApiVersion, ApiVersionsResponse, DescribeGroupsResponse, FetchResponse, 8 | GroupCoordinatorResponse, HeartbeatResponse, JoinGroupResponse, LeaveGroupResponse, ListGroupsResponse, 9 | ListOffsetResponse, MetadataResponse, OffsetCommitResponse, OffsetFetchResponse, ParseTag, 10 | ProduceResponse, SyncGroupResponse}; 11 | 12 | #[derive(Clone, Debug, PartialEq)] 13 | pub enum KafkaResponse { 14 | Produce(ProduceResponse), 15 | Fetch(FetchResponse), 16 | ListOffsets(ListOffsetResponse), 17 | Metadata(MetadataResponse), 18 | OffsetCommit(OffsetCommitResponse), 19 | OffsetFetch(OffsetFetchResponse), 20 | GroupCoordinator(GroupCoordinatorResponse), 21 | JoinGroup(JoinGroupResponse), 22 | Heartbeat(HeartbeatResponse), 23 | LeaveGroup(LeaveGroupResponse), 24 | SyncGroup(SyncGroupResponse), 25 | DescribeGroups(DescribeGroupsResponse), 26 | ListGroups(ListGroupsResponse), 27 | ApiVersions(ApiVersionsResponse), 28 | } 29 | 30 | impl KafkaResponse { 31 | pub fn api_key(&self) -> ApiKeys { 32 | match *self { 33 | KafkaResponse::Produce(_) => ApiKeys::Produce, 34 | KafkaResponse::Fetch(_) => ApiKeys::Fetch, 35 | KafkaResponse::ListOffsets(_) => ApiKeys::ListOffsets, 36 | KafkaResponse::Metadata(_) => ApiKeys::Metadata, 37 | KafkaResponse::OffsetCommit(_) => ApiKeys::OffsetCommit, 38 | KafkaResponse::OffsetFetch(_) => ApiKeys::OffsetFetch, 39 | KafkaResponse::GroupCoordinator(_) => ApiKeys::GroupCoordinator, 40 | KafkaResponse::JoinGroup(_) => ApiKeys::JoinGroup, 41 | KafkaResponse::Heartbeat(_) => ApiKeys::Heartbeat, 42 | KafkaResponse::LeaveGroup(_) => ApiKeys::LeaveGroup, 43 | KafkaResponse::SyncGroup(_) => ApiKeys::SyncGroup, 44 | KafkaResponse::DescribeGroups(_) => ApiKeys::DescribeGroups, 45 | KafkaResponse::ListGroups(_) => ApiKeys::ListGroups, 46 | KafkaResponse::ApiVersions(_) => ApiKeys::ApiVersions, 47 | } 48 | } 49 | 50 | pub fn parse>(src: T, api_key: ApiKeys, api_version: ApiVersion) -> io::Result> { 51 | let buf = src.as_ref(); 52 | 53 | debug!( 54 | "parsing {:?} response (api_version = {:?}) with {} bytes", 55 | api_key, 56 | api_version, 57 | buf.len(), 58 | ); 59 | 60 | let res = match api_key { 61 | ApiKeys::Produce => ProduceResponse::parse(buf, api_version).map(KafkaResponse::Produce), 62 | ApiKeys::Fetch => FetchResponse::parse(buf, api_version).map(KafkaResponse::Fetch), 63 | ApiKeys::ListOffsets => ListOffsetResponse::parse(buf, api_version).map(KafkaResponse::ListOffsets), 64 | ApiKeys::Metadata => MetadataResponse::parse(buf).map(KafkaResponse::Metadata), 65 | ApiKeys::OffsetCommit => OffsetCommitResponse::parse(buf).map(KafkaResponse::OffsetCommit), 66 | ApiKeys::OffsetFetch => OffsetFetchResponse::parse(buf).map(KafkaResponse::OffsetFetch), 67 | ApiKeys::GroupCoordinator => GroupCoordinatorResponse::parse(buf).map(KafkaResponse::GroupCoordinator), 68 | 
ApiKeys::JoinGroup => JoinGroupResponse::parse(buf).map(KafkaResponse::JoinGroup), 69 | ApiKeys::Heartbeat => HeartbeatResponse::parse(buf).map(KafkaResponse::Heartbeat), 70 | ApiKeys::LeaveGroup => LeaveGroupResponse::parse(buf).map(KafkaResponse::LeaveGroup), 71 | ApiKeys::SyncGroup => SyncGroupResponse::parse(buf).map(KafkaResponse::SyncGroup), 72 | ApiKeys::DescribeGroups => DescribeGroupsResponse::parse(buf).map(KafkaResponse::DescribeGroups), 73 | ApiKeys::ListGroups => ListGroupsResponse::parse(buf).map(KafkaResponse::ListGroups), 74 | ApiKeys::ApiVersions => ApiVersionsResponse::parse(buf).map(KafkaResponse::ApiVersions), 75 | _ => IResult::Error(nom::Err::Code(ErrorKind::Custom(ParseTag::ApiKey as u32))), 76 | }; 77 | 78 | match res { 79 | IResult::Done(remaining, res) => { 80 | debug!("parsed response: {:?}", res); 81 | 82 | if !remaining.is_empty() { 83 | warn!("remaining {} bytes not parsed", remaining.len()); 84 | } 85 | 86 | Ok(Some(res)) 87 | } 88 | IResult::Incomplete(needed) => { 89 | warn!( 90 | "incomplete response, need more {} bytes", 91 | if let Needed::Size(size) = needed { 92 | size.to_string() 93 | } else { 94 | "unknown".to_owned() 95 | } 96 | ); 97 | 98 | debug!("\n{}", hexdump!(buf)); 99 | 100 | Ok(None) 101 | } 102 | IResult::Error(err) => { 103 | if log_enabled!(Debug) { 104 | display_parse_error::(&buf[..], IResult::Error(err.clone())); 105 | } 106 | 107 | Err(io::Error::new( 108 | io::ErrorKind::InvalidData, 109 | format!("fail to parse response, {}", err), 110 | )) 111 | } 112 | } 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/network/stream.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::io; 3 | use std::rc::Rc; 4 | use std::io::prelude::*; 5 | use std::net::SocketAddr; 6 | 7 | use futures::future::Future; 8 | use futures::{Async, Poll}; 9 | use native_tls::TlsConnector; 10 | use tokio_core::net::{TcpStream, TcpStreamNew}; 11 | use tokio_core::reactor::Handle; 12 | use tokio_io::{AsyncRead, AsyncWrite}; 13 | use tokio_tls::{ConnectAsync, TlsConnectorExt, TlsStream}; 14 | use ns_router::{AutoName, Router}; 15 | use ns_router::future::ResolveFuture; 16 | 17 | use network::DEFAULT_PORT; 18 | 19 | pub struct KafkaConnector { 20 | handle: Handle, 21 | router: Rc, 22 | } 23 | 24 | impl KafkaConnector { 25 | pub fn new(handle: Handle, router: Rc) -> Self { 26 | KafkaConnector { handle, router } 27 | } 28 | 29 | pub fn tcp<'n, N>(&self, addr: N) -> Connect 30 | where 31 | N: Into> + fmt::Debug, 32 | { 33 | trace!("TCP connect to {:?}", addr); 34 | 35 | Connect { 36 | handle: self.handle.clone(), 37 | domain: None, 38 | connector: None, 39 | state: State::Resolving(self.router.resolve_auto(addr, DEFAULT_PORT)), 40 | } 41 | } 42 | 43 | pub fn tls<'n, N, S>(&self, addr: N, connector: TlsConnector, domain: S) -> Connect 44 | where 45 | N: Into> + fmt::Debug, 46 | S: Into, 47 | { 48 | trace!("TLS connect to {:?}", addr); 49 | 50 | Connect { 51 | handle: self.handle.clone(), 52 | domain: Some(domain.into()), 53 | connector: Some(connector), 54 | state: State::Resolving(self.router.resolve_auto(addr, DEFAULT_PORT)), 55 | } 56 | } 57 | } 58 | 59 | enum State { 60 | Resolving(ResolveFuture), 61 | Connecting(TcpStreamNew, SocketAddr, Vec), 62 | Handshaking(ConnectAsync, SocketAddr), 63 | } 64 | 65 | pub struct Connect { 66 | handle: Handle, 67 | domain: Option, 68 | connector: Option, 69 | state: State, 70 | } 71 | 72 | impl Future for Connect { 
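    // Connect drives a small state machine: resolve the broker name, try a TCP
    // connection to each resolved address in turn (falling back to the next address
    // on failure), and finally perform the TLS handshake when a connector and domain
    // were supplied, yielding either a plain or a TLS `KafkaStream`.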
73 | type Item = KafkaStream; 74 | type Error = io::Error; 75 | 76 | fn poll(&mut self) -> Poll { 77 | loop { 78 | let domain = &self.domain; 79 | let connector = &self.connector; 80 | 81 | let state = match self.state { 82 | State::Resolving(ref mut resolving) => match resolving.poll() { 83 | Ok(Async::Ready(mut address)) => { 84 | let mut addrs = address 85 | .iter() 86 | .flat_map(|weighted_set| weighted_set.addresses().collect::>()) 87 | .collect::>(); 88 | 89 | addrs.reverse(); 90 | 91 | if let Some(addr) = addrs.pop() { 92 | trace!("TCP connecting to {}", addr); 93 | 94 | State::Connecting(TcpStream::connect(&addr, &self.handle), addr, addrs) 95 | } else { 96 | bail!(io::Error::new(io::ErrorKind::AddrNotAvailable, "no more address")); 97 | } 98 | } 99 | Ok(Async::NotReady) => return Ok(Async::NotReady), 100 | Err(err) => { 101 | bail!(io::Error::new(io::ErrorKind::AddrNotAvailable, err)); 102 | } 103 | }, 104 | State::Connecting(ref mut connecting, peer_addr, ref mut addrs) => match connecting.poll() { 105 | Ok(Async::Ready(stream)) => { 106 | if let (&Some(ref domain), &Some(ref connector)) = (domain, connector) { 107 | trace!("TCP connected to {}, start TLS handshake", peer_addr); 108 | 109 | State::Handshaking(connector.connect_async(domain, stream), peer_addr) 110 | } else { 111 | trace!("TCP connected to {}", peer_addr); 112 | 113 | return Ok(Async::Ready(KafkaStream::Tcp(peer_addr, stream))); 114 | } 115 | } 116 | Ok(Async::NotReady) => return Ok(Async::NotReady), 117 | Err(err) => { 118 | warn!("fail to connect {}, {}", peer_addr, err); 119 | 120 | if let Some(addr) = addrs.pop() { 121 | trace!("TCP connecting to {}", addr); 122 | 123 | State::Connecting(TcpStream::connect(&addr, &self.handle), addr, addrs.clone()) 124 | } else { 125 | bail!(io::Error::new(io::ErrorKind::NotConnected, err)); 126 | } 127 | } 128 | }, 129 | State::Handshaking(ref mut handshaking, peer_addr) => match handshaking.poll() { 130 | Ok(Async::Ready(stream)) => { 131 | trace!("TLS connected to {}", peer_addr); 132 | 133 | return Ok(Async::Ready(KafkaStream::Tls(peer_addr, stream))); 134 | } 135 | Ok(Async::NotReady) => return Ok(Async::NotReady), 136 | Err(err) => { 137 | warn!("fail to do TLS handshake to {}, {}", peer_addr, err); 138 | 139 | bail!(io::Error::new(io::ErrorKind::ConnectionAborted, "TLS handshake failed")); 140 | } 141 | }, 142 | }; 143 | 144 | self.state = state; 145 | } 146 | } 147 | } 148 | 149 | pub enum KafkaStream { 150 | Tcp(SocketAddr, TcpStream), 151 | Tls(SocketAddr, TlsStream), 152 | } 153 | 154 | impl fmt::Debug for KafkaStream { 155 | fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { 156 | match *self { 157 | KafkaStream::Tcp(ref addr, _) => write!(w, "TcpStream({})", addr), 158 | KafkaStream::Tls(ref addr, _) => write!(w, "TlsStream({})", addr), 159 | } 160 | } 161 | } 162 | 163 | impl Read for KafkaStream { 164 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 165 | match *self { 166 | KafkaStream::Tcp(_, ref mut stream) => stream.read(buf), 167 | KafkaStream::Tls(_, ref mut stream) => stream.read(buf), 168 | } 169 | } 170 | } 171 | 172 | impl Write for KafkaStream { 173 | fn write(&mut self, buf: &[u8]) -> io::Result { 174 | match *self { 175 | KafkaStream::Tcp(_, ref mut stream) => stream.write(buf), 176 | KafkaStream::Tls(_, ref mut stream) => stream.write(buf), 177 | } 178 | } 179 | 180 | fn flush(&mut self) -> io::Result<()> { 181 | match *self { 182 | KafkaStream::Tcp(_, ref mut stream) => stream.flush(), 183 | KafkaStream::Tls(_, ref mut stream) => 
stream.flush(), 184 | } 185 | } 186 | } 187 | 188 | impl AsyncRead for KafkaStream {} 189 | 190 | impl AsyncWrite for KafkaStream { 191 | fn shutdown(&mut self) -> Poll<(), io::Error> { 192 | match *self { 193 | KafkaStream::Tcp(_, ref mut stream) => AsyncWrite::shutdown(stream), 194 | KafkaStream::Tls(_, ref mut stream) => stream.shutdown(), 195 | } 196 | } 197 | } 198 | 199 | impl KafkaStream { 200 | pub fn addr(&self) -> &SocketAddr { 201 | match *self { 202 | KafkaStream::Tcp(ref addr, _) | KafkaStream::Tls(ref addr, _) => addr, 203 | } 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /src/producer/accumulator.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::collections::{HashMap, VecDeque}; 3 | use std::rc::Rc; 4 | use std::time::Duration; 5 | 6 | use bytes::Bytes; 7 | 8 | use futures::{Async, Future, IntoFuture, Poll, Stream}; 9 | 10 | use client::{StaticBoxFuture, ToStaticBoxFuture}; 11 | use compression::Compression; 12 | use errors::Error; 13 | use network::TopicPartition; 14 | use producer::{ProducerBatch, RecordMetadata}; 15 | use protocol::{ApiVersion, Timestamp}; 16 | 17 | /// Accumulator acts as a queue that accumulates records 18 | pub trait Accumulator<'a> { 19 | /// Add a record to the accumulator, return the append result 20 | fn push_record( 21 | &self, 22 | tp: TopicPartition<'a>, 23 | timestamp: Timestamp, 24 | key: Option, 25 | value: Option, 26 | api_version: ApiVersion, 27 | ) -> PushRecord; 28 | 29 | fn flush(&mut self); 30 | } 31 | 32 | /// `RecordAccumulator` acts as a queue that accumulates records into `ProducerRecord` instances to 33 | /// be sent to the server. 34 | pub struct RecordAccumulator<'a> { 35 | /// The size to use when allocating ProducerRecord instances 36 | batch_size: usize, 37 | 38 | /// The compression codec for the records 39 | compression: Compression, 40 | 41 | /// An artificial delay time to add before declaring a records instance that isn't full ready 42 | /// for sending. 43 | /// 44 | /// This allows time for more records to arrive. 45 | /// Setting a non-zero lingerMs will trade off some latency for potentially better throughput 46 | /// due to more batching (and hence fewer, larger requests). 
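    ///
    /// For example (illustrative values only), a small positive linger lets records
    /// coalesce into fewer, larger batches:
    ///
    /// ```ignore
    /// let accumulator = RecordAccumulator::new(16 * 1024, Compression::None, Duration::from_millis(5));
    /// ```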
47 | linger: Duration, 48 | 49 | batches: Rc, VecDeque>>>, 50 | } 51 | 52 | impl<'a> RecordAccumulator<'a> { 53 | pub fn new(batch_size: usize, compression: Compression, linger: Duration) -> Self { 54 | RecordAccumulator { 55 | batch_size, 56 | compression, 57 | linger, 58 | batches: Rc::new(RefCell::new(HashMap::new())), 59 | } 60 | } 61 | 62 | pub fn batches(&self, force: bool) -> Batches<'a> { 63 | Batches { 64 | batches: self.batches.clone(), 65 | linger: self.linger, 66 | force, 67 | } 68 | } 69 | } 70 | 71 | impl<'a> Accumulator<'a> for RecordAccumulator<'a> { 72 | fn push_record( 73 | &self, 74 | tp: TopicPartition<'a>, 75 | timestamp: Timestamp, 76 | key: Option, 77 | value: Option, 78 | api_version: ApiVersion, 79 | ) -> PushRecord { 80 | let mut batches = self.batches.borrow_mut(); 81 | let batches = batches.entry(tp).or_insert_with(VecDeque::new); 82 | 83 | if let Some(batch) = batches.back_mut() { 84 | match batch.push_record(timestamp, key.clone(), value.clone()) { 85 | Ok(push_recrod) => { 86 | trace!("pushed record to latest batch, {:?}", batch); 87 | 88 | return PushRecord::new(push_recrod, batch.is_full(), false); 89 | } 90 | Err(err) => { 91 | debug!("fail to push record, {}", err); 92 | } 93 | } 94 | } 95 | 96 | let mut batch = ProducerBatch::new(api_version, self.compression, self.batch_size); 97 | 98 | match batch.push_record(timestamp, key, value) { 99 | Ok(push_recrod) => { 100 | trace!("pushed record to a new batch, {:?}", batch); 101 | 102 | let batch_is_full = batch.is_full(); 103 | 104 | batches.push_back(batch); 105 | 106 | PushRecord::new(push_recrod, batch_is_full, true) 107 | } 108 | Err(err) => { 109 | warn!("fail to push record, {}", err); 110 | 111 | PushRecord::new(Err(err), false, true) 112 | } 113 | } 114 | } 115 | 116 | fn flush(&mut self) { 117 | trace!("flush all batches"); 118 | 119 | for (_, batches) in self.batches.borrow_mut().iter_mut() { 120 | let api_version = batches.back().map(|batch| batch.api_version()); 121 | 122 | if let Some(api_version) = api_version { 123 | batches.push_back(ProducerBatch::new(api_version, self.compression, self.batch_size)) 124 | } 125 | } 126 | } 127 | } 128 | 129 | pub struct PushRecord { 130 | future: StaticBoxFuture, 131 | is_full: bool, 132 | new_batch: bool, 133 | } 134 | 135 | impl PushRecord { 136 | pub fn new(future: F, is_full: bool, new_batch: bool) -> Self 137 | where 138 | F: IntoFuture + 'static, 139 | { 140 | PushRecord { 141 | future: future.static_boxed(), 142 | is_full, 143 | new_batch, 144 | } 145 | } 146 | 147 | pub fn is_full(&self) -> bool { 148 | self.is_full 149 | } 150 | 151 | pub fn new_batch(&self) -> bool { 152 | self.new_batch 153 | } 154 | } 155 | 156 | impl Future for PushRecord { 157 | type Item = RecordMetadata; 158 | type Error = Error; 159 | 160 | fn poll(&mut self) -> Poll { 161 | self.future.poll() 162 | } 163 | } 164 | 165 | pub struct Batches<'a> { 166 | batches: Rc, VecDeque>>>, 167 | linger: Duration, 168 | force: bool, 169 | } 170 | 171 | impl<'a> Stream for Batches<'a> { 172 | type Item = (TopicPartition<'a>, ProducerBatch); 173 | type Error = Error; 174 | 175 | fn poll(&mut self) -> Poll, Self::Error> { 176 | for (tp, batches) in self.batches.borrow_mut().iter_mut() { 177 | let ready = self.force || batches.back().map_or(false, |batch| { 178 | batch.is_full() || batch.create_time().elapsed() >= self.linger 179 | }); 180 | 181 | if ready { 182 | if let Some(batch) = batches.pop_front() { 183 | return Ok(Async::Ready(Some((tp.clone(), batch)))); 184 | } 185 | } 186 | } 187 | 
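        // Nothing to drain yet: either every queue is empty, or no batch is full or
        // past its linger deadline (and draining was not forced).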
188 | Ok(Async::NotReady) 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /src/producer/batch.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::hash::Hash; 3 | use std::ops::Deref; 4 | use std::rc::Rc; 5 | use std::time::Instant; 6 | 7 | use bytes::{BigEndian, Bytes}; 8 | 9 | use futures::unsync::oneshot::{channel, Canceled, Receiver, Sender}; 10 | use futures::{Async, Future, Poll}; 11 | 12 | use compression::Compression; 13 | use errors::{Error, ErrorKind, Result}; 14 | use producer::{ProducerInterceptor, ProducerInterceptors, RecordMetadata}; 15 | use protocol::{ApiVersion, KafkaCode, MessageSet, MessageSetBuilder, Offset, PartitionId, Timestamp}; 16 | 17 | #[derive(Debug)] 18 | pub struct Thunk { 19 | sender: Sender>, 20 | relative_offset: Offset, 21 | timestamp: Timestamp, 22 | key_size: usize, 23 | value_size: usize, 24 | } 25 | 26 | impl Thunk { 27 | pub fn fail(self, err: Error) -> ::std::result::Result<(), Result> { 28 | self.sender.send(Err(err)) 29 | } 30 | 31 | pub fn done( 32 | self, 33 | interceptors: Option>>>, 34 | topic_name: &str, 35 | partition_id: PartitionId, 36 | base_offset: Offset, 37 | error_code: KafkaCode, 38 | ) -> ::std::result::Result<(), Result> { 39 | let result = if error_code == KafkaCode::None { 40 | Ok(RecordMetadata { 41 | topic_name: topic_name.to_owned(), 42 | partition_id, 43 | offset: base_offset + self.relative_offset, 44 | timestamp: self.timestamp, 45 | serialized_key_size: self.key_size, 46 | serialized_value_size: self.value_size, 47 | }) 48 | } else { 49 | Err(ErrorKind::KafkaError(error_code).into()) 50 | }; 51 | 52 | if let Some(interceptors) = interceptors { 53 | (*interceptors).borrow().ack(&result); 54 | } 55 | 56 | self.sender.send(result) 57 | } 58 | } 59 | 60 | #[derive(Debug)] 61 | pub struct ProducerBatch { 62 | builder: MessageSetBuilder, 63 | thunks: Vec, 64 | create_time: Instant, 65 | last_push_time: Instant, 66 | } 67 | 68 | impl Deref for ProducerBatch { 69 | type Target = MessageSetBuilder; 70 | 71 | fn deref(&self) -> &Self::Target { 72 | &self.builder 73 | } 74 | } 75 | 76 | impl ProducerBatch { 77 | pub fn new(api_version: ApiVersion, compression: Compression, write_limit: usize) -> Self { 78 | let now = Instant::now(); 79 | 80 | ProducerBatch { 81 | builder: MessageSetBuilder::new(api_version, compression, write_limit, 0), 82 | thunks: vec![], 83 | create_time: now, 84 | last_push_time: now, 85 | } 86 | } 87 | 88 | pub fn create_time(&self) -> &Instant { 89 | &self.create_time 90 | } 91 | 92 | pub fn last_push_time(&self) -> &Instant { 93 | &self.last_push_time 94 | } 95 | 96 | pub fn push_record( 97 | &mut self, 98 | timestamp: Timestamp, 99 | key: Option, 100 | value: Option, 101 | ) -> Result { 102 | let key_size = key.as_ref().map_or(0, |b| b.len()); 103 | let value_size = value.as_ref().map_or(0, |b| b.len()); 104 | 105 | let relative_offset = self.builder.push(timestamp, key, value)?; 106 | 107 | let (sender, receiver) = channel(); 108 | 109 | self.thunks.push(Thunk { 110 | sender, 111 | relative_offset, 112 | timestamp, 113 | key_size, 114 | value_size, 115 | }); 116 | self.last_push_time = Instant::now(); 117 | 118 | Ok(FutureRecordMetadata { receiver }) 119 | } 120 | 121 | pub fn build(self) -> Result<(Vec, MessageSet)> { 122 | Ok((self.thunks, self.builder.build::()?)) 123 | } 124 | } 125 | 126 | pub struct FutureRecordMetadata { 127 | receiver: Receiver>, 128 | } 129 | 130 | impl 
Future for FutureRecordMetadata { 131 | type Item = RecordMetadata; 132 | type Error = Error; 133 | 134 | fn poll(&mut self) -> Poll { 135 | match self.receiver.poll() { 136 | Ok(Async::Ready(Ok(metadata))) => Ok(Async::Ready(metadata)), 137 | Ok(Async::Ready(Err(err))) => Err(err), 138 | Ok(Async::NotReady) => Ok(Async::NotReady), 139 | Err(Canceled) => bail!(ErrorKind::Canceled("produce record")), 140 | } 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /src/producer/config.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Deref, DerefMut}; 2 | use std::time::Duration; 3 | 4 | use client::ClientConfig; 5 | use compression::Compression; 6 | use protocol::RequiredAcks; 7 | 8 | /// The default amount of time the server will wait for acknowledgments 9 | /// 10 | /// Defaults to 30 seconds, see 11 | /// [`ProducerConfig::ack_timeout`](struct.ProducerConfig.html#ack_timeout.v) 12 | pub const DEFAULT_ACK_TIMEOUT_MILLIS: u64 = 30_000; 13 | 14 | /// The default bytes that producer will attempt to batch records together into fewer requests 15 | /// 16 | /// Defaults to 16 KB, see [`ProducerConfig::batch_size`](struct.ProducerConfig.html#batch_size.v) 17 | pub const DEFAULT_BATCH_SIZE: usize = 16 * 1024; 18 | 19 | /// The default maximum size of a request in bytes. 20 | /// 21 | /// Defaults to 1 MB, see 22 | /// [`ProducerConfig::max_request_size`](struct.ProducerConfig.html#max_request_size.v) 23 | pub const DEFAULT_MAX_REQUEST_SIZE: usize = 1024 * 1024; 24 | 25 | /// The default millionseconds that producer groups together any records 26 | /// that arrive in between request transmissions into a single batched request. 27 | /// 28 | /// Defaults to 0 ms, see [`ProducerConfig::linger`](struct.ProducerConfig.html#linger.v) 29 | pub const DEFAULT_LINGER_MILLIS: u64 = 0; 30 | 31 | /// Configuration for the `KafkaProducer`. 32 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 33 | #[serde(default)] 34 | pub struct ProducerConfig { 35 | pub client: ClientConfig, 36 | 37 | /// The number of acknowledgments the producer requires the leader 38 | /// to have received before considering a request complete. 39 | pub acks: RequiredAcks, 40 | 41 | /// The maximum amount of time the server will wait for acknowledgments 42 | /// from followers to meet the acknowledgment requirements 43 | #[serde(rename = "timeout.ms")] 44 | pub ack_timeout: u64, 45 | 46 | /// The compression type for all data generated by the producer. 47 | #[serde(rename = "compression.type")] 48 | pub compression: Compression, 49 | 50 | /// The producer will attempt to batch records together into fewer requests 51 | /// whenever multiple records are being sent to the same partition. 52 | #[serde(rename = "batch.size")] 53 | pub batch_size: usize, 54 | 55 | /// The maximum size of a request in bytes. 56 | #[serde(rename = "max.request.size")] 57 | pub max_request_size: usize, 58 | 59 | /// The producer groups together any records 60 | /// that arrive in between request transmissions into a single batched request. 
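    ///
    /// For example (illustrative only), trading a little latency for larger batches
    /// by lingering for 5 ms:
    ///
    /// ```ignore
    /// let config = ProducerConfig { linger: 5, ..Default::default() };
    /// assert_eq!(config.linger(), Duration::from_millis(5));
    /// ```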
61 | #[serde(rename = "linger.ms")] 62 | pub linger: u64, 63 | } 64 | 65 | impl Deref for ProducerConfig { 66 | type Target = ClientConfig; 67 | 68 | fn deref(&self) -> &Self::Target { 69 | &self.client 70 | } 71 | } 72 | 73 | impl DerefMut for ProducerConfig { 74 | fn deref_mut(&mut self) -> &mut Self::Target { 75 | &mut self.client 76 | } 77 | } 78 | 79 | impl Default for ProducerConfig { 80 | fn default() -> Self { 81 | ProducerConfig { 82 | client: ClientConfig::default(), 83 | acks: RequiredAcks::default(), 84 | ack_timeout: DEFAULT_ACK_TIMEOUT_MILLIS, 85 | compression: Compression::default(), 86 | batch_size: DEFAULT_BATCH_SIZE, 87 | max_request_size: DEFAULT_MAX_REQUEST_SIZE, 88 | linger: DEFAULT_LINGER_MILLIS, 89 | } 90 | } 91 | } 92 | 93 | impl ProducerConfig { 94 | /// Construct a `ProducerConfig` from bootstrap servers of the Kafka cluster 95 | pub fn with_bootstrap_servers(hosts: I) -> Self 96 | where 97 | I: IntoIterator, 98 | { 99 | ProducerConfig { 100 | client: ClientConfig::with_bootstrap_servers(hosts), 101 | ..Default::default() 102 | } 103 | } 104 | 105 | /// The producer groups together any records 106 | /// that arrive in between request transmissions into a single batched request. 107 | pub fn linger(&self) -> Duration { 108 | Duration::from_millis(self.linger) 109 | } 110 | 111 | /// The maximum amount of time the server will wait for acknowledgments 112 | /// from followers to meet the acknowledgment requirements 113 | pub fn ack_timeout(&self) -> Duration { 114 | Duration::from_millis(self.ack_timeout) 115 | } 116 | } 117 | 118 | #[cfg(test)] 119 | mod tests { 120 | extern crate serde_json; 121 | 122 | use super::*; 123 | 124 | #[test] 125 | fn test_properties() { 126 | let config = ProducerConfig::default(); 127 | 128 | assert_eq!(config.linger(), Duration::from_millis(DEFAULT_LINGER_MILLIS)); 129 | assert_eq!(config.ack_timeout(), Duration::from_millis(DEFAULT_ACK_TIMEOUT_MILLIS)); 130 | } 131 | 132 | #[test] 133 | fn test_serialize() { 134 | let config = ProducerConfig::default(); 135 | let json = r#"{ 136 | "client": { 137 | "bootstrap.servers": [], 138 | "client.id": null, 139 | "connection.max.idle.ms": 5000, 140 | "request.timeout.ms": 30000, 141 | "api.version.request": false, 142 | "broker.version.fallback": "0.9.0", 143 | "metadata.max.age.ms": 300000, 144 | "metrics": false, 145 | "retries": 0, 146 | "retry.backoff.ms": 100 147 | }, 148 | "acks": "one", 149 | "timeout.ms": 30000, 150 | "compression.type": "none", 151 | "batch.size": 16384, 152 | "max.request.size": 1048576, 153 | "linger.ms": 0 154 | }"#; 155 | 156 | assert_eq!(serde_json::to_string_pretty(&config).unwrap(), json); 157 | assert_eq!(serde_json::from_str::(json).unwrap(), config); 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/producer/interceptor.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::hash::Hash; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::rc::Rc; 5 | 6 | use errors::Result; 7 | 8 | use producer::{ProducerRecord, RecordMetadata}; 9 | 10 | pub type Interceptors = Option>>>; 11 | 12 | /// A trait for intercepting (and possibly mutate) the records 13 | /// received by the producer before they are published to the Kafka cluster. 
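// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of interceptor.rs): config.rs
// above ends here, so this shows one plausible way to build and tune a
// `ProducerConfig`. The `Item = String` bound on `with_bootstrap_servers` and
// the `tokio_kafka` re-export paths are assumptions; the field names, the
// millisecond units, and the `Duration` accessors come straight from config.rs.
use std::time::Duration;
use tokio_kafka::{ProducerConfig, RequiredAcks};

fn tuned_producer_config() -> ProducerConfig {
    let mut config = ProducerConfig::with_bootstrap_servers(vec!["localhost:9092".to_owned()]);
    config.acks = RequiredAcks::All; // wait for all in-sync replicas
    config.batch_size = 32 * 1024;   // batch up to 32 KB per partition
    config.linger = 5;               // wait up to 5 ms for more records
    assert_eq!(config.linger(), Duration::from_millis(5));
    config
}
// ---------------------------------------------------------------------------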
14 | pub trait ProducerInterceptor { 15 | /// The type of key 16 | type Key: Hash; 17 | /// The type of value 18 | type Value; 19 | 20 | /// This is called from [`KafkaProducer::send`](struct.KafkaProducer.html#send.v) method, 21 | /// before key and value get serialized and partition is assigned 22 | /// (if partition is not specified in ProducerRecord). 23 | fn send(&self, record: ProducerRecord) -> Result>; 24 | 25 | /// This method is called when the record sent to the server has been acknowledged, 26 | /// or when sending the record fails before it gets sent to the server. 27 | fn ack(&self, result: &Result); 28 | } 29 | 30 | pub struct ProducerInterceptors { 31 | interceptors: Vec>>, 32 | } 33 | 34 | impl Deref for ProducerInterceptors { 35 | type Target = Vec>>; 36 | 37 | fn deref(&self) -> &Self::Target { 38 | &self.interceptors 39 | } 40 | } 41 | 42 | impl DerefMut for ProducerInterceptors { 43 | fn deref_mut(&mut self) -> &mut Self::Target { 44 | &mut self.interceptors 45 | } 46 | } 47 | 48 | impl Default for ProducerInterceptors { 49 | fn default() -> Self { 50 | ProducerInterceptors { 51 | interceptors: Vec::new(), 52 | } 53 | } 54 | } 55 | 56 | impl ProducerInterceptors { 57 | pub fn new() -> Self { 58 | ProducerInterceptors::default() 59 | } 60 | } 61 | 62 | impl ProducerInterceptor for ProducerInterceptors 63 | where 64 | K: Hash, 65 | { 66 | type Key = K; 67 | type Value = V; 68 | 69 | fn send(&self, mut record: ProducerRecord) -> Result> { 70 | for interceptor in &self.interceptors { 71 | record = interceptor.send(record)?; 72 | } 73 | 74 | Ok(record) 75 | } 76 | 77 | fn ack(&self, result: &Result) { 78 | for interceptor in &self.interceptors { 79 | interceptor.ack(result); 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/producer/mod.rs: -------------------------------------------------------------------------------- 1 | mod accumulator; 2 | mod batch; 3 | mod builder; 4 | mod config; 5 | mod interceptor; 6 | mod partitioner; 7 | mod producer; 8 | mod record; 9 | mod sender; 10 | 11 | pub use self::accumulator::{Accumulator, PushRecord, RecordAccumulator}; 12 | pub use self::batch::{ProducerBatch, Thunk}; 13 | pub use self::builder::ProducerBuilder; 14 | pub use self::config::{ProducerConfig, DEFAULT_ACK_TIMEOUT_MILLIS, DEFAULT_BATCH_SIZE, DEFAULT_LINGER_MILLIS, 15 | DEFAULT_MAX_REQUEST_SIZE}; 16 | pub use self::interceptor::{Interceptors, ProducerInterceptor, ProducerInterceptors}; 17 | pub use self::partitioner::{DefaultPartitioner, Partitioner}; 18 | pub use self::producer::{Flush, GetTopic, KafkaProducer, Producer, ProducerPartition, ProducerTopic, SendRecord}; 19 | pub use self::record::{ProducerRecord, RecordMetadata}; 20 | pub use self::sender::{SendBatch, Sender}; 21 | -------------------------------------------------------------------------------- /src/producer/partitioner.rs: -------------------------------------------------------------------------------- 1 | use std::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher}; 2 | use std::sync::atomic::{AtomicUsize, Ordering}; 3 | 4 | use twox_hash::XxHash; 5 | 6 | use client::{Cluster, Metadata}; 7 | use protocol::PartitionId; 8 | 9 | /// A trait for choosing a partition for a message to be sent to Kafka. 10 | pub trait Partitioner { 11 | /// Compute the partition for the given record. 
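// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of partitioner.rs): this shows
// a minimal implementation of the `ProducerInterceptor` trait defined in
// interceptor.rs above. The `ProducerRecord<Self::Key, Self::Value>` and
// `Result<RecordMetadata>` signatures and the import paths are assumptions,
// since this dump dropped the original angle-bracketed parameters.
use tokio_kafka::{ProducerInterceptor, ProducerRecord, RecordMetadata, Result};

struct AuditInterceptor;

impl ProducerInterceptor for AuditInterceptor {
    type Key = String;
    type Value = String;

    fn send(&self, record: ProducerRecord<String, String>) -> Result<ProducerRecord<String, String>> {
        // The record may be mutated here; this sketch just passes it through.
        println!("sending record to topic `{}`", record.topic_name);
        Ok(record)
    }

    fn ack(&self, result: &Result<RecordMetadata>) {
        match *result {
            Ok(ref metadata) => println!("acked at offset {}", metadata.offset),
            Err(ref err) => println!("record failed: {}", err),
        }
    }
}
// ---------------------------------------------------------------------------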
12 | fn partition( 13 | &self, 14 | topic_name: &str, 15 | partition_id: Option, 16 | key: Option<&K>, 17 | value: Option<&V>, 18 | metadata: &Metadata, 19 | ) -> Option; 20 | } 21 | 22 | pub type DefaultHasher = XxHash; 23 | 24 | /// The default partitioning strategy 25 | /// 26 | /// - If a partition is specified in the record, use it 27 | /// - If no partition is specified but a key is present choose a partition based on a hash of the 28 | /// key 29 | /// - If no partition or key is present choose a partition in a round-robin fashion 30 | #[derive(Default)] 31 | pub struct DefaultPartitioner> { 32 | hash_builder: H, 33 | records: AtomicUsize, 34 | } 35 | 36 | impl DefaultPartitioner { 37 | /// Create a `DefaultPartitioner` with the default hasher. 38 | pub fn new() -> DefaultPartitioner> { 39 | Default::default() 40 | } 41 | 42 | /// Create a `DefaultPartitioner` with the special hasher. 43 | pub fn with_hasher(hash_builder: B) -> DefaultPartitioner { 44 | DefaultPartitioner { 45 | hash_builder, 46 | records: AtomicUsize::new(0), 47 | } 48 | } 49 | 50 | fn records(&self) -> usize { 51 | self.records.load(Ordering::Relaxed) 52 | } 53 | } 54 | 55 | impl Partitioner for DefaultPartitioner 56 | where 57 | H: BuildHasher, 58 | { 59 | fn partition( 60 | &self, 61 | topic_name: &str, 62 | partition_id: Option, 63 | key: Option<&K>, 64 | _value: Option<&V>, 65 | metadata: &Metadata, 66 | ) -> Option { 67 | if let Some(partition_id) = partition_id { 68 | if partition_id >= 0 { 69 | // If a partition is specified in the record, use it 70 | return Some(partition_id); 71 | } 72 | } 73 | 74 | // TODO: use available partitions for topic in cluster 75 | if let Some(partitions) = metadata.partitions_for_topic(topic_name) { 76 | let index = if let Some(key) = key { 77 | // If no partition is specified but a key is present choose a partition based on a 78 | // hash of the key 79 | let mut hasher = self.hash_builder.build_hasher(); 80 | key.hash(&mut hasher); 81 | hasher.finish() as usize 82 | } else { 83 | // If no partition or key is present choose a partition in a round-robin fashion 84 | self.records.fetch_add(1, Ordering::Relaxed) 85 | } % partitions.len(); 86 | 87 | trace!( 88 | "partition record to #{} base on {}", 89 | index, 90 | key.map_or("round-robin", |_| "hash-key") 91 | ); 92 | 93 | Some(partitions[index].partition_id) 94 | } else { 95 | warn!( 96 | "missed partitions info for topic `{}`, fallback to partition #0", 97 | topic_name 98 | ); 99 | 100 | None 101 | } 102 | } 103 | } 104 | 105 | #[cfg(test)] 106 | mod tests { 107 | use super::*; 108 | use client::PartitionInfo; 109 | 110 | #[test] 111 | fn test_skip_partitioning() { 112 | let metadata = Metadata::default(); 113 | let partitioner = DefaultPartitioner::new(); 114 | 115 | // partition without topics 116 | assert_eq!( 117 | partitioner.partition("topic", None, Some("key").as_ref(), Some("value").as_ref(), &metadata,), 118 | None 119 | ); 120 | } 121 | 122 | #[test] 123 | fn test_key_partitioning() { 124 | let partitions = (0..3) 125 | .map(|id| PartitionInfo { 126 | partition_id: id, 127 | ..Default::default() 128 | }) 129 | .collect(); 130 | let metadata = Metadata::with_topics(vec![("topic".to_owned(), partitions)]); 131 | 132 | let partitioner = DefaultPartitioner::new(); 133 | 134 | // partition with key 135 | assert!( 136 | partitioner 137 | .partition("topic", None, Some("key").as_ref(), Some("value").as_ref(), &metadata) 138 | .is_some() 139 | ); 140 | 141 | // partition without key 142 | for id in 0..100 { 143 | assert_eq!( 
144 | partitioner.partition::<(), &str>("topic", None, None, Some("value").as_ref(), &metadata,), 145 | Some(id % 3) 146 | ); 147 | } 148 | 149 | assert_eq!(partitioner.records(), 100); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/producer/record.rs: -------------------------------------------------------------------------------- 1 | use std::hash::Hash; 2 | 3 | use client::{PartitionRecord, TopicRecord}; 4 | use protocol::{Offset, PartitionId, Timestamp}; 5 | 6 | /// A key/value pair to be sent to Kafka. 7 | /// 8 | /// This consists of a topic name to which the record is being sent, 9 | /// an optional partition number, and an optional key and value. 10 | #[derive(Clone, Debug)] 11 | pub struct ProducerRecord 12 | where 13 | K: Hash, 14 | { 15 | /// The topic this record is being sent to 16 | pub topic_name: String, 17 | /// The partition to which the record will be sent (or `None` if no partition was specified) 18 | pub partition_id: Option, 19 | /// The key (or `None` if no key is specified) 20 | pub key: Option, 21 | /// The value 22 | pub value: Option, 23 | /// The timestamp 24 | pub timestamp: Option, 25 | } 26 | 27 | impl ProducerRecord 28 | where 29 | K: Hash, 30 | { 31 | /// Creates a record to be sent to a specified topic with no value 32 | pub fn from_key>(topic_name: S, key: K) -> Self { 33 | ProducerRecord { 34 | topic_name: topic_name.as_ref().to_owned(), 35 | partition_id: None, 36 | key: Some(key), 37 | value: None, 38 | timestamp: None, 39 | } 40 | } 41 | } 42 | 43 | impl ProducerRecord<(), V> { 44 | /// Creates a record to be sent to a specified topic with no key 45 | pub fn from_value>(topic_name: S, value: V) -> Self { 46 | ProducerRecord { 47 | topic_name: topic_name.as_ref().to_owned(), 48 | partition_id: None, 49 | key: None, 50 | value: Some(value), 51 | timestamp: None, 52 | } 53 | } 54 | } 55 | 56 | impl ProducerRecord 57 | where 58 | K: Hash, 59 | { 60 | /// Creates a record to be sent to a specified topic 61 | pub fn from_key_value>(topic_name: S, key: K, value: V) -> Self { 62 | ProducerRecord { 63 | topic_name: topic_name.as_ref().to_owned(), 64 | partition_id: None, 65 | key: Some(key), 66 | value: Some(value), 67 | timestamp: None, 68 | } 69 | } 70 | 71 | pub fn from_partition_record>( 72 | topic_name: S, 73 | partition_id: Option, 74 | record: PartitionRecord, 75 | ) -> Self { 76 | ProducerRecord { 77 | topic_name: topic_name.as_ref().to_owned(), 78 | partition_id, 79 | key: record.key, 80 | value: record.value, 81 | timestamp: record.timestamp, 82 | } 83 | } 84 | 85 | pub fn from_topic_record>(topic_name: S, record: TopicRecord) -> Self { 86 | ProducerRecord { 87 | topic_name: topic_name.as_ref().to_owned(), 88 | partition_id: record.partition_id, 89 | key: record.key, 90 | value: record.value, 91 | timestamp: record.timestamp, 92 | } 93 | } 94 | 95 | /// Creates a record with partition to be sent 96 | pub fn with_partition(mut self, partition_id: PartitionId) -> Self { 97 | self.partition_id = Some(partition_id); 98 | self 99 | } 100 | 101 | /// Creates a record with a specified timestamp to be sent 102 | pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { 103 | self.timestamp = Some(timestamp); 104 | self 105 | } 106 | } 107 | 108 | /// The metadata for a record that has been acknowledged by the server 109 | #[derive(Clone, Debug, Default)] 110 | pub struct RecordMetadata { 111 | /// The topic the record was appended to 112 | pub topic_name: String, 113 | /// The partition the 
record was sent to 114 | pub partition_id: PartitionId, 115 | /// The offset of the record in the topic/partition. 116 | pub offset: Offset, 117 | /// The timestamp of the record in the topic/partition. 118 | pub timestamp: Timestamp, 119 | /// The size of the serialized, uncompressed key in bytes. 120 | pub serialized_key_size: usize, 121 | /// The size of the serialized, uncompressed value in bytes. 122 | pub serialized_value_size: usize, 123 | } 124 | -------------------------------------------------------------------------------- /src/producer/sender.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::{Borrow, Cow}; 2 | use std::cell::RefCell; 3 | use std::hash::Hash; 4 | use std::rc::Rc; 5 | use std::time::Duration; 6 | 7 | use futures::Future; 8 | 9 | use client::{Client, KafkaClient, StaticBoxFuture, ToStaticBoxFuture}; 10 | use errors::Result; 11 | use network::TopicPartition; 12 | use producer::{Interceptors, ProducerBatch, Thunk}; 13 | use protocol::{MessageSet, RequiredAcks}; 14 | 15 | pub struct Sender<'a, K, V> { 16 | client: KafkaClient<'a>, 17 | interceptors: Interceptors, 18 | acks: RequiredAcks, 19 | ack_timeout: Duration, 20 | tp: TopicPartition<'a>, 21 | thunks: Rc>>>, 22 | message_set: MessageSet, 23 | } 24 | 25 | pub type SendBatch = StaticBoxFuture; 26 | 27 | impl<'a, K, V> Sender<'a, K, V> 28 | where 29 | K: Hash, 30 | Self: 'static, 31 | { 32 | pub fn new( 33 | client: KafkaClient<'a>, 34 | interceptors: Interceptors, 35 | acks: RequiredAcks, 36 | ack_timeout: Duration, 37 | tp: TopicPartition<'a>, 38 | batch: ProducerBatch, 39 | ) -> Result> { 40 | let (thunks, message_set) = batch.build()?; 41 | 42 | Ok(Sender { 43 | client, 44 | interceptors, 45 | acks, 46 | ack_timeout, 47 | tp, 48 | thunks: Rc::new(RefCell::new(Some(thunks))), 49 | message_set, 50 | }) 51 | } 52 | 53 | pub fn send_batch(&self) -> SendBatch { 54 | trace!("sending batch to {:?}: {:?}", self.tp, self.message_set); 55 | 56 | let topic_name: String = String::from(self.tp.topic_name.borrow()); 57 | let partition_id = self.tp.partition_id; 58 | let acks = self.acks; 59 | let ack_timeout = self.ack_timeout; 60 | let message_set = Cow::Owned(self.message_set.clone()); 61 | let thunks = self.thunks.clone(); 62 | let thunks1 = self.thunks.clone(); 63 | let interceptors = self.interceptors.clone(); 64 | 65 | self.client 66 | .produce_records( 67 | acks, 68 | ack_timeout, 69 | topic_partition!(topic_name.clone(), partition_id), 70 | vec![message_set], 71 | ) 72 | .map(move |responses| { 73 | responses.get(&topic_name).map(|partitions| { 74 | partitions 75 | .iter() 76 | .find(|partition| partition.partition_id == partition_id) 77 | .map(|partition| { 78 | if let Some(thunks) = (*thunks).borrow_mut().take() { 79 | for thunk in thunks { 80 | match thunk.done( 81 | interceptors.clone(), 82 | &topic_name, 83 | partition.partition_id, 84 | partition.base_offset, 85 | partition.error_code, 86 | ) { 87 | Ok(()) => {} 88 | Err(metadata) => warn!("fail to send record metadata, {:?}", metadata), 89 | } 90 | } 91 | } 92 | }); 93 | }); 94 | }) 95 | .map_err(move |err| { 96 | if let Some(thunks) = (*thunks1).borrow_mut().take() { 97 | for thunk in thunks { 98 | if let Err(err) = thunk.fail(format!("{}", err).into()) { 99 | warn!("fail to send error to thunk, {:?}", err); 100 | } 101 | } 102 | } 103 | err 104 | }) 105 | .static_boxed() 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/protocol/api_key.rs: 
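// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, inserted just before api_key.rs): it
// shows the builder-style construction of a `ProducerRecord` from record.rs
// above. The generic arguments and the re-export path are assumptions.
use tokio_kafka::ProducerRecord;

fn example_record() -> ProducerRecord<String, String> {
    ProducerRecord::from_key_value("logs", "host-1".to_owned(), "service started".to_owned())
        .with_partition(0)                 // pin the record to partition 0
        .with_timestamp(1_500_000_000_000) // milliseconds since the epoch
}
// ---------------------------------------------------------------------------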
-------------------------------------------------------------------------------- 1 | use std::mem; 2 | 3 | /// This is a numeric id for the API being invoked (i.e. is it a metadata request, a produce 4 | /// request, a fetch request, etc). 5 | /// 6 | /// See [`ApiKeys`](enum.ApiKeys.html) 7 | pub type ApiKey = i16; 8 | 9 | /// Identifiers for all the Kafka APIs 10 | /// 11 | /// The following are the numeric codes that the `ApiKey` in the request can take for each of the 12 | /// below request types. 13 | /// 14 | /// See [`ApiKey`](type.ApiKey.html) 15 | #[derive(Debug, Copy, Clone, PartialEq)] 16 | #[repr(i16)] 17 | pub enum ApiKeys { 18 | Produce, 19 | Fetch, 20 | ListOffsets, 21 | Metadata, 22 | LeaderAndIsr, 23 | StopReplica, 24 | UpdateMetadata, 25 | ControlledShutdown, 26 | OffsetCommit, 27 | OffsetFetch, 28 | GroupCoordinator, // ConsumerMetadata, 29 | JoinGroup, 30 | Heartbeat, 31 | LeaveGroup, 32 | SyncGroup, 33 | DescribeGroups, 34 | ListGroups, 35 | SaslHandshake, 36 | ApiVersions, 37 | CreateTopics, 38 | DeleteTopics, 39 | DeleteRecords, 40 | InitProducerId, 41 | OffsetForLeaderEpoch, 42 | AddPartitionsToTxn, 43 | AddOffsetsToTxn, 44 | EndTxn, 45 | WriteTxnMarkers, 46 | TxnOffsetCommit, 47 | DescribeAcls, 48 | CreateAcls, 49 | DeleteAcls, 50 | DescribeConfigs, 51 | AlterConfigs, 52 | AlterReplicaLogDirs, 53 | DescribeLogDirs, 54 | SaslAuthenticate, 55 | CreatePartitions, 56 | CreateDelegationToken, 57 | RenewDelegationToken, 58 | ExpireDelegationToken, 59 | DescribeDelegationToken, 60 | DeleteGroups, 61 | } 62 | 63 | impl ApiKeys { 64 | /// Gets the key value. 65 | pub fn key(&self) -> ApiKey { 66 | unsafe { mem::transmute(*self) } 67 | } 68 | 69 | /// Gets the name. 70 | pub fn name(&self) -> &'static str { 71 | match *self { 72 | ApiKeys::Produce => "Produce", 73 | ApiKeys::Fetch => "Fetch", 74 | ApiKeys::ListOffsets => "ListOffsets", 75 | ApiKeys::Metadata => "Metadata", 76 | ApiKeys::LeaderAndIsr => "LeaderAndIsr", 77 | ApiKeys::StopReplica => "StopReplica", 78 | ApiKeys::UpdateMetadata => "UpdateMetadata", 79 | ApiKeys::ControlledShutdown => "ControlledShutdown", 80 | ApiKeys::OffsetCommit => "OffsetCommit", 81 | ApiKeys::OffsetFetch => "OffsetFetch", 82 | ApiKeys::GroupCoordinator => "GroupCoordinator", 83 | ApiKeys::JoinGroup => "JoinGroup", 84 | ApiKeys::Heartbeat => "Heartbeat", 85 | ApiKeys::LeaveGroup => "LeaveGroup", 86 | ApiKeys::SyncGroup => "SyncGroup", 87 | ApiKeys::DescribeGroups => "DescribeGroups", 88 | ApiKeys::ListGroups => "ListGroups", 89 | ApiKeys::SaslHandshake => "SaslHandshake", 90 | ApiKeys::ApiVersions => "ApiVersions", 91 | ApiKeys::CreateTopics => "CreateTopics", 92 | ApiKeys::DeleteTopics => "DeleteTopics", 93 | ApiKeys::DeleteRecords => "DeleteRecords", 94 | ApiKeys::InitProducerId => "InitProducerId", 95 | ApiKeys::OffsetForLeaderEpoch => "OffsetForLeaderEpoch", 96 | ApiKeys::AddPartitionsToTxn => "AddPartitionsToTxn", 97 | ApiKeys::AddOffsetsToTxn => "AddOffsetsToTxn", 98 | ApiKeys::EndTxn => "EndTxn", 99 | ApiKeys::WriteTxnMarkers => "WriteTxnMarkers", 100 | ApiKeys::TxnOffsetCommit => "TxnOffsetCommit", 101 | ApiKeys::DescribeAcls => "DescribeAcls", 102 | ApiKeys::CreateAcls => "CreateAcls", 103 | ApiKeys::DeleteAcls => "DeleteAcls", 104 | ApiKeys::DescribeConfigs => "DescribeConfigs", 105 | ApiKeys::AlterConfigs => "AlterConfigs", 106 | ApiKeys::AlterReplicaLogDirs => "AlterReplicaLogDirs", 107 | ApiKeys::DescribeLogDirs => "DescribeLogDirs", 108 | ApiKeys::SaslAuthenticate => "SaslAuthenticate", 109 | ApiKeys::CreatePartitions => 
"CreatePartitions", 110 | ApiKeys::CreateDelegationToken => "CreateDelegationToken", 111 | ApiKeys::RenewDelegationToken => "RenewDelegationToken", 112 | ApiKeys::ExpireDelegationToken => "ExpireDelegationToken", 113 | ApiKeys::DescribeDelegationToken => "DescribeDelegationToken", 114 | ApiKeys::DeleteGroups => "DeleteGroups", 115 | } 116 | } 117 | } 118 | 119 | impl From for ApiKeys { 120 | fn from(v: ApiKey) -> Self { 121 | unsafe { mem::transmute(v) } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/protocol/encode.rs: -------------------------------------------------------------------------------- 1 | use std::i16; 2 | use std::i32; 3 | use std::str; 4 | 5 | use bytes::{BufMut, ByteOrder, BytesMut}; 6 | 7 | use errors::{ErrorKind, Result}; 8 | 9 | pub const STR_LEN_SIZE: usize = 2; 10 | pub const BYTES_LEN_SIZE: usize = 4; 11 | pub const ARRAY_LEN_SIZE: usize = 4; 12 | pub const REPLICA_ID_SIZE: usize = 4; 13 | pub const PARTITION_ID_SIZE: usize = 4; 14 | pub const TIMESTAMP_SIZE: usize = 8; 15 | pub const OFFSET_SIZE: usize = 8; 16 | 17 | pub trait Encodable { 18 | fn encode(&self, buf: &mut BytesMut) -> Result<()>; 19 | } 20 | 21 | pub trait WriteExt: BufMut + Sized { 22 | fn put_str>(&mut self, s: Option) -> Result<()> { 23 | match s.as_ref() { 24 | Some(v) if v.as_ref().len() > i16::MAX as usize => { 25 | bail!(ErrorKind::EncodeError("string exceeds the maximum size.")) 26 | } 27 | Some(v) => { 28 | self.put_i16::(v.as_ref().len() as i16); 29 | 30 | if !v.as_ref().is_empty() { 31 | self.put_slice(v.as_ref().as_bytes()); 32 | } 33 | } 34 | _ => { 35 | self.put_i16::(-1); 36 | } 37 | } 38 | 39 | Ok(()) 40 | } 41 | 42 | fn put_bytes>(&mut self, d: Option) -> Result<()> { 43 | match d.as_ref() { 44 | Some(v) if v.as_ref().len() > i32::MAX as usize => { 45 | bail!(ErrorKind::EncodeError("bytes exceeds the maximum size.")) 46 | } 47 | Some(v) => { 48 | self.put_i32::(v.as_ref().len() as i32); 49 | 50 | if !v.as_ref().is_empty() { 51 | self.put_slice(v.as_ref()); 52 | } 53 | } 54 | _ => { 55 | self.put_i32::(-1); 56 | } 57 | } 58 | 59 | Ok(()) 60 | } 61 | 62 | fn put_array(&mut self, items: &[E], mut callback: F) -> Result<()> 63 | where 64 | T: ByteOrder, 65 | F: FnMut(&mut Self, &E) -> Result<()>, 66 | { 67 | if items.len() > i32::MAX as usize { 68 | bail!(ErrorKind::EncodeError("array exceeds the maximum size.")) 69 | } 70 | 71 | self.put_i32::(items.len() as i32); 72 | 73 | for item in items { 74 | callback(self, item)?; 75 | } 76 | 77 | Ok(()) 78 | } 79 | } 80 | 81 | impl WriteExt for T {} 82 | 83 | #[cfg(test)] 84 | mod tests { 85 | use std::iter::repeat; 86 | use std::slice; 87 | 88 | use bytes::BigEndian; 89 | 90 | use super::*; 91 | 92 | #[test] 93 | fn nullable_str() { 94 | let mut buf = vec![]; 95 | 96 | // write empty nullable string 97 | buf.put_str::(Some("")).unwrap(); 98 | 99 | assert_eq!(buf.as_slice(), &[0, 0]); 100 | 101 | buf.clear(); 102 | 103 | // write null of nullable string 104 | buf.put_str::(None).unwrap(); 105 | 106 | assert_eq!(buf.as_slice(), &[255, 255]); 107 | 108 | buf.clear(); 109 | 110 | // write nullable string 111 | buf.put_str::(Some("test")).unwrap(); 112 | 113 | assert_eq!(buf.as_slice(), &[0, 4, 116, 101, 115, 116]); 114 | 115 | buf.clear(); 116 | 117 | // write encoded nullable string 118 | buf.put_str::(Some("测试")).unwrap(); 119 | 120 | assert_eq!(buf.as_slice(), &[0, 6, 230, 181, 139, 232, 175, 149]); 121 | 122 | buf.clear(); 123 | 124 | // write too long nullable string 125 | let s = 
repeat(20).take(i16::MAX as usize + 1).collect::>(); 126 | 127 | assert!( 128 | buf.put_str::(Some(String::from_utf8(s).unwrap())) 129 | .err() 130 | .is_some() 131 | ); 132 | } 133 | 134 | #[test] 135 | fn nullable_bytes() { 136 | let mut buf = vec![]; 137 | 138 | // write empty nullable bytes 139 | buf.put_bytes::(Some(&b""[..])).unwrap(); 140 | 141 | assert_eq!(buf.as_slice(), &[0, 0, 0, 0]); 142 | 143 | buf.clear(); 144 | 145 | // write null of nullable bytes 146 | buf.put_bytes::(None).unwrap(); 147 | 148 | assert_eq!(buf.as_slice(), &[255, 255, 255, 255]); 149 | 150 | buf.clear(); 151 | 152 | // write nullable bytes 153 | buf.put_bytes::(Some(&b"test"[..])).unwrap(); 154 | 155 | assert_eq!(buf.as_slice(), &[0, 0, 0, 4, 116, 101, 115, 116]); 156 | 157 | buf.clear(); 158 | 159 | // write too long nullable bytes 160 | let s = unsafe { slice::from_raw_parts(buf.as_ptr(), i32::MAX as usize + 1) }; 161 | 162 | assert!(buf.put_bytes::(Some(s)).err().is_some()); 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/protocol/header.rs: -------------------------------------------------------------------------------- 1 | use bytes::{BufMut, ByteOrder, BytesMut}; 2 | use std::borrow::Cow; 3 | 4 | use nom::be_i32; 5 | 6 | use errors::Result; 7 | use protocol::{ApiKey, ApiVersion, CorrelationId, Encodable, ParseTag, Record, WriteExt, STR_LEN_SIZE}; 8 | 9 | const API_KEY_SIZE: usize = 2; 10 | const API_VERSION_SIZE: usize = 2; 11 | const CORRELATION_ID_SIZE: usize = 4; 12 | const HEADER_OVERHEAD: usize = API_KEY_SIZE + API_VERSION_SIZE + CORRELATION_ID_SIZE; 13 | 14 | #[derive(Clone, Debug, PartialEq)] 15 | pub struct RequestHeader<'a> { 16 | pub api_key: ApiKey, 17 | pub api_version: ApiVersion, 18 | pub correlation_id: CorrelationId, 19 | pub client_id: Option>, 20 | } 21 | 22 | impl<'a> Record for RequestHeader<'a> { 23 | fn size(&self, _api_version: ApiVersion) -> usize { 24 | HEADER_OVERHEAD + STR_LEN_SIZE + self.client_id.as_ref().map_or(0, |s| s.len()) 25 | } 26 | } 27 | 28 | impl<'a> Encodable for RequestHeader<'a> { 29 | fn encode(&self, buf: &mut BytesMut) -> Result<()> { 30 | buf.put_i16::(self.api_key); 31 | buf.put_i16::(self.api_version); 32 | buf.put_i32::(self.correlation_id); 33 | buf.put_str::(self.client_id.as_ref()) 34 | } 35 | } 36 | 37 | #[derive(Clone, Debug, PartialEq)] 38 | pub struct ResponseHeader { 39 | pub correlation_id: CorrelationId, 40 | } 41 | 42 | named!(pub parse_response_header, 43 | parse_tag!(ParseTag::ResponseHeader, do_parse!( 44 | correlation_id: be_i32 45 | >> (ResponseHeader { 46 | correlation_id, 47 | }) 48 | )) 49 | ); 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | 54 | use super::*; 55 | use bytes::BigEndian; 56 | use protocol::*; 57 | 58 | #[test] 59 | fn test_request_header() { 60 | let hdr = RequestHeader { 61 | api_key: ApiKeys::Fetch as ApiKey, 62 | api_version: 2, 63 | correlation_id: 123, 64 | client_id: Some("test".into()), 65 | }; 66 | 67 | let mut buf = BytesMut::with_capacity(64); 68 | 69 | hdr.encode::(&mut buf).unwrap(); 70 | 71 | assert_eq!(hdr.size(hdr.api_version), buf.len()); 72 | 73 | assert_eq!( 74 | &buf[..], 75 | &[ 76 | 0, 1 /* api_key */, 0, 2 /* api_version */, 0, 0, 0, 123 /* correlation_id */, 0, 4, 77 | 116, 101, 115, 116, 78 | ] 79 | ); 80 | 81 | let bytes = &[0, 0, 0, 123]; 82 | let res = parse_response_header(bytes); 83 | 84 | assert!(res.is_done()); 85 | 86 | let (remaning, hdr) = res.unwrap(); 87 | 88 | assert_eq!(remaning, b""); 89 | assert_eq!(hdr.correlation_id, 
123); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/protocol/metadata.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | 3 | use bytes::{ByteOrder, BytesMut}; 4 | 5 | use nom::{IResult, be_i16, be_i32}; 6 | 7 | use errors::Result; 8 | use protocol::{parse_response_header, parse_string, ApiVersion, Encodable, ErrorCode, NodeId, ParseTag, PartitionId, 9 | Record, RequestHeader, ResponseHeader, WriteExt, ARRAY_LEN_SIZE, STR_LEN_SIZE}; 10 | 11 | #[derive(Clone, Debug, PartialEq)] 12 | pub struct MetadataRequest<'a> { 13 | pub header: RequestHeader<'a>, 14 | pub topic_names: Vec>, 15 | } 16 | 17 | impl<'a> Record for MetadataRequest<'a> { 18 | fn size(&self, api_version: ApiVersion) -> usize { 19 | self.header.size(api_version) + self.topic_names.iter().fold(ARRAY_LEN_SIZE, |size, topic_name| { 20 | size + STR_LEN_SIZE + topic_name.len() 21 | }) 22 | } 23 | } 24 | 25 | impl<'a> Encodable for MetadataRequest<'a> { 26 | fn encode(&self, dst: &mut BytesMut) -> Result<()> { 27 | self.header.encode::(dst)?; 28 | 29 | dst.put_array::(&self.topic_names, |buf, topic_name| { 30 | buf.put_str::(Some(topic_name.as_ref())) 31 | })?; 32 | 33 | Ok(()) 34 | } 35 | } 36 | 37 | #[derive(Clone, Debug, PartialEq)] 38 | pub struct MetadataResponse { 39 | pub header: ResponseHeader, 40 | pub brokers: Vec, 41 | pub topics: Vec, 42 | } 43 | 44 | #[derive(Clone, Debug, PartialEq)] 45 | pub struct BrokerMetadata { 46 | pub node_id: NodeId, 47 | pub host: String, 48 | pub port: i32, 49 | } 50 | 51 | #[derive(Clone, Debug, PartialEq)] 52 | pub struct TopicMetadata { 53 | pub error_code: ErrorCode, 54 | pub topic_name: String, 55 | pub partitions: Vec, 56 | } 57 | 58 | #[derive(Clone, Debug, PartialEq)] 59 | pub struct PartitionMetadata { 60 | pub error_code: ErrorCode, 61 | pub partition_id: PartitionId, 62 | pub leader: NodeId, 63 | pub replicas: Vec, 64 | pub isr: Vec, 65 | } 66 | 67 | impl MetadataResponse { 68 | pub fn parse(buf: &[u8]) -> IResult<&[u8], Self> { 69 | parse_metadata_response(buf) 70 | } 71 | } 72 | 73 | named!( 74 | parse_metadata_response, 75 | parse_tag!( 76 | ParseTag::MetadataResponse, 77 | do_parse!( 78 | header: parse_response_header >> brokers: length_count!(be_i32, parse_broker_metadata) 79 | >> topics: length_count!(be_i32, parse_topic_metadata) >> (MetadataResponse { 80 | header, 81 | brokers, 82 | topics, 83 | }) 84 | ) 85 | ) 86 | ); 87 | 88 | named!( 89 | parse_broker_metadata, 90 | parse_tag!( 91 | ParseTag::BrokerMetadata, 92 | do_parse!(node_id: be_i32 >> host: parse_string >> port: be_i32 >> (BrokerMetadata { node_id, host, port })) 93 | ) 94 | ); 95 | 96 | named!( 97 | parse_topic_metadata, 98 | parse_tag!( 99 | ParseTag::TopicMetadata, 100 | do_parse!( 101 | error_code: be_i16 >> topic_name: parse_string 102 | >> partitions: length_count!(be_i32, parse_partition_metadata) >> (TopicMetadata { 103 | error_code, 104 | topic_name, 105 | partitions, 106 | }) 107 | ) 108 | ) 109 | ); 110 | 111 | named!( 112 | parse_partition_metadata, 113 | parse_tag!( 114 | ParseTag::PartitionMetadata, 115 | do_parse!( 116 | error_code: be_i16 >> partition_id: be_i32 >> leader: be_i32 >> replicas: length_count!(be_i32, be_i32) 117 | >> isr: length_count!(be_i32, be_i32) >> (PartitionMetadata { 118 | error_code, 119 | partition_id, 120 | leader, 121 | replicas, 122 | isr, 123 | }) 124 | ) 125 | ) 126 | ); 127 | 128 | #[cfg(test)] 129 | mod tests { 130 | use bytes::{BigEndian, 
BytesMut}; 131 | 132 | use nom::IResult; 133 | 134 | use super::*; 135 | use protocol::*; 136 | 137 | lazy_static!{ 138 | static ref TEST_REQUEST_DATA: Vec = vec![ 139 | // ProduceRequest 140 | // RequestHeader 141 | 0, 3, // api_key 142 | 0, 0, // api_version 143 | 0, 0, 0, 123, // correlation_id 144 | 0, 6, 99, 108, 105, 101, 110, 116, // client_id 145 | // topic_names: [String] 146 | 0, 0, 0, 1, 147 | 0, 5, b't', b'o', b'p', b'i', b'c', // topic_name 148 | ]; 149 | 150 | static ref TEST_RESPONSE_DATA: Vec = vec![ 151 | // ResponseHeader 152 | 0, 0, 0, 123, // correlation_id 153 | // brokers: [BrokerMetadata] 154 | 0, 0, 0, 1, 155 | 0, 0, 0, 1, // node_id 156 | 0, 4, b'h', b'o', b's', b't', // host 157 | 0, 0, 0, 80, // port 158 | // topics: [TopicMetadata] 159 | 0, 0, 0, 1, 160 | 0, 2, // error_code 161 | 0, 5, b't', b'o', b'p', b'i', b'c', // topic_name 162 | // partitions: [PartitionMetadata] 163 | 0, 0, 0, 1, 164 | 0, 3, // error_code 165 | 0, 0, 0, 4, // partition_id 166 | 0, 0, 0, 5, // leader 167 | // replicas: [ReplicaId] 168 | 0, 0, 0, 1, 169 | 0, 0, 0, 6, 170 | // isr: [i32] 171 | 0, 0, 0, 1, 172 | 0, 0, 0, 7, 173 | ]; 174 | 175 | static ref TEST_RESPONSE: MetadataResponse = MetadataResponse { 176 | header: ResponseHeader { correlation_id: 123 }, 177 | brokers: vec![BrokerMetadata { 178 | node_id: 1, 179 | host: "host".to_owned(), 180 | port: 80, 181 | }], 182 | topics: vec![TopicMetadata { 183 | error_code: 2, 184 | topic_name: "topic".to_owned(), 185 | partitions: vec![PartitionMetadata { 186 | error_code: 3, 187 | partition_id: 4, 188 | leader: 5, 189 | replicas: vec![6], 190 | isr: vec![7], 191 | }], 192 | }], 193 | }; 194 | } 195 | 196 | #[test] 197 | fn test_encode_metadata_request() { 198 | let req = MetadataRequest { 199 | header: RequestHeader { 200 | api_key: ApiKeys::Metadata as ApiKey, 201 | api_version: 0, 202 | correlation_id: 123, 203 | client_id: Some("client".into()), 204 | }, 205 | topic_names: vec!["topic".into()], 206 | }; 207 | 208 | let mut buf = BytesMut::with_capacity(128); 209 | 210 | req.encode::(&mut buf).unwrap(); 211 | 212 | assert_eq!(req.size(req.header.api_version), buf.len()); 213 | 214 | assert_eq!(&buf[..], &TEST_REQUEST_DATA[..]); 215 | } 216 | 217 | #[test] 218 | fn test_parse_metadata_response() { 219 | assert_eq!( 220 | parse_metadata_response(TEST_RESPONSE_DATA.as_slice()), 221 | IResult::Done(&[][..], TEST_RESPONSE.clone()) 222 | ); 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /src/protocol/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_camel_case_types)] 2 | 3 | use std::mem; 4 | use std::str::FromStr; 5 | use std::time::Duration; 6 | 7 | use time::Timespec; 8 | 9 | use errors::{Error, ErrorKind, Result}; 10 | 11 | mod api_key; 12 | mod code; 13 | mod encode; 14 | #[macro_use] 15 | mod parse; 16 | mod api_versions; 17 | mod fetch; 18 | mod group; 19 | mod header; 20 | mod list_offset; 21 | mod message; 22 | mod metadata; 23 | mod offset_commit; 24 | mod offset_fetch; 25 | mod produce; 26 | mod schema; 27 | 28 | pub use self::api_key::{ApiKey, ApiKeys}; 29 | pub use self::api_versions::{ApiVersionsRequest, ApiVersionsResponse, UsableApiVersion, UsableApiVersions, SUPPORTED_API_VERSIONS}; 30 | pub use self::code::{ErrorCode, KafkaCode}; 31 | pub use self::encode::{Encodable, WriteExt, ARRAY_LEN_SIZE, BYTES_LEN_SIZE, OFFSET_SIZE, PARTITION_ID_SIZE, 32 | REPLICA_ID_SIZE, STR_LEN_SIZE, TIMESTAMP_SIZE}; 33 | pub use 
self::fetch::{FetchPartition, FetchRequest, FetchResponse, FetchTopic, FetchTopicData, 34 | DEFAULT_RESPONSE_MAX_BYTES}; 35 | pub use self::group::{DescribeGroupsRequest, DescribeGroupsResponse, GroupCoordinatorRequest, 36 | GroupCoordinatorResponse, HeartbeatRequest, HeartbeatResponse, JoinGroupMember, 37 | JoinGroupProtocol, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, 38 | ListGroupsRequest, ListGroupsResponse, SyncGroupAssignment, SyncGroupRequest, SyncGroupResponse}; 39 | pub use self::header::{parse_response_header, RequestHeader, ResponseHeader}; 40 | pub use self::list_offset::{FetchOffset, ListOffsetRequest, ListOffsetResponse, ListPartitionOffset, ListTopicOffset, 41 | EARLIEST_TIMESTAMP, LATEST_TIMESTAMP}; 42 | pub use self::message::{parse_message_set, Message, MessageSet, MessageSetBuilder, MessageSetEncoder, 43 | MessageTimestamp, RecordFormat}; 44 | pub use self::metadata::{BrokerMetadata, MetadataRequest, MetadataResponse, PartitionMetadata, TopicMetadata}; 45 | pub use self::offset_commit::{OffsetCommitPartition, OffsetCommitRequest, OffsetCommitResponse, OffsetCommitTopic}; 46 | pub use self::offset_fetch::{OffsetFetchPartition, OffsetFetchRequest, OffsetFetchResponse, OffsetFetchTopic}; 47 | pub use self::parse::{display_parse_error, parse_bytes, parse_opt_bytes, parse_opt_str, parse_opt_string, parse_str, 48 | parse_string, ParseTag, PARSE_TAGS}; 49 | pub use self::produce::{ProducePartitionData, ProduceRequest, ProduceResponse, ProduceTopicData}; 50 | pub use self::schema::{Nullable, Schema, SchemaType, VarInt, VarLong}; 51 | 52 | /// Normal client consumers should always specify this as -1 as they have no 53 | /// node id. 54 | pub const CONSUMER_REPLICA_ID: ReplicaId = -1; 55 | /// The value -2 is accepted to allow a non-broker to issue fetch requests as if it were a replica 56 | /// broker for debugging purposes. 57 | pub const DEBUGGING_REPLICA_ID: ReplicaId = -2; 58 | 59 | pub const DEFAULT_TIMESTAMP: Timestamp = -1; 60 | 61 | /// This is a numeric version number for this api. 62 | /// 63 | /// We version each API and this version number allows the server to properly interpret the request 64 | /// as the protocol evolves. Responses will always be in the format corresponding to the request 65 | /// version. 66 | pub type ApiVersion = i16; 67 | 68 | /// This is a user-supplied integer. 69 | /// 70 | /// It will be passed back in the response by the server, unmodified. 71 | /// It is useful for matching request and response between the client and server. 72 | pub type CorrelationId = i32; 73 | 74 | /// The partition id. 75 | pub type PartitionId = i32; 76 | 77 | /// This is the offset used in kafka as the log sequence number. 78 | pub type Offset = i64; 79 | 80 | /// This is the timestamp of the message. 81 | /// 82 | /// The timestamp type is indicated in the attributes. 83 | /// Unit is milliseconds since beginning of the epoch (midnight Jan 1, 1970 (UTC)). 84 | pub type Timestamp = i64; 85 | 86 | /// The broker id. 87 | pub type NodeId = i32; 88 | 89 | /// Broker id of the follower. 90 | pub type ReplicaId = i32; 91 | 92 | /// The number of acknowledgments the producer 93 | /// requires the leader to have received before considering a request complete. 94 | pub type RequiredAck = i16; 95 | 96 | /// The generation of the group. 97 | pub type GenerationId = i32; 98 | 99 | /// Possible choices on acknowledgement requirements when producing/sending 100 | /// messages to Kafka. 
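// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of protocol/mod.rs): it
// exercises the `RequiredAcks` conversions defined just below — the enum can
// be parsed from its lowercase name, converted from the raw i16 wire value,
// and defaults to `One`. The crate-root re-export is an assumption.
use tokio_kafka::RequiredAcks;

fn required_acks_conversions() {
    let parsed: RequiredAcks = "all".parse().unwrap();
    assert_eq!(parsed, RequiredAcks::All);
    assert_eq!(RequiredAcks::from(-1i16), RequiredAcks::All);
    assert_eq!(RequiredAcks::default(), RequiredAcks::One);
}
// ---------------------------------------------------------------------------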
101 | #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] 102 | #[serde(rename_all = "lowercase")] 103 | #[repr(i16)] 104 | pub enum RequiredAcks { 105 | /// Indicates to the receiving Kafka broker not to acknowlegde 106 | /// messages sent to it at all. Sending messages with this 107 | /// acknowledgement requirement translates into a fire-and-forget 108 | /// scenario which - of course - is very fast but not reliable. 109 | None = 0, 110 | /// Requires the receiving Kafka broker to wait until the sent 111 | /// messages are written to local disk. Such messages can be 112 | /// regarded as acknowledged by one broker in the cluster. 113 | One = 1, 114 | /// Requires the sent messages to be acknowledged by all in-sync 115 | /// replicas of the targeted topic partitions. 116 | All = -1, 117 | } 118 | 119 | impl Default for RequiredAcks { 120 | fn default() -> Self { 121 | RequiredAcks::One 122 | } 123 | } 124 | 125 | impl From for RequiredAcks { 126 | fn from(v: RequiredAck) -> Self { 127 | unsafe { mem::transmute(v) } 128 | } 129 | } 130 | 131 | impl FromStr for RequiredAcks { 132 | type Err = Error; 133 | 134 | fn from_str(s: &str) -> Result { 135 | match s.to_lowercase().as_str() { 136 | "none" => Ok(RequiredAcks::None), 137 | "one" => Ok(RequiredAcks::One), 138 | "all" => Ok(RequiredAcks::All), 139 | _ => bail!(ErrorKind::ParseError(format!("unknown required acks: {}", s),)), 140 | } 141 | } 142 | } 143 | 144 | pub trait Record { 145 | fn size(&self, api_version: ApiVersion) -> usize; 146 | } 147 | 148 | /// A trait for converting a value to a milliseconds. 149 | pub trait ToMilliseconds { 150 | fn as_millis(&self) -> u64; 151 | } 152 | 153 | impl ToMilliseconds for Duration { 154 | fn as_millis(&self) -> u64 { 155 | self.as_secs() * 1000 + u64::from(self.subsec_nanos()) / 1_000_000 156 | } 157 | } 158 | 159 | impl ToMilliseconds for Timespec { 160 | fn as_millis(&self) -> u64 { 161 | self.sec as u64 * 1000 + self.nsec as u64 / 1_000_000 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/protocol/offset_fetch.rs: -------------------------------------------------------------------------------- 1 | use bytes::{BufMut, ByteOrder, BytesMut}; 2 | use std::borrow::Cow; 3 | 4 | use nom::{IResult, be_i16, be_i32, be_i64}; 5 | 6 | use errors::Result; 7 | use protocol::{parse_opt_string, parse_response_header, parse_string, ApiVersion, Encodable, ErrorCode, Offset, 8 | ParseTag, PartitionId, Record, RequestHeader, ResponseHeader, WriteExt, ARRAY_LEN_SIZE, 9 | PARTITION_ID_SIZE, STR_LEN_SIZE}; 10 | 11 | #[derive(Clone, Debug, PartialEq)] 12 | pub struct OffsetFetchRequest<'a> { 13 | pub header: RequestHeader<'a>, 14 | /// The group id. 15 | pub group_id: Cow<'a, str>, 16 | /// Topic to fetch. 17 | pub topics: Vec>, 18 | } 19 | 20 | #[derive(Clone, Debug, PartialEq)] 21 | pub struct OffsetFetchTopic<'a> { 22 | /// The name of the topic. 23 | pub topic_name: Cow<'a, str>, 24 | /// Partitions to fetch offset. 25 | pub partitions: Vec, 26 | } 27 | 28 | #[derive(Clone, Debug, PartialEq)] 29 | pub struct OffsetFetchPartition { 30 | /// The id of the partition the fetch is for. 31 | pub partition_id: PartitionId, 32 | } 33 | 34 | #[derive(Clone, Debug, PartialEq)] 35 | pub struct OffsetFetchResponse { 36 | pub header: ResponseHeader, 37 | /// Topics to fetch offsets. 38 | pub topics: Vec, 39 | } 40 | 41 | #[derive(Clone, Debug, PartialEq)] 42 | pub struct OffsetFetchTopicStatus { 43 | /// The name of the topic. 
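// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of offset_fetch.rs): it shows
// the `ToMilliseconds` helper from protocol/mod.rs above, implemented for both
// `Duration` and `Timespec`. The re-export path is an assumption.
use std::time::Duration;
use tokio_kafka::ToMilliseconds;

fn timeout_in_millis() {
    let timeout = Duration::new(1, 500_000_000);
    assert_eq!(ToMilliseconds::as_millis(&timeout), 1_500);
}
// ---------------------------------------------------------------------------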
44 | pub topic_name: String, 45 | /// Partitions to fetch offset. 46 | pub partitions: Vec, 47 | } 48 | 49 | #[derive(Clone, Debug, PartialEq)] 50 | pub struct OffsetFetchPartitionStatus { 51 | /// The id of the partition the fetch is for. 52 | pub partition_id: PartitionId, 53 | /// Last committed message offset. 54 | pub offset: Offset, 55 | /// Any associated metadata the client wants to keep. 56 | pub metadata: Option, 57 | /// Error code. 58 | pub error_code: ErrorCode, 59 | } 60 | 61 | impl<'a> Record for OffsetFetchRequest<'a> { 62 | fn size(&self, api_version: ApiVersion) -> usize { 63 | self.header.size(api_version) + STR_LEN_SIZE + self.group_id.len() 64 | + self.topics.iter().fold(ARRAY_LEN_SIZE, |size, topic| { 65 | size + STR_LEN_SIZE + topic.topic_name.len() 66 | + topic 67 | .partitions 68 | .iter() 69 | .fold(ARRAY_LEN_SIZE, |size, _| size + PARTITION_ID_SIZE) 70 | }) 71 | } 72 | } 73 | 74 | impl<'a> Encodable for OffsetFetchRequest<'a> { 75 | fn encode(&self, dst: &mut BytesMut) -> Result<()> { 76 | self.header.encode::(dst)?; 77 | 78 | dst.put_str::(Some(self.group_id.as_ref()))?; 79 | dst.put_array::(&self.topics, |buf, topic| { 80 | buf.put_str::(Some(topic.topic_name.as_ref()))?; 81 | buf.put_array::(&topic.partitions, |buf, partition| { 82 | buf.put_i32::(partition.partition_id); 83 | Ok(()) 84 | }) 85 | }) 86 | } 87 | } 88 | 89 | impl OffsetFetchResponse { 90 | pub fn parse(buf: &[u8]) -> IResult<&[u8], Self> { 91 | parse_offset_fetch_response(buf) 92 | } 93 | } 94 | 95 | named!( 96 | parse_offset_fetch_response, 97 | parse_tag!( 98 | ParseTag::OffsetFetchResponse, 99 | do_parse!( 100 | header: parse_response_header >> topics: length_count!(be_i32, parse_offset_fetch_topic_status) 101 | >> (OffsetFetchResponse { header, topics }) 102 | ) 103 | ) 104 | ); 105 | 106 | named!( 107 | parse_offset_fetch_topic_status, 108 | parse_tag!( 109 | ParseTag::OffsetFetchTopicStatus, 110 | do_parse!( 111 | topic_name: parse_string >> partitions: length_count!(be_i32, parse_offset_fetch_partition_status) 112 | >> (OffsetFetchTopicStatus { topic_name, partitions }) 113 | ) 114 | ) 115 | ); 116 | 117 | named!( 118 | parse_offset_fetch_partition_status, 119 | parse_tag!( 120 | ParseTag::OffsetFetchPartitionStatus, 121 | do_parse!( 122 | partition_id: be_i32 >> offset: be_i64 >> metadata: parse_opt_string >> error_code: be_i16 123 | >> (OffsetFetchPartitionStatus { 124 | partition_id, 125 | offset, 126 | metadata, 127 | error_code, 128 | }) 129 | ) 130 | ) 131 | ); 132 | 133 | #[cfg(test)] 134 | mod tests { 135 | use super::*; 136 | use bytes::BigEndian; 137 | 138 | use nom::IResult; 139 | use protocol::*; 140 | 141 | #[test] 142 | fn test_encode_offset_fetch_request() { 143 | let req = OffsetFetchRequest { 144 | header: RequestHeader { 145 | api_key: ApiKeys::OffsetFetch as ApiKey, 146 | api_version: 0, 147 | correlation_id: 123, 148 | client_id: Some("client".into()), 149 | }, 150 | group_id: "consumer".into(), 151 | topics: vec![ 152 | OffsetFetchTopic { 153 | topic_name: "topic".into(), 154 | partitions: vec![OffsetFetchPartition { partition_id: 1 }], 155 | }, 156 | ], 157 | }; 158 | 159 | let data = vec![ 160 | /* OffsetFetchRequest 161 | * RequestHeader */ 0, 9 /* api_key */, 0, 162 | 0 /* api_version */, 0, 0, 0, 123 /* correlation_id */, 0, 6, b'c', b'l', b'i', b'e', b'n', 163 | b't' /* client_id */, 0, 8, b'c', b'o', b'n', b's', b'u', b'm', b'e', b'r' /* group_id */, 164 | /* topics: [OffsetFetchTopic] */ 0, 0, 0, 1, /* OffsetFetchTopic */ 0, 5, b't', b'o', b'p', b'i', 165 | 
b'c' /* topic_name */, /* partitions: [OffsetFetchPartition] */ 0, 0, 0, 1, 166 | /* OffsetFetchPartition */ 0, 0, 0, 1 /* partition */, 167 | ]; 168 | 169 | let mut buf = BytesMut::with_capacity(128); 170 | 171 | req.encode::(&mut buf).unwrap(); 172 | 173 | assert_eq!(req.size(req.header.api_version), buf.len()); 174 | 175 | assert_eq!(&buf[..], &data[..]); 176 | } 177 | 178 | #[test] 179 | fn test_parse_offset_fetch_response() { 180 | let response = OffsetFetchResponse { 181 | header: ResponseHeader { correlation_id: 123 }, 182 | topics: vec![ 183 | OffsetFetchTopicStatus { 184 | topic_name: "topic".to_owned(), 185 | partitions: vec![ 186 | OffsetFetchPartitionStatus { 187 | partition_id: 1, 188 | offset: 2, 189 | metadata: Some("metadata".to_owned()), 190 | error_code: 3, 191 | }, 192 | ], 193 | }, 194 | ], 195 | }; 196 | 197 | let data = vec![ 198 | /* ResponseHeader */ 0, 0, 0, 123 /* correlation_id */, 199 | /* topics: [OffsetCommitTopicStatus] */ 0, 0, 0, 1, 0, 5, b't', b'o', b'p', b'i', 200 | b'c' /* topic_name */, /* partitions: [OffsetCommitPartitionStatus] */ 0, 0, 0, 1, 0, 0, 0, 201 | 1 /* partition */, 0, 0, 0, 0, 0, 0, 0, 2 /* offset */, 0, 8, b'm', b'e', b't', b'a', b'd', b'a', 202 | b't', b'a' /* metadata */, 0, 3 /* error_code */, 203 | ]; 204 | 205 | let res = parse_offset_fetch_response(&data[..]); 206 | 207 | display_parse_error::<_>(&data[..], res.clone()); 208 | 209 | assert_eq!(res, IResult::Done(&[][..], response)); 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/serialization/bytes.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use bytes::buf::FromBuf; 4 | use bytes::{Buf, BufMut, Bytes, IntoBuf}; 5 | 6 | use errors::{Error, ErrorKind, Result}; 7 | use serialization::{Deserializer, Serializer}; 8 | 9 | /// Serialize `Buf` like type to it's raw bytes 10 | #[derive(Clone, Debug, Default)] 11 | pub struct BytesSerializer { 12 | phantom: PhantomData, 13 | } 14 | 15 | impl Serializer for BytesSerializer 16 | where 17 | T: IntoBuf, 18 | B: Buf, 19 | { 20 | type Item = T; 21 | type Error = Error; 22 | 23 | fn serialize_to(&self, _topic_name: &str, data: Self::Item, buf: &mut M) -> Result<()> { 24 | buf.put(data.into_buf()); 25 | Ok(()) 26 | } 27 | 28 | fn serialize(&self, _topic_name: &str, data: Self::Item) -> Result { 29 | Ok(Bytes::from_buf(data.into_buf())) 30 | } 31 | } 32 | 33 | /// Deserialize `Buf` like type from it's raw bytes 34 | #[derive(Clone, Debug, Default)] 35 | pub struct BytesDeserializer { 36 | phantom: PhantomData, 37 | } 38 | 39 | impl Deserializer for BytesDeserializer 40 | where 41 | T: BufMut, 42 | { 43 | type Item = T; 44 | type Error = Error; 45 | 46 | fn deserialize_to(&self, _topic_name: &str, buf: &mut B, data: &mut Self::Item) -> Result<()> { 47 | let len = buf.remaining(); 48 | if len > data.remaining_mut() { 49 | bail!(ErrorKind::EncodeError("buffer too small")); 50 | } 51 | data.put_slice(buf.bytes()); 52 | buf.advance(len); 53 | Ok(()) 54 | } 55 | } 56 | 57 | #[cfg(test)] 58 | mod tests { 59 | use std::io::Cursor; 60 | 61 | use super::*; 62 | use serialization::Serializer; 63 | 64 | #[test] 65 | fn test_seraizlie() { 66 | let serializer = BytesSerializer::default(); 67 | let mut buf = Vec::new(); 68 | let data = Vec::from("data"); 69 | 70 | serializer.serialize_to("topic", data.as_slice(), &mut buf).unwrap(); 71 | 72 | assert_eq!(&buf, &data); 73 | 74 | assert_eq!( 75 | serializer.serialize("topic", 
data.as_slice()).unwrap(), 76 | Bytes::from(data) 77 | ); 78 | } 79 | 80 | #[test] 81 | fn test_deserialize() { 82 | let deserializer = BytesDeserializer::default(); 83 | let data = Vec::from("data"); 84 | let mut cur = Cursor::new(data.clone()); 85 | let mut buf = Vec::new(); 86 | 87 | deserializer.deserialize_to("topic", &mut cur, &mut buf).unwrap(); 88 | 89 | assert_eq!(cur.position(), 4); 90 | assert_eq!(&buf, &data); 91 | 92 | cur.set_position(0); 93 | 94 | assert_eq!(deserializer.deserialize("topic", &mut cur).unwrap(), data); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/serialization/encoding.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use bytes::{Buf, BufMut}; 4 | 5 | use encoding::{ByteWriter, DecoderTrap, EncoderTrap, Encoding}; 6 | 7 | use errors::{Error, Result}; 8 | use serialization::{Deserializer, Serializer}; 9 | 10 | struct BufWriter(B) 11 | where 12 | B: BufMut; 13 | 14 | impl ByteWriter for BufWriter 15 | where 16 | B: BufMut, 17 | { 18 | fn write_byte(&mut self, b: u8) { 19 | self.0.put_u8(b) 20 | } 21 | 22 | fn write_bytes(&mut self, v: &[u8]) { 23 | self.0.put_slice(v) 24 | } 25 | } 26 | 27 | /// Serialize `String` base on the special encoding 28 | #[derive(Clone, Debug)] 29 | pub struct StrEncodingSerializer { 30 | encoding: E, 31 | phantom: PhantomData, 32 | } 33 | 34 | impl StrEncodingSerializer { 35 | pub fn new(encoding: E) -> Self { 36 | StrEncodingSerializer { 37 | encoding, 38 | phantom: PhantomData, 39 | } 40 | } 41 | } 42 | 43 | impl Serializer for StrEncodingSerializer 44 | where 45 | E: Encoding, 46 | T: AsRef, 47 | { 48 | type Item = T; 49 | type Error = Error; 50 | 51 | fn serialize_to(&self, _topic_name: &str, data: Self::Item, buf: &mut B) -> Result<()> { 52 | let mut w = BufWriter(buf); 53 | 54 | self.encoding.encode_to(data.as_ref(), EncoderTrap::Strict, &mut w)?; 55 | 56 | Ok(()) 57 | } 58 | } 59 | 60 | /// Deserialize `String` base on the special encoding 61 | #[derive(Clone, Debug)] 62 | pub struct StrEncodingDeserializer { 63 | encoding: E, 64 | phantom: PhantomData, 65 | } 66 | 67 | impl StrEncodingDeserializer { 68 | pub fn new(encoding: E) -> Self { 69 | StrEncodingDeserializer { 70 | encoding, 71 | phantom: PhantomData, 72 | } 73 | } 74 | } 75 | 76 | impl Deserializer for StrEncodingDeserializer 77 | where 78 | E: Encoding, 79 | T: BufMut, 80 | { 81 | type Item = T; 82 | type Error = Error; 83 | 84 | fn deserialize_to(&self, _topic_name: &str, buf: &mut B, data: &mut Self::Item) -> Result<()> { 85 | let len = buf.remaining(); 86 | data.put_slice(self.encoding.decode(buf.bytes(), DecoderTrap::Strict)?.as_bytes()); 87 | buf.advance(len); 88 | Ok(()) 89 | } 90 | } 91 | 92 | #[cfg(test)] 93 | mod tests { 94 | use std::io::Cursor; 95 | 96 | use bytes::Bytes; 97 | 98 | use encoding::codec::simpchinese::GB18030_ENCODING; 99 | 100 | use super::*; 101 | 102 | #[test] 103 | fn test_seraizlie() { 104 | let serializer = StrEncodingSerializer::new(GB18030_ENCODING); 105 | let mut buf = Vec::new(); 106 | let data = vec![178, 226, 202, 212]; 107 | 108 | serializer.serialize_to("topic", "测试", &mut buf).unwrap(); 109 | 110 | assert_eq!(&buf, &data); 111 | 112 | assert_eq!(serializer.serialize("topic", "测试").unwrap(), Bytes::from(data)); 113 | } 114 | 115 | #[test] 116 | fn test_deserialize() { 117 | let deserializer = StrEncodingDeserializer::new(GB18030_ENCODING); 118 | let data = vec![178, 226, 202, 212]; 119 | let 
mut cur = Cursor::new(data.clone()); 120 | let mut buf = Vec::new(); 121 | 122 | deserializer.deserialize_to("topic", &mut cur, &mut buf).unwrap(); 123 | 124 | assert_eq!(cur.position(), 4); 125 | assert_eq!(buf.as_slice(), "测试".as_bytes()); 126 | 127 | cur.set_position(0); 128 | 129 | assert_eq!( 130 | deserializer.deserialize("topic", &mut cur).unwrap(), 131 | "测试".as_bytes() 132 | ); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/serialization/json.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use std::str; 3 | 4 | use bytes::{Buf, BufMut}; 5 | 6 | use serde; 7 | use serde_json::{self, Value}; 8 | 9 | use errors::{Error, Result}; 10 | use serialization::{Deserializer, Serializer}; 11 | 12 | /// Serialize `String` with UTF-8 encoding 13 | #[derive(Clone, Debug, Default)] 14 | pub struct JsonSerializer { 15 | pretty: bool, 16 | phantom: PhantomData, 17 | } 18 | 19 | impl JsonSerializer { 20 | pub fn pretty() -> Self { 21 | JsonSerializer { 22 | pretty: true, 23 | phantom: PhantomData, 24 | } 25 | } 26 | } 27 | 28 | impl Serializer for JsonSerializer 29 | where 30 | T: serde::Serialize, 31 | { 32 | type Item = T; 33 | type Error = Error; 34 | 35 | fn serialize_to(&self, _topic_name: &str, data: Self::Item, buf: &mut B) -> Result<()> { 36 | let to_vec = if self.pretty { 37 | serde_json::to_vec_pretty 38 | } else { 39 | serde_json::to_vec 40 | }; 41 | 42 | buf.put_slice(&to_vec(&data)?); 43 | Ok(()) 44 | } 45 | } 46 | 47 | /// Deserialize `String` as UTF-8 encoding 48 | #[derive(Clone, Debug, Default)] 49 | pub struct JsonDeserializer { 50 | phantom: PhantomData, 51 | } 52 | 53 | impl<'de, T> Deserializer for JsonDeserializer 54 | where 55 | T: serde::Deserialize<'de>, 56 | { 57 | type Item = T; 58 | type Error = Error; 59 | 60 | fn deserialize_to(&self, _topic_name: &str, buf: &mut B, data: &mut Self::Item) -> Result<()> { 61 | let len = buf.remaining(); 62 | let v: Value = serde_json::from_slice(buf.bytes())?; 63 | *data = T::deserialize(v)?; 64 | buf.advance(len); 65 | Ok(()) 66 | } 67 | } 68 | 69 | #[cfg(test)] 70 | mod tests { 71 | use std::io::Cursor; 72 | use std::time::Duration; 73 | 74 | use bytes::Bytes; 75 | 76 | use super::*; 77 | 78 | #[test] 79 | fn test_seraizlie() { 80 | let serializer = JsonSerializer::default(); 81 | let mut buf = Vec::new(); 82 | let d = Duration::new(123, 456); 83 | let json = r#"{"secs":123,"nanos":456}"#; 84 | 85 | serializer.serialize_to("topic", d, &mut buf).unwrap(); 86 | 87 | assert_eq!(str::from_utf8(&buf).unwrap(), json); 88 | 89 | assert_eq!(serializer.serialize("topic", d).unwrap(), Bytes::from(json)); 90 | } 91 | 92 | #[test] 93 | fn test_deserialize() { 94 | let deserializer = JsonDeserializer::default(); 95 | let data = r#"{"secs":123,"nanos":456}"#; 96 | let d = Duration::new(123, 456); 97 | let mut cur = Cursor::new(data.clone()); 98 | let mut s = Duration::default(); 99 | 100 | deserializer.deserialize_to("topic", &mut cur, &mut s).unwrap(); 101 | 102 | assert_eq!(cur.position() as usize, data.len()); 103 | assert_eq!(s, d); 104 | 105 | cur.set_position(0); 106 | 107 | assert_eq!(deserializer.deserialize("topic", &mut cur).unwrap(), d); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/serialization/mod.rs: -------------------------------------------------------------------------------- 1 | mod bytes; 2 | mod noop; 3 | mod raw; 4 | mod str; 5 | 6 
| pub use self::bytes::{BytesDeserializer, BytesSerializer}; 7 | pub use self::noop::{NoopDeserializer, NoopSerializer}; 8 | pub use self::raw::{RawDeserializer, RawSerializer}; 9 | pub use self::str::{StringDeserializer, StringSerializer}; 10 | 11 | #[cfg(feature = "encoding")] 12 | mod encoding; 13 | #[cfg(feature = "encoding")] 14 | pub use self::encoding::{StrEncodingDeserializer, StrEncodingSerializer}; 15 | 16 | #[cfg(feature = "json")] 17 | mod json; 18 | #[cfg(feature = "json")] 19 | pub use self::json::{JsonDeserializer, JsonSerializer}; 20 | 21 | use std::mem; 22 | use std::result::Result; 23 | 24 | use bytes::buf::FromBuf; 25 | use bytes::{Buf, BufMut, Bytes}; 26 | 27 | /// A trait for serializing type to Kafka record 28 | pub trait Serializer { 29 | /// The type of value that this serializer will serialize. 30 | type Item; 31 | /// The type of error that this serializer will return if it fails. 32 | type Error; 33 | 34 | /// Serizalize data of topic to the given buffer 35 | fn serialize_to(&self, topic_name: &str, data: Self::Item, buf: &mut B) -> Result<(), Self::Error>; 36 | 37 | /// Serialize data of topic as `Bytes` 38 | fn serialize(&self, topic_name: &str, data: Self::Item) -> Result { 39 | let mut buf = Vec::with_capacity(16); 40 | self.serialize_to(topic_name, data, &mut buf)?; 41 | Ok(Bytes::from_buf(buf)) 42 | } 43 | } 44 | 45 | /// A trait for deserializing type from Kafka record 46 | pub trait Deserializer { 47 | /// The type of value that this deserializer will deserialize. 48 | type Item; 49 | /// The type of error that this deserializer will return if it fails. 50 | type Error; 51 | 52 | /// Deserizalize data of topic from the given buffer 53 | fn deserialize_to(&self, topic_name: &str, buf: &mut B, data: &mut Self::Item) -> Result<(), Self::Error>; 54 | 55 | fn deserialize(&self, topic_name: &str, buf: &mut B) -> Result { 56 | let mut data = unsafe { mem::zeroed() }; 57 | 58 | self.deserialize_to(topic_name, buf, &mut data)?; 59 | 60 | Ok(data) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/serialization/noop.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use bytes::{Buf, BufMut, Bytes}; 4 | 5 | use errors::{Error, Result}; 6 | use serialization::{Deserializer, Serializer}; 7 | 8 | /// Serialize type to nothing 9 | #[derive(Clone, Debug, Default)] 10 | pub struct NoopSerializer { 11 | phantom: PhantomData, 12 | } 13 | 14 | impl Serializer for NoopSerializer { 15 | type Item = T; 16 | type Error = Error; 17 | 18 | fn serialize_to(&self, _topic_name: &str, _data: Self::Item, _buf: &mut B) -> Result<()> { 19 | Ok(()) 20 | } 21 | 22 | fn serialize(&self, _topic_name: &str, _data: Self::Item) -> Result { 23 | Ok(Bytes::new()) 24 | } 25 | } 26 | 27 | /// Deserialize type from nothing 28 | #[derive(Clone, Debug, Default)] 29 | pub struct NoopDeserializer { 30 | phantom: PhantomData, 31 | } 32 | 33 | impl Deserializer for NoopDeserializer { 34 | type Item = T; 35 | type Error = Error; 36 | 37 | fn deserialize_to(&self, _topic_name: &str, buf: &mut B, _data: &mut Self::Item) -> Result<()> { 38 | let len = buf.remaining(); 39 | buf.advance(len); 40 | Ok(()) 41 | } 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use std::io::Cursor; 47 | 48 | use super::*; 49 | 50 | #[test] 51 | fn test_seraizlie() { 52 | let serializer = NoopSerializer::default(); 53 | let mut buf = Vec::new(); 54 | 55 | serializer.serialize_to("topic", "data", &mut 
56 |
57 |         assert!(buf.is_empty());
58 |
59 |         let data = serializer.serialize("topic", "data").unwrap();
60 |
61 |         assert!(data.is_empty());
62 |     }
63 |
64 |     #[test]
65 |     fn test_deserialize() {
66 |         let deserializer = NoopDeserializer::default();
67 |         let mut cur = Cursor::new(Vec::from("data"));
68 |         let mut s = "";
69 |
70 |         deserializer.deserialize_to("topic", &mut cur, &mut s).unwrap();
71 |
72 |         assert_eq!(cur.position(), 4);
73 |         assert!(s.is_empty());
74 |
75 |         assert!(deserializer.deserialize("topic", &mut cur).unwrap().is_empty());
76 |     }
77 | }
78 |
--------------------------------------------------------------------------------
/src/serialization/raw.rs:
--------------------------------------------------------------------------------
1 | use std::marker::PhantomData;
2 | use std::mem;
3 | use std::ptr;
4 | use std::slice;
5 |
6 | use bytes::buf::FromBuf;
7 | use bytes::{Buf, BufMut, Bytes};
8 |
9 | use errors::{Error, ErrorKind, Result};
10 | use serialization::{Deserializer, Serializer};
11 |
12 | /// Serialize a type to its raw data
13 | #[derive(Clone, Debug, Default)]
14 | pub struct RawSerializer<T> {
15 |     phantom: PhantomData<T>,
16 | }
17 |
18 | impl<T> Serializer for RawSerializer<T> {
19 |     type Item = T;
20 |     type Error = Error;
21 |
22 |     fn serialize_to<M: BufMut>(&self, _topic_name: &str, data: Self::Item, buf: &mut M) -> Result<()> {
23 |         buf.put_slice(unsafe { slice::from_raw_parts(&data as *const T as *const u8, mem::size_of::<T>()) });
24 |
25 |         Ok(())
26 |     }
27 |
28 |     fn serialize(&self, _topic_name: &str, data: Self::Item) -> Result<Bytes> {
29 |         Ok(Bytes::from_buf(unsafe {
30 |             slice::from_raw_parts(&data as *const T as *const u8, mem::size_of::<T>())
31 |         }))
32 |     }
33 | }
34 |
35 | /// Deserialize a type from its raw data
36 | #[derive(Clone, Debug, Default)]
37 | pub struct RawDeserializer<T> {
38 |     phantom: PhantomData<T>,
39 | }
40 |
41 | impl<T> Deserializer for RawDeserializer<T> {
42 |     type Item = T;
43 |     type Error = Error;
44 |
45 |     fn deserialize_to<B: Buf>(&self, _topic_name: &str, buf: &mut B, data: &mut Self::Item) -> Result<()> {
46 |         let len = mem::size_of::<T>();
47 |
48 |         if buf.remaining() < len {
49 |             bail!(ErrorKind::ParseError("serialized data too small".to_owned()));
50 |         }
51 |
52 |         *data = unsafe { ptr::read(buf.bytes()[..len].as_ptr() as *const T) };
53 |
54 |         buf.advance(len);
55 |
56 |         Ok(())
57 |     }
58 | }
59 |
60 | #[cfg(test)]
61 | mod tests {
62 |     use std::io::Cursor;
63 |
64 |     use super::*;
65 |     use serialization::Serializer;
66 |
67 |     #[test]
68 |     fn test_serialize() {
69 |         let serializer = RawSerializer::default();
70 |         let mut buf = Vec::new();
71 |         let v: u32 = 0x12345678;
72 |         let data = vec![0x78, 0x56, 0x34, 0x12];
73 |
74 |         serializer.serialize_to("topic", v, &mut buf).unwrap();
75 |
76 |         assert_eq!(buf, data);
77 |
78 |         assert_eq!(serializer.serialize("topic", v).unwrap(), Bytes::from(data.clone()));
79 |     }
80 |
81 |     #[test]
82 |     fn test_deserialize() {
83 |         let deserializer = RawDeserializer::default();
84 |         let mut cur = Cursor::new(vec![0x78, 0x56, 0x34, 0x12]);
85 |         let mut v = 0u32;
86 |
87 |         deserializer.deserialize_to("topic", &mut cur, &mut v).unwrap();
88 |
89 |         assert_eq!(cur.position(), 4);
90 |         assert_eq!(v, 0x12345678);
91 |
92 |         cur.set_position(0);
93 |
94 |         assert_eq!(deserializer.deserialize("topic", &mut cur).unwrap(), v);
95 |     }
96 | }
97 |
--------------------------------------------------------------------------------
/src/serialization/str.rs:
--------------------------------------------------------------------------------
1 | use std::marker::PhantomData;
2 | use std::str;
3 |
4 | use bytes::{Buf, BufMut};
5 |
6 | use errors::{Error, Result};
7 | use serialization::{Deserializer, Serializer};
8 |
9 | /// Serialize `String` with UTF-8 encoding
10 | #[derive(Clone, Debug, Default)]
11 | pub struct StringSerializer<T> {
12 |     phantom: PhantomData<T>,
13 | }
14 |
15 | impl<T> Serializer for StringSerializer<T>
16 | where
17 |     T: AsRef<str>,
18 | {
19 |     type Item = T;
20 |     type Error = Error;
21 |
22 |     fn serialize_to<B: BufMut>(&self, _topic_name: &str, data: Self::Item, buf: &mut B) -> Result<()> {
23 |         buf.put_slice(data.as_ref().as_bytes());
24 |         Ok(())
25 |     }
26 | }
27 |
28 | /// Deserialize `String` as UTF-8 encoding
29 | #[derive(Clone, Debug, Default)]
30 | pub struct StringDeserializer {
31 |     phantom: PhantomData<String>,
32 | }
33 |
34 | impl Deserializer for StringDeserializer {
35 |     type Item = String;
36 |     type Error = Error;
37 |
38 |     fn deserialize_to<B: Buf>(&self, _topic_name: &str, buf: &mut B, data: &mut Self::Item) -> Result<()> {
39 |         let len = buf.remaining();
40 |         *data = str::from_utf8(buf.bytes())?.to_owned();
41 |         buf.advance(len);
42 |         Ok(())
43 |     }
44 | }
45 |
46 | #[cfg(test)]
47 | mod tests {
48 |     use std::io::Cursor;
49 |
50 |     use bytes::Bytes;
51 |
52 |     use super::*;
53 |
54 |     #[test]
55 |     fn test_serialize() {
56 |         let serializer = StringSerializer::default();
57 |         let mut buf = Vec::new();
58 |         let data = vec![230, 181, 139, 232, 175, 149];
59 |
60 |         serializer.serialize_to("topic", "测试", &mut buf).unwrap();
61 |
62 |         assert_eq!(&buf, &data);
63 |
64 |         assert_eq!(serializer.serialize("topic", "测试").unwrap(), Bytes::from(data));
65 |     }
66 |
67 |     #[test]
68 |     fn test_deserialize() {
69 |         let deserializer = StringDeserializer::default();
70 |         let data = vec![230, 181, 139, 232, 175, 149];
71 |         let mut cur = Cursor::new(data.clone());
72 |         let mut s = String::new();
73 |
74 |         deserializer.deserialize_to("topic", &mut cur, &mut s).unwrap();
75 |
76 |         assert_eq!(cur.position(), 6);
77 |         assert_eq!(s, "测试");
78 |
79 |         cur.set_position(0);
80 |
81 |         assert_eq!(deserializer.deserialize("topic", &mut cur).unwrap(), "测试");
82 |     }
83 | }
84 |
--------------------------------------------------------------------------------
/tests/common/mod.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use failure::Error;
4 | use pretty_env_logger;
5 |
6 | use futures::Future;
7 | use tokio_core::reactor::Core;
8 | use tokio_kafka::{ClientConfig, KafkaClient};
9 |
10 | const DEFAULT_BROKER: &str = "localhost:9092";
11 |
12 | pub struct IntegrationTest {
13 |     brokers: Vec<String>,
14 |     client_id: Option<String>,
15 | }
16 |
17 | impl IntegrationTest {
18 |     pub fn new() -> Result<Self, Error> {
19 |         let brokers = match env::var("KAFKA_BROKERS") {
20 |             Ok(s) => s.split(",").map(|s| s.to_owned()).collect(),
21 |             Err(env::VarError::NotPresent) => vec![DEFAULT_BROKER.to_owned()],
22 |             Err(err) => bail!(err),
23 |         };
24 |         let client_id = match env::var("KAFKA_CLIENT") {
25 |             Ok(s) => Some(s),
26 |             Err(env::VarError::NotPresent) => None,
27 |             Err(err) => bail!(err),
28 |         };
29 |
30 |         Ok(IntegrationTest { brokers, client_id })
31 |     }
32 |
33 |     pub fn client_config(self) -> ClientConfig {
34 |         let config = ClientConfig {
35 |             hosts: self.brokers,
36 |             client_id: self.client_id,
37 |             ..Default::default()
38 |         };
39 |
40 |         info!("connecting to Kafka with config: {:?}", config);
41 |
42 |         config
43 |     }
44 | }
45 |
46 | pub fn run<'a, F, R, O, E>(op: F) -> Result<O, Error>
47 | where
48 |     F: FnOnce(KafkaClient<'static>) -> R,
49 |     R: Future<Item = O, Error = E>,
50 |     E: Into<Error>,
51 | {
52 |     pretty_env_logger::init();
53 |
54 |     let tests = IntegrationTest::new()?;
55 |
56 |     let config = tests.client_config();
57 |
58 |     let mut core = Core::new()?;
59 |
60 |     let client = KafkaClient::new(config, core.handle());
61 |
62 |     let work = op(client).map_err(|err| err.into());
63 |
64 |     core.run(work)
65 | }
66 |
--------------------------------------------------------------------------------
/tests/docker/v0.10/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:0.10.2.1
2 |
3 | COPY setup-tests.sh setup-topics.sh /tmp/
4 |
5 | RUN chmod a+x /tmp/*.sh \
6 |     && /tmp/setup-tests.sh
7 |
--------------------------------------------------------------------------------
/tests/docker/v0.10/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: wurstmeister/zookeeper
5 |     ports:
6 |       - "2181:2181"
7 |   kafka:
8 |     build: .
9 |     ports:
10 |       - "9092:9092"
11 |     links:
12 |       - zookeeper
13 |     environment:
14 |       KAFKA_BROKER_ID: 0
15 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 |       KAFKA_ADVERTISED_HOST_NAME: ${IP_ADDRESS}
17 |       KAFKA_CREATE_TOPICS: "foo:1:1,bar:4:1"
18 |     volumes:
19 |       - /var/run/docker.sock:/var/run/docker.sock
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.10/setup-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo Setup integration testing @ $KAFKA_HOME
4 |
5 | ln -s $KAFKA_HOME /opt/kafka
6 |
--------------------------------------------------------------------------------
/tests/docker/v0.10/setup-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | KAFKA_HOME=/opt/kafka
4 | KAFKA_TOPICS="$KAFKA_HOME/bin/kafka-topics.sh --zookeeper zookeeper:2181"
5 | KAFKA_CONSOLE_PRODUCER="$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092"
6 |
7 | until $KAFKA_TOPICS --list | grep bar; do
8 |     echo "Kafka is unavailable - sleeping"
9 |     sleep 1
10 | done
11 |
12 | echo Setup integration testing @ $KAFKA_HOME
13 |
14 | for i in `seq 0 9`; do
15 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic foo;
16 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic bar;
17 | done
18 |
19 | $KAFKA_TOPICS --describe
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.11/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:0.11.0.1
2 |
3 | COPY setup-tests.sh setup-topics.sh /tmp/
4 |
5 | RUN chmod a+x /tmp/*.sh \
6 |     && /tmp/setup-tests.sh
7 |
--------------------------------------------------------------------------------
/tests/docker/v0.11/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: wurstmeister/zookeeper
5 |     ports:
6 |       - "2181:2181"
7 |   kafka:
8 |     build: .
9 |     ports:
10 |       - "9092:9092"
11 |     links:
12 |       - zookeeper
13 |     environment:
14 |       KAFKA_BROKER_ID: 0
15 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 |       KAFKA_ADVERTISED_HOST_NAME: ${IP_ADDRESS}
17 |       KAFKA_CREATE_TOPICS: "foo:1:1,bar:4:1"
18 |     volumes:
19 |       - /var/run/docker.sock:/var/run/docker.sock
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.11/setup-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo Setup integration testing @ $KAFKA_HOME
4 |
5 | ln -s $KAFKA_HOME /opt/kafka
6 |
--------------------------------------------------------------------------------
/tests/docker/v0.11/setup-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | KAFKA_HOME=/opt/kafka
4 | KAFKA_TOPICS="$KAFKA_HOME/bin/kafka-topics.sh --zookeeper zookeeper:2181"
5 | KAFKA_CONSOLE_PRODUCER="$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092"
6 |
7 | until $KAFKA_TOPICS --list | grep bar; do
8 |     echo "Kafka is unavailable - sleeping"
9 |     sleep 1
10 | done
11 |
12 | echo Setup integration testing @ $KAFKA_HOME
13 |
14 | for i in `seq 0 9`; do
15 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic foo;
16 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic bar;
17 | done
18 |
19 | $KAFKA_TOPICS --describe
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.8/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:0.8.2.2-1
2 |
3 | COPY setup-tests.sh setup-topics.sh /tmp/
4 |
5 | RUN chmod a+x /tmp/*.sh \
6 |     && /tmp/setup-tests.sh
7 |
--------------------------------------------------------------------------------
/tests/docker/v0.8/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: wurstmeister/zookeeper
5 |     ports:
6 |       - "2181:2181"
7 |   kafka:
8 |     build: .
9 |     ports:
10 |       - "9092:9092"
11 |     links:
12 |       - zookeeper
13 |     environment:
14 |       KAFKA_BROKER_ID: 0
15 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 |       KAFKA_ADVERTISED_HOST_NAME: ${IP_ADDRESS}
17 |       KAFKA_CREATE_TOPICS: "foo:1:1,bar:4:1"
18 |     volumes:
19 |       - /var/run/docker.sock:/var/run/docker.sock
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.8/setup-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo Setup integration testing @ $KAFKA_HOME
4 |
5 | ln -s $KAFKA_HOME /opt/kafka
6 |
--------------------------------------------------------------------------------
/tests/docker/v0.8/setup-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | KAFKA_HOME=/opt/kafka
4 | KAFKA_TOPICS="$KAFKA_HOME/bin/kafka-topics.sh --zookeeper zookeeper:2181"
5 | KAFKA_CONSOLE_PRODUCER="$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092"
6 |
7 | until $KAFKA_TOPICS --list | grep bar; do
8 |     echo "Kafka is unavailable - sleeping"
9 |     sleep 1
10 | done
11 |
12 | echo Setup integration testing @ $KAFKA_HOME
13 |
14 | for i in `seq 0 9`; do
15 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic foo;
16 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic bar;
17 | done
18 |
19 | $KAFKA_TOPICS --describe
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.9/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:0.9.0.1-1
2 |
3 | COPY setup-tests.sh setup-topics.sh /tmp/
4 |
5 | RUN chmod a+x /tmp/*.sh \
6 |     && /tmp/setup-tests.sh
7 |
--------------------------------------------------------------------------------
/tests/docker/v0.9/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: wurstmeister/zookeeper
5 |     ports:
6 |       - "2181:2181"
7 |   kafka:
8 |     build: .
9 |     ports:
10 |       - "9092:9092"
11 |     links:
12 |       - zookeeper
13 |     environment:
14 |       KAFKA_BROKER_ID: 0
15 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 |       KAFKA_ADVERTISED_HOST_NAME: ${IP_ADDRESS}
17 |       KAFKA_CREATE_TOPICS: "foo:1:1,bar:4:1"
18 |     volumes:
19 |       - /var/run/docker.sock:/var/run/docker.sock
20 |
--------------------------------------------------------------------------------
/tests/docker/v0.9/setup-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo Setup integration testing @ $KAFKA_HOME
4 |
5 | ln -s $KAFKA_HOME /opt/kafka
6 |
--------------------------------------------------------------------------------
/tests/docker/v0.9/setup-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | KAFKA_HOME=/opt/kafka
4 | KAFKA_TOPICS="$KAFKA_HOME/bin/kafka-topics.sh --zookeeper zookeeper:2181"
5 | KAFKA_CONSOLE_PRODUCER="$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092"
6 |
7 | until $KAFKA_TOPICS --list | grep bar; do
8 |     echo "Kafka is unavailable - sleeping"
9 |     sleep 1
10 | done
11 |
12 | echo Setup integration testing @ $KAFKA_HOME
13 |
14 | for i in `seq 0 9`; do
15 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic foo;
16 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic bar;
17 | done
18 |
19 | $KAFKA_TOPICS --describe
20 |
--------------------------------------------------------------------------------
/tests/docker/v1.0/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:1.0.1
2 |
3 | COPY setup-tests.sh setup-topics.sh /tmp/
4 |
5 | RUN chmod a+x /tmp/*.sh \
6 |     && /tmp/setup-tests.sh
7 |
--------------------------------------------------------------------------------
/tests/docker/v1.0/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: wurstmeister/zookeeper
5 |     ports:
6 |       - "2181:2181"
7 |   kafka:
8 |     build: .
9 |     ports:
10 |       - "9092:9092"
11 |     links:
12 |       - zookeeper
13 |     environment:
14 |       KAFKA_BROKER_ID: 0
15 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 |       KAFKA_ADVERTISED_HOST_NAME: ${IP_ADDRESS}
17 |       KAFKA_CREATE_TOPICS: "foo:1:1,bar:4:1"
18 |     volumes:
19 |       - /var/run/docker.sock:/var/run/docker.sock
20 |
--------------------------------------------------------------------------------
/tests/docker/v1.0/setup-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo Setup integration testing @ $KAFKA_HOME
4 |
5 | ln -s $KAFKA_HOME /opt/kafka
6 |
--------------------------------------------------------------------------------
/tests/docker/v1.0/setup-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | KAFKA_HOME=/opt/kafka
4 | KAFKA_TOPICS="$KAFKA_HOME/bin/kafka-topics.sh --zookeeper zookeeper:2181"
5 | KAFKA_CONSOLE_PRODUCER="$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092"
6 |
7 | until $KAFKA_TOPICS --list | grep bar; do
8 |     echo "Kafka is unavailable - sleeping"
9 |     sleep 1
10 | done
11 |
12 | echo Setup integration testing @ $KAFKA_HOME
13 |
14 | for i in `seq 0 9`; do
15 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic foo;
16 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic bar;
17 | done
18 |
19 | $KAFKA_TOPICS --describe
20 |
--------------------------------------------------------------------------------
/tests/docker/v1.1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:1.1.0
2 |
3 | COPY setup-tests.sh setup-topics.sh /tmp/
4 |
5 | RUN chmod a+x /tmp/*.sh \
6 |     && /tmp/setup-tests.sh
7 |
--------------------------------------------------------------------------------
/tests/docker/v1.1/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   zookeeper:
4 |     image: wurstmeister/zookeeper
5 |     ports:
6 |       - "2181:2181"
7 |   kafka:
8 |     build: .
9 |     ports:
10 |       - "9092:9092"
11 |     links:
12 |       - zookeeper
13 |     environment:
14 |       KAFKA_BROKER_ID: 0
15 |       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
16 |       KAFKA_ADVERTISED_HOST_NAME: ${IP_ADDRESS}
17 |       KAFKA_CREATE_TOPICS: "foo:1:1,bar:4:1"
18 |     volumes:
19 |       - /var/run/docker.sock:/var/run/docker.sock
20 |
--------------------------------------------------------------------------------
/tests/docker/v1.1/setup-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo Setup integration testing @ $KAFKA_HOME
4 |
5 | ln -s $KAFKA_HOME /opt/kafka
6 |
--------------------------------------------------------------------------------
/tests/docker/v1.1/setup-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | KAFKA_HOME=/opt/kafka
4 | KAFKA_TOPICS="$KAFKA_HOME/bin/kafka-topics.sh --zookeeper zookeeper:2181"
5 | KAFKA_CONSOLE_PRODUCER="$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092"
6 |
7 | until $KAFKA_TOPICS --list | grep bar; do
8 |     echo "Kafka is unavailable - sleeping"
9 |     sleep 1
10 | done
11 |
12 | echo Setup integration testing @ $KAFKA_HOME
13 |
14 | for i in `seq 0 9`; do
15 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic foo;
16 |     echo $i | $KAFKA_CONSOLE_PRODUCER --topic bar;
17 | done
18 |
19 | $KAFKA_TOPICS --describe
20 |
--------------------------------------------------------------------------------
/tests/integration_test.rs:
--------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate log;
3 | extern crate getopts;
4 | #[macro_use]
5 | extern crate failure;
6 | extern crate pretty_env_logger;
7 |
8 | extern crate futures;
9 | extern crate tokio_core;
10 | #[macro_use]
11 | extern crate tokio_kafka;
12 |
13 | #[cfg(feature = "integration_test")]
14 | mod common;
15 |
16 | #[cfg(feature = "integration_test")]
17 | mod tests {
18 |     use futures::Future;
19 |
20 |     use tokio_kafka::{Client, Cluster, FetchOffset, KafkaCode, ListedOffset};
21 |
22 |     use common;
23 |
24 |     const TOPIC_FOO_PARTITIONS: usize = 1;
25 |     const TOPIC_FOO_MESSAGE_COUNT: i64 = 10;
26 |     const TOPIC_BAR_PARTITIONS: usize = 4;
27 |     const TOPIC_BAR_MESSAGE_COUNT: i64 = 10;
28 |
29 |     #[test]
30 |     fn metadata() {
31 |         common::run(|client| {
32 |             client.metadata().and_then(move |metadata| {
33 |                 info!("fetch metadata: {:?}", metadata);
34 |
35 |                 assert!(!metadata.brokers().is_empty());
36 |                 let partitions = {
37 |                     let topics = metadata.topics();
38 |
39 |                     assert!(topics.contains_key("foo"));
40 |                     assert!(topics.contains_key("bar"));
41 |
42 |                     assert_eq!(topics["foo"].len(), TOPIC_FOO_PARTITIONS);
43 |                     assert_eq!(topics["bar"].len(), TOPIC_BAR_PARTITIONS);
44 |
45 |                     topics
46 |                         .into_iter()
47 |                         .flat_map(|(topic_name, partitions)| {
48 |                             partitions.into_iter().flat_map(move |partition| {
49 |                                 let tp = topic_partition!(topic_name.to_owned(), partition.partition_id);
50 |
51 |                                 vec![(tp.clone(), FetchOffset::Earliest), (tp.clone(), FetchOffset::Latest)]
52 |                             })
53 |                         })
54 |                         .collect::<Vec<_>>()
55 |                 };
56 |
57 |                 client.list_offsets(partitions).map(|responses| {
58 |                     assert!(responses.contains_key("foo"));
59 |                     assert!(responses.contains_key("bar"));
60 |
61 |                     assert_eq!(
62 |                         responses["foo"],
63 |                         vec![
64 |                             ListedOffset {
65 |                                 partition_id: 0,
66 |                                 error_code: KafkaCode::None,
67 |                                 offsets: vec![TOPIC_FOO_MESSAGE_COUNT, 0],
68 |                                 timestamp: None,
69 |                             },
70 |                         ]
71 |                     );
72 |
73 |                     let offsets = &responses["bar"];
74 |
75 |                     assert_eq!(offsets.len(), TOPIC_BAR_PARTITIONS);
76 |                     assert_eq!(
77 |                         offsets
78 |                             .iter()
79 |                             .map(|offset| offset.offsets.iter().cloned().max().unwrap_or_default())
80 |                             .sum::<i64>(),
81 |                         TOPIC_BAR_MESSAGE_COUNT
82 |                     );
83 |                 })
84 |             })
85 |         }).unwrap()
86 |     }
87 | }
88 |
--------------------------------------------------------------------------------
/tests/integration_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | TEST_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 | DOCKER_DIR=$TEST_DIR/docker
5 |
6 | case "$OSTYPE" in
7 |     linux*)
8 |         IP_ADDRESS=`hostname -I | awk '{print $1}'`
9 |         ;;
10 |     darwin*)
11 |         IP_ADDRESSES=(`ifconfig | grep "inet " | grep -Fv 127.0.0.1 | awk '{print $2}'`)
12 |         IP_ADDRESS=${IP_ADDRESSES[0]}
13 |         ;;
14 |     *)
15 |         echo "unknown: $OSTYPE"
16 |         ;;
17 | esac
18 |
19 | function test {
20 |     echo Testing Kafka $1 @ $DOCKER_DIR/$1
21 |
22 |     pushd $DOCKER_DIR/$1
23 |
24 |     envfile=.env
25 |
26 |     echo "### DOCKER-COMPOSE ENVIRONMENT VARIABLES AS OF $(date +"%Y-%m-%d @ %H-%M-%S")" > $envfile
27 |     echo "IP_ADDRESS=$IP_ADDRESS" >> $envfile
28 |
29 |     cat $envfile
30 |
31 |     docker-compose kill
32 |     docker-compose rm -f
33 |     docker-compose build
34 |     docker-compose up -d
35 |
36 |     docker-compose exec kafka /tmp/setup-topics.sh
37 |
38 |     RUST_LOG=tokio KAFKA_BROKERS=$IP_ADDRESS:9092 cargo test --features "integration_test"
39 |
40 |     docker-compose down
41 |
42 |     popd
43 | }
44 |
45 | POSITIONAL=()
46 | while [[ $# -gt 0 ]]
47 | do
48 |     arg="$1"
49 |     case $arg in
50 |         v0.8|v0.9|v0.10|v0.11|v1.0|v1.1)
51 |             test $arg
52 |             shift # past argument
53 |             ;;
54 |         all)
55 |             for version in $DOCKER_DIR/v*/; do
56 |                 test $(basename $version)
57 |             done
58 |             shift # past argument
59 |             ;;
60 |         *) # unknown option
61 |             POSITIONAL+=("$1") # save it in an array for later
62 |             shift # past argument
63 |             ;;
64 |     esac
65 | done
66 |
--------------------------------------------------------------------------------