├── .gitignore
├── LICENSE
├── README.md
├── docs
└── change-log.txt
├── pom.xml
├── resources
├── kafka.appender.log4j.properties
└── log4j.properties
└── src
├── main
└── java
│ └── com
│ └── blackberry
│ └── bdp
│ └── krackle
│ ├── Constants.java
│ ├── KafkaError.java
│ ├── Time.java
│ ├── auth
│ ├── AuthenticatedSocketSingleton.java
│ ├── Authenticator.java
│ ├── PlainTextAuthenticator.java
│ └── SaslPlainTextAuthenticator.java
│ ├── compression
│ ├── Compressor.java
│ ├── Decompressor.java
│ ├── GzipCompressor.java
│ ├── GzipDecompressor.java
│ ├── SnappyCompressor.java
│ └── SnappyDecompressor.java
│ ├── consumer
│ ├── BrokerUnavailableException.java
│ ├── Consumer.java
│ ├── ConsumerConfiguration.java
│ ├── MessageAndOffset.java
│ ├── MessageSetReader.java
│ └── OffsetOutOfRangeException.java
│ ├── exceptions
│ ├── AuthenticationException.java
│ ├── InvalidConfigurationTypeException.java
│ └── MissingConfigurationException.java
│ ├── jaas
│ └── Login.java
│ ├── meta
│ ├── Broker.java
│ ├── MetaData.java
│ ├── Partition.java
│ └── Topic.java
│ └── producer
│ ├── MessageSetBuffer.java
│ ├── MissingPartitionsException.java
│ ├── Producer.java
│ └── ProducerConfiguration.java
└── test
├── java
└── com
│ └── blackberry
│ ├── bdp
│ └── krackle
│ │ └── KafkaClientTest.java
│ └── testutil
│ ├── LocalKafkaServer.java
│ └── LocalZkServer.java
└── resources
└── log4j.properties
/.gitignore:
--------------------------------------------------------------------------------
1 | .classpath
2 | .project
3 | .settings/
4 | target/
5 | /target/
6 | /target/
7 | /target/
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Krackle - A Low Overhead Kafka Client
2 | =====================================
3 |
4 | While the standard Java Kafka client is easy to use, it does tend to have a
5 | high level of overhead. A lot of objects are created, only to be garbage
6 | collected very quickly, often within milliseconds on a heavily loaded
7 | producer.
8 |
9 | Krackle is a Kafka 0.8 client designed to minimize the number of objects
10 | created, and therefore to reduce garbage collection overhead. In my tests
11 | this has reduced the CPU usage by 50% under heavy load.
12 |
13 | In order to achieve these performance improvements, some compromises had to be
14 | made. In particular, this producer requires an instance for each topic and
15 | key, and the consumer requires an instance per partition. This means that it
16 | may not be useful in the general case.
17 |
18 | Example use case: You have thousands of applications running, and you are
19 | using Kafka to centralize the logs. Since each application will know upfront
20 | what topic it will log to, and the key it will use (a unique app instance id),
21 | we can use the Krackle Producer effectively. This means small savings
22 | in CPU and memory usage on each instance, but that can add up over a large
23 | number of instances to provide large savings.
24 |
25 | Basic Usage
26 | -----------
27 | ```java
28 | Properties props = ... // load some configuration properties
29 | ProducerConfiguration conf = new ProducerConfiguration(props);
30 | Producer producer = new Producer(conf, "clientId", "topic", "key");
31 |
32 | // Use a byte buffer to store your data, and pass the data by referencing that.
33 | byte[] buffer = new byte[1024];
34 | while ( fillBuffer() ) { // Get some data in your buffer
35 | producer.send(buffer, offset, length);
36 | }
37 | producer.close();
38 | ```
39 |
40 | ```java
41 | Properties props = ... // load some configuration properties
42 | ConsumerConfiguration conf = new ConsumerConfiguration(props);
43 | Consumer consumer = new Consumer(conf, "clientId", "topic", "key");
44 |
45 | // Use a byte buffer to store the message you retrieve.
46 | byte[] buffer = new byte[1024];
47 | int bytesRead;
48 | while ( true ) {
49 | bytesRead = consumer.getMessage(buffer, 0, buffer.length);
50 | if (bytesRead != -1) {
51 | // the first bytesRead bytes of the buffer are the message.
52 | }
53 | }
54 | consumer.close();
55 | ```
56 |
57 | Producer Configuration
58 | ----------------------
59 | Configuration is done via properties. Many of these are the same as the
60 | standard Java client.
61 |
62 |
63 |
64 |
65 |
property
66 |
default
67 |
description
68 |
69 |
70 |
71 |
metadata.broker.list
72 |
73 |
(required) A comma separated list of seed brokers to connect to in order
74 | to get metadata about the cluster.
75 |
76 |
77 |
78 |
queue.buffering.max.ms
79 |
5000
80 |
Maximum time to buffer data. For example a setting of 100 will try to
81 | batch together 100ms of messages to send at once. This will improve
82 | throughput but adds message delivery latency due to the buffering.
83 |
84 |
85 |
86 |
request.required.acks
87 |
1
88 |
This value controls when a produce request is considered completed.
89 | Specifically, how many other brokers must have committed the data to their
90 | log and acknowledged this to the leader? Typical values are
91 |
92 |
0, which means that the producer never waits for an acknowledgement from
93 | the broker (the same behavior as 0.7). This option provides the lowest
94 | latency but the weakest durability guarantees (some data will be lost when a
95 | server fails).
96 |
1, which means that the producer gets an acknowledgement after the leader
97 | replica has received the data. This option provides better durability as the
98 | client waits until the server acknowledges the request as successful (only
99 | messages that were written to the now-dead leader but not yet replicated will
100 | be lost).
101 |
-1, which means that the producer gets an acknowledgement after all
102 | in-sync replicas have received the data. This option provides the best
103 | durability, we guarantee that no messages will be lost as long as at least
104 | one in sync replica remains.
105 |
106 |
107 |
108 |
109 |
110 |
request.timeout.ms
111 |
10000
112 |
The amount of time the broker will wait trying to meet the
113 | request.required.acks requirement before sending back an error to the client.
114 |
115 |
116 |
117 |
118 |
message.send.max.retries
119 |
3
120 |
This property will cause the producer to automatically retry a failed
121 | send request. This property specifies the number of retries when such
122 | failures occur. Note that setting a non-zero value here can lead to
123 | duplicates in the case of network errors that cause a message to be sent but
124 | the acknowledgement to be lost.
125 |
126 |
127 |
128 |
retry.backoff.ms
129 |
100
130 |
Before each retry, the producer refreshes the metadata of relevant topics
131 | to see if a new leader has been elected. Since leader election takes a bit of
132 | time, this property specifies the amount of time that the producer waits
133 | before refreshing the metadata.
134 |
135 |
136 |
137 |
topic.metadata.refresh.interval.ms
138 |
600 * 1000
139 |
The producer generally refreshes the topic metadata from brokers when
140 | there is a failure (partition missing, leader not available...). It will also
141 | poll regularly (default: every 10min so 600000ms). If you set this to a
142 | negative value, metadata will only get refreshed on failure. If you set this
143 | to zero, the metadata will get refreshed after each message sent (not
144 | recommended). Important note: the refresh happens only AFTER the message is
145 | sent, so if the producer never sends a message the metadata is never
146 | refreshed.
147 |
148 |
149 |
150 |
message.buffer.size
151 |
1024*1024
152 |
The size of each buffer that is used to store raw messages before they
153 | are sent. Since a full buffer is sent at once, don't make this too big.
154 |
155 |
156 |
157 |
num.buffers
158 |
2
159 |
The number of buffers to use. At any given time, there is up to one
160 | buffer being filled with new data, up to one buffer having its data sent to
161 | the broker, and any number of buffers waiting to be filled and/or sent.
162 |
163 | Essentially, the limit of the amount of data that can be queued at any
164 | given time is message.buffer.size * num.buffers. Although, in reality, you
165 | won't get buffers to 100% full each time.
166 |
167 |
168 |
169 |
send.buffer.size
170 |
message.buffer.size + 200
171 |
Size of the byte buffer used to store the final (with headers and
172 | compression applied) data to be sent to the broker.
173 |
174 |
175 |
176 |
compression.codec
177 |
none
178 |
This parameter allows you to specify the compression codec for all data
179 | generated by this producer. Valid values are "none", "gzip" and "snappy".
180 |
181 |
182 |
183 |
gzip.compression.level
184 |
java.util.zip.Deflater.DEFAULT_COMPRESSION
185 |
If compression.codec is set to gzip, then this allows configuration of
186 | the compression level.
187 |
188 |
-1: default compression level
189 |
0: no compression
190 |
1-9: 1=fastest compression ... 9=best compression
191 |
192 |
193 |
194 |
195 |
196 |
queue.enqueue.timeout.ms
197 |
-1
198 |
The amount of time to block before dropping messages when all buffers are
199 | full. If set to 0 events will be enqueued immediately or dropped if the queue
200 | is full (the producer send call will never block). If set to -1 the producer
201 | will block indefinitely and never willingly drop a send.
202 |
203 |
204 |
205 |
206 | Consumer Configuration
207 | ----------------------
208 | Configuration is done via properties. Many of these are the same as the
209 | standard Java client.
210 |
211 |
212 |
213 |
property
214 |
default
215 |
description
216 |
217 |
218 |
219 |
metadata.broker.list
220 |
221 |
(required) A list of seed brokers to connect to in order to get
222 | information about the Kafka broker cluster.
223 |
224 |
225 |
226 |
fetch.message.max.bytes
227 |
1024 * 1024
228 |
228 | The number of bytes of messages to attempt to fetch for each
229 | topic-partition in each fetch request. These bytes will be read into memory
230 | for each partition, so this helps control the memory used by the consumer.
231 | The fetch request size must be at least as large as the maximum message size
232 | the server allows or else it is possible for the producer to send messages
233 | larger than the consumer can fetch.
234 |
235 |
236 |
237 |
fetch.wait.max.ms
238 |
100
239 |
The maximum amount of time the server will block before answering the
240 | fetch request if there isn't sufficient data to immediately satisfy
241 | fetch.min.bytes
242 |
243 |
244 |
245 |
fetch.min.bytes
246 |
1
247 |
The minimum amount of data the server should return for a fetch request.
248 | If insufficient data is available the request will wait for that much data to
249 | accumulate before answering the request.
250 |
251 |
252 |
253 |
socket.receive.buffer.bytes
254 |
64 * 1024
255 |
The socket receive buffer for network requests
256 |
257 |
258 |
259 |
auto.offset.reset
260 |
largest
261 |
What to do when there is no initial offset in ZooKeeper or if an offset
262 | is out of range:
263 |
264 |
smallest : automatically reset the offset to the smallest offset
265 |
largest : automatically reset the offset to the largest offset
266 |
anything else: throw exception to the consumer
267 |
268 |
269 |
270 |
271 |
272 | ## Contributing
273 | To contribute code to this repository you must be [signed up as an official contributor](http://blackberry.github.com/howToContribute.html).
274 |
275 | ## Disclaimer
276 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/docs/change-log.txt:
--------------------------------------------------------------------------------
1 | Changes in Krackle 0.9.2
2 |
3 | - KRAC-22: Authentication Enhancements (AuthenticatedSocketBuilder is now a Singleton, deprecated SecurityConfiguration, and SaslPlainTextAuthenticator no longer requires the host name in the configuration mapping; instead it pulls it off the socket (using either the FQDN if one was used or a reverse lookup))
4 |
5 | Changes in Krackle 0.9.1
6 |
7 | - KRAC-20: Release 0.9.1 as builds of 0.9.0 were made available that didn't include KRAC-16 and KRAC-17
8 |
9 | Changes in Krackle 0.9.0
10 |
11 | - KRAC-4: New package com.blackberry.bdp.krackle.auth
12 | - KRAC-5: Add Authentication package
13 | - KRAC-6: Update Producer, Consumer, and Metadata to Support Configurable Authentication Protocol
14 | - KRAC-7: Use Proper Principal
15 | - KRAC-8: Auto Renew TGT with SASL_PLAINTEXT
16 | - KRAC-9: Support All Topics
17 | - KRAC-10: Further Enhance MetaData to Fully Support KaBoom API
18 | - KRAC-11: Use the Login API within ConsumerConfiguration
19 | - KRAC-12: Fixes typo in consumer log
20 | - KRAC-14: Clean up/Organize and document klogger JMX stats
21 | - KRAC-15: Fix Unit Test that is causing all downstream project builds to fail
22 | - KRAC-16: Fix retry bug and improve socket timeout error handling
23 | - KRAC-17: Don't increment bytes sent for dropped messages
24 | - KRAC-18: Rename jaas.gssapi.login.context.name to kafka.client.jaas.login.context
25 |
26 | Changes in Krackle 0.8.3
27 |
28 | - KRAC-2: Krackle should deal with hosts with multiple A records correctly
29 |
30 | Changes in Krackle 0.8.2
31 |
32 | - Reformat of all source files
33 |
34 | Changes in Krackle 0.8.1:
35 |
36 | - Bumps dependency version of snappy to 1.1.1.7
37 | - Fixes bug (transposed parameters) that would cause a corrupt message sent to Kafka after a partition rotation
38 |
39 | Changes in Krackle 0.8.0:
40 |
41 | - Supports bdp-common 0.0.6 which provides all logging and monitoring deps
42 | - Instruments log4j2 with io.dropwizard.metrics
43 | - Removes the force=true|false parameter in updateMetaDataAndConnection()
44 | - toSendBuffer is updated with partition after exceptions raised in sendMessage() incur a call to updateMetadataAndConnection()
45 |
46 | Changes in Krackle 0.7.10
47 |
48 | - Sets the keep alive flag on the consumer's broker socket to true
49 |
50 | Changes in Krackle 0.7.9
51 |
52 | - Adds the option for no rotation, sequential rotation or random rotation through partitions.
53 | - Changes how the transit time to kafka is calculated - now uses System.currentTimeMillis instead of nano second resolution.
54 |
55 | Changes in Krackle 0.7.8
56 |
57 | - Fixes partition rotation. Quick rotate is no longer a supported configuration item. Instead all topic meta data refreshes will rotate partitions and if quicker rotation is required then topic.metadata.refresh.interval.ms can be configured accordingly. Regular topic specific overrides are possible as well for topics that require faster rotation.
58 | - Fixes permission issues in the RPM creation with directories being created as 644 and not 755
59 |
60 | Changes in Krackle 0.7.7:
61 |
62 | - Adds a new BrokerUnavailableException exception
63 | - Adds a new configuration parameter: socket.timeout.seconds (defaults to 30s)
64 | - Adds a new attribute to Consumer: private Broker broker to identify the current broker
65 | - Adds a new method to Broker: getNiceDescription()
66 | - All attempts to create open socket connection to a broker now set a timeout
67 | - All SocketTimeoutException now log the error and call connectToBroker()
68 | - connectToBroker() now attempts a max number of connections before throwing a BrokerUnavailableException
69 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
16 |
17 |
19 | 4.0.0
20 |
21 | com.blackberry.bdp.krackle
22 | krackle
23 | 0.9.2
24 | jar
25 | krackle-0.9.2
26 | http://blackberry.com
27 |
28 |
29 | UTF-8
30 |
31 |
32 |
33 | scm:git:https://github.com/blackberry/Krackle.git
34 | scm:git:https://github.com/blackberry/Krackle.git
35 | https://github.com/blackberry/Krackle
36 |
37 |
38 |
39 |
40 |
41 | org.xerial.snappy
42 | snappy-java
43 | 1.1.1.7
44 |
45 |
46 |
47 | com.blackberry.bdp.common
48 | bdp-common
49 | 0.5.2
50 |
51 |
52 |
53 |
54 | junit
55 | junit
56 | 4.11
57 | test
58 |
59 |
60 | org.apache.kafka
61 | kafka_2.10
62 | 0.9.0.0
63 | test
64 |
65 |
66 | org.slf4j
67 | slf4j-simple
68 |
69 |
70 | javax.jms
71 | jms
72 |
73 |
74 | com.sun.jmx
75 | jmxri
76 |
77 |
78 | com.sun.jdmk
79 | jmxtools
80 |
81 |
82 |
83 |
84 | commons-io
85 | commons-io
86 | 2.4
87 | test
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 | org.apache.maven.plugins
96 | maven-compiler-plugin
97 | 3.0
98 |
99 | 1.7
100 | 1.7
101 |
102 |
103 |
104 |
105 | org.apache.maven.plugins
106 | maven-javadoc-plugin
107 | 2.9.1
108 |
109 |
110 | attach-javadocs
111 |
112 | jar
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 | maven-dependency-plugin
121 |
122 |
123 | install
124 |
125 | copy-dependencies
126 |
127 |
128 | ${project.build.directory}/lib
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 | org.codehaus.mojo
137 | buildnumber-maven-plugin
138 | 1.3
139 |
140 |
141 | validate
142 |
143 | create
144 |
145 |
146 |
147 |
148 | false
149 | false
150 |
151 |
152 |
153 |
154 |
155 | org.apache.maven.plugins
156 | maven-jar-plugin
157 | 2.5
158 |
159 |
160 |
161 | true
162 |
163 |
164 | ${buildNumber}
165 | ${scmBranch}
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
--------------------------------------------------------------------------------
/resources/kafka.appender.log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.logger.kafka=INFO, KCONSOLE
2 | log4j.additivity.kafka=false
3 |
4 | log4j.logger.com.blackberry.log4j.KafkaAppender=INFO, KCONSOLE
5 | log4j.additivity.com.blackberry.log4j.KafkaAppender=false
6 |
7 | log4j.logger.org.apache.zookeeper=INFO, KCONSOLE
8 | log4j.additivity.org.apache.zookeeper=false
9 |
10 | log4j.logger.org.I0Itec=INFO, KCONSOLE
11 | log4j.additivity.org.I0Itec=false
12 |
13 | log4j.logger.com.blackberry.logdriver.test=INFO, KCONSOLE
14 | log4j.additivity.com.blackberry.logdriver.test=false
15 |
16 | log4j.appender.KCONSOLE=org.apache.log4j.ConsoleAppender
17 | log4j.appender.KCONSOLE.layout=org.apache.log4j.PatternLayout
18 | log4j.appender.KCONSOLE.layout.ConversionPattern=%d{yyyy-MM-dd'T'HH:mm:ss'.'SSSZ} %p %c: %m%n
19 |
20 |
--------------------------------------------------------------------------------
/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Sample configuration for logging to Kafka.
2 |
3 | # Add the KAFKA logger to your usual list.
4 | log4j.rootLogger=DEBUG, KAFKA
5 |
6 | # This is the class for the custom appender.
7 | log4j.appender.KAFKA=com.blackberry.log4j.KafkaAppender
8 |
9 | # The pattern here should not include the timestamp. The KafkaAppender will
10 | # take care of putting that on the front in the correct format.
11 | log4j.appender.KAFKA.layout=org.apache.log4j.EnhancedPatternLayout
12 | log4j.appender.KAFKA.layout.ConversionPattern=%p %c: %m%n
13 |
14 | # These define which topic to send logs to, and where to send them.
15 | # These must be configured correctly for anything to work
16 | log4j.appender.KAFKA.topic=myservice
17 | log4j.appender.KAFKA.metadataBrokerList=localhost:9876
18 |
19 | # Your hostname will probably be guessed correctly, but you can set it
20 | # explicitly if you want to
21 | #log4j.appender.KAFKA.hostname=myhost123.mydomain
22 |
23 | # Any producer config can be set here, by converting the property name to
24 | # camel case.
25 | # List is here: http://kafka.apache.org/documentation.html#producerconfigs
26 |
27 | # To use synchronous messaging, use this setting. This will impact
28 | # performance greatly, and so is not recommended.
29 | #log4j.appender.KAFKA.producerType=sync
30 |
31 | # You can adjust the reliability of messaging here.
32 | # 0 : Fire and forget. High performance, low reliability
33 | # 1 : (default) Wait for logs to be written to one node.
34 | # -1 : Wait for logs to be written to 3 nodes. Low performance, high reliability.
35 | #log4j.appender.KAFKA.requestRequiredAcks=1
36 |
37 | # Compression codec.
38 | # none : no compression. Don't use this.
39 | # snappy : (default) Around 70% compression with low CPU overhead.
40 | # gzip : 85-90% compression with high CPU overhead.
41 | #log4j.appender.KAFKA.compressCodec=snappy
42 |
43 | # Settings to control batching for asynchronous logging.
44 | # Details on settings are here:
45 | # http://kafka.apache.org/documentation.html#producerconfigs
46 | #log4j.appender.KAFKA.queueBufferingMaxMs=10000
47 | #log4j.appender.KAFKA.queueBufferingMaxMessages=10000
48 | #log4j.appender.KAFKA.queueEnqueueTimeoutMs=0
49 | #log4j.appender.KAFKA.batchNumMessages=1000
50 |
51 |
52 | # Console logging of metrics for debugging and benchmarking
53 | #log4j.appender.KAFKA.metricsToConsole=true
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/Constants.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle;
17 |
18 | /**
19 | * Constants that are used throughout the client.
20 | */
21 | public class Constants {
22 |
23 | public static final byte MAGIC_BYTE = 0;
24 | public static final short API_VERSION = 0;
25 |
26 | public static final short APIKEY_PRODUCE = 0;
27 | public static final short APIKEY_FETCH_REQUEST = 1;
28 | public static final short APIKEY_OFFSET_REQUEST = 2;
29 | public static final short APIKEY_METADATA = 3;
30 |
31 | public static final byte COMPRESSION_MASK = 0x03;
32 | public static final byte NO_COMPRESSION = 0;
33 | public static final byte GZIP = 1; // gzip compression
34 | public static final byte SNAPPY = 2; // snappy compression
35 |
36 | public static final long EARLIEST_OFFSET = -2L;
37 | public static final long LATEST_OFFSET = -1L;
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/KafkaError.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle;
17 |
18 | /**
19 | * Enum of possible errors and error codes that could be returned by the broker.
20 | */
/**
 * Enum of possible errors and error codes that could be returned by the broker.
 *
 * <p>Each constant pairs the numeric wire-protocol error code with a
 * human-readable description of what the code means.
 */
public enum KafkaError {

	NoError((short) 0, "No error--it worked!"),

	Unknown((short) -1, "An unexpected server error"),

	OffsetOutOfRange((short) 1,
		 "The requested offset is outside the range of offsets maintained by "
		 + "the server for the given topic/partition."),

	InvalidMessage((short) 2,
		 "This indicates that a message contents does not match its CRC"),

	UnknownTopicOrPartition((short) 3,
		 "This request is for a topic or partition that does not exist on this broker."),

	InvalidMessageSize((short) 4, "The message has a negative size"),

	LeaderNotAvailable((short) 5,
		 "This error is thrown if we are in the middle of a leadership election "
		 + "and there is currently no leader for this partition and hence it is "
		 + "unavailable for writes."),

	NotLeaderForPartition((short) 6,
		 "This error is thrown if the client attempts to send messages to a "
		 + "replica that is not the leader for some partition. It indicates that "
		 + "the clients metadata is out of date."),

	RequestTimedOut((short) 7,
		 "This error is thrown if the request exceeds the user-specified time limit in the request."),

	BrokerNotAvailable((short) 8,
		 "This is not a client facing error and is used only internally by "
		 + "intra-cluster broker communication."),

	Unused((short) 9, "Unused"),

	MessageSizeTooLarge((short) 10,
		 "The server has a configurable maximum message size to avoid unbounded "
		 + "memory allocation. This error is thrown if the client attempt to "
		 + "produce a message larger than this maximum."),

	StaleControllerEpochCode((short) 11,
		 "Internal error code for broker-to-broker communication."),

	OffsetMetadataTooLargeCode((short) 12,
		 "If you specify a string larger than configured maximum for offset metadata"),

	OffsetsLoadInProgressCode((short) 14,
		 "The broker returns this error code for an offset fetch request if it "
		 + "is still loading offsets (after a leader change for that offsets "
		 + "topic partition"),

	ConsumerCoordinatorNotAvailableCode((short) 15,
		 "The broker returns this error code for consumer metadata requests or "
		 + "offset commit requests if the offsets topic has not yet been created."),

	NotCoordinatorForConsumerCode((short) 16,
		 "The broker returns this error code if it receives an offset fetch or "
		 + "commit request for a consumer group that it is not a coordinator for.");

	// Numeric code as transmitted by the broker.
	private final short errorCode;
	// Human-readable explanation of the code.
	private final String errorMessage;

	private KafkaError(short errorCode, String errorMessage) {
		this.errorCode = errorCode;
		this.errorMessage = errorMessage;
	}

	/**
	 * Returns the numerical error code for the error.
	 *
	 * @return the error code.
	 */
	public short getCode() {
		return errorCode;
	}

	/**
	 * Returns the message associated with the error.
	 *
	 * @return the error message.
	 */
	public String getMessage() {
		return errorMessage;
	}

	/**
	 * Gets the message for a given error code.
	 *
	 * @param errorCode
	 * a numerical error code
	 * @return the error message associated with the error code, or
	 * {@code null} when the code is not recognized
	 */
	public static String getMessage(short errorCode) {
		for (KafkaError candidate : values()) {
			if (candidate.errorCode == errorCode) {
				return candidate.errorMessage;
			}
		}
		return null;
	}

}
147 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/Time.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | /**
18 | * This class was copied from the Apache ZooKeeper project:
19 | *
20 | * git@github.com:apache/zookeeper.git
21 | * 92707a6a84a7965df2d7d8ead0acb721b6e62878
22 | *
23 | * Adapted as required to work with Krackle and it's implementation
24 | * of AuthenticatedSocketBuilder where needed. The following
25 | * major changes were performed:
26 | *
27 | * 1) Package name was changed
28 | * 2) Missing JavaDoc added
29 | *
30 | */
31 |
32 | package com.blackberry.bdp.krackle;
33 |
34 | import java.util.Date;
35 |
public class Time {

	/**
	 * Returns a millisecond timestamp measured from an arbitrary epoch, in
	 * the spirit of System.nanoTime() rather than System.currentTimeMillis().
	 * Because the value is derived from the elapsed-time clock, adjusting
	 * the system clock does not affect what this method returns, while the
	 * rest of the code can keep assuming milliseconds.
	 *
	 * @return The time in milliseconds from some arbitrary point in time.
	 */
	public static long currentElapsedTime() {
		final long nanos = System.nanoTime();
		return nanos / 1000000;
	}

	/**
	 * Explicitly returns the system-dependent current wall-clock time.
	 *
	 * @return Current time in msec.
	 */
	public static long currentWallTime() {
		return System.currentTimeMillis();
	}

	/**
	 * Converts an elapsed-time value (as produced by
	 * {@link #currentElapsedTime()}) into a wall-clock {@link Date}.
	 *
	 * @param elapsedTime an elapsed-time millisecond value
	 * @return A date object indicated by the elapsedTime.
	 */
	public static Date elapsedTimeToDate(long elapsedTime) {
		final long wallNow = currentWallTime();
		return new Date(wallNow + (elapsedTime - currentElapsedTime()));
	}
}
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/auth/AuthenticatedSocketSingleton.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.auth;
17 |
18 | import com.blackberry.bdp.krackle.exceptions.MissingConfigurationException;
19 | import com.blackberry.bdp.krackle.exceptions.InvalidConfigurationTypeException;
20 | import com.blackberry.bdp.krackle.exceptions.AuthenticationException;
21 | import com.blackberry.bdp.krackle.jaas.Login;
22 | import java.io.IOException;
23 | import java.net.InetAddress;
24 | import java.net.Socket;
25 | import java.util.HashMap;
26 | import java.util.Properties;
27 | import javax.security.auth.login.LoginException;
28 | import org.slf4j.Logger;
29 | import org.slf4j.LoggerFactory;
30 |
31 | public class AuthenticatedSocketSingleton {
32 |
33 | private static final Logger LOG = LoggerFactory.getLogger(AuthenticatedSocketSingleton.class);
34 |
35 | public static enum Protocol {
36 | PLAINTEXT,
37 | SSL,
38 | SASL_PLAINTEXT,
39 | SASL_SSL
40 | }
41 |
42 | private Protocol kafkaSecurityProtocol;
43 | private HashMap configuration;
44 |
45 | private AuthenticatedSocketSingleton() {
46 | kafkaSecurityProtocol = Protocol.PLAINTEXT;
47 | configuration = new HashMap<>();
48 | }
49 |
50 | private static class SingletonHolder {
51 | public static final AuthenticatedSocketSingleton INSTANCE = new AuthenticatedSocketSingleton();
52 | }
53 |
54 | public static AuthenticatedSocketSingleton getInstance() {
55 | return SingletonHolder.INSTANCE;
56 | }
57 |
58 |
59 | public void configure(Properties props)
60 | throws AuthenticationException, LoginException {
61 | configuration = new HashMap<>();
62 |
63 | kafkaSecurityProtocol = AuthenticatedSocketSingleton.Protocol.valueOf(
64 | props.getProperty("kafka.security.protocol", "PLAINTEXT").trim().toUpperCase());
65 |
66 | switch (kafkaSecurityProtocol) {
67 | case PLAINTEXT:
68 | break;
69 | case SASL_PLAINTEXT:
70 | Login login = new Login(
71 | props.getProperty("kafka.client.jaas.login.context", "kafkaClient").trim(),
72 | new Login.ClientCallbackHandler());
73 | login.startThreadIfNeeded();
74 | configuration.put("subject", login.getSubject());
75 | configuration.put("clientPrincipal", login.getPrincipal());
76 | configuration.put("servicePrincipal", props.getProperty(
77 | "kafka.security.protocol.service.principal", "kafka").trim());
78 | break;
79 | default:
80 | throw new AuthenticationException(String.format(
81 | "kafka.security.protocol=%s not recognized or supported",
82 | kafkaSecurityProtocol));
83 | }
84 | }
85 |
86 | public Socket build(InetAddress host, int port)
87 | throws AuthenticationException {
88 | try {
89 | Socket socket = new Socket(host, port);
90 | return build(socket);
91 | } catch (IOException | AuthenticationException e) {
92 | LOG.error("an {} exception occured {}: ",
93 | e.getClass().getCanonicalName(),
94 | e.getMessage(),
95 | e);
96 | throw new AuthenticationException(
97 | String.format("failed to a authenticate %s", e.getMessage()));
98 | }
99 | }
100 |
101 | public Socket build(String hostname, int port)
102 | throws AuthenticationException {
103 | try {
104 | Socket socket = new Socket(hostname, port);
105 | return build(socket);
106 | } catch (IOException | AuthenticationException e) {
107 | LOG.error("an {} exception occured {}: ",
108 | e.getClass().getCanonicalName(),
109 | e.getMessage(),
110 | e);
111 | throw new AuthenticationException(
112 | String.format("failed to a authenticate %s", e.getMessage()));
113 | }
114 | }
115 |
116 | public Socket build(Socket socket) throws AuthenticationException {
117 | try {
118 | switch (kafkaSecurityProtocol) {
119 | case PLAINTEXT:
120 | PlainTextAuthenticator pta = new PlainTextAuthenticator(socket);
121 | return pta.getSocket();
122 | case SASL_PLAINTEXT:
123 | SaslPlainTextAuthenticator spta = new SaslPlainTextAuthenticator(socket);
124 | spta.configure(configuration);
125 | spta.authenticate();
126 | LOG.info("sasl authenticated socket has been created");
127 | return socket;
128 | default:
129 | throw new AuthenticationException(
130 | String.format("%s not supported", kafkaSecurityProtocol));
131 | }
132 | } catch (IOException | MissingConfigurationException | InvalidConfigurationTypeException | AuthenticationException e) {
133 | LOG.error("failed to build socket to {}: ",
134 | socket.getInetAddress().getHostName(),
135 | e);
136 | throw new AuthenticationException(
137 | String.format("failed to a authenticate %s", e.getMessage()));
138 |
139 | }
140 | }
141 |
142 | /**
143 | * @return the kafkaSecurityProtocol
144 | */
145 | public AuthenticatedSocketSingleton.Protocol getKafkaSecurityProtocol() {
146 | return kafkaSecurityProtocol;
147 | }
148 |
149 | }
150 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/auth/Authenticator.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.auth;
17 |
18 | import com.blackberry.bdp.krackle.exceptions.MissingConfigurationException;
19 | import com.blackberry.bdp.krackle.exceptions.InvalidConfigurationTypeException;
20 | import java.io.IOException;
21 | import java.net.Socket;
22 | import java.util.Map;
23 |
/**
 * Contract for socket authenticators: implementations wrap or validate a
 * socket so it can be used to talk to a broker under a given security
 * protocol.
 *
 * <p>Typical lifecycle: {@code configure(...)}, then {@code authenticate()},
 * then {@code getSocket()}; {@code close()} releases any resources held by
 * the authenticator.
 */
public interface Authenticator {

	/**
	 * Supplies the implementation-specific configuration.
	 *
	 * @param configs configuration entries keyed by name; required keys and
	 *        value types are implementation-specific
	 * @throws InvalidConfigurationTypeException if a value has the wrong type
	 * @throws MissingConfigurationException if a required key is absent
	 * @throws Exception for implementation-specific configuration failures
	 */
	void configure(Map configs) throws InvalidConfigurationTypeException, MissingConfigurationException, Exception;

	/**
	 * Performs the authentication exchange over the underlying socket.
	 *
	 * @throws IOException if the exchange fails or the socket errors
	 */
	void authenticate() throws IOException;

	/**
	 * @return true once {@code configure(...)} has completed successfully
	 */
	boolean configured();

	/**
	 * @return true once authentication has completed successfully
	 */
	boolean complete();

	/**
	 * @return the underlying (possibly authenticated) socket
	 */
	Socket getSocket();

	/**
	 * Releases resources held by the authenticator.
	 *
	 * @throws IOException if cleanup fails
	 */
	void close() throws IOException;

}
39 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/auth/PlainTextAuthenticator.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2015 BlackBerry, Limited.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
6 |
7 | package com.blackberry.bdp.krackle.auth;
8 |
9 | import com.blackberry.bdp.krackle.exceptions.MissingConfigurationException;
10 | import com.blackberry.bdp.krackle.exceptions.InvalidConfigurationTypeException;
11 | import java.io.IOException;
12 | import java.net.Socket;
13 | import java.util.Map;
14 |
15 |
16 | public class PlainTextAuthenticator implements Authenticator {
17 |
18 | private final Socket socket;
19 |
20 | /**
21 | * Will create a socket based on host name and port
22 | * @param hostname
23 | * @param port
24 | * @throws IOException
25 | */
26 | public PlainTextAuthenticator(String hostname, int port)
27 | throws IOException {
28 | this(new Socket(hostname, port));
29 | }
30 |
31 | /**
32 | * Will use an existing socket
33 | * @param socket
34 | * @throws IOException
35 | */
36 | public PlainTextAuthenticator(Socket socket)
37 | throws IOException {
38 | this.socket = socket;
39 | }
40 |
41 | @Override
42 | public void configure(Map configs)
43 | throws InvalidConfigurationTypeException,
44 | MissingConfigurationException,
45 | Exception {
46 |
47 | }
48 |
49 | @Override
50 | public void authenticate() throws IOException {
51 |
52 | }
53 |
54 | @Override
55 | public boolean configured() {
56 | return true;
57 | }
58 |
59 | @Override
60 | public boolean complete() {
61 | return true;
62 | }
63 |
64 | @Override
65 | public Socket getSocket() {
66 | return socket;
67 | }
68 |
69 | @Override
70 | public void close() throws IOException {
71 |
72 | }
73 |
74 | }
75 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/auth/SaslPlainTextAuthenticator.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.auth;
17 |
18 | import com.blackberry.bdp.krackle.exceptions.MissingConfigurationException;
19 | import com.blackberry.bdp.krackle.exceptions.InvalidConfigurationTypeException;
20 | import java.io.DataInputStream;
21 | import java.io.DataOutputStream;
22 | import java.io.IOException;
23 | import java.net.Socket;
24 | import java.util.Arrays;
25 |
26 | import java.security.PrivilegedActionException;
27 | import java.security.PrivilegedExceptionAction;
28 | import java.util.Map;
29 |
30 | import javax.security.auth.Subject;
31 | import javax.security.auth.callback.Callback;
32 | import javax.security.auth.callback.CallbackHandler;
33 | import javax.security.auth.callback.NameCallback;
34 | import javax.security.auth.callback.PasswordCallback;
35 | import javax.security.auth.callback.UnsupportedCallbackException;
36 | import javax.security.sasl.AuthorizeCallback;
37 | import javax.security.sasl.RealmCallback;
38 | import javax.security.sasl.Sasl;
39 | import javax.security.sasl.SaslClient;
40 | import javax.security.sasl.SaslException;
41 |
42 | import org.slf4j.Logger;
43 | import org.slf4j.LoggerFactory;
44 |
45 | public class SaslPlainTextAuthenticator implements Authenticator{
46 |
47 | public enum SaslState {
48 |
49 | INITIAL, INTERMEDIATE, COMPLETE, FAILED
50 |
51 | }
52 |
53 | private static final Logger LOG = LoggerFactory.getLogger(SaslPlainTextAuthenticator.class);
54 |
55 | // Configurable Items
56 | private String hostname;
57 | private Subject subject;
58 | private String servicePrincipal;
59 |
60 | private SaslClient saslClient;
61 | private String clientPrincipal;
62 | private boolean configured;
63 |
64 | private final Socket socket;
65 | private final DataInputStream inStream;
66 | private final DataOutputStream outStream;
67 |
68 | private static final byte[] EMPTY = new byte[0];
69 |
70 | private SaslState saslState;
71 |
72 | /**
73 | * Will create a socket based on host name and port
74 | * @param hostname
75 | * @param port
76 | * @throws IOException
77 | * @throws SaslException
78 | */
79 | public SaslPlainTextAuthenticator(String hostname, int port)
80 | throws IOException, SaslException {
81 | this(new Socket(hostname, port));
82 | }
83 |
84 | /**
85 | * Will use an existing socket
86 | * @param socket
87 | * @throws IOException
88 | * @throws SaslException
89 | */
90 | public SaslPlainTextAuthenticator(Socket socket)
91 | throws IOException, SaslException {
92 | this.socket = socket;
93 | this.inStream = new DataInputStream(socket.getInputStream());
94 | this.outStream = new DataOutputStream(socket.getOutputStream());
95 | this.configured = false;
96 | saslState = SaslState.INITIAL;
97 | }
98 |
99 | @Override
100 | public void configure(Map configs) throws
101 | MissingConfigurationException, InvalidConfigurationTypeException, SaslException {
102 |
103 | // No longer required to be specified in config map allows config map to be shared
104 | hostname = socket.getInetAddress().getHostName();
105 |
106 | if (!configs.containsKey("subject")) {
107 | throw new MissingConfigurationException("`subject` not defined in configration");
108 | } else if (!configs.get("subject").getClass().equals(Subject.class)) {
109 | String type = Subject.class.getCanonicalName();
110 | throw new InvalidConfigurationTypeException("`subject` is not a " + type);
111 | } else {
112 | subject = (Subject) configs.get("subject");
113 | }
114 |
115 | if (!configs.containsKey("servicePrincipal")) {
116 | throw new MissingConfigurationException("`servicePrincipal` not defined in configration");
117 | } else if (!configs.get("servicePrincipal").getClass().equals(String.class)) {
118 | String type = String.class.getCanonicalName();
119 | throw new InvalidConfigurationTypeException("`servicePrincipal` is not a " + type);
120 | } else {
121 | servicePrincipal = (String) configs.get("servicePrincipal");
122 | }
123 |
124 | if (!configs.containsKey("clientPrincipal")) {
125 | throw new MissingConfigurationException("`clientPrincipal` not defined in configration");
126 | } else if (!configs.get("clientPrincipal").getClass().equals(String.class)) {
127 | String type = String.class.getCanonicalName();
128 | throw new InvalidConfigurationTypeException("`clientPrincipal` is not a " + type);
129 | } else {
130 | clientPrincipal = (String) configs.get("clientPrincipal");
131 | }
132 |
133 | this.saslClient = createSaslClient();
134 | configured = true;
135 | LOG.info("authenticator has been configured");
136 | }
137 |
138 | private SaslClient createSaslClient() throws SaslException {
139 | try {
140 | return Subject.doAs(subject, new PrivilegedExceptionAction() {
141 | @Override
142 | public SaslClient run() throws SaslException {
143 | String[] mechs = {"GSSAPI"};
144 | LOG.info("Creating SaslClient: client={}; service={}; serviceHostname={}; mechs={}",
145 | clientPrincipal, servicePrincipal, hostname, Arrays.toString(mechs));
146 | return Sasl.createSaslClient(mechs, clientPrincipal, servicePrincipal, hostname, null,
147 | new ClientCallbackHandler());
148 | }
149 |
150 | });
151 | } catch (PrivilegedActionException e) {
152 | throw new SaslException("Failed to create SaslClient", e.getCause());
153 | }
154 | }
155 |
156 | /**
157 | * Sends an empty message to the server to initiate the authentication process. It then evaluates server challenges
158 | * via `SaslClient.evaluateChallenge` and returns client responses until authentication succeeds or fails.
159 | *
160 | * The messages are sent and received as size delimited bytes that consists of a 4 byte network-ordered size N
161 | * followed by N bytes representing the opaque payload.
162 | * @throws java.io.IOException
163 | */
164 | @Override
165 | public void authenticate() throws IOException {
166 | if (!configured) {
167 | throw new IOException("authentication attempted on unconfigured authenticator");
168 | }
169 | while (!saslClient.isComplete()) {
170 | switch (saslState) {
171 | case INITIAL:
172 | LOG.debug("saslClient has initial response? {}",
173 | saslClient.hasInitialResponse());
174 | sendSaslToken(EMPTY);
175 | saslState = SaslState.INTERMEDIATE;
176 | LOG.debug("sent initial empty sasl token");
177 | break;
178 | case INTERMEDIATE:
179 | byte[] challenge;
180 | LOG.debug("in intermediate");
181 | int length = inStream.readInt();
182 | LOG.debug("in intermediate - read int, length of response is {}", length);
183 | challenge = new byte[length];
184 | inStream.readFully(challenge);
185 | LOG.debug("read response");
186 | sendSaslToken(challenge);
187 | if (saslClient.isComplete()) {
188 | LOG.debug("complete sasl state detected in intermediate");
189 | saslState = SaslState.COMPLETE;
190 | }
191 | break;
192 | case COMPLETE:
193 | break;
194 | case FAILED:
195 | throw new IOException("SASL handshake failed");
196 | }
197 | }
198 | LOG.debug("authentication complete");
199 | }
200 |
201 | private void sendSaslToken(byte[] serverToken) throws IOException {
202 | if (!saslClient.isComplete()) {
203 | try {
204 | byte[] saslToken = createSaslToken(serverToken);
205 | if (saslToken != null) {
206 | LOG.debug("sending sasl token of length: {}", saslToken.length);
207 | outStream.writeInt(saslToken.length);
208 | outStream.write(saslToken);
209 | outStream.flush();
210 | LOG.debug("sent sasl token of length: {}", saslToken.length);
211 | }
212 | } catch (IOException e) {
213 | saslState = SaslState.FAILED;
214 | throw e;
215 | }
216 | } else {
217 | LOG.warn("attempting to send sasl token to a completed sasl client");
218 | }
219 | }
220 |
221 | private byte[] createSaslToken(final byte[] saslToken) throws SaslException {
222 | if (saslToken == null) {
223 | throw new SaslException("Error authenticating with the Kafka Broker: received a nul saslToken.");
224 | }
225 | try {
226 | return Subject.doAs(subject, new PrivilegedExceptionAction() {
227 | @Override
228 | public byte[] run() throws SaslException {
229 | LOG.debug("evaluating challenge of length {} to {}",
230 | saslToken.length,
231 | socket.getInetAddress().getHostAddress());
232 | byte[] evaluation = saslClient.evaluateChallenge(saslToken);
233 | LOG.debug("evaluation length is {}", evaluation.length);
234 | return evaluation;
235 | }
236 | });
237 | } catch (PrivilegedActionException e) {
238 | String error = "An error: (" + e + ") occurred when evaluating SASL token received from the Kafka Broker.";
239 | // Try to provide hints to use about what went wrong so they can fix their configuration.
240 | // TODO: introspect about e: look for GSS information.
241 | final String unknownServerErrorText
242 | = "(Mechanism level: Server not found in Kerberos database (7) - UNKNOWN_SERVER)";
243 | if (e.toString().contains(unknownServerErrorText)) {
244 | error += " This may be caused by Java's being unable to resolve the Kafka Broker's"
245 | + " hostname correctly. You may want to try to adding"
246 | + " '-Dsun.net.spi.nameservice.provider.1=dns,sun' to your client's JVMFLAGS environment."
247 | + " Users must configure FQDN of kafka brokers when authenticating using SASL and"
248 | + " `socketChannel.socket().getInetAddress().getHostName()` must match the hostname in `principal/hostname@realm`";
249 | }
250 | error += " Kafka Client will go to AUTH_FAILED state.";
251 | //Unwrap the SaslException inside `PrivilegedActionException`
252 | throw new SaslException(error, e.getCause());
253 | }
254 | }
255 |
256 | private static class ClientCallbackHandler implements CallbackHandler {
257 |
258 | @Override
259 | public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
260 | for (Callback callback : callbacks) {
261 | LOG.info("callback {} received", callback.toString());
262 | if (callback instanceof NameCallback) {
263 | NameCallback nc = (NameCallback) callback;
264 | nc.setName(nc.getDefaultName());
265 | } else {
266 | if (callback instanceof PasswordCallback) {
267 | // Call `setPassword` once we support obtaining a password from the user and update message below
268 | throw new UnsupportedCallbackException(callback, "Could not login: the client is being asked for a password, but the Kafka"
269 | + " client code does not currently support obtaining a password from the user."
270 | + " Make sure -Djava.security.auth.login.config property passed to JVM and"
271 | + " the client is configured to use a ticket cache (using"
272 | + " the JAAS configuration setting 'useTicketCache=true)'. Make sure you are using"
273 | + " FQDN of the Kafka broker you are trying to connect to.");
274 | } else {
275 | if (callback instanceof RealmCallback) {
276 | RealmCallback rc = (RealmCallback) callback;
277 | rc.setText(rc.getDefaultText());
278 | } else {
279 | if (callback instanceof AuthorizeCallback) {
280 | AuthorizeCallback ac = (AuthorizeCallback) callback;
281 | String authId = ac.getAuthenticationID();
282 | String authzId = ac.getAuthorizationID();
283 | ac.setAuthorized(authId.equals(authzId));
284 | if (ac.isAuthorized()) {
285 | ac.setAuthorizedID(authzId);
286 | }
287 | } else {
288 | throw new UnsupportedCallbackException(callback, "Unrecognized SASL ClientCallback");
289 | }
290 | }
291 | }
292 | }
293 | }
294 | }
295 |
296 | }
297 |
298 | @Override
299 | public boolean complete() {
300 | return saslState == SaslState.COMPLETE;
301 | }
302 |
303 | @Override
304 | public void close() throws IOException {
305 | saslClient.dispose();
306 | }
307 |
308 | @Override
309 | public Socket getSocket() {
310 | return socket;
311 | }
312 |
313 | @Override
314 | public boolean configured() {
315 | return configured;
316 | }
317 |
318 | }
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/compression/Compressor.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.compression;
17 |
18 | import java.io.IOException;
19 |
20 | /**
21 | * Interface for compressors used to compress data for sending to the broker.
22 | */
23 | public interface Compressor {
24 |
25 | /**
26 | * Return the attribute value associated with this compression method.
27 | *
28 | * @return the attribute value associated with this compression method.
29 | */
30 | public byte getAttribute();
31 |
32 | /**
33 | * Compresses the data from the source array into the destination array.
34 | *
35 | * If the destination array is (potentially) not big enough to hold the
36 | * compressed data, then the compress method will not compress anything and
37 | * return -1.
38 | *
39 | * @param src
40 | * source byte array.
41 | * @param srcPos
42 | * start position of data in the source byte array.
43 | * @param length
44 | * length of data in the source byte array.
45 | * @param dest
46 | * destination byte array
47 | * @param destPos
48 | * position in destination byte array to write to
49 | * @return the number of bytes written to the destination array, or
50 | * -1 if there was not enough room to write the
51 | * compressed data.
52 | * @throws IOException
53 | */
54 | public int compress(byte[] src, int srcPos, int length, byte[] dest,
55 | int destPos) throws IOException;
56 |
57 | }
58 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/compression/Decompressor.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.compression;
17 |
18 | import java.io.IOException;
19 |
20 | /**
21 | * Interface for decompressors used to compress data for sending to the broker.
22 | */
23 | public interface Decompressor {
24 |
25 | /**
26 | * Return the attribute value associated with this compression method.
27 | *
28 | * @return the attribute value associated with this compression method.
29 | */
30 | public byte getAttribute();
31 |
32 | /**
33 | * Decompresses the data from the source array into the destination array.
34 | *
35 | * If the destination array is (potentially) not big enough to hold the
36 | * decompressed data, then the decompress method will not decompress anything
37 | * and return -1.
38 | *
39 | * @param src
40 | * source byte array.
41 | * @param srcPos
42 | * position in source byte array to start from.
43 | * @param length
44 | * length of data to decompress.
45 | * @param dest
46 | * destination byte array.
47 | * @param destPos
48 | * position in destination byte array to write to.
49 | * @param maxLength
50 | * max length of decompressed data.
51 | * @return the number of bytes written to the destination, or -1
52 | * if maxLength was not big enough to hold the data.
53 | * @throws IOException
54 | */
55 | public int decompress(byte[] src, int srcPos, int length, byte[] dest,
56 | int destPos, int maxLength) throws IOException;
57 |
58 | }
59 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/compression/GzipCompressor.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.compression;
17 |
18 | import java.io.IOException;
19 | import java.nio.ByteBuffer;
20 | import java.nio.ByteOrder;
21 | import java.util.zip.CRC32;
22 | import java.util.zip.Deflater;
23 |
24 | import com.blackberry.bdp.krackle.Constants;
25 |
26 | /**
27 | * Compressor implementation that used the GZIP algorithm.
28 | */
29 | public class GzipCompressor implements Compressor {
30 |
31 | private static final byte[] HEADER_BYTES = new byte[] //
32 | {(byte) 0x1f, (byte) 0x8b, // Magic number
33 | 8, // Deflate
34 | 0, // All flags zero
35 | 0, 0, 0, 0, // Set MTIME to zero, for ease of use
36 | 0, // No extra flags
37 | 3 // UNIX OS
38 | };
39 | private final Deflater deflater;
40 | private final CRC32 crc;
41 | private final ByteBuffer bb;
42 | private int compressedSize;
43 | private int maxOutputSize;
44 | private int deflaterOutputSize;
45 | private byte[] testBytes = new byte[1];
46 |
47 | /**
48 | * New instance with default compression level.
49 | */
50 | public GzipCompressor() {
51 | this(Deflater.DEFAULT_COMPRESSION);
52 | }
53 |
54 | /**
55 | * New instance with the given compression level
56 | *
57 | * @param compressionLevel
58 | * requested compression level. Valid values are -1
59 | * (default compression), 0 (no compression),
60 | * 1-9.
61 | */
62 | public GzipCompressor(int compressionLevel) {
63 | deflater = new Deflater(compressionLevel, true);
64 | crc = new CRC32();
65 | bb = ByteBuffer.allocate(8);
66 | bb.order(ByteOrder.LITTLE_ENDIAN);
67 | }
68 |
69 | @Override
70 | public byte getAttribute() {
71 | return Constants.GZIP;
72 | }
73 |
74 | @Override
75 | public int compress(byte[] src, int srcPos, int length, byte[] dest,
76 | int destPos) throws IOException {
77 | System.arraycopy(HEADER_BYTES, 0, dest, destPos, 10);
78 | compressedSize = 10;
79 |
80 | deflater.reset();
81 | deflater.setInput(src, srcPos, length);
82 | deflater.finish();
83 |
84 | // The output can't exceed the bytes we have to work with, less 10 bytes for
85 | // headers and 8 bytes for footers.
86 | maxOutputSize = dest.length - destPos - 10 - 8;
87 | deflaterOutputSize = deflater.deflate(dest, destPos + compressedSize,
88 | maxOutputSize);
89 | if (deflaterOutputSize == maxOutputSize) {
90 | // We just filled the output buffer! Either we have more to decompress, or
91 | // we don't. If we do, then that's an error. If we don't then that's fine.
92 | // So let's check.
93 | if (deflater.deflate(testBytes, 0, 1) == 1) {
94 | // We couldn't fit everything in the output buffer.
95 | return -1;
96 | }
97 | }
98 | compressedSize += deflaterOutputSize;
99 |
100 | crc.reset();
101 | crc.update(src, srcPos, length);
102 | bb.clear();
103 | bb.putInt((int) crc.getValue());
104 | bb.putInt(length);
105 | bb.rewind();
106 | bb.get(dest, destPos + compressedSize, 8);
107 |
108 | compressedSize += 8;
109 |
110 | return compressedSize;
111 | }
112 |
113 | }
114 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/compression/GzipDecompressor.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.compression;
17 |
18 | import java.io.IOException;
19 | import java.util.zip.DataFormatException;
20 | import java.util.zip.Inflater;
21 |
22 | import com.blackberry.bdp.krackle.Constants;
23 |
24 | /**
25 | * Decompressor implementation that used the GZIP algorithm.
26 | */
27 | public class GzipDecompressor implements Decompressor {
28 |
29 | // various fields, as named in rfc 1952
30 |
31 | private static final byte ID1 = (byte) 0x1f;
32 | private static final byte ID2 = (byte) 0x8b;
33 | private static final byte CM = 8; // Compression Method = Deflate
34 |
35 | // masks for various flags
36 | // FTEXT flag is ignored, but we keep it here for completeness
37 | @SuppressWarnings("unused")
38 | private static final byte FLG_FTEXT = 0x01;
39 | private static final byte FLG_FHCRC = 0x02;
40 | private static final byte FLG_FEXTRA = 0x04;
41 | private static final byte FLG_FNAME = 0x08;
42 | private static final byte FLG_COMMENT = 0x10;
43 |
44 | private byte flags;
45 | private boolean fhcrc;
46 | private boolean fextra;
47 | private boolean fname;
48 | private boolean comment;
49 |
50 | private Inflater inflater;
51 | private int pos;
52 | private short extraLength;
53 | private int decompressedLength;
54 |
55 | /**
56 | * Constructor.
57 | */
58 | public GzipDecompressor() {
59 | inflater = new Inflater(true);
60 | }
61 |
62 | @Override
63 | public byte getAttribute() {
64 | return Constants.GZIP;
65 | }
66 |
67 | @Override
68 | public int decompress(byte[] src, int srcPos, int length, byte[] dest,
69 | int destPos, int maxLength) throws IOException {
70 | pos = srcPos;
71 |
72 | // Check the magic number and compression method
73 | if (src[pos] != ID1 || src[pos + 1] != ID2) {
74 | throw new IOException("Wrong gzip magic number.");
75 | }
76 | if (src[pos + 2] != CM) {
77 | throw new IOException("Unrecognized compession method.");
78 | }
79 | pos += 3;
80 |
81 | // read flags
82 | flags = src[pos];
83 | // ftext = (FLG_FTEXT == (FLG_FTEXT & flags));
84 | fhcrc = (FLG_FHCRC == (FLG_FHCRC & flags));
85 | fextra = (FLG_FEXTRA == (FLG_FEXTRA & flags));
86 | fname = (FLG_FNAME == (FLG_FNAME & flags));
87 | comment = (FLG_COMMENT == (FLG_COMMENT & flags));
88 | pos++;
89 |
90 | // ignore the timestamp. 4 bytes
91 | // ignore extra flags. 1 byte
92 | // ignore OS. 1 byte
93 | pos += 6;
94 |
95 | if (fextra) {
96 | // skip it
97 | extraLength = readShort(src, pos);
98 | pos += 2 + extraLength;
99 | }
100 |
101 | if (fname) {
102 | // skip it
103 | while (src[pos] != 0x00) {
104 | pos++;
105 | }
106 | pos++;
107 | }
108 |
109 | if (comment) {
110 | // skip it
111 | while (src[pos] != 0x00) {
112 | pos++;
113 | }
114 | pos++;
115 | }
116 |
117 | if (fhcrc) {
118 | // skip it
119 | pos += 2;
120 | }
121 |
122 | inflater.reset();
123 | inflater.setInput(src, pos, length - (pos - srcPos) - 8);
124 | try {
125 | decompressedLength = inflater.inflate(dest, destPos, maxLength);
126 |
127 | // Sometimes we get truncated input. So we may not be 'finished'
128 | // inflating, but we still want to return success. So only fail if we have
129 | // the buffer full, and are not finished.
130 | if (inflater.finished() == false && decompressedLength == maxLength) {
131 | // There was not enough room to write the output
132 |
133 | return -1;
134 | }
135 | } catch (DataFormatException e) {
136 | throw new IOException("Error decompressing data.", e);
137 | }
138 |
139 | return decompressedLength;
140 | }
141 |
142 | private short readShort(byte[] src, int pos) {
143 | // little endian!
144 | return (short) ((src[pos] & 0xFF) | (src[pos + 1] & 0xFF) << 8);
145 | }
146 |
147 | }
148 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/compression/SnappyCompressor.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.compression;
17 |
18 | import java.io.IOException;
19 |
20 | import org.xerial.snappy.Snappy;
21 |
22 | import com.blackberry.bdp.krackle.Constants;
23 |
24 | /**
25 | * Compressor implementation that used the Snappy algorithm.
26 | */
27 | public class SnappyCompressor implements Compressor {
28 |
29 | private final static byte[] header = new byte[]{ //
30 | -126, 'S', 'N', 'A', 'P', 'P', 'Y', 0, // Magic number
31 | 0, 0, 0, 1, // version
32 | 0, 0, 0, 1 // min compatable version
33 | };
34 | private final static int headerLength = header.length;
35 |
36 | private int compressedLength;
37 | private int maxCompressedSize;
38 |
39 | @Override
40 | public byte getAttribute() {
41 | return Constants.SNAPPY;
42 | }
43 |
44 | @Override
45 | public int compress(byte[] src, int srcPos, int length, byte[] dest,
46 | int destPos) throws IOException {
47 | System.arraycopy(header, 0, dest, destPos, headerLength);
48 |
49 | // Compressed size cannot be greater than what we have available
50 | maxCompressedSize = dest.length - destPos - headerLength - 4;
51 | if (Snappy.maxCompressedLength(length) > maxCompressedSize) {
52 | return -1;
53 | }
54 |
55 | compressedLength = Snappy.compress(src, srcPos, length, dest, destPos
56 | + headerLength + 4);
57 | writeInt(compressedLength, dest, destPos + headerLength);
58 |
59 | return headerLength + 4 + compressedLength;
60 | }
61 |
62 | private void writeInt(int i, byte[] dest, int pos) {
63 | dest[pos] = (byte) (i >> 24);
64 | dest[pos + 1] = (byte) (i >> 16);
65 | dest[pos + 2] = (byte) (i >> 8);
66 | dest[pos + 3] = (byte) i;
67 | }
68 |
69 | }
70 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/compression/SnappyDecompressor.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.compression;
17 |
18 | import java.io.IOException;
19 |
20 | import org.xerial.snappy.Snappy;
21 |
22 | import com.blackberry.bdp.krackle.Constants;
23 |
24 | /**
25 | * Decompressor implementation that used the Snappy algorithm.
26 | */
27 | public class SnappyDecompressor implements Decompressor {
28 |
29 | private static final byte[] MAGIC_NUMBER = new byte[]{ //
30 | -126, 'S', 'N', 'A', 'P', 'P', 'Y', 0};
31 |
32 | private byte[] src;
33 | private int pos;
34 |
35 | private int blockLength;
36 | private int decompressedLength;
37 |
38 | private int uncompressedBlockLength;
39 |
40 | @Override
41 | public byte getAttribute() {
42 | return Constants.SNAPPY;
43 | }
44 |
45 | @Override
46 | public int decompress(byte[] src, int srcPos, int length, byte[] dest,
47 | int destPos, int maxLength) throws IOException {
48 | this.src = src;
49 | decompressedLength = 0;
50 |
51 | // Check for magic number
52 | if (src[srcPos] == MAGIC_NUMBER[0] //
53 | || src[srcPos + 1] == MAGIC_NUMBER[1] //
54 | || src[srcPos + 2] == MAGIC_NUMBER[2] //
55 | || src[srcPos + 3] == MAGIC_NUMBER[3] //
56 | || src[srcPos + 4] == MAGIC_NUMBER[4] //
57 | || src[srcPos + 5] == MAGIC_NUMBER[5] //
58 | || src[srcPos + 6] == MAGIC_NUMBER[6] //
59 | || src[srcPos + 7] == MAGIC_NUMBER[7]) {
60 |
61 | // advance past the magic number
62 | // assume the version (4 bytes), min compatable version (4 bytes) are fine
63 | pos = srcPos + 8 + 8;
64 |
65 | // TODO: limit the decompressed length
66 | while (pos < srcPos + length) {
67 | blockLength = readInt();
68 |
69 | // Check to see if this will exceed maxLength
70 | uncompressedBlockLength = Snappy.uncompressedLength(src, pos,
71 | blockLength);
72 | if (decompressedLength + uncompressedBlockLength > maxLength) {
73 | return -1;
74 | }
75 |
76 | decompressedLength += Snappy.uncompress(src, pos, blockLength, dest,
77 | destPos + decompressedLength);
78 | pos += blockLength;
79 | }
80 |
81 | return decompressedLength;
82 | } else {
83 | // Assume it's just straight compressed
84 | return Snappy.uncompress(src, pos, blockLength, dest, destPos);
85 | }
86 | }
87 |
88 | private int readInt() {
89 | pos += 4;
90 | return src[pos - 4] & 0xFF << 24 //
91 | | (src[pos - 3] & 0xFF) << 16 //
92 | | (src[pos - 2] & 0xFF) << 8 //
93 | | (src[pos - 1] & 0xFF);
94 | }
95 |
96 | }
97 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/consumer/BrokerUnavailableException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.consumer;
17 |
18 | /**
19 | *
20 | * @author dariens
21 | */
22 | public class BrokerUnavailableException extends Exception {
23 |
24 | public BrokerUnavailableException(String error, Exception e) {
25 | super(error, e);
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/consumer/Consumer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.consumer;
17 |
18 | import com.blackberry.bdp.common.jmx.MetricRegistrySingleton;
19 | import java.io.IOException;
20 | import java.io.InputStream;
21 | import java.io.OutputStream;
22 | import java.net.Socket;
23 | import java.nio.ByteBuffer;
24 | import java.nio.charset.Charset;
25 |
26 | import org.slf4j.Logger;
27 | import org.slf4j.LoggerFactory;
28 |
29 | import com.blackberry.bdp.krackle.Constants;
30 | import com.blackberry.bdp.krackle.KafkaError;
31 | import com.blackberry.bdp.krackle.auth.AuthenticatedSocketSingleton;
32 | import com.blackberry.bdp.krackle.meta.Broker;
33 | import com.blackberry.bdp.krackle.meta.MetaData;
34 | import com.codahale.metrics.Meter;
35 | import com.codahale.metrics.MetricRegistry;
36 | import java.net.SocketTimeoutException;
37 |
38 | /**
39 | * An implementation of the Kafka 0.9 consumer.
40 | *
41 | * This class acts as a consumer of data from a cluster of Kafka brokers.
42 | * Each instance only reads data from a single partition of a single topic.
43 | * If you need to read more than that, then instantiate more instances.
44 | *
45 | * This class was designed to be very light weight. The standard Java
46 | * client creates a lot of objects, and therefore causes a lot of garbage
47 | * collection that leads to a major slowdown in performance. This client
48 | * creates no new objects during steady state running, and so avoids
49 | * all garbage collection overhead.
50 | */
51 | public class Consumer {
52 |
	private static final Logger LOG = LoggerFactory.getLogger(Consumer.class);
	private static final Charset UTF8 = Charset.forName("UTF8");

	private ConsumerConfiguration conf;

	// Client id, pre-encoded once so requests can reuse the bytes.
	private String clientId;
	private byte[] clientIdBytes;
	private short clientIdLength;

	// Topic / partition this consumer is bound to, plus its current broker.
	private String topic;
	private byte[] topicBytes;
	private short topicLength;
	private Broker broker;
	private int partition;

	private final MessageSetReader messageSetReader = new MessageSetReader();

	// Offset bookkeeping: next offset to fetch, last offset returned, and the
	// partition high water mark.
	private long offset;
	private long lastOffset;
	private long highWaterMark;

	// Pre-allocated request/response buffers (each ByteBuffer wraps its byte[])
	// so steady-state running creates no garbage.
	private byte[] offsetRequestBytes;
	private ByteBuffer offsetRequestBuffer;

	private byte[] offsetResponseBytes;
	private ByteBuffer offsetResponseBuffer;

	private byte[] requestBytes;
	private ByteBuffer requestBuffer;

	private int fetchMessageMaxBytes;
	private byte[] responseBytes;
	private ByteBuffer responseBuffer;

	// Fetch tuning, copied from the configuration at construction time.
	private int fetchWaitMaxMs;
	private int fetchMinBytes;

	// Metrics: per-partition meters plus consumer-wide totals.
	private MetricRegistry metrics;
	private Meter mMessageRequests = null;
	private Meter mMessageRequestsTotal = null;
	private Meter mMessagesReturned = null;
	private Meter mMessagesReturnedTotal = null;
	private Meter mBytesReturned = null;
	private Meter mBytesReturnedTotal = null;
	private Meter mMessageRequestsNoData = null;
	private Meter mMessageRequestsNoDataTotal = null;
	private Meter mBrokerReadAttempts = null;
	private Meter mBrokerReadAttemptsTotal = null;
	private Meter mBrokerReadSuccess = null;
	private Meter mBrokerReadSuccessTotal = null;
	private Meter mBrokerReadFailure = null;
	private Meter mBrokerReadFailureTotal = null;

	// Broker connection and per-request scratch state.
	private int bytesReturned = 0;
	private Socket brokerSocket = null;
	private InputStream brokerIn = null;
	private OutputStream brokerOut = null;
	private int correlationId = 0;
	private int bytesRead;
	private int responseLength;
	private int responseCorrelationId;
	private short errorCode;
	private int messageSetSize;
117 | /*
118 | * Create a new consumer that reads from a given consumer. It attempts to start at offset 0.
119 | *
120 | * @param conf ConsumerConfiguration for this consumer.
121 | * @param clientId clientId to be send with requests to Kafka.
122 | * @param topic topic to read from.
123 | * @param partition id of the partition to read from.
124 | */
125 | public Consumer(ConsumerConfiguration conf, String clientId, String topic, int partition) throws BrokerUnavailableException {
126 | this(conf, clientId, topic, partition, 0L);
127 | }
128 |
129 | /*
130 | * Create a new consumer that reads from a given consumer that attempts to start reading at the given offset.
131 | *
132 | * @param conf ConsumerConfiguration for this consumer.
133 | * @param clientId clientId to be send with requests to Kafka.
134 | * @param topic topic to read from.
135 | * @param partition id of the partition to read from.
136 | * @param offset the offset to start reading from.
137 | */
138 | public Consumer(ConsumerConfiguration conf, String clientId, String topic, int partition, long offset) throws BrokerUnavailableException {
139 | this(conf, clientId, topic, partition, offset, null);
140 | }
141 |
142 | /*
143 | * Create a new consumer that reads from a given consumer that attempts to start reading at the given offset.
144 | *
145 | * Metrics are reported using the given instance of MetricRegistry instead the internal singleton instance.
146 | *
147 | * @param conf ConsumerConfiguration for this consumer.
148 | * @param clientId clientId to be send with requests to Kafka.
149 | * @param topic topic to read from.
150 | * @param partition id of the partition to read from.
151 | * @param offset the offset to start reading from.
152 | * @param metrics the instance of MetricRegistry to use for reporting metrics.
153 | */
154 | public Consumer(ConsumerConfiguration conf, String clientId, String topic, int partition, long offset, MetricRegistry metrics) throws BrokerUnavailableException {
155 | LOG.info("[{}-{}] creating consumer from offset {}", topic, partition, offset);
156 |
157 | this.conf = conf;
158 |
159 | if (metrics == null) {
160 | this.metrics = MetricRegistrySingleton.getInstance().getMetricsRegistry();
161 | MetricRegistrySingleton.getInstance().enableJmx();
162 | MetricRegistrySingleton.getInstance().enableConsole();
163 | } else {
164 | this.metrics = metrics;
165 | }
166 |
167 | this.clientId = clientId;
168 | clientIdBytes = clientId.getBytes(UTF8);
169 | clientIdLength = (short) clientIdBytes.length;
170 |
171 | this.topic = topic;
172 | topicBytes = topic.getBytes(UTF8);
173 | topicLength = (short) topicBytes.length;
174 |
175 | this.partition = partition;
176 | this.offset = offset;
177 |
178 | initializeMetrics();
179 |
180 | offsetRequestBytes = new byte[44 + clientIdLength + topicLength];
181 | offsetRequestBuffer = ByteBuffer.wrap(offsetRequestBytes);
182 |
183 | offsetResponseBytes = new byte[32 + topicLength];
184 | offsetResponseBuffer = ByteBuffer.wrap(offsetResponseBytes);
185 |
186 | requestBytes = new byte[52 + clientIdLength + topicLength];
187 | requestBuffer = ByteBuffer.wrap(requestBytes);
188 |
189 | fetchMessageMaxBytes = conf.getFetchMessageMaxBytes();
190 | responseBytes = new byte[fetchMessageMaxBytes + 32 + topicLength];
191 | responseBuffer = ByteBuffer.wrap(responseBytes);
192 |
193 | fetchWaitMaxMs = conf.getFetchWaitMaxMs();
194 | fetchMinBytes = conf.getFetchMinBytes();
195 |
196 | LOG.info("[{}-{}] connecting to broker", topic, partition);
197 | connectToBroker();
198 | }
199 |
200 | private void initializeMetrics() {
201 | String name = topic + "-" + partition;
202 |
203 | mMessageRequests = this.metrics.meter("krackle:consumer:partitions:" + name + ":message requests");
204 | mMessageRequestsTotal = this.metrics.meter("krackle:consumer:total:message requests");
205 | mMessagesReturned = this.metrics.meter("krackle:consumer:partitions:" + name + ":message returned");
206 | mMessagesReturnedTotal = this.metrics.meter("krackle:consumer:total:message returned");
207 | mBytesReturned = this.metrics.meter("krackle:consumer:partitions:" + name + ":bytes returned");
208 | mBytesReturnedTotal = this.metrics.meter("krackle:consumer:total:bytes returned");
209 | mMessageRequestsNoData = this.metrics.meter("krackle:consumer:partitions:" + name + ":no message returned");
210 | mMessageRequestsNoDataTotal = this.metrics.meter("krackle:consumer:total:no message returned");
211 | mBrokerReadAttempts = this.metrics.meter("krackle:consumer:partitions:" + name + ":broker consume attempts");
212 | mBrokerReadAttemptsTotal = this.metrics.meter("krackle:consumer:total:broker consume attempts");
213 | mBrokerReadSuccess = this.metrics.meter("krackle:consumer:partitions:" + name + ":broker consume success");
214 | mBrokerReadSuccessTotal = this.metrics.meter("krackle:consumer:total:broker consume success");
215 | mBrokerReadFailure = this.metrics.meter("krackle:consumer:partitions:" + name + ":broker consume failure");
216 | mBrokerReadFailureTotal = this.metrics.meter("krackle:consumer:total:broker consume failure");
217 | }
218 |
219 | /*
220 | * Read in a message from Kafka into the given byte array.
221 | *
222 | * If the size of the message exceeds maxLength, it will be truncated to fit.
223 | *
224 | * @param buffer the byte array to write into.
225 | * @param pos the position in the byte array to write to.
226 | * @param maxLength the max size of the message to write.
227 | * @return the number of bytes writen, or -1 if no data was returned.
228 | * @throws IOException
229 | */
230 | public int getMessage(byte[] buffer, int pos, int maxLength) throws IOException, BrokerUnavailableException {
231 | mMessageRequests.mark();
232 | mMessageRequestsTotal.mark();
233 |
234 | try {
235 | if (messageSetReader == null || messageSetReader.isReady() == false) {
236 | readFromBroker();
237 |
238 | if (messageSetReader == null || messageSetReader.isReady() == false) {
239 | mMessageRequestsNoData.mark();
240 | mMessageRequestsNoDataTotal.mark();
241 | return -1;
242 | }
243 | }
244 |
245 | bytesReturned = messageSetReader.getMessage(buffer, pos, maxLength);
246 |
247 | if (bytesReturned == -1) {
248 | mMessageRequestsNoData.mark();
249 | mMessageRequestsNoDataTotal.mark();
250 | return -1;
251 | }
252 |
253 | lastOffset = messageSetReader.getOffset();
254 | offset = messageSetReader.getNextOffset();
255 |
256 | //LOG.info("message received from messageSetReader latOffset {} offset {}" , lastOffset, offset);
257 | mMessagesReturned.mark();
258 | mMessagesReturnedTotal.mark();
259 | mBytesReturned.mark(bytesReturned);
260 | mBytesReturnedTotal.mark(bytesReturned);
261 |
262 | return bytesReturned;
263 | } catch (SocketTimeoutException e) {
264 | LOG.error("[{}-{}] socket timeout to {}: {}", topic, partition, broker.getNiceDescription(), e);
265 | connectToBroker();
266 | return -1;
267 | }
268 | }
269 |
270 | private void readFromBroker() throws IOException, BrokerUnavailableException {
271 | mBrokerReadAttempts.mark();
272 | mBrokerReadAttemptsTotal.mark();
273 |
274 | if (brokerSocket == null || brokerSocket.isClosed()) {
275 | LOG.info("[{}-{}] Connecting to broker", topic, partition);
276 | connectToBroker();
277 | }
278 |
279 | try {
280 | correlationId++;
281 |
282 | sendConsumeRequest(correlationId);
283 | receiveConsumeResponse(correlationId);
284 |
285 | mBrokerReadSuccess.mark();
286 | mBrokerReadSuccessTotal.mark();
287 | } catch (SocketTimeoutException e) {
288 | LOG.error("[{}-{}] socket timeout to {}", topic, partition, broker.getNiceDescription());
289 | connectToBroker();
290 | } catch (OffsetOutOfRangeException e) {
291 | mBrokerReadFailure.mark();
292 | mBrokerReadFailureTotal.mark();
293 |
294 | if (conf.getAutoOffsetReset().equals("smallest")) {
295 | LOG.warn("[{}-{}] offset {} out of range. Resetting to the earliest offset available {}", topic, partition, offset, getEarliestOffset());
296 | offset = getEarliestOffset();
297 | } else {
298 | if (conf.getAutoOffsetReset().equals("largest")) {
299 | LOG.warn("[{}-{}] Offset {} out of range. Resetting to the latest offset available {}", topic, partition, offset, getLatestOffset());
300 | offset = getLatestOffset();
301 | } else {
302 | LOG.error("[{}-{}] offset out of range and not configured to auto reset", topic, partition);
303 | throw e;
304 | }
305 | }
306 | } catch (Exception e) {
307 | LOG.error("[{}-{}] error getting data from broker: ", topic, partition, e);
308 |
309 | if (brokerSocket != null) {
310 | try {
311 | brokerSocket.close();
312 | } catch (IOException e1) {
313 | LOG.error("[{}-{}] error closing socket: ", topic, partition, e1);
314 | }
315 | }
316 |
317 | brokerSocket = null;
318 | mBrokerReadFailure.mark();
319 | mBrokerReadFailureTotal.mark();
320 | }
321 | }
322 |
323 | public long getEarliestOffset() throws BrokerUnavailableException {
324 | try {
325 | correlationId++;
326 | sendOffsetRequest(Constants.EARLIEST_OFFSET, correlationId);
327 | return getOffsetResponse(correlationId);
328 | } catch (SocketTimeoutException e) {
329 | LOG.error("[{}-{}] socket timeout to {}: {}", topic, partition, broker.getNiceDescription(), e);
330 | connectToBroker();
331 | } catch (IOException e) {
332 | LOG.error("[{}-{}] error getting earliest offset: ", topic, partition);
333 | }
334 | return 0L;
335 | }
336 |
337 | public long getLatestOffset() throws BrokerUnavailableException {
338 | try {
339 | correlationId++;
340 | sendOffsetRequest(Constants.LATEST_OFFSET, correlationId);
341 | return getOffsetResponse(correlationId);
342 | } catch (SocketTimeoutException e) {
343 | LOG.error("[{}-{}] socket timeout to {}: {}", topic, partition, broker.getNiceDescription(), e);
344 | connectToBroker();
345 | } catch (IOException e) {
346 | LOG.error("[{}-{}] error getting latest offset: ", topic, partition);
347 | }
348 | return Long.MAX_VALUE;
349 | }
350 |
	/**
	 * Assembles and sends an offset request for this topic-partition.
	 *
	 * The request is built field-by-field in offsetRequestBuffer; the order of
	 * the puts below defines the wire format and must not be changed. The
	 * final length field is patched into the first 4 bytes once the total
	 * size is known.
	 *
	 * @param time which offset to ask for (Constants.EARLIEST_OFFSET or
	 *          Constants.LATEST_OFFSET).
	 * @param correlationId correlation id stamped on the request so the
	 *          response can be matched up.
	 * @throws IOException if writing to the broker fails.
	 */
	private void sendOffsetRequest(long time, int correlationId) throws IOException {
		LOG.debug("Sending request for offset. correlation id = {}, time = {}", correlationId, time);

		offsetRequestBuffer.clear();

		// skip 4 bytes for length
		offsetRequestBuffer.position(4);

		// API key
		offsetRequestBuffer.putShort(Constants.APIKEY_OFFSET_REQUEST);

		// API Version
		offsetRequestBuffer.putShort(Constants.API_VERSION);

		// Correlation Id
		offsetRequestBuffer.putInt(correlationId);

		// ClientId
		offsetRequestBuffer.putShort(clientIdLength);
		offsetRequestBuffer.put(clientIdBytes);

		// replica id is always -1
		offsetRequestBuffer.putInt(-1);

		// Only requesting for 1 topic
		offsetRequestBuffer.putInt(1);

		// Topic Name
		offsetRequestBuffer.putShort(topicLength);
		offsetRequestBuffer.put(topicBytes);

		// Only requesting for 1 partition
		offsetRequestBuffer.putInt(1);

		// Partition
		offsetRequestBuffer.putInt(partition);

		// Time for offset
		offsetRequestBuffer.putLong(time);

		// We only need one offset
		offsetRequestBuffer.putInt(1);

		// Add the length to the start
		offsetRequestBuffer.putInt(0, offsetRequestBuffer.position() - 4);

		// NOTE(review): assumes offsetRequestBytes is the backing array of
		// offsetRequestBuffer — confirm where both are allocated.
		brokerOut.write(offsetRequestBytes, 0, offsetRequestBuffer.position());
	}
399 |
400 | private long getOffsetResponse(int correlationId) throws IOException, BrokerUnavailableException {
401 | LOG.debug("[{}-{}] waiting for response. correlation id = {}", topic, partition, correlationId);
402 |
403 | try {
404 | // read the length of the response
405 | bytesRead = 0;
406 | while (bytesRead < 4) {
407 | bytesRead += brokerIn.read(offsetResponseBytes, bytesRead, 4 - bytesRead);
408 | }
409 |
410 | offsetResponseBuffer.clear();
411 | responseLength = offsetResponseBuffer.getInt();
412 |
413 | bytesRead = 0;
414 | while (bytesRead < responseLength) {
415 | bytesRead += brokerIn.read(offsetResponseBytes, bytesRead, responseLength - bytesRead);
416 | LOG.debug("[{}-{}] read {} bytes", topic, partition, bytesRead);
417 | }
418 |
419 | offsetResponseBuffer.clear();
420 |
421 | // Check correlation Id
422 | responseCorrelationId = offsetResponseBuffer.getInt();
423 |
424 | if (responseCorrelationId != correlationId) {
425 | LOG.error("[{}-{}] correlation id mismatch. Expected {}, got {}", topic, partition, correlationId, responseCorrelationId);
426 | throw new IOException("Correlation ID mismatch. Expected " + correlationId + ". Got " + responseCorrelationId + ".");
427 | }
428 |
429 | // We can skip a bunch of stuff here.
430 | // There is 1 topic (4 bytes), then the topic name (2 + topicLength
431 | // bytes), then the number of partitions (which is 1) (4 bytes),
432 | // then the partition id (4 bytes)
433 | offsetResponseBuffer.position(offsetResponseBuffer.position() + 4 + 2 + topicLength + 4 + 4);
434 |
435 | // Next is the error code.
436 | errorCode = offsetResponseBuffer.getShort();
437 |
438 | if (errorCode == KafkaError.OffsetOutOfRange.getCode()) {
439 | throw new OffsetOutOfRangeException();
440 | } else {
441 | if (errorCode != KafkaError.NoError.getCode()) {
442 | throw new IOException("Error from Kafka. (" + errorCode + ") " + KafkaError.getMessage(errorCode));
443 | }
444 | }
445 |
446 | // Finally, the offset. There is an array of one (skip 4 bytes)
447 | offsetResponseBuffer.position(offsetResponseBuffer.position() + 4);
448 | LOG.debug("Succeeded in request. correlation id = {}", correlationId);
449 |
450 | return offsetResponseBuffer.getLong();
451 |
452 | } catch (SocketTimeoutException e) {
453 | LOG.error("[{}-{}] socket timeout to {}: {}", topic, partition, broker.getNiceDescription(), e);
454 | connectToBroker();
455 | return -1;
456 | } finally {
457 | // Clean out any other data that is sitting on the socket to be read.
458 | // It's useless to us, but may through off future transactions if we
459 | // leave it there.
460 |
461 | bytesRead = 0;
462 | while (brokerIn.available() > 0) {
463 | bytesRead += brokerIn.read(offsetResponseBytes, bytesRead, offsetResponseBytes.length);
464 | }
465 | }
466 | }
467 |
	/**
	 * Assembles and sends a fetch (consume) request for this topic-partition
	 * starting at the current offset.
	 *
	 * The request is built field-by-field in requestBuffer; the order of the
	 * puts below defines the wire format and must not be changed. The final
	 * length field is patched into the first 4 bytes once the total size is
	 * known.
	 *
	 * @param correlationId correlation id stamped on the request so the
	 *          response can be matched up.
	 * @throws IOException if writing to the broker fails.
	 */
	private void sendConsumeRequest(int correlationId) throws IOException {
		LOG.debug("[{}-{}] sending consume request. correlation id = {}", topic, partition, correlationId);

		requestBuffer.clear();

		// Skip 4 bytes for the request size
		requestBuffer.position(requestBuffer.position() + 4);

		// API key
		requestBuffer.putShort(Constants.APIKEY_FETCH_REQUEST);

		// API Version
		requestBuffer.putShort(Constants.API_VERSION);

		// Correlation Id
		requestBuffer.putInt(correlationId);

		// ClientId
		requestBuffer.putShort(clientIdLength);
		requestBuffer.put(clientIdBytes);

		// Replica ID is always -1
		requestBuffer.putInt(-1);

		// Max wait time
		requestBuffer.putInt(fetchWaitMaxMs);

		// Min bytes
		requestBuffer.putInt(fetchMinBytes);

		// Only requesting for 1 topic
		requestBuffer.putInt(1);

		// Topic Name
		requestBuffer.putShort(topicLength);
		requestBuffer.put(topicBytes);

		// Only requesting for 1 partition
		requestBuffer.putInt(1);

		// Partition
		requestBuffer.putInt(partition);

		// FetchOffset
		requestBuffer.putLong(offset);

		// MaxBytes
		requestBuffer.putInt(fetchMessageMaxBytes);

		/* Fill in missing data */
		// Full size
		requestBuffer.putInt(0, requestBuffer.position() - 4);

		// Send!
		// NOTE(review): assumes requestBytes is the backing array of
		// requestBuffer — confirm where both are allocated.
		brokerOut.write(requestBytes, 0, requestBuffer.position());
	}
524 |
525 | private void receiveConsumeResponse(int correlationId) throws IOException {
526 | LOG.debug("[{}-{}] waiting for response. correlation id = {}", topic, partition, correlationId);
527 |
528 | try {
529 | // read the length of the response
530 | bytesRead = 0;
531 | while (bytesRead < 4) {
532 | bytesRead += brokerIn.read(responseBytes, bytesRead, 4 - bytesRead);
533 | }
534 |
535 | responseBuffer.clear();
536 | responseLength = responseBuffer.getInt();
537 | bytesRead = 0;
538 |
539 | while (bytesRead < responseLength) {
540 | bytesRead += brokerIn.read(responseBytes, bytesRead, responseLength - bytesRead);
541 | }
542 |
543 | responseBuffer.clear();
544 |
545 | // Next is the corelation ID
546 | responseCorrelationId = responseBuffer.getInt();
547 |
548 | if (responseCorrelationId != correlationId) {
549 | LOG.error("[{}-{}] correlation id mismatch. Expected {}, got {}", topic, partition, correlationId, responseCorrelationId);
550 | throw new IOException("Correlation ID mismatch. Expected " + correlationId + ". Got " + responseCorrelationId + ".");
551 | }
552 |
553 | // We can skip a bunch of stuff here.
554 | // There is 1 topic (4 bytes), then the topic name (2 + topicLength
555 | // bytes), then the number of partitions (which is 1) (4 bytes),
556 | // then the partition id (4 bytes)
557 | responseBuffer.position(responseBuffer.position() + 4 + 2 + topicLength + 4 + 4);
558 |
559 | // Next is the error code.
560 | errorCode = responseBuffer.getShort();
561 |
562 | if (errorCode == KafkaError.OffsetOutOfRange.getCode()) {
563 | throw new OffsetOutOfRangeException();
564 | } else {
565 | if (errorCode != KafkaError.NoError.getCode()) {
566 | throw new IOException("Error from Kafka. (" + errorCode + ") " + KafkaError.getMessage(errorCode));
567 | }
568 | }
569 |
570 | // Next is the high watermark
571 | highWaterMark = responseBuffer.getLong();
572 |
573 | LOG.debug("[{}-{}] receiveConsumeRequest offset {} highWaterMark {}", topic, partition, offset, highWaterMark);
574 |
575 | // Message set size
576 | messageSetSize = responseBuffer.getInt();
577 |
578 | messageSetReader.init(responseBytes, responseBuffer.position(), messageSetSize);
579 |
580 | LOG.debug("[{}-{}] succeeded in request. correlation id = {}", topic, partition, correlationId);
581 |
582 | } finally {
583 | // Clean out any other data that is sitting on the socket to be read.
584 | // It's useless to us, but may through off future transactions if we
585 | // leave it there.
586 |
587 | bytesRead = 0;
588 |
589 | while (brokerIn.available() > 0) {
590 | bytesRead += brokerIn.read(responseBytes, bytesRead, responseBytes.length);
591 | }
592 | }
593 | }
594 |
595 | private void connectToBroker() throws BrokerUnavailableException {
596 | long backoff = 1000;
597 | long retries = 5;
598 | long attempt = 1;
599 | Boolean stopping = false;
600 |
601 | while (!stopping) {
602 | try {
603 | MetaData meta = MetaData.getMetaData(conf.getMetadataBrokerList(),
604 | topic,
605 | clientId);
606 | broker = meta.getTopic(topic).getPartition(partition).getLeader();
607 | LOG.info("[{}-{}] connecting to broker {}", topic, partition, broker.getNiceDescription());
608 |
609 | brokerSocket = AuthenticatedSocketSingleton.getInstance().build(broker.getHost(), broker.getPort());
610 | brokerSocket.setSoTimeout(conf.getSocketTimeoutMs());
611 | brokerSocket.setKeepAlive(true);
612 | brokerSocket.setReceiveBufferSize(conf.getSocketReceiveBufferBytes());
613 | brokerIn = brokerSocket.getInputStream();
614 | brokerOut = brokerSocket.getOutputStream();
615 |
616 | LOG.info("[{}-{}] successfully connected to broker {} and set a timeout of {}",
617 | topic, partition, broker.getNiceDescription(), conf.getSocketTimeoutMs());
618 |
619 | break;
620 | } catch (Exception e) {
621 | if (attempt < retries) {
622 | LOG.error("[{}-{}] error connecting to broker on attempt {}/{}, retrying in {} seconds... error message was: ",
623 | topic, partition, attempt, retries, (backoff / 1000), e);
624 | try {
625 | Thread.sleep(backoff);
626 | } catch (InterruptedException ie) {
627 | stopping = true;
628 | }
629 |
630 | backoff *= 2;
631 | attempt++;
632 | } else {
633 | throw new BrokerUnavailableException("Failed to connect to broker, no retries left--giving up", e);
634 | }
635 | }
636 | }
637 | }
638 |
	/**
	 * @return the offset of the last message returned by the reader.
	 */
	public long getLastOffset() {
		return lastOffset;
	}
642 |
	/**
	 * @return the offset the next fetch request will start from.
	 */
	public long getNextOffset() {
		return offset;
	}
646 |
	/**
	 * @return the high watermark reported by the broker in the last fetch response.
	 */
	public long getHighWaterMark() {
		return highWaterMark;
	}
650 |
	/**
	 * Moves the consumer to a new offset and issues an immediate fetch at that
	 * position to prime the message set reader.
	 *
	 * @param nextOffset the offset the next fetch should start from.
	 * @throws IOException on protocol errors while fetching.
	 * @throws BrokerUnavailableException if reconnecting to the broker fails.
	 */
	public void setNextOffset(long nextOffset) throws IOException, BrokerUnavailableException {
		LOG.info("[{}-{}] request to set the next offset to {} received", topic, partition, nextOffset);

		this.offset = nextOffset;

		correlationId++;

		try {
			sendConsumeRequest(correlationId);
			receiveConsumeResponse(correlationId);
		} catch (SocketTimeoutException e) {
			// NOTE(review): on timeout we reconnect but do not retry the fetch,
			// yet the success message below is still logged — confirm intended.
			LOG.error("[{}-{}] socket timeout to {}", topic, partition, broker.getNiceDescription());
			connectToBroker();
		}

		LOG.info("[{}-{}] successfully set the next offset to {} via correlation ID {}", topic, partition, nextOffset, correlationId);
	}
668 |
669 | }
670 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/consumer/ConsumerConfiguration.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.consumer;
17 |
18 | import com.blackberry.bdp.krackle.auth.AuthenticatedSocketSingleton;
19 | import java.util.ArrayList;
20 | import java.util.List;
21 | import java.util.Properties;
22 |
23 | import org.slf4j.Logger;
24 | import org.slf4j.LoggerFactory;
25 |
/**
 * Configuration for a consumer.
 *
 * Many of these properties are the same as those in the standard Java client,
 * as documented at http://kafka.apache.org/documentation.html#consumerconfigs
 *
 * Valid properties are:
 *
 * <ul>
 * <li><b>metadata.broker.list</b> (required): A list of seed brokers to
 * connect to in order to get information about the Kafka broker cluster.</li>
 *
 * <li><b>fetch.message.max.bytes</b> (default 1024 * 1024): The number of
 * bytes of messages to attempt to fetch for each topic-partition in each
 * fetch request. These bytes will be read into memory for each partition, so
 * this helps control the memory used by the consumer. The fetch request size
 * must be at least as large as the maximum message size the server allows or
 * else it is possible for the producer to send messages larger than the
 * consumer can fetch.</li>
 *
 * <li><b>fetch.wait.max.ms</b> (default 100): The maximum amount of time the
 * server will block before answering the fetch request if there isn't
 * sufficient data to immediately satisfy fetch.min.bytes.</li>
 *
 * <li><b>fetch.min.bytes</b> (default 1): The minimum amount of data the
 * server should return for a fetch request. If insufficient data is available
 * the request will wait for that much data to accumulate before answering the
 * request.</li>
 *
 * <li><b>socket.receive.buffer.bytes</b> (default 64 * 1024): The socket
 * receive buffer for network requests.</li>
 *
 * <li><b>auto.offset.reset</b> (default largest): What to do when there is no
 * initial offset in ZooKeeper or if an offset is out of range:
 * <i>smallest</i> — automatically reset the offset to the smallest offset;
 * <i>largest</i> — automatically reset the offset to the largest offset;
 * anything else — throw an exception to the consumer.</li>
 * </ul>
 */
84 | public class ConsumerConfiguration {
85 |
86 | private static final Logger LOG = LoggerFactory.getLogger(ConsumerConfiguration.class);
87 | private List metadataBrokerList;
88 | private int fetchMessageMaxBytes;
89 | private int fetchWaitMaxMs;
90 | private int fetchMinBytes;
91 | private int socketReceiveBufferBytes;
92 | private String autoOffsetReset;
93 | private int socketTimeoutMs;
94 |
95 | /**
96 | * Creates a new configuration from a given Properties object.
97 | *
98 | * @param props Properties to build configuration from.
99 | * @throws Exception
100 | */
101 | public ConsumerConfiguration(Properties props) throws Exception {
102 | LOG.info("Building configuration.");
103 |
104 | metadataBrokerList = new ArrayList<>();
105 | String metadataBrokerListString = props.getProperty("metadata.broker.list");
106 |
107 | if (metadataBrokerListString == null || metadataBrokerListString.isEmpty()) {
108 | throw new Exception("metadata.broker.list cannot be empty.");
109 | }
110 |
111 | for (String s : metadataBrokerListString.split(",")) {
112 | // This is not a good regex. Could make it better.
113 | if (s.matches("^[\\.a-zA-Z0-9-]*:\\d+$")) {
114 | metadataBrokerList.add(s);
115 | } else {
116 | throw new Exception(
117 | "metata.broker.list must contain a list of hosts and ports (localhost:123,192.168.1.1:456). Got "
118 | + metadataBrokerListString);
119 | }
120 | }
121 |
122 | LOG.info("metadata.broker.list = {}", metadataBrokerList);
123 |
124 | fetchMessageMaxBytes = Integer.parseInt(props.getProperty("fetch.message.max.bytes", "" + (1024 * 1024)));
125 |
126 | if (fetchMessageMaxBytes <= 0) {
127 | throw new Exception("fetch.message.max.bytes must be positive.");
128 | }
129 |
130 | LOG.info("fetch.message.max.bytes = {}", fetchMessageMaxBytes);
131 |
132 | fetchWaitMaxMs = Integer.parseInt(props.getProperty("fetch.wait.max.ms", "100"));
133 |
134 | if (fetchWaitMaxMs < 0) {
135 | throw new Exception("fetch.wait.max.ms cannot be negative.");
136 | }
137 |
138 | LOG.info("fetch.wait.max.ms = {}", fetchWaitMaxMs);
139 |
140 | fetchMinBytes = Integer.parseInt(props.getProperty("fetch.min.bytes", "1"));
141 |
142 | if (fetchMinBytes < 0) {
143 | throw new Exception("fetch.min.bytes cannot be negative.");
144 | }
145 |
146 | LOG.info("fetch.min.bytes = {}", fetchMinBytes);
147 |
148 | socketReceiveBufferBytes = Integer.parseInt(props.getProperty("socket.receive.buffer.bytes", "" + (64 * 1024)));
149 |
150 | if (socketReceiveBufferBytes < 0) {
151 | throw new Exception("socket.receive.buffer.bytes must be positive.");
152 | }
153 |
154 | LOG.info("socket.receive.buffer.bytes = {}", socketReceiveBufferBytes);
155 |
156 | autoOffsetReset = props.getProperty("auto.offset.reset", "largest");
157 |
158 | LOG.info("auto.offset.reset = {}", autoOffsetReset);
159 |
160 | socketTimeoutMs = Integer.parseInt(props.getProperty("socket.timeout.seconds", "" + (30 * 1000)));
161 |
162 | if (getSocketTimeoutMs() < 0) {
163 | throw new Exception("socket.timeout.seconds must be positive.");
164 | }
165 |
166 | AuthenticatedSocketSingleton.getInstance().configure(props);
167 | }
168 |
169 | public List getMetadataBrokerList() {
170 | return metadataBrokerList;
171 | }
172 |
173 | public void setMetadataBrokerList(List metadataBrokerList) {
174 | this.metadataBrokerList = metadataBrokerList;
175 | }
176 |
177 | public int getFetchMessageMaxBytes() {
178 | return fetchMessageMaxBytes;
179 | }
180 |
181 | public void setFetchMessageMaxBytes(int fetchMessageMaxBytes) {
182 | this.fetchMessageMaxBytes = fetchMessageMaxBytes;
183 | }
184 |
185 | public int getFetchWaitMaxMs() {
186 | return fetchWaitMaxMs;
187 | }
188 |
189 | public void setFetchWaitMaxMs(int fetchWaitMaxMs) {
190 | this.fetchWaitMaxMs = fetchWaitMaxMs;
191 | }
192 |
193 | public int getFetchMinBytes() {
194 | return fetchMinBytes;
195 | }
196 |
197 | public void setFetchMinBytes(int fetchMinBytes) {
198 | this.fetchMinBytes = fetchMinBytes;
199 | }
200 |
201 | public int getSocketReceiveBufferBytes() {
202 | return socketReceiveBufferBytes;
203 | }
204 |
205 | public void setSocketReceiveBufferBytes(int socketReceiveBufferBytes) {
206 | this.socketReceiveBufferBytes = socketReceiveBufferBytes;
207 | }
208 |
209 | public String getAutoOffsetReset() {
210 | return autoOffsetReset;
211 | }
212 |
213 | public void setAutoOffsetReset(String autoOffsetReset) {
214 | this.autoOffsetReset = autoOffsetReset;
215 | }
216 |
217 | /**
218 | * @return the socketTimeoutMs
219 | */
220 | final public int getSocketTimeoutMs() {
221 | return socketTimeoutMs;
222 | }
223 |
224 | /**
225 | * @param socketTimeoutMs the socketTimeoutMs to set
226 | */
227 | public void setSocketTimeoutMs(int socketTimeoutMs) {
228 | this.socketTimeoutMs = socketTimeoutMs;
229 | }
230 |
231 | }
232 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/consumer/MessageAndOffset.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.consumer;
17 |
18 | /**
19 | * A simple class for holding a message as as byte array and an offset as a
20 | * long.
21 | */
public class MessageAndOffset {

	// Raw message payload; null until assigned.
	private byte[] payload;

	// Partition offset the payload was read from.
	private long messageOffset;

	/** Creates an empty holder; fields are populated via the setters. */
	public MessageAndOffset() {
	}

	/**
	 * Creates a holder with both values populated.
	 *
	 * @param message the raw message bytes.
	 * @param offset the offset the message was read from.
	 */
	public MessageAndOffset(byte[] message, long offset) {
		this.payload = message;
		this.messageOffset = offset;
	}

	/** @return the raw message bytes. */
	public byte[] getMessage() {
		return payload;
	}

	/** @param message the raw message bytes to store. */
	public void setMessage(byte[] message) {
		payload = message;
	}

	/** @return the offset the message was read from. */
	public long getOffset() {
		return messageOffset;
	}

	/** @param offset the offset to store. */
	public void setOffset(long offset) {
		messageOffset = offset;
	}

}
52 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/consumer/MessageSetReader.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.consumer;
17 |
18 | import java.io.IOException;
19 | import java.nio.ByteBuffer;
20 | import java.util.zip.CRC32;
21 |
22 | import org.slf4j.Logger;
23 | import org.slf4j.LoggerFactory;
24 |
25 | import com.blackberry.bdp.krackle.Constants;
26 | import com.blackberry.bdp.krackle.compression.Decompressor;
27 | import com.blackberry.bdp.krackle.compression.GzipDecompressor;
28 | import com.blackberry.bdp.krackle.compression.SnappyDecompressor;
29 |
/**
 * Incremental reader for a Kafka message set.
 *
 * The set is copied into an internal buffer by {@link #init(byte[], int, int)}
 * and then consumed one message at a time via
 * {@link #getMessage(byte[], int, int)}. Compressed messages (Snappy/GZIP)
 * contain a nested message set, which is handled by a lazily-created inner
 * MessageSetReader.
 *
 * Not thread-safe: all per-message decode state is kept in instance fields.
 */
public class MessageSetReader {

	private static final Logger LOG = LoggerFactory.getLogger(MessageSetReader.class);

	// True while there may be more messages to read from this set.
	private boolean ready = false;

	// Backing copy of the message set; grown on demand, never shrunk.
	private byte[] bytes = new byte[0];
	private ByteBuffer buffer = ByteBuffer.wrap(bytes);

	// This starts at 256KiB, and doubles as necessary. I doubt it will need to do
	// so often, unless message sizes keep growing in an unbounded way.
	private byte[] decompressionBytes = new byte[256 * 1024];

	// Reused CRC instance for per-message checksum validation.
	private CRC32 crc32 = new CRC32();

	// Lazily-created helpers; only allocated if compressed messages show up.
	private SnappyDecompressor snappyDecompressor = null;
	private GzipDecompressor gzipDecompressor = null;
	private MessageSetReader messageSetReader = null;

	// Decode state for the message currently being read.
	private long offset;
	private int messageSize;
	private int crc;
	private byte magicByte;
	private byte attributes;
	private byte compression;
	private int keyLength;
	private int valueLength;
	private int bytesCopied;
	private int decompressedSize;

	/**
	 * Initialize with a message set.
	 *
	 * This copies the data from the source array, so the source can be altered afterwards with no impact to this class.
	 *
	 * @param src byte array holding the message set.
	 * @param position position in the source where the message set starts.
	 * @param length length of the message set.
	 */
	public void init(byte[] src, int position, int length) {
		// Grow the backing array (and re-wrap the buffer) only when needed.
		if (bytes.length < length) {
			bytes = new byte[length];
			buffer = ByteBuffer.wrap(bytes);
		}

		System.arraycopy(src, position, bytes, 0, length);
		buffer.clear();
		buffer.limit(length);

		// An empty set has nothing to read.
		if (length > 0) {
			ready = true;
		} else {
			ready = false;
		}
	}

	/**
	 * Read in a message from message set into the given byte array.
	 *
	 * If the size of the message exceeds maxLength, it will be truncated to fit.
	 *
	 * @param dest the byte array to write into.
	 * @param pos the position in the byte array to write to.
	 * @param maxLength the max size of the message to write.
	 * @return the number of bytes writen, or -1 if no data was returned.
	 * @throws IOException
	 */
	public int getMessage(byte[] dest, int pos, int maxLength) throws IOException {
		// If a nested (decompressed) set is mid-read, drain it first.
		if (messageSetReader != null && messageSetReader.isReady()) {
			bytesCopied = messageSetReader.getMessage(dest, pos, maxLength);
			offset = messageSetReader.getOffset();

			// We stay ready while either the nested reader or our own buffer
			// has data left.
			if (!messageSetReader.isReady() && !buffer.hasRemaining()) {
				ready = false;
			} else {
				ready = true;
			}
		} else {
			// There are occasional truncated messages. If we don't have enough,
			// then return -1 and go not-ready
			// This will cover the offset, message size and crc
			if (buffer.remaining() < 8 + 4) {
				ready = false;
				return -1;
			}

			// offset
			offset = buffer.getLong();

			// messageSize
			messageSize = buffer.getInt();

			// This should be the last size check we need to do.
			if (buffer.remaining() < messageSize) {
				ready = false;
				return -1;
			}

			// Crc => int32
			crc = buffer.getInt();

			// check that the crc is correct
			// (the CRC covers everything after the crc field itself, hence
			// messageSize - 4)
			crc32.reset();
			crc32.update(bytes, buffer.position(), messageSize - 4);

			if (crc != (int) crc32.getValue()) {
				LOG.error("CRC value mismatch.");
				ready = false;
				return -1;
			}

			// MagicByte => int8
			magicByte = buffer.get();

			if (magicByte != Constants.MAGIC_BYTE) {
				LOG.error("Incorrect magic byte.");
				ready = false;
				return -1;
			}

			// Attributes => int8
			attributes = buffer.get();
			compression = (byte) (attributes & Constants.COMPRESSION_MASK);

			// Key => bytes
			keyLength = buffer.getInt();

			if (keyLength == -1) {
				// null key
			} else {
				// ignore the key
				buffer.position(buffer.position() + keyLength);
			}

			// Value => bytes
			valueLength = buffer.getInt();

			if (valueLength == -1) {
				// null value. return -1, but we may still be ready.
				if (!buffer.hasRemaining()) {
					ready = false;
				}
				return -1;
			}

			if (compression == Constants.NO_COMPRESSION) {
				// Plain message: copy the value straight out, truncating if the
				// caller's buffer is too small.
				bytesCopied = Math.min(maxLength, valueLength);
				if (bytesCopied < valueLength) {
					LOG.warn("Truncating message from {} to {} bytes.", valueLength, maxLength);
				}
				System.arraycopy(bytes, buffer.position(), dest, pos, bytesCopied);
			} else {
				if (compression == Constants.SNAPPY) {
					// Compressed value is itself a message set: decompress it and
					// delegate to the nested reader for the first message.
					decompressedSize = decompress(getSnappyDecompressor());
					ensureMessageSetReader();
					messageSetReader.init(decompressionBytes, 0, decompressedSize);

					if (messageSetReader.isReady()) {
						bytesCopied = messageSetReader.getMessage(dest, pos, maxLength);
						offset = messageSetReader.getOffset();
					}
				} else {
					if (compression == Constants.GZIP) {
						decompressedSize = decompress(getGzipDecompressor());
						ensureMessageSetReader();
						messageSetReader.init(decompressionBytes, 0, decompressedSize);

						if (messageSetReader.isReady()) {
							bytesCopied = messageSetReader.getMessage(dest, pos, maxLength);
							offset = messageSetReader.getOffset();
						}
					}
					// NOTE(review): an unrecognized compression code falls through
					// here leaving bytesCopied at its previous value — confirm
					// whether that can occur in practice.
				}
			}

			// Advance past the value regardless of how it was consumed.
			buffer.position(buffer.position() + valueLength);

			if ((messageSetReader == null || !messageSetReader.isReady()) && !buffer.hasRemaining()) {
				ready = false;
			} else {
				ready = true;
			}
		}

		return bytesCopied;
	}

	/**
	 * Decompresses the current value into decompressionBytes, doubling the
	 * buffer until the decompressor reports it fit (-1 means "too small").
	 *
	 * @param decompressor the codec to use.
	 * @return the number of decompressed bytes.
	 * @throws IOException if decompression fails.
	 */
	private int decompress(Decompressor decompressor) throws IOException {
		while (true) {
			decompressedSize = decompressor.decompress(bytes, buffer.position(), valueLength, decompressionBytes, 0, decompressionBytes.length);
			if (decompressedSize == -1) {
				// Our output buffer was not big enough. So increase our
				// buffers and retry. This should be very rare.

				LOG.debug("Expanding decompression buffer from {} to {}", decompressionBytes.length, 2 * decompressionBytes.length);

				decompressionBytes = new byte[2 * decompressionBytes.length];
			} else {
				// we didn't fill the buffer. So our buffer was big enough.
				break;
			}
		}

		return decompressedSize;
	}

	// Lazily creates the nested reader used for compressed message sets.
	private void ensureMessageSetReader() {
		if (messageSetReader == null) {
			messageSetReader = new MessageSetReader();
		}
	}

	// Lazily creates the Snappy codec.
	private SnappyDecompressor getSnappyDecompressor() {
		if (snappyDecompressor == null) {
			snappyDecompressor = new SnappyDecompressor();
		}
		return snappyDecompressor;
	}

	// Lazily creates the GZIP codec.
	private GzipDecompressor getGzipDecompressor() {
		if (gzipDecompressor == null) {
			gzipDecompressor = new GzipDecompressor();
		}
		return gzipDecompressor;
	}

	/**
	 * Checks if this message set ready is ready.
	 *
	 * Ready means that it is initialized and has not reached the end of its data. This does not guarantee that the next request will succeed, since the remaining data could be a truncated message.
	 *
	 * @return true if this MessageSetReader is ready, otherwise false.
	 */
	public boolean isReady() {
		return ready;
	}

	/**
	 * Gets the offset of the last message read.
	 *
	 * @return the offset of the last message read.
	 */
	public long getOffset() {
		return offset;
	}

	/**
	 * Gets the offset of the next message that would be returned.
	 *
	 * @return the offset of the next message that would be returned.
	 */
	public long getNextOffset() {
		return offset + 1;
	}

}
286 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/consumer/OffsetOutOfRangeException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.consumer;
17 |
18 | import java.io.IOException;
19 |
/**
 * Exception used to handle offset out of range errors being returned by a
 * broker.
 */
public class OffsetOutOfRangeException extends IOException {

	private static final long serialVersionUID = 1L;

	/** Creates an exception with no detail message. */
	public OffsetOutOfRangeException() {
		super();
	}

	/**
	 * Creates an exception with a detail message.
	 *
	 * @param message description of the out-of-range offset condition.
	 */
	public OffsetOutOfRangeException(String message) {
		super(message);
	}

	/**
	 * Creates an exception with a detail message and an underlying cause.
	 *
	 * @param message description of the out-of-range offset condition.
	 * @param cause the underlying cause, preserved for diagnostics.
	 */
	public OffsetOutOfRangeException(String message, Throwable cause) {
		super(message, cause);
	}

}
29 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/exceptions/AuthenticationException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.exceptions;
17 |
/**
 * Thrown when a client fails to authenticate with a broker.
 */
public class AuthenticationException extends Exception {

	private static final long serialVersionUID = 1L;

	/**
	 * @param message description of the authentication failure.
	 */
	public AuthenticationException(String message) {
		super(message);
	}

	/**
	 * @param message description of the authentication failure.
	 * @param cause the underlying cause, preserved for diagnostics.
	 */
	public AuthenticationException(String message, Throwable cause) {
		super(message, cause);
	}
}
23 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/exceptions/InvalidConfigurationTypeException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.exceptions;
17 |
/**
 * Thrown when a configuration value is present but has the wrong type.
 */
public class InvalidConfigurationTypeException extends Exception {

	private static final long serialVersionUID = 1L;

	/**
	 * @param message description of the invalid configuration value.
	 */
	public InvalidConfigurationTypeException(String message) {
		super(message);
	}

	/**
	 * @param message description of the invalid configuration value.
	 * @param cause the underlying cause, preserved for diagnostics.
	 */
	public InvalidConfigurationTypeException(String message, Throwable cause) {
		super(message, cause);
	}
}
23 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/exceptions/MissingConfigurationException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.exceptions;
17 |
/**
 * Thrown when a required configuration property is absent.
 */
public class MissingConfigurationException extends Exception {

	private static final long serialVersionUID = 1L;

	/**
	 * @param message description of the missing configuration property.
	 */
	public MissingConfigurationException(String message) {
		super(message);
	}

	/**
	 * @param message description of the missing configuration property.
	 * @param cause the underlying cause, preserved for diagnostics.
	 */
	public MissingConfigurationException(String message, Throwable cause) {
		super(message, cause);
	}
}
23 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/jaas/Login.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2015 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.jaas;
17 |
18 | /**
19 | * This class was copied from the Apache ZooKeeper project:
20 | *
21 | * git@github.com:apache/zookeeper.git
22 | * 92707a6a84a7965df2d7d8ead0acb721b6e62878
23 | *
 * Adapted as required to work with Krackle and its implementation
25 | * of AuthenticatedSocketBuilder where needed. The following
26 | * major changes were performed:
27 | *
28 | * 1) Package name was changed
29 | * 2) Use of a login via ticket cache/kinit command disabled
30 | * 3) Use of org.apache.zookeeper.common.Time removed
31 | * 4) JavaDoc added where missing
32 | * 5) Required the principal to be found in the JAAS config
33 | * 6) Added getPrincipal()
34 | * 7) Added a client call back handler
35 | *
36 | */
37 |
38 | /**
39 | * This class is responsible for refreshing Kerberos credentials for
40 | * logins for both Zookeeper client and server.
41 | * See ZooKeeperSaslServer for server-side usage.
42 | * See ZooKeeperSaslClient for client-side usage.
43 | */
44 | import com.blackberry.bdp.krackle.Time;
45 | import javax.security.auth.kerberos.KerberosPrincipal;
46 | import javax.security.auth.login.AppConfigurationEntry;
47 | import javax.security.auth.login.Configuration;
48 | import javax.security.auth.login.LoginContext;
49 | import javax.security.auth.login.LoginException;
50 | import javax.security.auth.callback.CallbackHandler;
51 |
52 | import org.slf4j.Logger;
53 | import org.slf4j.LoggerFactory;
54 |
55 | import javax.security.auth.kerberos.KerberosTicket;
56 | import javax.security.auth.Subject;
57 |
58 | import java.util.Date;
59 | import java.util.Random;
60 | import java.util.Set;
61 | import javax.security.auth.callback.Callback;
62 | import javax.security.auth.callback.NameCallback;
63 | import javax.security.auth.callback.PasswordCallback;
64 | import javax.security.auth.callback.UnsupportedCallbackException;
65 | import javax.security.sasl.AuthorizeCallback;
66 | import javax.security.sasl.RealmCallback;
67 |
68 | public class Login {
69 |
70 | private static final Logger LOG = LoggerFactory.getLogger(Login.class);
71 | public CallbackHandler callbackHandler;
72 |
73 | // LoginThread will sleep until 80% of time from last refresh to
74 | // ticket's expiry has been reached, at which time it will wake
75 | // and try to renew the ticket.
76 | private static final float TICKET_RENEW_WINDOW = 0.80f;
77 |
78 | /**
79 | * Percentage of random jitter added to the renewal time
80 | */
81 | private static final float TICKET_RENEW_JITTER = 0.05f;
82 |
83 | // Regardless of TICKET_RENEW_WINDOW setting above and the ticket expiry time,
84 | // thread will not sleep between refresh attempts any less than 1 minute (60*1000 milliseconds = 1 minute).
85 | // Change the '1' to e.g. 5, to change this to 5 minutes.
86 | private static final long MIN_TIME_BEFORE_RELOGIN = 1 * 60 * 1000L;
87 |
88 | private Subject subject = null;
89 | private Thread t = null;
90 | private boolean isKrbTicket = false;
91 | private boolean isUsingTicketCache = false;
92 |
93 | /** Random number generator */
94 | private static final Random rng = new Random();
95 |
96 | private LoginContext login = null;
97 | private String loginContextName = null;
98 | private String principal = null;
99 |
100 | // Initialize 'lastLogin' to do a login at first time
101 | private long lastLogin = Time.currentElapsedTime() - MIN_TIME_BEFORE_RELOGIN;
102 |
103 | /**
104 | * LoginThread constructor. The constructor starts the thread used
105 | * to periodically re-login to the Kerberos Ticket Granting Server.
106 | * @param loginContextName
107 | * name of section in JAAS file that will be use to login.
108 | * Passed as first param to javax.security.auth.login.LoginContext().
109 | *
110 | * @param callbackHandler
111 | * Passed as second param to javax.security.auth.login.LoginContext().
112 | * @throws javax.security.auth.login.LoginException
113 | * Thrown if authentication fails.
114 | */
115 | public Login(final String loginContextName, CallbackHandler callbackHandler)
116 | throws LoginException {
117 | this.callbackHandler = callbackHandler;
118 | login = login(loginContextName);
119 | this.loginContextName = loginContextName;
120 | subject = login.getSubject();
121 | isKrbTicket = !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
122 | AppConfigurationEntry entries[] = Configuration.getConfiguration().getAppConfigurationEntry(loginContextName);
123 | for (AppConfigurationEntry entry : entries) {
124 | // there will only be a single entry, so this for() loop will only be iterated through once.
125 | if (entry.getOptions().get("useTicketCache") != null) {
126 | String val = (String) entry.getOptions().get("useTicketCache");
127 | if (val.equals("true")) {
128 | isUsingTicketCache = true;
129 | }
130 | }
131 | if (entry.getOptions().get("principal") != null) {
132 | principal = (String) entry.getOptions().get("principal");
133 | } else {
134 | throw new LoginException("could not determine principal from login context configuration");
135 | }
136 | break;
137 | }
138 |
139 | if (!isKrbTicket) {
140 | // if no TGT, do not bother with ticket management.
141 | return;
142 | }
143 |
144 | // Refresh the Ticket Granting Ticket (TGT) periodically. How often to refresh is determined by the
145 | // TGT's existing expiry date and the configured MIN_TIME_BEFORE_RELOGIN. For testing and development,
146 | // you can decrease the interval of expiration of tickets (for example, to 3 minutes) by running :
147 | // "modprinc -maxlife 3mins " in kadmin.
148 | t = new Thread(new Runnable() {
149 | @Override
150 | public void run() {
151 | LOG.info("TGT refresh thread started.");
152 | while (true) { // renewal thread's main loop. if it exits from here, thread will exit.
153 | KerberosTicket tgt = getTGT();
154 | long now = Time.currentWallTime();
155 | long nextRefresh;
156 | Date nextRefreshDate;
157 | if (tgt == null) {
158 | nextRefresh = now + MIN_TIME_BEFORE_RELOGIN;
159 | nextRefreshDate = new Date(nextRefresh);
160 | LOG.warn("No TGT found: will try again at {}", nextRefreshDate);
161 | } else {
162 | nextRefresh = getRefreshTime(tgt);
163 | long expiry = tgt.getEndTime().getTime();
164 | Date expiryDate = new Date(expiry);
165 | if ((isUsingTicketCache) && (tgt.getEndTime().equals(tgt.getRenewTill()))) {
166 | Object[] logPayload = {expiryDate, getPrincipal(), getPrincipal()};
167 | LOG.error("The TGT cannot be renewed beyond the next expiry date: {}."
168 | + "This process will not be able to authenticate new SASL connections after that "
169 | + "time (for example, it will not be authenticate a new connection with a Kafka "
170 | + "broker). Ask your system administrator to either increase the "
171 | + "'renew until' time by doing : 'modprinc -maxrenewlife {}' within "
172 | + "kadmin, or instead, to generate a keytab for {}. Because the TGT's "
173 | + "expiry cannot be further extended by refreshing, exiting refresh thread now.", logPayload);
174 | return;
175 | }
176 | // determine how long to sleep from looking at ticket's expiry.
177 | // We should not allow the ticket to expire, but we should take into consideration
178 | // MIN_TIME_BEFORE_RELOGIN. Will not sleep less than MIN_TIME_BEFORE_RELOGIN, unless doing so
179 | // would cause ticket expiration.
180 | if ((nextRefresh > expiry)
181 | || ((now + MIN_TIME_BEFORE_RELOGIN) > expiry)) {
182 | // expiry is before next scheduled refresh).
183 | nextRefresh = now;
184 | } else {
185 | if (nextRefresh < (now + MIN_TIME_BEFORE_RELOGIN)) {
186 | // next scheduled refresh is sooner than (now + MIN_TIME_BEFORE_LOGIN).
187 | Date until = new Date(nextRefresh);
188 | Date newuntil = new Date(now + MIN_TIME_BEFORE_RELOGIN);
189 | Object[] logPayload = {until, newuntil, (MIN_TIME_BEFORE_RELOGIN / 1000)};
190 | LOG.warn("TGT refresh thread time adjusted from : {} to : {} since "
191 | + "the former is sooner than the minimum refresh interval ("
192 | + "{} seconds) from now.", logPayload);
193 | }
194 | nextRefresh = Math.max(nextRefresh, now + MIN_TIME_BEFORE_RELOGIN);
195 | }
196 | nextRefreshDate = new Date(nextRefresh);
197 | if (nextRefresh > expiry) {
198 | Object[] logPayload = {nextRefreshDate, expiryDate};
199 | LOG.error("next refresh: {} is later than expiry {}."
200 | + " This may indicate a clock skew problem. Check that this host and the KDC's "
201 | + "hosts' clocks are in sync. Exiting refresh thread.", logPayload);
202 | return;
203 | }
204 | }
205 | if (now == nextRefresh) {
206 | LOG.info("refreshing now because expiry is before next scheduled refresh time.");
207 | } else {
208 | if (now < nextRefresh) {
209 | Date until = new Date(nextRefresh);
210 | LOG.info("TGT refresh sleeping until: {}", until.toString());
211 | try {
212 | Thread.sleep(nextRefresh - now);
213 | } catch (InterruptedException ie) {
214 | LOG.warn("TGT renewal thread has been interrupted and will exit.");
215 | break;
216 | }
217 | } else {
218 | LOG.error("nextRefresh:{} is in the past: exiting refresh thread. Check"
219 | + " clock sync between this host and KDC - (KDC's clock is likely ahead of this host)."
220 | + " Manual intervention will be required for this client to successfully authenticate."
221 | + " Exiting refresh thread.", nextRefreshDate);
222 | break;
223 | }
224 | }
225 | if (isUsingTicketCache) {
226 | LOG.error("use of ticket cache and login via kinit on cmd line not supported");
227 | break;
228 | }
229 | try {
230 | int retry = 1;
231 | while (retry >= 0) {
232 | try {
233 | reLogin();
234 | break;
235 | } catch (LoginException le) {
236 | if (retry > 0) {
237 | --retry;
238 | // sleep for 10 seconds.
239 | try {
240 | Thread.sleep(10 * 1000);
241 | } catch (InterruptedException e) {
242 | LOG.error("Interrupted during login retry after LoginException:", le);
243 | throw le;
244 | }
245 | } else {
246 | LOG.error("Could not refresh TGT for principal: {}.", getPrincipal(), le);
247 | }
248 | }
249 | }
250 | } catch (LoginException le) {
251 | LOG.error("Failed to refresh TGT: refresh thread exiting now.", le);
252 | break;
253 | }
254 | }
255 | }
256 |
257 | });
258 | t.setDaemon(true);
259 | }
260 |
261 | public void startThreadIfNeeded() {
262 | // thread object 't' will be null if a refresh thread is not needed.
263 | if (t != null) {
264 | t.start();
265 | }
266 | }
267 |
268 | public void shutdown() {
269 | if ((t != null) && (t.isAlive())) {
270 | t.interrupt();
271 | try {
272 | t.join();
273 | } catch (InterruptedException e) {
274 | LOG.warn("error while waiting for Login thread to shutdown: ", e);
275 | }
276 | }
277 | }
278 |
279 | public Subject getSubject() {
280 | return subject;
281 | }
282 |
283 | public String getLoginContextName() {
284 | return loginContextName;
285 | }
286 |
287 | private synchronized LoginContext login(final String loginContextName) throws LoginException {
288 | if (loginContextName == null) {
289 | throw new LoginException("loginContext name null ");
290 | }
291 | LoginContext loginContext = new LoginContext(loginContextName, callbackHandler);
292 | loginContext.login();
293 | LOG.info("successfully logged in.");
294 | return loginContext;
295 | }
296 |
297 | // c.f. org.apache.hadoop.security.UserGroupInformation.
298 | private long getRefreshTime(KerberosTicket tgt) {
299 | long start = tgt.getStartTime().getTime();
300 | long expires = tgt.getEndTime().getTime();
301 | LOG.info("TGT valid starting at: {}", tgt.getStartTime().toString());
302 | LOG.info("TGT expires: {}", tgt.getEndTime().toString());
303 | long proposedRefresh = start + (long) ((expires - start)
304 | * (TICKET_RENEW_WINDOW + (TICKET_RENEW_JITTER * rng.nextDouble())));
305 | if (proposedRefresh > expires) {
306 | // proposedRefresh is too far in the future: it's after ticket expires: simply return now.
307 | return Time.currentWallTime();
308 | } else {
309 | return proposedRefresh;
310 | }
311 | }
312 |
313 | private synchronized KerberosTicket getTGT() {
314 | Set tickets = subject.getPrivateCredentials(KerberosTicket.class);
315 | for (KerberosTicket ticket : tickets) {
316 | KerberosPrincipal server = ticket.getServer();
317 | if (server.getName().equals("krbtgt/" + server.getRealm() + "@" + server.getRealm())) {
318 | LOG.debug("Found tgt {}.", ticket);
319 | return ticket;
320 | }
321 | }
322 | return null;
323 | }
324 |
325 | private boolean hasSufficientTimeElapsed() {
326 | long now = Time.currentElapsedTime();
327 | if (now - getLastLogin() < MIN_TIME_BEFORE_RELOGIN) {
328 | LOG.warn("Not attempting to re-login since the last re-login was "
329 | + "attempted less than {} seconds before.",
330 | (MIN_TIME_BEFORE_RELOGIN / 1000));
331 | return false;
332 | }
333 | // register most recent relogin attempt
334 | setLastLogin(now);
335 | return true;
336 | }
337 |
338 | /**
339 | * Returns login object
340 | * @return login
341 | */
342 | private LoginContext getLogin() {
343 | return login;
344 | }
345 |
346 | /**
347 | * Set the login object
348 | * @param login
349 | */
350 | private void setLogin(LoginContext login) {
351 | this.login = login;
352 | }
353 |
354 | /**
355 | * Set the last login time.
356 | * @param time the number of milliseconds since the beginning of time
357 | */
358 | private void setLastLogin(long time) {
359 | lastLogin = time;
360 | }
361 |
362 | /**
363 | * Get the time of the last login.
364 | * @return the number of milliseconds since the beginning of time.
365 | */
366 | private long getLastLogin() {
367 | return lastLogin;
368 | }
369 |
370 | /**
371 | * Re-login a principal. This method assumes that {@link #login(String)} has happened already.
372 | * @throws javax.security.auth.login.LoginException on a failure
373 | */
374 | // c.f. HADOOP-6559
375 | private synchronized void reLogin()
376 | throws LoginException {
377 | if (!isKrbTicket) {
378 | return;
379 | }
380 | LoginContext existingLogin = getLogin();
381 | if (existingLogin == null) {
382 | throw new LoginException("login must be done first");
383 | }
384 | if (!hasSufficientTimeElapsed()) {
385 | return;
386 | }
387 | LOG.info("Initiating logout for {}", principal);
388 | synchronized (Login.class) {
389 | //clear up the kerberos state. But the tokens are not cleared! As per
390 | //the Java kerberos login module code, only the kerberos credentials
391 | //are cleared
392 | existingLogin.logout();
393 | //login and also update the subject field of this instance to
394 | //have the new credentials (pass it to the LoginContext constructor)
395 | existingLogin = new LoginContext(loginContextName, getSubject());
396 | LOG.info("Initiating re-login for {}", principal);
397 | existingLogin.login();
398 | setLogin(existingLogin);
399 | }
400 | }
401 |
402 | /**
403 | * @return the principal
404 | */
405 | public String getPrincipal() {
406 | return principal;
407 | }
408 |
409 | // The CallbackHandler interface here refers to
410 | // javax.security.auth.callback.CallbackHandler.
411 | // It should not be confused with Krackle packet callbacks like
412 | // ClientCallbackHandler in SaslPlainTextAuthenticator
413 | public static class ClientCallbackHandler implements CallbackHandler {
414 |
415 | @Override
416 | public void handle(Callback[] callbacks) throws
417 | UnsupportedCallbackException {
418 | for (Callback callback : callbacks) {
419 | if (callback instanceof NameCallback) {
420 | NameCallback nc = (NameCallback) callback;
421 | nc.setName(nc.getDefaultName());
422 | } else {
423 | if (callback instanceof PasswordCallback) {
424 | LOG.warn("Could not login: the client is being asked for a password");
425 | } else {
426 | if (callback instanceof RealmCallback) {
427 | RealmCallback rc = (RealmCallback) callback;
428 | rc.setText(rc.getDefaultText());
429 | } else {
430 | if (callback instanceof AuthorizeCallback) {
431 | AuthorizeCallback ac = (AuthorizeCallback) callback;
432 | String authid = ac.getAuthenticationID();
433 | String authzid = ac.getAuthorizationID();
434 | if (authid.equals(authzid)) {
435 | ac.setAuthorized(true);
436 | } else {
437 | ac.setAuthorized(false);
438 | }
439 | if (ac.isAuthorized()) {
440 | ac.setAuthorizedID(authzid);
441 | }
442 | } else {
443 | throw new UnsupportedCallbackException(callback, "Unrecognized SASL ClientCallback");
444 | }
445 | }
446 | }
447 | }
448 | }
449 | }
450 |
451 | }
452 |
453 | }
454 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/meta/Broker.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.meta;
17 |
18 | /**
19 | * Class to hold the id, host and port of a Kafka broker.
20 | */
21 | public class Broker {
22 |
23 | private int nodeId;
24 | private String host;
25 | private int port;
26 |
27 | public Broker() {
28 | }
29 |
30 | public Broker(int nodeId, String host, int port) {
31 | this.nodeId = nodeId;
32 | this.host = host;
33 | this.port = port;
34 | }
35 |
36 | public int getNodeId() {
37 | return nodeId;
38 | }
39 |
40 | public void setNodeId(int nodeId) {
41 | this.nodeId = nodeId;
42 | }
43 |
44 | public String getHost() {
45 | return host;
46 | }
47 |
48 | public void setHost(String host) {
49 | this.host = host;
50 | }
51 |
52 | public int getPort() {
53 | return port;
54 | }
55 |
56 | public void setPort(int port) {
57 | this.port = port;
58 | }
59 |
60 | public String getNiceDescription() {
61 | return "node " + nodeId + " @ " + host + ":" + port;
62 | }
63 |
64 | @Override
65 | public String toString() {
66 | return "Broker [nodeId=" + nodeId + ", host=" + host + ", port=" + port
67 | + "]";
68 | }
69 |
70 | }
71 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/meta/MetaData.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.meta;
17 |
18 | import java.io.IOException;
19 | import java.io.InputStream;
20 | import java.net.InetAddress;
21 | import java.net.Socket;
22 | import java.net.UnknownHostException;
23 | import java.nio.ByteBuffer;
24 | import java.nio.charset.Charset;
25 | import java.util.ArrayList;
26 | import java.util.Collections;
27 | import java.util.HashMap;
28 | import java.util.List;
29 | import java.util.Map;
30 |
31 | import org.slf4j.Logger;
32 | import org.slf4j.LoggerFactory;
33 |
34 | import com.blackberry.bdp.krackle.Constants;
35 | import com.blackberry.bdp.krackle.auth.AuthenticatedSocketSingleton;
36 | import com.blackberry.bdp.krackle.exceptions.AuthenticationException;
37 | import java.util.Arrays;
38 |
/**
 * Gather and store metadata for a topic.
 *
 * Contents are:
 * <ul>
 * <li>brokers: a map of broker id to Broker object</li>
 * <li>topics: a map of topic names to Topic objects</li>
 * </ul>
 */
48 | public class MetaData {
49 |
50 | private static final Logger LOG = LoggerFactory.getLogger(MetaData.class);
51 | private static final Charset UTF8 = Charset.forName("UTF-8");
52 |
53 | private final Map brokers = new HashMap<>();
54 | private final Map topics = new HashMap<>();
55 |
56 | private int correlationId;
57 | private List metadataBrokerList;
58 | private byte[] clientId;
59 |
60 | /**
61 | * Metadata for a single topic with a string of seed brokers for a given client
62 | *
63 | * @param metadataBrokerString
64 | * @param topic topic to get metadata about.
65 | * @param clientIdString clientId to send with request.
66 | * @return a new MetaData object containing information on the topic.
67 | */
68 | public static MetaData getMetaData(
69 | String metadataBrokerString,
70 | String topic,
71 | String clientIdString) {
72 | return getMetaData(Arrays.asList(metadataBrokerString.split(",")),
73 | topic,
74 | clientIdString);
75 | }
76 |
77 | /**
78 | * Metadata for a single topic with a list of seed brokers for a given client
79 | *
80 | * @param metadataBrokerList
81 | * @param topic topic to get metadata about.
82 | * @param clientIdString clientId to send with request.
83 | * @return a new MetaData object containing information on the topic.
84 | */
85 | public static MetaData getMetaData(List metadataBrokerList,
86 | String topic,
87 | String clientIdString) {
88 | LOG.info("Getting metadata for {}", topic);
89 |
90 | MetaData metadata = new MetaData();
91 | metadata.metadataBrokerList = metadataBrokerList;
92 | metadata.correlationId = (int) System.currentTimeMillis();
93 | metadata.clientId = clientIdString.getBytes(UTF8);
94 |
95 | return getMetaData(metadata,
96 | buildMetadataRequest(metadata, topic.getBytes(UTF8)));
97 | }
98 |
99 | /**
100 | * Metadata for all topics with a string of seed brokers for a given client
101 | *
102 | * @param metadataBrokerString
103 | * @param clientIdString clientId to send with request.
104 | * @return a new MetaData object containing information on the topic.
105 | */
106 | public static MetaData getMetaData(String metadataBrokerString,
107 | String clientIdString) {
108 |
109 | return getMetaData(Arrays.asList(metadataBrokerString.split(",")),
110 | clientIdString);
111 | }
112 |
113 | /**
114 | * Metadata for all topics with a list of seed brokers for a given client
115 | *
116 | * @param metadataBrokerList
117 | * @param clientIdString clientId to send with request.
118 | * @return a new MetaData object containing information on the topic.
119 | */
120 | public static MetaData getMetaData(List metadataBrokerList,
121 | String clientIdString) {
122 | LOG.info("Getting metadata for all topics");
123 |
124 | MetaData metadata = new MetaData();
125 | metadata.metadataBrokerList = metadataBrokerList;
126 | metadata.correlationId = (int) System.currentTimeMillis();
127 | metadata.clientId = clientIdString.getBytes(UTF8);
128 |
129 | return getMetaData(metadata,buildMetadataRequest(metadata));
130 | }
131 |
132 | private static MetaData getMetaData(MetaData metadata, byte[] request) {
133 |
134 | // Get the broker seeds from the config.
135 | List seedBrokers = new ArrayList<>();
136 | for (String hnp : metadata.metadataBrokerList) {
137 | String[] hostPort = hnp.split(":", 2);
138 | try {
139 | for(InetAddress curhost: InetAddress.getAllByName(hostPort[0])) {
140 | seedBrokers.add(new HostAndPort(curhost, Integer.parseInt(hostPort[1])));
141 | LOG.debug("Adding Broker Candidate - {}", curhost);
142 | }
143 | } catch (UnknownHostException e) {
144 | LOG.info("Unknown Host: {}", hostPort[0]);
145 | }
146 | }
147 |
148 | // Try each seed broker in a random order
149 | Collections.shuffle(seedBrokers);
150 | Socket sock = null;
151 | for (HostAndPort hnp : seedBrokers) {
152 | try {
153 | sock = AuthenticatedSocketSingleton.getInstance().build(hnp.host, hnp.port);
154 | sock.setSoTimeout(5000);
155 | } catch (AuthenticationException e) {
156 | LOG.warn("authentication exception: {}", hnp.host);
157 | continue;
158 | } catch (IOException e) {
159 | LOG.warn("Error connecting to {}:{}", hnp.host, hnp.port);
160 | continue;
161 | }
162 | break;
163 | }
164 |
165 | if (sock == null) {
166 | LOG.error("Unable to connect to any seed broker to updata metadata.");
167 | return null;
168 | }
169 |
170 | try {
171 | sock.getOutputStream().write(request);
172 | byte[] sizeBuffer = new byte[4];
173 | InputStream in = sock.getInputStream();
174 | int bytesRead = 0;
175 | while (bytesRead < 4) {
176 | int read = in.read(sizeBuffer, bytesRead, 4 - bytesRead);
177 | if (read == -1) {
178 | throw new IOException(
179 | "Stream ended before data length could be read.");
180 | }
181 | bytesRead += read;
182 | }
183 | int length = ByteBuffer.wrap(sizeBuffer).getInt();
184 |
185 | byte[] responseArray = new byte[length];
186 | bytesRead = 0;
187 | while (bytesRead < length) {
188 | int read = in.read(responseArray, bytesRead, length - bytesRead);
189 | if (read == -1) {
190 | throw new IOException("Stream ended before end of response.");
191 | }
192 | bytesRead += read;
193 | }
194 | ByteBuffer responseBuffer = ByteBuffer.wrap(responseArray);
195 | int cid = responseBuffer.getInt();
196 | if (cid != metadata.getCorrelationId()) {
197 | LOG.error("Got back wrong correlation id.");
198 | return null;
199 | }
200 |
201 | // Load the brokers
202 | int numBrokers = responseBuffer.getInt();
203 | for (int i = 0; i < numBrokers; i++) {
204 | int nodeId = responseBuffer.getInt();
205 | String host = readString(responseBuffer);
206 | int port = responseBuffer.getInt();
207 | metadata.getBrokers().put(nodeId, new Broker(nodeId, host, port));
208 | LOG.debug("Broker {} @ {}:{}", nodeId, host, port);
209 | }
210 |
211 | // Load the topics
212 | int numTopics = responseBuffer.getInt();
213 | for (int i = 0; i < numTopics; i++) {
214 |
215 | short errorCode = responseBuffer.getShort();
216 | String name = readString(responseBuffer);
217 | Topic t = new Topic(name);
218 | LOG.debug("Topic {} (Error {})", name, errorCode);
219 |
220 | int numParts = responseBuffer.getInt();
221 | for (int j = 0; j < numParts; j++) {
222 |
223 | short partError = responseBuffer.getShort();
224 | int partId = responseBuffer.getInt();
225 | int leaderId = responseBuffer.getInt();
226 | LOG.debug(" Partition ID={}, Leader={} (Error={})", partId,
227 | leaderId, partError);
228 |
229 | Partition part = new Partition(partId);
230 | part.setLeader(metadata.brokers.get(leaderId));
231 |
232 | int numReplicas = responseBuffer.getInt();
233 | for (int k = 0; k < numReplicas; k++) {
234 | int replicaBrokerId = responseBuffer.getInt();
235 | LOG.debug(" Replica on {}", replicaBrokerId);
236 | part.getReplicas().add(metadata.brokers.get(replicaBrokerId));
237 | }
238 |
239 | int numIsr = responseBuffer.getInt();
240 | for (int k = 0; k < numIsr; k++) {
241 | int isrBrokerId = responseBuffer.getInt();
242 | LOG.debug(" Isr on {}", isrBrokerId);
243 | part.getInSyncReplicas().add(metadata.brokers.get(isrBrokerId));
244 | }
245 |
246 | t.getPartitions().add(part);
247 | }
248 |
249 | metadata.getTopics().put(name, t);
250 | }
251 |
252 | } catch (IOException e) {
253 | LOG.error("Failed to get metadata");
254 | return null;
255 | } finally {
256 | try {
257 | sock.close();
258 | } catch (IOException ioe) {
259 | LOG.error("failed to close socket: ", ioe);
260 | }
261 | }
262 |
263 | LOG.info("Metadata request successful");
264 | return metadata;
265 | }
266 |
267 |
268 | private static String readString(ByteBuffer bb) {
269 | short length = bb.getShort();
270 | byte[] a = new byte[length];
271 | bb.get(a);
272 | return new String(a, UTF8);
273 | }
274 |
275 | private static byte[] buildMetadataRequest(MetaData md) {
276 | byte[] request = new byte[20 + md.clientId.length];
277 | ByteBuffer bb = ByteBuffer.wrap(request);
278 | bb.putInt(16 + md.clientId.length);
279 | bb.putShort(Constants.APIKEY_METADATA);
280 | bb.putShort(Constants.API_VERSION);
281 | bb.putInt(md.correlationId);
282 | bb.putShort((short) md.clientId.length);
283 | bb.put(md.clientId);
284 | bb.putInt(0);
285 | return request;
286 | }
287 |
288 | private static byte[] buildMetadataRequest(MetaData md, byte[] topic) {
289 | byte[] request = new byte[20 + md.clientId.length + topic.length];
290 | ByteBuffer bb = ByteBuffer.wrap(request);
291 | bb.putInt(16 + md.clientId.length + topic.length);
292 | bb.putShort(Constants.APIKEY_METADATA);
293 | bb.putShort(Constants.API_VERSION);
294 | bb.putInt(md.correlationId);
295 | bb.putShort((short) md.clientId.length);
296 | bb.put(md.clientId);
297 | // topics.
298 | bb.putInt(1);
299 | bb.putShort((short) topic.length);
300 | bb.put(topic);
301 | return request;
302 | }
303 |
// Holds the address and port of a seed broker used for metadata requests.
// NOTE(review): the original comment here claimed the hostname is stored as
// given (not an InetAddress) so that DNS changes are picked up by
// re-resolving on each use — but the field below IS an InetAddress, which is
// resolved once when the instance is created. Confirm whether String-based
// storage with per-use re-resolution was the intended behavior.
private static class HostAndPort {

	InetAddress host;
	int port;

	HostAndPort(InetAddress host, int port) {
		this.host = host;
		this.port = port;
	}

	@Override
	public String toString() {
		return "HostAndPort [host=" + host.getHostName() + ", port=" + port + "]";
	}

}
323 |
324 | public Map getBrokers() {
325 | return brokers;
326 | }
327 |
328 | public Broker getBroker(Integer id) {
329 | return brokers.get(id);
330 | }
331 |
332 | public Map getTopics() {
333 | return topics;
334 | }
335 |
336 | public Topic getTopic(String name) {
337 | return topics.get(name);
338 | }
339 |
340 | public int getCorrelationId() {
341 | return correlationId;
342 | }
343 |
344 | @Override
345 | public String toString() {
346 | return "MetaData [brokers=" + brokers + ", topics=" + topics + "]";
347 | }
348 |
349 | }
350 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/meta/Partition.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.meta;
17 |
18 | import java.util.HashSet;
19 | import java.util.Set;
20 |
21 | /**
22 | * Holder for information about a partition.
23 | */
24 | public class Partition {
25 |
26 | int partitionId;
27 | Broker leader;
28 |
29 | Set replicas = new HashSet<>();
30 | Set inSyncReplicas = new HashSet<>();
31 |
32 | public Partition() {
33 | }
34 |
35 | public Partition(int partId) {
36 | this.partitionId = partId;
37 | }
38 |
39 | public int getPartitionId() {
40 | return partitionId;
41 | }
42 |
43 | public void setPartitionId(int partId) {
44 | this.partitionId = partId;
45 | }
46 |
47 | public Broker getLeader() {
48 | return leader;
49 | }
50 |
51 | public void setLeader(Broker leader) {
52 | this.leader = leader;
53 | }
54 |
55 | public Set getReplicas() {
56 | return replicas;
57 | }
58 |
59 | public Set getInSyncReplicas() {
60 | return inSyncReplicas;
61 | }
62 |
63 | @Override
64 | public String toString() {
65 | return "Partition [partId=" + partitionId + ", leader=" + leader + ", replicas="
66 | + replicas + ", inSyncReplicas=" + inSyncReplicas + "]";
67 | }
68 |
69 | }
70 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/meta/Topic.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.meta;
17 |
18 | import java.util.ArrayList;
19 | import java.util.List;
20 |
21 | /**
22 | * Holder for information about a topic.
23 | */
24 | public class Topic {
25 |
26 | String name;
27 | List partitions = new ArrayList<>();
28 |
29 | public Topic() {
30 | }
31 |
32 | public Topic(String name) {
33 | this.name = name;
34 | }
35 |
36 | public int getNumPartitions() {
37 | return partitions.size();
38 | }
39 |
40 | public Partition getPartition(int i) {
41 | for (Partition p : partitions) {
42 | if (p.getPartitionId() == i) {
43 | return p;
44 | }
45 | }
46 | return null;
47 | }
48 |
49 | public String getName() {
50 | return name;
51 | }
52 |
53 | public void setName(String name) {
54 | this.name = name;
55 | }
56 |
57 | public List getPartitions() {
58 | return partitions;
59 | }
60 |
61 | @Override
62 | public String toString() {
63 | return "Topic [name=" + name + ", partitions=" + partitions + "]";
64 | }
65 |
66 | }
67 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/producer/MessageSetBuffer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.producer;
17 |
18 | import java.nio.ByteBuffer;
19 |
20 | /**
21 | * Buffer to hold data that is to be sent to Kafka.
22 | */
/**
 * Buffer to hold data that is to be sent to Kafka.
 *
 * The constructor keeps {@code size}, {@code bytes} and {@code buffer}
 * consistent ({@code buffer} wraps {@code bytes}, which is {@code size} bytes
 * long); the individual setters below do NOT maintain that invariant — see
 * the notes on each.
 */
public class MessageSetBuffer {

	// Capacity in bytes of the backing array allocated by the constructor.
	private int size;
	// Backing array; the ByteBuffer below wraps this same array.
	private byte[] bytes;
	private ByteBuffer buffer;
	// Number of messages currently batched into this buffer.
	private int batchSize = 0;
	// The producer this buffer belongs to.
	private Producer producer;

	public MessageSetBuffer(Producer producer, int size) {
		this.producer = producer;
		this.size = size;
		bytes = new byte[this.size];
		buffer = ByteBuffer.wrap(bytes);
	}

	/** Resets for reuse: zero batched messages and the buffer position rewound. */
	public void clear() {
		batchSize = 0;
		buffer.clear();
	}

	public int getSize() {
		return size;
	}

	// NOTE(review): only records the new value — it does not reallocate bytes
	// or re-wrap buffer, so callers must keep the three fields in sync.
	public void setSize(int size) {
		this.size = size;
	}

	public byte[] getBytes() {
		return bytes;
	}

	// NOTE(review): does not re-wrap buffer around the new array; buffer keeps
	// pointing at the old array unless setBuffer is called too — confirm
	// callers always do both.
	public void setBytes(byte[] bytes) {
		this.bytes = bytes;
	}

	public ByteBuffer getBuffer() {
		return buffer;
	}

	public void setBuffer(ByteBuffer buffer) {
		this.buffer = buffer;
	}

	public int getBatchSize() {
		return batchSize;
	}

	public void setBatchSize(int batchSize) {
		this.batchSize = batchSize;
	}

	public Producer getProducer() {
		return producer;
	}

	public void setProducer(Producer producer) {
		this.producer = producer;
	}

	/** Records that one more message has been written into the buffer. */
	public void incrementBatchSize() {
		batchSize++;
	}

}
88 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/producer/MissingPartitionsException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.producer;
17 |
18 | /**
19 | *
20 | * @author dariens
21 | */
/**
 * Thrown when no partitions are available for the topic a producer is trying
 * to write to.
 *
 * @author dariens
 */
public class MissingPartitionsException extends Exception {

	// Exception is Serializable; pin the serial version id so future edits to
	// this class do not silently break serialized-form compatibility.
	private static final long serialVersionUID = 1L;

	/**
	 * @param error description of the failure
	 */
	public MissingPartitionsException(String error) {
		super(error);
	}

	/**
	 * @param error description of the failure
	 * @param e underlying cause
	 */
	public MissingPartitionsException(String error, Exception e) {
		super(error, e);
	}

}
33 |
--------------------------------------------------------------------------------
/src/main/java/com/blackberry/bdp/krackle/producer/ProducerConfiguration.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle.producer;
17 |
import com.blackberry.bdp.krackle.auth.AuthenticatedSocketSingleton;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.zip.Deflater;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
26 |
27 | /**
28 | * Configuration for a producer.
29 | *
30 | * Many of these properties are the same as those in the standard Java client, as documented at http://kafka.apache.org/documentation.html#producerconfigs
31 | *
32 | *
NOTE: Every single one of these properties can be overwritten for a specific topic by using the following property patten:
33 | *
34 | *
source.<topic>.<property>
35 | *
36 | *
Valid properties are
37 | *
38 | *
39 | *
40 | *
41 | *
property
42 | *
default
43 | *
description
44 | *
45 | *
46 | *
47 | *
metadata.broker.list
48 | *
49 | *
(required) A comma separated list of seed brokers to connect to in order to get metadata about the cluster.
50 | *
51 | *
52 | *
53 | *
queue.buffering.max.ms
54 | *
5000
55 | *
Maximum time to buffer data. For example a setting of 100 will try to batch together 100ms of messages to send at once. This will improve throughput but adds message delivery latency due to the buffering.
56 | *
57 | *
58 | *
59 | *
request.required.acks
60 | *
1
61 | *
This value controls when a produce request is considered completed. Specifically, how many other brokers must have committed the data to their log and acknowledged this to the leader? Typical values are
62 | *
63 | *
0, which means that the producer never waits for an acknowledgement from the broker (the same behavior as 0.7). This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
64 | *
1, which means that the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost).
65 | *
-1, which means that the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains.
66 | *
67 | *
68 | *
69 | *
70 | *
71 | *
request.timeout.ms
72 | *
10000
73 | *
The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client.
74 | *
75 | *
76 | *
77 | *
78 | *
message.send.max.retries
79 | *
3
80 | *
This property will cause the producer to automatically retry a failed send request. This property specifies the number of retries when such failures occur. Note that setting a non-zero value here can lead to duplicates in the case of network errors that cause a message to be sent but the acknowledgement to be lost.
81 | *
82 | *
83 | *
84 | *
retry.backoff.ms
85 | *
100
86 | *
Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata.
87 | *
88 | *
89 | *
90 | *
topic.metadata.refresh.interval.ms
91 | *
60 * 10 * 1000
92 | *
The producer generally refreshes the topic metadata from brokers when there is a failure (partition missing, leader not available...). It will also poll regularly (default: every 10min so 600000ms). If you set this to a negative value, metadata will only get refreshed on failure. If you set this to zero, the metadata will get refreshed after each message sent (not recommended). Important note: the refresh happen only AFTER the message is sent, so if the producer never sends a message the metadata is never refreshed
93 | *
94 | *
95 | *
96 | *
message.buffer.size
97 | *
1024*1024
98 | *
The size of each buffer that is used to store raw messages before they are sent. Since a full buffer is sent at once, don't make this too big.
99 | *
100 | *
101 | *
102 | *
use.shared.buffers
103 | *
false
104 | *
If this is set to true, then there will be one set of buffers that is used by all Producer instances. In that case, ensure that num.buffers is large enough to accommodate this.
105 | *
106 | *
107 | *
108 | *
num.buffers
109 | *
2
110 | *
The number of buffers to use. At any given time, there is up to one buffer being filled with new data, up to one buffer having its data sent to the broker, and any number of buffers waiting to be filled and/or sent.
111 | *
112 | * Essentially, the limit of the amount of data that can be queued at at any given time is message.buffer.size num.buffers. Although, in reality, you won't get buffers to 100% full each time.
113 | *
114 | * If use.shared.buffers=false, then this many buffers will be allocated per Producer. If use.shared.buffers=true then this is the total for the JVM.
115 | *
116 | *
117 | *
118 | *
send.buffer.size
119 | *
1.5 * 1024 * 1024
120 | *
Size of the byte buffer used to store the final (with headers and compression applied) data to be sent to the broker.
121 | *
122 | *
123 | *
124 | *
sender.threads
125 | *
1
126 | *
How many threads to use for sending to the broker. A larger number can be useful for higher latency high volume topics however message order cannot be guaranteed
127 | *
128 | *
129 | *
130 | *
compression.codec
131 | *
none
132 | *
This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are "none", "gzip" and "snappy".
133 | *
134 | *
135 | *
136 | *
gzip.compression.level
137 | *
java.util.zip.Deflater.DEFAULT_COMPRESSION
138 | *
If compression.codec is set to gzip, then this allows configuration of the compression level.
139 | *
140 | *
-1: default compression level
141 | *
0: no compression
142 | *
1-9: 1=fastest compression ... 9=best compression
143 | *
144 | *
145 | *
146 | *
147 | *
148 | *
queue.enqueue.timeout.ms
149 | *
-1
150 | *
The amount of time to block before dropping messages when all buffers are full. If set to 0 events will be enqueued immediately or dropped if the queue is full (the producer send call will never block). If set to -1 the producer will block indefinitely and never willingly drop a send.
151 | *
152 | *
153 | *
154 | *
155 | *
NOTE: Quick rotate, rotate, and partition rotation in General
156 |
157 | *
Quick rotate is no longer a supported configuration item. Instead all topic meta data refreshes will rotate partitions and if quicker rotation is required than topic.metadata.refresh.interval.ms can be configured accordingly. Regular topic specific overrides are possible as well for topics that require faster rotaiton.
158 |
159 | *
160 | */
161 | public class ProducerConfiguration {
162 |
private static final Logger LOG = LoggerFactory.getLogger(ProducerConfiguration.class);

protected static final int ONE_MB = 1024 * 1024;
private final Properties props;
// Topic this configuration applies to; used to resolve
// "source.<topic>.<property>" overrides. May be null, in which case only the
// global property names are consulted.
public String topicName;

// Options matching the producer client
private List metadataBrokerList;
private short requestRequiredAcks;
private int requestTimeoutMs;
private int initialSocketConnectionTimeoutMs;

private String compressionCodec;
private int messageSendMaxRetries;
private int retryBackoffMs;
private final int retryBackoffExponent;

private int senderThreads;
// Encodes partitions.rotate: 0 = "false", 1 = "true", 2 = "random"
// (see parsePartitionsRotate).
private int partitionsRotate;
private long topicMetadataRefreshIntervalMs;
private long queueBufferingMaxMs;
private long queueEnqueueTimeoutMs;

// Client specific options
private int messageBufferSize;
private int numBuffers;
private int sendBufferSize;
private int compressionLevel;
private boolean useSharedBuffers;
192 |
193 | /**
194 | * ProducerConfiguration class that supports parsing properties that all support
195 | * being prefixed with a topic name for overriding default values per topic as required
196 | * @param props the properties object to parse
197 | * @param topicName the topic being configured
198 | * @throws Exception
199 | */
public ProducerConfiguration(Properties props,
	 String topicName) throws Exception {
	this.props = props;
	this.topicName = topicName;

	LOG.info("Building configuration.");

	// Each parse* helper resolves the topic-aware property name
	// ("source.<topic>.<prop>" when present, otherwise the global name),
	// validates the value, and logs the effective setting.
	metadataBrokerList = parseMetadataBrokerList("metadata.broker.list");
	queueBufferingMaxMs = parseQueueBufferingMaxMs("queue.buffering.max.ms", "5000");
	requestRequiredAcks = parseRequestRequiredAcks("request.required.acks", "1");
	requestTimeoutMs = parseRequestTimeoutMs("request.timeout.ms", "10000");
	// Read directly — unlike the rest, no topic-aware lookup and no validation.
	initialSocketConnectionTimeoutMs = Integer.parseInt(props.getProperty("initial.socket.connection.timeout.ms", "3000"));
	messageSendMaxRetries = parseMessageSendMaxRetries("message.send.max.retries", "3");
	retryBackoffMs = parseRetryBackoffMs("retry.backoff.ms", "100");
	// NOTE(review): reuses parseRetryBackoffMs to parse/validate the exponent
	// property (same non-negative-int rule) — confirm this is intentional.
	retryBackoffExponent = parseRetryBackoffMs("retry.backoff.exponent", "1");
	topicMetadataRefreshIntervalMs = parseTopicMetadataRefreshIntervalMs("topic.metadata.refresh.interval.ms", "" + (60 * 10 * 1000));
	partitionsRotate = parsePartitionsRotate("partitions.rotate", "random");
	sendBufferSize = parseSendBufferSize("send.buffer.size", "" + (int) (1.5 * 1024 * 1024));
	compressionCodec = parseCompressionCodec("compression.codec", "none");
	compressionLevel = parsecCmpressionLevel("gzip.compression.level", "" + Deflater.DEFAULT_COMPRESSION);
	queueEnqueueTimeoutMs = parseQueueEnqueueTimeoutMs("queue.enqueue.timeout.ms", "-1");
	senderThreads = parseSenderThreads("sender.threads", "1");

	// The (receive) buffers are a special story, so we'll parse and set them in one go.
	parseAndSetBuffers("use.shared.buffers", "false", "message.buffer.size", "" + ONE_MB, "num.buffers", "2");

	// Configure the process-wide authenticated socket factory from the same props.
	AuthenticatedSocketSingleton.getInstance().configure(props);
}
228 |
229 |
230 | /**
231 | *
232 | * @param propName the property name to obtain a topic aware override for
233 | * @return The name of the property
234 | */
235 | public String getTopicAwarePropName(String propName) {
236 | if (getTopicName() == null) {
237 | return propName;
238 | }
239 |
240 | String topicPropName = String.format("source.%s.%s", getTopicName(), propName);
241 |
242 | if (props.containsKey(topicPropName)) {
243 | LOG.debug("topic specific property {} exists that overrides {} ", topicPropName, propName);
244 | return topicPropName;
245 | }
246 |
247 | return propName;
248 | }
249 |
250 | private List parseMetadataBrokerList(String propName) throws Exception {
251 | List myMetadataBrokerList = new ArrayList<>();
252 | String propNameBrokerList = getTopicAwarePropName(propName);
253 | String metadataBrokerListString = props.getProperty(propNameBrokerList);
254 |
255 | if (metadataBrokerListString == null || metadataBrokerListString.isEmpty()) {
256 | throw new Exception(String.format("%s cannot be empty", propNameBrokerList));
257 | }
258 |
259 | for (String s : metadataBrokerListString.split(",")) {
260 | if (s.matches("^[\\.a-zA-Z0-9-]*:\\d+$")) {
261 | myMetadataBrokerList.add(s);
262 | } else {
263 | throw new Exception(String.format(
264 | "%s must contain a comma separated list of host:port, without spaces). Got %s",
265 | propNameBrokerList, metadataBrokerListString));
266 | }
267 | }
268 |
269 | LOG.info("{} = {}", propNameBrokerList, myMetadataBrokerList);
270 |
271 | return myMetadataBrokerList;
272 | }
273 |
274 | private Long parseQueueBufferingMaxMs(String propName, String defaultValue) throws Exception {
275 | String propNameQueueBufferignMaxMs = getTopicAwarePropName(propName);
276 | Long myQueueBufferingMaxMs = Long.parseLong(props.getProperty(propNameQueueBufferignMaxMs, defaultValue));
277 |
278 | if (myQueueBufferingMaxMs < 0) {
279 | throw new Exception(String.format("%s cannot be negative", propNameQueueBufferignMaxMs));
280 | }
281 |
282 | LOG.info("{} = {}", propNameQueueBufferignMaxMs, myQueueBufferingMaxMs);
283 | return myQueueBufferingMaxMs;
284 | }
285 |
286 | private Short parseRequestRequiredAcks(String propName, String defaultValue) throws Exception {
287 | String acksPropertyName = getTopicAwarePropName(propName);
288 | Short myRequestRequiredAcks = Short.parseShort(props.getProperty(acksPropertyName, defaultValue));
289 |
290 | if (myRequestRequiredAcks != -1 && myRequestRequiredAcks != 0 && myRequestRequiredAcks != 1) {
291 | throw new Exception(String.format("%s can only be -1, 0 or 1. Got %s", acksPropertyName, myRequestRequiredAcks));
292 | }
293 |
294 | LOG.info("{} = {}", acksPropertyName, myRequestRequiredAcks);
295 | return myRequestRequiredAcks;
296 | }
297 |
298 | private Integer parseRequestTimeoutMs(String propName, String defaultValue) throws Exception {
299 | String propNameRequestTimeoutMs = getTopicAwarePropName(propName);
300 | Integer myRequestTimeoutMs = Integer.parseInt(props.getProperty(propNameRequestTimeoutMs, defaultValue));
301 |
302 | if (myRequestTimeoutMs < 0) {
303 | throw new Exception(String.format("%s cannot be negative. Got %s ", propNameRequestTimeoutMs, myRequestTimeoutMs));
304 | }
305 |
306 | LOG.info("{} = {}", propNameRequestTimeoutMs, myRequestTimeoutMs);
307 | return myRequestTimeoutMs;
308 | }
309 |
310 | private Integer parseMessageSendMaxRetries(String propName, String defaultValue) throws Exception {
311 | String propNameSendMaxRetries = getTopicAwarePropName(propName);
312 | Integer myMessageSendMaxRetries = Integer.parseInt(props.getProperty(propNameSendMaxRetries, defaultValue));
313 |
314 | if (myMessageSendMaxRetries < 0) {
315 | throw new Exception(String.format("%s cannot be negative. Got %s", propNameSendMaxRetries, myMessageSendMaxRetries));
316 | }
317 |
318 | LOG.info("{} = {}", propNameSendMaxRetries, myMessageSendMaxRetries);
319 | return myMessageSendMaxRetries;
320 | }
321 |
322 | private Integer parseRetryBackoffMs(String propName, String defaultValue) throws Exception {
323 | String propNameRetryBackoffMs = getTopicAwarePropName(propName);
324 | Integer myRetryBackoffMs = Integer.parseInt(props.getProperty(propNameRetryBackoffMs, defaultValue));
325 |
326 | if (myRetryBackoffMs < 0) {
327 | throw new Exception(String.format("%s cannot be negative. Got %s", propNameRetryBackoffMs, myRetryBackoffMs));
328 | }
329 |
330 | LOG.info("{} = {}", propNameRetryBackoffMs, myRetryBackoffMs);
331 | return myRetryBackoffMs;
332 | }
333 |
334 | private Long parseTopicMetadataRefreshIntervalMs(String propName, String defaultValue) throws Exception {
335 | String propNameTopicMetadataRefreshIntervalMs = getTopicAwarePropName(propName);
336 | Long myTopicMetadataRefreshIntervalMs = Long.parseLong(props.getProperty(propNameTopicMetadataRefreshIntervalMs, defaultValue));
337 | LOG.info("{} = {}", propNameTopicMetadataRefreshIntervalMs, myTopicMetadataRefreshIntervalMs);
338 | return myTopicMetadataRefreshIntervalMs;
339 | }
340 |
341 | private int parsePartitionsRotate(String propName, String defaultValue) throws Exception {
342 | String propNamePartitionsRotate = getTopicAwarePropName(propName);
343 |
344 | String myPartitionsRotateString = props.getProperty(propNamePartitionsRotate, defaultValue);
345 | int myPartitionsRotate = 0;
346 |
347 | if (myPartitionsRotateString.compareToIgnoreCase("false") == 0) {
348 | myPartitionsRotate = 0;
349 | } else {
350 | if (myPartitionsRotateString.compareToIgnoreCase("true") == 0) {
351 | myPartitionsRotate = 1;
352 | } else {
353 | if (myPartitionsRotateString.compareToIgnoreCase("random") == 0) {
354 | myPartitionsRotate = 2;
355 | } else {
356 | throw new Exception(String.format("%s must be one of false, true, random. Got %s", propNamePartitionsRotate, myPartitionsRotateString));
357 | }
358 | }
359 | }
360 |
361 | LOG.info("{} = {}", propNamePartitionsRotate, myPartitionsRotateString);
362 | return myPartitionsRotate;
363 | }
364 |
365 | private int parseSenderThreads(String propName, String defaultValue) throws Exception {
366 | String propNameTopicSenderThreads = getTopicAwarePropName(propName);
367 | Integer myTopicSenderThreads = Integer.parseInt(props.getProperty(propNameTopicSenderThreads, defaultValue));
368 | LOG.info("{} = {}", propNameTopicSenderThreads, myTopicSenderThreads);
369 | return myTopicSenderThreads;
370 | }
371 |
private void parseAndSetBuffers(
	 String sharedBuffersPropName,
	 String sharedBuffersDefault,
	 String defaultPropNameBufferSize,
	 String defaultBufferSize,
	 String defaultPropNameNumBuffers,
	 String defaultNumBuffers) throws Exception {
	/**
	 * The Buffers Story:
	 *
	 * We may be using shared buffers, HOWEVER a topic can specify its own
	 * num.buffers and message.buffer.size, in which case its buffers become
	 * entirely private. If that's the case then we need to force
	 * useSharedBuffers=false and the remaining topic aware property naming
	 * does the rest.
	 */

	// use.shared.buffers itself is read with the plain (non topic-aware) name.
	useSharedBuffers = Boolean.parseBoolean(props.getProperty(sharedBuffersPropName, sharedBuffersDefault).trim());

	LOG.info("The global/non-topic aware property {} = {}", sharedBuffersPropName, useSharedBuffers);

	String propNameBufferSize = getTopicAwarePropName(defaultPropNameBufferSize);
	String propNameNumBuffers = getTopicAwarePropName(defaultPropNameNumBuffers);

	// XOR: exactly one of the two has a topic specific override — an error;
	// they must be overridden together or not at all.
	if (propNameBufferSize.equals(defaultPropNameBufferSize) ^ propNameNumBuffers.equals(defaultPropNameNumBuffers)) {
		throw new Exception(String.format("%s and %s specified, cannot mix topic specific and global properties",
			 propNameBufferSize, propNameNumBuffers));
	}

	// Both properties are topic specific overrides: force private buffers.
	if (false == (propNameNumBuffers.equals(defaultPropNameNumBuffers) && propNameBufferSize.equals(defaultPropNameBufferSize))) {
		useSharedBuffers = false;
		LOG.warn("{} = {}, and {} = {}", propNameBufferSize, defaultPropNameBufferSize, propNameNumBuffers, defaultPropNameNumBuffers);
		LOG.warn("{} forcing inherently private buffers as topic specific configuration exists", getTopicName());
	}

	messageBufferSize = Integer.parseInt(props.getProperty(propNameBufferSize, defaultBufferSize));

	if (messageBufferSize < 1) {
		throw new Exception(String.format("%s must be greater than 0. Got %s", propNameBufferSize, messageBufferSize));
	}

	LOG.info("{} = {}", propNameBufferSize, messageBufferSize);

	numBuffers = Integer.parseInt(props.getProperty(propNameNumBuffers, defaultNumBuffers));

	// Minimum of 2: per the class Javadoc, up to one buffer is being filled
	// while up to one other is being sent.
	if (numBuffers < 2) {
		throw new Exception(String.format("%s must be at least 2. Got %s", propNameNumBuffers, numBuffers));
	}

	LOG.info("{} = {}", propNameNumBuffers, numBuffers);
}
429 |
430 | private Integer parseSendBufferSize(String propName, String defaultValue) throws Exception {
431 | String propNameSendBufferSize = getTopicAwarePropName(propName);
432 | Integer mySendBufferSize = Integer.parseInt(props.getProperty(propNameSendBufferSize, defaultValue));
433 |
434 | if (mySendBufferSize < 1) {
435 | throw new Exception(String.format("%s must be greater than 0. Got %s", propNameSendBufferSize, mySendBufferSize));
436 | }
437 |
438 | LOG.info("{} = {}", propNameSendBufferSize, mySendBufferSize);
439 | return mySendBufferSize;
440 | }
441 |
442 | private String parseCompressionCodec(String propName, String defaultValue) throws Exception {
443 | String propNameRawCompressionCodec = getTopicAwarePropName(propName);
444 | String rawCompressionCodec = props.getProperty(propNameRawCompressionCodec, defaultValue);
445 | String myCompressionCodec = rawCompressionCodec.toLowerCase();
446 |
447 | if (myCompressionCodec.equals("none") == false
448 | && myCompressionCodec.equals("gzip") == false
449 | && myCompressionCodec.equals("snappy") == false) {
450 | throw new Exception(String.format("%s must be one of none, gzip or snappy. Got %s", propNameRawCompressionCodec, myCompressionCodec));
451 | }
452 |
453 | LOG.info("{} = {}", propNameRawCompressionCodec, myCompressionCodec);
454 | return myCompressionCodec;
455 | }
456 |
457 | private Integer parsecCmpressionLevel(String propName, String defaultValue) throws Exception {
458 | String propNameCompressionLevel = getTopicAwarePropName(propName);
459 | Integer myCompressionLevel = Integer.parseInt(props.getProperty(propNameCompressionLevel, defaultValue));
460 |
461 | if (myCompressionLevel < -1 || myCompressionLevel > 9) {
462 | throw new Exception(String.format("%s must be -1 (default), 0 (no compression) or in the range 1-9. Got %s", propNameCompressionLevel, myCompressionLevel));
463 | }
464 |
465 | LOG.info("{} = {}", propNameCompressionLevel, myCompressionLevel);
466 | return myCompressionLevel;
467 | }
468 |
469 | private Long parseQueueEnqueueTimeoutMs(String propName, String defaultValue) throws Exception {
470 | String propNameQueueEnqueueTimeoutMs = getTopicAwarePropName(propName);
471 | Long myQueueEnqueueTimeoutMs = Long.parseLong(props.getProperty(propNameQueueEnqueueTimeoutMs, defaultValue));
472 |
473 | if (myQueueEnqueueTimeoutMs != -1 && myQueueEnqueueTimeoutMs < 0) {
474 | throw new Exception(String.format("%s must either be -1 or a non-negative. Got %s", propNameQueueEnqueueTimeoutMs, myQueueEnqueueTimeoutMs));
475 | }
476 |
477 | LOG.info("{} = {}", propNameQueueEnqueueTimeoutMs, myQueueEnqueueTimeoutMs);
478 | return myQueueEnqueueTimeoutMs;
479 | }
480 |
	/**
	 * @return the list of metadata brokers ("host:port" entries) used for
	 *         metadata requests
	 */
	// NOTE(review): the raw List type here looks like a generic type argument
	// stripped by the source export (probably List<String>) — confirm against
	// the field declaration before changing.
	public List getMetadataBrokerList() {
		return metadataBrokerList;
	}

	/**
	 * @param metadataBrokerList the list of metadata brokers ("host:port"
	 *        entries) used for metadata requests
	 */
	public void setMetadataBrokerList(List metadataBrokerList) {
		this.metadataBrokerList = metadataBrokerList;
	}
496 |
	/**
	 * @return the queue buffering maximum, in milliseconds
	 */
	public long getQueueBufferingMaxMs() {
		return queueBufferingMaxMs;
	}

	/**
	 * @param queueBufferingMaxMs the queue buffering maximum, in milliseconds
	 */
	public void setQueueBufferingMaxMs(long queueBufferingMaxMs) {
		this.queueBufferingMaxMs = queueBufferingMaxMs;
	}

	/**
	 * @return the number of required acks for a produce request
	 */
	public short getRequestRequiredAcks() {
		return requestRequiredAcks;
	}

	/**
	 * @param requestRequiredAcks the number of required acks for a produce request
	 */
	public void setRequestRequiredAcks(short requestRequiredAcks) {
		this.requestRequiredAcks = requestRequiredAcks;
	}

	/**
	 * @return the request timeout, in milliseconds
	 */
	public int getRequestTimeoutMs() {
		return requestTimeoutMs;
	}

	/**
	 * @param requestTimeoutMs the request timeout, in milliseconds
	 */
	public void setRequestTimeoutMs(int requestTimeoutMs) {
		this.requestTimeoutMs = requestTimeoutMs;
	}

	/**
	 * @return the maximum number of retries when sending a message
	 */
	public int getMessageSendMaxRetries() {
		return messageSendMaxRetries;
	}

	/**
	 * @param messageSendMaxRetries the maximum number of retries when sending a message
	 */
	public void setMessageSendMaxRetries(int messageSendMaxRetries) {
		this.messageSendMaxRetries = messageSendMaxRetries;
	}
560 |
	/**
	 * @return the number of threads sending to the broker
	 */
	public int getSenderThreads() {
		return senderThreads;
	}

	/**
	 * @param senderThreads the number of threads sending to the broker
	 */
	public void setSenderThreads(int senderThreads) {
		this.senderThreads = senderThreads;
	}

	/**
	 * @return the back-off, in milliseconds, applied before retrying
	 */
	public int getRetryBackoffMs() {
		return retryBackoffMs;
	}

	/**
	 * @param retryBackoffMs the back-off, in milliseconds, applied before retrying
	 */
	public void setRetryBackoffMs(int retryBackoffMs) {
		this.retryBackoffMs = retryBackoffMs;
	}

	/**
	 * @return the message buffer size, in bytes
	 */
	public int getMessageBufferSize() {
		return messageBufferSize;
	}

	/**
	 * @param messageBufferSize the message buffer size, in bytes
	 */
	public void setMessageBufferSize(int messageBufferSize) {
		this.messageBufferSize = messageBufferSize;
	}

	/**
	 * @return whether shared buffers are being used
	 */
	public boolean isUseSharedBuffers() {
		return useSharedBuffers;
	}

	/**
	 * @param useSharedBuffers whether shared buffers should be used
	 */
	public void setUseSharedBuffers(boolean useSharedBuffers) {
		this.useSharedBuffers = useSharedBuffers;
	}
624 |
	/**
	 * @return the number of buffers (configuration requires at least 2)
	 */
	public int getNumBuffers() {
		return numBuffers;
	}

	/**
	 * @param numBuffers the number of buffers
	 */
	public void setNumBuffers(int numBuffers) {
		this.numBuffers = numBuffers;
	}

	/**
	 * @return the send buffer size, in bytes
	 */
	public int getSendBufferSize() {
		return sendBufferSize;
	}

	/**
	 * @param sendBufferSize the send buffer size, in bytes
	 */
	public void setSendBufferSize(int sendBufferSize) {
		this.sendBufferSize = sendBufferSize;
	}

	/**
	 * @return the compression codec in use: "none", "gzip" or "snappy"
	 */
	public String getCompressionCodec() {
		return compressionCodec;
	}

	/**
	 * @param compressionCodec the compression codec to use: "none", "gzip" or "snappy"
	 */
	public void setCompressionCodec(String compressionCodec) {
		this.compressionCodec = compressionCodec;
	}

	/**
	 * @return the compression level: -1 (default), 0 (no compression) or 1-9
	 */
	public int getCompressionLevel() {
		return compressionLevel;
	}

	/**
	 * @param compressionLevel the compression level: -1 (default), 0 (no compression) or 1-9
	 */
	public void setCompressionLevel(int compressionLevel) {
		this.compressionLevel = compressionLevel;
	}
688 |
	/**
	 * @return the topic metadata refresh interval, in milliseconds
	 */
	public long getTopicMetadataRefreshIntervalMs() {
		return topicMetadataRefreshIntervalMs;
	}

	/**
	 * @param topicMetadataRefreshIntervalMs the topic metadata refresh interval,
	 *        in milliseconds
	 */
	public void setTopicMetadataRefreshIntervalMs(long topicMetadataRefreshIntervalMs) {
		this.topicMetadataRefreshIntervalMs = topicMetadataRefreshIntervalMs;
	}

	/**
	 * @return the type of partitions rotation to use
	 */
	public int getPartitionsRotate() {
		return partitionsRotate;
	}

	/**
	 * @param partitionsRotate the type of partitions rotation to use
	 */
	public void setPartitionsRotate(int partitionsRotate) {
		this.partitionsRotate = partitionsRotate;
	}

	/**
	 * @return the queue enqueue timeout, in milliseconds (the sentinel value -1
	 *         is also permitted)
	 */
	public long getQueueEnqueueTimeoutMs() {
		return queueEnqueueTimeoutMs;
	}

	/**
	 * @param queueEnqueueTimeoutMs the queue enqueue timeout, in milliseconds
	 */
	public void setQueueEnqueueTimeoutMs(long queueEnqueueTimeoutMs) {
		this.queueEnqueueTimeoutMs = queueEnqueueTimeoutMs;
	}

	/**
	 * @return the topicName
	 */
	public String getTopicName() {
		return topicName;
	}

	/**
	 * @param topicName the topicName to set
	 */
	public void setTopicName(String topicName) {
		this.topicName = topicName;
	}

	/**
	 * @return the retryBackoffExponent
	 */
	public int getRetryBackoffExponent() {
		return retryBackoffExponent;
	}

	/**
	 * @return the initialSocketConnectionTimeoutMs
	 */
	public int getInitialSocketConnectionTimeoutMs() {
		return initialSocketConnectionTimeoutMs;
	}
764 |
765 | }
766 |
--------------------------------------------------------------------------------
/src/test/java/com/blackberry/bdp/krackle/KafkaClientTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.bdp.krackle;
17 |
18 | import static org.junit.Assert.assertEquals;
19 |
20 | import java.util.ArrayList;
21 | import java.util.HashMap;
22 | import java.util.List;
23 | import java.util.Map;
24 | import java.util.Properties;
25 |
26 | import kafka.consumer.ConsumerConfig;
27 | import kafka.consumer.ConsumerIterator;
28 | import kafka.consumer.KafkaStream;
29 | import kafka.javaapi.consumer.ConsumerConnector;
30 | import kafka.producer.KeyedMessage;
31 | import kafka.producer.ProducerConfig;
32 |
33 | import org.junit.AfterClass;
34 | import org.junit.BeforeClass;
35 | import org.junit.Test;
36 |
37 | import com.blackberry.bdp.krackle.consumer.ConsumerConfiguration;
38 | import com.blackberry.bdp.krackle.consumer.Consumer;
39 | import com.blackberry.bdp.krackle.meta.MetaData;
40 | import com.blackberry.bdp.krackle.producer.Producer;
41 | import com.blackberry.bdp.krackle.producer.ProducerConfiguration;
42 | import com.blackberry.testutil.LocalKafkaServer;
43 | import com.blackberry.testutil.LocalZkServer;
44 |
/**
 * End-to-end tests cross-checking the Krackle producer/consumer against the
 * standard Kafka client for every supported compression codec, using a local
 * single-node Kafka broker and ZooKeeper started in {@link #setup()}.
 *
 * <p>NOTE(review): several generic type arguments in this file appear to have
 * been stripped by the source export (raw {@code List}, {@code Map},
 * {@code Map>>}, etc.); confirm against the original file before compiling.
 */
public class KafkaClientTest {

	// Each test runs once per codec; "none" first as the baseline.
	private static final String[] COMPRESSION_METHODS = new String[]{"none",
		"snappy", "gzip"};
	// Failure captured from the consumer thread via setError(), rethrown on
	// the test thread after join().
	Throwable error = null;

	static LocalZkServer zk;
	static LocalKafkaServer kafkaServer;

	// Fixture: the log lines each test produces and then expects to consume,
	// in order.
	static List logs;

	@BeforeClass
	public static void setup() throws Exception {
		zk = new LocalZkServer();
		kafkaServer = new LocalKafkaServer();

		logs = new ArrayList<>();
		for (int i = 0; i < 100000; i++) {
			logs.add("This is a test log line. Number " + i);
		}
	}

	@AfterClass
	public static void cleanup() throws Exception {
		kafkaServer.shutdown();
		zk.shutdown();
	}

	/**
	 * Creates the topic and blocks until partition 0 reports a leader, i.e.
	 * until the broker is actually ready to serve it.
	 */
	private void setupTopic(String topic) throws Exception {
		kafkaServer.createTopic(topic);

		// Wait for everything to finish starting up. We do this by checking to
		// ensure all the topics have leaders.
		Properties producerProps = new Properties();
		producerProps.setProperty("metadata.broker.list", "localhost:9876");
		ProducerConfiguration producerConf = new ProducerConfiguration(
			 producerProps, topic);
		while (true) {
			MetaData meta;
			try {
				meta = MetaData.getMetaData(producerConf.getMetadataBrokerList(),
					 topic, "test");
				meta.getTopic(topic).getPartition(0).getLeader();
				break;
			} catch (Exception e) {
				// No leader elected yet; retry after the sleep below.
				// System.err.print("Not ready yet: ");
				// e.printStackTrace();
			} finally {
				Thread.sleep(100);
			}
		}
	}

	/** Builds the reference (standard Kafka) async producer for the codec. */
	private kafka.javaapi.producer.Producer getStdProducer(
		 String compression) {
		Properties producerProps = new Properties();
		producerProps.setProperty("metadata.broker.list", "localhost:9876");
		producerProps.setProperty("compression.codec", compression);
		producerProps.setProperty("queue.buffering.max.ms", "100");
		producerProps.setProperty("queue.enqueue.timeout.ms", "-1");
		producerProps.setProperty("request.required.acks", "1");
		producerProps.setProperty("producer.type", "async");
		producerProps.setProperty("serializer.class",
			 "kafka.serializer.StringEncoder");
		ProducerConfig producerConf = new ProducerConfig(producerProps);
		kafka.javaapi.producer.Producer producer = new kafka.javaapi.producer.Producer(
			 producerConf);
		return producer;
	}

	/** Builds a Krackle producer for the given topic and codec. */
	private Producer getKrackleProducer(String topic, String compression)
		 throws Exception {
		Properties producerProps = new Properties();
		producerProps.setProperty("metadata.broker.list", "localhost:9876");
		// NOTE(review): "compression.code" looks like a typo for
		// "compression.codec" (compare getStdProducer above). If so, the
		// requested codec is silently ignored and the Krackle producer tests
		// always run with the default codec — confirm the property name that
		// ProducerConfiguration actually reads before fixing.
		producerProps.setProperty("compression.code", compression);
		producerProps.setProperty("queue.buffering.max.ms", "100");
		producerProps.setProperty("queue.enqueue.timeout.ms", "-1");
		producerProps.setProperty("request.required.acks", "1");
		producerProps.setProperty("num.buffers", "10");
		ProducerConfiguration producerConf = new ProducerConfiguration(
			 producerProps, topic);
		Producer producer = new Producer(producerConf, "myclient", topic, "mykey",
			 null);
		return producer;
	}

	/** Builds the reference (standard Kafka) high-level consumer. */
	private ConsumerConnector getStdConsumer() {
		Properties props = new Properties();
		props.put("zookeeper.connect", "localhost:21818");
		props.put("group.id", "test");
		ConsumerConfig conf = new ConsumerConfig(props);
		return kafka.consumer.Consumer.createJavaConsumerConnector(conf);
	}

	/** Builds a Krackle consumer pinned to one topic partition. */
	private Consumer getKrackleConsumer(String topic, int partition)
		 throws Exception {
		Properties props = new Properties();
		props.setProperty("metadata.broker.list", "localhost:9876");
		ConsumerConfiguration conf = new ConsumerConfiguration(props);
		return new Consumer(conf, "test-client", topic, partition);
	}

	// Sanity check. Standard producer and consumer
	@Test
	public void testStdProducerStdConsumer() throws Throwable {
		for (String compression : COMPRESSION_METHODS) {
			final String topic = "std-std-" + compression;
			setupTopic(topic);

			ConsumerConnector consumer = getStdConsumer();
			Map topicCountMap = new HashMap();
			topicCountMap.put(topic, 1);
			final Map>> streams = consumer
				 .createMessageStreams(topicCountMap);

			error = null;
			// Consume on a background thread while the producer publishes below.
			Thread t = new Thread(new Runnable() {
				@Override
				public void run() {
					try {
						ConsumerIterator it = streams.get(topic).get(0)
							 .iterator();

						for (int i = 0; i < logs.size(); i++) {
							String line = new String(it.next().message());
							// Strip the "<timestamp> test 123 " prefix added on send.
							String message = line.split(" ", 4)[3].trim();
							assertEquals(logs.get(i), message);
						}
					} catch (Throwable t) {
						setError(t);
					}
				}

			});
			t.start();
			Thread.sleep(100);

			kafka.javaapi.producer.Producer producer = getStdProducer(compression);
			for (String log : logs) {
				producer.send(new KeyedMessage(topic, "mykey", System
					 .currentTimeMillis() + " test 123 " + log));
			}

			t.join();
			if (error != null) {
				throw error;
			}
		}
	}

	@Test
	public void testKrackleProducerStdConsumer() throws Throwable {
		for (String compression : COMPRESSION_METHODS) {
			final String topic = "lop-std-" + compression;
			setupTopic(topic);

			ConsumerConnector consumer = getStdConsumer();
			Map topicCountMap = new HashMap();
			topicCountMap.put(topic, 1);
			final Map>> streams = consumer
				 .createMessageStreams(topicCountMap);

			error = null;
			// Consume on a background thread while the producer publishes below.
			Thread t = new Thread(new Runnable() {
				@Override
				public void run() {
					try {
						ConsumerIterator it = streams.get(topic).get(0)
							 .iterator();

						for (int i = 0; i < logs.size(); i++) {
							String line = new String(it.next().message());
							// Strip the "<timestamp> test 123 " prefix added on send.
							String message = line.split(" ", 4)[3].trim();
							assertEquals(logs.get(i), message);
						}
					} catch (Throwable t) {
						setError(t);
					}
				}

			});
			t.start();
			Thread.sleep(100);

			Producer producer = getKrackleProducer(topic, compression);
			for (String log : logs) {
				byte[] msg = (System.currentTimeMillis() + " test 123 " + log)
					 .getBytes();
				producer.send(msg, 0, msg.length);
			}

			t.join();
			if (error != null) {
				throw error;
			}
		}
	}

	@Test
	public void testStdProducerKrackleConsumer() throws Throwable {
		for (String compression : COMPRESSION_METHODS) {

			final String topic = "std-loc-" + compression;
			setupTopic(topic);

			final Consumer consumer = getKrackleConsumer(topic, 0);

			error = null;
			// Consume on a background thread while the producer publishes below.
			Thread t = new Thread(new Runnable() {
				@Override
				public void run() {
					try {
						byte[] bytes = new byte[1024 * 1024];
						String line;
						String message;
						int messageLength;
						for (int i = 0; i < logs.size(); i++) {
							messageLength = -1;
							// getMessage returns -1 until a message is available.
							while (messageLength == -1) {
								messageLength = consumer.getMessage(bytes, 0, bytes.length);
							}
							line = new String(bytes, 0, messageLength);
							// Strip the "<timestamp> test 123 " prefix added on send.
							message = line.split(" ", 4)[3].trim();
							assertEquals(logs.get(i), message);
						}
					} catch (Throwable t) {
						setError(t);
					}
				}

			});
			t.start();
			// TODO: this sleep just begs for race conditions. We should be
			// waiting for the consumer to confirm that it's up, not just
			// waiting a bit of time.
			Thread.sleep(100);

			kafka.javaapi.producer.Producer producer = getStdProducer(compression);
			for (String log : logs) {
				producer.send(new KeyedMessage(topic, "mykey", System
					 .currentTimeMillis() + " test 123 " + log));
			}

			t.join();
			if (error != null) {
				throw error;
			}
		}
	}

	@Test
	public void testKrackleProducerKrackleConsumer() throws Throwable {
		for (String compression : COMPRESSION_METHODS) {
			final String topic = "lop-loc-" + compression;
			setupTopic(topic);

			final Consumer consumer = getKrackleConsumer(topic, 0);

			error = null;
			// Consume on a background thread while the producer publishes below.
			Thread t = new Thread(new Runnable() {
				@Override
				public void run() {
					try {
						byte[] bytes = new byte[1024 * 1024];
						String line;
						String message;
						int messageLength;
						for (int i = 0; i < logs.size(); i++) {
							messageLength = -1;
							// getMessage returns -1 until a message is available.
							while (messageLength == -1) {
								messageLength = consumer.getMessage(bytes, 0, bytes.length);
							}
							line = new String(bytes, 0, messageLength);
							// Strip the "<timestamp> test 123 " prefix added on send.
							message = line.split(" ", 4)[3].trim();
							assertEquals(logs.get(i), message);
						}
					} catch (Throwable t) {
						setError(t);
					}
				}

			});
			t.start();
			// TODO: this sleep just begs for race conditions. We should be
			// waiting for the consumer to confirm that it's up, not just
			// waiting a bit of time.
			Thread.sleep(100);

			Producer producer = getKrackleProducer(topic, compression);
			for (String log : logs) {
				byte[] msg = (System.currentTimeMillis() + " test 123 " + log)
					 .getBytes();
				producer.send(msg, 0, msg.length);
			}

			t.join();
			if (error != null) {
				throw error;
			}
		}
	}

	// Records a consumer-thread failure so the test thread can rethrow it.
	private void setError(Throwable t) {
		error = t;
	}

}
352 |
--------------------------------------------------------------------------------
/src/test/java/com/blackberry/testutil/LocalKafkaServer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.testutil;
17 |
18 | import java.io.File;
19 | import java.io.IOException;
20 | import java.util.Properties;
21 |
22 | import kafka.admin.TopicCommand;
23 | import kafka.server.KafkaConfig;
24 | import kafka.server.KafkaServerStartable;
25 | import kafka.admin.TopicCommand.*;
26 | import org.apache.kafka.common.security.JaasUtils;
27 | import kafka.utils.ZkUtils;
28 | import org.apache.commons.io.FileUtils;
29 |
30 | public class LocalKafkaServer {
31 |
32 | private String nodeId = "0";
33 | private String port = "9876";
34 | private String logDir = FileUtils.getTempDirectoryPath() + "/kafka.log";
35 | private String zkConnect = "localhost:21818";
36 | private KafkaServerStartable server;
37 | private ZkUtils zkUtils;
38 |
39 | public LocalKafkaServer() throws IOException {
40 |
41 | while (new File(logDir).exists()) {
42 | FileUtils.deleteDirectory(new File(logDir));
43 | }
44 |
45 | Properties props = new Properties();
46 | props.put("broker.id", nodeId);
47 | props.put("port", port);
48 | props.put("log.dir", logDir);
49 | props.put("zookeeper.connect", zkConnect);
50 | props.put("host.name", "127.0.0.1");
51 | KafkaConfig conf = new KafkaConfig(props);
52 |
53 | zkUtils = ZkUtils.apply(props.getProperty("zookeeper.connect"),
54 | 30000,
55 | 30000,
56 | JaasUtils.isZkSecurityEnabled());
57 |
58 |
59 | server = new KafkaServerStartable(conf);
60 | server.startup();
61 | }
62 |
63 | public void shutdown() throws IOException {
64 | server.shutdown();
65 | server.awaitShutdown();
66 | FileUtils.deleteDirectory(new File(logDir));
67 | }
68 |
69 | public void createTopic(String topic) {
70 | TopicCommandOptions createOpts = new TopicCommandOptions(new String[]{"--create", "--zookeeper",
71 | "localhost:21818", "--replication-factor", "1", "--partition", "1",
72 | "--topic", topic});
73 | TopicCommand.createTopic(zkUtils,createOpts);
74 | }
75 |
76 | }
77 |
--------------------------------------------------------------------------------
/src/test/java/com/blackberry/testutil/LocalZkServer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2014 BlackBerry, Limited.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.blackberry.testutil;
17 |
18 | import java.io.File;
19 | import java.io.IOException;
20 | import java.lang.reflect.Constructor;
21 | import java.lang.reflect.InvocationTargetException;
22 | import java.lang.reflect.Method;
23 | import java.net.InetSocketAddress;
24 |
25 | import org.apache.commons.io.FileUtils;
26 | import org.apache.zookeeper.server.ZooKeeperServer;
27 | import org.slf4j.Logger;
28 | import org.slf4j.LoggerFactory;
29 |
30 | public class LocalZkServer {
31 |
32 | private static final Logger LOG = LoggerFactory
33 | .getLogger(LocalZkServer.class);
34 |
35 | private final int clientPort = 21818; // non-standard
36 | private final int numConnections = 5000;
37 | private final int tickTime = 2000;
38 |
39 | private Class> factoryClass;
40 | private Object standaloneServerFactory;
41 | private File dir;
42 |
43 | private ZooKeeperServer server;
44 |
45 | public LocalZkServer() throws InstantiationException, IllegalAccessException,
46 | SecurityException, NoSuchMethodException, IllegalArgumentException,
47 | InvocationTargetException, ClassNotFoundException, IOException {
48 | String dataDirectory = System.getProperty("java.io.tmpdir");
49 |
50 | dir = new File(dataDirectory, "zookeeper").getAbsoluteFile();
51 |
52 | while (dir.exists()) {
53 | LOG.info("deleting {}", dir);
54 | FileUtils.deleteDirectory(dir);
55 | }
56 |
57 | server = new ZooKeeperServer(dir, dir, tickTime);
58 |
59 | // The class that we need changed name between CDH3 and CDH4, so let's
60 | // check
61 | // for the right version here.
62 | try {
63 | factoryClass = Class
64 | .forName("org.apache.zookeeper.server.NIOServerCnxnFactory");
65 |
66 | standaloneServerFactory = factoryClass.newInstance();
67 | Method configure = factoryClass.getMethod("configure",
68 | InetSocketAddress.class, Integer.TYPE);
69 | configure.invoke(standaloneServerFactory, new InetSocketAddress(
70 | clientPort), numConnections);
71 | Method startup = factoryClass.getMethod("startup", ZooKeeperServer.class);
72 | startup.invoke(standaloneServerFactory, server);
73 |
74 | } catch (ClassNotFoundException e) {
75 | LOG.info("Did not find NIOServerCnxnFactory");
76 | try {
77 | factoryClass = Class
78 | .forName("org.apache.zookeeper.server.NIOServerCnxn$Factory");
79 |
80 | Constructor> constructor = factoryClass.getConstructor(
81 | InetSocketAddress.class, Integer.TYPE);
82 | standaloneServerFactory = constructor.newInstance(
83 | new InetSocketAddress(clientPort), numConnections);
84 | Method startup = factoryClass.getMethod("startup",
85 | ZooKeeperServer.class);
86 | startup.invoke(standaloneServerFactory, server);
87 |
88 | } catch (ClassNotFoundException e1) {
89 | LOG.info("Did not find NIOServerCnxn.Factory");
90 | throw new ClassNotFoundException(
91 | "Can't find NIOServerCnxnFactory or NIOServerCnxn.Factory");
92 | }
93 | }
94 | }
95 |
96 | public void shutdown() throws IllegalArgumentException,
97 | IllegalAccessException, InvocationTargetException, SecurityException,
98 | NoSuchMethodException, IOException {
99 | server.shutdown();
100 |
101 | Method shutdown = factoryClass.getMethod("shutdown", new Class>[]{});
102 | shutdown.invoke(standaloneServerFactory, new Object[]{});
103 |
104 | while (dir.exists()) {
105 | LOG.info("deleting {}", dir);
106 | FileUtils.deleteDirectory(dir);
107 | }
108 | }
109 |
110 | public Class> getFactoryClass() {
111 | return factoryClass;
112 | }
113 |
114 | public void setFactoryClass(Class> factoryClass) {
115 | this.factoryClass = factoryClass;
116 | }
117 |
118 | public Object getStandaloneServerFactory() {
119 | return standaloneServerFactory;
120 | }
121 |
122 | public void setStandaloneServerFactory(Object standaloneServerFactory) {
123 | this.standaloneServerFactory = standaloneServerFactory;
124 | }
125 |
126 | public File getDir() {
127 | return dir;
128 | }
129 |
130 | public int getClientport() {
131 | return clientPort;
132 | }
133 |
134 | public int getNumconnections() {
135 | return numConnections;
136 | }
137 |
138 | public int getTicktime() {
139 | return tickTime;
140 | }
141 |
142 | public ZooKeeperServer getServer() {
143 | return server;
144 | }
145 |
146 | }
147 |
--------------------------------------------------------------------------------
/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Copyright 2014 BlackBerry, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | log4j.rootLogger=INFO, CONSOLE
16 |
17 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
18 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
19 | log4j.appender.CONSOLE.layout.ConversionPattern=%d{yyyy-MM-dd'T'HH:mm:ss'.'SSSZ} %p %c: %m%n
20 |
21 |
--------------------------------------------------------------------------------