├── .gitignore ├── README.md ├── dropwizard-extra-curator ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── curator │ │ ├── CuratorFactory.java │ │ ├── ManagedCuratorFramework.java │ │ ├── ensemble │ │ ├── DropwizardConfiguredEnsembleProvider.java │ │ └── DropwizardConfiguredZooKeeperFactory.java │ │ └── health │ │ └── CuratorHealthCheck.java │ └── test │ ├── java │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── curator │ │ ├── CuratorFactoryTest.java │ │ └── ensemble │ │ └── DynamicZooKeeperConfigurationTest.java │ └── resources │ └── yaml │ └── curator.yaml ├── dropwizard-extra-hbase ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── hbase │ │ ├── BoundedHBaseClient.java │ │ ├── HBaseClient.java │ │ ├── HBaseClientFactory.java │ │ ├── HBaseClientProxy.java │ │ ├── HBaseHealthCheck.java │ │ ├── InstrumentedHBaseClient.java │ │ ├── ManagedHBaseClient.java │ │ ├── metrics │ │ ├── HBaseInstrumentation.java │ │ └── ScannerInstrumentation.java │ │ ├── scanner │ │ ├── BoundedRowScanner.java │ │ ├── InstrumentedRowScanner.java │ │ ├── RowScanner.java │ │ └── RowScannerProxy.java │ │ └── util │ │ ├── PermitReleasingCallback.java │ │ └── TimerStoppingCallback.java │ └── test │ ├── java │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── hbase │ │ ├── BoundedHBaseClientTest.java │ │ ├── HBaseClientFactoryTest.java │ │ ├── HBaseClientProxyTest.java │ │ ├── InstrumentedHBaseClientTest.java │ │ └── util │ │ ├── PermitReleasingCallbackTest.java │ │ └── TimerStoppingCallbackTest.java │ └── resources │ └── yaml │ └── hbase.yml ├── dropwizard-extra-kafka ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── kafka │ │ ├── KafkaClientFactory.java │ │ ├── KafkaConsumerFactory.java │ │ ├── KafkaProducerFactory.java │ │ ├── consumer │ │ ├── KafkaConsumer.java │ │ ├── KafkaConsumerHealthCheck.java │ │ ├── MessageProcessor.java │ │ ├── StreamProcessor.java │ │ └── SynchronousConsumer.java │ │ ├── producer │ │ ├── InstrumentedProducer.java │ │ ├── KafkaProducer.java │ │ ├── ManagedProducer.java │ │ └── ProxyProducer.java │ │ ├── serializer │ │ ├── JacksonDecoder.java │ │ └── JacksonEncoder.java │ │ └── util │ │ └── Compression.java │ └── test │ ├── java │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── kafka │ │ ├── KafkaConsumerFactoryTest.java │ │ ├── KafkaProducerFactoryTest.java │ │ ├── consumer │ │ └── SynchronousConsumerTest.java │ │ └── util │ │ └── CompressionTest.java │ └── resources │ └── yaml │ ├── consumer.yaml │ └── producer.yaml ├── dropwizard-extra-kafka7 ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── kafka │ │ ├── KafkaClientFactory.java │ │ ├── KafkaConsumerFactory.java │ │ ├── KafkaProducerFactory.java │ │ ├── consumer │ │ ├── KafkaConsumer.java │ │ ├── KafkaConsumerHealthCheck.java │ │ ├── MessageProcessor.java │ │ ├── StreamProcessor.java │ │ └── SynchronousConsumer.java │ │ ├── producer │ │ ├── InstrumentedProducer.java │ │ ├── KafkaProducer.java │ │ ├── ManagedProducer.java │ │ └── ProxyProducer.java │ │ ├── serializer │ │ ├── JacksonDecoder.java │ │ └── JacksonEncoder.java │ │ └── util │ │ └── Compression.java │ └── test │ ├── java │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── kafka │ │ ├── KafkaConsumerFactoryTest.java │ │ ├── KafkaProducerFactoryTest.java │ │ ├── consumer │ │ └── SynchronousConsumerTest.java │ │ └── util │ │ └── CompressionTest.java │ └── resources │ └── yaml │ ├── 
consumer.yaml │ └── producer.yaml ├── dropwizard-extra-util ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── datasift │ │ └── dropwizard │ │ ├── health │ │ └── SocketHealthCheck.java │ │ └── util │ │ ├── Classes.java │ │ ├── Exceptions.java │ │ └── Primitives.java │ └── test │ └── java │ └── com │ └── datasift │ └── dropwizard │ └── util │ └── ClassesTest.java ├── dropwizard-extra-zookeeper ├── pom.xml └── src │ ├── main │ └── java │ │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── zookeeper │ │ ├── ManagedZooKeeper.java │ │ ├── ZooKeeperFactory.java │ │ └── health │ │ └── ZooKeeperHealthCheck.java │ └── test │ ├── java │ └── com │ │ └── datasift │ │ └── dropwizard │ │ └── zookeeper │ │ └── ZooKeeperFactoryTest.java │ └── resources │ └── yaml │ └── zookeeper.yaml ├── pom.xml └── src └── site └── site.xml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | target/ 3 | *.iml 4 | *.ipr 5 | release.properties 6 | *atlassian* 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Dropwizard Extra 2 | ================ 3 | 4 | *For those not content with the already excellent [Dropwizard](http://github.com/codahale/dropwizard)* 5 | 6 | This is a bunch of additional abstractions and utilities that extend Dropwizard. 7 | 8 | To keep the nightmare of transitive dependencies at bay, there are several 9 | modules: 10 | 11 | * [dropwizard-extra-core](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-core) 12 | contains several simple but useful abstractions with no real external dependencies. 13 | * [dropwizard-extra-curator](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-curator) 14 | integrates [NetFlix's Curator](http://github.com/netflix/curator) high-level [ZooKeeper](http://zookeeper.apache.org) 15 | client with Dropwizard for working with ZooKeeper directly. 16 | * [dropwizard-extra-hbase](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-hbase) 17 | integrates [StumbleUpon's asynchbase](http://github.com/stumbleupon/asynchbase) with Dropwizard for 18 | working with [HBase](http://hbase.apache.org) 19 | * [dropwizard-extra-kafka](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-kafka) for 20 | working with [Apache Kafka](http://incubator.apache.org/kafka). 21 | * [dropwizard-extra-scala](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-scala) provides 22 | Scala integrations for Dropwizard and a more idiomatic Scala API to the other Dropwizard Extra modules. 23 | * [dropwizard-extra-zookeeper](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-zookeeper) 24 | integrates the low-level [Apache ZooKeeper](http://zookeeper.apache.org/) client in to Dropwizards life-cycle. If 25 | you're using ZooKeeper directly in your application, it's strongly recommended that you use the higher-level 26 | [dropwizard-extra-curator](http://github.com/datasift/dropwizard-extra/tree/develop/dropwizard-extra-curator) 27 | instead. 28 | 29 | Full documentation for the latest release is available on the 30 | [generated Maven Site](http://datasift.github.com/dropwizard-extra/). 
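All of the client modules hang off your application's `Configuration` in the same way: a `*Factory` is bound from YAML and then asked to `build(...)` a managed, health-checked client against the `Environment`. A minimal sketch using the Curator module (the `MyConfiguration`/`MyApplication` classes and the `curator` YAML key are illustrative, not part of the library):

```java
import com.datasift.dropwizard.curator.CuratorFactory;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.dropwizard.Application;
import io.dropwizard.Configuration;
import io.dropwizard.setup.Environment;
import org.apache.curator.framework.CuratorFramework;

import javax.validation.Valid;
import javax.validation.constraints.NotNull;

class MyConfiguration extends Configuration {

    @Valid
    @NotNull
    @JsonProperty("curator")
    private CuratorFactory curator = new CuratorFactory();

    public CuratorFactory getCuratorFactory() {
        return curator;
    }
}

public class MyApplication extends Application<MyConfiguration> {

    @Override
    public void run(final MyConfiguration configuration, final Environment environment) {
        // build() registers the client with the Environment's lifecycle and health-checks,
        // so it is started and stopped along with the application.
        final CuratorFramework curator = configuration.getCuratorFactory().build(environment);
        // ... hand `curator` to your resources ...
    }
}
```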
31 | 32 | Usage 33 | ----- 34 | 35 | Dropwizard Extra is published to [Maven Central](http://search.maven.org/#search|ga|1|g%3Acom.datasift.dropwizard), 36 | so just add the module(s) you wish to use to your `pom.xml`: 37 | 38 | ```xml 39 | 40 | 41 | com.datasift.dropwizard 42 | dropwizard-extra-core 43 | 0.6.2-1 44 | 45 | 46 | ``` 47 | 48 | Or whatever you need to do to make SBT/Gradle/Ivy/Buildr/etc. happy. 49 | 50 | Versioning 51 | ---------- 52 | 53 | Dropwizard Extra is versioned in lock-step with upstream Dropwizard. 54 | 55 | All Dropwizard Extra modules have a transitive dependency on the version of Dropwizard they're built against. The 56 | versioning scheme for Dropwizard Extra is as follows: 57 | 58 | ${dropwizard.version}-{dw-extra.release.number} 59 | 60 | The "release number" signifies the differences between two builds of Dropwizard Extra that are built against the same 61 | upstream version of Dropwizard. 62 | 63 | The practical consequence of this is that an upgrade of Dropwizard Extra will often require an upgrade of Dropwizard 64 | itself, however, this is always clearly indicated by the version number of Dropwizard Extra itself. 65 | 66 | License 67 | ------- 68 | 69 | This software is licensed under the [Apache License Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) 70 | 71 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | com.datasift.dropwizard 7 | dropwizard-extra 8 | 0.7.1-2-SNAPSHOT 9 | ../pom.xml 10 | 11 | 12 | dropwizard-extra-curator 13 | 14 | Dropwizard Extra Curator 15 | http://datasift.github.com/dropwizard-extra/dropwizard-extra-curator 16 | 17 | Dropwizard integration for working with ZooKeeper using Netflix's Curator client. 
18 | 19 | 20 | 21 | 22 | io.dropwizard 23 | dropwizard-core 24 | 25 | 26 | com.datasift.dropwizard 27 | dropwizard-extra-zookeeper 28 | ${project.version} 29 | 30 | 31 | org.apache.curator 32 | curator-framework 33 | 2.0.1-incubating 34 | 35 | 36 | 37 | com.google.guava 38 | guava 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | maven-javadoc-plugin 48 | 49 | 50 | http://netflix.github.com/curator/doc/ 51 | http://zookeeper.apache.org/doc/current/api/ 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/CuratorFactory.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator; 2 | 3 | import io.dropwizard.util.Duration; 4 | import com.datasift.dropwizard.curator.ensemble.DropwizardConfiguredEnsembleProvider; 5 | import com.datasift.dropwizard.curator.ensemble.DropwizardConfiguredZooKeeperFactory; 6 | import com.datasift.dropwizard.curator.health.CuratorHealthCheck; 7 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 8 | import com.fasterxml.jackson.annotation.JsonProperty; 9 | import com.google.common.util.concurrent.ThreadFactoryBuilder; 10 | import org.apache.curator.RetryPolicy; 11 | import org.apache.curator.framework.CuratorFramework; 12 | import org.apache.curator.framework.CuratorFrameworkFactory; 13 | import io.dropwizard.setup.Environment; 14 | import org.apache.curator.framework.api.CompressionProvider; 15 | import org.apache.curator.framework.imps.GzipCompressionProvider; 16 | import org.apache.curator.retry.ExponentialBackoffRetry; 17 | 18 | import javax.validation.Valid; 19 | import javax.validation.constraints.Min; 20 | import javax.validation.constraints.NotNull; 21 | 22 | /** 23 | * A factory for creating and managing {@link CuratorFramework} instances. 24 | *
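* For illustration, a hypothetical use from a Dropwizard application's {@code run} method, with the
* factory configured programmatically rather than bound from YAML (the instance name and settings
* are arbitrary):
* <pre>{@code
* final CuratorFactory factory = new CuratorFactory();
* factory.setMaxRetries(5);
* factory.setBackOffBaseTime(Duration.seconds(2));
* // environment is the io.dropwizard.setup.Environment passed to Application#run
* final CuratorFramework curator = factory.build(environment, "curator-example");
* }</pre>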
<p/>
25 | * The resulting {@link CuratorFramework} will have its lifecycle managed by the {@link Environment} 26 | * and will have {@link com.codahale.metrics.health.HealthCheck}s installed for the underlying ZooKeeper 27 | * ensemble. 28 | * 29 | * @see CuratorFramework 30 | */ 31 | public class CuratorFactory { 32 | 33 | private static final String DEFAULT_NAME = "curator-default"; 34 | 35 | /** 36 | * An enumeration of the available compression codecs available for compressed entries. 37 | * 38 | * @see #getCompressionProvider() 39 | * @see CompressionProvider 40 | */ 41 | enum CompressionCodec { 42 | 43 | /** 44 | * GZIP compression. 45 | * 46 | * @see GzipCompressionProvider 47 | */ 48 | GZIP(new GzipCompressionProvider()); 49 | 50 | final private CompressionProvider provider; 51 | 52 | CompressionCodec(final CompressionProvider provider) { 53 | this.provider = provider; 54 | } 55 | 56 | /** 57 | * Gets the {@link CompressionProvider} for this codec. 58 | * 59 | * @return the provider for this codec. 60 | */ 61 | public CompressionProvider getProvider() { 62 | return provider; 63 | } 64 | } 65 | 66 | @Valid 67 | @NotNull 68 | protected ZooKeeperFactory ensemble = new ZooKeeperFactory(); 69 | 70 | @Min(0) 71 | protected int maxRetries = 1; 72 | 73 | @NotNull 74 | protected Duration backOffBaseTime = Duration.seconds(1); 75 | 76 | @NotNull 77 | protected CompressionCodec compression = CompressionCodec.GZIP; 78 | 79 | /** 80 | * Returns a {@link ZooKeeperFactory} for the ZooKeeper ensemble to connect to. 81 | * 82 | * @return a factory for the ZooKeeper ensemble for the client. 83 | */ 84 | @JsonProperty("ensemble") 85 | public ZooKeeperFactory getZooKeeperFactory() { 86 | return ensemble; 87 | } 88 | 89 | /** 90 | * Sets the {@link ZooKeeperFactory} for the ZooKeeper ensemble to connect to. 91 | * 92 | * @param factory the factory for the ZooKeeper ensemble for the client. 93 | */ 94 | @JsonProperty("ensemble") 95 | public void setZooKeeperFactory(final ZooKeeperFactory factory) { 96 | this.ensemble = factory; 97 | } 98 | 99 | /** 100 | * Returns the maximum number of retries to attempt to connect to the ensemble. 101 | * 102 | * @return the maximum number of connection attempts. 103 | */ 104 | @JsonProperty 105 | public int getMaxRetries() { 106 | return maxRetries; 107 | } 108 | 109 | /** 110 | * Sets the maximum number of retries to attempt to connect to the ensemble. 111 | * 112 | * @param maxRetries the maximum number of connection attempts. 113 | */ 114 | @JsonProperty 115 | public void setMaxRetries(final int maxRetries) { 116 | this.maxRetries = maxRetries; 117 | } 118 | 119 | /** 120 | * Returns the initial time to wait before retrying a failed connection. 121 | *
<p/>
122 | * Subsequent retries will wait an exponential amount of time more than this. 123 | * 124 | * @return the initial time to wait before trying to connect again. 125 | */ 126 | @JsonProperty 127 | public Duration getBackOffBaseTime() { 128 | return backOffBaseTime; 129 | } 130 | 131 | /** 132 | * Sets the initial time to wait before retrying a failed connection. 133 | *
<p/>
134 | * Subsequent retries will wait an exponential amount of time more than this. 135 | * 136 | * @param backOffBaseTime the initial time to wait before trying to connect again. 137 | */ 138 | @JsonProperty 139 | public void setBackOffBaseTime(final Duration backOffBaseTime) { 140 | this.backOffBaseTime = backOffBaseTime; 141 | } 142 | 143 | /** 144 | * Returns a {@link RetryPolicy} for handling failed connection attempts. 145 | *
<p/>
146 | * Always configures an {@link ExponentialBackoffRetry} based on the {@link #getMaxRetries() 147 | * maximum retries} and {@link #getBackOffBaseTime() initial back-off} configured. 148 | * 149 | * @return a {@link RetryPolicy} for handling failed connection attempts. 150 | * 151 | * @see #getMaxRetries() 152 | * @see #getBackOffBaseTime() 153 | */ 154 | public RetryPolicy getRetryPolicy() { 155 | return new ExponentialBackoffRetry((int) backOffBaseTime.toMilliseconds(), maxRetries); 156 | } 157 | 158 | /** 159 | * Returns the {@link CompressionCodec} to compress values with. 160 | * 161 | * @return the compression codec to compress values with. 162 | * 163 | * @see CompressionCodec 164 | */ 165 | @JsonProperty("compression") 166 | public CompressionCodec getCompressionCodec() { 167 | return compression; 168 | } 169 | 170 | /** 171 | * Sets a {@link CompressionCodec} to compress values with. 172 | * 173 | * @param codec the compression codec to compress values with. 174 | * 175 | * @see CompressionCodec 176 | */ 177 | @JsonProperty("compression") 178 | public void setCompressionCodec(final CompressionCodec codec) { 179 | this.compression = codec; 180 | } 181 | 182 | /** 183 | * Returns a {@link CompressionProvider} to compress values with. 184 | * 185 | * @return the compression provider used to compress values. 186 | * 187 | * @see CompressionCodec 188 | */ 189 | public CompressionProvider getCompressionProvider() { 190 | return getCompressionCodec().getProvider(); 191 | } 192 | 193 | /** 194 | * Builds a default {@link CuratorFramework} for the given {@link Environment}. 195 | * 196 | * @param environment the {@link Environment} to build the {@link CuratorFramework} for. 197 | * 198 | * @return a {@link CuratorFramework} instance, managed and configured. 199 | */ 200 | public CuratorFramework build(final Environment environment) { 201 | return build(environment, DEFAULT_NAME); 202 | } 203 | 204 | /** 205 | * Builds a {@link CuratorFramework} instance with the given {@code name} for an {@link 206 | * Environment}. 207 | * 208 | * @param environment the {@link Environment} to build the {@link CuratorFramework} for. 209 | * @param name the name for the {@link CuratorFramework} instance. 210 | * 211 | * @return a {@link CuratorFramework} instance, managed and configured. 212 | */ 213 | public CuratorFramework build(final Environment environment, final String name) { 214 | final ZooKeeperFactory factory = getZooKeeperFactory(); 215 | final String namespace = factory.getNamespace(); 216 | final CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder() 217 | .zookeeperFactory(new DropwizardConfiguredZooKeeperFactory(environment, name)) 218 | .ensembleProvider(new DropwizardConfiguredEnsembleProvider(factory)) 219 | .connectionTimeoutMs((int) factory.getConnectionTimeout().toMilliseconds()) 220 | .threadFactory(new ThreadFactoryBuilder().setNameFormat(name + "-%d").build()) 221 | .sessionTimeoutMs((int) factory.getSessionTimeout().toMilliseconds()) 222 | .namespace(namespace.startsWith("/") ? 
namespace.substring(1) : namespace) 223 | .compressionProvider(getCompressionProvider()) 224 | .retryPolicy(getRetryPolicy()) 225 | .canBeReadOnly(factory.isReadOnly()); 226 | 227 | // add optional auth details 228 | final ZooKeeperFactory.Auth auth = factory.getAuth(); 229 | if (auth != null) { 230 | builder.authorization(auth.getScheme(), auth.getId().getBytes()); 231 | } 232 | 233 | final CuratorFramework framework = builder.build(); 234 | 235 | environment.healthChecks().register(name, new CuratorHealthCheck(framework)); 236 | environment.lifecycle().manage(new ManagedCuratorFramework(framework)); 237 | 238 | return framework; 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ManagedCuratorFramework.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator; 2 | 3 | import org.apache.curator.framework.CuratorFramework; 4 | import io.dropwizard.lifecycle.Managed; 5 | 6 | /** 7 | * Manages the lifecycle of a {@link CuratorFramework} instance. 8 | */ 9 | class ManagedCuratorFramework implements Managed { 10 | 11 | private final CuratorFramework framework; 12 | 13 | /** 14 | * Manage the given {@link CuratorFramework} instance. 15 | * 16 | * @param framework the Curator instance to manage. 17 | */ 18 | public ManagedCuratorFramework(final CuratorFramework framework) { 19 | this.framework = framework; 20 | } 21 | 22 | @Override 23 | public void start() throws Exception { 24 | framework.start(); 25 | } 26 | 27 | @Override 28 | public void stop() throws Exception { 29 | framework.close(); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredEnsembleProvider.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator.ensemble; 2 | 3 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 4 | import org.apache.curator.ensemble.EnsembleProvider; 5 | 6 | import java.io.IOException; 7 | 8 | /** 9 | * An {@link EnsembleProvider} for a fixed ensemble, configured by a {@link ZooKeeperFactory}. 10 | */ 11 | public class DropwizardConfiguredEnsembleProvider implements EnsembleProvider { 12 | 13 | private final ZooKeeperFactory factory; 14 | 15 | /** 16 | * Initializes this provider with the given {@code configuration}. 17 | * 18 | * @param factory a factory for ZooKeeper client instances. 
19 | */ 20 | public DropwizardConfiguredEnsembleProvider(final ZooKeeperFactory factory) { 21 | this.factory = factory; 22 | } 23 | 24 | @Override 25 | public void start() throws Exception { 26 | // nothing to do 27 | } 28 | 29 | @Override 30 | public String getConnectionString() { 31 | return factory.getQuorumSpec(); 32 | } 33 | 34 | @Override 35 | public void close() throws IOException { 36 | // nothing to do 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/ensemble/DropwizardConfiguredZooKeeperFactory.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator.ensemble; 2 | 3 | import io.dropwizard.setup.Environment; 4 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 5 | import org.apache.curator.utils.ZookeeperFactory; 6 | import io.dropwizard.util.Duration; 7 | import org.apache.zookeeper.Watcher; 8 | import org.apache.zookeeper.ZooKeeper; 9 | 10 | import java.util.regex.Matcher; 11 | import java.util.regex.Pattern; 12 | 13 | /** 14 | * Provides integration for Dropwizard's ZooKeeper functionality with Curator. 15 | *
<p/>
16 | * This ensures that {@link ZooKeeper} instances created by Curator integrate properly with the 17 | * Dropwizard application life-cycle. 18 | */ 19 | public class DropwizardConfiguredZooKeeperFactory implements ZookeeperFactory { 20 | 21 | private static final Pattern PORT_PATTERN = Pattern.compile(":(\\d+)"); 22 | 23 | private final String name; 24 | private final Environment environment; 25 | 26 | /** 27 | * Initializes this factory with the {@link ZooKeeperFactory} to create {@link ZooKeeper} 28 | * clients from. 29 | * 30 | * @param name the name of the Curator instance creating {@link ZooKeeper} clients. 31 | */ 32 | public DropwizardConfiguredZooKeeperFactory(final Environment environment, final String name) { 33 | this.environment = environment; 34 | this.name = name; 35 | } 36 | 37 | @Override 38 | public ZooKeeper newZooKeeper(final String connectString, 39 | final int sessionTimeout, 40 | final Watcher watcher, 41 | final boolean canBeReadOnly) throws Exception { 42 | 43 | return new DynamicZooKeeperFactory(connectString, sessionTimeout, canBeReadOnly) 44 | .build(environment, watcher, String.format("curator-%s", name)); 45 | } 46 | 47 | static class DynamicZooKeeperFactory extends ZooKeeperFactory { 48 | 49 | DynamicZooKeeperFactory(final String connectString, 50 | final int sessionTimeout, 51 | final boolean canBeReadOnly) { 52 | final int idx = connectString.indexOf('/'); 53 | final int hostLength = idx == -1 ? connectString.length() : idx; 54 | final String authority = connectString.substring(0, hostLength); 55 | final Matcher matcher = PORT_PATTERN.matcher(authority); 56 | this.port = matcher.find() ? Integer.parseInt(matcher.group(1)) : port; 57 | this.hosts = matcher.replaceAll("").split(","); 58 | this.namespace = idx == -1 ? "/" : connectString.substring(idx); 59 | this.sessionTimeout = Duration.milliseconds(sessionTimeout); 60 | this.readOnly = canBeReadOnly; 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/main/java/com/datasift/dropwizard/curator/health/CuratorHealthCheck.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator.health; 2 | 3 | import org.apache.curator.framework.CuratorFramework; 4 | import com.codahale.metrics.health.HealthCheck; 5 | import org.apache.curator.framework.imps.CuratorFrameworkState; 6 | 7 | /** 8 | * A {@link HealthCheck} that ensures a {@link CuratorFramework} is started and that the configured 9 | * root namespace exists. 10 | */ 11 | public class CuratorHealthCheck extends HealthCheck { 12 | 13 | private final CuratorFramework framework; 14 | 15 | /** 16 | * Create a new {@link HealthCheck} instance with the given name. 17 | * 18 | * @param framework The {@link CuratorFramework} instance to check the health of. 19 | */ 20 | public CuratorHealthCheck(final CuratorFramework framework) { 21 | this.framework = framework; 22 | } 23 | 24 | /** 25 | * Checks that the {@link CuratorFramework} instance is started and that the configured root 26 | * namespace exists. 27 | * 28 | * @return {@link Result#unhealthy(String)} if the {@link CuratorFramework} is not started or 29 | * the configured root namespace does not exist; otherwise, {@link Result#healthy()}. 30 | * @throws Exception if an error occurs checking the health of the ZooKeeper ensemble. 
31 | */ 32 | @Override 33 | protected Result check() throws Exception { 34 | final String namespace = framework.getNamespace(); 35 | if (framework.getState() != CuratorFrameworkState.STARTED) { 36 | return Result.unhealthy("Client not started"); 37 | } else if (framework.checkExists().forPath(namespace.isEmpty() ? "/" : "") == null) { 38 | return Result.unhealthy("Root for namespace does not exist"); 39 | } 40 | 41 | return Result.healthy(); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/test/java/com/datasift/dropwizard/curator/CuratorFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator; 2 | 3 | import io.dropwizard.configuration.ConfigurationFactory; 4 | import io.dropwizard.jackson.Jackson; 5 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 6 | import com.google.common.io.Resources; 7 | import org.apache.curator.framework.api.CompressionProvider; 8 | import org.apache.curator.retry.ExponentialBackoffRetry; 9 | import org.junit.Before; 10 | import org.junit.Test; 11 | 12 | import javax.validation.Validation; 13 | import javax.validation.Validator; 14 | import java.io.File; 15 | 16 | import static org.hamcrest.Matchers.*; 17 | import static org.junit.Assert.assertThat; 18 | 19 | /** Tests {@link CuratorConfiguration} */ 20 | public class CuratorFactoryTest { 21 | 22 | private CuratorFactory factory = null; 23 | 24 | @Before 25 | public void setup() throws Exception { 26 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 27 | factory = new ConfigurationFactory<>(CuratorFactory.class, validator, Jackson.newObjectMapper(), "dw") 28 | .build(new File(Resources.getResource("yaml/curator.yaml").toURI())); 29 | } 30 | 31 | @Test 32 | public void testZooKeeper() { 33 | assertThat("has ZooKeeperConfiguration", 34 | factory.getZooKeeperFactory(), 35 | instanceOf(ZooKeeperFactory.class)); 36 | } 37 | 38 | @Test 39 | public void testRetryPolicy() { 40 | assertThat("has RetryPolicy", 41 | factory.getRetryPolicy(), 42 | instanceOf(ExponentialBackoffRetry.class)); 43 | } 44 | 45 | @Test 46 | public void testCompressionCodec() { 47 | assertThat("has CompressionCodec", 48 | factory.getCompressionCodec(), 49 | is(CuratorFactory.CompressionCodec.GZIP)); 50 | } 51 | 52 | @Test 53 | public void testCompressionProvider() { 54 | assertThat("supplied CompressionProvider", 55 | factory.getCompressionProvider(), 56 | instanceOf(CompressionProvider.class)); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/test/java/com/datasift/dropwizard/curator/ensemble/DynamicZooKeeperConfigurationTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.curator.ensemble; 2 | 3 | import org.junit.Test; 4 | 5 | import static org.junit.Assert.*; 6 | import static org.hamcrest.Matchers.*; 7 | 8 | /** 9 | * Tests {@link DropwizardConfiguredZooKeeperFactory.DynamicZooKeeperFactory}. 
10 | */ 11 | public class DynamicZooKeeperConfigurationTest { 12 | 13 | @Test 14 | public void parsesFullConnectionString() { 15 | final String hostname = "zookeeper.lan"; 16 | final int port = 2182; 17 | final DropwizardConfiguredZooKeeperFactory.DynamicZooKeeperFactory factory 18 | = new DropwizardConfiguredZooKeeperFactory.DynamicZooKeeperFactory( 19 | hostname + ":" + port, 0, true); 20 | 21 | assertThat("parses hostname from connection string", 22 | factory.getHosts(), 23 | is(equalTo(new String[] { hostname }))); 24 | 25 | assertThat("parses port from connection string", 26 | factory.getPort(), 27 | is(port)); 28 | } 29 | 30 | @Test 31 | public void parsesConnectionStringWithDefaultPort() { 32 | final String hostname = "zookeeper.lan"; 33 | final int port = 2181; 34 | final DropwizardConfiguredZooKeeperFactory.DynamicZooKeeperFactory conf 35 | = new DropwizardConfiguredZooKeeperFactory.DynamicZooKeeperFactory( 36 | hostname, 0, true); 37 | 38 | assertThat("parses hostname from connection string", 39 | conf.getHosts(), 40 | is(equalTo(new String[] { hostname }))); 41 | 42 | assertThat("uses default port", conf.getPort(), is(port)); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /dropwizard-extra-curator/src/test/resources/yaml/curator.yaml: -------------------------------------------------------------------------------- 1 | ensemble: 2 | hosts: 3 | - test1 4 | - test2 5 | port: 2182 6 | sessionTimeout: 30 seconds 7 | maxRetries: 5 8 | backOffBaseTime: 2 seconds 9 | compression: gzip 10 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | com.datasift.dropwizard 7 | dropwizard-extra 8 | 0.7.1-2-SNAPSHOT 9 | ../pom.xml 10 | 11 | 12 | dropwizard-extra-hbase 13 | 14 | Dropwizard Extra HBase 15 | http://datasift.github.com/dropwizard-extra/dropwizard-extra-hbase 16 | 17 | Dropwizard integration for working with HBase in Scala. 18 | 19 | 20 | 21 | 22 | io.dropwizard 23 | dropwizard-core 24 | 25 | 26 | com.datasift.dropwizard 27 | dropwizard-extra-zookeeper 28 | ${project.version} 29 | 30 | 31 | org.hbase 32 | asynchbase 33 | 1.4.1 34 | 35 | 36 | 37 | 38 | 39 | 40 | maven-javadoc-plugin 41 | 42 | 43 | http://tsunanet.net/~tsuna/async/api/ 44 | http://tsunanet.net/~tsuna/asynchbase/api/ 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClient.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase; 2 | 3 | import com.datasift.dropwizard.hbase.scanner.RowScanner; 4 | import com.stumbleupon.async.Deferred; 5 | import io.dropwizard.util.Duration; 6 | import io.dropwizard.util.Size; 7 | import org.hbase.async.*; 8 | import org.jboss.netty.util.Timer; 9 | 10 | import java.util.ArrayList; 11 | 12 | /** 13 | * Client for interacting with an HBase cluster. 14 | *
<p/>
15 | * To create an instance, use {@link HBaseClientFactory}. 16 | *
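* A usage sketch, assuming {@code client} was already built by an {@link HBaseClientFactory} and
* using asynchbase's request types; the table, row and cell names are illustrative:
* <pre>{@code
* client.put(new PutRequest("table".getBytes(), "row".getBytes(), "family".getBytes(),
*                           "qualifier".getBytes(), "value".getBytes()))
*       .joinUninterruptibly();
* final ArrayList<KeyValue> cells =
*         client.get(new GetRequest("table".getBytes(), "row".getBytes())).joinUninterruptibly();
* }</pre>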
<p/>
17 | * All implementations are wrapper proxies around {@link org.hbase.async.HBaseClient} providing 18 | * additional functionality. 19 | * 20 | * @see HBaseClientFactory 21 | * @see org.hbase.async.HBaseClient 22 | */ 23 | public interface HBaseClient { 24 | 25 | /** 26 | * Get the maximum time for which edits may be buffered before being flushed. 27 | * 28 | * @return the maximum time for which edits may be buffered. 29 | * 30 | * @see org.hbase.async.HBaseClient#getFlushInterval() 31 | */ 32 | public Duration getFlushInterval(); 33 | 34 | /** 35 | * Get the capacity of the increment buffer. 36 | * 37 | * @return the capacity of the increment buffer. 38 | * 39 | * @see org.hbase.async.HBaseClient#getIncrementBufferSize() 40 | */ 41 | public Size getIncrementBufferSize(); 42 | 43 | /** 44 | * Sets the maximum time for which edits may be buffered before being flushed. 45 | * 46 | * @param flushInterval the maximum time for which edits may be buffered. 47 | * 48 | * @return the previous flush interval. 49 | * 50 | * @see org.hbase.async.HBaseClient#setFlushInterval(short) 51 | */ 52 | public Duration setFlushInterval(Duration flushInterval); 53 | 54 | /** 55 | * Sets the capacity of the increment buffer. 56 | * 57 | * @param incrementBufferSize the capacity of the increment buffer. 58 | * 59 | * @return the previous increment buffer capacity. 60 | * 61 | * @see org.hbase.async.HBaseClient#setIncrementBufferSize(int) 62 | */ 63 | public Size setIncrementBufferSize(Size incrementBufferSize); 64 | 65 | /** 66 | * Atomically creates a cell if, and only if, it doesn't already exist. 67 | * 68 | * @param edit the new cell to create. 69 | * 70 | * @return true if the cell was created, false if the cell already exists. 71 | * 72 | * @see org.hbase.async.HBaseClient#atomicCreate(org.hbase.async.PutRequest) 73 | */ 74 | public Deferred create(PutRequest edit); 75 | 76 | /** 77 | * Buffer a durable increment for coalescing. 78 | * 79 | * @param request the increment to buffer 80 | * 81 | * @return the new value of the cell, after the increment. 82 | * 83 | * @see org.hbase.async.HBaseClient#bufferAtomicIncrement(org.hbase.async.AtomicIncrementRequest) 84 | */ 85 | public Deferred bufferIncrement(AtomicIncrementRequest request); 86 | 87 | /** 88 | * Atomically and durably increment a cell value. 89 | * 90 | * @param request the increment to make. 91 | * 92 | * @return the new value of the cell, after the increment. 93 | * 94 | * @see org.hbase.async.HBaseClient#atomicIncrement(org.hbase.async.AtomicIncrementRequest) 95 | */ 96 | public Deferred increment(AtomicIncrementRequest request); 97 | 98 | /** 99 | * Atomically increment a cell value, with optional durability. 100 | * 101 | * @param request the increment to make. 102 | * @param durable whether to guarantee this increment succeeded durably. 103 | * 104 | * @return the new value of the cell, after the increment. 105 | * 106 | * @see org.hbase.async.HBaseClient#atomicIncrement(org.hbase.async.AtomicIncrementRequest, boolean) 107 | */ 108 | public Deferred increment(AtomicIncrementRequest request, Boolean durable); 109 | 110 | /** 111 | * Atomically compares and sets (CAS) a single cell 112 | * 113 | * @param edit the cell to set. 114 | * @param expected the expected current value. 115 | * 116 | * @return true if the expectation was met and the cell was set, otherwise, false. 
117 | * 118 | * @see org.hbase.async.HBaseClient#compareAndSet(org.hbase.async.PutRequest, byte[]) 119 | */ 120 | public Deferred compareAndSet(PutRequest edit, byte[] expected); 121 | 122 | /** 123 | * Atomically compares and sets (CAS) a single cell. 124 | * 125 | * @param edit the cell to set. 126 | * @param expected the expected current value. 127 | * 128 | * @return true if the expectation was met and the cell was set, otherwise, false. 129 | * 130 | * @see org.hbase.async.HBaseClient#compareAndSet(org.hbase.async.PutRequest, String) 131 | */ 132 | public Deferred compareAndSet(PutRequest edit, String expected); 133 | 134 | /** 135 | * Deletes the specified cells 136 | * 137 | * @param request the cell(s) to delete. 138 | * 139 | * @return a {@link Deferred} indicating when the deletion completes. 140 | * 141 | * @see org.hbase.async.HBaseClient#delete(org.hbase.async.DeleteRequest) 142 | */ 143 | public Deferred delete(DeleteRequest request); 144 | 145 | /** 146 | * Ensures that a specific table exists. 147 | * 148 | * @param table the table to check. 149 | * 150 | * @return a {@link Deferred} indicating the completion of the assertion. 151 | * 152 | * @throws TableNotFoundException (Deferred) if the table does not exist. 153 | * 154 | * @see org.hbase.async.HBaseClient#ensureTableExists(byte[]) 155 | */ 156 | public Deferred ensureTableExists(byte[] table); 157 | 158 | /** 159 | * Ensures that a specific table exists. 160 | * 161 | * @param table the table to check. 162 | * 163 | * @return a {@link Deferred} indicating the completion of the assertion. 164 | * 165 | * @throws TableNotFoundException (Deferred) if the table does not exist. 166 | * 167 | * @see org.hbase.async.HBaseClient#ensureTableExists(String) 168 | */ 169 | public Deferred ensureTableExists(String table); 170 | 171 | /** 172 | * Ensures that a specific table exists. 173 | * 174 | * @param table the table to check. 175 | * 176 | * @return a {@link Deferred} indicating the completion of the assertion. 177 | * 178 | * @throws TableNotFoundException (Deferred) if the table does not exist. 179 | * @throws NoSuchColumnFamilyException (Deferred) if the family doesn't exist. 180 | * 181 | * @see org.hbase.async.HBaseClient#ensureTableFamilyExists(byte[], byte[]) 182 | */ 183 | public Deferred ensureTableFamilyExists(byte[] table, byte[] family); 184 | 185 | /** 186 | * Ensures that a specific table exists. 187 | * 188 | * @param table the table to check. 189 | * 190 | * @return a {@link Deferred} indicating the completion of the assertion. 191 | * 192 | * @throws TableNotFoundException (Deferred) if the table does not exist. 193 | * @throws NoSuchColumnFamilyException (Deferred) if the family doesn't exist. 194 | * 195 | * @see org.hbase.async.HBaseClient#ensureTableFamilyExists(String, String) 196 | */ 197 | public Deferred ensureTableFamilyExists(String table, String family); 198 | 199 | /** 200 | * Flushes all requests buffered on the client-side 201 | * 202 | * @return a {@link Deferred} indicating the completion of the flush. 203 | * 204 | * @see org.hbase.async.HBaseClient#flush() 205 | */ 206 | public Deferred flush(); 207 | 208 | /** 209 | * Retrieves the specified cells 210 | * 211 | * @param request the cells to get. 212 | * 213 | * @return the requested cells. 214 | * 215 | * @see org.hbase.async.HBaseClient#get(org.hbase.async.GetRequest) 216 | */ 217 | public Deferred> get(GetRequest request); 218 | 219 | /** 220 | * Aqcuire an explicit row lock. 221 | * 222 | * @param request the row(s) to lock. 
223 | * 224 | * @return the row lock. 225 | * 226 | * @see org.hbase.async.HBaseClient#lockRow(org.hbase.async.RowLockRequest) 227 | */ 228 | public Deferred lockRow(RowLockRequest request); 229 | 230 | /** 231 | * Create a new {@link RowScanner} for a table. 232 | * 233 | * @param table the table to scan. 234 | * 235 | * @return a new {@link RowScanner} for the specified table. 236 | * 237 | * @see org.hbase.async.HBaseClient#newScanner(byte[]) 238 | */ 239 | public RowScanner scan(byte[] table); 240 | 241 | /** 242 | * Create a new {@link RowScanner} for a table. 243 | * 244 | * @param table the table to scan. 245 | * 246 | * @return a new {@link RowScanner} for the specified table. 247 | * 248 | * @see org.hbase.async.HBaseClient#newScanner(String) 249 | */ 250 | public RowScanner scan(String table); 251 | 252 | /** 253 | * Store the specified cell(s). 254 | * 255 | * @param request the cell(s) to store. 256 | * 257 | * @return a {@link Deferred} indicating the completion of the store operation. 258 | * 259 | * @see org.hbase.async.HBaseClient#put(org.hbase.async.PutRequest) 260 | */ 261 | public Deferred put(PutRequest request); 262 | 263 | /** 264 | * Performs a graceful shutdown of this client, flushing any pending requests. 265 | * 266 | * @return a {@link Deferred} indicating the completion of the shutdown operation. 267 | * 268 | * @see org.hbase.async.HBaseClient#shutdown() 269 | */ 270 | public Deferred shutdown(); 271 | 272 | /** 273 | * Get an immutable snapshot of client usage statistics. 274 | * 275 | * @return an immutable snapshot of client usage statistics. 276 | * 277 | * @see org.hbase.async.HBaseClient#stats() 278 | */ 279 | public ClientStats stats(); 280 | 281 | /** 282 | * Get the underlying {@link Timer} used by the async client 283 | * 284 | * @return the underlying {@link Timer} used by the async client. 285 | * 286 | * @see org.hbase.async.HBaseClient#getTimer() 287 | */ 288 | public Timer getTimer(); 289 | 290 | /** 291 | * Release an explicit row lock. 292 | * 293 | * @param lock the lock to release. 294 | * 295 | * @return a {@link Deferred} indicating the completion of the unlock operation. 296 | * 297 | * @see org.hbase.async.HBaseClient#unlockRow(org.hbase.async.RowLock) 298 | */ 299 | public Deferred unlockRow(RowLock lock); 300 | } 301 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase; 2 | 3 | import io.dropwizard.util.Duration; 4 | import io.dropwizard.util.Size; 5 | import com.codahale.metrics.MetricRegistry; 6 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 7 | import io.dropwizard.setup.Environment; 8 | import com.fasterxml.jackson.annotation.JsonProperty; 9 | 10 | import javax.validation.Valid; 11 | import javax.validation.constraints.Min; 12 | import javax.validation.constraints.NotNull; 13 | 14 | /** 15 | * A factory for creating and managing {@link HBaseClient} instances. 16 | *
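* For illustration, a hypothetical programmatic configuration (these settings would normally be
* bound from YAML, and the instance name is arbitrary):
* <pre>{@code
* final HBaseClientFactory factory = new HBaseClientFactory();
* factory.setMaxConcurrentRequests(1024); // bounds in-flight requests via a BoundedHBaseClient
* factory.setInstrumented(true);          // wraps the client in an InstrumentedHBaseClient
* // environment is the io.dropwizard.setup.Environment passed to Application#run
* final HBaseClient client = factory.build(environment, "hbase-example");
* }</pre>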
<p/>
17 | * The resulting {@link HBaseClient} will have its lifecycle managed by an {@link Environment} and 18 | * will have {@link com.codahale.metrics.health.HealthCheck}s installed for the {@code .META.} and 19 | * {@code -ROOT-} tables. 20 | * 21 | * @see HBaseClient 22 | */ 23 | public class HBaseClientFactory { 24 | 25 | private static final String DEFAULT_NAME = "hbase-default"; 26 | 27 | @NotNull 28 | @Valid 29 | protected ZooKeeperFactory zookeeper = new ZooKeeperFactory(); 30 | 31 | @NotNull 32 | protected Duration flushInterval = Duration.seconds(1); 33 | 34 | @NotNull 35 | protected Size incrementBufferSize = Size.kilobytes(64); 36 | 37 | @Min(0) 38 | protected int maxConcurrentRequests = 0; 39 | 40 | @NotNull 41 | protected Duration connectionTimeout = Duration.seconds(5); 42 | 43 | protected boolean instrumented = true; 44 | 45 | /** 46 | * Returns the ZooKeeper quorum co-ordinating the HBase cluster. 47 | * 48 | * @return the factory for connecting to the ZooKeeper quorum co-ordinating the HBase cluster. 49 | */ 50 | @JsonProperty 51 | public ZooKeeperFactory getZookeeper() { 52 | return zookeeper; 53 | } 54 | 55 | /** 56 | * Sets the ZooKeeper quorum co-ordinating the HBase cluster. 57 | * 58 | * @param factory a factory for the ZooKeeper quorum co-ordinating the HBase cluster. 59 | */ 60 | @JsonProperty 61 | public void setZookeeper(final ZooKeeperFactory factory) { 62 | this.zookeeper = factory; 63 | } 64 | 65 | /** 66 | * Returns the maximum amount of time requests may be buffered client-side before sending them 67 | * to the server. 68 | * 69 | * @return the maximum amount of time requests may be buffered. 70 | * 71 | * @see org.hbase.async.HBaseClient#getFlushInterval() 72 | */ 73 | @JsonProperty 74 | public Duration getFlushInterval() { 75 | return flushInterval; 76 | } 77 | 78 | /** 79 | * Sets the maximum amount of time requests may be buffered client-side before sending them 80 | * to the server. 81 | * 82 | * @param flushInterval the maximum amount of time requests may be buffered. 83 | * 84 | * @see org.hbase.async.HBaseClient#setFlushInterval(short) 85 | */ 86 | @JsonProperty 87 | public void setFlushInterval(final Duration flushInterval) { 88 | this.flushInterval = flushInterval; 89 | } 90 | 91 | /** 92 | * Returns the maximum size of the buffer for increment operations. 93 | *
<p/>
94 | * Once this buffer is full, a flush is forced irrespective of the {@link #getFlushInterval() 95 | * flushInterval}. 96 | * 97 | * @return the maximum number of increments to buffer. 98 | * 99 | * @see org.hbase.async.HBaseClient#getIncrementBufferSize() 100 | */ 101 | @JsonProperty 102 | public Size getIncrementBufferSize() { 103 | return incrementBufferSize; 104 | } 105 | 106 | /** 107 | * Sets the maximum size of the buffer for increment operations. 108 | *
<p/>
109 | * Once this buffer is full, a flush is forced irrespective of the {@link #getFlushInterval() 110 | * flushInterval}. 111 | * 112 | * @param incrementBufferSize the maximum number of increments to buffer. 113 | * 114 | * @see org.hbase.async.HBaseClient#setIncrementBufferSize(int) 115 | */ 116 | @JsonProperty 117 | public void setIncrementBufferSize(final Size incrementBufferSize) { 118 | this.incrementBufferSize = incrementBufferSize; 119 | } 120 | 121 | /** 122 | * Returns maximum number of concurrent asynchronous requests for the client. 123 | *
<p/>
124 | * Useful for throttling high-throughput applications when HBase is the bottle-neck to prevent 125 | * the client running out of memory. 126 | *
<p/>
127 | * With this is zero ("0"), no limit will be placed on the number of concurrent asynchronous 128 | * requests. 129 | * 130 | * @return the maximum number of requests that may be executing concurrently. 131 | * 132 | * @see BoundedHBaseClient 133 | */ 134 | @JsonProperty 135 | public int getMaxConcurrentRequests() { 136 | return maxConcurrentRequests; 137 | } 138 | 139 | /** 140 | * Sets the maximum number of concurrent asynchronous requests for the client. 141 | *
<p/>
142 | * Useful for throttling high-throughput applications when HBase is the bottle-neck to prevent 143 | * the client running out of memory. 144 | *
<p/>
145 | * With this is zero ("0"), no limit will be placed on the number of concurrent asynchronous 146 | * requests. 147 | * 148 | * @param maxConcurrentRequests the maximum number of requests that may execute concurrently. 149 | * 150 | * @see BoundedHBaseClient 151 | */ 152 | @JsonProperty 153 | public void setMaxConcurrentRequests(final int maxConcurrentRequests) { 154 | this.maxConcurrentRequests = maxConcurrentRequests; 155 | } 156 | 157 | /** 158 | * Returns the maximum time to wait for a connection to a region server before failing. 159 | * 160 | * @return the maximum time to spend connecting to a server before failing. 161 | */ 162 | @JsonProperty 163 | public Duration getConnectionTimeout() { 164 | return connectionTimeout; 165 | } 166 | 167 | /** 168 | * Returns the maximum time to wait for a connection to a region server before failing. 169 | * 170 | * @param connectionTimeout the maximum time to spend connecting to a server before failing. 171 | */ 172 | @JsonProperty 173 | public void setConnectionTimeout(final Duration connectionTimeout) { 174 | this.connectionTimeout = connectionTimeout; 175 | } 176 | 177 | /** 178 | * Returns whether the {@link HBaseClient} should be instrumented with metrics. 179 | * 180 | * @return whether the {@link HBaseClient} should be instrumented with metrics. 181 | */ 182 | @JsonProperty 183 | public boolean isInstrumented() { 184 | return instrumented; 185 | } 186 | 187 | /** 188 | * Sets whether the {@link HBaseClient} should be instrumented with metrics. 189 | * 190 | * @param isInstrumented whether the {@link HBaseClient} should be instrumented with metrics. 191 | */ 192 | @JsonProperty 193 | public void setInstrumented(final boolean isInstrumented) { 194 | this.instrumented = isInstrumented; 195 | } 196 | 197 | /** 198 | * Builds a default {@link HBaseClient} instance from the specified {@link 199 | * HBaseClientFactory}. 200 | * 201 | * @param environment the {@link Environment} to build {@link HBaseClient} instances for. 202 | * @return an {@link HBaseClient}, managed and configured according to the {@code configuration} 203 | */ 204 | public HBaseClient build(final Environment environment) { 205 | return build(environment, DEFAULT_NAME); 206 | } 207 | 208 | /** 209 | * Builds an {@link HBaseClient} instance from the specified {@link HBaseClientFactory} 210 | * with the given {@code name}. 211 | * 212 | * @param environment the {@link Environment} to build {@link HBaseClient} instances for. 213 | * @param name the name for the {@link HBaseClient}. 214 | * 215 | * @return an {@link HBaseClient}, managed and configured according to the {@code 216 | * configuration}. 
217 | */ 218 | public HBaseClient build(final Environment environment, final String name) { 219 | final ZooKeeperFactory zkFactory = getZookeeper(); 220 | 221 | final HBaseClient proxy = new HBaseClientProxy( 222 | new org.hbase.async.HBaseClient(zkFactory.getQuorumSpec(), zkFactory.getNamespace())); 223 | 224 | // optionally instrument and bound requests for the client 225 | final HBaseClient client = instrument(boundRequests(proxy), environment.metrics(), name); 226 | 227 | // configure client 228 | client.setFlushInterval(getFlushInterval()); 229 | client.setIncrementBufferSize(getIncrementBufferSize()); 230 | 231 | // add healthchecks for META and ROOT tables 232 | environment.healthChecks().register(name + "-meta", new HBaseHealthCheck(client, ".META.")); 233 | environment.healthChecks().register(name + "-root", new HBaseHealthCheck(client, "-ROOT-")); 234 | 235 | // manage client 236 | environment.lifecycle().manage(new ManagedHBaseClient( 237 | client, getConnectionTimeout())); 238 | 239 | return client; 240 | } 241 | 242 | /** 243 | * Builds a new {@link HBaseClient} according to the given {@link HBaseClientFactory}. 244 | *
<p/>
245 | * If instrumentation {@link #instrumented is enabled} in the 246 | * configuration, this will build an {@link InstrumentedHBaseClient} wrapping the given {@link 247 | * HBaseClient}. 248 | *
<p/>
249 | * If instrumentation is not enabled, the given {@link HBaseClient} will be returned verbatim. 250 | * 251 | * @param client an underlying {@link HBaseClient} implementation. 252 | * @param registry the {@link MetricRegistry} to register metrics with. 253 | * @param name the name of the client that is being instrumented. 254 | * @return an {@link HBaseClient} that satisfies the configuration of instrumentation. 255 | */ 256 | private HBaseClient instrument(final HBaseClient client, 257 | final MetricRegistry registry, 258 | final String name) { 259 | return isInstrumented() 260 | ? new InstrumentedHBaseClient(client, registry, name) 261 | : client; 262 | } 263 | 264 | /** 265 | * Builds a new {@link HBaseClient} according to the given {@link HBaseClientFactory}. 266 | *
<p/>
267 | * If the {@link #maxConcurrentRequests} is non-zero in the 268 | * configuration, this will build a {@link BoundedHBaseClient} that wraps the given client. 269 | *
<p/>
270 | * If {@link #maxConcurrentRequests} is zero, the given {@link 271 | * HBaseClient} will be returned verbatim. 272 | * 273 | * @param client an underlying {@link HBaseClient} implementation. 274 | * 275 | * @return an {@link HBaseClient} that satisfies the configuration of the maximum concurrent 276 | * requests. 277 | */ 278 | private HBaseClient boundRequests(final HBaseClient client) { 279 | return getMaxConcurrentRequests() > 0 280 | ? new BoundedHBaseClient(client, getMaxConcurrentRequests()) 281 | : client; 282 | } 283 | } 284 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/HBaseHealthCheck.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase; 2 | 3 | import com.stumbleupon.async.TimeoutException; 4 | import com.codahale.metrics.health.HealthCheck; 5 | import org.hbase.async.TableNotFoundException; 6 | 7 | /** 8 | * A {@link HealthCheck} for an HBase table using an {@link HBaseClient}. 9 | */ 10 | public class HBaseHealthCheck extends HealthCheck { 11 | 12 | private final HBaseClient client; 13 | private final String table; 14 | 15 | /** 16 | * Checks the health of the given {@link HBaseClient} by connecting and testing for the given 17 | * {@code table}. 18 | * 19 | * @param client the client to check the health of. 20 | * @param table the name of the table to look for. 21 | */ 22 | public HBaseHealthCheck(final HBaseClient client, final String table) { 23 | this.client = client; 24 | this.table = table; 25 | } 26 | 27 | /** 28 | * Checks the health of the configured {@link HBaseClient} by using it to test for the 29 | * configured {@code table}. 30 | * 31 | * @return {@link Result#healthy()} if the client can be used to confirm the table exists; or 32 | * {@link Result#unhealthy(String)} either if the table does not exist or the client 33 | * times out while checking for the table. 34 | * 35 | * @throws Exception if an unexpected Exception occurs while checking the health of the client. 36 | */ 37 | @Override 38 | protected Result check() throws Exception { 39 | try { 40 | client.ensureTableExists(table.getBytes()).joinUninterruptibly(5000); 41 | return Result.healthy(); 42 | } catch (final TimeoutException e) { 43 | return Result.unhealthy(String.format( 44 | "Timed out checking for '%s' after 5 seconds", table)); 45 | } catch (final TableNotFoundException e) { 46 | return Result.unhealthy(String.format( 47 | "Table '%s' does not exist", table)); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/ManagedHBaseClient.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase; 2 | 3 | import io.dropwizard.lifecycle.Managed; 4 | import io.dropwizard.util.Duration; 5 | 6 | /** 7 | * Manages the lifecycle of an {@link HBaseClient}. 8 | */ 9 | public class ManagedHBaseClient implements Managed { 10 | 11 | private final HBaseClient client; 12 | private final Duration connectionTimeout; 13 | 14 | /** 15 | * Manage the specified {@link HBaseClient} with the given {@code connectionTimeout}. 16 | * 17 | * @param client the {@link HBaseClient} to manage. 18 | * @param connectionTimeout the maximum time to wait for a connection to a region server or 19 | * ZooKeeper quorum. 
20 | */ 21 | public ManagedHBaseClient(final HBaseClient client, final Duration connectionTimeout) { 22 | this.client = client; 23 | this.connectionTimeout = connectionTimeout; 24 | } 25 | 26 | /** 27 | * Forces connection of the {@link HBaseClient}. 28 | * 29 | * To force the connection, we look for the prescence of the .META. table. 30 | * 31 | * @throws com.stumbleupon.async.TimeoutException if there is a problem connecting to HBase. 32 | * @throws org.hbase.async.TableNotFoundException if the .META. table can't be found. 33 | * @throws Exception if there is a problem verifying the .META. table exists. 34 | */ 35 | public void start() throws Exception { 36 | client.ensureTableExists(".META.").joinUninterruptibly(connectionTimeout.toMilliseconds()); 37 | } 38 | 39 | /** 40 | * Shutsdown the {@link HBaseClient}, waiting until shutdown is complete. 41 | * 42 | * @throws Exception if there is a problem shutting the {@link HBaseClient} down. 43 | */ 44 | public void stop() throws Exception { 45 | client.shutdown().joinUninterruptibly(); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/metrics/ScannerInstrumentation.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.metrics; 2 | 3 | import com.datasift.dropwizard.hbase.scanner.RowScanner; 4 | import com.codahale.metrics.MetricRegistry; 5 | import com.codahale.metrics.Timer; 6 | 7 | /** 8 | * A container for {@link Timer}s used to time {@link RowScanner} requests. 9 | * 10 | * @see com.datasift.dropwizard.hbase.scanner.RowScanner 11 | */ 12 | public class ScannerInstrumentation { 13 | 14 | private final Timer scans; 15 | private final Timer closes; 16 | 17 | /** 18 | * Initialises instrumentation for the given {@link RowScanner} using the given {@link 19 | * MetricsRegistry}. 20 | * 21 | * @param scanner the scanner to create metrics for. 22 | * @param registry the registry to register the metrics with. 23 | * @param name the name of the client/scanner to register metrics under. 24 | */ 25 | 26 | ScannerInstrumentation(final RowScanner scanner, 27 | final MetricRegistry registry, 28 | final String name) { 29 | final Class clazz = scanner.getClass(); 30 | 31 | scans = registry.timer(MetricRegistry.name(name, "scans")); 32 | closes = registry.timer(MetricRegistry.name(name, "closes")); 33 | } 34 | 35 | /** 36 | * Gets the {@link Timer} for scan requests. 37 | * 38 | * @return the {@link Timer} for scan requests. 39 | */ 40 | public Timer getScans() { 41 | return scans; 42 | } 43 | 44 | /** 45 | * Gets the {@link Timer} for close requests. 46 | * 47 | * @return the {@link Timer} for close requests. 48 | */ 49 | public Timer getCloses() { 50 | return closes; 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScanner.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.scanner; 2 | 3 | import com.stumbleupon.async.Deferred; 4 | import org.hbase.async.KeyValue; 5 | 6 | import java.nio.charset.Charset; 7 | import java.util.ArrayList; 8 | 9 | /** 10 | * Client for scanning over a selection of rows. 11 | *

12 | * To obtain an instance of a {@link RowScanner}, call {@link 13 | * com.datasift.dropwizard.hbase.HBaseClient#scan(byte[])}. 14 | *
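 *
 * <p>Hedged usage sketch (not part of the original source; {@code client} is an assumed
 * {@code HBaseClient} and the table, family and batch size are illustrative):
 * <pre>{@code
 *   RowScanner scanner = client.scan("events".getBytes())
 *       .setFamily("d")
 *       .setMaxNumRows(128);
 *   ArrayList<ArrayList<KeyValue>> batch = scanner.nextRows().joinUninterruptibly();
 *   // process the batch, call nextRows() again for the next one, then:
 *   scanner.close();
 * }</pre>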

15 | * All implementations are wrapper proxies around {@link org.hbase.async.Scanner} providing 16 | * additional functionality. 17 | */ 18 | public interface RowScanner { 19 | 20 | /** 21 | * Set the first key in the range to scan. 22 | * 23 | * @param key the first key to scan from (inclusive). 24 | * 25 | * @return this {@link RowScanner} to facilitate method chaining. 26 | * 27 | * @see org.hbase.async.Scanner#setStartKey(byte[]) 28 | */ 29 | public RowScanner setStartKey(byte[] key); 30 | 31 | /** 32 | * Set the first key in the range to scan. 33 | * 34 | * @param key the first key to scan from (inclusive). 35 | * 36 | * @return this {@link RowScanner} to facilitate method chaining. 37 | * 38 | * @see org.hbase.async.Scanner#setStartKey(String) 39 | */ 40 | public RowScanner setStartKey(String key); 41 | 42 | /** 43 | * Set the end key in the range to scan. 44 | * 45 | * @param key the end key to scan until (exclusive). 46 | * 47 | * @return this {@link RowScanner} to facilitate method chaining. 48 | * 49 | * @see org.hbase.async.Scanner#setStopKey(byte[]) 50 | */ 51 | public RowScanner setStopKey(byte[] key); 52 | 53 | /** 54 | * Set the end key in the range to scan. 55 | * 56 | * @param key the end key to scan until (exclusive). 57 | * 58 | * @return this {@link RowScanner} to facilitate method chaining. 59 | * 60 | * @see org.hbase.async.Scanner#setStopKey(byte[]) 61 | */ 62 | public RowScanner setStopKey(String key); 63 | 64 | /** 65 | * Set the family to scan. 66 | * 67 | * @param family the family to scan. 68 | * 69 | * @return this {@link RowScanner} to facilitate method chaining. 70 | * 71 | * @see org.hbase.async.Scanner#setFamily(byte[]) 72 | */ 73 | public RowScanner setFamily(byte[] family); 74 | 75 | /** 76 | * Set the family to scan. 77 | * 78 | * @param family the family to scan. 79 | * 80 | * @return this {@link RowScanner} to facilitate method chaining. 81 | * 82 | * @see org.hbase.async.Scanner#setFamily(String) 83 | */ 84 | public RowScanner setFamily(String family); 85 | 86 | /** 87 | * Set the qualifier to select from cells 88 | * 89 | * @param qualifier the family to select from cells. 90 | * 91 | * @return this {@link RowScanner} to facilitate method chaining. 92 | * 93 | * @see org.hbase.async.Scanner#setQualifier(byte[]) 94 | */ 95 | public RowScanner setQualifier(byte[] qualifier); 96 | 97 | /** 98 | * Set the qualifiers to select from cells 99 | * 100 | * @param qualifiers the family to select from cells. 101 | * 102 | * @return this {@link RowScanner} to facilitate method chaining. 103 | * 104 | * @see org.hbase.async.Scanner#setQualifiers(byte[][]) 105 | */ 106 | public RowScanner setQualifiers(byte[][] qualifiers); 107 | 108 | 109 | /** 110 | * Set the qualifier to select from cells 111 | * 112 | * @param qualifier the family to select from cells. 113 | * 114 | * @return this {@link RowScanner} to facilitate method chaining. 115 | * 116 | * @see org.hbase.async.Scanner#setQualifier(String) 117 | */ 118 | public RowScanner setQualifier(String qualifier); 119 | 120 | /** 121 | * Set a regular expression to filter keys being scanned. 122 | * 123 | * @param regexp a regular expression to filter keys with. 124 | * 125 | * @return this {@link RowScanner} to facilitate method chaining. 126 | * 127 | * @see org.hbase.async.Scanner#setKeyRegexp(String) 128 | */ 129 | public RowScanner setKeyRegexp(String regexp); 130 | 131 | /** 132 | * Set a regular expression to filter keys being scanned. 133 | * 134 | * @param regexp a regular expression to filter keys with. 
135 | * @param charset the charset to decode the keys as. 136 | * 137 | * @return this {@link RowScanner} to facilitate method chaining. 138 | * 139 | * @see org.hbase.async.Scanner#setKeyRegexp(String) 140 | */ 141 | public RowScanner setKeyRegexp(String regexp, Charset charset); 142 | 143 | /** 144 | * Set whether to use the server-side block cache during the scan. 145 | * 146 | * @param populateBlockcache whether to use the server-side block cache. 147 | * 148 | * @return this {@link RowScanner} to facilitate method chaining. 149 | * 150 | * @see org.hbase.async.Scanner#setServerBlockCache(boolean) 151 | */ 152 | public RowScanner setServerBlockCache(boolean populateBlockcache); 153 | 154 | /** 155 | * Set the maximum number of rows to fetch in each batch. 156 | * 157 | * @param maxRows the maximum number of rows to fetch in each batch. 158 | * 159 | * @return this {@link RowScanner} to facilitate method chaining. 160 | * 161 | * @see org.hbase.async.Scanner#setMaxNumRows(int) 162 | */ 163 | public RowScanner setMaxNumRows(int maxRows); 164 | 165 | /** 166 | * Set the maximum number of {@link KeyValue}s to fetch in each batch. 167 | * 168 | * @param maxKeyValues the maximum number of {@link KeyValue}s to fetch in each batch. 169 | * 170 | * @return this {@link RowScanner} to facilitate method chaining. 171 | */ 172 | public RowScanner setMaxNumKeyValues(int maxKeyValues); 173 | 174 | /** 175 | * Sets the minimum timestamp of the cells to yield. 176 | * 177 | * @param timestamp the minimum timestamp of the cells to yield. 178 | * 179 | * @return this {@link RowScanner} to facilitate method chaining. 180 | * 181 | * @see org.hbase.async.Scanner#setMinTimestamp(long) 182 | */ 183 | public RowScanner setMinTimestamp(long timestamp); 184 | 185 | /** 186 | * Gets the minimum timestamp of the cells to yield. 187 | * 188 | * @return the minimum timestamp of the cells to yield. 189 | * 190 | * @see org.hbase.async.Scanner#getMinTimestamp() 191 | */ 192 | public long getMinTimestamp(); 193 | 194 | /** 195 | * Sets the maximum timestamp of the cells to yield. 196 | * 197 | * @param timestamp the maximum timestamp of the cells to yield. 198 | * 199 | * @return this {@link RowScanner} to facilitate method chaining. 200 | * 201 | * @see org.hbase.async.Scanner#setMaxTimestamp(long) 202 | */ 203 | public RowScanner setMaxTimestamp(long timestamp); 204 | 205 | /** 206 | * Gets the maximum timestamp of the cells to yield. 207 | * 208 | * @return the maximum timestamp of the cells to yield. 209 | * 210 | * @see org.hbase.async.Scanner#getMaxTimestamp() 211 | */ 212 | public long getMaxTimestamp(); 213 | 214 | /** 215 | * Sets the timerange of the cells to yield. 216 | * 217 | * @param minTimestamp the minimum timestamp of the cells to yield. 218 | * @param maxTimestamp the maximum timestamp of the cells to yield. 219 | * 220 | * @return this {@link RowScanner} to facilitate method chaining. 221 | * 222 | * @see org.hbase.async.Scanner#setMinTimestamp(long) 223 | */ 224 | public RowScanner setTimeRange(long minTimestamp, long maxTimestamp); 225 | 226 | /** 227 | * Get the key of the current row being scanned. 228 | * 229 | * @return the key of the current row. 230 | * 231 | * @see org.hbase.async.Scanner#getCurrentKey() 232 | */ 233 | public byte[] getCurrentKey(); 234 | 235 | /** 236 | * Closes this Scanner 237 | * 238 | * @return a Deferred indicating when the close operation has completed. 
239 | * 240 | * @see org.hbase.async.Scanner#close() 241 | */ 242 | public Deferred close(); 243 | 244 | /** 245 | * Scans the next batch of rows 246 | * 247 | * @return next batch of rows that were scanned. 248 | * 249 | * @see org.hbase.async.Scanner#nextRows() 250 | */ 251 | public Deferred>> nextRows(); 252 | 253 | /** 254 | * Scans the next batch of rows 255 | * 256 | * @param rows maximum number of rows to retrieve in the batch. 257 | * 258 | * @return next batch of rows that were scanned. 259 | * 260 | * @see org.hbase.async.Scanner#nextRows(int) 261 | */ 262 | public Deferred>> nextRows(int rows); 263 | } 264 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/scanner/RowScannerProxy.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.scanner; 2 | 3 | import com.stumbleupon.async.Deferred; 4 | import org.hbase.async.KeyValue; 5 | import org.hbase.async.Scanner; 6 | 7 | import java.nio.charset.Charset; 8 | import java.util.ArrayList; 9 | 10 | /** 11 | * Client for scanning over a selection of rows. 12 | *

13 | * To obtain an instance of a {@link RowScanner}, call {@link 14 | * com.datasift.dropwizard.hbase.HBaseClient#scan(byte[])}. 15 | *

16 | * This implementation is a proxy for a {@link org.hbase.async.Scanner}. 17 | */ 18 | public class RowScannerProxy implements RowScanner { 19 | 20 | private final Scanner scanner; 21 | 22 | /** 23 | * Creates a new {@link RowScannerProxy} for the given {@link Scanner}. 24 | * 25 | * @param scanner the underlying {@link Scanner} to wrap 26 | */ 27 | public RowScannerProxy(final Scanner scanner) { 28 | this.scanner = scanner; 29 | } 30 | 31 | /** 32 | * Set the first key in the range to scan. 33 | * 34 | * @param key the first key to scan from (inclusive). 35 | * 36 | * @return this {@link RowScanner} to facilitate method chaining. 37 | * 38 | * @see org.hbase.async.Scanner#setStartKey(byte[]) 39 | */ 40 | public RowScanner setStartKey(final byte[] key) { 41 | scanner.setStartKey(key); 42 | return this; 43 | } 44 | 45 | /** 46 | * Set the first key in the range to scan. 47 | * 48 | * @param key the first key to scan from (inclusive). 49 | * 50 | * @return this {@link RowScanner} to facilitate method chaining. 51 | * 52 | * @see org.hbase.async.Scanner#setStartKey(String) 53 | */ 54 | public RowScanner setStartKey(final String key) { 55 | scanner.setStartKey(key); 56 | return this; 57 | } 58 | 59 | /** 60 | * Set the end key in the range to scan. 61 | * 62 | * @param key the end key to scan until (exclusive) 63 | * 64 | * @return this {@link RowScanner} to facilitate method chaining. 65 | * 66 | * @see org.hbase.async.Scanner#setStopKey(byte[]) 67 | */ 68 | public RowScanner setStopKey(final byte[] key) { 69 | scanner.setStopKey(key); 70 | return this; 71 | } 72 | 73 | /** 74 | * Set the end key in the range to scan. 75 | * 76 | * @param key the end key to scan until (exclusive). 77 | * 78 | * @return this {@link RowScanner} to facilitate method chaining. 79 | * 80 | * @see org.hbase.async.Scanner#setStopKey(byte[]) 81 | */ 82 | public RowScanner setStopKey(final String key) { 83 | scanner.setStopKey(key); 84 | return this; 85 | } 86 | 87 | /** 88 | * Set the family to scan. 89 | * 90 | * @param family the family to scan. 91 | * 92 | * @return this {@link RowScanner} to facilitate method chaining. 93 | * 94 | * @see org.hbase.async.Scanner#setFamily(byte[]) 95 | */ 96 | public RowScanner setFamily(final byte[] family) { 97 | scanner.setFamily(family); 98 | return this; 99 | } 100 | 101 | /** 102 | * Set the family to scan. 103 | * 104 | * @param family the family to scan. 105 | * 106 | * @return this {@link RowScanner} to facilitate method chaining. 107 | * 108 | * @see org.hbase.async.Scanner#setFamily(String) 109 | */ 110 | public RowScanner setFamily(final String family) { 111 | scanner.setFamily(family); 112 | return this; 113 | } 114 | 115 | /** 116 | * Set the qualifier to select from cells 117 | * 118 | * @param qualifier the family to select from cells. 119 | * 120 | * @return this {@link RowScanner} to facilitate method chaining. 121 | * 122 | * @see org.hbase.async.Scanner#setQualifier(byte[]) 123 | */ 124 | public RowScanner setQualifier(final byte[] qualifier) { 125 | scanner.setQualifier(qualifier); 126 | return this; 127 | } 128 | 129 | /** 130 | * Set the qualifier to select from cells 131 | * 132 | * @param qualifiers the family to select from cells. 133 | * 134 | * @return this {@link RowScanner} to facilitate method chaining. 
135 | * 136 | * @see org.hbase.async.Scanner#setQualifiers(byte[][]) 137 | */ 138 | public RowScanner setQualifiers(final byte[][] qualifiers) { 139 | scanner.setQualifiers(qualifiers); 140 | return this; 141 | } 142 | 143 | /** 144 | * Set the qualifier to select from cells 145 | * 146 | * @param qualifier the family to select from cells. 147 | * 148 | * @return this {@link RowScanner} to facilitate method chaining. 149 | * 150 | * @see org.hbase.async.Scanner#setQualifier(String) 151 | */ 152 | public RowScanner setQualifier(final String qualifier) { 153 | scanner.setQualifier(qualifier); 154 | return this; 155 | } 156 | 157 | /** 158 | * Set a regular expression to filter keys being scanned. 159 | * 160 | * @param regexp a regular expression to filter keys with. 161 | * 162 | * @return this {@link RowScanner} to facilitate method chaining. 163 | * 164 | * @see org.hbase.async.Scanner#setKeyRegexp(String) 165 | */ 166 | public RowScanner setKeyRegexp(final String regexp) { 167 | scanner.setKeyRegexp(regexp); 168 | return this; 169 | } 170 | 171 | /** 172 | * Set a regular expression to filter keys being scanned. 173 | * 174 | * @param regexp a regular expression to filter keys with. 175 | * @param charset the charset to decode the keys as. 176 | * 177 | * @return this {@link RowScanner} to facilitate method chaining. 178 | * 179 | * @see org.hbase.async.Scanner#setKeyRegexp(String) 180 | */ 181 | public RowScanner setKeyRegexp(final String regexp, final Charset charset) { 182 | scanner.setKeyRegexp(regexp, charset); 183 | return this; 184 | } 185 | 186 | /** 187 | * Set whether to use the server-side block cache during the scan. 188 | * 189 | * @param populateBlockcache whether to use the server-side block cache. 190 | * 191 | * @see org.hbase.async.Scanner#setServerBlockCache(boolean) 192 | * 193 | * @return this {@link RowScanner} to facilitate method chaining. 194 | */ 195 | public RowScanner setServerBlockCache(final boolean populateBlockcache) { 196 | scanner.setServerBlockCache(populateBlockcache); 197 | return this; 198 | } 199 | 200 | /** 201 | * Set the maximum number of rows to fetch in each batch. 202 | * 203 | * @param maxRows the maximum number of rows to fetch in each batch. 204 | * 205 | * @see org.hbase.async.Scanner#setMaxNumRows(int) 206 | * 207 | * @return this {@link RowScanner} to facilitate method chaining. 208 | */ 209 | public RowScanner setMaxNumRows(final int maxRows) { 210 | scanner.setMaxNumRows(maxRows); 211 | return this; 212 | } 213 | 214 | /** 215 | * Set the maximum number of {@link KeyValue}s to fetch in each batch. 216 | * 217 | * @param maxKeyValues the maximum number of {@link KeyValue}s to fetch in each batch. 218 | * 219 | * @return this {@link RowScanner} to facilitate method chaining. 220 | * 221 | * @see org.hbase.async.Scanner#setMaxNumKeyValues(int) 222 | */ 223 | public RowScanner setMaxNumKeyValues(final int maxKeyValues) { 224 | scanner.setMaxNumKeyValues(maxKeyValues); 225 | return this; 226 | } 227 | 228 | /** 229 | * Sets the minimum timestamp of the cells to yield. 230 | * 231 | * @param timestamp the minimum timestamp of the cells to yield. 232 | * 233 | * @return this {@link RowScanner} to facilitate method chaining. 234 | * 235 | * @see org.hbase.async.Scanner#setMinTimestamp(long) 236 | */ 237 | public RowScanner setMinTimestamp(final long timestamp) { 238 | scanner.setMinTimestamp(timestamp); 239 | return this; 240 | } 241 | 242 | /** 243 | * Gets the minimum timestamp of the cells to yield. 
244 | * 245 | * @return the minimum timestamp of the cells to yield. 246 | * 247 | * @see org.hbase.async.Scanner#getMinTimestamp() 248 | */ 249 | public long getMinTimestamp() { 250 | return scanner.getMinTimestamp(); 251 | } 252 | 253 | /** 254 | * Sets the maximum timestamp of the cells to yield. 255 | * 256 | * @param timestamp the maximum timestamp of the cells to yield. 257 | * 258 | * @return this {@link RowScanner} to facilitate method chaining. 259 | * 260 | * @see org.hbase.async.Scanner#setMaxTimestamp(long) 261 | */ 262 | public RowScanner setMaxTimestamp(final long timestamp) { 263 | scanner.setMaxTimestamp(timestamp); 264 | return this; 265 | } 266 | 267 | /** 268 | * Gets the maximum timestamp of the cells to yield. 269 | * 270 | * @return the maximum timestamp of the cells to yield. 271 | * 272 | * @see org.hbase.async.Scanner#getMaxTimestamp() 273 | */ 274 | public long getMaxTimestamp() { 275 | return scanner.getMaxTimestamp(); 276 | } 277 | 278 | /** 279 | * Sets the timerange of the cells to yield. 280 | * 281 | * @param minTimestamp the minimum timestamp of the cells to yield. 282 | * @param maxTimestamp the maximum timestamp of the cells to yield. 283 | * 284 | * @return this {@link RowScanner} to facilitate method chaining. 285 | * 286 | * @see org.hbase.async.Scanner#setMinTimestamp(long) 287 | */ 288 | public RowScanner setTimeRange(final long minTimestamp, final long maxTimestamp) { 289 | scanner.setTimeRange(minTimestamp, maxTimestamp); 290 | return this; 291 | } 292 | 293 | /** 294 | * Get the key of the current row being scanned. 295 | * 296 | * @return the key of the current row. 297 | * 298 | * @see org.hbase.async.Scanner#getCurrentKey() 299 | */ 300 | public byte[] getCurrentKey() { 301 | return scanner.getCurrentKey(); 302 | } 303 | 304 | /** 305 | * Closes this Scanner 306 | * 307 | * @return a Deferred indicating when the close operation has completed. 308 | * 309 | * @see org.hbase.async.Scanner#close() 310 | */ 311 | public Deferred close() { 312 | return scanner.close(); 313 | } 314 | 315 | /** 316 | * Scans the next batch of rows 317 | * 318 | * @return next batch of rows that were scanned. 319 | * 320 | * @see org.hbase.async.Scanner#nextRows() 321 | */ 322 | public Deferred>> nextRows() { 323 | return scanner.nextRows(); 324 | } 325 | 326 | /** 327 | * Scans the next batch of rows 328 | * 329 | * @param rows maximum number of rows to retrieve in the batch. 330 | * 331 | * @return next batch of rows that were scanned. 332 | * 333 | * @see org.hbase.async.Scanner#nextRows(int) 334 | */ 335 | public Deferred>> nextRows(final int rows) { 336 | return scanner.nextRows(rows); 337 | } 338 | } 339 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/PermitReleasingCallback.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.util; 2 | 3 | import com.stumbleupon.async.Callback; 4 | 5 | import java.util.concurrent.Semaphore; 6 | 7 | /** 8 | * A {@link Callback} that releases a permit on a given {@link Semaphore}. 9 | */ 10 | public class PermitReleasingCallback implements Callback { 11 | 12 | /** 13 | * The {@link Semaphore} to release the permit to. 14 | */ 15 | private final Semaphore semaphore; 16 | 17 | /** 18 | * Creates a new {@link Callback} that releases a permit on the given semaphore on completion. 
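 *
 * <p>Sketch of the intended usage pattern (illustrative only; {@code client}, {@code request} and
 * the semaphore are assumptions). A permit is acquired before the request is issued and released
 * when the {@link com.stumbleupon.async.Deferred} completes, whether it succeeds or fails:
 * <pre>{@code
 *   semaphore.acquireUninterruptibly();
 *   client.get(request).addBoth(new PermitReleasingCallback<ArrayList<KeyValue>>(semaphore));
 * }</pre>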
19 | * 20 | * @param semaphore the {@link Semaphore} to release the permit to on completion. 21 | */ 22 | public PermitReleasingCallback(final Semaphore semaphore) { 23 | this.semaphore = semaphore; 24 | } 25 | 26 | /** 27 | * Releases a permit to the registered {@link Semaphore} and proxies any argument through 28 | * verbatim. 29 | * 30 | * @param arg the argument (if any) to pass-through. 31 | * 32 | * @return the argument (if any), returned verbatim. 33 | * 34 | * @throws Exception if an error occurs releasing the permit to the {@link Semaphore}. 35 | */ 36 | public T call(final T arg) throws Exception { 37 | semaphore.release(); 38 | return arg; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/main/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallback.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.util; 2 | 3 | import com.stumbleupon.async.Callback; 4 | import com.codahale.metrics.Timer; 5 | 6 | /** 7 | * A {@link Callback} for stopping a {@link TimerContext} on completion. 8 | */ 9 | public class TimerStoppingCallback implements Callback { 10 | 11 | /** 12 | * The context of the active {@link com.yammer.metrics.core.Timer} to stop. 13 | */ 14 | private final Timer.Context timer; 15 | 16 | /** 17 | * Creates a new {@link Callback} that stops the given active timer on completion. 18 | * 19 | * @param timer the active {@link com.yammer.metrics.core.Timer} to stop on completion of the 20 | * {@link Callback}. 21 | */ 22 | public TimerStoppingCallback(final Timer.Context timer) { 23 | this.timer = timer; 24 | } 25 | 26 | /** 27 | * Stops the registered {@link com.yammer.metrics.core.Timer} and proxies any argument through 28 | * verbatim. 29 | * 30 | * @param arg the argument (if any) to pass-through. 31 | * 32 | * @return the argument (if any), proxied verbatim. 33 | * 34 | * @throws Exception if an error occurs stopping the {@link com.yammer.metrics.core.Timer}. 35 | */ 36 | public T call(final T arg) throws Exception { 37 | timer.stop(); 38 | return arg; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/HBaseClientFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase; 2 | 3 | import io.dropwizard.jackson.Jackson; 4 | import com.google.common.io.Resources; 5 | import io.dropwizard.configuration.ConfigurationFactory; 6 | import io.dropwizard.util.Duration; 7 | import io.dropwizard.util.Size; 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import javax.validation.Validation; 12 | import javax.validation.Validator; 13 | import java.io.File; 14 | 15 | import static org.hamcrest.Matchers.*; 16 | import static org.junit.Assert.assertThat; 17 | 18 | /** 19 | * Tests {@link HBaseClientFactory}. 
20 | */ 21 | public class HBaseClientFactoryTest { 22 | 23 | private HBaseClientFactory factory; 24 | 25 | @Before 26 | public void setUp() throws Exception { 27 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 28 | factory = new ConfigurationFactory<>(HBaseClientFactory.class, validator, Jackson.newObjectMapper(), "dw") 29 | .build(new File(Resources.getResource("yaml/hbase.yml").getFile())); 30 | } 31 | 32 | @Test 33 | public void hasAFlushInterval() { 34 | assertThat("flush interval is 1 minute", 35 | factory.getFlushInterval(), is(Duration.minutes(1))); 36 | } 37 | 38 | @Test 39 | public void hasAnIncrementBufferSize() { 40 | assertThat("increment buffer size is 256KB", 41 | factory.getIncrementBufferSize(), is(Size.kilobytes(256))); 42 | } 43 | 44 | @Test 45 | public void hasAMaximumConcurrentRequests() { 46 | assertThat("maximum concurrent requests is 1000", 47 | factory.getMaxConcurrentRequests(), is(1000)); 48 | } 49 | 50 | @Test 51 | public void hasAConnectionTimeout() { 52 | assertThat("connection timeout is 10 seconds", 53 | factory.getConnectionTimeout(), is(Duration.seconds(10))); 54 | } 55 | 56 | @Test 57 | public void notInstrumentedWithMetrics() { 58 | assertThat("client is not instrumented with metrics", 59 | factory.isInstrumented(), is(false)); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/HBaseClientProxyTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase; 2 | 3 | import com.stumbleupon.async.Deferred; 4 | import io.dropwizard.util.Duration; 5 | import io.dropwizard.util.Size; 6 | import org.hbase.async.*; 7 | import org.jboss.netty.util.Timer; 8 | import org.junit.Test; 9 | import org.junit.Before; 10 | import org.junit.runner.RunWith; 11 | import org.powermock.core.classloader.annotations.PrepareForTest; 12 | import org.powermock.modules.junit4.PowerMockRunner; 13 | 14 | import java.util.ArrayList; 15 | 16 | import static org.mockito.Mockito.*; 17 | import static org.hamcrest.Matchers.is; 18 | import static org.junit.Assert.assertThat; 19 | 20 | /** 21 | * Tests {@link HBaseClientProxy} 22 | */ 23 | @RunWith(PowerMockRunner.class) 24 | @PrepareForTest({ 25 | org.hbase.async.HBaseClient.class, 26 | PutRequest.class, 27 | GetRequest.class, 28 | AtomicIncrementRequest.class, 29 | DeleteRequest.class, 30 | RowLockRequest.class, 31 | RowLock.class, 32 | ClientStats.class 33 | }) 34 | public class HBaseClientProxyTest { 35 | 36 | private HBaseClient client; 37 | private org.hbase.async.HBaseClient underlying; 38 | 39 | @Before 40 | public void setup() { 41 | underlying = mock(org.hbase.async.HBaseClient.class); 42 | client = new HBaseClientProxy(underlying); 43 | } 44 | 45 | @Test 46 | public void proxiesFlushInterval() { 47 | when(underlying.getFlushInterval()).thenReturn((short) 5000); 48 | 49 | assertThat("flush interval is proxied and boxed", 50 | client.getFlushInterval(), is(Duration.milliseconds(5000))); 51 | } 52 | 53 | @Test 54 | public void proxiesIncrementBufferSize() { 55 | when(underlying.getIncrementBufferSize()).thenReturn(10240); 56 | 57 | assertThat("increment buffer size is proxied and boxed", 58 | client.getIncrementBufferSize(), is(Size.bytes(10240))); 59 | } 60 | 61 | @Test 62 | public void setsFlushInterval() { 63 | when(underlying.setFlushInterval((short) 10000)).thenReturn((short) 5000); 64 | 65 | 
assertThat("flush interval begins with default", 66 | client.setFlushInterval(Duration.milliseconds(10000)), 67 | is(Duration.milliseconds(5000))); 68 | 69 | when(underlying.getFlushInterval()).thenReturn((short) 10000); 70 | 71 | assertThat("setting flush interval is unboxed and proxied", 72 | client.getFlushInterval(), is(Duration.milliseconds(10000))); 73 | } 74 | 75 | @Test 76 | public void setsIncrementBufferSize() { 77 | when(underlying.setIncrementBufferSize(2097152)).thenReturn(10240); 78 | 79 | assertThat("increment buffer size began with default", 80 | client.setIncrementBufferSize(Size.megabytes(2)), 81 | is(Size.bytes(10240))); 82 | 83 | when(underlying.getIncrementBufferSize()).thenReturn(2097152); 84 | assertThat("setting increment buffer size is unboxed and proxied", 85 | client.getIncrementBufferSize(), is(Size.bytes(2097152))); 86 | } 87 | 88 | @Test 89 | public void createsCell() { 90 | final PutRequest req = mock(PutRequest.class); 91 | final Deferred resp = new Deferred<>(); 92 | when(underlying.atomicCreate(req)).thenReturn(resp); 93 | 94 | assertThat("creates cell via proxy", client.create(req), is(resp)); 95 | } 96 | 97 | @Test 98 | public void buffersIncrement() { 99 | final AtomicIncrementRequest req = mock(AtomicIncrementRequest.class); 100 | final Deferred resp = new Deferred<>(); 101 | when(underlying.bufferAtomicIncrement(req)).thenReturn(resp); 102 | 103 | assertThat("buffers increment for a cell via proxy", 104 | client.bufferIncrement(req), is(resp)); 105 | } 106 | 107 | @Test 108 | public void increments() { 109 | final AtomicIncrementRequest req = mock(AtomicIncrementRequest.class); 110 | final Deferred resp = new Deferred<>(); 111 | when(underlying.atomicIncrement(req)).thenReturn(resp); 112 | 113 | assertThat("increments cell via proxy", 114 | client.increment(req), is(resp)); 115 | } 116 | 117 | @Test 118 | public void incrementsDurably() { 119 | final AtomicIncrementRequest req = mock(AtomicIncrementRequest.class); 120 | final Deferred resp = new Deferred<>(); 121 | when(underlying.atomicIncrement(req, true)).thenReturn(resp); 122 | 123 | assertThat("durably increments cell via proxy", 124 | client.increment(req, true), is(resp)); 125 | } 126 | 127 | @Test 128 | public void comparesAndSets() { 129 | final PutRequest req = mock(PutRequest.class); 130 | final byte[] expected = new byte[] { 0x0 }; 131 | final Deferred resp = new Deferred<>(); 132 | when(underlying.compareAndSet(req, expected)).thenReturn(resp); 133 | when(underlying.compareAndSet(req, new String(expected))).thenReturn(resp); 134 | 135 | assertThat("compares and sets bytes cell via proxy", 136 | client.compareAndSet(req, expected), is(resp)); 137 | 138 | assertThat("compares and sets String cell via proxy", 139 | client.compareAndSet(req, new String(expected)), is(resp)); 140 | } 141 | 142 | @Test 143 | public void deletes() { 144 | final DeleteRequest req = mock(DeleteRequest.class); 145 | final Deferred resp = new Deferred<>(); 146 | when(underlying.delete(req)).thenReturn(resp); 147 | 148 | assertThat("deletes cell via proxy", client.delete(req), is(resp)); 149 | } 150 | 151 | @Test 152 | public void ensuresTableExists() { 153 | final String table = "table"; 154 | final Deferred resp = new Deferred<>(); 155 | when(underlying.ensureTableExists(table)).thenReturn(resp); 156 | when(underlying.ensureTableExists(table.getBytes())).thenReturn(resp); 157 | 158 | assertThat("ensures table String exists via proxy", 159 | client.ensureTableExists(table), is(resp)); 160 | assertThat("ensures table 
bytes exists via proxy", 161 | client.ensureTableExists(table.getBytes()), is(resp)); 162 | } 163 | 164 | @Test 165 | public void ensuresTableAndFamilyExist() { 166 | final String table = "table"; 167 | final String family = "family"; 168 | final Deferred resp = new Deferred<>(); 169 | when(underlying.ensureTableFamilyExists(table, family)).thenReturn(resp); 170 | when(underlying.ensureTableFamilyExists(table.getBytes(), family.getBytes())) 171 | .thenReturn(resp); 172 | 173 | assertThat("ensures table and family Strings exist via proxy", 174 | client.ensureTableFamilyExists(table, family), is(resp)); 175 | assertThat("ensures table and family bytes exist via proxy", 176 | client.ensureTableFamilyExists(table.getBytes(), family.getBytes()), 177 | is(resp)); 178 | } 179 | 180 | @Test 181 | public void flushes() { 182 | final Deferred resp = new Deferred<>(); 183 | when(underlying.flush()).thenReturn(resp); 184 | 185 | assertThat("flushes via proxy", client.flush(), is(resp)); 186 | } 187 | 188 | @Test 189 | public void gets() { 190 | final GetRequest req = mock(GetRequest.class); 191 | final Deferred> resp = new Deferred<>(); 192 | when(underlying.get(req)).thenReturn(resp); 193 | 194 | assertThat("gets cell(s) via proxy", client.get(req), is(resp)); 195 | } 196 | 197 | @Test 198 | public void locksRow() { 199 | final RowLockRequest req = mock(RowLockRequest.class); 200 | final Deferred resp = new Deferred<>(); 201 | when(underlying.lockRow(req)).thenReturn(resp); 202 | 203 | assertThat("locks row via proxy", client.lockRow(req), is(resp)); 204 | } 205 | 206 | @Test 207 | public void puts() { 208 | final PutRequest req = mock(PutRequest.class); 209 | final Deferred resp = new Deferred<>(); 210 | when(underlying.put(req)).thenReturn(resp); 211 | 212 | assertThat("puts row(s) via proxy", client.put(req), is(resp)); 213 | } 214 | 215 | @Test 216 | public void shutsdown() { 217 | final Deferred resp = new Deferred<>(); 218 | when(underlying.shutdown()).thenReturn(resp); 219 | 220 | assertThat("shutsdown via proxy", client.shutdown(), is(resp)); 221 | } 222 | 223 | @Test 224 | public void clientStats() { 225 | final ClientStats stats = mock(ClientStats.class); 226 | when(underlying.stats()).thenReturn(stats); 227 | 228 | assertThat("gets client stats via proxy", client.stats(), is(stats)); 229 | } 230 | 231 | @Test 232 | public void timer() { 233 | final Timer timer = mock(Timer.class); 234 | when(underlying.getTimer()).thenReturn(timer); 235 | 236 | assertThat("gets underlying timer via proxy", 237 | client.getTimer(), is(timer)); 238 | } 239 | 240 | @Test 241 | public void unlocksRow() { 242 | final RowLock lock = mock(RowLock.class); 243 | final Deferred resp = new Deferred<>(); 244 | when(underlying.unlockRow(lock)).thenReturn(resp); 245 | 246 | assertThat("unlocks row(s) via proxy", client.unlockRow(lock), is(resp)); 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/util/PermitReleasingCallbackTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.util; 2 | 3 | import org.junit.Test; 4 | import static org.junit.Assert.assertThat; 5 | import static org.hamcrest.Matchers.is; 6 | 7 | import java.util.concurrent.Semaphore; 8 | 9 | /** 10 | * Tests {@link PermitReleasingCallback}. 
11 | */ 12 | public class PermitReleasingCallbackTest { 13 | 14 | @Test 15 | public void returnsArg() throws Exception { 16 | final String arg = "test"; 17 | assertThat("callback returns its argument", 18 | new PermitReleasingCallback(new Semaphore(1)).call(arg), 19 | is(arg)); 20 | } 21 | 22 | @Test 23 | public void releasesPermit() throws Exception { 24 | final Semaphore semaphore = new Semaphore(1); 25 | assertThat("semaphore begins with single permit", 26 | semaphore.availablePermits(), is(1)); 27 | semaphore.acquire(1); 28 | assertThat("semaphore has no available permits", 29 | semaphore.availablePermits(), is(0)); 30 | new PermitReleasingCallback<>(semaphore).call(new Object()); 31 | assertThat("callback releases a permit", 32 | semaphore.availablePermits(), is(1)); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/test/java/com/datasift/dropwizard/hbase/util/TimerStoppingCallbackTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.hbase.util; 2 | 3 | import com.codahale.metrics.Clock; 4 | import com.codahale.metrics.ExponentiallyDecayingReservoir; 5 | import com.codahale.metrics.MetricRegistry; 6 | import com.codahale.metrics.Timer; 7 | import org.junit.Before; 8 | import org.junit.Test; 9 | import static org.junit.Assert.assertThat; 10 | import static org.hamcrest.Matchers.*; 11 | 12 | /** 13 | * Tests {@link TimerStoppingCallback}. 14 | */ 15 | public class TimerStoppingCallbackTest { 16 | 17 | private MetricRegistry registry; 18 | private Timer timer; 19 | 20 | @Before 21 | public void setUp() throws Exception { 22 | this.registry = new MetricRegistry() { 23 | @Override 24 | public Timer timer(final String name) { 25 | return new Timer(new ExponentiallyDecayingReservoir(), new Clock() { 26 | 27 | private long val = 0; 28 | 29 | @Override 30 | public long getTick() { 31 | return val += 50; 32 | } 33 | }); 34 | } 35 | }; 36 | this.timer = registry.timer("test"); 37 | } 38 | 39 | @Test 40 | public void returnsArg() throws Exception { 41 | final String arg = "test"; 42 | final Timer.Context context = registry.timer("test").time(); 43 | assertThat("callback returns argument", 44 | new TimerStoppingCallback(context).call(arg), is(arg)); 45 | } 46 | 47 | @Test 48 | public void stopsTimer() throws Exception { 49 | final Timer.Context ctx = timer.time(); 50 | 51 | new TimerStoppingCallback<>(ctx).call(new Object()); 52 | 53 | assertThat("timer has 1 timed value", timer.getCount(), is(1L)); 54 | 55 | assertThat("timer recorded duration of call", timer.getSnapshot().getMax(), is(50L)); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /dropwizard-extra-hbase/src/test/resources/yaml/hbase.yml: -------------------------------------------------------------------------------- 1 | flushInterval: 1m 2 | incrementBufferSize: 256KB 3 | maxConcurrentRequests: 1000 4 | connectionTimeout: 10s 5 | instrumented: no 6 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | com.datasift.dropwizard 7 | dropwizard-extra 8 | 0.7.1-2-SNAPSHOT 9 | ../pom.xml 10 | 11 | 12 | dropwizard-extra-kafka 13 | 14 | Dropwizard Extra Kafka 15 | http://datasift.github.com/dropwizard-extra/dropwizard-extra-kafka 16 | 17 | Dropwizard integration for working 
with Kafka. 18 | 19 | 20 | 21 | 0.8.1.1 22 | 2.10 23 | 24 | 25 | 26 | 27 | 28 | com.fasterxml.jackson.core 29 | jackson-databind 30 | 2.3.4 31 | 32 | 33 | 34 | 35 | 36 | 37 | io.dropwizard 38 | dropwizard-core 39 | 40 | 41 | com.datasift.dropwizard 42 | dropwizard-extra-zookeeper 43 | ${project.version} 44 | 45 | 46 | org.apache.kafka 47 | kafka_${scala.version} 48 | ${kafka.version} 49 | 50 | 51 | com.sun.jmx 52 | jmxri 53 | 54 | 55 | javax.jms 56 | jms 57 | 58 | 59 | com.sun.jdmk 60 | jmxtools 61 | 62 | 63 | log4j 64 | log4j 65 | 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/KafkaClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import io.dropwizard.util.Duration; 4 | import com.fasterxml.jackson.annotation.JsonProperty; 5 | 6 | import javax.validation.constraints.NotNull; 7 | 8 | /** 9 | * Base configuration for Kafka clients. 10 | * 11 | * @see com.datasift.dropwizard.kafka.KafkaConsumerFactory 12 | * @see KafkaProducerFactory 13 | */ 14 | abstract public class KafkaClientFactory { 15 | 16 | @NotNull 17 | protected Duration socketTimeout = Duration.seconds(30); 18 | 19 | /** 20 | * Returns the time to wait on a network socket before timing out requests. 21 | * 22 | * @return the time to wait on a network socket before timing out requests. 23 | */ 24 | @JsonProperty 25 | public Duration getSocketTimeout() { 26 | return socketTimeout; 27 | } 28 | 29 | /** 30 | * Sets the time to wait on a network socket before timing out requests. 31 | * 32 | * @param socketTimeout the time to wait on a network socket before timing out requests. 33 | */ 34 | @JsonProperty 35 | public void setSocketTimeout(final Duration socketTimeout) { 36 | this.socketTimeout = socketTimeout; 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/KafkaConsumer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | /** 4 | * Interface for consuming a stream of messages from Kafka. 5 | */ 6 | public interface KafkaConsumer { 7 | 8 | /** 9 | * Commit the offsets of the current position in the message streams. 10 | * 11 | * @see kafka.consumer.ConsumerConnector#commitOffsets() 12 | */ 13 | public void commitOffsets(); 14 | 15 | /** 16 | * Determines if this {@link KafkaConsumer} is currently consuming. 17 | * 18 | * @return true if this {@link KafkaConsumer} is currently consuming from at least one 19 | * partition; otherwise, false. 20 | */ 21 | public boolean isRunning(); 22 | } 23 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/KafkaConsumerHealthCheck.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import com.codahale.metrics.health.HealthCheck; 4 | 5 | /** 6 | * A {@link HealthCheck} to monitor the health of a {@link KafkaConsumer}. 7 | */ 8 | public class KafkaConsumerHealthCheck extends HealthCheck { 9 | 10 | private final KafkaConsumer consumer; 11 | 12 | /** 13 | * Create a new {@link HealthCheck} instance with the given name. 
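 *
 * <p>Illustrative registration sketch (not part of the original source; {@code environment} and
 * {@code consumer} are assumed to already exist):
 * <pre>{@code
 *   environment.healthChecks().register("kafka-consumer", new KafkaConsumerHealthCheck(consumer));
 * }</pre>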
14 | * 15 | * @param consumer the {@link KafkaConsumer} to monitor the health of. 16 | */ 17 | public KafkaConsumerHealthCheck(final KafkaConsumer consumer) { 18 | this.consumer = consumer; 19 | } 20 | 21 | /** 22 | * Checks that the {@link KafkaConsumer} is still in its running state. 23 | * 24 | * @return true if the {@link KafkaConsumer} is still running properly; false if it is not. 25 | * 26 | * @throws Exception if there is an error checking the state of the {@link KafkaConsumer}. 27 | */ 28 | @Override 29 | protected Result check() throws Exception { 30 | return consumer.isRunning() 31 | ? Result.healthy() 32 | : Result.unhealthy("Consumer not consuming any partitions"); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import com.codahale.metrics.Metric; 4 | import com.codahale.metrics.Timer; 5 | import kafka.message.MessageAndMetadata; 6 | 7 | /** 8 | * Processes messages of type {@code T} from a Kafka message stream. 9 | *
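 *
 * <p>Minimal sketch of a concrete processor (illustrative; the {@code String} key/value types and
 * the body are assumptions, not part of the original source):
 * <pre>{@code
 *   public class LoggingProcessor extends MessageProcessor<String, String> {
 *       public void process(final String key, final String message,
 *                           final String topic, final int partition, final long offset) {
 *           System.out.printf("%s[%d]@%d: %s%n", topic, partition, offset, message);
 *       }
 *   }
 * }</pre>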

10 | * This {@link StreamProcessor} is instrumented with {@link Metric}s; specifically, a {@link Timer} 11 | * that tracks the time taken to process each message in the stream. 12 | * 13 | * @param the decoded type of the key for each message being processed 14 | * @param the decoded type of the message to process 15 | */ 16 | public abstract class MessageProcessor implements StreamProcessor { 17 | // 18 | // /** 19 | // * {@link Timer} for the processing of each message in the stream. 20 | // */ 21 | // private final Timer processed; 22 | // 23 | // /** 24 | // * Creates a MessageProcessor; registers {@link Metric}s with the given {@link MetricRegistry}. 25 | // * 26 | // * @param registry the {@link MetricRegistry} to register metrics with. 27 | // * @param name the name to use for metrics of this processor. 28 | // */ 29 | // public MessageProcessor(final MetricRegistry registry, final String name) { 30 | // processed = registry.timer(MetricRegistry.name(name, "processed")); 31 | // } 32 | 33 | /** 34 | * Processes a {@code message} of type {@code T}. 35 | * 36 | * @param key the key of the message to process. 37 | * @param message the message to process. 38 | * @param topic the topic the entry belongs to. 39 | * @param partition the partition of the topic the entry is contained in. 40 | * @param offset the offset of the message within the partition of the topic. 41 | */ 42 | abstract public void process(K key, V message, String topic, int partition, long offset); 43 | 44 | /** 45 | * Processes a {@link Iterable} by iteratively processing each message. 46 | * 47 | * @param stream the stream of messages to process. 48 | * @param topic the topic the {@code stream} belongs to. 49 | * 50 | * @see StreamProcessor#process(Iterable, String) 51 | */ 52 | public void process(final Iterable> stream, final String topic) { 53 | for (final MessageAndMetadata entry : stream) { 54 | // final Timer.Context context = processed.time(); 55 | process(entry.key(), entry.message(), topic, entry.partition(), entry.offset()); 56 | // context.stop(); 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import kafka.message.MessageAndMetadata; 4 | 5 | /** 6 | * Processes an {@link Iterable} of messages of type {@code T}. 7 | *

8 | * If you wish to process each message individually and iteratively, it's advised that you instead 9 | * use a {@link MessageProcessor}, as it provides a higher level of abstraction. 10 |

11 | * Note: since consumers may use multiple threads, it is important that implementations are 12 | * thread-safe. 13 | */ 14 | public interface StreamProcessor { 15 | 16 | /** 17 | * Process an {@link Iterable} of messages of type T. 18 | * 19 | * @param stream the stream of messages to process. 20 | * @param topic the topic the {@code stream} belongs to. 21 | */ 22 | public void process(Iterable> stream, String topic); 23 | } 24 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import io.dropwizard.lifecycle.Managed; 4 | import io.dropwizard.lifecycle.ServerLifecycleListener; 5 | import io.dropwizard.util.Duration; 6 | import kafka.consumer.KafkaStream; 7 | import kafka.javaapi.consumer.ConsumerConnector; 8 | import kafka.serializer.Decoder; 9 | import org.eclipse.jetty.server.Server; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | 13 | import java.util.List; 14 | import java.util.Map; 15 | import java.util.Set; 16 | import java.util.concurrent.ExecutorService; 17 | import java.util.concurrent.ScheduledExecutorService; 18 | import java.util.concurrent.TimeUnit; 19 | 20 | /** 21 | * A {@link KafkaConsumer} that processes messages synchronously using an {@link ExecutorService}. 22 | */ 23 | public class SynchronousConsumer implements KafkaConsumer, Managed, ServerLifecycleListener { 24 | 25 | private final Logger LOG = LoggerFactory.getLogger(getClass()); 26 | 27 | private final ConsumerConnector connector; 28 | private final Map partitions; 29 | private final ScheduledExecutorService executor; 30 | private final Decoder keyDecoder; 31 | private final Decoder valueDecoder; 32 | private final StreamProcessor processor; 33 | private final Duration initialRecoveryDelay; 34 | private final Duration maxRecoveryDelay; 35 | private final Duration retryResetDelay; 36 | private final int maxRecoveryAttempts; 37 | private final boolean shutdownOnFatal; 38 | private final Duration startDelay; 39 | 40 | private Server server = null; 41 | private boolean fatalErrorOccurred = false; 42 | 43 | // a thread to asynchronously handle unrecoverable errors in the stream consumer 44 | private final Thread shutdownThread = new Thread("kafka-unrecoverable-error-handler"){ 45 | public void run() { 46 | while (true) { 47 | try { 48 | Thread.sleep(10000); 49 | } catch (final InterruptedException e) { 50 | // stop sleeping 51 | } 52 | if (fatalErrorOccurred) { 53 | try { 54 | if (shutdownOnFatal && server != null) { 55 | // shutdown the full service 56 | // note: shuts down the consumer as it's Managed by the Environment 57 | server.stop(); 58 | } else { 59 | // just shutdown the consumer 60 | SynchronousConsumer.this.stop(); 61 | } 62 | } catch (Exception e) { 63 | LOG.error("Error occurred while attempting emergency shut down.", e); 64 | } 65 | } 66 | } 67 | } 68 | }; 69 | 70 | /** 71 | * Creates a {@link SynchronousConsumer} to process a stream. 72 | * 73 | * @param connector the {@link ConsumerConnector} of the underlying consumer. 74 | * @param partitions a mapping of the topic -> partitions to consume. 75 | * @param keyDecoder a {@link Decoder} for decoding the key of each message before being processed. 76 | * @param valueDecoder a {@link Decoder} for decoding each message before being processed. 
77 | * @param processor a {@link StreamProcessor} for processing messages. 78 | * @param executor the {@link ExecutorService} to process the stream with. 79 | */ 80 | public SynchronousConsumer(final ConsumerConnector connector, 81 | final Map partitions, 82 | final Decoder keyDecoder, 83 | final Decoder valueDecoder, 84 | final StreamProcessor processor, 85 | final ScheduledExecutorService executor, 86 | final Duration initialRecoveryDelay, 87 | final Duration maxRecoveryDelay, 88 | final Duration retryResetDelay, 89 | final int maxRecoveryAttempts, 90 | final boolean shutdownOnFatal, 91 | final Duration startDelay) { 92 | this.connector = connector; 93 | this.partitions = partitions; 94 | this.keyDecoder = keyDecoder; 95 | this.valueDecoder = valueDecoder; 96 | this.processor = processor; 97 | this.executor = executor; 98 | this.initialRecoveryDelay = initialRecoveryDelay; 99 | this.maxRecoveryDelay = maxRecoveryDelay; 100 | this.retryResetDelay = retryResetDelay; 101 | this.maxRecoveryAttempts = maxRecoveryAttempts; 102 | this.shutdownOnFatal = shutdownOnFatal; 103 | this.startDelay = startDelay; 104 | 105 | shutdownThread.setDaemon(true); 106 | shutdownThread.start(); 107 | } 108 | 109 | /** 110 | * Commits the currently consumed offsets. 111 | */ 112 | public void commitOffsets() { 113 | connector.commitOffsets(); 114 | } 115 | 116 | @Override 117 | public void serverStarted(final Server server) { 118 | this.server = server; 119 | } 120 | 121 | /** 122 | * Starts this {@link SynchronousConsumer} immediately. 123 | *

124 | * The consumer will immediately begin consuming from the configured topics using the configured 125 | * {@link Decoder} to decode messages and {@link StreamProcessor} to process the decoded 126 | * messages. 127 | *

128 | * Each partition will be consumed using a separate thread. 129 | * 130 | * @throws Exception if an error occurs starting the consumer 131 | */ 132 | @Override 133 | public void start() throws Exception { 134 | final Set>>> streams = 135 | connector.createMessageStreams(partitions, keyDecoder, valueDecoder).entrySet(); 136 | 137 | for (final Map.Entry>> e : streams) { 138 | final String topic = e.getKey(); 139 | final List> messageStreams = e.getValue(); 140 | 141 | LOG.info("Consuming from topic '{}' with {} threads", topic, messageStreams.size()); 142 | 143 | for (final KafkaStream stream : messageStreams) { 144 | executor.schedule( 145 | new StreamProcessorRunnable(topic, stream), 146 | startDelay.getQuantity(), 147 | startDelay.getUnit()); 148 | } 149 | } 150 | } 151 | 152 | /** 153 | * Stops this {@link SynchronousConsumer} immediately. 154 | * 155 | * @throws Exception 156 | */ 157 | @Override 158 | public void stop() throws Exception { 159 | LOG.trace("Shutting down Connector"); 160 | connector.shutdown(); 161 | LOG.trace("Connector shut down"); 162 | } 163 | 164 | /** 165 | * Determines if this {@link KafkaConsumer} is currently consuming. 166 | * 167 | * @return true if this {@link KafkaConsumer} is currently consuming from at least one 168 | * partition; otherwise, false. 169 | */ 170 | public boolean isRunning() { 171 | return !executor.isShutdown() && !executor.isTerminated() && !fatalErrorOccurred; 172 | } 173 | 174 | private void fatalError() { 175 | this.fatalErrorOccurred = true; 176 | this.shutdownThread.interrupt(); 177 | } 178 | 179 | /** 180 | * A {@link Runnable} that processes a {@link KafkaStream}. 181 | * 182 | * The configured {@link StreamProcessor} is used to process the stream. 183 | */ 184 | private class StreamProcessorRunnable implements Runnable { 185 | 186 | private final KafkaStream stream; 187 | private final String topic; 188 | private int attempts = 0; 189 | private long lastErrorTimestamp = 0; 190 | 191 | /** 192 | * Creates a {@link StreamProcessorRunnable} for the given topic and stream. 193 | * 194 | * @param topic the topic the {@link KafkaStream} belongs to. 195 | * @param stream a stream of {@link kafka.message.Message}s in the topic. 196 | */ 197 | public StreamProcessorRunnable(final String topic, final KafkaStream stream) { 198 | this.topic = topic; 199 | this.stream = stream; 200 | } 201 | 202 | /** 203 | * Process the stream using the configured {@link StreamProcessor}. 204 | *

205 | * If an {@link Exception} is thrown during processing and it is deemed recoverable, 206 | * the stream will continue to be consumed. 207 |
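 *
 * <p>Back-off sketch (derived from the recoverableError logic below): the delay before each
 * restart is min(maxRecoveryDelay, initialRecoveryDelay * 2^attempts). For example, assuming an
 * initial recovery delay of 1 second and a maximum of 30 seconds, successive restarts wait
 * roughly 2s, 4s, 8s and 16s before being capped at 30s; the attempt counter is reset once
 * retryResetDelay elapses without a failure.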

208 | * Unrecoverable {@link Exception}s will cause the consumer to shut down completely. 209 | */ 210 | @Override 211 | public void run() { 212 | try { 213 | processor.process(stream, topic); 214 | } catch (final IllegalStateException e) { 215 | error(e); 216 | } catch (final Exception e) { 217 | recoverableError(e); 218 | } catch (final Throwable e) { 219 | error(e); 220 | } 221 | } 222 | 223 | private void recoverableError(final Exception e) { 224 | LOG.warn("Error processing stream, restarting stream consumer ({} attempts remaining): {}", 225 | maxRecoveryAttempts - attempts, e.toString()); 226 | 227 | // reset attempts if there hasn't been a failure in a while 228 | if (System.currentTimeMillis() - lastErrorTimestamp >= retryResetDelay.toMilliseconds()) { 229 | attempts = 0; 230 | } 231 | 232 | // if a ceiling has been set on the number of retries, check if we have reached the ceiling 233 | attempts++; 234 | if (maxRecoveryAttempts > -1 && attempts >= maxRecoveryAttempts) { 235 | LOG.warn("Failed to restart consumer after {} retries", maxRecoveryAttempts); 236 | error(e); 237 | } else { 238 | try { 239 | final long sleepTime = Math.min( 240 | maxRecoveryDelay.toMilliseconds(), 241 | (long) (initialRecoveryDelay.toMilliseconds() * Math.pow( 2, attempts))); 242 | 243 | Thread.sleep(sleepTime); 244 | } catch(final InterruptedException ie){ 245 | LOG.warn("Error recovery grace period interrupted.", ie); 246 | } 247 | lastErrorTimestamp = System.currentTimeMillis(); 248 | if (!executor.isShutdown()) { 249 | executor.execute(this); 250 | } 251 | } 252 | } 253 | 254 | private void error(final Throwable e) { 255 | LOG.error("Unrecoverable error processing stream, shutting down", e); 256 | fatalError(); 257 | } 258 | } 259 | } 260 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/producer/InstrumentedProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import com.codahale.metrics.Meter; 4 | import com.codahale.metrics.MetricRegistry; 5 | import kafka.javaapi.producer.Producer; 6 | import kafka.producer.KeyedMessage; 7 | 8 | import java.util.List; 9 | 10 | /** 11 | * A {@link Producer} that is instrumented with metrics. 
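 *
 * <p>Illustrative decoration sketch (not part of the original source; {@code rawProducer},
 * {@code environment} and the metric name are assumptions):
 * <pre>{@code
 *   KafkaProducer<String, String> producer = new ProxyProducer<>(rawProducer);
 *   KafkaProducer<String, String> instrumented =
 *       new InstrumentedProducer<>(producer, environment.metrics(), "kafka-producer");
 *   instrumented.send("my-topic", "a message");
 * }</pre>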
12 | */ 13 | public class InstrumentedProducer implements KafkaProducer { 14 | 15 | private final KafkaProducer underlying; 16 | private final Meter sentMessages; 17 | 18 | public InstrumentedProducer(final KafkaProducer underlying, 19 | final MetricRegistry registry, 20 | final String name) { 21 | this.underlying = underlying; 22 | this.sentMessages = registry.meter(MetricRegistry.name(name, "sent")); 23 | } 24 | 25 | public void send(final String topic, final V message) { 26 | underlying.send(topic, message); 27 | sentMessages.mark(); 28 | } 29 | 30 | public void send(final String topic, final K key, final V message) { 31 | underlying.send(topic, key, message); 32 | sentMessages.mark(); 33 | } 34 | 35 | public void send(final KeyedMessage message) { 36 | underlying.send(message); 37 | sentMessages.mark(); 38 | } 39 | 40 | public void send(final List> messages) { 41 | underlying.send(messages); 42 | sentMessages.mark(messages.size()); 43 | } 44 | 45 | public void close() { 46 | underlying.close(); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/producer/KafkaProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import kafka.javaapi.producer.Producer; 4 | import kafka.producer.KeyedMessage; 5 | 6 | import java.util.List; 7 | 8 | /** 9 | * Interface for {@link Producer} proxies. 10 | */ 11 | public interface KafkaProducer { 12 | 13 | void send(String topic, V message); 14 | 15 | void send(String topic, K key, V message); 16 | 17 | void send(KeyedMessage message); 18 | 19 | void send(List> messages); 20 | 21 | void close(); 22 | } 23 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/producer/ManagedProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import io.dropwizard.lifecycle.Managed; 4 | import kafka.javaapi.producer.Producer; 5 | 6 | /** 7 | * Manages a Kafka {@link Producer} as part of the application lifecycle.. 
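 *
 * <p>Illustrative lifecycle wiring (a sketch, not part of the original source; {@code environment}
 * and {@code producer} are assumed to exist):
 * <pre>{@code
 *   environment.lifecycle().manage(new ManagedProducer(producer));
 * }</pre>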
8 | */ 9 | public class ManagedProducer implements Managed { 10 | 11 | private final KafkaProducer producer; 12 | 13 | public ManagedProducer(final KafkaProducer producer) { 14 | this.producer = producer; 15 | } 16 | 17 | @Override 18 | public void start() throws Exception { 19 | // nothing to do, already started 20 | } 21 | 22 | @Override 23 | public void stop() throws Exception { 24 | producer.close(); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/producer/ProxyProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import kafka.javaapi.producer.Producer; 4 | import kafka.producer.KeyedMessage; 5 | 6 | import java.util.List; 7 | 8 | public class ProxyProducer implements KafkaProducer { 9 | 10 | private final Producer producer; 11 | 12 | public ProxyProducer(final Producer producer) { 13 | this.producer = producer; 14 | } 15 | 16 | @Override 17 | public void send(final String topic, final V message) { 18 | producer.send(new KeyedMessage(topic, message)); 19 | } 20 | 21 | @Override 22 | public void send(final String topic, final K key, final V message) { 23 | producer.send(new KeyedMessage<>(topic, key, message)); 24 | } 25 | 26 | @Override 27 | public void send(final KeyedMessage data) { 28 | producer.send(data); 29 | } 30 | 31 | @Override 32 | public void send(final List> data) { 33 | producer.send(data); 34 | } 35 | 36 | @Override 37 | public void close() { 38 | producer.close(); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/serializer/JacksonDecoder.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.serializer; 2 | 3 | import com.fasterxml.jackson.core.JsonLocation; 4 | import com.fasterxml.jackson.core.JsonParseException; 5 | import com.fasterxml.jackson.databind.ObjectMapper; 6 | import kafka.serializer.Decoder; 7 | 8 | import java.io.IOException; 9 | import java.nio.ByteBuffer; 10 | 11 | /** 12 | * A Kafka {@link Decoder} for decoding an arbitrary type from a JSON blob. 
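 *
 * <p>Hedged usage sketch (the target type {@code Event}, the mapper and the input bytes are
 * assumptions, not part of the original source):
 * <pre>{@code
 *   JacksonDecoder<Event> decoder = new JacksonDecoder<>(objectMapper, Event.class);
 *   Event event = decoder.fromBytes(jsonBytes);
 * }</pre>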
13 | */ 14 | public class JacksonDecoder implements Decoder { 15 | 16 | private final Class clazz; 17 | private final ObjectMapper mapper; 18 | 19 | public JacksonDecoder(final ObjectMapper mapper, final Class clazz) { 20 | this.mapper = mapper; 21 | this.clazz = clazz; 22 | } 23 | 24 | @Override 25 | public T fromBytes(final byte[] bytes) { 26 | try { 27 | try { 28 | return mapper.readValue(bytes, clazz); 29 | } catch (final JsonParseException ex) { 30 | final JsonLocation location = ex.getLocation(); 31 | Object src = location.getSourceRef(); 32 | if (src instanceof ByteBuffer) { 33 | src = ((ByteBuffer) src).asCharBuffer(); 34 | } else if (src instanceof byte[]) { 35 | src = new String((byte[]) src); 36 | } else if (src instanceof char[]) { 37 | src = new String((char[]) src); 38 | } 39 | throw new JsonParseException( 40 | ex.getMessage(), 41 | new JsonLocation( 42 | src, 43 | location.getByteOffset(), 44 | location.getCharOffset(), 45 | location.getLineNr(), 46 | location.getColumnNr()), 47 | ex.getCause()); 48 | } 49 | } catch (final IOException ex) { 50 | throw new RuntimeException(ex); 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/serializer/JacksonEncoder.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.serializer; 2 | 3 | import com.fasterxml.jackson.core.JsonProcessingException; 4 | import com.fasterxml.jackson.databind.ObjectMapper; 5 | import kafka.serializer.Encoder; 6 | 7 | /** 8 | * A Kafka {@link Encoder} for encoding an arbitrary type to a JSON blob. 9 | */ 10 | public class JacksonEncoder implements Encoder { 11 | 12 | private final ObjectMapper mapper; 13 | 14 | public JacksonEncoder(final ObjectMapper mapper) { 15 | this.mapper = mapper; 16 | } 17 | 18 | @Override 19 | public byte[] toBytes(final T msg) { 20 | try { 21 | return mapper.writeValueAsBytes(msg); 22 | } catch (final JsonProcessingException ex) { 23 | throw new RuntimeException(ex); 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.util; 2 | 3 | import com.fasterxml.jackson.annotation.JsonCreator; 4 | import kafka.message.*; 5 | import kafka.message.DefaultCompressionCodec; 6 | import kafka.message.GZIPCompressionCodec; 7 | import kafka.message.NoCompressionCodec; 8 | 9 | /** 10 | * A utility for parsing {@link CompressionCodec}s from a {@link 11 | * io.dropwizard.Configuration}. 12 | *

13 | * To create {@link Compression} instances, use {@link Compression#parse(String)} to parse an 14 | * instance from a {@link String}. 15 | *

16 | * This is provided to parse textual specifications of a {@link CompressionCodec}, for example in a 17 | * {@link io.dropwizard.Configuration}. 18 | */ 19 | public class Compression { 20 | 21 | private final CompressionCodec codec; 22 | 23 | /** 24 | * Creates a {@link Compression} instance for the given codec type. 25 | *

26 | * The valid codec values are defined by {@link CompressionCodec}. 27 | *

28 | * To create {@link Compression} instances, use the {@link Compression#parse(String)} factory 29 | * method to parse an instance from a {@link String}. 30 | * 31 | * @param codec the codec to use, as an integer index. 32 | * 33 | * @see Compression#parse(String) 34 | */ 35 | private Compression(final int codec) { 36 | this.codec = CompressionCodec$.MODULE$.getCompressionCodec(codec); 37 | } 38 | 39 | /** 40 | * Gets the {@link CompressionCodec} instance for this {@link Compression}. 41 | * 42 | * @return the {@link CompressionCodec} instance for this {@link Compression} 43 | */ 44 | public CompressionCodec getCodec() { 45 | return codec; 46 | } 47 | 48 | /** 49 | * Parses a String representation of a {@link CompressionCodec}. 50 | * 51 | * @param codec the name of the {@link CompressionCodec} to parse. 52 | * 53 | * @return a {@link Compression} instance for the codec. 54 | * 55 | * @throws IllegalArgumentException if codec is not a valid {@link CompressionCodec}. 56 | */ 57 | @JsonCreator 58 | public static Compression parse(final String codec) { 59 | if ("gzip".equals(codec) || "gz".equals(codec)) { 60 | return new Compression(GZIPCompressionCodec.codec()); 61 | } else if ("snappy".equals(codec)) { 62 | return new Compression(SnappyCompressionCodec.codec()); 63 | } else if ("none".equals(codec) || "no".equals(codec) || "false".equals(codec)) { 64 | return new Compression(NoCompressionCodec.codec()); 65 | } else if ("default".equals(codec) 66 | || "yes".equals(codec) 67 | || "null".equals(codec) 68 | || codec == null) 69 | { 70 | return new Compression(DefaultCompressionCodec.codec()); 71 | } else { 72 | throw new IllegalArgumentException("Invalid Compression: " + codec); 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/test/java/com/datasift/dropwizard/kafka/KafkaConsumerFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import io.dropwizard.jackson.Jackson; 4 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 5 | import com.google.common.io.Resources; 6 | import io.dropwizard.configuration.ConfigurationFactory; 7 | import io.dropwizard.util.Duration; 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import javax.validation.Validation; 12 | import javax.validation.Validator; 13 | import java.io.File; 14 | 15 | import static org.hamcrest.Matchers.*; 16 | import static org.junit.Assert.assertThat; 17 | 18 | /** 19 | * Tests {@link KafkaConsumerFactory}. 
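A brief aside on the `Compression` utility above before the factory tests: because `parse(String)` carries `@JsonCreator`, Jackson binds a bare string in configuration (for example `compression: snappy`) directly to a `Compression` value, and the same factory method can be called programmatically. A small sketch:

```java
import com.datasift.dropwizard.kafka.util.Compression;
import kafka.message.CompressionCodec;

// Direct use of the @JsonCreator factory method from Compression above.
public class CompressionParseExample {
    public static void main(final String[] args) {
        // accepted spellings: "gzip"/"gz", "snappy", "none"/"no"/"false",
        // and "default"/"yes"/"null" (or a missing value) for the default codec
        final CompressionCodec codec = Compression.parse("snappy").getCodec();
        System.out.println(codec);  // prints the snappy codec singleton
    }
}
```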
20 | */ 21 | public class KafkaConsumerFactoryTest { 22 | 23 | private KafkaConsumerFactory factory = null; 24 | 25 | @Before 26 | public void setup() throws Exception { 27 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 28 | factory = new ConfigurationFactory<>(KafkaConsumerFactory.class, validator, Jackson.newObjectMapper(), "dw") 29 | .build(new File(Resources.getResource("yaml/consumer.yaml").toURI())); 30 | } 31 | 32 | @Test 33 | public void testZooKeeper() { 34 | assertThat("has ZooKeeperConfiguration", 35 | factory.getZookeeper(), 36 | instanceOf(ZooKeeperFactory.class)); 37 | } 38 | 39 | @Test 40 | public void testGroup() { 41 | assertThat("group is correctly configured", factory.getGroup(), is("test")); 42 | } 43 | 44 | @Test 45 | public void testPartitions() { 46 | assertThat("has correct partition configuration", 47 | factory.getPartitions(), 48 | allOf(hasEntry("foo", 1), hasEntry("bar", 2))); 49 | } 50 | 51 | @Test 52 | public void testRebalanceRetries() { 53 | assertThat("rebalanceRetries is overridden to 5", 54 | factory.getRebalanceRetries(), 55 | is(5)); 56 | } 57 | 58 | @Test 59 | public void testInitialOffset() { 60 | final KafkaConsumerFactory factory = new KafkaConsumerFactory(); 61 | assertThat("auto.offset.reset defaults to 'largest'", 62 | KafkaConsumerFactory.toConsumerConfig(factory).autoOffsetReset(), 63 | equalTo("largest")); 64 | factory.setInitialOffset(KafkaConsumerFactory.InitialOffset.SMALLEST); 65 | assertThat("auto.offset.reset changed to 'smallest'", 66 | KafkaConsumerFactory.toConsumerConfig(factory).autoOffsetReset(), 67 | equalTo("smallest")); 68 | } 69 | 70 | @Test 71 | public void testRetryResetDelay() { 72 | assertThat("retryResetDelay is overridden to 3 seconds", 73 | factory.getRetryResetDelay(), 74 | is(Duration.seconds(3))); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/test/java/com/datasift/dropwizard/kafka/KafkaProducerFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import com.google.common.io.Resources; 4 | import io.dropwizard.configuration.ConfigurationFactory; 5 | import io.dropwizard.jackson.Jackson; 6 | 7 | import kafka.producer.ProducerConfig; 8 | import kafka.serializer.DefaultEncoder; 9 | import org.junit.Before; 10 | import org.junit.Test; 11 | 12 | import javax.validation.Validation; 13 | import javax.validation.Validator; 14 | import java.io.File; 15 | 16 | import static org.hamcrest.Matchers.*; 17 | import static org.junit.Assert.assertThat; 18 | import static com.datasift.dropwizard.kafka.KafkaProducerFactory.DEFAULT_BROKER_PORT; 19 | 20 | /** 21 | * TODO: Document 22 | */ 23 | public class KafkaProducerFactoryTest { 24 | 25 | private KafkaProducerFactory factory; 26 | private ProducerConfig config; 27 | 28 | @Before 29 | public void setup() throws Exception { 30 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 31 | factory = new ConfigurationFactory<>(KafkaProducerFactory.class, validator, Jackson.newObjectMapper(), "dw") 32 | .build(new File(Resources.getResource("yaml/producer.yaml").toURI())); 33 | config = KafkaProducerFactory.toProducerConfig(factory, DefaultEncoder.class, null, null, "test"); 34 | } 35 | 36 | @Test 37 | public void testExplicitBrokers() { 38 | assertThat("explcitly defined brokers are correctly parsed", 39 | config.brokerList(), 40 | 
equalTo("localhost:4321,192.168.10.12:123,localhost:" 41 | + DEFAULT_BROKER_PORT + ",192.168.4.21:" + DEFAULT_BROKER_PORT)); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/test/java/com/datasift/dropwizard/kafka/util/CompressionTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.util; 2 | 3 | import kafka.message.*; 4 | import org.junit.Test; 5 | 6 | import static org.junit.Assert.*; 7 | import static org.hamcrest.Matchers.*; 8 | 9 | /** 10 | * Tests {@link Compression} 11 | */ 12 | public class CompressionTest { 13 | 14 | @Test 15 | public void parsesGZIP() { 16 | assertCompression(GZIPCompressionCodec$.MODULE$, "gz"); 17 | assertCompression(GZIPCompressionCodec$.MODULE$, "gzip"); 18 | } 19 | 20 | @Test 21 | public void parseNoCodec() { 22 | assertCompression(NoCompressionCodec$.MODULE$, "none"); 23 | assertCompression(NoCompressionCodec$.MODULE$, "no"); 24 | assertCompression(NoCompressionCodec$.MODULE$, "false"); 25 | } 26 | 27 | @Test 28 | public void parseDefaultCodec() { 29 | assertCompression(GZIPCompressionCodec$.MODULE$, "default"); 30 | assertCompression(GZIPCompressionCodec$.MODULE$, "yes"); 31 | assertCompression(GZIPCompressionCodec$.MODULE$, "null"); 32 | assertCompression(GZIPCompressionCodec$.MODULE$, null); 33 | } 34 | 35 | private void assertCompression(final CompressionCodec expected, final String value) { 36 | assertThat(String.format("'%s' parses as %s", value, expected.getClass().getSimpleName()), 37 | Compression.parse(value).getCodec(), 38 | is(expected)); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/test/resources/yaml/consumer.yaml: -------------------------------------------------------------------------------- 1 | zookeeper: 2 | hosts: 3 | - localhost 4 | - fake.remotehost 5 | port: 2181 6 | group: test 7 | partitions: 8 | foo: 1 9 | bar: 2 10 | rebalanceRetries: 5 11 | retryResetDelay: 3 seconds 12 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka/src/test/resources/yaml/producer.yaml: -------------------------------------------------------------------------------- 1 | brokers: 2 | - "localhost:4321" 3 | - "192.168.10.12:123" 4 | - "localhost" 5 | - "192.168.4.21" 6 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | com.datasift.dropwizard 7 | dropwizard-extra 8 | 0.7.1-2-SNAPSHOT 9 | ../pom.xml 10 | 11 | 12 | dropwizard-extra-kafka7 13 | 14 | Dropwizard Extra Kafka (0.7) 15 | http://datasift.github.com/dropwizard-extra/dropwizard-extra-kafka7 16 | 17 | Dropwizard integration for working with Kafka. 
18 | 19 | 20 | 21 | 0.7.2 22 | 2.9.1 23 | 24 | 25 | 26 | 27 | 28 | com.fasterxml.jackson.core 29 | jackson-databind 30 | 2.3.4 31 | 32 | 33 | 34 | 35 | 36 | 37 | io.dropwizard 38 | dropwizard-core 39 | 40 | 41 | com.datasift.dropwizard 42 | dropwizard-extra-zookeeper 43 | ${project.version} 44 | 45 | 46 | org.scala-lang 47 | scala-library 48 | ${scala.version} 49 | 50 | 51 | kafka 52 | core-kafka_${scala.version} 53 | ${kafka.version} 54 | 55 | 56 | com.sun.jmx 57 | jmxri 58 | 59 | 60 | javax.jms 61 | jms 62 | 63 | 64 | com.sun.jdmk 65 | jmxtools 66 | 67 | 68 | log4j 69 | log4j 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/KafkaClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import io.dropwizard.util.Duration; 4 | import com.fasterxml.jackson.annotation.JsonProperty; 5 | 6 | import javax.validation.constraints.NotNull; 7 | 8 | /** 9 | * Base configuration for Kafka clients. 10 | * 11 | * @see com.datasift.dropwizard.kafka.KafkaConsumerFactory 12 | * @see KafkaProducerFactory 13 | */ 14 | abstract public class KafkaClientFactory { 15 | 16 | @NotNull 17 | protected Duration socketTimeout = Duration.seconds(30); 18 | 19 | /** 20 | * Returns the time to wait on a network socket before timing out requests. 21 | * 22 | * @return the time to wait on a network socket before timing out requests. 23 | */ 24 | @JsonProperty 25 | public Duration getSocketTimeout() { 26 | return socketTimeout; 27 | } 28 | 29 | /** 30 | * Sets the time to wait on a network socket before timing out requests. 31 | * 32 | * @param socketTimeout the time to wait on a network socket before timing out requests. 
33 | */ 34 | @JsonProperty 35 | public void setSocketTimeout(final Duration socketTimeout) { 36 | this.socketTimeout = socketTimeout; 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/KafkaProducerFactory.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import com.datasift.dropwizard.kafka.producer.InstrumentedProducer; 4 | import com.datasift.dropwizard.kafka.producer.KafkaProducer; 5 | import com.datasift.dropwizard.kafka.producer.ManagedProducer; 6 | import com.datasift.dropwizard.kafka.producer.ProxyProducer; 7 | import com.datasift.dropwizard.kafka.util.Compression; 8 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 9 | import com.fasterxml.jackson.annotation.JsonIgnore; 10 | import com.fasterxml.jackson.annotation.JsonProperty; 11 | import com.google.common.base.Joiner; 12 | import com.google.common.base.Optional; 13 | import com.google.common.collect.ImmutableMap; 14 | import com.google.common.collect.ImmutableSet; 15 | import io.dropwizard.setup.Environment; 16 | import io.dropwizard.util.Duration; 17 | import io.dropwizard.util.Size; 18 | import io.dropwizard.validation.ValidationMethod; 19 | import kafka.javaapi.producer.Producer; 20 | import kafka.producer.Partitioner; 21 | import kafka.producer.ProducerConfig; 22 | import kafka.serializer.Encoder; 23 | 24 | import javax.validation.Valid; 25 | import javax.validation.constraints.Min; 26 | import java.net.InetSocketAddress; 27 | import java.util.Map; 28 | import java.util.Properties; 29 | 30 | public class KafkaProducerFactory extends KafkaClientFactory { 31 | 32 | static final int DEFAULT_BROKER_PORT = 9092; 33 | 34 | @Valid 35 | private Optional zookeeper = Optional.absent(); 36 | 37 | @Valid 38 | private ImmutableMap brokers = ImmutableMap.of(); 39 | 40 | private boolean async = false; 41 | 42 | private Size sendBufferSize = Size.kilobytes(100); 43 | 44 | private Duration connectionTimeout = Duration.seconds(5); 45 | 46 | @Min(1) 47 | private long reconnectInterval = 30000; 48 | 49 | private Optional reconnectTimeInterval = Optional.of(Duration.seconds(10000)); 50 | 51 | private Size maxMessageSize = Size.megabytes(1); 52 | 53 | private Compression compression = Compression.parse("none"); 54 | 55 | private ImmutableSet compressedTopics = ImmutableSet.of(); 56 | 57 | private int zookeeperReadRetries = 3; 58 | 59 | private Duration queueTime = Duration.seconds(5); 60 | 61 | private long queueSize = 10000; 62 | 63 | private long batchSize = 200; 64 | 65 | @JsonIgnore 66 | @ValidationMethod(message = "only one of 'zookeeper' and 'brokers' may be set") 67 | public boolean isOneDiscoveryType() { 68 | return zookeeper.isPresent() ^ (!brokers.isEmpty()); 69 | } 70 | 71 | @JsonIgnore 72 | @ValidationMethod(message = "one of 'zookeeper' and 'brokers' must be set") 73 | public boolean isZookeeperOrBrokers() { 74 | return zookeeper.isPresent() || (!brokers.isEmpty()); 75 | } 76 | 77 | @JsonProperty 78 | public Optional getZookeeper() { 79 | return zookeeper; 80 | } 81 | 82 | @JsonProperty 83 | public void setZookeeper(final Optional zookeeper) { 84 | this.zookeeper = zookeeper; 85 | } 86 | 87 | @JsonProperty 88 | public ImmutableMap getBrokers() { 89 | return brokers; 90 | } 91 | 92 | @JsonProperty 93 | public void setBrokers(final ImmutableMap brokers) { 94 | this.brokers = brokers; 95 | } 96 | 97 | @JsonProperty 98 | 
public boolean isAsync() { 99 | return async; 100 | } 101 | 102 | @JsonProperty 103 | public void setAsync(final boolean async) { 104 | this.async = async; 105 | } 106 | 107 | @JsonProperty 108 | public Size getSendBufferSize() { 109 | return sendBufferSize; 110 | } 111 | 112 | @JsonProperty 113 | public void setSendBufferSize(final Size sendBufferSize) { 114 | this.sendBufferSize = sendBufferSize; 115 | } 116 | 117 | @JsonProperty 118 | public Duration getConnectionTimeout() { 119 | return connectionTimeout; 120 | } 121 | 122 | @JsonProperty 123 | public void setConnectionTimeout(final Duration connectionTimeout) { 124 | this.connectionTimeout = connectionTimeout; 125 | } 126 | 127 | @JsonProperty 128 | public long getReconnectInterval() { 129 | return reconnectInterval; 130 | } 131 | 132 | @JsonProperty 133 | public void setReconnectInterval(final long reconnectInterval) { 134 | this.reconnectInterval = reconnectInterval; 135 | } 136 | 137 | @JsonProperty 138 | public Optional getReconnectTimeInterval() { 139 | return reconnectTimeInterval; 140 | } 141 | 142 | @JsonProperty 143 | public void setReconnectTimeInterval(final Optional reconnectTimeInterval) { 144 | this.reconnectTimeInterval = reconnectTimeInterval; 145 | } 146 | 147 | @JsonProperty 148 | public Size getMaxMessageSize() { 149 | return maxMessageSize; 150 | } 151 | 152 | @JsonProperty 153 | public void setMaxMessageSize(final Size maxMessageSize) { 154 | this.maxMessageSize = maxMessageSize; 155 | } 156 | 157 | @JsonProperty 158 | public Compression getCompression() { 159 | return compression; 160 | } 161 | 162 | @JsonProperty 163 | public void setCompression(final Compression compression) { 164 | this.compression = compression; 165 | } 166 | 167 | @JsonProperty 168 | public ImmutableSet getCompressedTopics() { 169 | return compressedTopics; 170 | } 171 | 172 | @JsonProperty 173 | public void setCompressedTopics(final ImmutableSet compressedTopics) { 174 | this.compressedTopics = compressedTopics; 175 | } 176 | 177 | @JsonProperty 178 | public int getZookeeperReadRetries() { 179 | return zookeeperReadRetries; 180 | } 181 | 182 | @JsonProperty 183 | public void setZookeeperReadRetries(final int zookeeperReadRetries) { 184 | this.zookeeperReadRetries = zookeeperReadRetries; 185 | } 186 | 187 | @JsonProperty 188 | public Duration getQueueTime() { 189 | return queueTime; 190 | } 191 | 192 | @JsonProperty 193 | public void setQueueTime(final Duration queueTime) { 194 | this.queueTime = queueTime; 195 | } 196 | 197 | @JsonProperty 198 | public long getQueueSize() { 199 | return queueSize; 200 | } 201 | 202 | @JsonProperty 203 | public void setQueueSize(final long queueSize) { 204 | this.queueSize = queueSize; 205 | } 206 | 207 | @JsonProperty 208 | public long getBatchSize() { 209 | return batchSize; 210 | } 211 | 212 | @JsonProperty 213 | public void setBatchSize(final long batchSize) { 214 | this.batchSize = batchSize; 215 | } 216 | 217 | public KafkaProducer build(final Encoder encoder, 218 | final Environment environment, 219 | final String name) { 220 | return build(encoder, null, environment, name); 221 | } 222 | 223 | public KafkaProducer build(final Encoder encoder, 224 | final Partitioner partitioner, 225 | final Environment environment, 226 | final String name) { 227 | final KafkaProducer producer = build(encoder, partitioner); 228 | environment.lifecycle().manage(new ManagedProducer(producer)); 229 | return new InstrumentedProducer<>(producer, environment.metrics(), name); 230 | } 231 | 232 | public KafkaProducer 
build(final Encoder encoder) { 233 | return build(encoder, null); 234 | } 235 | 236 | public KafkaProducer build(final Encoder encoder, 237 | final Partitioner partitioner) { 238 | return new ProxyProducer<>(new Producer<>( 239 | toProducerConfig(this, encoder, partitioner), 240 | encoder, 241 | null, 242 | null, 243 | partitioner)); 244 | } 245 | 246 | static ProducerConfig toProducerConfig(final KafkaProducerFactory factory, 247 | final Encoder encoder, 248 | final Partitioner partitioner) { 249 | final Properties props = new Properties(); 250 | 251 | props.setProperty("serializer.class", encoder.getClass().getCanonicalName()); 252 | 253 | if (partitioner != null && factory.getBrokers().isEmpty()) { 254 | props.setProperty("partitioner.class", partitioner.getClass().getCanonicalName()); 255 | } 256 | 257 | props.setProperty("producer.type", factory.isAsync() ? "async" : "sync"); 258 | 259 | final Optional zooKeeperFactory = factory.getZookeeper(); 260 | if (zooKeeperFactory.isPresent()) { 261 | final ZooKeeperFactory zk = zooKeeperFactory.get(); 262 | props.setProperty("zk.connect", zk.getQuorumSpec() + zk.getNamespace()); 263 | } else { 264 | final StringBuilder sb = new StringBuilder(10*factory.getBrokers().size()); 265 | for (final ImmutableMap.Entry e : factory.getBrokers().entrySet()) { 266 | final String host = e.getValue().getHostString(); 267 | final int port = e.getValue().getPort() == 0 ? DEFAULT_BROKER_PORT : e.getValue().getPort(); 268 | sb.append(e.getKey()).append(':').append(host).append(':').append(port).append(','); 269 | } 270 | props.setProperty("broker.list", sb.substring(0, sb.length() - 1)); 271 | } 272 | 273 | props.setProperty("buffer.size", Long.toString(factory.getSendBufferSize().toBytes())); 274 | props.setProperty("connect.timeout.ms", 275 | Long.toString(factory.getConnectionTimeout().toMilliseconds())); 276 | props.setProperty("socket.timeout.ms", 277 | Long.toString(factory.getSocketTimeout().toMilliseconds())); 278 | props.setProperty("reconnect.interval", Long.toString(factory.reconnectInterval)); 279 | props.setProperty("reconnect.time.interval.ms", 280 | Long.toString(factory.reconnectTimeInterval 281 | .or(Duration.milliseconds(-1)).toMilliseconds())); 282 | props.setProperty("max.message.size", Long.toString(factory.maxMessageSize.toBytes())); 283 | props.setProperty("compression.codec", 284 | Integer.toString(factory.compression.getCodec().codec())); 285 | props.setProperty("zk.read.num.retries", 286 | Integer.toString(factory.getZookeeperReadRetries())); 287 | 288 | final ImmutableSet compressedTopics = factory.getCompressedTopics(); 289 | if (!compressedTopics.isEmpty()) { 290 | props.setProperty("compressed.topics", Joiner.on(',').join(compressedTopics)); 291 | } 292 | 293 | if (factory.isAsync()) { 294 | props.setProperty("queue.time", Long.toString(factory.getQueueTime().toMilliseconds())); 295 | props.setProperty("queue.size", Long.toString(factory.getQueueSize())); 296 | props.setProperty("batch.size", Long.toString(factory.getBatchSize())); 297 | } 298 | 299 | return new ProducerConfig(props); 300 | } 301 | } 302 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/KafkaConsumer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | /** 4 | * Interface for consuming a stream of messages from Kafka. 
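Returning to the producer factory above: inside an application's `run` method, the three-argument `build` call yields a producer that is already registered with the lifecycle and instrumented with metrics. The sketch below is hypothetical usage; the `Event` type, topic and metric names are illustrative, and the generic signature of `build` is assumed to follow the encoder's type since the listing does not show it.

```java
import com.datasift.dropwizard.kafka.KafkaProducerFactory;
import com.datasift.dropwizard.kafka.producer.KafkaProducer;
import com.datasift.dropwizard.kafka.serializer.JacksonEncoder;
import io.dropwizard.setup.Environment;

// Hypothetical wiring inside a Dropwizard Application#run (names are illustrative).
public class ProducerSetupExample {

    // trivially serialisable event type, purely illustrative
    public static class Event {
        public long createdAt = System.currentTimeMillis();
    }

    public static KafkaProducer<String, Event> setUp(final KafkaProducerFactory factory,
                                                     final Environment environment) {
        // builds the producer, registers it with the lifecycle and instruments it
        final KafkaProducer<String, Event> producer =
                factory.build(new JacksonEncoder<Event>(environment.getObjectMapper()),
                              environment, "events-producer");
        producer.send("events", new Event());
        return producer;
    }
}
```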
5 | */ 6 | public interface KafkaConsumer { 7 | 8 | /** 9 | * Commit the offsets of the current position in the message streams. 10 | * 11 | * @see kafka.consumer.ConsumerConnector#commitOffsets() 12 | */ 13 | public void commitOffsets(); 14 | 15 | /** 16 | * Determines if this {@link KafkaConsumer} is currently consuming. 17 | * 18 | * @return true if this {@link KafkaConsumer} is currently consuming from at least one 19 | * partition; otherwise, false. 20 | */ 21 | public boolean isRunning(); 22 | } 23 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/KafkaConsumerHealthCheck.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import com.codahale.metrics.health.HealthCheck; 4 | 5 | /** 6 | * A {@link HealthCheck} to monitor the health of a {@link KafkaConsumer}. 7 | */ 8 | public class KafkaConsumerHealthCheck extends HealthCheck { 9 | 10 | private final KafkaConsumer consumer; 11 | 12 | /** 13 | * Create a new {@link HealthCheck} instance with the given name. 14 | * 15 | * @param consumer the {@link KafkaConsumer} to monitor the health of. 16 | */ 17 | public KafkaConsumerHealthCheck(final KafkaConsumer consumer) { 18 | this.consumer = consumer; 19 | } 20 | 21 | /** 22 | * Checks that the {@link KafkaConsumer} is still in its running state. 23 | * 24 | * @return true if the {@link KafkaConsumer} is still running properly; false if it is not. 25 | * 26 | * @throws Exception if there is an error checking the state of the {@link KafkaConsumer}. 27 | */ 28 | @Override 29 | protected Result check() throws Exception { 30 | return consumer.isRunning() 31 | ? Result.healthy() 32 | : Result.unhealthy("Consumer not consuming any partitions"); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/MessageProcessor.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import com.codahale.metrics.Metric; 4 | import com.codahale.metrics.Timer; 5 | import kafka.message.MessageAndMetadata; 6 | 7 | /** 8 | * Processes messages of type {@code T} from a Kafka message stream. 9 | *
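Registering the health check shown above takes a single call against the environment's health check registry; a hypothetical helper (the check name is illustrative):

```java
import com.datasift.dropwizard.kafka.consumer.KafkaConsumer;
import com.datasift.dropwizard.kafka.consumer.KafkaConsumerHealthCheck;
import io.dropwizard.setup.Environment;

// Hypothetical registration of the consumer health check for an existing consumer.
public final class ConsumerHealthChecks {
    public static void register(final Environment environment, final KafkaConsumer consumer) {
        environment.healthChecks().register("kafka-consumer",
                new KafkaConsumerHealthCheck(consumer));
    }
}
```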

10 | * This {@link StreamProcessor} is instrumented with {@link Metric}s; specifically, a {@link Timer} 11 | * that tracks the time taken to process each message in the stream. 12 | * 13 | * @param the decoded type of the message to process 14 | */ 15 | public abstract class MessageProcessor implements StreamProcessor { 16 | // 17 | // /** 18 | // * {@link Timer} for the processing of each message in the stream. 19 | // */ 20 | // private final Timer processed; 21 | // 22 | // /** 23 | // * Creates a MessageProcessor; registers {@link Metric}s with the given {@link MetricRegistry}. 24 | // * 25 | // * @param registry the {@link MetricRegistry} to register metrics with. 26 | // * @param name the name to use for metrics of this processor. 27 | // */ 28 | // public MessageProcessor(final MetricRegistry registry, final String name) { 29 | // processed = registry.timer(MetricRegistry.name(name, "processed")); 30 | // } 31 | 32 | /** 33 | * Processes a {@code message} of type {@code T}. 34 | * 35 | * @param key the key of the message to process. 36 | * @param message the message to process. 37 | * @param topic the topic the entry belongs to. 38 | */ 39 | abstract public void process(T message, String topic); 40 | 41 | /** 42 | * Processes a {@link Iterable} by iteratively processing each message. 43 | * 44 | * @param stream the stream of messages to process. 45 | * @param topic the topic the {@code stream} belongs to. 46 | * 47 | * @see StreamProcessor#process(Iterable, String) 48 | */ 49 | public void process(final Iterable> stream, final String topic) { 50 | for (final MessageAndMetadata entry : stream) { 51 | // final Timer.Context context = processed.time(); 52 | process(entry.message(), topic); 53 | // context.stop(); 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/StreamProcessor.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import kafka.message.MessageAndMetadata; 4 | 5 | /** 6 | * Processes an {@link Iterable} of messages of type {@code T}. 7 | *

8 | * If you wish to process each message individually and iteratively, it's advised that you instead 9 | * use a {@link MessageProcessor}, as it provides a higher-level of abstraction. 10 | *
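For instance, a minimal per-message processor only has to implement the single `process(message, topic)` method. The class below is an illustrative sketch, assuming the processor is generic over the decoded message type:

```java
import com.datasift.dropwizard.kafka.consumer.MessageProcessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Minimal MessageProcessor that logs each decoded message. Keeping it stateless
// matters because the consumer may invoke it from several stream threads at once.
public class LoggingProcessor extends MessageProcessor<String> {

    private static final Logger LOG = LoggerFactory.getLogger(LoggingProcessor.class);

    @Override
    public void process(final String message, final String topic) {
        LOG.info("received from topic '{}': {}", topic, message);
    }
}
```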

11 | * Note: since consumers may use multiple threads, it is important that implementations are 12 | * thread-safe. 13 | */ 14 | public interface StreamProcessor { 15 | 16 | /** 17 | * Process an {@link Iterable} of messages of type T. 18 | * 19 | * @param stream the stream of messages to process. 20 | * @param topic the topic the {@code stream} belongs to. 21 | */ 22 | public void process(Iterable> stream, String topic); 23 | } 24 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/consumer/SynchronousConsumer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.consumer; 2 | 3 | import io.dropwizard.lifecycle.Managed; 4 | import io.dropwizard.lifecycle.ServerLifecycleListener; 5 | import io.dropwizard.util.Duration; 6 | import kafka.consumer.KafkaStream; 7 | import kafka.javaapi.consumer.ConsumerConnector; 8 | import kafka.serializer.Decoder; 9 | import org.eclipse.jetty.server.Server; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | 13 | import java.util.List; 14 | import java.util.Map; 15 | import java.util.Set; 16 | import java.util.concurrent.ExecutorService; 17 | import java.util.concurrent.ScheduledExecutorService; 18 | import java.util.concurrent.TimeUnit; 19 | 20 | /** 21 | * A {@link KafkaConsumer} that processes messages synchronously using an {@link ExecutorService}. 22 | */ 23 | public class SynchronousConsumer implements KafkaConsumer, Managed, ServerLifecycleListener { 24 | 25 | private final Logger LOG = LoggerFactory.getLogger(getClass()); 26 | 27 | private final ConsumerConnector connector; 28 | private final Map partitions; 29 | private final ScheduledExecutorService executor; 30 | private final Decoder decoder; 31 | private final StreamProcessor processor; 32 | private final Duration initialRecoveryDelay; 33 | private final Duration maxRecoveryDelay; 34 | private final Duration retryResetDelay; 35 | private final int maxRecoveryAttempts; 36 | private final boolean shutdownOnFatal; 37 | private final Duration startDelay; 38 | 39 | private Server server = null; 40 | private boolean fatalErrorOccurred = false; 41 | 42 | // a thread to asynchronously handle unrecoverable errors in the stream consumer 43 | private final Thread shutdownThread = new Thread("kafka-unrecoverable-error-handler"){ 44 | public void run() { 45 | while (true) { 46 | try { 47 | Thread.sleep(10000); 48 | } catch (final InterruptedException e) { 49 | // stop sleeping 50 | } 51 | if (fatalErrorOccurred) { 52 | try { 53 | if (shutdownOnFatal && server != null) { 54 | // shutdown the full service 55 | // note: shuts down the consumer as it's Managed by the Environment 56 | server.stop(); 57 | } else { 58 | // just shutdown the consumer 59 | SynchronousConsumer.this.stop(); 60 | } 61 | } catch (Exception e) { 62 | LOG.error("Error occurred while attempting emergency shut down.", e); 63 | } 64 | } 65 | } 66 | } 67 | }; 68 | 69 | /** 70 | * Creates a {@link SynchronousConsumer} to process a stream. 71 | * 72 | * @param connector the {@link ConsumerConnector} of the underlying consumer. 73 | * @param partitions a mapping of the topic -> partitions to consume. 74 | * @param decoder a {@link Decoder} for decoding each message before being processed. 75 | * @param processor a {@link StreamProcessor} for processing messages. 76 | * @param executor the {@link ExecutorService} to process the stream with. 
77 | */ 78 | public SynchronousConsumer(final ConsumerConnector connector, 79 | final Map partitions, 80 | final Decoder decoder, 81 | final StreamProcessor processor, 82 | final ScheduledExecutorService executor, 83 | final Duration initialRecoveryDelay, 84 | final Duration maxRecoveryDelay, 85 | final Duration retryResetDelay, 86 | final int maxRecoveryAttempts, 87 | final boolean shutdownOnFatal, 88 | final Duration startDelay) { 89 | this.connector = connector; 90 | this.partitions = partitions; 91 | this.decoder = decoder; 92 | this.processor = processor; 93 | this.executor = executor; 94 | this.initialRecoveryDelay = initialRecoveryDelay; 95 | this.maxRecoveryDelay = maxRecoveryDelay; 96 | this.retryResetDelay = retryResetDelay; 97 | this.maxRecoveryAttempts = maxRecoveryAttempts; 98 | this.shutdownOnFatal = shutdownOnFatal; 99 | this.startDelay = startDelay; 100 | 101 | shutdownThread.setDaemon(true); 102 | shutdownThread.start(); 103 | } 104 | 105 | /** 106 | * Commits the currently consumed offsets. 107 | */ 108 | public void commitOffsets() { 109 | connector.commitOffsets(); 110 | } 111 | 112 | @Override 113 | public void serverStarted(final Server server) { 114 | this.server = server; 115 | } 116 | 117 | /** 118 | * Starts this {@link SynchronousConsumer} immediately. 119 | *
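The `ScheduledExecutorService` handed to the constructor drives all consumption: `start()`, shown below, schedules one runnable per stream, so the pool needs at least one thread for every consumed partition across all topics. A sizing sketch, illustrative only and not part of the library:

```java
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

// Illustrative executor sizing for SynchronousConsumer: one thread per consumed partition.
public final class ConsumerExecutors {
    public static ScheduledExecutorService forPartitions(final Map<String, Integer> partitions) {
        int threads = 0;
        for (final int count : partitions.values()) {
            threads += count;
        }
        return Executors.newScheduledThreadPool(Math.max(1, threads));
    }
}
```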

120 | * The consumer will immediately begin consuming from the configured topics using the configured 121 | * {@link Decoder} to decode messages and {@link StreamProcessor} to process the decoded 122 | * messages. 123 | *

124 | * Each partition will be consumed using a separate thread. 125 | * 126 | * @throws Exception if an error occurs starting the consumer 127 | */ 128 | @Override 129 | public void start() throws Exception { 130 | final Set>>> streams = 131 | connector.createMessageStreams(partitions, decoder).entrySet(); 132 | 133 | for (final Map.Entry>> e : streams) { 134 | final String topic = e.getKey(); 135 | final List> messageStreams = e.getValue(); 136 | 137 | LOG.info("Consuming from topic '{}' with {} threads", topic, messageStreams.size()); 138 | 139 | for (final KafkaStream stream : messageStreams) { 140 | executor.schedule( 141 | new StreamProcessorRunnable(topic, stream), 142 | startDelay.getQuantity(), 143 | startDelay.getUnit()); 144 | } 145 | } 146 | } 147 | 148 | /** 149 | * Stops this {@link SynchronousConsumer} immediately. 150 | * 151 | * @throws Exception 152 | */ 153 | @Override 154 | public void stop() throws Exception { 155 | LOG.trace("Shutting down Connector"); 156 | connector.shutdown(); 157 | LOG.trace("Connector shut down"); 158 | } 159 | 160 | /** 161 | * Determines if this {@link KafkaConsumer} is currently consuming. 162 | * 163 | * @return true if this {@link KafkaConsumer} is currently consuming from at least one 164 | * partition; otherwise, false. 165 | */ 166 | public boolean isRunning() { 167 | return !executor.isShutdown() && !executor.isTerminated() && !fatalErrorOccurred; 168 | } 169 | 170 | private void fatalError() { 171 | this.fatalErrorOccurred = true; 172 | this.shutdownThread.interrupt(); 173 | } 174 | 175 | /** 176 | * A {@link Runnable} that processes a {@link KafkaStream}. 177 | * 178 | * The configured {@link StreamProcessor} is used to process the stream. 179 | */ 180 | private class StreamProcessorRunnable implements Runnable { 181 | 182 | private final KafkaStream stream; 183 | private final String topic; 184 | private int attempts = 0; 185 | private long lastErrorTimestamp = 0; 186 | 187 | /** 188 | * Creates a {@link StreamProcessorRunnable} for the given topic and stream. 189 | * 190 | * @param topic the topic the {@link KafkaStream} belongs to. 191 | * @param stream a stream of {@link kafka.message.Message}s in the topic. 192 | */ 193 | public StreamProcessorRunnable(final String topic, final KafkaStream stream) { 194 | this.topic = topic; 195 | this.stream = stream; 196 | } 197 | 198 | /** 199 | * Process the stream using the configured {@link StreamProcessor}. 200 | *

201 | * If an {@link Exception} is thrown during processing, if it is deemed recoverable, 202 | * the stream will continue to be consumed. 203 | *
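Recovery is not immediate: as `recoverableError` further down shows, the delay before resubmitting the stream doubles with each consecutive failure, is capped by `maxRecoveryDelay`, and the attempt counter resets once `retryResetDelay` passes without an error. A worked sketch of that arithmetic, assuming an initial delay of 500 milliseconds and a 10 second cap:

```java
import io.dropwizard.util.Duration;

// Illustrative reproduction of the backoff used by StreamProcessorRunnable.recoverableError.
public final class BackoffExample {
    public static void main(final String[] args) {
        final Duration initialRecoveryDelay = Duration.milliseconds(500);
        final Duration maxRecoveryDelay = Duration.seconds(10);
        for (int attempts = 1; attempts <= 6; attempts++) {
            final long sleepTime = Math.min(
                    maxRecoveryDelay.toMilliseconds(),
                    (long) (initialRecoveryDelay.toMilliseconds() * Math.pow(2, attempts)));
            // prints 1000, 2000, 4000, 8000, 10000 (capped), 10000 (capped)
            System.out.println("attempt " + attempts + ": sleep " + sleepTime + "ms");
        }
    }
}
```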

204 | * Unrecoverable {@link Exception}s will cause the consumer to shut down completely. 205 | */ 206 | @Override 207 | public void run() { 208 | try { 209 | processor.process(stream, topic); 210 | } catch (final IllegalStateException e) { 211 | error(e); 212 | } catch (final Exception e) { 213 | recoverableError(e); 214 | } catch (final Throwable e) { 215 | error(e); 216 | } 217 | } 218 | 219 | private void recoverableError(final Exception e) { 220 | LOG.warn("Error processing stream, restarting stream consumer ({} attempts remaining): {}", 221 | maxRecoveryAttempts - attempts, e.toString()); 222 | 223 | // reset attempts if there hasn't been a failure in a while 224 | if (System.currentTimeMillis() - lastErrorTimestamp >= retryResetDelay.toMilliseconds()) { 225 | attempts = 0; 226 | } 227 | 228 | // if a ceiling has been set on the number of retries, check if we have reached the ceiling 229 | attempts++; 230 | if (maxRecoveryAttempts > -1 && attempts >= maxRecoveryAttempts) { 231 | LOG.warn("Failed to restart consumer after {} retries", maxRecoveryAttempts); 232 | error(e); 233 | } else { 234 | try { 235 | final long sleepTime = Math.min( 236 | maxRecoveryDelay.toMilliseconds(), 237 | (long) (initialRecoveryDelay.toMilliseconds() * Math.pow( 2, attempts))); 238 | 239 | Thread.sleep(sleepTime); 240 | } catch(final InterruptedException ie){ 241 | LOG.warn("Error recovery grace period interrupted.", ie); 242 | } 243 | lastErrorTimestamp = System.currentTimeMillis(); 244 | if (!executor.isShutdown()) { 245 | executor.execute(this); 246 | } 247 | } 248 | } 249 | 250 | private void error(final Throwable e) { 251 | LOG.error("Unrecoverable error processing stream, shutting down", e); 252 | fatalError(); 253 | } 254 | } 255 | } 256 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/producer/InstrumentedProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import com.codahale.metrics.Meter; 4 | import com.codahale.metrics.MetricRegistry; 5 | import kafka.javaapi.producer.ProducerData; 6 | 7 | import java.util.List; 8 | 9 | public class InstrumentedProducer implements KafkaProducer { 10 | 11 | private final KafkaProducer underlying; 12 | private final Meter sentMessages; 13 | 14 | public InstrumentedProducer(final KafkaProducer underlying, 15 | final MetricRegistry registry, 16 | final String name) { 17 | this.underlying = underlying; 18 | this.sentMessages = registry.meter(MetricRegistry.name(name, "sent")); 19 | } 20 | 21 | @Override 22 | public void send(final String topic, final V message) { 23 | underlying.send(topic, message); 24 | sentMessages.mark(); 25 | } 26 | 27 | @Override 28 | public void send(final String topic, final K key, final V message) { 29 | underlying.send(topic, key, message); 30 | sentMessages.mark(); 31 | } 32 | 33 | @Override 34 | public void send(final ProducerData data) { 35 | underlying.send(data); 36 | sentMessages.mark(); 37 | } 38 | 39 | @Override 40 | public void send(final List> data) { 41 | underlying.send(data); 42 | sentMessages.mark(data.size()); 43 | } 44 | 45 | @Override 46 | public void close() { 47 | underlying.close(); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/producer/KafkaProducer.java: 
-------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import kafka.javaapi.producer.ProducerData; 4 | 5 | import java.util.List; 6 | 7 | public interface KafkaProducer { 8 | 9 | void send(String topic, V message); 10 | 11 | void send(String topic, K key, V message); 12 | 13 | void send(ProducerData data); 14 | 15 | void send(List> data); 16 | 17 | void close(); 18 | } 19 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/producer/ManagedProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import io.dropwizard.lifecycle.Managed; 4 | 5 | public class ManagedProducer implements Managed { 6 | 7 | private final KafkaProducer producer; 8 | 9 | public ManagedProducer(final KafkaProducer producer) { 10 | this.producer = producer; 11 | } 12 | 13 | @Override 14 | public void start() throws Exception { 15 | // nothing to do, already started 16 | } 17 | 18 | @Override 19 | public void stop() throws Exception { 20 | producer.close(); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/producer/ProxyProducer.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.producer; 2 | 3 | import kafka.javaapi.producer.Producer; 4 | import kafka.javaapi.producer.ProducerData; 5 | 6 | import java.util.ArrayList; 7 | import java.util.List; 8 | 9 | public class ProxyProducer implements KafkaProducer { 10 | 11 | private final Producer producer; 12 | 13 | public ProxyProducer(final Producer producer) { 14 | this.producer = producer; 15 | } 16 | 17 | @Override 18 | public void send(final String topic, final V message) { 19 | final List data = new ArrayList<>(1); 20 | data.add(message); 21 | producer.send(new ProducerData(topic, data)); 22 | } 23 | 24 | @Override 25 | public void send(final String topic, final K key, final V message) { 26 | final List data = new ArrayList<>(1); 27 | data.add(message); 28 | producer.send(new ProducerData<>(topic, key, data)); 29 | } 30 | 31 | @Override 32 | public void send(final ProducerData data) { 33 | producer.send(data); 34 | } 35 | 36 | @Override 37 | public void send(final List> data) { 38 | producer.send(data); 39 | } 40 | 41 | @Override 42 | public void close() { 43 | producer.close(); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/serializer/JacksonDecoder.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.serializer; 2 | 3 | import com.fasterxml.jackson.core.JsonLocation; 4 | import com.fasterxml.jackson.core.JsonParseException; 5 | import com.fasterxml.jackson.databind.ObjectMapper; 6 | import com.fasterxml.jackson.databind.util.ByteBufferBackedInputStream; 7 | import kafka.message.Message; 8 | import kafka.serializer.Decoder; 9 | 10 | import java.io.IOException; 11 | import java.nio.ByteBuffer; 12 | 13 | /** 14 | * A Kafka {@link Decoder} for decoding an arbitrary type from a JSON blob. 
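Unlike the Kafka 0.8 serializers shown earlier, this 0.7 pair works in terms of `kafka.message.Message` (`toMessage`/`toEvent` rather than `toBytes`/`fromBytes`). A round-trip sketch, again with an illustrative `Event` type and assumed type parameters:

```java
import com.datasift.dropwizard.kafka.serializer.JacksonDecoder;
import com.datasift.dropwizard.kafka.serializer.JacksonEncoder;
import com.fasterxml.jackson.databind.ObjectMapper;
import kafka.message.Message;

// Hypothetical round trip through the Kafka 0.7 Jackson serializers defined below.
public class Kafka7JsonRoundTrip {

    public static class Event {
        public String id;
    }

    public static void main(final String[] args) {
        final ObjectMapper mapper = new ObjectMapper();
        final JacksonEncoder<Event> encoder = new JacksonEncoder<>(mapper);
        final JacksonDecoder<Event> decoder = new JacksonDecoder<>(mapper, Event.class);

        final Event event = new Event();
        event.id = "abc";

        final Message message = encoder.toMessage(event);  // JSON payload wrapped in a Message
        final Event decoded = decoder.toEvent(message);
        System.out.println(decoded.id);
    }
}
```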
15 | */ 16 | public class JacksonDecoder implements Decoder { 17 | 18 | private final Class clazz; 19 | private final ObjectMapper mapper; 20 | 21 | public JacksonDecoder(final ObjectMapper mapper, final Class clazz) { 22 | this.mapper = mapper; 23 | this.clazz = clazz; 24 | } 25 | 26 | @Override 27 | public T toEvent(final Message msg) { 28 | try { 29 | try { 30 | final ByteBuffer bb = msg.payload(); 31 | if (bb.hasArray()) { 32 | return mapper.readValue(bb.array(), bb.arrayOffset(), bb.limit() - bb.position(), clazz); 33 | } else { 34 | return mapper.readValue(new ByteBufferBackedInputStream(bb), clazz); 35 | } 36 | } catch (final JsonParseException ex) { 37 | final JsonLocation location = ex.getLocation(); 38 | Object src = location.getSourceRef(); 39 | if (src instanceof ByteBuffer) { 40 | src = ((ByteBuffer) src).asCharBuffer(); 41 | } else if (src instanceof byte[]) { 42 | src = new String((byte[]) src); 43 | } else if (src instanceof char[]) { 44 | src = new String((char[]) src); 45 | } 46 | throw new JsonParseException( 47 | ex.getMessage(), 48 | new JsonLocation( 49 | src, 50 | location.getByteOffset(), 51 | location.getCharOffset(), 52 | location.getLineNr(), 53 | location.getColumnNr()), 54 | ex.getCause()); 55 | } 56 | } catch (final IOException ex) { 57 | throw new RuntimeException(ex); 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/serializer/JacksonEncoder.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.serializer; 2 | 3 | import com.fasterxml.jackson.core.JsonProcessingException; 4 | import com.fasterxml.jackson.databind.ObjectMapper; 5 | import kafka.message.Message; 6 | import kafka.serializer.Encoder; 7 | 8 | /** 9 | * A Kafka {@link Encoder} for encoding an arbitrary type to a JSON blob. 10 | */ 11 | public class JacksonEncoder implements Encoder { 12 | 13 | private final ObjectMapper mapper; 14 | 15 | public JacksonEncoder(final ObjectMapper mapper) { 16 | this.mapper = mapper; 17 | } 18 | 19 | @Override 20 | public Message toMessage(final T event) { 21 | try { 22 | return new Message(mapper.writeValueAsBytes(event)); 23 | } catch (final JsonProcessingException ex) { 24 | throw new RuntimeException(ex); 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/main/java/com/datasift/dropwizard/kafka/util/Compression.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.util; 2 | 3 | import com.fasterxml.jackson.annotation.JsonCreator; 4 | import kafka.message.*; 5 | import kafka.message.DefaultCompressionCodec; 6 | import kafka.message.GZIPCompressionCodec; 7 | import kafka.message.NoCompressionCodec; 8 | 9 | /** 10 | * A utility for parsing {@link CompressionCodec}s from a {@link 11 | * io.dropwizard.Configuration}. 12 | *

13 | * To create {@link Compression} instances, use {@link Compression#parse(String)} to parse an 14 | * instance from a {@link String}. 15 | *

16 | * This is provided to parse textual specifications of a {@link CompressionCodec}, for example in a 17 | * {@link io.dropwizard.Configuration}. 18 | */ 19 | public class Compression { 20 | 21 | private final CompressionCodec codec; 22 | 23 | /** 24 | * Creates a {@link Compression} instance for the given codec type. 25 | *

26 | * The valid codec values are defined by {@link CompressionCodec}. 27 | *

28 | * To create {@link Compression} instances, use the {@link Compression#parse(String)} factory 29 | * method to parse an instance from a {@link String}. 30 | * 31 | * @param codec the codec to use, as an integer index. 32 | * 33 | * @see Compression#parse(String) 34 | */ 35 | private Compression(final int codec) { 36 | this.codec = CompressionCodec$.MODULE$.getCompressionCodec(codec); 37 | } 38 | 39 | /** 40 | * Gets the {@link CompressionCodec} instance for this {@link Compression}. 41 | * 42 | * @return the {@link CompressionCodec} instance for this {@link Compression} 43 | */ 44 | public CompressionCodec getCodec() { 45 | return codec; 46 | } 47 | 48 | /** 49 | * Parses a String representation of a {@link CompressionCodec}. 50 | * 51 | * @param codec the name of the {@link CompressionCodec} to parse. 52 | * 53 | * @return a {@link Compression} instance for the codec. 54 | * 55 | * @throws IllegalArgumentException if codec is not a valid {@link CompressionCodec}. 56 | */ 57 | @JsonCreator 58 | public static Compression parse(final String codec) { 59 | if ("gzip".equals(codec) || "gz".equals(codec)) { 60 | return new Compression(GZIPCompressionCodec.codec()); 61 | } else if ("snappy".equals(codec)) { 62 | return new Compression(SnappyCompressionCodec.codec()); 63 | } else if ("none".equals(codec) || "no".equals(codec) || "false".equals(codec)) { 64 | return new Compression(NoCompressionCodec.codec()); 65 | } else if ("default".equals(codec) 66 | || "yes".equals(codec) 67 | || "null".equals(codec) 68 | || codec == null) 69 | { 70 | return new Compression(DefaultCompressionCodec.codec()); 71 | } else { 72 | throw new IllegalArgumentException("Invalid Compression: " + codec); 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/test/java/com/datasift/dropwizard/kafka/KafkaConsumerFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import io.dropwizard.jackson.Jackson; 4 | import com.datasift.dropwizard.zookeeper.ZooKeeperFactory; 5 | import com.google.common.io.Resources; 6 | import io.dropwizard.configuration.ConfigurationFactory; 7 | import io.dropwizard.util.Duration; 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import javax.validation.Validation; 12 | import javax.validation.Validator; 13 | import java.io.File; 14 | 15 | import static org.hamcrest.Matchers.*; 16 | import static org.junit.Assert.assertThat; 17 | 18 | /** 19 | * Tests {@link KafkaConsumerFactory}. 
20 | */ 21 | public class KafkaConsumerFactoryTest { 22 | 23 | private KafkaConsumerFactory factory = null; 24 | 25 | @Before 26 | public void setup() throws Exception { 27 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 28 | factory = new ConfigurationFactory<>(KafkaConsumerFactory.class, validator, Jackson.newObjectMapper(), "dw") 29 | .build(new File(Resources.getResource("yaml/consumer.yaml").toURI())); 30 | } 31 | 32 | @Test 33 | public void testZooKeeper() { 34 | assertThat("has ZooKeeperConfiguration", 35 | factory.getZookeeper(), 36 | instanceOf(ZooKeeperFactory.class)); 37 | } 38 | 39 | @Test 40 | public void testGroup() { 41 | assertThat("group is correctly configured", factory.getGroup(), is("test")); 42 | } 43 | 44 | @Test 45 | public void testPartitions() { 46 | assertThat("has correct partition configuration", 47 | factory.getPartitions(), 48 | allOf(hasEntry("foo", 1), hasEntry("bar", 2))); 49 | } 50 | 51 | @Test 52 | public void testRebalanceRetries() { 53 | assertThat("rebalanceRetries is overridden to 5", 54 | factory.getRebalanceRetries(), 55 | is(5)); 56 | } 57 | 58 | @Test 59 | public void testInitialOffset() { 60 | final KafkaConsumerFactory factory = new KafkaConsumerFactory(); 61 | assertThat("auto.offset.reset defaults to 'largest'", 62 | KafkaConsumerFactory.toConsumerConfig(factory).autoOffsetReset(), 63 | equalTo("largest")); 64 | factory.setInitialOffset(KafkaConsumerFactory.InitialOffset.SMALLEST); 65 | assertThat("auto.offset.reset changed to 'smallest'", 66 | KafkaConsumerFactory.toConsumerConfig(factory).autoOffsetReset(), 67 | equalTo("smallest")); 68 | } 69 | 70 | @Test 71 | public void testRetryResetDelay() { 72 | assertThat("retryResetDelay is overridden to 3 seconds", 73 | factory.getRetryResetDelay(), 74 | is(Duration.seconds(3))); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/test/java/com/datasift/dropwizard/kafka/KafkaProducerFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka; 2 | 3 | import com.google.common.io.Resources; 4 | import io.dropwizard.configuration.ConfigurationFactory; 5 | import io.dropwizard.jackson.Jackson; 6 | import kafka.producer.ProducerConfig; 7 | import kafka.serializer.DefaultEncoder; 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import javax.validation.Validation; 12 | import javax.validation.Validator; 13 | import java.io.File; 14 | 15 | import static com.datasift.dropwizard.kafka.KafkaProducerFactory.DEFAULT_BROKER_PORT; 16 | import static org.hamcrest.Matchers.equalTo; 17 | import static org.junit.Assert.assertThat; 18 | 19 | /** 20 | * TODO: Document 21 | */ 22 | public class KafkaProducerFactoryTest { 23 | 24 | private KafkaProducerFactory factory; 25 | private ProducerConfig config; 26 | 27 | @Before 28 | public void setup() throws Exception { 29 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 30 | factory = new ConfigurationFactory<>(KafkaProducerFactory.class, validator, Jackson.newObjectMapper(), "dw") 31 | .build(new File(Resources.getResource("yaml/producer.yaml").toURI())); 32 | config = KafkaProducerFactory.toProducerConfig(factory, new DefaultEncoder(), null); 33 | } 34 | 35 | @Test 36 | public void testExplicitBrokers() { 37 | assertThat("explcitly defined brokers are correctly parsed", 38 | config.brokerList(), 39 | 
equalTo("0:localhost:4321,1:192.168.10.12:123,2:localhost:" 40 | + DEFAULT_BROKER_PORT + ",3:192.168.4.21:" + DEFAULT_BROKER_PORT)); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/test/java/com/datasift/dropwizard/kafka/util/CompressionTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.kafka.util; 2 | 3 | import kafka.message.*; 4 | import org.junit.Test; 5 | 6 | import static org.junit.Assert.*; 7 | import static org.hamcrest.Matchers.*; 8 | 9 | /** 10 | * Tests {@link Compression} 11 | */ 12 | public class CompressionTest { 13 | 14 | @Test 15 | public void parsesGZIP() { 16 | assertCompression(GZIPCompressionCodec$.MODULE$, "gz"); 17 | assertCompression(GZIPCompressionCodec$.MODULE$, "gzip"); 18 | } 19 | 20 | @Test 21 | public void parseNoCodec() { 22 | assertCompression(NoCompressionCodec$.MODULE$, "none"); 23 | assertCompression(NoCompressionCodec$.MODULE$, "no"); 24 | assertCompression(NoCompressionCodec$.MODULE$, "false"); 25 | } 26 | 27 | @Test 28 | public void parseDefaultCodec() { 29 | assertCompression(GZIPCompressionCodec$.MODULE$, "default"); 30 | assertCompression(GZIPCompressionCodec$.MODULE$, "yes"); 31 | assertCompression(GZIPCompressionCodec$.MODULE$, "null"); 32 | assertCompression(GZIPCompressionCodec$.MODULE$, null); 33 | } 34 | 35 | private void assertCompression(final CompressionCodec expected, final String value) { 36 | assertThat(String.format("'%s' parses as %s", value, expected.getClass().getSimpleName()), 37 | Compression.parse(value).getCodec(), 38 | is(expected)); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/test/resources/yaml/consumer.yaml: -------------------------------------------------------------------------------- 1 | zookeeper: 2 | hosts: 3 | - localhost 4 | - fake.remotehost 5 | port: 2181 6 | group: test 7 | partitions: 8 | foo: 1 9 | bar: 2 10 | rebalanceRetries: 5 11 | retryResetDelay: 3 seconds 12 | -------------------------------------------------------------------------------- /dropwizard-extra-kafka7/src/test/resources/yaml/producer.yaml: -------------------------------------------------------------------------------- 1 | brokers: 2 | 0: "localhost:4321" 3 | 1: "192.168.10.12:123" 4 | 2: "localhost" 5 | 3: "192.168.4.21" 6 | -------------------------------------------------------------------------------- /dropwizard-extra-util/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | com.datasift.dropwizard 7 | dropwizard-extra 8 | 0.7.1-2-SNAPSHOT 9 | ../pom.xml 10 | 11 | 12 | dropwizard-extra-util 13 | 14 | Dropwizard Extra Utilities 15 | http://datasift.github.com/dropwizard-extra/dropwizard-extra-util 16 | 17 | Some useful additions to Dropwizard that have no external dependencies. 
18 | 19 | 20 | 21 | 22 | io.dropwizard 23 | dropwizard-core 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /dropwizard-extra-util/src/main/java/com/datasift/dropwizard/health/SocketHealthCheck.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.health; 2 | 3 | import com.codahale.metrics.health.HealthCheck; 4 | 5 | import java.io.IOException; 6 | import java.net.Socket; 7 | 8 | /** 9 | * A base {@link HealthCheck} for remote socket servers. 10 | *

11 | * Use this as a basis for {@link HealthCheck}s for remote services, such as databases or 12 | * web-services. 13 | */ 14 | public abstract class SocketHealthCheck extends HealthCheck { 15 | 16 | private final String hostname; 17 | private final int port; 18 | 19 | /** 20 | * Gets the hostname of the remote socket being checked. 21 | * 22 | * @return the hostname of the remote socket being checked. 23 | */ 24 | public String getHostname() { 25 | return hostname; 26 | } 27 | 28 | /** 29 | * Gets the port of the remote socket being checked. 30 | * 31 | * @return the port of the remote socket being checked. 32 | */ 33 | public int getPort() { 34 | return port; 35 | } 36 | 37 | /** 38 | * Initialises a {@link HealthCheck} for a remote socket with the given {@code hostname} and 39 | * {@code port}. 40 | * 41 | * @param hostname the hostname of the remote socket to check. 42 | * @param port the port of the remote socket to check. 43 | */ 44 | public SocketHealthCheck(final String hostname, final int port) { 45 | this.hostname = hostname; 46 | this.port = port; 47 | } 48 | 49 | /** 50 | * Generates a String representation of the remote socket being checked. 51 | *

52 | * This will be the socket address formatted as: hostname:port 53 | * 54 | * @return the String representation of the remote socket being checked. 55 | */ 56 | public String toString() { 57 | return getHostname() + ":" + getPort(); 58 | } 59 | 60 | /** 61 | * Checks that the configured remote socket can be connected to. 62 | * 63 | * @return The result of {@link #check(Socket)} if the socket can be connected to; or 64 | * {@link Result#unhealthy(String)} if the socket connection fails. 65 | * 66 | * @throws Exception Any Exceptions thrown by the implementation of {@link #check(Socket)}. 67 | */ 68 | @Override 69 | protected Result check() throws Exception { 70 | try (final Socket socket = createSocket(hostname, port)) { 71 | return socket.isConnected() 72 | ? check(socket) 73 | : Result.unhealthy(String.format( 74 | "Failed to connect to %s:%d", hostname, port)); 75 | } 76 | } 77 | 78 | /** 79 | * Creates a new {@link Socket} for the given {@code hostname} and {@code port}. 80 | * 81 | * @param hostname the remote hostname of the {@link Socket} to create. 82 | * @param port the remote port of the {@link Socket} to create. 83 | * 84 | * @return a new {@link Socket} for the given {@code hostname} and {@code port}. 85 | * 86 | * @throws IOException if an I/O error occurs when creating the {@link Socket} or connecting it. 87 | */ 88 | protected Socket createSocket(final String hostname, final int port) throws IOException { 89 | return new Socket(hostname, port); 90 | } 91 | 92 | /** 93 | * Perform a check of a {@link Socket}. 94 | *

95 | * Implementations can assume that the {@link Socket} is already connected. 96 | * 97 | * @param socket the {@link Socket} to check the health of 98 | * 99 | * @return if the component is healthy, a healthy {@link Result}; otherwise, an unhealthy {@link 100 | * Result} with a description of the error or exception 101 | * 102 | * @throws Exception if there is an unhandled error during the health check; this will result in 103 | * a failed health check 104 | */ 105 | protected abstract Result check(Socket socket); 106 | } 107 | -------------------------------------------------------------------------------- /dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Exceptions.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.util; 2 | 3 | /** 4 | * Utilities for working with {@link Exception}s. 5 | */ 6 | public class Exceptions { 7 | 8 | /** 9 | * Creates a new {@link Exception} instance from the given {@link Class}, using the given args. 10 | *

11 | * A new {@link Exception} instance object of the given {@link Class} is created, using 12 | * reflection, providing the given arguments to the constructor. 13 | * 14 | * @param clazz the {@link Class} of the {@link Exception} to instantiate. 15 | * @param args the arguments to pass to the constructor of the {@link Exception}. 16 | * @param the type of the {@link Exception} to return. 17 | * @return a new instance of the given {@link Exception}, constructed using the given args. 18 | * @throws RuntimeException if there was a problem instantiating the {@link Exception}. 19 | */ 20 | public static T newInstance(final Class clazz, final Object... args) { 21 | try { 22 | return Classes.newInstance(clazz, args); 23 | } catch (final Exception e) { // TODO: be less lazy with the Exception handling? 24 | throw new RuntimeException("Exception while instantiating" + clazz, e); 25 | } 26 | } 27 | 28 | /** 29 | * Creates a new {@link Exception} instance of the same {@link Class} as the given 30 | * template, using the given constructor args. 31 | *

32 | * A new {@link Exception} instance object of the given {@link Class} is created, using 33 | * reflection, providing the given arguments to the constructor. 34 | * 35 | * @param template an object that provides the {@link Class} of the {@link Exception } to 36 | * instantiate. 37 | * @param args the arguments to pass to the constructor of the {@link Exception}. 38 | * @param the type of the {@link Exception} to return. 39 | * @return a new instance of the given {@link Exception}, constructed using the given args. 40 | * @throws RuntimeException if there was a problem instantiating the {@link Exception}. 41 | */ 42 | public static T newInstanceFrom(final T template, final Object... args) { 43 | try { 44 | return Classes.newInstanceFrom(template, args); 45 | } catch (final Exception e) { // TODO: be less lazy with the Exception handling? 46 | throw new RuntimeException("Exception while instantiating" + template.getClass(), e); 47 | } 48 | } 49 | 50 | /** 51 | * Creates a new {@link Exception} instance from the given {@link Class}, using the given args, 52 | * ignoring visibility. 53 | *

54 | * A new {@link Exception} instance object of the given {@link Class} is created, using 55 | * reflection, providing the given arguments to the constructor. 56 | *

57 | * The visibility of the constructor defined by the arguments is ignored and a new instance 58 | * created irrespective of the defined visibility. This is potentially dangerous, as the API 59 | * likely makes no guarantee as to the behaviour when instantiating from a non-public 60 | * constructor. 61 | * 62 | * @param clazz the {@link Class} of the {@link Exception} to instantiate. 63 | * @param args the arguments to pass to the constructor of the {@link Exception}. 64 | * @param the type of the {@link Exception} to return. 65 | * @return a new instance of the given {@link Exception}, constructed using the given args. 66 | * @throws RuntimeException if there was a problem instantiating the {@link Exception}. 67 | */ 68 | public static T unsafeNewInstance(final Class clazz, 69 | final Object... args) { 70 | try { 71 | return Classes.unsafeNewInstance(clazz, args); 72 | } catch (final Exception e) { // TODO: be less lazy with the Exception handling? 73 | throw new RuntimeException("Exception while instantiating" + clazz, e); 74 | } 75 | } 76 | 77 | /** 78 | * Creates a new {@link Exception} instance of the same {@link Class} as the given 79 | * template, using the given constructor args, ignoring visibility. 80 | *

81 | * A new {@link Exception} instance object of the given {@link Class} is created, using 82 | * reflection, providing the given arguments to the constructor. 83 | *

84 | * The visibility of the constructor defined by the arguments is ignored and a new instance 85 | * created irrespective of the defined visibility. This is potentially dangerous, as the API 86 | * likely makes no guarantee as to the behaviour when instantiating from a non-public 87 | * constructor. 88 | * 89 | * @param template an object that provides the {@link Class} of the {@link Exception } to 90 | * instantiate. 91 | * @param args the arguments to pass to the constructor of the {@link Exception}. 92 | * @param the type of the {@link Exception} to return. 93 | * @return a new instance of the given {@link Exception}, constructed using the given args. 94 | * @throws RuntimeException if there was a problem instantiating the {@link Exception}. 95 | */ 96 | public static T unsafeNewInstanceFrom(final T template, 97 | final Object... args) { 98 | try { 99 | return Classes.unsafeNewInstanceFrom(template, args); 100 | } catch (final Exception e) { // TODO: be less lazy with the Exception handling? 101 | throw new RuntimeException("Exception while instantiating" + template.getClass(), e); 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /dropwizard-extra-util/src/main/java/com/datasift/dropwizard/util/Primitives.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.util; 2 | 3 | import com.google.common.collect.BiMap; 4 | import com.google.common.collect.ImmutableBiMap; 5 | import com.google.common.collect.ImmutableMap; 6 | import com.google.common.collect.ImmutableSet; 7 | 8 | import java.util.Map; 9 | import java.util.Set; 10 | 11 | /** 12 | * Utilities for working with primitive types and reflection. 13 | *

14 | * Terminology: 15 | *

16 | * Non-reference primitive types (e.g. int, double, void) 17 | * are considered native primitives, or sometimes, unboxed primitives. 18 | *

19 | * Reference primitive types (e.g. {@link Integer}, {@link Double}, {@link Void}) are considered 20 | * boxed primitives. 21 | *

22 | * Conversion between native primitives and boxed primitives can be done with {@link 23 | * Primitives#box(Class)} and {@link Primitives#unbox(Class)}. 24 | *

25 | * Whenever possible, boxing/unboxing will be implicit and transparent, with a preference for 26 | * native primitive types. 27 | */ 28 | public class Primitives { 29 | 30 | // a mapping of primitive types that are assignable to a wider primitive 31 | private static final Map> ASSIGNABLES; 32 | static { 33 | final ImmutableMap.Builder> builder = ImmutableMap.builder(); 34 | builder.put(Short.TYPE, ImmutableSet.of(Byte.TYPE)); 35 | builder.put(Integer.TYPE, ImmutableSet.of(Byte.TYPE, Short.TYPE)); 36 | builder.put(Long.TYPE, ImmutableSet.of(Byte.TYPE, Short.TYPE, Integer.TYPE)); 37 | builder.put(Float.TYPE, ImmutableSet.of(Byte.TYPE, Short.TYPE, Integer.TYPE, 38 | Long.TYPE, Character.TYPE)); 39 | builder.put(Double.TYPE, ImmutableSet.of(Byte.TYPE, Short.TYPE, Integer.TYPE, 40 | Long.TYPE, Float.TYPE, Character.TYPE)); 41 | ASSIGNABLES = builder.build(); 42 | } 43 | 44 | // a mapping of primitive Class instances to their boxed counterparts 45 | private static final BiMap BOXED_CLASSES; 46 | static { 47 | final ImmutableBiMap.Builder builder = ImmutableBiMap.builder(); 48 | builder.put(Byte.TYPE, Byte.class); 49 | builder.put(Short.TYPE, Short.class); 50 | builder.put(Integer.TYPE, Integer.class); 51 | builder.put(Long.TYPE, Long.class); 52 | builder.put(Float.TYPE, Float.class); 53 | builder.put(Double.TYPE, Double.class); 54 | builder.put(Boolean.TYPE, Boolean.class); 55 | builder.put(Character.TYPE, Character.class); 56 | builder.put(Void.TYPE, Void.class); 57 | BOXED_CLASSES = builder.build(); 58 | } 59 | 60 | /** 61 | * Produces the {@link Class} for the boxed counterpart of the given primitive {@link Class}. 62 | * 63 | * @param clazz the {@link Class} of the primitive type to get the boxed counterpart for. 64 | * @return the {@link Class} of the boxed counterpart to the given primitive type {@link 65 | * Class} or; if the argument is already a boxed primitive, the argument itself. 66 | * @throws IllegalArgumentException if the given {@link Class} is not for a primitive type. 67 | */ 68 | public static Class box(final Class clazz) { 69 | return isNativePrimitive(clazz) && BOXED_CLASSES.containsKey(clazz) 70 | ? BOXED_CLASSES.get(clazz) 71 | : requirePrimitiveClass(clazz); 72 | } 73 | 74 | /** 75 | * Produces the {@link Class} for the native primitive counterpart of the given boxed primitive 76 | * {@link Class}. 77 | * 78 | * @param clazz the {@link Class} of the boxed primitive type to unbox to a native primitive. 79 | * @return the {@link Class} of the native primitive counterpart to the given boxed primitive 80 | * {@link Class} or; if the argument is already a native primitive, the argument itself. 81 | * @throws IllegalArgumentException if the given {@link Class} is not for a primitive type. 82 | */ 83 | public static Class unbox(final Class clazz) { 84 | return isBoxedPrimitive(clazz) 85 | ? BOXED_CLASSES.inverse().get(clazz) 86 | : requirePrimitiveClass(clazz); 87 | } 88 | 89 | /** 90 | * Determines whether the objects of the given source {@link Class} can be assigned to the 91 | * primitive type of the given target {@link Class}. 92 | *

93 | * If either type is a boxed-primitive, it will be unboxed automatically; all comparisons 94 | * will be of the native primitive types. 95 | * 96 | * @param target the {@link Class} of the type you wish to assign to. 97 | * @param source the {@link Class} of the type you wish to assign from. 98 | * @return true if objects of the source {@link Class} can be assigned to the type of the 99 | * target {@link Class}; otherwise, false. 100 | */ 101 | public static boolean isAssignableFrom(final Class target, final Class source) { 102 | if (target.equals(Classes.Null.class)) { 103 | throw new NullPointerException("target for type assignment may not be the null type"); 104 | } 105 | 106 | // permit null sources for boxed targets 107 | if (Primitives.isBoxedPrimitive(target) && source.equals(Classes.Null.class)) { 108 | return true; 109 | } 110 | 111 | // ensure source type is a primitive type 112 | if (!Primitives.isPrimitive(source)) { 113 | return false; 114 | } 115 | 116 | // unbox both types for comparison 117 | final Class unboxedTarget = unbox(target); 118 | final Class unboxedSource = unbox(source); 119 | 120 | return unboxedTarget.equals(unboxedSource) || 121 | (ASSIGNABLES.containsKey(unboxedTarget) 122 | && ASSIGNABLES.get(unboxedTarget).contains(unboxedSource)); 123 | } 124 | 125 | /** 126 | * Determines whether the given {@link Class} is for a primitive type; either native or boxed. 127 | *

128 | * Both boxed and native primitive types are considered "primitives". Example: 129 | * 130 | * Primitives.isPrimitive(int.class) == true; 131 | * Primitives.isPrimitive(Integer.class) == true; 132 | * Primitives.isPrimitive(String.class) == false; 133 | * 134 | * 135 | * @param clazz the {@link Class} of the type to determine whether or not it is for a 136 | * primitive type. 137 | * @return true if the given {@link Class} is for a primitive type, boxed or native; false if 138 | * the given {@link Class} is for any other type. 139 | */ 140 | public static boolean isPrimitive(final Class clazz) { 141 | return clazz.isPrimitive() || BOXED_CLASSES.containsValue(clazz); 142 | } 143 | 144 | /** 145 | * Determines whether the given {@link Class} is for a boxed primitive type. 146 | *

147 | * Only boxed primitive types are accepted. Example: 148 | * 149 | * Primitives.isPrimitive(int.class) == false; 150 | * Primitives.isPrimitive(Integer.class) == true; 151 | * Primitives.isPrimitive(String.class) == false; 152 | * 153 | * 154 | * @param clazz the {@link Class} of the type to determine whether or not it is for a boxed 155 | * primitive type. 156 | * @return true if the given {@link Class} is for a boxed primitive type; false if the given 157 | * {@link Class} is for any other type, including native primitive types. 158 | */ 159 | public static boolean isBoxedPrimitive(final Class clazz) { 160 | return !clazz.isPrimitive() && BOXED_CLASSES.containsValue(clazz); 161 | } 162 | 163 | /** 164 | * Determines whether the given {@link Class} is for a native primitive type. 165 | *

166 | * Only native primitive types are accepted. Example: 167 | * 168 | * Primitives.isPrimitive(int.class) == true; 169 | * Primitives.isPrimitive(Integer.class) == false; 170 | * Primitives.isPrimitive(String.class) == false; 171 | * 172 | * 173 | * @param clazz the {@link Class} of the type to determine whether or not it is for a native 174 | * primitive type. 175 | * @return true if the given {@link Class} is for a native primitive type; false if the given 176 | * {@link Class} is for any other type, including boxed primitive types. 177 | */ 178 | public static boolean isNativePrimitive(final Class clazz) { 179 | return clazz.isPrimitive(); 180 | } 181 | 182 | /** 183 | * Asserts that the given {@link Class} is for a primitive type, either native or boxed. 184 | * 185 | * @param clazz the {@link Class} to assert is for a primitive type. 186 | * @return the given {@link Class}, without modifications, to facilitate chaining. 187 | * @throws AssertionError if the given {@link Class} is for a non-primitive type. 188 | */ 189 | public static Class requirePrimitiveClass(final Class clazz) { 190 | if (!isPrimitive(clazz)) { 191 | throw new AssertionError("Class must be for a primitive type; " + clazz + " given"); 192 | } 193 | return clazz; 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /dropwizard-extra-zookeeper/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | com.datasift.dropwizard 7 | dropwizard-extra 8 | 0.7.1-2-SNAPSHOT 9 | ../pom.xml 10 | 11 | 12 | dropwizard-extra-zookeeper 13 | 14 | Dropwizard Extra ZooKeeper 15 | http://datasift.github.com/dropwizard-extra/dropwizard-extra-zookeeper 16 | 17 | Dropwizard integration for working with ZooKeeper. 18 | 19 | 20 | 21 | 22 | io.dropwizard 23 | dropwizard-core 24 | 25 | 26 | org.apache.zookeeper 27 | zookeeper 28 | 3.4.6 29 | 30 | 31 | javax.mail 32 | mail 33 | 34 | 35 | javax.jms 36 | jms 37 | 38 | 39 | com.sun.jdmk 40 | jmxtools 41 | 42 | 43 | com.sun.jmx 44 | jmxri 45 | 46 | 47 | log4j 48 | log4j 49 | 50 | 51 | org.slf4j 52 | slf4j-log4j12 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | maven-javadoc-plugin 62 | 63 | 64 | http://zookeeper.apache.org/doc/current/api/ 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /dropwizard-extra-zookeeper/src/main/java/com/datasift/dropwizard/zookeeper/ManagedZooKeeper.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.zookeeper; 2 | 3 | import io.dropwizard.lifecycle.Managed; 4 | import org.apache.zookeeper.ZooKeeper; 5 | 6 | /** 7 | * Manages the lifecycle of a {@link ZooKeeper} client instance. 8 | */ 9 | public class ManagedZooKeeper implements Managed{ 10 | 11 | private final ZooKeeper client; 12 | 13 | /** 14 | * Manage the given {@link ZooKeeper} client instance. 15 | * 16 | * @param client the client to manage. 
17 | */ 18 | public ManagedZooKeeper(final ZooKeeper client) { 19 | this.client = client; 20 | } 21 | 22 | @Override 23 | public void start() throws Exception { 24 | // already started, nothing to do 25 | } 26 | 27 | @Override 28 | public void stop() throws Exception { 29 | client.close(); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /dropwizard-extra-zookeeper/src/main/java/com/datasift/dropwizard/zookeeper/health/ZooKeeperHealthCheck.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.zookeeper.health; 2 | 3 | import com.codahale.metrics.health.HealthCheck; 4 | import org.apache.zookeeper.ZooKeeper; 5 | 6 | /** 7 | * A {@link HealthCheck} for a ZooKeeper ensemble. 8 | *

9 | * Checks that: 10 | * <ul> 11 | *     <li>the client is alive,</li> 12 | *     <li>the client is connected to the cluster, and</li> 13 | *     <li>the configured namespace exists</li> 14 | * </ul>
15 | */ 16 | public class ZooKeeperHealthCheck extends HealthCheck { 17 | 18 | private final ZooKeeper client; 19 | private final String namespace; 20 | 21 | /** 22 | * Creates a {@link HealthCheck} that checks the given {@link ZooKeeper} client is functioning 23 | * correctly. 24 | * 25 | * @param client the client to check the health of. 26 | * @param namespace the namespace to check for within the ZooKeeper ensemble. 27 | * @param name the name of this {@link HealthCheck}. 28 | */ 29 | public ZooKeeperHealthCheck(final ZooKeeper client, final String namespace) { 30 | this.client = client; 31 | this.namespace = namespace; 32 | } 33 | 34 | /** 35 | * Checks that the configured {@link ZooKeeper} client is functioning correctly. 36 | * 37 | * @return {@link Result#unhealthy(String)} if the client is not functioning correctly or the 38 | * connected ZooKeeper ensemble is not operating correctly; otherwise, {@link 39 | * Result#healthy()}. 40 | * 41 | * @throws Exception if an error occurs checking the health of the {@link ZooKeeper} client. 42 | */ 43 | @Override 44 | protected Result check() throws Exception { 45 | 46 | final ZooKeeper.States state = client.getState(); 47 | 48 | if (!state.isAlive()) { 49 | return Result.unhealthy("Client is dead, in state: %s", state); 50 | } 51 | 52 | if (!state.isConnected()) { 53 | return Result.unhealthy("Client not connected, in state: %s", state); 54 | } 55 | 56 | if (client.exists(namespace, false) == null) { 57 | return Result.unhealthy("Root namespace does not exist: %s", namespace); 58 | } 59 | 60 | return Result.healthy(); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /dropwizard-extra-zookeeper/src/test/java/com/datasift/dropwizard/zookeeper/ZooKeeperFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.datasift.dropwizard.zookeeper; 2 | 3 | import io.dropwizard.jackson.Jackson; 4 | import com.google.common.io.Resources; 5 | import io.dropwizard.configuration.ConfigurationFactory; 6 | import io.dropwizard.util.Duration; 7 | import org.junit.Before; 8 | import org.junit.Test; 9 | 10 | import javax.validation.Validation; 11 | import javax.validation.Validator; 12 | import java.io.File; 13 | 14 | import static org.junit.Assert.*; 15 | import static org.hamcrest.Matchers.*; 16 | import static org.mockito.Mockito.*; 17 | 18 | /** 19 | * Tests {@link ZooKeeperConfiguration}. 
20 | */ 21 | public class ZooKeeperFactoryTest { 22 | 23 | ZooKeeperFactory config = null; 24 | 25 | @Before 26 | public void setup() throws Exception { 27 | final Validator validator = Validation.buildDefaultValidatorFactory().getValidator(); 28 | config = new ConfigurationFactory<>(ZooKeeperFactory.class, validator, Jackson.newObjectMapper(), "dw") 29 | .build(new File(Resources.getResource("yaml/zookeeper.yaml").toURI())); 30 | } 31 | 32 | @Test 33 | public void hasValidDefaults() { 34 | final ZooKeeperFactory conf = new ZooKeeperFactory(); 35 | 36 | assertThat("default hostname is localhost", 37 | conf.getHosts(), 38 | hasItemInArray("localhost")); 39 | 40 | assertThat("default port is ZooKeeper default", 41 | conf.getPort(), 42 | is(2181)); 43 | 44 | assertThat("default namespace is ZooKeeper root", 45 | conf.getNamespace(), 46 | is("/")); 47 | 48 | assertThat("default connection timeout is 6 seconds", 49 | conf.getConnectionTimeout(), 50 | equalTo(Duration.seconds(6))); 51 | 52 | assertThat("default session timeout is 6 seconds", 53 | conf.getSessionTimeout(), 54 | equalTo(Duration.seconds(6))); 55 | } 56 | 57 | @Test 58 | public void quorumSpecForOneHost() { 59 | final ZooKeeperFactory conf = new ZooKeeperFactory(); 60 | assertThat("quorum spec is correct for single host", 61 | conf.getQuorumSpec(), 62 | is("localhost:2181")); 63 | } 64 | 65 | @Test 66 | public void quorumSpecForMultipleHosts() { 67 | final ZooKeeperFactory conf = mock(ZooKeeperFactory.class); 68 | when(conf.getHosts()).thenReturn(new String[] { "remote1", "remote2" }); 69 | when(conf.getPort()).thenReturn(2181); 70 | when(conf.getQuorumSpec()).thenCallRealMethod(); 71 | 72 | assertThat("quorum spec is correct for multiple hosts", 73 | conf.getQuorumSpec(), 74 | is("remote1:2181,remote2:2181")); 75 | } 76 | 77 | @Test 78 | public void namespacePath() { 79 | final ZooKeeperFactory conf = new ZooKeeperFactory(); 80 | assertThat("namespace represents a valid path", 81 | conf.getNamespace(), 82 | is("/")); 83 | 84 | assertThat("namespace String represents a valid path", 85 | conf.getNamespace(), 86 | is("/")); 87 | } 88 | 89 | @Test 90 | public void parsedConfig() { 91 | assertThat("contains hosts", 92 | config.getHosts(), 93 | is(new String[] { "test1", "test2" })); 94 | 95 | assertThat("parses port", 96 | config.getPort(), 97 | is(2182)); 98 | 99 | assertThat("parses namespace", 100 | config.getNamespace(), 101 | is("/test")); 102 | 103 | assertThat("parses connection timeout", 104 | config.getConnectionTimeout(), 105 | is(Duration.seconds(10))); 106 | 107 | assertThat("parses session timeout", 108 | config.getSessionTimeout(), 109 | is(Duration.seconds(30))); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /dropwizard-extra-zookeeper/src/test/resources/yaml/zookeeper.yaml: -------------------------------------------------------------------------------- 1 | hosts: 2 | - test1 3 | - test2 4 | port: 2182 5 | namespace: /test 6 | connectionTimeout: 10 seconds 7 | sessionTimeout: 30 seconds 8 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 4.0.0 5 | 6 | 7 | 3.0.0 8 | 9 | 10 | 11 | org.sonatype.oss 12 | oss-parent 13 | 7 14 | 15 | 16 | com.datasift.dropwizard 17 | dropwizard-extra 18 | 0.7.1-2-SNAPSHOT 19 | pom 20 | 21 | Dropwizard Extra 22 | http://datasift.github.com/dropwizard-extra 23 | 24 | Additional integrations for 
Dropwizard. 25 | 26 | 27 | 28 | UTF-8 29 | UTF-8 30 | UTF-8 31 | 0.7.1 32 | 33 | 34 | 35 | DataSift Inc. 36 | http://datasift.com/ 37 | 38 | 39 | 40 | 41 | Nick Telford 42 | nick.telford@gmail.com 43 | https://github.com/nicktelford 44 | DataSift Inc. 45 | http://datasift.com 46 | Europe/London 47 | 48 | architect 49 | developer 50 | 51 | 52 | 53 | Jairam Chandar 54 | https://github.com/jairamc 55 | DataSift Inc. 56 | http://datasift.com 57 | Europe/London 58 | 59 | developer 60 | 61 | 62 | 63 | Alistair Bastian 64 | https://github.com/bastianal 65 | DataSift Inc. 66 | http://datasift.com 67 | Europe/London 68 | 69 | developer 70 | 71 | 72 | 73 | 74 | 75 | 76 | David Morgantini 77 | https://github.com/dmorgantini 78 | 79 | 80 | Coda Hale 81 | http://codahale.com 82 | 83 | 84 | 85 | 86 | 87 | The Apache Software License, Version 2.0 88 | http://www.apache.org/licenses/LICENSE-2.0.txt 89 | repo 90 | 91 | 92 | 93 | 94 | github 95 | http://github.com/datasift/dropwizard-extra/issues/#issue/ 96 | 97 | 98 | 99 | scm:git:https://github.com/datasift/dropwizard-extra 100 | scm:git:git@github.com:datasift/dropwizard-extra 101 | https://github.com/datasift/dropwizard-extra 102 | 103 | 112 | 113 | dropwizard-extra-curator 114 | dropwizard-extra-hbase 115 | dropwizard-extra-kafka 116 | dropwizard-extra-kafka7 117 | dropwizard-extra-zookeeper 118 | dropwizard-extra-util 119 | 120 | 121 | 122 | 123 | 124 | io.dropwizard 125 | dropwizard-core 126 | ${dropwizard.version} 127 | 128 | 129 | junit 130 | junit 131 | 4.10 132 | test 133 | 134 | 135 | org.hamcrest 136 | hamcrest-all 137 | 1.1 138 | test 139 | 140 | 141 | org.mockito 142 | mockito-all 143 | 1.9.0 144 | test 145 | 146 | 147 | org.powermock 148 | powermock-module-junit4 149 | 1.5.4 150 | test 151 | 152 | 153 | org.powermock 154 | powermock-api-mockito 155 | 1.4.12 156 | test 157 | 158 | 159 | io.dropwizard 160 | dropwizard-testing 161 | ${dropwizard.version} 162 | test 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | junit 171 | junit 172 | test 173 | 174 | 175 | org.hamcrest 176 | hamcrest-all 177 | test 178 | 179 | 180 | org.mockito 181 | mockito-all 182 | test 183 | 184 | 185 | org.powermock 186 | powermock-module-junit4 187 | test 188 | 189 | 190 | org.powermock 191 | powermock-api-mockito 192 | test 193 | 194 | 195 | 196 | 198 | 199 | 200 | 201 | 202 | 203 | maven-compiler-plugin 204 | 2.5.1 205 | 206 | 207 | maven-site-plugin 208 | 3.3 209 | 210 | 211 | maven-javadoc-plugin 212 | 2.8.1 213 | 215 | 216 | false 217 | 218 | 219 | 220 | maven-source-plugin 221 | 2.2 222 | 223 | 224 | maven-surefire-plugin 225 | 2.12.1 226 | 227 | 228 | maven-failsafe-plugin 229 | 2.12.1 230 | 231 | 232 | 233 | 234 | 235 | maven-compiler-plugin 236 | 237 | 1.7 238 | 1.7 239 | 240 | 241 | 242 | maven-site-plugin 243 | 244 | 245 | org.apache.maven.doxia 246 | doxia-module-markdown 247 | 1.3 248 | 249 | 250 | 251 | 252 | attach-descriptor 253 | 254 | attach-descriptor 255 | 256 | 257 | 258 | 259 | 260 | 269 | 270 | 271 | 272 | 273 | 274 | maven-project-info-reports-plugin 275 | 2.7 276 | 277 | false 278 | false 279 | 280 | ${project.packaging} 281 | 282 | 283 | 284 | maven-javadoc-plugin 285 | 2.8.1 286 | 287 | 288 | 289 | 290 | http://dropwizard.codahale.com/maven/apidocs/ 291 | http://metrics.codahale.com/maven/apidocs/ 292 | 293 | 294 | 295 | 296 | default 297 | 298 | javadoc 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | -------------------------------------------------------------------------------- /src/site/site.xml: 
-------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | org.apache.maven.skins 7 | maven-fluido-skin 8 | 1.2.2 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | true 28 | false 29 | 30 | datasift/dropwizard-extra 31 | 32 | 33 | 34 | 35 | --------------------------------------------------------------------------------
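Usage sketches (editorial note): the abstract SocketHealthCheck in dropwizard-extra-util only requires subclasses to implement check(Socket); connecting the socket is handled by the base class. Below is a minimal sketch of such a subclass — the class name and the "healthy once connected" policy are illustrative assumptions, not part of the module.

```java
import com.datasift.dropwizard.health.SocketHealthCheck;

import java.net.Socket;

// Hypothetical example: treats the remote service as healthy as soon as a TCP
// connection to it can be established.
public class EchoServiceHealthCheck extends SocketHealthCheck {

    public EchoServiceHealthCheck(final String hostname, final int port) {
        super(hostname, port);
    }

    @Override
    protected Result check(final Socket socket) {
        // The base class only invokes this once the socket is connected, so a
        // trivial implementation can report healthy immediately; a real check
        // might exchange an application-level ping over the socket instead.
        return Result.healthy("connected to " + this);
    }
}
```

Registered via, for example, `environment.healthChecks().register("echo", new EchoServiceHealthCheck("localhost", 7))`, it is exposed on the admin port like any other Dropwizard health check.

Similarly, the zookeeper.yaml fixture above (hosts, port, namespace, connectionTimeout, sessionTimeout) deserializes into a ZooKeeperFactory. A hedged sketch of how an application configuration might embed it follows — the configuration class, field, and getter names are assumptions:

```java
import com.datasift.dropwizard.zookeeper.ZooKeeperFactory;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.dropwizard.Configuration;

import javax.validation.Valid;
import javax.validation.constraints.NotNull;

// Hypothetical application configuration exposing the ZooKeeper settings under
// a top-level "zookeeper" key, mirroring the test fixtures above.
public class MyConfiguration extends Configuration {

    @Valid
    @NotNull
    @JsonProperty("zookeeper")
    private ZooKeeperFactory zookeeper = new ZooKeeperFactory();

    public ZooKeeperFactory getZooKeeperFactory() {
        return zookeeper;
    }
}
```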