├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── RELEASE.md ├── pom.xml └── src └── main └── java └── io └── latent └── storm └── rabbitmq ├── ConditionalPublishingRabbitMQBolt.java ├── Declarator.java ├── ErrorReporter.java ├── Message.java ├── MessageScheme.java ├── MultiStreamSplitter.java ├── MultiStreamSpout.java ├── RabbitMQBolt.java ├── RabbitMQConsumer.java ├── RabbitMQMessageScheme.java ├── RabbitMQProducer.java ├── RabbitMQSpout.java ├── RedeliveryStreamSeparator.java ├── TupleToMessage.java ├── TupleToMessageNonDynamic.java ├── UnanchoredConsumer.java ├── UnanchoredRabbitMQSpout.java └── config ├── ConfigAvailableHosts.java ├── ConfigUtils.java ├── ConnectionConfig.java ├── ConsumerConfig.java ├── ConsumerConfigBuilder.java ├── ProducerConfig.java └── ProducerConfigBuilder.java /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | 3 | *.iml 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | jdk: 3 | - oraclejdk7 4 | - openjdk7 5 | - openjdk6 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License (MIT) 2 | 3 | Copyright (c) 2013 peter@latent.io 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # storm-rabbitmq 2 | 3 | [![Build Status](https://travis-ci.org/ppat/storm-rabbitmq.png)](https://travis-ci.org/ppat/storm-rabbitmq) 4 | 5 | 6 | Storm RabbitMQ is a library of tools to be employed while integrating with [RabbitMQ](https://github.com/rabbitmq/rabbitmq-server/) from [Storm](https://github.com/nathanmarz/storm/). This library is intended to be used with RabbitMQ specifically and may not work with other AMQP brokers as this library will be using RabbitMQ specific extensions to AMQP. 7 | 8 | LICENSE: MIT License 9 | 10 | 11 | ## Pre-requisites 12 | 13 | You will need an implementation of ```backtype.storm.spout.Scheme``` to deserialize a RabbitMQ message. 14 | 15 | 16 | ## RabbitMQ Spout 17 | 18 | - This spout will deserialize incoming messages using ```YourCustomMessageScheme``` and emit it on an anchored stream. 
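
For reference, a minimal ```Scheme``` implementation might look like the sketch below (the class name and the single output field are illustrative, not part of the library; the source in this repository uses the Storm 1.x ```org.apache.storm``` package names, which correspond to the older ```backtype.storm``` prefix mentioned above):

```java
import org.apache.storm.spout.Scheme;
import org.apache.storm.tuple.Fields;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

// Illustrative only: emits the raw message payload as a single UTF-8 string field.
public class YourCustomMessageScheme implements Scheme {
  @Override
  public List<Object> deserialize(ByteBuffer body) {
    byte[] bytes = new byte[body.remaining()];
    body.get(bytes);
    return Arrays.<Object>asList(new String(bytes, StandardCharsets.UTF_8));
  }

  @Override
  public Fields getOutputFields() {
    return new Fields("body");
  }
}
```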
19 |
20 | ```java
21 | Scheme scheme = new YourCustomMessageScheme();
22 | IRichSpout spout = new RabbitMQSpout(scheme);
23 | ```
24 |
25 | - Configure the connection to RabbitMQ. If ```requeueOnFail``` is turned on, messages will be redelivered if they fail anywhere within the topology. If it is turned off, failed messages are removed from the queue and potentially sent to a [dead letter exchange](http://www.rabbitmq.com/dlx.html) in RabbitMQ (if one has been configured for this queue).
26 |
27 | ```java
28 | ConnectionConfig connectionConfig = new ConnectionConfig("localhost", 5672, "guest", "guest", ConnectionFactory.DEFAULT_VHOST, 10); // host, port, username, password, virtualHost, heartBeat
29 | ConsumerConfig spoutConfig = new ConsumerConfigBuilder().connection(connectionConfig)
30 |                                                         .queue("your.rabbitmq.queue")
31 |                                                         .prefetch(200)
32 |                                                         .requeueOnFail()
33 |                                                         .build();
34 | ```
35 |
36 | - Add to the topology using TopologyBuilder. Set MaxSpoutPending in Storm to the same value as RabbitMQ's prefetch count (set in ConsumerConfig above) initially. You can tune them separately later, but MaxSpoutPending should always be <= the prefetch count.
37 |
38 | ```java
39 | TopologyBuilder builder = new TopologyBuilder();
40 |
41 | builder.setSpout("my-spout", spout)
42 |        .addConfigurations(spoutConfig.asMap())
43 |        .setMaxSpoutPending(200);
44 | ```
45 |
46 | ## Unanchored Spout
47 |
48 | While the standard ```RabbitMQSpout``` above will deliver messages on an anchored stream, if fault tolerance is not required, you can use the ```UnanchoredRabbitMQSpout```.
49 |
50 | ```java
51 | Scheme scheme = new YourCustomMessageScheme();
52 | IRichSpout spout = new UnanchoredRabbitMQSpout(scheme);
53 | ```
54 |
55 | ## MultiStream Spout
56 |
57 | If you want to split the incoming message stream from your RabbitMQ queue in some manner suitable for your use case, you can use the ```MultiStreamSpout```. You need to provide an implementation of ```MultiStreamSplitter``` that will separate the stream of tuples based on either the deserialized message (as a tuple) or the original serialized ```Message```.
58 |
59 | ```java
60 | MultiStreamSplitter streamSeparator = new MultiStreamSplitter() {
61 |   @Override
62 |   public List<String> streamNames() {
63 |     return Arrays.asList("stream-X", "stream-Y");
64 |   }
65 |
66 |   @Override
67 |   public String selectStream(List<Object> tuple, Message message) {
68 |     // you can look at the deserialized message in the form of the List<Object> tuple
69 |     // or you can look at the original RabbitMQ Message to determine which stream it should be emitted on
70 |     // this is just a simple example for demonstration purposes; you will probably want to inspect the relevant tuple
71 |     // or message values and do something more intelligent to determine which stream it should be assigned to
72 |     if (tuple.get(0).toString().equalsIgnoreCase("something"))
73 |       return "stream-X";
74 |     else
75 |       return "stream-Y";
76 |   }
77 | };
78 |
79 | IRichSpout spout = new MultiStreamSpout(scheme, streamSeparator);
80 | ```
81 |
82 | Now you can bind any bolts to this spout on either "stream-X" or "stream-Y", as shown in the topology wiring below.
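
```StreamXBolt``` and ```StreamYBolt``` in the wiring below are ordinary Storm bolts; nothing RabbitMQ-specific is required of them. A minimal sketch (the class is hypothetical and shown only for illustration):

```java
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

// Illustrative only: processes whatever arrives on the stream it is subscribed to.
public class StreamXBolt extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
    // handle tuples that were emitted on "stream-X"
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // declare output fields here if this bolt emits further tuples
  }
}
```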
83 |
84 | ```java
85 | TopologyBuilder builder = new TopologyBuilder();
86 |
87 | builder.setSpout("split-streams-spout", spout)
88 |        .addConfigurations(spoutConfig.asMap())
89 |        .setMaxSpoutPending(200);
90 | builder.setBolt("work-on-stream-X", new StreamXBolt())
91 |        .shuffleGrouping("split-streams-spout", "stream-X");
92 | builder.setBolt("work-on-stream-Y", new StreamYBolt())
93 |        .shuffleGrouping("split-streams-spout", "stream-Y");
94 | ```
95 |
96 | ### RedeliveryStreamSeparator
97 |
98 | This library comes with an implementation of ```MultiStreamSplitter``` called ```RedeliveryStreamSeparator``` which can be used when you want to split the tuple stream into initial deliveries and redeliveries of messages that failed somewhere within the topology. Since RabbitMQ returns all failed messages back to the beginning of the queue, separating redeliveries from initial deliveries ensures that failing messages do not clog the complete message stream.
99 |
100 | ```java
101 | MultiStreamSplitter streamSeparator = new RedeliveryStreamSeparator();
102 | IRichSpout spout = new MultiStreamSpout(scheme, streamSeparator);
103 | ```
104 |
105 | Now you can send initial deliveries to a FastBolt that fails any tuple that cannot be processed quickly by timing out the calculation, so that the main stream of tuples has no bottleneck due to individual messages that may take a long time to process. Messages that take a long time to process will be redelivered on a separate stream that goes to the SlowBolt.
106 |
107 | ```java
108 | TopologyBuilder builder = new TopologyBuilder();
109 |
110 | builder.setSpout("redelivery-split-spout", spout)
111 |        .addConfigurations(spoutConfig.asMap())
112 |        .setMaxSpoutPending(200);
113 | builder.setBolt("process-quickly-or-fail-bolt", new FastBolt(), 100)   // fast bolt with parallelism
114 |        .shuffleGrouping("redelivery-split-spout", RedeliveryStreamSeparator.INITIAL_DELIVERY_STREAM);
115 | builder.setBolt("retry-failures-with-longer-timeout", new SlowBolt(), 20)   // slow bolt with different parallelism
116 |        .shuffleGrouping("redelivery-split-spout", RedeliveryStreamSeparator.REDELIVERY_STREAM);
117 | ```
118 |
119 | ## Declarator
120 |
121 | By default, these spouts assume that the queue in question already exists in RabbitMQ. If you want the queue declaration to also happen on the spout, you need to provide an implementation of ```io.latent.storm.rabbitmq.Declarator```. Declarator (and therefore storm-rabbitmq) is unopinionated about how the queue this spout will listen on should be wired to exchange(s), and you are free to choose any form of wiring that serves your use case.
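
The interface itself (defined in ```src/main/java/io/latent/storm/rabbitmq/Declarator.java```) is a single callback that hands you an open channel:

```java
public interface Declarator extends Serializable {
  void execute(Channel channel);
}
```

For example, a declarator that declares a queue and a topic exchange and binds them together might look like this: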
122 |
123 | ```java
124 | public class CustomStormDeclarator implements Declarator {
125 |   private final String exchange;
126 |   private final String queue;
127 |   private final String routingKey;
128 |
129 |   public CustomStormDeclarator(String exchange, String queue) {
130 |     this(exchange, queue, "");
131 |   }
132 |
133 |   public CustomStormDeclarator(String exchange, String queue, String routingKey) {
134 |     this.exchange = exchange;
135 |     this.queue = queue;
136 |     this.routingKey = routingKey;
137 |   }
138 |
139 |   @Override
140 |   public void execute(Channel channel) {
141 |     // you're given a RabbitMQ Channel so you're free to wire up your exchange/queue bindings as you see fit
142 |     try {
143 |       Map<String, Object> args = new HashMap<>();
144 |       channel.queueDeclare(queue, true, false, false, args);
145 |       channel.exchangeDeclare(exchange, "topic", true);
146 |       channel.queueBind(queue, exchange, routingKey);
147 |     } catch (IOException e) {
148 |       throw new RuntimeException("Error executing rabbitmq declarations.", e);
149 |     }
150 |   }
151 | }
152 | ```
153 |
154 | And then pass it to the spout constructor.
155 | ```java
156 | Declarator declarator = new CustomStormDeclarator("your.exchange", "your.rabbitmq.queue", "routing.key");
157 | IRichSpout spout = new RabbitMQSpout(scheme, declarator);
158 | ```
159 | The other spouts (UnanchoredRabbitMQSpout, MultiStreamSpout) also take in the declarator as a parameter.
160 |
161 | ## RabbitMQMessageScheme
162 |
163 | The standard backtype message scheme only allows access to the payload of a message received via RabbitMQ. Normally, the payload is all you will need. There are scenarios where this isn't true: you need access to the routing key as part of your topology logic, or you only want to handle "new" messages and need access to the message timestamp. Whatever your use case, the payload isn't enough. The provided RabbitMQMessageScheme allows you to gain access to RabbitMQ message information without having to change every bolt that interacts with a RabbitMQSpout.
164 |
165 | When constructing a RabbitMQMessageScheme you need to provide 3 pieces of information:
166 |
167 | * an implementation of ```backtype.storm.spout.Scheme``` to deserialize a RabbitMQ message payload.
168 | * the tuple field name to use for RabbitMQ message envelope info
169 | * the tuple field name to use for the RabbitMQ properties info
170 |
171 | The first should be fairly self-explanatory. You supply your existing payload handling scheme. All existing bolts will continue to function as is. No field names need to change, nor do any field indexes. The supplied envelope and properties names will be used to allow you to access them in your bolt. Additionally, if you access tuple fields by index, the envelope and properties will be added as 2 additional fields at the end of the tuple.
172 |
173 | If you were to create a RabbitMQMessageScheme as below:
174 |
175 | ```java
176 | Scheme scheme = new RabbitMQMessageScheme(new SimpleJSONScheme(), "myMessageEnvelope", "myMessageProperties");
177 | ```
178 |
179 | then in any bolt attached to the spout stream you could access them as:
180 |
181 | ```java
182 | RabbitMQMessageScheme.Envelope envelope = (RabbitMQMessageScheme.Envelope) tuple.getValueByField("myMessageEnvelope");
183 |
184 | RabbitMQMessageScheme.Properties properties = (RabbitMQMessageScheme.Properties) tuple.getValueByField("myMessageProperties");
185 | ```
186 |
187 | All standard RabbitMQ envelope and message properties are available. See RabbitMQMessageScheme.java for the full interface.
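
For instance, a bolt that only processes recent, first-time deliveries could consult the envelope and properties like the sketch below (the class name and the 5-minute cutoff are illustrative; the field names match the construction above, and the casts are needed because ```Tuple.getValueByField``` returns ```Object```):

```java
import io.latent.storm.rabbitmq.RabbitMQMessageScheme;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

import java.util.Date;
import java.util.concurrent.TimeUnit;

// Illustrative only: drops redeliveries and anything older than 5 minutes.
public class RecentMessagesOnlyBolt extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
    RabbitMQMessageScheme.Envelope envelope =
        (RabbitMQMessageScheme.Envelope) tuple.getValueByField("myMessageEnvelope");
    RabbitMQMessageScheme.Properties properties =
        (RabbitMQMessageScheme.Properties) tuple.getValueByField("myMessageProperties");

    Date timestamp = properties.getTimestamp();
    boolean recent = timestamp != null
        && System.currentTimeMillis() - timestamp.getTime() < TimeUnit.MINUTES.toMillis(5);
    if (recent && !envelope.isRedelivery()) {
      // process the payload fields emitted by your payload scheme here
    }
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) { }
}
```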
188 |
189 |
190 | ## RabbitMQ as a Sink
191 |
192 | There may be times when you wish to send messages to RabbitMQ at the end of one of the streams within your topology.
193 |
194 | First you need to provide an implementation of `TupleToMessage`. This indicates how to transform an incoming tuple within your stream into a RabbitMQ Message.
195 |
196 | ```java
197 | TupleToMessage scheme = new TupleToMessage() {
198 |   @Override
199 |   protected byte[] extractBody(Tuple input) { return input.getStringByField("my-message-body").getBytes(); }
200 |
201 |   @Override
202 |   protected String determineExchangeName(Tuple input) { return input.getStringByField("exchange-to-publish-to"); }
203 |
204 |   @Override
205 |   protected String determineRoutingKey(Tuple input) { return input.getStringByField("my-routing-key"); }
206 |
207 |   @Override
208 |   protected Map<String, Object> specifyHeaders(Tuple input) { return new HashMap<String, Object>(); }
209 |
210 |   @Override
211 |   protected String specifyContentType(Tuple input) { return "application/json"; }
212 |
213 |   @Override
214 |   protected String specifyContentEncoding(Tuple input) { return "UTF-8"; }
215 |
216 |   @Override
217 |   protected boolean specifyMessagePersistence(Tuple input) { return false; }
218 | };
219 | ```
220 |
221 | Then you need your RabbitMQ connection config (just like we did for the spout before).
222 |
223 | ```java
224 | ConnectionConfig connectionConfig = new ConnectionConfig("localhost", 5672, "guest", "guest", ConnectionFactory.DEFAULT_VHOST, 10); // host, port, username, password, virtualHost, heartBeat
225 | ProducerConfig sinkConfig = new ProducerConfigBuilder().connection(connectionConfig).build();
226 | ```
227 |
228 | Now we are ready to add RabbitMQBolt as a sink to your topology.
229 |
230 | ```java
231 | TopologyBuilder builder = new TopologyBuilder();
232 | ...
233 | builder.setBolt("rabbitmq-sink", new RabbitMQBolt(scheme))
234 |        .addConfigurations(sinkConfig.asMap())
235 |        .shuffleGrouping("previous-bolt");
236 | ```
237 |
238 | ### When message attributes are non-dynamic
239 |
240 | Sometimes your message attributes (exchange name, routing key, content-type, etc.) do not change on a message-by-message basis and are fixed per topology. When this is the case you can use `TupleToMessageNonDynamic` for a simpler implementation, supplying the required fields (exchange name, routing key) via the Storm configuration.
241 |
242 | ```java
243 | TupleToMessage scheme = new TupleToMessageNonDynamic() {
244 |   @Override
245 |   protected byte[] extractBody(Tuple input) { return input.getStringByField("my-message-body").getBytes(); }
246 | };
247 | ConnectionConfig connectionConfig = new ConnectionConfig("localhost", 5672, "guest", "guest", ConnectionFactory.DEFAULT_VHOST, 10); // host, port, username, password, virtualHost, heartBeat
248 | ProducerConfig sinkConfig = new ProducerConfigBuilder()
249 |     .connection(connectionConfig)
250 |     .contentEncoding("UTF-8")
251 |     .contentType("application/json")
252 |     .exchange("exchange-to-publish-to")
253 |     .routingKey("")
254 |     .build();
255 | ...
256 | builder.setBolt("rabbitmq-sink", new RabbitMQBolt(scheme))
257 |        .addConfigurations(sinkConfig.asMap())
258 |        .shuffleGrouping("previous-bolt");
259 | ```
260 |
-------------------------------------------------------------------------------- /RELEASE.md: --------------------------------------------------------------------------------
1 | # How to release to Maven Central
2 |
3 | * Follow Central recommendations for [setting up Apache Maven][ossrh-maven]
4 | * Ensure GPG is set up for signing jars
5 | * Ensure `~/.m2/settings.xml` has the following contents for signing
6 |   and uploading jars to Maven Central
7 |
8 | ```xml
9 | <settings>
10 |   <servers>
11 |     <server>
12 |       <id>sonatype-nexus-staging</id>
13 |       <username>[OSS_USER]</username>
14 |       <password>[OSS_PASSWORD]</password>
15 |     </server>
16 |   </servers>
17 | </settings>
18 |
19 | ```
20 |
21 | ### Prepare for release checklist
22 |
23 | [ ] Run unit testing and check test coverage
24 |
25 | [ ] Ensure Javadoc API is fully documented
26 |
27 | [ ] Update README.md for release
28 |
29 | ### Perform release
30 |
31 | 1. Increment the version in `pom.xml` to a stable release:
32 |    e.g. `0.7`. Commit.
33 | 2. Tag the current commit: e.g. `0.7`.
34 | 3. Upload the release.
35 | ```
36 | mvn -e clean deploy -P sonatype-oss-release
37 | ```
38 | 4. Increment plugin version to next snapshot: e.g. `0.8-SNAPSHOT`. Commit.
39 | 5. Push commits and tags
40 | ```
41 | git push origin && git push --tags origin
42 | ```
43 |
44 | This will initially upload the artifact to a staging repository. Once confident
45 | about the release, visit [Maven Central Nexus][ossrh] and follow [instructions on
46 | releasing to production][ossrh-release].
47 |
48 | [ossrh-maven]: http://central.sonatype.org/pages/apache-maven.html
49 | [ossrh-guide]: http://central.sonatype.org/pages/ossrh-guide.html
50 | [ossrh]: https://oss.sonatype.org/
51 | [ossrh-release]: http://central.sonatype.org/pages/releasing-the-deployment.html
52 |
-------------------------------------------------------------------------------- /pom.xml: --------------------------------------------------------------------------------
1 |
2 |
4 | 4.0.0
5 |
6 | io.latent
7 | storm-rabbitmq
8 | 1.1.0-SNAPSHOT
9 | jar
10 | storm-rabbitmq
11 | A library of tools for interacting with RabbitMQ from Storm.
12 | https://github.com/ppat/storm-rabbitmq 13 | 2013 14 | 15 | 16 | The MIT License 17 | http://www.opensource.org/licenses/mit-license.php 18 | repo 19 | 20 | 21 | 22 | 23 | ppat 24 | Peter Pathirana 25 | peter@latent.io 26 | 27 | 28 | SeanTAllen 29 | Sean T Allen 30 | sean@monkeysnatchbanana.com 31 | 32 | 33 | philipsdoctor 34 | Philip S Doctor 35 | 36 | 37 | drazzib 38 | Damien Raude-Morvan 39 | drazzib@drazzib.com 40 | 41 | 42 | bdgould 43 | Ben Gould 44 | bdgould@smcm.edu 45 | 46 | 47 | 48 | 52 | 53 | org.sonatype.oss 54 | oss-parent 55 | 9 56 | 57 | 58 | 59 | scm:git:git@github.com:ppat/storm-rabbitmq.git 60 | scm:git:git@github.com:ppat/storm-rabbitmq.git 61 | git@github.com:ppat/storm-rabbitmq.git 62 | HEAD 63 | 64 | 65 | 66 | UTF-8 67 | 1.6 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | org.apache.maven.plugins 77 | maven-gpg-plugin 78 | 1.5 79 | 80 | 81 | org.apache.maven.plugins 82 | maven-release-plugin 83 | 2.5 84 | 85 | 86 | org.apache.maven.plugins 87 | maven-source-plugin 88 | 2.2.1 89 | 90 | 91 | org.apache.maven.plugins 92 | maven-javadoc-plugin 93 | 2.9.1 94 | 95 | 96 | 97 | 98 | 99 | 100 | org.apache.maven.plugins 101 | maven-compiler-plugin 102 | 2.5.1 103 | 104 | ${jdk.version} 105 | ${jdk.version} 106 | 107 | 108 | 109 | org.apache.maven.plugins 110 | maven-deploy-plugin 111 | 2.5 112 | 113 | 114 | org.apache.maven.plugins 115 | maven-release-plugin 116 | 117 | @{project.version} 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | org.apache.storm 126 | storm-core 127 | 1.0.2 128 | provided 129 | 130 | 131 | com.rabbitmq 132 | amqp-client 133 | 4.0.0 134 | 135 | 136 | org.slf4j 137 | slf4j-api 138 | 1.7.5 139 | 140 | 141 | 142 | 143 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/ConditionalPublishingRabbitMQBolt.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | 4 | import org.apache.storm.tuple.Tuple; 5 | 6 | /** 7 | * Simple extension of {@link io.latent.storm.rabbitmq.RabbitMQBolt} that provides the ability to determine whether a message should be published 8 | * based on the input tuple 9 | * This class is sort of an SPI meaning that it is meant to be subclassed 10 | * and the method {@link io.latent.storm.rabbitmq.ConditionalPublishingRabbitMQBolt#shouldPublish} 11 | * to be overridden with the custom decision logic 12 | */ 13 | public class ConditionalPublishingRabbitMQBolt extends RabbitMQBolt { 14 | 15 | public ConditionalPublishingRabbitMQBolt(TupleToMessage scheme) { 16 | super(scheme); 17 | } 18 | 19 | public ConditionalPublishingRabbitMQBolt(TupleToMessage scheme, Declarator declarator) { 20 | super(scheme, declarator); 21 | } 22 | 23 | @Override 24 | public void execute(final Tuple tuple) { 25 | if(shouldPublish(tuple)) { 26 | publish(tuple); 27 | } 28 | acknowledge(tuple); 29 | } 30 | 31 | protected boolean shouldPublish(Tuple tuple) { 32 | return true; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/Declarator.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import com.rabbitmq.client.Channel; 4 | 5 | import java.io.Serializable; 6 | 7 | public interface Declarator extends Serializable { 8 | void execute(Channel channel); 9 | 10 | public static class NoOp implements Declarator { 11 | @Override 12 | public void execute(Channel 
channel) {} 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/ErrorReporter.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | public interface ErrorReporter { 4 | void reportError(java.lang.Throwable error); 5 | } 6 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/Message.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import java.util.Date; 4 | import java.util.HashMap; 5 | import java.util.Map; 6 | 7 | import com.rabbitmq.client.QueueingConsumer; 8 | 9 | public class Message { 10 | public static final Message NONE = new None(); 11 | 12 | private final byte[] body; 13 | 14 | public Message(byte[] body) { 15 | this.body = body; 16 | } 17 | 18 | public static Message forDelivery(QueueingConsumer.Delivery delivery) { 19 | return (delivery != null) ? new DeliveredMessage(delivery) : NONE; 20 | } 21 | 22 | public static Message forSending(byte[] body, 23 | Map headers, 24 | String exchangeName, 25 | String routingKey, 26 | String contentType, 27 | String contentEncoding, 28 | boolean persistent) { 29 | return (body != null && exchangeName != null && exchangeName.length() > 0) ? 30 | new MessageForSending(body, headers, exchangeName, routingKey, contentType, contentEncoding, persistent) : 31 | NONE; 32 | } 33 | 34 | public byte[] getBody() { 35 | return body; 36 | } 37 | 38 | public static class DeliveredMessage extends Message { 39 | private final boolean redelivery; 40 | private final long deliveryTag; 41 | private final String routingKey; 42 | private final String exchange; 43 | private final String className; 44 | private final String clusterId; 45 | private final String contentEncoding; 46 | private final String contentType; 47 | private final String correlationId; 48 | private final Integer deliveryMode; 49 | private final String expiration; 50 | private final Map headers; 51 | private final String messageId; 52 | private final Integer priority; 53 | private final String replyTo; 54 | private final Date timestamp; 55 | private final String type; 56 | private final String userId; 57 | 58 | private DeliveredMessage(QueueingConsumer.Delivery delivery) { 59 | super(delivery.getBody()); 60 | redelivery = delivery.getEnvelope().isRedeliver(); 61 | deliveryTag = delivery.getEnvelope().getDeliveryTag(); 62 | routingKey = delivery.getEnvelope().getRoutingKey(); 63 | exchange = delivery.getEnvelope().getExchange(); 64 | className = delivery.getProperties().getClassName(); 65 | clusterId = delivery.getProperties().getClusterId(); 66 | contentEncoding = delivery.getProperties().getContentEncoding(); 67 | contentType = delivery.getProperties().getContentType(); 68 | correlationId = delivery.getProperties().getCorrelationId(); 69 | deliveryMode = delivery.getProperties().getDeliveryMode(); 70 | expiration = delivery.getProperties().getExpiration(); 71 | headers = delivery.getProperties().getHeaders(); 72 | messageId = delivery.getProperties().getMessageId(); 73 | priority = delivery.getProperties().getPriority(); 74 | replyTo = delivery.getProperties().getReplyTo(); 75 | timestamp = delivery.getProperties().getTimestamp(); 76 | type = delivery.getProperties().getType(); 77 | userId = delivery.getProperties().getUserId(); 78 | } 79 | 80 | public boolean isRedelivery() { return 
redelivery; } 81 | public long getDeliveryTag() { return deliveryTag; } 82 | public String getRoutingKey() { return routingKey; } 83 | public String getExchange() { return exchange; } 84 | public String getClassName() { return className;} 85 | public String getClusterId(){ return clusterId; } 86 | public String getContentEncoding() { return contentEncoding; } 87 | public String getContentType() { return contentType; } 88 | public String getCorrelationId() { return correlationId; } 89 | public Integer getDeliveryMode() { return deliveryMode; } 90 | public String getExpiration() { return expiration; } 91 | public Map getHeaders() { return headers; } 92 | public String getMessageId() { return messageId; } 93 | public Integer getPriority() { return priority; } 94 | public String getReplyTo() { return replyTo; } 95 | public Date getTimestamp() { return timestamp; } 96 | public String getType() { return type; } 97 | public String getUserId() { return userId; } 98 | } 99 | 100 | public static class None extends Message { 101 | private None() { 102 | super(null); 103 | } 104 | 105 | @Override 106 | public byte[] getBody() { throw new UnsupportedOperationException(); }; 107 | } 108 | 109 | public static class MessageForSending extends Message { 110 | private final Map headers; 111 | private final String exchangeName; 112 | private final String routingKey; 113 | private final String contentType; 114 | private final String contentEncoding; 115 | private final boolean persistent; 116 | 117 | private MessageForSending(byte[] body, 118 | Map headers, 119 | String exchangeName, 120 | String routingKey, 121 | String contentType, 122 | String contentEncoding, 123 | boolean persistent) { 124 | super(body); 125 | this.headers = (headers != null) ? headers : new HashMap(); 126 | this.exchangeName = exchangeName; 127 | this.routingKey = routingKey; 128 | this.contentType = contentType; 129 | this.contentEncoding = contentEncoding; 130 | this.persistent = persistent; 131 | } 132 | 133 | public Map getHeaders() 134 | { 135 | return headers; 136 | } 137 | 138 | public String getExchangeName() 139 | { 140 | return exchangeName; 141 | } 142 | 143 | public String getRoutingKey() 144 | { 145 | return routingKey; 146 | } 147 | 148 | public String getContentType() 149 | { 150 | return contentType; 151 | } 152 | 153 | public String getContentEncoding() 154 | { 155 | return contentEncoding; 156 | } 157 | 158 | public boolean isPersistent() 159 | { 160 | return persistent; 161 | } 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/MessageScheme.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import org.apache.storm.spout.Scheme; 4 | import org.apache.storm.task.TopologyContext; 5 | import org.apache.storm.tuple.Fields; 6 | 7 | import java.nio.ByteBuffer; 8 | import java.util.List; 9 | import java.util.Map; 10 | 11 | public interface MessageScheme extends Scheme { 12 | void open(Map config, 13 | TopologyContext context); 14 | 15 | void close(); 16 | 17 | List deserialize(Message message); 18 | 19 | class Builder { 20 | public static MessageScheme from(final Scheme scheme) { 21 | if (scheme instanceof MessageScheme) 22 | return (MessageScheme) scheme; 23 | else 24 | return create(scheme); 25 | } 26 | 27 | private static MessageScheme create(final Scheme scheme) { 28 | return new MessageScheme() { 29 | @Override 30 | public void open(Map config, 31 | 
TopologyContext context) { } 32 | 33 | @Override 34 | public void close() { } 35 | 36 | @Override 37 | public List deserialize(Message message) { 38 | return scheme.deserialize(ByteBuffer.wrap(message.getBody())); 39 | } 40 | 41 | @Override 42 | public List deserialize(ByteBuffer byteBuffer) { 43 | return scheme.deserialize(byteBuffer); 44 | } 45 | 46 | @Override 47 | public Fields getOutputFields() { 48 | return scheme.getOutputFields(); 49 | } 50 | }; 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/MultiStreamSplitter.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import java.io.Serializable; 4 | import java.util.List; 5 | 6 | /** 7 | * Used for splitting a stream into multiple streams by examining the each individual tuple/message 8 | * 9 | * @author peter@latent.io 10 | */ 11 | public interface MultiStreamSplitter extends Serializable { 12 | /** 13 | * @return a list of streams a tuple may be assigned to from this stream splitter 14 | */ 15 | List streamNames(); 16 | 17 | /** 18 | * Given the tuple (the de-serialized message contents) and the message in its original serialized format, select 19 | * the stream this particular tuple should be emitted under. 20 | * 21 | * @param tuple the de-serialized form of message as a list of objects (a tuple) 22 | * @param message the original RabbitMQ message before it was de-serialized (incl. amqp envelope information) 23 | * @return the stream assigned to the tuple 24 | */ 25 | String selectStream(List tuple, 26 | Message message); 27 | } 28 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/MultiStreamSpout.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import org.apache.storm.spout.Scheme; 4 | import org.apache.storm.spout.SpoutOutputCollector; 5 | import org.apache.storm.topology.OutputFieldsDeclarer; 6 | import org.apache.storm.tuple.Fields; 7 | 8 | import java.util.List; 9 | 10 | /** 11 | * MultiStreamSpout will emit tuples on multiple streams by assigning tuples to a stream using the provided 12 | * MultiStreamSplitter. 
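 * See {@link RedeliveryStreamSeparator} for a ready-made splitter that separates initial deliveries from redeliveries.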
13 | * 14 | * @author peter@latent.io 15 | */ 16 | public class MultiStreamSpout extends RabbitMQSpout { 17 | private final MultiStreamSplitter streamSplitter; 18 | private final Fields outputFields; 19 | 20 | public MultiStreamSpout(Scheme scheme, 21 | MultiStreamSplitter streamSplitter) { 22 | super(scheme); 23 | this.outputFields = scheme.getOutputFields(); 24 | this.streamSplitter = streamSplitter; 25 | } 26 | 27 | public MultiStreamSpout(Scheme scheme, 28 | MultiStreamSplitter streamSplitter, 29 | Declarator declarator) { 30 | super(scheme, declarator); 31 | this.outputFields = scheme.getOutputFields(); 32 | this.streamSplitter = streamSplitter; 33 | } 34 | 35 | @Override 36 | protected List emit(List tuple, 37 | Message message, 38 | SpoutOutputCollector spoutOutputCollector) { 39 | String stream = streamSplitter.selectStream(tuple, message); 40 | return spoutOutputCollector.emit(stream, tuple, getDeliveryTag(message)); 41 | } 42 | 43 | @Override 44 | public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) { 45 | for (String stream : streamSplitter.streamNames()) { 46 | outputFieldsDeclarer.declareStream(stream, outputFields); 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/RabbitMQBolt.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import org.apache.storm.task.OutputCollector; 4 | import org.apache.storm.task.TopologyContext; 5 | import org.apache.storm.topology.OutputFieldsDeclarer; 6 | import org.apache.storm.topology.base.BaseRichBolt; 7 | import org.apache.storm.tuple.Tuple; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.util.Map; 12 | 13 | /** 14 | * This is a simple bolt for producing messages to RabbitMQ from a Storm 15 | * topology. It needs a {@link TupleToMessage} object to perform the real meat 16 | * of converting the incoming {@link Tuple} from a stream into a {@link Message} 17 | * to publish on RabbitMQ. 18 | * 19 | * @author bdgould 20 | * 21 | */ 22 | public class RabbitMQBolt extends BaseRichBolt { 23 | private static final long serialVersionUID = 97236452008970L; 24 | 25 | private final TupleToMessage scheme; 26 | private final Declarator declarator; 27 | 28 | private transient Logger logger; 29 | private transient RabbitMQProducer producer; 30 | private transient OutputCollector collector; 31 | 32 | public RabbitMQBolt(final TupleToMessage scheme) { 33 | this(scheme, new Declarator.NoOp()); 34 | } 35 | 36 | public RabbitMQBolt(final TupleToMessage scheme, final Declarator declarator) { 37 | this.scheme = scheme; 38 | this.declarator = declarator; 39 | } 40 | 41 | @Override 42 | public void prepare(@SuppressWarnings("rawtypes") final Map stormConf, final TopologyContext context, final OutputCollector collector) { 43 | producer = new RabbitMQProducer(declarator); 44 | producer.open(stormConf); 45 | logger = LoggerFactory.getLogger(this.getClass()); 46 | this.collector = collector; 47 | this.scheme.prepare(stormConf); 48 | logger.info("Successfully prepared RabbitMQBolt"); 49 | } 50 | 51 | @Override 52 | public void execute(final Tuple tuple) { 53 | publish(tuple); 54 | // tuples are always acked, even when transformation by scheme yields Message.NONE as 55 | // if it failed once it's unlikely to succeed when re-attempted (i.e. serialization/deserilization errors). 
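    // (ConditionalPublishingRabbitMQBolt overrides execute() to consult shouldPublish(tuple) before publishing, while still acking every tuple.)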
56 | acknowledge(tuple); 57 | } 58 | 59 | protected void acknowledge(Tuple tuple) { 60 | collector.ack(tuple); 61 | } 62 | 63 | protected void publish(Tuple tuple) { 64 | producer.send(scheme.produceMessage(tuple)); 65 | } 66 | 67 | @Override 68 | public void declareOutputFields(final OutputFieldsDeclarer declarer) { 69 | //No fields are emitted from this drain. 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/RabbitMQConsumer.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import io.latent.storm.rabbitmq.config.ConnectionConfig; 4 | 5 | import java.io.IOException; 6 | import java.io.Serializable; 7 | import java.util.concurrent.TimeoutException; 8 | 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import com.rabbitmq.client.Address; 13 | import com.rabbitmq.client.Channel; 14 | import com.rabbitmq.client.Connection; 15 | import com.rabbitmq.client.ConnectionFactory; 16 | import com.rabbitmq.client.ConsumerCancelledException; 17 | import com.rabbitmq.client.QueueingConsumer; 18 | import com.rabbitmq.client.ShutdownListener; 19 | import com.rabbitmq.client.ShutdownSignalException; 20 | 21 | /** 22 | * An abstraction on RabbitMQ client API to encapsulate interaction with RabbitMQ and de-couple Storm API from RabbitMQ API. 23 | * 24 | * @author peter@latent.io 25 | */ 26 | public class RabbitMQConsumer implements Serializable { 27 | public static final long MS_WAIT_FOR_MESSAGE = 1L; 28 | 29 | private final ConnectionFactory connectionFactory; 30 | private final Address[] highAvailabilityHosts; 31 | private final int prefetchCount; 32 | private final String queueName; 33 | private final boolean requeueOnFail; 34 | private final Declarator declarator; 35 | private final ErrorReporter reporter; 36 | private final Logger logger; 37 | 38 | private Connection connection; 39 | private Channel channel; 40 | private QueueingConsumer consumer; 41 | private String consumerTag; 42 | 43 | public RabbitMQConsumer(ConnectionConfig connectionConfig, 44 | int prefetchCount, 45 | String queueName, 46 | boolean requeueOnFail, 47 | Declarator declarator, 48 | ErrorReporter errorReporter) { 49 | this.connectionFactory = connectionConfig.asConnectionFactory(); 50 | this.highAvailabilityHosts = connectionConfig.getHighAvailabilityHosts().toAddresses(); 51 | this.prefetchCount = prefetchCount; 52 | this.queueName = queueName; 53 | this.requeueOnFail = requeueOnFail; 54 | this.declarator = declarator; 55 | 56 | this.reporter = errorReporter; 57 | this.logger = LoggerFactory.getLogger(RabbitMQConsumer.class); 58 | } 59 | 60 | public Message nextMessage() { 61 | reinitIfNecessary(); 62 | if (consumerTag == null || consumer == null) return Message.NONE; 63 | try { 64 | return Message.forDelivery(consumer.nextDelivery(MS_WAIT_FOR_MESSAGE)); 65 | } catch (ShutdownSignalException sse) { 66 | reset(); 67 | logger.error("shutdown signal received while attempting to get next message", sse); 68 | reporter.reportError(sse); 69 | return Message.NONE; 70 | } catch (InterruptedException ie) { 71 | /* nothing to do. 
timed out waiting for message */ 72 | logger.debug("interruepted while waiting for message", ie); 73 | return Message.NONE; 74 | } catch (ConsumerCancelledException cce) { 75 | /* if the queue on the broker was deleted or node in the cluster containing the queue failed */ 76 | reset(); 77 | logger.error("consumer got cancelled while attempting to get next message", cce); 78 | reporter.reportError(cce); 79 | return Message.NONE; 80 | } 81 | } 82 | 83 | public void ack(Long msgId) { 84 | reinitIfNecessary(); 85 | try { 86 | channel.basicAck(msgId, false); 87 | } catch (ShutdownSignalException sse) { 88 | reset(); 89 | logger.error("shutdown signal received while attempting to ack message", sse); 90 | reporter.reportError(sse); 91 | } catch (Exception e) { 92 | logger.error("could not ack for msgId: " + msgId, e); 93 | reporter.reportError(e); 94 | } 95 | } 96 | 97 | public void fail(Long msgId) { 98 | if (requeueOnFail) 99 | failWithRedelivery(msgId); 100 | else 101 | deadLetter(msgId); 102 | } 103 | 104 | public void failWithRedelivery(Long msgId) { 105 | reinitIfNecessary(); 106 | try { 107 | channel.basicReject(msgId, true); 108 | } catch (ShutdownSignalException sse) { 109 | reset(); 110 | logger.error("shutdown signal received while attempting to fail with redelivery", sse); 111 | reporter.reportError(sse); 112 | } catch (Exception e) { 113 | logger.error("could not fail with redelivery for msgId: " + msgId, e); 114 | reporter.reportError(e); 115 | } 116 | } 117 | 118 | public void deadLetter(Long msgId) { 119 | reinitIfNecessary(); 120 | try { 121 | channel.basicReject(msgId, false); 122 | } catch (ShutdownSignalException sse) { 123 | reset(); 124 | logger.error("shutdown signal received while attempting to fail with no redelivery", sse); 125 | reporter.reportError(sse); 126 | } catch (Exception e) { 127 | logger.error("could not fail with dead-lettering (when configured) for msgId: " + msgId, e); 128 | reporter.reportError(e); 129 | } 130 | } 131 | 132 | public void open() { 133 | try { 134 | connection = createConnection(); 135 | channel = connection.createChannel(); 136 | if (prefetchCount > 0) { 137 | logger.info("setting basic.qos / prefetch count to " + prefetchCount + " for " + queueName); 138 | channel.basicQos(prefetchCount); 139 | } 140 | // run any declaration prior to queue consumption 141 | declarator.execute(channel); 142 | 143 | consumer = new QueueingConsumer(channel); 144 | consumerTag = channel.basicConsume(queueName, isAutoAcking(), consumer); 145 | } catch (Exception e) { 146 | reset(); 147 | logger.error("could not open listener on queue " + queueName); 148 | reporter.reportError(e); 149 | } 150 | } 151 | 152 | protected boolean isAutoAcking() { 153 | return false; 154 | } 155 | 156 | public void close() { 157 | try { 158 | if (channel != null && channel.isOpen()) { 159 | if (consumerTag != null) channel.basicCancel(consumerTag); 160 | channel.close(); 161 | } 162 | } catch (Exception e) { 163 | logger.debug("error closing channel and/or cancelling consumer", e); 164 | } 165 | try { 166 | logger.info("closing connection to rabbitmq: " + connection); 167 | connection.close(); 168 | } catch (Exception e) { 169 | logger.debug("error closing connection", e); 170 | } 171 | consumer = null; 172 | consumerTag = null; 173 | channel = null; 174 | connection = null; 175 | } 176 | 177 | private void reset() { 178 | consumerTag = null; 179 | } 180 | 181 | private void reinitIfNecessary() { 182 | if (consumerTag == null || consumer == null) { 183 | close(); 184 | open(); 185 | 
} 186 | } 187 | 188 | private Connection createConnection() throws IOException, TimeoutException { 189 | Connection connection = highAvailabilityHosts == null || highAvailabilityHosts.length == 0 190 | ? connectionFactory.newConnection() 191 | : connectionFactory.newConnection(highAvailabilityHosts); 192 | connection.addShutdownListener(new ShutdownListener() { 193 | @Override 194 | public void shutdownCompleted(ShutdownSignalException cause) { 195 | logger.error("shutdown signal received", cause); 196 | reporter.reportError(cause); 197 | reset(); 198 | } 199 | }); 200 | logger.info("connected to rabbitmq: " + connection + " for " + queueName); 201 | return connection; 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/RabbitMQMessageScheme.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import java.nio.ByteBuffer; 4 | import java.util.*; 5 | 6 | import com.rabbitmq.client.LongString; 7 | import org.apache.storm.spout.Scheme; 8 | import org.apache.storm.task.TopologyContext; 9 | import org.apache.storm.tuple.Fields; 10 | 11 | import java.io.Serializable; 12 | 13 | 14 | public class RabbitMQMessageScheme implements MessageScheme { 15 | private final Scheme payloadScheme; 16 | private final List fieldNames; 17 | 18 | 19 | public RabbitMQMessageScheme(Scheme payloadScheme, String envelopeFieldName, String propertiesFieldName) { 20 | this.payloadScheme = payloadScheme; 21 | 22 | List payloadFieldNames = payloadScheme.getOutputFields().toList(); 23 | 24 | this.fieldNames = new ArrayList(); 25 | fieldNames.addAll(payloadFieldNames); 26 | fieldNames.add(envelopeFieldName); 27 | fieldNames.add(propertiesFieldName); 28 | } 29 | 30 | @Override 31 | public void open(Map config, TopologyContext context) { 32 | } 33 | 34 | @Override 35 | public void close() { 36 | } 37 | 38 | @Override 39 | public List deserialize(Message message) { 40 | Message.DeliveredMessage dm = (Message.DeliveredMessage)message; 41 | Envelope envelope = createEnvelope(dm); 42 | Properties properties = createProperties(dm); 43 | List payloadValues = deserialize(ByteBuffer.wrap(dm.getBody())); 44 | 45 | List values = new ArrayList(); 46 | values.addAll(payloadValues); 47 | values.add(envelope); 48 | values.add(properties); 49 | 50 | return values; 51 | } 52 | 53 | @Override 54 | public List deserialize(ByteBuffer byteBuffer) { 55 | return payloadScheme.deserialize(byteBuffer); 56 | } 57 | 58 | @Override 59 | public Fields getOutputFields() { 60 | return new Fields(fieldNames); 61 | } 62 | 63 | private Envelope createEnvelope(Message.DeliveredMessage dm) { 64 | return new Envelope(dm.isRedelivery(), dm.getDeliveryTag(), dm.getExchange(), dm.getRoutingKey()); 65 | } 66 | 67 | private Properties createProperties(Message.DeliveredMessage dm) { 68 | return new Properties(dm.getClassName(), 69 | dm.getClusterId(), 70 | dm.getContentEncoding(), 71 | dm.getContentType(), 72 | dm.getCorrelationId(), 73 | dm.getDeliveryMode(), 74 | dm.getExpiration(), 75 | serializableHeaders(dm.getHeaders()), 76 | dm.getMessageId(), 77 | dm.getPriority(), 78 | dm.getReplyTo(), 79 | dm.getTimestamp(), 80 | dm.getType(), 81 | dm.getUserId()); 82 | } 83 | 84 | private Map serializableHeaders(Map headers) { 85 | if (headers == null) { 86 | return new HashMap(); 87 | } 88 | 89 | Map headersSerializable = new HashMap(headers.size()); 90 | for (Map.Entry entry : headers.entrySet()) { 91 | 
if (entry.getValue() instanceof Number || 92 | entry.getValue() instanceof Boolean || 93 | entry.getValue() instanceof Character || 94 | entry.getValue() instanceof String || 95 | entry.getValue() instanceof Date) { 96 | headersSerializable.put(entry.getKey(), entry.getValue()); 97 | } else if (entry.getValue() instanceof LongString) { 98 | headersSerializable.put(entry.getKey(), entry.getValue().toString()); 99 | } else if (entry.getValue() instanceof ArrayList) { 100 | ArrayList serializedList = new ArrayList(); 101 | for (Object elm : ((ArrayList) entry.getValue())) { 102 | if (elm instanceof HashMap) { 103 | serializedList.add(serializableHeaders((HashMap) elm)); 104 | } 105 | } 106 | headersSerializable.put(entry.getKey(), serializedList); 107 | } 108 | } 109 | return headersSerializable; 110 | } 111 | 112 | public static class Envelope implements Serializable { 113 | private final boolean isRedelivery; 114 | private final long deliveryTag; 115 | private final String exchange; 116 | private final String routingKey; 117 | 118 | public Envelope(boolean isRedelivery, long deliveryTag, String exchange, String routingKey) { 119 | this.isRedelivery = isRedelivery; 120 | this.deliveryTag = deliveryTag; 121 | this.exchange = exchange; 122 | this.routingKey = routingKey; 123 | } 124 | 125 | public boolean isRedelivery() { return isRedelivery; } 126 | public long getDeliveryTag() { return deliveryTag; } 127 | public String getExchange() { return exchange; } 128 | public String getRoutingKey() { return routingKey; } 129 | } 130 | 131 | public static class Properties implements Serializable { 132 | private final String className; 133 | private final String clusterId; 134 | private final String contentEncoding; 135 | private final String contentType; 136 | private final String correlationId; 137 | private final Integer deliveryMode; 138 | private final String expiration; 139 | private final Map headers; 140 | private final String messageId; 141 | private final Integer priority; 142 | private final String replyTo; 143 | private final Date timestamp; 144 | private final String type; 145 | private final String userId; 146 | 147 | 148 | public Properties(String className, 149 | String clusterId, 150 | String contentEncoding, 151 | String contentType, 152 | String correlationId, 153 | Integer deliveryMode, 154 | String expiration, 155 | Map headers, 156 | String messageId, 157 | Integer priority, 158 | String replyTo, 159 | Date timestamp, 160 | String type, 161 | String userId) { 162 | this.className = className; 163 | this.clusterId = clusterId; 164 | this.contentEncoding = contentEncoding; 165 | this.contentType = contentType; 166 | this.correlationId = correlationId; 167 | this.deliveryMode = deliveryMode; 168 | this.expiration = expiration; 169 | this.headers = headers; 170 | this.messageId = messageId; 171 | this.priority = priority; 172 | this.replyTo = replyTo; 173 | this.timestamp = timestamp; 174 | this.type = type; 175 | this.userId = userId; 176 | } 177 | 178 | public String getClassName() { return className; } 179 | public String getClusterId() { return clusterId; } 180 | public String getContentEncoding() { return contentEncoding; } 181 | public String getContentType() { return contentType; } 182 | public String getCorrelationId() { return correlationId; } 183 | public Integer getDeliveryMode() { return deliveryMode; } 184 | public String getExpiration() { return expiration; } 185 | public Map getHeaders() { return headers; } 186 | public String getMessageId() { return messageId; } 187 | 
public Integer getPriority() { return priority; } 188 | public String getReplyTo() { return replyTo; } 189 | public Date getTimestamp() { return timestamp; } 190 | public String getType() { return type; } 191 | public String getUserId() { return userId; } 192 | } 193 | } 194 | 195 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/RabbitMQProducer.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import com.rabbitmq.client.*; 4 | import io.latent.storm.rabbitmq.config.ConnectionConfig; 5 | import org.apache.storm.topology.ReportedFailedException; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.io.IOException; 10 | import java.io.Serializable; 11 | import java.util.Map; 12 | import java.util.concurrent.TimeoutException; 13 | 14 | public class RabbitMQProducer implements Serializable { 15 | private final Declarator declarator; 16 | 17 | private transient Logger logger; 18 | 19 | private transient ConnectionConfig connectionConfig; 20 | private transient Connection connection; 21 | private transient Channel channel; 22 | 23 | private boolean blocked = false; 24 | 25 | public RabbitMQProducer() 26 | { 27 | this(new Declarator.NoOp()); 28 | } 29 | 30 | public RabbitMQProducer(Declarator declarator) { 31 | this.declarator = declarator; 32 | } 33 | 34 | public void send(Message message) { 35 | if (message == Message.NONE) return; 36 | sendMessageWhenNotBlocked((Message.MessageForSending) message); 37 | } 38 | 39 | private void sendMessageWhenNotBlocked(Message.MessageForSending message) { 40 | while (true) { 41 | if (blocked) { 42 | try { Thread.sleep(100); } catch (InterruptedException ie) { } 43 | } else { 44 | sendMessageActual(message); 45 | return; 46 | } 47 | } 48 | } 49 | 50 | private void sendMessageActual(Message.MessageForSending message) { 51 | 52 | reinitIfNecessary(); 53 | if (channel == null) throw new ReportedFailedException("No connection to RabbitMQ"); 54 | try { 55 | AMQP.BasicProperties properties = new AMQP.BasicProperties.Builder() 56 | .contentType(message.getContentType()) 57 | .contentEncoding(message.getContentEncoding()) 58 | .deliveryMode((message.isPersistent()) ? 2 : 1) 59 | .headers(message.getHeaders()) 60 | .build(); 61 | channel.basicPublish(message.getExchangeName(), message.getRoutingKey(), properties, message.getBody()); 62 | } catch (AlreadyClosedException ace) { 63 | logger.error("already closed exception while attempting to send message", ace); 64 | reset(); 65 | throw new ReportedFailedException(ace); 66 | } catch (IOException ioe) { 67 | logger.error("io exception while attempting to send message", ioe); 68 | reset(); 69 | throw new ReportedFailedException(ioe); 70 | } catch (Exception e) { 71 | logger.warn("Unexpected error while sending message. 
Backing off for a bit before trying again (to allow time for recovery)", e); 72 | try { Thread.sleep(1000); } catch (InterruptedException ie) { } 73 | } 74 | } 75 | 76 | public void open(final Map config) { 77 | logger = LoggerFactory.getLogger(RabbitMQProducer.class); 78 | connectionConfig = ConnectionConfig.getFromStormConfig(config); 79 | internalOpen(); 80 | } 81 | 82 | private void internalOpen() { 83 | try { 84 | connection = createConnection(); 85 | channel = connection.createChannel(); 86 | 87 | // run any declaration prior to message sending 88 | declarator.execute(channel); 89 | } catch (Exception e) { 90 | logger.error("could not open connection on rabbitmq", e); 91 | reset(); 92 | } 93 | } 94 | 95 | public void close() { 96 | try { 97 | if (channel != null && channel.isOpen()) { 98 | channel.close(); 99 | } 100 | } catch (Exception e) { 101 | logger.debug("error closing channel", e); 102 | } 103 | try { 104 | logger.info("closing connection to rabbitmq: " + connection); 105 | connection.close(); 106 | } catch (Exception e) { 107 | logger.debug("error closing connection", e); 108 | } 109 | channel = null; 110 | connection = null; 111 | } 112 | 113 | private void reset() { 114 | channel = null; 115 | } 116 | 117 | private void reinitIfNecessary() { 118 | if (channel == null) { 119 | close(); 120 | internalOpen(); 121 | } 122 | } 123 | 124 | private Connection createConnection() throws IOException, TimeoutException { 125 | ConnectionFactory connectionFactory = connectionConfig.asConnectionFactory(); 126 | Connection connection = connectionConfig.getHighAvailabilityHosts().isEmpty() ? connectionFactory.newConnection() 127 | : connectionFactory.newConnection(connectionConfig.getHighAvailabilityHosts().toAddresses()); 128 | connection.addShutdownListener(new ShutdownListener() { 129 | @Override 130 | public void shutdownCompleted(ShutdownSignalException cause) { 131 | logger.error("shutdown signal received", cause); 132 | reset(); 133 | } 134 | }); 135 | connection.addBlockedListener(new BlockedListener() 136 | { 137 | @Override 138 | public void handleBlocked(String reason) throws IOException 139 | { 140 | blocked = true; 141 | logger.warn(String.format("Got blocked by rabbitmq with reason = %s", reason)); 142 | } 143 | 144 | @Override 145 | public void handleUnblocked() throws IOException 146 | { 147 | blocked = false; 148 | logger.warn(String.format("Got unblocked by rabbitmq")); 149 | } 150 | }); 151 | logger.info("connected to rabbitmq: " + connection); 152 | return connection; 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/RabbitMQSpout.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import io.latent.storm.rabbitmq.config.ConsumerConfig; 4 | 5 | import java.util.Collections; 6 | import java.util.List; 7 | import java.util.Map; 8 | 9 | import org.apache.storm.spout.Scheme; 10 | import org.apache.storm.spout.SpoutOutputCollector; 11 | import org.apache.storm.task.TopologyContext; 12 | import org.apache.storm.topology.OutputFieldsDeclarer; 13 | import org.apache.storm.topology.base.BaseRichSpout; 14 | import org.slf4j.Logger; 15 | import org.slf4j.LoggerFactory; 16 | 17 | /** 18 | * A simple RabbitMQ spout that emits an anchored tuple stream (on the default stream). This can be used with 19 | * Storm's guaranteed message processing. 
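 * The RabbitMQ delivery tag is used as the Storm message id, so ack() and fail() translate to basicAck and basicReject on the underlying consumer channel.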
20 | * 21 | * @author peter@latent.io 22 | */ 23 | public class RabbitMQSpout extends BaseRichSpout { 24 | private final MessageScheme scheme; 25 | private final Declarator declarator; 26 | 27 | private transient Logger logger; 28 | private transient RabbitMQConsumer consumer; 29 | private transient SpoutOutputCollector collector; 30 | private transient int prefetchCount; 31 | 32 | private boolean active; 33 | private String streamId; 34 | 35 | public RabbitMQSpout(Scheme scheme) { 36 | this(MessageScheme.Builder.from(scheme), new Declarator.NoOp(),null); 37 | } 38 | 39 | public RabbitMQSpout(Scheme scheme, String streamId){ 40 | this(MessageScheme.Builder.from(scheme), new Declarator.NoOp(), streamId); 41 | } 42 | 43 | public RabbitMQSpout(Scheme scheme, Declarator declarator) { 44 | this(MessageScheme.Builder.from(scheme), declarator,null); 45 | } 46 | 47 | public RabbitMQSpout(MessageScheme scheme, Declarator declarator) { 48 | this(scheme,declarator,null); 49 | } 50 | 51 | public RabbitMQSpout(Scheme scheme, Declarator declarator, String streamId){ 52 | this(MessageScheme.Builder.from(scheme), declarator, streamId); 53 | } 54 | 55 | public RabbitMQSpout(MessageScheme scheme, Declarator declarator, String streamId){ 56 | this.scheme =scheme; 57 | this.declarator =declarator; 58 | this.streamId = streamId; 59 | } 60 | 61 | @Override 62 | public void open(final Map config, 63 | final TopologyContext context, 64 | final SpoutOutputCollector spoutOutputCollector) { 65 | ConsumerConfig consumerConfig = ConsumerConfig.getFromStormConfig(config); 66 | ErrorReporter reporter = new ErrorReporter() { 67 | @Override 68 | public void reportError(Throwable error) { 69 | spoutOutputCollector.reportError(error); 70 | } 71 | }; 72 | consumer = loadConsumer(declarator, reporter, consumerConfig); 73 | scheme.open(config, context); 74 | consumer.open(); 75 | prefetchCount = consumerConfig.getPrefetchCount(); 76 | logger = LoggerFactory.getLogger(RabbitMQSpout.class); 77 | collector = spoutOutputCollector; 78 | active = true; 79 | } 80 | 81 | protected RabbitMQConsumer loadConsumer(Declarator declarator, 82 | ErrorReporter reporter, 83 | ConsumerConfig config) { 84 | return new RabbitMQConsumer(config.getConnectionConfig(), 85 | config.getPrefetchCount(), 86 | config.getQueueName(), 87 | config.isRequeueOnFail(), 88 | declarator, 89 | reporter); 90 | } 91 | 92 | @Override 93 | public void close() { 94 | consumer.close(); 95 | scheme.close(); 96 | super.close(); 97 | } 98 | 99 | @Override 100 | public void nextTuple() { 101 | if (!active) return; 102 | int emitted = 0; 103 | Message message; 104 | while (emitted < prefetchCount && (message = consumer.nextMessage()) != Message.NONE) { 105 | List tuple = extractTuple(message); 106 | if (!tuple.isEmpty()) { 107 | emit(tuple, message, collector); 108 | emitted += 1; 109 | } 110 | } 111 | } 112 | 113 | protected List emit(List tuple, 114 | Message message, 115 | SpoutOutputCollector spoutOutputCollector) { 116 | return streamId == null ? 
spoutOutputCollector.emit(tuple, getDeliveryTag(message)) : 117 | spoutOutputCollector.emit(streamId, tuple, getDeliveryTag(message)); 118 | } 119 | 120 | private List extractTuple(Message message) { 121 | long deliveryTag = getDeliveryTag(message); 122 | try { 123 | List tuple = scheme.deserialize(message); 124 | if (tuple != null && !tuple.isEmpty()) { 125 | return tuple; 126 | } 127 | String errorMsg = "Deserialization error for msgId " + deliveryTag; 128 | logger.warn(errorMsg); 129 | collector.reportError(new Exception(errorMsg)); 130 | } catch (Exception e) { 131 | logger.warn("Deserialization error for msgId " + deliveryTag, e); 132 | collector.reportError(e); 133 | } 134 | // get the malformed message out of the way by dead-lettering (if dead-lettering is configured) and move on 135 | consumer.deadLetter(deliveryTag); 136 | return Collections.emptyList(); 137 | } 138 | 139 | @Override 140 | public void ack(Object msgId) { 141 | if (msgId instanceof Long) consumer.ack((Long) msgId); 142 | } 143 | 144 | @Override 145 | public void fail(Object msgId) { 146 | if (msgId instanceof Long) consumer.fail((Long) msgId); 147 | } 148 | 149 | @Override 150 | public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) { 151 | if(streamId == null){ 152 | outputFieldsDeclarer.declare(scheme.getOutputFields()); 153 | }else{ 154 | outputFieldsDeclarer.declareStream(streamId, scheme.getOutputFields()); 155 | } 156 | } 157 | 158 | @Override 159 | public void deactivate() 160 | { 161 | super.deactivate(); 162 | active = false; 163 | } 164 | 165 | @Override 166 | public void activate() 167 | { 168 | super.activate(); 169 | active = true; 170 | } 171 | 172 | protected long getDeliveryTag(Message message) { 173 | return ((Message.DeliveredMessage) message).getDeliveryTag(); 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/RedeliveryStreamSeparator.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import java.util.Arrays; 4 | import java.util.Collections; 5 | import java.util.List; 6 | 7 | /** 8 | * Separates initial (first time) deliveries off of a RabbitMQ queue from redeliveries (messages that are being 9 | * processed after initial processing failed for some reason). 10 | * 11 | * @author peter@latent.io 12 | */ 13 | public class RedeliveryStreamSeparator implements MultiStreamSplitter { 14 | public static final String INITIAL_DELIVERY_STREAM = "initial_delivery"; 15 | public static final String REDELIVERY_STREAM = "redelivery"; 16 | 17 | private static final List streams = Collections.unmodifiableList(Arrays.asList(INITIAL_DELIVERY_STREAM, 18 | REDELIVERY_STREAM)); 19 | 20 | @Override 21 | public List streamNames() { 22 | return streams; 23 | } 24 | 25 | @Override 26 | public String selectStream(List tuple, 27 | Message message) { 28 | Message.DeliveredMessage deliveredMessage = (Message.DeliveredMessage) message; 29 | return deliveredMessage.isRedelivery() ? 
REDELIVERY_STREAM : INITIAL_DELIVERY_STREAM; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/TupleToMessage.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import org.apache.storm.tuple.Tuple; 4 | 5 | import java.io.Serializable; 6 | import java.util.HashMap; 7 | import java.util.Map; 8 | 9 | /** 10 | * This abstract class describes an object that performs the work of mapping 11 | * incoming {@link Tuple}s to {@link Message} objects for publishing to a RabbitMQ 12 | * exchange. 13 | * 14 | */ 15 | public abstract class TupleToMessage implements Serializable { 16 | protected void prepare(@SuppressWarnings("rawtypes") Map stormConfig) {} 17 | 18 | /** 19 | * Convert the incoming {@link Tuple} on the Storm stream to a {@link Message} 20 | * for posting to RabbitMQ. 21 | * 22 | * @param input 23 | * The incoming {@link Tuple} from Storm 24 | * @return The {@link Message} for the {@link RabbitMQProducer} to publish. If 25 | * transformation fails, this should return {@code Message.NONE}. 26 | */ 27 | protected Message produceMessage(Tuple input) { 28 | return Message.forSending( 29 | extractBody(input), 30 | specifyHeaders(input), 31 | determineExchangeName(input), 32 | determineRoutingKey(input), 33 | specifyContentType(input), 34 | specifyContentEncoding(input), 35 | specifyMessagePersistence(input) 36 | ); 37 | } 38 | 39 | /** 40 | * Extract the message body as a byte array from the incoming tuple. This is required. 41 | * Implementations must handle errors and should return null on unresolvable 42 | * errors. 43 | * 44 | * @param input the incoming tuple 45 | * @return message body as a byte array or null if extraction cannot be performed 46 | */ 47 | protected abstract byte[] extractBody(Tuple input); 48 | 49 | /** 50 | * Determine the exchange to which the message is published. This can be 51 | * derived from the incoming tuple or be a fixed value. 52 | * 53 | * @param input the incoming tuple 54 | * @return the exchange to which the message is published. 55 | */ 56 | protected abstract String determineExchangeName(Tuple input); 57 | 58 | /** 59 | * Determine the routing key used for this message. This can be derived from 60 | * the incoming tuple or be a fixed value. The default implementation provides no 61 | * routing key. 62 | * 63 | * @param input the incoming tuple 64 | * @return the routing key for this message 65 | */ 66 | protected String determineRoutingKey(Tuple input) { 67 | return ""; // the rabbitmq java client library treats "" as no routing key 68 | } 69 | 70 | /** 71 | * Specify the headers to be sent along with this message. The default implementation 72 | * returns an empty map. 73 | * 74 | * @param input the incoming tuple 75 | * @return the headers as a map 76 | */ 77 | protected Map specifyHeaders(Tuple input) 78 | { 79 | return new HashMap(); 80 | } 81 | 82 | /** 83 | * Specify the message body content type. The default implementation does not provide 84 | * this detail. 85 | * 86 | * @param input the incoming tuple 87 | * @return content type 88 | */ 89 | protected String specifyContentType(Tuple input) { 90 | return null; 91 | } 92 | 93 | /** 94 | * Specify the message body content encoding. The default implementation does not provide 95 | * this detail.
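 * For example (purely illustrative), an implementation whose {@code extractBody(Tuple)} encodes the body with
 * {@code String.getBytes(StandardCharsets.UTF_8)} could return {@code "UTF-8"} here.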
96 | * 97 | * @param input the incoming tuple 98 | * @return content encoding 99 | */ 100 | protected String specifyContentEncoding(Tuple input) { 101 | return null; 102 | } 103 | 104 | /** 105 | * Specify whether each individual message should use message persistence while 106 | * it sits on a rabbitmq queue. This does not by itself imply queue durability or high availability 107 | * or even avoidance of message loss. To accomplish those, please read the rabbitmq docs 108 | * on High Availability, Publisher Confirms and Queue Durability in addition to 109 | * having this return true. By default this returns false. 110 | * 111 | * @param input the incoming tuple 112 | * @return whether the message should be persisted to disk. Defaults to false. 113 | */ 114 | protected boolean specifyMessagePersistence(Tuple input) { 115 | return false; 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/TupleToMessageNonDynamic.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import io.latent.storm.rabbitmq.config.ProducerConfig; 4 | import org.apache.storm.tuple.Tuple; 5 | 6 | import java.util.Map; 7 | 8 | public abstract class TupleToMessageNonDynamic extends TupleToMessage 9 | { 10 | private String exchangeName; 11 | private String routingKey; 12 | private String contentType; 13 | private String contentEncoding; 14 | private boolean persistent; 15 | 16 | @Override 17 | protected void prepare(@SuppressWarnings("rawtypes") Map stormConfig) 18 | { 19 | ProducerConfig producerConfig = ProducerConfig.getFromStormConfig(stormConfig); 20 | exchangeName = producerConfig.getExchangeName(); 21 | routingKey = producerConfig.getRoutingKey(); 22 | contentType = producerConfig.getContentType(); 23 | contentEncoding = producerConfig.getContentEncoding(); 24 | persistent = producerConfig.isPersistent(); 25 | } 26 | 27 | @Override 28 | protected String determineExchangeName(Tuple input) 29 | { 30 | return exchangeName; 31 | } 32 | 33 | @Override 34 | protected String determineRoutingKey(Tuple input) 35 | { 36 | return routingKey; 37 | } 38 | 39 | @Override 40 | protected String specifyContentType(Tuple input) 41 | { 42 | return contentType; 43 | } 44 | 45 | @Override 46 | protected String specifyContentEncoding(Tuple input) 47 | { 48 | return contentEncoding; 49 | } 50 | 51 | @Override 52 | protected boolean specifyMessagePersistence(Tuple input) 53 | { 54 | return persistent; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/UnanchoredConsumer.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import io.latent.storm.rabbitmq.config.ConnectionConfig; 4 | 5 | public class UnanchoredConsumer extends RabbitMQConsumer { 6 | public UnanchoredConsumer(ConnectionConfig connectionConfig, 7 | int prefetchCount, 8 | String queueName, 9 | boolean requeueOnFail, 10 | Declarator declarator, 11 | ErrorReporter errorReporter) { 12 | super(connectionConfig, prefetchCount, queueName, requeueOnFail, declarator, errorReporter); 13 | } 14 | 15 | @Override 16 | public void ack(Long msgId) { /* no op */ } 17 | 18 | @Override 19 | public void fail(Long msgId) { /* no op */ } 20 | 21 | @Override 22 | public void failWithRedelivery(Long msgId) { /* no op */ } 23 | 24 | @Override 25 | public void
deadLetter(Long msgId) { /* no op */ } 26 | 27 | @Override 28 | protected boolean isAutoAcking() { return true; } 29 | } 30 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/UnanchoredRabbitMQSpout.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq; 2 | 3 | import io.latent.storm.rabbitmq.config.ConsumerConfig; 4 | import org.apache.storm.spout.Scheme; 5 | import org.apache.storm.spout.SpoutOutputCollector; 6 | 7 | import java.util.List; 8 | 9 | /** 10 | * A RabbitMQ spout that emits an unanchored tuple stream on the default stream. Should be used when Storm's guaranteed message 11 | * processing is not needed. Messages will be removed from the RabbitMQ queue as soon as they are delivered to Storm and will not be 12 | * retried if any errors occur during processing. 13 | * 14 | * @author peter@latent.io 15 | */ 16 | public class UnanchoredRabbitMQSpout extends RabbitMQSpout { 17 | public UnanchoredRabbitMQSpout(Scheme scheme) { 18 | super(scheme); 19 | } 20 | 21 | public UnanchoredRabbitMQSpout(Scheme scheme, 22 | Declarator declarator) { 23 | super(scheme, declarator); 24 | } 25 | 26 | @Override 27 | protected RabbitMQConsumer loadConsumer(Declarator declarator, 28 | ErrorReporter reporter, 29 | ConsumerConfig config) { 30 | return new UnanchoredConsumer(config.getConnectionConfig(), 31 | config.getPrefetchCount(), 32 | config.getQueueName(), 33 | config.isRequeueOnFail(), 34 | declarator, 35 | reporter); 36 | } 37 | 38 | @Override 39 | public void ack(Object msgId) { /* no op */ } 40 | 41 | @Override 42 | public void fail(Object msgId) { /* no op */ } 43 | 44 | @Override 45 | protected List emit(List tuple, 46 | Message message, 47 | SpoutOutputCollector spoutOutputCollector) { 48 | // don't anchor with msgId 49 | return spoutOutputCollector.emit(tuple); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ConfigAvailableHosts.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | import java.io.Serializable; 4 | import java.util.Map; 5 | import java.util.Map.Entry; 6 | import java.util.TreeMap; 7 | 8 | import com.rabbitmq.client.Address; 9 | 10 | /** 11 | * Simple configuration class that allows users to specify a set of hosts to connect 12 | * to for high availability purposes. 13 | * 14 | */ 15 | public class ConfigAvailableHosts implements Serializable { 16 | 17 | private static final long serialVersionUID = -7444594758428554141L; 18 | 19 | private static final String HOST_DELIMINITER = "|"; 20 | 21 | /** 22 | * {@link TreeMap} of host configurations (order may matter...).
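 * For example (hostnames are placeholders), {@code ConfigAvailableHosts.fromString("rabbit-1:5672|rabbit-2:5672")}
 * yields two entries that {@link #toAddresses()} converts into the address list handed to the RabbitMQ client.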
23 | */ 24 | private final Map hostsMap = new TreeMap(); 25 | 26 | /** 27 | * 28 | * @return The map of hostname to port we'll use to connect to 29 | */ 30 | public Map getHostsMap() { 31 | return hostsMap; 32 | } 33 | 34 | /** 35 | * 36 | * @return Whether or not there are any configured high availability hosts 37 | * in this config 38 | */ 39 | public boolean isEmpty() { 40 | return hostsMap.isEmpty(); 41 | } 42 | 43 | /** 44 | * 45 | * @return The {@link Map} of RabbitMQ hosts, converted to the necessary 46 | * {@link Address} array 47 | */ 48 | public Address[] toAddresses() { 49 | final Address[] addresses = new Address[hostsMap.size()]; 50 | int i = 0; 51 | for (final Entry entry : hostsMap.entrySet()) { 52 | if (entry.getKey() != null) { 53 | addresses[i++] = entry.getValue() == null ? new Address(entry.getKey()) : new Address(entry.getKey(), entry.getValue()); 54 | } 55 | } 56 | return addresses; 57 | } 58 | 59 | @Override 60 | public String toString() { 61 | final StringBuilder builder = new StringBuilder(); 62 | boolean first = true; 63 | for (final String host : hostsMap.keySet()) { 64 | final Integer port = hostsMap.get(host); 65 | if (!first) { 66 | builder.append(HOST_DELIMINITER); 67 | } else { 68 | first = false; 69 | } 70 | builder.append(host); 71 | builder.append(port != null ? ":" + port : ""); 72 | } 73 | return builder.toString(); 74 | } 75 | 76 | public static ConfigAvailableHosts fromString(final String serialzed) { 77 | final ConfigAvailableHosts value = new ConfigAvailableHosts(); 78 | final String[] hosts = serialzed.split("[" + HOST_DELIMINITER + "]"); 79 | for (final String host : hosts) { 80 | if (!host.isEmpty()) { 81 | String[] brokenUp = host.split(":"); 82 | value.getHostsMap().put(brokenUp[0], brokenUp.length == 2 ? 
Integer.parseInt(brokenUp[1]) : null); 83 | } 84 | } 85 | return value; 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ConfigUtils.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | import java.util.Map; 4 | 5 | public class ConfigUtils 6 | { 7 | public static String getFromMap(String key, Map map) { 8 | return map.get(key).toString(); 9 | } 10 | 11 | public static String getFromMap(String key, Map map, String defaultValue) { 12 | Object value = map.get(key); 13 | if (value==null) return defaultValue; 14 | return map.get(key).toString(); 15 | } 16 | 17 | public static int getFromMapAsInt(String key, Map map) { 18 | return Integer.valueOf(map.get(key).toString()); 19 | } 20 | 21 | public static int getFromMapAsInt(String key, Map map, int defaultValue) { 22 | Object value = map.get(key); 23 | if (value==null) return defaultValue; 24 | return Integer.valueOf(map.get(key).toString()); 25 | } 26 | 27 | public static boolean getFromMapAsBoolean(String key, Map map) { 28 | return Boolean.valueOf(map.get(key).toString()); 29 | } 30 | 31 | public static boolean getFromMapAsBoolean(String key, Map map, boolean defaultValue) { 32 | Object value = map.get(key); 33 | if (value==null) return defaultValue; 34 | return Boolean.valueOf(map.get(key).toString()); 35 | } 36 | 37 | public static void addToMap(String key, Map map, String value) { 38 | map.put(key, value); 39 | } 40 | 41 | public static void addToMap(String key, Map map, int value) { 42 | map.put(key, Integer.toString(value)); 43 | } 44 | 45 | public static void addToMap(String key, Map map, boolean value) { 46 | map.put(key, Boolean.toString(value)); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ConnectionConfig.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | import static io.latent.storm.rabbitmq.config.ConfigUtils.addToMap; 4 | import static io.latent.storm.rabbitmq.config.ConfigUtils.getFromMap; 5 | import static io.latent.storm.rabbitmq.config.ConfigUtils.getFromMapAsBoolean; 6 | import static io.latent.storm.rabbitmq.config.ConfigUtils.getFromMapAsInt; 7 | 8 | import java.io.Serializable; 9 | import java.net.URISyntaxException; 10 | import java.security.KeyManagementException; 11 | import java.security.NoSuchAlgorithmException; 12 | import java.util.HashMap; 13 | import java.util.Map; 14 | 15 | import com.rabbitmq.client.ConnectionFactory; 16 | 17 | public class ConnectionConfig implements Serializable { 18 | 19 | /** 20 | * Serial version UID. 21 | */ 22 | private static final long serialVersionUID = 1L; 23 | 24 | // Use named parameters 25 | private String host; 26 | private int port; 27 | private String username; 28 | private String password; 29 | private String virtualHost; 30 | private int heartBeat; 31 | private boolean ssl; 32 | // Backup hosts to try and connect to. 
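// Populated either via setHighAvailabilityHosts(..) or from the "rabbitmq.ha.hosts" entry read by getFromStormConfig(..).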
33 | private ConfigAvailableHosts highAvailabilityHosts = new ConfigAvailableHosts(); 34 | 35 | // Use AMQP URI http://www.rabbitmq.com/uri-spec.html 36 | private String uri; 37 | 38 | public static ConnectionConfig forTest() { 39 | return new ConnectionConfig(ConnectionFactory.DEFAULT_HOST, ConnectionFactory.DEFAULT_USER, ConnectionFactory.DEFAULT_PASS); 40 | } 41 | 42 | public ConnectionConfig(String uri) { 43 | this.uri = uri; 44 | } 45 | 46 | public ConnectionConfig(String host, 47 | String username, 48 | String password) { 49 | this(host, ConnectionFactory.DEFAULT_AMQP_PORT, username, password, ConnectionFactory.DEFAULT_VHOST, 10, false); 50 | } 51 | 52 | public ConnectionConfig(String host, 53 | String username, 54 | String password, boolean ssl) { 55 | this(host, ConnectionFactory.DEFAULT_AMQP_PORT, username, password, ConnectionFactory.DEFAULT_VHOST, 10, ssl); 56 | } 57 | 58 | public ConnectionConfig(String host, 59 | int port, 60 | String username, 61 | String password, 62 | String virtualHost, 63 | int heartBeat) { 64 | this(host,port,username,password,virtualHost,heartBeat,false); 65 | } 66 | 67 | public ConnectionConfig(String host, int port, String username, String password, String virtualHost, int heartBeat, boolean ssl) { 68 | this(new ConfigAvailableHosts(), host, port, username, password, virtualHost, heartBeat, ssl); 69 | } 70 | 71 | /** 72 | * Use this constructor if you wish to specify a set of 73 | * hosts to connect to in the event that you need a high 74 | * availability RabbitMQ connection. 75 | * 76 | * @param hosts The {@link ConfigAvailableHosts} that will give you the ability to specify a set of hosts 77 | * @param username 78 | * @param password 79 | * @param virtualHost 80 | */ 81 | public ConnectionConfig(final ConfigAvailableHosts hosts, String host, int port, String username, String password, String virtualHost, int heartBeat, final boolean ssl) { 82 | this.host = host; 83 | this.port = port; 84 | this.username = username; 85 | this.password = password; 86 | this.virtualHost = virtualHost; 87 | this.heartBeat = heartBeat; 88 | this.ssl = ssl; 89 | this.highAvailabilityHosts = hosts; 90 | } 91 | 92 | public ConfigAvailableHosts getHighAvailabilityHosts() { 93 | return highAvailabilityHosts; 94 | } 95 | 96 | /** 97 | * Set this value if you want to use a set of high availability hosts 98 | * in addition to the specified primary host you want to connect to, 99 | * and didn't use the full constructor. 
100 | * 101 | * @param highAvailabilityHosts The host configuration for using backup hosts 102 | */ 103 | public void setHighAvailabilityHosts(ConfigAvailableHosts highAvailabilityHosts) { 104 | this.highAvailabilityHosts = highAvailabilityHosts; 105 | } 106 | 107 | public String getHost() { 108 | return host; 109 | } 110 | 111 | public int getPort() { 112 | return port; 113 | } 114 | 115 | public String getUsername() { 116 | return username; 117 | } 118 | 119 | public String getPassword() { 120 | return password; 121 | } 122 | 123 | public String getVirtualHost() { 124 | return virtualHost; 125 | } 126 | 127 | public int getHeartBeat() { 128 | return heartBeat; 129 | } 130 | 131 | public String getUri() { 132 | return uri; 133 | } 134 | 135 | boolean isSsl(){ 136 | return this.ssl; 137 | } 138 | 139 | public ConnectionFactory asConnectionFactory() { 140 | ConnectionFactory factory = new ConnectionFactory(); 141 | if (uri != null) { 142 | try { 143 | factory.setUri(uri); 144 | } catch (URISyntaxException e) { 145 | throw new RuntimeException(e); 146 | } catch (NoSuchAlgorithmException e) { 147 | throw new RuntimeException(e); 148 | } catch (KeyManagementException e) { 149 | throw new RuntimeException(e); 150 | } 151 | } else { 152 | factory.setHost(host); 153 | factory.setPort(port); 154 | factory.setUsername(username); 155 | factory.setPassword(password); 156 | factory.setVirtualHost(virtualHost); 157 | factory.setRequestedHeartbeat(heartBeat); 158 | if(ssl){ 159 | try { 160 | factory.useSslProtocol(); 161 | } catch (KeyManagementException e) { 162 | throw new RuntimeException(e); 163 | } catch (NoSuchAlgorithmException e) { 164 | throw new RuntimeException(e); 165 | } 166 | } 167 | } 168 | return factory; 169 | } 170 | 171 | public static ConnectionConfig getFromStormConfig(Map stormConfig) { 172 | if (stormConfig.containsKey("rabbitmq.uri")) { 173 | return new ConnectionConfig(getFromMap("rabbitmq.uri", stormConfig)); 174 | } else { 175 | String highAvailabilityHostsString = getFromMap("rabbitmq.ha.hosts", stormConfig, null); // default to null when not configured (the no-default overload would throw an NPE) 176 | if(highAvailabilityHostsString != null){ 177 | final ConfigAvailableHosts haHosts = ConfigAvailableHosts.fromString(highAvailabilityHostsString); 178 | return new ConnectionConfig(haHosts, 179 | getFromMap("rabbitmq.host", stormConfig, ConnectionFactory.DEFAULT_HOST), 180 | getFromMapAsInt("rabbitmq.port", stormConfig, ConnectionFactory.DEFAULT_AMQP_PORT), 181 | getFromMap("rabbitmq.username", stormConfig, ConnectionFactory.DEFAULT_USER), 182 | getFromMap("rabbitmq.password", stormConfig, ConnectionFactory.DEFAULT_PASS), 183 | getFromMap("rabbitmq.virtualhost", stormConfig, ConnectionFactory.DEFAULT_VHOST), 184 | getFromMapAsInt("rabbitmq.heartbeat", stormConfig, ConnectionFactory.DEFAULT_HEARTBEAT), 185 | getFromMapAsBoolean("rabbitmq.ssl", stormConfig, false)); 186 | }else{ 187 | return new ConnectionConfig(getFromMap("rabbitmq.host", stormConfig, ConnectionFactory.DEFAULT_HOST), 188 | getFromMapAsInt("rabbitmq.port", stormConfig, ConnectionFactory.DEFAULT_AMQP_PORT), 189 | getFromMap("rabbitmq.username", stormConfig, ConnectionFactory.DEFAULT_USER), 190 | getFromMap("rabbitmq.password", stormConfig, ConnectionFactory.DEFAULT_PASS), 191 | getFromMap("rabbitmq.virtualhost", stormConfig, ConnectionFactory.DEFAULT_VHOST), 192 | getFromMapAsInt("rabbitmq.heartbeat", stormConfig, ConnectionFactory.DEFAULT_HEARTBEAT), 193 | getFromMapAsBoolean("rabbitmq.ssl", stormConfig, false)); 194 | } 195 | } 196 | } 197 | 198 | public Map asMap() { 199 | Map map = new HashMap();
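// Mirrors getFromStormConfig(..): writes this connection's settings under the "rabbitmq.*" keys it reads back.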
200 | if (uri != null) { 201 | addToMap("rabbitmq.uri", map, uri); 202 | } else { 203 | addToMap("rabbitmq.host", map, host); 204 | addToMap("rabbitmq.port", map, port); 205 | addToMap("rabbitmq.username", map, username); 206 | addToMap("rabbitmq.password", map, password); 207 | addToMap("rabbitmq.virtualhost", map, virtualHost); 208 | addToMap("rabbitmq.heartbeat", map, heartBeat); 209 | addToMap("rabbitmq.ssl", map, ssl); 210 | addToMap("rabbitmq.ha.hosts", map, highAvailabilityHosts.toString()); 211 | } 212 | return map; 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ConsumerConfig.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | import java.io.Serializable; 4 | import java.util.HashMap; 5 | import java.util.Map; 6 | 7 | import static io.latent.storm.rabbitmq.config.ConfigUtils.*; 8 | 9 | public class ConsumerConfig implements Serializable { 10 | private final ConnectionConfig connectionConfig; 11 | private final int prefetchCount; 12 | private final String queueName; 13 | private final boolean requeueOnFail; 14 | 15 | public ConsumerConfig(ConnectionConfig connectionConfig, 16 | int prefetchCount, 17 | String queueName, 18 | boolean requeueOnFail) { 19 | if (connectionConfig == null || prefetchCount < 1) { 20 | throw new IllegalArgumentException("Invalid configuration"); 21 | } 22 | 23 | this.connectionConfig = connectionConfig; 24 | this.prefetchCount = prefetchCount; 25 | this.queueName = queueName; 26 | this.requeueOnFail = requeueOnFail; 27 | } 28 | 29 | public ConnectionConfig getConnectionConfig() { 30 | return connectionConfig; 31 | } 32 | 33 | public int getPrefetchCount() { 34 | return prefetchCount; 35 | } 36 | 37 | public String getQueueName() { 38 | return queueName; 39 | } 40 | 41 | public boolean isRequeueOnFail() { 42 | return requeueOnFail; 43 | } 44 | 45 | public static ConsumerConfig getFromStormConfig(Map stormConfig) { 46 | ConnectionConfig connectionConfig = ConnectionConfig.getFromStormConfig(stormConfig); 47 | return new ConsumerConfig(connectionConfig, 48 | getFromMapAsInt("rabbitmq.prefetchCount", stormConfig), 49 | getFromMap("rabbitmq.queueName", stormConfig), 50 | getFromMapAsBoolean("rabbitmq.requeueOnFail", stormConfig)); 51 | } 52 | 53 | public Map asMap() { 54 | Map map = new HashMap(); 55 | map.putAll(connectionConfig.asMap()); 56 | addToMap("rabbitmq.prefetchCount", map, prefetchCount); 57 | addToMap("rabbitmq.queueName", map, queueName); 58 | addToMap("rabbitmq.requeueOnFail", map, requeueOnFail); 59 | return map; 60 | } 61 | } 62 | 63 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ConsumerConfigBuilder.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | public final class ConsumerConfigBuilder 4 | { 5 | private ConnectionConfig connectionConfig; 6 | private int prefetchCount; 7 | private String queueName; 8 | private boolean requeueOnFail; 9 | 10 | public ConsumerConfigBuilder() 11 | { 12 | } 13 | 14 | public ConsumerConfigBuilder connection(ConnectionConfig connection) { 15 | this.connectionConfig = connection; 16 | return this; 17 | } 18 | 19 | public ConsumerConfigBuilder prefetch(int prefetch) { 20 | this.prefetchCount = prefetch; 21 | return this; 22 | } 23 | 24 | public 
ConsumerConfigBuilder queue(String queue) { 25 | this.queueName = queue; 26 | return this; 27 | } 28 | 29 | public ConsumerConfigBuilder requeueOnFail() { 30 | this.requeueOnFail = true; 31 | return this; 32 | } 33 | 34 | public ConsumerConfig build() { 35 | return new ConsumerConfig(connectionConfig, prefetchCount, queueName, requeueOnFail); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ProducerConfig.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | import java.io.Serializable; 4 | import java.util.HashMap; 5 | import java.util.Map; 6 | 7 | import static io.latent.storm.rabbitmq.config.ConfigUtils.*; 8 | 9 | public class ProducerConfig implements Serializable 10 | { 11 | private final ConnectionConfig connectionConfig; 12 | private final String exchangeName; 13 | private final String routingKey; 14 | private final String contentType; 15 | private final String contentEncoding; 16 | private final boolean persistent; 17 | 18 | public ProducerConfig(ConnectionConfig connectionConfig, 19 | String exchangeName, 20 | String routingKey, 21 | String contentType, 22 | String contentEncoding, 23 | boolean persistent) 24 | { 25 | this.connectionConfig = connectionConfig; 26 | this.exchangeName = exchangeName; 27 | this.routingKey = routingKey; 28 | this.contentType = contentType; 29 | this.contentEncoding = contentEncoding; 30 | this.persistent = persistent; 31 | } 32 | 33 | public ConnectionConfig getConnectionConfig() 34 | { 35 | return connectionConfig; 36 | } 37 | 38 | public String getExchangeName() 39 | { 40 | return exchangeName; 41 | } 42 | 43 | public String getRoutingKey() 44 | { 45 | return routingKey; 46 | } 47 | 48 | public String getContentType() 49 | { 50 | return contentType; 51 | } 52 | 53 | public String getContentEncoding() 54 | { 55 | return contentEncoding; 56 | } 57 | 58 | public boolean isPersistent() 59 | { 60 | return persistent; 61 | } 62 | 63 | public static ProducerConfig getFromStormConfig(Map stormConfig) { 64 | ConnectionConfig connectionConfig = ConnectionConfig.getFromStormConfig(stormConfig); 65 | return new ProducerConfig(connectionConfig, 66 | getFromMap("rabbitmq.exchangeName", stormConfig), 67 | getFromMap("rabbitmq.routingKey", stormConfig), 68 | getFromMap("rabbitmq.contentType", stormConfig), 69 | getFromMap("rabbitmq.contentEncoding", stormConfig), 70 | getFromMapAsBoolean("rabbitmq.persistent", stormConfig)); 71 | } 72 | 73 | public Map asMap() { 74 | Map map = new HashMap(); 75 | map.putAll(connectionConfig.asMap()); 76 | addToMap("rabbitmq.exchangeName", map, exchangeName); 77 | addToMap("rabbitmq.routingKey", map, routingKey); 78 | addToMap("rabbitmq.contentType", map, contentType); 79 | addToMap("rabbitmq.contentEncoding", map, contentEncoding); 80 | addToMap("rabbitmq.persistent", map, persistent); 81 | return map; 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/main/java/io/latent/storm/rabbitmq/config/ProducerConfigBuilder.java: -------------------------------------------------------------------------------- 1 | package io.latent.storm.rabbitmq.config; 2 | 3 | public class ProducerConfigBuilder 4 | { 5 | private ConnectionConfig connectionConfig; 6 | private String exchangeName; 7 | private String routingKey; 8 | private String contentType; 9 | private String contentEncoding; 10 | private boolean persistent 
= false; 11 | 12 | public ProducerConfigBuilder() 13 | { 14 | } 15 | 16 | public ProducerConfigBuilder connection(ConnectionConfig connection) { 17 | this.connectionConfig = connection; 18 | return this; 19 | } 20 | 21 | public ProducerConfigBuilder exchange(String exchange) { 22 | this.exchangeName = exchange; 23 | return this; 24 | } 25 | 26 | public ProducerConfigBuilder routingKey(String routingKey) { 27 | this.routingKey = routingKey; 28 | return this; 29 | } 30 | 31 | public ProducerConfigBuilder contentType(String contentType) { 32 | this.contentType = contentType; 33 | return this; 34 | } 35 | 36 | public ProducerConfigBuilder contentEncoding(String contentEncoding) { 37 | this.contentEncoding = contentEncoding; 38 | return this; 39 | } 40 | 41 | public ProducerConfigBuilder persistent() { 42 | this.persistent = true; 43 | return this; 44 | } 45 | 46 | public ProducerConfig build() 47 | { 48 | return new ProducerConfig(connectionConfig, exchangeName, routingKey, contentType, contentEncoding, persistent); 49 | } 50 | } 51 | --------------------------------------------------------------------------------
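For reference, a minimal producer-side sketch tying the configuration classes above together. It is illustrative only: the connection values, exchange, routing key, content type/encoding and the `payload` tuple field are placeholders, and handing the resulting `TupleToMessage` to `RabbitMQBolt` is outside this excerpt.

```java
import io.latent.storm.rabbitmq.TupleToMessageNonDynamic;
import io.latent.storm.rabbitmq.config.ConnectionConfig;
import io.latent.storm.rabbitmq.config.ProducerConfig;
import io.latent.storm.rabbitmq.config.ProducerConfigBuilder;
import org.apache.storm.Config;
import org.apache.storm.tuple.Tuple;

import java.nio.charset.StandardCharsets;

public class ProducerWiringSketch {

  // Non-dynamic mapping: only the body comes from the tuple; exchange, routing key, content type,
  // encoding and persistence are read back from the ProducerConfig placed in the topology config.
  public static class PayloadToMessage extends TupleToMessageNonDynamic {
    @Override
    protected byte[] extractBody(Tuple input) {
      // "payload" is a placeholder field name emitted by some upstream component
      return input.getStringByField("payload").getBytes(StandardCharsets.UTF_8);
    }
  }

  public static Config producerTopologyConfig() {
    ProducerConfig producerConfig = new ProducerConfigBuilder()
        .connection(new ConnectionConfig("localhost", "guest", "guest"))
        .exchange("some-exchange")
        .routingKey("some.routing.key")
        .contentType("application/json")
        .contentEncoding("UTF-8")
        .persistent()
        .build();

    // TupleToMessageNonDynamic.prepare(..) later reads these "rabbitmq.*" entries back out of the storm config
    Config config = new Config();
    config.putAll(producerConfig.asMap());
    return config;
  }
}
```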