├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── libsnappyjava.jnilib ├── pom.xml └── src ├── main └── java │ └── com │ └── hmsonline │ └── trident │ └── cql │ ├── CassandraCqlMapState.java │ ├── CassandraCqlMapStateFactory.java │ ├── CassandraCqlState.java │ ├── CassandraCqlStateFactory.java │ ├── CassandraCqlStateUpdater.java │ ├── ConstructorConfiguredCqlClientFactory.java │ ├── CqlClientFactory.java │ ├── MapConfiguredCqlClientFactory.java │ ├── incremental │ ├── CassandraCqlIncrementalState.java │ ├── CassandraCqlIncrementalStateException.java │ ├── CassandraCqlIncrementalStateFactory.java │ ├── CassandraCqlIncrementalStateUpdater.java │ ├── CqlIncrementMapper.java │ └── PersistedState.java │ └── mappers │ ├── CqlRowMapper.java │ └── CqlTupleMapper.java └── test ├── java └── com │ └── hmsonline │ └── trident │ └── cql │ ├── CassandraCqlStateUpdaterTest.java │ ├── ConditionalUpdateTest.java │ ├── ConstructorConfiguredCqlClientFactoryTest.java │ ├── CqlClientFactoryTestConstants.java │ ├── CqlTestEnvironment.java │ ├── CqlUnitClientFactory.java │ ├── MapConfiguredCqlClientFactoryTest.java │ ├── example │ ├── sales │ │ ├── SalesEmitter.java │ │ ├── SalesMapper.java │ │ ├── SalesSpout.java │ │ ├── SalesState.java │ │ └── SalesTopology.java │ ├── simpleupdate │ │ ├── DefaultCoordinator.java │ │ ├── SimpleUpdateEmitter.java │ │ ├── SimpleUpdateMapper.java │ │ ├── SimpleUpdateSpout.java │ │ └── SimpleUpdateTopology.java │ └── wordcount │ │ ├── IntegerCount.java │ │ ├── WordCountAndSourceMapper.java │ │ └── WordCountTopology.java │ └── incremental │ └── IncrementalStateTest.java └── resources ├── create_keyspace.cql ├── log4j.properties └── schema.cql /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | 3 | # Package Files # 4 | *.jar 5 | *.war 6 | *.ear 7 | /target 8 | 9 | .classpath 10 | .project 11 | .settings/ 12 | 13 | # IDE Files # 14 | *.iml 15 | .idea 16 | 
-------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 0.3.5 2 | 3 | * [#62][]: Upgrade cassandra driver to 3.0.0 4 | 5 | ## 0.3.4 6 | 7 | * [#60][]: Bump cassandra-driver-core dependency to 2.1.9 8 | 9 | [#60]: https://github.com/hmsonline/storm-cassandra-cql/issues/60 10 | 11 | ## 0.3.3 12 | 13 | * [#55][]: Add support for QueryLogger into MapConfiguredCqlClientFactory 14 | 15 | [#55]: https://github.com/hmsonline/storm-cassandra-cql/issues/55 16 | 17 | ## 0.3.2 18 | 19 | * [#54][]: Reduce synchronization on CqlClientFactory 20 | 21 | [#54]: https://github.com/hmsonline/storm-cassandra-cql/issues/54 22 | 23 | ## 0.3.1 24 | 25 | * [#51][]: Bump cassandra-driver-core dependency to 2.1.6 26 | 27 | [#51]: https://github.com/hmsonline/storm-cassandra-cql/issues/51 28 | 29 | ## 0.3.0 30 | 31 | * [#31][]: Added ability to configure read timeouts, and various other parameters on the CQL cluster/session. 32 | * [#47][]: Added LZ4 dependency, so it gets bundled in. 33 | * [#37][]: Fixed leaking statements inside State object. 34 | * [#36][]: Fixed issue with incremental state only committing one aggregate value. 35 | * Added cassandra-unit to the test suite so we could un-Ignore tests. 36 | * Repackaged tests and added documentation to make them more understandable. 
37 | 38 | [#31]: https://github.com/hmsonline/storm-cassandra-cql/issues/31 39 | [#36]: https://github.com/hmsonline/storm-cassandra-cql/issues/36 40 | [#37]: https://github.com/hmsonline/storm-cassandra-cql/issues/37 41 | [#47]: https://github.com/hmsonline/storm-cassandra-cql/issues/47 42 | 43 | ## 0.2.4 44 | 45 | * Bump cassandra-driver-core dependency to 2.1.4 46 | 47 | ## 0.2.1 48 | 49 | * [#27][]: Use QUORUM by default instead of LOCAL_QUORUM 50 | 51 | ## 0.2.0 52 | 53 | * [#22][] / [#26][]: Explicit consistency levels for batches, cluster, and conditional updates 54 | 55 | [#22]: https://github.com/hmsonline/storm-cassandra-cql/issues/22 56 | [#26]: https://github.com/hmsonline/storm-cassandra-cql/issues/26 57 | [#27]: https://github.com/hmsonline/storm-cassandra-cql/issues/27 58 | 59 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2014 Lexis Nexis / Reed Elsevier 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Description / Rationale 2 | =================== 3 | 4 | This is a new CassandraState implementation built on the CQL java driver. For Cassandra, CQL has better support for lightweight transactions, batching, and collections. Also, CQL will likely get more attention than the legacy Thrift interface. For these reasons, we decided to create a C* state implementation built on CQL. 5 | 6 | Storm-Cassandra-Cql provides three different state implementations: 7 | * CassandraCqlState : Simply maps tuples to statements with batching capabilities. 8 | * CassandraCqlMapState : Provides an IBackingMap implementation for use with keys/values and aggregations in Storm. 9 | * CassandraCqlIncrementalState : Leverages conditional updates to perform dimensional aggregations on data incrementally. 
(each batch constitutes an increment) 10 | 11 | Design 12 | =================== 13 | An application/topology provides implementations of the mapper interfaces. 14 | For example, the [CqlRowMapper](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/main/java/com/hmsonline/trident/cql/mappers/CqlRowMapper.java) provides a bidirectional mapping from Keys and Values to statements that can be used to upsert and retrieve data. 15 | 16 | The mappers are used to translate between Storm constructs and CQL constructs. Storm uses the state factories to create state objects. Updaters then use the mappers to update the state objects. State is then committed on a per batch basis. 17 | 18 | Getting Started 19 | =================== 20 | You can use the examples to get started. For the example, you'll want to run a local cassandra instance with the example schema found in 21 | [schema.cql](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/test/resources/schema.cql). 22 | 23 | You can do this using cqlsh: 24 | 25 | ``` 26 | cat storm-cassandra-cql/src/test/resources/create_keyspace.cql | cqlsh 27 | cat storm-cassandra-cql/src/test/resources/schema.cql | cqlsh 28 | ``` 29 | 30 | 31 | ## SimpleUpdateTopology (CassandraCqlState) 32 | 33 | The SimpleUpdateTopology simply emits integers (0-99) and writes those to Cassandra with the current timestamp % 10. The values are written to the table: mytable. 34 | 35 | This is persistence from the topology: 36 | ```java 37 | inputStream.partitionPersist(new CassandraCqlStateFactory(ConsistencyLevel.ONE), new Fields("test"), new CassandraCqlStateUpdater(mapper)); 38 | ``` 39 | 40 | During a partition persist, Storm repeatedly calls `updateState()` on the CassandraCqlStateUpdater to update a state object for the batch. The updater uses the mapper to convert the tuple into a CQL statement, and caches the CQL statement in a CassandraCqlState object. 
When the batch is complete, Storm calls commit on the state object, which then executes all of the CQL statements as a batch. 41 | 42 | See: 43 | * [CassandraCqlStateUpdater.updateState](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/main/java/com/hmsonline/trident/cql/CassandraCqlStateUpdater.java#L37-L41) 44 | * [CassandraCqlState.commit](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/main/java/com/hmsonline/trident/cql/CassandraCqlState.java#L39-L56) 45 | 46 | The SimpleUpdateMapper looks like this: 47 | 48 | ```java 49 | public class SimpleUpdateMapper implements CqlRowMapper, Serializable { 50 | public Statement map(TridentTuple tuple) { 51 | long t = System.currentTimeMillis() % 10; 52 | Update statement = update("mykeyspace", "mytable"); 53 | statement.with(set("col1", tuple.getString(0))).where(eq("t", t)); 54 | return statement; 55 | } 56 | ``` 57 | 58 | As you can see, it maps tuples to update statements. 59 | 60 | When you run the `main()` method in the SimpleUpdateTopology, you should get results in mytable that look like this: 61 | 62 | ``` 63 | t | col1 64 | ---+------ 65 | 5 | 97 66 | 1 | 99 67 | 8 | 99 68 | 0 | 99 69 | 2 | 99 70 | 4 | 99 71 | 7 | 99 72 | 6 | 99 73 | 9 | 99 74 | 3 | 99 75 | ``` 76 | 77 | 78 | ## WordCountTopology (CassandraCqlMapState) 79 | The WordCountTopology is slightly more complex in that it uses the CassandraCqlMapState. The map state assumes you are reading/writing keys and values. 80 | The topology emits words from two different sources, and then totals the words by source and persists the count to Cassandra. 81 | 82 | The CassandraCqlMapState object implements the IBackingMap interface in Storm. 
83 | See: 84 | [Blog on the use of IBackingMap](https://svendvanderveken.wordpress.com/2013/07/30/scalable-real-time-state-update-with-storm/) 85 | 86 | Have a look at the [WordCountTopology](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/test/java/com/hmsonline/trident/cql/example/wordcount/WordCountTopology.java). 87 | 88 | It uses a FixedBatchSpout that emits sentences over and over again: 89 | 90 | ```java 91 | FixedBatchSpout spout1 = new FixedBatchSpout(new Fields("sentence"), 3, 92 | new Values("the cow jumped over the moon"), 93 | new Values("the man went to the store and bought some candy"), 94 | new Values("four score and seven years ago"), 95 | new Values("how many apples can you eat")); 96 | spout1.setCycle(true); 97 | ``` 98 | 99 | Then, it splits and groups those words: 100 | 101 | ```java 102 | TridentState wordCounts = 103 | topology.newStream("spout1", spout1) 104 | .each(new Fields("sentence"), new Split(), new Fields("word")) 105 | .groupBy(new Fields("word")) 106 | .persistentAggregate(CassandraCqlMapState.nonTransactional(new WordCountMapper()), 107 | new IntegerCount(), new Fields("count")) 108 | .parallelismHint(6); 109 | ``` 110 | 111 | Instead of a partitionPersist like the other topology, this topology aggregates first, using the persistentAggregate method. This performs an aggregation, storing the results in the CassandraCqlMapState, on which Storm eventually calls multiPut/multiGet to store/read values. 
112 | 113 | See: 114 | [CassandraCqlMapState.multiPut/Get](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/main/java/com/hmsonline/trident/cql/CassandraCqlMapState.java#L122-L187) 115 | 116 | In this case, the mapper maps keys and values to CQL statements: 117 | 118 | ```java 119 | @Override 120 | public Statement map(List keys, Number value) { 121 | Insert statement = QueryBuilder.insertInto(KEYSPACE_NAME, TABLE_NAME); 122 | statement.value(WORD_KEY_NAME, keys.get(0)); 123 | statement.value(SOURCE_KEY_NAME, keys.get(1)); 124 | statement.value(VALUE_NAME, value); 125 | return statement; 126 | } 127 | 128 | @Override 129 | public Statement retrieve(List keys) { 130 | // Retrieve all the columns associated with the keys 131 | Select statement = QueryBuilder.select().column(SOURCE_KEY_NAME) 132 | .column(WORD_KEY_NAME).column(VALUE_NAME) 133 | .from(KEYSPACE_NAME, TABLE_NAME); 134 | statement.where(QueryBuilder.eq(SOURCE_KEY_NAME, keys.get(0))); 135 | statement.where(QueryBuilder.eq(WORD_KEY_NAME, keys.get(1))); 136 | return statement; 137 | } 138 | ``` 139 | 140 | When you run this example, you will find the following counts in Cassandra: 141 | 142 | ``` 143 | cqlsh> select * from mykeyspace.wordcounttable; 144 | 145 | source | word | count 146 | --------+--------+------- 147 | spout2 | a | 73 148 | spout2 | ago | 74 149 | spout2 | and | 74 150 | spout2 | apples | 69 151 | ... 152 | spout1 | some | 74 153 | spout1 | store | 74 154 | spout1 | the | 296 155 | spout1 | to | 74 156 | spout1 | went | 74 157 | ``` 158 | 159 | ## SalesTopology (CassandraCqlIncrementalState) 160 | The SalesTopology demonstrates the use of Cassandra for incremental data aggregation. The emitter/spout simulates sales of products across states. It emits three fields: state, product and price. The state object incorporates an aggregator, and aggregates values as tuples are processed. 
It then flushes the aggregate values to Cassandra, by first reading the current value, incorporating the new value, and then writing the new value using a conditional update. 161 | 162 | 163 | Here is the topology: 164 | 165 | ```java 166 | Stream inputStream = topology.newStream("sales", spout); 167 | SalesMapper mapper = new SalesMapper(); 168 | inputStream.partitionPersist( 169 | new CassandraCqlIncrementalStateFactory(new Sum(), mapper), 170 | new Fields("price", "state", "product"), 171 | new CassandraCqlIncrementalStateUpdater()); 172 | ``` 173 | 174 | Notice that the constructor for the state factory takes an aggregation, along with the mapper. As the updater processes the tuples, the aggregator is used to maintain aggregate values based on the keys. The updater uses a standard mapper. For this example, the mapper is as follows: 175 | 176 | ```java 177 | @Override 178 | public Statement read(String key) { 179 | Select statement = select().column(VALUE_NAME).from(KEYSPACE_NAME, TABLE_NAME); 180 | statement.where(eq(KEY_NAME, key)); 181 | return statement; 182 | } 183 | 184 | @Override 185 | public Statement update(String key, Number value, PersistedState state, long txid, int partition) { 186 | Update update = QueryBuilder.update(KEYSPACE_NAME, TABLE_NAME); 187 | update.with(set(VALUE_NAME, value)).where(eq(KEY_NAME, key)); 188 | if (state.getValue() != null) { 189 | update.onlyIf(eq(VALUE_NAME, state.getValue())); 190 | } 191 | return update; 192 | } 193 | 194 | @Override 195 | public SalesState currentState(String key, List rows) { 196 | if (rows.size() == 0) { 197 | return new SalesState(null, null); 198 | } else { 199 | return new SalesState(rows.get(0).getInt(VALUE_NAME), ""); 200 | } 201 | } 202 | 203 | @Override 204 | public String getKey(TridentTuple tuple) { 205 | String state = tuple.getString(1); 206 | return state; 207 | } 208 | 209 | @Override 210 | public Number getValue(TridentTuple tuple) { 211 | return tuple.getInteger(0); 212 | } 213 | ``` 
214 | 215 | Notice the update statement includes a condition. Also notice that the mapper provides a way to retrieve the relevant key and value from the tuple. These methods are used by the updater to get the key and value to incorporate into the aggregation. 216 | 217 | Configuration 218 | =================== 219 | You will likely need to configure various properties of the CQL client/cluster. To do that, you use the MapConfiguredCqlClientFactory. Populate a map with the configuration parameters. Storm then passes that configuration along to the StateFactory, which uses that configuration to construct the client factory. You can set things like read timeouts, etc. 220 | 221 | For an example, take a look at: 222 | [MapConfiguredCqlClientFactory](https://github.com/hmsonline/storm-cassandra-cql/blob/master/src/test/java/com/hmsonline/trident/cql/MapConfiguredCqlClientFactoryTest.java#L16-L24) 223 | 224 | 225 | -------------------------------------------------------------------------------- /libsnappyjava.jnilib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hmsonline/storm-cassandra-cql/2e0d30cdb097287a9111ade0efeb6abd83c78f7b/libsnappyjava.jnilib -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | org.sonatype.oss 5 | oss-parent 6 | 7 7 | 8 | 9 | 4.0.0 10 | com.hmsonline 11 | storm-cassandra-cql 12 | 0.3.11-SNAPSHOT 13 | Storm Cassandra CQL Support 14 | Storm Cassandra CQL Support 15 | 16 | 17 | scm:git:git@github.com:hmsonline/storm-cassandra-cql.git 18 | scm:git:git@github.com:hmsonline/storm-cassandra-cql.git 19 | :git@github.com:hmsonline/storm-cassandra-cql.git 20 | HEAD 21 | 22 | 23 | 24 | ossrh 25 | https://oss.sonatype.org/content/repositories/snapshots 26 | 27 | 28 | ossrh 29 | https://oss.sonatype.org/service/local/staging/deploy/maven2/ 30 | 31 | 32 | 
33 | 34 | 35 | boneill 36 | Brian O'Neill 37 | bone@alumni.brown.edu 38 | 39 | 40 | 41 | 42 | 43 | clojars.org 44 | http://clojars.org/repo 45 | 46 | 47 | 48 | 49 | 0.9.1-incubating 50 | 1.2.5 51 | 52 | 53 | 54 | 55 | junit 56 | junit 57 | 4.10 58 | test 59 | 60 | 61 | org.slf4j 62 | slf4j-log4j12 63 | 1.6.4 64 | 65 | 66 | org.slf4j 67 | slf4j-api 68 | 1.6.4 69 | 70 | 71 | org.apache.storm 72 | storm-core 73 | ${storm.version} 74 | 75 | provided 76 | 77 | 78 | slf4j-api 79 | org.slf4j 80 | 81 | 82 | 83 | 84 | com.datastax.cassandra 85 | cassandra-driver-core 86 | 3.0.0 87 | 88 | 89 | org.cassandraunit 90 | cassandra-unit 91 | 2.0.2.2 92 | 93 | 94 | com.google.guava 95 | guava 96 | 16.0.1 97 | 98 | 99 | net.jpountz.lz4 100 | lz4 101 | 1.2.0 102 | 103 | 104 | 105 | 106 | 107 | 108 | org.apache.maven.plugins 109 | maven-compiler-plugin 110 | 2.5.1 111 | 112 | 1.7 113 | 1.7 114 | 115 | 116 | 117 | org.apache.maven.plugins 118 | maven-surefire-plugin 119 | 2.5 120 | 121 | 122 | org.apache.maven.plugins 123 | maven-release-plugin 124 | 2.5 125 | 126 | true 127 | false 128 | release 129 | deploy 130 | 131 | 132 | 133 | org.apache.maven.scm 134 | maven-scm-provider-gitexe 135 | 1.8.1 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | release 144 | 145 | 146 | 147 | org.apache.maven.plugins 148 | maven-gpg-plugin 149 | 1.5 150 | 151 | 152 | sign-artifacts 153 | verify 154 | 155 | sign 156 | 157 | 158 | 159 | 160 | 161 | org.sonatype.plugins 162 | nexus-staging-maven-plugin 163 | 1.6.3 164 | true 165 | 166 | ossrh 167 | https://oss.sonatype.org/ 168 | true 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/CassandraCqlMapState.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.io.Serializable; 4 | import java.util.ArrayList; 5 | import java.util.Iterator; 6 | import 
java.util.List; 7 | import java.util.Map; 8 | 9 | import com.datastax.driver.core.exceptions.FunctionExecutionException; 10 | import com.datastax.driver.core.exceptions.ReadFailureException; 11 | import com.datastax.driver.core.exceptions.WriteFailureException; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import storm.trident.state.OpaqueValue; 16 | import storm.trident.state.StateFactory; 17 | import storm.trident.state.StateType; 18 | import storm.trident.state.TransactionalValue; 19 | import storm.trident.state.map.IBackingMap; 20 | import backtype.storm.Config; 21 | import backtype.storm.metric.api.CountMetric; 22 | import backtype.storm.task.IMetricsContext; 23 | import backtype.storm.topology.ReportedFailedException; 24 | 25 | import com.datastax.driver.core.BatchStatement; 26 | import com.datastax.driver.core.BatchStatement.Type; 27 | import com.datastax.driver.core.ResultSet; 28 | import com.datastax.driver.core.Row; 29 | import com.datastax.driver.core.Session; 30 | import com.datastax.driver.core.Statement; 31 | import com.datastax.driver.core.exceptions.AlreadyExistsException; 32 | import com.datastax.driver.core.exceptions.AuthenticationException; 33 | import com.datastax.driver.core.exceptions.DriverException; 34 | import com.datastax.driver.core.exceptions.DriverInternalError; 35 | import com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException; 36 | import com.datastax.driver.core.exceptions.InvalidQueryException; 37 | import com.datastax.driver.core.exceptions.InvalidTypeException; 38 | import com.datastax.driver.core.exceptions.QueryExecutionException; 39 | import com.datastax.driver.core.exceptions.QueryValidationException; 40 | import com.datastax.driver.core.exceptions.ReadTimeoutException; 41 | import com.datastax.driver.core.exceptions.SyntaxError; 42 | import com.datastax.driver.core.exceptions.TraceRetrievalException; 43 | import com.datastax.driver.core.exceptions.TruncateException; 44 | 
import com.datastax.driver.core.exceptions.UnauthorizedException; 45 | import com.datastax.driver.core.exceptions.UnavailableException; 46 | import com.datastax.driver.core.exceptions.WriteTimeoutException; 47 | import com.hmsonline.trident.cql.mappers.CqlRowMapper; 48 | 49 | /** 50 | * @param The generic state to back 51 | * @author robertlee 52 | */ 53 | public class CassandraCqlMapState implements IBackingMap { 54 | private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlMapState.class); 55 | 56 | @SuppressWarnings("serial") 57 | public static class Options implements Serializable { 58 | public int localCacheSize = 5000; 59 | public String globalKey = "globalkey"; 60 | public String keyspace; 61 | public String tableName; 62 | public Integer ttl = 86400; // 1 day 63 | public Type batchType = Type.LOGGED; 64 | // Unique name of storm metrics. Must ne unique in topology 65 | public String mapStateMetricName = this.toString(); 66 | public int maxBatchSize = 100; 67 | } 68 | 69 | ///////////////////////////////////////////// 70 | // Static Methods For Specific State Type StateFactory 71 | ///////////////////////////////////////////// 72 | 73 | @SuppressWarnings("rawtypes") 74 | public static StateFactory opaque(CqlRowMapper mapper) { 75 | Options options = new Options(); 76 | return opaque(mapper, options); 77 | } 78 | 79 | @SuppressWarnings("rawtypes") 80 | public static StateFactory opaque(CqlRowMapper mapper, Options opts) { 81 | return new CassandraCqlMapStateFactory(mapper, StateType.OPAQUE, opts); 82 | } 83 | 84 | @SuppressWarnings("rawtypes") 85 | public static StateFactory transactional(CqlRowMapper mapper) { 86 | Options options = new Options(); 87 | return transactional(mapper, options); 88 | } 89 | 90 | @SuppressWarnings("rawtypes") 91 | public static StateFactory transactional(CqlRowMapper mapper, Options opts) { 92 | return new CassandraCqlMapStateFactory(mapper, StateType.TRANSACTIONAL, opts); 93 | } 94 | 95 | 
@SuppressWarnings("rawtypes") 96 | public static StateFactory nonTransactional(CqlRowMapper mapper) { 97 | Options options = new Options(); 98 | return nonTransactional(mapper, options); 99 | } 100 | 101 | @SuppressWarnings("rawtypes") 102 | public static StateFactory nonTransactional(CqlRowMapper mapper, Options opts) { 103 | return new CassandraCqlMapStateFactory(mapper, StateType.NON_TRANSACTIONAL, opts); 104 | } 105 | 106 | ////////////////////////////// 107 | // Instance Variables 108 | ////////////////////////////// 109 | // private Options options; 110 | protected final Session session; 111 | 112 | @SuppressWarnings("rawtypes") 113 | protected CqlRowMapper mapper; 114 | 115 | private Options options; 116 | 117 | // Metrics for storm metrics registering 118 | CountMetric _mreads; 119 | CountMetric _mwrites; 120 | CountMetric _mexceptions; 121 | 122 | @SuppressWarnings({"rawtypes"}) 123 | public CassandraCqlMapState(Session session, CqlRowMapper mapper, Options options, Map conf) { 124 | this.options = options; 125 | this.session = session; 126 | this.mapper = mapper; 127 | } 128 | 129 | //////////////////////////////////// 130 | // Overridden Methods for IBackingMap 131 | //////////////////////////////////// 132 | @SuppressWarnings("unchecked") 133 | @Override 134 | public List multiGet(List> keys) { 135 | try { 136 | List values = new ArrayList(); 137 | 138 | for (List rowKey : keys) { 139 | Statement statement = mapper.retrieve(rowKey); 140 | ResultSet results = session.execute(statement); 141 | // TODO: Better way to check for empty results besides accessing entire results list 142 | Iterator rowIter = results.iterator(); 143 | Row row; 144 | if (results != null && rowIter.hasNext() && (row = rowIter.next()) != null) { 145 | if (rowIter.hasNext()) { 146 | LOG.error("Found non-unique value for key [{}]", rowKey); 147 | } else { 148 | values.add((T) mapper.getValue(row)); 149 | } 150 | } else { 151 | values.add(null); 152 | } 153 | } 154 | 155 | 
_mreads.incrBy(values.size()); 156 | LOG.debug("Retrieving the following keys: {} with values: {}", keys, values); 157 | return values; 158 | } catch (Exception e) { 159 | checkCassandraException(e); 160 | throw new IllegalStateException("Impossible to reach this code"); 161 | } 162 | } 163 | 164 | @SuppressWarnings("unchecked") 165 | @Override 166 | public void multiPut(List> keys, List values) { 167 | LOG.debug("Putting the following keys: {} with values: {}", keys, values); 168 | try { 169 | List statements = new ArrayList(); 170 | 171 | // Retrieve the mapping statement for the key,val pair 172 | for (int i = 0; i < keys.size(); i++) { 173 | List key = keys.get(i); 174 | T val = values.get(i); 175 | Statement retrievedStatment = mapper.map(key, val); 176 | if (retrievedStatment instanceof BatchStatement) { // Allows for BatchStatements to be returned by the mapper. 177 | BatchStatement batchedStatment = (BatchStatement) retrievedStatment; 178 | statements.addAll(batchedStatment.getStatements()); 179 | } else { 180 | statements.add(retrievedStatment); 181 | } 182 | } 183 | 184 | // Execute all the statements as a batch. 
185 | BatchStatement batch = new BatchStatement(options.batchType); 186 | int i = 0; 187 | for (Statement statement : statements) { 188 | batch.add(statement); 189 | i++; 190 | if(i >= options.maxBatchSize) { 191 | session.execute(batch); 192 | batch = new BatchStatement(options.batchType); 193 | i = 0; 194 | } 195 | } 196 | if (i > 0) { 197 | session.execute(batch); 198 | } 199 | 200 | _mwrites.incrBy(statements.size()); 201 | } catch (Exception e) { 202 | checkCassandraException(e); 203 | LOG.error("Exception {} caught.", e); 204 | } 205 | } 206 | 207 | protected void checkCassandraException(Exception e) { 208 | _mexceptions.incr(); 209 | if (e instanceof AlreadyExistsException || 210 | e instanceof AuthenticationException || 211 | e instanceof DriverException || 212 | e instanceof DriverInternalError || 213 | e instanceof InvalidConfigurationInQueryException || 214 | e instanceof InvalidQueryException || 215 | e instanceof InvalidTypeException || 216 | e instanceof QueryExecutionException || 217 | e instanceof QueryValidationException || 218 | e instanceof ReadTimeoutException || 219 | e instanceof SyntaxError || 220 | e instanceof TraceRetrievalException || 221 | e instanceof TruncateException || 222 | e instanceof UnauthorizedException || 223 | e instanceof UnavailableException || 224 | e instanceof ReadTimeoutException || 225 | e instanceof WriteTimeoutException || 226 | e instanceof ReadFailureException || 227 | e instanceof WriteFailureException || 228 | e instanceof FunctionExecutionException) { 229 | throw new ReportedFailedException(e); 230 | } else { 231 | throw new RuntimeException(e); 232 | } 233 | } 234 | 235 | @SuppressWarnings("rawtypes") 236 | public void registerMetrics(Map conf, IMetricsContext context, String mapStateMetricName) { 237 | int bucketSize = (Integer) (conf.get(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)); 238 | String metricBaseName = "cassandra/" + mapStateMetricName; 239 | _mreads = context.registerMetric(metricBaseName + 
"/readCount", new CountMetric(), bucketSize); 240 | _mwrites = context.registerMetric(metricBaseName + "/writeCount", new CountMetric(), bucketSize); 241 | _mexceptions = context.registerMetric(metricBaseName + "/exceptionCount", new CountMetric(), bucketSize); 242 | } 243 | } 244 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/CassandraCqlMapStateFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.util.Map; 4 | 5 | import com.datastax.driver.core.Session; 6 | import storm.trident.state.State; 7 | import storm.trident.state.StateFactory; 8 | import storm.trident.state.StateType; 9 | import storm.trident.state.map.CachedMap; 10 | import storm.trident.state.map.MapState; 11 | import storm.trident.state.map.NonTransactionalMap; 12 | import storm.trident.state.map.OpaqueMap; 13 | import storm.trident.state.map.SnapshottableMap; 14 | import storm.trident.state.map.TransactionalMap; 15 | import backtype.storm.task.IMetricsContext; 16 | import backtype.storm.tuple.Values; 17 | 18 | import com.hmsonline.trident.cql.CassandraCqlMapState.Options; 19 | import com.hmsonline.trident.cql.mappers.CqlRowMapper; 20 | 21 | /** 22 | * The class responsible for generating instances of 23 | * the {@link CassandraCqlMapState}. 
24 | * 25 | * @author robertlee 26 | */ 27 | public class CassandraCqlMapStateFactory implements StateFactory { 28 | private static final long serialVersionUID = 1L; 29 | 30 | protected CqlClientFactory clientFactory; 31 | protected StateType stateType; 32 | protected Options options; 33 | 34 | @SuppressWarnings("rawtypes") 35 | protected CqlRowMapper mapper; 36 | 37 | @SuppressWarnings({"rawtypes"}) 38 | public CassandraCqlMapStateFactory(CqlRowMapper mapper, StateType stateType, Options options) { 39 | this.stateType = stateType; 40 | this.options = options; 41 | this.mapper = mapper; 42 | } 43 | 44 | @SuppressWarnings({"rawtypes", "unchecked"}) 45 | public State makeState(Map configuration, IMetricsContext metrics, int partitionIndex, int numPartitions) { 46 | 47 | if (clientFactory == null) { 48 | clientFactory = new MapConfiguredCqlClientFactory(configuration); 49 | } 50 | 51 | Session session; 52 | if(options.keyspace != null) { 53 | session = clientFactory.getSession(options.keyspace); 54 | } else { 55 | session = clientFactory.getSession(); 56 | } 57 | 58 | CassandraCqlMapState state = new CassandraCqlMapState(session, mapper, options, configuration); 59 | state.registerMetrics(configuration, metrics, options.mapStateMetricName); 60 | 61 | CachedMap cachedMap = new CachedMap(state, options.localCacheSize); 62 | 63 | MapState mapState; 64 | if (stateType == StateType.NON_TRANSACTIONAL) { 65 | mapState = NonTransactionalMap.build(cachedMap); 66 | } else if (stateType == StateType.OPAQUE) { 67 | mapState = OpaqueMap.build(cachedMap); 68 | } else if (stateType == StateType.TRANSACTIONAL) { 69 | mapState = TransactionalMap.build(cachedMap); 70 | } else { 71 | throw new RuntimeException("Unknown state type: " + stateType); 72 | } 73 | 74 | return new SnapshottableMap(mapState, new Values(options.globalKey)); 75 | } 76 | } -------------------------------------------------------------------------------- 
/src/main/java/com/hmsonline/trident/cql/CassandraCqlState.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.util.ArrayList; 4 | import java.util.List; 5 | 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import storm.trident.state.State; 10 | 11 | import com.datastax.driver.core.BatchStatement; 12 | import com.datastax.driver.core.BatchStatement.Type; 13 | import com.datastax.driver.core.ConsistencyLevel; 14 | import com.datastax.driver.core.ResultSet; 15 | import com.datastax.driver.core.Statement; 16 | 17 | public class CassandraCqlState implements State { 18 | private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlState.class); 19 | private CqlClientFactory clientFactory; 20 | private int maxBatchSize; 21 | private ConsistencyLevel batchConsistencyLevel; 22 | private Type batchType = Type.LOGGED; 23 | 24 | List statements = new ArrayList(); 25 | 26 | public CassandraCqlState(CqlClientFactory clientFactory, ConsistencyLevel batchConsistencyLevel) { 27 | this(clientFactory, CassandraCqlStateFactory.DEFAULT_MAX_BATCH_SIZE, batchConsistencyLevel); 28 | } 29 | 30 | public CassandraCqlState(CqlClientFactory clientFactory, int maxBatchSize, ConsistencyLevel batchConsistencyLevel) { 31 | this.clientFactory = clientFactory; 32 | this.maxBatchSize = maxBatchSize; 33 | this.batchConsistencyLevel = batchConsistencyLevel; 34 | } 35 | 36 | @Override 37 | public void beginCommit(Long txid) { 38 | } 39 | 40 | @Override 41 | public void commit(Long txid) { 42 | LOG.debug("Commiting [{}]", txid); 43 | BatchStatement batch = new BatchStatement(batchType); 44 | batch.setConsistencyLevel(batchConsistencyLevel); 45 | int i = 0; 46 | for(Statement statement : this.statements) { 47 | batch.add(statement); 48 | i++; 49 | if(i >= this.maxBatchSize) { 50 | clientFactory.getSession().execute(batch); 51 | batch = new BatchStatement(batchType); 52 | i = 0; 
53 | } 54 | } 55 | if(i > 0) { 56 | clientFactory.getSession().execute(batch); 57 | } 58 | this.statements.clear(); 59 | } 60 | 61 | public void addStatement(Statement statement) { 62 | this.statements.add(statement); 63 | } 64 | 65 | public ResultSet execute(Statement statement){ 66 | return clientFactory.getSession().execute(statement); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/CassandraCqlStateFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.util.Map; 4 | 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import storm.trident.state.State; 9 | import storm.trident.state.StateFactory; 10 | import backtype.storm.task.IMetricsContext; 11 | 12 | import com.datastax.driver.core.ConsistencyLevel; 13 | 14 | @SuppressWarnings("rawtypes") 15 | public class CassandraCqlStateFactory implements StateFactory { 16 | private static final long serialVersionUID = 1L; 17 | private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlStateFactory.class); 18 | public static final String TRIDENT_CASSANDRA_MAX_BATCH_SIZE = "trident.cassandra.maxbatchsize"; 19 | 20 | public static final int DEFAULT_MAX_BATCH_SIZE = 100; 21 | private static CqlClientFactory clientFactory; 22 | private ConsistencyLevel batchConsistencyLevel; 23 | 24 | public CassandraCqlStateFactory(ConsistencyLevel batchConsistencyLevel){ 25 | this.batchConsistencyLevel = batchConsistencyLevel; 26 | } 27 | 28 | @SuppressWarnings("unchecked") 29 | @Override 30 | public State makeState(Map configuration, IMetricsContext metrics, int partitionIndex, int numPartitions) { 31 | // worth synchronizing here? 
32 | if (clientFactory == null) { 33 | clientFactory = new MapConfiguredCqlClientFactory(configuration); 34 | } 35 | final String maxBatchSizeString = (String) configuration.get(CassandraCqlStateFactory.TRIDENT_CASSANDRA_MAX_BATCH_SIZE); 36 | final int maxBatchSize = (maxBatchSizeString == null) ? DEFAULT_MAX_BATCH_SIZE : Integer.parseInt((String) maxBatchSizeString); 37 | LOG.debug("Creating State for partition [{}] of [{}]", new Object[]{partitionIndex, numPartitions}); 38 | return new CassandraCqlState(clientFactory, maxBatchSize, batchConsistencyLevel); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/CassandraCqlStateUpdater.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import com.datastax.driver.core.Statement; 4 | import com.hmsonline.trident.cql.mappers.CqlTupleMapper; 5 | 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import storm.trident.operation.TridentCollector; 10 | import storm.trident.operation.TridentOperationContext; 11 | import storm.trident.state.StateUpdater; 12 | import storm.trident.tuple.TridentTuple; 13 | 14 | import java.util.List; 15 | import java.util.Map; 16 | 17 | public class CassandraCqlStateUpdater implements StateUpdater { 18 | private static final long serialVersionUID = 1L; 19 | private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlStateUpdater.class); 20 | private CqlTupleMapper mapper = null; 21 | private boolean propagateTuples; 22 | 23 | public CassandraCqlStateUpdater(CqlTupleMapper mapper) { 24 | this(mapper, false); 25 | } 26 | 27 | public CassandraCqlStateUpdater(CqlTupleMapper mapper, boolean propagateTuples) { 28 | this.mapper = mapper; 29 | this.propagateTuples = propagateTuples; 30 | } 31 | 32 | @SuppressWarnings("rawtypes") 33 | @Override 34 | public void prepare(Map configuration, 
TridentOperationContext context) { 35 | LOG.debug("Preparing updater with [{}]", configuration); 36 | } 37 | 38 | @Override 39 | public void cleanup() { 40 | } 41 | 42 | @Override 43 | public void updateState(CassandraCqlState state, List tuples, TridentCollector collector) { 44 | for (TridentTuple tuple : tuples) { 45 | Statement statement = this.mapper.map(tuple); 46 | state.addStatement(statement); 47 | if (propagateTuples) { 48 | collector.emit(tuple); 49 | } 50 | } 51 | } 52 | } -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/ConstructorConfiguredCqlClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.net.InetSocketAddress; 4 | import java.util.ArrayList; 5 | import java.util.List; 6 | 7 | import org.apache.commons.lang.StringUtils; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import com.datastax.driver.core.Cluster; 12 | import com.datastax.driver.core.ConsistencyLevel; 13 | import com.datastax.driver.core.ProtocolOptions; 14 | import com.datastax.driver.core.QueryOptions; 15 | 16 | public class ConstructorConfiguredCqlClientFactory extends CqlClientFactory { 17 | private static final long serialVersionUID = 1L; 18 | private static final Logger LOG = LoggerFactory.getLogger(ConstructorConfiguredCqlClientFactory.class); 19 | private String[] hosts; 20 | private String clusterName = null; 21 | private ConsistencyLevel clusterConsistencyLevel= null; 22 | private ConsistencyLevel serialConsistencyLevel = null; 23 | 24 | protected static Cluster cluster; 25 | private final ProtocolOptions.Compression compression; 26 | 27 | public ConstructorConfiguredCqlClientFactory(String hosts) { 28 | this(hosts, null, ConsistencyLevel.QUORUM, QueryOptions.DEFAULT_SERIAL_CONSISTENCY_LEVEL, ProtocolOptions.Compression.NONE); 29 | } 30 | 31 | public 
ConstructorConfiguredCqlClientFactory(String hosts, ConsistencyLevel clusterConsistency) { 32 | this(hosts, null, clusterConsistency, QueryOptions.DEFAULT_SERIAL_CONSISTENCY_LEVEL, ProtocolOptions.Compression.NONE); 33 | } 34 | 35 | public ConstructorConfiguredCqlClientFactory(String hosts, String clusterName, ConsistencyLevel clusterConsistency, 36 | ConsistencyLevel conditionalUpdateConsistency, ProtocolOptions.Compression compression) { 37 | this.hosts = hosts.split(","); 38 | this.clusterConsistencyLevel = clusterConsistency; 39 | if (conditionalUpdateConsistency != null){ 40 | this.serialConsistencyLevel = conditionalUpdateConsistency; 41 | } 42 | if (clusterName != null) { 43 | this.clusterName = clusterName; 44 | } 45 | this.compression = compression; 46 | } 47 | 48 | public Cluster.Builder getClusterBuilder() { 49 | 50 | final List sockets = new ArrayList(); 51 | for (String host : hosts) { 52 | if(StringUtils.contains(host, ":")) { 53 | String hostParts [] = StringUtils.split(host, ":"); 54 | sockets.add(new InetSocketAddress(hostParts[0], Integer.valueOf(hostParts[1]))); 55 | LOG.debug("Connecting to [" + host + "] with port [" + hostParts[1] + "]"); 56 | } else { 57 | sockets.add(new InetSocketAddress(host, ProtocolOptions.DEFAULT_PORT)); 58 | LOG.debug("Connecting to [" + host + "] with port [" + ProtocolOptions.DEFAULT_PORT + "]"); 59 | } 60 | } 61 | 62 | Cluster.Builder builder = Cluster.builder().addContactPointsWithPorts(sockets).withCompression(compression); 63 | QueryOptions queryOptions = new QueryOptions(); 64 | queryOptions.setConsistencyLevel(clusterConsistencyLevel); 65 | queryOptions.setSerialConsistencyLevel(serialConsistencyLevel); 66 | builder = builder.withQueryOptions(queryOptions); 67 | 68 | if (StringUtils.isNotEmpty(clusterName)) { 69 | builder = builder.withClusterName(clusterName); 70 | } 71 | 72 | return builder; 73 | 74 | } 75 | } 76 | -------------------------------------------------------------------------------- 
/src/main/java/com/hmsonline/trident/cql/CqlClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Session; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.io.Serializable; 9 | import java.util.HashMap; 10 | import java.util.Map; 11 | 12 | public abstract class CqlClientFactory implements Serializable { 13 | 14 | private static final long serialVersionUID = 2L; 15 | private static final Logger LOG = LoggerFactory.getLogger(CqlClientFactory.class); 16 | private Map sessions = new HashMap<>(); 17 | private Session defaultSession = null; 18 | private Cluster cluster = null; 19 | 20 | /** 21 | * Subclasses must implement the creation of 22 | * of the Cluster.Builder. 23 | * 24 | * @return 25 | */ 26 | abstract Cluster.Builder getClusterBuilder(); 27 | 28 | /** 29 | * This allows subclasses to work with the cluster after it is created. 30 | * For example, QueryLoggers could be registered. 
31 | * 32 | * @param cluster 33 | */ 34 | protected void prepareCluster(final Cluster cluster) {} 35 | 36 | public Session getSession(String keyspace) { 37 | Session session = sessions.get(keyspace); 38 | if (session == null) { 39 | LOG.debug("Constructing session for keyspace [" + keyspace + "]"); 40 | session = initializeSession(keyspace); 41 | } 42 | return session; 43 | } 44 | 45 | public Session getSession() { 46 | if (defaultSession == null) { 47 | defaultSession = initializeSession(); 48 | } 49 | return defaultSession; 50 | } 51 | 52 | private synchronized Session initializeSession() { 53 | if (defaultSession == null) { 54 | defaultSession = getCluster().connect(); 55 | } 56 | return defaultSession; 57 | } 58 | private synchronized Session initializeSession(String keyspace) { 59 | Session session = sessions.get(keyspace); 60 | if (session==null) { 61 | session = getCluster().connect(keyspace); 62 | sessions.put(keyspace, session); 63 | } 64 | return session; 65 | } 66 | protected Cluster getCluster() { 67 | if (cluster == null || cluster.isClosed()) { 68 | if (cluster != null && cluster.isClosed()){ 69 | LOG.warn("Cluster closed, reconstructing cluster for [{}]", cluster.getClusterName()); 70 | } 71 | 72 | cluster = getClusterBuilder().build(); 73 | 74 | if (cluster == null) { 75 | throw new RuntimeException("Critical error: cluster is null after building."); 76 | } else { 77 | prepareCluster(cluster); 78 | } 79 | } 80 | 81 | return cluster; 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/MapConfiguredCqlClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.ConsistencyLevel; 5 | import com.datastax.driver.core.ProtocolOptions; 6 | import com.datastax.driver.core.QueryLogger; 7 | import 
com.datastax.driver.core.QueryOptions; 8 | import com.datastax.driver.core.SocketOptions; 9 | import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy; 10 | import com.datastax.driver.core.policies.LoadBalancingPolicy; 11 | import org.apache.commons.lang.StringUtils; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.net.InetSocketAddress; 16 | import java.util.ArrayList; 17 | import java.util.List; 18 | import java.util.Map; 19 | 20 | public class MapConfiguredCqlClientFactory extends CqlClientFactory { 21 | private static final long serialVersionUID = 1L; 22 | private static final Logger LOG = LoggerFactory.getLogger(MapConfiguredCqlClientFactory.class); 23 | 24 | public static final String TRIDENT_CASSANDRA_CQL_HOSTS = "trident.cassandra.cql.hosts"; 25 | public static final String TRIDENT_CASSANDRA_COMPRESSION = "trident.cassandra.compression"; 26 | public static final String TRIDENT_CASSANDRA_CONNECT_TIMEOUT = "trident.cassandra.connect.timeout"; 27 | public static final String TRIDENT_CASSANDRA_READ_TIMEOUT = "trident.cassandra.read.timeout"; 28 | public static final String TRIDENT_CASSANDRA_CLUSTER_NAME = "trident.cassandra.cluster.name"; 29 | public static final String TRIDENT_CASSANDRA_LOCAL_DATA_CENTER_NAME = "trident.cassandra.local.data.center.name"; 30 | public static final String TRIDENT_CASSANDRA_CONSISTENCY = "trident.cassandra.consistency"; 31 | public static final String TRIDENT_CASSANDRA_SERIAL_CONSISTENCY = "trident.cassandra.serial.consistency"; 32 | public static final String TRIDENT_CASSANDRA_QUERY_LOGGER_CONSTANT_THRESHOLD = "trident.cassandra.query.logger.constant.threshold"; 33 | 34 | final Map configuration; 35 | 36 | Cluster.Builder builder; 37 | 38 | public MapConfiguredCqlClientFactory(final Map configuration) { 39 | this.builder = Cluster.builder(); 40 | this.configuration = configuration; 41 | } 42 | 43 | private void configureHosts() { 44 | final String hostConfiguration = (String) 
configuration.get(TRIDENT_CASSANDRA_CQL_HOSTS); 45 | final String[] hosts = hostConfiguration.split(","); 46 | final List sockets = new ArrayList(); 47 | for (final String host : hosts) { 48 | if(StringUtils.contains(host, ":")) { 49 | final String hostParts [] = StringUtils.split(host, ":"); 50 | sockets.add(new InetSocketAddress(hostParts[0], Integer.valueOf(hostParts[1]))); 51 | LOG.debug("Configuring [" + host + "] with port [" + hostParts[1] + "]"); 52 | } else { 53 | sockets.add(new InetSocketAddress(host, ProtocolOptions.DEFAULT_PORT)); 54 | LOG.debug("Configuring [" + host + "] with port [" + ProtocolOptions.DEFAULT_PORT + "]"); 55 | } 56 | } 57 | builder = builder.addContactPointsWithPorts(sockets); 58 | } 59 | 60 | private void configureSocketOpts() { 61 | final String readTimeoutConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_READ_TIMEOUT); 62 | final String connectTimeoutConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_CONNECT_TIMEOUT); 63 | final SocketOptions socketOptions = builder.getConfiguration().getSocketOptions(); 64 | 65 | if (StringUtils.isNotEmpty(readTimeoutConfiguration)) { 66 | socketOptions.setReadTimeoutMillis(Integer.parseInt(readTimeoutConfiguration)); 67 | } 68 | 69 | if (StringUtils.isNotEmpty(connectTimeoutConfiguration)) { 70 | socketOptions.setConnectTimeoutMillis(Integer.parseInt(connectTimeoutConfiguration)); 71 | } 72 | 73 | builder = builder.withSocketOptions(socketOptions); 74 | } 75 | 76 | private void configureQueryOptions() { 77 | 78 | final String consistencyConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_CONSISTENCY); 79 | final String serialConsistencyConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_SERIAL_CONSISTENCY); 80 | final QueryOptions queryOptions = builder.getConfiguration().getQueryOptions(); 81 | 82 | if (StringUtils.isNotEmpty(consistencyConfiguration)) { 83 | queryOptions.setConsistencyLevel(ConsistencyLevel.valueOf(consistencyConfiguration)); 84 | } 85 | 
86 | if (StringUtils.isNotEmpty(serialConsistencyConfiguration)) { 87 | queryOptions.setSerialConsistencyLevel(ConsistencyLevel.valueOf(serialConsistencyConfiguration)); 88 | } 89 | 90 | builder = builder.withQueryOptions(queryOptions); 91 | 92 | } 93 | 94 | private void configureCompression() { 95 | final String compressionConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_COMPRESSION); 96 | if (StringUtils.isNotEmpty(compressionConfiguration)) { 97 | builder = builder.withCompression(ProtocolOptions.Compression.valueOf(compressionConfiguration)); 98 | } 99 | } 100 | 101 | private void configureOther() { 102 | final String nameConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_CLUSTER_NAME); 103 | if (StringUtils.isNotEmpty(nameConfiguration)) { 104 | builder = builder.withClusterName(nameConfiguration); 105 | } 106 | } 107 | 108 | private void configureLoadBalancingPolicy() { 109 | final String dataCenterNameConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_LOCAL_DATA_CENTER_NAME); 110 | if (StringUtils.isNotEmpty(dataCenterNameConfiguration)) { 111 | final LoadBalancingPolicy loadBalancingPolicy = DCAwareRoundRobinPolicy.builder().withLocalDc(dataCenterNameConfiguration).build(); 112 | builder = builder.withLoadBalancingPolicy(loadBalancingPolicy); 113 | } 114 | } 115 | 116 | public void configure() { 117 | configureHosts(); 118 | configureSocketOpts(); 119 | configureQueryOptions(); 120 | configureCompression(); 121 | configureLoadBalancingPolicy(); 122 | configureOther(); 123 | } 124 | 125 | @Override 126 | protected void prepareCluster(final Cluster cluster) { 127 | super.prepareCluster(cluster); 128 | final String threshold = (String) configuration.get(TRIDENT_CASSANDRA_QUERY_LOGGER_CONSTANT_THRESHOLD); 129 | if (StringUtils.isNotEmpty(threshold)) { 130 | final QueryLogger logger = QueryLogger.builder() 131 | .withConstantThreshold(Long.parseLong(threshold)) 132 | .build(); 133 | cluster.register(logger); 134 | } 135 | } 136 | 
137 | public Cluster.Builder getClusterBuilder() { 138 | configure(); 139 | return builder; 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/incremental/CassandraCqlIncrementalState.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.incremental; 2 | 3 | import java.util.HashMap; 4 | import java.util.List; 5 | import java.util.Map; 6 | 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | import storm.trident.operation.CombinerAggregator; 11 | import storm.trident.state.State; 12 | import storm.trident.tuple.TridentTuple; 13 | 14 | import com.datastax.driver.core.ResultSet; 15 | import com.datastax.driver.core.Row; 16 | import com.datastax.driver.core.Statement; 17 | import com.datastax.driver.core.exceptions.DriverException; 18 | import com.datastax.driver.core.exceptions.QueryExecutionException; 19 | import com.hmsonline.trident.cql.CqlClientFactory; 20 | 21 | public class CassandraCqlIncrementalState implements State { 22 | private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlIncrementalState.class); 23 | private CqlClientFactory clientFactory; 24 | private CombinerAggregator aggregator; 25 | private CqlIncrementMapper mapper; 26 | private Map aggregateValues; 27 | public static int MAX_ATTEMPTS = 10; 28 | private int partitionIndex; 29 | private int maxAttempts; 30 | 31 | public CassandraCqlIncrementalState(CqlClientFactory clientFactory, CombinerAggregator aggregator, 32 | CqlIncrementMapper mapper, int partitionIndex) { 33 | init(clientFactory, aggregator, mapper, partitionIndex, MAX_ATTEMPTS); 34 | } 35 | 36 | public CassandraCqlIncrementalState(CqlClientFactory clientFactory, CombinerAggregator aggregator, 37 | CqlIncrementMapper mapper, int partitionIndex, int maxAttempts) { 38 | init(clientFactory, aggregator, mapper, partitionIndex, maxAttempts); 39 | } 
40 | 41 | private void init(CqlClientFactory clientFactory, CombinerAggregator aggregator, 42 | CqlIncrementMapper mapper, int partitionIndex, int maxAttempts) { 43 | this.clientFactory = clientFactory; 44 | this.aggregator = aggregator; 45 | this.mapper = mapper; 46 | this.partitionIndex = partitionIndex; 47 | this.maxAttempts = maxAttempts; 48 | } 49 | 50 | @Override 51 | public void beginCommit(Long txid) { 52 | aggregateValues = new HashMap(); 53 | } 54 | 55 | private boolean applyUpdate(Statement updateStatement, Long txid) { 56 | LOG.debug("APPLYING [{}]", updateStatement.toString()); 57 | ResultSet results = clientFactory.getSession().execute(updateStatement); 58 | Row row = results.one(); 59 | if (row != null) { 60 | return row.getBool("[applied]"); 61 | } else { 62 | return true; 63 | } 64 | } 65 | 66 | @Override 67 | public void commit(Long txid) { 68 | DriverException lastException = null; 69 | // Read current value. 70 | //if we failed to apply the update , maybe the state has change already , we need to calculate the new state and apply it again 71 | for (Map.Entry entry : aggregateValues.entrySet()) { 72 | int attempts = 0; 73 | boolean applied = false; 74 | while (!applied && attempts < maxAttempts) { 75 | try{ 76 | applied = updateState(entry, txid); 77 | } catch(QueryExecutionException e) { 78 | lastException = e; 79 | LOG.warn("Catching {} attempt {}"+txid+"-"+partitionIndex, e.getMessage(), attempts); 80 | } 81 | attempts++; 82 | } 83 | if(!applied) { 84 | if(lastException != null) { 85 | throw new CassandraCqlIncrementalStateException("Ran out of attempts ["+attempts+"] max of ["+maxAttempts+"] "+txid+"-"+ partitionIndex, lastException); 86 | } else { 87 | throw new CassandraCqlIncrementalStateException("Ran out of attempts ["+attempts+"] max of ["+maxAttempts+"] "+txid+"-"+ partitionIndex); 88 | } 89 | } 90 | } 91 | } 92 | 93 | private boolean updateState(Map.Entry entry, Long txid) { 94 | Statement readStatement = mapper.read(entry.getKey()); 
95 | LOG.debug("EXECUTING [{}]", readStatement.toString()); 96 | 97 | ResultSet results = clientFactory.getSession().execute(readStatement); 98 | List rows = results.all(); 99 | PersistedState persistedState = mapper.currentState(entry.getKey(), rows); 100 | LOG.debug("Persisted value = [{}]", persistedState.getValue()); 101 | 102 | V combinedValue; 103 | if (persistedState.getValue() != null) 104 | combinedValue = aggregator.combine(entry.getValue(), persistedState.getValue()); 105 | else 106 | combinedValue = entry.getValue(); 107 | 108 | Statement updateStatement = mapper.update(entry.getKey(), combinedValue, persistedState, txid, 109 | partitionIndex); 110 | //mapper don't want to update 111 | if(updateStatement==null){ 112 | return true; 113 | } 114 | return applyUpdate(updateStatement, txid); 115 | } 116 | 117 | // TODO: Do we need to synchronize this? (or use Concurrent) 118 | public void aggregateValue(TridentTuple tuple) { 119 | K key = mapper.getKey(tuple); 120 | V value = mapper.getValue(tuple); 121 | V currentValue = aggregateValues.get(key); 122 | V newValue; 123 | if (currentValue == null) { 124 | newValue = aggregator.init(tuple); 125 | } else { 126 | newValue = aggregator.combine(currentValue, value); 127 | } 128 | LOG.debug("Updating state [{}] ==> [{}]", new Object[]{key, newValue}); 129 | aggregateValues.put(key, newValue); 130 | } 131 | 132 | } 133 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/incremental/CassandraCqlIncrementalStateException.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.incremental; 2 | 3 | public class CassandraCqlIncrementalStateException extends RuntimeException { 4 | private static final long serialVersionUID = 4532573980053416051L; 5 | 6 | public CassandraCqlIncrementalStateException(String message) { 7 | super(message); 8 | } 9 | 10 | public 
CassandraCqlIncrementalStateException(String message, Exception e) { 11 | super(message, e); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/incremental/CassandraCqlIncrementalStateFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.incremental; 2 | 3 | import java.util.Map; 4 | 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import storm.trident.operation.CombinerAggregator; 9 | import storm.trident.state.State; 10 | import storm.trident.state.StateFactory; 11 | import backtype.storm.task.IMetricsContext; 12 | 13 | import com.hmsonline.trident.cql.CqlClientFactory; 14 | import com.hmsonline.trident.cql.MapConfiguredCqlClientFactory; 15 | 16 | @SuppressWarnings("rawtypes") 17 | // TODO: Is it worth subclassing from CassandraCqlStateFactory? 18 | public class CassandraCqlIncrementalStateFactory implements StateFactory { 19 | private static final long serialVersionUID = 1L; 20 | private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlIncrementalStateFactory.class); 21 | private CqlClientFactory clientFactory; 22 | private CombinerAggregator aggregator; 23 | private CqlIncrementMapper mapper; 24 | 25 | public CassandraCqlIncrementalStateFactory(CombinerAggregator aggregator, CqlIncrementMapper mapper) { 26 | this.aggregator = aggregator; 27 | this.mapper = mapper; 28 | } 29 | 30 | protected void setCqlClientFactory(CqlClientFactory clientFactory){ 31 | this.clientFactory = clientFactory; 32 | } 33 | 34 | @SuppressWarnings("unchecked") 35 | @Override 36 | public State makeState(Map configuration, IMetricsContext metrics, int partitionIndex, int numPartitions) { 37 | // NOTE: Lazy instantiation because Cluster is not serializable. 
// ---- CassandraCqlIncrementalStateFactory.makeState (continued) ----
        if (clientFactory == null) {
            clientFactory = new MapConfiguredCqlClientFactory(configuration);
        }

        LOG.debug("Creating State for partition [{}] of [{}]", new Object[]{partitionIndex, numPartitions});
        return new CassandraCqlIncrementalState(clientFactory, aggregator, mapper, partitionIndex);
    }

}

// -------- src/main/java/com/hmsonline/trident/cql/incremental/CassandraCqlIncrementalStateUpdater.java --------

package com.hmsonline.trident.cql.incremental;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import storm.trident.operation.TridentCollector;
import storm.trident.operation.TridentOperationContext;
import storm.trident.state.StateUpdater;
import storm.trident.tuple.TridentTuple;

import java.util.List;
import java.util.Map;

/**
 * Trident StateUpdater that folds each tuple of a batch into the in-memory
 * aggregate held by {@link CassandraCqlIncrementalState}; the state itself
 * performs the Cassandra writes at commit time.
 *
 * NOTE(review): the generic parameters below restore a declaration that was
 * mangled in this copy of the source ("implements StateUpdater>" is not valid
 * Java) — confirm against upstream.
 */
public class CassandraCqlIncrementalStateUpdater<K, V> implements StateUpdater<CassandraCqlIncrementalState<K, V>> {
    private static final long serialVersionUID = 1L;
    private static final Logger LOG = LoggerFactory.getLogger(CassandraCqlIncrementalStateUpdater.class);

    @SuppressWarnings("rawtypes")
    @Override
    public void prepare(Map configuration, TridentOperationContext context) {
        LOG.debug("Preparing updater with [{}]", configuration);
    }

    @Override
    public void cleanup() {
        // No resources to release.
    }

    @Override
    public void updateState(CassandraCqlIncrementalState<K, V> state, List<TridentTuple> tuples,
            TridentCollector collector) {
        // Aggregate only; the actual CQL write happens in the state's commit().
        for (TridentTuple tuple : tuples) {
            state.aggregateValue(tuple);
        }
    }
}

// -------- src/main/java/com/hmsonline/trident/cql/incremental/CqlIncrementMapper.java --------

package
com.hmsonline.trident.cql.incremental; 2 | 3 | import java.util.List; 4 | 5 | import storm.trident.tuple.TridentTuple; 6 | 7 | import com.datastax.driver.core.Row; 8 | import com.datastax.driver.core.Statement; 9 | 10 | public interface CqlIncrementMapper { 11 | 12 | public Statement read(K key); 13 | 14 | public Statement update(K key, V value, PersistedState state, long txid, int partitionIndex); 15 | 16 | public PersistedState currentState(K key, List rows); 17 | 18 | public K getKey(TridentTuple tuple); 19 | 20 | public V getValue(TridentTuple tuple); 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/incremental/PersistedState.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.incremental; 2 | 3 | public interface PersistedState { 4 | V getValue(); 5 | String getPartitionKey(); 6 | } 7 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/mappers/CqlRowMapper.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.mappers; 2 | 3 | import com.datastax.driver.core.Row; 4 | 5 | /** 6 | * The CqlRowMapper interface extends CqlTupleMapper 7 | * with the ability to translate a single row into an object. 
8 | * 9 | * @param K the key to map and retrieve 10 | * @param V the value to map and retrieve 11 | * @author boneill 12 | */ 13 | public abstract interface CqlRowMapper extends CqlTupleMapper { 14 | public V getValue(Row row); 15 | } 16 | -------------------------------------------------------------------------------- /src/main/java/com/hmsonline/trident/cql/mappers/CqlTupleMapper.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.mappers; 2 | 3 | import storm.trident.tuple.TridentTuple; 4 | 5 | import com.datastax.driver.core.Statement; 6 | 7 | /** 8 | * The CqlTupleMapper interface is responsible 9 | * for defining the structure of mapping and retrieving tuples 10 | * into the Cassandra store. 11 | * 12 | * @param K the key to map and retrieve 13 | * @param V the value to map and retrieve 14 | * @author rlee 15 | */ 16 | public abstract interface CqlTupleMapper { 17 | public Statement map(K key, V value); 18 | 19 | public Statement map(TridentTuple tuple); 20 | 21 | public Statement retrieve(K key); 22 | } 23 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/CassandraCqlStateUpdaterTest.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import com.datastax.driver.core.Statement; 4 | import com.hmsonline.trident.cql.mappers.CqlTupleMapper; 5 | import junit.framework.TestCase; 6 | import org.junit.Test; 7 | import org.junit.runner.RunWith; 8 | import org.junit.runners.JUnit4; 9 | import storm.trident.operation.TridentCollector; 10 | import storm.trident.testing.MockTridentTuple; 11 | import storm.trident.tuple.TridentTuple; 12 | 13 | import java.util.ArrayList; 14 | import java.util.List; 15 | 16 | @RunWith(JUnit4.class) 17 | public class CassandraCqlStateUpdaterTest extends TestCase { 18 | 19 | @Test 20 | public void 
testTuplesPropagation() throws Exception { 21 | List tuples = getTridentTuples(); 22 | MockTridentCollector mockTridentCollector = new MockTridentCollector(); 23 | 24 | // propagateTuples=false 25 | CassandraCqlStateUpdater stateUpdaterNoTuplesPropagation = new CassandraCqlStateUpdater(new MockCqlTupleMapper()); 26 | stateUpdaterNoTuplesPropagation.updateState(new CassandraCqlState(null, null), tuples, mockTridentCollector); 27 | assertTrue(mockTridentCollector.emittedTuples.isEmpty()); 28 | 29 | // propagateTuples=true 30 | CassandraCqlStateUpdater stateUpdaterTuplesPropagation = new CassandraCqlStateUpdater(new MockCqlTupleMapper(), true); 31 | stateUpdaterTuplesPropagation.updateState(new CassandraCqlState(null, null), tuples, mockTridentCollector); 32 | assertEquals(mockTridentCollector.emittedTuples, tuples); 33 | } 34 | 35 | private List getTridentTuples() { 36 | List tuples = new ArrayList(); 37 | List mockFieldList = new ArrayList(); 38 | mockFieldList.add("testField"); 39 | TridentTuple tuple = new MockTridentTuple(mockFieldList, "testValue"); 40 | tuples.add(tuple); 41 | return tuples; 42 | } 43 | 44 | private static class MockTridentCollector implements TridentCollector { 45 | 46 | public List emittedTuples = new ArrayList<>(); 47 | 48 | @Override 49 | public void emit(List values) { 50 | emittedTuples.add(values); 51 | } 52 | 53 | @Override 54 | public void reportError(Throwable t) { 55 | // Do nothing. 56 | } 57 | } 58 | 59 | private static class MockCqlTupleMapper implements CqlTupleMapper { 60 | 61 | @Override 62 | public Statement map(Object key, Object value) { 63 | // Do nothing. 64 | return null; 65 | } 66 | 67 | @Override 68 | public Statement map(TridentTuple tuple) { 69 | // Do nothing. 70 | return null; 71 | } 72 | 73 | @Override 74 | public Statement retrieve(Object key) { 75 | // Do nothing. 
76 | return null; 77 | } 78 | } 79 | } -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/ConditionalUpdateTest.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; 4 | import static com.datastax.driver.core.querybuilder.QueryBuilder.set; 5 | import static com.datastax.driver.core.querybuilder.QueryBuilder.update; 6 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.KEYSPACE_NAME; 7 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.KEY_NAME; 8 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.TABLE_NAME; 9 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.VALUE_NAME; 10 | 11 | import org.junit.Test; 12 | import org.junit.runner.RunWith; 13 | import org.junit.runners.JUnit4; 14 | 15 | import com.datastax.driver.core.querybuilder.Update; 16 | 17 | /** 18 | * Test that demonstrates how to construct and use conditional updates. 
19 | */ 20 | @RunWith(JUnit4.class) 21 | public class ConditionalUpdateTest extends CqlTestEnvironment { 22 | //private static final Logger LOG = LoggerFactory.getLogger(ConditionalUpdateTest.class); 23 | public String APPLIED_COLUMN = "[applied]"; 24 | 25 | public ConditionalUpdateTest() { 26 | super(); 27 | } 28 | 29 | @Test 30 | public void testConditionalUpdates() throws Exception { 31 | Update initialStatement = update(KEYSPACE_NAME, TABLE_NAME); 32 | initialStatement.with(set(VALUE_NAME, 10)).where(eq(KEY_NAME, "DE")); 33 | this.executeAndAssert(initialStatement, "DE", 10); 34 | 35 | // Now let's conditionally update where it is true 36 | Update updateStatement = update(KEYSPACE_NAME, TABLE_NAME); 37 | updateStatement.with(set(VALUE_NAME, 15)).where(eq(KEY_NAME, "DE")).onlyIf(eq(VALUE_NAME, 10)); 38 | this.executeAndAssert(updateStatement, "DE", 15); 39 | 40 | // Now let's conditionally update where it is false 41 | Update conditionalStatement = update(KEYSPACE_NAME, TABLE_NAME); 42 | conditionalStatement.with(set(VALUE_NAME, 20)).where(eq(KEY_NAME, "DE")).onlyIf(eq(VALUE_NAME, 10)); 43 | this.executeAndAssert(conditionalStatement, "DE", 15); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/ConstructorConfiguredCqlClientFactoryTest.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import junit.framework.Assert; 5 | import org.junit.Test; 6 | import org.junit.runner.RunWith; 7 | import org.junit.runners.JUnit4; 8 | import java.net.InetSocketAddress; 9 | 10 | @RunWith(JUnit4.class) 11 | public class ConstructorConfiguredCqlClientFactoryTest extends CqlClientFactoryTestConstants { 12 | 13 | @Test 14 | public void testGetCluster() { 15 | final CqlClientFactory factory = 16 | new ConstructorConfiguredCqlClientFactory(HOSTS, 17 | CLUSTER_NAME, 18 | 
// ---- ConstructorConfiguredCqlClientFactoryTest.testGetCluster (continued) ----
                        DEFAULT_CONSISTENCY_LEVEL,
                        DEFAULT_SERIAL_CONSISTENCY_LEVEL,
                        COMPRESSION);

        final Cluster.Builder clusterBuilder = factory.getClusterBuilder();
        Assert.assertEquals(CLUSTER_NAME, clusterBuilder.getClusterName());
        final InetSocketAddress first = clusterBuilder.getContactPoints().get(0);
        final InetSocketAddress second = clusterBuilder.getContactPoints().get(1);
        Assert.assertEquals("localhost", first.getHostName());
        Assert.assertEquals(9042, first.getPort());
        Assert.assertEquals("remotehost", second.getHostName());
        Assert.assertEquals(1234, second.getPort());
    }

}

// -------- src/test/java/com/hmsonline/trident/cql/CqlClientFactoryTestConstants.java --------

package com.hmsonline.trident.cql;

import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.ProtocolOptions;

/**
 * Shared fixture constants for the CQL client factory tests.
 */
public class CqlClientFactoryTestConstants {

    // Two contact points: one on the default port, one with an explicit port.
    public static final String HOSTS = "localhost,remotehost:1234";
    public static final String CLUSTER_NAME = "Test Cluster";
    public static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.LOCAL_QUORUM;
    public static final ConsistencyLevel DEFAULT_SERIAL_CONSISTENCY_LEVEL = ConsistencyLevel.SERIAL;
    public static final ProtocolOptions.Compression COMPRESSION = ProtocolOptions.Compression.LZ4;
    // Timeouts in milliseconds; kept as strings because they are passed through
    // the (string-valued) storm configuration map in the factory tests.
    public static final String READ_TIMEOUT = "10000";
    public static final String CONNECT_TIMEOUT = "2000";
    public static final String DATA_CENTER_NAME = "philly";

}

// -------- src/test/java/com/hmsonline/trident/cql/CqlTestEnvironment.java --------

package com.hmsonline.trident.cql;

import static
com.datastax.driver.core.querybuilder.QueryBuilder.eq; 4 | import static com.datastax.driver.core.querybuilder.QueryBuilder.select; 5 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.KEYSPACE_NAME; 6 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.KEY_NAME; 7 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.TABLE_NAME; 8 | import static com.hmsonline.trident.cql.example.sales.SalesMapper.VALUE_NAME; 9 | import static org.junit.Assert.assertEquals; 10 | 11 | import java.util.HashMap; 12 | import java.util.Map; 13 | 14 | import org.cassandraunit.CassandraCQLUnit; 15 | import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; 16 | import org.junit.After; 17 | import org.junit.Before; 18 | import org.junit.Rule; 19 | import org.slf4j.Logger; 20 | import org.slf4j.LoggerFactory; 21 | 22 | import com.datastax.driver.core.ResultSet; 23 | import com.datastax.driver.core.Row; 24 | import com.datastax.driver.core.Session; 25 | import com.datastax.driver.core.Statement; 26 | import com.datastax.driver.core.querybuilder.Select; 27 | 28 | public class CqlTestEnvironment { 29 | private static final Logger LOG = LoggerFactory.getLogger(CqlTestEnvironment.class); 30 | 31 | public CqlClientFactory clientFactory; 32 | public Map configuration = new HashMap(); 33 | 34 | @Rule 35 | public static CassandraCQLUnit cqlUnit; 36 | 37 | public CqlTestEnvironment() { 38 | setup(); 39 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, "localhost"); 40 | clientFactory = new CqlUnitClientFactory(configuration, cqlUnit); 41 | } 42 | 43 | public Session getSession(){ 44 | return clientFactory.getSession(); 45 | } 46 | 47 | @Before 48 | public void setup(){ 49 | if (cqlUnit == null){ 50 | cqlUnit = new CassandraCQLUnit(new ClassPathCQLDataSet("schema.cql","mykeyspace")); 51 | } 52 | } 53 | 54 | @After 55 | public void tearDown() throws InterruptedException{ 56 | LOG.debug("Tearing Down."); 57 | 
cqlUnit.cluster.close(); 58 | } 59 | 60 | public void assertValue(String k, Integer expectedValue) { 61 | Select selectStatement = select().column("v").from(KEYSPACE_NAME, TABLE_NAME); 62 | selectStatement.where(eq(KEY_NAME, k)); 63 | ResultSet results = getSession().execute(selectStatement); 64 | LOG.debug("EXECUTING [{}]", selectStatement.toString()); 65 | Integer actualValue = results.one().getInt(VALUE_NAME); 66 | assertEquals(expectedValue, actualValue); 67 | } 68 | 69 | public void executeAndAssert(Statement statement, String k, Integer expectedValue) { 70 | LOG.debug("EXECUTING [{}]", statement.toString()); 71 | ResultSet results = getSession().execute(statement); 72 | Row row = results.one(); 73 | if (row != null) 74 | LOG.debug("APPLIED?[{}]", row.getBool("[applied]")); 75 | assertValue(k, expectedValue); 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/CqlUnitClientFactory.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.util.Map; 4 | 5 | import org.cassandraunit.CassandraCQLUnit; 6 | 7 | import com.datastax.driver.core.Session; 8 | 9 | public class CqlUnitClientFactory extends MapConfiguredCqlClientFactory { 10 | private static final long serialVersionUID = 1L; 11 | public CassandraCQLUnit cqlUnit; 12 | 13 | public CqlUnitClientFactory(final Map configuration, CassandraCQLUnit cqlUnit) { 14 | super(configuration); 15 | this.cqlUnit = cqlUnit; 16 | } 17 | 18 | @Override 19 | public Session getSession(){ 20 | if (cqlUnit.session == null) 21 | throw new RuntimeException("No session established in CQL_UNIT"); 22 | return cqlUnit.session; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/MapConfiguredCqlClientFactoryTest.java: 
-------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql; 2 | 3 | import java.net.InetSocketAddress; 4 | import java.util.HashMap; 5 | import java.util.Map; 6 | 7 | import junit.framework.Assert; 8 | 9 | import org.junit.Test; 10 | 11 | import com.datastax.driver.core.Cluster; 12 | import com.datastax.driver.core.ProtocolOptions; 13 | 14 | public class MapConfiguredCqlClientFactoryTest extends CqlClientFactoryTestConstants { 15 | @Test 16 | public void testGetClusterBuilder() throws Exception { 17 | final Map configuration = new HashMap(); 18 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, HOSTS); 19 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CLUSTER_NAME, CLUSTER_NAME); 20 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_READ_TIMEOUT, READ_TIMEOUT); 21 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CONNECT_TIMEOUT, CONNECT_TIMEOUT); 22 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_LOCAL_DATA_CENTER_NAME, DATA_CENTER_NAME); 23 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CONSISTENCY, DEFAULT_CONSISTENCY_LEVEL.name()); 24 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_SERIAL_CONSISTENCY, DEFAULT_SERIAL_CONSISTENCY_LEVEL.name()); 25 | 26 | final CqlClientFactory factory = 27 | new MapConfiguredCqlClientFactory(configuration); 28 | 29 | final Cluster.Builder clusterBuilder = factory.getClusterBuilder(); 30 | Assert.assertEquals(CLUSTER_NAME, clusterBuilder.getClusterName()); 31 | final InetSocketAddress first = clusterBuilder.getContactPoints().get(0); 32 | final InetSocketAddress second = clusterBuilder.getContactPoints().get(1); 33 | Assert.assertEquals("localhost", first.getHostName()); 34 | Assert.assertEquals(9042, first.getPort()); 35 | Assert.assertEquals("remotehost", second.getHostName()); 36 | Assert.assertEquals(1234, second.getPort()); 
37 | Assert.assertEquals(Integer.parseInt(CONNECT_TIMEOUT), clusterBuilder.getConfiguration().getSocketOptions().getConnectTimeoutMillis()); 38 | Assert.assertEquals(Integer.parseInt(READ_TIMEOUT), clusterBuilder.getConfiguration().getSocketOptions().getReadTimeoutMillis()); 39 | Assert.assertEquals(DEFAULT_CONSISTENCY_LEVEL, clusterBuilder.getConfiguration().getQueryOptions().getConsistencyLevel()); 40 | Assert.assertEquals(DEFAULT_SERIAL_CONSISTENCY_LEVEL, clusterBuilder.getConfiguration().getQueryOptions().getSerialConsistencyLevel()); 41 | Assert.assertEquals(ProtocolOptions.Compression.NONE, clusterBuilder.getConfiguration().getProtocolOptions().getCompression()); 42 | } 43 | 44 | @Test 45 | public void testCompression() { 46 | final Map configuration = new HashMap(); 47 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, HOSTS); 48 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_COMPRESSION, COMPRESSION.name()); 49 | final CqlClientFactory factory = 50 | new MapConfiguredCqlClientFactory(configuration); 51 | Assert.assertEquals(COMPRESSION, factory.getCluster().getConfiguration().getProtocolOptions().getCompression()); 52 | } 53 | 54 | @Test 55 | public void testQueryLogger() { 56 | final Map configuration = new HashMap(); 57 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, HOSTS); 58 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_QUERY_LOGGER_CONSTANT_THRESHOLD, "5000"); 59 | final CqlClientFactory factory = 60 | new MapConfiguredCqlClientFactory(configuration); 61 | factory.getCluster(); 62 | // Cluster doesn't let you see get to the list of registered listeners and we have no 63 | // mocking framework in the pom as of now. 
// ---- MapConfiguredCqlClientFactoryTest (end) ----
    }
}

// -------- src/test/java/com/hmsonline/trident/cql/example/sales/SalesEmitter.java --------

package com.hmsonline.trident.cql.example.sales;

import storm.trident.operation.TridentCollector;
import storm.trident.spout.ITridentSpout.Emitter;
import storm.trident.topology.TransactionAttempt;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Test emitter that produces batches of 100 random (state, product, price)
 * sales tuples and counts successful transactions.
 */
public class SalesEmitter implements Emitter, Serializable {
    private static final long serialVersionUID = 1L;
    public static AtomicInteger successfulTransactions = new AtomicInteger(0);
    public static AtomicInteger uids = new AtomicInteger(0);
    private Random generator = new Random();
    private String[] states = {"DE", "MD", "PA", "NJ", "NY"};
    private String[] products = {"lego", "brick", "bike", "horn"};

    @Override
    public void emitBatch(TransactionAttempt tx, Long coordinatorMeta,
            TridentCollector collector) {
        for (int i = 0; i < 100; i++) {
            List sale = new ArrayList();
            // BUGFIX: bound the random index by the array length; the previous
            // hard-coded nextInt(4) could never select the last state ("NY").
            sale.add(states[generator.nextInt(states.length)]);
            sale.add(products[generator.nextInt(products.length)]);
            sale.add(generator.nextInt(100));
            collector.emit(sale);
        }
    }

    @Override
    public void success(TransactionAttempt tx) {
        successfulTransactions.incrementAndGet();
    }

    @Override
    public void close() {
        // No resources to release.
    }
}

// -------- src/test/java/com/hmsonline/trident/cql/example/sales/SalesMapper.java --------

package com.hmsonline.trident.cql.example.sales;

import static
com.datastax.driver.core.querybuilder.QueryBuilder.eq; 4 | import static com.datastax.driver.core.querybuilder.QueryBuilder.select; 5 | import static com.datastax.driver.core.querybuilder.QueryBuilder.set; 6 | 7 | import java.io.Serializable; 8 | import java.util.List; 9 | 10 | import storm.trident.tuple.TridentTuple; 11 | 12 | import com.datastax.driver.core.Row; 13 | import com.datastax.driver.core.Statement; 14 | import com.datastax.driver.core.querybuilder.QueryBuilder; 15 | import com.datastax.driver.core.querybuilder.Select; 16 | import com.datastax.driver.core.querybuilder.Update; 17 | import com.hmsonline.trident.cql.incremental.CqlIncrementMapper; 18 | import com.hmsonline.trident.cql.incremental.PersistedState; 19 | 20 | public class SalesMapper implements CqlIncrementMapper, Serializable { 21 | private static final long serialVersionUID = 1L; 22 | // private static final Logger LOG = 23 | // LoggerFactory.getLogger(SalesAnalyticsMapper.class); 24 | 25 | // values assumed by the schema.cql; should make customizable by constructor 26 | public static final String KEYSPACE_NAME = "mykeyspace"; 27 | public static final String TABLE_NAME = "incrementaltable"; 28 | public static final String KEY_NAME = "k"; 29 | public static final String VALUE_NAME = "v"; 30 | 31 | @Override 32 | public Statement read(String key) { 33 | Select statement = select().column(VALUE_NAME).from(KEYSPACE_NAME, TABLE_NAME); 34 | statement.where(eq(KEY_NAME, key)); 35 | return statement; 36 | } 37 | 38 | @Override 39 | public Statement update(String key, Number value, PersistedState state, long txid, int partition) { 40 | Update update = QueryBuilder.update(KEYSPACE_NAME, TABLE_NAME); 41 | update.with(set(VALUE_NAME, value)).where(eq(KEY_NAME, key)); 42 | if (state.getValue() != null) { 43 | update.onlyIf(eq(VALUE_NAME, state.getValue())); 44 | } 45 | return update; 46 | } 47 | 48 | @Override 49 | public SalesState currentState(String key, List rows) { 50 | if (rows.size() == 0) { 51 | 
return new SalesState(null, null); 52 | } else { 53 | return new SalesState(rows.get(0).getInt(VALUE_NAME), ""); 54 | } 55 | } 56 | 57 | @Override 58 | public String getKey(TridentTuple tuple) { 59 | String state = tuple.getString(1); 60 | return state; 61 | } 62 | 63 | @Override 64 | public Number getValue(TridentTuple tuple) { 65 | return tuple.getInteger(0); 66 | } 67 | 68 | } 69 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/example/sales/SalesSpout.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.sales; 2 | 3 | import backtype.storm.spout.SpoutOutputCollector; 4 | import backtype.storm.task.TopologyContext; 5 | import backtype.storm.tuple.Fields; 6 | 7 | import com.hmsonline.trident.cql.example.simpleupdate.DefaultCoordinator; 8 | 9 | import storm.trident.spout.ITridentSpout; 10 | 11 | import java.util.Map; 12 | 13 | @SuppressWarnings("rawtypes") 14 | public class SalesSpout implements ITridentSpout { 15 | private static final long serialVersionUID = 1L; 16 | SpoutOutputCollector collector; 17 | BatchCoordinator coordinator = new DefaultCoordinator(); 18 | Emitter emitter = new SalesEmitter(); 19 | 20 | @Override 21 | public BatchCoordinator getCoordinator(String txStateId, Map conf, TopologyContext context) { 22 | return coordinator; 23 | } 24 | 25 | @Override 26 | public Emitter getEmitter(String txStateId, Map conf, TopologyContext context) { 27 | return emitter; 28 | } 29 | 30 | @Override 31 | public Map getComponentConfiguration() { 32 | return null; 33 | } 34 | 35 | @Override 36 | public Fields getOutputFields() { 37 | return new Fields("state", "product", "price"); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/example/sales/SalesState.java: 
-------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.sales; 2 | 3 | import com.hmsonline.trident.cql.incremental.PersistedState; 4 | 5 | public class SalesState implements PersistedState { 6 | Number value; 7 | String partitionsKey; 8 | public SalesState(Number value,String partitionsKey){ 9 | this.value = value; 10 | this.partitionsKey = partitionsKey; 11 | } 12 | 13 | @Override 14 | public Number getValue() { 15 | return value; 16 | } 17 | 18 | @Override 19 | public String getPartitionKey() { 20 | return partitionsKey; 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/example/sales/SalesTopology.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.sales; 2 | 3 | import org.cassandraunit.CassandraCQLUnit; 4 | import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; 5 | import org.junit.Rule; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import storm.trident.Stream; 10 | import storm.trident.TridentTopology; 11 | import storm.trident.operation.builtin.Sum; 12 | import backtype.storm.Config; 13 | import backtype.storm.LocalCluster; 14 | import backtype.storm.generated.StormTopology; 15 | import backtype.storm.tuple.Fields; 16 | 17 | import com.hmsonline.trident.cql.MapConfiguredCqlClientFactory; 18 | import com.hmsonline.trident.cql.incremental.CassandraCqlIncrementalStateFactory; 19 | import com.hmsonline.trident.cql.incremental.CassandraCqlIncrementalStateUpdater; 20 | 21 | public class SalesTopology { 22 | private static final Logger LOG = LoggerFactory.getLogger(SalesTopology.class); 23 | @Rule 24 | public static CassandraCQLUnit cqlUnit = new CassandraCQLUnit(new ClassPathCQLDataSet("schema.cql","mykeyspace")); 25 | 26 | public static StormTopology buildTopology() { 27 | LOG.info("Building 
topology."); 28 | TridentTopology topology = new TridentTopology(); 29 | SalesSpout spout = new SalesSpout(); 30 | Stream inputStream = topology.newStream("sales", spout); 31 | SalesMapper mapper = new SalesMapper(); 32 | inputStream.partitionPersist( 33 | new CassandraCqlIncrementalStateFactory(new Sum(), mapper), 34 | new Fields("price", "state", "product"), 35 | new CassandraCqlIncrementalStateUpdater()); 36 | return topology.build(); 37 | } 38 | 39 | public static void main(String[] args) throws Exception { 40 | final Config configuration = new Config(); 41 | configuration.put(MapConfiguredCqlClientFactory.TRIDENT_CASSANDRA_CQL_HOSTS, "localhost"); 42 | final LocalCluster cluster = new LocalCluster(); 43 | LOG.info("Submitting topology."); 44 | cluster.submitTopology("cqlexample", configuration, buildTopology()); 45 | LOG.info("Topology submitted."); 46 | Thread.sleep(600000); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/example/simpleupdate/DefaultCoordinator.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.simpleupdate; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | import storm.trident.spout.ITridentSpout.BatchCoordinator; 6 | 7 | import java.io.Serializable; 8 | 9 | public class DefaultCoordinator implements BatchCoordinator, Serializable { 10 | private static final long serialVersionUID = 1L; 11 | private static final Logger LOG = LoggerFactory 12 | .getLogger(DefaultCoordinator.class); 13 | 14 | @Override 15 | public boolean isReady(long txid) { 16 | return true; 17 | } 18 | 19 | @Override 20 | public void close() { 21 | } 22 | 23 | @Override 24 | public Long initializeTransaction(long txid, Long prevMetadata, 25 | Long currMetadata) { 26 | LOG.info("Initializing Transaction [" + txid + "]"); 27 | return null; 28 | } 29 | 30 | @Override 31 | public void 
success(long txid) { 32 | LOG.info("Successful Transaction [" + txid + "]"); 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/example/simpleupdate/SimpleUpdateEmitter.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.simpleupdate; 2 | 3 | import storm.trident.operation.TridentCollector; 4 | import storm.trident.spout.ITridentSpout.Emitter; 5 | import storm.trident.topology.TransactionAttempt; 6 | 7 | import java.io.Serializable; 8 | import java.util.ArrayList; 9 | import java.util.List; 10 | import java.util.concurrent.atomic.AtomicInteger; 11 | 12 | public class SimpleUpdateEmitter implements Emitter, Serializable { 13 | private static final long serialVersionUID = 1L; 14 | public static AtomicInteger successfulTransactions = new AtomicInteger(0); 15 | public static AtomicInteger uids = new AtomicInteger(0); 16 | 17 | @Override 18 | public void emitBatch(TransactionAttempt tx, Long coordinatorMeta, 19 | TridentCollector collector) { 20 | for (int i = 0; i < 100; i++) { 21 | List message = new ArrayList(); 22 | message.add(Integer.toString(i)); 23 | collector.emit(message); 24 | } 25 | } 26 | 27 | @Override 28 | public void success(TransactionAttempt tx) { 29 | successfulTransactions.incrementAndGet(); 30 | } 31 | 32 | @Override 33 | public void close() { 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/test/java/com/hmsonline/trident/cql/example/simpleupdate/SimpleUpdateMapper.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.simpleupdate; 2 | 3 | import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; 4 | import static com.datastax.driver.core.querybuilder.QueryBuilder.set; 5 | import static 
package com.hmsonline.trident.cql.example.simpleupdate;

import backtype.storm.task.TopologyContext;
import backtype.storm.tuple.Fields;
import storm.trident.spout.ITridentSpout;

import java.util.Map;

/**
 * Trident spout for the simple-update example. Emits single-field ("test")
 * tuples via {@link SimpleUpdateEmitter}, coordinated by the no-op
 * {@link DefaultCoordinator}. Both collaborators are created once and shared
 * across all getCoordinator()/getEmitter() calls.
 */
@SuppressWarnings("rawtypes")
public class SimpleUpdateSpout implements ITridentSpout {
    private static final long serialVersionUID = 1L;
    // Note: the original version also declared an unused SpoutOutputCollector
    // field; it was dead code and has been removed.
    BatchCoordinator coordinator = new DefaultCoordinator();
    Emitter emitter = new SimpleUpdateEmitter();

    @Override
    public BatchCoordinator getCoordinator(String txStateId, Map conf, TopologyContext context) {
        return coordinator;
    }

    @Override
    public Emitter getEmitter(String txStateId, Map conf, TopologyContext context) {
        return emitter;
    }

    @Override
    public Map getComponentConfiguration() {
        // No component-specific configuration for this example.
        return null;
    }

    @Override
    public Fields getOutputFields() {
        return new Fields("test");
    }
}
package com.hmsonline.trident.cql.example.wordcount;

import java.io.Serializable;
import java.util.List;

import storm.trident.tuple.TridentTuple;

import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.hmsonline.trident.cql.mappers.CqlRowMapper;

/**
 * Maps (word, source) -> count state to CQL statements for the word-count
 * example. The composite key list is ordered [word, source] everywhere:
 * map() writes keys.get(0) to the "word" column and keys.get(1) to the
 * "source" column, and retrieve() binds them in the same order.
 */
public class WordCountAndSourceMapper implements CqlRowMapper<List<String>, Number>, Serializable {
    private static final long serialVersionUID = 1L;

    public static final String KEYSPACE_NAME = "mykeyspace";
    public static final String TABLE_NAME = "wordcounttable";
    public static final String SOURCE_KEY_NAME = "source";
    public static final String WORD_KEY_NAME = "word";
    public static final String VALUE_NAME = "count";

    /**
     * Builds an INSERT (an upsert in CQL) storing {@code value} under the
     * composite key [word, source].
     */
    @Override
    public Statement map(List<String> keys, Number value) {
        Insert statement = QueryBuilder.insertInto(KEYSPACE_NAME, TABLE_NAME);
        statement.value(WORD_KEY_NAME, keys.get(0));
        statement.value(SOURCE_KEY_NAME, keys.get(1));
        statement.value(VALUE_NAME, value);
        return statement;
    }

    /**
     * Builds a SELECT for the row stored under the composite key
     * [word, source].
     *
     * Bug fix: the previous version bound keys.get(0) to the "source" column
     * and keys.get(1) to the "word" column, the reverse of the ordering that
     * map() uses when writing, so lookups could never find the written rows.
     */
    @Override
    public Statement retrieve(List<String> keys) {
        // Retrieve all the columns associated with the keys
        Select statement = QueryBuilder.select().column(SOURCE_KEY_NAME)
                .column(WORD_KEY_NAME).column(VALUE_NAME)
                .from(KEYSPACE_NAME, TABLE_NAME);
        statement.where(QueryBuilder.eq(WORD_KEY_NAME, keys.get(0)));
        statement.where(QueryBuilder.eq(SOURCE_KEY_NAME, keys.get(1)));
        return statement;
    }

    /** Extracts the count column; boxing the int yields the Number result. */
    @Override
    public Number getValue(Row row) {
        return row.getInt(VALUE_NAME);
    }

    /** Tuple-based mapping is not used by this example. */
    @Override
    public Statement map(TridentTuple tuple) {
        return null;
    }

}
/src/test/java/com/hmsonline/trident/cql/example/wordcount/WordCountTopology.java: -------------------------------------------------------------------------------- 1 | package com.hmsonline.trident.cql.example.wordcount; 2 | 3 | import backtype.storm.Config; 4 | import backtype.storm.LocalCluster; 5 | import backtype.storm.LocalDRPC; 6 | import backtype.storm.generated.StormTopology; 7 | import backtype.storm.tuple.Fields; 8 | import backtype.storm.tuple.Values; 9 | import com.hmsonline.trident.cql.CassandraCqlMapState; 10 | import com.hmsonline.trident.cql.MapConfiguredCqlClientFactory; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | import storm.trident.TridentState; 14 | import storm.trident.TridentTopology; 15 | import storm.trident.operation.builtin.FilterNull; 16 | import storm.trident.operation.builtin.MapGet; 17 | import storm.trident.operation.builtin.Sum; 18 | import storm.trident.testing.FixedBatchSpout; 19 | import storm.trident.testing.Split; 20 | 21 | public class WordCountTopology { 22 | private static final Logger LOG = LoggerFactory.getLogger(WordCountTopology.class); 23 | 24 | @SuppressWarnings("unchecked") 25 | public static StormTopology buildWordCountAndSourceTopology(LocalDRPC drpc) { 26 | LOG.info("Building topology."); 27 | TridentTopology topology = new TridentTopology(); 28 | 29 | String source1 = "spout1"; 30 | String source2 = "spout2"; 31 | FixedBatchSpout spout1 = new FixedBatchSpout(new Fields("sentence", "source"), 3, 32 | new Values("the cow jumped over the moon", source1), 33 | new Values("the man went to the store and bought some candy", source1), 34 | new Values("four score and four years ago", source2), 35 | new Values("how much wood can a wood chuck chuck", source2)); 36 | spout1.setCycle(true); 37 | 38 | TridentState wordCounts = 39 | topology.newStream("spout1", spout1) 40 | .each(new Fields("sentence"), new Split(), new Fields("word")) 41 | .groupBy(new Fields("word", "source")) 42 | 
package com.hmsonline.trident.cql.incremental;

import static com.datastax.driver.core.querybuilder.QueryBuilder.delete;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.hmsonline.trident.cql.example.sales.SalesMapper.KEYSPACE_NAME;
import static com.hmsonline.trident.cql.example.sales.SalesMapper.KEY_NAME;
import static com.hmsonline.trident.cql.example.sales.SalesMapper.TABLE_NAME;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

import storm.trident.operation.builtin.Sum;
import storm.trident.testing.MockTridentTuple;
import storm.trident.tuple.TridentTuple;

import com.datastax.driver.core.querybuilder.Delete;
import com.hmsonline.trident.cql.CqlTestEnvironment;
import com.hmsonline.trident.cql.example.sales.SalesMapper;

/**
 * Tests for the incremental CQL state: verifies that per-batch aggregates
 * are summed into the persisted value across separate commits, and that a
 * single commit aggregates tuples per key before writing.
 */
@RunWith(JUnit4.class)
public class IncrementalStateTest extends CqlTestEnvironment {
    private CassandraCqlIncrementalStateFactory stateFactory;
    private CassandraCqlIncrementalStateUpdater stateUpdater;
    // Tuple schema used throughout: (price, state, product).
    private static List<String> FIELDS = Arrays.asList("price", "state", "product");

    public IncrementalStateTest() {
        super();
        SalesMapper mapper = new SalesMapper();
        stateFactory = new CassandraCqlIncrementalStateFactory(new Sum(), mapper);
        // Point the factory at the embedded-Cassandra client from CqlTestEnvironment.
        stateFactory.setCqlClientFactory(clientFactory);
        stateUpdater = new CassandraCqlIncrementalStateUpdater();
    }

    /** Removes any previously persisted value for the "MD" key so each test starts clean. */
    private void clearState() {
        Delete deleteStatement = delete().all().from(KEYSPACE_NAME, TABLE_NAME);
        deleteStatement.where(eq(KEY_NAME, "MD"));
        clientFactory.getSession().execute(deleteStatement);
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testStateUpdates() throws Exception {
        clearState();

        // Let's get some initial state in the database

        CassandraCqlIncrementalState state =
                (CassandraCqlIncrementalState) stateFactory.makeState(configuration, null, 5, 50);
        incrementState(state);
        assertValue("MD", 100);

        // A second state instance must read the persisted 100 and add to it.
        CassandraCqlIncrementalState state1 =
                (CassandraCqlIncrementalState) stateFactory.makeState(configuration, null, 55, 122);
        incrementState(state1);
        assertValue("MD", 200);
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testValueAggregations() {
        clearState();
        CassandraCqlIncrementalState state = (CassandraCqlIncrementalState) stateFactory.makeState(configuration, null, 5, 50);
        state.beginCommit(Long.MAX_VALUE);
        // Two "PA" tuples in the same batch must be combined (50 + 10 = 60) before commit.
        state.aggregateValue(new MockTridentTuple(FIELDS, Arrays.asList(100, "MD", "bike")));
        state.aggregateValue(new MockTridentTuple(FIELDS, Arrays.asList(50, "PA", "bike")));
        state.aggregateValue(new MockTridentTuple(FIELDS, Arrays.asList(10, "PA", "bike")));
        state.commit(Long.MAX_VALUE);
        assertValue("MD", 100);
        assertValue("PA", 60);
    }

    /** Runs one full begin/update/commit cycle adding 100 to the "MD" key. */
    private void incrementState(CassandraCqlIncrementalState state) {
        MockTridentTuple mockTuple = new MockTridentTuple(FIELDS, Arrays.asList(100, "MD", "bike"));
        List<TridentTuple> mockTuples = new ArrayList<TridentTuple>();
        mockTuples.add(mockTuple);
        state.beginCommit(Long.MAX_VALUE);
        stateUpdater.updateState(state, mockTuples, null);
        state.commit(Long.MAX_VALUE);
    }
}
7 | log4j.appender.stdout.layout.ConversionPattern=%5p (%C:%L) - %m%n 8 | 9 | log4j.logger.backtype.storm=WARN 10 | log4j.logger.clojure.contrib=WARN 11 | log4j.logger.org.apache=WARN 12 | log4j.logger.com.griddelta=DEBUG 13 | log4j.logger.com.netflix=WARN 14 | 15 | log4j.logger.backtype.storm.daemon.nimbus=INFO 16 | 17 | -------------------------------------------------------------------------------- /src/test/resources/schema.cql: -------------------------------------------------------------------------------- 1 | CREATE TABLE mykeyspace.mytable ( 2 | t int, 3 | col1 text, 4 | primary key (t) 5 | ); 6 | 7 | CREATE TABLE mykeyspace.incrementaltable ( 8 | k text, 9 | v int, 10 | primary key (k) 11 | ); 12 | 13 | CREATE TABLE mykeyspace.wordcounttable ( 14 | source text, 15 | word text, 16 | count int, 17 | primary key(source, word) 18 | ); 19 | 20 | CREATE TABLE mykeyspace.multikeytable ( 21 | k1 text, 22 | k2 text, 23 | v1 int, 24 | primary key (k1, k2) 25 | ); 26 | --------------------------------------------------------------------------------