├── .gitignore ├── LICENSE ├── README.md ├── checkstyle_include.xml ├── findbugs_exclude.xml ├── import.sh ├── pom.xml └── src ├── main ├── assemblies │ └── plugin.xml ├── java │ └── org │ │ └── elasticsearch │ │ ├── plugin │ │ └── river │ │ │ └── hbase │ │ │ └── HBaseRiverPlugin.java │ │ └── river │ │ └── hbase │ │ ├── HBaseCallbackLogger.java │ │ ├── HBaseParser.java │ │ ├── HBaseRiver.java │ │ └── HBaseRiverModule.java └── resources │ └── es-plugin.properties └── test └── java └── org └── elasticsearch └── river └── hbase ├── HBaseParserTest.java └── HBaseRiverTest.java /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .DS_Store 3 | .project 4 | .classpath 5 | /.checkstyle 6 | /test-output 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | Elasticsearch-HBase-River 2 | ========================== 3 | 4 | An import river similar to the Elasticsearch MySQL river. 5 | 6 | If you're looking for an alternative solution that uses the core HBase libraries and HBase replication for moving data, you can find one here: 7 | https://github.com/posix4e/Elasticsearch-HBase-River 8 | 9 | # Building 10 | 11 | To build the plugin you need to have Maven installed. Check out the project and run "mvn package" in the project directory. The plugin will then be available under target/releases as a .zip file. 12 | 13 | # Installation 14 | 15 | Copy the .zip file onto every Elasticsearch server that should be using the plugin and run the "plugin" script that comes with Elasticsearch in the bin folder. 16 | 17 | An example of how one would call the plugin script: 18 | 19 | /my/elasticsearch/bin/plugin install river-hbase -url file:///path/to/plugin/river-hbase.zip 20 | 21 | The plugin needs to be installed on all nodes of the ES cluster. 22 | 23 | For more info on plugins check out http://www.elasticsearch.org/guide/reference/modules/plugins.html 24 | 25 | # Usage 26 | 27 | Check out the import.sh script, which is used to initialize the HBase river with all necessary config data. 28 | 29 | More info on how to use rivers can be found here: http://www.elasticsearch.org/guide/reference/river/ 30 |
-------------------------------------------------------------------------------- /checkstyle_include.xml: --------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /findbugs_exclude.xml: --------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /import.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | JSON=$(cat <
-------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4 | ElasticSearch HBase River 5 | 4.0.0 6 | org.elasticsearch 7 | rivers-hbase 8 | jar 9 | HBase River for ElasticSearch 10 | 2013 11 | 1.0.0-SNAPSHOT 12 | 13 | 14 | UTF-8 15 | 0.20.5 16 | 1.4.1 17 | 18 | 6.8 19 | 0.999.19 20 | 2.5.2 21 | 2.9.1 22 | 23 | 2.4 24 | 2.4 25 | 3.0 26 | 2.12 27 | 28 | 29 | 30 | scm:git://github.com/mallocator/Elasticsearch-HBase-River.git 31 | scm:git://github.com/mallocator/Elasticsearch-HBase-River.git 32
| scm:git://github.com/mallocator/Elasticsearch-HBase-River.git 33 | 34 | 35 | 36 | 37 | org.elasticsearch 38 | elasticsearch 39 | ${elasticsearch.version} 40 | provided 41 | 42 | 43 | org.hbase 44 | asynchbase 45 | ${hbase.async.version} 46 | 47 | 48 | 49 | 50 | com.googlecode.jmockit 51 | jmockit 52 | ${version.jmockit} 53 | test 54 | 55 | 56 | 57 | org.testng 58 | testng 59 | ${version.testng} 60 | test 61 | 62 | 63 | 64 | 65 | 66 | org.apache.maven.plugins 67 | maven-compiler-plugin 68 | ${mavenCompilerPlugin} 69 | 70 | 1.6 71 | 1.6 72 | -Xlint 73 | true 74 | true 75 | 76 | 77 | 78 | org.apache.maven.plugins 79 | maven-jar-plugin 80 | ${mavenJarPluginVersion} 81 | 82 | 83 | 84 | true 85 | 86 | 87 | ${project.artifactId} 88 | ${maven.build.timestamp} 89 | ${buildNumber} 90 | 91 | 92 | 93 | 94 | 95 | maven-assembly-plugin 96 | ${mavenAssemblyPlugin} 97 | 98 | false 99 | ${project.build.directory}/releases/ 100 | 101 | ${basedir}/src/main/assemblies/plugin.xml 102 | 103 | 104 | 105 | 106 | package 107 | 108 | single 109 | 110 | 111 | 112 | 113 | 114 | org.apache.maven.plugins 115 | maven-surefire-plugin 116 | ${version.maven.surefire} 117 | 118 | -javaagent:"${settings.localRepository}"/com/googlecode/jmockit/jmockit/${version.jmockit}/jmockit-${version.jmockit}.jar -Xmx512m -XX:-UseSplitVerifier 119 | once 120 | 121 | 122 | reporter 123 | org.testng.reporters.XMLReporter 124 | 125 | 126 | 127 | 128 | 129 | org.codehaus.mojo 130 | findbugs-maven-plugin 131 | ${version.findbugs} 132 | 133 | false 134 | true 135 | true 136 | ${basedir}/findbugs_exclude.xml 137 | true 138 | 139 | 140 | 141 | install 142 | 143 | findbugs 144 | 145 | 146 | 147 | 148 | 149 | org.apache.maven.plugins 150 | maven-checkstyle-plugin 151 | ${version.checkstyle} 152 | 153 | ${basedir}/checkstyle_include.xml 154 | 155 | 156 | 157 | install 158 | 159 | checkstyle 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | -------------------------------------------------------------------------------- /src/main/assemblies/plugin.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | plugin 4 | 5 | zip 6 | 7 | false 8 | 9 | 10 | / 11 | true 12 | true 13 | 14 | org.elasticsearch:elasticsearch 15 | 16 | 17 | 18 | / 19 | true 20 | true 21 | 22 | org.hbase:asynchbase 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /src/main/java/org/elasticsearch/plugin/river/hbase/HBaseRiverPlugin.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.plugin.river.hbase; 2 | 3 | import org.elasticsearch.common.inject.Inject; 4 | import org.elasticsearch.plugins.AbstractPlugin; 5 | import org.elasticsearch.river.RiversModule; 6 | import org.elasticsearch.river.hbase.HBaseRiverModule; 7 | 8 | /** 9 | * Basic plug in information required by ElasticSearch. This class is also referenced under 10 | * /src/main/resources/es-plugin.properties. 11 | * 12 | * @author Ravi Gairola 13 | */ 14 | public class HBaseRiverPlugin extends AbstractPlugin { 15 | 16 | @Inject 17 | public HBaseRiverPlugin() {} 18 | 19 | @Override 20 | public String name() { 21 | return "river-hbase"; 22 | } 23 | 24 | @Override 25 | public String description() { 26 | return "River HBase Plugin"; 27 | } 28 | 29 | /** 30 | * Registers the HBaseRiverModule as "hbase" river. 
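 * Once the plugin is installed, a river instance can be created through the standard rivers API. A sketch of such a
 * call (the river name, Zookeeper host, and table name are placeholders; the "hbase" settings block is the one read by
 * HBaseRiver):
 * <pre>
 * curl -XPUT 'localhost:9200/_river/my_hbase_river/_meta' -d '{
 *   "type" : "hbase",
 *   "hbase" : {
 *     "hosts" : "zookeeper.example.com",
 *     "table" : "my_table"
 *   }
 * }'
 * </pre>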
31 | * 32 | * @param module 33 | */ 34 | public void onModule(final RiversModule module) { 35 | module.registerRiver("hbase", HBaseRiverModule.class); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/org/elasticsearch/river/hbase/HBaseCallbackLogger.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.river.hbase; 2 | 3 | import org.elasticsearch.common.logging.ESLogger; 4 | 5 | import com.stumbleupon.async.Callback; 6 | 7 | /** 8 | * A small helper class that will log any responses from HBase, in case there are any. 9 | * 10 | * @author Ravi Gairola 11 | */ 12 | public class HBaseCallbackLogger implements Callback { 13 | private final ESLogger logger; 14 | private final String realm; 15 | 16 | public HBaseCallbackLogger(final ESLogger logger, final String realm) { 17 | this.logger = logger; 18 | this.realm = realm; 19 | } 20 | 21 | @Override 22 | public Object call(final Object arg) throws Exception { 23 | if (arg instanceof Throwable) { 24 | this.logger.error("An async error has been caught from HBase within {}:", (Throwable) arg, this.realm); 25 | } 26 | else { 27 | this.logger.trace("Got response from HBase within {}: {}", this.realm, arg); 28 | } 29 | return arg; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/org/elasticsearch/river/hbase/HBaseParser.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.river.hbase; 2 | 3 | import java.util.ArrayList; 4 | import java.util.HashMap; 5 | import java.util.List; 6 | import java.util.Map; 7 | import java.util.Map.Entry; 8 | 9 | import org.elasticsearch.action.bulk.BulkItemResponse; 10 | import org.elasticsearch.action.bulk.BulkRequestBuilder; 11 | import org.elasticsearch.action.bulk.BulkResponse; 12 | import org.elasticsearch.action.index.IndexRequestBuilder; 13 | import org.elasticsearch.action.search.SearchResponse; 14 | import org.elasticsearch.common.logging.ESLogger; 15 | import org.elasticsearch.index.query.QueryBuilders; 16 | import org.elasticsearch.search.facet.FacetBuilders; 17 | import org.elasticsearch.search.facet.statistical.StatisticalFacet; 18 | import org.hbase.async.DeleteRequest; 19 | import org.hbase.async.HBaseClient; 20 | import org.hbase.async.KeyValue; 21 | import org.hbase.async.Scanner; 22 | 23 | /** 24 | * A separate Thread that does the actual fetching and storing of data from an HBase cluster. 25 | * 26 | * @author Ravi Gairola 27 | */ 28 | class HBaseParser implements Runnable { 29 | private static final String TIMESTMAP_STATS = "timestamp_stats"; 30 | private final HBaseRiver river; 31 | private final ESLogger logger; 32 | private final HBaseCallbackLogger cbLogger; 33 | private int indexCounter; 34 | private HBaseClient client; 35 | private Scanner scanner; 36 | private boolean stopThread; 37 | 38 | HBaseParser(final HBaseRiver river) { 39 | this.river = river; 40 | this.logger = river.getLogger(); 41 | this.cbLogger = new HBaseCallbackLogger(this.logger, "HBase Parser"); 42 | } 43 | 44 | /** 45 | * Timing mechanism of the thread that determines when a parse operation is supposed to run. Waits for the predefined 46 | * interval until a new run is performed. The method checks every 1000ms if it should be parsing again. The first run is 47 | * done immediately once the thread is started. 
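 * With the default configuration (interval = 600000) this means at most one parse run every ten minutes, with the
 * stop flag being re-checked about once per second in between.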
48 | */ 49 | @Override 50 | public void run() { 51 | this.logger.info("HBase Import Thread has started"); 52 | long lastRun = 0; 53 | while (!this.stopThread) { 54 | if (lastRun + this.river.getInterval() < System.currentTimeMillis()) { 55 | lastRun = System.currentTimeMillis(); 56 | try { 57 | this.indexCounter = 0; 58 | parse(); 59 | } catch (Throwable t) { 60 | this.logger.error("An exception has been caught while parsing data from HBase", t); 61 | } 62 | if (!this.stopThread) { 63 | this.logger.info("HBase Import Thread is waiting for {} seconds until the next run", this.river.getInterval() / 1000); 64 | } 65 | } 66 | try { 67 | Thread.sleep(1000); 68 | } catch (InterruptedException e) { 69 | this.logger.trace("HBase river parsing thread has been interrupted"); 70 | } 71 | } 72 | this.logger.info("HBase Import Thread has finished"); 73 | } 74 | 75 | /** 76 | * The actual parse implementation that connects to the HBase cluster and fetches all rows since the last import. Fetched 77 | * rows are added to an ElasticSearch Bulk Request with a size according to batchSize (default is 100). 78 | * 79 | * @throws InterruptedException 80 | * @throws Exception 81 | */ 82 | protected void parse() throws InterruptedException, Exception { 83 | this.logger.info("Parsing data from HBase"); 84 | try { 85 | this.client = new HBaseClient(this.river.getHosts()); 86 | this.logger.debug("Checking if table {} actually exists in HBase DB", this.river.getTable()); 87 | this.client.ensureTableExists(this.river.getTable()).addErrback(this.cbLogger); 88 | this.logger.debug("Fetching HBase Scanner"); 89 | this.scanner = this.client.newScanner(this.river.getTable()); 90 | this.scanner.setServerBlockCache(false); 91 | if (this.river.getFamily() != null) { 92 | this.scanner.setFamily(this.river.getFamily()); 93 | } 94 | if (this.river.getQualifiers() != null) { 95 | for (final String qualifier : this.river.getQualifiers().split(",")) { 96 | this.scanner.setQualifier(qualifier.trim().getBytes(this.river.getCharset())); 97 | } 98 | } 99 | 100 | setMinTimestamp(this.scanner); 101 | ArrayList<ArrayList<KeyValue>> rows; 102 | this.logger.debug("Starting to fetch rows"); 103 | 104 | while ((rows = this.scanner.nextRows(this.river.getBatchSize()).addErrback(this.cbLogger).joinUninterruptibly()) != null) { 105 | if (this.stopThread) { 106 | this.logger.info("Stopping HBase import in the middle of it"); 107 | break; 108 | } 109 | parseBulkOfRows(rows); 110 | } 111 | } finally { 112 | this.logger.debug("Closing HBase Scanner and Async Client"); 113 | if (this.scanner != null) { 114 | try { 115 | this.scanner.close().addErrback(this.cbLogger); 116 | } catch (Exception e) { 117 | this.logger.error("An Exception has been caught while closing the HBase Scanner", e, (Object[]) null); 118 | } 119 | } 120 | if (this.client != null) { 121 | try { 122 | this.client.shutdown().addErrback(this.cbLogger); 123 | } catch (Exception e) { 124 | this.logger.error("An Exception has been caught while shutting down the HBase client", e, (Object[]) null); 125 | } 126 | } 127 | } 128 | } 129 | 130 | /** 131 | * Run over a bulk of rows and process them.
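 * Every non-empty row is turned into one index request (with the id taken from the row key or the configured idField)
 * and the requests are executed as a single bulk; keys of documents that indexed successfully may afterwards be
 * deleted from HBase if the deleteOld option is enabled.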
132 | * 133 | * @param rows 134 | */ 135 | protected void parseBulkOfRows(final ArrayList<ArrayList<KeyValue>> rows) { 136 | this.logger.debug("Processing the next {} entries in HBase parsing process", rows.size()); 137 | final BulkRequestBuilder bulkRequest = this.river.getEsClient().prepareBulk(); 138 | final Map<String, byte[]> keyMapForDeletion = new HashMap<String, byte[]>(); 139 | for (final ArrayList<KeyValue> row : rows) { 140 | if (this.stopThread) { 141 | this.logger.info("Stopping HBase import in the middle of it"); 142 | break; 143 | } 144 | if (row.size() > 0) { 145 | final IndexRequestBuilder request = this.river.getEsClient().prepareIndex(this.river.getIndex(), this.river.getType()); 146 | final byte[] key = row.get(0).key(); 147 | final Map<String, Object> dataTree = readDataTree(row); 148 | request.setSource(dataTree); 149 | request.setTimestamp(String.valueOf(row.get(0).timestamp())); 150 | if (this.river.getIdField() == null) { 151 | final String keyString = new String(key, this.river.getCharset()); 152 | request.setId(keyString); 153 | keyMapForDeletion.put(keyString, key); 154 | } 155 | else { 156 | final String keyString = findKeyInDataTree(dataTree, this.river.getIdField()); 157 | keyMapForDeletion.put(keyString, key); 158 | } 159 | bulkRequest.add(request); 160 | } 161 | } 162 | final BulkResponse response = bulkRequest.execute().actionGet(); 163 | 164 | this.indexCounter += response.items().length; 165 | this.logger.info("HBase river has indexed {} entries so far", this.indexCounter); 166 | final List<byte[]> failedKeys = new ArrayList<byte[]>(); 167 | if (response.hasFailures()) { 168 | for (BulkItemResponse r : response.items()) { 169 | if (r.failed()) { 170 | failedKeys.add(keyMapForDeletion.remove(r.getId())); 171 | } 172 | } 173 | this.logger.error("Errors have occurred while trying to index new data from HBase"); 174 | this.logger.debug("Failed keys are {}", failedKeys); 175 | } 176 | if (this.river.getDeleteOld()) { 177 | for (Entry<String, byte[]> keyEntry : keyMapForDeletion.entrySet()) { 178 | this.client.delete(new DeleteRequest(this.river.getTable().getBytes(), keyEntry.getValue())).addErrback(this.cbLogger); 179 | } 180 | } 181 | } 182 | 183 | @SuppressWarnings("unchecked") 184 | protected String findKeyInDataTree(final Map<String, Object> dataTree, final String keyPath) { 185 | if (!keyPath.contains(this.river.getColumnSeparator())) { 186 | return (String) dataTree.get(keyPath); 187 | } 188 | final String key = keyPath.substring(0, keyPath.indexOf(this.river.getColumnSeparator())); 189 | if (dataTree.get(key) instanceof Map) { 190 | final int subKeyIndex = keyPath.indexOf(this.river.getColumnSeparator()) + this.river.getColumnSeparator().length(); 191 | return findKeyInDataTree((Map<String, Object>) dataTree.get(key), keyPath.substring(subKeyIndex)); 192 | } 193 | return null; 194 | } 195 | 196 | /** 197 | * Generate a tree structure that ElasticSearch can read and index from one of the rows that has been returned from 198 | * HBase.
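 * As an illustration (family and qualifier names are invented), a row containing the columns
 * <pre>
 * ("user", "name", "bob") and ("user", "age", "42")
 * </pre>
 * is read into the map structure
 * <pre>
 * { user : { name : "bob", age : "42" } }
 * </pre>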
199 | * 200 | * @param row 201 | * @return 202 | */ 203 | @SuppressWarnings("unchecked") 204 | protected Map<String, Object> readDataTree(final ArrayList<KeyValue> row) { 205 | final Map<String, Object> dataTree = new HashMap<String, Object>(); 206 | for (final KeyValue column : row) { 207 | final String family = this.river.normalizeField(new String(column.family(), this.river.getCharset())); 208 | final String qualifier = new String(column.qualifier(), this.river.getCharset()); 209 | final String value = new String(column.value(), this.river.getCharset()); 210 | if (!dataTree.containsKey(family)) { 211 | dataTree.put(family, new HashMap<String, Object>()); 212 | } 213 | readQualifierStructure((Map<String, Object>) dataTree.get(family), qualifier, value); 214 | } 215 | return dataTree; 216 | } 217 | 218 | /** 219 | * Separates a column into sub columns if a separator is configured and stores the value at the corresponding level of the JSON tree. 220 | * 221 | * @param parent 222 | * @param qualifier 223 | * @param value 224 | */ 225 | @SuppressWarnings("unchecked") 226 | protected void readQualifierStructure(final Map<String, Object> parent, final String qualifier, final String value) { 227 | if (this.river.getColumnSeparator() != null && !this.river.getColumnSeparator().isEmpty()) { 228 | final int separatorPos = qualifier.indexOf(this.river.getColumnSeparator()); 229 | if (separatorPos != -1) { 230 | final String parentQualifier = this.river.normalizeField(qualifier.substring(0, separatorPos)); 231 | final String childQualifier = qualifier.substring(separatorPos + this.river.getColumnSeparator().length()); 232 | if (!childQualifier.isEmpty()) { 233 | if (!(parent.get(parentQualifier) instanceof Map)) { 234 | parent.put(parentQualifier, new HashMap<String, Object>()); 235 | } 236 | readQualifierStructure((Map<String, Object>) parent.get(parentQualifier), childQualifier, value); 237 | return; 238 | } 239 | parent.put(this.river.normalizeField(qualifier.replace(this.river.getColumnSeparator(), "")), value); 240 | return; 241 | } 242 | } 243 | parent.put(this.river.normalizeField(qualifier), value); 244 | } 245 | 246 | /** 247 | * Signals the import thread to stop; any open Scanner or Client is then closed by the parse loop's cleanup. 248 | */ 249 | public synchronized void stopThread() { 250 | this.stopThread = true; 251 | } 252 | 253 | /** 254 | * Sets the minimum time stamp on the HBase scanner, by looking into Elasticsearch for the last entry made.
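 * The lookup issues a match-all search with a statistical facet over the _timestamp field and uses its maximum plus
 * one as the scanner minimum (or 0 if no documents exist yet). As a query sketch:
 * <pre>
 * { "query" : { "match_all" : {} },
 *   "facets" : { "timestamp_stats" : { "statistical" : { "field" : "_timestamp" } } } }
 * </pre>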
255 | * 256 | * @param scanner 257 | */ 258 | protected long setMinTimestamp(final Scanner scanner) { 259 | this.logger.debug("Looking into ElasticSearch to determine timestamp of last import"); 260 | final SearchResponse response = this.river.getEsClient() 261 | .prepareSearch(this.river.getIndex()) 262 | .setTypes(this.river.getType()) 263 | .setQuery(QueryBuilders.matchAllQuery()) 264 | .addFacet(FacetBuilders.statisticalFacet(TIMESTMAP_STATS).field("_timestamp")) 265 | .execute() 266 | .actionGet(); 267 | 268 | if (response.facets().facet(TIMESTMAP_STATS) != null) { 269 | this.logger.debug("Got statistical data from ElasticSearch about data timestamps"); 270 | final StatisticalFacet facet = (StatisticalFacet) response.facets().facet(TIMESTMAP_STATS); 271 | final long timestamp = (long) Math.max(facet.getMax() + 1, 0); 272 | scanner.setMinTimestamp(timestamp); 273 | this.logger.debug("Found latest timestamp in ElasticSearch to be {}", timestamp); 274 | return timestamp; 275 | } 276 | this.logger.debug("No statistical data about data timestamps could be found -> probably no data there yet"); 277 | scanner.setMinTimestamp(0); 278 | this.logger.debug("Found latest timestamp in ElasticSearch to be not present (-> 0)"); 279 | return 0L; 280 | } 281 | } 282 | -------------------------------------------------------------------------------- /src/main/java/org/elasticsearch/river/hbase/HBaseRiver.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.river.hbase; 2 | 3 | import java.lang.Thread.UncaughtExceptionHandler; 4 | import java.nio.charset.Charset; 5 | import java.security.InvalidParameterException; 6 | import java.util.Map; 7 | 8 | import org.elasticsearch.ElasticSearchException; 9 | import org.elasticsearch.ExceptionsHelper; 10 | import org.elasticsearch.action.admin.indices.status.ShardStatus; 11 | import org.elasticsearch.client.Client; 12 | import org.elasticsearch.common.inject.Inject; 13 | import org.elasticsearch.common.logging.ESLogger; 14 | import org.elasticsearch.common.util.concurrent.EsExecutors; 15 | import org.elasticsearch.common.xcontent.support.XContentMapValues; 16 | import org.elasticsearch.index.shard.IndexShardState; 17 | import org.elasticsearch.indices.IndexAlreadyExistsException; 18 | import org.elasticsearch.river.AbstractRiverComponent; 19 | import org.elasticsearch.river.River; 20 | import org.elasticsearch.river.RiverName; 21 | import org.elasticsearch.river.RiverSettings; 22 | 23 | /** 24 | * An HBase import river built similar to the MySQL river, that was modeled after the Solr SQL import functionality. 25 | * 26 | * @author Ravi Gairola 27 | */ 28 | public class HBaseRiver extends AbstractRiverComponent implements River, UncaughtExceptionHandler { 29 | private static final String CONFIG_SPACE = "hbase"; 30 | private final Client esClient; 31 | private volatile Runnable parser; 32 | 33 | /** 34 | * Comma separated list of Zookeeper hosts to which the HBase client can connect to find the cluster. 35 | */ 36 | private final String hosts; 37 | 38 | /** 39 | * The HBase table name to be imported from. 40 | */ 41 | private final String table; 42 | 43 | /** 44 | * The ElasticSearch index name to be imported to. (default is the river name) 45 | */ 46 | private final String index; 47 | 48 | /** 49 | * The ElasticSearch type name to be imported to. 
(Default is the source table name) 50 | */ 51 | private final String type; 52 | 53 | /** 54 | * The interval in ms with which the river is supposed to run (60000 = every minute). (Default is every 10 minutes) 55 | */ 56 | private final long interval; 57 | 58 | /** 59 | * How big the ElasticSearch bulk indexing requests are supposed to be. Tweaking this might improve performance. (Default is 60 | * 100 operations) 61 | */ 62 | private final int batchSize; 63 | 64 | /** 65 | * Name of the field from HBase to be used as an idField in ElasticSearch. The mapping will be set up accordingly, so that 66 | * the _id field is routed to this field name (you can then access it under both the field name and "_id"). If no id 67 | * field is given, then ElasticSearch will automatically generate an id. 68 | */ 69 | private final String idField; 70 | 71 | /** 72 | * The char set which is used to parse data from HBase. (Default is UTF-8) 73 | */ 74 | private final Charset charset; 75 | 76 | /** 77 | * Limit the scanning of the HBase table to a certain family. 78 | */ 79 | private final byte[] family; 80 | 81 | /** 82 | * Limit the scanning of the HBase table to a number of qualifiers. A family must be set for this to take effect. 83 | * Multiple qualifiers can be set via a comma separated list. 84 | */ 85 | private final String qualifiers; 86 | 87 | /** 88 | * Some names must be given in a lower case format (the index name for example), others are more flexible. This flag will 89 | * normalize all fields to lower case and remove special characters that ElasticSearch can't handle. (The filter is 90 | * probably stricter than needed in most cases) 91 | */ 92 | private final boolean normalizeFields; 93 | 94 | /** 95 | * Splits up the column into further sub columns if a separator is defined. For example: 96 | * 97 | * <pre>
 98 | 	 * Separator: "-"
 99 | 	 * Columns name: "this-is-my-column"
100 | 	 * Result:
101 | 	 * {
102 | 	 * 	this: {
103 | 	 * 		is: {
104 | 	 * 			my: {
105 | 	 * 				column: -value-
106 | 	 * 			}
107 | 	 * 		}
108 | 	 * 	}
109 | 	 * }
110 | 	 * </pre>
111 | * 112 | * If no separator is defined, or the separator is empty, no operation is performed. Try to use single character 113 | * separators, as multi character separators will allow partial hits of a separator to be part of the data. (E.g. a 114 | * separator defined as "()" will leave all "(" and ")" in the parsed data.) 115 | */ 116 | public final String columnSeparator; 117 | 118 | /** 119 | * Define a custom mapping that will be used instead of an automatically generated one. Make sure to enable time stamps, 120 | * and if you want an id field to be recognized, set the proper alias. 121 | */ 122 | public final String customMapping; 123 | 124 | /** 125 | * Setting that controls whether entries should be deleted from HBase after they have been read and indexed. 126 | */ 127 | private final boolean deleteOld; 128 | 129 | /** 130 | * Loads and verifies all the configuration needed to run this river. 131 | * 132 | * @param riverName 133 | * @param settings 134 | * @param esClient 135 | */ 136 | @Inject 137 | public HBaseRiver(final RiverName riverName, final RiverSettings settings, final Client esClient) { 138 | super(riverName, settings); 139 | this.esClient = esClient; 140 | this.logger.info("Creating HBase Stream River"); 141 | 142 | this.normalizeFields = Boolean.parseBoolean(readConfig("normalizeFields", "true")); 143 | this.hosts = readConfig("hosts"); 144 | this.table = readConfig("table"); 145 | this.columnSeparator = readConfig("columnSeparator", null); 146 | this.idField = normalizeField(readConfig("idField", null)); 147 | this.index = normalizeField(readConfig("index", riverName.name())); 148 | this.type = normalizeField(readConfig("type", this.table)); 149 | this.interval = Long.parseLong(readConfig("interval", "600000")); 150 | this.batchSize = Integer.parseInt(readConfig("batchSize", "100")); 151 | this.charset = Charset.forName(readConfig("charset", "UTF-8")); 152 | this.deleteOld = Boolean.parseBoolean(readConfig("deleteOld", "false")); 153 | 154 | final String family = readConfig("family", null); 155 | this.family = family != null ? family.getBytes(this.charset) : null; 156 | this.qualifiers = readConfig("qualifiers", null); 157 | this.customMapping = readConfig("customMapping", null); 158 | 159 | if (this.interval <= 0) { 160 | throw new IllegalArgumentException("The interval between runs must be at least 1 ms. The current config is set to " 161 | + this.interval); 162 | } 163 | if (this.batchSize <= 0) { 164 | throw new IllegalArgumentException("The batch size must be set to at least 1. The current config is set to " + this.batchSize); 165 | } 166 | } 167 | 168 | /** 169 | * Fetch the value of a configuration that has no default value and is therefore mandatory. Empty (trimmed) strings are 170 | * as invalid as no value at all (null). 171 | * 172 | * @param config Key of the configuration to fetch 173 | * @throws InvalidParameterException if a configuration is missing (null or empty) 174 | * @return 175 | */ 176 | private String readConfig(final String config) { 177 | final String result = readConfig(config, null); 178 | if (result == null || result.trim().isEmpty()) { 179 | this.logger.error("Unable to read required config {}. Aborting!", config); 180 | throw new InvalidParameterException("Unable to read required config " + config); 181 | } 182 | return result; 183 | } 184 | 185 | /** 186 | * Fetch the value of a configuration that has a default value and is therefore optional.
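 * The constructor above uses this for all optional settings, e.g. readConfig("interval", "600000") falls back to a
 * ten minute interval when none is configured.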
187 | * 188 | * @param config Key of the configuration to fetch 189 | * @param defaultValue The value to set if no value could be found 190 | * @return 191 | */ 192 | @SuppressWarnings({ "unchecked" }) 193 | private String readConfig(final String config, final String defaultValue) { 194 | if (this.settings.settings().containsKey(CONFIG_SPACE)) { 195 | Map<String, Object> hbaseSettings = (Map<String, Object>) this.settings.settings().get(CONFIG_SPACE); 196 | return XContentMapValues.nodeStringValue(hbaseSettings.get(config), defaultValue); 197 | } 198 | return defaultValue; 199 | } 200 | 201 | /** 202 | * This method is launched by ElasticSearch and starts the HBase River. The method will try to create a mapping with time 203 | * stamps enabled. If a mapping already exists, the user should make sure that time stamps are enabled for this type. 204 | */ 205 | @Override 206 | public synchronized void start() { 207 | if (this.parser != null) { 208 | this.logger.warn("Trying to start HBase stream although it is already running"); 209 | return; 210 | } 211 | this.parser = new HBaseParser(this); 212 | 213 | this.logger.info("Waiting for Index to be ready for interaction"); 214 | waitForESReady(); 215 | 216 | this.logger.info("Starting HBase Stream"); 217 | String mapping; 218 | if (this.customMapping != null && !this.customMapping.trim().isEmpty()) { 219 | mapping = this.customMapping; 220 | } 221 | else { 222 | if (this.idField == null) { 223 | mapping = "{\"" + this.type + "\":{\"_timestamp\":{\"enabled\":true}}}"; 224 | } 225 | else if (this.columnSeparator != null) { 226 | mapping = "{\"" + this.type + "\":{\"_timestamp\":{\"enabled\":true},\"_id\":{\"path\":\"" 227 | + this.idField.replace(this.columnSeparator, ".") + "\"}}}"; 228 | } 229 | else { 230 | mapping = "{\"" + this.type + "\":{\"_timestamp\":{\"enabled\":true},\"_id\":{\"path\":\"" + this.idField + "\"}}}"; 231 | } 232 | } 233 | 234 | try { 235 | this.esClient.admin().indices().prepareCreate(this.index).addMapping(this.type, mapping).execute().actionGet(); 236 | this.logger.info("Created Index {} with _timestamp mapping for {}", this.index, this.type); 237 | } catch (Exception e) { 238 | if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { 239 | this.logger.debug("Not creating Index {} as it already exists", this.index); 240 | } 241 | else if (ExceptionsHelper.unwrapCause(e) instanceof ElasticSearchException) { 242 | this.logger.debug("Mapping {}.{} already exists and will not be created", this.index, this.type); 243 | } 244 | else { 245 | this.logger.warn("failed to create index [{}], disabling river...", e, this.index); 246 | return; 247 | } 248 | } 249 | 250 | try { 251 | this.esClient.admin() 252 | .indices() 253 | .preparePutMapping(this.index) 254 | .setType(this.type) 255 | .setSource(mapping) 256 | .setIgnoreConflicts(true) 257 | .execute() 258 | .actionGet(); 259 | } catch (ElasticSearchException e) { 260 | this.logger.debug("Mapping already exists for index {} and type {}", this.index, this.type); 261 | } 262 | 263 | final Thread t = EsExecutors.daemonThreadFactory(this.settings.globalSettings(), "hbase_slurper").newThread(this.parser); 264 | t.setUncaughtExceptionHandler(this); 265 | t.start(); 266 | } 267 | 268 | private void waitForESReady() { 269 | if (!this.esClient.admin().indices().prepareExists(this.index).execute().actionGet().exists()) { 270 | return; 271 | } 272 | for (final ShardStatus status : this.esClient.admin().indices().prepareStatus(this.index).execute().actionGet().getShards()) { 273 | if (status.getState() !=
IndexShardState.STARTED) { 274 | try { 275 | Thread.sleep(1000); 276 | } catch (InterruptedException e) { 277 | this.logger.trace("HBase thread has been interrupted while waiting for the database to be reachable"); 278 | } 279 | this.logger.trace("Waiting..."); 280 | waitForESReady(); 281 | break; 282 | } 283 | } 284 | } 285 | 286 | /** 287 | * This method is called by ElasticSearch when shutting down the river. The method will stop the thread and close all 288 | * connections to HBase. 289 | */ 290 | @Override 291 | public synchronized void close() { 292 | this.logger.info("Closing HBase river"); 293 | if (this.parser instanceof HBaseParser) { 294 | ((HBaseParser) this.parser).stopThread(); 295 | } 296 | this.parser = null; 297 | } 298 | 299 | /** 300 | * Some of the asynchronous methods of the HBase client will throw Exceptions that are not caught anywhere else. 301 | */ 302 | @Override 303 | public void uncaughtException(final Thread arg0, final Throwable arg1) { 304 | this.logger.error("An Exception has been thrown in HBase Import Thread", arg1, (Object[]) null); 305 | } 306 | 307 | /** 308 | * If the normalizeField flag is set, this method will return a lower case representation of the field, as well as 309 | * stripping away all special characters except "-" and "_". 310 | * 311 | * @param fieldName 312 | * @return 313 | */ 314 | public String normalizeField(final String fieldName) { 315 | if (!isNormalizeFields() || fieldName == null) { 316 | return fieldName; 317 | } 318 | if (getColumnSeparator() != null) { 319 | String regex = "a-z0-9\\-_"; 320 | for (int i = 0; i < getColumnSeparator().length(); i++) { 321 | regex += "\\" + getColumnSeparator().charAt(i); 322 | } 323 | return fieldName.toLowerCase().replaceAll("[^" + regex + "]", ""); 324 | } 325 | return fieldName.toLowerCase().replaceAll("[^a-z0-9\\-_]", ""); 326 | } 327 | 328 | public boolean isNormalizeFields() { 329 | return this.normalizeFields; 330 | } 331 | 332 | public long getInterval() { 333 | return this.interval; 334 | } 335 | 336 | public String getTable() { 337 | return this.table; 338 | } 339 | 340 | public String getHosts() { 341 | return this.hosts; 342 | } 343 | 344 | public byte[] getFamily() { 345 | return this.family; 346 | } 347 | 348 | public String getQualifiers() { 349 | return this.qualifiers; 350 | } 351 | 352 | public Charset getCharset() { 353 | return this.charset; 354 | } 355 | 356 | public int getBatchSize() { 357 | return this.batchSize; 358 | } 359 | 360 | public Client getEsClient() { 361 | return this.esClient; 362 | } 363 | 364 | public String getIndex() { 365 | return this.index; 366 | } 367 | 368 | public String getType() { 369 | return this.type; 370 | } 371 | 372 | public String getIdField() { 373 | return this.idField; 374 | } 375 | 376 | public String getColumnSeparator() { 377 | return this.columnSeparator; 378 | } 379 | 380 | public ESLogger getLogger() { 381 | return this.logger; 382 | } 383 | 384 | public boolean getDeleteOld() { 385 | return this.deleteOld; 386 | } 387 | } 388 | -------------------------------------------------------------------------------- /src/main/java/org/elasticsearch/river/hbase/HBaseRiverModule.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.river.hbase; 2 | 3 | import org.elasticsearch.common.inject.AbstractModule; 4 | import org.elasticsearch.river.River; 5 | 6 | /** 7 | * Does the initial configuration of the Module, when it is called by ElasticSearch. 
Binds the HBase river as an eager 8 | * singleton river. 9 | * 10 | * @author Ravi Gairola 11 | */ 12 | public class HBaseRiverModule extends AbstractModule { 13 | 14 | @Override 15 | protected void configure() { 16 | bind(River.class).to(HBaseRiver.class).asEagerSingleton(); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/main/resources/es-plugin.properties: -------------------------------------------------------------------------------- 1 | plugin=org.elasticsearch.plugin.river.hbase.HBaseRiverPlugin -------------------------------------------------------------------------------- /src/test/java/org/elasticsearch/river/hbase/HBaseParserTest.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.river.hbase; 2 | 3 | import static org.testng.Assert.assertEquals; 4 | import static org.testng.Assert.assertNotNull; 5 | 6 | import java.nio.charset.Charset; 7 | import java.util.ArrayList; 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | 11 | import mockit.Mock; 12 | import mockit.MockUp; 13 | import mockit.Mockit; 14 | 15 | import org.elasticsearch.client.Client; 16 | import org.elasticsearch.river.AbstractRiverComponent; 17 | import org.elasticsearch.river.RiverName; 18 | import org.elasticsearch.river.RiverSettings; 19 | import org.hbase.async.KeyValue; 20 | import org.testng.Assert; 21 | import org.testng.annotations.AfterClass; 22 | import org.testng.annotations.BeforeClass; 23 | import org.testng.annotations.Test; 24 | 25 | public class HBaseParserTest { 26 | @AfterClass 27 | public void tearDown() { 28 | Mockit.tearDownMocks(); 29 | } 30 | 31 | public class ReadQualifierStructureTest { 32 | public String separator; 33 | public boolean normalize; 34 | 35 | @BeforeClass 36 | public void setUp() { 37 | new MockUp() { 38 | @Mock 39 | void $init(final RiverName riverName, final RiverSettings settings) {} 40 | }; 41 | new MockUp() { 42 | @Mock 43 | void $init(final RiverName riverName, final RiverSettings settings, final Client esClient) {} 44 | 45 | @Mock 46 | String getColumnSeparator() { 47 | return ReadQualifierStructureTest.this.separator; 48 | } 49 | 50 | @Mock 51 | boolean isNormalizeFields() { 52 | return ReadQualifierStructureTest.this.normalize; 53 | } 54 | }; 55 | } 56 | 57 | @SuppressWarnings("unchecked") 58 | @Test 59 | public void testBase() throws Exception { 60 | this.separator = "::"; 61 | this.normalize = false; 62 | 63 | final Map result = new HashMap(); 64 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 65 | parser.readQualifierStructure(result, "data::set1::category1", "test1"); 66 | parser.readQualifierStructure(result, "data::set1::category2", "test2"); 67 | parser.readQualifierStructure(result, "data::set1::category3", "test3"); 68 | parser.readQualifierStructure(result, "data::set2::category1", "test4"); 69 | parser.readQualifierStructure(result, "data::set2::category2", "test5"); 70 | 71 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set1")).get("category1"), "test1"); 72 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set1")).get("category2"), "test2"); 73 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set1")).get("category3"), "test3"); 74 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set2")).get("category1"), "test4"); 75 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set2")).get("category2"), "test5"); 76 | } 77 | 78 | @Test 79 | public 
void testNullSeperator() throws Exception { 80 | this.separator = null; 81 | this.normalize = false; 82 | 83 | final Map result = new HashMap(); 84 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 85 | parser.readQualifierStructure(result, "data::set1::category1", "test1"); 86 | parser.readQualifierStructure(result, "data::set1::category2", "test2"); 87 | parser.readQualifierStructure(result, "data::set1::category3", "test3"); 88 | parser.readQualifierStructure(result, "data::set2::category1", "test4"); 89 | parser.readQualifierStructure(result, "data::set2::category2", "test5"); 90 | 91 | Assert.assertEquals(result.get("data::set1::category1"), "test1"); 92 | Assert.assertEquals(result.get("data::set1::category2"), "test2"); 93 | Assert.assertEquals(result.get("data::set1::category3"), "test3"); 94 | Assert.assertEquals(result.get("data::set2::category1"), "test4"); 95 | Assert.assertEquals(result.get("data::set2::category2"), "test5"); 96 | } 97 | 98 | @Test 99 | public void testEmptySeperator() throws Exception { 100 | this.separator = ""; 101 | this.normalize = false; 102 | 103 | final Map result = new HashMap(); 104 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 105 | parser.readQualifierStructure(result, "data::set1::category1", "test1"); 106 | parser.readQualifierStructure(result, "data::set1::category2", "test2"); 107 | parser.readQualifierStructure(result, "data::set1::category3", "test3"); 108 | parser.readQualifierStructure(result, "data::set2::category1", "test4"); 109 | parser.readQualifierStructure(result, "data::set2::category2", "test5"); 110 | 111 | Assert.assertEquals(result.get("data::set1::category1"), "test1"); 112 | Assert.assertEquals(result.get("data::set1::category2"), "test2"); 113 | Assert.assertEquals(result.get("data::set1::category3"), "test3"); 114 | Assert.assertEquals(result.get("data::set2::category1"), "test4"); 115 | Assert.assertEquals(result.get("data::set2::category2"), "test5"); 116 | } 117 | 118 | @SuppressWarnings("unchecked") 119 | @Test 120 | public void testEmptySubQualifier() throws Exception { 121 | this.separator = "::"; 122 | this.normalize = true; 123 | 124 | final Map result = new HashMap(); 125 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 126 | parser.readQualifierStructure(result, "data::set1::category1", "test1"); 127 | parser.readQualifierStructure(result, "data::set1::category2", "test2"); 128 | parser.readQualifierStructure(result, "data::set1::category3", "test3"); 129 | parser.readQualifierStructure(result, "data::set2::category1", "test4"); 130 | parser.readQualifierStructure(result, "data::set2::", "test5"); 131 | 132 | System.out.println(result); 133 | 134 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set1")).get("category1"), "test1"); 135 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set1")).get("category2"), "test2"); 136 | Assert.assertEquals(((Map) ((Map) result.get("data")).get("set1")).get("category3"), "test3"); 137 | Assert.assertEquals(((Map) result.get("data")).get("set2"), "test5"); 138 | } 139 | 140 | @Test 141 | public void testWrongSeperator() throws Exception { 142 | this.separator = "--"; 143 | this.normalize = false; 144 | 145 | final Map result = new HashMap(); 146 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 147 | parser.readQualifierStructure(result, "data::set1::category1", "test1"); 148 | parser.readQualifierStructure(result, 
"data::set1::category2", "test2"); 149 | parser.readQualifierStructure(result, "data::set1::category3", "test3"); 150 | this.normalize = true; 151 | parser.readQualifierStructure(result, "data::set2::category1", "test4"); 152 | parser.readQualifierStructure(result, "data::set2::category2", "test5"); 153 | 154 | Assert.assertEquals(result.get("data::set1::category1"), "test1"); 155 | Assert.assertEquals(result.get("data::set1::category2"), "test2"); 156 | Assert.assertEquals(result.get("data::set1::category3"), "test3"); 157 | Assert.assertEquals(result.get("dataset2category1"), "test4"); 158 | Assert.assertEquals(result.get("dataset2category2"), "test5"); 159 | } 160 | } 161 | 162 | public class ReadDataTreeTest { 163 | private final Charset charset = Charset.forName("UTF-8"); 164 | private int rowCounter = 0; 165 | 166 | @BeforeClass 167 | public void setUp() { 168 | new MockUp() { 169 | @Mock 170 | void $init(final RiverName riverName, final RiverSettings settings) {} 171 | }; 172 | 173 | new MockUp() { 174 | 175 | @Mock 176 | void $init(final RiverName riverName, final RiverSettings settings, final Client esClient) {} 177 | 178 | @Mock 179 | Charset getCharset() { 180 | return ReadDataTreeTest.this.charset; 181 | } 182 | 183 | @Mock 184 | boolean isNormalizeFields() { 185 | return true; 186 | } 187 | }; 188 | } 189 | 190 | @Test 191 | @SuppressWarnings("unchecked") 192 | public void testBase() { 193 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 194 | 195 | final ArrayList input = new ArrayList(); 196 | 197 | input.add(getKeyValue("family1", "category1", "value1")); 198 | input.add(getKeyValue("family1", "category2", "value2")); 199 | input.add(getKeyValue("family1", "category3", "value3")); 200 | input.add(getKeyValue("family2", "category1", "value4")); 201 | input.add(getKeyValue("family2", "category4", "value5")); 202 | input.add(getKeyValue("family3", "category5", "value6")); 203 | input.add(getKeyValue("family2", "category6", "value7")); 204 | 205 | final Map output = parser.readDataTree(input); 206 | 207 | assertNotNull(output.get("family1")); 208 | final Map family1 = (Map) output.get("family1"); 209 | assertEquals(family1.get("category1"), "value1"); 210 | assertEquals(family1.get("category2"), "value2"); 211 | assertEquals(family1.get("category3"), "value3"); 212 | assertNotNull(output.get("family2")); 213 | final Map family2 = (Map) output.get("family2"); 214 | assertEquals(family2.get("category1"), "value4"); 215 | assertEquals(family2.get("category4"), "value5"); 216 | assertEquals(family2.get("category6"), "value7"); 217 | assertNotNull(output.get("family3")); 218 | final Map family3 = (Map) output.get("family3"); 219 | assertEquals(family3.get("category5"), "value6"); 220 | } 221 | 222 | private KeyValue getKeyValue(final String family, final String qualifier, final String value) { 223 | return new KeyValue(String.valueOf(this.rowCounter++).getBytes(this.charset), 224 | family.getBytes(this.charset), 225 | qualifier.getBytes(this.charset), 226 | value.getBytes(this.charset)); 227 | } 228 | } 229 | 230 | public class FindKeyInDataTreeTest { 231 | protected String separator; 232 | protected boolean normalize; 233 | 234 | @BeforeClass 235 | public void setUp() { 236 | new MockUp() { 237 | @Mock 238 | void $init(final RiverName riverName, final RiverSettings settings) {} 239 | }; 240 | 241 | new MockUp() { 242 | 243 | @Mock 244 | void $init(final RiverName riverName, final RiverSettings settings, final Client esClient) {} 245 | 246 | @Mock 247 
| String getColumnSeparator() { 248 | return FindKeyInDataTreeTest.this.separator; 249 | } 250 | 251 | @Mock 252 | boolean isNormalizeFields() { 253 | return FindKeyInDataTreeTest.this.normalize; 254 | } 255 | }; 256 | } 257 | 258 | @Test 259 | public void testBase() { 260 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 261 | this.separator = "::"; 262 | 263 | final Map dataTree = new HashMap(); 264 | final Map dataBranch = new HashMap(); 265 | dataBranch.put("theId", "TheValue"); 266 | dataTree.put("aBranch", dataBranch); 267 | 268 | assertEquals(parser.findKeyInDataTree(dataTree, "aBranch::theId"), "TheValue"); 269 | } 270 | 271 | @Test 272 | public void testDotSeparator() { 273 | final HBaseParser parser = new HBaseParser(new HBaseRiver(null, null, null)); 274 | this.separator = "."; 275 | 276 | final Map dataTree = new HashMap(); 277 | final Map dataBranch = new HashMap(); 278 | dataBranch.put("theId", "TheValue"); 279 | dataTree.put("aBranch", dataBranch); 280 | 281 | assertEquals(parser.findKeyInDataTree(dataTree, "aBranch.theId"), "TheValue"); 282 | } 283 | } 284 | } 285 | -------------------------------------------------------------------------------- /src/test/java/org/elasticsearch/river/hbase/HBaseRiverTest.java: -------------------------------------------------------------------------------- 1 | package org.elasticsearch.river.hbase; 2 | 3 | import mockit.Mock; 4 | import mockit.MockUp; 5 | 6 | import org.elasticsearch.client.Client; 7 | import org.elasticsearch.river.AbstractRiverComponent; 8 | import org.elasticsearch.river.RiverName; 9 | import org.elasticsearch.river.RiverSettings; 10 | import org.testng.Assert; 11 | import org.testng.annotations.Test; 12 | 13 | public class HBaseRiverTest { 14 | @Test 15 | public void testNormalizeField() { 16 | new MockUp() { 17 | @Mock 18 | void $init(final RiverName riverName, final RiverSettings settings) {} 19 | }; 20 | new MockUp() { 21 | @Mock 22 | void $init(final RiverName riverName, final RiverSettings settings, final Client esClient) {} 23 | 24 | @Mock 25 | boolean isNormalizeFields() { 26 | return true; 27 | } 28 | 29 | @Mock 30 | String getColumnSeparator() { 31 | return "::"; 32 | } 33 | }; 34 | 35 | final HBaseRiver river = new HBaseRiver(null, null, null); 36 | 37 | Assert.assertEquals(river.normalizeField(""), ""); 38 | Assert.assertEquals(river.normalizeField(" "), ""); 39 | Assert.assertEquals(river.normalizeField("a"), "a"); 40 | Assert.assertEquals(river.normalizeField("A"), "a"); 41 | Assert.assertEquals(river.normalizeField("Aa"), "aa"); 42 | Assert.assertEquals(river.normalizeField("a-b"), "a-b"); 43 | Assert.assertEquals(river.normalizeField("a_b"), "a_b"); 44 | Assert.assertEquals(river.normalizeField("90aS"), "90as"); 45 | Assert.assertEquals(river.normalizeField("&*($@#!ui^&$(#\"8ui"), "ui8ui"); 46 | Assert.assertEquals(river.normalizeField("bl%^&*ah::blubb"), "blah::blubb"); 47 | Assert.assertEquals(river.normalizeField(null), null); 48 | } 49 | } 50 | --------------------------------------------------------------------------------