├── LICENSE ├── README.md ├── pom.xml └── src ├── main ├── java │ └── io │ │ └── svectors │ │ └── hbase │ │ ├── HBaseClient.java │ │ ├── HBaseConnectionFactory.java │ │ ├── config │ │ └── HBaseSinkConfig.java │ │ ├── parser │ │ ├── AvroEventParser.java │ │ ├── EventParser.java │ │ ├── EventParsingException.java │ │ └── JsonEventParser.java │ │ ├── sink │ │ ├── HBaseSinkConnector.java │ │ ├── HBaseSinkTask.java │ │ └── SinkConnectorException.java │ │ └── util │ │ └── ToPutFunction.java └── resources │ └── config │ └── hbase-sink.properties └── test ├── java └── io │ └── svectors │ └── hbase │ ├── parser │ ├── TestAvroEventParser.java │ └── TestJsonEventParser.java │ ├── sink │ ├── HbaseTestUtil.java │ └── TestHbaseSinkTask.java │ └── util │ └── TestToPutFunction.java └── resources └── log4j.properties /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kafka Connect for HBase 2 | 3 | A sink connector to write to HBase. 4 | The source connector implementation is available at https://github.com/mravi/hbase-connect-kafka 5 | 6 | ## Prerequisites 7 | * Confluent 2.0 8 | * HBase 1.0.0 9 | * JDK 1.8 10 | 11 | ## Assumptions 12 | * The HBase table already exists. 13 | * Each Kafka topic is mapped to an HBase table.
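Because the connector does not create tables, create each target table (with the column family you will configure below) before starting the sink. For example, for the topic `test` with column family `d` used throughout this README:

```bash
# From the HBase shell; the table name must match the Kafka topic name.
hbase shell
create 'test', 'd'
```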
14 | 15 | 16 | ## Properties 17 | 18 | Below are the properties that need to be passed in the configuration file: 19 | 20 | name | data type | required | description 21 | -----|-----------|----------|------------ 22 | zookeeper.quorum | string | yes | ZooKeeper quorum of the HBase cluster 23 | event.parser.class | string | yes | Either AvroEventParser or JsonEventParser, to parse Avro or JSON events respectively. 24 | topics | string | yes | List of Kafka topics. 25 | hbase.`<table>`.rowkey.columns | string | yes | The columns that form the rowkey of the HBase table `<table>` 26 | hbase.`<table>`.family | string | yes | Column family of the HBase table `<table>`. 27 | 28 | Example connector.properties file: 29 | 30 | ```bash 31 | name=kafka-cdc-hbase 32 | connector.class=io.svectors.hbase.sink.HBaseSinkConnector 33 | tasks.max=1 34 | topics=test 35 | zookeeper.quorum=localhost:2181 36 | event.parser.class=io.svectors.hbase.parser.AvroEventParser 37 | hbase.test.rowkey.columns=id 38 | hbase.test.rowkey.delimiter=| 39 | hbase.test.family=d 40 | ``` 41 | 42 | ## Packaging 43 | * mvn clean package 44 | 45 | 46 | ## Deployment 47 | 48 | * Follow the [Getting started](http://hbase.apache.org/book.html#standalone_dist) guide for HBase. 49 | 50 | * [Download and install Confluent](http://www.confluent.io/) 51 | 52 | * Copy hbase-sink.jar and hbase-sink.properties from the project build location to `$CONFLUENT_HOME/share/java/kafka-connect-hbase` 53 | 54 | ```bash 55 | mkdir $CONFLUENT_HOME/share/java/kafka-connect-hbase 56 | cp target/hbase-sink.jar $CONFLUENT_HOME/share/java/kafka-connect-hbase/ 57 | cp hbase-sink.properties $CONFLUENT_HOME/share/java/kafka-connect-hbase/ 58 | ``` 59 | 60 | * Start ZooKeeper, Kafka and the Schema Registry 61 | 62 | ```bash 63 | nohup $CONFLUENT_HOME/bin/zookeeper-server-start $CONFLUENT_HOME/etc/kafka/zookeeper.properties & 64 | nohup $CONFLUENT_HOME/bin/kafka-server-start $CONFLUENT_HOME/etc/kafka/server.properties & 65 | nohup $CONFLUENT_HOME/bin/schema-registry-start $CONFLUENT_HOME/etc/schema-registry/schema-registry.properties & 66 | ``` 67 | 68 | * Create the HBase table 'test' from the HBase shell 69 | 70 | * Start the HBase sink 71 | 72 | ```bash 73 | export CLASSPATH=$CONFLUENT_HOME/share/java/kafka-connect-hbase/hbase-sink.jar 74 | 75 | $CONFLUENT_HOME/bin/connect-standalone etc/schema-registry/connect-avro-standalone.properties etc/kafka-connect-hbase/hbase-sink.properties 76 | ``` 77 | 78 | * Test with the Avro console producer: start the console to create the topic and write values 79 | 80 | ```bash 81 | $CONFLUENT_HOME/bin/kafka-avro-console-producer \ 82 | --broker-list localhost:9092 --topic test \ 83 | --property value.schema='{"type":"record","name":"record","fields":[{"name":"id","type":"int"}, {"name":"name", "type": "string"}]}' 84 | ``` 85 | 86 | ```bash 87 | #insert at prompt 88 | {"id": 1, "name": "foo"} 89 | {"id": 2, "name": "bar"} 90 | ``` 91 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | io.svectors.connect 8 | kafka-connect-hbase 9 | 0.1 10 | 11 | 12 | UTF-8 13 | 1.2.0 14 | 2.6.0 15 | 0.9.0.0 16 | 4.11 17 | 2.7.1 18 | 16.0 19 | 2.0.0 20 | 21 | 22 | 23 | 24 | confluent 25 | Confluent 26 | http://packages.confluent.io/maven/ 27 | 28 | 29 | 30 | 31 | 32 | junit 33 | junit 34 | ${junit.version} 35 | test 36 | 37 | 38 | org.apache.kafka 39 | kafka-clients 40 | ${kafka.version} 41 | 42 | 43 | org.apache.kafka 44 | kafka_2.10 45 |
${kafka.version} 46 | test 47 | 48 | 49 | org.apache.kafka 50 | connect-runtime 51 | ${kafka.version} 52 | 53 | 54 | org.apache.kafka 55 | connect-api 56 | ${kafka.version} 57 | provided 58 | 59 | 60 | org.apache.kafka 61 | connect-json 62 | ${kafka.version} 63 | provided 64 | 65 | 66 | org.apache.kafka 67 | kafka_2.10 68 | ${kafka.version} 69 | provided 70 | 71 | 72 | org.apache.hbase 73 | hbase-server 74 | ${hbase.version} 75 | provided 76 | 77 | 78 | avro 79 | org.apache.avro 80 | 81 | 82 | 83 | 84 | org.apache.hbase 85 | hbase-client 86 | ${hbase.version} 87 | 88 | 89 | org.apache.hbase 90 | hbase-common 91 | ${hbase.version} 92 | 93 | 94 | org.apache.hbase 95 | hbase-testing-util 96 | ${hbase.version} 97 | test 98 | 99 | 100 | avro 101 | org.apache.avro 102 | 103 | 104 | 105 | 106 | io.confluent 107 | kafka-avro-serializer 108 | ${confluent.version} 109 | 110 | 111 | avro 112 | org.apache.avro 113 | 114 | 115 | 116 | 117 | io.confluent 118 | kafka-connect-avro-converter 119 | ${confluent.version} 120 | 121 | 122 | avro 123 | org.apache.avro 124 | 125 | 126 | 127 | 128 | org.apache.avro 129 | avro 130 | 1.7.7 131 | 132 | 133 | 134 | 135 | 136 | 137 | org.apache.maven.plugins 138 | maven-compiler-plugin 139 | 3.0 140 | 141 | 1.8 142 | 1.8 143 | 144 | 145 | 146 | org.apache.maven.plugins 147 | maven-shade-plugin 148 | 2.3 149 | 150 | hbase-sink 151 | true 152 | 153 | 154 | *:* 155 | 156 | META-INF/*.SF 157 | META-INF/*.DSA 158 | META-INF/*.RSA 159 | 160 | 161 | 162 | 163 | 164 | 165 | package 166 | 167 | shade 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/HBaseClient.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase; 19 | 20 | import com.google.common.base.Preconditions; 21 | import io.svectors.hbase.sink.SinkConnectorException; 22 | import org.apache.hadoop.hbase.TableName; 23 | import org.apache.hadoop.hbase.client.BufferedMutator; 24 | import org.apache.hadoop.hbase.client.Connection; 25 | import org.apache.hadoop.hbase.client.Put; 26 | import java.util.List; 27 | 28 | /** 29 | * @author ravi.magham 30 | */ 31 | public final class HBaseClient { 32 | 33 | private final HBaseConnectionFactory connectionFactory; 34 | 35 | public HBaseClient(final HBaseConnectionFactory connectionFactory) { 36 | this.connectionFactory = connectionFactory; 37 | } 38 | 39 | public void write(final String tableName, final List<Put> puts) { 40 | Preconditions.checkNotNull(tableName); 41 | Preconditions.checkNotNull(puts); 42 | final TableName table = TableName.valueOf(tableName); 43 | write(table, puts); 44 | } 45 | 46 | public void write(final TableName table, final List<Put> puts) { 47 | Preconditions.checkNotNull(table); 48 | Preconditions.checkNotNull(puts); 49 | try(final Connection connection = this.connectionFactory.getConnection(); 50 | final BufferedMutator mutator = connection.getBufferedMutator(table)) { 51 | mutator.mutate(puts); 52 | mutator.flush(); 53 | } catch(Exception ex) { 54 | final String errorMsg = String.format("Failed with a [%s] when writing to table [%s] ", ex.getMessage(), 55 | table.getNameAsString()); 56 | throw new SinkConnectorException(errorMsg, ex); 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/HBaseConnectionFactory.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase; 19 | 20 | import org.apache.hadoop.conf.Configuration; 21 | import org.apache.hadoop.hbase.client.Connection; 22 | import org.apache.hadoop.hbase.client.ConnectionFactory; 23 | 24 | /** 25 | * @author ravi.magham 26 | */ 27 | public class HBaseConnectionFactory { 28 | 29 | private final Configuration configuration; 30 | 31 | public HBaseConnectionFactory(final Configuration configuration) { 32 | this.configuration = configuration; 33 | } 34 | 35 | public Connection getConnection() throws Exception { 36 | return ConnectionFactory.createConnection(configuration); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/config/HBaseSinkConfig.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.config; 19 | 20 | import com.google.common.base.Preconditions; 21 | import io.svectors.hbase.parser.EventParser; 22 | import org.apache.kafka.common.config.AbstractConfig; 23 | import org.apache.kafka.common.config.ConfigDef; 24 | import org.apache.kafka.common.config.ConfigException; 25 | import org.apache.kafka.connect.runtime.ConnectorConfig; 26 | 27 | import java.util.Map; 28 | 29 | /** 30 | * @author ravi.magham 31 | */ 32 | public class HBaseSinkConfig extends AbstractConfig { 33 | 34 | public static final String ZOOKEEPER_QUORUM_CONFIG = "zookeeper.quorum"; 35 | public static final String EVENT_PARSER_CONFIG = "event.parser.class"; 36 | public static String DEFAULT_HBASE_ROWKEY_DELIMITER = ","; 37 | public static String DEFAULT_HBASE_COLUMN_FAMILY = "d"; 38 | 39 | /* 40 | * The configuration for a table "test" will be in the format 41 | * hbase.test.rowkey.columns = id , ts 42 | * hbase.test.rowkey.delimiter = | 43 | */ 44 | public static final String TABLE_ROWKEY_COLUMNS_TEMPLATE = "hbase.%s.rowkey.columns"; 45 | public static final String TABLE_ROWKEY_DELIMITER_TEMPLATE = "hbase.%s.rowkey.delimiter"; 46 | public static final String TABLE_COLUMN_FAMILY_TEMPLATE = "hbase.%s.family"; 47 | 48 | private static ConfigDef CONFIG = new ConfigDef(); 49 | private Map<String, String> properties; 50 | 51 | static { 52 | 53 | CONFIG.define(ZOOKEEPER_QUORUM_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "Zookeeper quorum " + 54 | "of the hbase cluster"); 55 | 56 | CONFIG.define(EVENT_PARSER_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "Event parser class " + 57 | "to parse the SinkRecord"); 58 | 59 | } 60 | 61 | public HBaseSinkConfig(Map<String, String> originals) { 62 | this(CONFIG, originals); 63 | } 64 | 65 | public HBaseSinkConfig(ConfigDef definition, Map<String, String> originals) { 66 | super(definition, originals); 67 | this.properties = originals; 68 | } 69 | 70 | /** 71 | * Validates the properties to ensure the rowkey property is configured for each table. 72 | */ 73 | public void validate() { 74 | final String topicsAsStr = properties.get(ConnectorConfig.TOPICS_CONFIG); 75 | final String[] topics = topicsAsStr.split(","); 76 | for(String topic : topics) { 77 | String key = String.format(TABLE_ROWKEY_COLUMNS_TEMPLATE, topic); 78 | if(!properties.containsKey(key)) { 79 | throw new ConfigException(String.format("No rowkey has been configured for topic [%s]; expected property [%s]", topic, key)); 80 | } 81 | } 82 | } 83 | 84 | /** 85 | * Instantiates and returns the event parser. 86 | * @return the configured {@link EventParser} 87 | */ 88 | public EventParser eventParser() { 89 | try { 90 | final String eventParserClass = getString(EVENT_PARSER_CONFIG); 91 | final Class<? extends EventParser> eventParserImpl = (Class<? extends EventParser>) Class.forName(eventParserClass); 92 | return eventParserImpl.newInstance(); 93 | } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) { 94 | throw new RuntimeException(e); 95 | } 96 | } 97 | 98 | /** 99 | * @param propertyName name of the property 100 | * @param defaultValue value to return when the property is absent 101 | * @return the configured value, or the default 102 | */ 103 | public String getPropertyValue(final String propertyName, final String defaultValue) { 104 | String propertyValue = getPropertyValue(propertyName); 105 | return propertyValue != null ?
propertyValue : defaultValue; 106 | } 107 | 108 | /** 109 | * @param propertyName name of the property 110 | * @return the configured value, or {@code null} when absent 111 | */ 112 | public String getPropertyValue(final String propertyName) { 113 | Preconditions.checkNotNull(propertyName); 114 | return this.properties.get(propertyName); 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/parser/AvroEventParser.java: -------------------------------------------------------------------------------- 1 | package io.svectors.hbase.parser; 2 | 3 | import com.google.common.base.Preconditions; 4 | import io.confluent.connect.avro.AvroData; 5 | import org.apache.avro.generic.GenericRecord; 6 | import org.apache.hadoop.hbase.util.Bytes; 7 | import org.apache.kafka.connect.data.Field; 8 | import org.apache.kafka.connect.data.Schema; 9 | import org.apache.kafka.connect.sink.SinkRecord; 10 | 11 | import java.nio.ByteBuffer; 12 | import java.util.Collections; 13 | import java.util.LinkedHashMap; 14 | import java.util.List; 15 | import java.util.Map; 16 | 17 | /** 18 | * @author ravi.magham 19 | */ 20 | public class AvroEventParser implements EventParser { 21 | 22 | private final static AvroData avroData = new AvroData(100); 23 | private final Map<String, byte[]> EMPTY_MAP = Collections.emptyMap(); 24 | 25 | /** 26 | * Default constructor. 27 | */ 28 | public AvroEventParser() { 29 | } 30 | 31 | @Override 32 | public Map<String, byte[]> parseKey(SinkRecord sr) throws EventParsingException { 33 | return parse(sr.keySchema(), sr.key()); 34 | } 35 | 36 | @Override 37 | public Map<String, byte[]> parseValue(SinkRecord sr) throws EventParsingException { 38 | return parse(sr.valueSchema(), sr.value()); 39 | } 40 | 41 | /** 42 | * Parses the value into a map of field name to HBase-encoded bytes. 43 | * @param schema Connect schema of the value 44 | * @param value the Connect value 45 | * @return map of field name to encoded bytes 46 | */ 47 | private Map<String, byte[]> parse(final Schema schema, final Object value) { 48 | final Map<String, byte[]> values = new LinkedHashMap<>(); 49 | try { 50 | Object data = avroData.fromConnectData(schema, value); 51 | if (data == null || !(data instanceof GenericRecord)) { 52 | return EMPTY_MAP; 53 | } 54 | final GenericRecord record = (GenericRecord) data; 55 | final List<Field> fields = schema.fields(); 56 | for (Field field : fields) { 57 | final byte[] fieldValue = toValue(record, field); 58 | if (fieldValue == null) { 59 | continue; 60 | } 61 | values.put(field.name(), fieldValue); 62 | } 63 | return values; 64 | } catch (Exception ex) { 65 | final String errorMsg = String.format("Failed to parse the schema [%s] , value [%s] with ex [%s]" , 66 | schema, value, ex.getMessage()); 67 | throw new EventParsingException(errorMsg, ex); 68 | } 69 | } 70 | 71 | private byte[] toValue(final GenericRecord record, final Field field) { 72 | Preconditions.checkNotNull(field); 73 | final Schema.Type type = field.schema().type(); 74 | final String fieldName = field.name(); 75 | final Object fieldValue = record.get(fieldName); 76 | switch (type) { 77 | case STRING: 78 | return Bytes.toBytes((String) fieldValue); 79 | case BOOLEAN: 80 | return Bytes.toBytes((Boolean)fieldValue); 81 | case BYTES: 82 | return Bytes.toBytes((ByteBuffer) fieldValue); 83 | case FLOAT32: 84 | return Bytes.toBytes((Float)fieldValue); 85 | case FLOAT64: 86 | return Bytes.toBytes((Double)fieldValue); 87 | case INT8: 88 | return Bytes.toBytes((Byte)fieldValue); 89 | case INT16: 90 | return Bytes.toBytes((Short)fieldValue); 91 | case INT32: 92 | return Bytes.toBytes((Integer)fieldValue); 93 | case INT64: 94 | return Bytes.toBytes((Long)fieldValue); 95 | default: 96 | return null; 97 | } 98 | } 99 | } 100 |
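To make the parser contract concrete, here is a minimal usage sketch (the wrapper class below is illustrative and not part of this repo; it mirrors TestAvroEventParser further down). It feeds the README's {"id": int, "name": string} record through AvroEventParser and reads back the HBase-encoded bytes:

```java
import io.svectors.hbase.parser.AvroEventParser;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

import java.util.Map;

public class AvroEventParserExample {
    public static void main(String[] args) {
        // Connect schema matching the README's avro console example.
        final Schema valueSchema = SchemaBuilder.struct().name("record").version(1)
                .field("id", Schema.INT32_SCHEMA)
                .field("name", Schema.STRING_SCHEMA)
                .build();
        final Struct value = new Struct(valueSchema).put("id", 1).put("name", "foo");

        // Topic "test", partition 0, no key schema/key, offset 0.
        final SinkRecord record = new SinkRecord("test", 0, null, null, valueSchema, value, 0);

        // Each field is serialized with HBase's Bytes utility, keyed by field name.
        final Map<String, byte[]> parsed = new AvroEventParser().parseValue(record);
        System.out.println(Bytes.toInt(parsed.get("id")));      // 1
        System.out.println(Bytes.toString(parsed.get("name"))); // foo
    }
}
```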
-------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/parser/EventParser.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.parser; 19 | 20 | import org.apache.kafka.connect.sink.SinkRecord; 21 | 22 | import java.util.Map; 23 | 24 | /** 25 | * @author ravi.magham 26 | */ 27 | public interface EventParser { 28 | 29 | /** 30 | * Parses the key based on the key schema. 31 | * @param sr the sink record 32 | * @return map of field name to encoded bytes 33 | */ 34 | Map<String, byte[]> parseKey(SinkRecord sr) throws EventParsingException; 35 | 36 | /** 37 | * Parses the values based on the value schema. 38 | * @param sr the sink record 39 | * @return map of field name to encoded bytes 40 | */ 41 | Map<String, byte[]> parseValue(SinkRecord sr) throws EventParsingException; 42 | } 43 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/parser/EventParsingException.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.parser; 19 | 20 | /** 21 | * @author ravi.magham 22 | */ 23 | public class EventParsingException extends RuntimeException { 24 | 25 | /** 26 | * 27 | */ 28 | private static final long serialVersionUID = -5861884289109519422L; 29 | 30 | public EventParsingException() { 31 | super(); 32 | } 33 | 34 | public EventParsingException(String message) { 35 | super(message); 36 | } 37 | 38 | public EventParsingException(String message, Throwable cause) { 39 | super(message, cause); 40 | } 41 | 42 | public EventParsingException(Throwable cause) { 43 | super(cause); 44 | } 45 | 46 | protected EventParsingException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 47 | super(message, cause, enableSuppression, writableStackTrace); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/parser/JsonEventParser.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.parser; 19 | 20 | import com.fasterxml.jackson.core.type.TypeReference; 21 | import com.fasterxml.jackson.databind.JsonNode; 22 | import com.fasterxml.jackson.databind.ObjectMapper; 23 | import com.fasterxml.jackson.databind.ObjectReader; 24 | import com.google.common.base.Preconditions; 25 | import org.apache.hadoop.hbase.util.Bytes; 26 | import org.apache.kafka.connect.data.Field; 27 | import org.apache.kafka.connect.data.Schema; 28 | import org.apache.kafka.connect.json.JsonConverter; 29 | import org.apache.kafka.connect.sink.SinkRecord; 30 | 31 | import java.nio.ByteBuffer; 32 | import java.util.Collections; 33 | import java.util.HashMap; 34 | import java.util.LinkedHashMap; 35 | import java.util.List; 36 | import java.util.Map; 37 | 38 | /** 39 | * Parses a JSON event. 40 | * @author ravi.magham 41 | */ 42 | public class JsonEventParser implements EventParser { 43 | 44 | private final static ObjectMapper OBJECT_MAPPER = new ObjectMapper(); 45 | private final static ObjectReader JSON_READER = OBJECT_MAPPER.reader(JsonNode.class); 46 | 47 | private final JsonConverter keyConverter; 48 | private final JsonConverter valueConverter; 49 | 50 | /** 51 | * Default constructor. 52 | */ 53 | public JsonEventParser() { 54 | this.keyConverter = new JsonConverter(); 55 | this.valueConverter = new JsonConverter(); 56 | 57 | Map<String, String> props = new HashMap<>(1); 58 | props.put("schemas.enable", Boolean.FALSE.toString()); 59 | 60 | this.keyConverter.configure(props, true); 61 | this.valueConverter.configure(props, false); 62 | 63 | } 64 | 65 | @Override 66 | public Map<String, byte[]> parseKey(SinkRecord sr) throws EventParsingException { 67 | return this.parse(sr.topic(), sr.keySchema(), sr.key(), true); 68 | } 69 | 70 | @Override 71 | public Map<String, byte[]> parseValue(SinkRecord sr) throws EventParsingException { 72 | return this.parse(sr.topic(), sr.valueSchema(), sr.value(), false); 73 | } 74 | 75 | /** 76 | * Parses the value.
77 | * @param topic 78 | * @param schema 79 | * @param value 80 | * @return 81 | * @throws EventParsingException 82 | */ 83 | public Map<String, byte[]> parse(final String topic, final Schema schema, final Object value, final boolean isKey) 84 | throws EventParsingException { 85 | final Map<String, byte[]> values = new LinkedHashMap<>(); 86 | try { 87 | byte[] valueBytes = null; 88 | if(isKey) { 89 | valueBytes = keyConverter.fromConnectData(topic, schema, value); 90 | } else { 91 | valueBytes = valueConverter.fromConnectData(topic, schema, value); 92 | } 93 | if(valueBytes == null || valueBytes.length == 0) { 94 | return Collections.emptyMap(); 95 | } 96 | 97 | final JsonNode valueNode = JSON_READER.readValue(valueBytes); 98 | final Map<String, Object> keyValues = OBJECT_MAPPER.convertValue(valueNode, 99 | new TypeReference<Map<String, Object>>() {}); 100 | 101 | final List<Field> fields = schema.fields(); 102 | for(Field field : fields) { 103 | final byte[] fieldValue = toValue(keyValues, field); 104 | if(fieldValue == null) { 105 | continue; 106 | } 107 | values.put(field.name(), fieldValue); 108 | } 109 | return values; 110 | } catch (Exception ex) { 111 | final String errorMsg = String.format("Failed to parse the schema [%s] , value [%s] with ex [%s]" , 112 | schema, value, ex.getMessage()); 113 | throw new EventParsingException(errorMsg, ex); 114 | } 115 | } 116 | 117 | /** 118 | * 119 | * @param keyValues 120 | * @param field 121 | * @return 122 | */ 123 | private byte[] toValue(final Map<String, Object> keyValues, final Field field) { 124 | Preconditions.checkNotNull(field); 125 | final Schema.Type type = field.schema().type(); 126 | final String fieldName = field.name(); 127 | final Object fieldValue = keyValues.get(fieldName); 128 | switch (type) { 129 | case STRING: 130 | return Bytes.toBytes((String) fieldValue); 131 | case BOOLEAN: 132 | return Bytes.toBytes((Boolean)fieldValue); 133 | case BYTES: 134 | return Bytes.toBytes((ByteBuffer) fieldValue); 135 | case FLOAT32: 136 | return Bytes.toBytes((Float)fieldValue); 137 | case FLOAT64: 138 | return Bytes.toBytes((Double)fieldValue); 139 | case INT8: 140 | return Bytes.toBytes((Byte)fieldValue); 141 | case INT16: 142 | return Bytes.toBytes((Short)fieldValue); 143 | case INT32: 144 | return Bytes.toBytes((Integer)fieldValue); 145 | case INT64: 146 | return Bytes.toBytes((Long)fieldValue); 147 | default: 148 | return null; 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/sink/HBaseSinkConnector.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.sink; 19 | 20 | import com.google.common.collect.Lists; 21 | import org.apache.kafka.connect.connector.Task; 22 | import org.apache.kafka.connect.sink.SinkConnector; 23 | 24 | import java.util.List; 25 | import java.util.Map; 26 | 27 | /** 28 | * @author ravi.magham 29 | */ 30 | public class HBaseSinkConnector extends SinkConnector { 31 | 32 | public static final String VERSION = "1.0"; 33 | private Map<String, String> configProperties; 34 | 35 | @Override 36 | public String version() { 37 | return VERSION; 38 | } 39 | 40 | @Override 41 | public void start(Map<String, String> props) { 42 | this.configProperties = props; 43 | } 44 | 45 | @Override 46 | public Class<? extends Task> taskClass() { 47 | return HBaseSinkTask.class; 48 | } 49 | 50 | @Override 51 | public List<Map<String, String>> taskConfigs(int maxTasks) { 52 | List<Map<String, String>> configs = Lists.newArrayList(); 53 | for (int i = 0; i < maxTasks; i++) { 54 | configs.add(configProperties); 55 | } 56 | return configs; 57 | } 58 | 59 | @Override 60 | public void stop() { 61 | // NO-OP 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/sink/HBaseSinkTask.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.sink; 19 | 20 | import static java.util.stream.Collectors.groupingBy; 21 | import static java.util.stream.Collectors.toList; 22 | import static java.util.stream.Collectors.toMap; 23 | 24 | import io.svectors.hbase.HBaseClient; 25 | import io.svectors.hbase.HBaseConnectionFactory; 26 | import io.svectors.hbase.util.ToPutFunction; 27 | import org.apache.hadoop.conf.Configuration; 28 | import org.apache.hadoop.hbase.HBaseConfiguration; 29 | import org.apache.hadoop.hbase.HConstants; 30 | import org.apache.hadoop.hbase.client.Put; 31 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 32 | import org.apache.kafka.common.TopicPartition; 33 | import org.apache.kafka.connect.sink.SinkRecord; 34 | import org.apache.kafka.connect.sink.SinkTask; 35 | 36 | import java.util.Collection; 37 | import java.util.List; 38 | import java.util.Map; 39 | 40 | import io.svectors.hbase.config.HBaseSinkConfig; 41 | 42 | 43 | /** 44 | * @author ravi.magham 45 | */ 46 | public class HBaseSinkTask extends SinkTask { 47 | 48 | private ToPutFunction toPutFunction; 49 | private HBaseClient hBaseClient; 50 | 51 | @Override 52 | public String version() { 53 | return HBaseSinkConnector.VERSION; 54 | } 55 | 56 | @Override 57 | public void start(Map<String, String> props) { 58 | final HBaseSinkConfig sinkConfig = new HBaseSinkConfig(props); 59 | sinkConfig.validate(); // we need to do some sanity checks of the properties we configure. 60 | 61 | final String zookeeperQuorum = sinkConfig.getString(HBaseSinkConfig.ZOOKEEPER_QUORUM_CONFIG); 62 | final Configuration configuration = HBaseConfiguration.create(); 63 | configuration.set(HConstants.ZOOKEEPER_QUORUM, zookeeperQuorum); 64 | 65 | final HBaseConnectionFactory connectionFactory = new HBaseConnectionFactory(configuration); 66 | this.hBaseClient = new HBaseClient(connectionFactory); 67 | this.toPutFunction = new ToPutFunction(sinkConfig); 68 | } 69 | 70 | @Override 71 | public void put(Collection<SinkRecord> records) { 72 | Map<String, List<SinkRecord>> byTopic = records.stream() 73 | .collect(groupingBy(SinkRecord::topic)); 74 | 75 | Map<String, List<Put>> byTable = byTopic.entrySet().stream() 76 | .collect(toMap(Map.Entry::getKey, 77 | (e) -> e.getValue().stream().map(sr -> toPutFunction.apply(sr)).collect(toList()))); 78 | 79 | byTable.entrySet().parallelStream().forEach(entry -> { 80 | hBaseClient.write(entry.getKey(), entry.getValue()); 81 | }); 82 | } 83 | 84 | @Override 85 | public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) { 86 | // NO-OP 87 | } 88 | 89 | @Override 90 | public void stop() { 91 | // NO-OP 92 | } 93 | 94 | } 95 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/sink/SinkConnectorException.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License.
You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.sink; 19 | 20 | /** 21 | * @author ravi.magham 22 | */ 23 | public class SinkConnectorException extends RuntimeException { 24 | 25 | /** 26 | * 27 | */ 28 | private static final long serialVersionUID = -7544850650938270177L; 29 | 30 | public SinkConnectorException() { 31 | super(); 32 | } 33 | 34 | public SinkConnectorException(String message) { 35 | super(message); 36 | } 37 | 38 | public SinkConnectorException(String message, Throwable cause) { 39 | super(message, cause); 40 | } 41 | 42 | public SinkConnectorException(Throwable cause) { 43 | super(cause); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/main/java/io/svectors/hbase/util/ToPutFunction.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.util; 19 | 20 | import com.google.common.base.Function; 21 | import com.google.common.base.Preconditions; 22 | 23 | import io.svectors.hbase.parser.EventParser; 24 | import io.svectors.hbase.config.HBaseSinkConfig; 25 | 26 | import org.apache.hadoop.hbase.client.Put; 27 | import org.apache.hadoop.hbase.util.Bytes; 28 | import org.apache.kafka.connect.sink.SinkRecord; 29 | import java.util.Map; 30 | 31 | 32 | /** 33 | * @author ravi.magham 34 | */ 35 | public class ToPutFunction implements Function<SinkRecord, Put> { 36 | 37 | private final HBaseSinkConfig sinkConfig; 38 | private final EventParser eventParser; 39 | 40 | public ToPutFunction(HBaseSinkConfig sinkConfig) { 41 | this.sinkConfig = sinkConfig; 42 | this.eventParser = sinkConfig.eventParser(); 43 | } 44 | 45 | /** 46 | * Converts the sinkRecord to a {@link Put} instance. 47 | * The event parser parses the key schema of sinkRecord only when there is 48 | * no property configured for {@link HBaseSinkConfig#TABLE_ROWKEY_COLUMNS_TEMPLATE} 49 | * 50 | * @param sinkRecord 51 | * @return 52 | */ 53 | @Override 54 | public Put apply(final SinkRecord sinkRecord) { 55 | Preconditions.checkNotNull(sinkRecord); 56 | final String table = sinkRecord.topic(); 57 | final String columnFamily = columnFamily(table); 58 | final String delimiter = rowkeyDelimiter(table); 59 | 60 | final Map<String, byte[]> valuesMap = this.eventParser.parseValue(sinkRecord); 61 | final Map<String, byte[]> keysMap = this.eventParser.parseKey(sinkRecord); 62 | 63 | valuesMap.putAll(keysMap); 64 | final String[] rowkeyColumns = rowkeyColumns(table); 65 | final byte[] rowkey = toRowKey(valuesMap, rowkeyColumns, delimiter); 66 | 67 | final Put put = new Put(rowkey); 68 | valuesMap.entrySet().stream().forEach(entry -> { 69 | final String qualifier = entry.getKey(); 70 | final byte[] value = entry.getValue(); 71 | put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier), value); 72 | }); 73 | return put; 74 | } 75 | 76 | /** 77 | * A Kafka topic maps 1:1 to an HBase table. 78 | * @param table 79 | * @return 80 | */ 81 | private String[] rowkeyColumns(final String table) { 82 | final String entry = String.format(HBaseSinkConfig.TABLE_ROWKEY_COLUMNS_TEMPLATE, table); 83 | final String entryValue = sinkConfig.getPropertyValue(entry); 84 | return entryValue.split(","); 85 | } 86 | 87 | /** 88 | * Returns the delimiter for a table. If nothing is configured in properties, 89 | * we use the default {@link HBaseSinkConfig#DEFAULT_HBASE_ROWKEY_DELIMITER} 90 | * @param table hbase table. 91 | * @return 92 | */ 93 | private String rowkeyDelimiter(final String table) { 94 | final String entry = String.format(HBaseSinkConfig.TABLE_ROWKEY_DELIMITER_TEMPLATE, table); 95 | final String entryValue = sinkConfig.getPropertyValue(entry, HBaseSinkConfig.DEFAULT_HBASE_ROWKEY_DELIMITER); 96 | return entryValue; 97 | } 98 | 99 | /** 100 | * Returns the column family mapped in configuration for the table. If not present, we use the 101 | * default {@link HBaseSinkConfig#DEFAULT_HBASE_COLUMN_FAMILY} 102 | * @param table hbase table.
103 | * @return 104 | */ 105 | private String columnFamily(final String table) { 106 | final String entry = String.format(HBaseSinkConfig.TABLE_COLUMN_FAMILY_TEMPLATE, table); 107 | final String entryValue = sinkConfig.getPropertyValue(entry, HBaseSinkConfig.DEFAULT_HBASE_COLUMN_FAMILY); 108 | return entryValue; 109 | } 110 | 111 | /** 112 | * Builds the rowkey by joining the given column values with the delimiter. 113 | * @param valuesMap 114 | * @param columns 115 | * @return 116 | */ 117 | private byte[] toRowKey(final Map<String, byte[]> valuesMap, final String[] columns, final String delimiter) { 118 | Preconditions.checkNotNull(valuesMap); 119 | Preconditions.checkNotNull(delimiter); 120 | 121 | byte[] rowkey = null; 122 | byte[] delimiterBytes = Bytes.toBytes(delimiter); 123 | for(String column : columns) { 124 | byte[] columnValue = valuesMap.get(column); 125 | if(rowkey == null) { 126 | rowkey = columnValue; 127 | } else { 128 | rowkey = Bytes.add(rowkey, delimiterBytes, columnValue); 129 | } 130 | } 131 | return rowkey; 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/main/resources/config/hbase-sink.properties: -------------------------------------------------------------------------------- 1 | name=kafka-cdc-hbase 2 | connector.class=io.svectors.hbase.sink.HBaseSinkConnector 3 | tasks.max=1 4 | topics=test 5 | zookeeper.quorum=localhost:2181 6 | event.parser.class=io.svectors.hbase.parser.AvroEventParser 7 | 8 | # properties for hbase table 'test' 9 | hbase.test.rowkey.columns=id 10 | hbase.test.rowkey.delimiter=| 11 | hbase.test.family=d 12 | 13 | -------------------------------------------------------------------------------- /src/test/java/io/svectors/hbase/parser/TestAvroEventParser.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.parser; 19 | 20 | import org.apache.hadoop.hbase.util.Bytes; 21 | import org.apache.kafka.connect.data.Schema; 22 | import org.apache.kafka.connect.data.SchemaBuilder; 23 | import org.apache.kafka.connect.data.Struct; 24 | import org.apache.kafka.connect.sink.SinkRecord; 25 | import org.junit.Assert; 26 | import org.junit.Before; 27 | import org.junit.Test; 28 | 29 | import java.util.Map; 30 | 31 | /** 32 | * @author ravi.magham 33 | */ 34 | public class TestAvroEventParser { 35 | 36 | private AvroEventParser eventParser; 37 | 38 | @Before 39 | public void setup() { 40 | eventParser = new AvroEventParser(); 41 | } 42 | 43 | @Test 44 | public void testParseValue() { 45 | final Schema valueSchema = SchemaBuilder.struct().name("record").version(1) 46 | .field("url", Schema.STRING_SCHEMA) 47 | .field("id", Schema.INT32_SCHEMA) 48 | .field("zipcode", Schema.INT32_SCHEMA) 49 | .field("status", Schema.BOOLEAN_SCHEMA) 50 | .build(); 51 | 52 | String url = "google.com"; 53 | int id = 1; 54 | int zipcode = 95051; 55 | boolean status = true; 56 | 57 | final Struct record = new Struct(valueSchema) 58 | .put("url", url) 59 | .put("id", id) 60 | .put("zipcode", zipcode) 61 | .put("status", status); 62 | 63 | final SinkRecord sinkRecord = new SinkRecord("test", 0, null, null, valueSchema, record, 0); 64 | 65 | Map<String, byte[]> result = eventParser.parseValue(sinkRecord); 66 | Assert.assertEquals(4, result.size()); 67 | Assert.assertEquals(url, Bytes.toString(result.get("url"))); 68 | Assert.assertEquals(id, Bytes.toInt(result.get("id"))); 69 | Assert.assertEquals(zipcode, Bytes.toInt(result.get("zipcode"))); 70 | Assert.assertEquals(status, Bytes.toBoolean(result.get("status"))); 71 | } 72 | 73 | @Test 74 | public void testParseNullKey() { 75 | final SinkRecord sinkRecord = new SinkRecord("test", 0, null, null, null, null, 0); 76 | final Map<String, byte[]> keys = eventParser.parseKey(sinkRecord); 77 | Assert.assertTrue(keys.isEmpty()); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/test/java/io/svectors/hbase/parser/TestJsonEventParser.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | package io.svectors.hbase.parser; 19 | 20 | import org.apache.hadoop.hbase.util.Bytes; 21 | import org.apache.kafka.connect.data.Schema; 22 | import org.apache.kafka.connect.data.SchemaBuilder; 23 | import org.apache.kafka.connect.data.Struct; 24 | import org.apache.kafka.connect.sink.SinkRecord; 25 | import org.junit.Assert; 26 | import org.junit.Before; 27 | import org.junit.Test; 28 | 29 | import java.util.Map; 30 | 31 | /** 32 | * @author ravi.magham 33 | */ 34 | public class TestJsonEventParser { 35 | 36 | private JsonEventParser eventParser; 37 | 38 | @Before 39 | public void setup() { 40 | eventParser = new JsonEventParser(); 41 | } 42 | 43 | @Test 44 | public void testParseValue() { 45 | final Schema valueSchema = SchemaBuilder.struct().name("record").version(1) 46 | .field("url", Schema.STRING_SCHEMA) 47 | .field("id", Schema.INT32_SCHEMA) 48 | .field("zipcode", Schema.INT32_SCHEMA) 49 | .field("status", Schema.BOOLEAN_SCHEMA) 50 | .build(); 51 | 52 | String url = "google.com"; 53 | int id = 1; 54 | int zipcode = 95051; 55 | boolean status = true; 56 | 57 | final Struct record = new Struct(valueSchema) 58 | .put("url", url) 59 | .put("id", id) 60 | .put("zipcode", zipcode) 61 | .put("status", status); 62 | 63 | final SinkRecord sinkRecord = new SinkRecord("test", 0, null, null, valueSchema, record, 0); 64 | 65 | Map<String, byte[]> result = eventParser.parseValue(sinkRecord); 66 | Assert.assertEquals(4, result.size()); 67 | Assert.assertEquals(url, Bytes.toString(result.get("url"))); 68 | Assert.assertEquals(id, Bytes.toInt(result.get("id"))); 69 | Assert.assertEquals(zipcode, Bytes.toInt(result.get("zipcode"))); 70 | Assert.assertEquals(status, Bytes.toBoolean(result.get("status"))); 71 | } 72 | 73 | @Test 74 | public void testParseNullKey() { 75 | final SinkRecord sinkRecord = new SinkRecord("test", 0, null, null, null, null, 0); 76 | final Map<String, byte[]> keys = eventParser.parseKey(sinkRecord); 77 | Assert.assertTrue(keys.isEmpty()); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/test/java/io/svectors/hbase/sink/HbaseTestUtil.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | package io.svectors.hbase.sink;
19 | 
20 | import org.apache.hadoop.conf.Configuration;
21 | import org.apache.hadoop.hbase.HBaseConfiguration;
22 | import org.apache.hadoop.hbase.HBaseTestingUtility;
23 | import org.apache.hadoop.hbase.HColumnDescriptor;
24 | import org.apache.hadoop.hbase.HTableDescriptor;
25 | import org.apache.hadoop.hbase.TableName;
26 | import org.apache.hadoop.hbase.client.HBaseAdmin;
27 | import org.apache.hadoop.hbase.util.Bytes;
28 | 
29 | import java.io.IOException;
30 | import java.util.concurrent.atomic.AtomicBoolean;
31 | import java.util.concurrent.atomic.AtomicReference;
32 | 
33 | /**
34 |  * Utility class for hbase tests.
35 |  *
36 |  * @author ravi.magham
37 |  */
38 | public abstract class HbaseTestUtil {
39 | 
40 |     /* status of the mini cluster; true once it is running */
41 |     private static final AtomicBoolean status = new AtomicBoolean();
42 |     private static final AtomicReference<HBaseTestingUtility> utility = new AtomicReference<>();
43 | 
44 |     /**
45 |      * Returns a new HBaseTestingUtility instance.
46 |      */
47 |     private static HBaseTestingUtility createTestingUtility() {
48 |         final Configuration hbaseConf = HBaseConfiguration.create();
49 |         hbaseConf.setInt("replication.stats.thread.period.seconds", 5);
50 |         hbaseConf.setLong("replication.sleep.before.failover", 2000);
51 |         hbaseConf.setInt("replication.source.maxretriesmultiplier", 10);
52 |         return new HBaseTestingUtility(hbaseConf);
53 |     }
54 | 
55 |     public static HBaseTestingUtility getUtility() {
56 |         HBaseTestingUtility testingUtility = utility.get();
57 |         if (testingUtility == null) {
58 |             testingUtility = createTestingUtility();
59 |             utility.set(testingUtility);
60 |         }
61 |         return testingUtility;
62 |     }
63 | 
64 |     /**
65 |      * Starts the mini cluster, unless it is already running.
66 |      */
67 |     public static void startMiniCluster() {
68 |         if (status.compareAndSet(false, true)) {
69 |             try {
70 |                 getUtility().startMiniCluster();
71 |             } catch (Exception e) {
72 |                 status.set(false);
73 |                 throw new RuntimeException("Unable to start the hbase mini cluster", e);
74 |             }
75 |         }
76 |     }
77 | 
78 |     /**
79 |      * Stops the mini cluster, if it is running.
80 |      */
81 |     public static void stopMiniCluster() {
82 |         HBaseTestingUtility testingUtility = getUtility();
83 |         if (testingUtility != null && status.compareAndSet(true, false)) {
84 |             try {
85 |                 testingUtility.shutdownMiniCluster();
86 |             } catch (Exception e) {
87 |                 status.set(true);
88 |                 throw new RuntimeException("Unable to shutdown MiniCluster", e);
89 |             }
90 |         }
91 |     }
92 | 
93 |     /**
94 |      * Creates the table with the given column families and waits until
95 |      * all of its regions have been assigned.
96 |      *
97 |      * @param tableName name of the table to create
98 |      * @param columnFamilies column families to add to the table
99 |      */
100 |     public static void createTable(String tableName, String... columnFamilies) {
101 |         HBaseTestingUtility testingUtility = getUtility();
102 |         if (!status.get()) {
103 |             throw new RuntimeException("The mini cluster hasn't started yet. "
104 |                 + "Call HbaseTestUtil#startMiniCluster() before creating a table");
105 |         }
106 |         final TableName name = TableName.valueOf(tableName);
107 |         try (HBaseAdmin hBaseAdmin = testingUtility.getHBaseAdmin()) {
108 |             final HTableDescriptor hTableDescriptor = new HTableDescriptor(name);
109 |             for (String family : columnFamilies) {
110 |                 final HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(Bytes.toBytes(family));
111 |                 hTableDescriptor.addFamily(hColumnDescriptor);
112 |             }
113 | 
114 |             hBaseAdmin.createTable(hTableDescriptor);
115 |             testingUtility.waitUntilAllRegionsAssigned(name);
116 |         } catch (IOException e) {
117 |             throw new RuntimeException(e);
118 |         }
119 |     }
120 | }
121 | 
--------------------------------------------------------------------------------
/src/test/java/io/svectors/hbase/sink/TestHbaseSinkTask.java:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  * http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | package io.svectors.hbase.sink;
19 | 
20 | import static io.svectors.hbase.sink.HbaseTestUtil.startMiniCluster;
21 | import static io.svectors.hbase.sink.HbaseTestUtil.stopMiniCluster;
22 | import static io.svectors.hbase.sink.HbaseTestUtil.createTable;
23 | import static io.svectors.hbase.sink.HbaseTestUtil.getUtility;
24 | 
25 | import io.svectors.hbase.config.HBaseSinkConfig;
26 | import io.svectors.hbase.parser.AvroEventParser;
27 | import io.svectors.hbase.parser.JsonEventParser;
28 | 
29 | import org.apache.hadoop.conf.Configuration;
30 | import org.apache.hadoop.hbase.TableName;
31 | import org.apache.hadoop.hbase.client.ConnectionFactory;
32 | import org.apache.hadoop.hbase.client.Result;
33 | import org.apache.hadoop.hbase.client.ResultScanner;
34 | import org.apache.hadoop.hbase.client.Scan;
35 | import org.apache.hadoop.hbase.client.Table;
36 | import org.apache.hadoop.hbase.util.Bytes;
37 | import org.apache.kafka.connect.data.Schema;
38 | import org.apache.kafka.connect.data.SchemaBuilder;
39 | import org.apache.kafka.connect.data.Struct;
40 | import org.apache.kafka.connect.runtime.ConnectorConfig;
41 | import org.apache.kafka.connect.sink.SinkRecord;
42 | 
43 | import org.junit.After;
44 | import org.junit.Assert;
45 | import org.junit.Before;
46 | import org.junit.Test;
47 | 
48 | import java.io.IOException;
49 | import java.util.ArrayList;
50 | import java.util.Collection;
51 | import java.util.HashMap;
52 | import java.util.Map;
53 | import java.util.function.Function;
54 | 
55 | 
56 | /**
57 |  * Integration test of the HBase sink.
58 |  *
59 |  * @author ravi.magham
60 |  */
61 | public class TestHbaseSinkTask {
62 | 
63 |     private final Function<Integer, String> TO_LOCAL_URI = (port) -> "localhost:" + port;
64 |     private final String hbaseTable = "test"; // doubles as the kafka topic name.
65 |     private final String columnFamily = "d";
66 |     private final Map<String, String> configProps = new HashMap<>();
67 |     private Configuration configuration;
68 | 
69 |     @Before
70 |     public void setUp() throws Exception {
71 |         startMiniCluster();
72 |         createTable(hbaseTable, columnFamily);
73 |         configuration = getUtility().getConfiguration();
74 | 
75 |         // configure defaults for the sink task.
76 |         configProps.put("hbase.test.rowkey.columns", "id");
77 |         configProps.put("hbase.test.rowkey.delimiter", "|");
78 |         configProps.put("hbase.test.family", columnFamily);
79 |         configProps.put(ConnectorConfig.TOPICS_CONFIG, hbaseTable);
80 |         configProps.put(HBaseSinkConfig.ZOOKEEPER_QUORUM_CONFIG, TO_LOCAL_URI.apply(getUtility().getZkCluster()
81 |             .getClientPort()));
82 |     }
83 | 
84 |     @Test
85 |     public void testConnectUsingJsonEventParser() throws Exception {
86 |         configProps.put(HBaseSinkConfig.EVENT_PARSER_CONFIG, JsonEventParser.class.getName());
87 |         writeAndValidate();
88 |     }
89 | 
90 |     @Test
91 |     public void testConnectUsingAvroEventParser() throws Exception {
92 |         configProps.put(HBaseSinkConfig.EVENT_PARSER_CONFIG, AvroEventParser.class.getName());
93 |         writeAndValidate();
94 |     }
95 | 
96 |     /**
97 |      * Performs a write through kafka connect and validates the data in hbase.
98 |      *
99 |      * @throws IOException if reading the rows back from hbase fails
100 |      */
101 |     private void writeAndValidate() throws IOException {
102 |         HBaseSinkTask task = new HBaseSinkTask();
103 |         task.start(configProps);
104 | 
105 |         final Schema valueSchema = SchemaBuilder.struct().name("record").version(1)
106 |             .field("url", Schema.STRING_SCHEMA)
107 |             .field("id", Schema.INT32_SCHEMA)
108 |             .field("zipcode", Schema.INT32_SCHEMA)
109 |             .field("status", Schema.INT32_SCHEMA)
110 |             .build();
111 | 
112 |         Collection<SinkRecord> sinkRecords = new ArrayList<>();
113 |         int noOfRecords = 10;
114 |         for (int i = 1; i <= noOfRecords; i++) {
115 |             final Struct record = new Struct(valueSchema)
116 |                 .put("url", "google.com")
117 |                 .put("id", i)
118 |                 .put("zipcode", 95050 + i)
119 |                 .put("status", 400 + i);
120 |             SinkRecord sinkRecord = new SinkRecord(hbaseTable, 0, null, null, valueSchema, record, i);
121 |             sinkRecords.add(sinkRecord);
122 |         }
123 | 
124 |         task.put(sinkRecords);
125 | 
126 |         // read back from hbase.
127 |         TableName table = TableName.valueOf(hbaseTable);
128 |         Scan scan = new Scan();
129 |         try (Table hTable = ConnectionFactory.createConnection(configuration).getTable(table);
130 |              ResultScanner results = hTable.getScanner(scan)) {
131 |             int count = 0;
132 |             for (Result result : results) {
133 |                 int rowId = Bytes.toInt(result.getRow());
134 |                 String url = Bytes.toString(result.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes("url")));
135 |                 Assert.assertEquals(count + 1, rowId);
136 |                 Assert.assertEquals("google.com", url);
137 |                 count++;
138 |             }
139 |             Assert.assertEquals(noOfRecords, count);
140 |         }
141 |         task.stop();
142 |     }
143 | 
144 |     @After
145 |     public void tearDown() throws Exception {
146 |         stopMiniCluster();
147 |     }
148 | }
149 | 
--------------------------------------------------------------------------------
/src/test/java/io/svectors/hbase/util/TestToPutFunction.java:
--------------------------------------------------------------------------------
1 | /**
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  * http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | package io.svectors.hbase.util;
19 | 
20 | import io.svectors.hbase.config.HBaseSinkConfig;
21 | import io.svectors.hbase.parser.JsonEventParser;
22 | import org.apache.hadoop.hbase.client.Put;
23 | import org.apache.hadoop.hbase.util.Bytes;
24 | import org.apache.kafka.connect.data.Schema;
25 | import org.apache.kafka.connect.data.SchemaBuilder;
26 | import org.apache.kafka.connect.data.Struct;
27 | import org.apache.kafka.connect.sink.SinkRecord;
28 | import org.junit.Assert;
29 | import org.junit.Test;
30 | 
31 | import java.util.HashMap;
32 | import java.util.Map;
33 | 
34 | /**
35 |  * @author ravi.magham
36 |  */
37 | public class TestToPutFunction {
38 | 
39 |     @Test
40 |     public void testRowkey() {
41 |         final Map<String, String> configProps = new HashMap<>();
42 |         configProps.put(HBaseSinkConfig.ZOOKEEPER_QUORUM_CONFIG, "localhost");
43 |         configProps.put("hbase.test.rowkey.columns", "id");
44 |         configProps.put(HBaseSinkConfig.EVENT_PARSER_CONFIG, JsonEventParser.class.getName());
45 |         final ToPutFunction toPutFunction = new ToPutFunction(new HBaseSinkConfig(configProps));
46 | 
47 |         final Schema schema = SchemaBuilder.struct().name("record").version(1)
48 |             .field("url", Schema.STRING_SCHEMA)
49 |             .field("id", Schema.INT32_SCHEMA)
50 |             .field("zipcode", Schema.INT32_SCHEMA)
51 |             .field("status", Schema.BOOLEAN_SCHEMA)
52 |             .build();
53 | 
54 |         final Struct record = new Struct(schema)
55 |             .put("url", "google.com")
56 |             .put("id", 123456)
57 |             .put("zipcode", 95051)
58 |             .put("status", true);
59 | 
60 |         final SinkRecord sinkRecord = new SinkRecord("test", 0, null, null, schema, record, 0);
61 |         final Put put = toPutFunction.apply(sinkRecord);
62 |         Assert.assertEquals(123456, Bytes.toInt(put.getRow()));
63 |     }
64 | }
65 | 
--------------------------------------------------------------------------------
/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Root logger option
18 | log4j.rootLogger=ERROR, console
19 | 
20 | # Direct log messages to stderr
21 | log4j.appender.console=org.apache.log4j.ConsoleAppender
22 | log4j.appender.console.target=System.err
23 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
24 | log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
25 | 
26 | 
--------------------------------------------------------------------------------
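
Usage note: HbaseTestUtil expects a fixed lifecycle — start the mini cluster, create tables, exercise the cluster, then stop it — and createTable throws if the cluster is not up. Below is a minimal sketch of that lifecycle from a standalone class; the table name "events", the column family "d", and the row content are illustrative only, while the client calls mirror the HBase 1.x API already used in TestHbaseSinkTask.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import io.svectors.hbase.sink.HbaseTestUtil;

public class MiniClusterLifecycleSketch {

    public static void main(String[] args) throws Exception {
        // 1. Boot the in-process cluster first; createTable throws otherwise.
        HbaseTestUtil.startMiniCluster();
        HbaseTestUtil.createTable("events", "d");

        // 2. Talk to the cluster through the ordinary client API, using the
        //    mini cluster's Configuration for the connection.
        try (Connection connection = ConnectionFactory.createConnection(
                 HbaseTestUtil.getUtility().getConfiguration());
             Table table = connection.getTable(TableName.valueOf("events"))) {
            Put put = new Put(Bytes.toBytes(1));
            put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("url"), Bytes.toBytes("google.com"));
            table.put(put);
        }

        // 3. Always stop the cluster, mirroring the @After hook in TestHbaseSinkTask.
        HbaseTestUtil.stopMiniCluster();
    }
}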
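The row-key configuration is the piece most likely to trip up a new deployment: TestToPutFunction pins down only the single-column case, while TestHbaseSinkTask also sets hbase.test.rowkey.delimiter, which suggests composite keys joined by a delimiter. The sketch below extends the existing test to a hypothetical two-column key; the expected byte layout (raw column bytes joined by the delimiter's UTF-8 bytes) is an assumption about ToPutFunction's behaviour, not something these tests verify.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.junit.Assert;
import org.junit.Test;

import io.svectors.hbase.config.HBaseSinkConfig;
import io.svectors.hbase.parser.JsonEventParser;
import io.svectors.hbase.util.ToPutFunction;

public class CompositeRowkeySketch {

    @Test
    public void testCompositeRowkey() {
        final Map<String, String> configProps = new HashMap<>();
        configProps.put(HBaseSinkConfig.ZOOKEEPER_QUORUM_CONFIG, "localhost");
        configProps.put(HBaseSinkConfig.EVENT_PARSER_CONFIG, JsonEventParser.class.getName());
        // Two row-key columns joined by "|", following the property names
        // used in TestHbaseSinkTask#setUp.
        configProps.put("hbase.test.rowkey.columns", "id,url");
        configProps.put("hbase.test.rowkey.delimiter", "|");
        final ToPutFunction toPutFunction = new ToPutFunction(new HBaseSinkConfig(configProps));

        final Schema schema = SchemaBuilder.struct().name("record").version(1)
            .field("url", Schema.STRING_SCHEMA)
            .field("id", Schema.INT32_SCHEMA)
            .build();
        final Struct record = new Struct(schema)
            .put("url", "google.com")
            .put("id", 123456);
        final SinkRecord sinkRecord = new SinkRecord("test", 0, null, null, schema, record, 0);

        final Put put = toPutFunction.apply(sinkRecord);
        // ASSUMPTION: the row key is the raw column bytes joined by the
        // delimiter's bytes; adjust if ToPutFunction encodes keys differently.
        final byte[] expected = Bytes.add(
            Bytes.toBytes(123456), Bytes.toBytes("|"), Bytes.toBytes("google.com"));
        Assert.assertArrayEquals(expected, put.getRow());
    }
}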