├── src
│   └── main
│       ├── resources
│       │   ├── my.properties
│       │   └── log4j.properties
│       └── java
│           └── com
│               └── structured
│                   ├── pool
│                   │   ├── tool
│                   │   │   ├── ConnectionFactory.java
│                   │   │   ├── ConnectionException.java
│                   │   │   ├── ConnectionPool.java
│                   │   │   ├── ConnectionPoolConfig.java
│                   │   │   └── ConnectionPoolBase.java
│                   │   └── hbase
│                   │       ├── HbaseConfig.java
│                   │       ├── HbaseSharedConnPool.java
│                   │       ├── HbaseConnectionPool.java
│                   │       └── HbaseConnectionFactory.java
│                   ├── APP
│                   │   ├── structuredJava.java
│                   │   └── ForeachWriterHBase.java
│                   ├── conf
│                   │   └── ConfigurationManager.java
│                   └── constant
│                       └── Constants.java
├── README.md
└── pom.xml
/src/main/resources/my.properties:
--------------------------------------------------------------------------------
1 | kafka.metadata.broker.list=
2 | zk.metadata.broker.list=
3 | 
4 | hbase.pool.max-total=200
5 | hbase.pool.max-idle=100
6 | hbase.pool.max-waitmillis=500
7 | hbase.pool.testonborrow=true
8 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## structured-streaming-Kafka2HBase Project Introduction
2 | A Spark Structured Streaming job that consumes data from Kafka and writes it to HBase
3 | Just fill in the basic configuration and the project is ready to run.
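
For a quick smoke test you can publish one record in the expected format with the plain Kafka producer API. The sketch below is illustrative only: the topic name `Kafka2HBase` matches the subscription in `structuredJava.java`, while the broker address `localhost:9092` is a placeholder for whatever you set in `kafka.metadata.broker.list` (the `kafka-clients` classes come in transitively through `spark-sql-kafka-0-10`).

```
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class SendTestRecord {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder broker address; use your kafka.metadata.broker.list value here.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // dbName, tableName, rowkey and JSON payload joined with the \001 separator,
        // matching the format documented below.
        String value = "A" + "\001" + "test2" + "\001" + "1111" + "\001"
                + "{\"a1\":\"a1\",\"a2\":\"a3\"}";

        // KafkaProducer is Closeable; closing flushes the pending record.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("Kafka2HBase", value));
        }
    }
}
```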
4 | 
5 | ### Data format
6 | 
7 | ```
8 | Records are written in the following format (fields separated by \001):
9 | dbName tableName tableRowkey tableData
10 | tets test 1111 {"a1":"a1","a2":"a3"}
11 | ```
12 | 
13 | ### Example record
14 | ```
15 | "A"+ "\001"
16 | + "test2"+ "\001"
17 | + "1111"+ "\001"
18 | + "{\"a1\":\"a1\",\"a2\":\"a3\"}"
19 | ```
20 | 
--------------------------------------------------------------------------------
/src/main/java/com/structured/pool/tool/ConnectionFactory.java:
--------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.structured.pool.tool; 17 | 18 | import org.apache.commons.pool2.PooledObjectFactory; 19 | 20 | import java.io.Serializable; 21 | 22 | public interface ConnectionFactory extends PooledObjectFactory, Serializable { 23 | 24 | /** 25 | *

Title: createConnection

26 | *

Description: 创建连接

27 | * 28 | * @return 连接 29 | * @throws Exception 30 | */ 31 | public abstract T createConnection() throws Exception; 32 | } 33 | -------------------------------------------------------------------------------- /src/main/java/com/structured/APP/structuredJava.java: -------------------------------------------------------------------------------- 1 | package com.structured.APP; 2 | 3 | import org.apache.spark.sql.Dataset; 4 | import org.apache.spark.sql.Row; 5 | import org.apache.spark.sql.SparkSession; 6 | import org.apache.spark.sql.streaming.StreamingQuery; 7 | import org.apache.spark.sql.streaming.StreamingQueryException; 8 | 9 | import com.structured.conf.ConfigurationManager; 10 | import com.structured.constant.Constants; 11 | 12 | public class structuredJava { 13 | 14 | public static void main(String[] args) { 15 | 16 | SparkSession spark = SparkSession.builder() 17 | .appName("structured-streaming-Kafka2HBase") 18 | .master("local[4]") 19 | .getOrCreate(); 20 | 21 | Dataset line = spark 22 | .readStream() 23 | .format("kafka") 24 | .option("kafka.bootstrap.servers",ConfigurationManager.getProperty(Constants.KAFKA_METADATA_BROKER_LIST)) 25 | .option("subscribe", "Kafka2HBase").load(); 26 | 27 | Dataset dataset = line.selectExpr("CAST(value AS STRING)"); 28 | 29 | StreamingQuery query = dataset.writeStream() 30 | .foreach(new ForeachWriterHBase()) 31 | .outputMode("update").start(); 32 | 33 | try { 34 | query.awaitTermination(); 35 | } catch (StreamingQueryException e) { 36 | e.printStackTrace(); 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/tool/ConnectionException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.structured.pool.tool; 17 | 18 | public class ConnectionException extends RuntimeException { 19 | 20 | private static final long serialVersionUID = -6503525110247209484L; 21 | 22 | public ConnectionException() { 23 | super(); 24 | } 25 | 26 | public ConnectionException(String message) { 27 | super(message); 28 | } 29 | 30 | public ConnectionException(Throwable e) { 31 | super(e); 32 | } 33 | 34 | public ConnectionException(String message, Throwable cause) { 35 | super(message, cause); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/tool/ConnectionPool.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.structured.pool.tool; 17 | 18 | import java.io.Serializable; 19 | 20 | public interface ConnectionPool extends Serializable { 21 | 22 | /** 23 | *

Title: getConnection

24 | *

Description: 获取连接

25 | * 26 | * @return 连接 27 | */ 28 | public abstract T getConnection(); 29 | 30 | /** 31 | *

Title: returnConnection

32 | *

Description: 返回连接

33 | * 34 | * @param conn 连接 35 | */ 36 | public void returnConnection(T conn); 37 | 38 | /** 39 | *

Title: invalidateConnection

40 | *

Description: 废弃连接

41 | * 42 | * @param conn 连接 43 | */ 44 | public void invalidateConnection(T conn); 45 | } 46 | -------------------------------------------------------------------------------- /src/main/java/com/structured/conf/ConfigurationManager.java: -------------------------------------------------------------------------------- 1 | package com.structured.conf; 2 | 3 | import java.io.InputStream; 4 | import java.util.Properties; 5 | 6 | /** 7 | * 配置管理组件 8 | * 9 | * @author rttxdu 10 | * 11 | */ 12 | final public class ConfigurationManager { 13 | 14 | private static Properties prop = new Properties(); 15 | 16 | static { 17 | try { 18 | InputStream in = ConfigurationManager.class 19 | .getClassLoader().getResourceAsStream("my.properties"); 20 | prop.load(in); 21 | } catch (Exception e) { 22 | e.printStackTrace(); 23 | } 24 | } 25 | 26 | /** 27 | * 获取指定key对应的value 28 | * @param key 29 | * @return value 30 | */ 31 | public static String getProperty(String key) { 32 | return prop.getProperty(key); 33 | } 34 | 35 | /** 36 | * 获取整数类型的配置项 37 | * @param key 38 | * @return value 39 | */ 40 | public static Integer getInteger(String key) { 41 | String value = getProperty(key); 42 | try { 43 | return Integer.valueOf(value); 44 | } catch (Exception e) { 45 | e.printStackTrace(); 46 | } 47 | return 0; 48 | } 49 | 50 | /** 51 | * 获取布尔类型的配置项 52 | * @param key 53 | * @return value 54 | */ 55 | public static Boolean getBoolean(String key) { 56 | String value = getProperty(key); 57 | try { 58 | return Boolean.valueOf(value); 59 | } catch (Exception e) { 60 | e.printStackTrace(); 61 | } 62 | return false; 63 | } 64 | 65 | /** 66 | * 获取Long类型的配置项 67 | * @param key 68 | * @return 69 | */ 70 | public static Long getLong(String key) { 71 | String value = getProperty(key); 72 | try { 73 | return Long.valueOf(value); 74 | } catch (Exception e) { 75 | e.printStackTrace(); 76 | } 77 | return 0L; 78 | } 79 | 80 | } 81 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/hbase/HbaseConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.structured.pool.hbase; 17 | 18 | public interface HbaseConfig { 19 | 20 | /** 21 | * DEFAULT_HOST 22 | */ 23 | public static final String DEFAULT_HOST = "localhost"; 24 | /** 25 | * DEFAULT_PORT 26 | */ 27 | public static final String DEFAULT_PORT = "2181"; 28 | /** 29 | * DEFAULT_MASTER 30 | */ 31 | public static final String DEFAULT_MASTER = null; 32 | /** 33 | * DEFAULT_ROOTDIR 34 | */ 35 | public static final String DEFAULT_ROOTDIR = null; 36 | 37 | /** 38 | * ZOOKEEPER_QUORUM_PROPERTY 39 | */ 40 | public static final String ZOOKEEPER_QUORUM_PROPERTY = "hbase.zookeeper.quorum"; 41 | /** 42 | * ZOOKEEPER_CLIENTPORT_PROPERTY 43 | */ 44 | public static final String ZOOKEEPER_CLIENTPORT_PROPERTY = "hbase.zookeeper.property.clientPort"; 45 | /** 46 | * MASTER_PROPERTY 47 | */ 48 | public static final String MASTER_PROPERTY = "hbase.master"; 49 | /** 50 | * ROOTDIR_PROPERTY 51 | */ 52 | public static final String ROOTDIR_PROPERTY = "hbase.rootdir"; 53 | 54 | } 55 | -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # Set everything to be logged to the console 19 | log4j.rootCategory=WARN, console 20 | log4j.appender.console=org.apache.log4j.ConsoleAppender 21 | log4j.appender.console.target=System.err 22 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n 24 | 25 | # Settings to quiet third party logs that are too verbose 26 | log4j.logger.org.spark-project.jetty=WARN 27 | log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR 28 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO 29 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO 30 | log4j.logger.org.apache.parquet=ERROR 31 | log4j.logger.parquet=ERROR 32 | 33 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support 34 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL 35 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR 36 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/tool/ConnectionPoolConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.structured.pool.tool; 17 | 18 | import org.apache.commons.pool2.impl.GenericObjectPoolConfig; 19 | 20 | import java.io.Serializable; 21 | 22 | public class ConnectionPoolConfig extends GenericObjectPoolConfig implements Serializable { 23 | 24 | /** 25 | * DEFAULT_TEST_WHILE_IDLE 26 | */ 27 | public static final boolean DEFAULT_TEST_WHILE_IDLE = true; 28 | /** 29 | * DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS 30 | */ 31 | public static final long DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS = 60000; 32 | /** 33 | * DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS 34 | */ 35 | public static final long DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS = 30000; 36 | /** 37 | * DEFAULT_NUM_TESTS_PER_EVICTION_RUN 38 | */ 39 | public static final int DEFAULT_NUM_TESTS_PER_EVICTION_RUN = -1; 40 | /** 41 | * serialVersionUID 42 | */ 43 | private static final long serialVersionUID = -2414567557372345057L; 44 | 45 | /** 46 | *

Title: ConnectionPoolConfig

47 | *

Description: 默认构造方法

48 | */ 49 | public ConnectionPoolConfig() { 50 | 51 | // defaults to make your life with connection pool easier :) 52 | setTestWhileIdle(DEFAULT_TEST_WHILE_IDLE); 53 | setMinEvictableIdleTimeMillis(DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS); 54 | setTimeBetweenEvictionRunsMillis(DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS); 55 | setNumTestsPerEvictionRun(DEFAULT_NUM_TESTS_PER_EVICTION_RUN); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/main/java/com/structured/constant/Constants.java: -------------------------------------------------------------------------------- 1 | package com.structured.constant; 2 | 3 | import java.text.SimpleDateFormat; 4 | 5 | /** 6 | * 常量接口 7 | * @author rttxdu 8 | * 9 | */ 10 | public interface Constants { 11 | /** 12 | * Spark作业相关的常量 13 | */ 14 | String HDFS_LODA_ADTA_PATH= "hdfs.LodaData"; 15 | String STREAMING_CHECKPOINT_PATH = "streaming.checkpoint.path"; 16 | String SPARK_RUNMODE_MASTER = "spark.runmode.master"; 17 | String SPARK_PROJECT_NAME = "spark.project.name"; 18 | String SPARK_STREAMING_BATCH_TIME = "spark.string.batch.time"; 19 | 20 | /** 21 | * JDBC配置 22 | */ 23 | String JDBC_URL = "jdbc.url"; 24 | String JDBC_USER = "jdbc.user"; 25 | String JDBC_PASSWORD = "jdbc.password"; 26 | String JDBC_USER_NAME = "user"; 27 | String JDBC_PASSWORD_NAME = "password"; 28 | 29 | /** 30 | * KAFKA和ZK配置 31 | */ 32 | String KAFKA_METADATA_BROKER_LIST = "kafka.metadata.broker.list"; 33 | String ZK_METADATA_BROKER_LIST = "zk.metadata.broker.list"; 34 | String KAFKA_TOPICS_ID = "kafka.topics.id"; 35 | String KAFKA_TOPICS_MYSQL_TABLENAME = "Kafka.topics.mysql.tablename"; 36 | String KAFKA_TOPICS_MYSQL_TOPICNAME = "Kafka.topics.mysql.topic.name"; 37 | String KAFKA_TOPICS_MYSQL_TOPICID = "Kafka.topics.mysql.topic.id"; 38 | String KAFKA_TOPICS_DATA_CHANNEL_DB2HBASE = "Kafka.topics.data.Channel.DB2HBASE"; 39 | String KAFKA_TOPICS_DATA_CHANNEL_FLUME2HBASE= "Kafka.topics.data.Channel.FLUME2HBASE"; 40 | String KAFKA_TOPICS_DATA_CHANNEL_TPTDP2HBASE= "Kafka.topics.data.Channel.TPTDP2HBASE"; 41 | String KAFKA_TOPICS_CHANNEL_DB2HDFS = "Kafka.topics.data.Channel.DB2HDFS"; 42 | 43 | /** 44 | * HBase相关 45 | */ 46 | String CF_DEFAULT = "info"; 47 | String DEFAULT_ROW_KEY = "_pk"; 48 | String HBASE_POOL_MAX_TOTAL = "hbase.pool.max-total"; 49 | String HBASE_POOL_MAX_IDLE = "hbase.pool.max-idle"; 50 | String HBASE_POOL_MAX_WAITMILLIS = "hbase.pool.max-waitmillis"; 51 | String HBASE_POOL_TESTONBORROW = "hbase.pool.testonborrow"; 52 | 53 | /** 54 | * 数据管道json字段 55 | */ 56 | String CHANNEL_JSON_EVEENTCHANNEL = "eventChannel"; 57 | String CHANNEL_JSON_EVEENTID = "eventId"; 58 | String CHANNEL_JSON_EVEENTTIME = "eventTime"; 59 | String CHANNEL_JSON_EVEENTTYPE = "eventType"; 60 | String CHANNEL_JSON_EVEENTDATA = "eventData"; 61 | String CHANNEL_JSON_EVEENTTARGET = "eventTarget"; 62 | 63 | /** 64 | * 分隔符和常量 65 | */ 66 | String SEPARATOR_001 = "\001"; 67 | String SEPARATOR_002 = "\002"; 68 | String ENCODE_UTF8 = "UTF-8"; 69 | String DECODE_UTF8 = "UTF-8"; 70 | 71 | /** 72 | * 用于身份证解析与反解析 73 | */ 74 | String IDCard_BACK_x = "x"; 75 | String IDCard_BACK_X = "X"; 76 | String IDCard_BACK_11 = "11"; 77 | String STR_NUMBER_0 = "0"; 78 | 79 | /** 80 | * 格式化时间 81 | */ 82 | SimpleDateFormat TIME_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); 83 | SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd"); 84 | SimpleDateFormat DATEKEY_FORMAT = new SimpleDateFormat("yyyyMMdd"); 85 | 86 | /** 87 | * jar产生的log日志 88 | */ 89 | String 
JAR_LOG_PATH = "jar.log.path"; 90 | } 91 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/hbase/HbaseSharedConnPool.java: -------------------------------------------------------------------------------- 1 | package com.structured.pool.hbase; 2 | 3 | import org.apache.hadoop.conf.Configuration; 4 | import org.apache.hadoop.hbase.client.Connection; 5 | import org.apache.hadoop.hbase.client.ConnectionFactory; 6 | 7 | import com.structured.pool.tool.ConnectionException; 8 | import com.structured.pool.tool.ConnectionPool; 9 | 10 | import java.io.IOException; 11 | import java.util.Map; 12 | import java.util.Properties; 13 | import java.util.concurrent.atomic.AtomicReference; 14 | 15 | public class HbaseSharedConnPool implements ConnectionPool { 16 | 17 | private static final long serialVersionUID = 1L; 18 | 19 | private static final AtomicReference pool = new AtomicReference(); 20 | 21 | private final Connection connection; 22 | 23 | private HbaseSharedConnPool(Configuration configuration) throws IOException { 24 | this.connection = ConnectionFactory.createConnection(configuration); 25 | } 26 | 27 | /** 28 | * Gets instance. 29 | * 30 | * @param host the host 31 | * @param port the port 32 | * @param master the master 33 | * @param rootdir the rootdir 34 | * @return the instance 35 | */ 36 | public synchronized static HbaseSharedConnPool getInstance(final String host, final String port, final String master, final String rootdir) { 37 | 38 | Properties properties = new Properties(); 39 | 40 | if (host == null) 41 | throw new ConnectionException("[" + HbaseConfig.ZOOKEEPER_QUORUM_PROPERTY + "] is required !"); 42 | properties.setProperty(HbaseConfig.ZOOKEEPER_QUORUM_PROPERTY, host); 43 | 44 | if (port == null) 45 | throw new ConnectionException("[" + HbaseConfig.ZOOKEEPER_CLIENTPORT_PROPERTY + "] is required !"); 46 | properties.setProperty(HbaseConfig.ZOOKEEPER_CLIENTPORT_PROPERTY, port); 47 | 48 | if (master != null) 49 | properties.setProperty(HbaseConfig.MASTER_PROPERTY, master); 50 | 51 | if (rootdir != null) 52 | properties.setProperty(HbaseConfig.ROOTDIR_PROPERTY, rootdir); 53 | 54 | return getInstance(properties); 55 | } 56 | 57 | /** 58 | * Gets instance. 59 | * 60 | * @param properties the properties 61 | * @return the instance 62 | */ 63 | public synchronized static HbaseSharedConnPool getInstance(final Properties properties) { 64 | 65 | Configuration configuration = new Configuration(); 66 | 67 | for (Map.Entry entry : properties.entrySet()) { 68 | 69 | configuration.set((String) entry.getKey(), (String) entry.getValue()); 70 | } 71 | 72 | return getInstance(configuration); 73 | } 74 | 75 | /** 76 | * Gets instance. 
77 | * 78 | * @param configuration the configuration 79 | * @return the instance 80 | */ 81 | public synchronized static HbaseSharedConnPool getInstance(final Configuration configuration) { 82 | 83 | if (pool.get() == null) 84 | 85 | try { 86 | pool.set(new HbaseSharedConnPool(configuration)); 87 | 88 | } catch (IOException e) { 89 | 90 | e.printStackTrace(); 91 | } 92 | 93 | return pool.get(); 94 | } 95 | 96 | @Override 97 | public Connection getConnection() { 98 | 99 | return connection; 100 | } 101 | 102 | @Override 103 | public void returnConnection(Connection conn) { 104 | 105 | // TODO: 2016/8/25 106 | } 107 | 108 | @Override 109 | public void invalidateConnection(Connection conn) { 110 | 111 | try { 112 | if (conn != null) 113 | 114 | conn.close(); 115 | 116 | } catch (IOException e) { 117 | 118 | e.printStackTrace(); 119 | } 120 | } 121 | 122 | /** 123 | * Close. 124 | */ 125 | public void close() { 126 | 127 | try { 128 | connection.close(); 129 | 130 | pool.set(null); 131 | 132 | } catch (IOException e) { 133 | 134 | e.printStackTrace(); 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/hbase/HbaseConnectionPool.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.structured.pool.hbase; 17 | 18 | import org.apache.hadoop.conf.Configuration; 19 | import org.apache.hadoop.hbase.client.Connection; 20 | 21 | import com.structured.pool.tool.ConnectionPool; 22 | import com.structured.pool.tool.ConnectionPoolBase; 23 | import com.structured.pool.tool.ConnectionPoolConfig; 24 | 25 | import java.util.Properties; 26 | 27 | public class HbaseConnectionPool extends ConnectionPoolBase implements ConnectionPool { 28 | 29 | /** 30 | * serialVersionUID 31 | */ 32 | private static final long serialVersionUID = -9126420905798370243L; 33 | 34 | /** 35 | *

Title: HbaseConnectionPool

36 | *

Description: 默认构造方法

37 | */ 38 | public HbaseConnectionPool() { 39 | 40 | this(HbaseConfig.DEFAULT_HOST, HbaseConfig.DEFAULT_PORT); 41 | } 42 | 43 | /** 44 | *

Title: HbaseConnectionPool

45 | *

Description: 构造方法

46 | * 47 | * @param host address 48 | * @param port port 49 | */ 50 | public HbaseConnectionPool(final String host, final String port) { 51 | 52 | this(new ConnectionPoolConfig(), host, port, HbaseConfig.DEFAULT_MASTER, HbaseConfig.DEFAULT_ROOTDIR); 53 | } 54 | 55 | /** 56 | * Title: HbaseConnectionPool
57 | * Description: Constructor
58 | * 59 | * @param host address 60 | * @param port port 61 | * @param master HBase master address 62 | * @param rootdir HDFS root directory 63 | */ 64 | public HbaseConnectionPool(final String host, final String port, final String master, final String rootdir) { 65 | 66 | this(new ConnectionPoolConfig(), host, port, master, rootdir); 67 | } 68 | 69 | /** 70 | * Title: HbaseConnectionPool
71 | * Description: Constructor
72 | * 73 | * @param hadoopConfiguration HBase configuration 74 | */ 75 | public HbaseConnectionPool(final Configuration hadoopConfiguration) { 76 | 77 | this(new ConnectionPoolConfig(), hadoopConfiguration); 78 | } 79 | 80 | /** 81 | * Title: HbaseConnectionPool
82 | * Description: Constructor
83 | * 84 | * @param poolConfig pool configuration 85 | * @param host address 86 | * @param port port 87 | */ 88 | public HbaseConnectionPool(final ConnectionPoolConfig poolConfig, final String host, final String port) { 89 | 90 | this(poolConfig, host, port, HbaseConfig.DEFAULT_MASTER, HbaseConfig.DEFAULT_ROOTDIR); 91 | } 92 | 93 | /** 94 | * Title: HbaseConnectionPool
95 | * Description: Constructor
96 | * 97 | * @param poolConfig pool configuration 98 | * @param hadoopConfiguration HBase configuration 99 | */ 100 | public HbaseConnectionPool(final ConnectionPoolConfig poolConfig, final Configuration hadoopConfiguration) { 101 | 102 | super(poolConfig, new HbaseConnectionFactory(hadoopConfiguration)); 103 | } 104 | 105 | /** 106 | * Title: HbaseConnectionPool
107 | * Description: Constructor
108 | * 109 | * @param poolConfig 池配置 110 | * @param host 地址 111 | * @param port 端口 112 | * @param master hbase主机 113 | * @param rootdir hdfs目录 114 | */ 115 | public HbaseConnectionPool(final ConnectionPoolConfig poolConfig, final String host, final String port, final String master, final String rootdir) { 116 | 117 | super(poolConfig, new HbaseConnectionFactory(host, port, master, rootdir)); 118 | } 119 | 120 | /** 121 | * @param poolConfig 池配置 122 | * @param properties 参数配置 123 | * @since 1.2.1 124 | */ 125 | public HbaseConnectionPool(final ConnectionPoolConfig poolConfig, final Properties properties) { 126 | 127 | super(poolConfig, new HbaseConnectionFactory(properties)); 128 | } 129 | 130 | @Override 131 | public Connection getConnection() { 132 | 133 | return super.getResource(); 134 | } 135 | 136 | @Override 137 | public void returnConnection(Connection conn) { 138 | 139 | super.returnResource(conn); 140 | } 141 | 142 | @Override 143 | public void invalidateConnection(Connection conn) { 144 | 145 | super.invalidateResource(conn); 146 | } 147 | 148 | } 149 | -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/hbase/HbaseConnectionFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.structured.pool.hbase; 17 | 18 | import org.apache.commons.pool2.PooledObject; 19 | import org.apache.commons.pool2.impl.DefaultPooledObject; 20 | import org.apache.hadoop.conf.Configuration; 21 | import org.apache.hadoop.hbase.client.Connection; 22 | 23 | import com.structured.pool.tool.ConnectionException; 24 | import com.structured.pool.tool.ConnectionFactory; 25 | 26 | import java.util.Map.Entry; 27 | import java.util.Properties; 28 | 29 | class HbaseConnectionFactory implements ConnectionFactory { 30 | 31 | /** 32 | * serialVersionUID 33 | */ 34 | private static final long serialVersionUID = 4024923894283696465L; 35 | 36 | /** 37 | * hadoopConfiguration 38 | */ 39 | private final Configuration hadoopConfiguration; 40 | 41 | /** 42 | *

Title: HbaseConnectionFactory

43 | *

Description: 构造方法

44 | * 45 | * @param hadoopConfiguration HBase configuration 46 | */ 47 | public HbaseConnectionFactory(final Configuration hadoopConfiguration) { 48 | 49 | this.hadoopConfiguration = hadoopConfiguration; 50 | } 51 | 52 | /** 53 | * Title: HbaseConnectionFactory
54 | * Description: Constructor
55 | * 56 | * @param host zookeeper地址 57 | * @param port zookeeper端口 58 | * @param master hbase主机 59 | * @param rootdir hdfs数据目录 60 | */ 61 | public HbaseConnectionFactory(final String host, final String port, final String master, final String rootdir) { 62 | 63 | this.hadoopConfiguration = new Configuration(); 64 | 65 | if (host == null) 66 | throw new ConnectionException("[" + HbaseConfig.ZOOKEEPER_QUORUM_PROPERTY + "] is required !"); 67 | this.hadoopConfiguration.set(HbaseConfig.ZOOKEEPER_QUORUM_PROPERTY, host); 68 | 69 | if (port == null) 70 | throw new ConnectionException("[" + HbaseConfig.ZOOKEEPER_CLIENTPORT_PROPERTY + "] is required !"); 71 | this.hadoopConfiguration.set(HbaseConfig.ZOOKEEPER_CLIENTPORT_PROPERTY, port); 72 | 73 | if (master != null) 74 | this.hadoopConfiguration.set(HbaseConfig.MASTER_PROPERTY, master); 75 | 76 | if (rootdir != null) 77 | this.hadoopConfiguration.set(HbaseConfig.ROOTDIR_PROPERTY, rootdir); 78 | } 79 | 80 | /** 81 | * @param properties 参数配置 82 | * @since 1.2.1 83 | */ 84 | public HbaseConnectionFactory(final Properties properties) { 85 | 86 | this.hadoopConfiguration = new Configuration(); 87 | 88 | for (Entry entry : properties.entrySet()) { 89 | 90 | this.hadoopConfiguration.set((String) entry.getKey(), (String) entry.getValue()); 91 | } 92 | } 93 | 94 | @Override 95 | public PooledObject makeObject() throws Exception { 96 | 97 | Connection connection = this.createConnection(); 98 | 99 | return new DefaultPooledObject(connection); 100 | } 101 | 102 | @Override 103 | public void destroyObject(PooledObject p) throws Exception { 104 | 105 | Connection connection = p.getObject(); 106 | 107 | if (connection != null) 108 | 109 | connection.close(); 110 | } 111 | 112 | @Override 113 | public boolean validateObject(PooledObject p) { 114 | 115 | Connection connection = p.getObject(); 116 | 117 | if (connection != null) 118 | 119 | return ((!connection.isAborted()) && (!connection.isClosed())); 120 | 121 | return false; 122 | } 123 | 124 | @Override 125 | public void activateObject(PooledObject p) throws Exception { 126 | // TODO Auto-generated method stub 127 | 128 | } 129 | 130 | @Override 131 | public void passivateObject(PooledObject p) throws Exception { 132 | // TODO Auto-generated method stub 133 | 134 | } 135 | 136 | @Override 137 | public Connection createConnection() throws Exception { 138 | 139 | Connection connection = org.apache.hadoop.hbase.client.ConnectionFactory 140 | .createConnection(hadoopConfiguration); 141 | 142 | return connection; 143 | } 144 | 145 | } 146 | -------------------------------------------------------------------------------- /src/main/java/com/structured/APP/ForeachWriterHBase.java: -------------------------------------------------------------------------------- 1 | package com.structured.APP; 2 | 3 | import java.io.IOException; 4 | import java.util.HashMap; 5 | import java.util.Map; 6 | import java.util.Map.Entry; 7 | 8 | import org.apache.hadoop.conf.Configuration; 9 | import org.apache.hadoop.hbase.HBaseConfiguration; 10 | import org.apache.hadoop.hbase.HColumnDescriptor; 11 | import org.apache.hadoop.hbase.HTableDescriptor; 12 | import org.apache.hadoop.hbase.TableName; 13 | import org.apache.hadoop.hbase.client.Admin; 14 | import org.apache.hadoop.hbase.client.Connection; 15 | import org.apache.hadoop.hbase.client.Put; 16 | import org.apache.hadoop.hbase.client.Table; 17 | import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; 18 | import org.apache.hadoop.hbase.util.Bytes; 19 | import 
org.apache.spark.sql.ForeachWriter; 20 | import org.apache.spark.sql.Row; 21 | import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema; 22 | import org.slf4j.Logger; 23 | import org.slf4j.LoggerFactory; 24 | 25 | import com.google.gson.Gson; 26 | import com.structured.conf.ConfigurationManager; 27 | import com.structured.constant.Constants; 28 | import com.structured.pool.hbase.HbaseConnectionPool; 29 | import com.structured.pool.tool.ConnectionPoolConfig; 30 | 31 | import scala.Serializable; 32 | 33 | public class ForeachWriterHBase extends ForeachWriter implements 34 | Serializable { 35 | public static final long serialVersionUID = 1L; 36 | private static Logger logger = LoggerFactory.getLogger(ForeachWriterHBase.class); 37 | public static HbaseConnectionPool pool = null; 38 | public Connection conn = null; 39 | 40 | static { 41 | ConnectionPoolConfig config = new ConnectionPoolConfig(); 42 | // 配置连接池参数 43 | config.setMaxTotal(ConfigurationManager 44 | .getInteger(Constants.HBASE_POOL_MAX_TOTAL)); 45 | config.setMaxIdle(ConfigurationManager 46 | .getInteger(Constants.HBASE_POOL_MAX_IDLE)); 47 | config.setMaxWaitMillis(ConfigurationManager 48 | .getInteger(Constants.HBASE_POOL_MAX_WAITMILLIS)); 49 | config.setTestOnBorrow(ConfigurationManager 50 | .getBoolean(Constants.HBASE_POOL_TESTONBORROW)); 51 | Configuration hbaseConfig = getHBaseConfiguration(); 52 | pool =new HbaseConnectionPool(config, hbaseConfig); 53 | } 54 | 55 | public static synchronized Connection getConn() { 56 | return pool.getConnection(); 57 | } 58 | 59 | @Override 60 | public boolean open(long partitionId, long version) { 61 | try { 62 | conn = getConn(); 63 | return true; 64 | } catch (Exception e) { 65 | pool.returnConnection(conn); 66 | return false; 67 | } 68 | } 69 | 70 | @SuppressWarnings("unchecked") 71 | @Override 72 | public void process(Row value) { 73 | GenericRowWithSchema genericRowWithSchema = (GenericRowWithSchema) value; 74 | // 数据写入格式如下(中间分割符为\001) 75 | // dbName tableName tableRowkey tableData 76 | // tets test 1111 {"a1":"a1","a2":"a3"} 77 | String[] tableInformation= genericRowWithSchema.get(0).toString().split(Constants.SEPARATOR_001); 78 | 79 | String dbName = tableInformation[0]; 80 | String tableName = tableInformation[1]; 81 | Object tableRowkey = tableInformation[2]; 82 | 83 | Gson gson = new Gson(); 84 | Map tableData = new HashMap(); 85 | tableData = gson.fromJson(tableInformation[3], tableData.getClass()); 86 | 87 | // 调用数据解析器 88 | tableName = dbName + "." 
+ tableName; 89 | 90 | HTableDescriptor table = new HTableDescriptor( 91 | TableName.valueOf(tableName)); 92 | table.addFamily(new HColumnDescriptor(Constants.CF_DEFAULT) 93 | .setCompressionType(Algorithm.NONE)); 94 | 95 | Table tablePut = null; 96 | Admin admin = null; 97 | try { 98 | tablePut = conn.getTable(TableName.valueOf(tableName)); 99 | admin = conn.getAdmin(); 100 | TableName tName = table.getTableName(); 101 | if (!admin.tableExists(tName)) { 102 | try { 103 | admin.createTable(table); 104 | tablePut = conn.getTable(TableName.valueOf(tableName)); 105 | // admin.flush(tName); 106 | } catch (Exception e) { 107 | logger.error("建表失败: ->" + tableName); 108 | } 109 | } 110 | } catch (IOException e1) { 111 | logger.error("获取tablePut或Admin失败: ->" + tableName); 112 | } 113 | 114 | try { 115 | Put put = setDataPut(tableRowkey, tableData); 116 | tablePut.put(put); 117 | } catch (Exception e) { 118 | logger.error("写入数据失败: ->" + tableName + "-" + tableData); 119 | } 120 | 121 | try { 122 | admin.close(); 123 | tablePut.close(); 124 | } catch (IOException e) { 125 | logger.error("关闭tablePut或Admin失败: ->" + tableName); 126 | } 127 | // pool.returnConnection(conn); 128 | 129 | } 130 | 131 | @Override 132 | public void close(Throwable errorOrNull) { 133 | pool.returnConnection(conn); 134 | } 135 | 136 | 137 | public static Configuration getHBaseConfiguration() { 138 | Configuration conf = null; 139 | try { 140 | conf = HBaseConfiguration.create(); 141 | conf.set("hbase.zookeeper.quorum", ConfigurationManager 142 | .getProperty(Constants.ZK_METADATA_BROKER_LIST)); 143 | conf.set("hbase.defaults.for.version.skip", "true"); 144 | 145 | } catch (Exception e) { 146 | logger.error("获取HBaseConfiguration出错,请检查是否有配置文件和ZK是否正常。ZK链接: ->" 147 | + ConfigurationManager 148 | .getProperty(Constants.ZK_METADATA_BROKER_LIST)); 149 | } 150 | return conf; 151 | } 152 | 153 | public static Put setDataPut(Object tableRowkey, 154 | Map tableData) { 155 | Put put = new Put(Bytes.toBytes(tableRowkey.toString())); 156 | for (Entry entry : tableData.entrySet()) { 157 | put.addColumn(Bytes.toBytes(Constants.CF_DEFAULT), 158 | Bytes.toBytes(entry.getKey()), 159 | Bytes.toBytes(entry.getValue().toString())); 160 | } 161 | return put; 162 | } 163 | } -------------------------------------------------------------------------------- /src/main/java/com/structured/pool/tool/ConnectionPoolBase.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015-2016 Dark Phoenixs (Open-Source Organization). 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.structured.pool.tool; 17 | 18 | import org.apache.commons.pool2.PooledObjectFactory; 19 | import org.apache.commons.pool2.impl.GenericObjectPool; 20 | import org.apache.commons.pool2.impl.GenericObjectPoolConfig; 21 | 22 | import java.io.Closeable; 23 | import java.io.Serializable; 24 | 25 | public abstract class ConnectionPoolBase implements Closeable, Serializable { 26 | 27 | /** 28 | * serialVersionUID 29 | */ 30 | private static final long serialVersionUID = 536428799879058482L; 31 | 32 | /** 33 | * internalPool 34 | */ 35 | protected GenericObjectPool internalPool; 36 | 37 | /** 38 | *

Title: ConnectionPoolBase

39 | *

Description: 构造方法

40 | */ 41 | public ConnectionPoolBase() { 42 | } 43 | 44 | /** 45 | *

Title: ConnectionPoolBase

46 | *

Description: 构造方法

47 | * 48 | * @param poolConfig 池配置 49 | * @param factory 池对象工厂 50 | */ 51 | public ConnectionPoolBase(final GenericObjectPoolConfig poolConfig, 52 | PooledObjectFactory factory) { 53 | this.initPool(poolConfig, factory); 54 | } 55 | 56 | /** 57 | *

Title: initPool

58 | *

Description: 初始化对象池

59 | * 60 | * @param poolConfig 池配置 61 | * @param factory 池对象工厂 62 | */ 63 | protected void initPool(final GenericObjectPoolConfig poolConfig, 64 | PooledObjectFactory factory) { 65 | if (this.internalPool != null) 66 | this.destroy(); 67 | 68 | this.internalPool = new GenericObjectPool(factory, poolConfig); 69 | } 70 | 71 | /** 72 | *

Title: destroy

73 | *

Description: 销毁对象池

74 | */ 75 | protected void destroy() { 76 | this.close(); 77 | } 78 | 79 | /** 80 | *

Title: getResource

81 | *

Description: 获得池对象

82 | * 83 | * @return 池对象 84 | */ 85 | protected T getResource() { 86 | try { 87 | return internalPool.borrowObject(); 88 | } catch (Exception e) { 89 | throw new ConnectionException( 90 | "Could not get a resource from the pool", e); 91 | } 92 | } 93 | 94 | /** 95 | *

Title: returnResource

96 | *

Description: 返回池对象

97 | * 98 | * @param resource 池对象 99 | */ 100 | protected void returnResource(final T resource) { 101 | if (null != resource) 102 | try { 103 | internalPool.returnObject(resource); 104 | } catch (Exception e) { 105 | throw new ConnectionException( 106 | "Could not return the resource to the pool", e); 107 | } 108 | } 109 | 110 | /** 111 | *

Title: invalidateResource

112 | *

Description: 废弃池对象

113 | * 114 | * @param resource 池对象 115 | */ 116 | protected void invalidateResource(final T resource) { 117 | if (null != resource) 118 | try { 119 | internalPool.invalidateObject(resource); 120 | } catch (Exception e) { 121 | throw new ConnectionException( 122 | "Could not invalidate the resource to the pool", e); 123 | } 124 | } 125 | 126 | /** 127 | *

Title: getNumActive

128 | *

Description: 获得池激活数

129 | * 130 | * @return 激活数 131 | */ 132 | public int getNumActive() { 133 | if (isInactived()) { 134 | return -1; 135 | } 136 | 137 | return this.internalPool.getNumActive(); 138 | } 139 | 140 | /** 141 | *

Title: getNumIdle

142 | *

Description: 获得池空闲数

143 | * 144 | * @return 空闲数 145 | */ 146 | public int getNumIdle() { 147 | if (isInactived()) { 148 | return -1; 149 | } 150 | 151 | return this.internalPool.getNumIdle(); 152 | } 153 | 154 | /** 155 | *

Title: getNumWaiters

156 | *

Description: 获得池等待数

157 | * 158 | * @return 等待数 159 | */ 160 | public int getNumWaiters() { 161 | if (isInactived()) { 162 | return -1; 163 | } 164 | 165 | return this.internalPool.getNumWaiters(); 166 | } 167 | 168 | /** 169 | *

Title: getMeanBorrowWaitTimeMillis

170 | *

Description: 获得平均等待时间

171 | * 172 | * @return 平均等待时间 173 | */ 174 | public long getMeanBorrowWaitTimeMillis() { 175 | if (isInactived()) { 176 | return -1; 177 | } 178 | 179 | return this.internalPool.getMeanBorrowWaitTimeMillis(); 180 | } 181 | 182 | /** 183 | *

Title: getMaxBorrowWaitTimeMillis

184 | *

Description: 获得最大等待时间

185 | * 186 | * @return 最大等待时间 187 | */ 188 | public long getMaxBorrowWaitTimeMillis() { 189 | if (isInactived()) { 190 | return -1; 191 | } 192 | 193 | return this.internalPool.getMaxBorrowWaitTimeMillis(); 194 | } 195 | 196 | /** 197 | *

Title: isClosed

198 | *

Description: 池是否关闭

199 | * 200 | * @return 是否关闭 201 | */ 202 | public boolean isClosed() { 203 | try { 204 | return this.internalPool.isClosed(); 205 | } catch (Exception e) { 206 | throw new ConnectionException( 207 | "Could not check closed from the pool", e); 208 | } 209 | } 210 | 211 | /** 212 | *

Title: isInactived

213 | *

Description: 池是否失效

214 | * 215 | * @return 是否失效 216 | */ 217 | private boolean isInactived() { 218 | try { 219 | return this.internalPool == null || this.internalPool.isClosed(); 220 | } catch (Exception e) { 221 | throw new ConnectionException( 222 | "Could not check inactived from the pool", e); 223 | } 224 | } 225 | 226 | /** 227 | *

Title: addObjects

228 | *

Description: 添加池对象

229 | * 230 | * @param count 池对象数量 231 | */ 232 | protected void addObjects(final int count) { 233 | try { 234 | for (int i = 0; i < count; i++) { 235 | this.internalPool.addObject(); 236 | } 237 | } catch (Exception e) { 238 | throw new ConnectionException("Error trying to add idle objects", e); 239 | } 240 | } 241 | 242 | /** 243 | *

Title: clear

244 | *

Description: 清除对象池

245 | */ 246 | public void clear() { 247 | try { 248 | this.internalPool.clear(); 249 | } catch (Exception e) { 250 | throw new ConnectionException("Could not clear the pool", e); 251 | } 252 | } 253 | 254 | /** 255 | *

Title: close

256 | *

Description: 关闭对象池

257 | */ 258 | public void close() { 259 | try { 260 | this.internalPool.close(); 261 | } catch (Exception e) { 262 | throw new ConnectionException("Could not destroy the pool", e); 263 | } 264 | } 265 | } 266 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | com.structured 5 | structured-streaming-Kafka2HBase 6 | 0.0.1-SNAPSHOT 7 | 8 | 9 | UTF-8 10 | 2.11.11 11 | 2.11 12 | 1.1.0 13 | 2.6.0 14 | 2.3.0 15 | 16 | 17 | 18 | 19 | 20 | org.apache.hadoop 21 | hadoop-client 22 | ${hadoop.version} 23 | 24 | 25 | com.google.code.gson 26 | gson 27 | 2.8.5 28 | 29 | 30 | 31 | org.apache.hbase 32 | hbase-client 33 | ${hbase.version} 34 | 35 | 36 | io.netty 37 | netty-all 38 | 39 | 40 | 41 | 42 | 43 | org.apache.commons 44 | commons-pool2 45 | 2.4.2 46 | 47 | 48 | 49 | org.apache.spark 50 | spark-core_${scala.compat.version} 51 | ${spark.version} 52 | 53 | 54 | org.apache.spark 55 | spark-sql_${scala.compat.version} 56 | ${spark.version} 57 | 58 | 59 | 60 | 61 | org.apache.spark 62 | spark-sql-kafka-0-10_${scala.compat.version} 63 | ${spark.version} 64 | 65 | 66 | 67 | 68 | 69 | 70 | src/main/java 71 | src/test/java 72 | 73 | 74 | 75 | 76 | org.apache.maven.plugins 77 | maven-surefire-plugin 78 | 2.9 79 | 80 | false 81 | 82 | **/*.java 83 | **/*.scala 84 | 85 | 86 | 87 | 88 | org.apache.maven.plugins 89 | maven-shade-plugin 90 | 2.4 91 | 92 | 93 | package 94 | 95 | shade 96 | 97 | 98 | 99 | 100 | *:* 101 | 102 | META-INF/*.SF 103 | META-INF/*.DSA 104 | META-INF/*.RSA 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 114 | META-INF/spring.handlers 115 | 116 | 118 | META-INF/spring.schemas 119 | 120 | 122 | META-INF/spring.tooling 123 | 124 | 125 | 127 | 128 | com.rttx.App 129 | 20180327 130 | 131 | 132 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | org.codehaus.mojo 142 | exec-maven-plugin 143 | 1.2.1 144 | 145 | 146 | 147 | exec 148 | 149 | 150 | 151 | 152 | java 153 | true 154 | false 155 | compile 156 | com.stars 157 | 158 | 159 | 160 | 161 | org.apache.maven.plugins 162 | maven-compiler-plugin 163 | 164 | 1.8 165 | 1.8 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | zyxrRepository 176 | zyxrRepository 177 | http://maven.zyxr.com/repository/maven-public/ 178 | default 179 | 180 | true 181 | 182 | 183 | true 184 | 185 | 186 | 187 | 188 | aliyun 189 | http://maven.aliyun.com/nexus/content/groups/public/ 190 | 191 | 192 | cloudera 193 | https://repository.cloudera.com/artifactory/cloudera-repos/ 194 | 195 | 196 | jboss 197 | http://repository.jboss.org/nexus/content/groups/public 198 | 199 | 200 | mvnrepository 201 | https://mvnrepository.com/artifact/ 202 | 203 | 204 | 205 | 206 | --------------------------------------------------------------------------------
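
For reference, here is a minimal sketch of how the pooling classes above fit together outside the streaming job. The ZooKeeper address `zk-host:2181`, the table name `test.table`, and the rowkey are hypothetical placeholders, error handling is elided, and the pool limits simply mirror the knobs exposed in my.properties.

```
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import com.structured.pool.hbase.HbaseConnectionPool;
import com.structured.pool.tool.ConnectionPoolConfig;

public class PoolUsageSketch {
    public static void main(String[] args) throws Exception {
        // Pool limits mirror hbase.pool.max-total / hbase.pool.max-idle in my.properties.
        ConnectionPoolConfig config = new ConnectionPoolConfig();
        config.setMaxTotal(200);
        config.setMaxIdle(100);

        // "zk-host" and "2181" are placeholders; master and rootdir may stay null.
        HbaseConnectionPool pool = new HbaseConnectionPool(config, "zk-host", "2181", null, null);

        Connection conn = pool.getConnection();
        try (Table table = conn.getTable(TableName.valueOf("test.table"))) {
            Put put = new Put(Bytes.toBytes("1111"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("a1"), Bytes.toBytes("a1"));
            table.put(put);
        } finally {
            // Hand the connection back to the pool instead of closing it directly.
            pool.returnConnection(conn);
        }
        pool.close();
    }
}
```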