├── .gitignore ├── LICENSE ├── README.md ├── pom.xml └── src ├── main ├── java │ └── com │ │ └── aliyun │ │ ├── dms │ │ └── subscribe │ │ │ └── clients │ │ │ ├── DBMapper.java │ │ │ ├── DTSConsumerWithDBMapping.java │ │ │ ├── DefaultDistributedDTSConsumer.java │ │ │ ├── DistributedDTSConsumer.java │ │ │ └── UserRecordGeneratorWithDBMapping.java │ │ └── dts │ │ └── subscribe │ │ └── clients │ │ ├── AbstractDTSConsumer.java │ │ ├── ConsumerContext.java │ │ ├── DTSConsumer.java │ │ ├── DefaultDTSConsumer.java │ │ ├── check │ │ ├── CheckManager.java │ │ ├── CheckResult.java │ │ ├── DefaultCheckManager.java │ │ ├── SubscribeChecker.java │ │ ├── SubscribeNetworkChecker.java │ │ └── util │ │ │ ├── NetUtil.java │ │ │ └── NodeCommandClientConfig.java │ │ ├── common │ │ ├── AtomicFileStore.java │ │ ├── BytesUtil.java │ │ ├── Checkpoint.java │ │ ├── FieldEntryHolder.java │ │ ├── GeometryUtil.java │ │ ├── JDKCharsetMapper.java │ │ ├── NullableOptional.java │ │ ├── RecordListener.java │ │ ├── RetryUtil.java │ │ ├── ThreadFactoryWithNamePrefix.java │ │ ├── Time.java │ │ ├── UniqueKeyGenerator.java │ │ ├── UserCommitCallBack.java │ │ ├── Util.java │ │ ├── WorkThread.java │ │ └── function │ │ │ └── SwallowException.java │ │ ├── exception │ │ ├── CriticalException.java │ │ ├── DTSBaseException.java │ │ └── TimestampSeekException.java │ │ ├── formats │ │ ├── avro │ │ │ ├── BinaryGeometry.java │ │ │ ├── BinaryObject.java │ │ │ ├── Character.java │ │ │ ├── DateTime.java │ │ │ ├── Decimal.java │ │ │ ├── DefaultValueDeserializer.java │ │ │ ├── EmptyObject.java │ │ │ ├── Field.java │ │ │ ├── Float.java │ │ │ ├── Integer.java │ │ │ ├── Operation.java │ │ │ ├── Record.java │ │ │ ├── Source.java │ │ │ ├── SourceType.java │ │ │ ├── TextGeometry.java │ │ │ ├── TextObject.java │ │ │ ├── Timestamp.java │ │ │ └── TimestampWithTimeZone.java │ │ └── util │ │ │ └── ObjectNameUtils.java │ │ ├── metastore │ │ ├── AbstractUserMetaStore.java │ │ ├── KafkaMetaStore.java │ │ ├── LocalFileMetaStore.java │ │ ├── MetaStore.java │ │ └── MetaStoreCenter.java │ │ ├── metrics │ │ ├── DTSMetrics.java │ │ └── LogMetricsReporter.java │ │ ├── record │ │ ├── AvroRecordParser.java │ │ ├── DatabaseInfo.java │ │ ├── DefaultRecordSchema.java │ │ ├── DefaultUserRecord.java │ │ ├── ForeignKeyIndexInfo.java │ │ ├── OperationType.java │ │ ├── RecordField.java │ │ ├── RecordIndexInfo.java │ │ ├── RecordSchema.java │ │ ├── RowImage.java │ │ ├── SimplifiedRecordField.java │ │ ├── UserRecord.java │ │ ├── impl │ │ │ └── DefaultRowImage.java │ │ └── value │ │ │ ├── BinaryEncodingObject.java │ │ │ ├── BitValue.java │ │ │ ├── DateTime.java │ │ │ ├── DecimalNumeric.java │ │ │ ├── FloatNumeric.java │ │ │ ├── IntegerNumeric.java │ │ │ ├── NoneValue.java │ │ │ ├── ObjectType.java │ │ │ ├── SpecialNumericType.java │ │ │ ├── StringValue.java │ │ │ ├── TextEncodingObject.java │ │ │ ├── UnixTimestamp.java │ │ │ ├── Value.java │ │ │ ├── ValueType.java │ │ │ ├── WKBGeometry.java │ │ │ └── WKTGeometry.java │ │ ├── recordfetcher │ │ ├── ClusterSwitchListener.java │ │ ├── ConsumerWrap.java │ │ ├── KafkaRecordFetcher.java │ │ ├── Names.java │ │ └── OffsetCommitCallBack.java │ │ ├── recordgenerator │ │ ├── AvroDeserializer.java │ │ └── UserRecordGenerator.java │ │ └── recordprocessor │ │ ├── DbType.java │ │ ├── DefaultRecordPrintListener.java │ │ ├── EtlRecordProcessor.java │ │ └── FieldValue.java └── resources │ └── log4j.properties └── test └── java └── com └── aliyun └── dts └── subscribe └── clients ├── DBMapperTest.java ├── DTSConsumerAssignDemo.java ├── 
DTSConsumerSubscribeDemo.java ├── DistributedDTSConsumerDemo.java ├── UserMetaStore.java └── record └── value └── DateTimeTest.java

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | *.class
 2 | target
 3 | *.iml
 4 | .idea
 5 | dts-new-subscribe-sdk.ipr
 6 | dts-new-subscribe-sdk.iws
 7 | localCheckpointStore*
 8 | src/test/java/com/aliyun/dts/subscribe/clients/YanmenConsumerDemo.java
 9 | src/test/java/com/aliyun/dts/subscribe/clients/YanmenConsumerDemo2.java
10 | src/test/java/com/aliyun/dts/subscribe/clients/YanmenDistributedDTSConsumerDemo.java
11 | src/test/java/com/aliyun/dts/subscribe/clients/MySQLUserMetaStore.java
12 | dts-new-subscribe.log
13 | .flattened-pom.xml
14 |
15 | /client/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

Usage:

1. For ordinary users, we recommend depending directly on the prebuilt new subscription SDK.

Add the following dependency to your project's pom.xml:

    <dependency>
        <groupId>com.aliyun.dts</groupId>
        <artifactId>dts-new-subscribe-sdk</artifactId>
        <version>{dts_new_sdk_version}</version>
    </dependency>

The latest version is listed at https://s01.oss.sonatype.org/#nexus-search;quick~dts-new-subscribe-sdk

2. Users who know Kafka well, or who need to change the SDK's internal logic, can modify this source code directly.

Configuration:

The main usage flow is as follows; reference demos are also provided under the test directory:

ConsumerContext consumerContext = new ConsumerContext(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);

//if set to true, force the client to start from initCheckpoint
consumerContext.setForceUseCheckpoint(isForceUseInitCheckpoint);

//add user store
consumerContext.setUserRegisteredStore(new UserMetaStore());

DTSConsumer dtsConsumer = new DefaultDTSConsumer(consumerContext);

dtsConsumer.addRecordListeners(buildRecordListener());

dtsConsumer.start();

Replace brokerUrl, topic, sid, userName, and password with your own values; all of them are configured in the DTS subscription console.
initCheckpoint is the starting checkpoint used the first time the new DTS subscription client starts; it only takes effect in assign mode and on the first start of a subscribe-mode client. A sketch of the buildRecordListener() referenced above is shown below.
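The listeners returned by buildRecordListener() are where user code consumes change records. A minimal sketch, modeled on the demos under src/test: DefaultRecordPrintListener, OperationType, and DbType are SDK classes from the tree above, while accessor names such as getOperationType(), getSourceTimestamp(), and commit(...) are taken from those demos and should be verified against the SDK version you depend on.

    import java.util.Collections;
    import java.util.Map;
    import com.aliyun.dts.subscribe.clients.common.RecordListener;
    import com.aliyun.dts.subscribe.clients.record.OperationType;
    import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
    import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;

    static Map<String, RecordListener> buildRecordListener() {
        // RecordListener has a single consume(DefaultUserRecord) method, so a lambda works
        RecordListener printListener = record -> {
            OperationType op = record.getOperationType();
            if (op.equals(OperationType.INSERT) || op.equals(OperationType.UPDATE)
                    || op.equals(OperationType.DELETE) || op.equals(OperationType.DDL)) {
                // print the record; replace this with your own handling logic
                new DefaultRecordPrintListener(DbType.MySQL).consume(record);
            }
            // ack the record so the SDK can advance and persist the checkpoint
            record.commit(String.valueOf(record.getSourceTimestamp()));
        };
        return Collections.singletonMap("recordPrinter", printListener);
    }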
subscribeMode selects between the two usage modes of the new DTS subscription client:

1. ConsumerContext.ConsumerSubscribeMode.ASSIGN:

This corresponds to the Kafka client's assign mode. Because DTS gives every topic exactly one partition to guarantee global message ordering, this mode always assigns the fixed partition 0; running only a single client is recommended.

2. ConsumerContext.ConsumerSubscribeMode.SUBSCRIBE:

This corresponds to the Kafka client's subscribe mode. Again, every topic has exactly one partition, so among the SDK clients configured with the same sid, only one will be assigned partition 0 and receive messages. Starting multiple SDK clients therefore provides failover: if one client fails, another is automatically assigned partition 0 and continues consuming.

Checkpoint management:

The SDK client needs to manage the consumption checkpoint correctly across first start, internal retries, client restarts, and similar scenarios, so that no data is lost and as little data as possible is consumed twice.

a. Saving checkpoints: by default the SDK saves the checkpoint to the local localCheckpointStore file every 5 seconds and commits it to the DTS server at the same interval. If the user has configured consumerContext.setUserRegisteredStore(new UserMetaStore()) to keep checkpoints in an external persistent shared store (for example, a database), the checkpoint is also saved there every 5 seconds.

b. Using checkpoints: the first checkpoint found wins. The scenarios are:

1. First start of the SDK client:
In both ASSIGN and SUBSCRIBE mode, the client uses the supplied checkpoint to look up the corresponding position on the DTS server and starts consuming from it.

2. The SDK reconnects to the DTS server internally, for example after a failover switch of the server-side DStore or some other retryable error:
1) ASSIGN:
In this mode, whether or not consumerContext.setForceUseCheckpoint(true | false) is configured, the client looks up the checkpoint in this order:
checkpoint in the user-configured external store ---> local localCheckpointStore file ---> the supplied initial timestamp

2) SUBSCRIBE:
In this mode the lookup order is:
checkpoint in the user-configured external store ---> checkpoint saved on the DTS server (DStore) ---> the supplied initial timestamp ---> the start position of the DTS server (newly created DStore)

3. Restart of the SDK client process:
1) ASSIGN:
If consumerContext.setForceUseCheckpoint(true) is configured, every restart is forced to use the supplied initCheckpoint as the checkpoint.
If consumerContext.setForceUseCheckpoint(false) is configured, or nothing is set, the lookup order is:
local localCheckpointStore file ---> checkpoint saved on the DTS server (DStore) ---> checkpoint in the user-configured external store
2) SUBSCRIBE:
consumerContext.setForceUseCheckpoint has no effect in this mode; the lookup order is:
checkpoint in the user-configured external store ---> checkpoint saved on the DTS server (DStore) ---> the supplied initial timestamp ---> the start position of the DTS server (newly created DStore)

Statistics:

The SDK keeps statistics on the total number of records received from the DTS server (DStore), the receive RPS, the amount of data consumed by the user, the consume RPS, and the sizes of the SDK's internal buffer queues, for example:

{"outCounts":6073728.0,"outBytes":2473651076,"outRps":18125.37,"outBps":7379638.54,"count":11.0,"inBytes":2.4751382E+9,"DStoreRecordQueue":0.0,"inCounts":6082097.0,"inRps":18112.68,"inBps":7371325.86,"__dt":1611808055414,"DefaultUserRecordQueue":0.0}

The fields mean:
DStoreRecordQueue: current size of the buffer queue of records received from the DTS server (DStore)
DefaultUserRecordQueue: current size of the buffer queue of deserialized records
inCounts: total number of records received from the DTS server (DStore)
inRps: RPS of records received from the DTS server (DStore)
inBps: Bps of records received from the DTS server (DStore)
outCounts: total number of records consumed by the client
outRps: RPS of records consumed by the client
outBps: Bps of records consumed by the client

Troubleshooting:

1. Cannot connect --- wrong DProxy address
For example, brokerUrl was filled in incorrectly as "dts-cn-hangzhou.aliyuncs.com:18009".

The client reports:
ERROR CheckResult{isOk=false, errMsg='telnet dts-cn-hangzhou.aliyuncs.com:18009 failed, please check the network and if the brokerUrl is correct'} (com.aliyun.dts.subscribe.clients.DefaultDTSConsumer)

2. Cannot connect --- the real broker address is unreachable

The client reports:
telnet real node xxx failed, please check the network

3. Cannot connect --- wrong username or password

The client reports:
ERROR CheckResult{isOk=false, errMsg='build kafka consumer failed, error: org.apache.kafka.common.errors.TimeoutException: Timeout expired while fetching topic metadata, probably the user name or password is wrong'} (com.aliyun.dts.subscribe.clients.DefaultDTSConsumer)

4. Cannot connect --- the user's checkpoint is out of range
For example, forcing the supplied checkpoint with:
consumerContext.setForceUseCheckpoint(true);
and setting checkpoint = "1609725891" (2021-01-04 10:04:51, earlier than the DStore range)
or checkpoint = "1610249501" (2021-01-10 11:31:41, later than the DStore range).

The client reports:
com.aliyun.dts.subscribe.clients.exception.TimestampSeekException: RecordGenerator:seek timestamp for topic [cn_hangzhou_rm_bp11tv2923n87081s_rdsdt_dtsacct-0] with timestamp [1610249501] failed

5. Deciding whether pulling from the DTS server (DStore) is slow or client consumption is slow:

Check the statistics output, in particular the sizes of the DStoreRecordQueue and DefaultUserRecordQueue queues.
If a queue size stays at 0, pulling from the DTS server (DStore) is the bottleneck; if it stays at the default capacity of 512, consumption is the bottleneck.

--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dms/subscribe/clients/DBMapper.java:
--------------------------------------------------------------------------------
 1 | package com.aliyun.dms.subscribe.clients;
 2 |
 3 | import com.alibaba.fastjson.JSONObject;
 4 | import com.aliyun.dts.subscribe.clients.common.RetryUtil;
 5 | import com.aliyun.dts.subscribe.clients.formats.avro.Operation;
 6 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
 7 | import com.aliyuncs.IAcsClient;
 8 | import com.aliyuncs.dts.model.v20200101.DescribeSubscriptionMetaRequest;
 9 | import com.aliyuncs.dts.model.v20200101.DescribeSubscriptionMetaResponse;
10 | import com.aliyuncs.exceptions.ClientException;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import java.util.ArrayList;
15 | import java.util.HashMap;
16 | import java.util.List;
17 | import java.util.Map;
18 | import java.util.concurrent.TimeUnit;
19 |
20 | public class DBMapper {
21 |     private final Logger log = 
LoggerFactory.getLogger(DBMapper.class); 22 | 23 | private Map physic2logicDBMapper = new HashMap<>(); 24 | private Map physic2logicTableMapper = new HashMap<>(); 25 | private boolean mapping = true; 26 | 27 | private IAcsClient iAcsClient; 28 | private DescribeSubscriptionMetaRequest describeSubscriptionMetaRequest; 29 | 30 | private RetryUtil retryUtil = new RetryUtil(4, TimeUnit.SECONDS, 15, (e) -> true); 31 | 32 | public void setClient(IAcsClient client) { 33 | iAcsClient = client; 34 | } 35 | 36 | public void setDescribeSubscriptionMetaRequest(DescribeSubscriptionMetaRequest describeSubscriptionMetaRequest) { 37 | this.describeSubscriptionMetaRequest = describeSubscriptionMetaRequest; 38 | } 39 | 40 | public synchronized void init(String dbListString) { 41 | JSONObject dbList = JSONObject.parseObject(dbListString); 42 | for (Map.Entry entry: dbList.entrySet()) { 43 | String physicDb = entry.getKey(); 44 | String logicDb = (String)((JSONObject)entry.getValue()).get("name"); 45 | JSONObject tables = (JSONObject)((JSONObject)entry.getValue()).get("Table"); 46 | 47 | physic2logicDBMapper.put(physicDb, logicDb); 48 | for (Map.Entry table: tables.entrySet()) { 49 | String physicTable = table.getKey(); 50 | String logicTable = (String)((JSONObject)table.getValue()).get("name"); 51 | physic2logicTableMapper.put(physicDb + "." + physicTable, logicDb + "." + logicTable); 52 | } 53 | } 54 | } 55 | 56 | public void init(List dbLists) { 57 | for (String dbList: dbLists) { 58 | init(dbList); 59 | } 60 | } 61 | 62 | public boolean refreshDbList() throws ClientException { 63 | List dbLists = new ArrayList<>(); 64 | DescribeSubscriptionMetaResponse res = iAcsClient.getAcsResponse(this.describeSubscriptionMetaRequest); 65 | boolean success = res.getSuccess().equalsIgnoreCase("true"); 66 | if (success) { 67 | for (DescribeSubscriptionMetaResponse.SubscriptionMetaListItem meta: (res).getSubscriptionMetaList()) { 68 | dbLists.add(meta.getDBList()); 69 | log.debug("refresh dbList:" + meta.getDBList()); 70 | } 71 | init(dbLists); 72 | } 73 | return success; 74 | 75 | } 76 | 77 | public Record transform(Record record) { 78 | // do not support ddl for now 79 | // if (record.getOperation().equals(Operation.DDL)) { 80 | // if (physic2logicDBMapper.containsKey(record.getObjectName())) { 81 | // record.setObjectName(physic2logicDBMapper.get(record.getObjectName())); 82 | // } 83 | // } 84 | 85 | if (record.getOperation().equals(Operation.INSERT) || record.getOperation().equals(Operation.UPDATE) || 86 | record.getOperation().equals(Operation.DELETE)) { 87 | if (!physic2logicTableMapper.containsKey(record.getObjectName())) { 88 | log.info("Cannot find logic db table for " + record.getObjectName() + ", refreshing dbList now"); 89 | try { 90 | retryUtil.callFunctionWithRetry( 91 | () -> { 92 | refreshDbList(); 93 | } 94 | ); 95 | } catch (Exception e) { 96 | log.error("Error getting dbList:" + e); 97 | } 98 | } 99 | record.setObjectName(physic2logicTableMapper.get(record.getObjectName())); 100 | } 101 | return record; 102 | } 103 | 104 | public boolean isMapping() { 105 | return mapping; 106 | } 107 | 108 | public void setMapping(boolean mapping) { 109 | this.mapping = mapping; 110 | } 111 | 112 | } 113 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dms/subscribe/clients/DTSConsumerWithDBMapping.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dms.subscribe.clients; 2 | 3 | import 
com.aliyun.dts.subscribe.clients.AbstractDTSConsumer; 4 | import com.aliyun.dts.subscribe.clients.ConsumerContext; 5 | import com.aliyun.dts.subscribe.clients.common.Checkpoint; 6 | import com.aliyun.dts.subscribe.clients.common.WorkThread; 7 | import com.aliyun.dts.subscribe.clients.exception.CriticalException; 8 | import com.aliyun.dts.subscribe.clients.recordfetcher.KafkaRecordFetcher; 9 | import com.aliyun.dts.subscribe.clients.recordgenerator.UserRecordGenerator; 10 | import com.aliyun.dts.subscribe.clients.recordprocessor.EtlRecordProcessor; 11 | 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.util.LinkedList; 16 | import java.util.List; 17 | 18 | 19 | import static com.aliyun.dts.subscribe.clients.common.Util.sleepMS; 20 | 21 | public class DTSConsumerWithDBMapping extends AbstractDTSConsumer { 22 | private static final Logger log = LoggerFactory.getLogger(DTSConsumerWithDBMapping.class); 23 | 24 | public DTSConsumerWithDBMapping(ConsumerContext consumerContext) { 25 | super(consumerContext); 26 | } 27 | 28 | @Override 29 | public void start() { 30 | 31 | //check firstly 32 | boolean checkResult = check(); 33 | 34 | if (!checkResult) { 35 | log.error("DTS precheck failed, dts consumer exit."); 36 | throw new CriticalException("DTS precheck failed, dts consumer exit."); 37 | } 38 | 39 | synchronized (this) { 40 | initLog4j(); 41 | if (started) { 42 | throw new IllegalStateException("The client has already been started"); 43 | } 44 | 45 | KafkaRecordFetcher recordFetcher = new KafkaRecordFetcher(consumerContext, toProcessRecords); 46 | 47 | UserRecordGeneratorWithDBMapping userRecordGenerator = new UserRecordGeneratorWithDBMapping(consumerContext, toProcessRecords, 48 | defaultUserRecords, 49 | (tp, timestamp, offset, metadata) -> recordFetcher.setToCommitCheckpoint(new Checkpoint(tp, timestamp, offset, metadata))); 50 | 51 | //processor 52 | EtlRecordProcessor etlRecordProcessor = new EtlRecordProcessor(consumerContext, defaultUserRecords, recordListeners); 53 | 54 | List startStream = startWorker(etlRecordProcessor, userRecordGenerator, recordFetcher); 55 | 56 | while (!consumerContext.isExited()) { 57 | sleepMS(1000); 58 | } 59 | log.info("DTS Consumer: shutting down..."); 60 | for (WorkThread workThread : startStream) { 61 | workThread.stop(); 62 | } 63 | 64 | started = true; 65 | } 66 | } 67 | 68 | private static List startWorker(EtlRecordProcessor etlRecordProcessor, UserRecordGeneratorWithDBMapping userRecordGenerator, KafkaRecordFetcher recordGenerator) { 69 | List ret = new LinkedList<>(); 70 | ret.add(new WorkThread(etlRecordProcessor, EtlRecordProcessor.class.getName())); 71 | ret.add(new WorkThread(userRecordGenerator, UserRecordGenerator.class.getName())); 72 | ret.add(new WorkThread(recordGenerator, KafkaRecordFetcher.class.getName())); 73 | for (WorkThread workThread : ret) { 74 | workThread.start(); 75 | } 76 | return ret; 77 | } 78 | } 79 | 80 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dms/subscribe/clients/DefaultDistributedDTSConsumer.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dms.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.ConsumerContext; 4 | import com.aliyun.dts.subscribe.clients.DTSConsumer; 5 | import com.aliyun.dts.subscribe.clients.common.Checkpoint; 6 | import com.aliyun.dts.subscribe.clients.common.RecordListener; 7 | import 
com.aliyun.dts.subscribe.clients.metastore.MetaStore; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.util.ArrayList; 12 | import java.util.List; 13 | import java.util.Map; 14 | 15 | import java.util.Properties; 16 | import java.util.concurrent.LinkedBlockingQueue; 17 | import java.util.concurrent.ThreadPoolExecutor; 18 | import java.util.concurrent.TimeUnit; 19 | 20 | public class DefaultDistributedDTSConsumer implements DistributedDTSConsumer { 21 | private static final Logger LOG = LoggerFactory.getLogger(DefaultDistributedDTSConsumer.class); 22 | private List dtsConsumers = new ArrayList<>(); 23 | 24 | 25 | private int corePoolSize = 8; 26 | private int maximumPoolSize = 64; 27 | private ThreadPoolExecutor executor; 28 | private volatile boolean isClosePoolExecutor = false; 29 | 30 | public DefaultDistributedDTSConsumer() {} 31 | 32 | public void addDTSConsumer(DTSConsumer consumer) { 33 | dtsConsumers.add(consumer); 34 | } 35 | 36 | public void init(Map topic2checkpoint, DBMapper dbMapper, String dProxy, Map topic2Sid, String username, String password, 37 | ConsumerContext.ConsumerSubscribeMode subscribeMode, boolean isForceUseInitCheckpoint, 38 | MetaStore userRegisteredStore, Map recordListeners) { 39 | 40 | this.init(topic2checkpoint, dbMapper, dProxy, topic2Sid, username, password, subscribeMode, 41 | isForceUseInitCheckpoint, userRegisteredStore, recordListeners, new Properties()); 42 | } 43 | 44 | public void init(Map topic2checkpoint, DBMapper dbMapper, String dProxy, Map topic2Sid, String username, String password, 45 | ConsumerContext.ConsumerSubscribeMode subscribeMode, boolean isForceUseInitCheckpoint, 46 | MetaStore userRegisteredStore, Map recordListeners, Properties properties) { 47 | 48 | this.executor = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, 1000 * 60, 49 | TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); 50 | 51 | for (Map.Entry topicCheckpoint: topic2checkpoint.entrySet()) { 52 | 53 | ConsumerContext consumerContext = new ConsumerContext(dbMapper, dProxy, topicCheckpoint.getKey(), topic2Sid.get(topicCheckpoint.getKey()), username, password, 54 | topicCheckpoint.getValue(), subscribeMode, properties); 55 | consumerContext.setUserRegisteredStore(userRegisteredStore); 56 | consumerContext.setForceUseCheckpoint(isForceUseInitCheckpoint); 57 | 58 | DTSConsumer dtsConsumer = new DTSConsumerWithDBMapping(consumerContext); 59 | dtsConsumer.addRecordListeners(recordListeners); 60 | 61 | addDTSConsumer(dtsConsumer); 62 | } 63 | } 64 | @Override 65 | public void start() { 66 | for (DTSConsumer consumer: dtsConsumers) { 67 | try { 68 | executor.submit(consumer::start); 69 | } catch (Exception e) { 70 | LOG.error("error starting consumer:" + e); 71 | shutdownGracefully(10, TimeUnit.SECONDS); 72 | } 73 | } 74 | } 75 | 76 | public void shutdownGracefully(long timeout, TimeUnit timeUnit) { 77 | executor.shutdown(); 78 | 79 | try { 80 | if (!executor.awaitTermination(timeout, timeUnit)) { 81 | executor.shutdownNow(); 82 | 83 | } 84 | } catch (InterruptedException e) { 85 | executor.shutdownNow(); 86 | Thread.currentThread().interrupt(); 87 | } finally { 88 | isClosePoolExecutor = true; 89 | } 90 | } 91 | 92 | @Override 93 | public void addRecordListeners(Map recordListeners) { 94 | for (DTSConsumer dtsConsumer: dtsConsumers) { 95 | dtsConsumer.addRecordListeners(recordListeners); 96 | } 97 | } 98 | 99 | } 100 | -------------------------------------------------------------------------------- 
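Note: the init(...) overloads above lose their generic type parameters in this dump. Below is a minimal wiring sketch for the distributed consumer, assuming Map<String, String> for topic2checkpoint and topic2Sid and Map<String, RecordListener> for the listeners (consistent with how init(...) uses them; compare DistributedDTSConsumerDemo under src/test). The broker address, topic, sid, and checkpoint values are placeholders.

    import java.util.HashMap;
    import java.util.Map;
    import com.aliyun.dms.subscribe.clients.DBMapper;
    import com.aliyun.dms.subscribe.clients.DefaultDistributedDTSConsumer;
    import com.aliyun.dts.subscribe.clients.ConsumerContext;
    import com.aliyun.dts.subscribe.clients.common.RecordListener;
    import com.aliyun.dts.subscribe.clients.metastore.MetaStore;

    public class DistributedConsumerSketch {
        public static void main(String[] args) {
            // one initial checkpoint and one sid per physical topic of the distributed subscription
            Map<String, String> topic2checkpoint = new HashMap<>();
            topic2checkpoint.put("your-topic-0", "1610249500");
            Map<String, String> topic2Sid = new HashMap<>();
            topic2Sid.put("your-topic-0", "your-sid");

            // maps physical db/table names back to logical ones; for automatic dbList refresh,
            // also call dbMapper.setClient(...) and dbMapper.setDescribeSubscriptionMetaRequest(...)
            DBMapper dbMapper = new DBMapper();

            Map<String, RecordListener> listeners = new HashMap<>(); // fill with your own listeners
            MetaStore userStore = null; // plug in your MetaStore impl (see UserMetaStore in src/test)

            DefaultDistributedDTSConsumer consumer = new DefaultDistributedDTSConsumer();
            consumer.init(topic2checkpoint, dbMapper, "your-broker:18001", topic2Sid,
                    "user", "password",
                    ConsumerContext.ConsumerSubscribeMode.SUBSCRIBE,
                    false,      // isForceUseInitCheckpoint
                    userStore,  // shared checkpoint store across the per-topic consumers
                    listeners);
            consumer.start(); // submits one DTSConsumerWithDBMapping per topic to the thread pool
        }
    }

--------------------------------------------------------------------------------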
/src/main/java/com/aliyun/dms/subscribe/clients/DistributedDTSConsumer.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dms.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.common.RecordListener; 4 | import java.util.Map; 5 | 6 | public interface DistributedDTSConsumer { 7 | 8 | 9 | void start(); 10 | 11 | void addRecordListeners(Map recordListeners); 12 | 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dms/subscribe/clients/UserRecordGeneratorWithDBMapping.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dms.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.ConsumerContext; 4 | import com.aliyun.dts.subscribe.clients.common.Checkpoint; 5 | import com.aliyun.dts.subscribe.clients.formats.avro.Record; 6 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord; 7 | import com.aliyun.dts.subscribe.clients.recordfetcher.OffsetCommitCallBack; 8 | import com.aliyun.dts.subscribe.clients.recordgenerator.UserRecordGenerator; 9 | import org.apache.kafka.clients.consumer.ConsumerRecord; 10 | import org.apache.kafka.common.TopicPartition; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | 15 | import java.util.concurrent.LinkedBlockingQueue; 16 | import java.util.concurrent.TimeUnit; 17 | 18 | import static com.aliyun.dts.subscribe.clients.common.Util.sleepMS; 19 | 20 | /** 21 | * This class is to resolve avro record deserialize from bytes to UserRecord 22 | */ 23 | public class UserRecordGeneratorWithDBMapping extends UserRecordGenerator { 24 | private static final Logger log = LoggerFactory.getLogger(UserRecordGeneratorWithDBMapping.class); 25 | 26 | public UserRecordGeneratorWithDBMapping(ConsumerContext consumerContext, LinkedBlockingQueue toProcessRecord, 27 | LinkedBlockingQueue processedRecord, 28 | OffsetCommitCallBack offsetCommitCallBack) { 29 | super(consumerContext, toProcessRecord, processedRecord, offsetCommitCallBack); 30 | } 31 | 32 | @Override 33 | public void run() { 34 | while (!consumerContext.isExited()) { 35 | ConsumerRecord toProcess = null; 36 | Record record = null; 37 | int fetchFailedCount = 0; 38 | try { 39 | while (null == (toProcess = toProcessRecord.peek()) && !consumerContext.isExited()) { 40 | sleepMS(5); 41 | fetchFailedCount++; 42 | if (fetchFailedCount % 1000 == 0 && consumerContext.hasValidTopicPartitions()) { 43 | log.info("UserRecordGenerator: haven't receive records from generator for 5s"); 44 | } 45 | } 46 | if (consumerContext.isExited()) { 47 | return; 48 | } 49 | final ConsumerRecord consumerRecord = toProcess; 50 | consumerRecord.timestamp(); 51 | record = fastDeserializer.deserialize(consumerRecord.value()); 52 | log.debug("UserRecordGenerator: meet [{}] record type", record.getOperation()); 53 | 54 | if (consumerContext.getDbMapper() != null && consumerContext.getDbMapper().isMapping()) { 55 | record = consumerContext.getDbMapper().transform(record); 56 | } 57 | DefaultUserRecord defaultUserRecord = new DefaultUserRecord(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()), consumerRecord.offset(), 58 | record, 59 | (tp, commitRecord, offset, metadata) -> { 60 | recordStoreOutCountSensor.record(1); 61 | recordStoreOutByteSensor.record(consumerRecord.value().length); 62 | commitCheckpoint = new Checkpoint(tp, commitRecord.getSourceTimestamp(), offset, metadata); 63 | commit(); 
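// commit() hands the checkpoint to the KafkaRecordFetcher through the OffsetCommitCallBack
// wired up in DTSConsumerWithDBMapping; the fetcher thread then commits it to the broker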
64 | }); 65 | 66 | int offerTryCount = 0; 67 | 68 | while (!offerRecord(1000, TimeUnit.MILLISECONDS, defaultUserRecord) && !consumerContext.isExited()) { 69 | if (++offerTryCount % 10 == 0) { 70 | log.info("UserRecordGenerator: offer user record has failed for a period (10s) [ " + record + "]"); 71 | } 72 | } 73 | 74 | toProcessRecord.poll(); 75 | } catch (Exception e) { 76 | log.error("UserRecordGenerator: process record failed, raw consumer record [" + toProcess + "], parsed record [" + record + "], cause " + e.getMessage(), e); 77 | consumerContext.exit(); 78 | } 79 | } 80 | } 81 | 82 | // user define how to commit 83 | private void commit() { 84 | if (null != offsetCommitCallBack) { 85 | if (commitCheckpoint.getTopicPartition() != null && commitCheckpoint.getOffset() != -1) { 86 | offsetCommitCallBack.commit(commitCheckpoint.getTopicPartition(), commitCheckpoint.getTimeStamp(), 87 | commitCheckpoint.getOffset(), commitCheckpoint.getInfo()); 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/AbstractDTSConsumer.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.check.CheckManager; 4 | import com.aliyun.dts.subscribe.clients.check.CheckResult; 5 | import com.aliyun.dts.subscribe.clients.check.DefaultCheckManager; 6 | import com.aliyun.dts.subscribe.clients.check.SubscribeNetworkChecker; 7 | import com.aliyun.dts.subscribe.clients.common.RecordListener; 8 | import com.aliyun.dts.subscribe.clients.common.WorkThread; 9 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord; 10 | import com.aliyun.dts.subscribe.clients.recordfetcher.KafkaRecordFetcher; 11 | import com.aliyun.dts.subscribe.clients.recordgenerator.UserRecordGenerator; 12 | import com.aliyun.dts.subscribe.clients.recordprocessor.EtlRecordProcessor; 13 | import org.apache.kafka.clients.consumer.ConsumerRecord; 14 | import org.apache.log4j.PropertyConfigurator; 15 | import org.slf4j.Logger; 16 | import org.slf4j.LoggerFactory; 17 | 18 | import java.io.InputStream; 19 | import java.util.LinkedList; 20 | import java.util.List; 21 | import java.util.Map; 22 | import java.util.Properties; 23 | import java.util.concurrent.LinkedBlockingQueue; 24 | 25 | import static com.aliyun.dts.subscribe.clients.common.Util.require; 26 | import static com.aliyun.dts.subscribe.clients.common.Util.swallowErrorClose; 27 | 28 | public abstract class AbstractDTSConsumer implements DTSConsumer { 29 | private static final Logger log = LoggerFactory.getLogger(AbstractDTSConsumer.class); 30 | 31 | protected ConsumerContext consumerContext; 32 | 33 | protected Map recordListeners; 34 | 35 | protected final LinkedBlockingQueue toProcessRecords; 36 | protected final LinkedBlockingQueue defaultUserRecords; 37 | 38 | protected volatile boolean started = false;; 39 | 40 | public AbstractDTSConsumer(ConsumerContext consumerContext) { 41 | this.consumerContext = consumerContext; 42 | 43 | this.toProcessRecords = new LinkedBlockingQueue<>(512); 44 | this.defaultUserRecords = new LinkedBlockingQueue<>(512); 45 | } 46 | 47 | @Override 48 | public abstract void start(); 49 | 50 | private static List startWorker(EtlRecordProcessor etlRecordProcessor, UserRecordGenerator userRecordGenerator, KafkaRecordFetcher recordGenerator) { 51 | List ret = new LinkedList<>(); 52 | ret.add(new WorkThread(etlRecordProcessor, 
EtlRecordProcessor.class.getName())); 53 | ret.add(new WorkThread(userRecordGenerator, UserRecordGenerator.class.getName())); 54 | ret.add(new WorkThread(recordGenerator, KafkaRecordFetcher.class.getName())); 55 | for (WorkThread workThread : ret) { 56 | workThread.start(); 57 | } 58 | return ret; 59 | } 60 | 61 | @Override 62 | public void addRecordListeners(Map recordListeners) { 63 | require(null != recordListeners && !recordListeners.isEmpty(), "record listener required"); 64 | 65 | recordListeners.forEach((k, v) -> { 66 | log.info("register record listener " + k); 67 | }); 68 | 69 | this.recordListeners = recordListeners; 70 | } 71 | 72 | @Override 73 | public boolean check() { 74 | CheckManager checkerManager = new DefaultCheckManager(consumerContext); 75 | 76 | checkerManager.addCheckItem(new SubscribeNetworkChecker(consumerContext.getBrokerUrl())); 77 | 78 | CheckResult checkResult = checkerManager.check(); 79 | 80 | if (checkResult.isOk()) { 81 | log.info(checkResult.toString()); 82 | return true; 83 | } else { 84 | log.error(checkResult.toString()); 85 | return false; 86 | } 87 | } 88 | 89 | protected static Properties initLog4j() { 90 | Properties properties = new Properties(); 91 | InputStream log4jInput = null; 92 | try { 93 | log4jInput = Thread.currentThread().getContextClassLoader().getResourceAsStream("log4j.properties"); 94 | PropertyConfigurator.configure(log4jInput); 95 | } catch (Exception e) { 96 | } finally { 97 | swallowErrorClose(log4jInput); 98 | } 99 | return properties; 100 | } 101 | 102 | @Override 103 | public void close() { 104 | this.consumerContext.exit(); 105 | 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/ConsumerContext.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients; 2 | 3 | import com.aliyun.dms.subscribe.clients.DBMapper; 4 | import com.aliyun.dts.subscribe.clients.common.Checkpoint; 5 | import com.aliyun.dts.subscribe.clients.common.Util; 6 | import com.aliyun.dts.subscribe.clients.metastore.MetaStore; 7 | import com.aliyun.dts.subscribe.clients.metrics.DTSMetrics; 8 | import org.apache.kafka.common.TopicPartition; 9 | 10 | import java.util.Collection; 11 | import java.util.Properties; 12 | import java.util.concurrent.atomic.AtomicBoolean; 13 | 14 | import static com.aliyun.dts.subscribe.clients.recordfetcher.Names.*; 15 | 16 | public class ConsumerContext { 17 | private Properties properties; 18 | 19 | private DBMapper dbMapper; 20 | private String brokerUrl; 21 | private String topic; 22 | private String sid; 23 | private String user; 24 | private String password; 25 | 26 | private String initialCheckpoint; 27 | 28 | private boolean isForceUseCheckpoint; 29 | 30 | private ConsumerContext.ConsumerSubscribeMode subscribeMode; 31 | 32 | private Collection topicPartitions; 33 | 34 | private MetaStore userRegisteredStore; 35 | 36 | private long checkpointCommitInterval = 5000; 37 | 38 | private DTSMetrics dtsMetrics; 39 | 40 | private AtomicBoolean exited = new AtomicBoolean(false); 41 | 42 | private boolean useLocalCheckpointStore = true; 43 | 44 | private boolean isCheckpointNotExistThrowException; 45 | 46 | public ConsumerContext(String brokerUrl, String topic, String sid, String userName, String password, 47 | String initialCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode) { 48 | this(null, brokerUrl, topic, sid, userName, password, 
initialCheckpoint, subscribeMode, new Properties()); 49 | } 50 | 51 | public ConsumerContext(DBMapper dbMapper, String brokerUrl, String topic, String sid, String userName, String password, 52 | String initialCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode, Properties properties) { 53 | this(dbMapper, brokerUrl, topic, sid, userName, password, initialCheckpoint, subscribeMode, properties, false); 54 | } 55 | 56 | public ConsumerContext(DBMapper dbMapper, String brokerUrl, String topic, String sid, String userName, String password, 57 | String initialCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode, Properties properties, 58 | boolean isCheckpointNotExistThrowException) { 59 | this.properties = properties; 60 | this.dbMapper = dbMapper; 61 | this.brokerUrl = brokerUrl; 62 | this.topic = topic; 63 | this.sid = sid; 64 | this.user = userName; 65 | this.password = password; 66 | this.initialCheckpoint = initialCheckpoint; 67 | this.subscribeMode = subscribeMode; 68 | this.dtsMetrics = new DTSMetrics(); 69 | this.useLocalCheckpointStore = true; 70 | this.isCheckpointNotExistThrowException = isCheckpointNotExistThrowException; 71 | } 72 | 73 | public DBMapper getDbMapper() { 74 | if (this.dbMapper == null) { 75 | this.dbMapper = new DBMapper(); 76 | this.dbMapper.setMapping(false); 77 | } 78 | return this.dbMapper; 79 | } 80 | 81 | public boolean isExited() { 82 | return this.exited.get(); 83 | } 84 | 85 | public synchronized void exit() { 86 | dtsMetrics.close(); 87 | this.exited.set(true); 88 | } 89 | 90 | public String getBrokerUrl() { 91 | return brokerUrl; 92 | } 93 | 94 | public void setBrokerUrl(String brokerUrl) { 95 | this.brokerUrl = brokerUrl; 96 | } 97 | 98 | public String getTopic() { 99 | return topic; 100 | } 101 | 102 | public void setTopic(String topic) { 103 | this.topic = topic; 104 | } 105 | 106 | public String getSid() { 107 | return sid; 108 | } 109 | 110 | public void setSid(String sid) { 111 | this.sid = sid; 112 | } 113 | 114 | public String getUser() { 115 | return user; 116 | } 117 | 118 | public void setUser(String user) { 119 | this.user = user; 120 | } 121 | 122 | public String getPassword() { 123 | return password; 124 | } 125 | 126 | public void setPassword(String password) { 127 | this.password = password; 128 | } 129 | 130 | public Checkpoint getInitialCheckpoint() { 131 | return Util.parseCheckpoint(initialCheckpoint); 132 | } 133 | 134 | public void setInitialCheckpoint(String initialCheckpoint) { 135 | this.initialCheckpoint = initialCheckpoint; 136 | } 137 | 138 | public boolean isForceUseCheckpoint() { 139 | return isForceUseCheckpoint; 140 | } 141 | 142 | public void setForceUseCheckpoint(boolean isForceUseCheckpoint) { 143 | this.isForceUseCheckpoint = isForceUseCheckpoint; 144 | } 145 | 146 | public ConsumerSubscribeMode getSubscribeMode() { 147 | return this.subscribeMode; 148 | } 149 | 150 | public void setSubscribeMode(ConsumerContext.ConsumerSubscribeMode subscribeMode) { 151 | this.subscribeMode = subscribeMode; 152 | } 153 | 154 | public Collection getTopicPartitions() { 155 | return topicPartitions; 156 | } 157 | 158 | public void setTopicPartitions(Collection topicPartitions) { 159 | this.topicPartitions = topicPartitions; 160 | } 161 | 162 | public boolean hasValidTopicPartitions() { 163 | return topicPartitions != null && topicPartitions.size() > 0; 164 | } 165 | 166 | public MetaStore getUserRegisteredStore() { 167 | return userRegisteredStore; 168 | } 169 | 170 | public void setUserRegisteredStore(MetaStore 
userRegisteredStore) { 171 | this.userRegisteredStore = userRegisteredStore; 172 | } 173 | 174 | public Properties getKafkaProperties() { 175 | properties.setProperty(USER_NAME, this.user); 176 | properties.setProperty(PASSWORD_NAME, this.password); 177 | properties.setProperty(SID_NAME, this.sid); 178 | properties.setProperty(GROUP_NAME, this.sid); 179 | properties.setProperty(KAFKA_TOPIC, this.topic); 180 | properties.setProperty(KAFKA_BROKER_URL_NAME, this.brokerUrl); 181 | 182 | return properties; 183 | } 184 | 185 | public void setProperty(String key, String value) { 186 | this.properties.setProperty(key, value); 187 | } 188 | 189 | public String getGroupID() { 190 | return this.sid; 191 | } 192 | 193 | public long getCheckpointCommitInterval() { 194 | return checkpointCommitInterval; 195 | } 196 | 197 | public void setCheckpointCommitInterval(long checkpointCommitInterval) { 198 | this.checkpointCommitInterval = checkpointCommitInterval; 199 | } 200 | 201 | public DTSMetrics getDtsMetrics() { 202 | return dtsMetrics; 203 | } 204 | 205 | public boolean isUseLocalCheckpointStore() { 206 | return useLocalCheckpointStore; 207 | } 208 | 209 | public void setUseLocalCheckpointStore(boolean useLocalCheckpointStore) { 210 | this.useLocalCheckpointStore = useLocalCheckpointStore; 211 | } 212 | 213 | public boolean isCheckpointNotExistThrowException() { 214 | return isCheckpointNotExistThrowException; 215 | } 216 | 217 | public enum ConsumerSubscribeMode { 218 | ASSIGN, 219 | SUBSCRIBE, 220 | UNKNOWN; 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/DTSConsumer.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.common.RecordListener; 4 | 5 | import java.util.Map; 6 | 7 | public interface DTSConsumer { 8 | void start(); 9 | 10 | void addRecordListeners(Map recordListeners); 11 | 12 | boolean check(); 13 | 14 | void close(); 15 | } 16 | 17 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/DefaultDTSConsumer.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.common.Checkpoint; 4 | import com.aliyun.dts.subscribe.clients.exception.CriticalException; 5 | import com.aliyun.dts.subscribe.clients.common.WorkThread; 6 | import com.aliyun.dts.subscribe.clients.recordfetcher.KafkaRecordFetcher; 7 | import com.aliyun.dts.subscribe.clients.recordgenerator.UserRecordGenerator; 8 | import com.aliyun.dts.subscribe.clients.recordprocessor.EtlRecordProcessor; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import static com.aliyun.dts.subscribe.clients.common.Util.*; 13 | 14 | import java.util.LinkedList; 15 | import java.util.List; 16 | 17 | public class DefaultDTSConsumer extends AbstractDTSConsumer { 18 | private static final Logger log = LoggerFactory.getLogger(DefaultDTSConsumer.class); 19 | 20 | 21 | 22 | public DefaultDTSConsumer(ConsumerContext consumerContext) { 23 | super(consumerContext); 24 | } 25 | 26 | @Override 27 | public void start() { 28 | 29 | //check firstly 30 | boolean checkResult = check(); 31 | 32 | if (!checkResult) { 33 | log.error("DTS precheck failed, dts consumer exit."); 34 | throw new CriticalException("DTS precheck 
failed, dts consumer exit."); 35 | } 36 | 37 | synchronized (this) { 38 | initLog4j(); 39 | if (started) { 40 | throw new IllegalStateException("The client has already been started"); 41 | } 42 | 43 | KafkaRecordFetcher recordFetcher = new KafkaRecordFetcher(consumerContext, toProcessRecords); 44 | 45 | UserRecordGenerator userRecordGenerator = new UserRecordGenerator(consumerContext, toProcessRecords, defaultUserRecords, 46 | (tp, timestamp, offset, metadata) -> recordFetcher.setToCommitCheckpoint(new Checkpoint(tp, timestamp, offset, metadata))); 47 | 48 | //processor 49 | EtlRecordProcessor etlRecordProcessor = new EtlRecordProcessor(consumerContext, defaultUserRecords, recordListeners); 50 | 51 | List startStream = startWorker(etlRecordProcessor, userRecordGenerator, recordFetcher); 52 | 53 | while (!consumerContext.isExited()) { 54 | sleepMS(1000); 55 | } 56 | log.info("DTS Consumer: shutting down..."); 57 | for (WorkThread workThread : startStream) { 58 | workThread.stop(); 59 | } 60 | 61 | started = true; 62 | } 63 | } 64 | 65 | private static List startWorker(EtlRecordProcessor etlRecordProcessor, UserRecordGenerator userRecordGenerator, KafkaRecordFetcher recordGenerator) { 66 | List ret = new LinkedList<>(); 67 | ret.add(new WorkThread(etlRecordProcessor, EtlRecordProcessor.class.getName())); 68 | ret.add(new WorkThread(userRecordGenerator, UserRecordGenerator.class.getName())); 69 | ret.add(new WorkThread(recordGenerator, KafkaRecordFetcher.class.getName())); 70 | for (WorkThread workThread : ret) { 71 | workThread.start(); 72 | } 73 | return ret; 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/CheckManager.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check; 2 | 3 | public interface CheckManager { 4 | 5 | void addCheckItem(SubscribeChecker subscribeChecker); 6 | 7 | CheckResult check(); 8 | } 9 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/CheckResult.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check; 2 | 3 | public class CheckResult { 4 | public static final CheckResult SUCESS = new CheckResult(true, null); 5 | private boolean isOk; 6 | private String errMsg; 7 | 8 | public CheckResult(boolean isOk, String errMsg) { 9 | this.isOk = isOk; 10 | this.errMsg = errMsg; 11 | } 12 | 13 | public boolean isOk() { 14 | return isOk; 15 | } 16 | 17 | public void setOk(boolean ok) { 18 | isOk = ok; 19 | } 20 | 21 | public String getErrMsg() { 22 | return errMsg; 23 | } 24 | 25 | public void setErrMsg(String errMsg) { 26 | this.errMsg = errMsg; 27 | } 28 | 29 | @Override 30 | public String toString() { 31 | return "CheckResult{" + 32 | "isOk=" + isOk + 33 | ", errMsg='" + errMsg + '\'' + 34 | '}'; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/DefaultCheckManager.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check; 2 | 3 | import com.aliyun.dts.subscribe.clients.ConsumerContext; 4 | 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | 8 | public class DefaultCheckManager implements CheckManager { 9 | 10 | private 
ConsumerContext consumerContext; 11 | 12 | private List checkerList; 13 | 14 | public DefaultCheckManager(ConsumerContext consumerContext) { 15 | this.consumerContext = consumerContext; 16 | 17 | checkerList = new ArrayList<>(); 18 | } 19 | 20 | @Override 21 | public void addCheckItem(SubscribeChecker subscribeChecker) { 22 | checkerList.add(subscribeChecker); 23 | } 24 | 25 | @Override 26 | public CheckResult check() { 27 | CheckResult checkResult = CheckResult.SUCESS; 28 | for(SubscribeChecker checker : checkerList) { 29 | checkResult = checker.check(); 30 | 31 | if(!checkResult.isOk()) { 32 | return checkResult; 33 | } 34 | } 35 | 36 | return checkResult; 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/SubscribeChecker.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check; 2 | 3 | public interface SubscribeChecker { 4 | CheckResult check(); 5 | } 6 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/SubscribeNetworkChecker.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check; 2 | 3 | import com.aliyun.dts.subscribe.clients.check.util.NetUtil; 4 | import com.aliyun.dts.subscribe.clients.common.RetryUtil; 5 | 6 | import java.net.SocketException; 7 | import java.util.concurrent.TimeUnit; 8 | 9 | public class SubscribeNetworkChecker implements SubscribeChecker { 10 | 11 | private String brokerUrl; 12 | 13 | private RetryUtil retryUtil; 14 | 15 | public SubscribeNetworkChecker(String brokerUrl) { 16 | this.brokerUrl = brokerUrl; 17 | retryUtil = new RetryUtil(4, TimeUnit.SECONDS, 15, (e) -> true); 18 | } 19 | 20 | public CheckResult check() { 21 | boolean isOk = true; 22 | String errMsg = null; 23 | 24 | try { 25 | retryUtil.callFunctionWithRetry( 26 | () -> { 27 | int index = brokerUrl.lastIndexOf(":"); 28 | String url = brokerUrl.substring(0, index); 29 | int port = Integer.parseInt(brokerUrl.substring(index+1)); 30 | NetUtil.testSocket(url, port); 31 | } 32 | ); 33 | } catch (Exception e) { 34 | isOk = false; 35 | errMsg = "telnet " + brokerUrl + " failed, please check the network and if the brokerUrl is correct"; 36 | } 37 | 38 | return new CheckResult(isOk, errMsg); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/util/NetUtil.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check.util; 2 | 3 | import java.io.IOException; 4 | import java.net.InetSocketAddress; 5 | import java.net.Socket; 6 | import java.net.SocketException; 7 | 8 | public class NetUtil { 9 | public static boolean testSocket(String ip, int port) throws SocketException { 10 | Socket soc = null; 11 | try { 12 | soc = new Socket(); 13 | soc.connect(new InetSocketAddress(ip, port), 5000); 14 | return true; 15 | } catch (Exception ex) { 16 | throw new SocketException(ex.getMessage()); 17 | } finally { 18 | try { 19 | soc.close(); 20 | } catch (IOException e) { 21 | e.printStackTrace(); 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/check/util/NodeCommandClientConfig.java: 
-------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.check.util; 2 | 3 | import org.apache.kafka.clients.CommonClientConfigs; 4 | import org.apache.kafka.common.config.AbstractConfig; 5 | import org.apache.kafka.common.config.ConfigDef; 6 | 7 | import java.util.Map; 8 | 9 | import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; 10 | 11 | public class NodeCommandClientConfig extends AbstractConfig { 12 | 13 | public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG; 14 | public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG; 15 | public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG; 16 | public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG; 17 | public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = "reconnect_back_off_time"; 18 | public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG; 19 | public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG; 20 | public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG; 21 | public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; 22 | public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG; 23 | public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG; 24 | public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG; 25 | public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; 26 | public static final String POLL_MS_CONFIG = "poll.ms"; 27 | public static final String SECURITY_PROTOCOL_CONFIG = CommonClientConfigs.SECURITY_PROTOCOL_CONFIG; 28 | private static final ConfigDef CONFIG; 29 | static { 30 | CONFIG = new ConfigDef() 31 | .define(BOOTSTRAP_SERVERS_CONFIG, // required with no default value 32 | ConfigDef.Type.LIST, 33 | ConfigDef.Importance.HIGH, 34 | CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) 35 | .define(METRICS_NUM_SAMPLES_CONFIG, 36 | ConfigDef.Type.INT, 37 | 2, 38 | atLeast(1), 39 | ConfigDef.Importance.LOW, 40 | CommonClientConfigs.METRICS_NUM_SAMPLES_DOC) 41 | .define(METRICS_SAMPLE_WINDOW_MS_CONFIG, 42 | ConfigDef.Type.LONG, 43 | 30000, 44 | atLeast(0), 45 | ConfigDef.Importance.LOW, 46 | CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC) 47 | .define(METRIC_REPORTER_CLASSES_CONFIG, 48 | ConfigDef.Type.LIST, 49 | "", 50 | ConfigDef.Importance.LOW, 51 | CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) 52 | .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, 53 | ConfigDef.Type.LONG, 54 | 9 * 60 * 1000, 55 | ConfigDef.Importance.LOW, 56 | CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) 57 | .define(METADATA_MAX_AGE_CONFIG, 58 | ConfigDef.Type.LONG, 59 | 5 * 60 * 1000, 60 | atLeast(0), 61 | ConfigDef.Importance.LOW, 62 | CommonClientConfigs.METADATA_MAX_AGE_DOC) 63 | .define(RECEIVE_BUFFER_CONFIG, 64 | ConfigDef.Type.INT, 65 | 32 * 1024, 66 | atLeast(0), 67 | ConfigDef.Importance.LOW, 68 | CommonClientConfigs.RECEIVE_BUFFER_DOC) 69 | .define(RECONNECT_BACKOFF_MS_CONFIG, 70 | ConfigDef.Type.LONG, 71 | 50L, 72 | atLeast(0L), 73 | ConfigDef.Importance.LOW, 74 | CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC) 75 | 
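// note: RECONNECT_BACKOFF_MAX_MS_CONFIG maps to the custom key "reconnect_back_off_time"
// (defined above) rather than to a CommonClientConfigs constant like the other settings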
.define(RECONNECT_BACKOFF_MAX_MS_CONFIG, 76 | ConfigDef.Type.LONG, 77 | 1000L, 78 | atLeast(0L), 79 | ConfigDef.Importance.LOW, 80 | "reconnect back off ms") 81 | .define(RETRY_BACKOFF_MS_CONFIG, 82 | ConfigDef.Type.LONG, 83 | 100L, 84 | atLeast(0L), 85 | ConfigDef.Importance.LOW, 86 | CommonClientConfigs.RETRY_BACKOFF_MS_DOC) 87 | .define(REQUEST_TIMEOUT_MS_CONFIG, 88 | ConfigDef.Type.INT, 89 | 400 * 1000, 90 | atLeast(0), 91 | ConfigDef.Importance.LOW, 92 | CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC) 93 | .define(CLIENT_ID_CONFIG, 94 | ConfigDef.Type.STRING, 95 | "", 96 | ConfigDef.Importance.MEDIUM, 97 | CommonClientConfigs.CLIENT_ID_DOC) 98 | .define(SECURITY_PROTOCOL_CONFIG, 99 | ConfigDef.Type.STRING, 100 | CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, 101 | ConfigDef.Importance.MEDIUM, 102 | CommonClientConfigs.SECURITY_PROTOCOL_DOC) 103 | .define(SEND_BUFFER_CONFIG, 104 | ConfigDef.Type.INT, 105 | 128 * 1024, 106 | atLeast(0), 107 | ConfigDef.Importance.LOW, 108 | CommonClientConfigs.SEND_BUFFER_DOC) 109 | .define(POLL_MS_CONFIG, 110 | ConfigDef.Type.LONG, 111 | 5000, 112 | atLeast(0), 113 | ConfigDef.Importance.LOW, 114 | "pool time out from server"); 115 | } 116 | public NodeCommandClientConfig(Map originals) { 117 | super(CONFIG, originals); 118 | } 119 | 120 | public static ConfigDef configDef() { 121 | return new ConfigDef(CONFIG); 122 | } 123 | 124 | 125 | } -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/AtomicFileStore.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import java.io.*; 4 | import java.util.LinkedList; 5 | import java.util.List; 6 | 7 | import static com.aliyun.dts.subscribe.clients.common.Util.*; 8 | 9 | public class AtomicFileStore { 10 | private final String fileName; 11 | 12 | public AtomicFileStore(String fileName) { 13 | this.fileName = fileName; 14 | } 15 | 16 | public List getContent() { 17 | List ret = new LinkedList<>(); 18 | if (!checkFileExists(fileName)) { 19 | return ret; 20 | } 21 | FileReader readFile = null; 22 | BufferedReader bufferedReader = null; 23 | try { 24 | readFile = new FileReader(fileName); 25 | bufferedReader = new BufferedReader(readFile); 26 | String s = null; 27 | while ((s = bufferedReader.readLine()) != null) { 28 | ret.add(s); 29 | } 30 | } catch (Exception e) { 31 | } finally { 32 | swallowErrorClose(readFile); 33 | swallowErrorClose(bufferedReader); 34 | } 35 | return ret; 36 | 37 | } 38 | 39 | public boolean updateContent(List newContent) { 40 | synchronized (this) { 41 | String tmpFileName = fileName + ".tmp"; 42 | if (checkFileExists(tmpFileName)) { 43 | deleteFile(tmpFileName); 44 | } 45 | boolean writeSuccess = true; 46 | FileWriter fileWriter = null; 47 | BufferedWriter bufferedWriter = null; 48 | try { 49 | fileWriter = new FileWriter(tmpFileName); 50 | bufferedWriter = new BufferedWriter(fileWriter); 51 | for (String content : newContent) { 52 | bufferedWriter.write(content); 53 | bufferedWriter.newLine(); 54 | } 55 | bufferedWriter.flush(); 56 | } catch (Exception e) { 57 | writeSuccess = false; 58 | } finally { 59 | swallowErrorClose(fileWriter); 60 | swallowErrorClose(bufferedWriter); 61 | } 62 | return writeSuccess ? 
(new File(tmpFileName).renameTo(new File(fileName))) : false; 63 | } 64 | } 65 | 66 | public void remove() { 67 | deleteFile(fileName); 68 | } 69 | 70 | } 71 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/Checkpoint.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | public class Checkpoint { 6 | 7 | public static final Checkpoint INVALID_STREAM_CHECKPOINT = new Checkpoint(null, -1, -1, "-1"); 8 | private final TopicPartition topicPartition; 9 | private final long timeStamp; 10 | private final long offset; 11 | private final String info; 12 | 13 | public Checkpoint(TopicPartition topicPartition, long timeStamp, long offset, String info) { 14 | this.topicPartition = topicPartition; 15 | this.timeStamp = timeStamp; 16 | this.offset = offset; 17 | this.info = info; 18 | } 19 | 20 | public long getOffset() { 21 | return offset; 22 | } 23 | 24 | public long getTimeStamp() { 25 | return timeStamp; 26 | } 27 | 28 | public String getInfo() { 29 | return info; 30 | } 31 | 32 | public TopicPartition getTopicPartition() { 33 | return topicPartition; 34 | } 35 | 36 | public String toString() { 37 | return "Checkpoint[ topicPartition: " + topicPartition + "timestamp: " + timeStamp + ", offset: " + offset + ", info: " + info + "]"; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/FieldEntryHolder.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import java.util.Iterator; 4 | import java.util.LinkedList; 5 | import java.util.List; 6 | 7 | public class FieldEntryHolder { 8 | private final List originFields; 9 | private final Iterator iterator; 10 | private final List filteredFields; 11 | public FieldEntryHolder(List originFields) { 12 | this.originFields = originFields; 13 | if (null == originFields) { 14 | this.filteredFields = null; 15 | this.iterator = null; 16 | } else { 17 | this.filteredFields = new LinkedList<>(); 18 | this.iterator = originFields.iterator(); 19 | } 20 | } 21 | 22 | public boolean hasNext() { 23 | if (iterator == null) { 24 | return true; 25 | } 26 | return iterator.hasNext(); 27 | } 28 | 29 | public void skip() { 30 | if (null != iterator) { 31 | iterator.next(); 32 | } 33 | } 34 | 35 | public Object take() { 36 | if (null != iterator) { 37 | Object current = iterator.next(); 38 | filteredFields.add(current); 39 | return current; 40 | } else { 41 | return null; 42 | } 43 | } 44 | } -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/GeometryUtil.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import com.vividsolutions.jts.geom.Geometry; 4 | import com.vividsolutions.jts.io.ParseException; 5 | import com.vividsolutions.jts.io.WKBReader; 6 | 7 | import java.nio.ByteBuffer; 8 | 9 | public class GeometryUtil { 10 | 11 | private static final double SCALE = Math.pow(10.0D, 4.0); 12 | 13 | public static String fromWKBToWKTText(ByteBuffer data) throws ParseException { 14 | if (null == data) { 15 | return null; 16 | } else { 17 | WKBReader reader = new WKBReader(); 18 | 
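// parse the raw WKB bytes into a JTS Geometry and return its WKT text;
// ByteBuffer.array() assumes a heap buffer with an accessible backing array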
Geometry geometry = reader.read(data.array()); 19 | return geometry.toText(); 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/JDKCharsetMapper.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import java.util.HashMap; 4 | import java.util.Map; 5 | 6 | public interface JDKCharsetMapper { 7 | 8 | Map MYSQL_JDK_ENCODINGS = new HashMap() { 9 | { 10 | put("armscii8", "WINDOWS-1252"); 11 | put("ascii", "US-ASCII"); 12 | put("big5", "BIG5"); 13 | put("binary", "ISO-8859-1"); 14 | put("cp1250", "Cp1250"); 15 | put("cp1251", "Cp1251"); 16 | put("cp1256", "WINDOWS-1256"); 17 | put("cp1257", "Cp1257"); 18 | put("cp850", "IBM850"); 19 | put("cp852", "IBM852"); 20 | put("cp866", "Cp866"); 21 | put("cp932", "Cp932"); 22 | put("dec8", "WINDOWS-1252"); 23 | put("eucjpms", "X-EUCJP-OPEN"); 24 | put("euckr", "EUC_KR"); 25 | put("gb2312", "EUC_CN"); 26 | put("gbk", "GBK"); 27 | put("geostd8", "WINDOWS-1252"); 28 | put("greek", "ISO8859_7"); 29 | put("hebrew", "ISO8859_8"); 30 | put("hp8", "WINDOWS-1252"); 31 | put("keybcs2", "IBM852"); 32 | put("koi8r", "KOI8-R"); 33 | put("koi8u", "KOI8-R"); 34 | put("latin1", "Cp1252"); 35 | put("latin2", "ISO8859_2"); 36 | put("latin5", "ISO-8859-9"); 37 | put("latin7", "ISO-8859-13"); 38 | put("macce", "MacCentralEurope"); 39 | put("macroman", "MacRoman"); 40 | put("sjis", "SJIS"); 41 | put("swe7", "WINDOWS-1252"); 42 | put("tis620", "TIS620"); 43 | put("ujis", "EUC_JP"); 44 | put("utf16", "UTF-16"); 45 | put("utf16le", "UTF-16LE"); 46 | put("utf32", "UTF-32"); 47 | put("utf8", "UTF-8"); 48 | put("utf8mb4", "UTF-8"); 49 | put("utf8mb3", "UTF-8"); 50 | put("ucs2", "UnicodeBig"); 51 | } 52 | }; 53 | 54 | static String getJDKECharset(String dbCharset) { 55 | return MYSQL_JDK_ENCODINGS.getOrDefault(dbCharset, dbCharset); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/NullableOptional.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import java.util.NoSuchElementException; 4 | import java.util.Objects; 5 | import java.util.function.Consumer; 6 | import java.util.function.Function; 7 | import java.util.function.Predicate; 8 | import java.util.function.Supplier; 9 | 10 | /** 11 | * Copy code from java.util.Optional, the only difference is NullableOptional consider null as a normal value. 12 | * So the NullableOptional.ofNullable(null).isPresent() should be true. 
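* (By contrast, java.util.Optional.ofNullable(null).isPresent() returns false.)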
13 |  */
14 | public final class NullableOptional<T> {
15 |     private static final NullableOptional<?> EMPTY = new NullableOptional<>();
16 |     private final T value;
17 |     private final boolean isPresent;
18 | 
19 |     private NullableOptional() {
20 |         this.isPresent = false;
21 |         this.value = null;
22 |     }
23 | 
24 |     public static <T> NullableOptional<T> empty() {
25 |         @SuppressWarnings("unchecked") NullableOptional<T> value = (NullableOptional<T>) EMPTY;
26 |         return value;
27 |     }
28 | 
29 |     private NullableOptional(T value) {
30 |         this.value = value;
31 |         this.isPresent = true;
32 |     }
33 | 
34 |     public static <T> NullableOptional<T> of(T value) {
35 |         Objects.requireNonNull(value);
36 |         return new NullableOptional<>(value);
37 |     }
38 | 
39 |     public static <T> NullableOptional<T> ofNullable(T value) {
40 |         return new NullableOptional<>(value);
41 |     }
42 | 
43 |     public T get() {
44 |         if (!this.isPresent()) {
45 |             throw new NoSuchElementException("No value present");
46 |         } else {
47 |             return this.value;
48 |         }
49 |     }
50 | 
51 |     public boolean isPresent() {
52 |         return this.isPresent;
53 |     }
54 | 
55 |     public void ifPresent(Consumer<? super T> consumer) {
56 |         if (this.isPresent()) {
57 |             consumer.accept(this.value);
58 |         }
59 |     }
60 | 
61 |     public NullableOptional<T> filter(Predicate<? super T> filterFunction) {
62 |         Objects.requireNonNull(filterFunction);
63 |         if (!this.isPresent()) {
64 |             return this;
65 |         } else {
66 |             return filterFunction.test(this.value) ? this : empty();
67 |         }
68 |     }
69 | 
70 |     public <U> NullableOptional<U> map(Function<? super T, ? extends U> function) {
71 |         Objects.requireNonNull(function);
72 |         return !this.isPresent() ? empty() : ofNullable(function.apply(this.value));
73 |     }
74 | 
75 |     public <U> NullableOptional<U> flatMap(Function<? super T, NullableOptional<U>> mapper) {
76 |         Objects.requireNonNull(mapper);
77 |         return !this.isPresent() ? empty() : Objects.requireNonNull(mapper.apply(this.value));
78 |     }
79 | 
80 |     public T orElse(T defaultValue) {
81 |         return this.isPresent() ? this.value : defaultValue;
82 |     }
83 | 
84 |     public T orElseGet(Supplier<? extends T> defaultValueSupplier) {
85 |         return this.isPresent() ? this.value : defaultValueSupplier.get();
86 |     }
87 | 
88 |     public <X extends Throwable> T orElseThrow(Supplier<? extends X> exceptionSupplier) throws X {
89 |         if (this.isPresent()) {
90 |             return this.value;
91 |         } else {
92 |             throw exceptionSupplier.get();
93 |         }
94 |     }
95 | 
96 |     public boolean equals(Object var1) {
97 |         if (this == var1) {
98 |             return true;
99 |         } else if (!(var1 instanceof NullableOptional)) {
100 |             return false;
101 |         } else {
102 |             NullableOptional<?> var2 = (NullableOptional<?>) var1;
103 |             return Objects.equals(this.isPresent, var2.isPresent) && Objects.equals(this.value, var2.value);
104 |         }
105 |     }
106 | 
107 |     public int hashCode() {
108 |         return Objects.hash(this.isPresent, this.value);
109 |     }
110 | 
111 |     public String toString() {
112 |         return this.isPresent() ? String.format("Optional[%s]", this.value) : "Optional.empty";
String.format("Optional[%s]", this.value) : "Optional.empty"; 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/RecordListener.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord; 4 | 5 | public interface RecordListener { 6 | 7 | public void consume(DefaultUserRecord record); 8 | 9 | } 10 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/RetryUtil.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.common; 2 | 3 | import com.aliyun.dts.subscribe.clients.exception.CriticalException; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | import java.util.concurrent.TimeUnit; 8 | import java.util.function.BiFunction; 9 | import java.util.function.Consumer; 10 | import java.util.function.Function; 11 | 12 | public class RetryUtil { 13 | private static final Logger LOG = LoggerFactory.getLogger(RetryUtil.class); 14 | private final String globalJobType; 15 | private final String objectNameShouldBeRetried; 16 | private Function recoverableChecker; 17 | private int maxRetryTimes; 18 | private int freezeInterval; 19 | private TimeUnit freezeTimeUnit; 20 | 21 | public RetryUtil(String globalJobType, String objectNameShouldBeRetried, 22 | int freezeInterval, TimeUnit freezeTimeUnit, int maxRetryTimes, 23 | Function recoverableChecker) { 24 | this.globalJobType = globalJobType; 25 | this.objectNameShouldBeRetried = objectNameShouldBeRetried; 26 | this.maxRetryTimes = maxRetryTimes; 27 | this.freezeInterval = freezeInterval; 28 | this.freezeTimeUnit = freezeTimeUnit; 29 | this.recoverableChecker = recoverableChecker; 30 | } 31 | 32 | public RetryUtil(Function recoverableChecker) { 33 | this(5, TimeUnit.SECONDS, 1, recoverableChecker); 34 | } 35 | 36 | public RetryUtil(int freezeInterval, TimeUnit freezeTimeUnit, int maxRetryTimes, 37 | Function recoverableChecker) { 38 | this("unknown", "unknown", freezeInterval, freezeTimeUnit, maxRetryTimes, recoverableChecker); 39 | } 40 | 41 | public void callFunctionWithRetry(ThrowableFunctionVoid throwableFunction) throws Exception { 42 | callFunctionWithRetry(throwableFunction, null, null); 43 | } 44 | 45 | public void callFunctionWithRetry(ThrowableFunctionVoid throwableFunction, 46 | BiFunction recoverableChecker, 47 | Consumer retryInfoConsumer) throws Exception { 48 | callFunctionWithRetry( 49 | () -> { 50 | throwableFunction.call(); 51 | return null; 52 | }, 53 | recoverableChecker, retryInfoConsumer); 54 | } 55 | 56 | public T callFunctionWithRetry(ThrowableFunction throwableFunction) throws Exception { 57 | return callFunctionWithRetry(maxRetryTimes, freezeInterval, freezeTimeUnit, throwableFunction); 58 | } 59 | 60 | public T callFunctionWithRetry(int maxRetryTimes, int freezeInternal, TimeUnit freezeTimeUnit, 61 | ThrowableFunction throwableFunction) throws Exception { 62 | return this.callFunctionWithRetry(maxRetryTimes, freezeInternal, freezeTimeUnit, throwableFunction, 63 | (e, times) -> recoverableChecker.apply(e), null); 64 | } 65 | 66 | public T callFunctionWithRetry(ThrowableFunction throwableFunction, 67 | BiFunction recoverableChecker, 68 | Consumer retryInfoConsumer) throws Exception { 69 | return 
69 |         return callFunctionWithRetry(maxRetryTimes, freezeInterval, freezeTimeUnit, throwableFunction, recoverableChecker, retryInfoConsumer);
70 |     }
71 | 
72 |     public <T> T callFunctionWithRetry(int maxRetryTimes, int freezeInterval, TimeUnit freezeTimeUnit,
73 |                                        ThrowableFunction<T> throwableFunction,
74 |                                        BiFunction<Throwable, Integer, Boolean> recoverableChecker,
75 |                                        Consumer<RetryInfo> retryInfoConsumer) throws Exception {
76 |         Throwable error = null;
77 |         RetryInfo retryInfo = null;
78 |         maxRetryTimes = Math.max(1, maxRetryTimes);
79 | 
80 |         for (int i = 0; i < maxRetryTimes; i++) {
81 |             try {
82 |                 T rs = throwableFunction.call();
83 |                 if (null != retryInfo) {
84 |                     retryInfo.endRetry();
85 |                 }
86 |                 return rs;
87 |             } catch (Throwable e) {
88 |                 error = e;
89 |                 if (recoverableChecker.apply(e, i)) {
90 |                     if (null == retryInfo) {
91 |                         retryInfo = new RetryInfo(globalJobType, objectNameShouldBeRetried);
92 |                     }
93 |                     retryInfo.retry(e);
94 | 
95 |                     LOG.warn("call function {} failed {} times, trying again",
96 |                         throwableFunction.toString(), retryInfo.getRetryCount(), e);
97 |                     freezeTimeUnit.sleep(freezeInterval);
98 | 
99 |                     if (null != retryInfoConsumer) {
100 |                         retryInfoConsumer.accept(retryInfo);
101 |                     }
102 |                 } else {
103 |                     break;
104 |                 }
105 |             }
106 |         }
107 | 
108 |         if (error instanceof Exception) {
109 |             throw (Exception) error;
110 |         } else {
111 |             throw new CriticalException("common", error);
112 |         }
113 |     }
114 | 
115 |     public interface ThrowableFunctionVoid {
116 |         void call() throws Exception;
117 |     }
118 | 
119 |     public interface ThrowableFunction<T> {
120 |         T call() throws Exception;
121 |     }
122 | 
123 |     public static class RetryInfo {
124 |         private final String retryModule;
125 |         private final String retryTarget;
126 |         private String errMsg;
127 | 
128 |         private long retryCount;
129 |         private long beginTimestamp;
130 |         private long endTimestamp;
131 | 
132 |         public RetryInfo(String retryModule, String retryTarget) {
133 |             this.retryModule = retryModule;
134 |             this.retryTarget = retryTarget;
135 |             this.retryCount = 0;
136 |             this.errMsg = "null";
137 |             this.beginTimestamp = 0;
138 |             this.endTimestamp = 0;
139 |         }
140 | 
141 |         public boolean isRetrying() {
142 |             return 0 != beginTimestamp && 0 == endTimestamp;
143 |         }
144 | 
145 |         public long getBeginTimestamp() {
146 |             return beginTimestamp;
147 |         }
148 | 
149 |         void beginRetry() {
150 |             this.beginTimestamp = Time.now();
151 |         }
152 | 
153 |         void endRetry() {
154 |             this.endTimestamp = Time.now();
155 |         }
156 | 
157 |         public long getRetryCount() {
158 |             return retryCount;
159 |         }
160 | 
161 |         public void retry(Throwable e) {
162 |             if (0 == beginTimestamp) {
163 |                 beginRetry();
164 |             }
165 |             if (null != e) {
166 |                 errMsg = e.toString();
167 |             }
168 |             retryCount++;
169 |         }
170 | 
171 |         public String getRetryModule() {
172 |             return retryModule;
173 |         }
174 | 
175 |         public String getErrMsg() {
176 |             return errMsg;
177 |         }
178 | 
179 |         void setErrMsg(String errMsg) {
180 |             this.errMsg = errMsg;
181 |         }
182 | 
183 |         public String getRetryTarget() {
184 |             return retryTarget;
185 |         }
186 | 
187 |         public long getRetryTime(TimeUnit unit) {
188 |             long end = (0 == endTimestamp) ? Time.now() : endTimestamp;
189 |             return unit.convert(end - beginTimestamp, TimeUnit.MILLISECONDS);
190 |         }
191 |     }
192 | 
193 | }
194 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/ThreadFactoryWithNamePrefix.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common;
2 | 
3 | import java.util.concurrent.ThreadFactory;
4 | import java.util.concurrent.atomic.AtomicInteger;
5 | 
6 | public class ThreadFactoryWithNamePrefix implements ThreadFactory {
7 |     private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1);
8 |     private final ThreadGroup group;
9 |     private final AtomicInteger threadNumber = new AtomicInteger(1);
10 |     private final String namePrefix;
11 |     private final boolean daemon;
12 | 
13 |     public ThreadFactoryWithNamePrefix(String poolPrefix) {
14 |         this(poolPrefix, false);
15 |     }
16 | 
17 |     public ThreadFactoryWithNamePrefix(String poolPrefix, boolean daemon) {
18 |         this.daemon = daemon;
19 |         SecurityManager s = System.getSecurityManager();
20 |         group = (s != null) ? s.getThreadGroup() :
21 |             Thread.currentThread().getThreadGroup();
22 |         namePrefix = poolPrefix + POOL_NUMBER.getAndIncrement() + "-thread-";
23 |     }
24 | 
25 |     @Override
26 |     public Thread newThread(Runnable r) {
27 |         Thread t = new Thread(group, r,
28 |             namePrefix + threadNumber.getAndIncrement(),
29 |             0);
30 |         t.setDaemon(daemon);
31 |         if (t.getPriority() != Thread.NORM_PRIORITY) {
32 |             t.setPriority(Thread.NORM_PRIORITY);
33 |         }
34 |         return t;
35 |     }
36 | }
37 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/Time.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common;
2 | 
3 | import java.text.SimpleDateFormat;
4 | import java.util.Calendar;
5 | import java.util.TimeZone;
6 | 
7 | /**
8 |  * Utility methods for getting the time and computing intervals.
9 |  */
10 | public final class Time {
11 | 
12 |     /**
13 |      * Number of nanoseconds in 1 millisecond.
14 |      */
15 |     private static final long NANOSECONDS_PER_MILLISECOND = 1000000;
16 | 
17 |     private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC");
18 | 
19 |     private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
20 |         new ThreadLocal<SimpleDateFormat>() {
21 |             @Override
22 |             protected SimpleDateFormat initialValue() {
23 |                 return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSSZ");
24 |             }
25 |         };
26 | 
27 |     /**
28 |      * Current system time. Do not use this to calculate a duration or interval
29 |      * to sleep, because it will be broken by settimeofday. Instead, use
30 |      * monotonicNow.
31 |      *
32 |      * @return current time in msec.
33 |      */
34 |     public static long now() {
35 |         return System.currentTimeMillis();
36 |     }
37 | 
38 |     /**
39 |      * Current time from some arbitrary time base in the past, counting in
40 |      * milliseconds, and not affected by settimeofday or similar system clock
41 |      * changes. This is appropriate to use when computing how much longer to
42 |      * wait for an interval to expire.
43 |      * This function can return a negative value and it must be handled correctly
44 |      * by callers. See the documentation of System#nanoTime for caveats.
45 |      *
46 |      * @return a monotonic clock that counts in milliseconds.
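    |  * A typical interval measurement looks like the sketch below, where doWork() stands in
    |  * for any application operation:
    |  * <pre>{@code
    |  * long start = Time.monotonicNow();
    |  * doWork();
    |  * long elapsedMs = Time.monotonicNow() - start;
    |  * }</pre>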
47 |      */
48 |     public static long monotonicNow() {
49 |         return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
50 |     }
51 | 
52 |     /**
53 |      * Same as {@link #monotonicNow()} but returns its result in nanoseconds.
54 |      * Note that this is subject to the same resolution constraints as
55 |      * {@link System#nanoTime()}.
56 |      *
57 |      * @return a monotonic clock that counts in nanoseconds.
58 |      */
59 |     public static long monotonicNowNanos() {
60 |         return System.nanoTime();
61 |     }
62 | 
63 |     /**
64 |      * Convert a time in milliseconds to a human-readable format.
65 |      * @param millis time
66 |      * @return a human-readable string for the input time
67 |      */
68 |     public static String formatTime(long millis) {
69 |         return DATE_FORMAT.get().format(millis);
70 |     }
71 | 
72 |     /**
73 |      * Get the current UTC time in milliseconds.
74 |      *
75 |      * @return the current UTC time in milliseconds.
76 |      */
77 |     public static long getUtcTime() {
78 |         return Calendar.getInstance(UTC_ZONE).getTimeInMillis();
79 |     }
80 | }
81 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/UniqueKeyGenerator.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common;
2 | 
3 | import java.util.concurrent.atomic.AtomicLong;
4 | 
5 | /**
6 |  * For a compaction-enabled topic, an empty key field in a producer record is not allowed, so we generate a unique key to avoid compaction.
7 |  */
8 | public class UniqueKeyGenerator {
9 |     private AtomicLong counter;
10 |     private final String startMSStr;
11 |     public UniqueKeyGenerator() {
12 |         counter = new AtomicLong(0);
13 |         startMSStr = String.valueOf(System.currentTimeMillis()) + "-";
14 |     }
15 |     public String nextKey() {
16 |         return startMSStr + counter.getAndIncrement();
17 |     }
18 | 
19 | }
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/UserCommitCallBack.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common;
2 | 
3 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
4 | import org.apache.kafka.common.TopicPartition;
5 | 
6 | public interface UserCommitCallBack {
7 |     void commit(TopicPartition tp, Record record, long offset, String metadata);
8 | }
9 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/Util.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common;
2 | 
3 | import com.aliyun.dts.subscribe.clients.ConsumerContext;
4 | import com.aliyun.dts.subscribe.clients.recordfetcher.ClusterSwitchListener;
5 | import org.apache.commons.lang3.StringUtils;
6 | import org.apache.kafka.clients.CommonClientConfigs;
7 | import org.apache.kafka.clients.consumer.ConsumerConfig;
8 | import org.apache.kafka.common.config.SaslConfigs;
9 | 
10 | import java.io.Closeable;
11 | import java.io.File;
12 | import java.util.Properties;
13 | 
14 | import static com.aliyun.dts.subscribe.clients.recordfetcher.Names.*;
15 | 
16 | public class Util {
17 |     public static void swallowErrorClose(Closeable target) {
18 |         try {
19 |             if (null != target) {
20 |                 target.close();
21 |             }
22 |         } catch (Exception e) {
23 |         }
24 |     }
25 | 
26 |     public static void sleepMS(long value) {
27 |         try {
28 |             Thread.sleep(value);
29 |         } catch (Exception e) {
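    |             // Interruption is deliberately swallowed here: sleepMS is best-effort and callers
    |             // do not rely on the full duration being slept. Restoring the flag via
    |             // Thread.currentThread().interrupt() would be the stricter alternative.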
30 |         }
31 |     }
32 | 
33 |     public static void mergeSourceKafkaProperties(Properties originProperties, Properties mergeToProperties) {
34 |         originProperties.forEach((k, v) -> {
35 |             String key = (String) k;
36 |             if (key.startsWith("kafka.")) {
37 |                 String toPutKey = key.substring(6);
38 |                 mergeToProperties.setProperty(toPutKey, (String) v);
39 |             }
40 |         });
41 |         mergeToProperties.setProperty(SaslConfigs.SASL_JAAS_CONFIG,
42 |             buildJaasConfig(originProperties.getProperty(SID_NAME), originProperties.getProperty(USER_NAME), originProperties.getProperty(PASSWORD_NAME)));
43 |         mergeToProperties.setProperty(SaslConfigs.SASL_MECHANISM, "PLAIN");
44 |         mergeToProperties.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
45 |         mergeToProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, originProperties.getProperty(KAFKA_BROKER_URL_NAME));
46 |         mergeToProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, originProperties.getProperty(GROUP_NAME));
47 |         // disable auto commit
48 |         mergeToProperties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
49 | 
50 |         mergeToProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
51 |         mergeToProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
52 |         // let the consumer detect a cluster switch and re-seek the offset by timestamp
53 |         mergeToProperties.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ClusterSwitchListener.class.getName());
54 | 
55 |         mergeToProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
56 |     }
57 | 
58 |     public static void require(boolean predicate, String errMessage) {
59 |         if (!predicate) {
60 |             throw new RuntimeException(errMessage);
61 |         }
62 |     }
63 | 
64 |     public static String buildJaasConfig(String sid, String user, String password) {
65 |         String jaasTemplate = "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s-%s\" password=\"%s\";";
66 |         return String.format(jaasTemplate, user, sid, password);
67 |     }
68 | 
69 |     public static boolean checkFileExists(String fileName) {
70 |         File metaFile = new File(fileName);
71 |         return metaFile.exists();
72 |     }
73 | 
74 |     public static void deleteFile(String fileName) {
75 |         File maybeAbsolutePath = new File(fileName);
76 |         if (!maybeAbsolutePath.exists()) {
77 |             return;
78 |         }
79 |         File metaFile = null;
80 |         if (maybeAbsolutePath.isAbsolute()) {
81 |             metaFile = maybeAbsolutePath;
82 |         } else {
83 |             File currentPath = new File(".");
84 |             metaFile = new File(currentPath.getAbsolutePath() + File.separator + fileName);
85 |         }
86 |         boolean deleted = metaFile.delete();
87 |         if (!deleted) {
88 |             throw new RuntimeException(metaFile.getAbsolutePath() + " should be cleaned, but the deletion failed");
89 |         }
90 |     }
91 | 
92 |     private static void setIfAbsent(Properties properties, String key, String valueSetIfAbsent) {
93 |         if (StringUtils.isEmpty(properties.getProperty(key))) {
94 |             properties.setProperty(key, valueSetIfAbsent);
95 |         }
96 |     }
97 | 
98 |     public static String[] uncompressionObjectName(String compressionName) {
99 |         if (null == compressionName || compressionName.isEmpty()) {
100 |             return null;
101 |         }
102 | 
103 |         String[] names = compressionName.split("\\.");
104 | 
105 |         int length = names.length;
106 | 
107 |         for (int i = 0; i < length; ++i) {
108 |             names[i] = names[i].replace("\\u002E", ".");
109 |         }
110 |         return names;
111 |     }
112 | 
    |     // NOTE: the original lines between here and the tail of the checkpoint parser were lost in
    |     // extraction. The method name and the "@" delimiter below are reconstructed assumptions;
    |     // only the lines numbered 146-148 are original.
    |     public static Checkpoint parseCheckpoint(String checkpointString) {
    |         Checkpoint streamCheckpoint = null;
    |         String[] offsetAndTS = StringUtils.split(checkpointString, "@");
    |         if (null != offsetAndTS && offsetAndTS.length >= 2) {
146 |             streamCheckpoint = new Checkpoint(null, Long.valueOf(offsetAndTS[0]), Long.valueOf(offsetAndTS[1]), "");
147 |         }
148 |         return streamCheckpoint;
149 |     }
150 | 
151 |     public static ConsumerContext.ConsumerSubscribeMode parseConsumerSubscribeMode(String value) {
152 |         if (StringUtils.equalsIgnoreCase("assign", value)) {
153 |             return ConsumerContext.ConsumerSubscribeMode.ASSIGN;
154 |         } else if (StringUtils.equalsIgnoreCase("subscribe", value)) {
155 |             return ConsumerContext.ConsumerSubscribeMode.SUBSCRIBE;
156 |         } else {
157 |             throw new RuntimeException("RecordGenerator: unknown subscribe mode [" + value + "]");
158 |         }
159 |     }
160 | }
161 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/WorkThread.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common;
2 | 
3 | import java.io.Closeable;
4 | 
5 | import static com.aliyun.dts.subscribe.clients.common.Util.swallowErrorClose;
6 | 
7 | public class WorkThread<T extends Runnable & Closeable> {
8 |     private final T r;
9 |     private final Thread worker;
10 | 
11 |     public WorkThread(T r, String name) {
12 |         this.r = r;
13 |         worker = new Thread(r);
14 |         worker.setName(name);
15 |     }
16 | 
17 |     public void start() {
18 |         worker.start();
19 |     }
20 | 
21 |     public void stop() {
22 |         swallowErrorClose(r);
23 |         try {
24 |             worker.join(10000, 0);
25 |         } catch (Exception e) {
26 |             e.printStackTrace();
27 |         }
28 |     }
29 | }
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/common/function/SwallowException.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.common.function;
2 | 
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 | 
6 | import java.io.Closeable;
7 | import java.util.function.Function;
8 | 
9 | //@SuppressFBWarnings
10 | public final class SwallowException<T extends AutoCloseable> implements AutoCloseable {
11 | 
12 |     private static final Logger LOGGER = LoggerFactory.getLogger(SwallowException.class);
13 | 
14 |     private final T o;
15 | 
16 |     private SwallowException(T oo) {
17 |         this.o = oo;
18 |     }
19 | 
20 |     public interface CalleeFunctionWithoutReturnValue {
21 |         void call() throws Exception;
22 |     }
23 | 
24 |     public interface CalleeFunctionWithReturnValue<R> {
25 |         R call() throws Exception;
26 |     }
27 | 
28 |     public interface ConsumerFunctionWithoutReturnValue<L> {
29 |         void call(L value) throws Exception;
30 |     }
31 | 
32 |     public static <R> R callAndSwallowException(CalleeFunctionWithReturnValue<R> callee) {
33 |         try {
34 |             return callee.call();
35 |         } catch (Exception e) {
36 |             LOGGER.info("call function {} failed, and swallow exception", callee.getClass().getName(), e);
37 |         }
38 | 
39 |         return null;
40 |     }
41 | 
42 |     public static void callAndSwallowException(CalleeFunctionWithoutReturnValue function) {
43 |         callAndSwallowException((CalleeFunctionWithReturnValue<Void>) () -> {
44 |             function.call();
45 |             return null;
46 |         });
47 |     }
48 | 
49 |     public static <R> R callAndThrowRuntimeException(CalleeFunctionWithReturnValue<R> callee) {
50 |         try {
51 |             return callee.call();
52 |         } catch (Exception e) {
53 |             LOGGER.info("call function {} failed, and rethrow as runtime exception", callee.getClass().getName(), e);
54 |             throw new RuntimeException(e.toString(), e);
55 |         }
56 |     }
57 | 
58 |     public static void callAndThrowRuntimeException(CalleeFunctionWithoutReturnValue function) {
59 |         callAndThrowRuntimeException((CalleeFunctionWithReturnValue<Void>) () -> {
60 |             function.call();
61 |             return null;
62 |         });
63 |     }
64 | 
65 |     public static <R> R
callAndThrowException(CalleeFunctionWithReturnValue<R> callee,
66 |                                           Function<Exception, ? extends RuntimeException> exceptionSupplier) {
67 |         try {
68 |             return callee.call();
69 |         } catch (Exception e) {
70 |             LOGGER.info("call function {} failed", callee.getClass().getName(), e);
71 |             throw exceptionSupplier.apply(e);
72 |         }
73 |     }
74 | 
75 |     public static void callAndThrowException(CalleeFunctionWithoutReturnValue function,
76 |                                              Function<Exception, ? extends RuntimeException> exceptionSupplier) {
77 |         callAndThrowException((CalleeFunctionWithReturnValue<Void>) () -> {
78 |             function.call();
79 |             return null;
80 |         }, exceptionSupplier);
81 |     }
82 | 
83 |     public static <R extends AutoCloseable> SwallowException<R> create(R oo) {
84 |         return new SwallowException<>(oo);
85 |     }
86 | 
87 |     public T getObject() {
88 |         return o;
89 |     }
90 | 
91 |     @Override
92 |     public void close() {
93 |         callAndSwallowException(() -> o.close());
94 |     }
95 | }
96 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/exception/CriticalException.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.exception;
2 | 
3 | public class CriticalException extends DTSBaseException {
4 |     public CriticalException(String errMsg) {
5 |         super(errMsg);
6 |     }
7 | 
8 |     public CriticalException(String message, Throwable cause) {
9 |         super(message, cause);
10 |     }
11 | }
12 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/exception/DTSBaseException.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.exception;
2 | 
3 | public class DTSBaseException extends RuntimeException {
4 | 
5 |     public DTSBaseException(String message) {
6 |         super(message);
7 |     }
8 | 
9 |     public DTSBaseException(String message, Throwable cause) {
10 |         super(message, cause);
11 |     }
12 | 
13 |     public DTSBaseException(Throwable throwable) {
14 |         super(throwable);
15 |     }
16 | 
17 |     public DTSBaseException() {
18 |         super();
19 |     }
20 | }
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/exception/TimestampSeekException.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.exception;
2 | 
3 | public class TimestampSeekException extends DTSBaseException {
4 |     public TimestampSeekException(String errMsg) {
5 |         super(errMsg);
6 |     }
7 | }
8 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/formats/avro/DefaultValueDeserializer.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.formats.avro;
2 | 
3 | import com.aliyun.dts.subscribe.clients.record.value.*;
4 | 
5 | public class DefaultValueDeserializer {
6 | 
7 |     public static Value deserialize(Object data) {
8 | 
9 |         if (null == data) {
10 |             return null;
11 |         }
12 | 
13 |         if (data instanceof String) {
14 |             return new StringValue((String) data);
15 |         }
16 | 
17 |         if (data instanceof Integer) {
18 |             Integer integer = (Integer) data;
19 |             return new IntegerNumeric(integer.getValue());
20 |         }
21 | 
22 |         if (data instanceof Character) {
23 |             Character character = (Character) data;
24 |             return new StringValue(character.getValue(), character.getCharset());
25 |         }
26 | 
27 |         if (data instanceof TextObject) {
28 |             TextObject textObject = (TextObject) data;
29 |             return new TextEncodingObject(ObjectType.valueOf(textObject.getType().toUpperCase()), textObject.getValue());
30 |         }
31 | 
32 |         if (data instanceof Timestamp) {
33 |             Timestamp timestamp = (Timestamp) data;
34 | 
35 |             UnixTimestamp unixTimestamp = new UnixTimestamp();
36 |             unixTimestamp.setTimestampSec(timestamp.getTimestamp());
37 |             if (null != timestamp.getMillis()) {
38 |                 unixTimestamp.setMicro(timestamp.getMillis());
39 |             }
40 |             return unixTimestamp;
41 |         }
42 | 
43 |         if (data instanceof DateTime) {
44 |             DateTime aDt = (DateTime) data;
45 |             return deserialize(aDt);
46 |         }
47 | 
48 |         if (data instanceof TimestampWithTimeZone) {
49 | 
50 |             TimestampWithTimeZone timestampWithTimeZone = (TimestampWithTimeZone) data;
51 |             com.aliyun.dts.subscribe.clients.record.value.DateTime dt = deserialize(timestampWithTimeZone.getValue());
52 |             dt.setTimeZone(timestampWithTimeZone.getTimezone());
53 |             return dt;
54 |         }
55 | 
56 |         if (data instanceof BinaryObject) {
57 |             BinaryObject binaryObject = (BinaryObject) data;
58 |             return new BinaryEncodingObject(ObjectType.valueOf(binaryObject.getType().toUpperCase()), binaryObject.getValue());
59 |         }
60 | 
61 |         if (data instanceof Float) {
62 |             Float aFloat = (Float) data;
63 |             return new FloatNumeric(aFloat.getValue());
64 |         }
65 | 
66 |         if (data instanceof Decimal) {
67 |             Decimal decimal = (Decimal) data;
68 |             return new DecimalNumeric(decimal.getValue());
69 |         }
70 | 
71 |         if (data instanceof BinaryGeometry) {
72 |             BinaryGeometry geometry = (BinaryGeometry) data;
73 |             return new WKBGeometry(geometry.getValue());
74 |         }
75 | 
76 |         if (data instanceof TextGeometry) {
77 |             TextGeometry geometry = (TextGeometry) data;
78 |             return new WKTGeometry(geometry.getValue());
79 |         }
80 | 
81 |         if (data == EmptyObject.NONE) {
82 |             return new NoneValue();
83 |         }
84 | 
85 |         throw new RuntimeException("Unsupported avro class type: " + data.getClass().getName());
86 |     }
87 | 
88 |     static com.aliyun.dts.subscribe.clients.record.value.DateTime deserialize(DateTime inDt) {
89 |         com.aliyun.dts.subscribe.clients.record.value.DateTime dt = new com.aliyun.dts.subscribe.clients.record.value.DateTime();
90 |         if (null != inDt.getYear()) {
91 |             dt.setSegments(com.aliyun.dts.subscribe.clients.record.value.DateTime.SEG_YEAR);
92 |             dt.setYear(inDt.getYear());
93 |         }
94 | 
95 |         if (null != inDt.getMonth()) {
96 |             dt.setSegments(com.aliyun.dts.subscribe.clients.record.value.DateTime.SEG_MONTH);
97 |             dt.setMonth(inDt.getMonth());
98 |         }
99 | 
100 |         if (null != inDt.getDay()) {
101 |             dt.setSegments(com.aliyun.dts.subscribe.clients.record.value.DateTime.SEG_DAY);
102 |             dt.setDay(inDt.getDay());
103 |         }
104 | 
105 |         if (null != inDt.getHour()) {
106 |             dt.setSegments(com.aliyun.dts.subscribe.clients.record.value.DateTime.SEG_TIME);
107 |             dt.setHour(inDt.getHour());
108 |             dt.setMinute(inDt.getMinute());
109 |             dt.setSecond(inDt.getSecond());
110 |         }
111 |         if (null != inDt.getMillis()) {
112 |             dt.setSegments(com.aliyun.dts.subscribe.clients.record.value.DateTime.SEG_NAONS);
113 |             dt.setNaons(inDt.getMillis() * 1000);
114 |         }
115 |         return dt;
116 |     }
117 | }
118 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/formats/avro/EmptyObject.java: --------------------------------------------------------------------------------
1 | /**
2 |  * Autogenerated by Avro
3 |  *
4 |  * DO NOT EDIT DIRECTLY
5 |  */
6 | package com.aliyun.dts.subscribe.clients.formats.avro;
7 | @SuppressWarnings("all")
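    | // NONE is mapped to NoneValue by DefaultValueDeserializer (i.e. "no value supplied"),
    | // while NULL presumably encodes a database NULL.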
8 | @org.apache.avro.specific.AvroGenerated
9 | public enum EmptyObject {
10 |   NULL, NONE ;
11 |   public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"enum\",\"name\":\"EmptyObject\",\"namespace\":\"com.alibaba.dts.formats.avro\",\"symbols\":[\"NULL\",\"NONE\"]}");
12 |   public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
13 | }
14 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/formats/avro/Operation.java: --------------------------------------------------------------------------------
1 | /**
2 |  * Autogenerated by Avro
3 |  *
4 |  * DO NOT EDIT DIRECTLY
5 |  */
6 | package com.aliyun.dts.subscribe.clients.formats.avro;
7 | @SuppressWarnings("all")
8 | @org.apache.avro.specific.AvroGenerated
9 | public enum Operation {
10 |   INSERT, UPDATE, DELETE, DDL, BEGIN, COMMIT, ROLLBACK, ABORT, HEARTBEAT, CHECKPOINT, COMMAND, FILL, FINISH, CONTROL, RDB, NOOP, INIT ;
11 |   public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"enum\",\"name\":\"Operation\",\"namespace\":\"com.alibaba.dts.formats.avro\",\"symbols\":[\"INSERT\",\"UPDATE\",\"DELETE\",\"DDL\",\"BEGIN\",\"COMMIT\",\"ROLLBACK\",\"ABORT\",\"HEARTBEAT\",\"CHECKPOINT\",\"COMMAND\",\"FILL\",\"FINISH\",\"CONTROL\",\"RDB\",\"NOOP\",\"INIT\"]}");
12 |   public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
13 | }
14 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/formats/avro/SourceType.java: --------------------------------------------------------------------------------
1 | /**
2 |  * Autogenerated by Avro
3 |  *
4 |  * DO NOT EDIT DIRECTLY
5 |  */
6 | package com.aliyun.dts.subscribe.clients.formats.avro;
7 | @SuppressWarnings("all")
8 | @org.apache.avro.specific.AvroGenerated
9 | public enum SourceType {
10 |   MySQL, Oracle, SQLServer, PostgreSQL, MongoDB, Redis, DB2, PPAS, DRDS, HBASE, HDFS, FILE, OTHER ;
11 |   public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"enum\",\"name\":\"SourceType\",\"namespace\":\"com.alibaba.dts.formats.avro\",\"symbols\":[\"MySQL\",\"Oracle\",\"SQLServer\",\"PostgreSQL\",\"MongoDB\",\"Redis\",\"DB2\",\"PPAS\",\"DRDS\",\"HBASE\",\"HDFS\",\"FILE\",\"OTHER\"]}");
12 |   public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
13 | }
14 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/formats/util/ObjectNameUtils.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.formats.util;
2 | 
3 | public class ObjectNameUtils {
4 | 
5 |     static final boolean checkAllNull(String... names) {
6 |         if (null == names || names.length <= 0) {
7 |             return true;
8 |         }
9 |         for (String name : names) {
10 |             if (null != name) {
11 |                 return false;
12 |             }
13 |         }
14 |         return true;
15 |     }
16 | 
17 |     public static final String compressionObjectName(String... names) {
18 | 
19 |         if (checkAllNull(names)) {
20 |             return null;
21 |         }
22 | 
23 |         StringBuilder nameBuilder = new StringBuilder(128);
24 |         for (String name : names) {
25 |             if (nameBuilder.length() > 0) {
26 |                 nameBuilder.append(".");
27 |             }
28 |             nameBuilder.append(escapeName(name));
29 |         }
30 |         return nameBuilder.toString();
31 |     }
32 | 
33 |     public static final String[] uncompressionObjectName(String compressionName) {
34 |         if (null == compressionName || compressionName.isEmpty()) {
35 |             return null;
36 |         }
37 | 
38 |         String[] names = compressionName.split("\\.");
39 | 
40 |         int length = names.length;
41 | 
42 |         for (int i = 0; i < length; ++i) {
43 |             names[i] = unescapeName(names[i]);
44 |         }
45 |         return names;
46 |     }
47 | 
48 |     public static final String[] uncompressionObjectName(String compressionName, int limit) {
49 |         if (null == compressionName || compressionName.isEmpty()) {
50 |             return null;
51 |         }
52 | 
53 |         String[] names = compressionName.split("\\.", limit);
54 | 
55 |         int length = names.length;
56 | 
57 |         for (int i = 0; i < length; ++i) {
58 |             names[i] = unescapeName(names[i]);
59 |         }
60 |         return names;
61 |     }
62 | 
63 |     static final String escapeName(String name) {
64 |         if (null == name || (name.indexOf('.') < 0)) {
65 |             return name;
66 |         }
67 | 
68 |         StringBuilder builder = new StringBuilder();
69 | 
70 |         int length = name.length();
71 | 
72 |         for (int i = 0; i < length; ++i) {
73 |             char c = name.charAt(i);
74 |             if ('.' == c) {
75 |                 builder.append("\\u002E");
76 |             } else {
77 |                 builder.append(c);
78 |             }
79 |         }
80 | 
81 |         return builder.toString();
82 |     }
83 | 
84 |     static final String unescapeName(String name) {
85 |         if (null == name || (name.indexOf("\\u002E") < 0)) {
86 |             return name;
87 |         }
88 | 
89 |         StringBuilder builder = new StringBuilder();
90 | 
91 |         int length = name.length();
92 | 
93 |         for (int i = 0; i < length; ++i) {
94 |             char c = name.charAt(i);
95 |             if ('\\' == c && (i < length - 5 && 'u' == name.charAt(i + 1)
96 |                 && '0' == name.charAt(i + 2) && '0' == name.charAt(i + 3)
97 |                 && '2' == name.charAt(i + 4) && 'E' == name.charAt(i + 5))) {
98 |                 builder.append(".");
99 |                 i += 5;
100 |                 continue;
101 |             } else {
102 |                 builder.append(c);
103 |             }
104 |         }
105 | 
106 |         return builder.toString();
107 |     }
108 | }
109 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/metastore/AbstractUserMetaStore.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.metastore;
2 | 
3 | import com.alibaba.fastjson.JSONArray;
4 | import com.alibaba.fastjson.JSONObject;
5 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
6 | import org.apache.commons.lang3.StringUtils;
7 | import org.apache.kafka.common.TopicPartition;
8 | import org.apache.kafka.common.internals.KafkaFutureImpl;
9 | 
10 | import java.util.HashMap;
11 | import java.util.Map;
12 | import java.util.concurrent.Future;
13 | 
14 | public abstract class AbstractUserMetaStore implements MetaStore<Checkpoint> {
15 |     private static final String GROUP_ID_NAME = "groupID";
16 |     private static final String STREAM_CHECKPOINT_NAME = "streamCheckpoint";
17 |     private static final String TOPIC_NAME = "topic";
18 |     private static final String PARTITION_NAME = "partition";
19 |     private static final String OFFSET_NAME = "offset";
20 |     private static final String TIMESTAMP_NAME = "timestamp";
21 |     private static final String INFO_NAME = "info";
22 | 
23 |     private static class StoreElement {
24 |         final String groupName;
25 |         final Map<TopicPartition, Checkpoint> streamCheckpoint;
26 | 
27 |         private StoreElement(String groupName, Map<TopicPartition, Checkpoint> streamCheckpoint) {
28 |             this.groupName = groupName;
29 |             this.streamCheckpoint = streamCheckpoint;
30 |         }
31 |     }
32 | 
33 |     private String toJson(StoreElement storeElement) {
34 |         JSONObject jsonObject = new JSONObject();
35 |         jsonObject.put(GROUP_ID_NAME, storeElement.groupName);
36 |         JSONArray jsonArray = new JSONArray();
37 |         storeElement.streamCheckpoint.forEach((tp, checkpoint) -> {
38 |             JSONObject streamCheckpointJsonObject = new JSONObject();
39 |             streamCheckpointJsonObject.put(TOPIC_NAME, tp.topic());
40 |             streamCheckpointJsonObject.put(PARTITION_NAME, tp.partition());
41 |             streamCheckpointJsonObject.put(OFFSET_NAME, checkpoint.getOffset());
42 |             streamCheckpointJsonObject.put(TIMESTAMP_NAME, checkpoint.getTimeStamp());
43 |             streamCheckpointJsonObject.put(INFO_NAME, checkpoint.getInfo());
44 |             jsonArray.add(streamCheckpointJsonObject);
45 |         });
46 | 
47 |         jsonObject.put(STREAM_CHECKPOINT_NAME, jsonArray);
48 |         return jsonObject.toJSONString();
49 |     }
50 | 
51 |     private StoreElement fromString(String jsonString) {
52 |         JSONObject jsonObject = JSONObject.parseObject(jsonString);
53 |         String groupName = jsonObject.getString(GROUP_ID_NAME);
54 |         JSONArray streamCheckpointJsonObject = jsonObject.getJSONArray(STREAM_CHECKPOINT_NAME);
55 |         Map<TopicPartition, Checkpoint> checkpointInfo = new HashMap<>();
56 |         for (Object o : streamCheckpointJsonObject) {
57 |             JSONObject tpAndCheckpoint = (JSONObject) o;
58 |             String topic = tpAndCheckpoint.getString(TOPIC_NAME);
59 |             int partition = tpAndCheckpoint.getInteger(PARTITION_NAME);
60 |             long offset = tpAndCheckpoint.getLong(OFFSET_NAME);
61 |             long timestamp = tpAndCheckpoint.getLong(TIMESTAMP_NAME);
62 |             String info = tpAndCheckpoint.getString(INFO_NAME);
63 |             checkpointInfo.put(new TopicPartition(topic, partition), new Checkpoint(new TopicPartition(topic, partition), timestamp, offset, info));
64 |         }
65 | 
66 |         return new StoreElement(groupName, checkpointInfo);
67 |     }
68 | 
69 |     @Override
70 |     public Future<Checkpoint> serializeTo(TopicPartition topicPartition, String groupID, Checkpoint value) {
71 | 
72 |         Map<TopicPartition, Checkpoint> topicPartitionCheckpoint = new HashMap<>();
73 |         topicPartitionCheckpoint.put(topicPartition, value);
74 | 
75 |         String toStoreJson = toJson(new StoreElement(groupID, topicPartitionCheckpoint));
76 | 
77 |         saveData(groupID, toStoreJson);
78 | 
79 |         KafkaFutureImpl<Checkpoint> ret = new KafkaFutureImpl<>();
80 |         ret.complete(value);
81 |         return ret;
82 |     }
83 | 
84 |     protected abstract void saveData(String groupID, String toStoreJson);
85 | 
86 |     @Override
87 |     public Checkpoint deserializeFrom(TopicPartition topicPartition, String groupID) {
88 | 
89 |         String checkpointData = getData(groupID);
90 | 
91 |         if (StringUtils.isNotEmpty(checkpointData)) {
92 |             StoreElement storeElement = fromString(checkpointData);
93 |             if (StringUtils.equals(storeElement.groupName, groupID)) {
94 |                 return storeElement.streamCheckpoint.get(topicPartition);
95 |             }
96 |         }
97 | 
98 |         return null;
99 |     }
100 | 
101 |     protected abstract String getData(String groupID);
102 | }
103 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/metastore/KafkaMetaStore.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.metastore;
2 | 
3 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
4 | import org.apache.commons.lang3.StringUtils;
5 | import org.apache.kafka.clients.consumer.KafkaConsumer;
6 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
7 | import org.apache.kafka.clients.consumer.OffsetCommitCallback;
8 | import org.apache.kafka.common.KafkaException;
9 | import org.apache.kafka.common.TopicPartition;
10 | import org.apache.kafka.common.internals.KafkaFutureImpl;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 | 
14 | import java.util.Collections;
15 | import java.util.Map;
16 | import java.util.concurrent.Future;
17 | 
18 | public class KafkaMetaStore implements MetaStore<Checkpoint> {
19 |     private static final Logger log = LoggerFactory.getLogger(KafkaMetaStore.class);
20 | 
21 |     private volatile KafkaConsumer<byte[], byte[]> kafkaConsumer;
22 | 
23 |     public KafkaMetaStore(KafkaConsumer<byte[], byte[]> kafkaConsumer) {
24 |         this.kafkaConsumer = kafkaConsumer;
25 |     }
26 | 
27 |     public void resetKafkaConsumer(KafkaConsumer<byte[], byte[]> newConsumer) {
28 |         this.kafkaConsumer = newConsumer;
29 |     }
30 | 
31 |     @Override
32 |     public Future<Checkpoint> serializeTo(TopicPartition topicPartition, String group, Checkpoint value) {
33 |         KafkaFutureImpl<Checkpoint> ret = new KafkaFutureImpl<>();
34 |         if (null != kafkaConsumer) {
35 |             OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(value.getOffset(), String.valueOf(value.getTimeStamp()));
36 |             // Notice: commitAsync only puts the commit-offset request into the sending queue; the future result is driven by the KafkaConsumer.poll() loop.
37 |             // So if you only call this method but never poll, the offset-commit callback may never fire.
38 |             kafkaConsumer.commitAsync(Collections.singletonMap(topicPartition, offsetAndMetadata), new OffsetCommitCallback() {
39 |                 @Override
40 |                 public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
41 |                     if (null != exception) {
42 |                         log.warn("KafkaMetaStore: Commit offset for group[" + group + "] topicPartition[" + topicPartition.toString() + "] " +
43 |                             value.toString() + " failed cause " + exception.getMessage(), exception);
44 |                         ret.completeExceptionally(exception);
45 |                     } else {
46 |                         log.debug("KafkaMetaStore: Commit offset success for group[{}] topicPartition [{}] {}", group, topicPartition, value);
47 |                         ret.complete(value);
48 |                     }
49 |                 }
50 |             });
51 |         } else {
52 |             log.warn("KafkaMetaStore: kafka consumer not set, ignore report");
53 |             ret.complete(value);
54 |         }
55 |         return ret;
56 | 
57 |     }
58 | 
59 |     @Override
60 |     public Checkpoint deserializeFrom(TopicPartition topicPartition, String group) {
61 |         if (null != kafkaConsumer) {
62 |             OffsetAndMetadata offsetAndMetadata = kafkaConsumer.committed(topicPartition);
63 |             if (null != offsetAndMetadata && StringUtils.isNotEmpty(offsetAndMetadata.metadata())) {
64 |                 return new Checkpoint(topicPartition, Long.valueOf(offsetAndMetadata.metadata()), offsetAndMetadata.offset(), offsetAndMetadata.metadata());
65 |             } else {
66 |                 return null;
67 |             }
68 |         } else {
69 |             log.warn("KafkaMetaStore: kafka consumer not set, ignore fetch offset");
70 |             throw new KafkaException("KafkaMetaStore: kafka consumer not set, ignore fetch offset for group[" + group + "] and tp [" + topicPartition + "]");
71 |         }
72 |     }
73 | }
74 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/metastore/LocalFileMetaStore.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.metastore;
2 | 
3 | import com.alibaba.fastjson.JSONArray;
4 | import com.alibaba.fastjson.JSONObject;
5 | import com.aliyun.dts.subscribe.clients.common.AtomicFileStore;
6 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
7 | import org.apache.commons.lang3.StringUtils;
8 | import org.apache.kafka.common.TopicPartition;
9 | import org.apache.kafka.common.internals.KafkaFutureImpl;
10 | 
11 | import java.util.HashMap;
12 | import java.util.LinkedList;
13 | import java.util.List;
14 | import java.util.Map;
15 | import java.util.concurrent.Future;
16 | 
17 | public class LocalFileMetaStore implements MetaStore<Checkpoint> {
18 | 
19 |     private static final String GROUP_ID_NAME = "groupID";
20 |     private static final String STREAM_CHECKPOINT_NAME = "streamCheckpoint";
21 |     private static final String TOPIC_NAME = "topic";
22 |     private static final String PARTITION_NAME = "partition";
23 |     private static final String OFFSET_NAME = "offset";
24 |     private static final String TIMESTAMP_NAME = "timestamp";
25 |     private static final String INFO_NAME = "info";
26 | 
27 |     private static class StoreElement {
28 |         final String groupName;
29 |         final Map<TopicPartition, Checkpoint> streamCheckpoint;
30 | 
31 |         private StoreElement(String groupName, Map<TopicPartition, Checkpoint> streamCheckpoint) {
32 |             this.groupName = groupName;
33 |             this.streamCheckpoint = streamCheckpoint;
34 |         }
35 |     }
36 | 
37 |     private final AtomicFileStore fileStore;
38 |     private final Map<String, Map<TopicPartition, Checkpoint>> inMemStore = new HashMap<>();
39 |     public LocalFileMetaStore(String fileName) {
40 |         this.fileStore = new AtomicFileStore(fileName);
41 |     }
42 | 
43 |     private String toJson(StoreElement storeElement) {
44 |         JSONObject jsonObject = new JSONObject();
45 |         jsonObject.put(GROUP_ID_NAME, storeElement.groupName);
46 |         JSONArray jsonArray = new JSONArray();
47 |         storeElement.streamCheckpoint.forEach((tp, checkpoint) -> {
48 |             JSONObject streamCheckpointJsonObject = new JSONObject();
49 |             streamCheckpointJsonObject.put(TOPIC_NAME, tp.topic());
50 |             streamCheckpointJsonObject.put(PARTITION_NAME, tp.partition());
51 |             streamCheckpointJsonObject.put(OFFSET_NAME, checkpoint.getOffset());
52 |             streamCheckpointJsonObject.put(TIMESTAMP_NAME, checkpoint.getTimeStamp());
53 |             streamCheckpointJsonObject.put(INFO_NAME, checkpoint.getInfo());
54 |             jsonArray.add(streamCheckpointJsonObject);
55 |         });
56 | 
57 |         jsonObject.put(STREAM_CHECKPOINT_NAME, jsonArray);
58 |         return jsonObject.toJSONString();
59 |     }
60 | 
61 |     private StoreElement fromString(String jsonString) {
62 |         JSONObject jsonObject = JSONObject.parseObject(jsonString);
63 |         String groupName = jsonObject.getString(GROUP_ID_NAME);
64 |         JSONArray streamCheckpointJsonObject = jsonObject.getJSONArray(STREAM_CHECKPOINT_NAME);
65 |         Map<TopicPartition, Checkpoint> checkpointInfo = new HashMap<>();
66 |         for (Object o : streamCheckpointJsonObject) {
67 |             JSONObject tpAndCheckpoint = (JSONObject) o;
68 |             String topic = tpAndCheckpoint.getString(TOPIC_NAME);
69 |             int partition = tpAndCheckpoint.getInteger(PARTITION_NAME);
70 |             long offset = tpAndCheckpoint.getLong(OFFSET_NAME);
71 |             long timestamp = tpAndCheckpoint.getLong(TIMESTAMP_NAME);
72 |             String info = tpAndCheckpoint.getString(INFO_NAME);
73 |             checkpointInfo.put(new TopicPartition(topic, partition), new Checkpoint(new TopicPartition(topic, partition), timestamp, offset, info));
74 |         }
75 | 
76 |         return new StoreElement(groupName, checkpointInfo);
77 |     }
78 | 
79 |     @Override
80 |     public Future<Checkpoint> serializeTo(TopicPartition topicPartition, String groupID, Checkpoint value) {
81 |         Map<TopicPartition, Checkpoint> topicPartitionCheckpoint = inMemStore.get(groupID);
82 |         if (null == topicPartitionCheckpoint) {
83 |             topicPartitionCheckpoint = new HashMap<>();
84 |         }
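    |         // Merge the new checkpoint into the in-memory view first; the whole view is then
    |         // re-serialized below, so the latest checkpoint of every known group survives each
    |         // atomic rewrite of the local file.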
85 |         topicPartitionCheckpoint.put(topicPartition, value);
86 |         inMemStore.put(groupID, topicPartitionCheckpoint);
87 | 
88 |         List<String> toSerialize = new LinkedList<>();
89 |         inMemStore.forEach((k, v) -> {
90 |             toSerialize.add(toJson(new StoreElement(k, v)));
91 |         });
92 |         fileStore.updateContent(toSerialize);
93 |         KafkaFutureImpl<Checkpoint> ret = new KafkaFutureImpl<>();
94 |         ret.complete(value);
95 |         return ret;
96 |     }
97 | 
98 |     @Override
99 |     public Checkpoint deserializeFrom(TopicPartition topicPartition, String groupID) {
100 |         Map<TopicPartition, Checkpoint> tpAndCheckpointMap = inMemStore.get(groupID);
101 |         if (null != tpAndCheckpointMap) {
102 |             Checkpoint ret = tpAndCheckpointMap.get(topicPartition);
103 |             if (null != ret) {
104 |                 return ret;
105 |             }
106 |         }
107 |         List<String> storedCheckpoint = fileStore.getContent();
108 |         for (String checkpoint : storedCheckpoint) {
109 |             StoreElement storeElement = fromString(checkpoint);
110 |             // add to cache
111 |             if (!inMemStore.containsKey(storeElement.groupName)) {
112 |                 inMemStore.put(storeElement.groupName, storeElement.streamCheckpoint);
113 |             }
114 |             if (StringUtils.equals(storeElement.groupName, groupID)) {
115 |                 return storeElement.streamCheckpoint.get(topicPartition);
116 |             }
117 |         }
118 |         return null;
119 |     }
120 | 
121 | }
122 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/metastore/MetaStore.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.metastore;
2 | 
3 | import org.apache.kafka.common.TopicPartition;
4 | 
5 | import java.util.concurrent.Future;
6 | 
7 | public interface MetaStore<V> {
8 |     Future<V> serializeTo(TopicPartition topicPartition, String group, V value);
9 |     V deserializeFrom(TopicPartition topicPartition, String group);
10 | }
11 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/metastore/MetaStoreCenter.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.metastore;
2 | 
3 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
4 | import org.apache.kafka.common.TopicPartition;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 | 
8 | import java.util.HashMap;
9 | import java.util.Map;
10 | 
11 | public class MetaStoreCenter {
12 |     private static final Logger log = LoggerFactory.getLogger(MetaStoreCenter.class);
13 |     private final Map<String, MetaStore<Checkpoint>> registeredStore = new HashMap<>();
14 |     public MetaStoreCenter() {
15 | 
16 |     }
17 | 
18 |     public void registerStore(String name, MetaStore<Checkpoint> metaStore) {
19 |         log.info("MetaStoreCenter: register metaStore {}", name);
20 |         registeredStore.put(name, metaStore);
21 |     }
22 | 
23 |     public void store(TopicPartition topicPartition, String group, Checkpoint value) {
24 |         registeredStore.values().forEach(v -> {
25 |             v.serializeTo(topicPartition, group, value);
26 |         });
27 |     }
28 | 
29 |     public Checkpoint seek(String storeName, TopicPartition tp, String group) {
30 |         MetaStore<Checkpoint> metaStore = registeredStore.get(storeName);
31 |         if (null != metaStore) {
32 |             return metaStore.deserializeFrom(tp, group);
33 |         } else {
34 |             return null;
35 |         }
36 |     }
37 | }
38 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/metrics/DTSMetrics.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.metrics;
2 | 
3 | import org.apache.kafka.common.metrics.MetricConfig;
4 | import org.apache.kafka.common.metrics.Metrics;
5 | 
6 | import java.util.concurrent.TimeUnit;
7 | 
8 | public class DTSMetrics {
9 | 
10 |     private Metrics coreMetrics;
11 | 
12 |     public DTSMetrics() {
13 |         MetricConfig metricConfig = new MetricConfig();
14 |         metricConfig.timeWindow(5, TimeUnit.SECONDS);
15 |         metricConfig.samples(2);
16 | 
17 |         coreMetrics = new Metrics(metricConfig);
18 | 
19 |         LogMetricsReporter fileMetricsReporter = new LogMetricsReporter();
20 |         fileMetricsReporter.configure(5, null);
21 | 
22 |         coreMetrics.addReporter(fileMetricsReporter);
23 |         fileMetricsReporter.start();
24 |     }
25 | 
26 |     public Metrics getCoreMetrics() {
27 |         return coreMetrics;
28 |     }
29 | 
30 |     public void setCoreMetrics(Metrics coreMetrics) {
31 |         this.coreMetrics = coreMetrics;
32 |     }
33 | 
34 |     public void close() {
35 |         coreMetrics.close();
36 |     }
37 | }
38 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/DatabaseInfo.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 | 
3 | public class DatabaseInfo {
4 | 
5 |     private final String databaseType;
6 |     private final String version;
7 | 
8 |     public DatabaseInfo(String databaseType, String version) {
9 |         this.databaseType = databaseType;
10 |         this.version = version;
11 |     }
12 | 
13 |     public String getDatabaseType() {
14 |         return databaseType;
15 |     }
16 | 
17 |     public String getVersion() {
18 |         return version;
19 |     }
20 | 
21 |     @Override
22 |     public String toString() {
23 |         return "{\"sourceType\": \"" + databaseType + "\", \"version\": \"" + version + "\"}";
24 |     }
25 | }
26 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/DefaultUserRecord.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 | 
3 | import com.aliyun.dts.subscribe.clients.common.UserCommitCallBack;
4 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
5 | import org.apache.kafka.common.TopicPartition;
6 | 
7 | import java.util.function.Function;
8 | 
9 | public class DefaultUserRecord implements UserRecord {
10 |     private final TopicPartition topicPartition;
11 |     private final long offset;
12 |     private final Record avroRecord;
13 |     private final UserCommitCallBack userCommitCallBack;
14 | 
15 |     private volatile boolean initHeader = false;
16 | 
17 |     private RecordSchema recordSchema;
18 |     private RowImage beforeImage;
19 |     private RowImage afterImage;
20 | 
21 |     public DefaultUserRecord(TopicPartition tp, long offset, Record avroRecord, UserCommitCallBack userCommitCallBack) {
22 |         this.topicPartition = tp;
23 |         this.offset = offset;
24 |         this.avroRecord = avroRecord;
25 |         this.userCommitCallBack = userCommitCallBack;
26 |     }
27 | 
28 |     public long getOffset() {
29 |         return offset;
30 |     }
31 | 
32 |     private <R> R callAvroRecordMethod(Function<Record, R> method) {
33 |         return method.apply(avroRecord);
34 |     }
35 | 
36 |     public com.aliyun.dts.subscribe.clients.formats.avro.Record getAvroRecord() {
37 |         return avroRecord;
38 |     }
39 | 
40 |     public TopicPartition getTopicPartition() {
41 |         return topicPartition;
42 |     }
43 | 
44 |     public void commit(String metadata) {
45 |         userCommitCallBack.commit(topicPartition, avroRecord, offset, metadata);
46 |     }
47 | 
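    |     // A listener typically calls commit() only after the record has been durably handled;
    |     // a sketch, where process(...) is the application's own handler:
    |     //   RecordListener listener = record -> {
    |     //       process(record);
    |     //       record.commit(String.valueOf(record.getSourceTimestamp()));
    |     //   };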
48 |     @Override
49 |     public long getId() {
50 |         return callAvroRecordMethod(Record::getId);
51 |     }
52 | 
53 |     @Override
54 |     public long getSourceTimestamp() {
55 |         return callAvroRecordMethod(Record::getSourceTimestamp);
56 |     }
57 | 
58 |     @Override
59 |     public OperationType getOperationType() {
60 |         return callAvroRecordMethod(AvroRecordParser::getOperationType);
61 |     }
62 | 
63 |     @Override
64 |     public RecordSchema getSchema() {
65 |         return callAvroRecordMethod((avroRecord) -> {
66 |             if (recordSchema == null) {
67 |                 recordSchema = AvroRecordParser.getRecordSchema(avroRecord);
68 |             }
69 |             return recordSchema;
70 |         });
71 |     }
72 | 
73 |     @Override
74 |     public RowImage getBeforeImage() {
75 |         if (null == beforeImage) {
76 |             beforeImage = callAvroRecordMethod(record -> AvroRecordParser.getRowImage(getSchema(), record, true));
77 |         }
78 |         return beforeImage;
79 |     }
80 | 
81 |     @Override
82 |     public RowImage getAfterImage() {
83 |         if (null == afterImage) {
84 |             afterImage = callAvroRecordMethod(record -> AvroRecordParser.getRowImage(getSchema(), record, false));
85 |         }
86 |         return afterImage;
87 |     }
88 | }
89 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/ForeignKeyIndexInfo.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 | 
3 | import java.util.LinkedList;
4 | import java.util.List;
5 | import java.util.TreeMap;
6 | 
7 | public class ForeignKeyIndexInfo extends RecordIndexInfo {
8 |     final String parentSchema;
9 |     final String parentDatabase;
10 |     final String parentTable;
11 |     final TreeMap<String, RecordField> referencedColumn;
12 |     public ForeignKeyIndexInfo(IndexType type, String parentSchema, String parentDatabase, String parentTable) {
13 |         super(type);
14 |         this.parentSchema = parentSchema;
15 |         this.parentDatabase = parentDatabase;
16 |         this.parentTable = parentTable;
17 |         this.referencedColumn = new TreeMap<>();
18 |     }
19 | 
20 |     public String getParentSchema() {
21 |         return parentSchema;
22 |     }
23 | 
24 |     public void addConstraintField(String parentColumn, RecordField currentField) {
25 |         referencedColumn.put(parentColumn, currentField);
26 |         super.addField(currentField);
27 |     }
28 | 
29 |     public String getParentTable() {
30 |         return parentTable;
31 |     }
32 | 
33 |     public String getParentDatabase() {
34 |         return parentDatabase;
35 |     }
36 | 
37 |     public List<RecordField> getIndexFields() {
38 |         List<RecordField> ret = new LinkedList<>();
39 |         referencedColumn.forEach((k, v) -> ret.add(v));
40 |         return ret;
41 |     }
42 | }
43 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/OperationType.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 | 
3 | public enum OperationType {
4 |     INSERT,
5 |     UPDATE,
6 |     DELETE,
7 |     DDL,
8 |     BEGIN,
9 |     COMMIT,
10 |     HEARTBEAT,
11 |     CHECKPOINT,
12 |     UNKNOWN;
13 | }
14 | 
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/RecordField.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 | 
3 | import com.aliyun.dts.subscribe.clients.record.value.Value;
4 | 
5 | public interface RecordField {
6 |     /**
7 |      * @return Get the field name, which is case sensitive.
8 | */
9 | String getFieldName();
10 |
11 | /**
12 | * @return Get raw data type of this field.
13 | */
14 | int getRawDataTypeNum();
15 |
16 | /**
17 | * @return Get default value of current field.
18 | */
19 | Value getDefaultValue();
20 |
21 | /**
22 | * @return Determine if current field is nullable.
23 | */
24 | boolean isNullable();
25 |
26 | /**
27 | * @return Determine if current field is an element of uk.
28 | */
29 | boolean isUnique();
30 |
31 | /**
32 | * @param unique whether current field is an element of uk
33 | * @return this record field, with the unique flag set.
34 | */
35 | RecordField setUnique(boolean unique);
36 |
37 | /**
38 | * @return Determine if current field is an element of pk.
39 | */
40 | boolean isPrimary();
41 |
42 | /**
43 | * @return Determine if current field is an element of some index.
44 | */
45 | boolean isIndexed();
46 |
47 | /**
48 | * @return Determine if current field is auto incremental.
49 | */
50 | boolean isAutoIncrement();
51 |
52 | /**
53 | * @return Get field position to set/get value, which starts from 0.
54 | */
55 | int getFieldPosition();
56 |
57 | /**
58 | * @param position
59 | * field position to set/get value, which starts from 0.
60 | */
61 | void setFieldPosition(int position);
62 |
63 | }
64 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/RecordIndexInfo.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | import org.apache.commons.lang3.StringUtils;
4 |
5 | import java.util.ArrayList;
6 | import java.util.List;
7 |
8 | public class RecordIndexInfo {
9 |
10 | public enum IndexType {
11 | PrimaryKey,
12 | UniqueKey,
13 | NormalIndex,
14 | ForeignKey,
15 | Unknown
16 | }
17 |
18 | private IndexType indexType;
19 | private List<RecordField> indexFields;
20 | private long cardinality;
21 | private boolean nullable = true;
22 | private boolean isFirstUniqueIndex = false;
23 | private String name;
24 |
25 | public RecordIndexInfo(IndexType type) {
26 | this.indexType = type;
27 | this.indexFields = new ArrayList<>();
28 | }
29 |
30 | public void addField(RecordField recordField) {
31 | for (RecordField field : indexFields) {
32 | // field is already contained in this index, reject the new one
33 | if (StringUtils.equals(field.getFieldName(), recordField.getFieldName())) {
34 | return;
35 | }
36 | }
37 | indexFields.add(recordField);
38 | }
39 |
40 | public void removeField(RecordField recordField) {
41 | indexFields.remove(recordField);
42 | }
43 |
44 | public synchronized List<RecordField> getIndexFields() {
45 | return indexFields;
46 | }
47 |
48 | public IndexType getIndexType() {
49 | return indexType;
50 | }
51 |
52 | public boolean isPrimaryKeyIndex() {
53 | return IndexType.PrimaryKey == indexType;
54 | }
55 |
56 | public boolean isUniqueKeyIndex() {
57 | return IndexType.UniqueKey == indexType;
58 | }
59 |
60 | public boolean isNormalIndex() {
61 | return IndexType.NormalIndex == indexType;
62 | }
63 |
64 | public long getCardinality() {
65 | return cardinality;
66 | }
67 |
68 | public void setCardinality(long cardinality) {
69 | this.cardinality = cardinality;
70 | }
71 |
72 | public boolean isNullable() {
73 | return nullable;
74 | }
75 |
76 | public void setNullable(boolean nullable) {
77 | this.nullable = nullable;
78 | }
79 |
80 | public boolean isFirstUniqueIndex() {
81 | return isFirstUniqueIndex;
82 | }
83 |
84 | public String getName() {
85 | return name;
86 | }
87 |
88 | public void setName(String name) {
89 | this.name = name;
90 | }
91 |
92 | public void setFirstUniqueIndex(boolean firstUniqueIndex) {
93 | isFirstUniqueIndex = firstUniqueIndex;
94 | }
95 |
96 | @Override
97 | public String toString() {
98 | return "[indexType=" + indexType + ", indexFields=" + indexFields + ", cardinality=" + cardinality + ", nullable=" + nullable + ", isFirstUniqueIndex=" + isFirstUniqueIndex
99 | + ", name=" + name + "]";
100 | }
101 | }
102 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/RecordSchema.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.NullableOptional;
4 |
5 | import java.util.List;
6 |
7 | public interface RecordSchema {
8 |
9 | /**
10 | * get the database info the record schema refers to.
11 | * @return DatabaseInfo
12 | */
13 | DatabaseInfo getDatabaseInfo();
14 |
15 | /**
16 | * @return the list of fields that are present in the schema
17 | */
18 | List<RecordField> getFields();
19 |
20 | /**
21 | * @return the number of fields in the schema
22 | */
23 | int getFieldCount();
24 |
25 | /**
26 | * @param index the 0-based index of which field to return
27 | * @return the index'th field
28 | */
29 | RecordField getField(int index);
30 |
31 | /**
32 | * @param fieldName
33 | * the name of the field
34 | * @return an Optional RecordField for the field with the given name
35 | */
36 | NullableOptional<RecordField> getField(String fieldName);
37 |
38 |
39 | /**
40 | * mark the @field to be ignored, so the user cannot see it anymore
41 | * @param field to be ignored
42 | */
43 | void ignoreField(RecordField field);
44 |
45 | /**
46 | * @return the raw data types of the fields
47 | */
48 | List<Integer> getRawDataTypes();
49 |
50 | /**
51 | * @return the names of the fields
52 | */
53 | List<String> getFieldNames();
54 |
55 | /**
56 | * @param fieldName
57 | * the name of the field whose type is desired
58 | * @return the raw data type associated with the field that has the given name, or empty if the schema does not contain a field with the given name
59 | */
60 | NullableOptional<Integer> getRawDataType(String fieldName);
61 |
62 | /**
63 | * @return the fully qualified name of the current record schema
64 | */
65 | NullableOptional<String> getFullQualifiedName();
66 |
67 | /**
68 | * @return the database name
69 | */
70 | NullableOptional<String> getDatabaseName();
71 |
72 | /**
73 | * get schema name
74 | * @return NullableOptional that is empty if the schema name is absent
75 | */
76 | NullableOptional<String> getSchemaName();
77 |
78 | /**
79 | * getTableName
80 | * @return NullableOptional that is empty if the table name is absent
81 | */
82 | NullableOptional<String> getTableName();
83 |
84 | /**
85 | * @return the id for this schema
86 | */
87 | String getSchemaIdentifier();
88 |
89 | /**
90 | * get the primary key info.
91 | * @return record index info
92 | */
93 | RecordIndexInfo getPrimaryIndexInfo();
94 |
95 | /**
96 | * get all foreign key info; a foreign key may refer to a primary key with multiple columns, so we
97 | * use RecordIndexInfo to represent it.
98 | * @return all foreign key info
99 | */
100 | List<RecordIndexInfo> getForeignIndexInfo();
101 |
102 | /**
103 | * get all unique key info.
104 | * @return all unique key info
105 | */
106 | List<RecordIndexInfo> getUniqueIndexInfo();
107 |
108 | /**
109 | * get all normal indexes (i.e., not pk, uk or fk).
110 | * @return all normal indexes
111 | */
112 | List<RecordIndexInfo> getNormalIndexInfo();
113 |
114 | /**
115 | * get the estimated total rows in current record schema.
116 | * @return total rows
117 | */
118 | default long getTotalRows() {
119 | return 0L;
120 | }
121 |
122 | String getFilterCondition();
123 |
124 | void initFilterCondition(String condition);
125 |
126 | default List<RecordField> getPartitionFields() {
127 | return null;
128 | }
129 |
130 | default void addUniqueIndexInfo(RecordIndexInfo indexInfo) {
131 | throw new RuntimeException("not impl");
132 | }
133 |
134 | default void addForeignIndexInfo(ForeignKeyIndexInfo indexInfo) {
135 | throw new RuntimeException("not impl");
136 | }
137 |
138 | /**
139 | * get the table charset
140 | * @return charset
141 | */
142 | default String getCharset() {
143 | return null;
144 | }
145 | }
146 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/RowImage.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | import com.aliyun.dts.subscribe.clients.record.value.Value;
4 | import org.apache.commons.lang3.tuple.Pair;
5 |
6 | import java.util.Map;
7 | import java.util.function.Function;
8 |
9 | public interface RowImage {
10 |
11 | /**
12 | *

13 | * @return a view of the values of the fields in this Record. Note that this method returns values only for
14 | * those entries in the Record's schema. This allows the Record to guarantee that it will return the values in
15 | * the order dictated by the schema.
16 | *

17 | *
18 | * NOTE: The array that is returned may be an underlying array that is backing
19 | * the contents of the Record. As such, modifying the array in any way may result in
20 | * modifying the record.
21 | */
22 | Value[] getValues();
23 |
24 | /**
25 | * @param pos the position of the value
26 | * @return the value at the specified @pos.
27 | */
28 | Value getValue(int pos);
29 |
30 | /**
31 | * @param fieldName the field name
32 | * @return the value of the field with the specified @fieldName.
33 | * This method differs from the other getValue variants in that a field matching @fieldName may not exist.
34 | */
35 | Value getValue(String fieldName);
36 |
37 | /**
38 | * @param recordField record field
39 | * @return the value of the specified @recordField.
40 | */
41 | Value getValue(RecordField recordField);
42 |
43 | /**
44 | * @return the primary keys of current row image.
45 | */
46 | Pair<RecordField, Value>[] getPrimaryKeyValues();
47 |
48 | /**
49 | * @return the merged field and value pairs of all unique keys in current record.
50 | */
51 | Pair<RecordField, Value>[] getUniqueKeyValues();
52 |
53 | /**
54 | * @return the foreign keys of current row image.
55 | */
56 | Pair<RecordField, Value>[] getForeignKeyValues();
57 |
58 | /**
59 | * Converts the Record into a Map whose keys are the same as the Record's field names and the values are the field values
60 | * @param filedNameResolver field name resolver
61 | * @param valueResolver value resolver
62 | * @return a Map that represents the values in the Record
63 | */
64 | Map toMap(Function filedNameResolver, Function valueResolver);
65 |
66 | /**
67 | * The total size of all values in current row image.
68 | * @return size
69 | */
70 | long size();
71 | }
72 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/SimplifiedRecordField.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | import com.aliyun.dts.subscribe.clients.record.value.Value;
4 |
5 | import java.util.Collections;
6 | import java.util.Set;
7 |
8 | public class SimplifiedRecordField implements RecordField {
9 |
10 | private final String fieldName;
11 | private final int rawDataTypeNum;
12 | private boolean isPrimaryKey;
13 | private boolean isUniqueKey;
14 |
15 | private int fieldPosition;
16 |
17 | public SimplifiedRecordField(String fieldName, int rawDataTypeNum) {
18 | this.fieldName = fieldName;
19 | this.rawDataTypeNum = rawDataTypeNum;
20 | }
21 |
22 | public String getFieldName() {
23 | return fieldName;
24 | }
25 |
26 | public Set<String> getAliases() {
27 | return Collections.emptySet();
28 | }
29 |
30 | public int getRawDataTypeNum() {
31 | return rawDataTypeNum;
32 | }
33 |
34 | public Value getDefaultValue() {
35 | return null;
36 | }
37 |
38 | public boolean isNullable() {
39 | return true;
40 | }
41 |
42 | public boolean isUnique() {
43 | return isUniqueKey;
44 | }
45 |
46 | public RecordField setUnique(boolean isUnique) {
47 | isUniqueKey = isUnique;
48 | return this;
49 | }
50 |
51 | public boolean isPrimary() {
52 | return isPrimaryKey;
53 | }
54 |
55 | public boolean setPrimary(boolean isPrimary) {
56 | isPrimaryKey = isPrimary;
57 | return isPrimaryKey;
58 | }
59 |
60 | public boolean isIndexed() {
61 | return isPrimaryKey || isUniqueKey;
62 | }
63 |
64 | public boolean isAutoIncrement() {
65 | return false;
66 | }
67 |
68 | public int keySeq() {
69 | return 0;
70 | }
71 |
72 | public int getFieldPosition() {
73 | return fieldPosition;
74 | }
75 |
76 | public void
setFieldPosition(int fieldPosition) { 77 | this.fieldPosition = fieldPosition; 78 | } 79 | 80 | public int getScale() { 81 | return 0; 82 | } 83 | 84 | @Override 85 | public String toString() { 86 | return "{" + 87 | "fieldName='" + fieldName + '\'' + 88 | ", rawDataTypeNum=" + rawDataTypeNum + 89 | ", isPrimaryKey=" + isPrimaryKey + 90 | ", isUniqueKey=" + isUniqueKey + 91 | ", fieldPosition=" + fieldPosition + 92 | '}'; 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/UserRecord.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record; 2 | 3 | public interface UserRecord { 4 | /** 5 | * @return Get the record unique id. 6 | */ 7 | long getId(); 8 | 9 | /** 10 | * @return Get the record source timestamp. 11 | */ 12 | long getSourceTimestamp(); 13 | 14 | /** 15 | * @return Get the operation which causes current record. 16 | */ 17 | OperationType getOperationType(); 18 | 19 | /** 20 | * @return Get the schema of current record data. 21 | */ 22 | RecordSchema getSchema(); 23 | 24 | /** 25 | * @return Get the before row image of current record. 26 | */ 27 | RowImage getBeforeImage(); 28 | 29 | /** 30 | * @return Get the after row image of current record. 31 | */ 32 | RowImage getAfterImage(); 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/impl/DefaultRowImage.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.impl; 2 | 3 | import com.aliyun.dts.subscribe.clients.common.NullableOptional; 4 | import com.aliyun.dts.subscribe.clients.record.*; 5 | import com.aliyun.dts.subscribe.clients.record.value.Value; 6 | import org.apache.commons.lang3.StringUtils; 7 | import org.apache.commons.lang3.tuple.ImmutablePair; 8 | import org.apache.commons.lang3.tuple.Pair; 9 | 10 | import java.util.*; 11 | import java.util.function.Function; 12 | import java.util.stream.Collectors; 13 | 14 | public class DefaultRowImage implements RowImage { 15 | 16 | private final RecordSchema recordSchema; 17 | private final Value[] values; 18 | private long size; 19 | 20 | public DefaultRowImage(RecordSchema recordSchema) { 21 | this.recordSchema = recordSchema; 22 | this.values = new Value[recordSchema.getFieldCount()]; 23 | } 24 | 25 | @Override 26 | public Value[] getValues() { 27 | return this.values; 28 | } 29 | 30 | @Override 31 | public Value getValue(int index) { 32 | return values[index]; 33 | } 34 | 35 | @Override 36 | public Value getValue(RecordField field) { 37 | return getValue(field.getFieldPosition()); 38 | } 39 | 40 | @Override 41 | public Value getValue(String fieldName) { 42 | NullableOptional recordField = recordSchema.getField(fieldName); 43 | return recordField.map(field -> getValue(field)) 44 | .orElse(null); 45 | } 46 | 47 | private void accumulateSize(Value value) { 48 | if (null != value) { 49 | size += value.size(); 50 | } 51 | } 52 | 53 | public void setValue(int i, Value value) { 54 | values[i] = value; 55 | 56 | accumulateSize(value); 57 | } 58 | 59 | public void setValue(String fieldName, Value value) { 60 | RecordField recordField = recordSchema.getField(fieldName) 61 | .orElse(null); 62 | setValue(recordField, value); 63 | } 64 | 65 | public void setValue(RecordField field, Value value) { 66 | int index = 
field.getFieldPosition();
67 | setValue(index, value);
68 | }
69 |
70 | @Override
71 | public Map toMap(Function filedNameResolver, Function valueResolver) {
72 | Map valueMap = new TreeMap<>();
73 | int i = 0;
74 |
75 | for (RecordField field : recordSchema.getFields()) {
76 | valueMap.put(filedNameResolver == null ? field.getFieldName() : filedNameResolver.apply(field.getFieldName()),
77 | valueResolver == null ? values[i] : valueResolver.apply(values[i]));
78 | i++;
79 | }
80 |
81 | return valueMap;
82 | }
83 |
84 | public Pair<RecordField, Value>[] buildFieldValuePairArray(Collection<RecordField> recordFields) {
85 | Pair<RecordField, Value>[] rs = new ImmutablePair[recordFields.size()];
86 | int index = 0;
87 | for (RecordField recordField : recordFields) {
88 | rs[index++] = Pair.of(recordField, getValue(recordField));
89 | }
90 |
91 | return rs;
92 | }
93 |
94 | @Override
95 | public Pair<RecordField, Value>[] getPrimaryKeyValues() {
96 | RecordIndexInfo recordIndexInfo = recordSchema.getPrimaryIndexInfo();
97 | if (null == recordIndexInfo) {
98 | return null;
99 | }
100 |
101 | return buildFieldValuePairArray(recordIndexInfo.getIndexFields());
102 | }
103 |
104 | private Pair<RecordField, Value>[] buildAllFieldValuePairArray(List<RecordIndexInfo> recordIndexInfoList) {
105 | if (null == recordIndexInfoList || recordIndexInfoList.isEmpty()) {
106 | return null;
107 | }
108 |
109 | Set<RecordField> recordFieldSet = recordIndexInfoList.stream()
110 | .flatMap(indexInfo -> indexInfo.getIndexFields().stream())
111 | .collect(Collectors.toSet());
112 |
113 | return buildFieldValuePairArray(recordFieldSet);
114 | }
115 |
116 | @Override
117 | public Pair<RecordField, Value>[] getUniqueKeyValues() {
118 | List<RecordIndexInfo> recordIndexInfoList = recordSchema.getUniqueIndexInfo();
119 | return buildAllFieldValuePairArray(recordIndexInfoList);
120 | }
121 |
122 | @Override
123 | public Pair<RecordField, Value>[] getForeignKeyValues() {
124 | List<RecordIndexInfo> recordIndexInfoList = recordSchema.getForeignIndexInfo();
125 | return buildAllFieldValuePairArray(recordIndexInfoList);
126 | }
127 |
128 | @Override
129 | public long size() {
130 | return size;
131 | }
132 |
133 | public String toString() {
134 |
135 | StringBuilder sb = new StringBuilder();
136 |
137 | sb.append("[");
138 |
139 | recordSchema.getFields().forEach(recordField ->
140 | { sb.append("Field ")
141 | .append("[")
142 | .append(recordField.getFieldName())
143 | .append("]")
144 | .append(" ")
145 | .append("[")
146 | .append(getValue(recordField))
147 | .append("]")
148 | .append("\n");});
149 |
150 | sb.append("]");
151 |
152 | return sb.toString();
153 | }
154 | }
155 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/BinaryEncodingObject.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.BytesUtil;
4 |
5 | import java.nio.ByteBuffer;
6 |
7 | public class BinaryEncodingObject implements Value {
8 |
9 | private ObjectType objectType;
10 | private ByteBuffer binaryData;
11 |
12 | public BinaryEncodingObject(ObjectType objectType, ByteBuffer binaryData) {
13 | this.objectType = objectType;
14 | this.binaryData = binaryData;
15 | }
16 |
17 | @Override
18 | public ValueType getType() {
19 | return ValueType.BINARY_ENCODING_OBJECT;
20 | }
21 |
22 | @Override
23 | public ByteBuffer getData() {
24 | return binaryData;
25 | }
26 |
27 | public ObjectType getObjectType() {
28 | return this.objectType;
29 | }
30 |
31 | @Override
32 | public long size() {
33 | if (null != binaryData)
{ 34 | return binaryData.capacity(); 35 | } 36 | 37 | return 0L; 38 | } 39 | 40 | public String toString() { 41 | return BytesUtil.byteBufferToHexString(binaryData); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/BitValue.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | import java.io.UnsupportedEncodingException; 4 | import java.nio.ByteBuffer; 5 | 6 | public class BitValue implements Value { 7 | private ByteBuffer value; 8 | 9 | public BitValue() { 10 | } 11 | 12 | public BitValue(byte[] value) { 13 | this.value = ByteBuffer.wrap(value); 14 | } 15 | 16 | public BitValue(ByteBuffer value) { 17 | this.value = value; 18 | } 19 | 20 | @Override 21 | public ValueType getType() { 22 | return ValueType.BIT; 23 | } 24 | 25 | @Override 26 | public ByteBuffer getData() { 27 | return value; 28 | } 29 | 30 | @Override 31 | public String toString() { 32 | try { 33 | return new String(value.array(), "utf-8"); 34 | } catch (UnsupportedEncodingException e) { 35 | throw new RuntimeException(e); 36 | } 37 | } 38 | 39 | @Override 40 | public long size() { 41 | if (null != value) { 42 | return value.capacity(); 43 | } 44 | 45 | return 0L; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/DecimalNumeric.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | 4 | import java.math.BigDecimal; 5 | 6 | public class DecimalNumeric implements Value { 7 | 8 | private BigDecimal data; 9 | 10 | public DecimalNumeric() { 11 | } 12 | 13 | public DecimalNumeric(BigDecimal data) { 14 | this.data = data; 15 | } 16 | 17 | public DecimalNumeric(String data) { 18 | if (null == data) { 19 | return; 20 | } 21 | this.data = new BigDecimal(data); 22 | } 23 | 24 | @Override 25 | public ValueType getType() { 26 | return ValueType.DECIMAL_NUMERIC; 27 | } 28 | 29 | @Override 30 | public BigDecimal getData() { 31 | return this.data; 32 | } 33 | 34 | @Override 35 | public String toString() { 36 | if (null == this.data) { 37 | return null; 38 | } 39 | return this.data.toString(); 40 | } 41 | 42 | @Override 43 | public long size() { 44 | if (null != data) { 45 | return data.toBigInteger().toByteArray().length; 46 | } 47 | return 0L; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/FloatNumeric.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | public class FloatNumeric implements Value { 4 | 5 | private Double data; 6 | 7 | public FloatNumeric(Double data) { 8 | this.data = data; 9 | } 10 | 11 | @Override 12 | public ValueType getType() { 13 | return ValueType.FLOAT_NUMERIC; 14 | } 15 | 16 | @Override 17 | public Double getData() { 18 | return this.data; 19 | } 20 | 21 | @Override 22 | public String toString() { 23 | return Double.toString(this.data); 24 | } 25 | 26 | @Override 27 | public long size() { 28 | return Double.BYTES; 29 | } 30 | } 31 | -------------------------------------------------------------------------------- 
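The value classes in this package (BinaryEncodingObject, BitValue, DecimalNumeric and FloatNumeric above, plus the integer, string, timestamp and geometry types that follow) all implement the small Value interface. Below is a minimal sketch of how listener code might map such values to plain Java objects, assuming only the API shown in this section (getType(), getData(), toString()); the class name ValueConverter is illustrative and not part of the SDK:

import com.aliyun.dts.subscribe.clients.record.value.Value;

public final class ValueConverter {
    private ValueConverter() {
    }

    // Convert a Value to a plain Java object. The numeric types expose a typed
    // getData() (BigInteger, BigDecimal, Double); everything else falls back to
    // the readable toString() each value class defines.
    public static Object toJavaObject(Value value) {
        if (value == null) {
            return null;
        }
        switch (value.getType()) {
            case INTEGER_NUMERIC:
            case DECIMAL_NUMERIC:
            case FLOAT_NUMERIC:
                return value.getData();
            case NONE:
                return null; // placeholder value, carries no meaning
            default:
                return value.toString();
        }
    }
}
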
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/IntegerNumeric.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | import java.math.BigInteger;
5 |
6 | public class IntegerNumeric implements Value {
7 |
8 | private BigInteger data;
9 |
10 | public IntegerNumeric() {
11 | }
12 |
13 | public IntegerNumeric(long value) {
14 | data = BigInteger.valueOf(value);
15 | }
16 |
17 | public IntegerNumeric(BigInteger value) {
18 | this.data = value;
19 | }
20 |
21 | public IntegerNumeric(String value) {
22 | this.data = new BigInteger(value);
23 | }
24 |
25 | @Override
26 | public ValueType getType() {
27 | return ValueType.INTEGER_NUMERIC;
28 | }
29 |
30 | public BigInteger getData() {
31 | return this.data;
32 | }
33 |
34 | @Override
35 | public String toString() {
36 | return this.data.toString();
37 | }
38 |
39 | @Override
40 | public long size() {
41 | if (null != data) {
42 | return data.toByteArray().length;
43 | }
44 |
45 | return 0L;
46 | }
47 |
48 | public IntegerNumeric parse(Object rawData) {
49 | if (null == rawData) {
50 | return null;
51 | }
52 |
53 | return new IntegerNumeric(rawData.toString());
54 | }
55 | }
56 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/NoneValue.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | /**
4 | * Placeholder value; it carries no meaning.
5 | */
6 | public class NoneValue implements Value {
7 |
8 | @Override
9 | public ValueType getType() {
10 | return ValueType.NONE;
11 | }
12 |
13 | @Override
14 | public Boolean getData() {
15 | return false;
16 | }
17 |
18 | @Override
19 | public long size() {
20 | return 0L;
21 | }
22 | }
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/ObjectType.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | public enum ObjectType {
4 |
5 | BINARY, BOOL, BLOB, XML, JSON, TEXT, BFILE, RAW, LONG_RAW, ROWID, UROWID, ENUM, SET, BYTEA, GEOMETRY, XTYPE, UUID;
6 |
7 | public static ObjectType parse(String type) {
8 |
9 | if (null == type) {
10 | return XTYPE;
11 | }
12 | type = type.toUpperCase();
13 |
14 | ObjectType[] objectTypes = ObjectType.values();
15 | for (ObjectType objectType : objectTypes) {
16 | if (objectType.name().equals(type)) {
17 | return objectType;
18 | }
19 | }
20 | return XTYPE;
21 | }
22 | }
23 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/SpecialNumericType.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | class SpecialNumeric implements Value {
5 |
6 | private static final String NAN = "NaN";
7 | private static final String INFINITY = "Infinity";
8 | private static final String NEGATIVE_INFINITY = "-Infinity";
9 | private static final String NEAR = "~";
10 |
11 | private SpecialNumericType value;
12 |
13 | public SpecialNumeric(SpecialNumericType value) {
14 | this.value = value;
15 | }
16 |
17 | public SpecialNumeric(String text) {
18 | this(SpecialNumericType.parseFrom(text));
19 | }
20 |
21 | @Override
22 | public ValueType
getType() { 23 | return ValueType.SPECIAL_NUMERIC; 24 | } 25 | 26 | @Override 27 | public SpecialNumericType getData() { 28 | return this.value; 29 | } 30 | 31 | @Override 32 | public String toString() { 33 | return this.value.toString(); 34 | } 35 | 36 | @Override 37 | public long size() { 38 | return Integer.BYTES; 39 | } 40 | 41 | public enum SpecialNumericType { 42 | NOT_ASSIGNED, 43 | INFINITY, 44 | NEGATIVE_INFINITY, 45 | NOT_A_NUMBER, 46 | NAN, 47 | NEAR; 48 | 49 | public static SpecialNumericType parseFrom(String value) { 50 | if (SpecialNumeric.NAN.equals(value)) { 51 | return NAN; 52 | } 53 | if (SpecialNumeric.NEAR.equals(value)) { 54 | return NEAR; 55 | } 56 | if (SpecialNumeric.INFINITY.equals(value)) { 57 | return INFINITY; 58 | } 59 | if (SpecialNumeric.NEGATIVE_INFINITY.equals(value)) { 60 | return NEGATIVE_INFINITY; 61 | } 62 | return SpecialNumericType.valueOf(value); 63 | } 64 | 65 | @Override 66 | public String toString() { 67 | if (this.equals(NAN)) { 68 | return SpecialNumeric.NAN; 69 | } 70 | if (this.equals(NEAR)) { 71 | return SpecialNumeric.NEAR; 72 | } 73 | if (this.equals(INFINITY)) { 74 | return SpecialNumeric.INFINITY; 75 | } 76 | if (this.equals(NEGATIVE_INFINITY)) { 77 | return SpecialNumeric.NEGATIVE_INFINITY; 78 | } 79 | return this.name(); 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/StringValue.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | import com.aliyun.dts.subscribe.clients.common.BytesUtil; 4 | import com.aliyun.dts.subscribe.clients.common.JDKCharsetMapper; 5 | import com.aliyun.dts.subscribe.clients.common.function.SwallowException; 6 | import org.apache.commons.lang3.StringUtils; 7 | 8 | import java.io.UnsupportedEncodingException; 9 | import java.nio.ByteBuffer; 10 | 11 | public class StringValue implements Value { 12 | 13 | public static final String DEFAULT_CHARSET = "UTF-8"; 14 | private ByteBuffer data; 15 | private String charset; 16 | 17 | public StringValue(ByteBuffer data, String charset) { 18 | this.data = data; 19 | this.charset = charset; 20 | } 21 | 22 | public StringValue(String data) { 23 | this(ByteBuffer.wrap( 24 | SwallowException.callAndThrowRuntimeException(() -> data.getBytes(DEFAULT_CHARSET))), 25 | DEFAULT_CHARSET); 26 | } 27 | 28 | public String getCharset() { 29 | return this.charset; 30 | } 31 | 32 | @Override 33 | public ValueType getType() { 34 | return ValueType.STRING; 35 | } 36 | 37 | @Override 38 | public ByteBuffer getData() { 39 | return this.data; 40 | } 41 | 42 | @Override 43 | public String toString() { 44 | 45 | // just return hex string if missing charset 46 | if (StringUtils.isEmpty(charset)) { 47 | return BytesUtil.byteBufferToHexString(data); 48 | } 49 | 50 | // try encode data by specified charset 51 | try { 52 | if (!StringUtils.isEmpty(charset)) { 53 | return new String(data.array(), charset); 54 | } 55 | return new String(data.array()); 56 | } catch (UnsupportedEncodingException e1) { 57 | try { 58 | return new String(data.array(), JDKCharsetMapper.getJDKECharset(charset)); 59 | } catch (UnsupportedEncodingException e2) { 60 | return charset + "_'" + BytesUtil.byteBufferToHexString(data) + "'"; 61 | } 62 | } 63 | } 64 | 65 | public String toString(String targetCharset) { 66 | //TODO(huoyu): convert 67 | return "to impl"; 68 | } 69 | 70 | @Override 71 | public long size() { 
72 | if (null != data) {
73 | return data.capacity();
74 | }
75 |
76 | return 0L;
77 | }
78 | }
79 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/TextEncodingObject.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import org.apache.commons.lang3.StringUtils;
4 |
5 | public class TextEncodingObject implements Value {
6 |
7 | private ObjectType objectType;
8 | private String data;
9 |
10 | public TextEncodingObject(ObjectType objectType, String data) {
11 | this.objectType = objectType;
12 | this.data = data;
13 | }
14 |
15 | @Override
16 | public ValueType getType() {
17 | return ValueType.TEXT_ENCODING_OBJECT;
18 | }
19 |
20 | @Override
21 | public String getData() {
22 | return this.data;
23 | }
24 |
25 | @Override
26 | public long size() {
27 | return StringUtils.length(data);
28 | }
29 |
30 | public ObjectType getObjectType() {
31 | return objectType;
32 | }
33 |
34 | public String toString() {
35 | return data;
36 | }
37 | }
38 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/UnixTimestamp.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | import java.sql.Timestamp;
5 | import java.text.DateFormat;
6 |
7 | /**
8 | * The fractional-second (micro) part may be present.
9 | */
10 | public class UnixTimestamp implements Value {
11 |
12 | private long timestampSec;
13 | private Integer micro;
14 |
15 | public UnixTimestamp() {
16 | this(0L, null);
17 | }
18 |
19 | public UnixTimestamp(long timestampSec, Integer micro) {
20 | this.timestampSec = timestampSec;
21 | this.micro = micro;
22 | }
23 |
24 | public void setTimestampSec(long second) {
25 | this.timestampSec = second;
26 | }
27 |
28 | public long getTimestampSec() {
29 | return this.timestampSec;
30 | }
31 |
32 | public void setMicro(Integer micro) {
33 | this.micro = micro;
34 | }
35 |
36 | public Integer getMicro() {
37 | return this.micro;
38 | }
39 |
40 | @Override
41 | public ValueType getType() {
42 | return ValueType.UNIX_TIMESTAMP;
43 | }
44 |
45 | @Override
46 | public String getData() {
47 | return toString();
48 | }
49 |
50 | @Override
51 | public String toString() {
52 | return toString(null);
53 | }
54 |
55 | public String toString(DateFormat dateFormat) {
56 | Timestamp timestamp = toJdbcTimestamp();
57 | if (null == dateFormat) {
58 | return timestamp.toString();
59 | } else {
60 | return dateFormat.format(timestamp);
61 | }
62 | }
63 |
64 | public Timestamp toJdbcTimestamp() {
65 | Timestamp timestamp = new Timestamp(this.timestampSec * 1000);
66 | if (null != this.micro) {
67 | timestamp.setNanos(this.micro * 1000);
68 | }
69 | return timestamp;
70 | }
71 |
72 | @Override
73 | public long size() {
74 | return Long.BYTES + Integer.BYTES;
75 | }
76 | }
77 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/Value.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | public interface Value<T> {
4 | /**
5 | * @return get value type
6 | */
7 | ValueType getType();
8 |
9 | /**
10 | * @return Get the internal data of current value.
11 | */ 12 | T getData(); 13 | 14 | /** 15 | * @return Convert current to string by utf-8 encoding. 16 | */ 17 | String toString(); 18 | 19 | /** 20 | * @return Get the size of current value. 21 | */ 22 | long size(); 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/ValueType.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | public enum ValueType { 4 | BIT, 5 | INTEGER_NUMERIC, 6 | FLOAT_NUMERIC, 7 | DECIMAL_NUMERIC, 8 | SPECIAL_NUMERIC, 9 | STRING, 10 | DATETIME, 11 | UNIX_TIMESTAMP, 12 | TEXT_ENCODING_OBJECT, 13 | BINARY_ENCODING_OBJECT, 14 | WKB_GEOMETRY, 15 | WKT_GEOMETRY, 16 | NONE 17 | } 18 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/WKBGeometry.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | 4 | import com.aliyun.dts.subscribe.clients.common.BytesUtil; 5 | import com.aliyun.dts.subscribe.clients.common.GeometryUtil; 6 | import com.vividsolutions.jts.io.ParseException; 7 | 8 | import java.nio.ByteBuffer; 9 | 10 | public class WKBGeometry implements Value { 11 | 12 | private long srid; 13 | private ByteBuffer data; 14 | 15 | public WKBGeometry(ByteBuffer data) { 16 | this.data = data; 17 | } 18 | 19 | public void setData(ByteBuffer data) { 20 | this.data = data; 21 | } 22 | 23 | @Override 24 | public ValueType getType() { 25 | return ValueType.WKB_GEOMETRY; 26 | } 27 | 28 | @Override 29 | public ByteBuffer getData() { 30 | return this.data; 31 | } 32 | 33 | @Override 34 | public long size() { 35 | if (null != data) { 36 | return data.capacity(); 37 | } 38 | 39 | return 0L; 40 | } 41 | 42 | public String toString() { 43 | try { 44 | return GeometryUtil.fromWKBToWKTText(data); 45 | } catch (ParseException ex) { 46 | return BytesUtil.byteBufferToHexString(data); 47 | } 48 | } 49 | 50 | public WKBGeometry parse(Object rawData) { 51 | if (null == rawData) { 52 | return null; 53 | } 54 | 55 | if (rawData instanceof byte[]) { 56 | return new WKBGeometry(ByteBuffer.wrap((byte[]) rawData)); 57 | } 58 | 59 | return new WKBGeometry(BytesUtil.hexStringToByteBuffer(rawData.toString())); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/record/value/WKTGeometry.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | import org.apache.commons.lang3.StringUtils; 4 | 5 | public class WKTGeometry implements Value { 6 | 7 | private long srid; 8 | private String data; 9 | 10 | public WKTGeometry(String data) { 11 | this.data = data; 12 | } 13 | 14 | @Override 15 | public ValueType getType() { 16 | return ValueType.WKT_GEOMETRY; 17 | } 18 | 19 | @Override 20 | public String getData() { 21 | return this.data; 22 | } 23 | 24 | @Override 25 | public long size() { 26 | if (null != data) { 27 | return StringUtils.length(data); 28 | } 29 | 30 | return 0L; 31 | } 32 | 33 | public String toString() { 34 | return data; 35 | } 36 | 37 | public WKTGeometry parse(Object rawData) { 38 | if (null == rawData) { 39 | return null; 40 | } 41 | 42 | return new WKTGeometry(rawData.toString()); 43 | } 44 | } 45 | 
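WKBGeometry's toString() above already falls back from WKT conversion to a hex dump when the binary cannot be parsed, so both geometry flavors can be normalized to text the same way. A short usage sketch, assuming only the two geometry classes shown above; GeometryText is an illustrative name, not an SDK class:

import com.aliyun.dts.subscribe.clients.record.value.Value;
import com.aliyun.dts.subscribe.clients.record.value.ValueType;

public final class GeometryText {
    private GeometryText() {
    }

    // Returns WKT text for either geometry value type (or a hex string when
    // the WKB bytes cannot be parsed, per WKBGeometry.toString() above).
    public static String toWkt(Value geometry) {
        if (geometry == null) {
            return null;
        }
        ValueType type = geometry.getType();
        if (type != ValueType.WKB_GEOMETRY && type != ValueType.WKT_GEOMETRY) {
            throw new IllegalArgumentException("not a geometry value: " + type);
        }
        return geometry.toString();
    }
}
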
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/ClusterSwitchListener.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerInterceptor;
4 | import org.apache.kafka.clients.consumer.ConsumerRecords;
5 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
6 | import org.apache.kafka.common.ClusterResource;
7 | import org.apache.kafka.common.ClusterResourceListener;
8 | import org.apache.kafka.common.KafkaException;
9 | import org.apache.kafka.common.TopicPartition;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.Map;
14 |
15 | /**
16 | * We recommend users register this listener.
17 | * When the origin cluster is unavailable and a new cluster is created by HA (high availability service),
18 | * the cluster id is different, and we want to warn the user that a new cluster is serving.
19 | * More importantly, we want the user to recreate the KafkaConsumer and use a timestamp to reseek the offset.
20 | * If the user follows this guide, less duplicated data will be pushed; otherwise more duplicated data may be consumed.
21 | */
22 | public class ClusterSwitchListener implements ClusterResourceListener, ConsumerInterceptor<byte[], byte[]> {
23 | private final static Logger logger = LoggerFactory.getLogger(ClusterSwitchListener.class);
24 | private ClusterResource originClusterResource = null;
25 |
26 | public ConsumerRecords<byte[], byte[]> onConsume(ConsumerRecords<byte[], byte[]> records) {
27 | return records;
28 | }
29 |
30 | public void close() {
31 | }
32 |
33 | public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
34 | }
35 |
36 | public void onUpdate(ClusterResource clusterResource) {
37 | synchronized (this) {
38 | if (null == originClusterResource) {
39 | logger.info("Cluster updated to " + clusterResource.clusterId());
40 | originClusterResource = clusterResource;
41 | } else {
42 | if (clusterResource.clusterId().equals(originClusterResource.clusterId())) {
43 | logger.info("Cluster not changed on update: " + clusterResource.clusterId());
44 | } else {
45 | throw new ClusterSwitchException("Cluster changed from " + originClusterResource.clusterId() + " to " + clusterResource.clusterId()
46 | + ", consumer requires restart");
47 | }
48 | }
49 | }
50 | }
51 |
52 | public void configure(Map<String, ?> configs) {
53 | }
54 |
55 | public static class ClusterSwitchException extends KafkaException {
56 | public ClusterSwitchException(String message, Throwable cause) {
57 | super(message, cause);
58 | }
59 |
60 | public ClusterSwitchException(String message) {
61 | super(message);
62 | }
63 |
64 | public ClusterSwitchException(Throwable cause) {
65 | super(cause);
66 | }
67 |
68 | public ClusterSwitchException() {
69 | super();
70 | }
71 | }
72 | }
73 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/ConsumerWrap.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | import com.aliyun.dts.subscribe.clients.ConsumerContext;
4 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
5 | import org.apache.commons.lang3.StringUtils;
6 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
7 | import org.apache.kafka.clients.consumer.ConsumerRecords;
8 | import org.apache.kafka.clients.consumer.KafkaConsumer;
9 | import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
10 | import org.apache.kafka.common.TopicPartition;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import java.io.Closeable;
15 | import java.util.*;
16 | import java.util.concurrent.atomic.AtomicBoolean;
17 | import java.util.function.Supplier;
18 |
19 | import static com.aliyun.dts.subscribe.clients.recordfetcher.Names.*;
20 | import static com.aliyun.dts.subscribe.clients.common.Util.mergeSourceKafkaProperties;
21 |
22 |
23 | public abstract class ConsumerWrap implements Closeable {
24 | private static final Logger log = LoggerFactory.getLogger(ConsumerWrap.class);
25 |
26 | // Directly seek to the given offset; we do not check whether the offset is legal.
27 | public abstract void setFetchOffsetByOffset(TopicPartition topicPartition, Checkpoint checkpoint);
28 | public abstract void setFetchOffsetByTimestamp(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException);
29 | // assignTopic does not use auto balance; we recommend this way to consume records, with offsets committed by the user itself.
30 | public abstract void assignTopic(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException);
31 | // subscribeTopic uses consumer group mode: multiple consumers with the same group id build a highly available consuming system.
32 | // Still, we recommend turning off auto commit mode and committing the offset manually.
33 | // This delays the offset commit until the record is really consumed by the business logic, which strongly defends against data loss.
34 | public abstract void subscribeTopic(TopicPartition topicPartition, Supplier<Checkpoint> streamCheckpoint, boolean isCheckpointNotExistThrowException);
35 |
36 |
37 | public abstract ConsumerRecords<byte[], byte[]> poll();
38 |
39 | public abstract KafkaConsumer<byte[], byte[]> getRawConsumer();
40 |
41 | public static class DefaultConsumerWrap extends ConsumerWrap {
42 | private AtomicBoolean firstStart = new AtomicBoolean(true);
43 | private KafkaConsumer<byte[], byte[]> consumer;
44 | private final long poolTimeOut;
45 |
46 | private final ConsumerContext consumerContext;
47 |
48 | public DefaultConsumerWrap(Properties properties, ConsumerContext consumerContext) {
49 | Properties consumerConfig = new Properties();
50 | mergeSourceKafkaProperties(properties, consumerConfig);
51 | checkConfig(consumerConfig);
52 | consumer = new KafkaConsumer<>(consumerConfig);
53 | poolTimeOut = Long.valueOf(properties.getProperty(POLL_TIME_OUT, "500"));
54 |
55 | this.consumerContext = consumerContext;
56 | }
57 |
58 | @Override
59 | public void setFetchOffsetByOffset(TopicPartition topicPartition, Checkpoint checkpoint) {
60 | consumer.seek(topicPartition, checkpoint.getOffset());
61 | }
62 |
63 | // recommended
64 | @Override
65 | public void setFetchOffsetByTimestamp(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException) {
66 | long timeStamp = checkpoint.getTimeStamp();
67 | Map<TopicPartition, OffsetAndTimestamp> remoteOffset = consumer.offsetsForTimes(Collections.singletonMap(topicPartition, timeStamp));
68 | OffsetAndTimestamp toSet = remoteOffset.get(topicPartition);
69 | if (null == toSet) {
70 | log.warn("Failed to seek offset for topic [" + topicPartition + "] with timestamp [" + timeStamp + "]");
71 | if (isCheckpointNotExistThrowException) {
72 | throw new RuntimeException("Failed to seek offset for topic [" + topicPartition + "] with timestamp [" + timeStamp + "]");
73 | } else {
74 | log.warn("Set to beginning");
75 | consumer.seekToBeginning(Collections.singleton(topicPartition));
76 | }
77 | } else {
78 | log.info("RecordFetcher: seek for {} with checkpoint {}", topicPartition, checkpoint);
79 |
80 | consumer.seek(topicPartition, toSet.offset());
81 | }
82 | }
83 |
84 | @Override
85 | public void assignTopic(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException) {
86 | consumer.assign(Arrays.asList(topicPartition));
87 |
88 | consumerContext.setTopicPartitions(Collections.singleton(topicPartition));
89 |
90 | log.info("RecordGenerator: assigned for {} with checkpoint {}", topicPartition, checkpoint);
91 | setFetchOffsetByTimestamp(topicPartition, checkpoint, isCheckpointNotExistThrowException);
92 | }
93 |
94 |
95 | // Not tested; please do not use this function
96 | @Override
97 | public void subscribeTopic(TopicPartition topicPartition, Supplier<Checkpoint> streamCheckpoint, boolean isCheckpointNotExistThrowException) {
98 | consumer.subscribe(Arrays.asList(topicPartition.topic()), new ConsumerRebalanceListener() {
99 | @Override
100 | public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
101 | log.info("RecordFetcher consumer: partition revoked for [{}]", StringUtils.join(partitions, ","));
102 | }
103 |
104 | @Override
105 | public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
106 | log.info("RecordFetcher consumer: partition assigned for [{}]", StringUtils.join(partitions, ","));
107 |
108 | consumerContext.setTopicPartitions(partitions);
109 | if (!consumerContext.hasValidTopicPartitions()) {
110 | log.warn("In subscribe mode, recordFetcher consumer does not have any partition assigned, probably this client is a backup...");
111 | }
112 |
113 | if (partitions.contains(topicPartition)) {
114 | Checkpoint toSet = streamCheckpoint.get();
115 | setFetchOffsetByTimestamp(topicPartition, toSet, isCheckpointNotExistThrowException);
116 | log.info("RecordFetcher consumer: subscribe for [{}] with checkpoint [{}] start", topicPartition, toSet);
117 | }
118 | }
119 | });
120 | }
121 |
122 | public ConsumerRecords<byte[], byte[]> poll() {
123 | return consumer.poll(poolTimeOut);
124 | }
125 |
126 | @Override
127 | public KafkaConsumer<byte[], byte[]> getRawConsumer() {
128 | return consumer;
129 | }
130 |
131 | public synchronized void close() {
132 | if (null != consumer) {
133 | consumer.close();
134 | }
135 | }
136 |
137 | private void checkConfig(Properties properties) {
138 |
139 | }
140 |
141 | }
142 | }
143 |
-------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/Names.java: --------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | public class Names {
4 | // detail control
5 | public static final String TRY_TIME = "stream.tryTime";
6 | public static final String TRY_BACK_TIME_MS = "stream.tryBackTimeMS";
7 | public static final String RETRY_TIME_OUT = "stream.errorRetryTimeOut";
8 | public static final String POLL_TIME_OUT = "stream.pool.timeout";
9 | // general name
10 | public static final String KAFKA_TOPIC = "kafkaTopic";
11 | public static final String KAFKA_BROKER_URL_NAME = "broker";
12 | public static final String GROUP_NAME = "group";
13 |
14 | public static final String USE_CONFIG_CHECKPOINT_NAME = "useConfigCheckpoint";
15 | public static final String SUBSCRIBE_MODE_NAME = "subscribeMode";
16 |
17 | public static final String INITIAL_CHECKPOINT_NAME = "checkpoint";
18 | public static final String USER_NAME = "user";
19 | public static final String PASSWORD_NAME = "password"; 20 | public static final String SID_NAME = "sid"; 21 | public static final long MAX_TIMESTAMP_SECOND = 99999999999L; 22 | } 23 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/OffsetCommitCallBack.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.recordfetcher; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | public interface OffsetCommitCallBack { 6 | void commit(TopicPartition tp, long timestamp, long offset, String metadata); 7 | } 8 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordgenerator/AvroDeserializer.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.recordgenerator; 2 | 3 | import com.aliyun.dts.subscribe.clients.formats.avro.Record; 4 | import org.apache.avro.io.Decoder; 5 | import org.apache.avro.io.DecoderFactory; 6 | import org.apache.avro.specific.SpecificDatumReader; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | 11 | public class AvroDeserializer { 12 | private static final Logger log = LoggerFactory.getLogger(AvroDeserializer.class); 13 | 14 | private final SpecificDatumReader reader = new SpecificDatumReader(Record.class); 15 | 16 | public AvroDeserializer() { 17 | } 18 | 19 | public Record deserialize(byte[] data) { 20 | 21 | Decoder decoder = DecoderFactory.get().binaryDecoder(data, null); 22 | Record payload = null; 23 | try { 24 | payload = reader.read(null, decoder); 25 | return payload; 26 | }catch (Throwable ex) { 27 | log.error("AvroDeserializer: deserialize record failed cause " + ex.getMessage(), ex); 28 | throw new RuntimeException(ex); 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordgenerator/UserRecordGenerator.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.recordgenerator; 2 | 3 | import com.aliyun.dts.subscribe.clients.ConsumerContext; 4 | import com.aliyun.dts.subscribe.clients.common.Checkpoint; 5 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord; 6 | import com.aliyun.dts.subscribe.clients.common.WorkThread; 7 | import com.aliyun.dts.subscribe.clients.formats.avro.Record; 8 | import com.aliyun.dts.subscribe.clients.recordfetcher.OffsetCommitCallBack; 9 | import org.apache.kafka.clients.consumer.ConsumerRecord; 10 | import org.apache.kafka.common.TopicPartition; 11 | import org.apache.kafka.common.metrics.Metrics; 12 | import org.apache.kafka.common.metrics.Sensor; 13 | import org.apache.kafka.common.metrics.stats.SimpleRate; 14 | import org.apache.kafka.common.metrics.stats.Total; 15 | import org.slf4j.Logger; 16 | import org.slf4j.LoggerFactory; 17 | 18 | import java.io.Closeable; 19 | import java.io.IOException; 20 | import java.util.concurrent.LinkedBlockingQueue; 21 | import java.util.concurrent.TimeUnit; 22 | 23 | import static com.aliyun.dts.subscribe.clients.common.Util.sleepMS; 24 | 25 | /** 26 | * This class is to resolve avro record deserialize from bytes to UserRecord 27 | */ 28 | public class UserRecordGenerator implements Runnable, Closeable { 29 | private static final Logger log = 
LoggerFactory.getLogger(UserRecordGenerator.class); 30 | 31 | protected ConsumerContext consumerContext; 32 | protected final LinkedBlockingQueue toProcessRecord; 33 | protected final AvroDeserializer fastDeserializer; 34 | 35 | protected final LinkedBlockingQueue processedRecord; 36 | 37 | protected volatile Checkpoint commitCheckpoint; 38 | protected WorkThread commitThread; 39 | protected final OffsetCommitCallBack offsetCommitCallBack; 40 | 41 | protected Metrics metrics; 42 | 43 | protected final Sensor recordStoreOutCountSensor; 44 | protected final Sensor recordStoreOutByteSensor; 45 | 46 | public UserRecordGenerator(ConsumerContext consumerContext, LinkedBlockingQueue toProcessRecord, LinkedBlockingQueue processedRecord, 47 | OffsetCommitCallBack offsetCommitCallBack) { 48 | this.consumerContext = consumerContext; 49 | this.toProcessRecord = toProcessRecord; 50 | this.fastDeserializer = new AvroDeserializer(); 51 | this.processedRecord = processedRecord; 52 | 53 | this.offsetCommitCallBack = offsetCommitCallBack; 54 | 55 | commitCheckpoint = new Checkpoint(null, -1, -1, "-1"); 56 | 57 | metrics = consumerContext.getDtsMetrics().getCoreMetrics(); 58 | 59 | metrics.addMetric( 60 | metrics.metricName("DStoreRecordQueue", "UserRecordGenerator"), 61 | (config, now) -> (toProcessRecord.size())); 62 | 63 | metrics.addMetric( 64 | metrics.metricName("DefaultUserRecordQueue", "UserRecordGenerator"), 65 | (config, now) -> (processedRecord.size())); 66 | 67 | this.recordStoreOutCountSensor = metrics.sensor("record-store-out-row"); 68 | this.recordStoreOutCountSensor.add(metrics.metricName("outCounts", "recordstore"), new Total()); 69 | this.recordStoreOutCountSensor.add(metrics.metricName("outRps", "recordstore"), new SimpleRate()); 70 | this.recordStoreOutByteSensor = metrics.sensor("record-store-out-byte"); 71 | this.recordStoreOutByteSensor.add(metrics.metricName("outBytes", "recordstore"), new Total()); 72 | this.recordStoreOutByteSensor.add(metrics.metricName("outBps", "recordstore"), new SimpleRate()); 73 | } 74 | 75 | @Override 76 | public void run() { 77 | while (!consumerContext.isExited()) { 78 | ConsumerRecord toProcess = null; 79 | Record record = null; 80 | int fetchFailedCount = 0; 81 | try { 82 | while (null == (toProcess = toProcessRecord.peek()) && !consumerContext.isExited()) { 83 | sleepMS(5); 84 | fetchFailedCount++; 85 | if (fetchFailedCount % 1000 == 0 && consumerContext.hasValidTopicPartitions()) { 86 | log.info("UserRecordGenerator: haven't receive records from generator for 5s"); 87 | } 88 | } 89 | if (consumerContext.isExited()) { 90 | return; 91 | } 92 | final ConsumerRecord consumerRecord = toProcess; 93 | consumerRecord.timestamp(); 94 | record = fastDeserializer.deserialize(consumerRecord.value()); 95 | log.debug("UserRecordGenerator: meet [{}] record type", record.getOperation()); 96 | 97 | DefaultUserRecord defaultUserRecord = new DefaultUserRecord(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()), consumerRecord.offset(), 98 | record, 99 | (tp, commitRecord, offset, metadata) -> { 100 | recordStoreOutCountSensor.record(1); 101 | recordStoreOutByteSensor.record(consumerRecord.value().length); 102 | commitCheckpoint = new Checkpoint(tp, commitRecord.getSourceTimestamp(), offset, metadata); 103 | commit(); 104 | }); 105 | 106 | int offerTryCount = 0; 107 | 108 | while (!offerRecord(1000, TimeUnit.MILLISECONDS, defaultUserRecord) && !consumerContext.isExited()) { 109 | if (++offerTryCount % 10 == 0) { 110 | log.info("UserRecordGenerator: offer 
user record has failed for a period (10s) [ " + record + "]"); 111 | } 112 | } 113 | 114 | toProcessRecord.poll(); 115 | } catch (Exception e) { 116 | log.error("UserRecordGenerator: process record failed, raw consumer record [" + toProcess + "], parsed record [" + record + "], cause " + e.getMessage(), e); 117 | consumerContext.exit(); 118 | } 119 | } 120 | } 121 | 122 | protected boolean offerRecord(int timeOut, TimeUnit timeUnit, DefaultUserRecord defaultUserRecord) { 123 | try { 124 | return processedRecord.offer(defaultUserRecord, timeOut, timeUnit); 125 | } catch (Exception e) { 126 | log.error("UserRecordGenerator: offer record failed, record[" + defaultUserRecord + "], cause " + e.getMessage(), e); 127 | return false; 128 | } 129 | } 130 | 131 | @Override 132 | public void close() throws IOException { 133 | consumerContext.exit(); 134 | commitThread.stop(); 135 | } 136 | 137 | // user define how to commit 138 | private void commit() { 139 | if (null != offsetCommitCallBack) { 140 | if (commitCheckpoint.getTopicPartition() != null && commitCheckpoint.getOffset() != -1) { 141 | offsetCommitCallBack.commit(commitCheckpoint.getTopicPartition(), commitCheckpoint.getTimeStamp(), 142 | commitCheckpoint.getOffset(), commitCheckpoint.getInfo()); 143 | } 144 | } 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/DbType.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.recordprocessor; 2 | 3 | public enum DbType { 4 | MySQL, Oracle; 5 | } 6 | -------------------------------------------------------------------------------- /src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/DefaultRecordPrintListener.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.recordprocessor; 2 | 3 | import com.aliyun.dts.subscribe.clients.common.RecordListener; 4 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord; 5 | import com.aliyun.dts.subscribe.clients.record.OperationType; 6 | import com.aliyun.dts.subscribe.clients.record.RecordSchema; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | public class DefaultRecordPrintListener implements RecordListener { 11 | private static final Logger log = LoggerFactory.getLogger(DefaultRecordPrintListener.class); 12 | 13 | public DefaultRecordPrintListener(DbType mySQL) { 14 | } 15 | 16 | @Override 17 | public void consume(DefaultUserRecord record) { 18 | 19 | OperationType operationType = record.getOperationType(); 20 | 21 | RecordSchema recordSchema = record.getSchema(); 22 | 23 | StringBuilder stringBuilder = new StringBuilder(); 24 | 25 | stringBuilder 26 | .append("\n") 27 | // record id can not be used as unique identifier 28 | .append("RecordID [").append(record.getId()).append("]\n") 29 | // record generate timestamp in source log 30 | .append("RecordTimestamp [").append(record.getSourceTimestamp()).append("] \n") 31 | // source info contains which source this record came from 32 | .append("Source [").append(recordSchema.getDatabaseInfo()).append("]\n") 33 | // record type 34 | .append("RecordType [").append(record.getOperationType()).append("]\n"); 35 | 36 | if (operationType.equals(OperationType.INSERT) 37 | || operationType.equals(OperationType.UPDATE) 38 | || operationType.equals(OperationType.DELETE) 39 | || 
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/EtlRecordProcessor.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordprocessor;
2 | 
3 | 
4 | import com.aliyun.dts.subscribe.clients.ConsumerContext;
5 | import com.aliyun.dts.subscribe.clients.common.*;
6 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
7 | import org.apache.kafka.common.metrics.Metrics;
8 | import org.apache.kafka.common.metrics.Sensor;
9 | import org.apache.kafka.common.metrics.stats.SimpleRate;
10 | import org.apache.kafka.common.metrics.stats.Total;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 | 
14 | import java.io.Closeable;
15 | import java.util.Map;
16 | import java.util.concurrent.LinkedBlockingQueue;
17 | 
18 | import static com.aliyun.dts.subscribe.clients.common.Util.require;
19 | import static com.aliyun.dts.subscribe.clients.common.Util.sleepMS;
20 | 
21 | 
22 | /**
23 |  * This demo shows how to consume the user records deserialized from Avro bytes,
24 |  * e.g. how to print the columns of a deserialized record.
25 |  */
26 | public class EtlRecordProcessor implements Runnable, Closeable {
27 |     private static final Logger log = LoggerFactory.getLogger(EtlRecordProcessor.class);
28 | 
29 |     private final LinkedBlockingQueue<DefaultUserRecord> toProcessRecord;
30 |     private final Map<String, RecordListener> recordListeners;
31 | 
32 |     private ConsumerContext consumerContext;
33 | 
34 |     public EtlRecordProcessor(ConsumerContext consumerContext, LinkedBlockingQueue<DefaultUserRecord> toProcessRecord,
35 |                               Map<String, RecordListener> recordListeners) {
36 |         this.consumerContext = consumerContext;
37 |         this.toProcessRecord = toProcessRecord;
38 |         this.recordListeners = recordListeners;
39 |     }
40 | 
41 |     @Override
42 |     public void run() {
43 |         while (!consumerContext.isExited()) {
44 |             DefaultUserRecord toProcess = null;
45 |             int fetchFailedCount = 0;
46 |             try {
47 |                 while (null == (toProcess = toProcessRecord.peek()) && !consumerContext.isExited()) {
48 |                     sleepMS(5);
49 |                     fetchFailedCount++;
50 |                     if (fetchFailedCount % 1000 == 0 && consumerContext.hasValidTopicPartitions()) {
51 |                         log.info("EtlRecordProcessor: haven't received records from the generator for 5s");
52 |                     }
53 |                 }
54 |                 if (consumerContext.isExited()) {
55 |                     return;
56 |                 }
57 |                 fetchFailedCount = 0;
58 |                 final DefaultUserRecord consumerRecord = toProcess;
59 | 
60 |                 for (RecordListener recordListener : recordListeners.values()) {
61 |                     recordListener.consume(consumerRecord);
62 |                 }
63 | 
64 |                 toProcessRecord.poll();
65 |             } catch (Exception e) {
66 |                 log.error("EtlRecordProcessor: process record failed, raw consumer record [" + toProcess + "], cause " + e.getMessage(), e);
67 |                 consumerContext.exit();
68 |             }
69 |         }
70 |     }
71 | 
72 |     public void registerRecordListener(String name, RecordListener recordListener) {
73 |         require(null != name && null != recordListener, "null value not accepted");
74 |         recordListeners.put(name, recordListener);
75 |     }
76 | 
77 |     public void close() {
78 |         consumerContext.exit();
79 |     }
80 | }
81 | 
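A minimal wiring sketch for the processor above, assuming the ConsumerContext is built as in the demos below and that some other thread offers records into the queue (queue capacity and thread name are arbitrary choices):

    import com.aliyun.dts.subscribe.clients.ConsumerContext;
    import com.aliyun.dts.subscribe.clients.common.RecordListener;
    import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
    import com.aliyun.dts.subscribe.clients.recordprocessor.*;

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.LinkedBlockingQueue;

    public class EtlWiringSketch {
        public static void start(ConsumerContext consumerContext) {
            LinkedBlockingQueue<DefaultUserRecord> queue = new LinkedBlockingQueue<>(512);
            Map<String, RecordListener> listeners = new HashMap<>(); // mutable, so more listeners can be registered later
            EtlRecordProcessor processor = new EtlRecordProcessor(consumerContext, queue, listeners);
            processor.registerRecordListener("printer", new DefaultRecordPrintListener(DbType.MySQL));
            new Thread(processor, "etl-record-processor").start(); // records must be offered into `queue` elsewhere
        }
    }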
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/FieldValue.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordprocessor;
2 | 
3 | import org.apache.commons.lang3.StringUtils;
4 | 
5 | import java.io.UnsupportedEncodingException;
6 | 
7 | public class FieldValue {
8 |     private String encoding;
9 |     private byte[] bytes;
10 |     public String getEncoding() {
11 |         return encoding;
12 |     }
13 |     public byte[] getValue() {
14 |         return bytes;
15 |     }
16 | 
17 |     public void setEncoding(String encoding) {
18 |         this.encoding = encoding;
19 |     }
20 |     public void setValue(byte[] bytes) {
21 |         this.bytes = bytes;
22 |     }
23 |     @Override
24 |     public String toString() {
25 |         if (null == getValue()) {
26 |             return "null [binary]";
27 |         }
28 |         if (encoding == null) {
29 |             return super.toString();
30 |         }
31 |         try {
32 |             if (StringUtils.equals("utf8mb4", encoding)) {
33 |                 return new String(getValue(), "utf8");
34 |             } else {
35 |                 return new String(getValue(), encoding);
36 |             }
37 |         } catch (UnsupportedEncodingException e) {
38 |             throw new RuntimeException("Unsupported encoding: " + encoding, e);
39 |         }
40 |     }
41 | }
42 | 
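The utf8mb4 special case exists because MySQL's utf8mb4 is ordinary four-byte UTF-8, while stock JDKs register no charset under that name. A standalone check:

    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;

    public class CharsetCheck {
        public static void main(String[] args) {
            // "utf8" is a registered alias of UTF-8; "utf8mb4" is unknown to the JDK
            System.out.println(Charset.isSupported("utf8"));    // true
            System.out.println(Charset.isSupported("utf8mb4")); // false on standard JDKs
            byte[] smiley = "\uD83D\uDE00".getBytes(StandardCharsets.UTF_8); // a 4-byte code point
            System.out.println(new String(smiley, StandardCharsets.UTF_8));
        }
    }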
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | log4j.rootLogger=INFO,CONSOLE
16 | 
17 | ## Log output to the console
18 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
19 | log4j.appender.CONSOLE.Threshold=INFO
20 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
21 | log4j.appender.CONSOLE.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ss.SSS}] [%-5p] [%t] [%c:%L] - %m%n
22 | 
23 | # Log file appender (logFile)
24 | log4j.logger.com.aliyun=INFO,file
25 | log4j.additivity.com.aliyun=true
26 | log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
27 | log4j.appender.file.Threshold=INFO
28 | log4j.appender.file.Append=true
29 | log4j.appender.file.File=dts-new-subscribe.log
30 | log4j.appender.file.layout=org.apache.log4j.PatternLayout
31 | log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ss.SSS}] [%-5p] [%t] [%c:%L] - %m%n
32 | 
33 | 
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DBMapperTest.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 | 
3 | import com.aliyun.dms.subscribe.clients.DBMapper;
4 | import com.aliyun.dts.subscribe.clients.formats.avro.Operation;
5 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
6 | import org.junit.Assert;
7 | import org.junit.Test;
8 | 
9 | 
10 | public class DBMapperTest {
11 | 
12 |     @Test
13 |     public void dbMapperTest() {
14 |         DBMapper dbMapper = new DBMapper();
15 |         dbMapper.init("{\"dts_h02\":{\"all\":false,\"name\":\"dts_h\",\"Table\":{\"dtsh27_02\":{\"all\":true,\"name\":\"dtsh\"},\"dts28_01\":{\"all\":true,\"name\":\"dts\"},\"dts28_02\":{\"all\":true,\"name\":\"dts\"}}},\"dts_h01\":{\"all\":false,\"name\":\"dts_h\",\"Table\":{\"dtsh27_01\":{\"all\":true,\"name\":\"dtsh\"},\"dts29_02\":{\"all\":true,\"name\":\"dts\"},\"dts29_01\":{\"all\":true,\"name\":\"dts\"}}}}");
16 |         Record record = new Record();
17 |         record.setOperation(Operation.UPDATE);
18 |         String physicTable = "dts_h02.dtsh27_02";
19 |         String logicTable = "dts_h.dtsh";
20 | 
21 |         record.setObjectName(physicTable);
22 |         record = dbMapper.transform(record);
23 |         Assert.assertEquals(record.getObjectName(), logicTable);
24 | 
25 |         String physicDb = "dts_h01";
26 |         String logicDb = "dts_h";
27 |         record.setOperation(Operation.DDL);
28 |         record.setObjectName(physicDb);
29 |         record = dbMapper.transform(record);
30 |         Assert.assertEquals(record.getObjectName(), logicDb);
31 | 
32 |     }
33 | }
34 | 
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DTSConsumerAssignDemo.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 | 
3 | import com.aliyun.dts.subscribe.clients.common.RecordListener;
4 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
5 | import com.aliyun.dts.subscribe.clients.record.OperationType;
6 | import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
7 | import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 | 
11 | import java.util.Collections;
12 | import java.util.Map;
13 | 
14 | public class DTSConsumerAssignDemo {
15 |     private static final Logger log = LoggerFactory.getLogger(DTSConsumerAssignDemo.class);
16 | 
17 |     private final DTSConsumer dtsConsumer;
18 | 
19 |     public DTSConsumerAssignDemo(String brokerUrl, String topic, String sid, String userName, String password,
20 |                                  String checkpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode, boolean isForceUseInitCheckpoint) {
21 |         this.dtsConsumer = initDTSClient(brokerUrl, topic, sid, userName, password, checkpoint, subscribeMode, isForceUseInitCheckpoint);
22 |     }
23 | 
24 |     private DTSConsumer initDTSClient(String brokerUrl, String topic, String sid, String userName, String password,
25 |                                       String initCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode, boolean isForceUseInitCheckpoint) {
26 |         ConsumerContext consumerContext = new ConsumerContext(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);
27 | 
28 |         // if this parameter is set, force the consumer to initialize from initCheckpoint
29 |         consumerContext.setForceUseCheckpoint(isForceUseInitCheckpoint);
30 | 
31 |         // add user store
32 |         consumerContext.setUserRegisteredStore(new UserMetaStore());
33 | 
34 |         DTSConsumer dtsConsumer = new DefaultDTSConsumer(consumerContext);
35 | 
36 |         dtsConsumer.addRecordListeners(buildRecordListener());
37 | 
38 |         return dtsConsumer;
39 |     }
40 | 
41 |     public static Map<String, RecordListener> buildRecordListener() {
42 |         // users can implement their own listener
43 |         RecordListener mysqlRecordPrintListener = new RecordListener() {
44 |             @Override
45 |             public void consume(DefaultUserRecord record) {
46 | 
47 |                 OperationType operationType = record.getOperationType();
48 | 
49 |                 if (operationType.equals(OperationType.INSERT)
50 |                         || operationType.equals(OperationType.UPDATE)
51 |                         || operationType.equals(OperationType.DELETE)
52 |                         || operationType.equals(OperationType.DDL)
53 |                         || operationType.equals(OperationType.HEARTBEAT)) {
54 | 
55 |                     // consume record
56 |                     RecordListener recordPrintListener = new DefaultRecordPrintListener(DbType.MySQL);
57 | 
58 |                     recordPrintListener.consume(record);
59 | 
60 |                     // the commit method pushes the checkpoint update
61 |                     record.commit("");
62 |                 }
63 |             }
64 |         };
65 |         return Collections.singletonMap("mysqlRecordPrinter", mysqlRecordPrintListener);
66 |     }
67 | 
68 |     public void start() {
69 |         System.out.println("Start DTS subscription client...");
70 | 
71 |         dtsConsumer.start();
72 |     }
73 | 
74 |     public static void main(String[] args) {
75 |         // kafka broker url
76 |         String brokerUrl = "your broker url";
77 |         // topic to consume; the partition is always 0
78 |         String topic = "your dts topic";
79 |         // user, password and sid for auth
80 |         String sid = "your sid";
81 |         String userName = "your user name";
82 |         String password = "your password";
83 |         // initial checkpoint for the first seek: a Unix timestamp in seconds, e.g. 1566180200 for Mon Aug 19 10:03:20 CST 2019
84 |         String initCheckpoint = "start timestamp";
85 |         // subscribe mode requires a consumer group config (the Kafka consumer group is enabled); this demo uses assign mode
86 |         ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.ASSIGN;
87 |         // whether to force the configured checkpoint on start; resetting the checkpoint this way works in assign mode only
88 |         boolean isForceUseInitCheckpoint = true;
89 | 
90 |         DTSConsumerAssignDemo consumerDemo = new DTSConsumerAssignDemo(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode, isForceUseInitCheckpoint);
91 |         consumerDemo.start();
92 |     }
93 | }
94 | 
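initCheckpoint is a Unix timestamp in seconds, passed as a string. A JDK-only sketch for deriving it from a human-readable local time (the date and zone below are illustrative):

    import java.time.LocalDateTime;
    import java.time.ZoneId;

    public class CheckpointFromDate {
        public static void main(String[] args) {
            // Mon Aug 19 10:03:20 in China Standard Time -> 1566180200
            LocalDateTime startFrom = LocalDateTime.of(2019, 8, 19, 10, 3, 20);
            long epochSeconds = startFrom.atZone(ZoneId.of("Asia/Shanghai")).toEpochSecond();
            String initCheckpoint = String.valueOf(epochSeconds);
            System.out.println(initCheckpoint); // prints 1566180200
        }
    }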
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DTSConsumerSubscribeDemo.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 | 
3 | import com.aliyun.dts.subscribe.clients.common.RecordListener;
4 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
5 | import com.aliyun.dts.subscribe.clients.record.OperationType;
6 | import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
7 | import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 | 
11 | import java.util.Collections;
12 | import java.util.Map;
13 | 
14 | public class DTSConsumerSubscribeDemo {
15 |     private static final Logger log = LoggerFactory.getLogger(DTSConsumerSubscribeDemo.class);
16 | 
17 |     private final DTSConsumer dtsConsumer;
18 | 
19 |     public DTSConsumerSubscribeDemo(String brokerUrl, String topic, String sid, String userName, String password,
20 |                                     String checkpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode) {
21 |         this.dtsConsumer = initDTSClient(brokerUrl, topic, sid, userName, password, checkpoint, subscribeMode);
22 |     }
23 | 
24 |     private DTSConsumer initDTSClient(String brokerUrl, String topic, String sid, String userName, String password,
25 |                                       String initCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode) {
26 |         ConsumerContext consumerContext = new ConsumerContext(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);
27 | 
28 |         // add a user meta store to manage the checkpoint yourself
29 |         consumerContext.setUserRegisteredStore(new UserMetaStore());
30 | 
31 |         DTSConsumer dtsConsumer = new DefaultDTSConsumer(consumerContext);
32 | 
33 |         dtsConsumer.addRecordListeners(buildRecordListener());
34 | 
35 |         return dtsConsumer;
36 |     }
37 | 
38 |     public static Map<String, RecordListener> buildRecordListener() {
39 |         // users can implement their own listener
40 |         RecordListener mysqlRecordPrintListener = new RecordListener() {
41 |             @Override
42 |             public void consume(DefaultUserRecord record) {
43 | 
44 |                 OperationType operationType = record.getOperationType();
45 | 
46 |                 if (operationType.equals(OperationType.INSERT)
47 |                         || operationType.equals(OperationType.UPDATE)
48 |                         || operationType.equals(OperationType.DELETE)
49 |                         || operationType.equals(OperationType.DDL)
50 |                         || operationType.equals(OperationType.HEARTBEAT)) {
51 | 
52 |                     // consume record
53 |                     RecordListener recordPrintListener = new DefaultRecordPrintListener(DbType.MySQL);
54 | 
55 |                     recordPrintListener.consume(record);
56 | 
57 |                     record.commit("");
58 |                 }
59 |             }
60 |         };
61 |         return Collections.singletonMap("mysqlRecordPrinter", mysqlRecordPrintListener);
62 |     }
63 | 
64 |     public void start() {
65 |         System.out.println("Start DTS subscription client...");
66 | 
67 |         dtsConsumer.start();
68 |     }
69 | 
70 |     public static void main(String[] args) {
71 |         // kafka broker url
72 |         String brokerUrl = "your broker url";
73 |         // topic to consume; the partition is always 0
74 |         String topic = "your dts topic";
75 |         // user, password and sid for auth
76 |         String sid = "your sid";
77 |         String userName = "your user name";
78 |         String password = "your password";
79 |         // initial checkpoint for the first seek: a Unix timestamp in seconds, e.g. 1566180200 for Mon Aug 19 10:03:20 CST 2019
80 |         String initCheckpoint = "start timestamp";
81 |         // subscribe mode requires a consumer group config; the Kafka consumer group mechanism is enabled
82 |         ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.SUBSCRIBE;
83 | 
84 |         DTSConsumerSubscribeDemo consumerDemo = new DTSConsumerSubscribeDemo(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);
85 |         consumerDemo.start();
86 |     }
87 | }
88 | 
name"; 78 | String password = "your password"; 79 | // initial checkpoint for first seek(a timestamp to set, eg 1566180200 if you want (Mon Aug 19 10:03:21 CST 2019)) 80 | String initCheckpoint = "start timestamp"; 81 | // when use subscribe mode, group config is required. kafka consumer group is enabled 82 | ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.SUBSCRIBE; 83 | 84 | DTSConsumerSubscribeDemo consumerDemo = new DTSConsumerSubscribeDemo(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode); 85 | consumerDemo.start(); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/test/java/com/aliyun/dts/subscribe/clients/DistributedDTSConsumerDemo.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients; 2 | 3 | import com.aliyun.dms.subscribe.clients.DBMapper; 4 | import com.aliyun.dms.subscribe.clients.DistributedDTSConsumer; 5 | import com.aliyun.dms.subscribe.clients.DefaultDistributedDTSConsumer; 6 | 7 | import com.aliyun.dts.subscribe.clients.common.RecordListener; 8 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord; 9 | import com.aliyun.dts.subscribe.clients.record.OperationType; 10 | import com.aliyun.dts.subscribe.clients.recordprocessor.DbType; 11 | import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener; 12 | import com.aliyuncs.DefaultAcsClient; 13 | import com.aliyuncs.IAcsClient; 14 | import com.aliyuncs.dts.model.v20200101.DescribeDtsJobsRequest; 15 | import com.aliyuncs.dts.model.v20200101.DescribeDtsJobsResponse; 16 | import com.aliyuncs.dts.model.v20200101.DescribeSubscriptionMetaRequest; 17 | import com.aliyuncs.dts.model.v20200101.DescribeSubscriptionMetaResponse; 18 | import com.aliyuncs.exceptions.ClientException; 19 | import com.aliyuncs.profile.DefaultProfile; 20 | import org.apache.commons.lang3.StringUtils; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | import java.util.ArrayList; 25 | import java.util.Collections; 26 | import java.util.HashMap; 27 | import java.util.List; 28 | import java.util.Map; 29 | import java.util.stream.Collectors; 30 | 31 | 32 | public class DistributedDTSConsumerDemo { 33 | private static final Logger log = LoggerFactory.getLogger(DistributedDTSConsumerDemo.class); 34 | 35 | private final DistributedDTSConsumer distributedDTSConsumer; 36 | private Map topic2checkpoint = new HashMap<>(); 37 | private Map topic2Sid = new HashMap<>(); 38 | private ArrayList dbLists = new ArrayList<>(); 39 | private DBMapper dbMapper = new DBMapper(); 40 | 41 | public DistributedDTSConsumerDemo(String username, String password, String region, String groupId, String sid, String dtsInstanceId, 42 | String accessKeyId, String accessKeySecret, ConsumerContext.ConsumerSubscribeMode subscribeMode, String dProxy, 43 | String checkpoint, boolean isForceUseInitCheckpoint, boolean mapping) throws ClientException{ 44 | getSubscribeSubJobs(region, groupId, sid, dtsInstanceId, accessKeyId, accessKeySecret); 45 | 46 | dbMapper.setMapping(mapping); 47 | dbMapper.init(dbLists); 48 | log.debug("init dbList:" + dbLists); 49 | this.distributedDTSConsumer = initDistributedConsumer(username, password, subscribeMode, dProxy, checkpoint, isForceUseInitCheckpoint); 50 | 51 | } 52 | 53 | private DistributedDTSConsumer initDistributedConsumer(String username, String password, 54 | ConsumerContext.ConsumerSubscribeMode 
53 |     private DistributedDTSConsumer initDistributedConsumer(String username, String password,
54 |                                                             ConsumerContext.ConsumerSubscribeMode subscribeMode, String dProxy,
55 |                                                             String checkpoint, boolean isForceUseInitCheckpoint) {
56 | 
57 |         DefaultDistributedDTSConsumer distributedConsumer = new DefaultDistributedDTSConsumer();
58 |         // users can change the per-topic checkpoint here if needed
59 |         for (String topic : topic2Sid.keySet()) {
60 |             topic2checkpoint.put(topic, checkpoint);
61 |         }
62 | 
63 |         distributedConsumer.init(topic2checkpoint, dbMapper, dProxy, topic2Sid, username, password, subscribeMode, isForceUseInitCheckpoint,
64 |                 new UserMetaStore(), buildRecordListener());
65 | 
66 |         return distributedConsumer;
67 |     }
68 | 
69 |     public static Map<String, RecordListener> buildRecordListener() {
70 |         // users can implement their own listener
71 |         RecordListener mysqlRecordPrintListener = new RecordListener() {
72 |             @Override
73 |             public void consume(DefaultUserRecord record) {
74 | 
75 |                 OperationType operationType = record.getOperationType();
76 | 
77 |                 if (operationType.equals(OperationType.INSERT)
78 |                         || operationType.equals(OperationType.UPDATE)
79 |                         || operationType.equals(OperationType.DELETE)
80 |                         || operationType.equals(OperationType.HEARTBEAT)) {
81 | 
82 |                     // consume record
83 |                     RecordListener recordPrintListener = new DefaultRecordPrintListener(DbType.MySQL);
84 | 
85 |                     recordPrintListener.consume(record);
86 | 
87 |                     // the commit method pushes the checkpoint update
88 |                     record.commit("");
89 |                 }
90 |             }
91 |         };
92 |         return Collections.singletonMap("mysqlRecordPrinter", mysqlRecordPrintListener);
93 |     }
94 | 
95 |     public void start() {
96 |         distributedDTSConsumer.start();
97 |     }
98 | 
99 | 
100 |     public void getSubscribeSubJobs(String region, String groupId, String sid, String dtsInstanceId, String accessKeyId, String accessKeySecret) throws ClientException {
101 |         DefaultProfile profile = DefaultProfile.getProfile(region, accessKeyId, accessKeySecret);
102 |         IAcsClient client = new DefaultAcsClient(profile);
103 |         DescribeDtsJobsRequest request = new DescribeDtsJobsRequest();
104 | 
105 |         request.setGroupId(groupId);
106 |         request.setJobType("subscribe");
107 |         request.setRegion(region);
108 | 
109 |         DescribeDtsJobsResponse response = client.getAcsResponse(request);
110 |         List<String> subMigrationJobIds = response.getDtsJobList().stream().map(DescribeDtsJobsResponse.DtsJobStatus::getDtsJobId).collect(Collectors.toList());
111 | 
112 |         DescribeSubscriptionMetaRequest req = new DescribeSubscriptionMetaRequest();
113 |         req.setSid(sid);
114 |         req.setSubMigrationJobIds(String.join(",", subMigrationJobIds));
115 |         req.setDtsInstanceId(dtsInstanceId);
116 | 
117 |         DescribeSubscriptionMetaResponse res = client.getAcsResponse(req);
118 |         if (res.getSuccess().equalsIgnoreCase("true")) {
119 |             for (DescribeSubscriptionMetaResponse.SubscriptionMetaListItem meta : res.getSubscriptionMetaList()) {
120 |                 topic2Sid.put(meta.getTopic(), meta.getSid());
121 |                 dbLists.add(meta.getDBList());
122 | 
123 |                 if (StringUtils.isEmpty(meta.getDBList())) {
124 |                     log.warn("dbList is null, sid:" + sid + ",dtsInstanceId:" + dtsInstanceId + ",subMigrationJobIds:" + String.join(",", subMigrationJobIds));
125 |                 }
126 |             }
127 |         }
128 |         dbMapper.setClient(client);
129 |         dbMapper.setDescribeSubscriptionMetaRequest(req);
130 |     }
131 | 
132 |     public static void main(String[] args) throws ClientException {
133 |         // Subscription setup for distributed data sources such as PolarDBX10 (formerly DRDS): configure the AccessKey, instance id, parent DTS job id, subscription consumer group and related info.
134 |         String accessKeyId = "your access key id";
135 |         String accessKeySecret = "your access key secret";
136 |         String regionId = "your regionId";
137 |         String dtsInstanceId = "your dts instanceId";
138 |         String jobId = "your dts jobId";
139 |         String sid = "your sid";
140 |         String userName = "your user name";
141 |         String password = "your password";
142 |         String proxyUrl = "your proxyUrl";
143 |         // initial checkpoint for the first seek: a Unix timestamp in seconds, e.g. 1566180200 for Mon Aug 19 10:03:20 CST 2019
144 |         String checkpoint = "";
145 | 
146 |         // Convert physical database/table names to logical database/table names
147 |         boolean mapping = true;
148 |         // whether to force the configured checkpoint on start; resetting the checkpoint this way works in assign mode only
149 |         boolean isForceUseInitCheckpoint = false;
150 | 
151 |         ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.ASSIGN;
152 |         DistributedDTSConsumerDemo demo = new DistributedDTSConsumerDemo(userName, password, regionId,
153 |                 jobId, sid, dtsInstanceId, accessKeyId, accessKeySecret, subscribeMode, proxyUrl,
154 |                 checkpoint, isForceUseInitCheckpoint, mapping);
155 |         demo.start();
156 |     }
157 | }
158 | 
String sid = "your sid"; 140 | String userName = "your user name"; 141 | String password = "your password"; 142 | String proxyUrl = "your proxyUrl"; 143 | // initial checkpoint for first seek(a timestamp to set, eg 1566180200 if you want (Mon Aug 19 10:03:21 CST 2019)) 144 | String checkpoint = ""; 145 | 146 | // Convert physical database/table name to logical database/table name 147 | boolean mapping = true; 148 | // if force use config checkpoint when start. for checkpoint reset, only assign mode works 149 | boolean isForceUseInitCheckpoint = false; 150 | 151 | ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.ASSIGN; 152 | DistributedDTSConsumerDemo demo = new DistributedDTSConsumerDemo(userName, password, regionId, 153 | jobId, sid, dtsInstanceId, accessKeyId, accessKeySecret, subscribeMode, proxyUrl, 154 | checkpoint, isForceUseInitCheckpoint, mapping); 155 | demo.start(); 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /src/test/java/com/aliyun/dts/subscribe/clients/UserMetaStore.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients; 2 | 3 | import com.aliyun.dts.subscribe.clients.metastore.AbstractUserMetaStore; 4 | 5 | /** 6 | * store the checkpoint data in the shared storage, such us database, shared file storage... 7 | * this meta store need to be completed by consumer 8 | */ 9 | public class UserMetaStore extends AbstractUserMetaStore { 10 | 11 | @Override 12 | protected void saveData(String groupID, String toStoreJson) { 13 | 14 | } 15 | 16 | @Override 17 | protected String getData(String groupID) { 18 | return null; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/test/java/com/aliyun/dts/subscribe/clients/record/value/DateTimeTest.java: -------------------------------------------------------------------------------- 1 | package com.aliyun.dts.subscribe.clients.record.value; 2 | 3 | import org.junit.Assert; 4 | import org.junit.Test; 5 | 6 | import java.text.ParseException; 7 | import java.text.SimpleDateFormat; 8 | import java.time.LocalDateTime; 9 | import java.time.format.DateTimeFormatter; 10 | import java.util.Date; 11 | 12 | public class DateTimeTest { 13 | @Test 14 | public void testToUnixTimestamp() throws ParseException { 15 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016350L); 16 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016035L); 17 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016350L); 18 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016350L); 19 | } 20 | 21 | @Test 22 | public void testToEpochMilliSeconds() throws ParseException { 23 | System.out.println(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds()); 24 | System.out.println(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds()); 25 | System.out.println(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds()); 26 | System.out.println(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds()); 27 | 28 | Assert.assertEquals(new DateTime("2022-02-15 
16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016350L); 29 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016035L); 30 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016350L); 31 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016350L); 32 | 33 | } 34 | 35 | @Test 36 | public void testToString() { 37 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.35"); 38 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.035"); 39 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.35"); 40 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.35"); 41 | } 42 | 43 | @Test 44 | public void testDate() throws ParseException { 45 | DateTime dateTime = new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS); 46 | 47 | System.out.println(dateTime); 48 | 49 | SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSS"); 50 | 51 | 52 | System.out.println(formatter.parse(dateTime.toString()).getTime()); 53 | 54 | } 55 | } 56 | --------------------------------------------------------------------------------