getPartitionFields() {
127 | return null;
128 | }
129 |
130 | default void addUniqueIndexInfo(RecordIndexInfo indexInfo) {
131 | throw new RuntimeException("not impl");
132 | }
133 |
134 | default void addForeignIndexInfo(ForeignKeyIndexInfo indexInfo) {
135 | throw new RuntimeException("not impl");
136 | }
137 |
138 | /**
139 | * get the table charset
140 | * @return charset
141 | */
142 | default String getCharset() {
143 | return null;
144 | }
145 | }
146 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/RowImage.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | import com.aliyun.dts.subscribe.clients.record.value.Value;
4 | import org.apache.commons.lang3.tuple.Pair;
5 |
6 | import java.util.Map;
7 | import java.util.function.Function;
8 |
9 | public interface RowImage {
10 |
11 | /**
12 | *
13 | * @return a view of the values of the fields in this Record. Note that this method returns values only for
14 | * those entries in the Record's schema. This allows the Record to guarantee that it will return the values in
15 | * the order dictated by the schema.
16 | *
17 | *
18 | * NOTE: The array that is returned may be an underlying array that is backing
19 | * the contents of the Record. As such, modifying the array in any way may result in
20 | * modifying the record.
21 | */
22 | Value[] getValues();
23 |
24 | /**
25 | * @param pos the position of the value
26 | * @return the value at the specified @pos.
27 | */
28 | Value getValue(int pos);
29 |
30 | /**
31 | * @param fieldName the field name
32 | * @return the value of specified @fieldName.
33 | * This method differs from the other getValue methods, as a field matching @fieldName may not exist.
34 | */
35 | Value getValue(String fieldName);
36 |
37 | /**
38 | * @param recordField record field
39 | * @return the value of specified @recordField.
40 | */
41 | Value getValue(RecordField recordField);
42 |
43 | /**
44 | * @return the primary keys of current row image.
45 | */
46 | Pair[] getPrimaryKeyValues();
47 |
48 | /**
49 | * @return the merged field and value pairs of all unique keys in the current record.
50 | */
51 | Pair[] getUniqueKeyValues();
52 |
53 | /**
54 | * @return the foreign keys of current row image.
55 | */
56 | Pair[] getForeignKeyValues();
57 |
58 | /**
59 | * Converts the Record into a Map whose keys are the same as the Record's field names and the values are the field values
60 | * @param filedNameResolver field name resolver
61 | * @param valueResolver value resolver
62 | * @return a Map that represents the values in the Record
63 | */
64 | Map toMap(Function filedNameResolver, Function valueResolver);
65 |
66 | /**
67 | * The total size of all values in current row image.
68 | * @return size
69 | */
70 | long size();
71 | }
72 |
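
A minimal, hypothetical usage sketch of the RowImage interface above (the class and method names here are illustrative only; `schema` and `rowImage` are assumed to come from a consumed UserRecord):

import com.aliyun.dts.subscribe.clients.record.RecordField;
import com.aliyun.dts.subscribe.clients.record.RecordSchema;
import com.aliyun.dts.subscribe.clients.record.RowImage;
import com.aliyun.dts.subscribe.clients.record.value.Value;

// Illustrative sketch, not part of the SDK.
class RowImageDumpSketch {
    // Print every field/value pair; a null image (e.g. the before image of an INSERT) is skipped.
    static void dump(RecordSchema schema, RowImage rowImage) {
        if (null == rowImage) {
            return;
        }
        for (RecordField field : schema.getFields()) {
            Value value = rowImage.getValue(field);
            System.out.println(field.getFieldName() + " = " + value);
        }
    }
}
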
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/SimplifiedRecordField.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | import com.aliyun.dts.subscribe.clients.record.value.Value;
4 |
5 | import java.util.Collections;
6 | import java.util.Set;
7 |
8 | public class SimplifiedRecordField implements RecordField {
9 |
10 | private final String fieldName;
11 | private final int rawDataTypeNum;
12 | private boolean isPrimaryKey;
13 | private boolean isUniqueKey;
14 |
15 | private int fieldPosition;
16 |
17 | public SimplifiedRecordField(String fieldName, int rawDataTypeNum) {
18 | this.fieldName = fieldName;
19 | this.rawDataTypeNum = rawDataTypeNum;
20 | }
21 |
22 | public String getFieldName() {
23 | return fieldName;
24 | }
25 |
26 | public Set getAliases() {
27 | return Collections.emptySet();
28 | }
29 |
30 | public int getRawDataTypeNum() {
31 | return rawDataTypeNum;
32 | }
33 |
34 | public Value getDefaultValue() {
35 | return null;
36 | }
37 |
38 | public boolean isNullable() {
39 | return true;
40 | }
41 |
42 | public boolean isUnique() {
43 | return isUniqueKey;
44 | }
45 |
46 | public RecordField setUnique(boolean isUnique) {
47 | isUniqueKey = isUnique;
48 | return this;
49 | }
50 |
51 | public boolean isPrimary() {
52 | return isPrimaryKey;
53 | }
54 |
55 | public boolean setPrimary(boolean isPrimary) {
56 | isPrimaryKey = isPrimary;
57 | return isPrimaryKey;
58 | }
59 |
60 | public boolean isIndexed() {
61 | return isPrimaryKey || isUniqueKey;
62 | }
63 |
64 | public boolean isAutoIncrement() {
65 | return false;
66 | }
67 |
68 | public int keySeq() {
69 | return 0;
70 | }
71 |
72 | public int getFieldPosition() {
73 | return fieldPosition;
74 | }
75 |
76 | public void setFieldPosition(int fieldPosition) {
77 | this.fieldPosition = fieldPosition;
78 | }
79 |
80 | public int getScale() {
81 | return 0;
82 | }
83 |
84 | @Override
85 | public String toString() {
86 | return "{" +
87 | "fieldName='" + fieldName + '\'' +
88 | ", rawDataTypeNum=" + rawDataTypeNum +
89 | ", isPrimaryKey=" + isPrimaryKey +
90 | ", isUniqueKey=" + isUniqueKey +
91 | ", fieldPosition=" + fieldPosition +
92 | '}';
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/UserRecord.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record;
2 |
3 | public interface UserRecord {
4 | /**
5 | * @return Get the record unique id.
6 | */
7 | long getId();
8 |
9 | /**
10 | * @return Get the record source timestamp.
11 | */
12 | long getSourceTimestamp();
13 |
14 | /**
15 | * @return Get the operation which causes current record.
16 | */
17 | OperationType getOperationType();
18 |
19 | /**
20 | * @return Get the schema of current record data.
21 | */
22 | RecordSchema getSchema();
23 |
24 | /**
25 | * @return Get the before row image of current record.
26 | */
27 | RowImage getBeforeImage();
28 |
29 | /**
30 | * @return Get the after row image of current record.
31 | */
32 | RowImage getAfterImage();
33 | }
34 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/impl/DefaultRowImage.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.impl;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.NullableOptional;
4 | import com.aliyun.dts.subscribe.clients.record.*;
5 | import com.aliyun.dts.subscribe.clients.record.value.Value;
6 | import org.apache.commons.lang3.StringUtils;
7 | import org.apache.commons.lang3.tuple.ImmutablePair;
8 | import org.apache.commons.lang3.tuple.Pair;
9 |
10 | import java.util.*;
11 | import java.util.function.Function;
12 | import java.util.stream.Collectors;
13 |
14 | public class DefaultRowImage implements RowImage {
15 |
16 | private final RecordSchema recordSchema;
17 | private final Value[] values;
18 | private long size;
19 |
20 | public DefaultRowImage(RecordSchema recordSchema) {
21 | this.recordSchema = recordSchema;
22 | this.values = new Value[recordSchema.getFieldCount()];
23 | }
24 |
25 | @Override
26 | public Value[] getValues() {
27 | return this.values;
28 | }
29 |
30 | @Override
31 | public Value getValue(int index) {
32 | return values[index];
33 | }
34 |
35 | @Override
36 | public Value getValue(RecordField field) {
37 | return getValue(field.getFieldPosition());
38 | }
39 |
40 | @Override
41 | public Value getValue(String fieldName) {
42 | NullableOptional recordField = recordSchema.getField(fieldName);
43 | return recordField.map(field -> getValue(field))
44 | .orElse(null);
45 | }
46 |
47 | private void accumulateSize(Value value) {
48 | if (null != value) {
49 | size += value.size();
50 | }
51 | }
52 |
53 | public void setValue(int i, Value value) {
54 | values[i] = value;
55 |
56 | accumulateSize(value);
57 | }
58 |
59 | public void setValue(String fieldName, Value value) {
60 | RecordField recordField = recordSchema.getField(fieldName)
61 | .orElse(null);
62 | setValue(recordField, value);
63 | }
64 |
65 | public void setValue(RecordField field, Value value) {
66 | int index = field.getFieldPosition();
67 | setValue(index, value);
68 | }
69 |
70 | @Override
71 | public Map toMap(Function filedNameResolver, Function valueResolver) {
72 | Map valueMap = new TreeMap<>();
73 | int i = 0;
74 |
75 | for (RecordField field : recordSchema.getFields()) {
76 | valueMap.put(filedNameResolver == null ? field.getFieldName() : filedNameResolver.apply(field.getFieldName()),
77 | valueResolver == null ? values[i] : valueResolver.apply(values[i]));
78 | i++;
79 | }
80 |
81 | return valueMap;
82 | }
83 |
84 | public Pair[] buildFieldValuePairArray(Collection recordFields) {
85 | Pair[] rs = new ImmutablePair[recordFields.size()];
86 | int index = 0;
87 | for (RecordField recordField : recordFields) {
88 | rs[index++] = Pair.of(recordField, getValue(recordField));
89 | }
90 |
91 | return rs;
92 | }
93 |
94 | @Override
95 | public Pair[] getPrimaryKeyValues() {
96 | RecordIndexInfo recordIndexInfo = recordSchema.getPrimaryIndexInfo();
97 | if (null == recordIndexInfo) {
98 | return null;
99 | }
100 |
101 | return buildFieldValuePairArray(recordIndexInfo.getIndexFields());
102 | }
103 |
104 | private Pair[] buildAllFieldValuePairArray(List<? extends RecordIndexInfo> recordIndexInfoList) {
105 | if (null == recordIndexInfoList || recordIndexInfoList.isEmpty()) {
106 | return null;
107 | }
108 |
109 | Set recordFieldSet = recordIndexInfoList.stream()
110 | .flatMap(indexInfo -> indexInfo.getIndexFields().stream())
111 | .collect(Collectors.toSet());
112 |
113 | return buildFieldValuePairArray(recordFieldSet);
114 | }
115 |
116 | @Override
117 | public Pair[] getUniqueKeyValues() {
118 | List recordIndexInfoList = recordSchema.getUniqueIndexInfo();
119 | return buildAllFieldValuePairArray(recordIndexInfoList);
120 | }
121 |
122 | @Override
123 | public Pair[] getForeignKeyValues() {
124 | List recordIndexInfoList = recordSchema.getForeignIndexInfo();
125 | return buildAllFieldValuePairArray(recordIndexInfoList);
126 | }
127 |
128 | @Override
129 | public long size() {
130 | return size;
131 | }
132 |
133 | public String toString() {
134 |
135 | StringBuilder sb = new StringBuilder();
136 |
137 | sb.append("[");
138 |
139 | recordSchema.getFields().forEach(recordField ->
140 | { sb.append("Field ")
141 | .append("[")
142 | .append(recordField.getFieldName())
143 | .append("]")
144 | .append(" ")
145 | .append("[")
146 | .append(getValue(recordField))
147 | .append("]")
148 | .append("\n");});
149 |
150 | sb.append("]");
151 |
152 | return sb.toString();
153 | }
154 | }
155 |
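
A small, hypothetical sketch of populating a DefaultRowImage; it assumes a schema whose field at position 0 is a string column and which also contains a field named "age":

import com.aliyun.dts.subscribe.clients.record.RecordSchema;
import com.aliyun.dts.subscribe.clients.record.impl.DefaultRowImage;
import com.aliyun.dts.subscribe.clients.record.value.IntegerNumeric;
import com.aliyun.dts.subscribe.clients.record.value.StringValue;

// Illustrative sketch, not part of the SDK.
class RowImageBuildSketch {
    static DefaultRowImage build(RecordSchema schema) {
        DefaultRowImage image = new DefaultRowImage(schema); // allocates one value slot per schema field
        image.setValue(0, new StringValue("alice"));         // set by field position
        image.setValue("age", new IntegerNumeric(30));       // set by field name, resolved through the schema
        return image;                                        // image.size() now reflects both stored values
    }
}
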
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/BinaryEncodingObject.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.BytesUtil;
4 |
5 | import java.nio.ByteBuffer;
6 |
7 | public class BinaryEncodingObject implements Value {
8 |
9 | private ObjectType objectType;
10 | private ByteBuffer binaryData;
11 |
12 | public BinaryEncodingObject(ObjectType objectType, ByteBuffer binaryData) {
13 | this.objectType = objectType;
14 | this.binaryData = binaryData;
15 | }
16 |
17 | @Override
18 | public ValueType getType() {
19 | return ValueType.BINARY_ENCODING_OBJECT;
20 | }
21 |
22 | @Override
23 | public ByteBuffer getData() {
24 | return binaryData;
25 | }
26 |
27 | public ObjectType getObjectType() {
28 | return this.objectType;
29 | }
30 |
31 | @Override
32 | public long size() {
33 | if (null != binaryData) {
34 | return binaryData.capacity();
35 | }
36 |
37 | return 0L;
38 | }
39 |
40 | public String toString() {
41 | return BytesUtil.byteBufferToHexString(binaryData);
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/BitValue.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import java.io.UnsupportedEncodingException;
4 | import java.nio.ByteBuffer;
5 |
6 | public class BitValue implements Value {
7 | private ByteBuffer value;
8 |
9 | public BitValue() {
10 | }
11 |
12 | public BitValue(byte[] value) {
13 | this.value = ByteBuffer.wrap(value);
14 | }
15 |
16 | public BitValue(ByteBuffer value) {
17 | this.value = value;
18 | }
19 |
20 | @Override
21 | public ValueType getType() {
22 | return ValueType.BIT;
23 | }
24 |
25 | @Override
26 | public ByteBuffer getData() {
27 | return value;
28 | }
29 |
30 | @Override
31 | public String toString() {
32 | try {
33 | return new String(value.array(), "utf-8");
34 | } catch (UnsupportedEncodingException e) {
35 | throw new RuntimeException(e);
36 | }
37 | }
38 |
39 | @Override
40 | public long size() {
41 | if (null != value) {
42 | return value.capacity();
43 | }
44 |
45 | return 0L;
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/DecimalNumeric.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | import java.math.BigDecimal;
5 |
6 | public class DecimalNumeric implements Value {
7 |
8 | private BigDecimal data;
9 |
10 | public DecimalNumeric() {
11 | }
12 |
13 | public DecimalNumeric(BigDecimal data) {
14 | this.data = data;
15 | }
16 |
17 | public DecimalNumeric(String data) {
18 | if (null == data) {
19 | return;
20 | }
21 | this.data = new BigDecimal(data);
22 | }
23 |
24 | @Override
25 | public ValueType getType() {
26 | return ValueType.DECIMAL_NUMERIC;
27 | }
28 |
29 | @Override
30 | public BigDecimal getData() {
31 | return this.data;
32 | }
33 |
34 | @Override
35 | public String toString() {
36 | if (null == this.data) {
37 | return null;
38 | }
39 | return this.data.toString();
40 | }
41 |
42 | @Override
43 | public long size() {
44 | if (null != data) {
45 | return data.toBigInteger().toByteArray().length;
46 | }
47 | return 0L;
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/FloatNumeric.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | public class FloatNumeric implements Value {
4 |
5 | private Double data;
6 |
7 | public FloatNumeric(Double data) {
8 | this.data = data;
9 | }
10 |
11 | @Override
12 | public ValueType getType() {
13 | return ValueType.FLOAT_NUMERIC;
14 | }
15 |
16 | @Override
17 | public Double getData() {
18 | return this.data;
19 | }
20 |
21 | @Override
22 | public String toString() {
23 | return Double.toString(this.data);
24 | }
25 |
26 | @Override
27 | public long size() {
28 | return Double.BYTES;
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/IntegerNumeric.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | import java.math.BigInteger;
5 |
6 | public class IntegerNumeric implements Value {
7 |
8 | private BigInteger data;
9 |
10 | public IntegerNumeric() {
11 | }
12 |
13 | public IntegerNumeric(long value) {
14 | data = BigInteger.valueOf(value);
15 | }
16 |
17 | public IntegerNumeric(BigInteger value) {
18 | this.data = value;
19 | }
20 |
21 | public IntegerNumeric(String value) {
22 | this.data = new BigInteger(value);
23 | }
24 |
25 | @Override
26 | public ValueType getType() {
27 | return ValueType.INTEGER_NUMERIC;
28 | }
29 |
30 | public BigInteger getData() {
31 | return this.data;
32 | }
33 |
34 | @Override
35 | public String toString() {
36 | return this.data.toString();
37 | }
38 |
39 | @Override
40 | public long size() {
41 | if (null != data) {
42 | return data.toByteArray().length;
43 | }
44 |
45 | return 0L;
46 | }
47 |
48 | public IntegerNumeric parse(Object rawData) {
49 | if (null == rawData) {
50 | return null;
51 | }
52 |
53 | return new IntegerNumeric(rawData.toString());
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/NoneValue.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | /**
4 | * 占位字段,不具有任何意义
5 | */
6 | public class NoneValue implements Value {
7 |
8 | @Override
9 | public ValueType getType() {
10 | return ValueType.NONE;
11 | }
12 |
13 | @Override
14 | public Boolean getData() {
15 | return false;
16 | }
17 |
18 | @Override
19 | public long size() {
20 | return 0L;
21 | }
22 | }
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/ObjectType.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | public enum ObjectType {
4 |
5 | BINARY, BOOL, BLOB, XML, JSON, TEXT, BFILE, RAW, LONG_RAW, ROWID, UROWID, ENUM, SET, BYTEA, GEOMETRY, XTYPE, UUID;
6 |
7 | public static ObjectType parse(String type) {
8 |
9 | if (null == type) {
10 | return XTYPE;
11 | }
12 | type = type.toUpperCase();
13 |
14 | ObjectType[] objectTypes = ObjectType.values();
15 | for (ObjectType objectType : objectTypes) {
16 | if (objectType.name().equals(type)) {
17 | return objectType;
18 | }
19 | }
20 | return XTYPE;
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/SpecialNumericType.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | class SpecialNumeric implements Value {
5 |
6 | private static final String NAN = "NaN";
7 | private static final String INFINITY = "Infinity";
8 | private static final String NEGATIVE_INFINITY = "-Infinity";
9 | private static final String NEAR = "~";
10 |
11 | private SpecialNumericType value;
12 |
13 | public SpecialNumeric(SpecialNumericType value) {
14 | this.value = value;
15 | }
16 |
17 | public SpecialNumeric(String text) {
18 | this(SpecialNumericType.parseFrom(text));
19 | }
20 |
21 | @Override
22 | public ValueType getType() {
23 | return ValueType.SPECIAL_NUMERIC;
24 | }
25 |
26 | @Override
27 | public SpecialNumericType getData() {
28 | return this.value;
29 | }
30 |
31 | @Override
32 | public String toString() {
33 | return this.value.toString();
34 | }
35 |
36 | @Override
37 | public long size() {
38 | return Integer.BYTES;
39 | }
40 |
41 | public enum SpecialNumericType {
42 | NOT_ASSIGNED,
43 | INFINITY,
44 | NEGATIVE_INFINITY,
45 | NOT_A_NUMBER,
46 | NAN,
47 | NEAR;
48 |
49 | public static SpecialNumericType parseFrom(String value) {
50 | if (SpecialNumeric.NAN.equals(value)) {
51 | return NAN;
52 | }
53 | if (SpecialNumeric.NEAR.equals(value)) {
54 | return NEAR;
55 | }
56 | if (SpecialNumeric.INFINITY.equals(value)) {
57 | return INFINITY;
58 | }
59 | if (SpecialNumeric.NEGATIVE_INFINITY.equals(value)) {
60 | return NEGATIVE_INFINITY;
61 | }
62 | return SpecialNumericType.valueOf(value);
63 | }
64 |
65 | @Override
66 | public String toString() {
67 | if (this.equals(NAN)) {
68 | return SpecialNumeric.NAN;
69 | }
70 | if (this.equals(NEAR)) {
71 | return SpecialNumeric.NEAR;
72 | }
73 | if (this.equals(INFINITY)) {
74 | return SpecialNumeric.INFINITY;
75 | }
76 | if (this.equals(NEGATIVE_INFINITY)) {
77 | return SpecialNumeric.NEGATIVE_INFINITY;
78 | }
79 | return this.name();
80 | }
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/StringValue.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.BytesUtil;
4 | import com.aliyun.dts.subscribe.clients.common.JDKCharsetMapper;
5 | import com.aliyun.dts.subscribe.clients.common.function.SwallowException;
6 | import org.apache.commons.lang3.StringUtils;
7 |
8 | import java.io.UnsupportedEncodingException;
9 | import java.nio.ByteBuffer;
10 |
11 | public class StringValue implements Value {
12 |
13 | public static final String DEFAULT_CHARSET = "UTF-8";
14 | private ByteBuffer data;
15 | private String charset;
16 |
17 | public StringValue(ByteBuffer data, String charset) {
18 | this.data = data;
19 | this.charset = charset;
20 | }
21 |
22 | public StringValue(String data) {
23 | this(ByteBuffer.wrap(
24 | SwallowException.callAndThrowRuntimeException(() -> data.getBytes(DEFAULT_CHARSET))),
25 | DEFAULT_CHARSET);
26 | }
27 |
28 | public String getCharset() {
29 | return this.charset;
30 | }
31 |
32 | @Override
33 | public ValueType getType() {
34 | return ValueType.STRING;
35 | }
36 |
37 | @Override
38 | public ByteBuffer getData() {
39 | return this.data;
40 | }
41 |
42 | @Override
43 | public String toString() {
44 |
45 | // just return hex string if missing charset
46 | if (StringUtils.isEmpty(charset)) {
47 | return BytesUtil.byteBufferToHexString(data);
48 | }
49 |
50 | // try encode data by specified charset
51 | try {
52 | if (!StringUtils.isEmpty(charset)) {
53 | return new String(data.array(), charset);
54 | }
55 | return new String(data.array());
56 | } catch (UnsupportedEncodingException e1) {
57 | try {
58 | return new String(data.array(), JDKCharsetMapper.getJDKECharset(charset));
59 | } catch (UnsupportedEncodingException e2) {
60 | return charset + "_'" + BytesUtil.byteBufferToHexString(data) + "'";
61 | }
62 | }
63 | }
64 |
65 | public String toString(String targetCharset) {
66 | //TODO(huoyu): convert
67 | return "to impl";
68 | }
69 |
70 | @Override
71 | public long size() {
72 | if (null != data) {
73 | return data.capacity();
74 | }
75 |
76 | return 0L;
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/TextEncodingObject.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import org.apache.commons.lang3.StringUtils;
4 |
5 | public class TextEncodingObject implements Value {
6 |
7 | private ObjectType objectType;
8 | private String data;
9 |
10 | public TextEncodingObject(ObjectType objectType, String data) {
11 | this.objectType = objectType;
12 | this.data = data;
13 | }
14 |
15 | @Override
16 | public ValueType getType() {
17 | return ValueType.TEXT_ENCODING_OBJECT;
18 | }
19 |
20 | @Override
21 | public String getData() {
22 | return this.data;
23 | }
24 |
25 | @Override
26 | public long size() {
27 | return StringUtils.length(data);
28 | }
29 |
30 | public ObjectType getObjectType() {
31 | return objectType;
32 | }
33 |
34 | public String toString() {
35 | return data;
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/UnixTimestamp.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | import java.sql.Timestamp;
5 | import java.text.DateFormat;
6 |
7 | /**
8 | * 毫秒位可能存在
9 | */
10 | public class UnixTimestamp implements Value {
11 |
12 | private long timestampSec;
13 | private Integer micro;
14 |
15 | public UnixTimestamp() {
16 | this(0L, null);
17 | }
18 |
19 | public UnixTimestamp(long timestampSec, Integer micro) {
20 | this.timestampSec = timestampSec;
21 | this.micro = micro;
22 | }
23 |
24 | public void setTimestampSec(long second) {
25 | this.timestampSec = second;
26 | }
27 |
28 | public long getTimestampSec() {
29 | return this.timestampSec;
30 | }
31 |
32 | public void setMicro(Integer micro) {
33 | this.micro = micro;
34 | }
35 |
36 | public Integer getMicro() {
37 | return this.micro;
38 | }
39 |
40 | @Override
41 | public ValueType getType() {
42 | return ValueType.UNIX_TIMESTAMP;
43 | }
44 |
45 | @Override
46 | public String getData() {
47 | return toString();
48 | }
49 |
50 | @Override
51 | public String toString() {
52 | return toString(null);
53 | }
54 |
55 | public String toString(DateFormat dateFormat) {
56 | Timestamp timestamp = toJdbcTimestamp();
57 | if (null == dateFormat) {
58 | return timestamp.toString();
59 | } else {
60 | return dateFormat.format(timestamp);
61 | }
62 | }
63 |
64 | public Timestamp toJdbcTimestamp() {
65 | Timestamp timestamp = new Timestamp(this.timestampSec * 1000);
66 | if (null != this.micro) {
67 | timestamp.setNanos(this.micro * 1000);
68 | }
69 | return timestamp;
70 | }
71 |
72 | @Override
73 | public long size() {
74 | return Long.BYTES + Integer.BYTES;
75 | }
76 | }
77 |
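
A worked example with hypothetical values of the second/fraction split used above: toJdbcTimestamp() multiplies the seconds by 1000 for the millisecond constructor and stores the fractional part via setNanos(micro * 1000):

import com.aliyun.dts.subscribe.clients.record.value.UnixTimestamp;
import java.sql.Timestamp;

// Illustrative sketch, not part of the SDK.
class UnixTimestampSketch {
    public static void main(String[] args) {
        // 1609459200 = 2021-01-01 00:00:00 UTC, plus a fractional part of 123456 (stored as 123456000 nanos).
        UnixTimestamp ts = new UnixTimestamp(1609459200L, 123456);
        Timestamp jdbc = ts.toJdbcTimestamp();
        System.out.println(jdbc); // prints with a .123456 fraction; the wall-clock part depends on the JVM time zone
    }
}
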
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/Value.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | public interface Value {
4 | /**
5 | * @return get value type
6 | */
7 | ValueType getType();
8 |
9 | /**
10 | * @return Get the internal data of current value.
11 | */
12 | T getData();
13 |
14 | /**
15 | * @return the current value converted to a string using UTF-8 encoding.
16 | */
17 | String toString();
18 |
19 | /**
20 | * @return Get the size of current value.
21 | */
22 | long size();
23 | }
24 |
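
A hypothetical sketch of dispatching on a Value by its ValueType; real consumers would typically branch per type before interpreting getData():

import com.aliyun.dts.subscribe.clients.record.value.Value;

// Illustrative sketch, not part of the SDK.
class ValueRenderSketch {
    // Render a value for logging; NONE is a placeholder, everything else falls back to toString().
    static String render(Value value) {
        if (null == value) {
            return "NULL";
        }
        switch (value.getType()) {
            case NONE:
                return "<none>";
            default:
                return value.toString();
        }
    }
}
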
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/ValueType.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | public enum ValueType {
4 | BIT,
5 | INTEGER_NUMERIC,
6 | FLOAT_NUMERIC,
7 | DECIMAL_NUMERIC,
8 | SPECIAL_NUMERIC,
9 | STRING,
10 | DATETIME,
11 | UNIX_TIMESTAMP,
12 | TEXT_ENCODING_OBJECT,
13 | BINARY_ENCODING_OBJECT,
14 | WKB_GEOMETRY,
15 | WKT_GEOMETRY,
16 | NONE
17 | }
18 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/WKBGeometry.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 |
4 | import com.aliyun.dts.subscribe.clients.common.BytesUtil;
5 | import com.aliyun.dts.subscribe.clients.common.GeometryUtil;
6 | import com.vividsolutions.jts.io.ParseException;
7 |
8 | import java.nio.ByteBuffer;
9 |
10 | public class WKBGeometry implements Value {
11 |
12 | private long srid;
13 | private ByteBuffer data;
14 |
15 | public WKBGeometry(ByteBuffer data) {
16 | this.data = data;
17 | }
18 |
19 | public void setData(ByteBuffer data) {
20 | this.data = data;
21 | }
22 |
23 | @Override
24 | public ValueType getType() {
25 | return ValueType.WKB_GEOMETRY;
26 | }
27 |
28 | @Override
29 | public ByteBuffer getData() {
30 | return this.data;
31 | }
32 |
33 | @Override
34 | public long size() {
35 | if (null != data) {
36 | return data.capacity();
37 | }
38 |
39 | return 0L;
40 | }
41 |
42 | public String toString() {
43 | try {
44 | return GeometryUtil.fromWKBToWKTText(data);
45 | } catch (ParseException ex) {
46 | return BytesUtil.byteBufferToHexString(data);
47 | }
48 | }
49 |
50 | public WKBGeometry parse(Object rawData) {
51 | if (null == rawData) {
52 | return null;
53 | }
54 |
55 | if (rawData instanceof byte[]) {
56 | return new WKBGeometry(ByteBuffer.wrap((byte[]) rawData));
57 | }
58 |
59 | return new WKBGeometry(BytesUtil.hexStringToByteBuffer(rawData.toString()));
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/record/value/WKTGeometry.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import org.apache.commons.lang3.StringUtils;
4 |
5 | public class WKTGeometry implements Value {
6 |
7 | private long srid;
8 | private String data;
9 |
10 | public WKTGeometry(String data) {
11 | this.data = data;
12 | }
13 |
14 | @Override
15 | public ValueType getType() {
16 | return ValueType.WKT_GEOMETRY;
17 | }
18 |
19 | @Override
20 | public String getData() {
21 | return this.data;
22 | }
23 |
24 | @Override
25 | public long size() {
26 | if (null != data) {
27 | return StringUtils.length(data);
28 | }
29 |
30 | return 0L;
31 | }
32 |
33 | public String toString() {
34 | return data;
35 | }
36 |
37 | public WKTGeometry parse(Object rawData) {
38 | if (null == rawData) {
39 | return null;
40 | }
41 |
42 | return new WKTGeometry(rawData.toString());
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/ClusterSwitchListener.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerInterceptor;
4 | import org.apache.kafka.clients.consumer.ConsumerRecords;
5 | import org.apache.kafka.common.ClusterResource;
6 | import org.apache.kafka.common.ClusterResourceListener;
7 | import org.apache.kafka.common.KafkaException;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import java.util.Map;
12 |
13 | /**
14 | * We recommend that users register this listener.
15 | * When the origin cluster becomes unavailable and a new cluster is created by HA (the high-availability service),
16 | * the cluster id changes. We want to warn the user that a new cluster is now serving.
17 | * More importantly, we want the user to recreate the KafkaConsumer and use a timestamp to re-seek the offset.
18 | * If the user follows this guidance, less duplicated data will be pushed.
19 | * Otherwise, more duplicated data may be consumed after the switch.
20 | */
21 | public class ClusterSwitchListener implements ClusterResourceListener, ConsumerInterceptor {
22 | private final static Logger logger = LoggerFactory.getLogger(ClusterSwitchListener.class);
23 | private ClusterResource originClusterResource = null;
24 |
25 | public ConsumerRecords onConsume(ConsumerRecords records) {
26 | return records;
27 | }
28 |
29 |
30 | public void close() {
31 | }
32 |
33 | public void onCommit(Map offsets) {
34 | }
35 |
36 |
37 | public void onUpdate(ClusterResource clusterResource) {
38 | synchronized (this) {
39 | if (null == originClusterResource) {
40 | logger.info("Cluster updated to " + clusterResource.clusterId());
41 | originClusterResource = clusterResource;
42 | } else {
43 | if (clusterResource.clusterId().equals(originClusterResource.clusterId())) {
44 | logger.info("Cluster not changed on update:" + clusterResource.clusterId());
45 | } else {
46 | throw new ClusterSwitchException("Cluster changed from " + originClusterResource.clusterId() + " to " + clusterResource.clusterId()
47 | + ", consumer requires restart");
48 | }
49 | }
50 | }
51 | }
52 |
53 | public void configure(Map configs) {
54 | }
55 |
56 | public static class ClusterSwitchException extends KafkaException {
57 | public ClusterSwitchException(String message, Throwable cause) {
58 | super(message, cause);
59 | }
60 |
61 | public ClusterSwitchException(String message) {
62 | super(message);
63 | }
64 |
65 | public ClusterSwitchException(Throwable cause) {
66 | super(cause);
67 | }
68 |
69 | public ClusterSwitchException() {
70 | super();
71 | }
72 |
73 | }
74 | }
75 |
76 |
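
Because the listener implements ConsumerInterceptor, one plausible way to register it (a hedged sketch, not necessarily the SDK's own wiring) is through the standard Kafka `interceptor.classes` consumer property:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import com.aliyun.dts.subscribe.clients.recordfetcher.ClusterSwitchListener;

// Illustrative sketch, not part of the SDK.
class InterceptorWiringSketch {
    static Properties withClusterSwitchListener(Properties consumerProps) {
        // Kafka instantiates the interceptor and feeds it cluster metadata, so onUpdate() can detect a switch.
        consumerProps.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ClusterSwitchListener.class.getName());
        return consumerProps;
    }
}

When the cluster id changes, poll() then surfaces ClusterSwitchException, and the caller is expected to recreate the KafkaConsumer and re-seek by timestamp, as the class Javadoc describes.
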
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/ConsumerWrap.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | import com.aliyun.dts.subscribe.clients.ConsumerContext;
4 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
5 | import org.apache.commons.lang3.StringUtils;
6 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
7 | import org.apache.kafka.clients.consumer.ConsumerRecords;
8 | import org.apache.kafka.clients.consumer.KafkaConsumer;
9 | import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
10 | import org.apache.kafka.common.TopicPartition;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import java.io.Closeable;
15 | import java.util.*;
16 | import java.util.concurrent.atomic.AtomicBoolean;
17 | import java.util.function.Supplier;
18 |
19 | import static com.aliyun.dts.subscribe.clients.recordfetcher.Names.*;
20 | import static com.aliyun.dts.subscribe.clients.common.Util.mergeSourceKafkaProperties;
21 |
22 |
23 | public abstract class ConsumerWrap implements Closeable {
24 | private static final Logger log = LoggerFactory.getLogger(ConsumerWrap.class);
25 |
26 | // Directly set the fetch offset using the given offset; we do not check whether the offset is valid.
27 | public abstract void setFetchOffsetByOffset(TopicPartition topicPartition, Checkpoint checkpoint);
28 | public abstract void setFetchOffsetByTimestamp(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException);
29 | // Assign mode does not use automatic rebalancing; we recommend this way to consume records, with offsets committed by the user itself.
30 | public abstract void assignTopic(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException);
31 | // The subscribe function uses consumer-group mode, so multiple consumers sharing the same group id can build a highly available consuming system.
32 | // Still, we recommend disabling auto commit and letting the user commit the offset manually.
33 | // This delays the offset commit until the record is really consumed by the business logic, which strongly protects against data loss.
34 | public abstract void subscribeTopic(TopicPartition topicPartition, Supplier streamCheckpoint, boolean isCheckpointNotExistThrowException);
35 |
36 |
37 | public abstract ConsumerRecords poll();
38 |
39 | public abstract KafkaConsumer getRawConsumer();
40 |
41 | public static class DefaultConsumerWrap extends ConsumerWrap {
42 | private AtomicBoolean firstStart = new AtomicBoolean(true);
43 | private KafkaConsumer consumer;
44 | private final long poolTimeOut;
45 |
46 | private final ConsumerContext consumerContext;
47 |
48 | public DefaultConsumerWrap(Properties properties, ConsumerContext consumerContext) {
49 | Properties consumerConfig = new Properties();
50 | mergeSourceKafkaProperties(properties, consumerConfig);
51 | checkConfig(consumerConfig);
52 | consumer = new KafkaConsumer(consumerConfig);
53 | poolTimeOut = Long.valueOf(properties.getProperty(POLL_TIME_OUT, "500"));
54 |
55 | this.consumerContext = consumerContext;
56 | }
57 |
58 | @Override
59 | public void setFetchOffsetByOffset(TopicPartition topicPartition, Checkpoint checkpoint) {
60 | consumer.seek(topicPartition, checkpoint.getOffset());
61 | }
62 |
63 | // recommended
64 | @Override
65 | public void setFetchOffsetByTimestamp(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException) {
66 | long timeStamp = checkpoint.getTimeStamp();
67 | Map remoteOffset = consumer.offsetsForTimes(Collections.singletonMap(topicPartition, timeStamp));
68 | OffsetAndTimestamp toSet = remoteOffset.get(topicPartition);
69 | if (null == toSet) {
70 | log.warn("Failed to seek offset for topic partition [" + topicPartition + "] with timestamp [" + timeStamp + "]");
71 | if (isCheckpointNotExistThrowException) {
72 | throw new RuntimeException("Failed to seek offset for topic partition [" + topicPartition + "] with timestamp [" + timeStamp + "]");
73 | } else {
74 | log.warn("Set to beginning");
75 | consumer.seekToBeginning(Collections.singleton(topicPartition));
76 | }
77 | } else {
78 | log.info("RecordFetcher: seek for {} with checkpoint {}", topicPartition, checkpoint);
79 |
80 | consumer.seek(topicPartition, toSet.offset());
81 | }
82 | }
83 |
84 | @Override
85 | public void assignTopic(TopicPartition topicPartition, Checkpoint checkpoint, boolean isCheckpointNotExistThrowException) {
86 | consumer.assign(Arrays.asList(topicPartition));
87 |
88 | consumerContext.setTopicPartitions(Collections.singleton(topicPartition));
89 |
90 | log.info("RecordGenerator: assigned for {} with checkpoint {}", topicPartition, checkpoint);
91 | setFetchOffsetByTimestamp(topicPartition, checkpoint, isCheckpointNotExistThrowException);
92 | }
93 |
94 |
95 | // Not tested; please do not use this function.
96 | @Override
97 | public void subscribeTopic(TopicPartition topicPartition, Supplier streamCheckpoint, boolean isCheckpointNotExistThrowException) {
98 | consumer.subscribe(Arrays.asList(topicPartition.topic()), new ConsumerRebalanceListener() {
99 | @Override
100 | public void onPartitionsRevoked(Collection partitions) {
101 | log.info("RecordFetcher consumer: partition revoked for [{}]", StringUtils.join(partitions, ","));
102 | }
103 |
104 | @Override
105 | public void onPartitionsAssigned(Collection partitions) {
106 | log.info("RecordFetcher consumer: partition assigned for [{}]", StringUtils.join(partitions, ","));
107 |
108 | consumerContext.setTopicPartitions(partitions);
109 | if (!consumerContext.hasValidTopicPartitions()) {
110 | log.warn("In subscribe mode, recordFetcher consumer has not been assigned any partition; probably this client is a backup...");
111 | }
112 |
113 | if (partitions.contains(topicPartition)) {
114 | Checkpoint toSet = streamCheckpoint.get();
115 | setFetchOffsetByTimestamp(topicPartition, toSet, isCheckpointNotExistThrowException);
116 | log.info("RecordFetcher consumer: subscribe for [{}] with checkpoint [{}] start", topicPartition, toSet);
117 | }
118 | }
119 | });
120 | }
121 |
122 | public ConsumerRecords poll() {
123 | return consumer.poll(poolTimeOut);
124 | }
125 |
126 | @Override
127 | public KafkaConsumer getRawConsumer() {
128 | return consumer;
129 | }
130 |
131 | public synchronized void close() {
132 | if (null != consumer) {
133 | consumer.close();
134 | }
135 | }
136 |
137 | private void checkConfig(Properties properties) {
138 |
139 | }
140 |
141 | }
142 | }
143 |
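
The assign-mode comments in ConsumerWrap boil down to the following raw Kafka pattern (an illustrative sketch with a hypothetical helper, not DefaultConsumerWrap itself): assign the partition, resolve an offset from a millisecond timestamp, seek, and poll with auto commit disabled:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

// Illustrative sketch, not part of the SDK.
class AssignModeSketch {
    static ConsumerRecords<byte[], byte[]> seekAndPoll(KafkaConsumer<byte[], byte[]> consumer,
                                                       TopicPartition tp, long startTimestampMs) {
        consumer.assign(Collections.singletonList(tp));           // assign mode: no consumer-group rebalancing
        Map<TopicPartition, OffsetAndTimestamp> found =
                consumer.offsetsForTimes(Collections.singletonMap(tp, startTimestampMs));
        OffsetAndTimestamp offset = found.get(tp);
        if (null != offset) {
            consumer.seek(tp, offset.offset());                   // first record at or after the timestamp
        } else {
            consumer.seekToBeginning(Collections.singleton(tp));  // same fallback DefaultConsumerWrap uses
        }
        return consumer.poll(500);                                // caller commits offsets manually after processing
    }
}
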
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/Names.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | public class Names {
4 | // detail control
5 | public static final String TRY_TIME = "stream.tryTime";
6 | public static final String TRY_BACK_TIME_MS = "stream.tryBackTimeMS";
7 | public static final String RETRY_TIME_OUT = "stream.errorRetryTimeOut";
8 | public static final String POLL_TIME_OUT = "stream.pool.timeout";
9 | // general name
10 | public static final String KAFKA_TOPIC = "kafkaTopic";
11 | public static final String KAFKA_BROKER_URL_NAME = "broker";
12 | public static final String GROUP_NAME = "group";
13 |
14 | public static final String USE_CONFIG_CHECKPOINT_NAME = "useConfigCheckpoint";
15 | public static final String SUBSCRIBE_MODE_NAME = "subscribeMode";
16 |
17 | public static final String INITIAL_CHECKPOINT_NAME = "checkpoint";
18 | public static final String USER_NAME = "user";
19 | public static final String PASSWORD_NAME = "password";
20 | public static final String SID_NAME = "sid";
21 | public static final long MAX_TIMESTAMP_SECOND = 99999999999L;
22 | }
23 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordfetcher/OffsetCommitCallBack.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordfetcher;
2 |
3 | import org.apache.kafka.common.TopicPartition;
4 |
5 | public interface OffsetCommitCallBack {
6 | void commit(TopicPartition tp, long timestamp, long offset, String metadata);
7 | }
8 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordgenerator/AvroDeserializer.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordgenerator;
2 |
3 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
4 | import org.apache.avro.io.Decoder;
5 | import org.apache.avro.io.DecoderFactory;
6 | import org.apache.avro.specific.SpecificDatumReader;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 |
11 | public class AvroDeserializer {
12 | private static final Logger log = LoggerFactory.getLogger(AvroDeserializer.class);
13 |
14 | private final SpecificDatumReader reader = new SpecificDatumReader(Record.class);
15 |
16 | public AvroDeserializer() {
17 | }
18 |
19 | public Record deserialize(byte[] data) {
20 |
21 | Decoder decoder = DecoderFactory.get().binaryDecoder(data, null);
22 | Record payload = null;
23 | try {
24 | payload = reader.read(null, decoder);
25 | return payload;
26 | }catch (Throwable ex) {
27 | log.error("AvroDeserializer: deserialize record failed cause " + ex.getMessage(), ex);
28 | throw new RuntimeException(ex);
29 | }
30 | }
31 | }
32 |
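
A minimal usage sketch (hypothetical wrapper class): feed the raw bytes of a Kafka record value to the deserializer, the same way UserRecordGenerator does:

import com.aliyun.dts.subscribe.clients.formats.avro.Record;
import com.aliyun.dts.subscribe.clients.recordgenerator.AvroDeserializer;

// Illustrative sketch, not part of the SDK.
class DeserializeSketch {
    static Record parse(byte[] kafkaRecordValue) {
        AvroDeserializer deserializer = new AvroDeserializer();
        Record avroRecord = deserializer.deserialize(kafkaRecordValue); // throws RuntimeException on malformed input
        System.out.println(avroRecord.getOperation());                  // e.g. UPDATE or DDL
        return avroRecord;
    }
}
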
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordgenerator/UserRecordGenerator.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordgenerator;
2 |
3 | import com.aliyun.dts.subscribe.clients.ConsumerContext;
4 | import com.aliyun.dts.subscribe.clients.common.Checkpoint;
5 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
6 | import com.aliyun.dts.subscribe.clients.common.WorkThread;
7 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
8 | import com.aliyun.dts.subscribe.clients.recordfetcher.OffsetCommitCallBack;
9 | import org.apache.kafka.clients.consumer.ConsumerRecord;
10 | import org.apache.kafka.common.TopicPartition;
11 | import org.apache.kafka.common.metrics.Metrics;
12 | import org.apache.kafka.common.metrics.Sensor;
13 | import org.apache.kafka.common.metrics.stats.SimpleRate;
14 | import org.apache.kafka.common.metrics.stats.Total;
15 | import org.slf4j.Logger;
16 | import org.slf4j.LoggerFactory;
17 |
18 | import java.io.Closeable;
19 | import java.io.IOException;
20 | import java.util.concurrent.LinkedBlockingQueue;
21 | import java.util.concurrent.TimeUnit;
22 |
23 | import static com.aliyun.dts.subscribe.clients.common.Util.sleepMS;
24 |
25 | /**
26 | * This class deserializes avro records from raw bytes into UserRecord instances.
27 | */
28 | public class UserRecordGenerator implements Runnable, Closeable {
29 | private static final Logger log = LoggerFactory.getLogger(UserRecordGenerator.class);
30 |
31 | protected ConsumerContext consumerContext;
32 | protected final LinkedBlockingQueue toProcessRecord;
33 | protected final AvroDeserializer fastDeserializer;
34 |
35 | protected final LinkedBlockingQueue processedRecord;
36 |
37 | protected volatile Checkpoint commitCheckpoint;
38 | protected WorkThread commitThread;
39 | protected final OffsetCommitCallBack offsetCommitCallBack;
40 |
41 | protected Metrics metrics;
42 |
43 | protected final Sensor recordStoreOutCountSensor;
44 | protected final Sensor recordStoreOutByteSensor;
45 |
46 | public UserRecordGenerator(ConsumerContext consumerContext, LinkedBlockingQueue toProcessRecord, LinkedBlockingQueue processedRecord,
47 | OffsetCommitCallBack offsetCommitCallBack) {
48 | this.consumerContext = consumerContext;
49 | this.toProcessRecord = toProcessRecord;
50 | this.fastDeserializer = new AvroDeserializer();
51 | this.processedRecord = processedRecord;
52 |
53 | this.offsetCommitCallBack = offsetCommitCallBack;
54 |
55 | commitCheckpoint = new Checkpoint(null, -1, -1, "-1");
56 |
57 | metrics = consumerContext.getDtsMetrics().getCoreMetrics();
58 |
59 | metrics.addMetric(
60 | metrics.metricName("DStoreRecordQueue", "UserRecordGenerator"),
61 | (config, now) -> (toProcessRecord.size()));
62 |
63 | metrics.addMetric(
64 | metrics.metricName("DefaultUserRecordQueue", "UserRecordGenerator"),
65 | (config, now) -> (processedRecord.size()));
66 |
67 | this.recordStoreOutCountSensor = metrics.sensor("record-store-out-row");
68 | this.recordStoreOutCountSensor.add(metrics.metricName("outCounts", "recordstore"), new Total());
69 | this.recordStoreOutCountSensor.add(metrics.metricName("outRps", "recordstore"), new SimpleRate());
70 | this.recordStoreOutByteSensor = metrics.sensor("record-store-out-byte");
71 | this.recordStoreOutByteSensor.add(metrics.metricName("outBytes", "recordstore"), new Total());
72 | this.recordStoreOutByteSensor.add(metrics.metricName("outBps", "recordstore"), new SimpleRate());
73 | }
74 |
75 | @Override
76 | public void run() {
77 | while (!consumerContext.isExited()) {
78 | ConsumerRecord toProcess = null;
79 | Record record = null;
80 | int fetchFailedCount = 0;
81 | try {
82 | while (null == (toProcess = toProcessRecord.peek()) && !consumerContext.isExited()) {
83 | sleepMS(5);
84 | fetchFailedCount++;
85 | if (fetchFailedCount % 1000 == 0 && consumerContext.hasValidTopicPartitions()) {
86 | log.info("UserRecordGenerator: haven't received any records for 5s");
87 | }
88 | }
89 | if (consumerContext.isExited()) {
90 | return;
91 | }
92 | final ConsumerRecord consumerRecord = toProcess;
93 | consumerRecord.timestamp();
94 | record = fastDeserializer.deserialize(consumerRecord.value());
95 | log.debug("UserRecordGenerator: meet [{}] record type", record.getOperation());
96 |
97 | DefaultUserRecord defaultUserRecord = new DefaultUserRecord(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()), consumerRecord.offset(),
98 | record,
99 | (tp, commitRecord, offset, metadata) -> {
100 | recordStoreOutCountSensor.record(1);
101 | recordStoreOutByteSensor.record(consumerRecord.value().length);
102 | commitCheckpoint = new Checkpoint(tp, commitRecord.getSourceTimestamp(), offset, metadata);
103 | commit();
104 | });
105 |
106 | int offerTryCount = 0;
107 |
108 | while (!offerRecord(1000, TimeUnit.MILLISECONDS, defaultUserRecord) && !consumerContext.isExited()) {
109 | if (++offerTryCount % 10 == 0) {
110 | log.info("UserRecordGenerator: offering user record has kept failing for 10s [" + record + "]");
111 | }
112 | }
113 |
114 | toProcessRecord.poll();
115 | } catch (Exception e) {
116 | log.error("UserRecordGenerator: process record failed, raw consumer record [" + toProcess + "], parsed record [" + record + "], cause " + e.getMessage(), e);
117 | consumerContext.exit();
118 | }
119 | }
120 | }
121 |
122 | protected boolean offerRecord(int timeOut, TimeUnit timeUnit, DefaultUserRecord defaultUserRecord) {
123 | try {
124 | return processedRecord.offer(defaultUserRecord, timeOut, timeUnit);
125 | } catch (Exception e) {
126 | log.error("UserRecordGenerator: offer record failed, record[" + defaultUserRecord + "], cause " + e.getMessage(), e);
127 | return false;
128 | }
129 | }
130 |
131 | @Override
132 | public void close() throws IOException {
133 | consumerContext.exit();
134 | commitThread.stop();
135 | }
136 |
137 | // user define how to commit
138 | private void commit() {
139 | if (null != offsetCommitCallBack) {
140 | if (commitCheckpoint.getTopicPartition() != null && commitCheckpoint.getOffset() != -1) {
141 | offsetCommitCallBack.commit(commitCheckpoint.getTopicPartition(), commitCheckpoint.getTimeStamp(),
142 | commitCheckpoint.getOffset(), commitCheckpoint.getInfo());
143 | }
144 | }
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/DbType.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordprocessor;
2 |
3 | public enum DbType {
4 | MySQL, Oracle;
5 | }
6 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/DefaultRecordPrintListener.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordprocessor;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.RecordListener;
4 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
5 | import com.aliyun.dts.subscribe.clients.record.OperationType;
6 | import com.aliyun.dts.subscribe.clients.record.RecordSchema;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | public class DefaultRecordPrintListener implements RecordListener {
11 | private static final Logger log = LoggerFactory.getLogger(DefaultRecordPrintListener.class);
12 |
13 | public DefaultRecordPrintListener(DbType mySQL) {
14 | }
15 |
16 | @Override
17 | public void consume(DefaultUserRecord record) {
18 |
19 | OperationType operationType = record.getOperationType();
20 |
21 | RecordSchema recordSchema = record.getSchema();
22 |
23 | StringBuilder stringBuilder = new StringBuilder();
24 |
25 | stringBuilder
26 | .append("\n")
27 | // record id can not be used as unique identifier
28 | .append("RecordID [").append(record.getId()).append("]\n")
29 | // record generate timestamp in source log
30 | .append("RecordTimestamp [").append(record.getSourceTimestamp()).append("] \n")
31 | // source info contains which source this record came from
32 | .append("Source [").append(recordSchema.getDatabaseInfo()).append("]\n")
33 | // record type
34 | .append("RecordType [").append(record.getOperationType()).append("]\n");
35 |
36 | if (operationType.equals(OperationType.INSERT)
37 | || operationType.equals(OperationType.UPDATE)
38 | || operationType.equals(OperationType.DELETE)
39 | || operationType.equals(OperationType.DDL)) {
40 |
41 | stringBuilder
42 | .append("Schema info [").append(recordSchema.toString()).append("]\n")
43 | //before image
44 | .append("Before image {").append(record.getBeforeImage()).append("}\n")
45 | //after image
46 | .append("After image {").append(record.getAfterImage()).append("}\n");
47 | }
48 |
49 | log.info(stringBuilder.toString());
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/EtlRecordProcessor.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordprocessor;
2 |
3 |
4 | import com.aliyun.dts.subscribe.clients.ConsumerContext;
5 | import com.aliyun.dts.subscribe.clients.common.*;
6 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
7 | import org.apache.kafka.common.metrics.Metrics;
8 | import org.apache.kafka.common.metrics.Sensor;
9 | import org.apache.kafka.common.metrics.stats.SimpleRate;
10 | import org.apache.kafka.common.metrics.stats.Total;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import java.io.Closeable;
15 | import java.util.Map;
16 | import java.util.concurrent.LinkedBlockingQueue;
17 |
18 | import static com.aliyun.dts.subscribe.clients.common.Util.require;
19 | import static com.aliyun.dts.subscribe.clients.common.Util.sleepMS;
20 |
21 |
22 | /**
23 | * This demo shows how to process records deserialized from avro bytes.
24 | * It also shows how to print a column from a deserialized record.
25 | */
26 | public class EtlRecordProcessor implements Runnable, Closeable {
27 | private static final Logger log = LoggerFactory.getLogger(EtlRecordProcessor.class);
28 |
29 | private final LinkedBlockingQueue toProcessRecord;
30 | private final Map recordListeners;
31 |
32 | private ConsumerContext consumerContext;
33 |
34 | public EtlRecordProcessor(ConsumerContext consumerContext, LinkedBlockingQueue toProcessRecord,
35 | Map recordListeners) {
36 | this.consumerContext = consumerContext;
37 | this.toProcessRecord = toProcessRecord;
38 | this.recordListeners= recordListeners;
39 | }
40 |
41 | @Override
42 | public void run() {
43 | while (!consumerContext.isExited()) {
44 | DefaultUserRecord toProcess = null;
45 | int fetchFailedCount = 0;
46 | try {
47 | while (null == (toProcess = toProcessRecord.peek()) && !consumerContext.isExited()) {
48 | sleepMS(5);
49 | fetchFailedCount++;
50 | if (fetchFailedCount % 1000 == 0 && consumerContext.hasValidTopicPartitions()) {
51 | log.info("EtlRecordProcessor: haven't received records from the generator for 5s");
52 | }
53 | }
54 | if (consumerContext.isExited()) {
55 | return;
56 | }
57 | fetchFailedCount = 0;
58 | final DefaultUserRecord consumerRecord = toProcess;
59 |
60 | for (RecordListener recordListener : recordListeners.values()) {
61 | recordListener.consume(consumerRecord);
62 | }
63 |
64 | toProcessRecord.poll();
65 | } catch (Exception e) {
66 | log.error("EtlRecordProcessor: process record failed, raw consumer record [" + toProcess + "], cause " + e.getMessage(), e);
67 | consumerContext.exit();
68 | }
69 | }
70 | }
71 |
72 | public void registerRecordListener(String name, RecordListener recordListener) {
73 | require(null != name && null != recordListener, "null value not accepted");
74 | recordListeners.put(name, recordListener);
75 | }
76 |
77 | public void close() {
78 | consumerContext.exit();
79 | }
80 | }
81 |
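
A short, hypothetical wiring sketch: registering the bundled print listener so every consumed record is logged:

import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;
import com.aliyun.dts.subscribe.clients.recordprocessor.EtlRecordProcessor;

// Illustrative sketch, not part of the SDK.
class ListenerWiringSketch {
    static void attachPrinter(EtlRecordProcessor processor) {
        // Each DefaultUserRecord taken from the queue will be formatted and written to the log.
        processor.registerRecordListener("printer", new DefaultRecordPrintListener(DbType.MySQL));
    }
}
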
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/dts/subscribe/clients/recordprocessor/FieldValue.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.recordprocessor;
2 |
3 | import org.apache.commons.lang3.StringUtils;
4 |
5 | import java.io.UnsupportedEncodingException;
6 |
7 | public class FieldValue {
8 | private String encoding;
9 | private byte[] bytes;
10 | public String getEncoding() {
11 | return encoding;
12 | }
13 | public byte[] getValue() {
14 | return bytes;
15 | }
16 |
17 | public void setEncoding(String encoding) {
18 | this.encoding = encoding;
19 | }
20 | public void setValue(byte[] bytes) {
21 | this.bytes = bytes;
22 | }
23 | @Override
24 | public String toString() {
25 | if (null == getValue()) {
26 | return "null [binary]";
27 | }
28 | if (encoding == null) {
29 | return super.toString();
30 | }
31 | try {
32 | if (StringUtils.equals("utf8mb4", encoding)) {
33 | return new String(getValue(), "utf8");
34 | } else {
35 | return new String(getValue(), encoding);
36 | }
37 | } catch (UnsupportedEncodingException e) {
38 | throw new RuntimeException("Unsupported encoding: " + encoding);
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | log4j.rootLogger=INFO,CONSOLE
16 |
17 | ## Log output to the console
18 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
19 | log4j.appender.CONSOLE.Threshold=INFO
20 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
21 | log4j.appender.CONSOLE.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ss.SSS}] [%-5p] [%t] [%c:%L] - %m%n
22 |
23 | # Log file appender (logFile)
24 | log4j.logger.com.aliyun=INFO,file
25 | log4j.additivity.com.aliyun=true
26 | log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
27 | log4j.appender.file.Threshold=INFO
28 | log4j.appender.file.Append=true
29 | log4j.appender.file.File=dts-new-subscribe.log
30 | log4j.appender.file.layout=org.apache.log4j.PatternLayout
31 | log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ss.SSS}] [%-5p] [%t] [%c:%L] - %m%n
32 |
33 |
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DBMapperTest.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 |
3 | import com.aliyun.dms.subscribe.clients.DBMapper;
4 | import com.aliyun.dts.subscribe.clients.formats.avro.Operation;
5 | import com.aliyun.dts.subscribe.clients.formats.avro.Record;
6 | import org.junit.Assert;
7 | import org.junit.Test;
8 |
9 |
10 | public class DBMapperTest {
11 |
12 | @Test
13 | public void dbMapperTest() {
14 | DBMapper dbMapper = new DBMapper();
15 | dbMapper.init("{\"dts_h02\":{\"all\":false,\"name\":\"dts_h\",\"Table\":{\"dtsh27_02\":{\"all\":true,\"name\":\"dtsh\"},\"dts28_01\":{\"all\":true,\"name\":\"dts\"},\"dts28_02\":{\"all\":true,\"name\":\"dts\"}}},\"dts_h01\":{\"all\":false,\"name\":\"dts_h\",\"Table\":{\"dtsh27_01\":{\"all\":true,\"name\":\"dtsh\"},\"dts29_02\":{\"all\":true,\"name\":\"dts\"},\"dts29_01\":{\"all\":true,\"name\":\"dts\"}}}}");
16 | Record record = new Record();
17 | record.setOperation(Operation.UPDATE);
18 | String physicTable = "dts_h02.dtsh27_02";
19 | String logicTable = "dts_h.dtsh";
20 |
21 | record.setObjectName(physicTable);
22 | record = dbMapper.transform(record);
23 | Assert.assertEquals(record.getObjectName(), logicTable);
24 |
25 | String physicDb = "dts_h01";
26 | String logicDb = "dts_h";
27 | record.setOperation(Operation.DDL);
28 | record.setObjectName(physicDb);
29 | record = dbMapper.transform(record);
30 | Assert.assertEquals(record.getObjectName(), logicDb);
31 |
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DTSConsumerAssignDemo.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.RecordListener;
4 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
5 | import com.aliyun.dts.subscribe.clients.record.OperationType;
6 | import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
7 | import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import java.util.Collections;
12 | import java.util.Map;
13 |
14 | public class DTSConsumerAssignDemo {
15 | private static final Logger log = LoggerFactory.getLogger(DTSConsumerAssignDemo.class);
16 |
17 | private final DTSConsumer dtsConsumer;
18 |
19 | public DTSConsumerAssignDemo(String brokerUrl, String topic, String sid, String userName, String password,
20 | String checkpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode, boolean isForceUseInitCheckpoint) {
21 | this.dtsConsumer = initDTSClient(brokerUrl, topic, sid, userName, password, checkpoint, subscribeMode, isForceUseInitCheckpoint);
22 | }
23 |
24 | private DTSConsumer initDTSClient(String brokerUrl, String topic, String sid, String userName, String password,
25 | String initCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode, boolean isForceUseInitCheckpoint) {
26 | ConsumerContext consumerContext = new ConsumerContext(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);
27 |
28 | // if this parameter is set, force the consumer to start from the configured initCheckpoint
29 | consumerContext.setForceUseCheckpoint(isForceUseInitCheckpoint);
30 |
31 | // add a user meta store to manage the checkpoint yourself
32 | consumerContext.setUserRegisteredStore(new UserMetaStore());
33 |
34 | DTSConsumer dtsConsumer = new DefaultDTSConsumer(consumerContext);
35 |
36 | dtsConsumer.addRecordListeners(buildRecordListener());
37 |
38 | return dtsConsumer;
39 | }
40 |
41 | public static Map buildRecordListener() {
42 | // users can implement their own listener
43 | RecordListener mysqlRecordPrintListener = new RecordListener() {
44 | @Override
45 | public void consume(DefaultUserRecord record) {
46 |
47 | OperationType operationType = record.getOperationType();
48 |
49 | if(operationType.equals(OperationType.INSERT)
50 | || operationType.equals(OperationType.UPDATE)
51 | || operationType.equals(OperationType.DELETE)
52 | || operationType.equals(OperationType.DDL)
53 | || operationType.equals(OperationType.HEARTBEAT)) {
54 |
55 | // consume record
56 | RecordListener recordPrintListener = new DefaultRecordPrintListener(DbType.MySQL);
57 |
58 | recordPrintListener.consume(record);
59 |
60 | // the commit method pushes the checkpoint update
61 | record.commit("");
62 | }
63 | }
64 | };
65 | return Collections.singletonMap("mysqlRecordPrinter", mysqlRecordPrintListener);
66 | }
67 |
68 | public void start() {
69 | System.out.println("Start DTS subscription client...");
70 |
71 | dtsConsumer.start();
72 | }
73 |
74 | public static void main(String[] args) {
75 | // kafka broker url
76 | String brokerUrl = "your broker url";
77 | // topic to consume, partition is 0
78 | String topic = "your dts topic";
79 | // user password and sid for auth
80 | String sid = "your sid";
81 | String userName = "your user name";
82 | String password = "your password";
83 | // initial checkpoint for the first seek: a timestamp, e.g. 1566180200 (Mon Aug 19 10:03:21 CST 2019)
84 | String initCheckpoint = "start timestamp";
85 | // when using subscribe mode, group config is required and the Kafka consumer group is enabled
86 | ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.ASSIGN;
87 | // whether to force using the configured checkpoint on start; checkpoint reset only works in assign mode
88 | boolean isForceUseInitCheckpoint = true;
89 |
90 | DTSConsumerAssignDemo consumerDemo = new DTSConsumerAssignDemo(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode, isForceUseInitCheckpoint);
91 | consumerDemo.start();
92 | }
93 | }
94 |
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DTSConsumerSubscribeDemo.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 |
3 | import com.aliyun.dts.subscribe.clients.common.RecordListener;
4 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
5 | import com.aliyun.dts.subscribe.clients.record.OperationType;
6 | import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
7 | import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import java.util.Collections;
12 | import java.util.Map;
13 |
14 | public class DTSConsumerSubscribeDemo {
15 | private static final Logger log = LoggerFactory.getLogger(DTSConsumerSubscribeDemo.class);
16 |
17 | private final DTSConsumer dtsConsumer;
18 |
19 | public DTSConsumerSubscribeDemo(String brokerUrl, String topic, String sid, String userName, String password,
20 | String checkpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode) {
21 | this.dtsConsumer = initDTSClient(brokerUrl, topic, sid, userName, password, checkpoint, subscribeMode);
22 | }
23 |
24 | private DTSConsumer initDTSClient(String brokerUrl, String topic, String sid, String userName, String password,
25 | String initCheckpoint, ConsumerContext.ConsumerSubscribeMode subscribeMode) {
26 | ConsumerContext consumerContext = new ConsumerContext(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);
27 |
28 | // add a user meta store to manage the checkpoint yourself
29 | consumerContext.setUserRegisteredStore(new UserMetaStore());
30 |
31 | DTSConsumer dtsConsumer = new DefaultDTSConsumer(consumerContext);
32 |
33 | dtsConsumer.addRecordListeners(buildRecordListener());
34 |
35 | return dtsConsumer;
36 | }
37 |
38 | public static Map buildRecordListener() {
40 | // users can implement their own listener
40 | RecordListener mysqlRecordPrintListener = new RecordListener() {
41 | @Override
42 | public void consume(DefaultUserRecord record) {
43 |
44 | OperationType operationType = record.getOperationType();
45 |
46 | if(operationType.equals(OperationType.INSERT)
47 | || operationType.equals(OperationType.UPDATE)
48 | || operationType.equals(OperationType.DELETE)
49 | || operationType.equals(OperationType.DDL)
50 | || operationType.equals(OperationType.HEARTBEAT)) {
51 |
52 | // consume record
53 | RecordListener recordPrintListener = new DefaultRecordPrintListener(DbType.MySQL);
54 |
55 | recordPrintListener.consume(record);
56 |
57 | record.commit("");
58 | }
59 | }
60 | };
61 | return Collections.singletonMap("mysqlRecordPrinter", mysqlRecordPrintListener);
62 | }
63 |
64 | public void start() {
65 | System.out.println("Start DTS subscription client...");
66 |
67 | dtsConsumer.start();
68 | }
69 |
70 | public static void main(String[] args) {
71 | // kafka broker url
72 | String brokerUrl = "your broker url";
73 | // topic to consume, partition is 0
74 | String topic = "your dts topic";
75 | // user password and sid for auth
76 | String sid = "your sid";
77 | String userName = "your user name";
78 | String password = "your password";
79 | // initial checkpoint for the first seek: a timestamp, e.g. 1566180200 (Mon Aug 19 10:03:21 CST 2019)
80 | String initCheckpoint = "start timestamp";
81 | // when using subscribe mode, group config is required and the Kafka consumer group is enabled
82 | ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.SUBSCRIBE;
83 |
84 | DTSConsumerSubscribeDemo consumerDemo = new DTSConsumerSubscribeDemo(brokerUrl, topic, sid, userName, password, initCheckpoint, subscribeMode);
85 | consumerDemo.start();
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/DistributedDTSConsumerDemo.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 |
3 | import com.aliyun.dms.subscribe.clients.DBMapper;
4 | import com.aliyun.dms.subscribe.clients.DistributedDTSConsumer;
5 | import com.aliyun.dms.subscribe.clients.DefaultDistributedDTSConsumer;
6 |
7 | import com.aliyun.dts.subscribe.clients.common.RecordListener;
8 | import com.aliyun.dts.subscribe.clients.record.DefaultUserRecord;
9 | import com.aliyun.dts.subscribe.clients.record.OperationType;
10 | import com.aliyun.dts.subscribe.clients.recordprocessor.DbType;
11 | import com.aliyun.dts.subscribe.clients.recordprocessor.DefaultRecordPrintListener;
12 | import com.aliyuncs.DefaultAcsClient;
13 | import com.aliyuncs.IAcsClient;
14 | import com.aliyuncs.dts.model.v20200101.DescribeDtsJobsRequest;
15 | import com.aliyuncs.dts.model.v20200101.DescribeDtsJobsResponse;
16 | import com.aliyuncs.dts.model.v20200101.DescribeSubscriptionMetaRequest;
17 | import com.aliyuncs.dts.model.v20200101.DescribeSubscriptionMetaResponse;
18 | import com.aliyuncs.exceptions.ClientException;
19 | import com.aliyuncs.profile.DefaultProfile;
20 | import org.apache.commons.lang3.StringUtils;
21 | import org.slf4j.Logger;
22 | import org.slf4j.LoggerFactory;
23 |
24 | import java.util.ArrayList;
25 | import java.util.Collections;
26 | import java.util.HashMap;
27 | import java.util.List;
28 | import java.util.Map;
29 | import java.util.stream.Collectors;
30 |
31 |
32 | public class DistributedDTSConsumerDemo {
33 | private static final Logger log = LoggerFactory.getLogger(DistributedDTSConsumerDemo.class);
34 |
35 | private final DistributedDTSConsumer distributedDTSConsumer;
36 | private Map topic2checkpoint = new HashMap<>();
37 | private Map topic2Sid = new HashMap<>();
38 | private ArrayList dbLists = new ArrayList<>();
39 | private DBMapper dbMapper = new DBMapper();
40 |
41 | public DistributedDTSConsumerDemo(String username, String password, String region, String groupId, String sid, String dtsInstanceId,
42 | String accessKeyId, String accessKeySecret, ConsumerContext.ConsumerSubscribeMode subscribeMode, String dProxy,
43 | String checkpoint, boolean isForceUseInitCheckpoint, boolean mapping) throws ClientException{
44 | getSubscribeSubJobs(region, groupId, sid, dtsInstanceId, accessKeyId, accessKeySecret);
45 |
46 | dbMapper.setMapping(mapping);
47 | dbMapper.init(dbLists);
48 | log.debug("init dbList:" + dbLists);
49 | this.distributedDTSConsumer = initDistributedConsumer(username, password, subscribeMode, dProxy, checkpoint, isForceUseInitCheckpoint);
50 |
51 | }
52 |
53 | private DistributedDTSConsumer initDistributedConsumer(String username, String password,
54 | ConsumerContext.ConsumerSubscribeMode subscribeMode, String dProxy,
55 | String checkpoint, boolean isForceUseInitCheckpoint) {
56 |
57 | DefaultDistributedDTSConsumer distributedConsumer = new DefaultDistributedDTSConsumer();
58 | // user can change checkpoint if needed
59 | for (String topic : topic2Sid.keySet()) {
60 | topic2checkpoint.put(topic, checkpoint);
61 | }
62 |
63 | distributedConsumer.init(topic2checkpoint, dbMapper, dProxy, topic2Sid, username, password, subscribeMode, isForceUseInitCheckpoint,
64 | new UserMetaStore(), buildRecordListener());
65 |
66 | return distributedConsumer;
67 | }
68 |
69 | public static Map buildRecordListener() {
70 | // users can implement their own listener
71 | RecordListener mysqlRecordPrintListener = new RecordListener() {
72 | @Override
73 | public void consume(DefaultUserRecord record) {
74 |
75 | OperationType operationType = record.getOperationType();
76 |
77 | if (operationType.equals(OperationType.INSERT)
78 | || operationType.equals(OperationType.UPDATE)
79 | || operationType.equals(OperationType.DELETE)
80 | || operationType.equals(OperationType.HEARTBEAT)) {
81 |
82 | // consume record
83 | RecordListener recordPrintListener = new DefaultRecordPrintListener(DbType.MySQL);
84 |
85 | recordPrintListener.consume(record);
86 |
87 | // the commit method pushes the checkpoint update
88 | record.commit("");
89 | }
90 | }
91 | };
92 | return Collections.singletonMap("mysqlRecordPrinter", mysqlRecordPrintListener);
93 | }
94 |
95 | public void start() {
96 | distributedDTSConsumer.start();
97 | }
98 |
99 |
100 | public void getSubscribeSubJobs(String region, String groupId, String sid, String dtsInstanceId, String accessKeyId, String accessKeySecret) throws ClientException {
101 | DefaultProfile profile = DefaultProfile.getProfile(region, accessKeyId, accessKeySecret);
102 | IAcsClient client = new DefaultAcsClient(profile);
103 | DescribeDtsJobsRequest request = new DescribeDtsJobsRequest();
104 |
105 | request.setGroupId(groupId);
106 | request.setJobType("subscribe");
107 | request.setRegion(region);
108 |
109 | DescribeDtsJobsResponse response = client.getAcsResponse(request);
110 | List subMigrationJobIds = response.getDtsJobList().stream().map(DescribeDtsJobsResponse.DtsJobStatus::getDtsJobId).collect(Collectors.toList());
111 |
112 | DescribeSubscriptionMetaRequest req = new DescribeSubscriptionMetaRequest();
113 | req.setSid(sid);
114 | req.setSubMigrationJobIds(String.join(",", subMigrationJobIds));
115 | req.setDtsInstanceId(dtsInstanceId);
116 |
117 | DescribeSubscriptionMetaResponse res = client.getAcsResponse(req);
118 | if (res.getSuccess().equalsIgnoreCase("true")) {
119 | for (DescribeSubscriptionMetaResponse.SubscriptionMetaListItem meta : (res).getSubscriptionMetaList()) {
120 | topic2Sid.put(meta.getTopic(), meta.getSid());
121 | dbLists.add(meta.getDBList());
122 |
123 | if (StringUtils.isEmpty(meta.getDBList())) {
124 | log.warn("dbList is null, sid:" + sid + ",dtsInstanceId:" + dtsInstanceId + ",subMigrationJobIds:" + String.join(",", subMigrationJobIds));
125 | }
126 | }
127 | }
128 | dbMapper.setClient(client);
129 | dbMapper.setDescribeSubscriptionMetaRequest(req);
130 | }
131 |
132 | public static void main(String[] args) throws ClientException {
133 | // Subscription setup for distributed data sources such as PolarDBX10 (formerly DRDS): configure the AccessKey, instance id, main DTS job id, subscription consumer group, and other related information.
134 | String accessKeyId = "your access key id";
135 | String accessKeySecret = "your access key secret";
136 | String regionId = "your regionId";
137 | String dtsInstanceId = "your dts instanceId";
138 | String jobId = "your dts jobId";
139 | String sid = "your sid";
140 | String userName = "your user name";
141 | String password = "your password";
142 | String proxyUrl = "your proxyUrl";
143 | // initial checkpoint for the first seek: a timestamp, e.g. 1566180200 (Mon Aug 19 10:03:21 CST 2019)
144 | String checkpoint = "";
145 |
146 | // Convert physical database/table names to logical database/table names
147 | boolean mapping = true;
148 | // whether to force using the configured checkpoint on start; checkpoint reset only works in assign mode
149 | boolean isForceUseInitCheckpoint = false;
150 |
151 | ConsumerContext.ConsumerSubscribeMode subscribeMode = ConsumerContext.ConsumerSubscribeMode.ASSIGN;
152 | DistributedDTSConsumerDemo demo = new DistributedDTSConsumerDemo(userName, password, regionId,
153 | jobId, sid, dtsInstanceId, accessKeyId, accessKeySecret, subscribeMode, proxyUrl,
154 | checkpoint, isForceUseInitCheckpoint, mapping);
155 | demo.start();
156 | }
157 | }
158 |
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/UserMetaStore.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients;
2 |
3 | import com.aliyun.dts.subscribe.clients.metastore.AbstractUserMetaStore;
4 |
5 | /**
6 | * Store the checkpoint data in shared storage, such as a database or a shared file system.
7 | * This meta store needs to be implemented by the consumer.
8 | */
9 | public class UserMetaStore extends AbstractUserMetaStore {
10 |
11 | @Override
12 | protected void saveData(String groupID, String toStoreJson) {
13 |
14 | }
15 |
16 | @Override
17 | protected String getData(String groupID) {
18 | return null;
19 | }
20 | }
21 |
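The stub above compiles but persists nothing, so checkpoints would be lost on restart. As a hedged sketch of one possible implementation (not part of the SDK; the class name and file layout are assumptions), the checkpoint JSON handed to saveData could be written to a local file per group. A shared database or similar shared storage is the better choice when several consumer instances run.

import com.aliyun.dts.subscribe.clients.metastore.AbstractUserMetaStore;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical meta store that keeps one checkpoint file per consumer group.
public class LocalFileMetaStore extends AbstractUserMetaStore {

    private final Path baseDir;

    public LocalFileMetaStore(String baseDir) {
        this.baseDir = Paths.get(baseDir);
    }

    @Override
    protected void saveData(String groupID, String toStoreJson) {
        try {
            Files.createDirectories(baseDir);
            Files.write(checkpointFile(groupID), toStoreJson.getBytes(StandardCharsets.UTF_8));
        } catch (IOException e) {
            throw new RuntimeException("failed to persist checkpoint for group " + groupID, e);
        }
    }

    @Override
    protected String getData(String groupID) {
        Path file = checkpointFile(groupID);
        if (!Files.exists(file)) {
            return null;
        }
        try {
            return new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new RuntimeException("failed to read checkpoint for group " + groupID, e);
        }
    }

    private Path checkpointFile(String groupID) {
        return baseDir.resolve(groupID + ".checkpoint.json");
    }
}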
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/dts/subscribe/clients/record/value/DateTimeTest.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.dts.subscribe.clients.record.value;
2 |
3 | import org.junit.Assert;
4 | import org.junit.Test;
5 |
6 | import java.text.ParseException;
7 | import java.text.SimpleDateFormat;
8 | import java.time.LocalDateTime;
9 | import java.time.format.DateTimeFormatter;
10 | import java.util.Date;
11 |
12 | public class DateTimeTest {
13 | @Test
14 | public void testToUnixTimestamp() throws ParseException {
15 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016350L);
16 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016035L);
17 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016350L);
18 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toUnixTimestamp(), 1644913016350L);
19 | }
20 |
21 | @Test
22 | public void testToEpochMilliSeconds() throws ParseException {
23 | System.out.println(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds());
24 | System.out.println(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds());
25 | System.out.println(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds());
26 | System.out.println(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds());
27 |
28 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016350L);
29 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016035L);
30 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016350L);
31 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toEpochMilliSeconds(), 1644913016350L);
32 |
33 | }
34 |
35 | @Test
36 | public void testToString() {
37 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.35");
38 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.035", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.035");
39 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.35", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.35");
40 | Assert.assertEquals(new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS).toString(), "2022-02-15 16:16:56.35");
41 | }
42 |
43 | @Test
44 | public void testDate() throws ParseException {
45 | DateTime dateTime = new DateTime("2022-02-15 16:16:56.350", DateTime.SEG_DATETIME_NAONS);
46 |
47 | System.out.println(dateTime);
48 |
49 | SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSS");
50 |
51 |
52 | System.out.println(formatter.parse(dateTime.toString()).getTime());
53 |
54 | }
55 | }
56 |
--------------------------------------------------------------------------------