├── .gitignore
├── README.md
├── assembly-magpie.xml
├── conf
│   └── parser.properties
├── pom.xml
└── src
    ├── main
    │   ├── assembly
    │   │   └── assembly.xml
    │   ├── bin
    │   │   ├── start-server.sh
    │   │   └── stop-server.sh
    │   ├── java
    │   │   └── com
    │   │       └── github
    │   │           └── hackerwin7
    │   │               └── mysql
    │   │                   └── parser
    │   │                       ├── deployer
    │   │                       │   └── LocalParser.java
    │   │                       ├── hbase
    │   │                       │   ├── driver
    │   │                       │   │   └── HBaseOperator.java
    │   │                       │   └── utils
    │   │                       │       └── HData.java
    │   │                       ├── kafka
    │   │                       │   ├── driver
    │   │                       │   │   ├── consumer
    │   │                       │   │   │   ├── KafkaNoStaticReceiver.java
    │   │                       │   │   │   └── KafkaReceiver.java
    │   │                       │   │   └── producer
    │   │                       │   │       ├── GenericKafkaSender.java
    │   │                       │   │       └── KafkaSender.java
    │   │                       │   └── utils
    │   │                       │       ├── KafkaConf.java
    │   │                       │       ├── KafkaMetaMsg.java
    │   │                       │       └── KafkaNoStaticConf.java
    │   │                       ├── monitor
    │   │                       │   ├── JrdwMonitorVo.java
    │   │                       │   ├── MonitorToKafkaProducer.java
    │   │                       │   ├── ParserMonitor.java
    │   │                       │   ├── SimplePartitioner.java
    │   │                       │   └── constants
    │   │                       │       ├── JDMysqlParserMonitor.java
    │   │                       │       └── JDMysqlParserMonitorType.java
    │   │                       ├── parser
    │   │                       │   ├── HandlerForMagpieHBase.java
    │   │                       │   ├── HandlerKafkaZkLocal.java
    │   │                       │   ├── HandlerKafkaZkLocalPerformance.java
    │   │                       │   ├── HandlerMagpieKafka.java
    │   │                       │   ├── HandlerMagpieKafkaCheckpointHBase.java
    │   │                       │   ├── HandlerMagpieKafkaCheckpointHBaseSiteConfig.java
    │   │                       │   ├── HandlerMagpieKafkaCheckpointZk.java
    │   │                       │   ├── MysqlParser.java
    │   │                       │   ├── ParserVertifyKafka.java
    │   │                       │   ├── ParserVertifyKafkaAvro.java
    │   │                       │   ├── ParserVertifyKafkaJson.java
    │   │                       │   ├── ParserVertifyKafkaSimple.java
    │   │                       │   ├── utils
    │   │                       │   │   ├── EntryPrinter.java
    │   │                       │   │   ├── KafkaPosition.java
    │   │                       │   │   ├── ParserConf.java
    │   │                       │   │   ├── ParserConfig.java
    │   │                       │   │   └── ParserFilterConf.java
    │   │                       │   └── verify
    │   │                       │       ├── ParserVerifyAvro.java
    │   │                       │       ├── ParserVerifyField.java
    │   │                       │       └── ParserVerifyJdwAvro.java
    │   │                       ├── protocol
    │   │                       │   ├── avro
    │   │                       │   │   ├── EventEntry.avro
    │   │                       │   │   └── EventEntryAvro.java
    │   │                       │   ├── json
    │   │                       │   │   ├── ConfigJson.java
    │   │                       │   │   ├── JSONConvert.java
    │   │                       │   │   └── LoadURLJson.java
    │   │                       │   └── protobuf
    │   │                       │       ├── CanalEntry.java
    │   │                       │       └── EntryProtocol.proto
    │   │                       └── zk
    │   │                           ├── client
    │   │                           │   └── ZkExecutor.java
    │   │                           └── utils
    │   │                               └── ZkConf.java
    │   └── resources
    │       ├── input_config.yaml
    │       ├── log4j.properties
    │       └── parser.properties
    └── test
        └── java
            ├── AvroNullTest.java
            ├── DBTBGemerater.java
            ├── DeepMapRelicaTest.java
            ├── DeepReplicaTest.java
            ├── JSONStrGenerater.java
            ├── JavaSetTest.java
            ├── JsonArrTest.java
            ├── JsonParserTest.java
            ├── MapCharsequenceTest.java
            ├── MapNullTest.java
            ├── MysqlParserTest.java
            ├── ParserJsonTest.java
            ├── SimpleTest.java
            ├── TestElem.java
            └── Testu001.java
/.gitignore:
--------------------------------------------------------------------------------
1 | ### JetBrains template
2 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio
3 |
4 | *.iml
5 |
6 | ## Directory-based project format:
7 | .idea/
8 | # if you remove the above rule, at least ignore the following:
9 |
10 | # User-specific stuff:
11 | # .idea/workspace.xml
12 | # .idea/tasks.xml
13 | # .idea/dictionaries
14 |
15 | # Sensitive or high-churn files:
16 | # .idea/dataSources.ids
17 | # .idea/dataSources.xml
18 | # .idea/sqlDataSources.xml
19 | # .idea/dynamic.xml
20 | # .idea/uiDesigner.xml
21 |
22 | # Gradle:
23 | # .idea/gradle.xml
24 | # .idea/libraries
25 |
26 | # Mongo Explorer plugin:
27 | # .idea/mongoSettings.xml
28 |
29 | ## File-based project format:
30 | *.ipr
31 | *.iws
32 |
33 | ## Plugin-specific files:
34 |
35 | # IntelliJ
36 | /out/
37 |
38 | # mpeltonen/sbt-idea plugin
39 | .idea_modules/
40 |
41 | # JIRA plugin
42 | atlassian-ide-plugin.xml
43 |
44 | # Crashlytics plugin (for Android Studio and IntelliJ)
45 | com_crashlytics_export_strings.xml
46 | crashlytics.properties
47 | crashlytics-build.properties
48 | ### Java template
49 | *.class
50 |
51 | # Mobile Tools for Java (J2ME)
52 | .mtj.tmp/
53 |
54 | # Package Files #
55 | *.jar
56 | *.war
57 | *.ear
58 |
59 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
60 | hs_err_pid*
61 |
62 | # Created by .ignore support plugin (hsz.mobi)
63 | #
64 | logs/
65 | target/
66 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### introduction
2 | consumer for mysql-tracker, with some operations to restructure the format of the messages
3 | mysql-parser fetches the messages from mysql-tracker and packages them into the new message format
4 | a configuration service for mysql-parser will be added in a later version
5 | ### see also
6 | [1]: [design details](http://blog.csdn.net/hackerwin7/article/details/39896173)
7 | [2]: [to kafka design](http://blog.csdn.net/hackerwin7/article/details/42713271)
8 | ### real-time job
9 | HA and the real-time job still lack configuration; see the next version
10 |
--------------------------------------------------------------------------------
/assembly-magpie.xml:
--------------------------------------------------------------------------------
1 |
5 | standalone
6 |
7 | jar
8 |
9 | false
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 | ${project.basedir}/target/classes/
21 | /
22 |
23 | /**/*.class
24 | *.xml
25 | /**/*.xml
26 | META-INF/**/*
27 |
28 |
29 |
30 | ${project.basedir}/src/main/resources
31 | /
32 |
33 | *.yaml
34 | *.py
35 | /**/*.rb
36 | *.properties
37 |
38 |
39 |
40 |
41 |
42 | /
43 | false
44 | true
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/conf/parser.properties:
--------------------------------------------------------------------------------
1 | ### common ###
2 | job.name = mysql-parser
3 | ### kafka data ###
4 | kafka.data.zkserver = 127.0.0.1:2181
5 | #default is /
6 | kafka.data.zkroot = /kafka
7 | kafka.data.tracker.topic = tracker-log-mysql
8 | kafka.data.parser.topic = parser-log-mysql
9 | kafka.data.client.name = kafka-parser
10 | kafka.acks = -1
11 | ### kafka monitor ###
12 | kafka.monitor.zkserver = 127.0.0.1:2181
13 | #default is /
14 | kafka.monitor.zkroot = /kafka
15 | kafka.monitor.topic = parser-monitor
16 | ### zookeeper ###
17 | zookeeper.servers = 127.0.0.1:2181
--------------------------------------------------------------------------------
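A minimal sketch (not part of this repository) of how a handler could read the properties above; it assumes the -Dparser.conf path that src/main/bin/start-server.sh sets, and the ParserPropsExample class name is purely illustrative:

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class ParserPropsExample {
    public static void main(String[] args) throws IOException {
        // start-server.sh passes the config path via -Dparser.conf=<path>
        String path = System.getProperty("parser.conf", "conf/parser.properties");
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(path)) {
            props.load(in);
        }
        // keys come from conf/parser.properties above
        String jobName = props.getProperty("job.name");
        String dataZk = props.getProperty("kafka.data.zkserver");
        String trackerTopic = props.getProperty("kafka.data.tracker.topic");
        System.out.println(jobName + " reads " + trackerTopic + " via zookeeper " + dataZk);
    }
}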
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | mysql-parser
8 | mysql-parser
9 | 1.4.1.6
10 |
11 |
12 |
13 | cloudera
14 | https://repository.cloudera.com/artifactory/cloudera-repos/
15 |
16 |
17 | external-dependencies-repo
18 | https://raw.githubusercontent.com/hackerwin7/external-dependencies-repo/master/
19 |
20 | true
21 | always
22 |
23 |
24 |
25 |
26 |
27 |
28 | org.apache.commons
29 | commons-lang3
30 | 3.4
31 |
32 |
33 | log4j
34 | log4j
35 | 1.2.16
36 |
37 |
38 | org.slf4j
39 | slf4j-log4j12
40 | 1.7.2
41 |
42 |
43 | com.google.protobuf
44 | protobuf-java
45 | 2.5.0
46 |
47 |
48 | org.slf4j
49 | slf4j-api
50 | 1.6.0
51 |
52 |
53 | org.slf4j
54 | jcl-over-slf4j
55 | 1.6.0
56 |
57 |
58 | org.apache.zookeeper
59 | zookeeper
60 | 3.4.5
61 |
62 |
63 | org.slf4j
64 | slf4j-log4j12
65 |
66 |
67 | org.slf4j
68 | slf4j-api
69 |
70 |
71 | jline
72 | jline
73 |
74 |
75 | log4j
76 | log4j
77 |
78 |
79 |
80 |
81 | org.apache.hadoop
82 | hadoop-common
83 | 2.0.0-cdh4.3.0
84 | jar
85 |
86 |
87 | kfs
88 | net.sf.kosmosfs
89 |
90 |
91 |
92 |
93 | org.apache.hadoop
94 | hadoop-auth
95 | 2.0.0-cdh4.3.0
96 |
97 |
98 | org.apache.hbase
99 | hbase
100 | 0.94.6-cdh4.3.0
101 |
102 |
103 |
104 | org.apache.kafka
105 | kafka_2.9.2
106 | 0.8.1.1
107 |
108 |
109 | com.sun.jmx
110 | jmxri
111 |
112 |
113 | javax.jms
114 | jms
115 |
116 |
117 | com.sun.jdmk
118 | jmxtools
119 |
120 |
121 |
122 |
123 |
124 | net.sf.json-lib
125 | json-lib
126 | 2.4
127 | jdk15
128 |
129 |
130 |
131 | org.jyaml
132 | jyaml
133 | 1.3
134 |
135 |
136 |
137 | org.apache.avro
138 | avro
139 | 1.7.4
140 |
141 |
142 |
143 | com.github.hackerwin7
144 | jd-lib
145 | 0.1.3.1
146 |
147 |
148 |
149 |
150 |
151 |
152 | maven-jar-plugin
153 | 2.5
154 |
155 |
156 | true
157 |
158 |
159 | **/parser.properties
160 | **/log4j.properties
161 |
162 |
163 |
164 |
165 |
166 | maven-assembly-plugin
167 | 2.2.1
168 |
169 |
170 | package
171 |
172 | single
173 |
174 |
175 |
176 |
177 |
178 | assembly-magpie.xml
179 | src/main/assembly/assembly.xml
180 |
181 |
182 |
183 | true
184 |
185 |
186 |
187 |
188 |
189 |
190 | org.apache.maven.plugins
191 | maven-source-plugin
192 | 2.1.2
193 |
194 |
195 | attach-sources
196 | verify
197 |
198 | jar-no-fork
199 |
200 |
201 |
202 |
203 |
204 |
205 | org.apache.avro
206 | avro-maven-plugin
207 | 1.7.3
208 |
209 |
210 | generate-sources
211 |
212 | schema
213 |
214 |
215 | src/main/java/com/github/hackerwin7/mysql/parser/protocol/avro/
216 | ${project.basedir}/src/main/java/
217 |
218 |
219 |
220 |
221 |
222 | org.apache.maven.plugins
223 | maven-compiler-plugin
224 | 3.2
225 |
226 | 1.7
227 | 1.7
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
--------------------------------------------------------------------------------
/src/main/assembly/assembly.xml:
--------------------------------------------------------------------------------
1 |
4 | deployer
5 |
6 | tar.gz
7 |
8 | true
9 |
10 |
11 | .
12 | /
13 |
14 | README*
15 |
16 |
17 |
18 | ./src/main/bin
19 | /bin
20 |
21 | **/*
22 |
23 | 0755
24 |
25 |
26 | ./src/main/resources
27 | /conf
28 |
29 | **/*
30 |
31 |
32 |
33 | target
34 | /logs
35 |
36 | **/*
37 |
38 |
39 |
40 |
41 |
42 | lib
43 |
44 | junit:junit
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/src/main/bin/start-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | current_path=$(pwd)
4 | # get the shell's father directory
5 | case "$(uname)" in
6 | Linux)
7 | bin_abs_path=$(readlink -f $(dirname $0))
8 | ;;
9 | *)
10 | bin_abs_path=$(cd $(dirname $0); pwd)
11 | ;;
12 | esac
13 |
14 | base=${bin_abs_path}/..
15 | conf=${base}/conf/parser.properties
16 |
17 | export LANG=en_US.UTF-8
18 | export BASE=$base
19 |
20 | if [ -f $base/bin/parser.pid ] ; then
21 | echo "found parser.pid , please run stop-server.sh first." 2>&2
22 | exit 1
23 | fi
24 |
25 | if [ ! -d $base/logs/parser ] ; then
26 | mkdir -p $base/logs/parser
27 | echo "mkdired $base/logs/parser"
28 | fi
29 |
30 | ## set java path
31 | if [ -z "$JAVA" ] ; then
32 | JAVA=$(which java)
33 | fi
34 |
35 | if [ -z "$JAVA" ] ; then
36 | echo "cannot find a java jdk" 2>&2
37 | exit 1
38 | fi
39 |
40 | case "$#"
41 | in
42 | 0 )
43 | ;;
44 | 1 )
45 | var=$*
46 | if [ -f $var ] ; then
47 | conf=$var
48 | else
49 | echo "the parameter is not correct."
50 | exit
51 | fi;;
52 | 2 )
53 | var=$1
54 | if [ -f $var ] ; then
55 | conf=$var
56 | else
57 | if [ "$1" = "debug" ] ; then
58 | DEBUG_PORT=$2
59 | DEBUG_SUSPEND="n"
60 | JAVA_DEBUG_OPT="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,address=$DEBUG_PORT,server=y,suspend=$DEBUG_SUSPEND"
61 | fi
62 | fi;;
63 | * )
64 | echo "the parameter must be tow or less"
65 | exit;;
66 | esac
67 |
68 | str=$(file $JAVA_HOME/bin/java | grep 64-bit)
69 | if [ -n "$str" ] ; then
70 | JAVA_OPTS="-server -Xms2048m -Xmx3072m -Xmn1024m -XX:SurvivorRatio=2 -XX:PermSize=96m -XX:MaxPermSize=256m -Xss256k -XX:-UseAdaptiveSizePolicy -XX:MaxTenuringThreshold=15 -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSCompactAtFullCollection -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError"
71 | else
72 | JAVA_OPTS="-server -Xms1024m -Xmx1024m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:MaxPermSize=128m"
73 | fi
74 |
75 | JAVA_OPTS=" $JAVA_OPTS -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -Dfile.encoding=UTF-8"
76 | PARSER_OPTS="-DappName=mysql-parser -Dparser.conf=$conf"
77 |
78 | echo conf : $conf
79 |
80 | if [ -e $conf ]
81 | then
82 | for i in $base/lib/*;
83 | do CLASSPATH=$i:"$CLASSPATH";
84 | done
85 | for i in $base/conf/*;
86 | do CLASSPATH=$i:"$CLASSPATH";
87 | done
88 |
89 | echo "cd to $bin_abs_path for workaround relative path"
90 | cd $bin_abs_path
91 |
92 | echo conf : $conf
93 | echo CLASSPATH : $CLASSPATH
94 | cd $base
95 | $JAVA $JAVA_OPTS $JAVA_DEBUG_OPT $PARSER_OPTS -classpath .:$CLASSPATH com.github.hackerwin7.mysql.parser.deployer.LocalParser 1>>$base/logs/parser/console.log 2>&1 &
96 | echo $! > $base/bin/parser.pid
97 |
98 | echo "cd to $current_path for continue"
99 | cd $current_path
100 | else
101 | echo "conf $conf is not exists!"
102 | fi
--------------------------------------------------------------------------------
/src/main/bin/stop-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | cygwin=false
4 | case "$(uname)" in
5 | CYGWIN*)
6 | cygwin=true
7 | ;;
8 | esac
9 |
10 | get_pid() {
11 | STR=$1
12 | PID=$2
13 | if $cygwin ; then
14 | JAVA_CMD="$JAVA_HOME/bin/java"
15 | JAVA_CMD=$(cygpath --path --unix $JAVA_CMD)
16 | JAVA_PID=$(ps | grep $JAVA_CMD | awk '{print $1}')
17 | else
18 | if [ ! -z "$PID" ] ; then
19 | JAVA_PID=$(ps -C java -f --width 1000 | grep "$STR" | grep "$PID" | grep -v grep | awk '{print $2}')
20 | else
21 | JAVA_PID=$(ps -C java -f --width 1000 | grep "$STR" | grep -v grep | awk '{print $2}')
22 | fi
23 | fi
24 | echo $JAVA_PID;
25 | }
26 |
27 | base=$(dirname $0)/..
28 | pidfile=$base/bin/parser.pid
29 | if [ ! -f "$pidfile" ] ; then
30 | echo "parser is not running. exists"
31 | exit
32 | fi
33 |
34 | pid=$(cat $pidfile)
35 | if [ "$pid" == "" ] ; then
36 | pid=$(get_pid "appName=mysql-parser")
37 | fi
38 |
39 | echo -e "$(hostname): stopping parser $pid ... "
40 | kill $pid
41 |
42 | LOOPS=0
43 | while (true);
44 | do
45 | gpid=$(get_pid "appName=mysql-parser" "$pid")
46 | if [ "$gpid" == "" ] ; then
47 | echo "ok! killed!!"
48 | $(rm $pidfile)
49 | break;
50 | fi
51 | let LOOPS=LOOPS+1
52 | sleep 1
53 | done
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/deployer/LocalParser.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.deployer;
2 |
3 | import com.github.hackerwin7.mysql.parser.parser.HandlerKafkaZkLocalPerformance;
4 | import org.apache.log4j.Logger;
5 |
6 | /**
7 | * Created by hp on 15-3-2.
8 | */
9 | public class LocalParser {
10 |
11 | private static Logger logger = Logger.getLogger(LocalParser.class);
12 | private static boolean running = true;
13 |
14 | public static void main(String[] args) throws Exception {
15 | while (true) {
16 | try {
17 | final HandlerKafkaZkLocalPerformance handler = new HandlerKafkaZkLocalPerformance();
18 | handler.prepare("mysql-tracker-json");
19 | Runtime.getRuntime().addShutdownHook(new Thread() {
20 | public void run() {
21 | try {
22 | running = false;
23 | handler.close("mysql-tracker-json");
24 | } catch (Exception e) {
25 | e.printStackTrace();
26 | }
27 | }
28 | });
29 | while(running) {
30 | handler.run();
31 | }
32 | } catch (Throwable e) {
33 | logger.error(e.getMessage(), e);
34 | Thread.sleep(3000);
35 | }
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/hbase/driver/HBaseOperator.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.hbase.driver;
2 |
3 | import org.apache.hadoop.conf.Configuration;
4 | import org.apache.hadoop.hbase.HBaseConfiguration;
5 | import org.apache.hadoop.hbase.KeyValue;
6 | import org.apache.hadoop.hbase.client.*;
7 | import org.apache.hadoop.hbase.util.Bytes;
8 |
9 | import java.io.IOException;
10 | import java.util.ArrayList;
11 | import java.util.HashMap;
12 | import java.util.List;
13 | import java.util.Map;
14 |
15 | /**
16 | * Created by hp on 14-9-17.
17 | */
18 | public class HBaseOperator {
19 |
20 | //hadoop config
21 | private Configuration conf;
22 |
23 | //kinds of htale
24 | private HTable hEventWriter;
25 | private HTable hEventReader;
26 | private HTable hEntryWriter;
27 | private HTable hEntryReader;
28 | private HTable hCheckpointWriter;
29 | private HTable hCheckpointReader;
30 |
31 | //entry table name
32 | private String eventBytesSchemaName = "mysql_event";
33 |
34 | //checkpoint table name
35 | private String checkpointSchemaName = "mysql_checkpoint";
36 |
37 | //minute table name
38 | private String entryDataSchemaName = "mysql_entry";
39 |
40 | //job id
41 | private String mysqlId = "127.0.0.1:3306";
42 |
43 | //public global single rowkey for tracker and parser and per minute row key = single parser + time
44 | public String trackerRowKey = "jd-MysqlTracker";
45 | public String parserRowKey = "jd-MysqlParser";
46 | public String binlogXidCol = "BinlogXid";
47 | public String eventXidCol = "EventXidRowKey";
48 | public String eventRowCol = "EventRowKey";
49 | public String entryRowCol = "EntryRowKey";//the parser's position column in the checkpoint table, and also the parser's entry column in the entry (hbase) table
50 | public String eventBytesCol = "eventBytes";
51 |
52 | //constructor and getter and setter
53 | public HBaseOperator() {
54 | conf = HBaseConfiguration.create();
55 | //import the xml configuration
56 | //conf.addResource("conf/hbase-site.xml");
57 | conf.set("hbase.rootdir","hdfs://localhost:9000/hbase");
58 | conf.set("hbase.cluster.distributed","true");
59 | conf.set("hbase.zookeeper.quorum","localhost");
60 | conf.set("hbase.zookeeper.property.clientPort","2181");
61 | conf.set("dfs.socket.timeout", "180000");
62 |
63 |
64 | }
65 |
66 | public HBaseOperator(String myId) {
67 | conf = HBaseConfiguration.create();
68 | //import the xml configuration
69 | //conf.addResource("conf/hbase-site.xml");
70 | conf.set("hbase.rootdir","hdfs://localhost:9000/hbase");
71 | conf.set("hbase.cluster.distributed","true");
72 | conf.set("hbase.zookeeper.quorum","localhost");
73 | conf.set("hbase.zookeeper.property.clientPort","2181");
74 | conf.set("dfs.socket.timeout", "180000");
75 |
76 | mysqlId = myId;
77 | trackerRowKey = trackerRowKey + "###" + mysqlId;
78 | parserRowKey = parserRowKey + "###" + mysqlId;
79 | }
80 |
81 | public void connect() {
82 | try {
83 | hEventReader = new HTable(conf, eventBytesSchemaName);
84 | hEventWriter = new HTable(conf, eventBytesSchemaName);
85 | hEntryWriter = new HTable(conf, entryDataSchemaName);
86 | hEntryReader = new HTable(conf, entryDataSchemaName);
87 | hCheckpointWriter = new HTable(conf, checkpointSchemaName);
88 | hCheckpointReader = new HTable(conf, checkpointSchemaName);
89 | } catch (IOException e) {
90 | e.printStackTrace();
91 | }
92 | }
93 |
94 | public void disconnect() {
95 | try {
96 | hEventReader.close();
97 | hEventWriter.close();
98 | hEntryReader.close();
99 | hEntryWriter.close();
100 | hCheckpointReader.close();
101 | hCheckpointWriter.close();
102 | } catch (IOException e) {
103 | e.printStackTrace();
104 | }
105 | }
106 |
107 | private HTable getHTableWriterBySchema(String schema) {
108 | if(schema.equals(eventBytesSchemaName)) return hEventWriter;
109 | if(schema.equals(entryDataSchemaName)) return hEntryWriter;
110 | if(schema.equals(checkpointSchemaName)) return hCheckpointWriter;
111 | return null;
112 | }
113 |
114 | private HTable getHTableReaderBySchema(String schema) {
115 | if(schema.equals(eventBytesSchemaName)) return hEventReader;
116 | if(schema.equals(entryDataSchemaName)) return hEntryReader;
117 | if(schema.equals(checkpointSchemaName)) return hCheckpointReader;
118 | return null;
119 | }
120 |
121 | public Configuration getConf() {
122 | return conf;
123 | }
124 |
125 | public String getEventBytesSchemaName() {
126 | return eventBytesSchemaName;
127 | }
128 |
129 | public String getCheckpointSchemaName() {
130 | return checkpointSchemaName;
131 | }
132 |
133 | public String getEntryDataSchemaName() {
134 | return entryDataSchemaName;
135 | }
136 |
137 | //single get: one row with multiple columns, so multiple byte[] values
138 | public List<byte[]> getHBaseData(byte[] rowKey, String schemaName) throws IOException {
139 | HTable hTable = new HTable(conf, schemaName);
140 | Get get = new Get(rowKey);
141 | get.addFamily(getFamily(schemaName));
142 | Result result = hTable.get(get);
143 | List<byte[]> colValue = new ArrayList<byte[]>();
144 | for(KeyValue kv:result.raw()){
145 | colValue.add(kv.getValue());
146 | }
147 | hTable.close();
148 | return(colValue);
149 | }
150 |
151 | //variable get data
152 | public Result getHBaseData(Get get, String schemaName) throws IOException {
153 | HTable hTable = getHTableReaderBySchema(schemaName);
154 | Result result = hTable.get(get);
155 | return(result);
156 | }
157 |
158 | public boolean existHBaseData(Get get, String schemaName) throws IOException {
159 | HTable hTable = new HTable(conf, schemaName);
160 | return hTable.exists(get);
161 | }
162 |
163 | public Result[] getHBaseData(List<Get> gets, String schemaName) throws IOException {
164 | HTable hTable = new HTable(conf, schemaName);
165 | Result[] results = hTable.get(gets);
166 | return results;
167 | }
168 |
169 | //variable scan data
170 | public ResultScanner getHBaseData(Scan scan, String schemaName) throws IOException {
171 | HTable hTable = getHTableReaderBySchema(schemaName);
172 | ResultScanner results = hTable.getScanner(scan);
173 | return(results);
174 | }
175 |
176 | //batched get data: multiple rows, each with multiple columns,
177 | //so the result is multiple rows of multiple byte[] values
178 | public Map<byte[], List<byte[]>> getHBaseData(byte[] startRowKey, byte[] endRowKey, String schemaName) throws IOException{
179 | HTable hTable = new HTable(conf, schemaName);
180 | Map<byte[], List<byte[]>> subTable = new HashMap<byte[], List<byte[]>>();
181 | Scan scan = new Scan();
182 | scan.addFamily(getFamily(schemaName));
183 | scan.setStartRow(startRowKey);
184 | //if endRowKey is greater than every rowKey in hbase, the scan exceeds the boundary; will it return a null colValue?
185 | //!!! maybe a potential bug
186 | scan.setStopRow(endRowKey);//the stop row is not included, so endRowKey = startRowKey + insertRowNum
187 | //!!! maybe a potential bug
188 | scan.setBatch(1000);
189 | ResultScanner results = hTable.getScanner(scan);
190 | for(Result result : results){
191 | byte[] elemRowKey = result.getRow();
192 | List<byte[]> elemColValue = new ArrayList<byte[]>();
193 | for(KeyValue kv : result.raw()){
194 | elemColValue.add(kv.getValue());
195 | }
196 | subTable.put(elemRowKey,elemColValue);
197 | }
198 | hTable.close();
199 | return(subTable);
200 | }
201 |
202 | //single or batched put data
203 | public void putHBaseData(List<byte[]> rowKeys, List<byte[]> colValues, String schemaName) throws IOException{
204 | HTable hTable = new HTable(conf, schemaName);
205 | List<Put> putList = new ArrayList<Put>();
206 | for(int i=0;i<=rowKeys.size()-1;i++){
207 | Put put = new Put(rowKeys.get(i));
208 | if(i>colValues.size()-1) put.add(getFamily(schemaName),null,null);
209 | else put.add(getFamily(schemaName),null,colValues.get(i));
210 | putList.add(put);
211 | }
212 | hTable.put(putList);
213 | hTable.close();
214 | }
215 |
216 | public void putHBaseData(List<byte[]> rowKeys, Map<byte[], List<byte[]>> subTable, String schemaName) throws IOException{
217 | HTable hTable = new HTable(conf, schemaName);
218 | List<Put> putList = new ArrayList<Put>();
219 | for(int i=0;i<=rowKeys.size()-1;i++){
220 | Put put = new Put(rowKeys.get(i));
221 | List<byte[]> columns = getColumn(schemaName,rowKeys.get(i).toString());
222 | int j = 0;
223 | for(byte[] bytes : subTable.get(rowKeys.get(i))){
224 | put.add(getFamily(schemaName),columns.get(j),bytes);
225 | j++;
226 | }
227 | putList.add(put);
228 | }
229 | hTable.put(putList);
230 | hTable.close();
231 | }
232 |
233 | public void putHBaseData(List<Put> puts, String schemaName) throws IOException{
234 | HTable hTable = getHTableWriterBySchema(schemaName);
235 | hTable.put(puts);
236 | }
237 |
238 | public void putHBaseData(Put put, String schemaName) throws IOException{
239 | HTable hTable = getHTableWriterBySchema(schemaName);
240 | hTable.put(put);
241 | }
242 |
243 | public void deleteHBaseData(Delete del, String schemaName) throws IOException {
244 | HTable hTable = getHTableWriterBySchema(schemaName);
245 | hTable.delete(del);
246 | }
247 |
248 | public byte[] getFamily(String schemaName){
249 | return(Bytes.toBytes("d"));
250 | }
251 | public byte[] getFamily(){
252 | return(Bytes.toBytes("d"));
253 | }
254 |
255 | private List<byte[]> getColumn(String schemaName, String rowKey){
256 | String[] columns = null;
257 | List<byte[]> columnBytes = new ArrayList<byte[]>();
258 | if(schemaName.equals(entryDataSchemaName)){
259 | columns = new String[]{"EventBytes"};
260 | }
261 | else if(schemaName.equals(checkpointSchemaName)){
262 | if(rowKey.contains("tracker")) {
263 | columns = new String[]{"BinlogXid", "EventXidRowKey"};
264 | }
265 | else if(rowKey.contains("com/github/hackerwin7/mysql/parser/parser")){
266 | columns = new String[]{"EventRowKey","EntryRowKey"};
267 | }
268 | else{
269 | columns = null;
270 | }
271 | }
272 | for(String col : columns){
273 | columnBytes.add(Bytes.toBytes(col));
274 | }
275 | return(columnBytes);
276 | }
277 |
278 | }
279 |
--------------------------------------------------------------------------------
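A hypothetical usage sketch (not part of this repository) for HBaseOperator: it writes a parser checkpoint with putHBaseData(Put, schema) and reads it back with getHBaseData(Get, schema), using only the public fields and methods defined above; the mysql id and stored value are placeholders.

import com.github.hackerwin7.mysql.parser.hbase.driver.HBaseOperator;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseOperatorExample {
    public static void main(String[] args) throws Exception {
        HBaseOperator op = new HBaseOperator("127.0.0.1:3306"); // suffixes the tracker/parser row keys with this id
        op.connect(); // opens the reader/writer HTables
        try {
            // store an entry position under the parser checkpoint row
            Put put = new Put(Bytes.toBytes(op.parserRowKey));
            put.add(op.getFamily(), Bytes.toBytes(op.entryRowCol), Bytes.toBytes("0"));
            op.putHBaseData(put, op.getCheckpointSchemaName());

            // read the checkpoint row back
            Get get = new Get(Bytes.toBytes(op.parserRowKey));
            Result result = op.getHBaseData(get, op.getCheckpointSchemaName());
            System.out.println(result.isEmpty() ? "no checkpoint yet" : "checkpoint found");
        } finally {
            op.disconnect();
        }
    }
}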
/src/main/java/com/github/hackerwin7/mysql/parser/hbase/utils/HData.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.hbase.utils;
2 |
3 | /**
4 | * Created by hp on 14-11-25.
5 | */
6 | public class HData {
7 |
8 | public byte[] rowKey;
9 |
10 | public byte[] rowData;
11 |
12 | public HData(byte[] key, byte[] data) {
13 | rowData = data;
14 | rowKey =key;
15 | }
16 |
17 | }
18 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/driver/consumer/KafkaNoStaticReceiver.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.driver.consumer;
2 |
3 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
4 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaNoStaticConf;
5 | import kafka.api.FetchRequest;
6 | import kafka.api.FetchRequestBuilder;
7 | import kafka.api.PartitionOffsetRequestInfo;
8 | import kafka.cluster.Broker;
9 | import kafka.common.ErrorMapping;
10 | import kafka.common.TopicAndPartition;
11 | import kafka.javaapi.FetchResponse;
12 | import kafka.javaapi.OffsetRequest;
13 | import kafka.javaapi.OffsetResponse;
14 | import kafka.javaapi.PartitionMetadata;
15 | import kafka.javaapi.TopicMetadata;
16 | import kafka.javaapi.TopicMetadataRequest;
17 | import kafka.javaapi.TopicMetadataResponse;
18 | import kafka.javaapi.consumer.SimpleConsumer;
19 | import kafka.message.MessageAndOffset;
20 | import org.slf4j.Logger;
21 | import org.slf4j.LoggerFactory;
22 |
23 | import java.nio.ByteBuffer;
24 | import java.util.ArrayList;
25 | import java.util.Collections;
26 | import java.util.HashMap;
27 | import java.util.List;
28 | import java.util.Map;
29 | import java.util.concurrent.BlockingQueue;
30 | import java.util.concurrent.LinkedBlockingQueue;
31 |
32 | /**
33 | * Created by hp on 15-3-18.
34 | */
35 | public class KafkaNoStaticReceiver {
36 |
37 | private Logger logger = LoggerFactory.getLogger(KafkaNoStaticReceiver.class);
38 | private KafkaNoStaticConf conf;
39 | private List<String> replicaBrokers = new ArrayList<String>();
40 | private List<Integer> replicaPorts = new ArrayList<Integer>();
41 | public static int retry = 3;
42 | private int MAXLEN = 10000;
43 | private SimpleConsumer consumer;
44 | public BlockingQueue<KafkaMetaMsg> msgQueue = new LinkedBlockingQueue<KafkaMetaMsg>(MAXLEN);//outer interface: callers read data from this queue.
45 | public boolean isFetch = true;
46 |
47 | public KafkaNoStaticReceiver(KafkaNoStaticConf cnf) {
48 | conf = cnf;
49 | }
50 |
51 | public KafkaNoStaticReceiver(KafkaNoStaticConf cnf, int qSize) {
52 | conf = cnf;
53 | MAXLEN = qSize;
54 | }
55 |
56 |
57 |
58 | public PartitionMetadata findLeader(List<String> brokers, int port, String topic, int partition) {
59 | PartitionMetadata returnData = null;
60 | loop:
61 | for (String broker : brokers) {
62 | SimpleConsumer consumer = new SimpleConsumer(broker, port, 100000, 64 * 1024, "leader");
63 | List<String> topics = Collections.singletonList(topic);
64 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
65 | TopicMetadataResponse rep = consumer.send(req);
66 | List<TopicMetadata> topicMetadatas = rep.topicsMetadata();
67 | for (TopicMetadata topicMetadata : topicMetadatas) {
68 | for (PartitionMetadata part : topicMetadata.partitionsMetadata()) {
69 | if(part.partitionId() == partition) {
70 | returnData = part;
71 | break loop;
72 | }
73 | }
74 | }
75 | }
76 | if(returnData != null) {
77 | replicaBrokers.clear();
78 | for (Broker broker : returnData.replicas()) {
79 | replicaBrokers.add(broker.host());
80 | }
81 | }
82 | return returnData;
83 | }
84 |
85 | //two List length must be equal
86 | public PartitionMetadata findLeader(List<String> brokers, List<Integer> ports, String topic, int partition) {
87 | PartitionMetadata returnData = null;
88 | loop:
89 | for (int i = 0; i <= brokers.size() - 1; i++) {
90 | SimpleConsumer consumer = new SimpleConsumer(brokers.get(i), ports.get(i), 100000, 64 * 1024, "leader");
91 | List<String> topics = Collections.singletonList(topic);
92 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
93 | TopicMetadataResponse rep = consumer.send(req);
94 | List<TopicMetadata> topicMetadatas = rep.topicsMetadata();
95 | for (TopicMetadata topicMetadata : topicMetadatas) {
96 | for (PartitionMetadata part : topicMetadata.partitionsMetadata()) {
97 | if(part.partitionId() == partition) {
98 | returnData = part;
99 | break loop;
100 | }
101 | }
102 | }
103 | }
104 | if(returnData != null) {
105 | replicaBrokers.clear();
106 | replicaPorts.clear();
107 | for (Broker broker : returnData.replicas()) {
108 | replicaBrokers.add(broker.host());
109 | replicaPorts.add(broker.port());
110 | }
111 | }
112 | return returnData;
113 | }
114 |
115 | public long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
116 | TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
117 | Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
118 | requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
119 | OffsetRequest req = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
120 | OffsetResponse rep = consumer.getOffsetsBefore(req);
121 | if(rep.hasError()) {
122 | logger.error("Error fetching offset data from the Broker. Reason: " + rep.errorCode(topic, partition));
123 | return -1;
124 | }
125 | long[] offsets = rep.offsets(topic, partition);
126 | return offsets[0];
127 | }
128 |
129 | public String findNewLeader(String oldLeader, String topic, int partition, int port) throws Exception {
130 | for(int i = 0; i < retry; i++) {
131 | boolean goToSleep = false;
132 | PartitionMetadata metadata = findLeader(replicaBrokers, port, topic, partition);
133 | if(metadata == null) {
134 | goToSleep = true;
135 | } else if (metadata.leader() == null) {
136 | goToSleep = true;
137 | } else if(oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
138 | goToSleep = true;
139 | } else {
140 | return metadata.leader().host();
141 | }
142 | if(goToSleep) {
143 | delay(1);
144 | }
145 | }
146 | logger.error("Unable to find new leader after Broker failure. Exiting");
147 | throw new Exception("Unable to find new leader after Broker failure. Exiting");
148 | }
149 |
150 | public String findNewLeader(String oldLeader, String topic, int partition) throws Exception {
151 | for(int i = 0; i < retry; i++) {
152 | boolean goToSleep = false;
153 | PartitionMetadata metadata = findLeader(replicaBrokers, replicaPorts, topic, partition);
154 | if(metadata == null) {
155 | goToSleep = true;
156 | } else if (metadata.leader() == null) {
157 | goToSleep = true;
158 | } else if(oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
159 | goToSleep = true;
160 | } else {
161 | return metadata.leader().host();
162 | }
163 | if(goToSleep) {
164 | delay(1);
165 | }
166 | }
167 | logger.error("Unable to find new leader after Broker failure. Exiting");
168 | throw new Exception("Unable to find new leader after Broker failure. Exiting");
169 | }
170 |
171 | public void run() {
172 | PartitionMetadata metadata = findLeader(conf.brokerSeeds, conf.portList, conf.topic, conf.partition);
173 | if(metadata == null) {
174 | logger.error("Can't find metadata for Topic and Partition. Existing");
175 | return;
176 | }
177 | if(metadata.leader() == null) {
178 | logger.error("Can't find Leader for Topic and Partition. Existing");
179 | return;
180 | }
181 | String leadBroker = metadata.leader().host();
182 | int leadPort = metadata.leader().port();
183 | String clientName = "client_" + conf.topic + conf.partition;
184 | consumer = new SimpleConsumer(leadBroker, leadPort, 100000, 64 * 1024, clientName);
185 | long readOffset = getLastOffset(consumer, conf.topic, conf.partition, kafka.api.OffsetRequest.LatestTime(), clientName);
186 | int numErr = 0;
187 | while (isFetch) {
188 | if(consumer == null) {
189 | consumer = new SimpleConsumer(leadBroker, leadPort, 100000, 64 * 1024, clientName);
190 | }
191 | FetchRequest req = new FetchRequestBuilder()
192 | .clientId(clientName)
193 | .addFetch(conf.topic, conf.partition, readOffset, conf.readBufferSize)
194 | .build();
195 | FetchResponse rep = consumer.fetch(req);
196 | if(rep.hasError()) {
197 | numErr++;
198 | short code = rep.errorCode(conf.topic, conf.partition);
199 | logger.warn("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
200 | if(numErr > 5) {
201 | logger.error("5 errors occurred existing the fetching");
202 | break;
203 | }
204 | if(code == ErrorMapping.OffsetOutOfRangeCode()) {
205 | readOffset = getLastOffset(consumer, conf.topic, conf.partition, kafka.api.OffsetRequest.LatestTime(), clientName);
206 | continue;
207 | }
208 | consumer.close();
209 | consumer = null;
210 | try {
211 | leadBroker = findNewLeader(leadBroker, conf.topic, conf.partition);
212 | } catch (Exception e) {
213 | logger.error("find lead broker failed");
214 | e.printStackTrace();
215 | break;
216 | }
217 | continue;
218 | }
219 | numErr = 0;
220 | long numRead=0;
221 | for(MessageAndOffset messageAndOffset : rep.messageSet(conf.topic, conf.partition)) {
222 | long currentOffset = messageAndOffset.offset();
223 | if(currentOffset < readOffset) {
224 | logger.info("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
225 | continue;
226 | }
227 | readOffset = messageAndOffset.nextOffset();
228 | ByteBuffer payload = messageAndOffset.message().payload();
229 | byte[] bytes = new byte[payload.limit()];
230 | payload.get(bytes);
231 | long offset = messageAndOffset.offset();
232 | KafkaMetaMsg metaMsg = new KafkaMetaMsg(bytes, offset);
233 | try {
234 | msgQueue.put(metaMsg);
235 | } catch (InterruptedException e) {
236 | logger.error(e.getMessage());
237 | e.printStackTrace();
238 | }
239 | numRead++;
240 | }
241 | if(numRead == 0) {
242 | delay(1);//block
243 | }
244 | }
245 | }
246 |
247 | private void delay(int sec) {
248 | try {
249 | Thread.sleep(sec * 1000);
250 | } catch (InterruptedException e) {
251 | e.printStackTrace();
252 | }
253 | }
254 |
255 | public boolean isConnected() {
256 | SimpleConsumer hconsumer = null;
257 | try {
258 | for (int i = 0; i <= conf.brokerSeeds.size() - 1; i++) {
259 | hconsumer = new SimpleConsumer(conf.brokerSeeds.get(i), conf.portList.get(i), 100000, 64 * 1024, "heartBeat");
260 | List<String> topics = Collections.singletonList(conf.topic);
261 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
262 | TopicMetadataResponse rep = hconsumer.send(req);
263 | }
264 | } catch (Exception e) {
265 | e.printStackTrace();
266 | return false;
267 | } finally {
268 | if(hconsumer != null) hconsumer.close();
269 | }
270 | return true;
271 | }
272 |
273 | }
274 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/driver/consumer/KafkaReceiver.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.driver.consumer;
2 |
3 |
4 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
5 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
6 | import kafka.api.FetchRequest;
7 | import kafka.api.FetchRequestBuilder;
8 | import kafka.api.PartitionOffsetRequestInfo;
9 | import kafka.cluster.Broker;
10 | import kafka.common.ErrorMapping;
11 | import kafka.common.TopicAndPartition;
12 | import kafka.javaapi.FetchResponse;
13 | import kafka.javaapi.OffsetRequest;
14 | import kafka.javaapi.OffsetResponse;
15 | import kafka.javaapi.PartitionMetadata;
16 | import kafka.javaapi.TopicMetadata;
17 | import kafka.javaapi.TopicMetadataRequest;
18 | import kafka.javaapi.TopicMetadataResponse;
19 | import kafka.javaapi.consumer.SimpleConsumer;
20 | import kafka.message.MessageAndOffset;
21 | import org.slf4j.Logger;
22 | import org.slf4j.LoggerFactory;
23 |
24 | import java.nio.ByteBuffer;
25 | import java.util.ArrayList;
26 | import java.util.Collections;
27 | import java.util.HashMap;
28 | import java.util.List;
29 | import java.util.Map;
30 | import java.util.concurrent.BlockingQueue;
31 | import java.util.concurrent.LinkedBlockingQueue;
32 |
33 | /**
34 | * Created by hp on 14-12-12.
35 | */
36 |
37 | /**
38 | * while(isFetch) {
39 | * request req;
40 | * response rep = consumer.fetch(req);
41 | * getMessage(rep);
42 | * }
43 | * */
44 | public class KafkaReceiver extends Thread {
45 |
46 | private Logger logger = LoggerFactory.getLogger(KafkaReceiver.class);
47 | private KafkaConf conf;
48 | private List<String> replicaBrokers = new ArrayList<String>();
49 | private List<Integer> replicaPorts = new ArrayList<Integer>();
50 | public static int retry = 3;
51 | private int MAXLEN = 10000;
52 | private SimpleConsumer consumer;
53 | public BlockingQueue<KafkaMetaMsg> msgQueue = new LinkedBlockingQueue<KafkaMetaMsg>(MAXLEN);//outer interface: callers read data from this queue.
54 | public boolean isFetch = true;
55 |
56 | public KafkaReceiver(KafkaConf cnf) {
57 | conf = cnf;
58 | }
59 |
60 | public KafkaReceiver(KafkaConf cnf, int qSize) {
61 | conf = cnf;
62 | MAXLEN = qSize;
63 | }
64 |
65 |
66 |
67 | public PartitionMetadata findLeader(List<String> brokers, int port, String topic, int partition) {
68 | PartitionMetadata returnData = null;
69 | loop:
70 | for (String broker : brokers) {
71 | SimpleConsumer consumer = new SimpleConsumer(broker, port, 100000, 64 * 1024, "leader");
72 | List<String> topics = Collections.singletonList(topic);
73 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
74 | TopicMetadataResponse rep = consumer.send(req);
75 | List<TopicMetadata> topicMetadatas = rep.topicsMetadata();
76 | for (TopicMetadata topicMetadata : topicMetadatas) {
77 | for (PartitionMetadata part : topicMetadata.partitionsMetadata()) {
78 | if(part.partitionId() == partition) {
79 | returnData = part;
80 | break loop;
81 | }
82 | }
83 | }
84 | }
85 | if(returnData != null) {
86 | replicaBrokers.clear();
87 | for (Broker broker : returnData.replicas()) {
88 | replicaBrokers.add(broker.host());
89 | }
90 | }
91 | return returnData;
92 | }
93 |
94 | //two List length must be equal
95 | public PartitionMetadata findLeader(List<String> brokers, List<Integer> ports, String topic, int partition) {
96 | PartitionMetadata returnData = null;
97 | loop:
98 | for (int i = 0; i <= brokers.size() - 1; i++) {
99 | SimpleConsumer consumer = new SimpleConsumer(brokers.get(i), ports.get(i), 100000, 64 * 1024, "leader");
100 | List<String> topics = Collections.singletonList(topic);
101 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
102 | TopicMetadataResponse rep = consumer.send(req);
103 | List<TopicMetadata> topicMetadatas = rep.topicsMetadata();
104 | for (TopicMetadata topicMetadata : topicMetadatas) {
105 | for (PartitionMetadata part : topicMetadata.partitionsMetadata()) {
106 | if(part.partitionId() == partition) {
107 | returnData = part;
108 | break loop;
109 | }
110 | }
111 | }
112 | }
113 | if(returnData != null) {
114 | replicaBrokers.clear();
115 | replicaPorts.clear();
116 | for (Broker broker : returnData.replicas()) {
117 | replicaBrokers.add(broker.host());
118 | replicaPorts.add(broker.port());
119 | }
120 | }
121 | return returnData;
122 | }
123 |
124 | public long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
125 | TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
126 | Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
127 | requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
128 | OffsetRequest req = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
129 | OffsetResponse rep = consumer.getOffsetsBefore(req);
130 | if(rep.hasError()) {
131 | logger.error("Error fetching offset data from the Broker. Reason: " + rep.errorCode(topic, partition));
132 | return -1;
133 | }
134 | long[] offsets = rep.offsets(topic, partition);
135 | return offsets[0];
136 | }
137 |
138 | public String findNewLeader(String oldLeader, String topic, int partition, int port) throws Exception {
139 | for(int i = 0; i < retry; i++) {
140 | boolean goToSleep = false;
141 | PartitionMetadata metadata = findLeader(replicaBrokers, port, topic, partition);
142 | if(metadata == null) {
143 | goToSleep = true;
144 | } else if (metadata.leader() == null) {
145 | goToSleep = true;
146 | } else if(oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
147 | goToSleep = true;
148 | } else {
149 | return metadata.leader().host();
150 | }
151 | if(goToSleep) {
152 | delay(1);
153 | }
154 | }
155 | logger.error("Unable to find new leader after Broker failure. Exiting");
156 | throw new Exception("Unable to find new leader after Broker failure. Exiting");
157 | }
158 |
159 | public String findNewLeader(String oldLeader, String topic, int partition) throws Exception {
160 | for(int i = 0; i < retry; i++) {
161 | boolean goToSleep = false;
162 | PartitionMetadata metadata = findLeader(replicaBrokers, replicaPorts, topic, partition);
163 | if(metadata == null) {
164 | goToSleep = true;
165 | } else if (metadata.leader() == null) {
166 | goToSleep = true;
167 | } else if(oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
168 | goToSleep = true;
169 | } else {
170 | return metadata.leader().host();
171 | }
172 | if(goToSleep) {
173 | delay(1);
174 | }
175 | }
176 | logger.error("Unable to find new leader after Broker failure. Exiting");
177 | throw new Exception("Unable to find new leader after Broker failure. Exiting");
178 | }
179 |
180 | public void run() {
181 | PartitionMetadata metadata = findLeader(conf.brokerSeeds, conf.portList, conf.topic, conf.partition);
182 | if(metadata == null) {
183 | logger.error("Can't find metadata for Topic and Partition. Existing");
184 | return;
185 | }
186 | if(metadata.leader() == null) {
187 | logger.error("Can't find Leader for Topic and Partition. Existing");
188 | return;
189 | }
190 | String leadBroker = metadata.leader().host();
191 | int leadPort = metadata.leader().port();
192 | String clientName = "client_" + conf.topic + conf.partition;
193 | consumer = new SimpleConsumer(leadBroker, leadPort, 100000, 64 * 1024, clientName);
194 | long readOffset = getLastOffset(consumer, conf.topic, conf.partition, kafka.api.OffsetRequest.LatestTime(), clientName);
195 | int numErr = 0;
196 | while (isFetch) {
197 | if(consumer == null) {
198 | consumer = new SimpleConsumer(leadBroker, leadPort, 100000, 64 * 1024, clientName);
199 | }
200 | FetchRequest req = new FetchRequestBuilder()
201 | .clientId(clientName)
202 | .addFetch(conf.topic, conf.partition, readOffset, conf.readBufferSize)
203 | .build();
204 | FetchResponse rep = consumer.fetch(req);
205 | if(rep.hasError()) {
206 | numErr++;
207 | short code = rep.errorCode(conf.topic, conf.partition);
208 | logger.warn("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
209 | if(numErr > 5) {
210 | logger.error("5 errors occurred existing the fetching");
211 | break;
212 | }
213 | if(code == ErrorMapping.OffsetOutOfRangeCode()) {
214 | readOffset = getLastOffset(consumer, conf.topic, conf.partition, kafka.api.OffsetRequest.LatestTime(), clientName);
215 | continue;
216 | }
217 | consumer.close();
218 | consumer = null;
219 | try {
220 | leadBroker = findNewLeader(leadBroker, conf.topic, conf.partition);
221 | } catch (Exception e) {
222 | logger.error("find lead broker failed");
223 | e.printStackTrace();
224 | break;
225 | }
226 | continue;
227 | }
228 | numErr = 0;
229 | long numRead=0;
230 | for(MessageAndOffset messageAndOffset : rep.messageSet(conf.topic, conf.partition)) {
231 | long currentOffset = messageAndOffset.offset();
232 | if(currentOffset < readOffset) {
233 | logger.info("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
234 | continue;
235 | }
236 | readOffset = messageAndOffset.nextOffset();
237 | ByteBuffer payload = messageAndOffset.message().payload();
238 | byte[] bytes = new byte[payload.limit()];
239 | payload.get(bytes);
240 | long offset = messageAndOffset.offset();
241 | KafkaMetaMsg metaMsg = new KafkaMetaMsg(bytes, offset);
242 | try {
243 | msgQueue.put(metaMsg);
244 | } catch (InterruptedException e) {
245 | logger.error(e.getMessage());
246 | e.printStackTrace();
247 | }
248 | numRead++;
249 | }
250 | if(numRead == 0) {
251 | delay(1);//block
252 | }
253 | }
254 | }
255 |
256 | private void delay(int sec) {
257 | try {
258 | Thread.sleep(sec * 1000);
259 | } catch (InterruptedException e) {
260 | e.printStackTrace();
261 | }
262 | }
263 |
264 | public boolean isConnected() {
265 | SimpleConsumer hconsumer = null;
266 | try {
267 | for (int i = 0; i <= conf.brokerSeeds.size() - 1; i++) {
268 | hconsumer = new SimpleConsumer(conf.brokerSeeds.get(i), conf.portList.get(i), 100000, 64 * 1024, "heartBeat");
269 | List<String> topics = Collections.singletonList(conf.topic);
270 | TopicMetadataRequest req = new TopicMetadataRequest(topics);
271 | TopicMetadataResponse rep = hconsumer.send(req);
272 | }
273 | } catch (Exception e) {
274 | e.printStackTrace();
275 | return false;
276 | } finally {
277 | if(hconsumer != null) hconsumer.close();
278 | }
279 | return true;
280 | }
281 | }
282 |
--------------------------------------------------------------------------------
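A hypothetical wiring sketch (not part of this repository) for KafkaReceiver: since the class extends Thread and exposes msgQueue and isFetch as public fields, a caller can start it and drain messages from the queue; the broker host, port, and topic values below are placeholders.

import com.github.hackerwin7.mysql.parser.kafka.driver.consumer.KafkaReceiver;
import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;

public class KafkaReceiverExample {
    public static void main(String[] args) throws Exception {
        KafkaConf conf = new KafkaConf();
        conf.brokerSeeds.add("127.0.0.1"); // seed broker hosts; ports are kept separately
        conf.portList.add(9092);
        conf.topic = "tracker-log-mysql";
        conf.partition = 0;

        KafkaReceiver receiver = new KafkaReceiver(conf);
        receiver.start(); // run() finds the partition leader and fetches in the background

        for (int i = 0; i < 10; i++) {
            // blocks until the fetch loop enqueues a message; each KafkaMetaMsg
            // carries the payload bytes and the message offset
            KafkaMetaMsg meta = receiver.msgQueue.take();
            System.out.println("received message " + (i + 1));
        }
        receiver.isFetch = false; // let the fetch loop exit
    }
}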
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/driver/producer/GenericKafkaSender.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.driver.producer;
2 |
3 | import kafka.javaapi.producer.Producer;
4 | import kafka.producer.KeyedMessage;
5 | import kafka.producer.ProducerConfig;
6 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | import java.util.List;
11 | import java.util.Properties;
12 |
13 | /**
14 | * Created by hp on 15-1-8.
15 | */
16 | public class GenericKafkaSender {
17 |
18 | private Logger logger = LoggerFactory.getLogger(GenericKafkaSender.class);
19 |
20 | private KafkaConf conf;
21 | private Producer<String, byte[]> producer;
22 | private int retrys = 100;
23 |
24 | public GenericKafkaSender(KafkaConf cf) {
25 | conf = cf;
26 | }
27 |
28 | public void connect() {
29 | Properties prop = new Properties();
30 | prop.put("metadata.broker.list", conf.brokerList);
31 | prop.put("serializer.class", conf.serializer);//msg is string
32 | prop.put("key.serializer.class", conf.keySerializer);
33 | prop.put("partitioner.class", conf.partitioner);
34 | prop.put("request.required.acks", conf.acks);
35 | ProducerConfig pConfig = new ProducerConfig(prop);
36 | producer = new Producer<String, byte[]>(pConfig);
37 | }
38 |
39 | public void sendKeyMsg(List<KeyedMessage<String, byte[]>> keyMsgs) {
40 | blockSend(keyMsgs);
41 | }
42 | public void sendKeyMsg(KeyedMessage<String, byte[]> km) {
43 | blockSend(km);
44 | }
45 |
46 | public void blockSend(List<KeyedMessage<String, byte[]>> keyMsgs) {
47 | boolean isAck = false;
48 | int retryKafka = 0;
49 | while (!isAck) {
50 | if(retryKafka >= retrys) {
51 | reconnect();
52 | logger.warn("retry times out, reconnect the kafka server......");
53 | retryKafka = 0;
54 | }
55 | retryKafka++;
56 | try {
57 | producer.send(keyMsgs);
58 | isAck = true;
59 | } catch (Exception e) {
60 | logger.warn("retrying sending... Exception:" + e.getMessage());
61 | delay(3);
62 | }
63 | }
64 | }
65 |
66 | public void blockSend(KeyedMessage<String, byte[]> keyMsg) {
67 | boolean isAck = false;
68 | int retryKafka = 0;
69 | while (!isAck) {
70 | if(retryKafka >= retrys) {
71 | reconnect();
72 | logger.warn("retry times out, reconnect the kafka server......");
73 | retryKafka = 0;
74 | }
75 | retryKafka++;
76 | try {
77 | producer.send(keyMsg);
78 | isAck = true;
79 | } catch (Exception e) {
80 | logger.warn("retrying sending... Exception:" + e.getMessage());
81 | delay(3);
82 | }
83 | }
84 | }
85 |
86 | private void delay(int sec) {
87 | try {
88 | Thread.sleep(sec * 1000);
89 | } catch (InterruptedException e) {
90 | e.printStackTrace();
91 | }
92 | }
93 |
94 | public void close() {
95 | if(producer != null) producer.close();
96 | }
97 |
98 | public void reconnect() {
99 | close();
100 | connect();
101 | }
102 |
103 | public boolean isConnected() {
104 | Properties prop = new Properties();
105 | prop.put("metadata.broker.list", conf.brokerList);
106 | prop.put("serializer.class", conf.serializer);//msg is string
107 | prop.put("key.serializer.class", conf.keySerializer);
108 | prop.put("partitioner.class", conf.partitioner);
109 | prop.put("request.required.acks", conf.acks);
110 | prop.put("send.buffer.bytes", conf.sendBufferSize);
111 | ProducerConfig pConfig = new ProducerConfig(prop);
112 | Producer<String, byte[]> heartPro = null;
113 | try {
114 | heartPro = new Producer<String, byte[]>(pConfig);
115 | if(heartPro != null) heartPro.close();
116 | } catch (Exception e) {
117 | return false;
118 | }
119 | return true;
120 | }
121 |
122 | }
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/driver/producer/KafkaSender.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.driver.producer;
2 |
3 | import com.github.hackerwin7.mysql.parser.protocol.json.JSONConvert;
4 | import kafka.javaapi.producer.Producer;
5 | import kafka.producer.KeyedMessage;
6 | import kafka.producer.ProducerConfig;
7 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
8 | import com.github.hackerwin7.mysql.parser.monitor.JrdwMonitorVo;
9 | import com.github.hackerwin7.mysql.parser.monitor.ParserMonitor;
10 | import com.github.hackerwin7.mysql.parser.monitor.constants.JDMysqlParserMonitorType;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 | import com.github.hackerwin7.mysql.parser.parser.utils.ParserConf;
14 |
15 | import java.util.ArrayList;
16 | import java.util.List;
17 | import java.util.Properties;
18 |
19 | /**
20 | * Created by hp on 14-12-12.
21 | */
22 | public class KafkaSender {
23 |
24 | private Logger logger = LoggerFactory.getLogger(KafkaSender.class);
25 |
26 | private KafkaConf conf;
27 | private Producer<String, byte[]> producer;
28 | private int retrys = ParserConf.KAFKA_RECONN_COUNT;
29 | private int reconns = ParserConf.KAFKA_RETRY_COUNT;
30 |
31 | public KafkaSender(KafkaConf cf) {
32 | conf = cf;
33 | }
34 |
35 | public void connect() {
36 | Properties prop = new Properties();
37 | prop.put("metadata.broker.list", conf.brokerList);
38 | prop.put("serializer.class", conf.serializer);//msg is string
39 | prop.put("key.serializer.class", conf.keySerializer);
40 | prop.put("partitioner.class", conf.partitioner);
41 | prop.put("request.required.acks", conf.acks);
42 | prop.put("compression.codec", conf.compression);
43 | ProducerConfig pConfig = new ProducerConfig(prop);
44 | producer = new Producer<String, byte[]>(pConfig);
45 | }
46 |
47 | public void send(byte[] msg) {
48 | KeyedMessage<String, byte[]> keyMsg = new KeyedMessage<String, byte[]>(conf.topic, null, msg);
49 | blockSend(keyMsg);
50 | }
51 |
52 | public void send(String topic, byte[] msg) {
53 | KeyedMessage<String, byte[]> keyMsg = new KeyedMessage<String, byte[]>(topic, null, msg);
54 | blockSend(keyMsg);
55 | }
56 |
57 | public void send(List<byte[]> msgs) {
58 | List<KeyedMessage<String, byte[]>> keyMsgs = new ArrayList<KeyedMessage<String, byte[]>>();
59 | for(byte[] msg : msgs) {
60 | KeyedMessage<String, byte[]> keyMsg = new KeyedMessage<String, byte[]>(conf.topic, null, msg);
61 | keyMsgs.add(keyMsg);
62 | }
63 | blockSend(keyMsgs);
64 | }
65 |
66 | public void send(String topic, List<byte[]> msgs) {
67 | List<KeyedMessage<String, byte[]>> keyMsgs = new ArrayList<KeyedMessage<String, byte[]>>();
68 | for(byte[] msg : msgs) {
69 | KeyedMessage<String, byte[]> keyMsg = new KeyedMessage<String, byte[]>(topic, null, msg);
70 | keyMsgs.add(keyMsg);
71 | }
72 | blockSend(keyMsgs);
73 | }
74 |
75 | public int sendKeyMsg(List<KeyedMessage<String, byte[]>> keyMsgs) {
76 | return blockSend(keyMsgs);
77 | }
78 |
79 | public int sendKeyMsg(List<KeyedMessage<String, byte[]>> keyMsgs, KafkaSender sender, ParserConf config) {
80 | return blockSend(keyMsgs, sender, config);
81 | }
82 |
83 | public int sendKeyMsg(KeyedMessage<String, byte[]> km) {
84 | return blockSend(km);
85 | }
86 |
87 | public int sendKeyMsg(KeyedMessage<String, byte[]> km, KafkaSender sender, ParserConf config) {
88 | return blockSend(km, sender, config);
89 | }
90 |
91 | public int blockSend(List<KeyedMessage<String, byte[]>> keyMsgs) {
92 | boolean isAck = false;
93 | int retryKafka = 0;
94 | int reconnKafka = 0;
95 | while (!isAck) {
96 | if(retryKafka >= retrys) {
97 | reconnect();
98 | reconnKafka++;
99 | if(reconnKafka > reconns) {
100 | return -1;
101 | }
102 | logger.error("retry times out, reconnect the kafka server......");
103 | retryKafka = 0;
104 | }
105 | retryKafka++;
106 | try {
107 | producer.send(keyMsgs);
108 | isAck = true;
109 | } catch (Exception e) {
110 | logger.error("retrying sending... Exception:" + e.getMessage(), e);
111 | delay(3);
112 | }
113 | }
114 | return 0;
115 | }
116 |
117 | public int blockSend(List<KeyedMessage<String, byte[]>> keyMsgs, KafkaSender sender, ParserConf config) {
118 | boolean isAck = false;
119 | int retryKafka = 0;
120 | int reconnKafka = 0;
121 | while (!isAck) {
122 | if(retryKafka >= retrys) {
123 | reconnect();
124 | reconnKafka++;
125 | if(reconnKafka > reconns) {
126 | return -1;
127 | }
128 | logger.error("retry times out, reconnect the kafka server......");
129 | retryKafka = 0;
130 | }
131 | retryKafka++;
132 | try {
133 | producer.send(keyMsgs);
134 | isAck = true;
135 | } catch (Exception e) {
136 | //send monitor
137 | try {
138 | ParserMonitor monitor = new ParserMonitor();
139 | monitor.exMsg = e.getMessage();
140 | JrdwMonitorVo jmv = monitor.toJrdwMonitorOnline(JDMysqlParserMonitorType.EXCEPTION_MONITOR, config.jobId);
141 | String jsonStr = JSONConvert.JrdwMonitorVoToJson(jmv).toString();
142 | KeyedMessage<String, byte[]> km = new KeyedMessage<String, byte[]>(config.phKaTopic, null, jsonStr.getBytes("UTF-8"));
143 | sender.sendKeyMsg(km);
144 | } catch (Exception e1) {
145 | e1.printStackTrace();
146 | }
147 | logger.error("retrying sending... Exception:" + e.getMessage(), e);
148 | delay(3);
149 | }
150 | }
151 | return 0;
152 | }
153 |
154 | public int blockSend(KeyedMessage<String, byte[]> keyMsg) {
155 | boolean isAck = false;
156 | int retryKafka = 0;
157 | int reconnKafka = 0;
158 | while (!isAck) {
159 | if(retryKafka >= retrys) {
160 | reconnect();
161 | reconnKafka++;
162 | if(reconnKafka > reconns) {
163 | return -1;
164 | }
165 | logger.error("retry times out, reconnect the kafka server......");
166 | retryKafka = 0;
167 | }
168 | retryKafka++;
169 | try {
170 | producer.send(keyMsg);
171 | isAck = true;
172 | } catch (Exception e) {
173 | logger.error("retrying sending... Exception:" + e.getMessage(), e);
174 | delay(3);
175 | }
176 | }
177 | return 0;
178 | }
179 |
180 | public int blockSend(KeyedMessage<String, byte[]> keyMsg, KafkaSender sender, ParserConf config) {
181 | boolean isAck = false;
182 | int retryKafka = 0;
183 | int reconnKafka = 0;
184 | while (!isAck) {
185 | if(retryKafka >= retrys) {
186 | reconnect();
187 | reconnKafka++;
188 | if(reconnKafka > reconns) {
189 | return -1;
190 | }
191 | logger.error("retry times out, reconnect the kafka server......");
192 | retryKafka = 0;
193 | }
194 | retryKafka++;
195 | try {
196 | producer.send(keyMsg);
197 | isAck = true;
198 | } catch (Exception e) {
199 | //send monitor
200 | try {
201 | ParserMonitor monitor = new ParserMonitor();
202 | monitor.exMsg = e.getMessage();
203 | JrdwMonitorVo jmv = monitor.toJrdwMonitorOnline(JDMysqlParserMonitorType.EXCEPTION_MONITOR, config.jobId);
204 | String jsonStr = JSONConvert.JrdwMonitorVoToJson(jmv).toString();
205 | KeyedMessage<String, byte[]> km = new KeyedMessage<String, byte[]>(config.phKaTopic, null, jsonStr.getBytes("UTF-8"));
206 | sender.sendKeyMsg(km);
207 | } catch (Exception e1) {
208 | e1.printStackTrace();
209 | }
210 | logger.error("retrying sending... Exception:" + e.getMessage(), e);
211 | delay(3);
212 | }
213 | }
214 | return 0;
215 | }
216 |
217 | private void delay(int sec) {
218 | try {
219 | Thread.sleep(sec * 1000);
220 | } catch (InterruptedException e) {
221 | e.printStackTrace();
222 | }
223 | }
224 |
225 | public void close() {
226 | if(producer != null) producer.close();
227 | }
228 |
229 | public void reconnect() {
230 | close();
231 | connect();
232 | }
233 |
234 | public boolean isConnected() {
235 | Properties prop = new Properties();
236 | prop.put("metadata.broker.list", conf.brokerList);
237 | prop.put("serializer.class", conf.serializer);//msg is string
238 | prop.put("key.serializer.class", conf.keySerializer);
239 | prop.put("partitioner.class", conf.partitioner);
240 | prop.put("request.required.acks", conf.acks);
241 | prop.put("send.buffer.bytes", conf.sendBufferSize);
242 | ProducerConfig pConfig = new ProducerConfig(prop);
243 | Producer heartPro = null;
244 | try {
245 | heartPro = new Producer(pConfig);
246 | if(heartPro != null) heartPro.close();
247 | } catch (Exception e) {
248 | return false;
249 | }
250 | return true;
251 | }
252 | }
253 |
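The blockSend overloads above share one contract: they return 0 once the producer acknowledges the message, and -1 only after retrys consecutive send failures have been followed by more than reconns failed reconnect attempts; the overloads that take a KafkaSender and a ParserConf additionally push an EXCEPTION_MONITOR record to the monitor topic before retrying. A minimal caller sketch, assuming KafkaSender exposes a KafkaConf-based constructor and the connect() method referenced by reconnect() (both outside this excerpt), and using a placeholder broker address:

import com.github.hackerwin7.mysql.parser.kafka.driver.producer.KafkaSender;
import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
import kafka.producer.KeyedMessage;
import java.nio.charset.StandardCharsets;

public class BlockSendSketch {
    public static void main(String[] args) throws Exception {
        KafkaConf conf = new KafkaConf();
        conf.brokerList = "localhost:9092";          // placeholder broker
        conf.topic = "parser-log-mysql";
        KafkaSender sender = new KafkaSender(conf);  // assumption: KafkaConf-based constructor
        sender.connect();
        KeyedMessage<String, byte[]> km = new KeyedMessage<String, byte[]>(
                conf.topic, null, "payload".getBytes(StandardCharsets.UTF_8));
        int rc = sender.blockSend(km);               // 0 = acknowledged, -1 = retries exhausted
        sender.close();
        System.out.println("blockSend returned " + rc);
    }
}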
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/utils/KafkaConf.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.utils;
2 |
3 | import com.github.hackerwin7.mysql.parser.zk.client.ZkExecutor;
4 | import net.sf.json.JSONObject;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 | import com.github.hackerwin7.mysql.parser.zk.utils.ZkConf;
8 |
9 | import java.util.ArrayList;
10 | import java.util.List;
11 |
12 | /**
13 | * Created by hp on 14-12-12.
14 | */
15 | public class KafkaConf {
16 |
17 | private static Logger logger = LoggerFactory.getLogger(KafkaConf.class);
18 |
19 | public String brokerList = "localhost:9092";//"12:9092,13:9092,14:9092"
20 | public int port = 9092;
21 | public String zk = "localhost:2181";
22 | public String serializer = "kafka.serializer.DefaultEncoder";//default is byte[]
23 | public String keySerializer = "kafka.serializer.StringEncoder";//keys are plain strings
24 | public String partitioner = "kafka.producer.DefaultPartitioner";
25 | public String compression = "none";
26 | public String acks = "1";
27 | public String sendBufferSize = String.valueOf(1024 * 1024);//1MB
28 | public String topic;//queue topic
29 | public int partition = 0;
30 | public List<String> topics = new ArrayList<String>();//used when distributing to multiple topics
31 | public List<String> brokerSeeds = new ArrayList<String>();//"12,13,14"
32 | public List<Integer> portList = new ArrayList<Integer>();//9092 9093 9094
33 | public int readBufferSize = 1 * 1024 * 1024;//1 MB
34 | public String clientName = "cc456687IUGHG";
35 |
36 | //load the zkPos to find the brokerList and port, zkPos e.g. 172.17.36.60:2181/kafka
37 | public void loadZk(String zkPos) throws Exception {
38 | logger.info("parser load the string : " + zkPos);
39 | if(zkPos == null) throw new Exception("zk path is null");
40 | String[] ss = zkPos.split("/");
41 | String zkServer = "";
42 | String zkPath = "";
43 | for(int i = 0; i<= ss.length - 1; i++) {
44 | logger.info("!!!!! debug : " + ss[i]);
45 | if(i == 0) {
46 | zkServer = ss[i];
47 | } else {
48 | zkPath += ("/" + ss[i]);
49 | }
50 | }
51 | zkPath += ("/brokers/ids");
52 | ZkConf zcnf = new ZkConf();
53 | zcnf.zkServers = zkServer;
54 | logger.info("load conf : " + zcnf.zkServers);
55 | ZkExecutor zkexe = new ZkExecutor(zcnf);
56 | zkexe.connect();
57 | logger.info("load path : " + zkPath);
58 | List<String> ids = zkexe.getChildren(zkPath);
59 | brokerList = "";
60 | brokerSeeds.clear();
61 | portList.clear();
62 | for(String brokerNode : ids) {
63 | String zkNodeJson = zkexe.get(zkPath + "/" + brokerNode);
64 | if(zkNodeJson == null) continue;
65 | JSONObject jo = JSONObject.fromObject(zkNodeJson);
66 | String host = jo.getString("host");
67 | int port = jo.getInt("port");
68 | brokerSeeds.add(host);
69 | portList.add(port);
70 | brokerList += (host + ":" + port + ",");
71 | logger.info("load zk host and port: " + host + " # " + port);
72 | }
73 | brokerList = brokerList.substring(0, brokerList.lastIndexOf(","));
74 | zkexe.close();
75 | }
76 |
77 | }
78 |
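loadZk() splits the configured string on '/', takes the first piece as the ZooKeeper address and the rest as the chroot, appends /brokers/ids, and then rebuilds brokerList, brokerSeeds and portList from each registered broker's host and port. A short driving sketch, using the placeholder address from the comment above loadZk() rather than a real server:

import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;

public class LoadZkSketch {
    public static void main(String[] args) throws Exception {
        KafkaConf kcnf = new KafkaConf();
        // placeholder taken from the loadZk() comment; point it at a real ZooKeeper quorum
        kcnf.loadZk("172.17.36.60:2181/kafka");
        // after loadZk(): brokerList looks like "host1:9092,host2:9092",
        // brokerSeeds holds the hosts and portList the matching ports
        System.out.println(kcnf.brokerList);
        System.out.println(kcnf.brokerSeeds + " / " + kcnf.portList);
    }
}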
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/utils/KafkaMetaMsg.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.utils;
2 |
3 | /**
4 | * Created by hp on 14-12-12.
5 | */
6 | public class KafkaMetaMsg {
7 |
8 | public byte[] msg;
9 | public long offset;// next read offset
10 |
11 | public KafkaMetaMsg(byte[] bytes, long pos) {
12 | msg = bytes;
13 | offset = pos;
14 | }
15 |
16 | }
17 |
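KafkaMetaMsg is the element type of the receiver's message queue: msg carries the raw payload and offset the next offset to read. A tiny illustration of the produce/consume pattern; a local queue stands in here for KafkaReceiver.msgQueue, which the ParserVertify* classes further down drain in exactly this way:

import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class MetaMsgQueueSketch {
    public static void main(String[] args) throws Exception {
        BlockingQueue<KafkaMetaMsg> msgQueue = new LinkedBlockingQueue<KafkaMetaMsg>();
        msgQueue.put(new KafkaMetaMsg("hello".getBytes(), 42L)); // payload + next read offset
        KafkaMetaMsg kmsg = msgQueue.take();
        System.out.println(new String(kmsg.msg) + " @ next offset " + kmsg.offset);
    }
}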
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/kafka/utils/KafkaNoStaticConf.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.kafka.utils;
2 |
3 | import com.github.hackerwin7.mysql.parser.zk.client.ZkExecutor;
4 | import net.sf.json.JSONObject;
5 | import com.github.hackerwin7.mysql.parser.zk.utils.ZkConf;
6 |
7 | import java.util.ArrayList;
8 | import java.util.List;
9 |
10 | /**
11 | * Created by hp on 15-3-18.
12 | */
13 | public class KafkaNoStaticConf {
14 |
15 | public String brokerList = "localhost:9092";//"12:9092,13:9092,14:9092"
16 | public int port = 9092;
17 | public String zk = "localhost:2181";
18 | public String serializer = "kafka.serializer.DefaultEncoder";//default is byte[]
19 | public String keySerializer = "kafka.serializer.StringEncoder";//keys are plain strings
20 | public String partitioner = "kafka.producer.DefaultPartitioner";
21 | public String acks = "1";
22 | public String sendBufferSize = String.valueOf(1024 * 1024);//1MB
23 | public String topic;//queue topic
24 | public int partition = 0;
25 | public List<String> topics = new ArrayList<String>();//used when distributing to multiple topics
26 | public List<String> brokerSeeds = new ArrayList<String>();//"12,13,14"
27 | public List<Integer> portList = new ArrayList<Integer>();//9092 9093 9094
28 | public int readBufferSize = 1 * 1024 * 1024;//1 MB
29 | public String clientName = "cc456687IUGHG";
30 |
31 | //load the zkPos to find the brokerList and port, zkPos e.g. 172.17.36.60:2181/kafka
32 | public void loadZk(String zkPos) throws Exception{
33 | if(zkPos == null) throw new Exception("zk path is null");
34 | String[] ss = zkPos.split("/");
35 | String zkServer = "";
36 | String zkPath = "";
37 | for(int i = 0; i<= ss.length - 1; i++) {
38 | if(i == 0) {
39 | zkServer = ss[i];
40 | } else {
41 | zkPath += ("/" + ss[i]);
42 | }
43 | }
44 | zkPath += ("/brokers/ids");
45 | ZkConf zcnf = new ZkConf();
46 | zcnf.zkServers = zkServer;
47 | ZkExecutor zkexe = new ZkExecutor(zcnf);
48 | zkexe.connect();
49 | List<String> ids = zkexe.getChildren(zkPath);
50 | brokerList = "";
51 | brokerSeeds.clear();
52 | portList.clear();
53 | for(String brokerNode : ids) {
54 | String zkNodeJson = zkexe.get(zkPath + "/" + brokerNode);
55 | if(zkNodeJson == null) continue;
56 | JSONObject jo = JSONObject.fromObject(zkNodeJson);
57 | String host = jo.getString("host");
58 | int port = jo.getInt("port");
59 | brokerSeeds.add(host);
60 | portList.add(port);
61 | brokerList += (host + ":" + port + ",");
62 | }
63 | brokerList = brokerList.substring(0, brokerList.lastIndexOf(","));
64 | zkexe.close();
65 | }
66 |
67 | }
68 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/monitor/JrdwMonitorVo.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.monitor;
2 |
3 | /**
4 | * Created by hp on 15-1-8.
5 | */
6 | public class JrdwMonitorVo {
7 |
8 | private int id;
9 | private String jrdw_mark;
10 | private String content;
11 |
12 | public int getId() {
13 | return id;
14 | }
15 |
16 | public void setId(int id) {
17 | this.id = id;
18 | }
19 |
20 | public String getContent() {
21 | return content;
22 | }
23 |
24 | public void setContent(String content) {
25 | this.content = content;
26 | }
27 |
28 | public String getJrdw_mark() {
29 | return jrdw_mark;
30 | }
31 |
32 | public void setJrdw_mark(String jrdw_mark) {
33 | this.jrdw_mark = jrdw_mark;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/monitor/MonitorToKafkaProducer.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.monitor;
2 |
3 | import kafka.javaapi.producer.Producer;
4 | import kafka.producer.KeyedMessage;
5 | import kafka.producer.ProducerConfig;
6 |
7 | import java.util.Properties;
8 |
9 | /**
10 | * Created by hp on 14-9-26.
11 | */
12 | public class MonitorToKafkaProducer {
13 |
14 | private String brokerList = "localhost:9092";
15 | private String serializerClass = "kafka.serializer.StringEncoder";
16 | private String partitionerClass = "SimplePartitioner";
17 | private String acks = "1";
18 | private String topic = "mysql_tracker_parser";
19 |
20 | private ProducerConfig config = null;
21 | private Properties props = null;
22 | private Producer<String, String> producer = null;
23 |
24 | public MonitorToKafkaProducer() {
25 |
26 | }
27 |
28 |
29 | public MonitorToKafkaProducer(String broker, String serializer, String partitioner, String acks) {
30 | brokerList = broker;
31 | serializerClass = serializer;
32 | partitionerClass = partitioner;
33 | this.acks = acks;
34 | }
35 |
36 | public void open() throws Exception {
37 | props = new Properties();
38 | props.put("metadata.broker.list", brokerList);
39 | props.put("serializer.class", serializerClass);
40 | props.put("partitioner.class", partitionerClass);
41 | props.put("request.required.acks", acks);
42 | config = new ProducerConfig(props);
43 | producer = new Producer<String, String>(config);
44 | }
45 |
46 | public void send(String key, String msg) throws Exception {
47 | if(config == null) {
48 | throw new NullPointerException("call open() before send()");
49 | }
50 | KeyedMessage<String, String> message = new KeyedMessage<String, String>(topic, key, msg);
51 | producer.send(message);
52 | }
53 |
54 | public void close() throws Exception {
55 | producer.close();
56 | }
57 |
58 | }
59 |
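The intended life cycle is open() once, send() as often as needed, close() on shutdown; send() fails fast if open() was skipped. A hedged usage sketch (the broker address is a placeholder, and the partitioner is passed fully qualified here instead of relying on the bare "SimplePartitioner" default above):

import com.github.hackerwin7.mysql.parser.monitor.MonitorToKafkaProducer;

public class MonitorProducerSketch {
    public static void main(String[] args) throws Exception {
        MonitorToKafkaProducer producer = new MonitorToKafkaProducer(
                "localhost:9092",                                               // placeholder broker list
                "kafka.serializer.StringEncoder",                               // value serializer
                "com.github.hackerwin7.mysql.parser.monitor.SimplePartitioner", // partitioner class
                "1");                                                           // request.required.acks
        producer.open();                         // must precede send()
        producer.send("job-1.0", "heartbeat");   // key, message (topic is fixed to mysql_tracker_parser)
        producer.close();
    }
}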
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/monitor/ParserMonitor.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.monitor;
2 |
3 | import com.github.hackerwin7.mysql.parser.protocol.json.JSONConvert;
4 | import com.github.hackerwin7.mysql.parser.monitor.constants.JDMysqlParserMonitor;
5 | import com.github.hackerwin7.mysql.parser.monitor.constants.JDMysqlParserMonitorType;
6 | import net.sf.json.JSONObject;
7 |
8 | import java.util.HashMap;
9 | import java.util.Map;
10 |
11 | /**
12 | * Created by hp on 14-9-23.
13 | */
14 | public class ParserMonitor implements Cloneable {
15 |
16 | public long fetchStart;
17 |
18 | public long fetchEnd;
19 |
20 | public long persistenceStart;
21 |
22 | public long persistenceEnd;
23 |
24 | public long sendStart;
25 |
26 | public long sendEnd;
27 |
28 | public long perMinStart;
29 |
30 | public long perMinEnd;
31 |
32 | public long hbaseReadStart;
33 |
34 | public long hbaseReadEnd;
35 |
36 | public long hbaseWriteStart;
37 |
38 | public long hbaseWriteEnd;
39 |
40 | public long serializeStart;
41 |
42 | public long serializeEnd;
43 |
44 | public long fetchNum;
45 |
46 | public long persisNum;
47 |
48 | public long batchSize;//bytes for unit
49 |
50 | public long fetcherStart;
51 |
52 | public long fetcherEnd;
53 |
54 | public long decodeStart;
55 |
56 | public long decodeEnd;
57 |
58 | public long delayTime;
59 |
60 | public long nowOffset;
61 |
62 | public long lastOffset;
63 |
64 | public Map<String, Long> topicDelay;
65 |
66 | public Map<String, Long> topicRows;
67 |
68 | public String exMsg;
69 |
70 | public String ip;
71 |
72 | public ParserMonitor() {
73 | fetchStart = fetchEnd = persistenceStart = persistenceEnd = 0;
74 | perMinStart = perMinEnd = hbaseReadStart = hbaseReadEnd = 0;
75 | hbaseWriteStart = hbaseWriteEnd = serializeStart = serializeEnd = 0;
76 | fetchNum = persisNum = batchSize = 0;
77 | fetcherStart = fetcherEnd = decodeStart = decodeEnd = 0;
78 | sendStart = sendEnd = 0;
79 | delayTime = 0;
80 | nowOffset = lastOffset = 0;
81 | topicDelay = new HashMap<String, Long>();
82 | topicRows = new HashMap<String, Long>();
83 | exMsg = ip = "";
84 | }
85 |
86 | public Object clone() {
87 | Object o = null;
88 | try {
89 | ParserMonitor os = (ParserMonitor) super.clone();
90 | os.topicDelay = new HashMap<String, Long>();
91 | os.topicRows = new HashMap<String, Long>();
92 | if(topicDelay != null) {
93 | for(Map.Entry<String, Long> entry : topicDelay.entrySet()) {
94 | String key = entry.getKey();
95 | Long value = entry.getValue();
96 | os.topicDelay.put(key, value);
97 | }
98 | }
99 | if(topicRows != null) {
100 | for(Map.Entry<String, Long> entry : topicRows.entrySet()) {
101 | String key = entry.getKey();
102 | Long value = entry.getValue();
103 | os.topicRows.put(key, value);
104 | }
105 | }
106 | o = (ParserMonitor) os;
107 | } catch (CloneNotSupportedException e) {
108 | e.printStackTrace();
109 | }
110 | return o;
111 | }
112 |
113 | public ParserMonitor cloneDeep() {
114 | return (ParserMonitor) clone();
115 | }
116 |
117 | public void clear() {
118 | fetchStart = fetchEnd = persistenceStart = persistenceEnd = 0;
119 | perMinStart = perMinEnd = hbaseReadStart = hbaseReadEnd = 0;
120 | hbaseWriteStart = hbaseWriteEnd = serializeStart = serializeEnd = 0;
121 | fetchNum = persisNum = batchSize = 0;
122 | fetcherStart = fetcherEnd = decodeStart = decodeEnd = 0;
123 | sendStart = sendEnd = 0;
124 | delayTime = 0;
125 | nowOffset = lastOffset = 0;
126 | exMsg = ip = "";
127 | if(topicDelay != null) {
128 | for(Map.Entry<String, Long> entry : topicDelay.entrySet()) {
129 | topicDelay.put(entry.getKey(), 0L);
130 | }
131 | }
132 | if(topicRows != null) {
133 | for(Map.Entry<String, Long> entry : topicRows.entrySet()) {
134 | topicRows.put(entry.getKey(), 0L);
135 | }
136 | }
137 | }
138 |
139 | public JrdwMonitorVo toJrdwMonitor(int id, String jobId) {
140 | JrdwMonitorVo jmv = new JrdwMonitorVo();
141 | jmv.setId(id);
142 | jmv.setJrdw_mark(jobId);
143 | //pack the name/value to map
144 | Map content = new HashMap();
145 | switch (id) {
146 | case JDMysqlParserMonitor.FETCH_MONITOR:
147 | content.put(JDMysqlParserMonitor.FETCH_NUM, fetchNum);
148 | content.put(JDMysqlParserMonitor.FETCH_SIZE, batchSize);
149 | content.put(JDMysqlParserMonitor.DELAY_NUM, (lastOffset - nowOffset));
150 | break;
151 | case JDMysqlParserMonitor.PERSIS_MONITOR:
152 | content.put(JDMysqlParserMonitor.SEND_NUM, persisNum);
153 | content.put(JDMysqlParserMonitor.SEND_SIZE, batchSize);
154 | content.put(JDMysqlParserMonitor.SEND_TIME, (sendEnd - sendStart));
155 | content.put(JDMysqlParserMonitor.DELAY_TIME, delayTime);
156 | break;
157 | case JDMysqlParserMonitor.TOPIC_DELAY:
158 | content.putAll(topicDelay);
159 | break;
160 | case JDMysqlParserMonitor.TOPIC_ROWS:
161 | content.putAll(topicRows);
162 | break;
163 | }
164 | //map to json
165 | JSONObject jo = JSONConvert.MapToJson(content);
166 | jmv.setContent(jo.toString());
167 | return jmv;
168 | }
169 |
170 | public JrdwMonitorVo toJrdwMonitorOnline(int id, String jobId) {
171 | JrdwMonitorVo jmv = new JrdwMonitorVo();
172 | jmv.setId(id);
173 | jmv.setJrdw_mark(jobId);
174 | //pack the name/value to map
175 | Map content = new HashMap();
176 | Map msgContent = new HashMap();
177 | Map IPContent = new HashMap();
178 | JSONObject jo;
179 | switch (id) {
180 | case JDMysqlParserMonitorType.FETCH_MONITOR:
181 | content.put(JDMysqlParserMonitorType.FETCH_NUM, fetchNum);
182 | content.put(JDMysqlParserMonitorType.FETCH_SIZE, batchSize);
183 | content.put(JDMysqlParserMonitorType.DELAY_NUM, (lastOffset - nowOffset));
184 | jo = JSONConvert.MapToJson(content);
185 | break;
186 | case JDMysqlParserMonitorType.PERSIS_MONITOR:
187 | content.put(JDMysqlParserMonitorType.SEND_NUM, persisNum);
188 | content.put(JDMysqlParserMonitorType.SEND_SIZE, batchSize);
189 | content.put(JDMysqlParserMonitorType.SEND_TIME, (sendEnd - sendStart));
190 | content.put(JDMysqlParserMonitorType.DELAY_TIME, delayTime);
191 | jo = JSONConvert.MapToJson(content);
192 | break;
193 | case JDMysqlParserMonitorType.TOPIC_DELAY:
194 | content.putAll(topicDelay);
195 | jo = JSONConvert.MapToJson(content);
196 | break;
197 | case JDMysqlParserMonitorType.TOPIC_ROWS:
198 | content.putAll(topicRows);
199 | jo = JSONConvert.MapToJson(content);
200 | break;
201 | case JDMysqlParserMonitorType.EXCEPTION_MONITOR:
202 | msgContent.put(JDMysqlParserMonitorType.EXCEPTION, exMsg);
203 | jo = JSONConvert.MapToJson(msgContent);
204 | break;
205 | case JDMysqlParserMonitorType.IP_MONITOR:
206 | IPContent.put(JDMysqlParserMonitorType.IP, ip);
207 | jo = JSONConvert.MapToJson(IPContent);
208 | break;
209 | case JDMysqlParserMonitorType.TOPIC_MONITOR:
210 | content.putAll(topicRows);
211 | jo = JSONConvert.MapToJson(content);
212 | break;
213 | default:
214 | jo = new JSONObject();
215 | break;
216 | }
217 | //map to json
218 | jmv.setContent(jo.toString());
219 | return jmv;
220 | }
221 |
222 | }
223 |
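toJrdwMonitorOnline() packs the counters selected by the id into a JSON map keyed by the JDMysqlParserMonitorType constants and stores it as the content of a JrdwMonitorVo; this is the same path KafkaSender.blockSend() uses to report send exceptions. A small sketch with a made-up job id:

import com.github.hackerwin7.mysql.parser.monitor.JrdwMonitorVo;
import com.github.hackerwin7.mysql.parser.monitor.ParserMonitor;
import com.github.hackerwin7.mysql.parser.monitor.constants.JDMysqlParserMonitorType;
import com.github.hackerwin7.mysql.parser.protocol.json.JSONConvert;

public class ExceptionMonitorSketch {
    public static void main(String[] args) {
        ParserMonitor monitor = new ParserMonitor();
        monitor.exMsg = "kafka send failed";     // exception text to report
        JrdwMonitorVo jmv = monitor.toJrdwMonitorOnline(
                JDMysqlParserMonitorType.EXCEPTION_MONITOR, "job-123"); // "job-123" is illustrative
        // jmv.getContent() is a JSON string such as {"EXCEPTION":"kafka send failed"};
        // KafkaSender serializes the whole VO with JSONConvert.JrdwMonitorVoToJson(jmv)
        System.out.println(jmv.getContent());
        System.out.println(JSONConvert.JrdwMonitorVoToJson(jmv).toString());
    }
}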
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/monitor/SimplePartitioner.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.monitor;
2 |
3 | import kafka.producer.Partitioner;
4 | import kafka.utils.VerifiableProperties;
5 |
6 | /**
7 | * Created by hp on 14-9-26.
8 | */
9 | public class SimplePartitioner implements Partitioner {
10 |
11 | public SimplePartitioner(VerifiableProperties props) {
12 |
13 | }
14 |
15 | public int partition(Object key, int a_numPartitions) {
16 | int partition = 0;
17 | String stringKey = (String) key;
18 | int offset = stringKey.lastIndexOf('.');
19 | if(offset > 0) {
20 | partition = Integer.parseInt(stringKey.substring(offset + 1)) % a_numPartitions;
21 | }
22 | return(partition);
23 | }
24 |
25 | }
26 |
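partition() parses the digits after the last '.' in the key and takes them modulo the partition count; keys without a '.' fall back to partition 0, and a non-numeric suffix would raise a NumberFormatException. A quick illustration:

import com.github.hackerwin7.mysql.parser.monitor.SimplePartitioner;

public class PartitionerSketch {
    public static void main(String[] args) {
        // VerifiableProperties is only needed by the Kafka framework; null suffices for illustration
        SimplePartitioner partitioner = new SimplePartitioner(null);
        System.out.println(partitioner.partition("db.table.7", 4)); // 7 % 4 = 3
        System.out.println(partitioner.partition("plainkey", 4));   // no '.' -> partition 0
    }
}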
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/monitor/constants/JDMysqlParserMonitor.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.monitor.constants;
2 |
3 | /**
4 | * Created by hp on 15-1-8.
5 | */
6 | public class JDMysqlParserMonitor {
7 |
8 | public static final int FETCH_MONITOR = 22000;
9 | public static final int PERSIS_MONITOR = 22001;
10 | public static final int TOPIC_DELAY = 31104;
11 | public static final int TOPIC_ROWS = 31102;
12 |
13 | //fetch
14 | public static final String FETCH_NUM = "FETCH_NUM";
15 | public static final String FETCH_SIZE = "FETCH_SIZE";
16 | public static final String DELAY_NUM = "DELAY_NUM";
17 |
18 | //persistence
19 | public static final String SEND_NUM = "SEND_NUM";
20 | public static final String SEND_SIZE = "SEND_SIZE";
21 | public static final String DELAY_TIME = "DELAY_TIME";
22 | public static final String SEND_TIME = "SEND_TIME";
23 |
24 |
25 | }
26 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/monitor/constants/JDMysqlParserMonitorType.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.monitor.constants;
2 |
3 | /**
4 | * Created by hp on 15-1-15.
5 | */
6 | public class JDMysqlParserMonitorType {
7 |
8 | public static final int JD_MYSQL_PARSER_MONITOR_TYPE_MIN = 22000;
9 | public static final int FETCH_MONITOR = 22001;
10 | public static final int PERSIS_MONITOR = 22002;
11 | public static final int TOPIC_DELAY = 31104;
12 | public static final int TOPIC_ROWS = 31102;
13 | public static final int EXCEPTION_MONITOR = 22003;
14 | public static final int IP_MONITOR = 22004;
15 | public static final int TOPIC_MONITOR = 22005;
16 | public static final int JD_MYSQL_PARSER_MONITOR_TYPE_MAX = 22999;
17 |
18 | // fetch
19 | public static final String FETCH_NUM = "FETCH_NUM";
20 | public static final String FETCH_SIZE = "FETCH_SIZE";
21 | public static final String DELAY_NUM = "DELAY_NUM";
22 |
23 | // persistence
24 | public static final String SEND_NUM = "SEND_NUM";
25 | public static final String SEND_SIZE = "SEND_SIZE";
26 | public static final String DELAY_TIME = "DELAY_TIME";
27 | public static final String SEND_TIME = "SEND_TIME";
28 |
29 | //exception
30 | public static final String EXCEPTION = "EXCEPTION";
31 |
32 | //ip
33 | public static final String IP = "IP";
34 | }
35 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/MysqlParser.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser;
2 |
3 | import com.github.hackerwin7.mysql.parser.hbase.driver.HBaseOperator;
4 | import org.apache.hadoop.hbase.client.*;
5 | import org.apache.hadoop.hbase.util.Bytes;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 | import com.github.hackerwin7.mysql.parser.parser.utils.EntryPrinter;
9 | import com.github.hackerwin7.mysql.parser.protocol.protobuf.CanalEntry;
10 |
11 | import java.io.IOException;
12 | import java.text.DateFormat;
13 | import java.text.SimpleDateFormat;
14 | import java.util.*;
15 | import java.util.concurrent.BlockingQueue;
16 | import java.util.concurrent.LinkedBlockingQueue;
17 |
18 | /**
19 | * Created by hp on 14-9-17.
20 | */
21 | public class MysqlParser {
22 |
23 | //parser's logger
24 | private Logger logger = LoggerFactory.getLogger(MysqlParser.class);
25 |
26 | //hbase operator
27 | private HBaseOperator hBaseOP;
28 |
29 | //multiple thread queue
30 | private BlockingQueue<byte[]> bytesQueue;
31 |
32 | // batch size threshold: the number of events collected per fetch; once the
33 | // batched event count reaches batchsize the batch is flushed to the entry table
34 | // (the value was tuned empirically)
35 |
36 | private int batchsize = 3000;
37 |
38 | // time threshold in seconds: if batchsize is not reached within this interval,
39 | // the partial batch is flushed anyway (tuned empirically to 1.5 seconds)
40 | private double secondsize = 1.5;
41 |
42 | //write the read/write positions to the checkpoint every secondPer seconds
43 | private int secondPer = 60;
44 |
45 | //Global variables
46 | private byte[] globalReadPos = null;
47 | private byte[] globalWritePos = null;
48 |
49 | //control variables
50 | private boolean running;
51 | private long startTime;
52 |
53 | //constructor
54 | public MysqlParser() {
55 |
56 | hBaseOP = new HBaseOperator();
57 | bytesQueue = new LinkedBlockingQueue<byte[]>();
58 |
59 | }
60 |
61 | //prepare the parser configs
62 | private void preParser() throws IOException{
63 | running = true;
64 | startTime = new Date().getTime();
65 | globalWritePos = null;
66 | globalReadPos =null;
67 | findStartPos();
68 | }
69 |
70 | //find the start position to the global read and global write
71 | private void findStartPos() throws IOException{
72 | if(!findStartPosHBase()){
73 | findStartPosDefault();
74 | }
75 | }
76 |
77 | //find the start position according to HBase checkpoint table
78 | private boolean findStartPosHBase() throws IOException{
79 | Get get = new Get(Bytes.toBytes(hBaseOP.parserRowKey));
80 | get.addFamily(hBaseOP.getFamily());
81 | Result result = hBaseOP.getHBaseData(get, hBaseOP.getCheckpointSchemaName());
82 | byte[] readPos = result.getValue(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.eventRowCol));
83 | if(readPos != null) {
84 | String readPosString = Bytes.toString(readPos);
85 | Long readPosLong = Long.valueOf(readPosString);
86 | globalReadPos = Bytes.toBytes(readPosLong);
87 | }
88 | byte[] writePos = result.getValue(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.entryRowCol));
89 | if(writePos != null) {
90 | String writePosString = Bytes.toString(writePos);
91 | Long writePosLong = Long.valueOf(writePosString);
92 | globalWritePos = Bytes.toBytes(writePosLong);
93 | }
94 | if(globalReadPos == null || globalWritePos == null){
95 | return(false);
96 | }else {
97 | return (true);
98 | }
99 | }
100 |
101 | //find the start position by the default value
102 | private void findStartPosDefault(){
103 | if(globalReadPos == null) globalReadPos = Bytes.toBytes(0L);
104 | if(globalWritePos == null) globalWritePos = Bytes.toBytes(0L);
105 | }
106 |
107 | //Long switch to bytes specially
108 | private byte[] LongToStringToBytes(Long value){
109 | String strVal = String.valueOf(value);
110 | return(Bytes.toBytes(strVal));
111 | }
112 | private Long BytesToStringToLong(byte[] value){
113 | String strVal = new String(value);
114 | return(Long.valueOf(strVal));
115 | }
116 |
117 | //running the process, open the multiple thread to start
118 | private void runParser() throws IOException{
119 | //build and start the fetch thread
120 | FetchThread fetchThread = new FetchThread();
121 | fetchThread.start();
122 | //build and start the minute thread
123 | MinuteTimer minuteThread = new MinuteTimer();
124 | Timer timer = new Timer();
125 | timer.schedule(minuteThread, 3 * 1000, secondPer * 1000);
126 | //build and start the persistence thread
127 | PersistenceThread persThread = new PersistenceThread();
128 | persThread.start();
129 | while(running){
130 | try{
131 | Thread.sleep(1000);
132 | }catch (InterruptedException e){
133 | logger.error("main thread failed!!!");
134 | e.printStackTrace();
135 | }
136 | }
137 | }
138 |
139 | //fetch thread
140 | class FetchThread extends Thread {
141 |
142 | //thread logger
143 | private Logger logger = LoggerFactory.getLogger(FetchThread.class);
144 |
145 | private boolean fetchable = true;
146 |
147 | private int turnCount = 999;//rows fetched per scan turn
148 |
149 | public void run() {
150 | while(fetchable){
151 | //while + sleep
152 | try{
153 | Thread.sleep(1000);
154 | } catch (InterruptedException e) {
155 | logger.error("sleep error!!!");
156 | e.printStackTrace();
157 | }
158 | if(isFetchable()) {
159 | ResultScanner results = null;
160 | Scan scan = new Scan();
161 | scan.setBatch(1500);
162 | scan.setStartRow(globalReadPos);
163 | scan.setStopRow(Bytes.toBytes(Bytes.toLong(globalReadPos) + turnCount));
164 | try {
165 | results = hBaseOP.getHBaseData(scan, hBaseOP.getEventBytesSchemaName());
166 | } catch (IOException e) {
167 | logger.error("fetch data failed!!!");
168 | e.printStackTrace();
169 | }
170 | if (results != null) {
171 | for (Result result : results) {
172 | if (result == null) {//a null result marks the end of the batched data
173 | break;
174 | }
175 | byte[] receiveBytes = result.getValue(hBaseOP.getFamily(),
176 | Bytes.toBytes(hBaseOP.eventBytesCol));
177 | if (receiveBytes != null) {
178 | try {
179 | bytesQueue.put(receiveBytes);
180 | } catch (InterruptedException e) {
181 | logger.error("queue put failed!!!");
182 | e.printStackTrace();
183 | }
184 | globalReadPos = Bytes.toBytes(Bytes.toLong(globalReadPos) + 1L);
185 | } else { //a null value marks the end of the batched data
186 | break;
187 | }
188 | }
189 | //persistence the global read pos
190 | Put put = new Put(Bytes.toBytes(hBaseOP.parserRowKey));
191 | Long readPosLong = Bytes.toLong(globalReadPos);
192 | String readPosString = String.valueOf(readPosLong);
193 | put.add(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.eventRowCol), Bytes.toBytes(readPosString));
194 | try {
195 | hBaseOP.putHBaseData(put, hBaseOP.getCheckpointSchemaName());
196 | } catch (IOException e) {
197 | logger.error("write global read pos failed!!!");
198 | e.printStackTrace();
199 | }
200 | }
201 | }
202 | }
203 | running = false;//close all running process
204 | }
205 |
206 | //monitor the hbase globalReadPos whether have inserted data
207 | private boolean isFetchable(){
208 | //monitor the hbase globalReadPos whether have the data inserted
209 | Get get = new Get(globalReadPos);
210 | get.addColumn(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.eventBytesCol));
211 | Result result = null;
212 | try {
213 | result = hBaseOP.getHBaseData(get, hBaseOP.getEventBytesSchemaName());
214 | } catch (IOException e){
215 | logger.error("fetch single data failed!!!");
216 | e.printStackTrace();
217 | }
218 | if(result == null) return false;
219 | byte[] receiveBytes = result.getValue(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.eventBytesCol));
220 | if(receiveBytes != null) return true;
221 | else return false;
222 | }
223 | }
224 |
225 | //per minute run the function to record the read pos and write pos to checkpoint in HBase
226 | class MinuteTimer extends TimerTask {
227 |
228 | //logger
229 | private Logger logger = LoggerFactory.getLogger(MinuteTimer.class);
230 |
231 | public void run() {
232 | if(globalReadPos != null && globalWritePos != null) {
233 | Calendar cal = Calendar.getInstance();
234 | DateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm");
235 | String time = sdf.format(cal.getTime());
236 | String rowKey = hBaseOP.parserRowKey + ":" + time;
237 | Put put = new Put(Bytes.toBytes(rowKey));
238 | Long readPosLong = Bytes.toLong(globalReadPos);
239 | String readPosString = String.valueOf(readPosLong);
240 | Long writePosLong = Bytes.toLong(globalWritePos);
241 | String writePosString = String.valueOf(writePosLong);
242 | put.add(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.eventRowCol), Bytes.toBytes(readPosString));
243 | put.add(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.entryRowCol), Bytes.toBytes(writePosString));
244 | try {
245 | hBaseOP.putHBaseData(put, hBaseOP.getCheckpointSchemaName());
246 | }catch (IOException e){
247 | logger.error("minute persistence read pos and write pos failed!!!");
248 | e.printStackTrace();
249 | }
250 | }
251 | }
252 | }
253 |
254 | //persistence batch data and pos thread
255 | class PersistenceThread extends Thread {
256 |
257 | //logger
258 | private Logger logger = LoggerFactory.getLogger(PersistenceThread.class);
259 |
260 | //BytesList get data from BytesQueue
261 | private List<byte[]> bytesList = new ArrayList<byte[]>();
262 |
263 | //control
264 | private boolean persistenceRunning = true;
265 |
266 | public void run() {
267 | startTime = new Date().getTime();
268 | bytesList.clear();
269 | while(persistenceRunning){
270 | //while + sleep
271 | try {
272 | Thread.sleep(2000);
273 | } catch (InterruptedException e) {
274 | logger.error("sleep error!!!");
275 | e.printStackTrace();
276 | }
277 | while(!bytesQueue.isEmpty()) {
278 | try {
279 | byte[] receiveBytes = bytesQueue.take();
280 | bytesList.add(receiveBytes);
281 | //per turn do not load much data
282 | if(bytesList.size() >= batchsize) break;
283 | } catch (InterruptedException e) {
284 | logger.error("take data from queue failed!!!");
285 | e.printStackTrace();
286 | }
287 | }
288 | //persistence the batched size entry string to entry table in HBase and
289 | // write pos to checkpoint
290 | if(bytesList.size() >= batchsize ||
291 | new Date().getTime() - startTime > secondsize * 1000) {
292 | if(bytesList.size() > 0) {
293 | try {
294 | //persistence entry data
295 | persistenceEntry();
296 | } catch (IOException e) {
297 | logger.error("persistence entry data failed!!!");
298 | e.printStackTrace();
299 | }
300 | try {
301 | //persistence pos data
302 | persistencePos();
303 | } catch (IOException e) {
304 | logger.error("persistence write pos failed!!!");
305 | e.printStackTrace();
306 | }
307 | //clear list
308 | bytesList.clear();
309 | startTime = new Date().getTime();
310 | }
311 | }
312 | }
313 | }
314 |
315 | //persistence entry data
316 | private void persistenceEntry() throws IOException{
317 | List<Put> puts = new ArrayList<Put>();
318 | for(byte[] bytes : bytesList) {
319 | CanalEntry.Entry entry = CanalEntry.Entry.parseFrom(bytes);
320 | System.out.println("--------------------------->get entry : " +
321 | entry.getEntryType() +
322 | ",-----> now pos : " +
323 | entry.getHeader().getLogfileOffset() +
324 | ",-----> next pos : " +
325 | (entry.getHeader().getLogfileOffset() + entry.getHeader().getEventLength()) +
326 | ",-----> binlog file : " +
327 | entry.getHeader().getLogfileName() +
328 | ",-----> schema name : " +
329 | entry.getHeader().getSchemaName() +
330 | ",-----> table name : " +
331 | entry.getHeader().getTableName()
332 | );
333 | String entryString = EntryToString(entry);
334 | Put put = new Put(globalWritePos);
335 | put.add(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.entryRowCol), Bytes.toBytes(entryString));
336 | puts.add(put);
337 | globalWritePos = Bytes.toBytes(Bytes.toLong(globalWritePos) + 1L);
338 | }
339 | if(puts.size() > 0) hBaseOP.putHBaseData(puts, hBaseOP.getEntryDataSchemaName());
340 | }
341 |
342 | //Entry to String
343 | private String EntryToString(CanalEntry.Entry entry) {
344 | return(EntryPrinter.printEntry(entry));
345 | }
346 |
347 | //persistence write pos data
348 | private void persistencePos() throws IOException {
349 | if(bytesList.size() > 0) {
350 | Put put = new Put(Bytes.toBytes(hBaseOP.parserRowKey));
351 | Long writePosLong = Bytes.toLong(globalWritePos);
352 | String writePosString = String.valueOf(writePosLong);
353 | put.add(hBaseOP.getFamily(), Bytes.toBytes(hBaseOP.entryRowCol), Bytes.toBytes(writePosString));
354 | hBaseOP.putHBaseData(put, hBaseOP.getCheckpointSchemaName());
355 | }
356 | }
357 | }
358 |
359 | //after parser
360 | private void afterParser(){
361 |
362 | }
363 |
364 | //main process
365 | public void mainProc() throws IOException{
366 | preParser();
367 | runParser();
368 | afterParser();
369 | }
370 | }
371 |
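mainProc() wires the whole pipeline: preParser() restores the read/write positions from the HBase checkpoint table (falling back to 0), runParser() starts the fetch thread (event table to in-memory queue), the per-minute checkpoint timer and the persistence thread (queue to entry table), and afterParser() is currently a no-op. A minimal launcher sketch, assuming HBase connectivity is configured through HBaseOperator's defaults:

import com.github.hackerwin7.mysql.parser.parser.MysqlParser;

public class MysqlParserLauncherSketch {
    public static void main(String[] args) throws Exception {
        MysqlParser parser = new MysqlParser();
        parser.mainProc(); // runs the pipeline; runParser() loops while the running flag is set
    }
}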
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/ParserVertifyKafka.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser;
2 |
3 | import com.github.hackerwin7.mysql.parser.kafka.driver.consumer.KafkaNoStaticReceiver;
4 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
5 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaNoStaticConf;
6 | import net.sf.json.JSONObject;
7 | import org.apache.avro.io.Decoder;
8 | import org.apache.avro.io.DecoderFactory;
9 | import org.apache.avro.specific.SpecificDatumReader;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 | import com.github.hackerwin7.mysql.parser.protocol.avro.EventEntryAvro;
13 |
14 | import java.io.IOException;
15 | import java.io.InputStream;
16 | import java.net.URL;
17 | import java.util.ArrayList;
18 | import java.util.HashSet;
19 | import java.util.Iterator;
20 | import java.util.List;
21 | import java.util.Map;
22 | import java.util.Properties;
23 | import java.util.Set;
24 |
25 | /**
26 | * Created by hp on 14-12-31.
27 | */
28 | public class ParserVertifyKafka {
29 |
30 | private Logger logger = LoggerFactory.getLogger(ParserVertifyKafka.class);
31 |
32 | private KafkaNoStaticConf kcnfAvro, kcnfJson;
33 | private KafkaNoStaticReceiver krAvro, krJson;
34 | public boolean running = true;
35 | private List<byte[]> msgList = new ArrayList<byte[]>();
36 | private List<byte[]> msgListAvro = new ArrayList<byte[]>();
37 | private List<byte[]> msgListJson = new ArrayList<byte[]>();
38 | private long timesize = 0;
39 | private String jsonKey;
40 | private String avroKey;
41 | private String[] jprimaryKeys;
42 | private String[] aprimaryKeys;
43 | private Set<String> jkeys = new HashSet<String>();
44 | private Set<String> akeys = new HashSet<String>();
45 | private Set<String> rkeys = new HashSet<String>();
46 |
47 | private boolean isJsonEnd = false;
48 | private boolean isAvroEnd = false;
49 |
50 |
51 | private void loadOnlineConfAvroJson() throws Exception {
52 | String zkserver, zkroot, topic, dataKfkaZk;
53 | int partition;
54 |
55 | logger.info("load online conf");
56 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
57 | InputStream in = url.openStream();
58 | Properties po = new Properties();
59 | po.load(in);
60 |
61 | zkserver = po.getProperty("kafka-zk");
62 | zkroot = po.getProperty("kafka-zk-root");
63 | topic = po.getProperty("kafka-topic");
64 | partition = Integer.valueOf(po.getProperty("kafka-partition"));
65 | dataKfkaZk = zkserver + zkroot;
66 | kcnfJson = new KafkaNoStaticConf();
67 | kcnfJson.loadZk(dataKfkaZk);
68 | kcnfJson.partition = partition;
69 | kcnfJson.topic = topic;
70 | kcnfJson.clientName = "jsoncnf1523657";
71 | krJson = new KafkaNoStaticReceiver(kcnfJson);
72 |
73 | zkserver = po.getProperty("kafka-avro-zk");
74 | zkroot = po.getProperty("kafka-avro-zk-root");
75 | topic = po.getProperty("kafka-avro-topic");
76 | partition = Integer.valueOf(po.getProperty("kafka-avro-partition"));
77 | dataKfkaZk = zkserver + zkroot;
78 | kcnfAvro = new KafkaNoStaticConf();
79 | kcnfAvro.loadZk(dataKfkaZk);
80 | kcnfAvro.partition = partition;
81 | kcnfAvro.topic = topic;
82 | kcnfAvro.clientName = "avrocnf5896532";
83 | krAvro = new KafkaNoStaticReceiver(kcnfAvro);
84 |
85 | timesize = Long.valueOf(po.getProperty("timesize")) * 60 * 1000;
86 | jsonKey = po.getProperty("json-key");
87 | jprimaryKeys = jsonKey.split(",");
88 | avroKey = po.getProperty("avro-key");
89 | aprimaryKeys = avroKey.split(",");
90 |
91 | logger.info("json conf:" + dataKfkaZk + "," + kcnfJson.partition + "," + kcnfJson.topic + "," + kcnfJson.clientName);
92 | logger.info("avro conf:" + dataKfkaZk + "," + kcnfAvro.partition + "," + kcnfAvro.topic + "," + kcnfAvro.clientName);
93 | }
94 |
95 | public void dumpJsonAvro() throws Exception {
96 | logger.info("dumping...");
97 | Thread jsonDump = new Thread(new Runnable() {
98 | @Override
99 | public void run() {
100 | krJson.run();
101 | }
102 | });
103 | Thread avroDump = new Thread(new Runnable() {
104 | @Override
105 | public void run() {
106 | krAvro.run();
107 | }
108 | });
109 | jsonDump.start();
110 | avroDump.start();
111 | Thread jsonRec = new Thread(new Runnable() {
112 | private Logger logger = LoggerFactory.getLogger(this.getClass().getName());
113 | @Override
114 | public void run() {
115 | logger.info("json running...");
116 | isJsonEnd = false;
117 | long starttime = System.currentTimeMillis();
118 | boolean isrun = true;
119 | logger.info("json queue...");
120 | while (isrun) {
121 | if(System.currentTimeMillis() - starttime >= timesize) break;
122 | while (isrun && !krJson.msgQueue.isEmpty()) {
123 | try {
124 | KafkaMetaMsg kmsg = krJson.msgQueue.take();
125 | msgListJson.add(kmsg.msg);
126 | } catch (Exception e) {
127 | logger.error(e.getMessage());
128 | }
129 | if(System.currentTimeMillis() - starttime >= timesize) {
130 | isrun = false;
131 | break;
132 | }
133 | }
134 | }
135 | isJsonEnd = true;
136 | }
137 | });
138 | Thread avroRec = new Thread(new Runnable() {
139 | private Logger logger = LoggerFactory.getLogger(this.getClass().getName());
140 | @Override
141 | public void run() {
142 | logger.info("avro running...");
143 | isAvroEnd = false;
144 | long starttime = System.currentTimeMillis();
145 | boolean isrun = true;
146 | logger.info("avro queue...");
147 | while (isrun) {
148 | if(System.currentTimeMillis() - starttime >= timesize) break;
149 | while (isrun && !krAvro.msgQueue.isEmpty()) {
150 | try {
151 | KafkaMetaMsg kmsg = krAvro.msgQueue.take();
152 | msgListAvro.add(kmsg.msg);
153 | } catch (Exception e) {
154 | logger.error(e.getMessage());
155 | }
156 | if(System.currentTimeMillis() - starttime >= timesize) {
157 | isrun = false;
158 | break;
159 | }
160 | }
161 | }
162 | isAvroEnd = true;
163 | }
164 | });
165 | jsonRec.start();
166 | avroRec.start();
167 | logger.info("running...");
168 | while (!isJsonEnd || !isAvroEnd) {
169 | Thread.sleep(3000);
170 | }
171 | logger.info("size :" + msgListJson.size() + ", " + msgListAvro.size());
172 | //do some operation
173 | jkeys.clear();
174 | akeys.clear();
175 | rkeys.clear();
176 | for(byte[] value : msgListJson) {
177 | String key = getJsonKey(value);
178 | jkeys.add(key);
179 | logger.info("json keys :" + key);
180 | }
181 | for(byte[] value : msgListAvro) {
182 | String key = getAvroKey(value);
183 | akeys.add(key);
184 | logger.info("avro keys :" + key);
185 | }
186 | rkeys.addAll(jkeys);
187 | rkeys.removeAll(akeys);
188 | logger.info("sub size :" + rkeys.size());
189 | for(String subKey : rkeys) {
190 | logger.info("sub set key : " + subKey);
191 | }
192 | logger.info("closed...");
193 | }
194 |
195 | private String getAvroKey(byte[] value) {
196 | String keyStr = "";
197 | EventEntryAvro avro = getAvroFromBytes(value);
198 | String dbname = avro.getDb().toString();
199 | String tbname = avro.getTab().toString();
200 | String oper = avro.getOpt().toString();
201 | keyStr += dbname + "#" + tbname + "#";
202 | Map fields = avro.getCur();
203 | for(String s : aprimaryKeys) {
204 | if(fields.containsKey(s)) {
205 | String kv = fields.get(s).toString();
206 | keyStr += kv + "#";
207 | } else {
208 | //logger.info("avro : map -> " + s + "," + fields.toString());
209 | Iterator iter = fields.entrySet().iterator();
210 | while (iter.hasNext()) {
211 | Map.Entry entry = (Map.Entry) iter.next();
212 | Object fkey = entry.getKey();
213 | Object fvalue = entry.getValue();
214 | String fk = fkey.toString();
215 | //logger.info("fkfkfk:" + fk);
216 | if(fk.equals(s)) {
217 | keyStr += fvalue.toString() + "#";
218 | break;
219 | }
220 | }
221 | }
222 | }
223 | keyStr += oper;
224 | return keyStr;
225 | }
226 |
227 | private String getJsonKey(byte[] value) throws Exception {
228 | String keyStr = "";
229 | String json = new String(value, "UTF-8");
230 | JSONObject jo = JSONObject.fromObject(json);
231 | String sdata = jo.getString("data");
232 | JSONObject jdata = JSONObject.fromObject(sdata);
233 | JSONObject jfields = jdata.getJSONObject("fields");
234 | String dbname = jdata.getString("schema");
235 | String tbname = jdata.getString("table");
236 | String oper = jdata.getString("operation");
237 | keyStr += dbname + "#" +tbname + "#";
238 | for(String s : jprimaryKeys) {
239 | if(jfields.containsKey(s)) {
240 | String kv = jfields.getString(s);
241 | keyStr += kv + "#";
242 | }
243 | }
244 | keyStr += oper;
245 | return keyStr;
246 | }
247 |
248 | private EventEntryAvro getAvroFromBytes(byte[] value) {
249 | SpecificDatumReader<EventEntryAvro> reader = new SpecificDatumReader<EventEntryAvro>(EventEntryAvro.getClassSchema());
250 | Decoder decoder = DecoderFactory.get().binaryDecoder(value,null);
251 | EventEntryAvro avro = null;
252 | try {
253 | avro = reader.read(null,decoder);
254 | } catch (IOException e) {
255 | e.printStackTrace();
256 | }
257 | return avro;
258 | }
259 |
260 | public static void main(String[] args) throws Exception {
261 | ParserVertifyKafka par = new ParserVertifyKafka();
262 | par.loadOnlineConfAvroJson();
263 | par.dumpJsonAvro();
264 | }
265 | }
266 |
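Both key builders produce strings of the shape schema#table#<primary-key values>#operation, and the verification simply reports the keys seen on the JSON topic but never on the Avro topic (rkeys = jkeys minus akeys). A toy illustration with made-up keys:

import java.util.HashSet;
import java.util.Set;

public class KeyDiffSketch {
    public static void main(String[] args) {
        Set<String> jkeys = new HashSet<String>();  // keys rebuilt from the JSON topic
        Set<String> akeys = new HashSet<String>();  // keys rebuilt from the Avro topic
        jkeys.add("shop#orders#1001#UPDATE");
        jkeys.add("shop#orders#1002#INSERT");
        akeys.add("shop#orders#1001#UPDATE");
        Set<String> rkeys = new HashSet<String>(jkeys);
        rkeys.removeAll(akeys);                     // rows missing from the Avro topic
        System.out.println(rkeys);                  // [shop#orders#1002#INSERT]
    }
}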
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/ParserVertifyKafkaAvro.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser;
2 |
3 | import com.github.hackerwin7.mysql.parser.kafka.driver.consumer.KafkaReceiver;
4 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
5 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
6 | import net.sf.json.JSONObject;
7 | import org.apache.avro.io.Decoder;
8 | import org.apache.avro.io.DecoderFactory;
9 | import org.apache.avro.specific.SpecificDatumReader;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 | import com.github.hackerwin7.mysql.parser.protocol.avro.EventEntryAvro;
13 |
14 | import java.io.IOException;
15 | import java.io.InputStream;
16 | import java.net.URL;
17 | import java.util.ArrayList;
18 | import java.util.HashSet;
19 | import java.util.Iterator;
20 | import java.util.List;
21 | import java.util.Map;
22 | import java.util.Properties;
23 | import java.util.Set;
24 |
25 | /**
26 | * Created by hp on 15-3-17.
27 | */
28 | public class ParserVertifyKafkaAvro {
29 |
30 | private Logger logger = LoggerFactory.getLogger(ParserVertifyKafkaAvro.class);
31 |
32 | private KafkaConf kcnf, kcnfAvro, kcnfJson;
33 | private KafkaReceiver kr, krAvro, krJson;
34 | public boolean running = true;
35 | private List<byte[]> msgList = new ArrayList<byte[]>();
36 | private List<byte[]> msgListAvro = new ArrayList<byte[]>();
37 | private List<byte[]> msgListJson = new ArrayList<byte[]>();
38 | private long timesize = 0;
39 | private String jsonKey;
40 | private String avroKey;
41 | private String[] jprimaryKeys;
42 | private String[] aprimaryKeys;
43 | private Set<String> jkeys = new HashSet<String>();
44 | private Set<String> akeys = new HashSet<String>();
45 | private Set<String> rkeys = new HashSet<String>();
46 |
47 | public ParserVertifyKafkaAvro(KafkaConf kc) {
48 | kcnf = kc;
49 | kr = new KafkaReceiver(kcnf);
50 | }
51 |
52 | public ParserVertifyKafkaAvro() {
53 |
54 | }
55 |
56 | private void initJson() throws Exception {
57 | kcnf = new KafkaConf();
58 | loadOnlineConf();
59 | kr = new KafkaReceiver(kcnf);
60 | }
61 |
62 | private void initAvro() throws Exception {
63 | kcnf = new KafkaConf();
64 | loadOnlineAvro();
65 | kr = new KafkaReceiver(kcnf);
66 | }
67 |
68 |
69 | private void loadOnlineConfAvroJson() throws Exception {
70 | logger.info("load online conf");
71 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
72 | InputStream in = url.openStream();
73 | Properties po = new Properties();
74 | po.load(in);
75 | String zkserver = po.getProperty("kafka-zk");
76 | String zkroot = po.getProperty("kafka-zk-root");
77 | String topic = po.getProperty("kafka-topic");
78 | int partition = Integer.valueOf(po.getProperty("kafka-partition"));
79 |
80 | String dataKfkaZk = zkserver + zkroot;
81 | kcnfJson = new KafkaConf(); kcnfJson.loadZk(dataKfkaZk);
82 | kcnfJson.partition = partition;
83 | kcnfJson.topic = topic;
84 |
85 | krJson = new KafkaReceiver(kcnfJson);
86 |
87 | zkserver = po.getProperty("kafka-avro-zk");
88 | zkroot = po.getProperty("kafka-avro-zk-root");
89 | topic = po.getProperty("kafka-avro-topic");
90 | partition = Integer.valueOf(po.getProperty("kafka-avro-partition"));
91 |
92 | dataKfkaZk = zkserver + zkroot;
93 | kcnfAvro = new KafkaConf(); kcnfAvro.loadZk(dataKfkaZk);
94 | kcnfAvro.partition = partition;
95 | kcnfAvro.topic = topic;
96 |
97 | krAvro = new KafkaReceiver(kcnfAvro);
98 |
99 | timesize = Long.valueOf(po.getProperty("timesize")) * 60 * 1000;
100 |
101 | jsonKey = po.getProperty("json-key");
102 | jprimaryKeys = jsonKey.split(",");
103 | avroKey = po.getProperty("avro-key");
104 | aprimaryKeys = avroKey.split(",");
105 | }
106 |
107 | private void loadStatic() {
108 | // kcnf.brokerSeeds.add("172.17.36.53");
109 | // kcnf.brokerSeeds.add("172.17.36.54");
110 | // kcnf.brokerSeeds.add("172.17.36.55");
111 | // kcnf.port = 9092;
112 | // kcnf.portList.add(9092);
113 | // kcnf.portList.add(9092);
114 | // kcnf.portList.add(9092);
115 | // kcnf.partition = 0;
116 | // kcnf.topic = "mysql_bb";
117 |
118 | kcnf.brokerSeeds.add("127.0.0.1");
119 | kcnf.portList.add(9092);
120 | kcnf.partition = 0;
121 | kcnf.topic = "parser-log-mysql";
122 | }
123 |
124 | private void loadOnlineConf() throws Exception {
125 | logger.info("load online conf");
126 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
127 | InputStream in = url.openStream();
128 | Properties po = new Properties();
129 | po.load(in);
130 | String zkserver = po.getProperty("kafka-zk");
131 | String zkroot = po.getProperty("kafka-zk-root");
132 | String topic = po.getProperty("kafka-topic");
133 | int partition = Integer.valueOf(po.getProperty("kafka-partition"));
134 |
135 | String dataKfkaZk = zkserver + zkroot;
136 | kcnf.loadZk(dataKfkaZk);
137 | kcnf.partition = partition;
138 | kcnf.topic = topic;
139 | }
140 |
141 | private void loadOnlineAvro() throws Exception {
142 | logger.info("load online conf");
143 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
144 | InputStream in = url.openStream();
145 | Properties po = new Properties();
146 | po.load(in);
147 | String zkserver = po.getProperty("kafka-avro-zk");
148 | String zkroot = po.getProperty("kafka-avro-zk-root");
149 | String topic = po.getProperty("kafka-avro-topic");
150 | int partition = Integer.valueOf(po.getProperty("kafka-partition"));
151 |
152 | String dataKfkaZk = zkserver + zkroot;
153 | kcnf.loadZk(dataKfkaZk);
154 | kcnf.partition = partition;
155 | kcnf.topic = topic;
156 | }
157 |
158 | public void dumpJsonAvro() throws Exception {
159 | logger.info("dumping...");
160 | Thread jsonDump = new Thread(new Runnable() {
161 | @Override
162 | public void run() {
163 | krJson.run();
164 | }
165 | });
166 | Thread avroDump = new Thread(new Runnable() {
167 | @Override
168 | public void run() {
169 | krAvro.run();
170 | }
171 | });
172 | long starttime = System.currentTimeMillis();
173 | jsonDump.start();
174 | avroDump.start();
175 | while (running) {
176 | if(System.currentTimeMillis() - starttime >= timesize) break;
177 | while (running && !krJson.msgQueue.isEmpty()) {
178 | KafkaMetaMsg kmsg = krJson.msgQueue.take();
179 | msgListJson.add(kmsg.msg);
180 | logger.info(new String(kmsg.msg));
181 | if(System.currentTimeMillis() - starttime >= timesize) {
182 | running = false;
183 | break;
184 | }
185 | }
186 | while (running && !krAvro.msgQueue.isEmpty()) {
187 | KafkaMetaMsg kmsg = krAvro.msgQueue.take();
188 | msgListAvro.add(kmsg.msg);
189 | logger.info(new String(kmsg.msg));
190 | if(System.currentTimeMillis() - starttime >= timesize) {
191 | running = false;
192 | break;
193 | }
194 | }
195 | }
196 | logger.info("size :" + msgListJson.size() + ", " + msgListAvro.size());
197 | //do some operation
198 | jkeys.clear();
199 | akeys.clear();
200 | rkeys.clear();
201 | for(byte[] value : msgListJson) {
202 | String key = getJsonKey(value);
203 | jkeys.add(key);
204 | logger.info("json keys :" + key);
205 | }
206 | for(byte[] value : msgListAvro) {
207 | String key = getAvroKey(value);
208 | akeys.add(key);
209 | logger.info("avro keys :" + key);
210 | }
211 | rkeys.addAll(jkeys);
212 | rkeys.removeAll(akeys);
213 | for(String subKey : rkeys) {
214 | logger.info("sub set key : " + subKey);
215 | }
216 | logger.info("closed...");
217 | }
218 |
219 | private String getAvroKey(byte[] value) {
220 | String keyStr = "";
221 | EventEntryAvro avro = getAvroFromBytes(value);
222 | String dbname = avro.getDb().toString();
223 | String tbname = avro.getTab().toString();
224 | String oper = avro.getOpt().toString();
225 | keyStr += dbname + "#" + tbname + "#";
226 | Map fields = avro.getCur();
227 | for(String s : aprimaryKeys) {
228 | if(fields.containsKey(s)) {
229 | String kv = fields.get(s).toString();
230 | keyStr += kv + "#";
231 | }
232 | }
233 | keyStr += oper;
234 | return keyStr;
235 | }
236 |
237 | private String getJsonKey(byte[] value) {
238 | String keyStr = "";
239 | String jsonStr = new String(value);
240 | JSONObject jo = JSONObject.fromObject(jsonStr);
241 | JSONObject jdata = jo.getJSONObject("data");
242 | JSONObject jfields = jdata.getJSONObject("fields");
243 | String dbname = jdata.getString("schema");
244 | String tbname = jdata.getString("table");
245 | String oper = jdata.getString("operation");
246 | keyStr += dbname + "#" +tbname + "#";
247 | for(String s : jprimaryKeys) {
248 | if(jfields.containsKey(s)) {
249 | String kv = jfields.getString(s);
250 | keyStr += kv + "#";
251 | }
252 | }
253 | keyStr += oper;
254 | return keyStr;
255 | }
256 |
257 | public void dumpJson() throws Exception {
258 | logger.info("dumping...");
259 | //thread start dumping
260 | Thread tdump = new Thread(new Runnable() {
261 | @Override
262 | public void run() {
263 | kr.run();
264 | }
265 | });
266 | tdump.start();
267 | while (running) {
268 | while (!kr.msgQueue.isEmpty()) {
269 | KafkaMetaMsg kmsg = kr.msgQueue.take();
270 | msgList.add(kmsg.msg);
271 | }
272 | for(byte[] value : msgList) {
273 | String sv = new String(value);
274 | logger.info("value is :" + sv);
275 | }
276 | msgList.clear();
277 | }
278 | }
279 |
280 | public void dumpAvro() throws Exception {
281 | logger.info("dumping...");
282 | //thread start dumping
283 | Thread tdump = new Thread(new Runnable() {
284 | @Override
285 | public void run() {
286 | kr.run();
287 | }
288 | });
289 | tdump.start();
290 | while (running) {
291 | while (!kr.msgQueue.isEmpty()) {
292 | KafkaMetaMsg kmsg = kr.msgQueue.take();
293 | msgList.add(kmsg.msg);
294 | }
295 | for(byte[] value : msgList) {
296 | EventEntryAvro avro = getAvroFromBytes(value);
297 | logger.info("================================================= get message :");
298 | logger.info("---> dbName:"+avro.getDb()+",tableName:"+avro.getTab());
299 | logger.info("---> type:"+avro.getOpt()+",ddl:"+avro.getDdl());
300 | logger.info("---> cus:"+getMapVal(avro.getCus()));
301 | logger.info("---> column/value:" + getColVal(avro.getCur()));
302 | }
303 | msgList.clear();
304 | }
305 | }
306 |
307 | private String getMapVal(Map cv) {
308 | String constr = "";
309 | if(cv != null) {
310 | Iterator iter = cv.entrySet().iterator();
311 | while (iter.hasNext()) {
312 | Map.Entry entry = (Map.Entry) iter.next();
313 | Object key = entry.getKey();
314 | Object value = entry.getValue();
315 | constr += ("[" + key.toString() + "," + value.toString() + "]");
316 | }
317 | }
318 | return constr;
319 | }
320 |
321 | private String getColVal(Map cv) {
322 | String constr = "";
323 | if(cv != null) {
324 | Iterator iter = cv.entrySet().iterator();
325 | while (iter.hasNext()) {
326 | Map.Entry entry = (Map.Entry) iter.next();
327 | Object key = entry.getKey();
328 | Object value = entry.getValue();
329 | constr += ("[" + key.toString() + "," + value.toString() + "]");
330 | }
331 | }
332 | return constr;
333 | }
334 |
335 | private EventEntryAvro getAvroFromBytes(byte[] value) {
336 | SpecificDatumReader<EventEntryAvro> reader = new SpecificDatumReader<EventEntryAvro>(EventEntryAvro.getClassSchema());
337 | Decoder decoder = DecoderFactory.get().binaryDecoder(value,null);
338 | EventEntryAvro avro = null;
339 | try {
340 | avro = reader.read(null,decoder);
341 | } catch (IOException e) {
342 | e.printStackTrace();
343 | }
344 | return avro;
345 | }
346 |
347 | public static void main(String[] args) throws Exception {
348 | ParserVertifyKafkaAvro par = new ParserVertifyKafkaAvro();
349 | par.initAvro();
350 | par.dumpAvro();
351 | }
352 |
353 | }
354 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/ParserVertifyKafkaJson.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser;
2 |
3 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
4 | import com.github.hackerwin7.mysql.parser.kafka.driver.consumer.KafkaReceiver;
5 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.io.InputStream;
10 | import java.net.URL;
11 | import java.util.ArrayList;
12 | import java.util.List;
13 | import java.util.Properties;
14 |
15 | /**
16 | * Created by hp on 15-3-18.
17 | */
18 | public class ParserVertifyKafkaJson {
19 |
20 | private Logger logger = LoggerFactory.getLogger(ParserVertifyKafkaJson.class);
21 |
22 | private KafkaConf kcnfJson;
23 | private KafkaReceiver krJson;
24 | public boolean running = true;
25 | private List<byte[]> msgListJson = new ArrayList<byte[]>();
26 |
27 | public void loadOnlineConf() throws Exception {
28 | logger.info("loading conf...");
29 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
30 | InputStream in = url.openStream();
31 | Properties po = new Properties();
32 | po.load(in);
33 |
34 | String zkserver = po.getProperty("kafka-zk");
35 | String zkroot = po.getProperty("kafka-zk-root");
36 | String topic = po.getProperty("kafka-topic");
37 | int partition = Integer.valueOf(po.getProperty("kafka-partition"));
38 | String dataKfkaZk = zkserver + zkroot;
39 | kcnfJson = new KafkaConf();
40 | kcnfJson.loadZk(dataKfkaZk);
41 | kcnfJson.partition = partition;
42 | kcnfJson.topic = topic;
43 | kcnfJson.clientName = "jsoncnf1523657";
44 | logger.info("json conf:" + dataKfkaZk + "," + kcnfJson.partition + "," + kcnfJson.topic + "," + kcnfJson.clientName);
45 | krJson = new KafkaReceiver(kcnfJson);
46 | }
47 |
48 | public void dumpJson() throws Exception {
49 | logger.info("dumping...");
50 | Thread dump = new Thread(new Runnable() {
51 | @Override
52 | public void run() {
53 | krJson.run();
54 | }
55 | });
56 | dump.start();
57 | while (running) {
58 | while (!krJson.msgQueue.isEmpty()) {
59 | KafkaMetaMsg kmsg = krJson.msgQueue.take();
60 | msgListJson.add(kmsg.msg);
61 | logger.info("====>json str:" + new String(kmsg.msg));
62 | }
63 | }
64 | msgListJson.clear();
65 | }
66 |
67 | public static void main(String[] args) throws Exception {
68 | ParserVertifyKafkaJson pkj = new ParserVertifyKafkaJson();
69 | pkj.loadOnlineConf();
70 | pkj.dumpJson();
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/ParserVertifyKafkaSimple.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser;
2 |
3 | import com.github.hackerwin7.mysql.parser.kafka.driver.consumer.KafkaReceiver;
4 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
5 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaMetaMsg;
6 | import net.sf.json.JSONObject;
7 | import org.apache.avro.io.Decoder;
8 | import org.apache.avro.io.DecoderFactory;
9 | import org.apache.avro.specific.SpecificDatumReader;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 | import com.github.hackerwin7.mysql.parser.protocol.avro.EventEntryAvro;
13 |
14 | import java.io.IOException;
15 | import java.io.InputStream;
16 | import java.net.URL;
17 | import java.util.ArrayList;
18 | import java.util.HashSet;
19 | import java.util.Iterator;
20 | import java.util.List;
21 | import java.util.Map;
22 | import java.util.Properties;
23 | import java.util.Set;
24 |
25 | /**
26 | * Created by hp on 15-3-17.
27 | */
28 | public class ParserVertifyKafkaSimple {
29 |
30 | private Logger logger = LoggerFactory.getLogger(ParserVertifyKafkaSimple.class);
31 |
32 |     private KafkaConf kcnf, kcnfAvro = new KafkaConf(), kcnfJson = new KafkaConf(); // initialized here so loadOnlineConfAvroJson() can call loadZk() without a NullPointerException
33 | private KafkaReceiver kr, krAvro, krJson;
34 | public boolean running = true;
35 | private List msgList = new ArrayList();
36 | private List msgListAvro = new ArrayList();
37 | private List msgListJson = new ArrayList();
38 | private long timesize = 0;
39 | private String jsonKey;
40 | private String avroKey;
41 | private String[] jprimaryKeys;
42 | private String[] aprimaryKeys;
43 | private Set jkeys = new HashSet();
44 | private Set akeys = new HashSet();
45 | private Set rkeys = new HashSet();
46 |
47 | public ParserVertifyKafkaSimple(KafkaConf kc) {
48 | kcnf = kc;
49 | kr = new KafkaReceiver(kcnf);
50 | }
51 |
52 | public ParserVertifyKafkaSimple() {
53 |
54 | }
55 |
56 | private void initJson() throws Exception {
57 | kcnf = new KafkaConf();
58 | loadOnlineConf();
59 | kr = new KafkaReceiver(kcnf);
60 | }
61 |
62 |
63 | private void loadOnlineConfAvroJson() throws Exception {
64 | logger.info("load online conf");
65 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
66 | InputStream in = url.openStream();
67 | Properties po = new Properties();
68 | po.load(in);
69 | String zkserver = po.getProperty("kafka-zk");
70 | String zkroot = po.getProperty("kafka-zk-root");
71 | String topic = po.getProperty("kafka-topic");
72 | int partition = Integer.valueOf(po.getProperty("kafka-partition"));
73 |
74 | String dataKfkaZk = zkserver + zkroot;
75 | kcnfJson.loadZk(dataKfkaZk);
76 | kcnfJson.partition = partition;
77 | kcnfJson.topic = topic;
78 |
79 | krJson = new KafkaReceiver(kcnfJson);
80 |
81 | zkserver = po.getProperty("kafka-avro-zk");
82 | zkroot = po.getProperty("kafka-avro-zk-root");
83 | topic = po.getProperty("kafka-avro-topic");
84 | partition = Integer.valueOf(po.getProperty("kafka-avro-partition"));
85 |
86 | dataKfkaZk = zkserver + zkroot;
87 | kcnfAvro.loadZk(dataKfkaZk);
88 | kcnfAvro.partition = partition;
89 | kcnfAvro.topic = topic;
90 |
91 | krAvro = new KafkaReceiver(kcnfAvro);
92 |
93 | timesize = Long.valueOf(po.getProperty("timesize")) * 60 * 1000;
94 |
95 | jsonKey = po.getProperty("json-key");
96 | jprimaryKeys = jsonKey.split(",");
97 | avroKey = po.getProperty("avro-key");
98 | aprimaryKeys = avroKey.split(",");
99 | }
100 |
101 | private void loadStatic() {
102 | // kcnf.brokerSeeds.add("172.17.36.53");
103 | // kcnf.brokerSeeds.add("172.17.36.54");
104 | // kcnf.brokerSeeds.add("172.17.36.55");
105 | // kcnf.port = 9092;
106 | // kcnf.portList.add(9092);
107 | // kcnf.portList.add(9092);
108 | // kcnf.portList.add(9092);
109 | // kcnf.partition = 0;
110 | // kcnf.topic = "mysql_bb";
111 |
112 | kcnf.brokerSeeds.add("127.0.0.1");
113 | kcnf.portList.add(9092);
114 | kcnf.partition = 0;
115 | kcnf.topic = "parser-log-mysql";
116 | }
117 |
118 | private void loadOnlineConf() throws Exception {
119 | logger.info("load online conf");
120 | URL url = new URL("https://raw.githubusercontent.com/hackerwin7/configuration-service/master/parser-down.properties");
121 | InputStream in = url.openStream();
122 | Properties po = new Properties();
123 | po.load(in);
124 | String zkserver = po.getProperty("kafka-zk");
125 | String zkroot = po.getProperty("kafka-zk-root");
126 | String topic = po.getProperty("kafka-topic");
127 | int partition = Integer.valueOf(po.getProperty("kafka-partition"));
128 |
129 | String dataKfkaZk = zkserver + zkroot;
130 | kcnf.loadZk(dataKfkaZk);
131 | kcnf.partition = partition;
132 | kcnf.topic = topic;
133 | }
134 |
135 | public void dumpJsonAvro() throws Exception {
136 | logger.info("dumping...");
137 | Thread jsonDump = new Thread(new Runnable() {
138 | @Override
139 | public void run() {
140 | krJson.run();
141 | }
142 | });
143 | Thread avroDump = new Thread(new Runnable() {
144 | @Override
145 | public void run() {
146 | krAvro.run();
147 | }
148 | });
149 | long starttime = System.currentTimeMillis();
150 | jsonDump.start();
151 | avroDump.start();
152 | while (running) {
153 | if(System.currentTimeMillis() - starttime >= timesize) break;
154 | while (running && !krJson.msgQueue.isEmpty()) {
155 | KafkaMetaMsg kmsg = krJson.msgQueue.take();
156 | msgListJson.add(kmsg.msg);
157 | logger.info(new String(kmsg.msg));
158 | if(System.currentTimeMillis() - starttime >= timesize) {
159 | running = false;
160 | break;
161 | }
162 | }
163 | while (running && !krAvro.msgQueue.isEmpty()) {
164 | KafkaMetaMsg kmsg = krAvro.msgQueue.take();
165 | msgListAvro.add(kmsg.msg);
166 | logger.info(new String(kmsg.msg));
167 | if(System.currentTimeMillis() - starttime >= timesize) {
168 | running = false;
169 | break;
170 | }
171 | }
172 | }
173 | logger.info("size :" + msgListJson.size() + ", " + msgListAvro.size());
174 | //do some operation
175 | jkeys.clear();
176 | akeys.clear();
177 | rkeys.clear();
178 | for(byte[] value : msgListJson) {
179 | String key = getJsonKey(value);
180 | jkeys.add(key);
181 | logger.info("json keys :" + key);
182 | }
183 | for(byte[] value : msgListAvro) {
184 | String key = getAvroKey(value);
185 | akeys.add(key);
186 | logger.info("avro keys :" + key);
187 | }
188 | rkeys.addAll(jkeys);
189 | rkeys.removeAll(akeys);
190 | for(String subKey : rkeys) {
191 | logger.info("sub set key : " + subKey);
192 | }
193 | logger.info("closed...");
194 | }
195 |
196 | private String getAvroKey(byte[] value) {
197 | String keyStr = "";
198 | EventEntryAvro avro = getAvroFromBytes(value);
199 | String dbname = avro.getDb().toString();
200 | String tbname = avro.getTab().toString();
201 | String oper = avro.getOpt().toString();
202 | keyStr += dbname + "#" + tbname + "#";
203 | Map fields = avro.getCur();
204 | for(String s : aprimaryKeys) {
205 | if(fields.containsKey(s)) {
206 | String kv = fields.get(s).toString();
207 | keyStr += kv + "#";
208 | }
209 | }
210 | keyStr += oper;
211 | return keyStr;
212 | }
213 |
214 | private String getJsonKey(byte[] value) {
215 | String keyStr = "";
216 | String jsonStr = new String(value);
217 | JSONObject jo = JSONObject.fromObject(jsonStr);
218 | JSONObject jdata = jo.getJSONObject("data");
219 | JSONObject jfields = jdata.getJSONObject("fields");
220 | String dbname = jdata.getString("schema");
221 | String tbname = jdata.getString("table");
222 | String oper = jdata.getString("operation");
223 | keyStr += dbname + "#" +tbname + "#";
224 | for(String s : jprimaryKeys) {
225 | if(jfields.containsKey(s)) {
226 | String kv = jfields.getString(s);
227 | keyStr += kv + "#";
228 | }
229 | }
230 | keyStr += oper;
231 | return keyStr;
232 | }
233 |
234 | public void dumpJson() throws Exception {
235 | logger.info("dumping...");
236 | //thread start dumping
237 | Thread tdump = new Thread(new Runnable() {
238 | @Override
239 | public void run() {
240 | kr.run();
241 | }
242 | });
243 | tdump.start();
244 | while (running) {
245 | while (!kr.msgQueue.isEmpty()) {
246 | KafkaMetaMsg kmsg = kr.msgQueue.take();
247 | msgList.add(kmsg.msg);
248 | }
249 | for(byte[] value : msgList) {
250 | String sv = new String(value);
251 | logger.info("value is :" + sv);
252 | }
253 | msgList.clear();
254 | }
255 | }
256 |
257 | public void dumpAvro() throws Exception {
258 | //thread start dumping
259 | Thread tdump = new Thread(new Runnable() {
260 | @Override
261 | public void run() {
262 | kr.run();
263 | }
264 | });
265 | tdump.start();
266 | while (running) {
267 | while (!kr.msgQueue.isEmpty()) {
268 | KafkaMetaMsg kmsg = kr.msgQueue.take();
269 | msgList.add(kmsg.msg);
270 | }
271 | for(byte[] value : msgList) {
272 | EventEntryAvro avro = getAvroFromBytes(value);
273 | logger.info("================================================= get message :");
274 | logger.info("---> dbName:"+avro.getDb()+",tableName:"+avro.getTab());
275 | logger.info("---> type:"+avro.getOpt()+",ddl:"+avro.getDdl());
276 | logger.info("---> cus:"+getMapVal(avro.getCus()));
277 | logger.info("---> column/value:" + getColVal(avro.getCur()));
278 | }
279 | msgList.clear();
280 | }
281 | }
282 |
283 | private String getMapVal(Map cv) {
284 | String constr = "";
285 | if(cv != null) {
286 | Iterator iter = cv.entrySet().iterator();
287 | while (iter.hasNext()) {
288 | Map.Entry entry = (Map.Entry) iter.next();
289 | Object key = entry.getKey();
290 | Object value = entry.getValue();
291 | constr += ("[" + key.toString() + "," + value.toString() + "]");
292 | }
293 | }
294 | return constr;
295 | }
296 |
297 | private String getColVal(Map cv) {
298 | String constr = "";
299 | if(cv != null) {
300 | Iterator iter = cv.entrySet().iterator();
301 | while (iter.hasNext()) {
302 | Map.Entry entry = (Map.Entry) iter.next();
303 | Object key = entry.getKey();
304 | Object value = entry.getValue();
305 | constr += ("[" + key.toString() + "," + value.toString() + "]");
306 | }
307 | }
308 | return constr;
309 | }
310 |
311 | private EventEntryAvro getAvroFromBytes(byte[] value) {
312 | SpecificDatumReader reader = new SpecificDatumReader(EventEntryAvro.getClassSchema());
313 | Decoder decoder = DecoderFactory.get().binaryDecoder(value,null);
314 | EventEntryAvro avro = null;
315 | try {
316 | avro = reader.read(null,decoder);
317 | } catch (IOException e) {
318 | e.printStackTrace();
319 | }
320 | return avro;
321 | }
322 |
323 | public static void main(String[] args) throws Exception {
324 | ParserVertifyKafkaSimple par = new ParserVertifyKafkaSimple();
325 | par.initJson();
326 | par.dumpJson();
327 | }
328 |
329 | }
330 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/utils/EntryPrinter.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser.utils;
2 |
3 | import com.github.hackerwin7.mysql.parser.protocol.protobuf.CanalEntry;
4 | import com.google.protobuf.InvalidProtocolBufferException;
5 |
6 | import java.util.Date;
7 | import java.util.List;
8 |
9 | /**
10 | * Created by hp on 14-9-17.
11 | */
12 | public class EntryPrinter {
13 |
14 | private CanalEntry.Entry entry;
15 |
16 | public EntryPrinter(CanalEntry.Entry entry) {
17 | this.entry = entry;
18 | }
19 |
20 | public static String printEntry(CanalEntry.Entry entry){
21 | String result = null;
22 | long executeTime = entry.getHeader().getExecuteTime();
23 | long delayTime = new Date().getTime() - executeTime;
24 |
25 | if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
26 | if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN) {
27 | CanalEntry.TransactionBegin begin = null;
28 | try {
29 | begin = CanalEntry.TransactionBegin.parseFrom(entry.getStoreValue());
30 | } catch (InvalidProtocolBufferException e) {
31 | throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
32 | }
33 |                 // print the transaction header info: executing thread id and transaction elapsed time
34 | result = "{" +
35 | "\"binlog name\":" + entry.getHeader().getLogfileName() + "," +
36 | "\"log file offset\":" + String.valueOf(entry.getHeader().getLogfileOffset()) + "," +
37 | "\"execute time\":" + String.valueOf(entry.getHeader().getExecuteTime()) + "," +
38 | "\"delay time\":" + String.valueOf(delayTime) + "," +
39 | "\"BEGIN ----> Thread id\":" + begin.getThreadId();
40 | } else if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
41 | CanalEntry.TransactionEnd end = null;
42 | try {
43 | end = CanalEntry.TransactionEnd.parseFrom(entry.getStoreValue());
44 | } catch (InvalidProtocolBufferException e) {
45 | throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
46 | }
47 |                 // print the transaction commit info: transaction id
48 | result = "{" +
49 | "\"binlog name\":" + entry.getHeader().getLogfileName() + "," +
50 | "\"log file offset\":" + String.valueOf(entry.getHeader().getLogfileOffset()) + "," +
51 | "\"execute time\":" + String.valueOf(entry.getHeader().getExecuteTime()) + "," +
52 | "\"delay time\":" + String.valueOf(delayTime) + "," +
53 |                         "\"END ----> Transaction id\":" + end.getTransactionId();
54 | }
55 |
56 | }
57 |
58 | if (entry.getEntryType() == CanalEntry.EntryType.ROWDATA) {
59 | CanalEntry.RowChange rowChage = null;
60 | try {
61 | rowChage = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
62 | } catch (Exception e) {
63 | throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
64 | }
65 |
66 | CanalEntry.EventType eventType = rowChage.getEventType();
67 | //print the row information
68 | result = "{" +
69 | "\"binlog name\":" + entry.getHeader().getLogfileName() + "," +
70 | "\"log file offset\":" + String.valueOf(entry.getHeader().getLogfileOffset()) + "," +
71 | "\"schema name\":" + entry.getHeader().getSchemaName() + "," +
72 | "\"table name\":" + entry.getHeader().getTableName() + "," +
73 | "\"event type\":" + eventType + "," +
74 | "\"execute time\":" + String.valueOf(entry.getHeader().getExecuteTime()) + "," +
75 | "\"delay time\":" + String.valueOf(delayTime);
76 | if (rowChage.getIsDdl()) {
77 |                 result += "," + "\"SQL\":" + rowChage.getSql();
78 | }
79 |
80 | for (CanalEntry.RowData rowData : rowChage.getRowDatasList()) {
81 | if (eventType == CanalEntry.EventType.DELETE) {
82 | result += printColumn(rowData.getBeforeColumnsList());
83 | } else if (eventType == CanalEntry.EventType.INSERT) {
84 | result += printColumn(rowData.getAfterColumnsList());
85 | } else {//update
86 | //before
87 | result += printColumn(rowData.getBeforeColumnsList());
88 | //after
89 | result += printColumn(rowData.getAfterColumnsList());
90 | //updated
91 | result += printColumn(rowData.getBeforeColumnsList(),rowData.getAfterColumnsList());
92 | }
93 | }
94 | }
95 | return(result);
96 | }
97 |
98 |
99 | private static String printColumn(List columns) {
100 | //print the column information
101 | String result = "[";
102 | int cases = 0;
103 | for (CanalEntry.Column column : columns) {
104 | result += "\"" + column.getName() + "\":" + column.getValue() +
105 |                     "," + "\"type\":" + column.getMysqlType();
106 | if (column.getUpdated()) {
107 | result += "," + "\"update\":" + column.getUpdated();
108 | }
109 | if(cases != columns.size() -1) {
110 | result += "|";
111 | }
112 | cases ++;
113 | }
114 | result += "]";
115 | return (result);
116 | }
117 |
118 | private static String printColumn(List columns1,List columns2) {
119 | //print the column information
120 | String result = "[";
121 | for(int i=0;i<=columns2.size()-1;i++){
122 | StringBuilder builder = new StringBuilder();
123 | if(columns2.get(i).getIsKey()||columns2.get(i).getUpdated()){
124 | builder.append(columns2.get(i).getName() + " : " + columns2.get(i).getValue());
125 | builder.append(" type=" + columns2.get(i).getMysqlType());
126 | result += "\"" + columns2.get(i).getName() + "\":" + columns2.get(i).getValue() +
127 | "," + "\"type\":" + columns2.get(i).getMysqlType();
128 | }
129 | if(i != columns2.size() - 1) {
130 | result += "|";
131 | }
132 | }
133 | result += "]";
134 | return(result);
135 | }
136 |
137 | }
138 |
--------------------------------------------------------------------------------
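A note on usage: printEntry() returns null for entry types it does not format (e.g. HEARTBEAT), so call sites are expected to guard the result. A short illustrative sketch (the EntryPrinterSketch class and logEntry helper are hypothetical; the Entry is assumed to be deserialized upstream from the tracker topic):

    import com.github.hackerwin7.mysql.parser.parser.utils.EntryPrinter;
    import com.github.hackerwin7.mysql.parser.protocol.protobuf.CanalEntry;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class EntryPrinterSketch {
        private static final Logger LOG = LoggerFactory.getLogger(EntryPrinterSketch.class);

        // Log a one-line summary of a canal entry; null means the printer had nothing to format.
        public static void logEntry(CanalEntry.Entry entry) {
            String summary = EntryPrinter.printEntry(entry);
            if (summary != null) {
                LOG.info(summary);
            }
        }
    }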
/src/main/java/com/github/hackerwin7/mysql/parser/parser/utils/KafkaPosition.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser.utils;
2 |
3 | /**
4 | * Created by hp on 14-12-15.
5 | */
6 | public class KafkaPosition {
7 | public String topic = null;
8 | public int partition = 0;
9 | public long offset = 0;
10 | public long batchId = 0;
11 | public long inId = 0;
12 | public long uId = 0;
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/utils/ParserConfig.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser.utils;
2 |
3 | /**
4 | * Created by hp on 14-9-28.
5 | */
6 | public class ParserConfig {
7 |
8 |
9 |
10 | private String hbaseRootDir;
11 |
12 | private String hbaseDistributed;
13 |
14 | private String hbaseZkQuorum;
15 |
16 | private String hbaseZkPort;
17 |
18 | private String dfsSocketTimeout;
19 |
20 |
21 | public ParserConfig() {
22 |
23 | }
24 |
25 |
26 | public String getHbaseRootDir() {
27 | return hbaseRootDir;
28 | }
29 |
30 | public void setHbaseRootDir(String hbaseRootDir) {
31 | this.hbaseRootDir = hbaseRootDir;
32 | }
33 |
34 | public String getHbaseDistributed() {
35 | return hbaseDistributed;
36 | }
37 |
38 | public void setHbaseDistributed(String hbaseDistributed) {
39 | this.hbaseDistributed = hbaseDistributed;
40 | }
41 |
42 | public String getHbaseZkQuorum() {
43 | return hbaseZkQuorum;
44 | }
45 |
46 | public void setHbaseZkQuorum(String hbaseZkQuorum) {
47 | this.hbaseZkQuorum = hbaseZkQuorum;
48 | }
49 |
50 | public String getHbaseZkPort() {
51 | return hbaseZkPort;
52 | }
53 |
54 | public void setHbaseZkPort(String hbaseZkPort) {
55 | this.hbaseZkPort = hbaseZkPort;
56 | }
57 |
58 | public String getDfsSocketTimeout() {
59 | return dfsSocketTimeout;
60 | }
61 |
62 | public void setDfsSocketTimeout(String dfsSocketTimeout) {
63 | this.dfsSocketTimeout = dfsSocketTimeout;
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/parser/utils/ParserFilterConf.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.parser.utils;
2 |
3 | import com.github.hackerwin7.mysql.parser.kafka.utils.KafkaConf;
4 | import com.github.hackerwin7.mysql.parser.protocol.json.ConfigJson;
5 | import net.sf.json.JSONArray;
6 | import net.sf.json.JSONObject;
7 | import org.apache.commons.lang.StringUtils;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import java.io.FileInputStream;
12 | import java.io.InputStream;
13 | import java.util.ArrayList;
14 | import java.util.HashMap;
15 | import java.util.List;
16 | import java.util.Map;
17 | import java.util.Properties;
18 |
19 | /**
20 | * Created by hp on 14-12-15.
21 | */
22 | public class ParserFilterConf {
23 | private Logger logger = LoggerFactory.getLogger(ParserFilterConf.class);
24 |
25 | //kafka conf
26 | public static String brokerList = "localhost:9092";//"12:9092,13.9092,14:9092"
27 | public static int kafkaPort = 9092;
28 | public static String zkKafka = "localhost:2181";
29 | public static String serializer = "kafka.serializer.DefaultEncoder";//default is byte[]
30 | public static String keySerializer = "kafka.serializer.StringEncoder";
31 | public static String partitioner = "kafka.producer.DefaultPartitioner";
32 | public static String acks = "-1";
33 | public static String topic = "test";//queue topic
34 | public static String senderTopic = "test2";
35 | public static int partition = 0;
36 | public static String strSeeds = "127.0.0.1";//"172.17.36.53,172.17.36.54,172.17.36.55";
37 | public static List brokerSeeds = new ArrayList();//"12,13,14"
38 | public static List portList = new ArrayList();
39 | public static String clientName;
40 | //zk conf
41 | public String zkServers = "127.0.0.1:2181";//"48:2181,19:2181,50:2181"
42 | public int timeout = 100000;
43 | public String rootPath = "/checkpoint";
44 | public String persisPath = rootPath + "/persistence";
45 | public String minutePath = rootPath + "/minutes";
46 | //parser conf database.table could be a regex
47 | public int batchsize = 10000;
48 | public int queuesize = 50000;
49 | public int minsec = 60;
50 | public int heartsec = 5 * 60;
51 | public int timeInterval = 1;
52 |     public int reInterval = 3;//wait 3 seconds before retrying and reconnecting
53 | public int retrys = 10;
54 | public int conUnit = 1024;//1024 bytes
55 | public int fetchKb = 1024;//1024 Kb
56 |     public int requestSize = 1 * fetchKb * conUnit;// 1 MB (fetch size)
57 | public double mbUnit = 1024.0 * 1024.0;
58 | public String jobId = "mysql-parser";
59 | public int spacesize = 8;//15 MB
60 | public int monitorsec = 60;//1 minute
61 | public long mid = 0;
62 | //filter
63 | public static Map disTopic = new HashMap();
64 | public static Map disKey = new HashMap();
65 | public static Map disType = new HashMap();
66 | public static Map disSense = new HashMap();
67 | //avro
68 | public String cusTime = "dmlts";
69 | public String cusIp = "ip";
70 |     //phoenix monitor
71 | public String phKaBrokerList = "localhost:9092";
72 | public int phKaPort = 9092;
73 | public String phKaZk = "localhost:2181";
74 | public String phKaSeria = "kafka.serializer.DefaultEncoder";
75 | public String phKaKeySeria = "kafka.serializer.StringEncoder";
76 | public String phKaParti = "kafka.producer.DefaultPartitioner";
77 | public String phKaAcks = "1";
78 | public String phKaTopic = "test1";
79 | public int phKaPartition = 0;
80 | public String CLASS_PREFIX = "classpath:";
81 |
82 | public void initConfLocal() {
83 | brokerSeeds.add("127.0.0.1");
84 | disTopic.put("canal_test.simple", "aa");
85 | disTopic.put("canal_test.test", "bb");
86 | disTopic.put("canal_test.filter", "aa");
87 | }
88 |
89 | public void initConfStatic() {
90 | brokerList = "172.17.36.53:9092,172.17.36.54:9092,172.17.36.55:9092";
91 | String ss[] = strSeeds.split(",");
92 | for(String s : ss) {
93 | brokerSeeds.add(s);
94 | }
95 | kafkaPort = 9092;
96 | zkKafka = "172.17.36.60/kafka";
97 | topic = "mysql_log";
98 | zkServers = "172.17.36.60:2181,172.17.36.61:2181,172.17.36.62:2181";
99 | }
100 |
101 | public void initConfFile() throws Exception {
102 | clear();
103 | String cnf = System.getProperty("parser.conf", "classpath:parser.properties");
104 | logger.info("load file : " + cnf);
105 | InputStream in = null;
106 | if(cnf.startsWith(CLASS_PREFIX)) {
107 | cnf = StringUtils.substringAfter(cnf, CLASS_PREFIX);
108 | in = ParserConf.class.getClassLoader().getResourceAsStream(cnf);
109 | } else {
110 | in = new FileInputStream(cnf);
111 | }
112 | Properties pro = new Properties();
113 | pro.load(in);
114 | //load the parameters
115 | jobId = pro.getProperty("job.name");
116 | logger.info("job Id : " + jobId);
117 | String dataKafkaZk = pro.getProperty("kafka.data.zkserver") + pro.getProperty("kafka.data.zkroot");
118 | logger.info("data kafka zk :" + dataKafkaZk);
119 | KafkaConf dataCnf = new KafkaConf();
120 | dataCnf.loadZk(dataKafkaZk);
121 | brokerList = dataCnf.brokerList;
122 | brokerSeeds.addAll(dataCnf.brokerSeeds);
123 | portList.addAll(dataCnf.portList);
124 | topic = pro.getProperty("kafka.data.tracker.topic");
125 | senderTopic = pro.getProperty("kafka.data.parser.topic");
126 | clientName = pro.getProperty("kafka.data.client.name");
127 | acks = pro.getProperty("kafka.acks");
128 | String monitorKafkaZk = pro.getProperty("kafka.monitor.zkserver") + pro.getProperty("kafka.monitor.zkroot");
129 | KafkaConf monitorCnf = new KafkaConf();
130 | monitorCnf.loadZk(monitorKafkaZk);
131 | phKaBrokerList = monitorCnf.brokerList;
132 | phKaTopic = pro.getProperty("kafka.monitor.topic");
133 | zkServers = pro.getProperty("zookeeper.servers");
134 | in.close();
135 | }
136 |
137 | public void initConfJSON() {
138 | //load magpie json
139 | ConfigJson jcnf = new ConfigJson(jobId, "magpie.address");
140 | JSONObject root = jcnf.getJson();
141 | //parser json
142 | if(root != null) {
143 | JSONObject data = root.getJSONObject("info").getJSONObject("content");
144 | brokerList = data.getString("brokerList");
145 | strSeeds = data.getString("strSeeds");
146 | String ss[] = strSeeds.split(",");
147 | for(String s : ss) {
148 | brokerSeeds.add(s);
149 | }
150 | kafkaPort = Integer.valueOf(data.getString("kafkaPort"));
151 | zkKafka = data.getString("zkKafka");
152 | topic = data.getString("topic");
153 | zkServers = data.getString("zkServers");
154 | }
155 |
156 |         //load filter json
157 | ConfigJson fcnf = new ConfigJson("", "kafka.distributed.address");
158 | JSONArray froot = fcnf.getJsonArr();
159 | //parser json
160 | if(froot != null) {
161 | for(int i = 0; i <= froot.size() - 1; i++) {
162 | JSONObject data = froot.getJSONObject(i);
163 | String dbname = data.getString("dbname");
164 | String tablename = data.getString("tablename");
165 | String mapkey = dbname + "." + tablename;
166 | String primarykey = data.getString("primarykey");
167 | if(primarykey != null) disKey.put(mapkey, primarykey);
168 | String sourceType = data.getString("sourceType");
169 | if(sourceType != null) disType.put(mapkey, sourceType);
170 | String topic = data.getString("topic");
171 | if(topic != null) disTopic.put(mapkey, topic);
172 | }
173 | }
174 | }
175 |
176 | public void initConfOnlineJSON() throws Exception {
177 | clear();
178 | //external config
179 | ConfigJson cj = new ConfigJson(jobId + "_sense", "magpie.address");
180 | JSONObject jo = cj.getJson();
181 | if(jo != null) {
182 | if(jo.containsKey("info")) {
183 | JSONObject jd0 = jo.getJSONObject("info");
184 | if(jd0.containsKey("content")) {
185 | JSONObject jd = jd0.getJSONObject("content");
186 | if (jd.containsKey("db_tab_meta")) {
187 | JSONArray jf = jd.getJSONArray("db_tab_meta");
188 | for (int i = 0; i <= jf.size() - 1; i++) {
189 | JSONObject jdata = jf.getJSONObject(i);
190 | String dbname = jdata.getString("dbname");
191 | String tablename = jdata.getString("tablename");
192 | String mapkey = dbname + "." + tablename;
193 | if (jdata.containsKey("sensefield")) {
194 | String senseField = jdata.getString("sensefield");
195 | disSense.put(mapkey, senseField);
196 | }
197 | }
198 | }
199 | }
200 | }
201 | }
202 | }
203 |
204 | public void clear() {
205 | brokerSeeds.clear();
206 | disTopic.clear();
207 | disKey.clear();
208 | disType.clear();
209 | disSense.clear();
210 | }
211 | }
212 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/protocol/avro/EventEntry.avro:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "protocol.avro",
3 | "type": "record",
4 | "name": "EventEntryAvro",
5 | "fields": [
6 | {"name": "mid", "type": "long"},
7 | {"name": "db", "type": "string"},
8 | {"name": "sch", "type": "string"},
9 | {"name": "tab", "type": "string"},
10 | {"name": "opt","type": "string"},
11 | {"name": "ts", "type": "long"},
12 | {"name": "ddl", "type": ["string", "null"]},
13 | {"name": "err", "type": ["string", "null"]},
14 | {"name": "src", "type": [{"type": "map","values": ["string", "null"]},"null"]},
15 | {"name": "cur", "type": [{"type": "map","values": ["string", "null"]},"null"]},
16 | {"name": "cus", "type": [{"type": "map","values": ["string", "null"]},"null"]}
17 | ]
18 | }
--------------------------------------------------------------------------------
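The record above is the wire format the parser publishes to Kafka. Below is a minimal round-trip sketch, assuming the EventEntryAvro class generated from this schema; the class name AvroRoundTripSketch and the field values are illustrative, and the encode/decode calls mirror getBytesFromAvro/getAvroFromBytes in AvroNullTest and the ParserVertify* classes:

    import com.github.hackerwin7.mysql.parser.protocol.avro.EventEntryAvro;
    import org.apache.avro.io.BinaryEncoder;
    import org.apache.avro.io.DatumWriter;
    import org.apache.avro.io.Decoder;
    import org.apache.avro.io.DecoderFactory;
    import org.apache.avro.io.EncoderFactory;
    import org.apache.avro.specific.SpecificDatumReader;
    import org.apache.avro.specific.SpecificDatumWriter;

    import java.io.ByteArrayOutputStream;
    import java.util.HashMap;

    public class AvroRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // Build a record; the values are made up, the field names follow the schema above.
            EventEntryAvro avro = new EventEntryAvro();
            avro.setMid(1L);
            avro.setDb("canal_test");
            avro.setSch("canal_test");
            avro.setTab("simple");
            avro.setOpt("INSERT");
            avro.setTs(System.currentTimeMillis());
            avro.setDdl("");
            avro.setErr("");
            avro.setCur(new HashMap());
            avro.setSrc(new HashMap());
            avro.setCus(new HashMap());

            // Encode to the byte[] form that is published to Kafka.
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
            DatumWriter<EventEntryAvro> writer = new SpecificDatumWriter<EventEntryAvro>(EventEntryAvro.getClassSchema());
            writer.write(avro, encoder);
            encoder.flush();
            byte[] bytes = out.toByteArray();

            // Decode it back, as the verify/consumer classes do.
            SpecificDatumReader<EventEntryAvro> reader = new SpecificDatumReader<EventEntryAvro>(EventEntryAvro.getClassSchema());
            Decoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
            EventEntryAvro decoded = reader.read(null, decoder);
            System.out.println(decoded.getDb() + "." + decoded.getTab() + " -> " + decoded.getOpt());
        }
    }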
/src/main/java/com/github/hackerwin7/mysql/parser/protocol/json/ConfigJson.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.protocol.json;
2 |
3 | import net.sf.json.JSONArray;
4 | import net.sf.json.JSONObject;
5 | import net.sf.json.util.JSONTokener;
6 | import org.ho.yaml.Yaml;
7 |
8 | import java.io.FileNotFoundException;
9 | import java.io.IOException;
10 | import java.io.InputStreamReader;
11 | import java.net.URL;
12 | import java.nio.charset.MalformedInputException;
13 | import java.util.HashMap;
14 |
15 | /**
16 | * Created by hp on 14-11-13.
17 | */
18 | public class ConfigJson {
19 |
20 | private String jsonStr;
21 | private String urlStr;
22 | private String loadFile = "input_config.yaml";
23 | private String jobId;
24 | private String key = "magpie.address";
25 |
26 | public ConfigJson(String id) {
27 | jobId = id;
28 | }
29 |
30 | public ConfigJson(String id, String getKey) {
31 | jobId = id;
32 | key = getKey;
33 | }
34 |
35 | private void getFile() {
36 | //get the urlStr
37 | try {
38 | HashMap ml = Yaml.loadType(this.getClass().getClassLoader().getResource(loadFile).openStream(), HashMap.class);
39 | urlStr = ml.get(key) + jobId;
40 | } catch (FileNotFoundException e) {
41 | e.printStackTrace();
42 | } catch (IOException e) {
43 | e.printStackTrace();
44 | }
45 | }
46 |
47 | public void getJsonStr() {
48 | jsonStr = null;
49 | StringBuffer sb = new StringBuffer();
50 | try {
51 | URL url = new URL(urlStr);
52 | InputStreamReader isr = new InputStreamReader(url.openStream());
53 | char[] buffer = new char[1024];
54 | int len = 0;
55 | while ((len = isr.read(buffer)) != -1) {
56 | sb.append(buffer,0,len);
57 | }
58 | } catch (MalformedInputException e) {
59 | e.printStackTrace();
60 | } catch (IOException e) {
61 | e.printStackTrace();
62 | }
63 | jsonStr = sb.toString();
64 | }
65 |
66 | public JSONObject getJson() {
67 | getFile();
68 | getJsonStr();
69 | JSONTokener jsonParser = new JSONTokener(jsonStr);
70 | JSONObject jsonOb = (JSONObject)jsonParser.nextValue();
71 | return jsonOb;
72 | }
73 |
74 | public JSONArray getJsonArr() {
75 | getFile();
76 | getJsonStr();
77 | JSONTokener jsonParser = new JSONTokener(jsonStr);
78 | System.out.println("json string : " + jsonStr);
79 | JSONArray ja = (JSONArray) jsonParser.nextValue();
80 | return ja;
81 | }
82 |
83 | }
84 |
--------------------------------------------------------------------------------
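For reference, the call pattern used elsewhere in this repo (LoadURLJson, DBTBGemerater, ParserFilterConf) is: pick the yaml key that holds the base URL, append the job id, fetch and parse. A minimal sketch, assuming the "release.address" key from input_config.yaml, an illustrative job id, and that the endpoint returns the usual {"data": {...}} layout seen in DBTBGemerater:

    import net.sf.json.JSONObject;
    import com.github.hackerwin7.mysql.parser.protocol.json.ConfigJson;

    public class ConfigJsonSketch {
        public static void main(String[] args) {
            // "release.address" must be a key in input_config.yaml; the job id is illustrative.
            ConfigJson configJson = new ConfigJson("my-job-id", "release.address");
            JSONObject root = configJson.getJson();   // fetches <base-url> + <job-id> and parses the body
            if (root != null && root.containsKey("data")) {
                JSONObject data = root.getJSONObject("data");
                System.out.println(data.toString());
            }
        }
    }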
/src/main/java/com/github/hackerwin7/mysql/parser/protocol/json/JSONConvert.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.protocol.json;
2 |
3 | import com.github.hackerwin7.mysql.parser.monitor.JrdwMonitorVo;
4 | import net.sf.json.JSONObject;
5 |
6 | import java.util.Map;
7 |
8 | /**
9 | * Created by hp on 15-1-8.
10 | */
11 | public class JSONConvert {
12 |
13 | public static JSONObject MapToJson(Map m) {
14 | if(m == null) return null;
15 | return JSONObject.fromObject(m);
16 | }
17 |
18 | public static JSONObject JrdwMonitorVoToJson(JrdwMonitorVo jmv) {
19 | if(jmv == null) return null;
20 | return JSONObject.fromObject(jmv);
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/protocol/json/LoadURLJson.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.protocol.json;
2 |
3 | import com.github.hackerwin7.mysql.parser.parser.utils.ParserConfig;
4 | import net.sf.json.JSONObject;
5 |
6 | /**
7 | * Created by hp on 14-11-14.
8 | */
9 | public class LoadURLJson {
10 |
11 | public static void main(String[] args) {
12 | ParserConfig configer = new ParserConfig();
13 | ConfigJson configJson = new ConfigJson("jd-mysql-parser-1");
14 | JSONObject jRoot = configJson.getJson();
15 | if(jRoot != null) {
16 | JSONObject jContent = jRoot.getJSONObject("info").getJSONObject("content");
17 | configer.setHbaseRootDir(jContent.getString("HbaseRootDir"));
18 | configer.setHbaseDistributed(jContent.getString("HbaseDistributed"));
19 | configer.setHbaseZkQuorum(jContent.getString("HbaseZkQuorum"));
20 | configer.setHbaseZkPort(jContent.getString("HbaseZkPort"));
21 | configer.setDfsSocketTimeout(jContent.getString("DfsSocketTimeout"));
22 | }
23 |
24 | System.out.println(configer.getHbaseRootDir()+","+configer.getHbaseDistributed()+"," +
25 | configer.getHbaseZkQuorum()+","+configer.getHbaseZkPort()+","+configer.getDfsSocketTimeout());
26 |
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/src/main/java/com/github/hackerwin7/mysql/parser/protocol/protobuf/EntryProtocol.proto:
--------------------------------------------------------------------------------
1 | package protocol.protobuf;
2 |
3 | option java_package = "protocol.protobuf";
4 | option java_outer_classname = "CanalEntry";
5 | option optimize_for = SPEED;
6 |
7 | /****************************************************************
8 | * message model
9 | * If new types are added to an Enum, make sure the index values of the existing types stay unchanged.
10 | ****************************************************************/
11 | message Entry {
12 | 	/** protocol header info **/
13 | optional Header header = 1;
14 |
15 | 	/** entry type after the event is split **/
16 | optional EntryType entryType = 2 [default = ROWDATA];
17 |
18 | 	/** transported binary payload **/
19 | optional bytes storeValue = 3;
20 |
21 | /**additional info**/
22 | optional int64 batchId = 4;
23 |
24 | /**additional info**/
25 | optional int64 inId = 5;
26 |
27 | /**additional info**/
28 | optional string ip = 6;
29 | }
30 |
31 | /**message Header**/
32 | message Header {
33 | 	/** protocol version **/
34 | optional int32 version = 1 [default = 1];
35 |
36 | 	/** binlog/redolog file name **/
37 | optional string logfileName = 2;
38 |
39 | 	/** binlog/redolog file offset **/
40 | optional int64 logfileOffset = 3;
41 |
42 | 	/** server-side serverId **/
43 | optional int64 serverId = 4;
44 |
45 | 	/** charset of the changed data **/
46 | optional string serverenCode = 5;
47 |
48 | 	/** execute time of the changed data **/
49 | optional int64 executeTime = 6;
50 |
51 | 	/** source of the changed data **/
52 | optional Type sourceType = 7 [default = MYSQL];
53 |
54 | 	/** schema name of the changed data **/
55 | optional string schemaName = 8;
56 |
57 | 	/** table name of the changed data **/
58 | optional string tableName = 9;
59 |
60 | 	/** length of each event **/
61 | optional int64 eventLength = 10;
62 |
63 | 	/** data change type **/
64 | optional EventType eventType = 11 [default = UPDATE];
65 |
66 | 	/** reserved for extension **/
67 | repeated Pair props = 12;
68 | }
69 |
70 | /** data structure of each column **/
71 | message Column {
72 | 	/** column index **/
73 | optional int32 index = 1;
74 |
75 | 	/** java sql type of the column **/
76 | optional int32 sqlType = 2;
77 |
78 | 	/** column name (case-insensitive); not carried in mysql itself **/
79 | optional string name = 3;
80 |
81 | 	/** whether the column is a primary key **/
82 | optional bool isKey = 4;
83 |
84 | 	/** when EventType=UPDATE, marks whether this column value was modified **/
85 | optional bool updated = 5;
86 |
87 | 	/** marks whether the value is null **/
88 | optional bool isNull = 6 [default = false];
89 |
90 | 	/** reserved for extension **/
91 | repeated Pair props = 7;
92 |
93 | 	/** column value; timestamp/datetime values are time-formatted text **/
94 | optional string value = 8;
95 |
96 | 	/** original length of the underlying data object **/
97 | optional int32 length = 9;
98 |
99 | 	/** mysql type of the column **/
100 | optional string mysqlType = 10;
101 | }
102 |
103 | message RowData {
104 |
105 | 	/** column data before the change (pre-update / pre-delete image) **/
106 | repeated Column beforeColumns = 1;
107 |
108 | 	/** column data after the change (post-update / post-insert image) **/
109 | repeated Column afterColumns = 2;
110 |
111 | 	/** reserved for extension **/
112 | repeated Pair props = 3;
113 | }
114 |
115 | /** message row: data structure of each changed row **/
116 | message RowChange {
117 |
118 | 	/** tableId, generated by the database **/
119 | optional int64 tableId = 1;
120 |
121 | 	/** data change type **/
122 | optional EventType eventType = 2 [default = UPDATE];
123 |
124 | 	/** marks whether this is a ddl statement **/
125 | optional bool isDdl = 10 [default = false];
126 |
127 | 	/** sql text of the ddl/query **/
128 | optional string sql = 11;
129 |
130 | 	/** one database change may contain multiple rows **/
131 | repeated RowData rowDatas = 12;
132 |
133 | 	/** reserved for extension **/
134 | repeated Pair props = 13;
135 |
136 | 	/** schemaName of the ddl/query; cross-database ddl exists, so keep the schemaName that was current when the ddl executed **/
137 | optional string ddlSchemaName = 14;
138 | }
139 |
140 | /** information about the transaction begin **/
141 | message TransactionBegin{
142 |
143 | 	/** deprecated, use executeTime in the header instead **/
144 | optional int64 executeTime = 1;
145 |
146 | 	/** deprecated, Begin does not provide the transaction id **/
147 | optional string transactionId = 2;
148 |
149 | 	/** reserved for extension **/
150 | repeated Pair props = 3;
151 |
152 | 	/** executing thread id **/
153 | optional int64 threadId = 4;
154 | }
155 |
156 | /** information about the transaction end **/
157 | message TransactionEnd{
158 |
159 | 	/** deprecated, use executeTime in the header instead **/
160 | optional int64 executeTime = 1;
161 |
162 | 	/** transaction id **/
163 | optional string transactionId = 2;
164 |
165 | 	/** reserved for extension **/
166 | repeated Pair props = 3;
167 | }
168 |
169 | /** reserved for extension **/
170 | message Pair{
171 | optional string key = 1;
172 | optional string value = 2;
173 | }
174 |
175 | /** entry types after splitting, mainly marking transaction begin, row data and transaction end **/
176 | enum EntryType{
177 | TRANSACTIONBEGIN = 1;
178 | ROWDATA = 2;
179 | TRANSACTIONEND = 3;
180 | 	/** heartbeat type, internal use only, not visible externally, can be ignored **/
181 | HEARTBEAT = 4;
182 | }
183 |
184 | /** event types **/
185 | enum EventType {
186 | INSERT = 1;
187 | UPDATE = 2;
188 | DELETE = 3;
189 | CREATE = 4;
190 | ALTER = 5;
191 | ERASE = 6;
192 | QUERY = 7;
193 | TRUNCATE = 8;
194 | RENAME = 9;
195 | /**CREATE INDEX**/
196 | CINDEX = 10;
197 | DINDEX = 11;
198 | }
199 |
200 | /** database types **/
201 | enum Type {
202 | ORACLE = 1;
203 | MYSQL = 2;
204 | PGSQL = 3;
205 | }
--------------------------------------------------------------------------------
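EntryPrinter above walks this structure in full; below is a condensed sketch of unpacking one ROWDATA entry into its after-image columns. The class name RowDataSketch is illustrative, and the Entry instance is assumed to be deserialized upstream (e.g. from the tracker topic); the calls themselves follow the generated CanalEntry API as used in EntryPrinter:

    import com.github.hackerwin7.mysql.parser.protocol.protobuf.CanalEntry;
    import com.google.protobuf.InvalidProtocolBufferException;

    public class RowDataSketch {
        // Print the after-image columns of one ROWDATA entry, following EntryPrinter.
        public static void printAfterColumns(CanalEntry.Entry entry) throws InvalidProtocolBufferException {
            if (entry.getEntryType() != CanalEntry.EntryType.ROWDATA) {
                return; // transaction begin/end and heartbeat entries carry no row data
            }
            CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
            for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
                for (CanalEntry.Column column : rowData.getAfterColumnsList()) {
                    System.out.println(column.getName() + "=" + column.getValue()
                            + " (" + column.getMysqlType() + ", updated=" + column.getUpdated() + ")");
                }
            }
        }
    }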
/src/main/java/com/github/hackerwin7/mysql/parser/zk/client/ZkExecutor.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.zk.client;
2 |
3 | import org.apache.zookeeper.CreateMode;
4 | import org.apache.zookeeper.WatchedEvent;
5 | import org.apache.zookeeper.Watcher;
6 | import org.apache.zookeeper.ZooDefs;
7 | import org.apache.zookeeper.ZooKeeper;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 | import com.github.hackerwin7.mysql.parser.zk.utils.ZkConf;
11 |
12 | import java.io.IOException;
13 | import java.util.List;
14 |
15 | /**
16 | * Created by hp on 14-12-12.
17 | */
18 | public class ZkExecutor {
19 |
20 | private Logger logger = LoggerFactory.getLogger(ZkExecutor.class);
21 | private ZkConf conf;
22 | private ZooKeeper zk;
23 |
24 | public ZkExecutor (ZkConf cnf) {
25 | conf = cnf;
26 | }
27 |
28 | public void connect() throws Exception {
29 | zk = new ZooKeeper(conf.zkServers, conf.timeout, new Watcher() {
30 | @Override
31 | public void process(WatchedEvent event) {
32 | logger.info("watcher : " + event.getType());
33 | }
34 | });
35 | }
36 |
37 | public void close() throws Exception {
38 | zk.close();
39 | }
40 |
41 | public boolean exists(String path) throws Exception {
42 | if(zk.exists(path, false) == null) return false;
43 | else return true;
44 | }
45 |
46 | public void create(String path, String data) throws Exception {
47 | zk.create(path, data.getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
48 | }
49 |
50 | public void set(String path, String data) throws Exception {
51 | zk.setData(path, data.getBytes(), -1);
52 | }
53 |
54 | public String get(String path) throws Exception {
55 | if(! exists(path)) {
56 | return null;//not exists return null
57 | }
58 | byte[] bytes = zk.getData(path, new Watcher() {
59 | @Override
60 | public void process(WatchedEvent event) {
61 | logger.info("get data watcher : " + event.getType());
62 | }
63 | },null);
64 | return new String(bytes);
65 | }
66 |
67 | public List getChildren(String path) throws Exception {
68 | if(!exists(path)) {
69 | return null;
70 | }
71 | List childList = zk.getChildren(path, false);
72 | return childList;
73 | }
74 |
75 | public void delete(String path) throws Exception {
76 | if(exists(path)) {
77 | zk.delete(path, -1);
78 | }
79 | }
80 |
81 | public boolean isConnected() {
82 | ZooKeeper heartzk = null;
83 | try {
84 | heartzk = new ZooKeeper(conf.zkServers, conf.timeout, new Watcher() {
85 | @Override
86 | public void process(WatchedEvent event) {
87 | logger.info("watcher : " + event.getType());
88 | }
89 | });
90 | if(heartzk!=null) {
91 | try {
92 | heartzk.close();
93 | } catch (InterruptedException e) {
94 | e.printStackTrace();
95 | }
96 | }
97 | } catch (IOException e) {
98 | return false;
99 | }
100 | return true;
101 | }
102 |
103 | public void reconnect() throws Exception {
104 | close();
105 | connect();
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
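A minimal checkpoint-style usage sketch for the client above (the ZkCheckpointSketch class, the path and the payload are illustrative; note that create() is not recursive, so parent nodes have to be created first):

    import com.github.hackerwin7.mysql.parser.zk.client.ZkExecutor;
    import com.github.hackerwin7.mysql.parser.zk.utils.ZkConf;

    public class ZkCheckpointSketch {
        public static void main(String[] args) throws Exception {
            ZkConf conf = new ZkConf();               // defaults to 127.0.0.1:2181
            ZkExecutor zk = new ZkExecutor(conf);
            zk.connect();

            String root = "/checkpoint";              // illustrative, mirrors ParserFilterConf.rootPath
            String node = root + "/mysql-parser";
            if (!zk.exists(root)) {
                zk.create(root, "");                  // build the parent first, create() is not recursive
            }
            if (!zk.exists(node)) {
                zk.create(node, "offset=0");
            } else {
                zk.set(node, "offset=0");
            }
            System.out.println("checkpoint = " + zk.get(node));
            zk.close();
        }
    }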
/src/main/java/com/github/hackerwin7/mysql/parser/zk/utils/ZkConf.java:
--------------------------------------------------------------------------------
1 | package com.github.hackerwin7.mysql.parser.zk.utils;
2 |
3 | /**
4 | * Created by hp on 14-12-12.
5 | */
6 | public class ZkConf {
7 | public String zkServers = "127.0.0.1:2181";//"48:2181,19:2181,50:2181"
8 | public int timeout = 100000;
9 | }
10 |
--------------------------------------------------------------------------------
/src/main/resources/input_config.yaml:
--------------------------------------------------------------------------------
1 | release.address: http://train.bdp.jd.com/api/ztc/job/getJobConfig.ajax?jobId=
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | ### set log levels ###
2 | log4j.rootLogger=ALL, stdout, file
3 |
4 | ### stdout ###
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Threshold=INFO
7 | log4j.appender.stdout.Target=System.out
8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n
10 |
11 | ### file ###
12 | log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
13 | log4j.appender.file.Threshold=INFO
14 | log4j.appender.file.File=./parser.log
15 | log4j.appender.file.Append=true
16 | log4j.appender.file.DatePattern='.'yyyy-MM-dd
17 | log4j.appender.file.layout=org.apache.log4j.PatternLayout
18 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n
--------------------------------------------------------------------------------
/src/main/resources/parser.properties:
--------------------------------------------------------------------------------
1 | ### common ###
2 | job.name = mysql-parser
3 | ### kafka data ###
4 | kafka.data.zkserver = 127.0.0.1:2181
5 | #default is /
6 | kafka.data.zkroot = /kafka
7 | kafka.data.tracker.topic = tracker-log-mysql
8 | kafka.data.parser.topic = parser-log-mysql
9 | kafka.data.client.name = kafka-parser
10 | kafka.acks = -1
11 | ### kafka monitor ###
12 | kafka.monitor.zkserver = 127.0.0.1:2181
13 | #default is /
14 | kafka.monitor.zkroot = /kafka
15 | kafka.monitor.topic = parser-monitor
16 | ### zookeeper ###
17 | zookeeper.servers = 127.0.0.1:2181
--------------------------------------------------------------------------------
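These are the keys that ParserFilterConf.initConfFile() (and the analogous ParserConf) reads. A minimal sketch of pointing the parser at an external copy of this file; the ParserConfSketch class and the file path are illustrative, and the call still needs the configured kafka ZooKeeper endpoints to be reachable, since loadZk() resolves the broker list from them:

    import com.github.hackerwin7.mysql.parser.parser.utils.ParserFilterConf;

    public class ParserConfSketch {
        public static void main(String[] args) throws Exception {
            // Without this property the conf is read from classpath:parser.properties.
            System.setProperty("parser.conf", "/etc/mysql-parser/parser.properties"); // illustrative path
            ParserFilterConf conf = new ParserFilterConf();
            conf.initConfFile();   // loads job.name, kafka.data.*, kafka.monitor.* and zookeeper.servers
            System.out.println(conf.jobId + " -> " + ParserFilterConf.topic + " / " + ParserFilterConf.senderTopic);
        }
    }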
/src/test/java/AvroNullTest.java:
--------------------------------------------------------------------------------
1 | import org.apache.avro.io.BinaryEncoder;
2 | import org.apache.avro.io.DatumWriter;
3 | import org.apache.avro.io.Decoder;
4 | import org.apache.avro.io.DecoderFactory;
5 | import org.apache.avro.io.EncoderFactory;
6 | import org.apache.avro.specific.SpecificDatumReader;
7 | import org.apache.avro.specific.SpecificDatumWriter;
8 | import com.github.hackerwin7.mysql.parser.protocol.avro.EventEntryAvro;
9 |
10 | import java.io.ByteArrayOutputStream;
11 | import java.io.IOException;
12 | import java.util.HashMap;
13 | import java.util.Map;
14 |
15 | /**
16 | * Created by hp on 15-4-7.
17 | */
18 | public class AvroNullTest {
19 | public static void main(String[] args) throws Exception {
20 | // JdwData data = new JdwData();
21 | // data.setDb("xxx");
22 | // data.setTab("111");
23 | // data.setErr("");
24 | // data.setSch("xxx");
25 | // data.setOpt("231");
26 | // data.setCus(new HashMap());
27 | // data.setDdl("");
28 | // data.setMid(1L);
29 | // data.setSrc(new HashMap());
30 | // data.setTs(1000L);
31 | // Map ccm = new HashMap();
32 | // ccm.put("one", null);
33 | // ccm.put("two", "2");
34 | // ccm.put("three", null);
35 | // ccm.put("four", "4");
36 | // data.setCur(ccm);
37 | // byte[] val = AvroCoderUtils.encode(data, JdwData.SCHEMA$);
38 | // JdwData dd = AvroCoderUtils.decode(val, JdwData.SCHEMA$);
39 | // Map cur = dd.getCur();
40 | // for(Map.Entry entry : cur.entrySet()) {
41 | // CharSequence k = entry.getKey();
42 | // CharSequence v = entry.getValue();
43 | // System.out.println(k + ":" + v);
44 | // if(v == null) {
45 | // System.out.println("null object");
46 | // }
47 | // if(v != null && v.equals("")) {
48 | // System.out.println("null string");
49 | // }
50 | // }
51 | EventEntryAvro avro = new EventEntryAvro();
52 | avro.setMid(1L);
53 | avro.setDb("db");
54 | avro.setSch("sch");
55 | avro.setTab("tab");
56 | avro.setOpt("opt");
57 | avro.setTs(System.currentTimeMillis());
58 | avro.setDdl("ddl");
59 | avro.setErr("");
60 | avro.setCus(new HashMap());
61 | Map ccm = new HashMap();
62 | ccm.put("one", null);
63 | ccm.put("two", "2");
64 | ccm.put("three", null);
65 | ccm.put("four", "4");
66 | for(Map.Entry entry : ccm.entrySet()) {
67 | CharSequence k = entry.getKey();
68 | CharSequence v = entry.getValue();
69 | System.out.println(k + ":" + v);
70 | if(v == null) {
71 | System.out.println("null object");
72 | }
73 | if(v != null && v.equals("")) {
74 | System.out.println("null string");
75 | }
76 | }
77 | avro.setCur(ccm);
78 | avro.setSrc(new HashMap());
79 |
80 | byte[] value = getBytesFromAvro(avro);
81 |
82 | EventEntryAvro data = getAvroFromBytes(value);
83 |
84 | Map cur = data.getCur();
85 | for(Map.Entry entry : cur.entrySet()) {
86 | CharSequence k = entry.getKey();
87 | CharSequence v = entry.getValue();
88 | System.out.println(k + ":" + v);
89 | if(v == null) {
90 | System.out.println("null object");
91 | }
92 | if(v != null && v.equals("")) {
93 | System.out.println("null string");
94 | }
95 | }
96 | }
97 |
98 | private static EventEntryAvro getAvroFromBytes(byte[] value) {
99 | SpecificDatumReader reader = new SpecificDatumReader(EventEntryAvro.getClassSchema());
100 | Decoder decoder = DecoderFactory.get().binaryDecoder(value,null);
101 | EventEntryAvro avro = null;
102 | try {
103 | avro = reader.read(null,decoder);
104 | } catch (IOException e) {
105 | e.printStackTrace();
106 | }
107 | return avro;
108 | }
109 |
110 | private static byte[] getBytesFromAvro(EventEntryAvro avro) {
111 | ByteArrayOutputStream out = new ByteArrayOutputStream();
112 | BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out,null);
113 | DatumWriter writer = new SpecificDatumWriter(EventEntryAvro.getClassSchema());
114 | try {
115 | writer.write(avro,encoder);
116 | encoder.flush();
117 | out.close();
118 | } catch (IOException e) {
119 | e.printStackTrace();
120 | }
121 | byte[] value = out.toByteArray();
122 | return value;
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/src/test/java/DBTBGemerater.java:
--------------------------------------------------------------------------------
1 | import net.sf.json.JSONArray;
2 | import net.sf.json.JSONObject;
3 | import com.github.hackerwin7.mysql.parser.protocol.json.ConfigJson;
4 |
5 | /**
6 | * Created by hp on 6/4/15.
7 | */
8 | public class DBTBGemerater {
9 | public static void main(String[] args) throws Exception {
10 | String jobStr = "2102172174092,2102172174094,2102172174096,2102172174098,21021721740100,21021721740102,21021721740104,21021721740106,21021721740108,21021721740110,21021721740112,21021721740114,21021721740116,21021721740118,21021721740120,21021721740122,21021721740124,21021721740126,21021721740128,21021721740130";
11 | String[] jobs = jobStr.trim().split(",");
12 | for(String jobId : jobs) {
13 | ConfigJson cj = new ConfigJson(jobId, "release.address");
14 | JSONObject jo = cj.getJson();
15 | if (jo != null) {
16 | if (jo.containsKey("data")) {
17 | JSONObject jd = jo.getJSONObject("data");
18 | if (jd.containsKey("db_tab_meta")) {
19 | JSONArray jf = jd.getJSONArray("db_tab_meta");
20 | for (int i = 0; i <= jf.size() - 1; i++) {
21 | JSONObject jdata = jf.getJSONObject(i);
22 | String dbname = jdata.getString("dbname");
23 | String tablename = jdata.getString("tablename");
24 | String mapkey = dbname + "." + tablename;
25 | System.out.println(mapkey);
26 | }
27 | }
28 | }
29 | }
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/test/java/DeepMapRelicaTest.java:
--------------------------------------------------------------------------------
1 | import java.util.HashMap;
2 | import java.util.Map;
3 |
4 | /**
5 | * Created by hp on 15-3-13.
6 | */
7 | public class DeepMapRelicaTest implements Cloneable {
8 |
9 | public Map maps = new HashMap();
10 |
11 |
12 | public Object clone() {
13 | Object o = null;
14 | try {
15 | DeepMapRelicaTest os = (DeepMapRelicaTest) super.clone();
16 | os.maps = new HashMap();
17 | if(maps != null) {
18 | for(Map.Entry entry : maps.entrySet()) {
19 | String key = entry.getKey();
20 | Long value = entry.getValue();
21 | os.maps.put(key, value);
22 | }
23 | }
24 | o = (DeepMapRelicaTest) os;
25 | } catch (CloneNotSupportedException e) {
26 | e.printStackTrace();
27 | }
28 | return o;
29 | }
30 |
31 | public DeepMapRelicaTest cloneDeep() {
32 | return (DeepMapRelicaTest) clone();
33 | }
34 |
35 | public void clear() {
36 | if(maps != null) {
37 | for(Map.Entry entry : maps.entrySet()) {
38 | maps.put(entry.getKey(), 0L);
39 | }
40 | }
41 | }
42 |
43 | public static void main(String[] args) {
44 | DeepMapRelicaTest d1 = new DeepMapRelicaTest();
45 | d1.maps.put("one", 1L);
46 | d1.maps.put("two", 2L);
47 | d1.maps.put("three", 3L);
48 | DeepMapRelicaTest d2 = d1.cloneDeep();
49 | System.out.println(d2.maps.get("one"));
50 | d1.clear();
51 | System.out.println(d2.maps.get("one"));
52 | }
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/src/test/java/DeepReplicaTest.java:
--------------------------------------------------------------------------------
1 | import java.util.HashMap;
2 | import java.util.Map;
3 |
4 | /**
5 | * Created by hp on 15-3-13.
6 | */
7 | public class DeepReplicaTest implements Cloneable {
8 |
9 | public Map maps = new HashMap();
10 |
11 |
12 | public Object clone() {
13 | Object o = null;
14 | try {
15 | o = (DeepReplicaTest) super.clone();
16 | } catch (CloneNotSupportedException e) {
17 | e.printStackTrace();
18 | }
19 | return o;
20 | }
21 |
22 | public DeepReplicaTest cloneDeep() {
23 | return (DeepReplicaTest) clone();
24 | }
25 |
26 | public void clear() {
27 | if(maps != null) {
28 | for(Map.Entry entry : maps.entrySet()) {
29 | maps.put(entry.getKey(), 0L);
30 | }
31 | }
32 | }
33 |
34 | public static void main(String[] args) {
35 | DeepReplicaTest d1 = new DeepReplicaTest();
36 | d1.maps.put("one", 1L);
37 | d1.maps.put("two", 2L);
38 | d1.maps.put("three", 3L);
39 | DeepReplicaTest d2 = d1.cloneDeep();
40 | System.out.println(d2.maps.get("one"));
41 | d1.clear();
42 | System.out.println(d2.maps.get("one"));
43 | }
44 |
45 | }
46 |
--------------------------------------------------------------------------------
/src/test/java/JSONStrGenerater.java:
--------------------------------------------------------------------------------
1 | import net.sf.json.JSONArray;
2 | import net.sf.json.JSONObject;
3 | import com.github.hackerwin7.mysql.parser.protocol.json.ConfigJson;
4 |
5 | /**
6 | * Created by hp on 6/4/15.
7 | */
8 | public class JSONStrGenerater {
9 | public static void main(String[] args) throws Exception {
10 | String senseVal = "customerName,address,phone,usermob,email,remark,userremark,orderftel,premark";
11 | String jobStr = "2102172174092,2102172174094,2102172174096,2102172174098,21021721740100,21021721740102,21021721740104,21021721740106,21021721740108,21021721740110,21021721740112,21021721740114,21021721740116,21021721740118,21021721740120,21021721740122,21021721740124,21021721740126,21021721740128,21021721740130";
12 | String[] jobs = jobStr.trim().split(",");
13 | for(String jobId : jobs) {
14 | JSONObject jog = new JSONObject();
15 | JSONObject jdatag = new JSONObject();
16 | JSONArray jag = new JSONArray();
17 | ConfigJson cj = new ConfigJson(jobId, "release.address");
18 | JSONObject jo = cj.getJson();
19 | if (jo != null) {
20 | if (jo.containsKey("data")) {
21 | JSONObject jd = jo.getJSONObject("data");
22 | if (jd.containsKey("db_tab_meta")) {
23 | JSONArray jf = jd.getJSONArray("db_tab_meta");
24 | for (int i = 0; i <= jf.size() - 1; i++) {
25 | JSONObject jdata = jf.getJSONObject(i);
26 | String dbname = jdata.getString("dbname");
27 | String tablename = jdata.getString("tablename");
28 | JSONObject jg = new JSONObject();
29 | jg.accumulate("dbname", dbname);
30 | jg.accumulate("tablename", tablename);
31 | jg.accumulate("sensefield", senseVal);
32 | jag.add(jg);
33 | }
34 | }
35 | }
36 | }
37 | jdatag.accumulate("db_tab_meta", jag);
38 | jog.accumulate("data", jdatag);
39 | System.out.println("http://172.22.178.85:8080/magpie-conf-service/conf/" + jobId + "_sense");
40 | System.out.println(jog.toString());
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/src/test/java/JavaSetTest.java:
--------------------------------------------------------------------------------
1 | import java.util.HashSet;
2 | import java.util.Set;
3 |
4 | /**
5 | * Created by hp on 15-2-3.
6 | */
7 | public class JavaSetTest {
8 |
9 | public static void main(String[] args) {
10 | Set columns = new HashSet();
11 | columns.add("cake");
12 | columns.add("create");
13 | columns.add("delete");
14 | columns.add("modify");
15 | columns.add("update");
16 | columns.add("alter");
17 | columns.add("select");
18 | if(columns.contains("cake")) {
19 | print("cake");
20 | }
21 | if(columns.contains("soul")) {
22 | print("soul");
23 | }
24 | }
25 |
26 | private static void print(String str) {
27 | System.out.print(str);
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/src/test/java/JsonArrTest.java:
--------------------------------------------------------------------------------
1 | import net.sf.json.JSONArray;
2 | import net.sf.json.JSONObject;
3 | import com.github.hackerwin7.mysql.parser.protocol.json.ConfigJson;
4 |
5 | import java.util.HashMap;
6 | import java.util.Map;
7 |
8 | /**
9 | * Created by hp on 14-12-27.
10 | */
11 | public class JsonArrTest {
12 |
13 | public static void main(String[] args) throws Exception {
14 | // Map disKey = new HashMap();
15 | // Map disType = new HashMap();
16 | // Map disTopic = new HashMap();
17 | // ConfigJson fcnf = new ConfigJson("", "kafka.distributed.address");
18 | // JSONArray froot = fcnf.getJsonArr();
19 | // //parser json
20 | // if(froot != null) {
21 | // for(int i = 0; i <= froot.size() - 1; i++) {
22 | // JSONObject data = froot.getJSONObject(i);
23 | // String dbname = data.getString("dbname");
24 | // String tablename = data.getString("tablename");
25 | // String mapkey = dbname + "." + tablename;
26 | // String primarykey = data.getString("primarykey");
27 | // if(primarykey != null) disKey.put(mapkey, primarykey);
28 | // String sourceType = data.getString("sourceType");
29 | // if(sourceType != null) disType.put(mapkey, sourceType);
30 | // String topic = data.getString("topic");
31 | // if(topic != null) disTopic.put(mapkey, topic);
32 | // }
33 | // }
34 |
35 | Map disTopic = new HashMap();
36 | Map disKey = new HashMap();
37 | Map disType = new HashMap();
38 | ConfigJson jcnf = new ConfigJson("", "online.address");
39 | JSONObject root = jcnf.getJson();
40 | //parse the json
41 | if(root != null) {
42 | JSONObject data = root.getJSONObject("data");
43 | //JSONArray jf = data.getJSONArray("db_tab_meta");
44 | JSONArray jf = JSONArray.fromObject("[\n" +
45 | "{\"dbname\":\"jd_data\",\"tablename\":\"test\",\"primarykey\":\"uid\",\"sourceType\":\"02\",\"topic\":\"mysql_aa\"},\n" +
46 | "{\"dbname\":\"jd_data\",\"tablename\":\"simple\",\"primarykey\":\"uid\",\"sourceType\":\"02\",\"topic\":\"mysql_bb\"}\n" +
47 | "]");
48 | System.out.println(jf.toString());
49 | for(int i = 0; i <= jf.size() - 1; i++) {
50 | JSONObject jdata = jf.getJSONObject(i);
51 | String dbname = jdata.getString("dbname");
52 | String tablename = jdata.getString("tablename");
53 | System.out.println("table :" + tablename);
54 | String mapkey = dbname + "." + tablename;
55 | String primarykey = jdata.getString("primarykey");
56 | System.out.println("key :" + primarykey);
57 | if(primarykey != null) disKey.put(mapkey, primarykey);
58 | String sourceType = jdata.getString("sourceType");
59 | if(sourceType != null) disType.put(mapkey, sourceType);
60 | String topic = jdata.getString("topic");
61 | if(topic != null) disTopic.put(mapkey, topic);
62 | }
63 | }
64 | }
65 |
66 | }
67 |
--------------------------------------------------------------------------------
/src/test/java/JsonParserTest.java:
--------------------------------------------------------------------------------
1 | import net.sf.json.JSONArray;
2 | import net.sf.json.JSONObject;
3 |
4 | import java.util.ArrayList;
5 | import java.util.List;
6 |
7 | /**
8 | * Created by hp on 15-3-18.
9 | */
10 | public class JsonParserTest {
11 |
12 | private static String getJsonKey(String jsonStr) {
13 | List<String> jprimaryKeys = new ArrayList<String>(); // typed list so the for-each over String below compiles
14 | jprimaryKeys.add("id");
15 | String keyStr = "";
16 | JSONObject jo = JSONObject.fromObject(jsonStr);
17 | String sdata = jo.getString("data");
18 | JSONObject jdata = JSONObject.fromObject(sdata);
19 | JSONObject jfields = jdata.getJSONObject("fields");
20 | String dbname = jdata.getString("schema");
21 | String tbname = jdata.getString("table");
22 | String oper = jdata.getString("operation");
23 | keyStr += dbname + "#" + tbname + "#";
24 | for(String s : jprimaryKeys) {
25 | if(jfields.containsKey(s)) {
26 | String kv = jfields.getString(s);
27 | keyStr += kv + "#";
28 | }
29 | }
30 | keyStr += oper;
31 | return keyStr;
32 | }
33 |
34 | public static void main(String[] args) {
35 | String jsonStr = "{\"code\":0,\"info\":{\"job-id\":\"2102172174092_sense\",\"content\":{\"db_tab_meta\":[{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_12\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_53\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_61\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_33\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_38\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_41\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_42\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_6\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_24\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_39\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_47\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_45\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_22\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_20\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_57\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_14\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_50\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_26\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_48\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_40\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_7\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_17\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_60\",\"sensefield\":\"customerName,address,phone,user
mob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_58\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_27\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_4\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_28\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_10\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_59\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_1\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_23\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_44\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_37\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_29\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_35\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_18\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_56\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_0\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_51\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_30\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_62\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_49\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_3\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_16\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_9\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_31\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"j
dorders_1\",\"tablename\":\"orders_32\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_46\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_19\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_15\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_21\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_25\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_8\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_55\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_52\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_43\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_36\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_5\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_54\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_13\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_2\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_0\",\"tablename\":\"orders_11\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_34\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"},{\"dbname\":\"jdorders_1\",\"tablename\":\"orders_63\",\"sensefield\":\"customerName,address,phone,usermob,email,remark,userremark,orderftel,premark\"}]},\"update-time\":null,\"type\":null,\"create-time\":null,\"yn\":\"yes\"}}";
36 | JSONObject jo = JSONObject.fromObject(jsonStr);
37 | JSONObject jinfo = jo.getJSONObject("info");
38 | JSONObject jcontent = jinfo.getJSONObject("content");
39 | if(jcontent.containsKey("db_tab_meta")) {
40 | System.out.println("contain");
41 | JSONArray jarry = jcontent.getJSONArray("db_tab_meta");
42 | } else {
43 | System.out.println("not contain");
44 | }
45 | System.out.println(jcontent.toString());
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/test/java/MapCharsequenceTest.java:
--------------------------------------------------------------------------------
1 | import java.util.HashMap;
2 | import java.util.Map;
3 |
4 | /**
5 | * Created by hp on 15-3-18.
6 | */
7 | public class MapCharsequenceTest {
8 |
9 | public static void main(String[] args) throws Exception {
10 | String s = "afs_service_id";
11 | Map<String, String> maps = new HashMap<String, String>();
12 | maps.put("afs_service_id", "1");
13 | maps.put("ssh", "2");
14 | if(maps.containsKey(s)) {
15 | System.out.print("yes");
16 | } else {
17 | System.out.print("no");
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/src/test/java/MapNullTest.java:
--------------------------------------------------------------------------------
1 | import java.util.HashMap;
2 | import java.util.Map;
3 |
4 | /**
5 | * Created by hp on 15-4-7.
6 | */
7 | public class MapNullTest {
8 |
9 | public static void main(String[] args) throws Exception {
10 | Map<String, String> ccm = new HashMap<String, String>(); // typed map so keys/values can be read as CharSequence below
11 | ccm.put("one", null);
12 | ccm.put("two", "2");
13 | ccm.put("three", null);
14 | ccm.put("four", "4");
15 | for(Map.Entry<String, String> entry : ccm.entrySet()) {
16 | CharSequence k = entry.getKey();
17 | CharSequence v = entry.getValue();
18 | System.out.println(k + ":" + v);
19 | if(v == null) {
20 | System.out.println("null object");
21 | }
22 | if(v != null && v.equals("")) {
23 | System.out.println("null string");
24 | }
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/test/java/MysqlParserTest.java:
--------------------------------------------------------------------------------
1 | import com.github.hackerwin7.mysql.parser.parser.MysqlParser;
2 |
3 | import java.io.IOException;
4 |
5 | /**
6 | * Created by hp on 14-9-17.
7 | */
8 | public class MysqlParserTest {
9 |
10 | public static void main(String[] args) throws IOException {
11 | MysqlParser mysqlParser = new MysqlParser();
12 | mysqlParser.mainProc();
13 | }
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/src/test/java/ParserJsonTest.java:
--------------------------------------------------------------------------------
1 | import com.github.hackerwin7.mysql.parser.protocol.json.ConfigJson;
2 | import net.sf.json.JSONObject;
3 | import com.github.hackerwin7.mysql.parser.parser.utils.ParserConfig;
4 |
5 | /**
6 | * Created by hp on 14-11-14.
7 | */
8 | public class ParserJsonTest {
9 |
10 | public static void main(String[] args) {
11 | ParserConfig configer = new ParserConfig();
12 | ConfigJson configJson = new ConfigJson("jd-mysql-parser-1");
13 | JSONObject jRoot = configJson.getJson();
14 | if(jRoot != null) {
15 | JSONObject jContent = jRoot.getJSONObject("info").getJSONObject("content");
16 | configer.setHbaseRootDir(jContent.getString("HbaseRootDir"));
17 | configer.setHbaseDistributed(jContent.getString("HbaseDistributed"));
18 | configer.setHbaseZkQuorum(jContent.getString("HbaseZKQuorum"));
19 | configer.setHbaseZkPort(jContent.getString("HbaseZKPort"));
20 | configer.setDfsSocketTimeout(jContent.getString("DfsSocketTimeout"));
21 | }
22 |
23 | System.out.println(configer.getHbaseRootDir()+","+configer.getHbaseDistributed()+"," +
24 | configer.getHbaseZkQuorum()+","+configer.getHbaseZkPort()+","+configer.getDfsSocketTimeout());
25 |
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/src/test/java/SimpleTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Created by hp on 14-12-26.
3 | */
4 | public class SimpleTest {
5 |
6 | public static void main(String[] args) {
7 | String ss = "a/u0001b/u0001c/u0001d/u0001";
8 | String sa = ss.substring(0, ss.lastIndexOf("/u0001"));
9 | System.out.println(sa);
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/src/test/java/TestElem.java:
--------------------------------------------------------------------------------
1 | import java.util.ArrayList;
2 | import java.util.List;
3 |
4 | /**
5 | * Created by hp on 15-3-13.
6 | */
7 | public class TestElem {
8 |
9 | public long time;
10 | public String name;
11 |
12 | public static void main(String[] args) throws Exception {
13 | TestElem te = null;
14 | TestElem t1 = new TestElem();
15 | t1.time = 1;
16 | TestElem t2 = new TestElem();
17 | t2.time = 2;
18 | List<TestElem> tts = new ArrayList<TestElem>(); // typed list so get() returns TestElem without a cast
19 | tts.add(t1);
20 | tts.add(t2);
21 | te = tts.get(tts.size() - 1);
22 | System.out.println(te.time);
23 | tts.clear();
24 | System.out.println(te.time);
25 | }
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/src/test/java/Testu001.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Created by hp on 15-3-11.
3 | */
4 | public class Testu001 {
5 |
6 | public static void main(String[] args) throws Exception {
7 | String s = "\u0001";
8 | System.out.println(s);
9 | }
10 | }
11 |
--------------------------------------------------------------------------------