├── .gitignore
├── lib
│   └── ojdbc5.jar
├── src
│   └── main
│       ├── java
│       │   └── com
│       │       └── yxt
│       │           └── data
│       │               └── migration
│       │                   ├── bean
│       │                   │   ├── DataTable.java
│       │                   │   └── DataTableStatus.java
│       │                   ├── DbViewer.java
│       │                   ├── JobJsonGenerator.java
│       │                   ├── ApplicationBoot.java
│       │                   ├── util
│       │                   │   ├── DbHelper.java
│       │                   │   ├── DataXJobFile.java
│       │                   │   └── AppConfig.java
│       │                   ├── DbViewerImpl.java
│       │                   └── MigrationTask.java
│       └── resources
│           ├── applicationContext.xml
│           ├── log4j.properties
│           ├── job
│           │   └── jobtemplate.json
│           └── config.properties
├── pom.xml
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | /.settings
2 | /logs
3 | /target
4 | /.classpath
5 | /.project
6 | .idea/
7 |
--------------------------------------------------------------------------------
/lib/ojdbc5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jawf/DataX-Migration/HEAD/lib/ojdbc5.jar
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/bean/DataTable.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration.bean;
2 |
3 | public class DataTable {
4 |
5 | private String name;
6 | private float size;
7 | private long count;
8 |
9 | public String getName() {
10 | return name;
11 | }
12 | public void setName(String name) {
13 | this.name = name;
14 | }
15 | public float getSize() {
16 | return size;
17 | }
18 | public void setSize(float size) {
19 | this.size = size;
20 | }
21 | public long getCount() {
22 | return count;
23 | }
24 | public void setCount(long count) {
25 | this.count = count;
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/resources/applicationContext.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!-- Minimal reconstruction (the original markup was lost in this dump): a Spring
 3 |      context that component-scans com.yxt so the annotated @Service/@Configuration
 4 |      beans (AppConfig, DbHelper, etc.) are registered. -->
 5 | <beans xmlns="http://www.springframework.org/schema/beans"
 6 |        xmlns:context="http://www.springframework.org/schema/context"
 7 |        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 8 |        xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
 9 |                            http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd">
10 |
11 |     <context:component-scan base-package="com.yxt" />
12 |
13 | </beans>
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | #
2 | log4j.rootCategory=INFO, stdout, file
3 |
4 | # Console output
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %c{1}:%L - %m%n
8 |
9 | # Root log file output
10 | log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
11 | log4j.appender.file.file=logs/migration.log
12 | log4j.appender.file.DatePattern='.'yyyy-MM-dd
13 | log4j.appender.file.layout=org.apache.log4j.PatternLayout
14 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/DbViewer.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration;
2 |
3 | import java.sql.SQLException;
4 | import java.util.List;
5 |
6 | import com.yxt.data.migration.bean.DataTable;
7 | import com.yxt.data.migration.bean.DataTableStatus;
8 |
9 | public interface DbViewer {
10 |
11 | public List<DataTable> getTargetTransfterTables() throws SQLException;
12 |
13 | public List<String> getTargetTransfterTableColumns(String tableName) throws SQLException;
14 |
15 | public List<String> getTargetTransfterTablePrimaryKey(String tableName) throws SQLException;
16 |
17 | public List<DataTableStatus> getSourceTransfterTablesStatus() throws SQLException;
18 |
19 | public long getSourceTransfterTableMigrationCount(String tableName, String whereClause) throws SQLException;
20 |
21 | public long getTargetTransfterTableMigrationFinishedCount(String tableName, String whereClause) throws SQLException;
22 |
23 | }
--------------------------------------------------------------------------------
/src/main/resources/job/jobtemplate.json:
--------------------------------------------------------------------------------
1 | {
2 | "job": {
3 | "setting": {
4 | "speed": {
5 | "channel":{job.channel}
6 | },
7 | "errorLimit": {
8 | "record": 0,
9 | "percentage": 0.02
10 | }
11 | },
12 | "content": [
13 | {
14 | "reader": {
15 | "name": "{source.db.type}reader",
16 | "parameter": {
17 | "username":"{source.db.username}",
18 | "password":"{source.db.password}",
19 | "column" : [{source.db.table.columns}],
20 | "splitPk": "{source.db.table.pk}",
21 | {source.db.table.where.clause}
22 | "connection": [
23 | {
24 | "table": [
25 | "{source.db.table.name}"
26 | ],
27 | "jdbcUrl": [
28 | "{source.db.url}"
29 | ]
30 | }
31 | ]
32 | }
33 | },
34 | "writer": {
35 | "name": "{target.db.type}writer",
36 | "parameter": {
37 | "username":"{target.db.username}",
38 | "password":"{target.db.password}",
39 | "column" : [{target.db.table.columns}],
40 | "writeMode":"insert",
41 | "connection": [
42 | {
43 | "jdbcUrl": "{target.db.url}",
44 | "table": [
45 | "{target.db.table.name}"
46 | ]
47 | }
48 | ]
49 | }
50 | }
51 | }
52 | ]
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/bean/DataTableStatus.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration.bean;
2 |
3 | public class DataTableStatus extends DataTable {
4 |
5 | private boolean finished = false;
6 | private boolean hasException = false;
7 | private String costTime="";
8 | private String readWriteRateSpeed="";
9 | private String readWriteRecordSpeed="";
10 | private long pendingRecords = 0;
11 | private long finishedRecords = 0;
12 | private long readWriteRecords = 0;
13 | private long readWriteFailRecords = 0;
14 |
15 | public boolean isFinished() {
16 | return finished;
17 | }
18 | public void setFinished(boolean finished) {
19 | this.finished = finished;
20 | }
21 | public boolean isHasException() {
22 | return hasException;
23 | }
24 | public void setHasException(boolean hasException) {
25 | this.hasException = hasException;
26 | }
27 | public String getCostTime() {
28 | return costTime;
29 | }
30 | public void setCostTime(String costTime) {
31 | this.costTime = costTime;
32 | }
33 | public String getReadWriteRateSpeed() {
34 | return readWriteRateSpeed;
35 | }
36 | public void setReadWriteRateSpeed(String readWriteRateSpeed) {
37 | this.readWriteRateSpeed = readWriteRateSpeed;
38 | }
39 | public String getReadWriteRecordSpeed() {
40 | return readWriteRecordSpeed;
41 | }
42 | public void setReadWriteRecordSpeed(String readWriteRecordSpeed) {
43 | this.readWriteRecordSpeed = readWriteRecordSpeed;
44 | }
45 | public long getReadWriteRecords() {
46 | return readWriteRecords;
47 | }
48 | public void setReadWriteRecords(long readWriteRecords) {
49 | this.readWriteRecords = readWriteRecords;
50 | }
51 | public long getReadWriteFailRecords() {
52 | return readWriteFailRecords;
53 | }
54 | public void setReadWriteFailRecords(long readWriteFailRecords) {
55 | this.readWriteFailRecords = readWriteFailRecords;
56 | }
57 | public long getPendingRecords() {
58 | return pendingRecords;
59 | }
60 | public void setPendingRecords(long pendingRecords) {
61 | this.pendingRecords = pendingRecords;
62 | }
63 | public long getFinishedRecords() {
64 | return finishedRecords;
65 | }
66 | public void setFinishedRecords(long finishedRecords) {
67 | this.finishedRecords = finishedRecords;
68 | }
69 |
70 | }
71 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/JobJsonGenerator.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration;
2 |
3 | import java.sql.SQLException;
4 | import java.util.List;
5 |
6 | import org.apache.commons.logging.Log;
7 | import org.apache.commons.logging.LogFactory;
8 | import org.springframework.beans.factory.annotation.Autowired;
9 | import org.springframework.stereotype.Service;
10 |
11 | import com.yxt.data.migration.bean.DataTable;
12 | import com.yxt.data.migration.bean.DataTableStatus;
13 | import com.yxt.data.migration.util.DataXJobFile;
14 |
15 | /**
16 | *
17 | * Generates DataX job JSON files into the DataX home's job folder.
18 | * @author Jawf Can Li
19 | * @since 1.0, based on DataX 3.0
20 | *
21 | */
22 | @Service
23 | public class JobJsonGenerator {
24 | protected static final Log log = LogFactory.getLog(JobJsonGenerator.class);
25 |
26 | @Autowired
27 | private DbViewer viewer;
28 |
29 | @Autowired
30 | private DataXJobFile jobFile;
31 |
32 | public void generate() throws SQLException {
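   | // For each source table: resolve the matching target table, fetch its columns
   | // and primary key, pick the applicable global where clause, count the rows to
   | // migrate, then write one DataX job JSON file for the table.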
33 | List<DataTableStatus> sourceTables = viewer.getSourceTransfterTablesStatus();
34 | List<DataTable> targetTables = viewer.getTargetTransfterTables();
35 |
36 | if (sourceTables != null) {
38 | for (DataTable ta : sourceTables) {
39 | String sourceTableName = ta.getName();
40 | if (sourceTableName == null || "".equals(sourceTableName)){
41 | throw new SQLException("Source Table is empty or not existed!");
42 | }
43 | String targetTableName = getTargetTableName(sourceTableName, targetTables);
44 | List<String> columns = viewer.getTargetTransfterTableColumns(targetTableName);
45 | List<String> pks = viewer.getTargetTransfterTablePrimaryKey(targetTableName);
46 | String whereClause = jobFile.getSourceGlobalTableWhereClause(columns);
47 | long migrationRecords = viewer.getSourceTransfterTableMigrationCount(sourceTableName, whereClause);
48 | String pk = null;
49 | if (pks != null && !pks.isEmpty()) {
50 | if (pks.size() == 1) {
51 | pk = pks.get(0);
52 | }
53 | }
54 | jobFile.generateJsonJobFile(sourceTableName, targetTableName, columns, pk, whereClause, migrationRecords);
58 | }
59 | }
60 | }
61 |
62 | private String getTargetTableName(String sourceTableName, List<DataTable> targetTables) throws SQLException {
63 | String result = null;
64 | if (sourceTableName!=null && targetTables!=null){
65 | for (DataTable t:targetTables){
66 | if (sourceTableName.equalsIgnoreCase(t.getName())){
67 | result = t.getName();
68 | break;
69 | }
70 | }
71 | }
72 | if (result == null){
73 | String errorMsg = "Target Table for "+sourceTableName+" is empty or not existed!";
74 | log.error(errorMsg);
75 | throw new SQLException(errorMsg);
76 | }
77 | return result;
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/ApplicationBoot.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.springframework.context.support.GenericXmlApplicationContext;
6 |
7 | /**
8 | * Starter
9 | *
10 | * Batch migration task that splits each table into one DataX job, then calls the
11 | * DataX script to start the migration and records the status to reports.
12 | * Command format:
13 | * Full running:
14 | * java -jar datax-migration.jar
15 | * Only generate datax jobs json files:
16 | * java -jar datax-migration.jar json
17 | * Only generate the CSV report for all source/target table status:
18 | * java -jar datax-migration.jar report
19 | * Only run the datax jobs:
20 | * java -jar datax-migration.jar run
21 | *
22 | * @author Jawf Can Li
23 | * @since 1.0, based on DataX 3.0
24 | *
25 | */
26 | public class ApplicationBoot {
27 |
28 | protected static final Log log = LogFactory.getLog(ApplicationBoot.class);
29 |
30 | /**
31 | * If the "skip" parameter is passed, only the table status report is generated;
32 | * job generation and the migration shell execution are skipped.
33 | * e.g.: java -jar migration.jar skip
34 | *
35 | * @param args
36 | */
37 | public static void main(String[] args) {
38 |
39 | log.info(" --- DataX-Migration 1.0, From Yunxuetang(www.yxt.com), Based on Alibaba DataX !");
40 | log.info(" --- Copyright (C) 2010-2016, Yuexuetang Group. All Rights Reserved.");
41 |
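   | // Parse the optional mode argument: report/skip, json/generate, or run/execute/command.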
42 | boolean onlyReportFlag = false;
43 | boolean onlyGenerateFlag = false;
44 | boolean onlyExecuteJobsFlag = false;
45 | if (args != null && args.length >= 1) {
46 | log.info("Main Parameters 1:" + args[0]);
47 | onlyReportFlag = "report".equalsIgnoreCase(args[0]) || "onlyreport".equalsIgnoreCase(args[0])
48 | || "skip".equalsIgnoreCase(args[0]);
49 |
50 | onlyGenerateFlag = "generate".equalsIgnoreCase(args[0]) || "json".equalsIgnoreCase(args[0]);
51 | onlyExecuteJobsFlag = "run".equalsIgnoreCase(args[0]) || "execute".equalsIgnoreCase(args[0])
52 | || "command".equalsIgnoreCase(args[0]);
53 | }
54 |
55 | GenericXmlApplicationContext context = new GenericXmlApplicationContext();
56 | context.setValidating(false);
57 | context.load("classpath*:applicationContext.xml");
58 | context.refresh();
59 |
60 | JobJsonGenerator gen = context.getBean(JobJsonGenerator.class);
61 | MigrationTask migration = context.getBean(MigrationTask.class);
62 |
63 | try {
64 |
65 | if (onlyGenerateFlag) {
66 | gen.generate();
67 | } else {
68 | if (!onlyReportFlag && !onlyExecuteJobsFlag) {
69 | gen.generate();
70 | }
71 | migration.execute(onlyReportFlag);
72 | }
73 |
74 | exit();
75 | } catch (Exception e) {
76 | log.error(e.getMessage(), e);
77 | } finally {
78 | context.close();
79 | }
80 |
81 | }
82 |
83 | private static void exit() {
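   | // intentionally a no-op; the Spring context is closed in main's finally block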
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/src/main/resources/config.properties:
--------------------------------------------------------------------------------
1 | source.db.url=jdbc:mysql://xxx.xxx.xxx.xxx:3306/sourcedbname?useUnicode=true&characterEncoding=UTF-8
2 | source.db.name=sourcedbname
3 | source.db.username=username
4 | source.db.password=password
5 | target.db.url=jdbc:mysql://xxx.xxx.xxx.xxx:3306/targetdbname?useUnicode=true&characterEncoding=UTF-8
6 | target.db.name=targetdbname
7 | target.db.username=username
8 | target.db.password=password
 9 | #Global where clause to filter the migration data. The clause is also used when computing the report status, so make sure it can run against both the source and target db.
10 | #If the source table contains the column in the first where clause, the first clause is used and the second is ignored.
11 | #If the source table does not contain the column in the first clause but contains the column in the second clause, the second clause is used and the first is ignored.
12 | #If the source table contains neither column, both clauses are ignored.
13 | source.db.global.where.clause=""
14 | source.db.global.where.second.clause=""
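   | #Example (hypothetical values, matching the samples in README.md):
   | #source.db.global.where.clause="orgId='410e7127-d969-4e0b-8326-4828182363cc'"
   | #source.db.global.where.second.clause="userid in (select id from CORE_USERPROFILE where orgid='d7f8dffb-8ae9-4a97-857b-59f395942781')"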
15 | #target db query sql: select migration tables
16 | migration.query.target.tables.sql=select ut.table_name from information_schema.tables ut where ut.table_schema='targetdbname' and ut.table_type='base table'
17 | #target db query sql: select migration table columns
18 | migration.query.target.table.columns.sql=select column_name from information_schema.columns t where table_schema='targetdbname' and table_name='{0}'
19 | #target db query sql: select migration table primary keys
20 | migration.query.target.table.primarykeys.sql=select column_name from information_schema.columns t where column_key='pri' and table_name='{0}'
21 | #Must select 1:tablename, 2:size, 3:numOfRows, and must be ordered by size desc.
22 | #mysql query
23 | migration.query.source.tables.status.sql=select ut.table_name,(ut.data_length+ut.index_length)/1024/1024 as size_MB, ut.table_rows from information_schema.tables ut where ut.table_schema='sourcedbname' and ut.table_type='base table' order by size_MB desc;
24 | #oracle query
25 | #migration.query.source.tables.status.sql=select * from ( select ut.table_name, nvl(us.size_MB,0) as size_MB, uta.num_rows from user_tab_comments ut left join (select segment_name as TableName, sum(bytes)/(1024*1024) as Size_MB from user_segments where segment_type like 'TABLE%' group by segment_name) us on ut.table_name=us.TableName left join user_tables uta on uta.table_name=ut.table_name where ut.table_type = 'TABLE' )order by Size_MB desc
26 | #Multiple channels can be used within one job to speed up the migration; 2 channels will open 2*5 threads for one job. Caution: multiple channels may cause record inconsistency.
27 | migration.datax.channel.multiple=true
28 | migration.datax.channel.2channels.records.over=1000000
29 | migration.datax.channel.4channels.records.over=10000000
30 | migration.datax.channel.nchannels.number=8
31 | migration.datax.channel.nchannels.records.over=100000000
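   | #With the thresholds above, for example: 5,000,000 rows -> 2 channels,
   | #50,000,000 rows -> 4 channels, over 100,000,000 rows -> nchannels.number (8).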
32 | migration.datax.tool.folder=E:/work/db_migration/datax/datax
33 | #If an error occurs during migration, decide whether to continue or not.
34 | migration.error.continue=true
35 | #Define the tables to be ignored during migration.
36 | migration.ingore.tables=empty
37 | #Tables bigger than this size (MB) are ignored during migration.
38 | migration.ingore.bigtables.size.than.mb=1
39 | #Job threads group tables by split type. Available values: index (table list index in the CSV reports), size (table size).
40 | migration.jobthread.split.type=size
41 | #Max tables per job thread: if the size range 20-10 contains 60 tables, they are grouped into 40 and 20; similar when splitting by index. Adjust this value according to the big table sizes to increase the migration speed.
42 | migration.jobthread.split.maxcount=40
43 | #enabled when type=size
44 | migration.jobthread.split.tablesize.mb="40000,30000,20000,10000,5000,1000,500,200,100,50,20,10,1,0.4375,0.25,0.1875,0.125,0.0625"
45 | #enabled when type=index
46 | migration.jobthread.split.indexes="0,1,2,5,10,50,60,90,100,200,300,310"
47 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/util/DbHelper.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration.util;
2 |
3 | import java.sql.Connection;
4 | import java.sql.SQLException;
5 |
6 | import org.apache.commons.logging.Log;
7 | import org.apache.commons.logging.LogFactory;
8 | import org.springframework.beans.factory.annotation.Autowired;
9 | import org.springframework.stereotype.Service;
10 |
11 | import com.alibaba.druid.pool.DruidDataSource;
12 |
13 | /**
14 |  * JDBC helper class wrapping Druid data sources for the source and target databases.
15 | *
16 | */
17 | @Service
18 | public class DbHelper {
19 |
20 | protected static final Log log = LogFactory.getLog(DbHelper.class);
21 |
22 | @Autowired
23 | private AppConfig config;
24 |
25 | private static DruidDataSource targetds = null;
26 | // Thread-local holder for the target connection
27 | public static ThreadLocal<Connection> targetcontainer = new ThreadLocal<Connection>();
28 |
29 | private static DruidDataSource sourceds = null;
30 | // Thread-local holder for the source connection
31 | public static ThreadLocal<Connection> sourcecontainer = new ThreadLocal<Connection>();
32 |
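   | // Lazily builds the Druid connection pools for the source and target databases
   | // from the values in config.properties (called on first getXxxConnection()).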
33 | private void init() {
34 | targetds = new DruidDataSource();
35 | targetds.setUrl(config.getTargetDbUrl());
36 | targetds.setUsername(config.getTargetDbUsername()); // username
37 | targetds.setPassword(config.getTargetDbPassword()); // password
38 | targetds.setInitialSize(2);
39 | targetds.setMaxActive(20);
40 | targetds.setMinIdle(0);
41 | targetds.setMaxWait(60000);
42 | //targetds.setValidationQuery("SELECT 1");
43 | targetds.setTestOnBorrow(false);
44 | targetds.setTestWhileIdle(true);
45 | targetds.setPoolPreparedStatements(false);
46 |
47 | sourceds = new DruidDataSource();
48 | sourceds.setUrl(config.getSourceDbUrl());
49 | sourceds.setUsername(config.getSourceDbUsername()); // username
50 | sourceds.setPassword(config.getSourceDbPassword()); // password
51 | sourceds.setInitialSize(2);
52 | sourceds.setMaxActive(20);
53 | sourceds.setMinIdle(0);
54 | sourceds.setMaxWait(60000);
55 | //sourceds.setValidationQuery("SELECT 1 FROM DUAL");
56 | sourceds.setTestOnBorrow(false);
57 | sourceds.setTestWhileIdle(true);
58 | sourceds.setPoolPreparedStatements(false);
59 | }
60 |
61 | /**
62 |  * Gets a target database connection and binds it to the current thread.
63 | *
64 | * @return
65 | */
66 | public Connection getTargetConnection() {
67 | Connection conn = null;
68 | try {
69 | if (targetds == null) {
70 | init();
71 | }
72 | conn = targetds.getConnection();
73 | log.info(Thread.currentThread().getName() + " Target Connection started......");
74 | targetcontainer.set(conn);
75 | } catch (Exception e) {
76 | log.info(Thread.currentThread().getName() + " Get Target connection failed!");
77 | e.printStackTrace();
78 | }
79 | return conn;
80 | }
81 |
82 | /**
83 |  * Gets a source database connection and binds it to the current thread.
84 | *
85 | * @return
86 | */
87 | public Connection getSourceConnection() {
88 | Connection conn = null;
89 | try {
90 | if (sourceds == null) {
91 | init();
92 | }
93 | conn = sourceds.getConnection();
94 | log.info(Thread.currentThread().getName() + " Source Connection started......");
95 | sourcecontainer.set(conn);
96 | } catch (Exception e) {
97 | log.info(Thread.currentThread().getName() + " Get Source connection failed!");
98 | e.printStackTrace();
99 | }
100 | return conn;
101 | }
102 |
103 | /*** Closes the target connection bound to the current thread. */
104 | public void closeTargetConnection() {
105 | try {
106 | Connection conn = targetcontainer.get();
107 | if (conn != null) {
108 | conn.close();
109 | log.info(Thread.currentThread().getName() + " Target Connection closed.");
110 | }
111 | } catch (SQLException e) {
112 | throw new RuntimeException(e.getMessage(), e);
113 | } finally {
114 | try {
115 | targetcontainer.remove(); // always remove the connection from the current thread
116 | } catch (Exception e2) {
117 | e2.printStackTrace();
118 | }
119 | }
120 | }
121 |
122 | /*** Closes the source connection bound to the current thread. */
123 | public void closeSourceConnection() {
124 | try {
125 | Connection conn = sourcecontainer.get();
126 | if (conn != null) {
127 | conn.close();
128 | log.info(Thread.currentThread().getName() + " Source Connection closed.");
129 | }
130 | } catch (SQLException e) {
133 | throw new RuntimeException(e.getMessage(), e);
134 | } finally {
135 | try {
136 | sourcecontainer.remove(); // always remove the connection from the current thread
137 | } catch (Exception e2) {
138 | e2.printStackTrace();
139 | }
140 | }
141 | }
142 |
143 | }
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
  1 | <?xml version="1.0" encoding="UTF-8"?>
  2 | <!-- Minimal reconstruction: the XML tags were lost in this dump; the markup below
  3 |      is restored around the surviving text (coordinates, versions, plugin settings);
  4 |      uncertain spots are noted inline. -->
  5 | <project xmlns="http://maven.apache.org/POM/4.0.0"
  6 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  7 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  8 |     <modelVersion>4.0.0</modelVersion>
  9 |
 10 |     <groupId>com.yxt.data</groupId>
 11 |     <artifactId>datax-migration</artifactId>
 12 |     <version>1.0-SNAPSHOT</version>
 13 |     <packaging>jar</packaging>
 14 |
 15 |     <name>datax-migration</name>
 16 |
 17 |     <properties>
 18 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 19 |         <spring.version>4.1.6.RELEASE</spring.version>
 20 |         <jackson.version>2.5.1</jackson.version>
 21 |     </properties>
 22 |
 23 |     <build>
 24 |         <finalName>${project.artifactId}</finalName>
 25 |         <plugins>
 26 |             <plugin>
 27 |                 <groupId>org.apache.maven.plugins</groupId>
 28 |                 <artifactId>maven-compiler-plugin</artifactId>
 29 |                 <version>3.3</version>
 30 |                 <configuration>
 31 |                     <!-- "lib" survived here; assumed to be an extdirs entry for the local ojdbc jar -->
 32 |                     <compilerArguments>
 33 |                         <extdirs>lib</extdirs>
 34 |                     </compilerArguments>
 35 |                     <source>1.7</source>
 36 |                     <target>1.7</target>
 37 |                     <encoding>UTF-8</encoding>
 38 |                 </configuration>
 39 |             </plugin>
 40 |             <plugin>
 41 |                 <groupId>org.apache.maven.plugins</groupId>
 42 |                 <artifactId>maven-surefire-plugin</artifactId>
 43 |                 <version>2.7.1</version>
 44 |             </plugin>
 45 |             <plugin>
 46 |                 <groupId>org.apache.maven.plugins</groupId>
 47 |                 <artifactId>maven-jar-plugin</artifactId>
 48 |                 <version>2.6</version>
 49 |                 <configuration>
 50 |                     <archive>
 51 |                         <manifest>
 52 |                             <addClasspath>true</addClasspath>
 53 |                             <classpathPrefix>${project.name}_lib/</classpathPrefix>
 54 |                             <mainClass>com.yxt.data.migration.ApplicationBoot</mainClass>
 55 |                         </manifest>
 56 |                         <manifestEntries>
 57 |                             <Class-Path>${project.name}_lib/ojdbc5-11.jar</Class-Path>
 58 |                         </manifestEntries>
 59 |                     </archive>
 60 |                     <includes>
 61 |                         <include>com/**</include>
 62 |                         <include>job/**</include>
 63 |                         <include>*.xml</include>
 64 |                         <include>*.properties</include>
 65 |                     </includes>
 66 |                 </configuration>
 67 |             </plugin>
 68 |             <plugin>
 69 |                 <groupId>org.apache.maven.plugins</groupId>
 70 |                 <artifactId>maven-dependency-plugin</artifactId>
 71 |                 <version>2.8</version>
 72 |                 <executions>
 73 |                     <execution>
 74 |                         <id>copy</id>
 75 |                         <phase>package</phase>
 76 |                         <goals>
 77 |                             <goal>copy-dependencies</goal>
 78 |                         </goals>
 79 |                         <configuration>
 80 |                             <outputDirectory>${project.build.directory}/${project.name}_lib</outputDirectory>
 81 |                         </configuration>
 82 |                     </execution>
 83 |                 </executions>
 84 |             </plugin>
 85 |         </plugins>
 86 |     </build>
 87 |
 88 |     <dependencies>
 89 |         <dependency>
 90 |             <groupId>com.alibaba</groupId>
 91 |             <artifactId>druid</artifactId>
 92 |             <version>1.0.26</version>
 93 |         </dependency>
 94 |         <dependency>
 95 |             <groupId>log4j</groupId>
 96 |             <artifactId>log4j</artifactId>
 97 |             <version>1.2.17</version>
 98 |         </dependency>
 99 |
100 |         <!-- database drivers -->
101 |         <dependency>
102 |             <groupId>com.oracle</groupId>
103 |             <artifactId>ojdbc5</artifactId>
104 |             <version>11</version>
105 |             <scope>system</scope>
106 |             <systemPath>${project.basedir}/lib/ojdbc5.jar</systemPath>
107 |         </dependency>
108 |         <dependency>
109 |             <groupId>mysql</groupId>
110 |             <artifactId>mysql-connector-java</artifactId>
111 |             <version>5.1.25</version>
112 |         </dependency>
113 |
114 |         <!-- spring -->
115 |         <dependency>
116 |             <groupId>org.springframework</groupId>
117 |             <artifactId>spring-core</artifactId>
118 |             <version>${spring.version}</version>
119 |         </dependency>
120 |         <dependency>
121 |             <groupId>org.springframework</groupId>
122 |             <artifactId>spring-context</artifactId>
123 |             <version>${spring.version}</version>
124 |         </dependency>
125 |         <dependency>
126 |             <groupId>org.springframework</groupId>
127 |             <artifactId>spring-beans</artifactId>
128 |             <version>${spring.version}</version>
129 |         </dependency>
130 |         <dependency>
131 |             <groupId>com.fasterxml.jackson.core</groupId>
132 |             <artifactId>jackson-core</artifactId>
133 |             <version>${jackson.version}</version>
134 |         </dependency>
135 |         <dependency>
136 |             <groupId>com.fasterxml.jackson.core</groupId>
137 |             <artifactId>jackson-annotations</artifactId>
138 |             <version>${jackson.version}</version>
139 |         </dependency>
140 |         <dependency>
141 |             <groupId>com.fasterxml.jackson.core</groupId>
142 |             <artifactId>jackson-databind</artifactId>
143 |             <version>${jackson.version}</version>
144 |         </dependency>
145 |         <dependency>
146 |             <groupId>com.jayway.jsonpath</groupId>
147 |             <artifactId>json-path</artifactId>
148 |             <version>0.9.1</version>
149 |         </dependency>
150 |
151 |         <dependency>
152 |             <groupId>junit</groupId>
153 |             <artifactId>junit</artifactId>
154 |             <version>4.11</version>
155 |             <scope>test</scope>
156 |         </dependency>
157 |     </dependencies>
158 |
159 | </project>
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/DbViewerImpl.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration;
2 |
3 | import java.sql.ResultSet;
4 | import java.sql.SQLException;
5 | import java.util.ArrayList;
6 | import java.util.List;
7 |
8 | import org.apache.commons.logging.Log;
9 | import org.apache.commons.logging.LogFactory;
10 | import org.springframework.beans.factory.annotation.Autowired;
11 | import org.springframework.stereotype.Service;
12 |
13 | import com.yxt.data.migration.DbViewer;
14 | import com.yxt.data.migration.bean.DataTable;
15 | import com.yxt.data.migration.bean.DataTableStatus;
16 | import com.yxt.data.migration.util.AppConfig;
17 | import com.yxt.data.migration.util.DbHelper;
18 |
19 | @Service
20 | public class DbViewerImpl implements DbViewer {
21 | protected static final Log log = LogFactory.getLog(DbViewerImpl.class);
22 |
23 | @Autowired
24 | private DbHelper dbHelper;
25 |
26 | @Autowired
27 | private AppConfig config;
28 |
29 | /* (non-Javadoc)
30 | * @see com.yxt.data.migration.DbViewer#getTransfterTables()
31 | */
32 | public List<DataTable> getTargetTransfterTables() throws SQLException {
33 | String sql = config.getMigrationQueryTargetTablesSql();
34 | ResultSet rs = null;
35 | List<DataTable> result = null;
36 | try {
37 | rs = dbHelper.getTargetConnection().prepareStatement(sql).executeQuery();
38 |
39 | if (rs != null) {
40 | result = new ArrayList<DataTable>();
41 | while (rs.next()) {
42 | DataTable ta = new DataTable();
43 | ta.setName(rs.getString(1));
44 | result.add(ta);
45 | }
46 | }
47 | } catch (SQLException e) {
48 | log.error(e.getMessage(), e);
49 | throw e;
50 | }
51 | dbHelper.closeTargetConnection();
52 | return result;
53 | }
54 |
55 | /* (non-Javadoc)
56 | * @see com.yxt.data.migration.DbViewer#getTransfterTableColumns(java.lang.String)
57 | */
58 | public List<String> getTargetTransfterTableColumns(String tableName) throws SQLException {
59 | String sql = config.getMigrationQueryTargetTableColumnsSql();
60 | sql = sql.replace("{0}", tableName);
61 | List<String> result = null;
62 |
63 | try {
64 | ResultSet rs = dbHelper.getTargetConnection().prepareStatement(sql).executeQuery();
65 | if (rs != null) {
66 | result = new ArrayList<String>();
67 | while (rs.next()) {
68 | result.add(rs.getString(1));
69 | }
70 | }
71 | } catch (SQLException e) {
72 | log.error(e.getMessage(), e);
73 | throw e;
74 | }
75 | dbHelper.closeTargetConnection();
76 | return result;
77 | }
78 |
79 | /* (non-Javadoc)
80 | * @see com.yxt.data.migration.DbViewer#getTransfterTablePrimaryKey(java.lang.String)
81 | */
82 | public List<String> getTargetTransfterTablePrimaryKey(String tableName) throws SQLException {
83 | String sql = config.getMigrationQueryTargetTablePrimaryKeysSql();
84 | sql = sql.replace("{0}", tableName);
85 | List<String> result = null;
86 | try {
87 | ResultSet rs = dbHelper.getTargetConnection().prepareStatement(sql).executeQuery();
88 |
89 | if (rs != null) {
90 | result = new ArrayList<String>();
91 | while (rs.next()) {
92 | result.add(rs.getString(1));
93 | }
94 | }
95 | } catch (SQLException e) {
96 | log.error(e.getMessage(), e);
97 | throw e;
98 | }
99 | dbHelper.closeTargetConnection();
100 | return result;
101 | }
102 |
103 |
104 | /* (non-Javadoc)
105 | * @see com.yxt.data.migration.DbViewer#getTransfterTablesSize()
106 | */
107 | public List<DataTableStatus> getSourceTransfterTablesStatus() throws SQLException {
108 | String sql = config.getMigrationQuerySourceTablesStatusSql();
109 |
110 | List<DataTableStatus> result = null;
111 |
112 | try {
113 | ResultSet rs = dbHelper.getSourceConnection().prepareStatement(sql).executeQuery();
114 | if (rs != null) {
115 | result = new ArrayList<DataTableStatus>();
116 | while (rs.next()) {
117 | DataTableStatus ta = new DataTableStatus();
118 | ta.setName(rs.getString(1));
119 | ta.setSize(rs.getFloat(2));
120 | ta.setCount(rs.getLong(3));
121 | result.add(ta);
122 | }
123 | }
124 | } catch (SQLException e) {
125 | log.error(e.getMessage(), e);
126 | throw e;
127 | }
128 | dbHelper.closeSourceConnection();
129 | return result;
130 | }
131 |
132 |
133 | /* (non-Javadoc)
134 | * @see com.yxt.data.migration.DbViewer#getTransfterTableMigrationCount(java.lang.String, java.lang.String)
135 | */
136 | public long getSourceTransfterTableMigrationCount(String tableName, String whereClause) throws SQLException {
137 | StringBuffer sql = new StringBuffer();
138 | sql.append("select count(*) from " + tableName + " ");
139 | if (whereClause != null && !"".equals(whereClause)) {
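    | // the configured clause may be wrapped in double quotes (see config.properties); strip them before building the SQL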
140 | whereClause = whereClause.replace("\"", "");
141 | sql.append(" where " + whereClause);
142 | }
143 | long result = 0;
144 |
145 | try {
146 | ResultSet rs = dbHelper.getSourceConnection().prepareStatement(sql.toString()).executeQuery();
147 | if (rs != null) {
148 | rs.next();
149 | result = rs.getLong(1);
150 | }
151 | } catch (SQLException e) {
152 | log.error(e.getMessage(), e);
153 | throw e;
154 | }
155 | dbHelper.closeSourceConnection();
156 | return result;
157 | }
158 |
159 | public long getTargetTransfterTableMigrationFinishedCount(String tableName, String whereClause) throws SQLException {
160 | StringBuffer sql = new StringBuffer();
161 | sql.append("select count(*) from " + tableName + " ");
162 | if (whereClause != null && !"".equals(whereClause)) {
163 | whereClause = whereClause.replace("\"", "");
164 | sql.append(" where " + whereClause);
165 | }
166 | long result = 0;
167 |
168 | try {
169 | ResultSet rs = dbHelper.getTargetConnection().prepareStatement(sql.toString()).executeQuery();
170 | if (rs != null) {
171 | rs.next();
172 | result = rs.getLong(1);
173 | }
174 | } catch (SQLException e) {
175 | log.error(e.getMessage(), e);
176 | throw e;
177 | }
178 | dbHelper.closeTargetConnection();
179 | return result;
180 | }
181 |
182 | }
183 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/util/DataXJobFile.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration.util;
2 |
3 | import java.io.BufferedReader;
4 | import java.io.BufferedWriter;
5 | import java.io.File;
6 | import java.io.FileReader;
7 | import java.io.FileWriter;
8 | import java.io.IOException;
9 | import java.io.InputStream;
10 | import java.io.InputStreamReader;
11 | import java.util.List;
12 |
13 | import org.apache.commons.logging.Log;
14 | import org.apache.commons.logging.LogFactory;
15 | import org.springframework.beans.factory.annotation.Autowired;
16 | import org.springframework.stereotype.Service;
17 | import org.springframework.util.StringUtils;
18 |
19 | import com.jayway.jsonpath.JsonPath;
20 |
21 | @Service
22 | public class DataXJobFile {
23 |
24 | protected static final Log log = LogFactory.getLog(DataXJobFile.class);
25 | private static String template = null;
26 |
27 | @Autowired
28 | private AppConfig config;
29 |
30 | public void generateJsonJobFile(String sourceTableName, String targetTableName, List<String> columns, String pk, String whereClause, long migrationRecords) {
31 |
32 | String json = getTemplate();
33 |
34 | CharSequence cols = getColumnsString(columns);
35 | int channels = getChannelNumber(migrationRecords);
36 |
37 | json = json.replace("{job.channel}", String.valueOf(channels));
38 | json = json.replace("{source.db.username}", config.getSourceDbUsername());
39 | json = json.replace("{source.db.password}", config.getSourceDbPassword());
40 | json = json.replace("{source.db.table.columns}", cols);
41 | json = json.replace("{source.db.table.pk}", pk == null ? "" : pk);
42 | json = json.replace("{source.db.table.name}", sourceTableName);
43 | json = json.replace("{source.db.url}", config.getSourceDbUrl());
44 | json = json.replace("{source.db.type}", getDbType(config.getSourceDbUrl()));
45 |
46 | if (whereClause!=null && !"".equals(whereClause)){
47 | json = json.replace("{source.db.table.where.clause}",
48 | "\"where\": \" " + whereClause +"\",");
49 | } else {
50 | json = json.replace("{source.db.table.where.clause}\n ", "");
51 | }
52 |
53 | json = json.replace("{target.db.username}", config.getTargetDbUsername());
54 | json = json.replace("{target.db.password}", config.getTargetDbPassword());
55 | json = json.replace("{target.db.table.columns}", cols);
56 | json = json.replace("{target.db.table.name}", targetTableName);
57 | json = json.replace("{target.db.url}", config.getTargetDbUrl());
58 | json = json.replace("{target.db.type}", getDbType(config.getTargetDbUrl()));
59 |
60 | //log.info(json);
61 |
62 | try {
63 | log.info("Write job json for table:"+sourceTableName);
64 | writeToFile(sourceTableName, json);
65 | } catch (IOException e) {
66 | log.error(e.getMessage(), e);
67 | }
68 | }
69 |
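   | // Derives the DataX plugin prefix from the JDBC URL, e.g. "jdbc:mysql://..." -> "mysql",
   | // so the template's {source.db.type}reader / {target.db.type}writer resolve to plugin names.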
70 | private static String getDbType(String dbUrl) {
71 | String dbType = null;
72 | if (dbUrl != null) {
73 | String url = dbUrl.replaceFirst("jdbc:", "");
74 | url = url.replaceFirst("microsoft:", "");
75 | dbType = url.substring(0, url.indexOf(":"));
76 | if (dbType.indexOf("-") > 0) {
77 | dbType = dbType.substring(0, dbType.indexOf("-"));
78 | }
79 | }
80 | return dbType;
81 | }
82 |
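   | // Picks the DataX channel count from the configured record-count thresholds:
   | // 1 channel by default, then 2/4/N as migrationRecords crosses each *.records.over value.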
83 | private int getChannelNumber(long migrationRecords) {
84 | int result = 1;
85 | if ("true".equalsIgnoreCase(config.getDataxUseMultipleChannel()) && migrationRecords > 0) {
86 | if (migrationRecords > config.getDataxUse2ChannelRecordsOver()) {
87 | result = 2;
88 | }
89 | if (migrationRecords > config.getDataxUse4ChannelRecordsOver()) {
90 | result = 4;
91 | }
92 | if (migrationRecords > config.getDataxUseNChannelRecordsOver()) {
93 | result = config.getDataxUseNChannelNumber();
94 | }
95 |
96 | }
97 | return result;
98 | }
99 |
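   | // Returns the first global where clause whose column exists in the table,
   | // otherwise the second clause, otherwise null (see the rules in config.properties).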
100 | public String getSourceGlobalTableWhereClause(List<String> columns) {
101 | String whereCase1 = config.getGlobalWhereClause();
102 | String whereCase2 = config.getGlobalWhere2Clause();
103 | String result = null;
104 | if (columns != null && !columns.isEmpty()) {
105 | if (hasWhereColumn(whereCase1, columns)) {
106 | result = whereCase1;
107 | } else if (hasWhereColumn(whereCase2, columns)) {
108 | result = whereCase2;
109 | }
110 | }
111 | return result;
112 | }
113 |
114 | /**
115 |  * Pre-condition: the job file must already have been generated.
116 | * @param tableName String
117 | * @return String
118 | */
119 | public String getJobFileWhereClause(String tableName){
120 | String jsonContent = this.readFile(config.getDataxToolFolder() + "/job/" + tableName + ".json");
121 | String value = null;
122 | try {
123 | value = JsonPath.read(jsonContent, "$.job.content[0].reader.parameter.where");
124 | } catch (Exception e) {
125 | log.error(e.getMessage(), e);
126 | }
127 | return value;
128 |
129 | }
130 |
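    | // Extracts the column name before "=" (or "in") from the clause and checks,
    | // case-insensitively, whether the table's column list contains it.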
131 | private boolean hasWhereColumn(String whereCase, List<String> columns) {
132 | String whereCol = null;
133 |
134 | if (!StringUtils.isEmpty(whereCase)) {
135 | whereCase = whereCase.replace(" ", "");
136 | whereCase = whereCase.replace("\"", "");
137 | whereCase = whereCase.replace("in", "=");
138 | String[] temp = whereCase.split("=");
139 | if (temp != null && temp.length > 1) {
140 | whereCol=temp[0];
141 | }
142 | }
143 |
144 | boolean result = false;
145 | if (whereCol!=null && columns != null && columns.size() > 0) {
146 | for (String column:columns){
147 | if (column!=null && column.equalsIgnoreCase(whereCol)){
148 | result = true;
149 | break;
150 | }
151 | }
152 | }
153 |
154 | return result;
155 | }
156 |
157 |
158 | private void writeToFile(String fileName, String json) throws IOException {
159 | File file = new File(config.getDataxToolFolder() + "/job/" + fileName + ".json");
160 |
161 | BufferedWriter out = new BufferedWriter(new FileWriter(file));
162 | out.write(json);
163 | out.close();
164 | log.info("Write json to file:"+file.getAbsolutePath());
165 | }
166 |
167 | private CharSequence getColumnsString(List<String> columns) {
168 | StringBuffer stb = new StringBuffer();
169 |
170 | for (String s : columns) {
171 | stb.append("\"");
172 | stb.append(s);
173 | stb.append("\",");
174 | }
175 | return stb.subSequence(0, stb.length() - 1);
176 | }
177 |
178 | private String getTemplate() {
179 | if (template == null) {
180 | StringBuffer stb = new StringBuffer();
181 | try {
182 | readToBuffer(stb, "job/jobtemplate.json");
183 | } catch (IOException e) {
184 | log.error(e.getMessage(), e);
185 | }
186 | template = stb.toString();
187 | log.info(template);
188 | }
189 |
190 | return template;
191 | }
192 |
193 | private void readToBuffer(StringBuffer buffer, String filePath) throws IOException {
194 | InputStream is = DataXJobFile.class.getClassLoader().getResourceAsStream(filePath);
    | if (is == null) {
    | throw new IOException("Resource not found on classpath: " + filePath);
    | }
195 | BufferedReader reader = new BufferedReader(new InputStreamReader(is));
196 | String line;
197 | line = reader.readLine();
198 | while (line != null) {
199 | buffer.append(line);
200 | buffer.append("\n");
201 | line = reader.readLine();
202 | }
203 | reader.close();
204 | }
205 |
206 | private String readFile(String path) {
207 | File file = new File(path);
208 | BufferedReader reader = null;
209 | String result = "";
210 | try {
211 | reader = new BufferedReader(new FileReader(file));
212 | String tempString = null;
213 | while ((tempString = reader.readLine()) != null) {
214 | result = result + tempString;
215 | }
216 | reader.close();
217 | } catch (IOException e) {
218 | log.error(e.getMessage(), e);
219 | } finally {
220 | if (reader != null) {
221 | try {
222 | reader.close();
223 | } catch (IOException e1) {
224 | }
225 | }
226 | }
227 | return result;
228 | }
229 |
230 | }
231 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/util/AppConfig.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration.util;
2 |
3 | import org.springframework.beans.factory.annotation.Value;
4 | import org.springframework.context.annotation.Bean;
5 | import org.springframework.context.annotation.ComponentScan;
6 | import org.springframework.context.annotation.Configuration;
7 | import org.springframework.context.annotation.PropertySource;
8 | import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
9 | import org.springframework.stereotype.Service;
10 |
11 | @Configuration
12 | @PropertySource("classpath:config.properties")
13 | @ComponentScan(basePackages = {"com.yxt",})
14 |
15 | //@EnableAsync
16 | @Service
17 | public class AppConfig {
18 |
19 | @Value("${source.db.name}")
20 | private String sourceDbName;
21 |
22 | @Value("${source.db.url}")
23 | private String sourceDbUrl;
24 |
25 | @Value("${source.db.username}")
26 | private String sourceDbUsername;
27 |
28 | @Value("${source.db.password}")
29 | private String sourceDbPassword;
30 |
31 | @Value("${target.db.name}")
32 | private String targetDbName;
33 |
34 | @Value("${target.db.url}")
35 | private String targetDbUrl;
36 |
37 | @Value("${target.db.username}")
38 | private String targetDbUsername;
39 |
40 | @Value("${target.db.password}")
41 | private String targetDbPassword;
42 |
43 | @Value("${migration.datax.tool.folder}")
44 | private String dataxToolFolder;
45 |
46 | @Value("${migration.datax.channel.multiple}")
47 | private String dataxUseMultipleChannel;
48 |
49 | @Value("${migration.datax.channel.2channels.records.over}")
50 | private long dataxUse2ChannelRecordsOver;
51 |
52 | @Value("${migration.datax.channel.4channels.records.over}")
53 | private long dataxUse4ChannelRecordsOver;
54 |
55 | @Value("${migration.datax.channel.nchannels.number}")
56 | private int dataxUseNChannelNumber;
57 |
58 | @Value("${migration.datax.channel.nchannels.records.over}")
59 | private long dataxUseNChannelRecordsOver;
60 |
61 | @Value("${source.db.global.where.clause}")
62 | private String globalWhereClause;
63 |
64 | @Value("${source.db.global.where.second.clause}")
65 | private String globalWhere2Clause;
66 |
67 | @Value("${migration.error.continue}")
68 | private String errorContinue;
69 |
70 | @Value("${migration.ingore.tables}")
71 | private String ingoreTables;
72 |
73 | @Value("${migration.ingore.bigtables.size.than.mb}")
74 | private float ingoreBigTablesSizeThanMb;
75 |
76 | @Value("${migration.jobthread.split.type}")
77 | private String jobthreadSplitType;
78 |
79 | @Value("${migration.jobthread.split.maxcount}")
80 | private int jobthreadSplitMaxcount;
81 |
82 | @Value("${migration.jobthread.split.tablesize.mb}")
83 | private String jobthreadSplitTableSizeMb;
84 |
85 | @Value("${migration.jobthread.split.indexes}")
86 | private String jobthreadSplitTableIndexes;
87 |
88 | @Value("${migration.query.target.tables.sql}")
89 | private String migrationQueryTargetTablesSql;
90 |
91 | @Value("${migration.query.target.table.columns.sql}")
92 | private String migrationQueryTargetTableColumnsSql;
93 |
94 | @Value("${migration.query.target.table.primarykeys.sql}")
95 | private String migrationQueryTargetTablePrimaryKeysSql;
96 |
97 | @Value("${migration.query.source.tables.status.sql}")
98 | private String migrationQuerySourceTablesStatusSql;
99 |
100 | @Bean
101 | public static PropertySourcesPlaceholderConfigurer propertyPlaceholderConfigurer() {
102 | return new PropertySourcesPlaceholderConfigurer();
103 |
104 | }
105 |
106 | public String getSourceDbName() {
107 | return sourceDbName;
108 | }
109 |
110 | public void setSourceDbName(String sourceDbName) {
111 | this.sourceDbName = sourceDbName;
112 | }
113 |
114 | public String getSourceDbUrl() {
115 | return sourceDbUrl;
116 | }
117 |
118 | public void setSourceDbUrl(String sourceDbUrl) {
119 | this.sourceDbUrl = sourceDbUrl;
120 | }
121 |
122 | public String getSourceDbUsername() {
123 | return sourceDbUsername;
124 | }
125 |
126 | public void setSourceDbUsername(String sourceDbUsername) {
127 | this.sourceDbUsername = sourceDbUsername;
128 | }
129 |
130 | public String getSourceDbPassword() {
131 | return sourceDbPassword;
132 | }
133 |
134 | public void setSourceDbPassword(String sourceDbPassword) {
135 | this.sourceDbPassword = sourceDbPassword;
136 | }
137 |
138 | public String getTargetDbName() {
139 | return targetDbName;
140 | }
141 |
142 | public void setTargetDbName(String targetDbName) {
143 | this.targetDbName = targetDbName;
144 | }
145 |
146 | public String getTargetDbUrl() {
147 | return targetDbUrl;
148 | }
149 |
150 | public void setTargetDbUrl(String targetDbUrl) {
151 | this.targetDbUrl = targetDbUrl;
152 | }
153 |
154 | public String getTargetDbUsername() {
155 | return targetDbUsername;
156 | }
157 |
158 | public void setTargetDbUsername(String targetDbUsername) {
159 | this.targetDbUsername = targetDbUsername;
160 | }
161 |
162 | public String getTargetDbPassword() {
163 | return targetDbPassword;
164 | }
165 |
166 | public void setTargetDbPassword(String targetDbPassword) {
167 | this.targetDbPassword = targetDbPassword;
168 | }
169 |
170 | public String getDataxToolFolder() {
171 | return dataxToolFolder;
172 | }
173 |
174 | public void setDataxToolFolder(String dataxToolFolder) {
175 | this.dataxToolFolder = dataxToolFolder;
176 | }
177 |
178 | public String getGlobalWhereClause() {
179 | return globalWhereClause;
180 | }
181 |
182 | public void setGlobalWhereClause(String globalWhereClause) {
183 | this.globalWhereClause = globalWhereClause;
184 | }
185 |
186 | public String getGlobalWhere2Clause() {
187 | return globalWhere2Clause;
188 | }
189 |
190 | public void setGlobalWhere2Clause(String globalWhere2Clause) {
191 | this.globalWhere2Clause = globalWhere2Clause;
192 | }
193 |
194 | public String getErrorContinue() {
195 | return errorContinue;
196 | }
197 |
198 | public void setErrorContinue(String errorContinue) {
199 | this.errorContinue = errorContinue;
200 | }
201 |
202 | public String getIngoreTables() {
203 | return ingoreTables;
204 | }
205 |
206 | public void setIngoreTables(String ingoreTables) {
207 | this.ingoreTables = ingoreTables;
208 | }
209 |
210 | public float getIngoreBigTablesSizeThanMb() {
211 | return ingoreBigTablesSizeThanMb;
212 | }
213 |
214 | public void setIngoreBigTablesSizeThanMb(float ingoreBigTablesSizeThanMb) {
215 | this.ingoreBigTablesSizeThanMb = ingoreBigTablesSizeThanMb;
216 | }
217 |
218 | public String getJobthreadSplitTableSizeMb() {
219 | return jobthreadSplitTableSizeMb;
220 | }
221 |
222 | public void setJobthreadSplitTableSizeMb(String jobthreadSplitTableSizeMb) {
223 | this.jobthreadSplitTableSizeMb = jobthreadSplitTableSizeMb;
224 | }
225 |
226 | public String getDataxUseMultipleChannel() {
227 | return dataxUseMultipleChannel;
228 | }
229 |
230 | public void setDataxUseMultipleChannel(String dataxUseMultipleChannel) {
231 | this.dataxUseMultipleChannel = dataxUseMultipleChannel;
232 | }
233 |
234 | public long getDataxUse2ChannelRecordsOver() {
235 | return dataxUse2ChannelRecordsOver;
236 | }
237 |
238 | public void setDataxUse2ChannelRecordsOver(long dataxUse2ChannelRecordsOver) {
239 | this.dataxUse2ChannelRecordsOver = dataxUse2ChannelRecordsOver;
240 | }
241 |
242 | public long getDataxUse4ChannelRecordsOver() {
243 | return dataxUse4ChannelRecordsOver;
244 | }
245 |
246 | public void setDataxUse4ChannelRecordsOver(long dataxUse4ChannelRecordsOver) {
247 | this.dataxUse4ChannelRecordsOver = dataxUse4ChannelRecordsOver;
248 | }
249 |
250 | public int getDataxUseNChannelNumber() {
251 | return dataxUseNChannelNumber;
252 | }
253 |
254 | public void setDataxUseNChannelNumber(int dataxUseNChannelNumber) {
255 | this.dataxUseNChannelNumber = dataxUseNChannelNumber;
256 | }
257 |
258 | public long getDataxUseNChannelRecordsOver() {
259 | return dataxUseNChannelRecordsOver;
260 | }
261 |
262 | public void setDataxUseNChannelRecordsOver(long dataxUseNChannelRecordsOver) {
263 | this.dataxUseNChannelRecordsOver = dataxUseNChannelRecordsOver;
264 | }
265 |
266 | public String getMigrationQueryTargetTablesSql() {
267 | return migrationQueryTargetTablesSql;
268 | }
269 |
270 | public void setMigrationQueryTargetTablesSql(String migrationQueryTargetTablesSql) {
271 | this.migrationQueryTargetTablesSql = migrationQueryTargetTablesSql;
272 | }
273 |
274 | public String getMigrationQueryTargetTableColumnsSql() {
275 | return migrationQueryTargetTableColumnsSql;
276 | }
277 |
278 | public void setMigrationQueryTargetTableColumnsSql(String migrationQueryTargetTableColumnsSql) {
279 | this.migrationQueryTargetTableColumnsSql = migrationQueryTargetTableColumnsSql;
280 | }
281 |
282 | public String getMigrationQueryTargetTablePrimaryKeysSql() {
283 | return migrationQueryTargetTablePrimaryKeysSql;
284 | }
285 |
286 | public void setMigrationQueryTargetTablePrimaryKeysSql(String migrationQueryTargetTablePrimaryKeysSql) {
287 | this.migrationQueryTargetTablePrimaryKeysSql = migrationQueryTargetTablePrimaryKeysSql;
288 | }
289 |
290 | public String getMigrationQuerySourceTablesStatusSql() {
291 | return migrationQuerySourceTablesStatusSql;
292 | }
293 |
294 | public void setMigrationQuerySourceTablesStatusSql(String migrationQuerySourceTablesStatusSql) {
295 | this.migrationQuerySourceTablesStatusSql = migrationQuerySourceTablesStatusSql;
296 | }
297 |
298 | public String getJobthreadSplitType() {
299 | return jobthreadSplitType;
300 | }
301 |
302 | public void setJobthreadSplitType(String jobthreadSplitType) {
303 | this.jobthreadSplitType = jobthreadSplitType;
304 | }
305 |
306 | public int getJobthreadSplitMaxcount() {
307 | return jobthreadSplitMaxcount;
308 | }
309 |
310 | public void setJobthreadSplitMaxcount(int jobthreadSplitMaxcount) {
311 | this.jobthreadSplitMaxcount = jobthreadSplitMaxcount;
312 | }
313 |
314 | public String getJobthreadSplitTableIndexes() {
315 | return jobthreadSplitTableIndexes;
316 | }
317 |
318 | public void setJobthreadSplitTableIndexes(String jobthreadSplitTableIndexes) {
319 | this.jobthreadSplitTableIndexes = jobthreadSplitTableIndexes;
320 | }
321 |
322 | }
323 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DataX-Migration
2 | **DataX-Migration** is a full-database migration tool based on Alibaba DataX 3.0. It supports migration among MySQL, Oracle, SQL Server and PostgreSQL, supports a where condition during migration, and generates migration data reports.
4 |
5 | ## What is DataX?
6 |
7 | **DataX** is an offline data synchronization tool/platform widely used inside Alibaba Group, providing efficient data synchronization among heterogeneous data sources such as MySQL, Oracle, SqlServer, Postgre, HDFS, Hive, ADS, HBase, OTS and ODPS.
8 | **DataX** carries all of the group's offline big-data synchronization and has been running stably for over 6 years; it currently completes more than 80,000 sync jobs per day, transferring over 300 TB of data daily.
9 |
10 | 
11 | For a more detailed introduction, see: https://github.com/alibaba/DataX/wiki/DataX-Introduction
12 |
13 | ## Why DataX-Migration?
14 | **DataX** focuses on synchronizing data: driven by scripts and configuration, each standalone job conveniently synchronizes a single table. We needed a smarter, more automated way to synchronize an entire database, so we wrapped DataX to make whole-database migration easier.
15 |
16 | ## What DataX-Migration Does
17 | **DataX-Migration** generates a standalone DataX JSON config for each table according to the user-configured query conditions, then launches the DataX scripts to migrate those tables and produces the corresponding CSV **reports**. When there are many tables, a split strategy can be configured so that **multiple threads** migrate in parallel and speed things up.
18 |
19 | 
20 |
21 | ## Supported Data Channels
22 |
23 | DataX already has a fairly complete plugin ecosystem: mainstream RDBMS databases, NoSQL stores and big-data compute systems are all connected, as shown below. For details see: [DataX data source reference guide](https://github.com/alibaba/DataX/wiki/DataX-all-data-channels)
24 |
25 | | Type | Data Source | Reader | Writer | Docs |
26 | | ------------ | ---------- | :-------: | :-------: |:-------: |
27 | | RDBMS (relational databases) | MySQL | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/mysqlreader/doc/mysqlreader.md), [Write](https://github.com/alibaba/DataX/blob/master/mysqlwriter/doc/mysqlwriter.md)|
28 | | | Oracle | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/oraclereader/doc/oraclereader.md), [Write](https://github.com/alibaba/DataX/blob/master/oraclewriter/doc/oraclewriter.md)|
29 | | | SQLServer | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/sqlserverreader/doc/sqlserverreader.md), [Write](https://github.com/alibaba/DataX/blob/master/sqlserverwriter/doc/sqlserverwriter.md)|
30 | | | PostgreSQL | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/postgresqlreader/doc/postgresqlreader.md), [Write](https://github.com/alibaba/DataX/blob/master/postgresqlwriter/doc/postgresqlwriter.md)|
31 | | | DRDS | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/drdsreader/doc/drdsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/drdswriter/doc/drdswriter.md)|
32 | | | Generic RDBMS (all relational databases) | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/rdbmsreader/doc/rdbmsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/rdbmswriter/doc/rdbmswriter.md)|
33 | | Alibaba Cloud data warehouse storage | ODPS | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/odpsreader/doc/odpsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/odpswriter/doc/odpswriter.md)|
34 | | | ADS | | √ |[Write](https://github.com/alibaba/DataX/blob/master/adswriter/doc/adswriter.md)|
35 | | | OSS | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/ossreader/doc/ossreader.md), [Write](https://github.com/alibaba/DataX/blob/master/osswriter/doc/osswriter.md)|
36 | | | OCS | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/ocsreader/doc/ocsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/ocswriter/doc/ocswriter.md)|
37 | | NoSQL data stores | OTS | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/otsreader/doc/otsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/otswriter/doc/otswriter.md)|
38 | | | Hbase0.94 | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/hbase094xreader/doc/hbase094xreader.md), [Write](https://github.com/alibaba/DataX/blob/master/hbase094xwriter/doc/hbase094xwriter.md)|
39 | | | Hbase1.1 | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/hbase11xreader/doc/hbase11xreader.md), [Write](https://github.com/alibaba/DataX/blob/master/hbase11xwriter/doc/hbase11xwriter.md)|
40 | | | Phoenix4.x | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/hbase11xsqlreader/doc/hbase11xsqlreader.md), [Write](https://github.com/alibaba/DataX/blob/master/hbase11xsqlwriter/doc/hbase11xsqlwriter.md)|
41 | | | MongoDB | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/mongoreader/doc/mongoreader.md), [Write](https://github.com/alibaba/DataX/blob/master/mongowriter/doc/mongowriter.md)|
42 | | | Hive | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/hdfsreader/doc/hdfsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md)|
43 | | Unstructured data storage | TxtFile | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/txtfilereader/doc/txtfilereader.md), [Write](https://github.com/alibaba/DataX/blob/master/txtfilewriter/doc/txtfilewriter.md)|
44 | | | FTP | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/ftpreader/doc/ftpreader.md), [Write](https://github.com/alibaba/DataX/blob/master/ftpwriter/doc/ftpwriter.md)|
45 | | | HDFS | √ | √ |[Read](https://github.com/alibaba/DataX/blob/master/hdfsreader/doc/hdfsreader.md), [Write](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md)|
46 | | | Elasticsearch | | √ |[Write](https://github.com/alibaba/DataX/blob/master/elasticsearchwriter/doc/elasticsearchwriter.md)|
47 |
48 | Datax-Migration currently works fine when used directly with relational databases; non-relational databases still need verification.
49 |
50 | ## Quick Start
51 |
52 | ```
53 | 1. download the DataX runnable bin: http://datax-opensource.oss-cn-hangzhou.aliyuncs.com/datax.tar.gz
54 | 2. git clone https://github.com/Jawf/datax-migration.git (or download directly from the links below)
55 | 3. mvn clean install
56 | 4. copy target/datax-migration.jar and target/datax-migration_lib to the datax/ home directory.
57 | 5. open datax-migration.jar and edit config.properties: set the migration db information (source/target url, dbname, user, password, etc.).
58 | 6. open datax-migration.jar and edit job/jobtemplate.json accordingly; by default it migrates from mysqlreader to mysqlwriter.
59 | 7. java -jar datax-migration.jar
60 | ```
61 | - [DataX-Migration runnable jar download](https://github.com/Jawf/driver/blob/master/datax/datax-migration.zip?raw=true)
62 | - [DataX runnable bin download](http://datax-opensource.oss-cn-hangzhou.aliyuncs.com/datax.tar.gz)
63 |
64 | ## Special Functions
65 | ```
66 | java -jar datax-migration.jar json    #Only generate the JSON job configs for all tables; you may need to adjust the reader/writer or columns accordingly.
67 | java -jar datax-migration.jar report  #Only generate the CSV report to view migration status.
68 | java -jar datax-migration.jar run     #Only run the migration and generate the CSV reports. Generate the JSON configs for all tables first.
69 | ```
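   |
   | Generated job files are written to the DataX home's `job/` folder, one JSON file per table (see `DataXJobFile.writeToFile`). A sketch of the resulting layout after the `json` step; the table-name files are placeholders:
   |
   | ```
   | datax/
   | ├── bin/
   | │   └── datax.py
   | └── job/
   |     ├── <source_table_1>.json
   |     └── <source_table_2>.json
   | ```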
70 |
71 | ## config.properties: Detailed Properties
72 | | Property Name | Mandatory | Sample Value | Remark |
73 | | ------------ | ---------- | ---------- | :-------: |
74 | | source.db.url | * | jdbc:mysql://192.168.0.188:3306/sourcedbname?useUnicode=true&characterEncoding=UTF-8 | source database connection url |
75 | | source.db.name | * | sourcedbname | source database name |
76 | | source.db.username | * | username | source database user name |
77 | | source.db.password | * | password | source database password |
78 | | target.db.url | * | jdbc:mysql://192.168.0.189:3306/targetdbname?useUnicode=true&characterEncoding=UTF-8 | target database connection url |
79 | | target.db.name | * | targetdbname | target database name |
80 | | target.db.username | * | username | target database user name |
81 | | target.db.password | * | password | target database password |
82 | | source.db.global.where.clause | | orgId='410e7127-d969-4e0b-8326-4828182363cc' | global where clause used to filter the migrated data; it is also used when collecting report status, so make sure it can run on both the source and the target db |
83 | | source.db.global.where.second.clause | | userid in (select id from CORE_USERPROFILE where orgid='d7f8dffb-8ae9-4a97-857b-59f395942781') | if a source table contains the column of the first where clause, the first clause is used and the second ignored; if it lacks that column but contains the column of the second clause, the second clause is used and the first ignored; if it contains neither column, both clauses are ignored |
84 | | migration.query.target.tables.sql | * | select ut.table_name from information_schema.tables ut where ut.table_schema='targetdbname' and ut.table_type='base table' | target db query sql: select the tables to migrate |
85 | | migration.query.target.table.columns.sql | * | select column_name from information_schema.columns t where table_schema='targetdbname' and table_name='{0}' | target db query sql: select a migration table's columns |
86 | | migration.query.target.table.primarykeys.sql | * | select column_name from information_schema.columns t where column_key='pri' and table_name='{0}' | target db query sql: select a migration table's primary keys |
87 | | migration.query.source.tables.status.sql | * | select ut.table_name,(ut.data_length+ut.index_length)/1024/1024 as size_MB, ut.table_rows from information_schema.tables ut where ut.table_schema='targetdbname' and ut.table_type='base table' order by size_MB desc; | must return 1: table name, 2: size, 3: number of rows, and must be ordered by size descending |
88 | | migration.datax.channel.multiple | | true | use multiple channels within one job to speed up migration; 2 channels open 2*5 threads for one job. Caution: multiple channels may cause record inconsistency. |
89 | | migration.datax.channel.2channels.records.over | | 1000000 | if a table has more records than this value, 2 channels are used in the DataX json config. Caution: multiple channels may cause record inconsistency. |
90 | | migration.error.continue | | true | whether the running thread continues when an error occurs |
91 | | migration.ingore.tables | | empty | tables to exclude from migration; empty = ignore none |
92 | | migration.ingore.bigtables.size.than.mb | | 1 | skip tables larger than this size in MB; 1 = ignore all tables whose size > 1MB |
93 | | migration.jobthread.split.type | | size | how job threads group tables; available values: index = by the table's index in the csv report, size = by table size |
94 | | migration.jobthread.split.maxcount | | 40 | max tables per job thread; e.g. if the 20MB-10MB size bucket holds 60 tables, it is split into groups of 40 and 20 (splitting by index works the same way). Tune this against your big-table sizes to increase migration speed. |
95 | | migration.jobthread.split.tablesize.mb | | "40000,30000,20000,10000,5000,1000,500,200,100,50,20,10,1,0.4375,0.25,0.1875,0.125,0.0625" | size unit is MB; cuts the table list into groups by table size, used when type=size |
96 | | migration.jobthread.split.indexes | | "0,1,2,5,10,50,60,90,100,200,300,310" | cuts the table list into groups by index, used when type=index |
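
To make the three split properties concrete, here is one illustrative (non-default) setup: tables are sorted by size descending, a job-thread boundary is cut at each configured size, and any group that grows past maxcount is cut again:

```properties
migration.jobthread.split.type=size
migration.jobthread.split.maxcount=40
# thread 1 gets tables >= 500MB, thread 2 gets < 500MB and >= 100MB, and so on
migration.jobthread.split.tablesize.mb="500,100,50,1"
```

This keeps each thread's workload roughly comparable: a few big tables per thread at the top of the list, and the long tail of tiny tables batched into larger groups.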
97 |
98 | ## Feedback & Bug Report
99 | You are welcome to use this project, or to join us and make it even better.
100 | - Problems can be reported directly as an [issue](https://github.com/Jawf/datax-migration/issues/new)
101 | - Email:
102 | - Wechat: jawfneo
103 | - Quite a few people have already used this project and it has collected some stars; please keep the valuable feedback coming, contribute code, and open Pull Requests
104 |
105 | ## FAQ
106 | ```
107 | 1. In config.properties you must replace sourcedbname and targetdbname everywhere they occur — use search & replace :) people have already been bitten by this.
108 | ```
109 |
110 |
--------------------------------------------------------------------------------
/src/main/java/com/yxt/data/migration/MigrationTask.java:
--------------------------------------------------------------------------------
1 | package com.yxt.data.migration;
2 |
3 | import java.io.BufferedReader;
4 | import java.io.BufferedWriter;
5 | import java.io.File;
6 | import java.io.FileWriter;
7 | import java.io.IOException;
8 | import java.io.InputStream;
9 | import java.io.InputStreamReader;
10 | import java.sql.SQLException;
11 | import java.text.SimpleDateFormat;
12 | import java.util.ArrayList;
13 | import java.util.Date;
14 | import java.util.List;
15 |
16 | import org.apache.commons.logging.Log;
17 | import org.apache.commons.logging.LogFactory;
18 | import org.springframework.beans.factory.annotation.Autowired;
19 | import org.springframework.stereotype.Service;
20 |
21 | import com.yxt.data.migration.bean.DataTable;
22 | import com.yxt.data.migration.bean.DataTableStatus;
23 | import com.yxt.data.migration.util.AppConfig;
24 | import com.yxt.data.migration.util.DataXJobFile;
25 |
26 | /**
27 | *
28 | * Batch Migration Task to split each table to one DataX job. Then call datax
29 | * script to start migration and noted the status records to reports.
30 | * @author Jawf Can Li
31 | * @since 1.0 base on datax 3.0
32 | *
33 | */
34 | @Service()
35 | public class MigrationTask {
36 |
37 | protected static final Log log = LogFactory.getLog(MigrationTask.class);
38 |
39 | protected static boolean breakFlag = false;
40 |
41 | @Autowired
42 | private AppConfig config;
43 |
44 | @Autowired
45 | private DataXJobFile jobFile;
46 |
47 | @Autowired
48 | private DbViewer viewer;
49 |
50 | private List<DataTableStatus> allTableStatus;
51 | private List<DataTable> targetTables;
52 |
53 |
54 | /*
55 | * Testing
56 | * @param reportFlag
57 | * @param args
58 | * @throws IOException
59 | */
60 | /*public static void main(String args[]) throws IOException {
61 |
62 | Process process = Runtime.getRuntime().exec(
63 | "python E:/work/db_migration/datax/datax/bin/datax.py E:/work/db_migration/datax/datax/job/qidatest.json");
64 |
65 | try {
66 | BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
67 | String line = null;
68 | while ((line = reader.readLine()) != null) {
69 | System.out.println(line);
70 | }
71 | } catch (Exception e) {
72 | e.printStackTrace();
73 | }
74 | }*/
75 |
76 | public void execute() {
77 | execute(false);
78 | }
79 |
80 | public void execute(final boolean skipCommandFlag) {
81 | try {
82 |
83 | initAllTableStatus();
84 | log.info("init All Table Status done with all table size=" + (allTableStatus != null ? allTableStatus.size() : 0));
85 |
86 |
87 | //Get the split index points.
88 | List<Integer> splitIndexs = getJobThreadSplitIndexes();
89 |
90 | //Split All table jobs to several job threads According to the splitIndexs
91 | int startNum = 0;
92 | int endNum = 0;
93 | if (splitIndexs!=null && !splitIndexs.isEmpty()){
94 | for (int i = 0; i < splitIndexs.size(); i++) {
95 | if (i==0){
96 | endNum = splitIndexs.get(i);
97 | } else if (i==splitIndexs.size()-1){
98 | startNum = endNum;
99 | endNum = allTableStatus.size()-1;
100 | } else {
101 | startNum = endNum;
102 | endNum = splitIndexs.get(i);
103 | }
104 | if (endNum==0){
105 | //when allTableStatus.size() ==1
106 | endNum = 1;
107 | }
108 | final int jobStartNum = startNum;
109 | final int jobEndNum = endNum;
110 | log.info("Split Jobs: Job Thread "+(i+1)+": Start Index>="+jobStartNum+", End Index<"+jobEndNum);
111 | new Thread(new Runnable() {
112 | public void run() {
113 | StringBuffer logInfo = new StringBuffer();
114 | for (int t1=jobStartNum;t1<jobEndNum;t1++){
208 | if (tableStatus.getCount()>1000000){ // guard reconstructed: tables with over 1,000,000 records get a dedicated output-reader thread
210 | StreamCommandOutputThread outputThread = new StreamCommandOutputThread(process.getInputStream(), threadString, tableStatus, logInfo);
211 | outputThread.start();
212 |
213 | try {
214 | outputThread.join();
215 | //process.waitFor();
216 | } catch (InterruptedException e) {
217 | hasException = true;
218 | log.error(threadString+e.getMessage(), e);
219 | }
220 | } else {
221 | BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
222 | String line = null;
223 |
224 | while ((line = reader.readLine()) != null) {
225 | logInfo.append(threadString+line).append("\n");
226 | if (!hasException && line!=null && (line.contains("DataXException") || line.contains("SQLException"))){
227 | hasException = true;
228 | tableStatus.setHasException(hasException);
229 | }
230 | readExecueOutputLineStatus(line, tableStatus);
231 | }
232 | logInfo.append(threadString+"execute finished!");
233 | }
234 | return hasException;
235 | }
236 |
237 |
238 |
239 | }).start();
240 |
241 | }
242 | }
243 |
244 | } catch (Exception e) {
245 | e.printStackTrace();
246 | log.error(e.getMessage(), e);
247 | } finally {
248 | try {
249 | outputMigrationStatusToFile();
250 | } catch (IOException e) {
251 | log.error(e.getMessage(), e);
252 | }
253 | }
254 | }
255 |
256 | private static void readExecueOutputLineStatus(String line, DataTableStatus tableStatus) { // parses one line of datax.py output into cost time, rate/record speed and total/error record counts on tableStatus
257 | if (line!=null && line.contains(" : ")){
258 | String[] resultTemp = line.split(" : ");
259 | String value = resultTemp[1].trim();
260 | if (value.matches("\\d+s")){
261 | tableStatus.setCostTime(value);
262 | } else if (value.matches("\\d+[A-Z]*B\\/s")){
263 | tableStatus.setReadWriteRateSpeed(value);
264 | } else if (value.matches("\\d+rec\\/s")){
265 | tableStatus.setReadWriteRecordSpeed(value);
266 | }
267 | }else if (line!=null && line.contains("used")){
268 | int count = Integer.parseInt(line.substring(line.indexOf("used")+5, line.indexOf("ms")-1).trim());
269 | tableStatus.setCostTime(count+"ms");
270 | } else if (line!=null && line.contains("StandAloneJobContainerCommunicator")){
271 | int count = Integer.parseInt(line.substring(line.indexOf("Total")+5, line.indexOf("records")).trim());
272 | tableStatus.setReadWriteRecords(count);
273 |
274 | int countFail = Integer.parseInt(line.substring(line.indexOf("Error")+5, line.indexOf("records", line.indexOf("Error"))).trim());
275 | tableStatus.setReadWriteFailRecords(countFail);
276 |
277 | String speed = line.substring(line.indexOf("Speed")+5, line.indexOf("B/s")+3).trim();
278 | tableStatus.setReadWriteRateSpeed(speed);
279 |
280 | String speedRec = line.substring(line.indexOf(",", line.indexOf("B/s"))+1, line.indexOf("records/s")+9).trim();
281 | tableStatus.setReadWriteRecordSpeed(speedRec);
282 | }
283 | }
284 |
285 |
286 | private String getCommand(String fileName) {
287 | String dataxPath = config.getDataxToolFolder();
288 | String command = "python " + dataxPath + "/bin/datax.py " + dataxPath + "/job/" + fileName + ".json";
289 | return command;
290 | }
291 |
292 | protected boolean executeCommand(String command) throws IOException {
293 | Process process = Runtime.getRuntime().exec(command);
294 |
295 | BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
296 | String line = null;
297 | boolean hasException = false;
298 | while ((line = reader.readLine()) != null) {
299 | log.info(line);
300 | if (!hasException && line.contains("Exception")){
301 | hasException = true;
302 | }
303 | }
304 | return hasException;
305 | }
306 |
307 | private void initAllTableStatus() throws SQLException {
308 | allTableStatus = viewer.getSourceTransfterTablesStatus();
309 | targetTables = viewer.getTargetTransfterTables();
310 | }
311 |
312 | private List<Integer> getJobThreadSplitIndexes() {
313 | List<Integer> splitIndexs = new ArrayList<Integer>();
314 | String type = config.getJobthreadSplitType();
315 | float[] splitConfigSize = getSplitSizeFromConfig();//Size Unit: MB
316 | int[] splitConfigIndexes = getSplitIndexFromConfig();//table index boundaries
317 | int splitMaxCount = config.getJobthreadSplitMaxcount();
318 | if (allTableStatus != null && allTableStatus.size() > 1 ){
319 | if ("index".equalsIgnoreCase(type) && splitConfigIndexes!=null) {
320 | int splitCount = 0;
321 | int i = 0;
322 | if (splitConfigIndexes[i]==0){
323 | i++;
324 | }
325 | for (int j = 1; j < allTableStatus.size(); j++) {
326 | if (splitCount >= splitMaxCount) {
327 | splitIndexs.add(j);
328 | splitCount = 0;
329 | } else if (j >= splitConfigIndexes[i] && j <= splitConfigIndexes[splitConfigIndexes.length - 1]) {
330 | splitIndexs.add(j);
331 | splitCount = 0;
332 | if (i < splitConfigIndexes.length - 1) {
333 | i++;
334 | }
335 | } else {
336 | splitCount++;
337 | }
338 | }
339 | } else if (splitConfigSize != null) {
340 | int splitCount = 0;
341 | for (int i = 0; i < splitConfigSize.length; i++) {
342 | int n = 0;
343 | if (i > 0 && splitIndexs.size() > 0) {
344 | n = splitIndexs.get(splitIndexs.size() - 1);
345 | }
346 | for (int j = n + 1; j < allTableStatus.size(); j++) {
347 | DataTable ta = allTableStatus.get(j);
348 |
349 | float size = ta.getSize();
350 | if (splitCount >= splitMaxCount) {
351 | splitIndexs.add(j);
352 | splitCount = 0;
353 | } else if (size < splitConfigSize[i]) {
354 | splitIndexs.add(j);
355 | splitCount = 0;
356 | break;
357 | } else {
358 | splitCount++;
359 | }
360 | }
361 | }
362 | } else {
363 | splitIndexs.add(allTableStatus.size() - 1);
364 | }
365 | } else if (allTableStatus != null && allTableStatus.size() > 0) {
366 | splitIndexs.add(allTableStatus.size() - 1);
367 | }
368 | return splitIndexs;
369 | }
370 |
371 | private String getTargetTableName(String sourceTableName) {
372 | String result = null;
373 | if (sourceTableName!=null && targetTables!=null){
374 | for (DataTable t:targetTables){
375 | if (sourceTableName.equalsIgnoreCase(t.getName())){
376 | result = t.getName();
377 | break;
378 | }
379 | }
380 | }
381 | if (result == null){
382 | String errorMsg = "Target Table for "+sourceTableName+" is empty or not existed!";
383 | log.error(errorMsg);
384 | }
385 | return result;
386 | }
387 |
388 | private boolean isIgnoreTables(String tableName) {
389 | boolean result = false;
390 | String[] ingoreTables = getIgnoreTables();
391 | if (ingoreTables != null) {
392 | for (int i = 0; i < ingoreTables.length; i++) {
393 | if (ingoreTables[i] != null && ingoreTables[i].equalsIgnoreCase(tableName)) {
394 | result = true;
395 | break;
396 | }
397 | }
398 | }
399 | return result;
400 | }
401 |
402 | private boolean isIgnoreBigTables(float size) {
403 | float bigsize = config.getIngoreBigTablesSizeThanMb();
404 | boolean result = false;
405 | if (bigsize != 0) {
406 | result = size > bigsize;
407 | }
408 | return result;
409 | }
410 |
411 | private String[] getIgnoreTables(){
412 | String tables = config.getIngoreTables();
413 | String[] result = null;
414 | if (tables!=null && !"empty".equalsIgnoreCase(tables) && !"null".equalsIgnoreCase(tables) && !"false".equalsIgnoreCase(tables) && !"no".equalsIgnoreCase(tables)){
415 | tables = tables.replace(" ", "");
416 | tables = tables.replace("\"", "");
417 | tables = tables.replace(";", ",");
418 | tables = tables.replace(":", ",");
419 | result = tables.split(",");
420 | }
421 | return result;
422 | }
423 |
424 | private float[] getSplitSizeFromConfig() {
425 | float[] splitSize = null;
426 | String splitTemp = config.getJobthreadSplitTableSizeMb();
427 | if (splitTemp != null && !splitTemp.trim().equals("")) {
428 | splitTemp = splitTemp.replace(" ", "");
429 | splitTemp = splitTemp.replace("\"", "");
430 | splitTemp = splitTemp.replace(";", ",");
431 | splitTemp = splitTemp.replace(":", ",");
432 | String[] splits = splitTemp.split(",");
433 |
434 | if (splits != null && splits.length > 0) {
435 | splitSize = new float[splits.length];
436 | for (int i = 0; i < splits.length; i++) {
437 | try {
438 | splitSize[i] = Float.parseFloat(splits[i]);
439 | } catch (Exception e) {
440 | log.error(e.getMessage(), e);
441 | }
442 | }
443 | }
444 | }
445 |
446 | return splitSize;
447 | }
448 |
449 | private int[] getSplitIndexFromConfig() {
450 | int[] splitIndexes = null;
451 | String splitTemp = config.getJobthreadSplitTableIndexes();
452 | if (splitTemp != null && !splitTemp.trim().equals("")) {
453 | splitTemp = splitTemp.replace(" ", "");
454 | splitTemp = splitTemp.replace("\"", "");
455 | splitTemp = splitTemp.replace(";", ",");
456 | splitTemp = splitTemp.replace(":", ",");
457 | String[] splits = splitTemp.split(",");
458 |
459 | if (splits != null && splits.length > 0) {
460 | splitIndexes = new int[splits.length];
461 | for (int i = 0; i < splits.length; i++) {
462 | try {
463 | splitIndexes[i] = Integer.parseInt(splits[i]);
464 | } catch (Exception e) {
465 | log.error(e.getMessage(), e);
466 | }
467 | }
468 | }
469 | }
470 |
471 | return splitIndexes;
472 | }
473 |
474 | private synchronized void outputMigrationStatusToFile() throws IOException {
475 | String datestr = new SimpleDateFormat("yyyyMMdd-HHmm").format(new Date()); // date plus hour/minute so reruns don't overwrite earlier reports
476 | File file = new File(config.getDataxToolFolder() + "/reports");
477 | if (!file.exists()){
478 | file.mkdirs();
479 | }
480 |
481 | file = new File(config.getDataxToolFolder() + "/reports/Migration-status_"+datestr+".csv");
482 |
483 | BufferedWriter out = new BufferedWriter(new FileWriter(file));
484 | if (allTableStatus != null && !allTableStatus.isEmpty()) {
485 | out.write("TableName,Size_MB,RecordsCount,MigrationRecords,Finished,HasException,CostTime,RateSpeed,RecordSpeed,ReadWriteRecords,ExceptionRecords,FinishedRecords\n");
486 | for (DataTableStatus ta : allTableStatus) {
487 | String finished = ta.isFinished()?"Yes":"No";
488 | String isHasException = ta.isHasException()?"Yes":"No";
489 | out.write(ta.getName() + "," + ta.getSize() + "," + ta.getCount() + "," + ta.getPendingRecords() + "," + finished + "," + isHasException + "," + ta.getCostTime() + "," + ta.getReadWriteRateSpeed() + "," + ta.getReadWriteRecordSpeed() + "," + ta.getReadWriteRecords() + "," + ta.getReadWriteFailRecords() +"," + ta.getFinishedRecords() + "\n");
490 | }
491 | }
492 | out.close();
493 | }
494 |
495 | /*public static void main(String[] str){
496 | String line="2016-11-08 15:57:52.270 [job-0] INFO StandAloneJobContainerCommunicator - Total 54 records, 835 bytes | Speed 69B/s, 4 records/s | Error 2 records, 0 bytes | All Task WaitWriterTime 0.010s | All Task WaitReaderTime 1.101s | Percentage 100.00%";
497 | int countFail = Integer.parseInt(line.substring(line.indexOf("Error")+5, line.indexOf("records", line.indexOf("Error"))).trim());
498 | String speed = line.substring(line.indexOf("Speed")+5, line.indexOf("B/s")+3).trim();
499 | String speedRec = line.substring(line.indexOf(",", line.indexOf("B/s"))+1, line.indexOf("records/s")+9).trim();
500 | System.out.println((line.substring(line.indexOf("Total")+5, line.indexOf("records")).trim()));
501 | System.out.println(countFail);
502 | System.out.println(speed);
503 | System.out.println(speedRec);
504 |
505 | line = " 2016-11-08 15:57:44.936 [taskGroup-0] INFO TaskGroupContainer - taskGroup[0] taskId[0] is successed, used[2131]ms";
506 | int count = Integer.parseInt(line.substring(line.indexOf("used")+5, line.indexOf("ms")-1).trim());
507 | System.out.println(count+"ms");
508 |
509 | line = "任务总计耗时 : 23s";
510 | System.out.println(line.split(" : ")[1].trim().matches("\\d+s"));
511 | line = "任务平均流量 : 76B/s";
512 | System.out.println(line.split(" : ")[1].trim().matches("\\d+[A-Za-z]+\\/s"));
513 | line = "记录写入速度 : 0rec/s";
514 | System.out.println(line.split(" : ")[1].trim().matches("\\d+rec\\/s"));
515 | }*/
516 |
517 | class StreamCommandOutputThread extends Thread { // drains the child process's stdout on a separate thread so long-running jobs are less likely to block on a full pipe buffer
518 | private InputStream is;
519 | private String threadString;
520 | private StringBuffer stringOutput;
521 | private DataTableStatus tableStatus;
522 |
523 | public StreamCommandOutputThread(InputStream is, String threadString, DataTableStatus tableStatus, StringBuffer stringOutput) {
524 | this.is = is;
525 | this.stringOutput = stringOutput;
526 | this.tableStatus = tableStatus;
527 | this.threadString = threadString;
528 | }
529 |
530 | public void run() {
531 | String line = null;
532 | InputStreamReader isr = new InputStreamReader(is);
533 | BufferedReader br = new BufferedReader(isr);
534 | boolean hasException = false;
535 | try {
536 | while ((line = br.readLine()) != null) {
537 | stringOutput.append(threadString+line).append("\n");
538 | if (!hasException && line!=null && (line.contains("DataXException") || line.contains("SQLException"))){
539 | hasException = true;
540 | tableStatus.setHasException(true);
541 | }
542 | readExecueOutputLineStatus(line, tableStatus);
543 | }
544 | stringOutput.append(threadString+"execute finished in streamCommandthread!");
545 | } catch (IOException ioe) {
546 | log.error(ioe.getMessage(), ioe);
547 | hasException = true;
548 | tableStatus.setHasException(true);
549 | } finally {
550 | try {
551 | br.close();
552 | isr.close();
553 | } catch (IOException e) {
554 | log.error(e.getMessage(), e);
555 | hasException = true;
556 | tableStatus.setHasException(true);
557 | }
558 | }
559 | }
560 | }
561 | }
--------------------------------------------------------------------------------