├── docs ├── error.md ├── developer.md ├── relation.md ├── settings.md ├── zh-error.md ├── zh-developer.md ├── zh-relation.md ├── zh-settings.md ├── zh-install.md └── install.md ├── META-INF └── MANIFEST.MF ├── src ├── META-INF │ └── MANIFEST.MF ├── main │ ├── java │ │ └── org │ │ │ └── fly │ │ │ └── sync │ │ │ ├── contract │ │ │ ├── DaoInterface.java │ │ │ ├── ModelInterface.java │ │ │ ├── AbstractRelation.java │ │ │ ├── AbstractWriter.java │ │ │ ├── AbstractAction.java │ │ │ ├── AbstractRelateAction.java │ │ │ ├── AbstractRecordAction.java │ │ │ ├── DatabaseListener.java │ │ │ ├── DbFactory.java │ │ │ ├── AbstractLifeCycle.java │ │ │ └── AbstractConnector.java │ │ │ ├── mysql │ │ │ ├── type │ │ │ │ ├── MySQLJson.java │ │ │ │ └── MySQLJsonSerializer.java │ │ │ ├── model │ │ │ │ ├── DatabaseDao.java │ │ │ │ ├── TableDao.java │ │ │ │ ├── VariableDao.java │ │ │ │ ├── Variable.java │ │ │ │ ├── ColumnDao.java │ │ │ │ ├── Database.java │ │ │ │ ├── Records.java │ │ │ │ ├── Column.java │ │ │ │ ├── Table.java │ │ │ │ └── Record.java │ │ │ ├── parser │ │ │ │ ├── PositionParser.java │ │ │ │ └── InsertParser.java │ │ │ ├── relation │ │ │ │ ├── Relation.java │ │ │ │ └── RelationRecords.java │ │ │ └── Dumper.java │ │ │ ├── action │ │ │ ├── NullAction.java │ │ │ ├── DeleteRelateAction.java │ │ │ ├── InsertRelateAction.java │ │ │ ├── DeleteAction.java │ │ │ ├── UpdateRelateAction.java │ │ │ ├── InsertAction.java │ │ │ ├── UpdateAction.java │ │ │ ├── ChangePositionAction.java │ │ │ └── ReportAction.java │ │ │ ├── setting │ │ │ ├── Config.java │ │ │ ├── BinLog.java │ │ │ └── Setting.java │ │ │ ├── exception │ │ │ ├── EsException.java │ │ │ ├── DumpException.java │ │ │ ├── FatalException.java │ │ │ ├── ConfigException.java │ │ │ ├── EsFatalException.java │ │ │ ├── RunningException.java │ │ │ ├── DumpFatalException.java │ │ │ ├── CanalFatalException.java │ │ │ ├── OutOfRetryException.java │ │ │ ├── DisconnectionException.java │ │ │ ├── RelationException.java │ │ │ ├── 
ColumnNotFoundException.java │ │ │ ├── RecordNotFoundException.java │ │ │ └── BinLogFormatException.java │ │ │ ├── es │ │ │ ├── UpdateByQuery.java │ │ │ ├── Writer.java │ │ │ └── Es.java │ │ │ ├── executor │ │ │ ├── Statistic.java │ │ │ ├── Consumer.java │ │ │ ├── Emiter.java │ │ │ └── Executor.java │ │ │ ├── Main.java │ │ │ ├── CanalTest.java │ │ │ └── canal │ │ │ ├── Server.java │ │ │ ├── Canal.java │ │ │ └── Client.java │ └── resources │ │ └── log4j2.xml ├── assembly.xml └── test │ └── java │ └── BinLogSync.java ├── etc ├── binlog.json ├── config.json ├── log4j2.xml ├── river.json └── template.json ├── .idea ├── vcs.xml ├── misc.xml ├── compiler.xml └── uiDesigner.xml ├── zh-README.md ├── .project ├── .gitignore ├── bin └── me.sh ├── README.md └── pom.xml /docs/error.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/developer.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/relation.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/settings.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/zh-error.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/zh-developer.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/zh-relation.md: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /docs/zh-settings.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /META-INF/MANIFEST.MF: -------------------------------------------------------------------------------- 1 | Manifest-Version: 1.0 2 | Main-Class: org.fly.sync.Main 3 | 4 | -------------------------------------------------------------------------------- /src/META-INF/MANIFEST.MF: -------------------------------------------------------------------------------- 1 | Manifest-Version: 1.0 2 | Main-Class: org.fly.sync.Main 3 | 4 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/DaoInterface.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | public interface DaoInterface { 4 | 5 | } 6 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/ModelInterface.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | public interface ModelInterface { 4 | } 5 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractRelation.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | public interface AbstractRelation { 4 | 5 | void load(); 6 | } 7 | -------------------------------------------------------------------------------- /etc/binlog.json: -------------------------------------------------------------------------------- 1 | // It's example json 2 | // The application will write this file into setting.json@data_dir 3 | { 4 | "db_name": { 5 | "name": "", 6 | "position": 0 7 | } 8 | 9 | } 
-------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractWriter.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | public interface AbstractWriter { 4 | 5 | DbFactory getDbFactory(); 6 | 7 | void updateByQuery(AbstractRelateAction relateAction); 8 | } 9 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import java.io.Serializable; 4 | 5 | public interface AbstractAction extends Serializable { 6 | 7 | void execute(AbstractWriter writer); 8 | String getGroup(); 9 | } 10 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractRelateAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import org.fly.sync.setting.River; 4 | 5 | public interface AbstractRelateAction extends AbstractRecordAction { 6 | 7 | River.Associate getAssociate(); 8 | 9 | } 10 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/type/MySQLJson.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.type; 2 | 3 | public class MySQLJson { 4 | 5 | private String raw; 6 | 7 | public MySQLJson(String json) { 8 | this.raw = json; 9 | } 10 | 11 | public String getRaw() { 12 | return raw; 13 | } 14 | } 15 | 
-------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractRecordAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import org.fly.sync.mysql.model.Record; 4 | import org.elasticsearch.action.DocWriteRequest; 5 | 6 | public interface AbstractRecordAction extends AbstractAction { 7 | 8 | Record getRecord(); 9 | 10 | DocWriteRequest getRequest(DbFactory dbFactory) throws Exception; 11 | } 12 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/DatabaseListener.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import org.fly.sync.setting.BinLog; 4 | 5 | public interface DatabaseListener { 6 | void onCreateTable(String table); 7 | void onInsert(String sql); 8 | void onUpdate(String sql); 9 | void onDelete(String sql); 10 | void onPositionChange(BinLog.Position position); 11 | void onError(Exception error); 12 | } 13 | -------------------------------------------------------------------------------- /etc/config.json: -------------------------------------------------------------------------------- 1 | { 2 | // binlog.json's path 3 | "data_dir": "../data", 4 | // log's path 5 | "log_dir": "../logs", 6 | // mysqldump absolute path 7 | "mysqldump": "mysqldump", 8 | // minimal items to be inserted in one bulk 9 | // min is 128 10 | "bulk_size": 128, 11 | // force flush the pending requests if we don't have enough items >= bulk_size 12 | // unit: ms, min is 200ms 13 | "flush_bulk_time": 200 14 | // 15 | } -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/DatabaseDao.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import 
org.fly.core.database.SqlUtils; 4 | import org.jdbi.v3.sqlobject.config.RegisterConstructorMapper; 5 | import org.jdbi.v3.sqlobject.statement.SqlQuery; 6 | 7 | @RegisterConstructorMapper(Database.class) 8 | public interface DatabaseDao { 9 | 10 | @SqlQuery(SqlUtils.DATABASES + " WHERE SCHEMA_NAME = ?") 11 | Database find(String db); 12 | } 13 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/action/NullAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | 4 | import org.fly.sync.contract.AbstractAction; 5 | import org.fly.sync.contract.AbstractWriter; 6 | import io.reactivex.annotations.Nullable; 7 | 8 | public class NullAction implements AbstractAction { 9 | 10 | public void execute(AbstractWriter writer) { 11 | 12 | } 13 | 14 | @Override 15 | public String getGroup() { 16 | return Nullable.class.getName(); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/DbFactory.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import com.fasterxml.jackson.databind.ObjectMapper; 4 | import org.fly.sync.es.Es; 5 | import org.fly.sync.executor.Statistic; 6 | import org.fly.sync.mysql.MySql; 7 | import org.fly.sync.setting.River; 8 | 9 | public interface DbFactory { 10 | 11 | Es getEs(); 12 | MySql getMySql(); 13 | River.Database getRiverDatabase(); 14 | Statistic getStatistic(); 15 | ObjectMapper getJsonMapper(); 16 | 17 | } 18 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/TableDao.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.core.database.SqlUtils; 4 | import org.fly.sync.contract.DaoInterface; 5 | 
import org.jdbi.v3.sqlobject.config.RegisterConstructorMapper; 6 | import org.jdbi.v3.sqlobject.statement.SqlQuery; 7 | 8 | @RegisterConstructorMapper(Table.class) 9 | public interface TableDao extends DaoInterface { 10 | 11 | @SqlQuery(SqlUtils.TABLES + " AND TABLE_NAME = ?") 12 | Table find(String db, String table); 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/VariableDao.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.core.database.SqlUtils; 4 | import org.fly.sync.contract.DaoInterface; 5 | import org.jdbi.v3.sqlobject.config.RegisterConstructorMapper; 6 | import org.jdbi.v3.sqlobject.statement.SqlQuery; 7 | 8 | @RegisterConstructorMapper(Variable.class) 9 | public interface VariableDao extends DaoInterface { 10 | 11 | @SqlQuery(SqlUtils.VARIABLES + " LIKE ?") 12 | Variable find(String variableName); 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/Variable.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.sync.contract.ModelInterface; 4 | 5 | import java.beans.ConstructorProperties; 6 | 7 | public class Variable implements ModelInterface { 8 | public String variableName; 9 | public String value; 10 | 11 | @ConstructorProperties({"Variable_name", "Value"}) 12 | public Variable(String variableName, String value) { 13 | this.variableName = variableName; 14 | this.value = value; 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/setting/Config.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.setting; 2 | 3 | import com.fasterxml.jackson.annotation.JsonProperty; 4 
| import org.fly.core.text.json.Jsonable; 5 | 6 | import java.io.File; 7 | 8 | public class Config extends Jsonable { 9 | 10 | @JsonProperty("data_dir") public File dataDir; 11 | @JsonProperty("log_dir") public File logDir; 12 | public String mysqldump = "mysqldump"; 13 | @JsonProperty("bulk_size") public int bulkSize = 128; 14 | @JsonProperty("flush_bulk_time") public int flushBulkTime = 200; 15 | 16 | } 17 | -------------------------------------------------------------------------------- /.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /.idea/compiler.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/main/resources/log4j2.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /zh-README.md: -------------------------------------------------------------------------------- 1 | # MySQL - ElasticSearch Synchronization 2 | 3 | 一个**实时**、**无数据遗失**、**支持一对一关系**的Mysql -> ElasticSearch 同步工具。 4 | 5 | 基于[alibaba/canal](https://github.com/alibaba/canal), [RxJava](https://github.com/ReactiveX/RxJava) 6 | 7 | ## 版本 8 | 9 | - 1.0-beta : 2018-09-04 10 | 11 | ## 手册 12 | 13 | - [安装、启动](docs/zh-install.md) 14 | - [配置](docs/zh-settings.md) 15 | - [一对一关系导入](docs/zh-relation.md) 16 | - [错误](docs/zh-error.md) 17 | - [二次开发](docs/zh-developer.md) 18 | 19 | ## 最低配置 20 | 21 | - Java 1.8 + 22 | - 2 GB 内存 + 23 | - 2 核 CPU + 24 | - 100M 剩余空间 25 | 26 | ## 特性 27 | 28 | - 支持Elastic Search 5.x ~ 6.x -------------------------------------------------------------------------------- 
/src/main/java/org/fly/sync/exception/EsException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class EsException extends Exception { 4 | public EsException(String message) { 5 | super(message); 6 | } 7 | 8 | public EsException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public EsException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public EsException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /.project: -------------------------------------------------------------------------------- 1 | 2 | 3 | mysql_es 4 | 5 | 6 | 7 | 8 | 9 | org.eclipse.jdt.core.javabuilder 10 | 11 | 12 | 13 | 14 | org.eclipse.m2e.core.maven2Builder 15 | 16 | 17 | 18 | 19 | 20 | org.eclipse.jdt.core.javanature 21 | org.eclipse.m2e.core.maven2Nature 22 | 23 | 24 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/DumpException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class DumpException extends Exception { 4 | 5 | public DumpException(String message) { 6 | super(message); 7 | } 8 | 9 | public DumpException(String message, Throwable cause) { 10 | super(message, cause); 11 | } 12 | 13 | public DumpException(Throwable cause) { 14 | super(cause); 15 | } 16 | 17 | public DumpException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 18 | super(message, cause, enableSuppression, writableStackTrace); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- 
/src/main/java/org/fly/sync/exception/FatalException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class FatalException extends RuntimeException { 4 | public FatalException(String message) { 5 | super(message); 6 | } 7 | 8 | public FatalException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public FatalException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public FatalException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/ConfigException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class ConfigException extends Exception { 4 | 5 | public ConfigException(String message) { 6 | super(message); 7 | } 8 | 9 | public ConfigException(String message, Throwable cause) { 10 | super(message, cause); 11 | } 12 | 13 | public ConfigException(Throwable cause) { 14 | super(cause); 15 | } 16 | 17 | 18 | public ConfigException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 19 | super(message, cause, enableSuppression, writableStackTrace); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/EsFatalException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class EsFatalException extends FatalException{ 4 | public EsFatalException(String message) { 5 | super(message); 6 | } 7 | 8 | public EsFatalException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public 
EsFatalException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public EsFatalException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/RunningException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class RunningException extends FatalException { 4 | public RunningException(String message) { 5 | super(message); 6 | } 7 | 8 | public RunningException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public RunningException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public RunningException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /etc/data 2 | /etc/logs 3 | sample 4 | out 5 | target 6 | .gradle 7 | /build/ 8 | 9 | # Ignore Gradle GUI config 10 | gradle-app.setting 11 | 12 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) 13 | !gradle-wrapper.jar 14 | 15 | # Cache of project 16 | .gradletasknamecache 17 | 18 | # # Work around https://youtrack.jetbrains.com/issue/IDEA-116898 19 | # gradle/wrapper/gradle-wrapper.properties 20 | 21 | target/ 22 | pom.xml.tag 23 | pom.xml.releaseBackup 24 | pom.xml.versionsBackup 25 | pom.xml.next 26 | release.properties 27 | dependency-reduced-pom.xml 28 | buildNumber.properties 29 | .mvn/timing.properties 30 | .mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- 
/src/main/java/org/fly/sync/exception/DumpFatalException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class DumpFatalException extends FatalException { 4 | public DumpFatalException(String message) { 5 | super(message); 6 | } 7 | 8 | public DumpFatalException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public DumpFatalException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public DumpFatalException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/CanalFatalException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class CanalFatalException extends FatalException { 4 | public CanalFatalException(String message) { 5 | super(message); 6 | } 7 | 8 | public CanalFatalException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public CanalFatalException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public CanalFatalException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/OutOfRetryException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class OutOfRetryException extends FatalException { 4 | public OutOfRetryException(String message) { 5 | super(message); 6 | } 7 | 8 | public OutOfRetryException(String message, Throwable cause) { 9 | 
super(message, cause); 10 | } 11 | 12 | public OutOfRetryException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public OutOfRetryException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/DisconnectionException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class DisconnectionException extends Exception { 4 | public DisconnectionException(String message) { 5 | super(message); 6 | } 7 | 8 | public DisconnectionException(String message, Throwable cause) { 9 | super(message, cause); 10 | } 11 | 12 | public DisconnectionException(Throwable cause) { 13 | super(cause); 14 | } 15 | 16 | public DisconnectionException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 17 | super(message, cause, enableSuppression, writableStackTrace); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/parser/PositionParser.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.parser; 2 | 3 | import org.fly.sync.setting.BinLog; 4 | 5 | import java.util.regex.Matcher; 6 | import java.util.regex.Pattern; 7 | 8 | public class PositionParser { 9 | 10 | public static BinLog.Position parse(String sql) 11 | { 12 | Pattern pattern = Pattern.compile("^\\s?+CHANGE\\s?+MASTER\\s?+TO\\s?+MASTER_LOG_FILE\\s?+=\\s?+['\"]([^'\"]*?)['\"],\\s?+MASTER_LOG_POS\\s?+=\\s?+([\\d]*?);", Pattern.CASE_INSENSITIVE | Pattern.DOTALL); 13 | Matcher matcher = pattern.matcher(sql); 14 | 15 | return matcher.find() ? 
new BinLog.Position(matcher.group(1), Long.parseLong(matcher.group(2))) : null; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/RelationException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | public class RelationException extends RuntimeException { 4 | public RelationException() { 5 | } 6 | 7 | public RelationException(String message) { 8 | super(message); 9 | } 10 | 11 | public RelationException(String message, Throwable cause) { 12 | super(message, cause); 13 | } 14 | 15 | public RelationException(Throwable cause) { 16 | super(cause); 17 | } 18 | 19 | public RelationException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { 20 | super(message, cause, enableSuppression, writableStackTrace); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractLifeCycle.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import org.fly.sync.exception.RunningException; 4 | 5 | public class AbstractLifeCycle { 6 | protected volatile boolean running = false; // 是否处于运行中 7 | 8 | public boolean isStart() { 9 | return running; 10 | } 11 | 12 | public void start() { 13 | if (running) { 14 | throw new RunningException(this.getClass().getName() + " has startup , don't repeat start"); 15 | } 16 | 17 | running = true; 18 | } 19 | 20 | public void stop() { 21 | if (!running) { 22 | throw new RunningException(this.getClass().getName() + " isn't start , please check"); 23 | } 24 | 25 | running = false; 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/ColumnDao.java: 
-------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.core.database.SqlUtils; 4 | import org.jdbi.v3.sqlobject.config.RegisterConstructorMapper; 5 | import org.jdbi.v3.sqlobject.customizer.BindList; 6 | import org.jdbi.v3.sqlobject.statement.SqlQuery; 7 | 8 | import java.util.List; 9 | 10 | @RegisterConstructorMapper(Column.class) 11 | public interface ColumnDao { 12 | 13 | @SqlQuery(SqlUtils.COLUMNS) 14 | List all(String dbName, String tableName); 15 | 16 | @SqlQuery(SqlUtils.COLUMNS + " AND COLUMN_NAME IN ()") 17 | List findMany(@BindList("ColumnNames") List columnNames); 18 | 19 | @SqlQuery(SqlUtils.COLUMN_NAMES) 20 | List allNames(String dbName, String tableName); 21 | 22 | } 23 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/type/MySQLJsonSerializer.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.type; 2 | 3 | import com.fasterxml.jackson.core.JsonGenerator; 4 | import com.fasterxml.jackson.databind.JsonSerializer; 5 | import com.fasterxml.jackson.databind.SerializerProvider; 6 | import com.fasterxml.jackson.databind.ser.std.StdSerializer; 7 | 8 | import java.io.IOException; 9 | 10 | public class MySQLJsonSerializer extends JsonSerializer { 11 | 12 | @Override 13 | public void serialize(MySQLJson value, JsonGenerator gen, SerializerProvider provider) throws IOException { 14 | String raw = value.getRaw(); 15 | if (raw == null || raw.isEmpty()) 16 | gen.writeNull(); 17 | else 18 | gen.writeString(raw); 19 | //gen.writeRawValue(raw); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/ColumnNotFoundException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | import 
java.sql.SQLException; 4 | 5 | public class ColumnNotFoundException extends SQLException { 6 | public ColumnNotFoundException(String reason, String SQLState, int vendorCode) { 7 | super(reason, SQLState, vendorCode); 8 | } 9 | 10 | public ColumnNotFoundException(String reason, String SQLState) { 11 | super(reason, SQLState); 12 | } 13 | 14 | public ColumnNotFoundException(String reason) { 15 | super(reason); 16 | } 17 | 18 | public ColumnNotFoundException(Throwable cause) { 19 | super(cause); 20 | } 21 | 22 | public ColumnNotFoundException(String reason, Throwable cause) { 23 | super(reason, cause); 24 | } 25 | 26 | public ColumnNotFoundException(String reason, String sqlState, Throwable cause) { 27 | super(reason, sqlState, cause); 28 | } 29 | 30 | public ColumnNotFoundException(String reason, String sqlState, int vendorCode, Throwable cause) { 31 | super(reason, sqlState, vendorCode, cause); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/RecordNotFoundException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | import java.sql.SQLException; 4 | 5 | public class RecordNotFoundException extends SQLException { 6 | public RecordNotFoundException(String reason, String SQLState, int vendorCode) { 7 | super(reason, SQLState, vendorCode); 8 | } 9 | 10 | public RecordNotFoundException(String reason, String SQLState) { 11 | super(reason, SQLState); 12 | } 13 | 14 | public RecordNotFoundException(String reason) { 15 | super(reason); 16 | } 17 | 18 | public RecordNotFoundException(Throwable cause) { 19 | super(cause); 20 | } 21 | 22 | public RecordNotFoundException(String reason, Throwable cause) { 23 | super(reason, cause); 24 | } 25 | 26 | public RecordNotFoundException(String reason, String sqlState, Throwable cause) { 27 | super(reason, sqlState, cause); 28 | } 29 | 30 | public 
RecordNotFoundException(String reason, String sqlState, int vendorCode, Throwable cause) { 31 | super(reason, sqlState, vendorCode, cause); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/exception/BinLogFormatException.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.exception; 2 | 3 | import java.sql.SQLException; 4 | 5 | public class BinLogFormatException extends SQLException { 6 | public BinLogFormatException(String reason, String SQLState, int vendorCode) { 7 | super(reason, SQLState, vendorCode); 8 | } 9 | 10 | public BinLogFormatException(String reason, String SQLState) { 11 | super(reason, SQLState); 12 | } 13 | 14 | public BinLogFormatException(String reason) { 15 | super(reason); 16 | } 17 | 18 | public BinLogFormatException() { 19 | } 20 | 21 | public BinLogFormatException(Throwable cause) { 22 | super(cause); 23 | } 24 | 25 | public BinLogFormatException(String reason, Throwable cause) { 26 | super(reason, cause); 27 | } 28 | 29 | public BinLogFormatException(String reason, String sqlState, Throwable cause) { 30 | super(reason, sqlState, cause); 31 | } 32 | 33 | public BinLogFormatException(String reason, String sqlState, int vendorCode, Throwable cause) { 34 | super(reason, sqlState, vendorCode, cause); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/Database.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.sync.contract.ModelInterface; 4 | import org.jdbi.v3.core.mapper.reflect.JdbiConstructor; 5 | 6 | import java.beans.ConstructorProperties; 7 | 8 | 9 | public class Database implements ModelInterface { 10 | 11 | public String CATALOG_NAME; 12 | public String SCHEMA_NAME; 13 | public String DEFAULT_CHARACTER_SET_NAME; 14 | 
public String DEFAULT_COLLATION_NAME; 15 | public String SQL_PATH; 16 | 17 | @JdbiConstructor 18 | @ConstructorProperties({"CATALOG_NAME", "SCHEMA_NAME", "DEFAULT_CHARACTER_SET_NAME", "DEFAULT_COLLATION_NAME", "SQL_PATH"}) 19 | public Database(String CATALOG_NAME, 20 | String SCHEMA_NAME, 21 | String DEFAULT_CHARACTER_SET_NAME, 22 | String DEFAULT_COLLATION_NAME, 23 | String SQL_PATH) { 24 | this.CATALOG_NAME = CATALOG_NAME; 25 | this.SCHEMA_NAME = SCHEMA_NAME; 26 | this.DEFAULT_CHARACTER_SET_NAME = DEFAULT_CHARACTER_SET_NAME; 27 | this.DEFAULT_COLLATION_NAME = DEFAULT_COLLATION_NAME; 28 | this.SQL_PATH = SQL_PATH; 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/assembly.xml: -------------------------------------------------------------------------------- 1 | 4 | release 5 | 6 | zip 7 | 8 | mysql_es 9 | true 10 | 11 | 12 | etc 13 | conf 14 | 15 | 16 | 17 | ${project.build.directory}/lib 18 | lib 19 | 20 | 21 | ${project.build.directory} 22 | 23 | ${project.artifactId}-${project.version}.${project.packaging} 24 | 25 | lib 26 | 27 | 28 | bin 29 | bin 30 | 31 | 32 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/es/UpdateByQuery.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.es; 2 | 3 | import com.fasterxml.jackson.annotation.JsonIgnore; 4 | import org.fly.core.text.json.Jsonable; 5 | 6 | import java.util.HashMap; 7 | import java.util.Map; 8 | 9 | public class UpdateByQuery extends Jsonable { 10 | public Query query = new Query(); 11 | public Script script = new Script(); 12 | 13 | public static class Query { 14 | 15 | public Map term = new HashMap<>(); 16 | 17 | @JsonIgnore 18 | public void setField(String key, Object value){ 19 | term.put(key, value); 20 | } 21 | 22 | } 23 | 24 | public static class Script { 25 | public String lang = "painless"; 26 | public Map params; 27 | 28 | 29 | 
public String getSource() 30 | { 31 | if (null == params) return ""; 32 | 33 | StringBuilder sb = new StringBuilder(); 34 | for (String key: params.keySet() 35 | ) { 36 | sb.append("ctx._source[\"") 37 | .append(key) 38 | .append("\"]") 39 | .append("=params[\"") 40 | .append(key) 41 | .append("\"];"); 42 | } 43 | return sb.toString(); 44 | } 45 | 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/action/DeleteRelateAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | import org.fly.sync.contract.AbstractRelateAction; 4 | import org.fly.sync.contract.AbstractWriter; 5 | import org.fly.sync.contract.DbFactory; 6 | import org.fly.sync.mysql.model.Record; 7 | import org.fly.sync.setting.River; 8 | import org.elasticsearch.action.DocWriteRequest; 9 | 10 | public class DeleteRelateAction implements AbstractRelateAction { 11 | 12 | private Record record; 13 | private River.Associate associate; 14 | 15 | public DeleteRelateAction(Record record, River.Associate associate) { 16 | this.record = record; 17 | this.associate = associate; 18 | } 19 | 20 | public static DeleteRelateAction create(Record record, River.Associate associate) { 21 | return new DeleteRelateAction(record, associate); 22 | } 23 | 24 | @Override 25 | public Record getRecord() { 26 | return record; 27 | } 28 | 29 | @Override 30 | public DocWriteRequest getRequest(DbFactory dbFactory) throws Exception { 31 | return null; 32 | } 33 | 34 | @Override 35 | public void execute(AbstractWriter writer) { 36 | writer.updateByQuery(this); 37 | } 38 | 39 | @Override 40 | public String getGroup() { 41 | return AbstractRelateAction.class.getName(); 42 | } 43 | 44 | @Override 45 | public River.Associate getAssociate() { 46 | return associate; 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- 
/**
 * Shared counters for the sync pipeline.  Each counter is an
 * {@link AtomicLong} handed out directly, so producers increment and the
 * reporter reads without any additional locking.
 */
public class Statistic {

    private final AtomicLong subscribeCount = new AtomicLong();
    private final AtomicLong recordCount = new AtomicLong();
    private final AtomicLong dumpCount = new AtomicLong();
    private final AtomicLong canalCount = new AtomicLong();
    private final AtomicLong insertCount = new AtomicLong();
    private final AtomicLong updateCount = new AtomicLong();
    private final AtomicLong deleteCount = new AtomicLong();
    private final AtomicLong relateCount = new AtomicLong();

    // Wall-clock time this instance was created, used for uptime reports.
    private final long createdAt = System.currentTimeMillis();

    public Statistic() {
    }

    /** @return creation timestamp in epoch milliseconds */
    public long getCreatedAt() {
        return createdAt;
    }

    public AtomicLong getSubscribeCount() {
        return subscribeCount;
    }

    public AtomicLong getRecordCount() {
        return recordCount;
    }

    public AtomicLong getDumpCount() {
        return dumpCount;
    }

    public AtomicLong getCanalCount() {
        return canalCount;
    }

    public AtomicLong getInsertCount() {
        return insertCount;
    }

    public AtomicLong getUpdateCount() {
        return updateCount;
    }

    public AtomicLong getDeleteCount() {
        return deleteCount;
    }

    public AtomicLong getRelateCount() {
        return relateCount;
    }
}
22 | public static DeleteAction create(Record record) { 23 | return new DeleteAction(record); 24 | } 25 | 26 | @Override 27 | public void execute(AbstractWriter writer) { 28 | writer.getDbFactory().getStatistic().getDeleteCount().incrementAndGet(); 29 | } 30 | 31 | @Override 32 | public DocWriteRequest getRequest(DbFactory dbFactory) throws Exception { 33 | River.Table table = dbFactory.getRiverDatabase().getTable(record.table); 34 | 35 | return new DeleteRequest(table.buildIndexName(record), table.type, record.getID(table)); 36 | } 37 | 38 | @Override 39 | public Record getRecord() { 40 | return record; 41 | } 42 | 43 | @Override 44 | public String getGroup() { 45 | return DeleteAction.class.getName(); 46 | } 47 | 48 | 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/action/UpdateRelateAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | import org.fly.sync.contract.AbstractRelateAction; 4 | import org.fly.sync.contract.AbstractWriter; 5 | import org.fly.sync.contract.DbFactory; 6 | import org.fly.sync.mysql.model.Record; 7 | import org.fly.sync.setting.River; 8 | import org.elasticsearch.action.DocWriteRequest; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | public class UpdateRelateAction implements AbstractRelateAction { 13 | public final static Logger logger = LoggerFactory.getLogger(UpdateRelateAction.class); 14 | 15 | private Record record; 16 | private River.Associate associate; 17 | 18 | public UpdateRelateAction(Record record, River.Associate associate) { 19 | this.record = record; 20 | this.associate = associate; 21 | } 22 | 23 | public static UpdateRelateAction create(Record record, River.Associate associate) { 24 | return new UpdateRelateAction(record, associate); 25 | } 26 | 27 | @Override 28 | public Record getRecord() { 29 | return record; 30 | } 31 | 32 | @Override 33 | 
public DocWriteRequest getRequest(DbFactory dbFactory) throws Exception { 34 | return null; 35 | } 36 | 37 | @Override 38 | public void execute(AbstractWriter writer) { 39 | writer.updateByQuery(this); 40 | } 41 | 42 | @Override 43 | public String getGroup() { 44 | return AbstractRelateAction.class.getName(); 45 | } 46 | 47 | @Override 48 | public River.Associate getAssociate() { 49 | return associate; 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /docs/zh-install.md: -------------------------------------------------------------------------------- 1 | # 安装 2 | 3 | ## 要求 4 | 5 | ### 开启MySQL的binlog 6 | 7 | > 警告: 请确定服务器有足够的剩余空间开启BinLOG. 8 | > 9 | > 开启BinLOG之前的数据也会被导入,因为工具采用了两套模式同步数据,所以无需担心 10 | 11 | `binlog_format` 和 `binlog_row_image` 的配置非常重要的,必须设置为 `ROW` 和 `FULL`: 12 | 13 | ```ini 14 | log_bin=mysql-bin 15 | binlog_format=ROW 16 | binlog_row_image=FULL 17 | explicit_defaults_for_timestamp=true 18 | log-slave-updates=1 19 | ; 在BinLog打开的时候需要设置一个ID,集群内唯一,1001之后的数字已经被本工具使用 20 | server-id=1 21 | ``` 22 | 23 | ### 创建 MySQL 用户 24 | 25 | 这个MySQL的用户必须拥有如下权限: 26 | 27 | `SELECT, SHOW VIEW, Reload, REPLICATION SLAVE, REPLICATION CLIENT` 28 | 29 | 可以按照下面的语句创造用户: 30 | 31 | ```sql 32 | CREATE USER 'canal'@'%' IDENTIFIED BY 'Your Password'; 33 | GRANT SELECT, SHOW VIEW, Reload, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%'; 34 | FLUSH PRIVILEGES; 35 | ``` 36 | 37 | ## 安装Jre 38 | 39 | Java 1.8+ 40 | 41 | CentOS/Redhat 42 | 43 | ```sh 44 | yum install java java-devel 45 | ``` 46 | 47 | Fedora 48 | 49 | ```sh 50 | dnf -y install \ 51 | unzip \ 52 | java-1.8.0-openjdk \ 53 | java-1.8.0-openjdk-devel 54 | ``` 55 | 56 | ## 下载 & 安装 57 | 58 | 1. 到 [release](https://github.com/fly-studio/mysql-es/releases) 页面下载最新的稳定版本 59 | 60 | 文件名类似: `mysql_es-x.x.x-release.zip`. 61 | 62 | 2. 解压. 63 | 64 | 3. 进入 `mysql_es` 文件夹. 65 | 66 | 4. 设置 `config/config.json`, `config/river.json`. 
67 | 68 | 可以查看 [配置](settings.md) 页面 69 | 70 | ## 运行 71 | 72 | 1. 给 `bin/me.sh` 文件添加 `X` 权限,比如: `chmod +x bin/me.sh`, 73 | 74 | 2. 启动 `bin/me.sh start` 75 | 76 | 3. 支持的指令有: 77 | 78 | ```sh 79 | bin/me.sh start|stop|restart|info|status 80 | ``` 81 | 82 | ## 卸载 83 | 84 | 直接删除 `mysql_es` 目录即可. -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/action/InsertAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | import org.fly.sync.contract.AbstractRecordAction; 4 | import org.fly.sync.contract.AbstractWriter; 5 | import org.fly.sync.contract.DbFactory; 6 | import org.fly.sync.mysql.model.Record; 7 | import org.fly.sync.setting.River; 8 | import org.elasticsearch.action.DocWriteRequest; 9 | import org.elasticsearch.action.index.IndexRequest; 10 | import org.elasticsearch.common.xcontent.XContentType; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | public class InsertAction implements AbstractRecordAction { 15 | 16 | public final static Logger logger = LoggerFactory.getLogger(InsertAction.class); 17 | private Record record; 18 | 19 | public InsertAction(Record record) { 20 | this.record = record; 21 | } 22 | 23 | public static InsertAction create(Record record) 24 | { 25 | return new InsertAction(record); 26 | } 27 | 28 | @Override 29 | public void execute(AbstractWriter writer) { 30 | writer.getDbFactory().getStatistic().getInsertCount().incrementAndGet(); 31 | } 32 | 33 | @Override 34 | public DocWriteRequest getRequest(DbFactory dbFactory) throws Exception { 35 | 36 | River.Table table = dbFactory.getRiverDatabase().getTable(record.table); 37 | 38 | return new IndexRequest(table.buildIndexName(record), table.type, record.getID(table)) 39 | .source(record.toJson(dbFactory.getJsonMapper()), XContentType.JSON); 40 | } 41 | 42 | public Record getRecord() { 43 | return record; 44 | } 45 | 46 | @Override 47 | 
public String getGroup() { 48 | return AbstractRecordAction.class.getName(); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/action/UpdateAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | import org.fly.sync.contract.AbstractRecordAction; 4 | import org.fly.sync.contract.AbstractWriter; 5 | import org.fly.sync.contract.DbFactory; 6 | import org.fly.sync.mysql.model.Record; 7 | import org.fly.sync.setting.River; 8 | import org.elasticsearch.action.DocWriteRequest; 9 | import org.elasticsearch.action.update.UpdateRequest; 10 | import org.elasticsearch.common.xcontent.XContentType; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | public class UpdateAction implements AbstractRecordAction { 15 | 16 | public final static Logger logger = LoggerFactory.getLogger(UpdateAction.class); 17 | 18 | private Record record; 19 | 20 | public UpdateAction(Record record) { 21 | this.record = record; 22 | } 23 | 24 | public static UpdateAction create(Record record) { 25 | return new UpdateAction(record); 26 | } 27 | 28 | public void execute(AbstractWriter writer) { 29 | writer.getDbFactory().getStatistic().getUpdateCount().incrementAndGet(); 30 | } 31 | 32 | @Override 33 | public DocWriteRequest getRequest(DbFactory dbFactory) throws Exception { 34 | 35 | River.Table table = dbFactory.getRiverDatabase().getTable(record.table); 36 | 37 | return new UpdateRequest(table.buildIndexName(record), table.type, record.getID(table)) 38 | .doc(record.toJson(dbFactory.getJsonMapper()), XContentType.JSON); 39 | } 40 | 41 | @Override 42 | public Record getRecord() { 43 | return record; 44 | } 45 | 46 | @Override 47 | public String getGroup() { 48 | return AbstractRecordAction.class.getName(); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- 
/src/main/java/org/fly/sync/action/ChangePositionAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | import org.fly.sync.contract.AbstractAction; 4 | import org.fly.sync.contract.AbstractWriter; 5 | import org.fly.sync.setting.BinLog; 6 | import org.fly.sync.setting.Setting; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | public class ChangePositionAction implements AbstractAction { 11 | public final static Logger logger = LoggerFactory.getLogger(ChangePositionAction.class); 12 | private BinLog.Position binLogPosition; 13 | 14 | public ChangePositionAction(BinLog.Position binLogPosition) { 15 | this.binLogPosition = binLogPosition; 16 | } 17 | 18 | public ChangePositionAction(String name, long position) { 19 | binLogPosition = new BinLog.Position(name, position); 20 | } 21 | 22 | public static ChangePositionAction create(BinLog.Position binLogPosition) { 23 | return new ChangePositionAction(binLogPosition); 24 | } 25 | 26 | public static ChangePositionAction create(String name, long position) { 27 | return new ChangePositionAction(name, position); 28 | } 29 | 30 | @Override 31 | public String getGroup() { 32 | return ChangePositionAction.class.getName(); 33 | } 34 | 35 | @Override 36 | public void execute(AbstractWriter writer) { 37 | 38 | try 39 | { 40 | Setting.binLog.set(writer.getDbFactory().getRiverDatabase().schemaName, binLogPosition); 41 | 42 | logger.info("Write BinLog Position: {} : [{}: {}]", writer.getDbFactory().getRiverDatabase().schemaName, binLogPosition.name, binLogPosition.position); 43 | 44 | Setting.saveBinLog(); 45 | } catch (Exception e) 46 | { 47 | logger.error("Save BinLog Position Failed.", e); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /docs/install.md: -------------------------------------------------------------------------------- 1 | # Install 2 | 3 | ## Requirements 
4 | 5 | ### Enable MySQL binlog 6 | 7 | > Warning: Computer must have enough free space for enabling bin-log. 8 | > 9 | > Don't worry about your historical data, this tool could sync these via other way. 10 | 11 | `binlog_format` AND `binlog_row_image` are very important,MUST be `ROW` and `FULL`: 12 | 13 | ```ini 14 | log_bin=mysql-bin 15 | binlog_format=ROW 16 | binlog_row_image=FULL 17 | explicit_defaults_for_timestamp=true 18 | log-slave-updates=1 19 | ; MUST set a server-id, this tool used the 1001+ for slave-id 20 | server-id=1 21 | ``` 22 | 23 | ### MySQL user privileges 24 | 25 | The user MUST had these privileges: 26 | 27 | `SELECT, SHOW VIEW, Reload, REPLICATION SLAVE, REPLICATION CLIENT` 28 | 29 | Create a user like this: 30 | 31 | ```sql 32 | CREATE USER 'canal'@'%' IDENTIFIED BY 'Your Password'; 33 | GRANT SELECT, SHOW VIEW, Reload, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%'; 34 | FLUSH PRIVILEGES; 35 | ``` 36 | 37 | ## Jre 38 | 39 | Java 1.8+ 40 | 41 | CentOS/Redhat 42 | 43 | ```sh 44 | yum install java java-devel 45 | ``` 46 | 47 | Fedora 48 | 49 | ```sh 50 | dnf -y install \ 51 | java-1.8.0-openjdk \ 52 | java-1.8.0-openjdk-devel 53 | ``` 54 | 55 | 56 | ## Download & Install 57 | 58 | 1. Goto [release](https://github.com/fly-studio/mysql-es/releases) page, and download the newest stable version. 59 | 60 | filename like: `mysql_es-x.x.x-release.zip`. 61 | 62 | 2. Unzip it. 63 | 64 | 3. Goto `mysql_es` folder. 65 | 66 | 4. Set `config/config.json`, `config/river.json`. 67 | 68 | See [Settings](settings.md) page 69 | 70 | ## Run 71 | 72 | 1. Add `X` permission to file: `chmod +x bin/me.sh`, 73 | 74 | 2. `bin/me.sh start` 75 | 76 | 3. all usages: 77 | 78 | ```sh 79 | bin/me.sh start|stop|restart|info|status 80 | ``` 81 | 82 | ## Uninstall 83 | 84 | Delete the `mysql_es` folder. 
-------------------------------------------------------------------------------- /etc/log4j2.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | $${sys:me.log.path} 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | %d{yyy-MM-dd HH:mm:ss.SSS} [%t] %level %logger{1.} - %msg%n 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | %d{yyy-MM-dd HH:mm:ss.SSS} [%t] %level %logger{1.} - %msg%n 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/Records.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.sync.contract.AbstractRecordAction; 4 | import com.sun.istack.NotNull; 5 | import com.sun.istack.Nullable; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.util.ArrayList; 10 | import java.util.List; 11 | 12 | public class Records extends ArrayList { 13 | public final static Logger logger = LoggerFactory.getLogger(Records.class); 14 | 15 | public static Records create(List recordActions) 16 | { 17 | Records records = new Records(); 18 | for (AbstractRecordAction action: recordActions 19 | ) { 20 | records.add(action.getRecord()); 21 | } 22 | 23 | return records; 24 | } 25 | 26 | @NotNull 27 | public Records search(@NotNull String key, @Nullable Object val, boolean strict) 28 | { 29 | Records results = new Records(); 30 | for (Record record: this 31 | ) { 32 | if (record.equals(key, val, strict)) 33 | results.add(record); 34 | } 35 | return results; 36 | } 37 | 38 | @Nullable 39 | public Records search(@NotNull String key, @Nullable Object val) 40 | { 41 | return search(key, val, false); 42 | } 43 | 44 | @Nullable 45 | public Record find(@NotNull String key, @Nullable Object val, boolean strict) 46 | { 47 | for (Record record: this 48 | ) { 49 | if (record.equals(key, 
val, strict)) 50 | return record; 51 | } 52 | 53 | return null; 54 | } 55 | 56 | @Nullable 57 | public Record find(@NotNull String key, @Nullable Object val) 58 | { 59 | return find(key, val, false); 60 | } 61 | 62 | public void addUnique(Record record) { 63 | if (!contains(record)) 64 | add(record); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/action/ReportAction.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.action; 2 | 3 | import org.fly.sync.contract.AbstractAction; 4 | import org.fly.sync.contract.AbstractWriter; 5 | import org.fly.sync.executor.Statistic; 6 | import org.apache.commons.io.FileUtils; 7 | import org.apache.commons.lang.time.DurationFormatUtils; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | public class ReportAction implements AbstractAction { 12 | public final static Logger logger = LoggerFactory.getLogger(ReportAction.class); 13 | 14 | public void execute(AbstractWriter writer) { 15 | Statistic statistic = writer.getDbFactory().getStatistic(); 16 | 17 | long millis = System.currentTimeMillis()- statistic.getCreatedAt(); 18 | String hms = DurationFormatUtils.formatDuration(millis, "d 'days' HH:mm:ss"); 19 | 20 | logger.trace("\nExecute Information:\n" + 21 | "+ Memory Usage: {}\n" + 22 | "+ Duration: {}\n" + 23 | "+ Records: {}\n" + 24 | "\t- Dump: {}\n" + 25 | "\t- Canal: {}\n" + 26 | "\t+ DML:\n" + 27 | "\t\t- Insert: {}\n" + 28 | "\t\t- Update: {}\n" + 29 | "\t\t- Delete: {}\n" + 30 | "- Relate: {}", 31 | FileUtils.byteCountToDisplaySize(Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()), 32 | hms, 33 | statistic.getRecordCount(), 34 | statistic.getDumpCount(), 35 | statistic.getCanalCount(), 36 | statistic.getInsertCount(), 37 | statistic.getUpdateCount(), 38 | statistic.getDeleteCount(), 39 | statistic.getRelateCount() 40 | ); 41 | } 42 | 43 | 
@Override 44 | public String getGroup() { 45 | return ReportAction.class.getName(); 46 | } 47 | 48 | } 49 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/setting/BinLog.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.setting; 2 | 3 | import com.alibaba.otter.canal.protocol.position.LogPosition; 4 | import com.fasterxml.jackson.annotation.JsonIgnore; 5 | import org.fly.core.text.json.Jsonable; 6 | 7 | import java.text.SimpleDateFormat; 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | 11 | public class BinLog extends Jsonable { 12 | public Map lists = new HashMap<>(); 13 | 14 | public String getTimestamp() 15 | { 16 | SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZ"); 17 | return simpleDateFormat.format(System.currentTimeMillis()); 18 | } 19 | 20 | public boolean isEmpty(String db) 21 | { 22 | Position position = get(db); 23 | return position == null || position.isEmpty(); 24 | } 25 | 26 | public void set(String db, Position position) 27 | { 28 | synchronized (BinLog.class) { 29 | lists.put(db, position); 30 | } 31 | } 32 | 33 | public Position get(String db) 34 | { 35 | synchronized (BinLog.class) { 36 | return lists.get(db); 37 | } 38 | } 39 | 40 | public void remove(String db) 41 | { 42 | lists.remove(db); 43 | } 44 | 45 | public static class Position { 46 | public String name = ""; 47 | public long position = 0; 48 | 49 | public Position() 50 | { 51 | 52 | } 53 | 54 | public Position(String name, long position) { 55 | this.name = name; 56 | this.position = position; 57 | } 58 | 59 | public static Position create(LogPosition logPosition) { 60 | return new Position(logPosition.getPostion().getJournalName(), logPosition.getPostion().getPosition()); 61 | } 62 | 63 | public static Position createVirtual() { 64 | return new Position("mysql-bin.000001", 0); 65 | } 66 | 67 | @JsonIgnore 68 | 
public boolean isEmpty() 69 | { 70 | return name == null || name.isEmpty(); 71 | } 72 | 73 | public void reset() 74 | { 75 | name = ""; 76 | position = 0; 77 | } 78 | 79 | public void updateFrom(Position position) { 80 | this.name = position.name; 81 | this.position = position.position; 82 | } 83 | } 84 | 85 | } 86 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/Main.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync; 2 | 3 | import org.fly.sync.executor.Executor; 4 | import org.fly.sync.setting.Setting; 5 | import org.apache.logging.log4j.LogManager; 6 | import org.apache.logging.log4j.core.LoggerContext; 7 | import org.apache.logging.log4j.core.config.Configurator; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | public class Main { 12 | 13 | public final static Logger logger = LoggerFactory.getLogger(Main.class); 14 | private final static Thread mainThread = Thread.currentThread(); 15 | 16 | public static void main(String[] argv) { 17 | 18 | try { 19 | Setting.readSettings(); 20 | } catch (Exception e) { 21 | logger.error(e.getMessage(), e); 22 | return; 23 | } 24 | 25 | logger.info("## set default uncaught exception handler"); 26 | setGlobalUncaughtExceptionHandler(); 27 | 28 | Executor executor = new Executor(); 29 | 30 | Runtime.getRuntime().addShutdownHook(new Thread(() -> { 31 | logger.trace("Shutdown from Runtime hook."); 32 | 33 | if( LogManager.getContext() instanceof LoggerContext ) { 34 | logger.debug("Shutting down log4j2"); 35 | Configurator.shutdown((LoggerContext)LogManager.getContext()); 36 | } else 37 | logger.warn("Unable to shutdown log4j2"); 38 | 39 | executor.stop(); 40 | 41 | while(mainThread.getState() != Thread.State.TERMINATED) { 42 | try { 43 | Thread.sleep(100); 44 | 45 | } catch (InterruptedException e) 46 | { 47 | e.printStackTrace(); 48 | return; 49 | } 50 | } 51 | 52 | 
System.out.println("Exit success."); 53 | }, "Shutdown-Thread")); 54 | 55 | //RxJavaPlugins.setErrorHandler(executor::throwException); 56 | 57 | try { 58 | 59 | executor.start(); 60 | 61 | executor.run(); 62 | 63 | } catch (InterruptedException e) 64 | { 65 | 66 | } catch (Exception e) 67 | { 68 | executor.stop(); 69 | logger.error(e.getMessage(), e); 70 | } 71 | 72 | logger.info("Server down."); 73 | } 74 | 75 | private static void setGlobalUncaughtExceptionHandler() { 76 | Thread.setDefaultUncaughtExceptionHandler((Thread t, Throwable e) -> { 77 | logger.error("UnCaughtException", e); 78 | }); 79 | } 80 | 81 | } 82 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/relation/Relation.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.relation; 2 | 3 | import org.fly.sync.action.InsertAction; 4 | import org.fly.sync.action.UpdateAction; 5 | import org.fly.sync.contract.*; 6 | import org.fly.sync.mysql.model.Record; 7 | import org.fly.sync.setting.River; 8 | 9 | import java.util.HashMap; 10 | import java.util.List; 11 | import java.util.Map; 12 | 13 | public class Relation implements AbstractRelation { 14 | 15 | private DbFactory dbFactory; 16 | private Map tableRecords = new HashMap<>(); 17 | 18 | public Relation(DbFactory dbFactory, List actionList) { 19 | 20 | this.dbFactory = dbFactory; 21 | 22 | filterToTableActions(actionList); 23 | } 24 | 25 | private River.Database getRiverDatabase() 26 | { 27 | return dbFactory.getRiverDatabase(); 28 | } 29 | 30 | private void filterToTableActions(List actionList) 31 | { 32 | tableRecords.clear(); 33 | 34 | for (AbstractAction action :actionList 35 | ) { 36 | if (!(action instanceof AbstractRecordAction)) 37 | continue; 38 | 39 | Record record = ((AbstractRecordAction) action).getRecord(); 40 | 41 | if (action instanceof InsertAction || action instanceof UpdateAction) 42 | { 43 | if 
(getRiverDatabase().hasWith(record.table)) 44 | putToTableAction(record.table, record); 45 | } else if (action instanceof AbstractRelateAction) 46 | putToTableAction(record.table, ((AbstractRelateAction) action).getAssociate(), record); 47 | } 48 | } 49 | 50 | private void putToTableAction(String tableName, River.Associate associate, Record record) 51 | { 52 | RelationRecords records; 53 | 54 | String key = River.makeRelationKey(tableName, associate == null ? "" : associate.relationKey); 55 | 56 | if (tableRecords.containsKey(key)) 57 | { 58 | records = tableRecords.get(key); 59 | } else { 60 | records = new RelationRecords(dbFactory, tableName, associate); 61 | tableRecords.put(key, records); 62 | } 63 | 64 | records.addRecord(record); 65 | } 66 | 67 | private void putToTableAction(String tableName, Record record) 68 | { 69 | putToTableAction(tableName, null, record); 70 | } 71 | 72 | public void load() 73 | { 74 | for (Map.Entry entry: tableRecords.entrySet() 75 | ) { 76 | entry.getValue().relate(); 77 | } 78 | } 79 | 80 | } 81 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/Column.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.sync.contract.ModelInterface; 4 | 5 | import java.beans.ConstructorProperties; 6 | 7 | public class Column implements ModelInterface { 8 | 9 | public String TABLE_CATALOG; 10 | public String TABLE_SCHEMA; 11 | public String TABLE_NAME; 12 | public String COLUMN_NAME; 13 | public long ORDINAL_POSITION; 14 | public String COLUMN_DEFAULT; 15 | public String IS_NULLABLE; 16 | public String DATA_TYPE; 17 | public long CHARACTER_MAXIMUM_LENGTH; 18 | public long CHARACTER_OCTET_LENGTH; 19 | public long NUMERIC_PRECISION; 20 | public long NUMERIC_SCALE; 21 | public long DATETIME_PRECISION; 22 | public String CHARACTER_SET_NAME; 23 | public String COLLATION_NAME; 24 | public 
String COLUMN_TYPE; 25 | public String COLUMN_KEY; 26 | public String EXTRA; 27 | public String PRIVILEGES; 28 | public String COLUMN_COMMENT; 29 | public String GENERATION_EXPRESSION; 30 | 31 | @ConstructorProperties({"TABLE_CATALOG", "TABLE_SCHEMA", "TABLE_NAME", "COLUMN_NAME", "ORDINAL_POSITION", "COLUMN_DEFAULT", "IS_NULLABLE", "DATA_TYPE", "CHARACTER_MAXIMUM_LENGTH", "CHARACTER_OCTET_LENGTH", "NUMERIC_PRECISION", "NUMERIC_SCALE", "DATETIME_PRECISION", "CHARACTER_SET_NAME", "COLLATION_NAME", "COLUMN_TYPE", "COLUMN_KEY", "EXTRA", "PRIVILEGES", "COLUMN_COMMENT", "GENERATION_EXPRESSION"}) 32 | public Column(String TABLE_CATALOG, String TABLE_SCHEMA, String TABLE_NAME, String COLUMN_NAME, long ORDINAL_POSITION, String COLUMN_DEFAULT, String IS_NULLABLE, String DATA_TYPE, long CHARACTER_MAXIMUM_LENGTH, long CHARACTER_OCTET_LENGTH, long NUMERIC_PRECISION, long NUMERIC_SCALE, long DATETIME_PRECISION, String CHARACTER_SET_NAME, String COLLATION_NAME, String COLUMN_TYPE, String COLUMN_KEY, String EXTRA, String PRIVILEGES, String COLUMN_COMMENT, String GENERATION_EXPRESSION) { 33 | this.TABLE_CATALOG = TABLE_CATALOG; 34 | this.TABLE_SCHEMA = TABLE_SCHEMA; 35 | this.TABLE_NAME = TABLE_NAME; 36 | this.COLUMN_NAME = COLUMN_NAME; 37 | this.ORDINAL_POSITION = ORDINAL_POSITION; 38 | this.COLUMN_DEFAULT = COLUMN_DEFAULT; 39 | this.IS_NULLABLE = IS_NULLABLE; 40 | this.DATA_TYPE = DATA_TYPE; 41 | this.CHARACTER_MAXIMUM_LENGTH = CHARACTER_MAXIMUM_LENGTH; 42 | this.CHARACTER_OCTET_LENGTH = CHARACTER_OCTET_LENGTH; 43 | this.NUMERIC_PRECISION = NUMERIC_PRECISION; 44 | this.NUMERIC_SCALE = NUMERIC_SCALE; 45 | this.DATETIME_PRECISION = DATETIME_PRECISION; 46 | this.CHARACTER_SET_NAME = CHARACTER_SET_NAME; 47 | this.COLLATION_NAME = COLLATION_NAME; 48 | this.COLUMN_TYPE = COLUMN_TYPE; 49 | this.COLUMN_KEY = COLUMN_KEY; 50 | this.EXTRA = EXTRA; 51 | this.PRIVILEGES = PRIVILEGES; 52 | this.COLUMN_COMMENT = COLUMN_COMMENT; 53 | this.GENERATION_EXPRESSION = GENERATION_EXPRESSION; 54 | } 55 
| } 56 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/Table.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.model; 2 | 3 | import org.fly.sync.contract.ModelInterface; 4 | 5 | import java.beans.ConstructorProperties; 6 | import java.time.LocalDateTime; 7 | 8 | public class Table implements ModelInterface { 9 | 10 | public String TABLE_CATALOG; 11 | public String TABLE_SCHEMA; 12 | public String TABLE_NAME; 13 | public String TABLE_TYPE; 14 | public String ENGINE; 15 | public long VERSION; 16 | public String ROW_FORMAT; 17 | public long TABLE_ROWS; 18 | public long AVG_ROW_LENGTH; 19 | public long DATA_LENGTH; 20 | public long MAX_DATA_LENGTH; 21 | public long INDEX_LENGTH; 22 | public long DATA_FREE; 23 | public long AUTO_INCREMENT; 24 | public LocalDateTime CREATE_TIME; 25 | public LocalDateTime UPDATE_TIME; 26 | public LocalDateTime CHECK_TIME; 27 | public String TABLE_COLLATION; 28 | public long CHECKSUM; 29 | public String CREATE_OPTIONS; 30 | public String TABLE_COMMENT; 31 | 32 | @ConstructorProperties({"TABLE_CATALOG", "TABLE_SCHEMA", "TABLE_NAME", "TABLE_TYPE", "ENGINE", "VERSION", "ROW_FORMAT", "TABLE_ROWS", "AVG_ROW_LENGTH", "DATA_LENGTH", "MAX_DATA_LENGTH", "INDEX_LENGTH", "DATA_FREE", "AUTO_INCREMENT", "CREATE_TIME", "UPDATE_TIME", "CHECK_TIME", "TABLE_COLLATION", "CHECKSUM", "CREATE_OPTIONS", "TABLE_COMMENT"}) 33 | public Table(String TABLE_CATALOG, 34 | String TABLE_SCHEMA, 35 | String TABLE_NAME, 36 | String TABLE_TYPE, 37 | String ENGINE, 38 | long VERSION, 39 | String ROW_FORMAT, 40 | long TABLE_ROWS, 41 | long AVG_ROW_LENGTH, 42 | long DATA_LENGTH, 43 | long MAX_DATA_LENGTH, 44 | long INDEX_LENGTH, 45 | long DATA_FREE, 46 | long AUTO_INCREMENT, 47 | LocalDateTime CREATE_TIME, 48 | LocalDateTime UPDATE_TIME, 49 | LocalDateTime CHECK_TIME, 50 | String TABLE_COLLATION, 51 | long CHECKSUM, 52 | String 
CREATE_OPTIONS, 53 | String TABLE_COMMENT) { 54 | this.TABLE_CATALOG = TABLE_CATALOG; 55 | this.TABLE_SCHEMA = TABLE_SCHEMA; 56 | this.TABLE_NAME = TABLE_NAME; 57 | this.TABLE_TYPE = TABLE_TYPE; 58 | this.ENGINE = ENGINE; 59 | this.VERSION = VERSION; 60 | this.ROW_FORMAT = ROW_FORMAT; 61 | this.TABLE_ROWS = TABLE_ROWS; 62 | this.AVG_ROW_LENGTH = AVG_ROW_LENGTH; 63 | this.DATA_LENGTH = DATA_LENGTH; 64 | this.MAX_DATA_LENGTH = MAX_DATA_LENGTH; 65 | this.INDEX_LENGTH = INDEX_LENGTH; 66 | this.DATA_FREE = DATA_FREE; 67 | this.AUTO_INCREMENT = AUTO_INCREMENT; 68 | this.CREATE_TIME = CREATE_TIME; 69 | this.UPDATE_TIME = UPDATE_TIME; 70 | this.CHECK_TIME = CHECK_TIME; 71 | this.TABLE_COLLATION = TABLE_COLLATION; 72 | this.CHECKSUM = CHECKSUM; 73 | this.CREATE_OPTIONS = CREATE_OPTIONS; 74 | this.TABLE_COMMENT = TABLE_COMMENT; 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/parser/InsertParser.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.parser; 2 | 3 | import org.apache.commons.lang.StringEscapeUtils; 4 | 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | import java.util.regex.Matcher; 8 | import java.util.regex.Pattern; 9 | 10 | public class InsertParser { 11 | 12 | private static final String INSERT_PATTERN = "^\\s?+INSERT INTO `(.+?)` VALUES \\((.+)\\);"; 13 | 14 | public static String parseTable(String sql) { 15 | Pattern pattern = Pattern.compile(INSERT_PATTERN, Pattern.CASE_INSENSITIVE | Pattern.DOTALL); 16 | Matcher matcher = pattern.matcher(sql); 17 | if (matcher.find()){ 18 | return matcher.group(1); 19 | } 20 | 21 | return null; 22 | } 23 | 24 | public static List parseValue(String sql) 25 | { 26 | Pattern pattern = Pattern.compile(INSERT_PATTERN, Pattern.CASE_INSENSITIVE | Pattern.DOTALL); 27 | Matcher matcher = pattern.matcher(sql); 28 | 29 | if (!matcher.find()) 30 | return null; 31 | 32 | 
String str = matcher.group(2); 33 | 34 | int len = str.length(); 35 | List valueList = new ArrayList<>(); 36 | 37 | int j = 0; 38 | boolean escaped = false; 39 | char ch; 40 | for (int i = 0; i < len ;) { 41 | ch = str.charAt(i); 42 | if (ch != '\'') // no string, read until comma , 43 | { 44 | j = i + 1; 45 | for (; j < len && str.charAt(j) != ','; j++); 46 | 47 | valueList.add(str.substring(i, j)); 48 | 49 | i = j + 1; 50 | } else { // read string until another single quote 51 | j = i + 1; 52 | 53 | escaped = false; 54 | 55 | for(; j < len;) 56 | { 57 | ch = str.charAt(j); 58 | if (ch == '\\') 59 | { 60 | // skip escaped character 61 | j += 2; 62 | escaped = true; 63 | } else if (ch == '\'') { 64 | break; 65 | } else { 66 | j++; 67 | } 68 | } 69 | 70 | if (j >= len) 71 | return null; 72 | 73 | String value = str.substring(i, j + 1); 74 | 75 | if (escaped) { 76 | value = StringEscapeUtils.unescapeJava(value); 77 | } 78 | 79 | valueList.add(value); 80 | 81 | // skip ' and , 82 | i = j + 2; 83 | } 84 | 85 | // skip blank 86 | } 87 | 88 | List list = new ArrayList<>(); 89 | 90 | for (String value : valueList 91 | ) { 92 | if (value.equalsIgnoreCase("NULL")) 93 | list.add(null); 94 | else if (value.length() > 1 && value.charAt(0) == '\'' && value.charAt(value.length() - 1) == '\'') 95 | list.add(value.substring(1, value.length() - 1)); 96 | else 97 | list.add(value); 98 | } 99 | 100 | return list; 101 | } 102 | 103 | 104 | } 105 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/executor/Consumer.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.executor; 2 | 3 | import com.fasterxml.jackson.databind.ObjectMapper; 4 | import org.fly.sync.action.DeleteAction; 5 | import org.fly.sync.contract.*; 6 | import org.fly.sync.es.Es; 7 | import org.fly.sync.es.Writer; 8 | import org.fly.sync.mysql.MySql; 9 | import org.fly.sync.setting.River; 10 | import 
io.reactivex.Observer; 11 | import io.reactivex.disposables.Disposable; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.util.Arrays; 16 | import java.util.List; 17 | 18 | public class Consumer extends AbstractLifeCycle implements Observer>, DbFactory { 19 | 20 | public final static Logger logger = LoggerFactory.getLogger(Consumer.class); 21 | private final static List bulkGroup = Arrays.asList( 22 | AbstractRecordAction.class.getName(), 23 | DeleteAction.class.getName() 24 | ); 25 | 26 | private Executor executor; 27 | private River.Database database; 28 | private Disposable disposable; 29 | private Writer writer; 30 | 31 | public Consumer(Executor executor, River.Database database) { 32 | this.executor = executor; 33 | this.database = database; 34 | writer = new Writer(this); 35 | } 36 | 37 | @Override 38 | public void start() { 39 | super.start(); 40 | } 41 | 42 | @Override 43 | public void stop() { 44 | super.stop(); 45 | 46 | if (null != disposable && !disposable.isDisposed()) 47 | disposable.dispose(); 48 | } 49 | 50 | @Override 51 | public Es getEs() 52 | { 53 | return executor.getEs(); 54 | } 55 | 56 | @Override 57 | public MySql getMySql() 58 | { 59 | return executor.getMySql(); 60 | } 61 | 62 | @Override 63 | public River.Database getRiverDatabase() { 64 | return database; 65 | } 66 | 67 | @Override 68 | public Statistic getStatistic() { 69 | return executor.getStatistic(); 70 | } 71 | 72 | @Override 73 | public ObjectMapper getJsonMapper() { 74 | return executor.getJsonMapper(); 75 | } 76 | 77 | public Executor getExecutor() { 78 | return executor; 79 | } 80 | 81 | @Override 82 | public void onSubscribe(Disposable disposable) { 83 | this.disposable = disposable; 84 | } 85 | 86 | @Override 87 | public synchronized void onNext(List actionList) { 88 | 89 | if (actionList.isEmpty()) 90 | return; 91 | 92 | getStatistic().getSubscribeCount().addAndGet(actionList.size()); 93 | 
getStatistic().getRecordCount().addAndGet(actionList.stream().filter(action -> action instanceof AbstractRecordAction).count()); 94 | getStatistic().getRelateCount().addAndGet(actionList.stream().filter(action -> action instanceof AbstractRelateAction).count()); 95 | 96 | try 97 | { 98 | // bulk 99 | if (bulkGroup.contains(actionList.get(0).getGroup())) 100 | { 101 | writer.executeBulk(actionList); 102 | } else { 103 | writer.execute(actionList); 104 | } 105 | } catch (Exception e) { 106 | disposable.dispose(); 107 | onError(e); 108 | } 109 | } 110 | 111 | 112 | @Override 113 | public void onError(Throwable throwable) { 114 | executor.throwException(throwable); 115 | } 116 | 117 | @Override 118 | public void onComplete() { 119 | disposable.dispose(); 120 | executor.stop(); 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /etc/river.json: -------------------------------------------------------------------------------- 1 | { 2 | "my" : { 3 | "host" : "127.0.0.1", 4 | "port": 3306, 5 | /** 6 | * Create A user like this: 7 | * CREATE USER canal IDENTIFIED BY 'Your Password'; 8 | * GRANT SELECT, SHOW VIEW, Reload, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%'; 9 | * FLUSH PRIVILEGES; 10 | */ 11 | "user": "canal", 12 | "password" : "", 13 | "time_zone": "Asia/Shanghai" 14 | }, 15 | "es" : { 16 | "host" : "127.0.0.1", 17 | "port": 9200, 18 | 19 | /** 20 | * If you installed X-pack or SearchGuard, You can set a user for here 21 | */ 22 | "user": "", 23 | "password" : "", 24 | "time_zone": "Asia/Shanghai" 25 | }, 26 | // Execute "Set NAMES utf8" once before SELECT 27 | "charset": "utf8", 28 | "databases" : [ 29 | { 30 | "schema" : "db1", 31 | /** 32 | * ES 6 remove the TYPE in INDEX, One-Table-One-Index 33 | * 34 | */ 35 | "tables": { 36 | /* 37 | * @Example: Full config 38 | * 39 | */ 40 | "table_name" : { 41 | // Default: true, If this table donot need sync, Or you only wanna other table relates to this 
table, set false. 42 | "sync": true, 43 | // What ES INDEX name, the items is same as "${SCHEMA}-${TABLE}", support ${SCHEMA}, ${TABLE} 44 | "index": "db1-table_name", 45 | // Also support ${SCHEMA}, ${TABLE} like above, default: "_doc" 46 | "type": "_doc", 47 | // What ES template do this index use, default: "" 48 | "template" : "", 49 | // SimpleDateFormat 50 | "date_format": "yyyy-MM-dd'T'HH:mm:ss.SSSZZ", 51 | // What Primary key, default: ["id"] 52 | "pk" : ["id"], 53 | // What columnNames you wanna sync to this index, default: ["*"] 54 | "columns": ["*"], 55 | // Column alias 56 | "column_alias": { 57 | // "Column Name" : "New name" 58 | }, 59 | // One-to-One relationship 60 | "relations" : { 61 | // see examples below 62 | }, 63 | // What relations you wanna import to this index 64 | "withs": {} 65 | }, 66 | 67 | /* 68 | * @Example: relations 69 | * 70 | */ 71 | // | id | nickname | 72 | "users" : { 73 | "index": "${SCHEMA}-${TABLE}", 74 | "relations" : { 75 | "extra": { 76 | // | id | city | 77 | "table": "user_infos", // Which table do "users" relate 78 | "foreign": "id", // Which column in "user_infos" 79 | "local": "id" //Which column in "user" 80 | } 81 | } 82 | }, 83 | // | id | uid | content | 84 | "comments": { 85 | "index": "${SCHEMA}-${TABLE}", 86 | "relations" : { 87 | "user" : { 88 | "table": "users", // Which table do "comments" relate 89 | "foreign": "id", // Which column in "users" 90 | "local": "uid", // Which column in "comments" 91 | "columns": ["*"], // Same as Table's columnNames 92 | "column_alias": { // Same as Tables's columnAlias 93 | 94 | } 95 | } 96 | }, 97 | // It'll add these columnNames to this index 98 | // | id | uid | content | user.id | user.nickname | user.extra.id | user.extra.city | 99 | "withs": { 100 | "user": {}, 101 | "user.extra": {} 102 | } 103 | } 104 | 105 | } 106 | } 107 | ] 108 | 109 | 110 | 111 | 112 | } -------------------------------------------------------------------------------- /bin/me.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | ## Example from: https://gist.github.com/gythialy/8e720aa0a43bd0dd0b6a 4 | # 5 | 6 | ## Get the current path 7 | current_path=`pwd` 8 | case "`uname`" in 9 | Linux) 10 | bin_abs_path=$(readlink -f $(dirname $0)) 11 | ;; 12 | *) 13 | bin_abs_path=`cd $(dirname $0); pwd` 14 | ;; 15 | esac 16 | 17 | ## Custom 18 | 19 | BASE=${bin_abs_path}/.. 20 | RUNNING_USER=root 21 | CONF=$BASE/conf 22 | APP_MAINCLASS=org.fly.sync.Main 23 | JAVA_OPTS=" -Djava.awt.headless=true -XX:MaxPermSize=512m -Dme.etc.path=$CONF" 24 | CLASSPATH=$BASE/lib 25 | PIDFILE=$BASE/run.pid 26 | 27 | # Loop for lib/*.jar 28 | for i in "$BASE"/lib/*.jar; do 29 | CLASSPATH="$CLASSPATH":"$i" 30 | done 31 | 32 | ## Get java path 33 | if [ -x "$JAVA_HOME/bin/java" ]; then 34 | JAVA="$JAVA_HOME/bin/java" 35 | else 36 | set +e 37 | JAVA=`which java` 38 | set -e 39 | fi 40 | 41 | if [ -x "$JAVA_HOME/bin/jps" ]; then 42 | JPS="$JAVA_HOME/bin/jps" 43 | else 44 | set +e 45 | JPS=`which jps` 46 | set -e 47 | fi 48 | 49 | if [ -z "$JAVA" ] ; then 50 | echo "Cannot find a Java JDK. Please set either set JAVA or put java (>=1.8) in your PATH." 2>&2 51 | exit 1 52 | fi 53 | 54 | psid=0 55 | 56 | checkpid() { 57 | 58 | if [ -z "$JPS" ]; then 59 | 60 | if [ -e "$PIDFILE" ]; then 61 | psid=`cat "$PIDFILE"` 62 | else 63 | psid=0 64 | fi 65 | echo $psid 66 | else 67 | javaps=`$JPS -l | grep $APP_MAINCLASS` 68 | 69 | if [ -n "$javaps" ]; then 70 | psid=`echo $javaps | awk '{print $1}'` 71 | else 72 | psid=0 73 | fi 74 | fi 75 | } 76 | 77 | start() { 78 | checkpid 79 | 80 | if [ $psid -ne 0 ]; then 81 | echo "================================" 82 | echo "warn: $APP_MAINCLASS already started! (pid=$psid)" 83 | echo "================================" 84 | else 85 | echo -n "Starting $APP_MAINCLASS ..." 86 | nohup $JAVA $JAVA_OPTS -classpath $CLASSPATH $APP_MAINCLASS >/dev/null 2>&1 & 87 | psid=$! 
88 | echo $psid > "$PIDFILE" 89 | 90 | checkpid 91 | if [ $psid -ne 0 ]; then 92 | echo "(pid=$psid) [OK]" 93 | else 94 | echo "[Failed]" 95 | fi 96 | fi 97 | } 98 | 99 | stop() { 100 | checkpid 101 | 102 | if [ $psid -ne 0 ]; then 103 | echo -n "Stopping $APP_MAINCLASS ...(pid=$psid) " 104 | kill -15 $psid 105 | rm -rf "$PIDFILE" 106 | 107 | if [ $? -eq 0 ]; then 108 | echo "[OK]" 109 | else 110 | echo "[Failed]" 111 | fi 112 | 113 | #checkpid 114 | #if [ $psid -ne 0 ]; then 115 | # stop 116 | #fi 117 | else 118 | echo "================================" 119 | echo "warn: $APP_MAINCLASS is not running" 120 | echo "================================" 121 | fi 122 | } 123 | 124 | status() { 125 | checkpid 126 | 127 | if [ $psid -ne 0 ]; then 128 | echo "$APP_MAINCLASS is running! (pid=$psid)" 129 | else 130 | echo "$APP_MAINCLASS is not running" 131 | fi 132 | } 133 | 134 | info() { 135 | echo "System Information:" 136 | echo "****************************" 137 | echo `head -n 1 /etc/issue` 138 | echo `uname -a` 139 | echo 140 | echo "JAVA=$JAVA" 141 | echo `$JAVA -version` 142 | echo 143 | echo "APP_HOME=$BASE" 144 | echo "APP_MAINCLASS=$APP_MAINCLASS" 145 | echo "****************************" 146 | } 147 | 148 | case "$1" in 149 | 'start') 150 | start 151 | ;; 152 | 'stop') 153 | stop 154 | ;; 155 | 'restart') 156 | stop 157 | start 158 | ;; 159 | 'status') 160 | status 161 | ;; 162 | 'info') 163 | info 164 | ;; 165 | *) 166 | echo "Usage: $0 {start|stop|restart|status|info}" 167 | exit 1 168 | esac 169 | exit 0 -------------------------------------------------------------------------------- /etc/template.json: -------------------------------------------------------------------------------- 1 | { 2 | "settings": { 3 | "index": { 4 | "sort.field": ["id"], 5 | "sort.order": ["desc"] 6 | }, 7 | "analysis": { 8 | "analyzer": { 9 | "title_standard": { 10 | "type": "custom", 11 | "tokenizer": "standard", 12 | "filter": [ 13 | "lowercase", 14 | "asciifolding", 15 | 
"f_ngram", 16 | "unique" 17 | ] 18 | }, 19 | "title_search_standard": { 20 | "type": "custom", 21 | "tokenizer": "standard", 22 | "filter": [ 23 | "lowercase", 24 | "asciifolding", 25 | "unique" 26 | ] 27 | } 28 | }, 29 | "filter": { 30 | "f_ngram": { 31 | "type": "edgeNGram", 32 | "min_gram": 2, 33 | "max_gram": 25, 34 | "side": "front" 35 | }, 36 | "f_word_delimiter": { 37 | "type": "word_delimiter", 38 | "split_on_numerics": true, 39 | "split_on_case_change": true, 40 | "generate_word_parts": true, 41 | "generate_number_parts": true, 42 | "catenate_words": true, 43 | "catenate_numbers": true, 44 | "catenate_all": true, 45 | "preserve_original": true 46 | } 47 | } 48 | } 49 | }, 50 | "mappings": { 51 | "_doc": { 52 | "properties": { 53 | "id": { 54 | "type": "long", 55 | "fields": { 56 | "keyword": { 57 | "type": "keyword", 58 | "doc_values": true, 59 | "ignore_above": 30 60 | } 61 | } 62 | } 63 | }, 64 | "dynamic_templates": [ 65 | { 66 | "keywords": { 67 | "match_pattern": "regex", 68 | "match_mapping_type": "string", 69 | "match": "^.*?_type$", 70 | "mapping": { 71 | "type": "keyword" 72 | } 73 | } 74 | }, 75 | { 76 | "whole_words": { 77 | "match_pattern": "regex", 78 | "match_mapping_type": "string", 79 | "match": "^(name|username|account)$", 80 | "mapping": { 81 | "type": "text", 82 | "search_analyzer": "title_search_standard", 83 | "analyzer": "title_standard" 84 | } 85 | } 86 | }, 87 | { 88 | "phones": { 89 | "match_mapping_type": "string", 90 | "match": "*phone", 91 | "mapping": { 92 | "type": "text", 93 | "search_analyzer": "title_search_standard", 94 | "analyzer": "title_standard" 95 | } 96 | } 97 | }, 98 | { 99 | "dates": { 100 | "match_mapping_type": "string", 101 | "match": "*_at", 102 | "mapping": { 103 | "type": "date", 104 | "format": "yyy-MM-dd HH:mm:ss||yyy-MM-dd HH:mm:ss.SSS||date||HH:mm:ss||date_time||date_time_no_millis||date_hour_minute_second||date_hour_minute_second_millis" 105 | } 106 | } 107 | }, 108 | { 109 | "timestamp": { 110 | 
"match_mapping_type": "long", 111 | "match": "*_at", 112 | "mapping": { 113 | "type": "date", 114 | "format": "seconds-since-the-epoch||milliseconds-since-the-epoch" 115 | } 116 | } 117 | }, 118 | { 119 | "long_to_string" : { 120 | "match_mapping_type": "long", 121 | "mapping": { 122 | "type": "long", 123 | "fields": { 124 | "keyword": { 125 | "type": "keyword", 126 | "ignore_above": 30 127 | } 128 | } 129 | } 130 | } 131 | } 132 | ] 133 | } 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/CanalTest.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync; 2 | 3 | import com.alibaba.otter.canal.client.CanalConnector; 4 | import com.alibaba.otter.canal.client.CanalConnectors; 5 | import com.alibaba.otter.canal.protocol.CanalEntry; 6 | import com.alibaba.otter.canal.protocol.Message; 7 | 8 | import java.net.InetSocketAddress; 9 | import java.util.List; 10 | 11 | public class CanalTest { 12 | 13 | public static void main(String[] argv) { 14 | 15 | // 创建链接 16 | CanalConnector connector = CanalConnectors.newSingleConnector( 17 | new InetSocketAddress("106.75.153.131",3306), 18 | "example", 19 | "", 20 | ""); 21 | int batchSize = 1000; 22 | int emptyCount = 0; 23 | try { 24 | connector.connect(); 25 | connector.subscribe(".*\\..*"); 26 | connector.rollback(); 27 | int totalEmptyCount = 120; 28 | while (emptyCount < totalEmptyCount) { 29 | Message message = connector.getWithoutAck(batchSize); // 获取指定数量的数据 30 | long batchId = message.getId(); 31 | int size = message.getEntries().size(); 32 | if (batchId == -1 || size == 0) { 33 | emptyCount++; 34 | System.out.println("empty count : " + emptyCount); 35 | try { 36 | Thread.sleep(1000); 37 | } catch (InterruptedException e) { 38 | } 39 | } else { 40 | emptyCount = 0; 41 | // System.out.printf("message[batchId=%s,size=%s] \n", batchId, size); 42 | printEntry(message.getEntries()); 43 | } 44 | 
45 | connector.ack(batchId); // 提交确认 46 | // connector.rollback(batchId); // 处理失败, 回滚数据 47 | } 48 | 49 | System.out.println("empty too many times, exit"); 50 | } finally { 51 | connector.disconnect(); 52 | } 53 | } 54 | 55 | private static void printEntry(List entrys) { 56 | for (CanalEntry.Entry entry : entrys) { 57 | if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) { 58 | continue; 59 | } 60 | 61 | CanalEntry.RowChange rowChage = null; 62 | try { 63 | rowChage = CanalEntry.RowChange.parseFrom(entry.getStoreValue()); 64 | } catch (Exception e) { 65 | throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(), 66 | e); 67 | } 68 | 69 | CanalEntry.EventType eventType = rowChage.getEventType(); 70 | System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s", 71 | entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(), 72 | entry.getHeader().getSchemaName(), entry.getHeader().getTableName(), 73 | eventType)); 74 | 75 | for (CanalEntry.RowData rowData : rowChage.getRowDatasList()) { 76 | if (eventType == CanalEntry.EventType.DELETE) { 77 | printColumn(rowData.getBeforeColumnsList()); 78 | } else if (eventType == CanalEntry.EventType.INSERT) { 79 | printColumn(rowData.getAfterColumnsList()); 80 | } else { 81 | System.out.println("-------> before"); 82 | printColumn(rowData.getBeforeColumnsList()); 83 | System.out.println("-------> after"); 84 | printColumn(rowData.getAfterColumnsList()); 85 | } 86 | } 87 | } 88 | } 89 | 90 | private static void printColumn(List columns) { 91 | for (CanalEntry.Column column : columns) { 92 | System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated()); 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/contract/AbstractConnector.java: 
-------------------------------------------------------------------------------- 1 | package org.fly.sync.contract; 2 | 3 | import org.fly.sync.exception.OutOfRetryException; 4 | import org.fly.sync.setting.River; 5 | 6 | import java.util.Timer; 7 | import java.util.TimerTask; 8 | 9 | public abstract class AbstractConnector { 10 | 11 | private static final int INTERVAL = 5000; 12 | 13 | private Timer timer; 14 | private TimerTask task = new TimerTask() { 15 | @Override 16 | public void run() { 17 | heartbeat(autoReconnect); 18 | } 19 | }; 20 | 21 | protected River river; 22 | protected boolean autoReconnect, connected; 23 | protected ConnectionListener listener; 24 | 25 | 26 | public AbstractConnector(River river, boolean autoReconnect) { 27 | this.river = river; 28 | this.autoReconnect = autoReconnect; 29 | this.connected = false; 30 | } 31 | 32 | public void setListener(ConnectionListener listener) { 33 | this.listener = listener; 34 | } 35 | 36 | public ConnectionListener getListener() { 37 | return listener; 38 | } 39 | 40 | public River getRiver() { 41 | return river; 42 | } 43 | 44 | public boolean connect() throws Exception 45 | { 46 | doConnecting(); 47 | 48 | tryHeartbeat(); 49 | if (!isConnected()) 50 | return false; 51 | 52 | if (null != listener) listener.onConnected(this); 53 | 54 | if (null == timer) 55 | { 56 | timer = new Timer(true); 57 | timer.schedule(task, INTERVAL, INTERVAL); 58 | } 59 | 60 | return true; 61 | } 62 | 63 | public void reconnect() throws Exception 64 | { 65 | doReconnect(); 66 | } 67 | 68 | public void waitForConnected(int count, int sleep) 69 | { 70 | int i = 0; 71 | while (!isConnected()) { 72 | if (count > 0 && ++i > count) 73 | throw new OutOfRetryException("Try to reconnect for " + String.valueOf(count) + " times, Still Failed."); 74 | try { 75 | reconnect(); 76 | 77 | } catch (Exception e) { 78 | 79 | try { 80 | Thread.sleep(sleep); 81 | } catch (InterruptedException e1) 82 | { 83 | return; 84 | } 85 | } 86 | } 87 | } 88 | 
89 | public synchronized void tryHeartbeat() throws Exception 90 | { 91 | try { 92 | doHeartbeat(); 93 | connected = true; 94 | } catch (Exception e) { 95 | throw e; 96 | } 97 | } 98 | 99 | public synchronized void heartbeat(boolean autoReconnect) { 100 | try { 101 | doHeartbeat(); 102 | connected = true; 103 | } catch (Exception e) 104 | { 105 | connected = false; 106 | throwError(e); 107 | 108 | if (null != listener && connected) listener.onDisconnected(this); 109 | 110 | if (autoReconnect) 111 | try { 112 | reconnect(); 113 | } catch (Exception e1) 114 | { 115 | throwError(e1); 116 | } 117 | } 118 | 119 | } 120 | 121 | public void close() throws Exception 122 | { 123 | if (null != timer) { 124 | timer.cancel(); 125 | timer = null; 126 | } 127 | 128 | doClose(); 129 | 130 | boolean _connected = connected; 131 | connected = false; 132 | 133 | if (null != listener && _connected) listener.onDisconnected(this); 134 | 135 | } 136 | 137 | protected abstract void doConnecting() throws Exception; 138 | protected abstract void doReconnect() throws Exception; 139 | protected abstract void doHeartbeat() throws Exception; 140 | protected abstract void doClose() throws Exception; 141 | 142 | 143 | public boolean isConnected() 144 | { 145 | return connected; 146 | } 147 | 148 | protected void throwError(Exception e) 149 | { 150 | if (null != listener) listener.onError(this, e); 151 | } 152 | 153 | public interface ConnectionListener { 154 | void onConnected(AbstractConnector connector); 155 | void onDisconnected(AbstractConnector connector); 156 | void onError(AbstractConnector connector, Exception e); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/test/java/BinLogSync.java: -------------------------------------------------------------------------------- 1 | /* 2 | import com.alibaba.otter.canal.protocol.CanalEntry; 3 | import com.alibaba.otter.canal.protocol.Message; 4 | import org.fly.sync.canal.Client; 5 | import 
org.fly.sync.canal.Server; 6 | import org.fly.sync.setting.BinLog; 7 | import org.fly.sync.setting.Setting; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.util.HashMap; 12 | import java.util.List; 13 | import java.util.Map; 14 | 15 | public class BinLogSync { 16 | 17 | public final static Logger logger = LoggerFactory.getLogger(BinLogSync.class); 18 | 19 | public static void main(String[] argv) { 20 | try { 21 | Setting.readSettings(); 22 | } catch (Exception e) { 23 | logger.error(e.getMessage(), e); 24 | return; 25 | } 26 | 27 | BinLog.Position position = Setting.binLog.get(Setting.river.databases.get(0).schemaName); 28 | 29 | System.out.println(position); 30 | 31 | Server server = new Server(Setting.river, Setting.river.databases.get(0), position != null ? position : BinLog.Position.createVirtual()); 32 | server.start(); 33 | System.out.println("Start Server"); 34 | new BinLogSync() 35 | .run(server); 36 | } 37 | 38 | public void run(Server server) 39 | { 40 | // 创建链接 41 | Client connector = new Client(server); 42 | int batchSize = 10; 43 | try { 44 | connector.subscribe(); 45 | connector.rollback(); 46 | 47 | while (true) { 48 | Message message = connector.getWithoutAck(batchSize); // 获取指定数量的数据 49 | long batchId = message.getId(); 50 | int size = message.getEntries().size(); 51 | if (batchId == -1 || size == 0) { 52 | System.out.println("empty, and sleep"); 53 | try { 54 | Thread.sleep(1000); 55 | continue; 56 | } catch (InterruptedException e) { 57 | } 58 | } else { 59 | //System.out.printf("message[batchId=%s,size=%s] \n", batchId, size); 60 | 61 | printEntry(message.getEntries()); 62 | } 63 | 64 | connector.ack(batchId); // 提交确认 65 | // connector.rollback(batchId); // 处理失败, 回滚数据 66 | 67 | System.out.println("next i"); 68 | } 69 | } catch (Exception e) 70 | { 71 | e.printStackTrace(); 72 | } finally { 73 | System.out.println("exit manual."); 74 | } 75 | } 76 | 77 | private static Map idList = new HashMap(); 78 | 79 | 
private static void printEntry(List entrys) { 80 | for (CanalEntry.Entry entry : entrys) { 81 | if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) { 82 | continue; 83 | } 84 | 85 | CanalEntry.RowChange rowChage = null; 86 | 87 | try { 88 | rowChage = CanalEntry.RowChange.parseFrom(entry.getStoreValue()); 89 | } catch (Exception e) { 90 | throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(), 91 | e); 92 | } 93 | 94 | CanalEntry.EventType eventType = rowChage.getEventType(); 95 | 96 | 97 | System.out.println(String.format("================> executor[%s:%s] , name[%s,%s] , eventType : %s", 98 | entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(), 99 | entry.getHeader().getSchemaName(), entry.getHeader().getTableName(), 100 | eventType)); 101 | 102 | for (CanalEntry.RowData rowData : rowChage.getRowDatasList()) { 103 | if (eventType == CanalEntry.EventType.DELETE) { 104 | printColumn(rowData.getBeforeColumnsList()); 105 | } else if (eventType == CanalEntry.EventType.INSERT) { 106 | printColumn(rowData.getAfterColumnsList()); 107 | } else { 108 | System.out.println("-------> before"); 109 | printColumn(rowData.getBeforeColumnsList()); 110 | System.out.println("-------> after"); 111 | printColumn(rowData.getAfterColumnsList()); 112 | } 113 | } 114 | 115 | } 116 | } 117 | 118 | 119 | private static void printColumn(List columns) { 120 | for (CanalEntry.Column column : columns) { 121 | System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated()); 122 | } 123 | } 124 | } 125 | */ -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/es/Writer.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.es; 2 | 3 | import org.elasticsearch.client.Request; 4 | import 
org.elasticsearch.client.RequestOptions; 5 | import org.elasticsearch.index.reindex.UpdateByQueryRequest; 6 | import org.fly.sync.contract.*; 7 | import org.fly.sync.mysql.model.Record; 8 | import org.fly.sync.setting.River; 9 | import org.apache.http.HttpEntity; 10 | import org.apache.http.entity.ContentType; 11 | import org.apache.http.nio.entity.NStringEntity; 12 | import org.elasticsearch.action.bulk.BulkItemResponse; 13 | import org.elasticsearch.action.bulk.BulkRequest; 14 | import org.elasticsearch.action.bulk.BulkResponse; 15 | import org.elasticsearch.action.support.WriteRequest; 16 | import org.slf4j.Logger; 17 | import org.slf4j.LoggerFactory; 18 | 19 | import java.io.IOException; 20 | import java.util.HashMap; 21 | import java.util.List; 22 | import java.util.Map; 23 | 24 | public class Writer implements AbstractWriter { 25 | public final static Logger logger = LoggerFactory.getLogger(Writer.class); 26 | 27 | DbFactory dbFactory; 28 | 29 | public Writer(DbFactory dbFactory) { 30 | this.dbFactory = dbFactory; 31 | } 32 | 33 | @Override 34 | public DbFactory getDbFactory() { 35 | return dbFactory; 36 | } 37 | 38 | public void updateByQuery(AbstractRelateAction relateAction) 39 | { 40 | River.Associate associate = relateAction.getAssociate(); 41 | Record record = relateAction.getRecord(); 42 | 43 | if (associate.calledTable == null) 44 | return; 45 | 46 | River.Table table = dbFactory.getRiverDatabase().getTable(associate.calledTable.tableName); 47 | 48 | int offset = associate.findOffset(record.table); 49 | // == 0 is a sync-table, not a relation 50 | // why sync-table is in this method? 
51 | if (offset <= 0) 52 | return; 53 | 54 | River.Relation relation = associate.getRelation(offset - 1); 55 | 56 | List relationKeyList = River.getRelationKeyList(associate.relationKey); 57 | 58 | String relationKey = River.makeRelationKey(relationKeyList.subList(0, offset)); 59 | String localKey = (River.makeRelationKey(relationKeyList.subList(0, offset - 1)) + River.DOT + relation.local).replaceFirst("^\\" + River.DOT, ""); 60 | 61 | Map values = record.mixWithPrefix(relationKey.isEmpty() ? relationKey : relationKey + River.DOT); 62 | 63 | // Elastic's XContentBuilder do not support BigInteger/Timestamp, Use jackson instead of it. 64 | UpdateByQuery updateByQuery = new UpdateByQuery(); 65 | 66 | // build query 67 | updateByQuery.query.setField(localKey, record.getWithDeleted(relation.foreign)); 68 | 69 | // build script 70 | updateByQuery.script.params = values; 71 | 72 | String json = updateByQuery.toJson(dbFactory.getJsonMapper()); 73 | 74 | // search the parent record's local KEY, 75 | // value is the current record's foreign KEY 76 | 77 | HttpEntity entity = new NStringEntity(json, ContentType.APPLICATION_JSON); 78 | 79 | try { 80 | 81 | Request request = new Request("POST", table.index + "/" +table.type+ "/_update_by_query"); 82 | request.setEntity(entity); 83 | request.addParameter("conflicts", "proceed"); 84 | request.addParameter("refresh", "false"); 85 | request.addParameter("wait_for_completion", "false"); 86 | 87 | dbFactory 88 | .getEs() 89 | .getRestClient() 90 | .performRequest(request); 91 | 92 | } catch (IOException e) 93 | { 94 | logger.error(e.getMessage(), e); 95 | } 96 | } 97 | 98 | public void execute(List actionList) 99 | { 100 | for (AbstractAction action: actionList 101 | ) { 102 | action.execute(this); 103 | } 104 | } 105 | 106 | public void executeBulk(List actionList) 107 | { 108 | long nanoTime = System.nanoTime(); 109 | 110 | BulkRequest request = new BulkRequest(); 111 | request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); 112 
| 113 | try { 114 | 115 | for (AbstractAction action : actionList 116 | ) { 117 | action.execute(this); 118 | request.add( 119 | ((AbstractRecordAction)action).getRequest(dbFactory) 120 | ); 121 | } 122 | 123 | } catch (Exception e) 124 | { 125 | logger.error(e.getMessage(), e); 126 | return; 127 | } 128 | 129 | try { 130 | 131 | BulkResponse bulkResponse = dbFactory.getEs().getClient().bulk(request, RequestOptions.DEFAULT); 132 | 133 | for (BulkItemResponse response : bulkResponse 134 | ) { 135 | if (response.isFailed()) 136 | logger.error(response.getFailureMessage()); 137 | } 138 | 139 | } catch (IOException e) { 140 | logger.error("Elasticsearch request failed.", e); 141 | } 142 | 143 | logger.info("Sync {} records, use {} ms", actionList.size(), (System.nanoTime() - nanoTime) / 1000_000.0); 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MySQL - ElasticSearch Synchronization 2 | 3 | A MySQL-ElasticSearch synchronization tool with **Real-Time**, **No-Lose**, **One-to-One Relation**. 4 | 5 | base on [alibaba/canal](https://github.com/alibaba/canal), [RxJava](https://github.com/ReactiveX/RxJava). 6 | 7 | > The Canal is a bin-log parser and subscriber of alibaba 8 | 9 | ## Version 10 | 11 | - 1.0-beta : 2018-09-04 12 | 13 | ## Manuals 14 | 15 | - [Install and launch](docs/install.md) 16 | - [Settings](docs/settings.md) 17 | - [One-to-One Relation](docs/relation.md) 18 | - [Errors](docs/error.md) 19 | - [For developer](docs/developer.md) 20 | 21 | [中文手册](zh-README.md),请点击这里 22 | 23 | ## Requirements 24 | 25 | - Java 1.8 + 26 | - 2 GB Memory + 27 | - 2 Core CPU + 28 | - 100M Free space (for logs) 29 | 30 | ## Features 31 | 32 | - supported ElasticSearch 5.x ~ 6.x. 33 | 34 | - supported **No-enable-bin-log** MySQL **before**. 35 | 36 | If MySQL did not enabled the bin-log before, NO PROBLEM, enable it NOW. 
37 | 38 | this tool will dump the history data via "mysqldump". 39 | 40 | See [How to work](#how-to-work). 41 | 42 | - supported One-to-One relation. 43 | - Original tables 44 | 45 | - **users-table**: | id | nickname | xxx | 46 | 47 | - **posts-table**: | id | user_id | title | content | 48 | 49 | - Use simple settings to synchronize them all, like: 50 | 51 | - **posts-ES-index**: | id | user_id | user.id | user.nickname | user.xxx | title | content | 52 | 53 | See [Relation](docs/relation.md). 54 | 55 | - parsing the bin-log's records to synchronize in **REAL-TIME**, including Create / Update / Delete operations 56 | 57 | - synchronize the relation records in **REAL-TIME**, also after they are modified. 58 | 59 | - supported multiple primary keys. 60 | 61 | - Backup bin-log position's file 62 | 63 | ## How to work 64 | 65 | This tool launches following this process: 66 | 67 | 1. Read Config 68 | 69 | 70 | 2. Read Bin-log's position file. 71 | 72 | If the file exists and the last position exists, skip dump. 73 | 74 | 3. Dump the history data via "mysqldump" if the position was not set. 75 | 76 | - Launch "mysqldump", dump all data to synchronize them to Elastic. 77 | 78 | - And "mysqldump" will return a new bin-log position when MySQL enabled the bin-log. 79 | 80 | > If MySQL does not enable the bin-log, "mysqldump" will not return a position. 81 | 82 | - Dump complete, go to canal. 83 | 84 | 4. Parse the Real-time bin-log via "Canal" 85 | 86 | - Launch the canal with the position. 87 | 88 | - Loop executing: 89 | 90 | 1. parse and synchronize the records from bin-log in Real-Time. 91 | 92 | 2. Save the newest bin-log position after synchronization 93 | 94 | ## Known issue 95 | 96 | - Does not support **No-Primary-key** tables. 97 | 98 | - If a table's primary key has been modified, like "id", cannot modify the old id to the new id in Elastic. 99 | 100 | - If a relation table's primary key has been modified, cannot modify the record to the related index in Elastic. 
101 | 102 | If a column has been added / dropped / modified, cannot synchronize. 103 | 104 | If the settings of tables or relations have been modified, cannot synchronize 105 | 106 | ## Todo 107 | 108 | We will support these features like: 109 | 110 | - Synchronize when altering a table's column (`ADD` / `DROP` / `MODIFY`) 111 | - Synchronize when the primary key is modified 112 | - Synchronize when the relation's primary key is modified 113 | - Synchronize the *Partial* columns that you want. 114 | - Column *alias* 115 | 116 | ## Similar software 117 | 118 | ### Mysql -> Elastic Search 119 | 120 | - [go-mysql-elasticsearch](https://github.com/siddontang/go-mysql-elasticsearch) [CN] 121 | 122 | A Golang program. 123 | 124 | - [elasticsearch-jdbc](https://github.com/jprante/elasticsearch-jdbc) 125 | 126 | - [elasticsearch-river-mysql](https://github.com/scharron/elasticsearch-river-mysql) 127 | 128 | ### MySQL(or Oracle) -> Other Driver(include ElasticSearch) 129 | 130 | - [DataX](https://github.com/alibaba/DataX) [CN] 131 | 132 | A multi-driver synchronization tool, by alibaba. Based on canal. 
133 | 134 | - [tungsten-replicator](https://github.com/continuent/tungsten-replicator) 135 | 136 | An enterprise software, supports ES 5 137 | 138 | - [maxwell](https://github.com/zendesk/maxwell) 139 | 140 | MySQL -> maxwell -> json -> kafka -> LogStash(or FileBeat) -> elastic search 141 | 142 | ## Copyright and License 143 | 144 | This tool is released under the **MIT License** 145 | 146 | ```text 147 | Copyright (C) 2018 Fly-Studio 148 | 149 |   Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 150 | 151 |   The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 152 | 153 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
154 | ``` -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/canal/Server.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.canal; 2 | 3 | import com.alibaba.otter.canal.instance.manager.CanalInstanceWithManager; 4 | import com.alibaba.otter.canal.instance.manager.model.Canal; 5 | import com.alibaba.otter.canal.instance.manager.model.CanalParameter; 6 | import com.alibaba.otter.canal.instance.manager.model.CanalStatus; 7 | import com.alibaba.otter.canal.parse.CanalEventParser; 8 | import com.alibaba.otter.canal.parse.inbound.AbstractEventParser; 9 | import com.alibaba.otter.canal.parse.index.CanalLogPositionManager; 10 | import com.alibaba.otter.canal.protocol.position.LogPosition; 11 | import org.fly.core.database.SqlUtils; 12 | 13 | import org.fly.core.text.regex.RegexUtils; 14 | import org.fly.sync.contract.AbstractLifeCycle; 15 | import org.fly.sync.setting.BinLog; 16 | import org.fly.sync.setting.River; 17 | import org.fly.sync.setting.Setting; 18 | import org.slf4j.Logger; 19 | import org.slf4j.LoggerFactory; 20 | 21 | import java.net.InetSocketAddress; 22 | import java.util.Date; 23 | import java.util.StringJoiner; 24 | import java.util.concurrent.atomic.AtomicLong; 25 | 26 | public class Server extends AbstractLifeCycle { 27 | 28 | public final static Logger logger = LoggerFactory.getLogger(Server.class); 29 | private CanalInstanceWithManager canalInstance; 30 | private final static AtomicLong SERVER_ID = new AtomicLong(123); 31 | private River.Database database; 32 | private Canal canal; 33 | private String filter; 34 | 35 | public Server(River river, River.Database database, BinLog.Position position) { 36 | this.database = database; 37 | 38 | canal = Config.build(river, database, position); 39 | filter = buildFilter(); 40 | 41 | canalInstance = new CanalInstanceWithManager(canal, filter); 42 | } 43 | 44 | public String buildFilter() 45 | 
{ 46 | StringBuilder sb = new StringBuilder(); 47 | sb.append(RegexUtils.preg_quote(database.schemaName)) 48 | .append("\\."); 49 | 50 | if (database.associates.size() > 1) 51 | { 52 | sb.append("("); 53 | StringJoiner sj = new StringJoiner("|"); 54 | 55 | for (String tableName: database.associates.keySet() 56 | ) { 57 | sj.add(RegexUtils.preg_quote(tableName)); 58 | } 59 | sb.append(sj.toString()) 60 | .append(")"); 61 | } else { 62 | sb.append(database.associates.keySet().toArray()[0]); 63 | } 64 | 65 | return sb.toString(); 66 | } 67 | 68 | public String getFilter() { 69 | return filter; 70 | } 71 | 72 | public CanalInstanceWithManager getCanalInstance() { 73 | return canalInstance; 74 | } 75 | 76 | public BinLog.Position getBinLogPosition() 77 | { 78 | CanalEventParser eventParser = canalInstance.getEventParser(); 79 | if (!(eventParser instanceof AbstractEventParser)) 80 | return null; 81 | 82 | CanalLogPositionManager canalLogPositionManager = ((AbstractEventParser)eventParser).getLogPositionManager(); 83 | LogPosition logPosition = canalLogPositionManager.getLatestIndexBy(database.schemaName); 84 | 85 | return logPosition != null ? 
BinLog.Position.create(logPosition) : null; 86 | } 87 | 88 | 89 | public River.Database getDatabase() { 90 | return database; 91 | } 92 | 93 | public Canal getCanal() { 94 | return canal; 95 | } 96 | 97 | public CanalParameter getCanalParameter() 98 | { 99 | return canal.getCanalParameter(); 100 | } 101 | 102 | @Override 103 | public void start() 104 | { 105 | super.start(); 106 | 107 | canalInstance.start(); 108 | } 109 | 110 | public void stop() 111 | { 112 | super.stop(); 113 | 114 | canalInstance.stop(); 115 | } 116 | 117 | private static class Config { 118 | 119 | public static Canal build(River river, River.Database database, BinLog.Position position) 120 | { 121 | Canal canal = new Canal(); 122 | 123 | canal.setId(SERVER_ID.incrementAndGet()); 124 | canal.setGmtCreate(new Date()); 125 | canal.setGmtModified(new Date()); 126 | canal.setName(database.schemaName); 127 | canal.setStatus(CanalStatus.STOP); 128 | canal.setCanalParameter(buildCanalParameter(river, database, position)); 129 | 130 | return canal; 131 | } 132 | 133 | private static CanalParameter buildCanalParameter(River river, River.Database database, BinLog.Position position) 134 | { 135 | CanalParameter parameter = new CanalParameter(); 136 | 137 | parameter.setCanalId(SERVER_ID.incrementAndGet()); 138 | 139 | parameter.setStorageMode(CanalParameter.StorageMode.MEMORY); 140 | // 以下参数后续版本实现 141 | parameter.setFileStorageDirectory(Setting.getDataDir().getAbsolutePath()); 142 | parameter.setFileStorageStoreCount(1000); 143 | parameter.setFileStorageRollverCount(1000); 144 | parameter.setFileStoragePercentThresold(98); 145 | 146 | parameter.setMasterAddress(new InetSocketAddress(river.my.host, river.my.port)); 147 | parameter.setMasterLogfileName(position.name); 148 | parameter.setMasterLogfileOffest(position.position); 149 | parameter.setMasterTimestamp(System.currentTimeMillis() / 1_000); 150 | parameter.setMasterUsername(river.my.user); 151 | parameter.setMasterPassword(river.my.password); 152 | 
153 | parameter.setSlaveId(SERVER_ID.incrementAndGet()); 154 | parameter.setConnectionCharset(river.charset.equalsIgnoreCase("utf8") ? "UTF-8" : river.charset); 155 | parameter.setDetectingSQL(SqlUtils.HEARTBEAT); 156 | parameter.setDefaultDatabaseName(database.schemaName); 157 | 158 | 159 | parameter.setIndexMode(CanalParameter.IndexMode.MEMORY); 160 | parameter.setDdlIsolation(true); 161 | 162 | return parameter; 163 | } 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/relation/RelationRecords.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql.relation; 2 | 3 | import org.fly.sync.contract.DbFactory; 4 | import org.fly.sync.exception.RelationException; 5 | import org.fly.sync.mysql.model.Record; 6 | import org.fly.sync.mysql.model.Records; 7 | import org.fly.sync.setting.River; 8 | import com.sun.istack.NotNull; 9 | import io.reactivex.annotations.Nullable; 10 | 11 | import java.util.*; 12 | import java.util.stream.Collectors; 13 | 14 | public class RelationRecords { 15 | 16 | private String tableName; 17 | private River.Associate associate = null; 18 | private Records records = new Records(); 19 | private Map loadedRelations = new HashMap<>(); 20 | private DbFactory dbFactory; 21 | 22 | public RelationRecords(DbFactory dbFactory, String tableName) { 23 | this.dbFactory = dbFactory; 24 | this.tableName = tableName; 25 | } 26 | 27 | public RelationRecords(DbFactory dbFactory, String tableName, River.Associate associate) { 28 | this.dbFactory = dbFactory; 29 | this.tableName = tableName; 30 | this.associate = associate; 31 | } 32 | 33 | void addRecord(Record record) 34 | { 35 | records.add(record); 36 | } 37 | 38 | private River.Table getRiverTable(String tableName) 39 | { 40 | return dbFactory.getRiverDatabase().getTable(tableName); 41 | } 42 | 43 | public void relate() 44 | { 45 | if (associate == null) 46 | 
relateByWiths(); 47 | else 48 | relateByAssociate(associate); 49 | } 50 | 51 | public void relateBySubAssociate(River.Associate associate, int relationOffset) 52 | { 53 | if (records.isEmpty() || loadedRelations.containsKey(associate.relationKey) || relationOffset < 0 || relationOffset >= associate.nestedRelations.size()) 54 | return; 55 | 56 | Records lastRecords = records; 57 | 58 | for(int i = relationOffset; i < associate.nestedRelations.size(); ++i) 59 | lastRecords = generateOffsetRecords(associate, lastRecords, i); 60 | } 61 | 62 | public void relateByAssociate(River.Associate associate) 63 | { 64 | if (records.isEmpty()) 65 | return; 66 | 67 | relateBySubAssociate(associate, associate.findOffset(tableName)); 68 | } 69 | 70 | public void relateByWiths() 71 | { 72 | if (records.isEmpty()) 73 | return; 74 | 75 | River.Table table = getRiverTable(tableName); 76 | if (table == null) 77 | throw new RelationException("This Table ["+dbFactory.getRiverDatabase().schemaName+"."+tableName+"] is NOT a Syncable table."); 78 | 79 | for (String withName: table.getFullWithNames() 80 | ) { 81 | River.Associate associate = dbFactory.getRiverDatabase().findAssociate(withName); 82 | 83 | relateBySubAssociate(associate, 0); 84 | } 85 | 86 | } 87 | 88 | private Records generateOffsetRecords(River.Associate associate, @NotNull Records originalRecords, int relationOffset) 89 | { 90 | List relationKeys = associate.getRelationKeyList(); 91 | String key = River.makeRelationKey(associate.calledTable.tableName, relationKeys.subList(0, relationOffset + 1)); 92 | 93 | Records records = loadedRelations.get(key); 94 | 95 | if (records == null) 96 | { 97 | River.Relation relation = associate.nestedRelations.get(relationOffset); 98 | Records needRelateRecords = originalRecords.stream() 99 | .filter(record -> record.isModified(relation.local)) 100 | //.collect(Records::new, Records::add, Records::addAll); 101 | .collect(Collectors.toCollection(Records::new)); 102 | 103 | records = 
fillRecords( 104 | needRelateRecords, 105 | relation, 106 | getRelationRecords( 107 | getLocalValues(needRelateRecords, relation), 108 | relation), 109 | relationKeys.get(relationOffset) 110 | ); 111 | 112 | loadedRelations.put(key, records); 113 | } 114 | 115 | return records; 116 | } 117 | 118 | private Records fillRecords(@NotNull List originalRecords, River.Relation relation, Records relationRecords, String relationKey) 119 | { 120 | Record nullRecord = Record.createNull(relation.tableName, relation.getColumns()); 121 | nullRecord.setInserted(); 122 | 123 | Records records = new Records(); 124 | 125 | Record relationRecord; 126 | 127 | for (Record record: originalRecords 128 | ) { 129 | 130 | // If record is deleted, add a nullRecord to relations 131 | Object val = record.get(relation.local); 132 | 133 | if (val == null || relationRecords == null) { 134 | 135 | relationRecord = nullRecord; 136 | 137 | } else { 138 | 139 | relationRecord = relationRecords.find(relation.foreign, val); 140 | 141 | if (relationRecord == null) relationRecord = nullRecord; 142 | } 143 | 144 | records.addUnique(relationRecord); 145 | 146 | record.with(relationKey, relationRecord); 147 | } 148 | 149 | return records; 150 | } 151 | 152 | @Nullable 153 | private Records getRelationRecords(List localValues, River.Relation relation) 154 | { 155 | if (localValues == null || localValues.isEmpty()) 156 | return null; 157 | 158 | Records records = dbFactory.getMySql().getLocalQuery().queryIn(relation, relation.foreign, localValues); 159 | if (records != null && !records.isEmpty()) 160 | records.forEach(Record::setInserted); 161 | 162 | return records; 163 | } 164 | 165 | @Nullable 166 | private List getLocalValues(List originalRecords, River.Relation relation) 167 | { 168 | if (originalRecords == null || originalRecords.isEmpty()) return null; 169 | 170 | Set localValues = new HashSet<>(); 171 | 172 | for (Record record: originalRecords 173 | ) { 174 | // If record is deleted, return a null to 
localValues 175 | Object object = record.get(relation.local); 176 | if (object == null) 177 | continue; 178 | 179 | localValues.add(object.toString()); 180 | } 181 | 182 | return localValues.isEmpty() ? null : Arrays.asList(localValues.toArray(new String[0])); 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/setting/Setting.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.setting; 2 | 3 | import com.fasterxml.jackson.core.JsonParseException; 4 | import com.fasterxml.jackson.databind.JsonMappingException; 5 | import org.fly.core.io.IoUtils; 6 | import org.fly.core.text.json.Jsonable; 7 | import org.fly.sync.exception.ConfigException; 8 | import com.sun.istack.NotNull; 9 | import org.apache.logging.log4j.LogManager; 10 | import org.apache.logging.log4j.core.LoggerContext; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | import java.io.File; 15 | import java.io.IOException; 16 | import java.text.SimpleDateFormat; 17 | 18 | public class Setting { 19 | 20 | public static String ETC_PATH = null; 21 | 22 | static { 23 | String etcPath = System.getProperty("me.etc.path"); 24 | if (etcPath != null && etcPath.length() != 0) 25 | ETC_PATH = etcPath; 26 | else 27 | ETC_PATH = new File(System.getProperty("user.dir"), "etc").getAbsolutePath(); 28 | } 29 | 30 | public final static String RIVER_FILE = "river.json"; 31 | public final static String CONFIG_FILE = "config.json"; 32 | public final static String BINLOG_FILE = "binlog.json"; 33 | 34 | public final static Logger logger = LoggerFactory.getLogger(Setting.class); 35 | public static Config config; 36 | public static River river; 37 | public static BinLog binLog; 38 | 39 | public static File getEtcPath() 40 | { 41 | return new File(ETC_PATH); 42 | } 43 | 44 | public static File getEtcPath(@NotNull String filename) 45 | { 46 | return new File(getEtcPath(), 
filename); 47 | } 48 | 49 | public static File getEtcPath(@NotNull File file) 50 | { 51 | return file.isAbsolute() ? file : getEtcPath(file.getPath()); 52 | } 53 | 54 | public static Config getConfig() throws Exception 55 | { 56 | return getConfig(getEtcPath(CONFIG_FILE)); 57 | } 58 | 59 | public static Config getConfig(File file) throws Exception 60 | { 61 | if (Setting.config != null) 62 | return Setting.config; 63 | 64 | Config config; 65 | 66 | try { 67 | config = Jsonable.fromJson(Config.class, file); 68 | } catch (Exception e) { 69 | throw new ConfigException("\"" + CONFIG_FILE + "\" JSON format ERROR.", e); 70 | } 71 | 72 | if (config.logDir == null) { 73 | config.logDir = getEtcPath("logs"); 74 | logger.warn("Invaid [log_dir] in \"{}\", Redirect path to default.", CONFIG_FILE); 75 | } 76 | 77 | config.logDir = getEtcPath(config.logDir); 78 | System.setProperty("me.log.path", config.logDir.getAbsolutePath()); 79 | 80 | // locate to user's log4j2.xml 81 | LoggerContext context = (org.apache.logging.log4j.core.LoggerContext) LogManager.getContext(false); 82 | context.setConfigLocation(getEtcPath("log4j2.xml").toURI()); 83 | 84 | if (config.dataDir == null) 85 | { 86 | config.dataDir = getEtcPath("data"); 87 | logger.warn("Invaid [data_dir] in \"{}\", path to default.", CONFIG_FILE); 88 | } 89 | 90 | config.dataDir = getEtcPath(config.dataDir); 91 | System.setProperty("me.data.path", config.dataDir.getAbsolutePath()); 92 | 93 | 94 | if (config.bulkSize < 10) 95 | { 96 | config.bulkSize = 10; 97 | logger.warn("[bulk_size] in \"{}\" less than 10, auto change to 10.", CONFIG_FILE); 98 | } 99 | 100 | if (config.flushBulkTime < 200) 101 | { 102 | config.flushBulkTime = 200; 103 | logger.warn("[flush_bulk_time] in \"{}\" is less than 200, auto change to 200.", CONFIG_FILE); 104 | } 105 | 106 | logger.info("The [data] directory locate to {}", config.dataDir.getAbsolutePath()); 107 | logger.info("The [log] directory locate to {}", config.logDir.getAbsolutePath()); 108 
| 109 | try { 110 | config.dataDir.mkdirs(); 111 | config.logDir.mkdirs(); 112 | } catch (Exception e) 113 | { 114 | throw new ConfigException(e.getMessage(), e); 115 | } 116 | 117 | Setting.config = config; 118 | 119 | logger.info("Loaded {}.", CONFIG_FILE); 120 | return config; 121 | } 122 | 123 | public static River getRiver() throws ConfigException 124 | { 125 | return getRiver(getEtcPath(RIVER_FILE)); 126 | } 127 | 128 | public static River getRiver(File file) throws ConfigException 129 | { 130 | if (Setting.river != null) 131 | return Setting.river; 132 | 133 | River river; 134 | try { 135 | 136 | river = Jsonable.fromJson(River.class, file); 137 | river.init(); 138 | } catch (Exception e) 139 | { 140 | throw new ConfigException("\"" + RIVER_FILE + "\" JSON format ERROR.", e); 141 | } 142 | 143 | Setting.river = river; 144 | return river; 145 | } 146 | 147 | public static File getDataDir() 148 | { 149 | return config.dataDir; 150 | } 151 | 152 | public static BinLog getBinLog() 153 | { 154 | return getBinLog(new File(getDataDir(), BINLOG_FILE)); 155 | } 156 | 157 | public static BinLog getBinLog(File file) 158 | { 159 | if (Setting.binLog != null) 160 | return Setting.binLog; 161 | 162 | BinLog log; 163 | try { 164 | log = Jsonable.fromJson(BinLog.class, file); 165 | } 166 | catch (JsonParseException | JsonMappingException e) 167 | { 168 | logger.error(e.getMessage(), e); 169 | log = new BinLog(); 170 | } catch (IOException e) 171 | { 172 | log = new BinLog(); 173 | } 174 | 175 | Setting.binLog = log; 176 | return log; 177 | } 178 | 179 | public static void saveBinLog() throws Exception 180 | { 181 | synchronized (BinLog.class) 182 | { 183 | File file = new File(config.dataDir, BINLOG_FILE); 184 | 185 | String backupJson = null; 186 | 187 | if (!file.exists()) 188 | file.createNewFile(); 189 | else 190 | backupJson = IoUtils.readUtf8(file); 191 | 192 | File backup = new File(file.getAbsolutePath() + "."+ new 
SimpleDateFormat("yyyy-MM-dd").format(System.currentTimeMillis()) +".log"); 193 | 194 | if (!backup.exists()) 195 | backup.createNewFile(); 196 | 197 | if (backupJson != null) 198 | IoUtils.appendUtf8(backup, backupJson + "\n"); 199 | 200 | String json = binLog.toJson(); 201 | if (json == null || json.length() <= 10 || json.charAt(0) != '{' || json.charAt(json.length() - 1) != '}') { 202 | logger.error("BinLog JSON format error."); 203 | return; 204 | } 205 | 206 | IoUtils.writeUtf8(file, json); 207 | 208 | logger.info("BinLog write success."); 209 | } 210 | } 211 | 212 | public static void readSettings() throws Exception 213 | { 214 | getConfig(); 215 | getRiver(); 216 | getBinLog(); 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/es/Es.java: -------------------------------------------------------------------------------- 1 | package org.fly.sync.es; 2 | 3 | import com.fasterxml.jackson.databind.ObjectMapper; 4 | import com.fasterxml.jackson.databind.module.SimpleModule; 5 | import org.apache.http.HttpHost; 6 | import org.apache.http.auth.AuthScope; 7 | import org.apache.http.auth.UsernamePasswordCredentials; 8 | import org.apache.http.client.CredentialsProvider; 9 | import org.apache.http.impl.client.BasicCredentialsProvider; 10 | import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; 11 | import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; 12 | import org.elasticsearch.action.admin.indices.get.GetIndexRequest; 13 | import org.elasticsearch.client.RequestOptions; 14 | import org.elasticsearch.client.RestClient; 15 | import org.elasticsearch.client.RestClientBuilder; 16 | import org.elasticsearch.client.RestHighLevelClient; 17 | import org.elasticsearch.common.xcontent.XContentType; 18 | import org.fly.core.io.IoUtils; 19 | import org.fly.core.text.json.Jsonable; 20 | import org.fly.sync.contract.AbstractConnector; 21 | import 
org.fly.sync.exception.OutOfRetryException; 22 | import org.fly.sync.mysql.type.MySQLJson; 23 | import org.fly.sync.mysql.type.MySQLJsonSerializer; 24 | import org.fly.sync.setting.River; 25 | import org.fly.sync.setting.Setting; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | import java.io.File; 30 | import java.io.IOException; 31 | import java.text.DateFormat; 32 | import java.text.SimpleDateFormat; 33 | import java.util.Map; 34 | import java.util.TimeZone; 35 | 36 | public class Es { 37 | 38 | public Connector connector; 39 | private River river; 40 | public final static Logger logger = LoggerFactory.getLogger(Es.class); 41 | 42 | public Es(River river, boolean autoReconnect) { 43 | this.river = river; 44 | this.connector = new Connector(river, autoReconnect); 45 | } 46 | 47 | public boolean connect() throws Exception 48 | { 49 | return connector.connect(); 50 | } 51 | 52 | public void close() throws Exception 53 | { 54 | connector.close(); 55 | } 56 | 57 | public void waitForConnected(int count, int sleep) 58 | { 59 | connector.waitForConnected(count, sleep); 60 | } 61 | 62 | 63 | public RestHighLevelClient getClient() throws OutOfRetryException 64 | { 65 | if(!connector.isConnected()) 66 | waitForConnected(10, 5000); 67 | 68 | return connector.getClient(); 69 | } 70 | 71 | public RestClient getRestClient() throws OutOfRetryException 72 | { 73 | if (!connector.isConnected()) 74 | waitForConnected(10, 5000); 75 | 76 | return this.connector.getRestClient(); 77 | } 78 | 79 | public void createIndex(River.Table table) throws OutOfRetryException, IOException 80 | { 81 | createIndex(table, false); 82 | } 83 | 84 | public void createIndex(River.Table table, boolean force) throws OutOfRetryException, IOException 85 | { 86 | GetIndexRequest getIndexRequest = new GetIndexRequest(); 87 | getIndexRequest.indices(table.index); 88 | RestHighLevelClient client = getClient(); 89 | boolean existed = client.indices().exists(getIndexRequest, 
RequestOptions.DEFAULT); 90 | if (existed && force) 91 | { 92 | DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(table.index); 93 | client.indices().delete(deleteIndexRequest, RequestOptions.DEFAULT); 94 | } else if (existed) 95 | { 96 | logger.error("Elastic Index [{}] Exists, Skip created.", table.index); 97 | return; 98 | } 99 | 100 | CreateIndexRequest createIndexRequest = new CreateIndexRequest(table.index); 101 | //With Mapping 102 | if (table.template != null && !table.template.isEmpty()) 103 | { 104 | String json = IoUtils.readJson(Setting.getEtcPath(new File(table.template))); 105 | createIndexRequest.source(json, XContentType.JSON); 106 | } 107 | 108 | client.indices().create(createIndexRequest, RequestOptions.DEFAULT); 109 | 110 | } 111 | 112 | public void createIndices(River.Database database) throws OutOfRetryException, IOException 113 | { 114 | createIndices(database, false); 115 | } 116 | 117 | public void createIndices(River.Database database, boolean force) throws OutOfRetryException, IOException 118 | { 119 | for (Map.Entry entry: database.tables.entrySet() 120 | ) { 121 | River.Table table = entry.getValue(); 122 | if (!table.sync.created || table.isDynamicIndexName()) continue; 123 | 124 | createIndex(table, force); 125 | } 126 | } 127 | 128 | public ObjectMapper getJsonMapper() { 129 | ObjectMapper mapper = Jsonable.Builder.makeAdapter(); 130 | 131 | // add MySQLJson Type 132 | SimpleModule module = new SimpleModule(); 133 | module.addSerializer(MySQLJson.class, new MySQLJsonSerializer()); 134 | mapper.registerModule(module); 135 | 136 | DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); 137 | dateFormat.setTimeZone(TimeZone.getTimeZone(river.es.timeZone)); 138 | mapper.setDateFormat(dateFormat); 139 | 140 | return mapper; 141 | } 142 | 143 | public class Connector extends AbstractConnector { 144 | 145 | private RestClient restClient; 146 | private RestHighLevelClient client; 147 | 148 | public 
Connector(River river, boolean autoReconnect) { 149 | super(river, autoReconnect); 150 | } 151 | 152 | public RestHighLevelClient getClient() 153 | { 154 | return client; 155 | } 156 | 157 | public RestClient getRestClient() { 158 | return restClient; 159 | } 160 | 161 | protected void doConnecting() 162 | { 163 | if (null != client) 164 | return; 165 | 166 | RestClientBuilder builder = RestClient.builder(new HttpHost(river.es.host, river.es.port)); 167 | 168 | if (river.es.user != null && river.es.user.length() != 0) 169 | { 170 | final CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); 171 | credentialsProvider.setCredentials(AuthScope.ANY, 172 | new UsernamePasswordCredentials(river.es.user, river.es.password != null ? river.es.password : "")); 173 | 174 | builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider)); 175 | } 176 | 177 | client = new RestHighLevelClient(builder); 178 | restClient = client.getLowLevelClient(); 179 | } 180 | 181 | protected void doReconnect() throws Exception 182 | { 183 | client = null; 184 | connect(); 185 | } 186 | 187 | protected void doClose() throws Exception 188 | { 189 | if (null != client) { 190 | client.close(); 191 | restClient.close(); 192 | } 193 | 194 | client = null; 195 | } 196 | 197 | public void doHeartbeat() throws Exception 198 | { 199 | if (!client.ping(RequestOptions.DEFAULT)) 200 | throw new IOException("Ping ElasticSearch Failed."); 201 | } 202 | } 203 | 204 | 205 | } 206 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | org.fly.sync 8 | mysql_es 9 | 1.0.0-beta1 10 | 11 | 12 | 13 | Fly 14 | https://github.com/fly-studio 15 | 8 16 | 17 | 18 | 19 | 20 | 21 | MIT License 22 | https://opensource.org/licenses/MIT 23 | 24 | 25 | 26 | 27 | 28 | org.fly 29 | core 30 | 1.0.0 31 | 32 
| 33 | com.sun.istack 34 | istack-commons-runtime 35 | 3.0.5 36 | 37 | 38 | mysql 39 | mysql-connector-java 40 | 8.0.11 41 | 42 | 43 | com.google.guava 44 | guava 45 | 18.0 46 | 47 | 48 | com.alibaba.otter 49 | canal.client 50 | 1.1.3 51 | 52 | 53 | ch.qos.logback 54 | logback-classic 55 | 56 | 57 | com.google.guava 58 | guava 59 | 60 | 61 | 62 | 63 | com.alibaba.otter 64 | canal.server 65 | 1.1.3 66 | 67 | 68 | ch.qos.logback 69 | logback-classic 70 | 71 | 72 | com.google.guava 73 | guava 74 | 75 | 76 | 77 | 78 | org.apache.logging.log4j 79 | log4j-core 80 | 2.11.2 81 | 82 | 83 | org.apache.logging.log4j 84 | log4j-slf4j-impl 85 | 2.11.2 86 | 87 | 88 | org.jdbi 89 | jdbi3-core 90 | 3.3.0 91 | 92 | 93 | org.slf4j 94 | slf4j-api 95 | 96 | 97 | 98 | 99 | org.jdbi 100 | jdbi3-sqlobject 101 | 102 | 103 | io.reactivex.rxjava2 104 | rxjava 105 | 2.2.0 106 | 107 | 108 | com.github.akarnokd 109 | rxjava2-extensions 110 | 0.20.0 111 | 112 | 113 | org.elasticsearch.client 114 | elasticsearch-rest-client 115 | 6.4.3 116 | 117 | 118 | org.elasticsearch.client 119 | elasticsearch-rest-high-level-client 120 | 6.4.3 121 | 122 | 123 | com.fasterxml.jackson.core 124 | jackson-databind 125 | 2.9.7 126 | 127 | 128 | com.google.protobuf 129 | protobuf-java 130 | 3.6.1 131 | 132 | 133 | 134 | 135 | compile 136 | 137 | 138 | maven-compiler-plugin 139 | 140 | 8 141 | 8 142 | 143 | 144 | 145 | maven-dependency-plugin 146 | 147 | 148 | copy-dependencies 149 | prepare-package 150 | 151 | copy-dependencies 152 | 153 | 154 | ${project.build.directory}/lib 155 | false 156 | false 157 | true 158 | runtime 159 | 160 | 161 | 162 | 163 | 164 | maven-jar-plugin 165 | 166 | target/classes/ 167 | 168 | false 169 | 170 | org.fly.sync.Main 171 | true 172 | lib/ 173 | 174 | 175 | 176 | 177 | 178 | maven-assembly-plugin 179 | 180 | 181 | src/assembly.xml 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | org.jdbi 192 | jdbi3-bom 193 | pom 194 | 3.3.0 195 | import 196 | 197 | 198 | 199 | 
package org.fly.sync.executor;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.fly.sync.action.ReportAction;
import org.fly.sync.canal.Canal;
import org.fly.sync.contract.AbstractAction;
import org.fly.sync.contract.AbstractLifeCycle;
import org.fly.sync.contract.AbstractRecordAction;
import org.fly.sync.contract.DbFactory;
import org.fly.sync.es.Es;
import org.fly.sync.exception.EsFatalException;
import org.fly.sync.mysql.Dumper;
import org.fly.sync.mysql.MySql;
import org.fly.sync.mysql.relation.Relation;
import org.fly.sync.setting.River;
import org.fly.sync.setting.Setting;
import io.reactivex.Observable;
import io.reactivex.Scheduler;
import io.reactivex.functions.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * Per-database action source: runs an initial mysqldump phase, then switches
 * to canal binlog streaming, and shapes the resulting action stream
 * (buffering, grouping by action type, relation loading) for the Consumer.
 *
 * NOTE(review): generic type parameters in this file were stripped by the
 * HTML dump this source was recovered from; they have been restored below —
 * confirm against version control.
 */
public class Emiter extends AbstractLifeCycle implements DbFactory {

    public final static Logger logger = LoggerFactory.getLogger(Emiter.class);
    private Executor executor;
    private River.Database database;
    private Canal canal = null;
    private Dumper dumper = null;

    public Emiter(Executor executor, River.Database database) {
        this.executor = executor;
        this.database = database;
    }

    @Override
    public void start() {
        super.start();
    }

    @Override
    public void stop() {
        super.stop();

        if (null != dumper)
            dumper.stop();

        if (null != canal)
            canal.stop();
    }

    @Override
    public Es getEs()
    {
        return executor.getEs();
    }

    @Override
    public MySql getMySql()
    {
        return executor.getMySql();
    }

    @Override
    public River.Database getRiverDatabase() {
        return database;
    }

    @Override
    public Statistic getStatistic() {
        return executor.getStatistic();
    }

    @Override
    public ObjectMapper getJsonMapper() {
        return executor.getJsonMapper();
    }

    public Executor getExecutor() {
        return executor;
    }

    /**
     * Build the full pipeline: (dump then canal) merged with a 15s report
     * ticker, buffered into bulks, split into same-type groups, enriched with
     * relations, and finally passed through column aliasing.
     */
    public Observable<List<AbstractAction>> buildObservable(Scheduler scheduler) {

        createIndices();

        return Observable.merge(
                Observable.concat(
                        runDumper(scheduler),
                        runCanal(scheduler)
                ),
                Observable.interval(15, TimeUnit.SECONDS)
                        .subscribeOn(scheduler)
                        .observeOn(scheduler)
                        .map(v -> new ReportAction())
            )
            .takeWhile(value -> executor.isRunning())

            .subscribeOn(scheduler)
            .observeOn(scheduler)
            .buffer(Setting.config.flushBulkTime, TimeUnit.MILLISECONDS, scheduler, Setting.config.bulkSize)

            .observeOn(scheduler)
            .concatMap(new WithGroup())

            .observeOn(scheduler)
            .map(new WithRelations())

            .observeOn(scheduler)
            .map(new AliasColumns())
            ;
    }

    /** Create ES indices once, only on a fresh start (no saved binlog position). */
    private void createIndices()
    {
        if (!Setting.binLog.isEmpty(database.schemaName))
            return;

        try {
            this.getEs().createIndices(database);
        } catch (IOException e) {
            throw new EsFatalException("Create Indices of DB: \"" + database.schemaName + "\" Failed.", e);
        }
    }

    /** Full-dump phase; skipped when a binlog position already exists. */
    private Observable<AbstractAction> runDumper(Scheduler scheduler)
    {
        if (!Setting.binLog.isEmpty(database.schemaName))
            return Observable.empty();

        dumper = new Dumper(Setting.config, Setting.river, this);
        dumper.start();

        return dumper.run(scheduler)
                .doOnComplete(() -> {
                    dumper.stop();
                    dumper = null;
                });
    }

    /** Binlog phase: waits until the dumper has recorded a position, then streams from canal. */
    private Observable<AbstractAction> runCanal(Scheduler scheduler) {

        return Observable.<String>create(emitter -> {
            // Poll until the dump phase persisted a binlog position for this schema.
            while (Setting.binLog.isEmpty(database.schemaName))
            {
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e)
                {
                    emitter.onComplete();
                    return;
                }
            }

            emitter.onNext("Complete");
            emitter.onComplete();

        }).flatMap(nothing -> {

            if (Setting.binLog.isEmpty(database.schemaName))
                return Observable.empty();

            canal = new Canal(Setting.config, Setting.river, Setting.binLog.get(getRiverDatabase().schemaName), this);
            canal.start();

            return canal.run(scheduler);
        });
    }

    /**
     * Splits one buffered batch into consecutive runs that share the same
     * {@link AbstractAction#getGroup()}, preserving order.
     */
    public class WithGroup implements Function<List<AbstractAction>, Observable<List<AbstractAction>>>
    {
        private String lastGroup;
        private List<List<AbstractAction>> lists = new ArrayList<>();
        private List<AbstractAction> actions = new ArrayList<>();

        public void add(AbstractAction action)
        {
            // A group change flushes the current run before starting a new one.
            if (!action.getGroup().equals(lastGroup))
                emit();

            lastGroup = action.getGroup();
            actions.add(action);
        }

        public void emit()
        {
            if (actions.isEmpty())
                return;

            lists.add(Arrays.asList(actions.toArray(new AbstractAction[0])));
            lastGroup = null;
            actions.clear();
        }

        @Override
        public synchronized Observable<List<AbstractAction>> apply(List<AbstractAction> actionList) throws Exception {
            if (actionList.isEmpty())
                return Observable.empty();

            lists.clear();
            actions.clear();

            for (AbstractAction action : actionList) {
                add(action);
            }

            emit();

            List<Integer> counts = lists.stream().map(List::size).collect(Collectors.toList());

            logger.trace("WithGroup Total: {}, split to {}", actionList.size(), counts);

            // FIX: emit a defensive copy — the old code returned fromIterable(lists)
            // over the live member list, which the next apply() call clears while
            // downstream may still be iterating the previous emission.
            return Observable.fromIterable(new ArrayList<>(lists));
        }
    }

    /** Loads related records for record-action groups; other groups pass through untouched. */
    public class WithRelations implements Function<List<AbstractAction>, List<AbstractAction>> {

        @Override
        public List<AbstractAction> apply(List<AbstractAction> actionList) throws Exception {

            long nanoTime = System.nanoTime();

            // Groups are homogeneous, so checking the first element suffices.
            if (actionList.isEmpty() || !(actionList.get(0) instanceof AbstractRecordAction)) {
                return actionList;
            }

            Relation relation = new Relation(Emiter.this, actionList);
            relation.load();

            logger.info("WithRelations: {}, use {} ms", actionList.size(), (System.nanoTime() - nanoTime) / 1000_000.0);

            return actionList;
        }
    }

    /** Placeholder stage for column aliasing; currently a pass-through. */
    public class AliasColumns implements Function<List<AbstractAction>, List<AbstractAction>> {

        @Override
        public List<AbstractAction> apply(List<AbstractAction> actionList) throws Exception {
            // Todo: apply configured column aliases before indexing.
            return actionList;
        }
    }

}
{ 27 | 28 | public final static Logger logger = LoggerFactory.getLogger(Main.class); 29 | 30 | public enum Action { 31 | CONNECT, 32 | VALIDATE, 33 | LAUNCH, 34 | STOP, 35 | EXIT, 36 | } 37 | private Es es; 38 | private MySql mySql; 39 | private static AtomicBoolean running = new AtomicBoolean(false); 40 | private ExecutorService threadPool; 41 | private Statistic statistic = new Statistic(); 42 | private List emiters = new ArrayList<>(); 43 | private List consumers = new ArrayList<>(); 44 | private Thread currentThread = null; 45 | private LinkedList queue = new LinkedList<>(); 46 | 47 | public Executor() 48 | { 49 | threadPool = Executors.newFixedThreadPool(Setting.river.databases.size() * 10, new ThreadFactoryBuilder().setNameFormat("SubscribeOn-%d").build()); 50 | es = new Es(Setting.river, true); 51 | mySql = new MySql(Setting.river, true); 52 | } 53 | 54 | public void run() throws Exception 55 | { 56 | currentThread = Thread.currentThread(); 57 | 58 | while(true) 59 | { 60 | Action action = queue.isEmpty() ? 
null : queue.pop(); 61 | 62 | if (action != null) 63 | { 64 | try { 65 | switch (action) 66 | { 67 | case EXIT: 68 | return; 69 | case LAUNCH: 70 | doLaunch(); 71 | break; 72 | case STOP: 73 | // stop 不应该抛出错误 74 | doStop(); 75 | break; 76 | case CONNECT: 77 | doConnect(); 78 | break; 79 | case VALIDATE: 80 | doValidate(); 81 | break; 82 | } 83 | } catch (Exception e) 84 | { 85 | // 此处是在launch之前的错误 86 | // 因为之后的错误都被RxJava统一捕获,并发送给Consumer 87 | throwException(e); 88 | } 89 | } 90 | 91 | Thread.sleep(100); 92 | } 93 | } 94 | 95 | private synchronized void trigger(Action action) 96 | { 97 | queue.addLast(action); 98 | } 99 | 100 | public synchronized void start() 101 | { 102 | queue.clear(); 103 | trigger(Executor.Action.CONNECT); 104 | trigger(Executor.Action.VALIDATE); 105 | trigger(Executor.Action.LAUNCH); 106 | } 107 | 108 | /** 109 | * 当run被调取之后,本线程(比如Main中调取就是主线程)会被阻塞直到插入EXIT到队列, 110 | * 如果在相同进程(比如Main中)调取stop,会发现根本无法响应,因为线程已经被阻塞根本无法执行后续代码。 111 | * 所以需要另起的线程调取stop,比如:Main中使用的ShutdownHook的线程调取, Observeable中使用Consumer(也在其他线程) 112 | */ 113 | public synchronized void stop() 114 | { 115 | queue.clear(); 116 | trigger(Action.STOP); 117 | trigger(Action.EXIT); 118 | } 119 | 120 | private void doConnect() throws Exception 121 | { 122 | mySql.connect(); 123 | logger.info("Connected to MySQL."); 124 | 125 | es.connect(); 126 | logger.info("Connect to Elastic search."); 127 | } 128 | 129 | private void doValidate() throws Exception 130 | { 131 | mySql.validate(); 132 | } 133 | 134 | private void doLaunch() 135 | { 136 | if (running.get()) 137 | throw new RejectedExecutionException("Application is running."); 138 | 139 | running.set(true); 140 | 141 | Scheduler scheduler = Schedulers.from(threadPool); 142 | 143 | for (River.Database database: Setting.river.databases 144 | ) { 145 | Emiter emiter = new Emiter(this, database); 146 | Consumer consumer = new Consumer(this, database); 147 | 148 | emiters.add(emiter); 149 | consumers.add(consumer); 150 | emiter.start(); 151 | 
consumer.start(); 152 | 153 | emiter.buildObservable(scheduler) 154 | 155 | //.subscribeOn(scheduler) 156 | .observeOn(scheduler) 157 | .subscribe(consumer); 158 | } 159 | } 160 | 161 | private void doStop() { 162 | 163 | boolean isLog4jEnable = false; 164 | 165 | if( LogManager.getContext() instanceof LoggerContext) 166 | isLog4jEnable = ((LoggerContext)LogManager.getContext()).isStarted(); 167 | 168 | if (!isLog4jEnable) 169 | System.out.println("try to stop."); 170 | else 171 | logger.trace("try to stop."); 172 | 173 | threadPool.shutdown(); 174 | 175 | if (running.get()) 176 | { 177 | running.set(false); 178 | 179 | if (!isLog4jEnable) 180 | System.out.println("stop all consumers."); 181 | else 182 | logger.trace("stop all consumers."); 183 | consumers.forEach(Consumer::stop); 184 | 185 | if (!isLog4jEnable) 186 | System.out.println("stop all emiters."); 187 | else 188 | logger.trace("stop all emiters."); 189 | emiters.forEach(Emiter::stop); 190 | } 191 | 192 | running.set(false); 193 | 194 | try { 195 | if (!isLog4jEnable) 196 | System.out.println("waiting threads stop."); 197 | else 198 | logger.trace("waiting threads stop."); 199 | 200 | while(!threadPool.awaitTermination(500, TimeUnit.MILLISECONDS)) 201 | { 202 | if (!isLog4jEnable) 203 | System.out.println("LOOP Waiting Rx-threads stop."); 204 | else 205 | logger.trace("LOOP Waiting Rx-threads stop."); 206 | } 207 | } catch (InterruptedException e) 208 | { 209 | 210 | } 211 | 212 | try { 213 | if (!isLog4jEnable) 214 | System.out.println("close mysql."); 215 | else 216 | logger.trace("close mysql."); 217 | 218 | mySql.close(); 219 | 220 | if (!isLog4jEnable) 221 | System.out.println("close elastic."); 222 | else 223 | logger.trace("close elastic."); 224 | 225 | es.close(); 226 | } catch (Exception e) 227 | { 228 | 229 | } 230 | 231 | if (!isLog4jEnable) 232 | System.out.println("all stoped."); 233 | else 234 | logger.trace("all stoped."); 235 | } 236 | 237 | public synchronized void 
throwException(Throwable e) 238 | { 239 | /*Throwable throwable = e; 240 | while(throwable != null) 241 | { 242 | if (throwable instanceof InterruptedException || throwable instanceof UndeliverableException) 243 | { 244 | logger.warn("Get uncaught {} from {}, stop", e.getClass(), e.getCause().getClass()); 245 | stop(); 246 | return; 247 | } 248 | 249 | throwable = throwable.getCause(); 250 | } 251 | 252 | logger.error(e.getMessage(), e); 253 | 254 | if (e instanceof FatalException) 255 | { 256 | stop(); 257 | }*/ 258 | 259 | logger.error(e.getMessage(), e); 260 | stop(); 261 | } 262 | 263 | public Es getEs() { 264 | return es; 265 | } 266 | 267 | public MySql getMySql() { 268 | return mySql; 269 | } 270 | 271 | public Statistic getStatistic() { 272 | return statistic; 273 | } 274 | 275 | public ObjectMapper getJsonMapper() { 276 | 277 | return es.getJsonMapper(); 278 | } 279 | 280 | public static boolean isRunning() { 281 | return running.get(); 282 | } 283 | 284 | 285 | } 286 | -------------------------------------------------------------------------------- /.idea/uiDesigner.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | -------------------------------------------------------------------------------- /src/main/java/org/fly/sync/mysql/model/Record.java: 
package org.fly.sync.mysql.model;

import com.alibaba.otter.canal.protocol.CanalEntry;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.fly.sync.setting.River;
import com.google.common.collect.Sets;
import com.sun.istack.NotNull;
import com.sun.istack.Nullable;

import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * One MySQL row in transit: column values, the binlog event type that produced
 * it (INSERT/UPDATE/DELETE), the set of modified columns, and any related
 * records attached by the relation loader.
 *
 * NOTE(review): generic type parameters in this file were stripped by the
 * HTML dump this source was recovered from; they have been restored below —
 * confirm against version control.
 */
public class Record {

    public String table;
    // Column name -> value (values may be null).
    private Map<String, Object> items;
    // Relation key -> related record, filled in by Relation.load().
    private Map<String, Record> relations = new HashMap<>();
    private Set<String> modifiedColumns = new HashSet<>();
    private CanalEntry.EventType eventType = null;

    // NOTE(review): currently unused in this class — presumably for dynamic
    // (date-suffixed) index names; verify before removing.
    private static final String atFormat = "yyyy.MM.dd";

    public Record(String table, Map<String, Object> items) {
        this.table = table;
        this.items = items;
    }

    /**
     * Build a record by zipping column names with values.
     * @throws ArrayIndexOutOfBoundsException when the two lists differ in size
     */
    public static Record create(String table, List<String> columns, List<Object> itemList) {

        if (columns.size() != itemList.size())
            throw new ArrayIndexOutOfBoundsException("columnNames's size MUST equal to valueList's size.");

        Map<String, Object> kv = new HashMap<>();
        Record record = new Record(table, kv);

        for (int i = 0; i < columns.size(); i++)
            record.set(columns.get(i), itemList.get(i));

        return record;
    }

    public static Record create(String table, Map<String, Object> kv)
    {
        return new Record(table, kv);
    }

    /** Build a record whose every column is present but null. */
    public static Record createNull(String table, List<String> columns)
    {
        Map<String, Object> kv = new HashMap<>();
        Record record = new Record(table, kv);

        for (int i = 0; i < columns.size(); i++)
            record.set(columns.get(i), null);

        return record;
    }

    public Record setEventType(CanalEntry.EventType eventType)
    {
        this.eventType = eventType;

        return this;
    }

    /** Mark as INSERT; every column counts as modified. */
    public Record setInserted()
    {
        setAllModifiedColumns();

        return setEventType(CanalEntry.EventType.INSERT);
    }

    /** Mark as DELETE; every column counts as modified. */
    public Record setDeleted()
    {
        setAllModifiedColumns();

        return setEventType(CanalEntry.EventType.DELETE);
    }

    /** Mark as UPDATE with the given changed columns. */
    public Record setUpdated(List<String> columnNames)
    {
        setModifiedColumns(columnNames);

        return setEventType(CanalEntry.EventType.UPDATE);
    }

    /** @return true when the column exists and was marked modified. */
    public boolean setModifiedColumn(String columnName)
    {
        if (items.containsKey(columnName)) {
            modifiedColumns.add(columnName);
            return true;
        }

        return false;
    }

    public boolean setAllModifiedColumns()
    {
        modifiedColumns.clear();
        modifiedColumns.addAll(keys());

        return true;
    }

    /**
     * Mark the intersection of {@code columnNames} and this record's columns
     * as modified. @return true when at least one column matched.
     */
    public boolean setModifiedColumns(List<String> columnNames)
    {
        Set<String> keys = Sets.newHashSet(items.keySet());

        keys.retainAll(columnNames);

        modifiedColumns.addAll(keys);

        return !keys.isEmpty();
    }

    public Record setTable(String tableName) {
        table = tableName;
        return this;
    }

    /**
     * Compare a column's value to {@code val}; non-strict mode compares
     * case-insensitive string forms. Returns false for unknown columns.
     */
    public boolean equals(@NotNull String key, @Nullable Object val, boolean strict)
    {
        if (containsKey(key))
        {
            Object obj = get(key);

            if (val == null)
                return obj == null;

            // FIX: a null stored value used to NPE in the non-strict branch
            // (obj.toString()); a non-null val can never equal a null value.
            if (obj == null)
                return false;

            return strict ? val.equals(obj) : val.toString().equalsIgnoreCase(obj.toString());
        }

        return false;
    }

    public boolean equals(@NotNull String key, @Nullable Object val)
    {
        return equals(key, val, false);
    }

    public boolean containsKey(@NotNull String key)
    {
        return items.containsKey(key);
    }

    /** Column value; deleted records read as null unless includeDeleted. */
    public Object get(@NotNull String key, boolean includeDeleted)
    {
        return !includeDeleted && isDeleted() ? null : items.get(key);
    }

    public Object get(@NotNull String key)
    {
        return get(key, false);
    }

    /** Value of {@code key} in the named relation, or in this record when relationKey is empty. */
    public Object get(String relationKey, String key)
    {
        if (relationKey == null || relationKey.isEmpty())
            return get(key, false);

        Record record = getRelation(relationKey);

        return record == null ? null : record.get(key, false);
    }

    public Object getWithDeleted(String key) {
        return get(key, true);
    }

    public Record getRelation(@Nullable String relationKey)
    {
        return relations.get(relationKey);
    }

    public Record put(@NotNull String key, Object val)
    {
        return set(key, val);
    }

    public Record set(@NotNull String key, Object val)
    {
        items.put(key, val);

        return this;
    }

    public Set<String> keys()
    {
        return items.keySet();
    }

    /**
     * Document id for ES: the single pk value, or all pk values joined by ":".
     * NOTE(review): NPEs when a pk value is null — presumably pks are NOT NULL
     * in MySQL; confirm before hardening.
     */
    public String getID(River.Table table)
    {
        if (table.pk.size() == 1)
            return getWithDeleted(table.pk.get(0)).toString();

        StringJoiner sj = new StringJoiner(":");
        for (String key : table.pk) {
            sj.add(getWithDeleted(key).toString());
        }

        return sj.toString();
    }

    /** Attach a related record under {@code relationKey}. */
    public void with(@NotNull String relationKey, @NotNull Record record)
    {
        relations.put(relationKey, record);
    }

    public String getTable() {
        return table;
    }

    public boolean isDeleted()
    {
        return eventType == CanalEntry.EventType.DELETE;
    }

    public boolean isUpdated()
    {
        return eventType == CanalEntry.EventType.UPDATE;
    }

    public boolean isInserted()
    {
        return eventType == CanalEntry.EventType.INSERT;
    }

    public Map<String, Object> getItems() {
        return items;
    }

    public Set<String> getModifiedColumns() {
        return modifiedColumns;
    }

    public boolean isModified(String columnName)
    {
        return modifiedColumns.contains(columnName);
    }

    public Map<String, Record> getRelations() {
        return relations;
    }

    /**
     * Modified columns as a map with each key prefixed by {@code prefix}.
     * When everything is modified and no prefix is requested, returns the
     * internal map directly (callers must not mutate it).
     */
    public Map<String, Object> getModifiedItems(@NotNull String prefix)
    {
        if (modifiedColumns.size() == items.keySet().size() && prefix.isEmpty())
            return items;

        Map<String, Object> newItems = new HashMap<>();

        for (String key : modifiedColumns)
            newItems.put(prefix + key, get(key));

        return newItems;
    }

    /**
     * Merge this record with its relations: nested=true embeds each relation
     * as a sub-map; nested=false flattens keys as prefix + relationKey + DOT + column.
     */
    public Map<String, Object> mix(boolean nested, @NotNull String prefix)
    {
        Map<String, Object> newItems = getModifiedItems(prefix);

        for (Map.Entry<String, Record> entry : relations.entrySet()) {
            String relationKey = entry.getKey();
            Map<String, Object> value = entry.getValue().mix(nested, prefix);

            if (nested)
            {
                newItems.put(relationKey, value);
            } else {
                for (Map.Entry<String, Object> entry1 : value.entrySet()) {
                    newItems.put(prefix + relationKey + River.DOT + entry1.getKey(), entry1.getValue());
                }
            }
        }

        return newItems;
    }

    public Map<String, Object> mix()
    {
        return mix(false, "");
    }

    public Map<String, Object> mixWithPrefix(String prefix) {
        return mix(false, prefix);
    }

    /** Serialize the mixed record to JSON. FIX: removed a debug println leftover. */
    public String toJson(ObjectMapper objectMapper, boolean nested, String prefix) throws Exception
    {
        Map<String, Object> mix = mix(nested, prefix);
        return objectMapper.writeValueAsString(mix);
    }

    public String toJson(ObjectMapper objectMapper) throws Exception
    {
        return toJson(objectMapper, false, "");
    }

}
-------------------------------------------------------------------------------- 1 | package org.fly.sync.mysql; 2 | 3 | import com.fasterxml.jackson.databind.ObjectMapper; 4 | import org.fly.sync.action.ChangePositionAction; 5 | import org.fly.sync.action.InsertAction; 6 | import org.fly.sync.contract.AbstractAction; 7 | import org.fly.sync.contract.AbstractLifeCycle; 8 | import org.fly.sync.contract.DbFactory; 9 | import org.fly.sync.es.Es; 10 | import org.fly.sync.exception.DumpFatalException; 11 | import org.fly.sync.executor.Executor; 12 | import org.fly.sync.executor.Statistic; 13 | import org.fly.sync.mysql.model.Record; 14 | import org.fly.sync.mysql.model.Records; 15 | import org.fly.sync.mysql.parser.InsertParser; 16 | import org.fly.sync.mysql.parser.PositionParser; 17 | import org.fly.sync.setting.BinLog; 18 | import org.fly.sync.setting.Config; 19 | import org.fly.sync.setting.River; 20 | import com.sun.istack.NotNull; 21 | import io.reactivex.Observable; 22 | import io.reactivex.ObservableEmitter; 23 | import io.reactivex.ObservableOnSubscribe; 24 | import io.reactivex.Scheduler; 25 | import org.slf4j.Logger; 26 | import org.slf4j.LoggerFactory; 27 | 28 | import java.io.BufferedReader; 29 | import java.io.IOException; 30 | import java.io.InputStreamReader; 31 | import java.util.ArrayList; 32 | import java.util.List; 33 | import java.util.Map; 34 | 35 | public class Dumper extends AbstractLifeCycle implements DbFactory { 36 | 37 | private Config config; 38 | private River river; 39 | private DbFactory dbFactory; 40 | private BinLog.Position position = new BinLog.Position(); 41 | private Process process; 42 | 43 | public final static Logger logger = LoggerFactory.getLogger(Dumper.class); 44 | 45 | public Dumper(@NotNull Config config, @NotNull River river, DbFactory dbFactory) 46 | { 47 | this.config = config; 48 | this.river = river; 49 | this.dbFactory = dbFactory; 50 | } 51 | 52 | @Override 53 | public void start() { 54 | super.start(); 55 | } 56 | 
// ---- Dumper (tail): full-dump via mysqldump, emitted as a reactive stream of actions ----

    /** Stops the life cycle and kills a still-running mysqldump child process, if any. */
    @Override
    public void stop() {
        super.stop();

        if (null != process && process.isAlive())
            process.destroy();
    }

    // DbFactory delegation: all shared resources are obtained from the injected factory.

    @Override
    public Es getEs() {
        return dbFactory.getEs();
    }

    @Override
    public MySql getMySql() {
        return dbFactory.getMySql();
    }

    @Override
    public River.Database getRiverDatabase() {
        return dbFactory.getRiverDatabase();
    }

    @Override
    public Statistic getStatistic() {
        return dbFactory.getStatistic();
    }

    @Override
    public ObjectMapper getJsonMapper() {
        return dbFactory.getJsonMapper();
    }

    /**
     * Builds and launches the mysqldump command, then merges its stderr stream
     * (mapped to errors) and stdout stream (parsed into insert actions) into a
     * single Observable. On any error, the recorded binlog position is reset and
     * the child process is destroyed.
     *
     * @param scheduler scheduler on which both child-process readers run
     * @return merged observable of dump actions; errors surface as DumpFatalException
     */
    public Observable run(Scheduler scheduler)
    {
        StringBuilder cmd = new StringBuilder();

        /*
         * /usr/bin/mysqldump --host=XXXX --port=3306 --user=root --password=x xxxxxxxxxxxx \
         * --master-data --single-transaction --skip-lock-tables --compact --skip-opt \
         * --quick --no-create-info --skip-extended-insert --set-gtid-purged=OFF --default-character-set=utf8 \
         * schemaName table1 table2 table3 table4
         */
        River.Database database = getRiverDatabase();
        cmd.append(config.mysqldump)
                .append(" --host=")
                .append(river.my.host)
                .append(" --port=")
                .append(river.my.port)
                .append(" --user=")
                .append(river.my.user)
                .append(" --password=")
                .append(river.my.password)
                .append(" --default-character-set=")
                .append(river.charset)
                .append(" --master-data --single-transaction --skip-lock-tables --compact --skip-opt --quick --hex-blob --no-create-info --skip-extended-insert --set-gtid-purged=OFF ")
                .append(database.schemaName);

        // Only dump tables whose sync config has "created" enabled.
        for (Map.Entry tableEntry : database.tables.entrySet()
        ) {
            if (!tableEntry.getValue().sync.created)
                continue;

            cmd.append(" ");
            cmd.append(tableEntry.getKey());
        }

        // Mask every occurrence of the password before logging the command line.
        logger.info(cmd.toString().replace(river.my.password, "*"));

        try {
            // NOTE(review): Runtime.exec(String) tokenizes on whitespace; a password or
            // path containing spaces would be split incorrectly — consider ProcessBuilder.
            process = Runtime.getRuntime().exec(cmd.toString());

        } catch (IOException e)
        {
            return Observable.error(new DumpFatalException(e));
        }

        logger.info("Dump database [{}] from mysqldump.", database.schemaName);

        return Observable.merge(
                errorObservable(process)
                        .subscribeOn(scheduler)
                        .observeOn(scheduler),
                dataObservable(process)
                        .subscribeOn(scheduler)
                        .observeOn(scheduler)

        )
                .doOnError(
                        throwable -> {
                            // A failed dump invalidates the parsed binlog position.
                            position.reset();
                            process.destroy();
                            process = null;
                        }
                );
    }

    /** Wraps the mysqldump stdout parser in an Observable. */
    private Observable dataObservable(Process process)
    {
        return Observable.create(new DataEmitter(process));
    }

    /**
     * Reads mysqldump's stderr line by line. "[Warning]" lines are ignored; any
     * other stderr output is treated as a fatal dump error.
     */
    private Observable errorObservable(Process process){

        return Observable.create(observableEmitter -> {
            String s;

            try (
                    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(process.getErrorStream()))
            ) {
                while(Executor.isRunning() && isStart())
                {
                    s = bufferedReader.readLine();
                    if (s == null)
                        break;

                    if (s.contains("[Warning]"))
                        continue;

                    // NOTE(review): onError is signalled but the loop continues and may
                    // later call onComplete — RxJava ignores signals after a terminal
                    // event, but a break here would make the intent explicit.
                    observableEmitter.onError(new DumpFatalException(s));
                }

                observableEmitter.onComplete();

            } catch (IOException e)
            {
                observableEmitter.onError(new DumpFatalException(e));

            }

        });
    }

    /** Parses a "CHANGE MASTER TO ..." line and merges it into the shared position. */
    private synchronized void parsePosition(String sql) {
        BinLog.Position position = PositionParser.parse(sql);
        if (position != null)
            this.position.updateFrom(position);
    }

    /**
     * Parses mysqldump stdout: INSERT statements are batched per table (flushed on
     * table change or when the batch reaches config.bulkSize) and emitted as
     * InsertActions; the CHANGE MASTER line yields the binlog position.
     */
    private class DataEmitter implements ObservableOnSubscribe
    {
        private Process process;
        // Table the current pending batch belongs to; null when the batch is empty.
        String lastTable = null;
        // Pending parsed VALUES rows, flushed together via emit().
        List> insertData = new ArrayList<>();
        ObservableEmitter observableEmitter;

        DataEmitter(Process process) {
            this.process = process;
        }

        /** Queues one parsed row; flushes first if the table changed or the batch is full. */
        void addInsertData(String table, List data)
        {
            if (!table.equalsIgnoreCase(lastTable) || insertData.size() >= config.bulkSize)
                emit();

            insertData.add(data);
            lastTable = table;
        }

        /** Flushes the pending batch downstream as InsertActions; no-op when empty. */
        private void emit()
        {
            if (insertData.isEmpty())
            {
                lastTable = null;
                return;
            }

            Records records = getMySql().getUtcQuery().mixRecords(getRiverDatabase().schemaName, lastTable, insertData, true);
            if (records == null)
                logger.warn("Lost {} records.", insertData.size());
            else
            {
                for (Record record : records
                ) {
                    record.setInserted();
                    observableEmitter.onNext(InsertAction.create(record));
                }
            }

            lastTable = null;
            insertData.clear();
        }

        @Override
        public void subscribe(ObservableEmitter observableEmitter) throws Exception {
            this.observableEmitter = observableEmitter;

            String sql;

            try (
                    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(process.getInputStream()))
            ){

                while(Executor.isRunning() && isStart())
                {
                    // Back-pressure: pause reading while the consumer lags more than
                    // 5 bulk batches behind the dump counter.
                    if (getStatistic().getDumpCount().get() - getStatistic().getRecordCount().get() > config.bulkSize * 5)
                    {
                        //logger.info("Dump {} and subscribe {}, sleep 0.1s", total, getRecordCount.get());
                        try {

                            Thread.sleep(100);
                        } catch (InterruptedException e)
                        {
                            break;
                        }
                        continue;
                    }

                    sql = bufferedReader.readLine();

                    if (sql == null)
                    {
                        emit();
                        break;
                    }

                    getStatistic().getDumpCount().incrementAndGet();

                    if (sql.startsWith("CHANGE MASTER TO MASTER_LOG_FILE"))
                    {
                        emit();
                        parsePosition(sql);
                    } else if (sql.startsWith("INSERT INTO"))
                    {
                        String tableName = InsertParser.parseTable(sql);
                        if (tableName != null && !tableName.isEmpty()) {
                            List data = InsertParser.parseValue(sql);
                            if (data != null)
                                addInsertData(tableName, data);
                        }

                    } else {
                        // Non-INSERT statements flush any pending batch and are skipped.
                        emit();
                        logger.warn("Skip SQL {} ", sql);
                    }
                }

                emit();

                // Hand over the binlog position captured from CHANGE MASTER, so the
                // incremental phase can resume where the dump left off.
                if (!position.isEmpty())
                    observableEmitter.onNext(ChangePositionAction.create(position));

                observableEmitter.onComplete();

            } catch (IOException e)
            {
                observableEmitter.onError(new DumpFatalException(e));

            }

            logger.info("Dump database: [{}] complete;", getRiverDatabase().schemaName);
        }
    }

}
--------------------------------------------------------------------------------
/src/main/java/org/fly/sync/canal/Canal.java:
--------------------------------------------------------------------------------
package org.fly.sync.canal;

import com.alibaba.otter.canal.protocol.CanalEntry;
import com.alibaba.otter.canal.protocol.Message;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.fly.sync.action.*;
import org.fly.sync.contract.AbstractAction;
import org.fly.sync.contract.AbstractLifeCycle;
import org.fly.sync.contract.DbFactory;
import org.fly.sync.es.Es;
import org.fly.sync.exception.CanalFatalException;
import org.fly.sync.executor.Executor;
import org.fly.sync.executor.Statistic;
import org.fly.sync.mysql.MySql;
import org.fly.sync.mysql.model.Record;
import org.fly.sync.setting.BinLog;
import org.fly.sync.setting.Config;
import org.fly.sync.setting.River;
import com.sun.istack.NotNull;
import io.reactivex.Observable;
import io.reactivex.ObservableEmitter;
import io.reactivex.ObservableOnSubscribe;
import io.reactivex.Scheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import
java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * Incremental-sync stage: runs an embedded Canal server/client pair, converts
 * binlog row events into sync actions, and emits them as an Observable.
 * Delegates all shared resources (ES, MySQL, statistics, JSON mapper) to the
 * injected DbFactory.
 */
public class Canal extends AbstractLifeCycle implements DbFactory {

    private Config config;
    private River river;
    private DbFactory dbFactory;
    // Binlog position to resume from (typically produced by the dump phase).
    private BinLog.Position position;
    private Server server;
    private Client client;

    public final static Logger logger = LoggerFactory.getLogger(Canal.class);

    public Canal(@NotNull Config config, @NotNull River river, BinLog.Position position, DbFactory dbFactory) {
        this.config = config;
        this.river = river;
        this.position = position;
        this.dbFactory = dbFactory;

        init();
    }

    /** Creates the embedded Canal server and the client bound to it. */
    private void init()
    {
        server = new Server(river, getRiverDatabase(), position);
        client = new Client(server);
    }

    @Override
    public void start() {
        super.start();

        logger.info("Start Canal Server/Client.");
        server.start();
        client.start();

    }

    @Override
    public void stop()
    {
        super.stop();

        // Unsubscribe before tearing down server and client.
        client.unsubscribe();
        server.stop();
        client.stop();

        logger.info("Canal server stop.");
    }

    // DbFactory delegation: all shared resources come from the injected factory.

    @Override
    public Es getEs() {
        return dbFactory.getEs();
    }

    @Override
    public MySql getMySql() {
        return dbFactory.getMySql();
    }

    @Override
    public River.Database getRiverDatabase() {
        return dbFactory.getRiverDatabase();
    }

    @Override
    public Statistic getStatistic() {
        return dbFactory.getStatistic();
    }

    @Override
    public ObjectMapper getJsonMapper() {
        return dbFactory.getJsonMapper();
    }

    /**
     * Waits (polling every 100 ms) until the embedded server's meta manager is
     * started, subscribes/rolls back the client, and returns the event stream.
     *
     * @param scheduler scheduler for the emitted stream
     * @return observable of actions; empty if interrupted while waiting
     */
    public Observable run(Scheduler scheduler)
    {
        while(!server.getCanalInstance().getMetaManager().isStart())
        {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                logger.error(e.getMessage(), e);
                return Observable.empty();
            }
        }

        logger.info("Canal server started.");

        client.subscribe();
        // Roll back any un-acked batch so we re-read from the last confirmed point.
        client.rollback();

        return Observable.create(new DataEmitter())
                .observeOn(scheduler)
                .subscribeOn(scheduler)
                ;
    }

    /**
     * Pulls batches from the Canal client, converts row changes into
     * Insert/Update/Delete actions (plus relate-actions for associated tables),
     * and pushes them downstream, acking each batch after emission.
     */
    private class DataEmitter implements ObservableOnSubscribe {

        ObservableEmitter observableEmitter;

        public DataEmitter() {

        }

        /**
         * Converts canal entries into actions. Direct actions are collected first
         * and relate-actions appended after them, so related updates are applied
         * once the primary records exist.
         */
        private List parseEntries(List entries) throws SQLException
        {
            List actionList = new ArrayList<>();
            List relateActionList = new ArrayList<>();

            for (CanalEntry.Entry entry : entries) {
                // Transaction markers carry no row data.
                if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
                    continue;
                }

                CanalEntry.RowChange rowChange = null;

                try {
                    rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
                } catch (Exception e) {
                    logger.error("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(), e);
                    continue;
                }

                String tableName = entry.getHeader().getTableName();

                River.Sync sync = getRiverDatabase().getSync(tableName);
                List associates = getRiverDatabase().getAssociates(tableName);


                CanalEntry.EventType eventType = rowChange.getEventType();

                switch (eventType)
                {
                    case DELETE:

                        for (CanalEntry.RowData rowData : rowChange.getRowDatasList())
                        {
                            // Deletes use the before-image of the row.
                            Record record = getMySql().getLocalQuery().mixRecord(getRiverDatabase().schemaName, tableName, getBeforeColumnsList(rowData));
                            record.setDeleted();

                            if (sync.deleted)
                                actionList.add(DeleteAction.create(record));

                            for (River.Associate associate : associates
                            )
                                if (associate.getWith() != null && associate.getWith().sync.deleted)
                                    relateActionList.add(DeleteRelateAction.create(record, associate));
                        }

                        break;
                    case INSERT:

                        for (CanalEntry.RowData rowData : rowChange.getRowDatasList())
                        {
                            Record record = getMySql().getLocalQuery().mixRecord(getRiverDatabase().schemaName, tableName, getAfterColumnsList(rowData));
                            record.setInserted();

                            if (sync.created)
                                actionList.add(InsertAction.create(record));

                            for (River.Associate associate : associates
                            )
                                if (associate.getWith() != null && associate.getWith().sync.created)
                                    relateActionList.add(InsertRelateAction.create(record, associate));
                        }

                        break;
                    case UPDATE:

                        for (CanalEntry.RowData rowData : rowChange.getRowDatasList())
                        {
                            Record record = getMySql().getLocalQuery().mixRecord(getRiverDatabase().schemaName, tableName, getAfterColumnsList(rowData));
                            // Track which columns actually changed.
                            record.setUpdated(getModifiedColumns(rowData.getAfterColumnsList()));

                            if (sync.updated)
                                actionList.add(UpdateAction.create(record));

                            for (River.Associate associate : associates
                            )
                                if (associate.getWith() != null && associate.getWith().sync.updated)
                                    relateActionList.add(UpdateRelateAction.create(record, associate));
                        }

                        break;
                    case ALTER:

                        //TODO
                        break;
                }
            }

            // append
            actionList.addAll(relateActionList);

            return actionList;
        }

        /** Names of columns flagged as updated by canal. */
        private List getModifiedColumns(List columns)
        {
            return columns.stream().filter(CanalEntry.Column::getUpdated).map(CanalEntry.Column::getName).collect(Collectors.toList());
        }

        /** Before-image of a row as name -> value (null for SQL NULL). */
        private Map getBeforeColumnsList(CanalEntry.RowData rowData)
        {
            Map items = new HashMap<>();

            for (CanalEntry.Column column : rowData.getBeforeColumnsList()
            ) {
                items.put(column.getName(), column.getIsNull() ? null : column.getValue());
            }

            return items;
        }

        /** After-image of a row as name -> value (null for SQL NULL). */
        private Map getAfterColumnsList(CanalEntry.RowData rowData)
        {
            Map items = new HashMap<>();

            for (CanalEntry.Column column : rowData.getAfterColumnsList()
            ) {
                items.put(column.getName(), column.getIsNull() ? null : column.getValue());
            }

            return items;
        }

        @Override
        public void subscribe(ObservableEmitter observableEmitter) throws Exception {
            this.observableEmitter = observableEmitter;

            while (Executor.isRunning() && isStart() && server.getCanalInstance().getMetaManager().isStart()) {

                // Back-pressure: pause while the consumer lags more than 5 bulk
                // batches behind the dump + canal counters.
                if (getStatistic().getDumpCount().get()
                        + getStatistic().getCanalCount().get()
                        - getStatistic().getRecordCount().get()
                        > config.bulkSize * 5
                )
                {
                    //logger.info("Canal {} and subscribe {}, sleep 0.1s", total, getRecordCount.get());
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException e)
                    {
                        break;
                    }
                    continue;
                }

                try {

                    Message message = client.getWithoutAck(config.bulkSize, (long) config.flushBulkTime, TimeUnit.MILLISECONDS);

                    long batchId = message.getId();
                    int size = message.getEntries().size();

                    if (batchId == -1 || size == 0) {
                        Thread.sleep(config.flushBulkTime);
                        continue;
                    }

                    List actionList = parseEntries(message.getEntries());
                    if (!actionList.isEmpty()) {
                        getStatistic().getCanalCount().addAndGet(actionList.size());

                        for (AbstractAction action : actionList
                        ) {
                            observableEmitter.onNext(action);
                        }
                    }

                    client.ack(batchId); // confirm the batch only after all actions were emitted

                    BinLog.Position position = server.getBinLogPosition();
                    if (position != null)
                        observableEmitter.onNext(new ChangePositionAction(position));

                } catch (InterruptedException e)
                {
                    break;
                } catch (Exception e)
                {
                    if (e.getCause() instanceof InterruptedException)
                        break;

                    // Roll back the un-acked batch so it can be re-fetched after recovery.
                    if (client.isStart())
                        client.rollback();

                    observableEmitter.onError(new CanalFatalException(e));
                    break;
                }
            }

            observableEmitter.onComplete();
        }
    }
}
--------------------------------------------------------------------------------
/src/main/java/org/fly/sync/canal/Client.java:
--------------------------------------------------------------------------------
package org.fly.sync.canal;

import com.alibaba.otter.canal.instance.core.CanalInstance;
import com.alibaba.otter.canal.protocol.CanalEntry;
import com.alibaba.otter.canal.protocol.ClientIdentity;
import com.alibaba.otter.canal.protocol.Message;
import com.alibaba.otter.canal.protocol.position.LogPosition;
import com.alibaba.otter.canal.protocol.position.Position;
import com.alibaba.otter.canal.protocol.position.PositionRange;
import com.alibaba.otter.canal.server.exception.CanalServerException;
import com.alibaba.otter.canal.store.CanalEventStore;
import com.alibaba.otter.canal.store.model.Event;
import com.alibaba.otter.canal.store.model.Events;
import org.fly.sync.contract.AbstractLifeCycle;
import com.google.common.collect.Lists;
import com.google.protobuf.InvalidProtocolBufferException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.CollectionUtils;

import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;

/**
 * Code from com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded
 */
public class Client extends AbstractLifeCycle {

    public final static Logger logger = LoggerFactory.getLogger(Client.class);
    private Server server;
    private ClientIdentity clientIdentity;
    private
CanalInstance canalInstance;
    // Each Client instance gets a unique id; starts above 1000 to avoid clashes.
    private final static AtomicLong CLIENT_ID = new AtomicLong(1000);

    public Client(Server server) {
        this.server = server;
        clientIdentity = new ClientIdentity(server.getCanal().getName(), (short) CLIENT_ID.incrementAndGet(), server.getFilter());
        canalInstance = server.getCanalInstance();
    }

    public ClientIdentity getClientIdentity()
    {
        return clientIdentity;
    }

    public Server getServer()
    {
        return server;
    }

    public CanalInstance getCanalInstance()
    {
        return canalInstance;
    }

    /** @throws CanalServerException if the embedded server's meta manager is not started. */
    private void checkStart() throws CanalServerException
    {
        if (!canalInstance.getMetaManager().isStart())
            // FIX: message read "Cannal server" — typo corrected.
            throw new CanalServerException("Canal server ["+ server.getCanal().getName() +"] is stop.");
    }

    /** @throws CanalServerException if this client identity has not subscribed yet. */
    private void checkSubscribe() {

        boolean hasSubscribe = canalInstance.getMetaManager().hasSubscribe(clientIdentity);
        if (!hasSubscribe) {
            throw new CanalServerException(String.format("ClientIdentity:%s should subscribe first",
                clientIdentity.toString()));
        }
    }

    /**
     * Lists all subscription info registered on this canal destination.
     */
    public List listAllSubscribe() throws CanalServerException
    {
        return canalInstance.getMetaManager().listAllSubscribeInfo(server.getCanal().getName());
    }

    /**
     * Lists the ids of batches not yet acked, in ascending order.
     */
    public List listBatchIds() throws CanalServerException {
        checkStart();
        checkSubscribe();

        Map batchs = canalInstance.getMetaManager().listAllBatchs(clientIdentity);
        List result = new ArrayList(batchs.keySet());
        Collections.sort(result);
        return result;
    }

    /**
     * Registers this client's subscription with the meta manager and positions
     * its cursor: resumes from the last stored cursor if present, otherwise
     * starts from the first position in the event store.
     */
    public void subscribe() throws CanalServerException
    {
        checkStart();

        if (!canalInstance.getMetaManager().isStart()) {
            canalInstance.getMetaManager().start();
        }

        canalInstance.getMetaManager().subscribe(clientIdentity); // register the meta subscription

        Position position = canalInstance.getMetaManager().getCursor(clientIdentity);
        if (position == null) {
            position = canalInstance.getEventStore().getFirstPosition();// fall back to the first position in the store
            if (position != null) {
                canalInstance.getMetaManager().updateCursor(clientIdentity, position); // persist it as the cursor
            }
            logger.info("subscribe successfully, {} with first position:{} ", clientIdentity, position);
        } else {
            // FIX: the pattern had one {} placeholder for two arguments, so the
            // resumed position was silently dropped from the log line.
            logger.info("subscribe successfully, {} use last cursor position:{} ", clientIdentity, position);
        }

        // Notify the instance that the subscription set changed.
        canalInstance.subscribeChange(clientIdentity);
    }

    /** Removes this client's subscription from the meta manager. */
    public void unsubscribe() throws CanalServerException
    {
        canalInstance.getMetaManager().unsubscribe(clientIdentity); // drop the meta subscription

        logger.info("unsubscribe successfully, {}", clientIdentity);
    }

    /**
     * Fetches data without a timeout (tryGet semantics).
     *
     * NOTE: meta retrieval and data retrieval must stay ordered — whoever gets
     * the meta first must also get the data first — hence the synchronization
     * on the instance in {@link #get(int, Long, TimeUnit)}.
     */
    public Message get(int batchSize) throws CanalServerException {
        return get(batchSize, null, null);
    }

    /**
     * Fetches data with an optional timeout and acks it immediately.
     *
     * Timeout cases:
     * a. timeout == null: tryGet — return whatever is immediately available.
     * b. timeout == 0:    blocking get — wait until a full batchSize is available.
     * c. timeout > 0:     get with timeout — return what arrived within the window.
     *
     * Refuses to proceed if a previous batch is still un-acked (possible data loss).
     */
    public Message get(int batchSize, Long timeout, TimeUnit unit)
        throws CanalServerException {

        checkStart();
        checkSubscribe();

        synchronized (canalInstance) {
            // Last batch handed out through the streaming API must already be acked.
            PositionRange positionRanges = canalInstance.getMetaManager().getLastestBatch(clientIdentity);

            if (positionRanges != null) {
                throw new CanalServerException(String.format("clientId:%s has last batch:[%s] isn't ack , maybe loss data",
                    clientIdentity.getClientId(),
                    positionRanges));
            }

            Position start = canalInstance.getMetaManager().getCursor(clientIdentity);
            Events events = getEvents(canalInstance.getEventStore(), start, batchSize, timeout, unit);

            if (CollectionUtils.isEmpty(events.getEvents())) {
                return new Message(-1, true, new ArrayList()); // empty message: avoid minting a batchId
            } else {
                // Record the batch in the streaming meta before handing it out.
                Long batchId = canalInstance.getMetaManager().addBatch(clientIdentity, events.getPositionRange());
                List entrys = toEntries(events);
                // FIX: guard used isInfoEnabled() around a trace() call; levels now match.
                if (logger.isTraceEnabled()) {
                    logger.trace("get successfully, clientId:{} batchSize:{} real size is {} and result is [batchId:{} , position:{}]",
                        clientIdentity.getClientId(),
                        batchSize,
                        entrys.size(),
                        batchId,
                        events.getPositionRange());
                }
                // get() acks immediately, unlike getWithoutAck().
                ack(batchId);
                return new Message(batchId, false, entrys);
            }
        }
    }

    /**
     * Fetches events without specifying a position; canal remembers this
     * client's latest position. The first fetch starts from the oldest stored
     * event. Uses tryGet semantics (no timeout).
     */
    public Message getWithoutAck(int batchSize) throws CanalServerException {
        return getWithoutAck(batchSize, null, null);
    }

    /**
     * Fetches events without acking them; the caller must {@link #ack} or
     * {@link #rollback} the returned batch. Canal remembers this client's
     * latest position; the first fetch starts from the oldest stored event.
     *
     * Timeout cases:
     * a. timeout == null: tryGet — return whatever is immediately available.
     * b. timeout == 0:    blocking get — wait until a full batchSize is available.
     * c. timeout > 0:     get with timeout — return what arrived within the window.
     *
     * NOTE: meta and data retrieval must stay ordered, hence the synchronization.
     */
    public Message getWithoutAck(int batchSize, Long timeout, TimeUnit unit)
        throws CanalServerException {

        checkStart();
        checkSubscribe();

        synchronized (canalInstance) {
            // Position range of the last batch handed out through the streaming API.
            PositionRange positionRanges = canalInstance.getMetaManager().getLastestBatch(clientIdentity);

            Events events;
            if (positionRanges != null) { // streaming data pending: continue from its start
                events = getEvents(canalInstance.getEventStore(), positionRanges.getStart(), batchSize, timeout, unit);
            } else { // first fetch after an ack
                Position start = canalInstance.getMetaManager().getCursor(clientIdentity);
                if (start == null) { // never acked: begin at the first position in the store
                    start = canalInstance.getEventStore().getFirstPosition();
                }

                events = getEvents(canalInstance.getEventStore(), start, batchSize, timeout, unit);
            }

            if (CollectionUtils.isEmpty(events.getEvents())) {
                return new Message(-1, true, new ArrayList()); // empty message: avoid minting a batchId
            } else {
                // Record the batch in the streaming meta before handing it out.
                Long batchId = canalInstance.getMetaManager().addBatch(clientIdentity, events.getPositionRange());
                List entrys = toEntries(events);
                // FIX: guard used isInfoEnabled() around a trace() call; levels now match.
                if (logger.isTraceEnabled()) {
                    logger.trace("getWithoutAck successfully, clientId:{} batchSize:{} real size is {} and result is [batchId:{} , position:{}]",
                        clientIdentity.getClientId(),
                        batchSize,
                        entrys.size(),
                        batchId,
                        events.getPositionRange());
                }
                return new Message(batchId, false, entrys);
            }

        }
    }

    /**
     * Decodes raw store events into CanalEntry.Entry objects, dropping any that
     * fail protobuf parsing. Extracted from the formerly duplicated pipelines in
     * {@link #get(int, Long, TimeUnit)} and {@link #getWithoutAck(int, Long, TimeUnit)}.
     */
    private List toEntries(Events events) {
        return Lists.transform(events.getEvents(), input -> {
                try {
                    return CanalEntry.Entry.parseFrom(input.getRawEntry());
                } catch (InvalidProtocolBufferException e)
                {
                    return null;
                }
            })
            .stream()
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    }

    /**
     * Confirms a batch id; every Message with an id less than or equal to it is
     * considered consumed.
     *
     * NOTE: acks must be issued in batchId order (the client must guarantee this).
     *
     * @throws CanalServerException on a duplicate ack/rollback (batch not found)
     */
    public void ack(long batchId) throws CanalServerException {
        checkStart();
        checkSubscribe();

        PositionRange positionRanges = canalInstance.getMetaManager().removeBatch(clientIdentity, batchId); // advance the position
        if (positionRanges == null) { // duplicate ack/rollback
            throw new CanalServerException(String.format("ack error , clientId:%s batchId:%d is not exist , please check",
                clientIdentity.getClientId(),
                batchId));
        }

        // A stricter implementation would verify the ack position does not jump
        // past the last rollback position (see CanalServerWithEmbedded upstream).

        // Advance the cursor.
        if (positionRanges.getAck() != null) {
            canalInstance.getMetaManager().updateCursor(clientIdentity, positionRanges.getAck());
            // FIX: guard used isInfoEnabled() around a trace() call; levels now match.
            if (logger.isTraceEnabled()) {
                logger.trace("ack successfully, clientId:{} batchId:{} position:{}",
                    clientIdentity.getClientId(),
                    batchId,
                    positionRanges);
            }
        }

        // Allow the event store to reclaim data up to the batch end.
        canalInstance.getEventStore().ack(positionRanges.getEnd());

    }

    /**
     * Rolls back to the last un-{@link #ack}ed point; the next fetch resumes
     * from the first un-acked event.
     */
    public void rollback() throws CanalServerException {
        checkStart();
        // The first connection may auto-rollback before subscribing, so an
        // unsubscribed identity is silently ignored.
        boolean hasSubscribe = canalInstance.getMetaManager().hasSubscribe(clientIdentity);
        if (!hasSubscribe) {
            return;
        }

        synchronized (canalInstance) {
            // Drop all batch bookkeeping.
            canalInstance.getMetaManager().clearAllBatchs(clientIdentity);
            // Roll back the event store's streaming state.
            canalInstance.getEventStore().rollback();
            logger.info("rollback successfully, clientId:{}", clientIdentity.getClientId());
        }
    }

    /**
     * Rolls back a specific batch; the next fetch resumes from the first
     * un-{@link #ack}ed event.
     *
     * @throws CanalServerException on a duplicate ack/rollback (batch not found)
     */
    public void rollback(Long batchId) throws CanalServerException {
        checkStart();

        // The first connection may auto-rollback before subscribing, so an
        // unsubscribed identity is silently ignored.
        boolean hasSubscribe = canalInstance.getMetaManager().hasSubscribe(clientIdentity);
        if (!hasSubscribe) {
            return;
        }
        synchronized (canalInstance) {
            // Remove this batch's bookkeeping.
            PositionRange positionRanges = canalInstance.getMetaManager().removeBatch(clientIdentity,
                batchId);
            if (positionRanges == null) { // duplicate ack/rollback
                throw new CanalServerException(String.format("rollback error, clientId:%s batchId:%d is not exist , please check",
                    clientIdentity.getClientId(),
                    batchId));
            }

            // TODO rollback to the exact batchId position; for now the whole
            // event-store streaming state is rolled back.
            canalInstance.getEventStore().rollback();
            logger.info("rollback successfully, clientId:{} batchId:{} position:{}",
                clientIdentity.getClientId(),
                batchId,
                positionRanges);
        }
    }

    /**
     * Chooses the fetch strategy based on the timeout parameters:
     * null -> tryGet; <= 0 -> blocking get; > 0 -> get with timeout.
     */
    private Events getEvents(CanalEventStore eventStore, Position start, int batchSize, Long timeout,
                             TimeUnit unit) {
        if (timeout == null) {
            return eventStore.tryGet(start, batchSize);
        } else {
            try {
                if (timeout <= 0) {
                    return eventStore.get(start, batchSize);
                } else {
                    return eventStore.get(start, batchSize, timeout, unit);
                }
            } catch (Exception e) {
                throw new CanalServerException(e);
            }
        }
    }

}
--------------------------------------------------------------------------------