├── 0
│   └── 0_overview.md
├── 1
│   ├── 1.0_mysql_replication.graffle
│   ├── 1.0_mysql_replication.md
│   ├── 1.0_mysql_replication.png
│   ├── 1.1_bidirectional_replication.graffle
│   ├── 1.1_bidirectional_replication.md
│   ├── 1.1_bidirectional_replication.png
│   ├── 1.2_sync_between_cloud.md
│   ├── 1.3_mysql_kafka.graffle
│   │   ├── data.plist
│   │   └── image2.png
│   ├── 1.3_mysql_kafka.md
│   ├── 1.3_mysql_kafka.png
│   ├── 1.4_oracle_mysql.md
│   └── 1.4_oracle_mysql.png
├── 2
│   ├── 2.0.1_interface.md
│   ├── 2.0_mysql_replication_1_1.md
│   ├── 2.1_mysql_replication_n_1.md
│   ├── 2.2_mysql_replication_1_n.md
│   ├── 2.3_dc_to_dc_bidirectional.md
│   ├── 2.4_ali_to_jd.md
│   ├── 2.5_mysql_kafka.md
│   ├── 2.6_oracle_mysql.md
│   ├── 2.7_multi_node.md
│   ├── consul-3.png
│   ├── nomad-client.png
│   └── nomad-server.png
├── 3
│   ├── 3.0_function_scenario_mapping.md
│   ├── 3.10_consul.md
│   ├── 3.11_oracle_mysql.md
│   ├── 3.1_limitation.md
│   ├── 3.2_ports.md
│   ├── 3.3_impact_on_dest.md
│   ├── 3.4.1_delay_alert.md
│   ├── 3.4.2_monitor.md
│   ├── 3.4_metrics.md
│   ├── 3.5_deployment.md
│   ├── 3.6_DDL.md
│   ├── 3.7_DCL.md
│   ├── 3.8_dtle_mapping.md
│   ├── 3.9_binlog_relay.md
│   └── images
│       ├── 3.4.1_delay2.png
│       ├── 3.4.2_add_cpu_panel.png
│       ├── 3.4.2_add_data_source.png
│       ├── 3.4.2_add_panel.png
│       ├── 3.4.2_add_prometheus_url.png
│       ├── 3.4.2_all_panel.png
│       ├── 3.4.2_choose_prometheus.png
│       ├── 3.4.2_job_sample.png
│       ├── 3.4.2_prometheus_targets.png
│       ├── 3.4.2_set_unit.png
│       ├── 3.4.2_topu.png
│       ├── 3.4_delay1.png
│       ├── 3.5_deployment.graffle
│       └── 3.5_deployment.png
├── 4
│   ├── 4.0_installation.md
│   ├── 4.1_node_configuration.md
│   ├── 4.2_command.md
│   ├── 4.3.1_tuning.md
│   ├── 4.3.2_job_sample.md
│   ├── 4.3_job_configuration.md
│   ├── 4.4.1_dtle_http_api.md
│   ├── 4.4.1_http_api_oldv2.md
│   ├── 4.4_http_api.md
│   ├── 4.5_mysql_user_privileges.md
│   ├── 4.6_dtle_2to3.md
│   ├── 4.7_diagnosing.md
│   └── images
│       ├── 4.4.1_execute.png
│       ├── 4.4.1_login.png
│       ├── 4.4.1_response.png
│       ├── 4.4.1_swagger_ui.png
│       └── 4.4.1_try_it_out.png
├── 5
│   ├── 5.1_resource_estimation.md
│   ├── 5.2_architecture.md
│   ├── 5.3_kafka_message_format.md
│   ├── 5.4_columns_mapping.md
│   └── dtle-architecture.png
├── 6
│   └── howto_contribute.md
├── 7
│   └── 7_roadmap.md
├── .gitignore
├── .travis.yml
├── .vscode
│   └── settings.json
├── LICENSE
├── Makefile
├── QR_code.png
├── README.md
├── SUMMARY.md
├── book.json
├── dtle-manual.pdf
├── package-lock.json
└── website.css

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
### Eclipse template
*.pydevproject
.metadata
.gradle
/bin/
/tmp/
*.tmp
*.bak
*.swp
*~.nib
local.properties
.settings/
.loadpath

# Eclipse Core
.project

# External tool builders
.externalToolBuilders/

# Locally stored "Eclipse launch configurations"
*.launch

# CDT-specific
.cproject

# JDT-specific (Eclipse Java Development Tools)
.classpath

# Java annotation processor (APT)
.factorypath

# PDT-specific
.buildpath

# sbteclipse plugin
.target

# TeXlipse plugin
.texlipse


### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion

*.iml

## Directory-based project format:
.idea/
# if you remove the above rule, at least ignore the following:

# User-specific stuff:
# .idea/workspace.xml
# .idea/tasks.xml
# .idea/dictionaries

# Sensitive or high-churn files:
# .idea/dataSources.ids
# .idea/dataSources.xml
# .idea/sqlDataSources.xml
# .idea/dynamic.xml
# .idea/uiDesigner.xml

# Gradle:
# .idea/gradle.xml
# .idea/libraries

# Mongo Explorer plugin:
# .idea/mongoSettings.xml

## File-based project format:
*.ipr
*.iws

## Plugin-specific files:

# IntelliJ
/out/
.idea/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties


### Java template
*.class

# Mobile Tools for Java (J2ME)
.mtj.tmp/

# Package Files #
*.jar
*.war
*.ear

# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*


/project/project/
/project/target/
/project/activator-*
/RUNNING_PID
.DS_Store
/target/
/tmlogs/
/txlogs/
/viewConf/
src/main/resources/server.xml
src/main/resources/schema.xml
src/main/resources/rule.xml
src/main/resources/zkconf/server.xml
src/main/resources/zkconf/schema.xml
src/main/resources/zkconf/rule.xml
conf/dnindex.properties
version.txt
copyResources.bat
dependency-reduced-pom.xml
checkstyle-result.out

# gitbook
_book/
node_modules/

# vscode
.vscode/

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
dist: trusty
env:
  global:
    - GH_REF: github.com/actiontech/dtle-docs-cn
    - GH_USER: actiontech-bot
    - GH_MAIL: github@actionsky.com


# language of the build environment
language: node_js
# require sudo
sudo: required
# Node.js version
node_js: v14.2.0
# cache node_modules to speed up builds
cache:
  directories:
    - node_modules

# email notifications
notifications:
  email:
    recipients:
      - yangzhongjiao@actionsky.com
    on_success: never # default: change
    on_failure: always # default: always

# branches to build
branches:
  only:
    - master

# set the time zone
before_install:
  - export TZ='Asia/Shanghai'
  - sudo apt-get install -y calibre fonts-arphic-gbsn00lp

# install the environment
install:
  - npm install -g gitbook-cli
  - npm install gitbook-plugin-yahei
  - gitbook install

# build static files with GitBook
script:
  - gitbook build
  - cd ./_book
  - git init
  - git config user.name "${GH_USER}"
  - git config user.email "${GH_MAIL}"
  - git add .
  - git commit -m "Update GitBook By TravisCI With Build $TRAVIS_BUILD_NUMBER"
  - git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:gh-pages
  - cd ..
  # pdf
  - xvfb-run gitbook pdf ./ ./dtle-manual.pdf
  - mkdir pdf
  - cp dtle-manual.pdf ./pdf/
  - cd ./pdf
  - git init
  - git config user.name "${GH_USER}"
  - git config user.email "${GH_MAIL}"
  - git add .
  - git commit -m "Update GitBook By TravisCI With Build PDF $TRAVIS_BUILD_NUMBER"
  - git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:pdf

--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
{
    "workbench.editor.wrapTabs": true,
    "workbench.editor.highlightModifiedTabs": true
}

--------------------------------------------------------------------------------
/0/0_overview.md:
--------------------------------------------------------------------------------
# Overview

dtle (Data-Transformation-le) is an open-source [CDC](https://en.wikipedia.org/wiki/Change_data_capture) tool developed by [Shanghai Action Technology Co., Ltd.](http://www.actionsky.com/). Its features:
- Multiple data-transfer modes
  - Link compression
  - Homogeneous and heterogeneous transfer
  - Transfer across network boundaries
- Multiple data-processing modes
  - Schema/table/row-level data filtering
- Multiple channel topologies
  - Many-to-many data transfer
  - Loop-back (bidirectional) transfer
- Multiple sources/targets
  - MySQL - ActionDB data transfer
  - MySQL - MySQL data transfer
  - MySQL - Kafka data transfer
  - Oracle - MySQL data transfer
- Cluster mode
  - Reliable metadata storage
  - Automatic task allocation
  - Automatic failover

--------------------------------------------------------------------------------
/1/1.0_mysql_replication.graffle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.0_mysql_replication.graffle

--------------------------------------------------------------------------------
/1/1.0_mysql_replication.md:
--------------------------------------------------------------------------------
# 1.0 MySQL-compatible one-way replication / aggregation / distribution

As shown below, dtle supports the following common one-way replication scenarios for MySQL-compatible databases:
- By source/target mapping
  - 1:1 replication
  - n:1 aggregation, merging data from multiple sources into one target
  - 1:n splitting, distributing data from one source to multiple targets
- By network type
  - Transfer within one network
  - Transfer across network boundaries (link compression, link throttling, etc. are available)
- By cluster size
  - A single dtle instance handling a single data channel
  - A dtle cluster handling multiple data channels

![](1.0_mysql_replication.png)

--------------------------------------------------------------------------------
/1/1.0_mysql_replication.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.0_mysql_replication.png

--------------------------------------------------------------------------------
/1/1.1_bidirectional_replication.graffle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.1_bidirectional_replication.graffle

--------------------------------------------------------------------------------
/1/1.1_bidirectional_replication.md:
--------------------------------------------------------------------------------
# 1.1 Bidirectional replication across data centers

As shown below, dtle supports bidirectional replication between MySQL instances, most commonly for two-way data synchronization across data centers.

![](1.1_bidirectional_replication.png)

Notes:
- dtle detects loops in the data flow and never transfers the same transaction twice.
- dtle preserves transactionality during transfer: the changes produced by one transaction at the source are replayed as one transaction at the target, so under double-write the target side is never exposed to partial transactions.
- Link compression and rate limiting are available on the channel, which makes dtle well suited to cross-data-center scenarios.

--------------------------------------------------------------------------------
/1/1.1_bidirectional_replication.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.1_bidirectional_replication.png

--------------------------------------------------------------------------------
/1/1.2_sync_between_cloud.md:
--------------------------------------------------------------------------------
# 1.2 Data synchronization between public clouds

dtle can be used for data synchronization between public clouds, with the same deployment options as described in [1.0](1.0_mysql_replication.md) and [1.1](1.1_bidirectional_replication.md). The differences are:

- dtle can be deployed on public-cloud VM instances
- dtle is adapted to the privileges granted by public-cloud RDS services; **superuser** privileges are not required for replication
- dtle is adapted to non-official MySQL builds on public clouds (e.g. Alibaba Cloud RDS adds an implicit primary-key column, so the data in the binlog does not match the table definition)

Currently supported cloud-to-cloud channels:
* Alibaba Cloud -> JD Cloud

--------------------------------------------------------------------------------
/1/1.3_mysql_kafka.graffle/data.plist:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.3_mysql_kafka.graffle/data.plist

--------------------------------------------------------------------------------
/1/1.3_mysql_kafka.graffle/image2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.3_mysql_kafka.graffle/image2.png

--------------------------------------------------------------------------------
/1/1.3_mysql_kafka.md:
--------------------------------------------------------------------------------
# 1.3 Change notification from MySQL to Kafka

As shown below, dtle can push MySQL data-change notifications to Kafka. Typical uses:
- On data change, notify a cache component to refresh its cache
- On data change, notify a post-processing component to scan the data

![](1.3_mysql_kafka.png)

--------------------------------------------------------------------------------
/1/1.3_mysql_kafka.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.3_mysql_kafka.png

--------------------------------------------------------------------------------
/1/1.4_oracle_mysql.md:
--------------------------------------------------------------------------------
# 1.4 Data synchronization from Oracle to MySQL

As shown below, dtle supports data synchronization from Oracle to MySQL:
- When Oracle executes DDL, the schema/table structure is synchronized to MySQL
- When Oracle executes DML, the column changes are synchronized to MySQL

![](1.4_oracle_mysql.png)

--------------------------------------------------------------------------------
/1/1.4_oracle_mysql.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/1/1.4_oracle_mysql.png

--------------------------------------------------------------------------------
/2/2.0.1_interface.md:
--------------------------------------------------------------------------------
# HTTP API, nomad CLI, and Web UI

## HTTP API

The curl command below calls the HTTP API of the nomad agent, submitting the local job.json to it.

```
curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @job.json -s | jq
```

The dtle RPM package ships sample jobs in both JSON and HCL format.

### jq

jq is a tool for formatting and extracting JSON content. It is usually installed with a Linux package manager.

Typical usage:
```
# pretty-print JSON:
some_command_print_json | jq

# extract a field (Status):
some_command_print_json | jq '.Status'
```

See https://stedolan.github.io/jq/tutorial/ for details.

## nomad CLI

The nomad command-line tool can be used as well. nomad packs the CLI and the agent into a single executable.

Run a job with the nomad CLI, using the HCL format:

```
nomad job run -address="http://192.168.1.1:4646" job1.hcl
# or
export NOMAD_ADDR="http://192.168.1.1:4646"
nomad job run job1.hcl
```

This usage is essentially a wrapper around the HTTP API.
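Other nomad subcommands work the same way. For example, to inspect a job after submitting it (standard nomad CLI; `dtle-demo` is a job ID used in the demos later in this manual):

```
export NOMAD_ADDR="http://192.168.1.1:4646"
nomad job status              # list all jobs
nomad job status dtle-demo    # allocations and task states of one job
```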
## nomad Web UI

Open http://127.0.0.1:4646 in a browser for the nomad Web UI. It shows Jobs, Servers and Clients.

On the Jobs page, click "Run Job" to submit a job in HCL or JSON format.

## consul

- nomad itself uses consul for multi-node registration and discovery
- the dtle nomad plugin uses consul to store job metadata

Open http://127.0.0.1:8500 in a browser for the consul Web UI, where the job progress (Gtid) can be inspected in the KV store.

Or:

```
curl -XGET "127.0.0.1:8500/v1/kv/dtle/aa/Gtid?raw"
```

--------------------------------------------------------------------------------
/2/2.0_mysql_replication_1_1.md:
--------------------------------------------------------------------------------
# One-way MySQL replication

The following steps use docker containers to quickly set up a one-way MySQL replication environment.

## Create a network

```
docker network create dtle-net
```

## Create the source and target MySQL

```
docker run --name mysql-src -e MYSQL_ROOT_PASSWORD=pass -p 33061:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=1

docker run --name mysql-dst -e MYSQL_ROOT_PASSWORD=pass -p 33062:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=2
```

Check connectivity:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log
```

## Create dtle

```
docker run --name dtle-consul -p 8500:8500 --network=dtle-net -d consul:latest
docker run --name dtle -p 4646:4646 --network=dtle-net -d actiontech/dtle
# To use the dtle 2.x HTTP API compatibility layer, also map port 8190: -p 8190:8190
```

Check health:

```
> curl -XGET "127.0.0.1:4646/v1/nodes" -s | jq
< [
    {
      "Address": "127.0.0.1",
      "Datacenter": "dc1",
      "Drivers": {
        "dtle": {
          "Attributes": {
            "driver.dtle": "true",
            "driver.dtle.version": "..."
          },
          "Detected": true,
          "Healthy": true,
        }
      },
      "ID": "65ff2f9a-a9fa-997c-cce0-9bc0b4f3396c",
      "Name": "nomad0",
      "Status": "ready",
    }
  ]
# (some fields omitted)
```

## Prepare the job definition

Prepare a file job.json with the following content:

```
{
    "Job": {
        "ID": "dtle-demo",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid": "",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host": "mysql-src",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host": "mysql-dst",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

It defines:
- the source and target connection settings
- the table to replicate: `demo.demo_tbl`
- an empty GTID, meaning this is a full + incremental replication; to test incremental-only replication, specify a valid GTID

## Prepare test data

You may create the table `demo.demo_tbl` on the source and insert rows in advance, to exercise the full-copy phase.
Creating the table beforehand is optional.

## Create the replication job

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @job.json -s | jq
< {
    "EvalCreateIndex": 50,
    "EvalID": "a5e9c353-5eb9-243e-983d-bc096a93ddca",
    "Index": 50,
    "JobModifyIndex": 49,
    "KnownLeader": false,
    "LastContact": 0,
    "Warnings": ""
  }
```

Check the job status:

```
> curl -XGET "http://127.0.0.1:4646/v1/job/dtle-demo" -s | jq '.Status'
< "running"
```

## Test

Now run DDL/DML of any kind against `demo.demo_tbl` on the source, and verify that the data on the target stays consistent.
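A minimal consistency check, assuming the job above is running (the inserted value is arbitrary):

```
mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "insert into demo.demo_tbl values(99)"
# after a moment, the row should appear on the target:
mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "select * from demo.demo_tbl where a=99"
```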
--------------------------------------------------------------------------------
/2/2.1_mysql_replication_n_1.md:
--------------------------------------------------------------------------------
# MySQL aggregation (n:1) replication

The following steps use docker containers to quickly set up a MySQL aggregation replication environment.

## Create a network

```
docker network create dtle-net
```

## Create the sources (2) and the target (1) MySQL

```
docker run --name mysql-src1 -e MYSQL_ROOT_PASSWORD=pass -p 33061:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=1

docker run --name mysql-src2 -e MYSQL_ROOT_PASSWORD=pass -p 33062:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=2

docker run --name mysql-dst -e MYSQL_ROOT_PASSWORD=pass -p 33063:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=3
```

Check connectivity:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log

> mysql -h 127.0.0.1 -P 33063 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log
```

## Create the schema on the sources, record the GTID, and insert data

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
< ...

> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "show master status\G" | grep "Executed_Gtid_Set"
< Executed_Gtid_Set: f6def853-cbaa-11e8-8aeb-0242ac120003:1-7

> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "insert into demo.demo_tbl values(1),(2),(3)"
< ...

---

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
< ...

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "show master status\G" | grep "Executed_Gtid_Set"
< Executed_Gtid_Set: f74aacb5-cbaa-11e8-bdd1-0242ac120004:1-7

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "insert into demo.demo_tbl values(4),(5),(6)"
< ...
```

## Create the schema on the target

```
> mysql -h 127.0.0.1 -P 33063 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
< ...
```

## Create dtle

```
docker run --name dtle-consul -p 8500:8500 --network=dtle-net -d consul:latest
docker run --name dtle -p 4646:4646 --network=dtle-net -d actiontech/dtle
```

Check health:

```
> curl -XGET "127.0.0.1:4646/v1/nodes" -s | jq
< [{...}]
```

## Prepare the job definitions

### Job definition for src1 to dst

Prepare src1_dst.json with the following content:

```
{
    "Job": {
        "ID": "dtle-demo-src1-dst",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid": "f6def853-cbaa-11e8-8aeb-0242ac120003:1-7",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host": "mysql-src1",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host": "mysql-dst",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

It defines:
- the source and target connection settings
- the table to replicate: `demo.demo_tbl`
- the GTID recorded on src1 before the data was inserted

### Job definition for src2 to dst

Prepare src2_dst.json with the following content:

```
{
    "Job": {
        "ID": "dtle-demo-src2-dst",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid": "f74aacb5-cbaa-11e8-bdd1-0242ac120004:1-7",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host": "mysql-src2",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host": "mysql-dst",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

Compared with `src1_dst.json`, the differences are:
- the source connection settings
- the GTID recorded on src2 before the data was inserted

## Create the replication jobs

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @src1_dst.json -s | jq
< {...}

> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @src2_dst.json -s | jq
< {...}
```

Check the job IDs and statuses:

```
> curl -XGET "127.0.0.1:4646/v1/jobs" -s | jq '.[] | .ID, .Status'
< "dtle-demo-src1-dst"
"running"
"dtle-demo-src2-dst"
"running"
```

## Test

Insert rows on src1 and src2, then check the data on dst to verify that both the full and the incremental data are present:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "insert into demo.demo_tbl values(11)"
< ...

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "insert into demo.demo_tbl values(12)"
< ...

> mysql -h 127.0.0.1 -P 33063 -uroot -ppass -e "select * from demo.demo_tbl"
<
+----+
| a  |
+----+
|  1 |
|  2 |
|  3 |
|  4 |
|  5 |
|  6 |
| 11 |
| 12 |
+----+
```
--------------------------------------------------------------------------------
/2/2.2_mysql_replication_1_n.md:
--------------------------------------------------------------------------------
# MySQL data distribution (1:n)

The following steps use docker containers to quickly set up a MySQL data-distribution environment. Distribution replication copies the rows of the source table with `primary key < 5` to target 1, and the rows with `primary key >= 5` to target 2.

## Create a network

```
docker network create dtle-net
```

## Create the source (1) and the targets (2) MySQL

```
docker run --name mysql-src -e MYSQL_ROOT_PASSWORD=pass -p 33061:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=1

docker run --name mysql-dst1 -e MYSQL_ROOT_PASSWORD=pass -p 33062:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=2

docker run --name mysql-dst2 -e MYSQL_ROOT_PASSWORD=pass -p 33063:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=3
```

Check connectivity:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log

> mysql -h 127.0.0.1 -P 33063 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log
```

## Create the schema on the source, record the GTID, and insert data

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
< ...

> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "show master status\G" | grep "Executed_Gtid_Set"
< Executed_Gtid_Set: 167dd42f-d076-11e8-8104-0242ac120003:1-7

> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "insert into demo.demo_tbl values(1),(2),(3)"
< ...
```

## Create the schema on the targets

```
> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
< ...

> mysql -h 127.0.0.1 -P 33063 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
< ...
```

## Create dtle

```
docker run --name dtle-consul -p 8500:8500 --network=dtle-net -d consul:latest
docker run --name dtle -p 4646:4646 --network=dtle-net -d actiontech/dtle
```

Check health:

```
> curl -XGET "127.0.0.1:4646/v1/nodes" -s | jq
< [{...}]
```

## Prepare the job definitions

### Job definition for src to dst1

Prepare src_dst1.json with the following content:

```
{
    "Job": {
        "ID": "dtle-demo-src-dst1",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid": "",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl",
                            "Where":"a<5"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host": "mysql-src",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host": "mysql-dst1",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

It defines:
- the source and target connection settings
- the table to replicate: `demo.demo_tbl`
- the row filter on `demo_tbl`: `a<5`

### Job definition for src to dst2

Prepare src_dst2.json with the following content:

```
{
    "Job": {
        "ID": "dtle-demo-src-dst2",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid": "",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl",
                            "Where":"a>=5"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host": "mysql-src",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host": "mysql-dst2",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

It defines:
- the source and target connection settings
- the table to replicate: `demo.demo_tbl`
- the row filter on `demo_tbl`: `a>=5`

Compared with `src_dst1.json`, the differences are:
- the target connection settings
- the row filter on `demo_tbl`

## Create the replication jobs

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @src_dst1.json -s | jq
< {...}

> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @src_dst2.json -s | jq
< {...}
```

Check the job IDs and statuses:

```
> curl -XGET "127.0.0.1:4646/v1/jobs" -s | jq '.[] | .ID, .Status'
< "dtle-demo-src-dst1"
"running"
"dtle-demo-src-dst2"
"running"
```

## Test

Insert rows on src, then check the data on dst1/dst2 to verify that both the full and the incremental data are present:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "insert into demo.demo_tbl values(0),(10)"
< ...


> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "select * from demo.demo_tbl"
<
+----+
| a  |
+----+
|  0 |
|  1 |
|  2 |
|  3 |
+----+

> mysql -h 127.0.0.1 -P 33063 -uroot -ppass -e "select * from demo.demo_tbl"
<
+----+
| a  |
+----+
| 10 |
+----+
```
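When you are done with a demo job, it can be stopped through nomad's standard job API; adding `?purge=true` also removes it from the job list:

```
curl -XDELETE "http://127.0.0.1:4646/v1/job/dtle-demo-src-dst1?purge=true" -s | jq
```

Note that dtle keeps the job's progress in consul; see section 3.10 for cleaning that up.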
--------------------------------------------------------------------------------
/2/2.3_dc_to_dc_bidirectional.md:
--------------------------------------------------------------------------------
# Bidirectional MySQL replication across data centers

The following steps use docker containers to quickly set up bidirectional MySQL replication across data centers.

## Create two networks

```
docker network create dtle-net-dc1
docker network create dtle-net-dc2
```

## Create a MySQL instance in each network

```
docker run --name mysql-dc1 -e MYSQL_ROOT_PASSWORD=pass -p 33061:3306 --network=dtle-net-dc1 -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=1

docker run --name mysql-dc2 -e MYSQL_ROOT_PASSWORD=pass -p 33062:3306 --network=dtle-net-dc2 -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=2
```

Check that MySQL started successfully:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log

> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log
```

## Create dtle in each network

```
docker run --name dtle-consul -p 8500:8500 --network=dtle-net-dc1 -d consul:latest
docker run --name dtle-dc1 -p 4646:4646 --network=dtle-net-dc1 -d actiontech/dtle

# dtle-dc2 will work as a client only. No need to start consul-dc2.
docker run --name dtle-dc2 -p 5646:4646 --network=dtle-net-dc2 -d actiontech/dtle
```

## Connect the two dtle nodes over the public network

```
docker network create dtle-net-public
docker network connect dtle-net-public dtle-dc1
docker network connect dtle-net-public dtle-consul
docker network connect dtle-net-public dtle-dc2
```

## Modify the dtle configuration

### Modify the configuration inside dtle-dc1 and restart it

```
docker exec -u root -it dtle-dc1 vi /dtle/etc/dtle/nomad.hcl
...
docker exec -u root -it dtle-dc1 rm -rf /dtle/var/lib/nomad
docker restart dtle-dc1
```

The changes to `/dtle/etc/dtle/nomad.hcl`:

```
name = "nomad1" # rename for each node

# ... (unchanged items omitted)

bind_addr = "172.22.0.2"
advertise {
    http = "172.22.0.2"
    rpc = "172.22.0.2"
    serf = "172.22.0.2"
}

plugin "dtle" {
    config {
        nats_bind = "172.22.0.2:8193"
        nats_advertise = "172.22.0.2:8193"
        nomad_addr = "172.22.0.2:4646"
        # ...
    }
}
```

Notes:
- The dtle-dc1 container is attached to two networks (`dtle-net-dc1`, the internal network for reaching MySQL, and `dtle-net-public`, the public network for reaching dtle-dc2), so `bind_addr` and `advertise.rpc` must be set to this host's `dtle-net-public` address, here `172.22.0.2`.

### Modify the configuration inside dtle-dc2 and restart it

```
docker exec -u root -it dtle-dc2 vi /dtle/etc/dtle/nomad.hcl
...
docker exec -u root -it dtle-dc2 rm -rf /dtle/var/lib/nomad
docker restart dtle-dc2
```

The changes to `/dtle/etc/dtle/nomad.hcl`:

```
name = "nomad2" # rename for each node

# ... (unchanged items omitted)

bind_addr = "172.22.0.3"
advertise {
    http = "172.22.0.3"
    rpc = "172.22.0.3"
    serf = "172.22.0.3"
}

server {
    # Important!
    # Only dtle-dc1 acts as a server. dtle-dc2 is a client only.
    enabled = false
}

plugin "dtle" {
    config {
        nats_bind = "172.22.0.3:8193"
        nats_advertise = "172.22.0.3:8193"
        nomad_addr = "172.22.0.3:4646"
        # ...
    }
}
```

Notes:
- The dtle-dc2 container is attached to two networks (`dtle-net-dc2`, the internal network for reaching MySQL, and `dtle-net-public`, the public network for reaching dtle-dc1), so `bind_addr` and `advertise.rpc` must be set to this host's `dtle-net-public` address, here `172.22.0.3`.

### Check that everything is healthy

```
> curl -XGET "127.0.0.1:4646/v1/nodes" -s | jq
```

Or check the Web UI to confirm that this forms a nomad deployment with 1 server and 2 clients.
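The same check from the command line (standard nomad CLI; the address points at the dtle-dc1 node):

```
export NOMAD_ADDR="http://127.0.0.1:4646"
nomad node status    # nomad1 and nomad2 should both be listed as ready
```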
## Configure replication from dc1 to dc2

Get the GTID of mysql-dc1:
```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "show master status\G" | grep "Executed_Gtid_Set"
< Executed_Gtid_Set: 41f102d4-d29f-11e8-8de7-0242ac130002:1-5
```

Prepare a file job-dc1-dc2.json with the following content:

```
{
    "Job": {
        "ID":"dtle-demo-dc1-2-dc2",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Constraints": [{
                    "LTarget": "${node.unique.name}",
                    "RTarget": "nomad1",
                    "Operand": "="
                }],
                "Config": {
                    "Gtid":"41f102d4-d29f-11e8-8de7-0242ac130002:1-5",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host":"mysql-dc1",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host":"mysql-dc2",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Constraints": [{
                    "LTarget": "${node.unique.name}",
                    "RTarget": "nomad2",
                    "Operand": "="
                }],
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

It defines:
- the source and target connection settings
- the table to replicate: `demo.demo_tbl`
- the GTID position, meaning this is an incremental-only replication (bidirectional replication supports only incremental replication)
- the source task (src) is pinned to the dtle node in dc1 (via Constraints)
- the target task (dest) is pinned to the dtle node in dc2 (via Constraints)

## Create the dc1-to-dc2 replication job

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @job-dc1-dc2.json -s | jq
```

Check the job status:

```
> curl -XGET "127.0.0.1:4646/v1/job/dtle-demo-dc1-2-dc2" -s | jq '.Status'
< "running"
```


## Configure replication from dc2 to dc1

Get the GTID of mysql-dc2:
```
> mysql -h 127.0.0.1 -P 33062 -uroot -ppass -e "show master status\G"
< *************************** 1. row ***************************
             File: bin.000003
         Position: 537
     Binlog_Do_DB:
 Binlog_Ignore_DB:
Executed_Gtid_Set: 41f102d4-d29f-11e8-8de7-0242ac130002:6-7,
42158e2f-d29f-11e8-b322-0242ac150002:1-5
```

Prepare a file job-dc2-dc1.json with the following content:

```
{
    "Job": {
        "ID":"dtle-demo-dc2-2-dc1",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Constraints": [{
                    "LTarget": "${node.unique.name}",
                    "RTarget": "nomad2",
                    "Operand": "="
                }],
                "Config": {
                    "Gtid":"41f102d4-d29f-11e8-8de7-0242ac130002:6-7,42158e2f-d29f-11e8-b322-0242ac150002:1-5",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host":"mysql-dc2",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "DestConnectionConfig": {
                        "Host":"mysql-dc1",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Constraints": [{
                    "LTarget": "${node.unique.name}",
                    "RTarget": "nomad1",
                    "Operand": "="
                }],
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

Compared with the dc1-to-dc2 replication job, the differences are:
- the source and target connection settings
- the GTID position
- the source task (src) runs on the dtle node in dc2
- the target task (dest) runs on the dtle node in dc1

## Create the dc2-to-dc1 replication job

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @job-dc2-dc1.json -s | jq
```

Check the job status:

```
> curl -XGET "127.0.0.1:4646/v1/job/dtle-demo-dc2-2-dc1" -s | jq '.Status'
< "running"
```

## Test

Now run DDL/DML of any kind against `demo.demo_tbl` on either side, and verify that the data on the other side stays consistent.

## Data conflicts

dtle does not detect data conflicts. If replay fails (e.g. a conflict makes an UPDATE hit a non-existent row), the job reports an error.

DML INSERTs are replayed as REPLACE, so a conflicting INSERT results in last-win behavior.

It is recommended that the application layer guarantee that writes do not conflict.
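To illustrate the last-win behavior of REPLACE-based replay, consider a hypothetical two-column table (the demo table above has only a key column):

```
-- hypothetical table on both sides: CREATE TABLE demo.t (id INT PRIMARY KEY, val INT);
-- dc1 executes:              INSERT INTO demo.t VALUES (1, 10);
-- dc2 executes concurrently: INSERT INTO demo.t VALUES (1, 20);
-- each INSERT reaches the peer as REPLACE INTO demo.t VALUES (...),
-- so whichever transaction replays later silently overwrites the row: last-win.
```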
--------------------------------------------------------------------------------
/2/2.4_ali_to_jd.md:
--------------------------------------------------------------------------------
# MySQL replication from Alibaba Cloud to JD Cloud

The following steps show how to set up MySQL replication from Alibaba Cloud RDS to JD Cloud RDS.

## Check the Alibaba Cloud RDS environment

MySQL version: 5.7.18

Check the privileges:

```
mysql> select user();
+---------------------+
| user()              |
+---------------------+
| root@180.169.60.146 |
+---------------------+
1 row in set (0.02 sec)


mysql> show grants for 'root'@'%' \G
*************************** 1. row ***************************
Grants for root@%: GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, REFERENCES, INDEX, ALTER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER ON *.* TO 'root'@'%' WITH GRANT OPTION
1 row in set (0.02 sec)
```

## Check the JD Cloud RDS environment

MySQL version: 5.7.21

**Note: user privileges on a JD Cloud RDS instance are granted per schema. Before creating the migration job, create two schemas on the MySQL instance through the JD Cloud RDS console: `dtle` (for dtle metadata) and the migration target schema.**


```
mysql> select user();
+---------------------------+
| user()                    |
+---------------------------+
| actiontech@180.169.60.146 |
+---------------------------+
1 row in set (0.00 sec)


mysql> show grants for 'actiontech'@'%';
+------------------------------------------------------------+
| Grants for actiontech@%                                    |
+------------------------------------------------------------+
| GRANT USAGE ON *.* TO 'actiontech'@'%'                     |
| GRANT ALL PRIVILEGES ON `actiontech`.* TO 'actiontech'@'%' |
+------------------------------------------------------------+
2 rows in set (0.00 sec)
```

## Request a JD Cloud ECS instance

A JD Cloud ECS instance is needed to host dtle.

In this example the host IP is `192.168.0.17`, sized 1C/4G/40G.


## Install and configure dtle

Install dtle:

```
rpm -ivh dtle-xxx.rpm
```

### Configure `/etc/dtle/nomad.hcl`:

```
# unchanged items omitted
bind_addr = "192.168.0.17"

advertise {
    http = "192.168.0.17"
    rpc = "192.168.0.17"
    serf = "192.168.0.17"
}
```

Start dtle:

```
systemctl start dtle-consul dtle-nomad
```

## Add the replication job

The job definition file job.json:

```
{
    "Job": {
        "ID":"ali-jd-demo",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid":"",
                    "ReplicateDoDb": [{
                        "TableSchema": "actiontech",
                        "Tables": []
                    }],
                    "SrcConnectionConfig": {
                        "Host":"rm-xxxx.mysql.rds.aliyuncs.com",
                        "Port": 3306,
                        "User":"root",
                        "Password":"Acti0ntech"
                    },
                    "DestConnectionConfig": {
                        "Host":"mysql-cn-east-2-yyyy.public.jcloud.com",
                        "Port": 3306,
                        "User":"actiontech",
                        "Password":"Acti0ntech"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

Submit the job to dtle:

```
curl -XPOST "192.168.0.17:4646/v1/jobs" -d @job.json
```

Check the job status:

```
curl -XGET "192.168.0.17:4646/v1/job/ali-jd-demo" -s | jq '.Status'
```

## Miscellaneous

For features such as link compression, see [Bidirectional MySQL replication across data centers](2.3_dc_to_dc_bidirectional.md).

By default consul only answers queries from the local host. To access the KV store from outside, change `client_addr` in /etc/dtle/consul.hcl, and adjust nomad.hcl accordingly.
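A minimal sketch of that change (consul's standard `client_addr` option; `0.0.0.0` exposes the HTTP/KV API on all interfaces, so restrict it appropriately in production):

```
# /etc/dtle/consul.hcl
client_addr = "0.0.0.0"
```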
--------------------------------------------------------------------------------
/2/2.5_mysql_kafka.md:
--------------------------------------------------------------------------------
# Change notification from MySQL to Kafka

The following steps use docker containers to quickly set up a MySQL-to-Kafka change-notification environment.

## Create a network

```
docker network create dtle-net
```

## Create the source MySQL

```
docker run --name mysql-src -e MYSQL_ROOT_PASSWORD=pass -p 33061:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=1
```

Check connectivity:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log
```

## Create the schema on the source

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "CREATE DATABASE demo; CREATE TABLE demo.demo_tbl(a int primary key)"
```

## Create the target Kafka

```
docker run --name kafka-zookeeper -p 2181:2181 -e ALLOW_ANONYMOUS_LOGIN=yes --network=dtle-net -d bitnami/zookeeper
docker run --name kafka-dst -p 9092:9092 -e KAFKA_ZOOKEEPER_CONNECT=kafka-zookeeper:2181 -e ALLOW_PLAINTEXT_LISTENER=yes --network=dtle-net -d bitnami/kafka
```

Check connectivity:

```
> docker run -it --rm \
    --network dtle-net \
    -e KAFKA_ZOOKEEPER_CONNECT=kafka-zookeeper:2181 \
    bitnami/kafka:latest kafka-topics.sh --list --zookeeper kafka-zookeeper:2181
< Welcome to the Bitnami kafka container
Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-kafka
Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-kafka/issues
```

## Create dtle

```
docker run --name dtle-consul -p 8500:8500 --network=dtle-net -d consul:latest
docker run --name dtle -p 4646:4646 --network=dtle-net -d actiontech/dtle
```

Check health:

```
> curl -XGET "127.0.0.1:4646/v1/nodes" -s | jq
< [{...}]
```

## Prepare the job definition

Prepare a file job.json with the following content:

```
{
    "Job": {
        "ID": "dtle-demo",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "Gtid": "",
                    "ReplicateDoDb": [{
                        "TableSchema": "demo",
                        "Tables": [{
                            "TableName": "demo_tbl"
                        }]
                    }],
                    "SrcConnectionConfig": {
                        "Host": "mysql-src",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    },
                    "KafkaConfig": {
                        "Topic": "demo-topic",
                        "Brokers": ["kafka-dst:9092"],
                        "Converter": "json"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "kafka"
                }
            }]
        }]
    }
}
```

It defines:
- the connection settings of the source MySQL
- the broker address of the target Kafka
- the table to replicate: `demo.demo_tbl`
- an empty GTID, meaning this is a full + incremental replication; to test incremental-only replication, specify a valid GTID

## Create the replication job

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @job.json -s | jq
< {...}
```

Check the job status:

```
> curl -XGET "127.0.0.1:4646/v1/job/dtle-demo" -s | jq '.Status'
< "running"
```

## Test

Write data on the source:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "INSERT INTO demo.demo_tbl values(11)"
...
```

Verify that the corresponding topic exists:

```
> docker run -it --rm \
    --network dtle-net \
    -e KAFKA_ZOOKEEPER_CONNECT=kafka-zookeeper:2181 \
    bitnami/kafka:latest kafka-topics.sh --list --zookeeper kafka-zookeeper:2181
< Welcome to the Bitnami kafka container
Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-kafka
Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-kafka/issues

demo-topic.demo.demo_tbl
```

Verify the data:

```
> docker run -it --rm \
    --network dtle-net \
    -e KAFKA_ZOOKEEPER_CONNECT=kafka-zookeeper:2181 \
    bitnami/kafka:latest kafka-console-consumer.sh --bootstrap-server kafka-dst:9092 --topic demo-topic.demo.demo_tbl --from-beginning
< ...
{"schema":{"type":"struct","optional":false,"fields":[{"type":"struct","optional":true,"field":"before","fields":[{"type":"int32","optional":false,"field":"a"}],"name":"demo-topic.demo.demo_tbl.Value"},{"type":"struct","optional":true,"field":"after","fields":[{"type":"int32","optional":false,"field":"a"}],"name":"demo-topic.demo.demo_tbl.Value"},{"type":"struct","optional":false,"field":"source","fields":[{"type":"string","optional":true,"field":"version"},{"type":"string","optional":false,"field":"name"},{"type":"int64","optional":false,"field":"server_id"},{"type":"int64","optional":false,"field":"ts_sec"},{"type":"string","optional":true,"field":"gtid"},{"type":"string","optional":false,"field":"file"},{"type":"int64","optional":false,"field":"pos"},{"type":"int32","optional":false,"field":"row"},{"type":"boolean","optional":true,"field":"snapshot"},{"type":"int64","optional":true,"field":"thread"},{"type":"string","optional":true,"field":"db"},{"type":"string","optional":true,"field":"table"}],"name":"io.debezium.connector.mysql.Source"},{"type":"string","optional":false,"field":"op"},{"type":"int64","optional":true,"field":"ts_ms"}],"name":"demo-topic.demo.demo_tbl.Envelope","version":1},"payload":{"before":null,"after":{"a":11},"source":{"version":"0.0.1","name":"demo-topic","server_id":0,"ts_sec":0,"gtid":null,"file":"","pos":0,"row":1,"snapshot":true,"thread":null,"db":"demo","table":"demo_tbl"},"op":"c","ts_ms":1539760682507}}
```
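The Debezium-style envelope is verbose. To watch only the row images, the consumer output can be piped through jq; `fromjson?` skips the image's non-JSON banner lines (same consumer command as above):

```
docker run -i --rm \
    --network dtle-net \
    -e KAFKA_ZOOKEEPER_CONNECT=kafka-zookeeper:2181 \
    bitnami/kafka:latest kafka-console-consumer.sh --bootstrap-server kafka-dst:9092 \
    --topic demo-topic.demo.demo_tbl --from-beginning | jq -R 'fromjson? | .payload.after'
```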
Now run DDL/DML of any kind against `demo.demo_tbl` on the source, and verify that the data on the target side stays consistent.

For the Kafka message format, see [5.3 Kafka message format](../5/5.3_kafka_message_format.md).

--------------------------------------------------------------------------------
/2/2.6_oracle_mysql.md:
--------------------------------------------------------------------------------
# Data synchronization from Oracle to MySQL

The following steps use docker containers to quickly set up a one-way Oracle-to-MySQL replication environment.

## Create a network

```
docker network create dtle-net
```

## Create the source Oracle


```
# start the oracle image
docker run -it -d -p 1521:1521 --name oracle-src --network=dtle-net -e ORACLE_ALLOW_REMOTE=true wnameless/oracle-xe-11g-r2

# set up the environment and start oracle
docker exec -it oracle-src bash
mkdir /u01/app/oracle/oradata/archive_log
chown oracle /u01/app/oracle/oradata/archive_log

export ORACLE_HOME=/u01/app/oracle/product/11.2.0/xe
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_SID=XE

service oracle-xe start

# configure for replication
sqlplus SYS/oracle AS SYSDBA
alter system set log_archive_dest_1='location=/u01/app/oracle/oradata/archive_log' scope=spfile;
alter system set db_recovery_file_dest_size = 10G;

shutdown immediate;
startup mount;
alter database add logfile group 3 '/u01/app/oracle/fast_recovery_area/XE/onlinelog/redo01.log' size 500m;
alter database add logfile group 4 '/u01/app/oracle/fast_recovery_area/XE/onlinelog/redo02.log' size 500m;
alter database add logfile group 5 '/u01/app/oracle/fast_recovery_area/XE/onlinelog/redo03.log' size 500m;
alter database archivelog;
alter database add supplemental log data (all) columns;
alter database open;

# create the replication account
create role roma_logminer_privs;
grant create session,execute_catalog_role,select any transaction,select_catalog_role,select any dictionary to roma_logminer_privs;
grant select on SYSTEM.LOGMNR_COL$ to roma_logminer_privs;
grant select on SYSTEM.LOGMNR_OBJ$ to roma_logminer_privs;
grant select on SYSTEM.LOGMNR_USER$ to roma_logminer_privs;
grant select on SYSTEM.LOGMNR_UID$ to roma_logminer_privs;
create user roma_logminer identified by oracle default tablespace users;
grant roma_logminer_privs to roma_logminer;
alter user roma_logminer quota unlimited on users;
```


## Create the target MySQL

```
docker run --name mysql-dst -e MYSQL_ROOT_PASSWORD=pass -p 33061:3306 --network=dtle-net -d mysql:5.7 --gtid-mode=ON --enforce-gtid-consistency=1 --log-bin=bin --server-id=1
```

Check connectivity:

```
> mysql -h 127.0.0.1 -P 33061 -uroot -ppass -e "select @@version\G"
< *************************** 1. row ***************************
@@version: 5.7.23-log
```

## Create dtle

```
docker run --name dtle-consul -p 8500:8500 --network=dtle-net -d consul:latest
docker run --name dtle -p 4646:4646 --network=dtle-net -d actiontech/dtle
# To use the dtle 2.x HTTP API compatibility layer, also map port 8190: -p 8190:8190
```

Check health:

```
> curl -XGET "127.0.0.1:4646/v1/nodes" -s | jq
< [
    {
      "Address": "127.0.0.1",
      "Datacenter": "dc1",
      "Drivers": {
        "dtle": {
          "Attributes": {
            "driver.dtle": "true",
            "driver.dtle.version": "..."
          },
          "Detected": true,
          "Healthy": true,
        }
      },
      "ID": "65ff2f9a-a9fa-997c-cce0-9bc0b4f3396c",
      "Name": "nomad0",
      "Status": "ready",
    }
  ]
# (some fields omitted)
```

## Prepare the job definition

Prepare a file job.json with the following content:

```json
{
    "Job": {
        "ID": "dtle-demo",
        "Datacenters": ["dc1"],
        "TaskGroups": [{
            "Name": "src",
            "Tasks": [{
                "Name": "src",
                "Driver": "dtle",
                "Config": {
                    "ReplicateDoDb": [{
                        "TableSchema": "TEST",
                        "Tables": [{
                            "TableName": "t1"
                        }]
                    }],
                    "SrcOracleConfig": {
                        "User": "roma_logminer",
                        "Password": "oracle",
                        "Host": "oracle-src",
                        "Port": 1521,
                        "ServiceName": "XE",
                        "Scn": 0
                    },
                    "DestConnectionConfig": {
                        "Host": "mysql-dst",
                        "Port": 3306,
                        "User": "root",
                        "Password": "pass"
                    }
                }
            }]
        }, {
            "Name": "dest",
            "Tasks": [{
                "Name": "dest",
                "Driver": "dtle",
                "Config": {
                    "DestType": "mysql"
                }
            }]
        }]
    }
}
```

It defines:
- the connection settings of the source Oracle
- the connection settings of the target MySQL
- the table to replicate: `TEST.t1`
- an SCN of 0, meaning replication starts from the moment the job starts; to replay an increment from a specific position, specify a valid SCN
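To find the source's current SCN, a standard Oracle query can be used (run as a sufficiently privileged user, e.g. SYS in this demo):

```
sqlplus SYS/oracle AS SYSDBA
SELECT CURRENT_SCN FROM V$DATABASE;
```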
## Create the replication job

```
> curl -XPOST "http://127.0.0.1:4646/v1/jobs" -d @job.json -s | jq
< {...}
```

Check the job status:

```
> curl -XGET "127.0.0.1:4646/v1/job/dtle-demo" -s | jq '.Status'
< "running"
```

## Test

Write data on the source:

```
sqlplus SYS/oracle AS SYSDBA
create user TEST identified by oracle;
grant connect,resource to TEST;
create table TEST."t1" (id int,name varchar(255));
insert into TEST."t1" values(1,'ryan');
commit;
```

Verify the data on the target: check that the table on the MySQL side matches the source.

For the column mapping, see [5.4 Oracle-MySQL column mapping](../5/5.4_columns_mapping.md).

--------------------------------------------------------------------------------
/2/2.7_multi_node.md:
--------------------------------------------------------------------------------
# Multi-server deployment

nomad can be configured as:
- a single server with a single client
- a single server with multiple clients
- multiple servers with multiple clients

Where:
- servers manage job data
  - use an odd number of servers, usually 1 or 3, and no more than 5
- clients (running the dtle plugin) execute jobs
  - any number of clients
- a server and a client may run in the same process, or be started separately

consul must also be running, for:
- nomad service discovery (automatic multi-node registration)
- dtle runtime state storage

Typically each nomad server is paired with one consul server, both running on the same host.

The following describes a **multi-server, multi-client configuration**.

## consul configuration

Edit /etc/dtle/consul.hcl:

```
# Rename for each node
node_name = "consul1"

# IP configuration

# Address that should be bound to for internal cluster communications
bind_addr = "0.0.0.0"
# Address to which Consul will bind client interfaces, including the HTTP and DNS servers
client_addr = "127.0.0.1"
advertise_addr = "127.0.0.1"

# ... unchanged items omitted

bootstrap_expect = 3
retry_join = ["127.0.0.1", "127.0.0.2", "127.0.0.3"] # will use default serf port
```

Make the corresponding changes on the other two nodes.

Once all nodes are started, the Web UI shows a 3-node consul cluster, with one node as the Leader.

![](./consul-3.png)
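Membership can also be verified from the command line on any node (standard consul CLI):

```
consul members    # all three nodes should show as alive
```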
省略未更改项 58 | advertise { 59 | http = "127.0.0.1:4646" 60 | rpc = "127.0.0.1:4647" 61 | serf = "127.0.0.1:4648" 62 | } 63 | server { 64 | enabled = true 65 | bootstrap_expect = 3 66 | } 67 | consul { 68 | address = "127.0.0.1:8500" 69 | } 70 | plugin "dtle" { 71 | config { 72 | # ... 省略未更改项 73 | nats_bind = "127.0.0.1:8193" 74 | nats_advertise = "127.0.0.1:8193" 75 | consul = "127.0.0.1:8500" 76 | nomad_addr = "127.0.0.1:4646" # compatibility API need to access a nomad server 77 | } 78 | } 79 | ``` 80 | 81 | 全部启动后, nomad将自动向consul注册,组成集群: 82 | 83 | ![](./nomad-server.png) 84 | 85 | ![](./nomad-client.png) 86 | -------------------------------------------------------------------------------- /2/consul-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/2/consul-3.png -------------------------------------------------------------------------------- /2/nomad-client.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/2/nomad-client.png -------------------------------------------------------------------------------- /2/nomad-server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/2/nomad-server.png -------------------------------------------------------------------------------- /3/3.0_function_scenario_mapping.md: -------------------------------------------------------------------------------- 1 | # 功能/场景的映射列表 2 | 3 | | 场景 | 复制手段(binlog-binlog) | 复制手段(binlog-sql) | 复制模式(全量+增量) | 复制模式(增量) | 4 | | ------------- | ------------- | ------------- | ------------- | ------------- | 5 | | 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 | 支持 | 6 | | 单个MySQL 双向复制到 单个MySQL | 支持 | 支持 | - | 支持 | 7 | | 多个MySQL的表 合并到 单个MySQL | 支持 | 支持 | 支持 | 支持 | 8 | | 单个MySQL的不同表 分发到 多个MySQL | 支持 | 支持 | 支持 | 支持 | 9 | | 单个MySQL的同一表的不同记录 分发 到 多个MySQL | 支持按主键分发; 不支持按函数分发 | 支持 | 支持 | 支持按主键分发; 不支持按函数分发 | 10 | | 公有云间的数据同步 | 不支持 | 支持 | 支持 | 支持 | 11 | | 单个MySQL复制到Kafka | - | - | 支持 | 支持 | 12 | | 多个MySQL复制到Kafka | - | - | 支持 | 支持 | 13 | 14 | | 场景 | 复制对象(整库复制) | 复制对象(整表复制) | 复制对象(按条件复制部分记录) | 15 | | ------------- | ------------- | ------------- | ------------- | 16 | | 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 | 17 | | 单个MySQL 双向复制到 单个MySQL | 支持; 不支持建表语句包含`if not exists` [#361](https://github.com/actiontech/dtle/issues/361) | 支持 | 支持 | 18 | | 多个MySQL的表 合并到 单个MySQL | - | 支持 | 支持 | 19 | | 单个MySQL的不同表 分发到 多个MySQL | - | 支持 | 支持 | 20 | | 单个MySQL的同一表的不同记录 分发 到 多个MySQL | - | 支持 | 支持 | 21 | | 公有云间的数据同步 | 支持 | 支持 | 支持 | 22 | | 单个MySQL复制到Kafka | 支持 | 支持 | 支持 | 23 | | 多个MySQL复制到Kafka | 支持 | 支持 | 支持 | 24 | 25 | 26 | | 场景 | 复制链路(链路压缩) | 复制链路(跨网络边际) | 回访模式(并行回放) | 27 | | ------------- | ------------- | ------------- | ------------- | 28 | | 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 | 29 | | 单个MySQL 双向复制到 单个MySQL | 支持 | 支持 | 支持 | 30 | | 多个MySQL的表 合并到 单个MySQL | 支持 | 支持 | 支持 | 31 | | 单个MySQL的不同表 分发到 多个MySQL | 支持 | 支持 | 支持 | 32 | | 单个MySQL的同一表的不同记录 分发 到 多个MySQL | 支持 | 支持 | 支持 | 33 | | 公有云间的数据同步 | 支持 | 支持 | 支持 | 34 | | 单个MySQL复制到Kafka | 支持 | 支持 | - | 35 | | 多个MySQL复制到Kafka | 支持 | 支持 | - | 36 | 37 | 38 | | 场景 | 自动创建表结构 | 支持DDL | Agent 水平扩展 | 39 | | ------------- | ------------- | ------------- | ------------- | 40 | | 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 | 41 | | 单个MySQL 双向复制到 
--------------------------------------------------------------------------------
/2/consul-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/2/consul-3.png
--------------------------------------------------------------------------------
/2/nomad-client.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/2/nomad-client.png
--------------------------------------------------------------------------------
/2/nomad-server.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/2/nomad-server.png
--------------------------------------------------------------------------------
/3/3.0_function_scenario_mapping.md:
--------------------------------------------------------------------------------

# 功能/场景的映射列表

| 场景 | 复制手段(binlog-binlog) | 复制手段(binlog-sql) | 复制模式(全量+增量) | 复制模式(增量) |
| --- | --- | --- | --- | --- |
| 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL 双向复制到 单个MySQL | 支持 | 支持 | - | 支持 |
| 多个MySQL的表 合并到 单个MySQL | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL的不同表 分发到 多个MySQL | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL的同一表的不同记录 分发到 多个MySQL | 支持按主键分发; 不支持按函数分发 | 支持 | 支持 | 支持按主键分发; 不支持按函数分发 |
| 公有云间的数据同步 | 不支持 | 支持 | 支持 | 支持 |
| 单个MySQL复制到Kafka | - | - | 支持 | 支持 |
| 多个MySQL复制到Kafka | - | - | 支持 | 支持 |

| 场景 | 复制对象(整库复制) | 复制对象(整表复制) | 复制对象(按条件复制部分记录) |
| --- | --- | --- | --- |
| 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL 双向复制到 单个MySQL | 支持; 不支持建表语句包含`if not exists` [#361](https://github.com/actiontech/dtle/issues/361) | 支持 | 支持 |
| 多个MySQL的表 合并到 单个MySQL | - | 支持 | 支持 |
| 单个MySQL的不同表 分发到 多个MySQL | - | 支持 | 支持 |
| 单个MySQL的同一表的不同记录 分发到 多个MySQL | - | 支持 | 支持 |
| 公有云间的数据同步 | 支持 | 支持 | 支持 |
| 单个MySQL复制到Kafka | 支持 | 支持 | 支持 |
| 多个MySQL复制到Kafka | 支持 | 支持 | 支持 |

| 场景 | 复制链路(链路压缩) | 复制链路(跨网络边界) | 回放模式(并行回放) |
| --- | --- | --- | --- |
| 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL 双向复制到 单个MySQL | 支持 | 支持 | 支持 |
| 多个MySQL的表 合并到 单个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL的不同表 分发到 多个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL的同一表的不同记录 分发到 多个MySQL | 支持 | 支持 | 支持 |
| 公有云间的数据同步 | 支持 | 支持 | 支持 |
| 单个MySQL复制到Kafka | 支持 | 支持 | - |
| 多个MySQL复制到Kafka | 支持 | 支持 | - |

| 场景 | 自动创建表结构 | 支持DDL | Agent 水平扩展 |
| --- | --- | --- | --- |
| 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL 双向复制到 单个MySQL | - | 支持 | 支持 |
| 多个MySQL的表 合并到 单个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL的不同表 分发到 多个MySQL | 支持 | 支持 | 支持 |
| 单个MySQL的同一表的不同记录 分发到 多个MySQL | 支持 | 支持按主键分发; 不支持按函数分发 | 支持按主键分发; 不支持按函数分发 |
| 公有云间的数据同步 | 支持 | 支持 | 支持 |
| 单个MySQL复制到Kafka | 支持 | 不支持 | 支持 |
| 多个MySQL复制到Kafka | 支持 | 不支持 | 支持 |

| 场景 | 高可用(故障转移) | 高可用(断点续做) | 任务暂停/恢复 | 监控 |
| --- | --- | --- | --- | --- |
| 单个MySQL 单向复制到 单个MySQL | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL 双向复制到 单个MySQL | 支持 | 支持 | 支持 | 支持 |
| 多个MySQL的表 合并到 单个MySQL | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL的不同表 分发到 多个MySQL | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL的同一表的不同记录 分发到 多个MySQL | 支持按主键分发; 不支持按函数分发 | 支持按主键分发; 不支持按函数分发 | 支持按主键分发; 不支持按函数分发 | 支持 |
| 公有云间的数据同步 | 支持 | 支持 | 支持 | 支持 |
| 单个MySQL复制到Kafka | 支持 | 不支持 | 支持 | 支持 |
| 多个MySQL复制到Kafka | 支持 | 不支持 | 支持 | 支持 |

--------------------------------------------------------------------------------
/3/3.10_consul.md:
--------------------------------------------------------------------------------

# consul 上的 job 数据管理

dtle 3.x作为nomad插件运行,并且需要consul伴随运行。

部分job信息储存在了consul上。其中最重要的是进度,即 Gtid 或 BinlogFile & Pos。

查看方法(以默认consul地址为例):

```
# 使用raw查看原始值
$ curl -XGET "127.0.0.1:8500/v1/kv/dtle/job_name/Gtid?raw"
acd7d195-06cd-11e9-928f-02000aba3e28:1-143934

$ curl -XGET "127.0.0.1:8500/v1/kv/dtle/job_name/BinlogPos?raw"
bin.000075//dtle//11909
```

- 注意Gtid可能有多行,需要完整记录。
- BinlogFile & Pos 使用 `//dtle//` 分割。

为了便于用户记录进度,job删除后,dtle不会自动删除consul上的信息。

重建Job时,若consul上已有进度,则会使用consul上的进度(而非job配置中的起点)。

已删除的Job需要自行删除consul上的信息:
```
# 使用recurse删除job_name下所有项目
$ curl -XDELETE "127.0.0.1:8500/v1/kv/dtle/job_name?recurse"
```

或者使用浏览器访问 127.0.0.1:8500, 使用Web UI管理。

--------------------------------------------------------------------------------
/3/3.11_oracle_mysql.md:
--------------------------------------------------------------------------------

# CDC场景

## 全量复制
- 从任务启动时间点开始,将指定库表的结构和数据传输到目标端

## 全量流程
1. 获取当前所需同步的库/表,从服务器上的redo日志获取当前系统改变号(SCN)的位置
2. 获取同步表的ROW SHARE LOCK,以防止在创建快照期间表结构发生变化
3. 获取同步的库/表的结构信息,同步到目标端
4. 释放ROW SHARE LOCK
5. 依据步骤1读取的SCN位置,全表扫描所有相关数据库表和schema
```
例:
SELECT * FROM SCHEMA.TABLE AS OF SCN 123 where ROWNUM <= 2000
minus
SELECT * FROM SCHEMA.TABLE AS OF SCN 123 where ROWNUM < 1
```
6. 传输完所有的表数据,继续增量同步

## 限制
全量同步过程中,表结构同步完成前,不支持对同步的表做DDL操作

## 增量复制
- 根据SCN节点开启增量复制
- 从任务启动时间开启增量复制

# DML支持

## DML类型
| DML类型 | option | Oracle SQL | MySQL SQL | 其他 |
| --- | --- | --- | --- | --- |
| INSERT | | INSERT INTO TEST.CHARACTER_256_COLUMNS VALUES (4, NULL); | ``replace into `TEST`.`CHAR_256_COLUMNS`(`COL1`, `COL2`)values(?, ?)`` | args=[0, ] |
| UPDATE | | UPDATE TEST.CHAR_20000_COLUMNS SET COL2='a a b ' WHERE COL1=2; | ``update `TEST`.`CHAR_256_COLUMNS` set`COL1`=?, `COL2`=?where((`COL1` = ?) and (`COL2` = ?)) limit 1`` | args=[3, "a a", 3, "a a"] |
| DELETE | | DELETE FROM TEST.CHAR_256_COLUMNS WHERE COL1 = 5; | ``delete from `TEST`.`CHAR_256_COLUMNS` where((`COL1` = ?)
and (`COL2` = ?)) limit 1`` | args=[5, "ABCDEFGHIJKLMNOPQRSTUVWXYZ "] | 34 | 35 | ## 函数支持 36 | | 函数名 | 是否支持 | 其他 | 37 | | --- | --- | --- | 38 | | EMPTY_BLOB | 是 | 函数支持解析为NULL| 39 | | EMPTY_CLOB | 是 | 函数支持解析为NULL| 40 | | CHR | 是 | | 41 | | HEXTORAW | 是 | | 42 | | DATE | 是| | 43 | | TO_DATE | 是 | | 44 | | TO_DSINTERVAL | 是 | | 45 | | TO_YMINTERVAL | 是 | | 46 | | RAWTOHEX | 是 | | 47 | | UNISTR | 是| | 48 | | RAWTOHEX(CHR(34)) |是 | | 49 | | TO_TIMESTAMP | 是 | | 50 | | LOCALTIMESTAMP | 是 | | 51 | | CURRENT_TIMESTAMP | 是 | | 52 | | SYSTIMESTAMP | 是 | | | 53 | 54 | # DDL支持 55 | | SQL类型 | Option | Oracle SQL | 转化后MySQL SQL | 语法支持 | 56 | |---------------------------|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------| 57 | | CREATE TABLE | 不带约束 | CREATE TABLE "test"."CaseInsensitive" ("firstName" VARCHAR(15) NOT NULL,lastName VARCHAR2(45) NOT NULL) | ``CREATE TABLE `test`.`CaseInsensitive` (`firstName` VARCHAR(15) NOT NULL,`LASTNAME` VARCHAR(45) NOT NULL) DEFAULT CHARACTER SET = UTF8MB4`` | 支持 | 58 | | CREATE TABLE | 带约束 | CREATE TABLE TEST.employees_demo( employee_id NUMBER(6), last_name VARCHAR2(25) CONSTRAINT emp_last_name_nn_demo NOT NULL, CONSTRAINT emp_id_uk_demo UNIQUE (employee_id)) | ``CREATE TABLE `TEST`.`EMPLOYEES_DEMO` (`EMPLOYEE_ID` INT,`LAST_NAME` VARCHAR(25) NOT NULL,UNIQUE `EMP_ID_UK_DEMO`(`employee_id`)) DEFAULT CHARACTER SET = UTF8MB4`` | 不支持外键约束 | 59 | | ALTER TABLE | AddColumnClase | alter table TEST.ADDCOLUMN add (author_last_published date); | ``ALTER TABLE `TEST`.`ADDCOLUMN` ADD COLUMN (`AUTHOR_LAST_PUBLISHED` DATETIME)`` | 支持 | 60 | | ALTER TABLE | ModifyColumnClause | ALTER TABLE test."MODIFYCOLUMN" MODIFY ( alter_new_name1 CHAR ( 13 )) MODIFY ( alter_name2 VARCHAR ( 66 )) | ``ALTER TABLE `TEST`.`MODIFYCOLUMN` MODIFY COLUMN `ALTER_NEW_NAME1` CHAR(13), MODIFY COLUMN `ALTER_NAME2` VARCHAR(66)`` | 支持 | 61 | | ALTER TABLE | DropColumnClause | alter table TEST.DROPCOLUMN1 drop column COL1 | ``ALTER TABLE `TEST`.`DROPCOLUMN1` DROP COLUMN `TEST`.`DROPCOLUMN1`.`COL1`` | 支持 | 62 | | ALTER TABLE | RenameColumnClase | alter table TEST.RENAMECOLUMN RENAME COLUMN COL1 TO COLNEW1 | ``ALTER TABLE `TEST`.`RENAMECOLUMN` RENAME COLUMN `TEST`.`RENAMECOLUMN`.`COL1` TO `TEST`.`RENAMECOLUMN`.`COLNEW1` `` | 当前仅支持8.0语法 | 63 | | DROP TABLE | | DROP TABLE TEST.DROPTABLE | ``DROP TABLE `TEST`.`DROPTABLE` `` | | 64 | | create schema/create user | | | | 实现为执行create table 前先执行create schema if not exists,保持库同步 [#840](https://github.com/actiontech/dtle/issues/840) | 65 | 66 | 67 | # 下个版本支持功能 68 | - [ ] 支持 索引同步 69 | - [ ] 同步LOB_WRITE,LOB_TRIM, LOB_ERASE,SEL_LOB_LOCATOR 事件 70 | - [ ] 支持PDB(多租户,oracle 12开始支持) 71 | - [ ] DTLE Oracle extractor 通过 SQL driver 轮询读取的间隔目前写死的5秒,优化为动态数值 72 | - [ ] DTLE Oracle extractor 通过 SQL driver 轮询的SCN区间目前写死的100000,优化为动态数值 73 | -------------------------------------------------------------------------------- /3/3.1_limitation.md: -------------------------------------------------------------------------------- 1 | # 使用限制 2 | 3 | ## 限制 4 | 5 | * 仅支持 MySQL 5.6/5.7 版本 6 | * 仅支持 InnoDB 引擎 7 | * 仅支持以下字符集: 8 | * latin1 9 | * latin2 10 | * gb2312, gbk, gb18030 
11 | * utf8, utf8mb4 12 | * utf32 13 | * binary 14 | * 在latin1/2表中,不支持非latin字符(如中文)(#388) 15 | * 对于非UTF8编码执行的DDL,不支持DDL中含有混合编码字符串,如`(col varchar default _utf32"...")` 16 | * binlog 仅支持`row`模式 17 | * binlog image 仅支持`FULL`模式 18 | * 源端和目标端大小写敏感配置 \(`lower_case_table_names`\) 需保持一致 19 | * 需要开启 GTID 20 | * 不支持 Trigger 21 | * 暂不支持 View 22 | * 支持procedure,function,event的增量部分迁移(须创建库级别的迁移job),但存在源端与目标端字符集不完全一致的问题[\#357](https://github.com/actiontech/dtle/issues/357) 23 | * 支持user增量部分的迁移(须创建实例级别的迁移job),且支持grant,revoke(要求回放用户有`grant option`) 24 | * 支持MySQL认证方式 `mysql_native_password`(MySQL 5.7)和`caching_sha2_password`(MySQL 8.0),其他认证方式不详 25 | * 在dtle的增量复制过程中,如果源端执行`replace into`语句或者执行产生Duplicate entry冲突insert语句,可能导致目标端的 `AUTO_INCREMENT`值和源端不一致([MySQL Bug\#83030](https://bugs.mysql.com/bug.php?id=83030&tdsourcetag=s_pctim_aiomsg)) 26 | 27 | ## 源端 MySQL 需配置如下参数 28 | 29 | | 参数 | 值 | 检查方式 | 30 | | --- | --- | --- | 31 | | `log_bin` | ON (my.cnf中填写合法文件名) | `show global variables like 'log_bin'` | 32 | | `binlog_format` | ROW | `show global variables like 'binlog_format';` | 33 | | `binlog_row_image` | FULL | `show global variables like 'binlog_row_image';` | 34 | | `log_slave_updates` | ON | `show global variables like 'log_slave_updates';` | 35 | | `gtid_mode` | ON | `show global variables like 'gtid_mode';` | 36 | 37 | - 对于 `lower_case_table_names`参数, dtle支持的值为`0`或`1`。 38 | - 原则上要求源端和目标端设置相同。 39 | - 且job存续期间,MySQL上该参数的值不可改变。 40 | - 允许设置参数值`2`, 但不支持大小写混用。 41 | 42 | ## 关于外键 (foreign key) 43 | 44 | 在3.21.10.0以前,dtle回放时会设置`set @@foreign_key_checks=OFF`。外键的级连操作(如on update cascade)将无法生效。 45 | 46 | 从3.21.10.0开始,dtle增量回放时,默认`set @@foreign_key_checks=ON`。可以触发外键级连操作。 47 | 48 | 对于存在外键关系的一系列表,需要这些表在同一个job的复制范围内,才能正常执行。 49 | 50 | 该行为可用job配置中dest部分`ForeignKeyChecks`控制,默认为true。如有必要,可将其设为false。 51 | 52 | 涉及外键引用父表的事务,回放时会单独回放,不能并行。 53 | 54 | 遗留问题:在外键子表上`alter table drop foreign key`后,原外键父表依然会被认为是外键父表。 55 | -------------------------------------------------------------------------------- /3/3.2_ports.md: -------------------------------------------------------------------------------- 1 | # 端口使用说明 2 | 3 | 默认情况下, nomad 和 consul的传输/通信会使用如下端口: 4 | 5 | | 端口号 | 说明 | 6 | | ------------- | ------------- | 7 | | 8190 | dtle 2.x HTTP API兼容层的端口 | 8 | | 8500 | consul HTTP 端口 | 9 | | 4646 | nomad HTTP 端口 | 10 | | 4647 | nomad RPC 端口 | 11 | | 4648 | nomad serf端口 | 12 | | 8193 | 数据传输的端口 | 13 | 14 | ## 如何修改 15 | 16 | 端口配置可在[/etc/dtle/nomad.hcl](../4/4.1_node_configuration.md)中修改 17 | -------------------------------------------------------------------------------- /3/3.3_impact_on_dest.md: -------------------------------------------------------------------------------- 1 | # 对目标端数据库的影响(gtid_executed表) 2 | 3 | ## 表 `dtle.gtid_executed_v4` 4 | 5 | 当目标端是MySQL数据库时, dtle会在目标端自动创建表`dtle.gtid_executed_v4`, 目标端的用于回放数据的数据库用户需要对这张表有[相应权限](./4.4_mysql_user_privileges.md). 6 | 7 | 表`dtle.gtid_executed_v4`的作用是存储已经回放的事务的GTID, 用作断点续传/数据检查等. 8 | 9 | 使用表`dtle.gtid_executed_v4`模仿GTID机制, 而不使用MySQL原生GTID机制的原因是: 在回放时, `set GTID_NEXT=...` 语句需要 SUPER 权限, 而云环境下, 数据库用户可能无法拥有 SUPER 权限. 
10 | 11 | `dtle.gtid_executed_v4`的建表语句如下: 12 | 13 | ```sql 14 | CREATE TABLE IF NOT EXISTS dtle.gtid_executed_v4 ( 15 | job_name varchar(64) NOT NULL, 16 | source_uuid binary(16) NOT NULL, 17 | gtid int NOT NULL, 18 | gtid_set longtext, 19 | primary key (job_name, source_uuid, gtid) 20 | ); 21 | ``` 22 | 23 | 表结构说明: 24 | - job_name: 执行同步的任务名 25 | - source_uuid: 源端数据库UUID号 26 | - gtid: 执行过的GTID gno编号。若某行该列为0,则表明这是一个汇总行 27 | - 行数过多时,会触发汇总机制 28 | - gtid_set: 对于gtid=0的汇总行,该列批量储存gno编号,如1-100:200:300-400 29 | 30 | 典型的查询方法 31 | ```sql 32 | SELECT job_name, HEX(source_uuid), gtid, gtid_set FROM dtle.gtid_executed_v4; 33 | -- 注意source_uuid以binary储存,直接查询会乱码,需要HEX()转换 34 | ``` 35 | -------------------------------------------------------------------------------- /3/3.4.1_delay_alert.md: -------------------------------------------------------------------------------- 1 | # 延迟监控告警示例 2 | [Prometheus](https://prometheus.io)可直观查看监控项并记录历史值, 3 | 可通过[搭建Prometheus](./3.4.1_delay_alert.md#Prometheus配置)查看延迟情况, 4 | [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) 可针对异常监控项及时发出告警信息, 5 | 通过[配置Alertmanager](./3.4.1_delay_alert.md#Alertmanager配置)对延迟异常任务发出告警 6 | 7 | 8 | ## Prometheus配置 9 | 10 | 查看监控项并记录历史值 11 | 12 | - 准备配置文件 prometheus.yml : 13 | 14 | ``` 15 | # 设定alertmanager和prometheus交互的接口,即alertmanager监听的ip地址和端口 16 | alerting: 17 | alertmanagers: 18 | - static_configs: 19 | - targets: ["127.0.0.1:9093"] 20 | 21 | # 告警规则文件 22 | rule_files: 23 | - 'prometheus_rule.yml' 24 | 25 | scrape_configs: 26 | - job_name: 'dtle' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | static_configs: 32 | - targets: ['127.0.0.1:8190','127.0.0.2:8190'] # 填写dtle兼容层的地址。可填多个。 33 | 34 | ``` 35 | 36 | - 准备告警规则文件 prometheus_rule.yml 37 | 38 | ``` 39 | groups: 40 | - name: simple_example 41 | rules: 42 | 43 | # Alert for task that is delay more than 5s for >1 minutes. 44 | - alert: TaskDelay 45 | expr: dtle_delay_time > 5 46 | for: 1m 47 | labels: 48 | severity: warning 49 | annotations: 50 | summary: "task {{ $labels.task_name }} has delay" 51 | description: "Task {{ $labels.task_name }} of instance {{ $labels.instance }} has delay 5s more than 1 minutes." 52 | ``` 53 | 54 | - 使用docker运行Prometheus: 55 | 56 | ``` 57 | docker run \ 58 | -p 9090:9090 \ 59 | -v ${PWD}/prometheus.yml:/etc/prometheus/prometheus.yml \ 60 | -v ${PWD}/prometheus_rule.yml:/etc/prometheus/prometheus_rule.yml \ 61 | prom/prometheus 62 | ``` 63 | 64 | - 然后浏览器访问 http://127.0.0.1:9090, 并查询(Prometheus提供补全)需要的监控项。 65 | 66 | ![](images/3.4_delay1.png) 67 | 68 | - 访问http://127.0.0.1:9090/alerts, 获取当前告警规则/内容 69 | 70 | ![](images/3.4.1_delay2.png) 71 | 72 | ## Alertmanager配置 73 | 74 | 针对任务延迟异常发送告警 75 | 76 | - 创建配置文件 alertmanager.yml 配置示例如下 77 | 78 | ``` 79 | global: 80 | smtp_smarthost: 'smtp.gmail.com:587' 81 | smtp_from: 'SENDER_ACCOUNT' 82 | smtp_auth_username: 'SENDER_ACCOUNT' 83 | smtp_auth_password: 'email smtp verify password' 84 | smtp_require_tls: false 85 | route: 86 | # If an alert has successfully been sent, wait 'repeat_interval' to resend them. 
  repeat_interval: 10s
  # A default receiver
  receiver: team-dtle-mails

receivers:
- name: 'team-dtle-mails'
  email_configs:
  - to: 'receiver@actionsky.com'
```

- 启动alertmanager

```
docker run -p 9093:9093 -v ${PWD}/alertmanager.yml:/etc/alertmanager/alertmanager.yml prom/alertmanager
```

- 根据配置,延迟5s以上并持续1min时,receiver@actionsky.com 邮箱收到告警如下:

```
[1] Firing
Labels
alertname = TaskDelay
host = localhost.localdomain
instance = dtle-1
job = dtle
severity = warning
task_name = dest-fail-migration_src
Annotations
description = Task dest-fail-migration_src of instance dtle-1 has delay 5s more than 1 minutes.
summary = task dest-fail-migration_src has delay
Source
```

--------------------------------------------------------------------------------
/3/3.4.2_monitor.md:
--------------------------------------------------------------------------------

# 如何搭建DTLE的监控系统

## 背景:

虽然DTLE的文档里提供了各种监控项的介绍,但是对于不熟悉prometheus和grafana配置的同学来说,上手还是有些难度的。今天我就用DTLE 3.21.07.0来搭建一个DTLE的监控系统。

## 一、搭建DTLE运行环境

- 配置两个节点的DTLE集群来演示,其拓扑如下:

![topu](images/3.4.2_topu.png)

在修改DTLE配置文件的时候需要注意以下两点:

1. 开启DTLE的监控,确保publish_metrics的值为true
2. 开启nomad的监控,确保正确配置[telemetry](https://www.nomadproject.io/docs/configuration/telemetry)

这里以dtle-src-1的配置为例,具体配置参考[节点配置](https://github.com/actiontech/dtle-docs-cn/blob/master/4/4.1_node_configuration.md):

```
# DTLE 3.21.07.0中nomad升级为1.1.2,需要添加如下配置使nomad提供监控数据
# 之前版本的DTLE无需添加此配置
telemetry {
  prometheus_metrics = true
  collection_interval = "15s"
}

plugin "dtle" {
  config {
    data_dir = "/opt/dtle/var/lib/nomad"
    nats_bind = "10.186.63.20:8193"
    nats_advertise = "10.186.63.20:8193"
    # Repeat the consul address above.
    consul = "10.186.63.76:8500"

    # By default, API compatibility layer is disabled.
    api_addr = "10.186.63.20:8190" # for compatibility API
    nomad_addr = "10.186.63.20:4646" # compatibility API need to access a nomad server

    publish_metrics = true
    stats_collection_interval = 15
  }
}
```

- 添加两个job模拟两个MySQL实例之间传输数据

![job_sample](images/3.4.2_job_sample.png)

## 二、部署prometheus

- 准备prometheus配置文件,同时接收nomad和DTLE的metrics
- DTLE监控labels:instance的值建议设置为DTLE服务器的hostname

```yaml
shell> cat /path/to/prometheus.yml
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.

scrape_configs:
  - job_name: 'nomad'
    scrape_interval: 15s
    metrics_path: '/v1/metrics'
    params:
      format: ['prometheus']
    static_configs:
      - targets: ['10.186.63.20:4646']
        labels:
          instance: nomad-src-1
      - targets: ['10.186.63.76:4646']
        labels:
          instance: nomad-dest-1

  - job_name: 'dtle'
    scrape_interval: 15s
    metrics_path: '/metrics'
    static_configs:
      - targets: ['10.186.63.20:8190']
        labels:
          instance: dtle-src-1
      - targets: ['10.186.63.76:8190']
        labels:
          instance: dtle-dest-1
```
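- 可先用promtool校验配置文件语法(promtool随Prometheus发行包附带,以下命令仅为示意):

```
shell> promtool check config /path/to/prometheus.yml
```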
- 利用docker部署prometheus服务

```
shell> docker run -itd -p 9090:9090 --name=prometheus --hostname=prometheus --restart=always -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
```

- 在浏览器上访问prometheus的页面 http://${prometheus_server_ip}:9090/targets 验证配置生效

![prometheus targets](images/3.4.2_prometheus_targets.png)

## 三、部署grafana

- 利用docker部署grafana服务

```
shell> docker run -d --name=grafana -p 3000:3000 grafana/grafana
```

- 在浏览器上访问grafana的页面 http://${grafana_server_ip}:3000 ,使用默认用户 admin/admin登录

- 配置添加数据源

![add_data_source](images/3.4.2_add_data_source.png)

- 选择添加prometheus

![choose_prometheus](images/3.4.2_choose_prometheus.png)

- 只需将prometheus的访问地址添加到URL中,点击"save & test"按钮

![add_prometheus_url](images/3.4.2_add_prometheus_url.png)

- 添加panel

![add_panel](images/3.4.2_add_panel.png)

- 以添加一个CPU使用率监控为例配置一个panel

![add_cpu_panel](images/3.4.2_add_cpu_panel.png)

![set_unit](images/3.4.2_set_unit.png)

## 四、常用的监控项

nomad所有监控项: https://www.nomadproject.io/docs/operations/metrics

DTLE所有监控项: https://actiontech.github.io/dtle-docs-cn/3/3.4_metrics.html

| 说明 | 公式示例 | 单位 |
| :---: | --- | :---: |
| CPU使用率(总计) | `sum(rate(process_cpu_seconds_total{instance=~"nomad-src-1\|dtle-src-1"}[60s])) * 100` | Misc / Percent(0-100) |
| CPU使用率(DTLE) | `rate(process_cpu_seconds_total{instance="dtle-src-1"}[60s]) * 100` | Misc / Percent(0-100) |
| CPU使用率(nomad) | `rate(process_cpu_seconds_total{instance="nomad-src-1"}[60s]) * 100` | Misc / Percent(0-100) |
| 内存使用(总计) | `sum(process_resident_memory_bytes{instance=~"nomad-src-1\|dtle-src-1"}) /1024 /1024` | Data / mebibyte |
| 内存使用(DTLE) | `process_resident_memory_bytes{instance="dtle-src-1"} /1024 /1024` | Data / mebibyte |
| 内存使用(nomad) | `process_resident_memory_bytes{instance="nomad-src-1"} /1024 /1024` | Data / mebibyte |
| 带宽(总计 - 源端发送) | `sum(increase(dtle_network_out_bytes{host="dtle-src-1"}[30s]) /30 /1024) * 8` | Data rate / kibibits/sec |
| 带宽(按task分组 - 源端发送) | `increase(dtle_network_out_bytes{host="dtle-src-1"}[30s]) /30 /1024 * 8` | Data rate / kibibits/sec |
| 带宽(总计 - 目标端接收) | `sum(increase(dtle_network_in_bytes{host="dtle-dest-1"}[30s]) /30 /1024) * 8` | Data rate / kibibits/sec |
| 带宽(按task分组 - 目标端接收) | `increase(dtle_network_in_bytes{host="dtle-dest-1"}[30s]) /30 /1024 * 8` | Data rate / kibibits/sec |
| 数据延迟(源端) | `dtle_delay_time{host="dtle-src-1"}` | Time / seconds(s) |
| 数据延迟(目标端) | `dtle_delay_time{host="dtle-dest-1"}` | Time / seconds(s) |
| TPS(源端) |
`irate(dtle_src_extracted_incr_tx_count[30s])` | Misc / none | 149 | | TPS(目标端) | `irate(dtle_dest_applied_incr_tx_count[30s])` | Misc / none | 150 | | QPS(源端) | `irate(dtle_src_extracted_incr_query_count[30s])` | Misc / none | 151 | | QPS(目标端) | `irate(dtle_dest_applied_incr_query_count[30s])` | Misc / none | 152 | | Buffer(源端) | `dtle_buffer_src_queue_size` | Misc / none | 153 | | Buffer(目标端) | `dtle_buffer_dest_queue_size` | Misc / none | 154 | 155 | ## 五、最后创建多个panel同时展示 156 | 157 | ![all_panel](images/3.4.2_all_panel.png) 158 | 159 | -------------------------------------------------------------------------------- /3/3.4_metrics.md: -------------------------------------------------------------------------------- 1 | # 监控项说明 2 | 3 | nomad原生metrics可访问:`http://127.0.0.1:4646/v1/metrics?format=prometheus` 4 | 5 | 由于nomad plugin并不能访问nomad监控接口,dtle有关的监控需要通过API兼容层访问。 6 | 7 | 注意:通过兼容层只能看到本节点运行的任务的监控项。 8 | 9 | ## 配置 10 | 11 | 首先配置nomad.hcl中打开api兼容层,并配置`publish_metrics = true`。 12 | 13 | ``` 14 | plugin "dtle" { 15 | config { 16 | api_addr = "127.0.0.1:8190" 17 | nomad_addr = "127.0.0.1:4646" 18 | publish_metrics = true 19 | stats_collection_interval = 15 20 | ... 21 | ``` 22 | 23 | 访问 `127.0.0.1:8190/metrics` 可查看监控项,或在prometheus中配置从此地址获取监控项。 24 | 25 | ## 监控项 26 | 27 | | 类别 | 监控项 | 说明 | 28 | | ------------- | ------------- | ------------- | 29 | | 网络流量状态 | - | - | 30 | | - | network.in_msgs | - | 31 | | - | network.out_msgs | - | 32 | | - | network.in_bytes | - | 33 | | - | network.out_bytes | - | 34 | | 缓存/队列状态 | - | - | 35 | | - | buffer.src_queue_size | - | 36 | | - | buffer.dest_queue_size | - | 37 | | - | buffer.send_by_timeout | - | 38 | | - | buffer.send_by_size_full | - | 39 | | 内存使用估计 | - | - | 40 | | --全量计数值 | memory.full_kb_count | - | 41 | | --增量计数值 | memory.incr_kb_count | - | 42 | | --全量估计值 | memory.full_kb_est | - | 43 | | --增量估计值 | memory.incr_kb_est | - | 44 | | 延迟统计| - |- | 45 | | - | delay.time |- | 46 | | 表统计(未实现) | - |- | 47 | | - | table.insert |- | 48 | | - | table.update |- | 49 | | - | table.delete |- | 50 | | 吞吐统计(未实现) | - |- | 51 | | - | throughput.num |- | 52 | | - | throughput.time |- | 53 | | 事务统计 | - | - | 54 | | - | src_extracted_incr_tx_count | 增量阶段中源端完成抽取并解析的事务总量。从源端任务启动开始计数,重启任务时计数清零。可配合prometheus的irate()计算tps,如:irate(demo_src_extracted_tx_count[1m]) | 55 | | - | dest_applied_incr_tx_count | 增量阶段中目标端完成回放的事务总量。从目标端任务启动开始计数,重启任务时计数清零。可配合prometheus的irate()计算tps,如:irate(demo_dest_applied_tx_count[1m]) | 56 | | sql执行量统计 | - | - | 57 | | - | src_extracted_incr_query_count | 增量阶段中源端完成抽取并解析的dml/ddl数量。从源端任务启动开始计数,重启任务时计数清零。可配合prometheus的irate()计算qps,如:irate(demo_src_extracted_query_count[1m]) | 58 | | - | dest_applied_incr_query_count | 增量阶段中目标端执行的ddl/dml总量(未commit前也算)。从目标端任务启动开始计数,重启任务时计数清零。可配合prometheus的irate()计算qps,如:irate(demo_dest_applied_query_count[1m]) | 59 | 60 | 61 | ### 内存使用 62 | 63 | - dtle根据数据量(内存计数值)来估计内存占用。因程序处理,实际使用的内存有放大效应 64 | - 内存估计值 = 内存计数值 x 放大系数 65 | - 根据Go内存分配器原理,job处理完后,内存可能不会立刻被释放给操作系统 66 | 67 | ### 任务延迟 68 | 69 | 延迟统计仅对增量(含Kafka输出)有效,其原理为: 70 | - 源端MySQL在执行事务时,binlog中记录了时间戳 71 | - dtle在传输/回放事务时,取时间戳和当前时间的差值为延迟值 72 | - 如果一段时间(15s)没有事务,则重置延迟值为0 73 | 74 | 注意事项 75 | - 需要MySQL和dtle主机的时间基本正确 76 | - 源端和目标端都有延迟统计,取两者中大值为延迟 77 | 78 | 为了便于查看延迟曲线以及跟踪高延迟情况,可用Prometheus抓取dtle的监控项并使用Alertmanager发送告警, 79 | 步骤可参考 [延迟告警示例](3.4.1_delay_alert.md)。 80 | 81 | 效果图: 82 | 83 | ![](images/3.4_delay1.png) -------------------------------------------------------------------------------- /3/3.5_deployment.md: 
-------------------------------------------------------------------------------- 1 | # 3.5 部署结构 2 | 3 | ![](images/3.5_deployment.png) 4 | 5 | 如上图, nomad (运行dtle插件)支持多种不同的部署结构, 其中: 6 | - 简单的部署结构: 7 | - 适用于简单的场景, 用一个nomad节点同时作为server (管理节点)和client (执行节点,运行dtle插件) 8 | - 一个节点可同时处理多个传输链路 9 | - 高可用的部署结构: 10 | - 适用于对可用性较高的场景, 将 nomad 和 consul 进行三节点集群部署, 任务元数据信息在集群中同步 11 | - 一个 nomad 节点可同时作为 server 和 client, 也可将 server 和 client 分开部署 12 | - 当server发生故障时, 传输任务会转移到其他server执行 (需要server集群存活一半以上) 13 | - 当client发生故障时, 传输任务会转移到其他client执行 14 | - 跨DC的部署结构 15 | - 适用于多个数据中心间的数据同步 16 | - server集群可部署在任一数据中心, 或第三方数据中心 17 | - 源数据库和目标数据库 **不必要**保障 直接网络连通 18 | - client需部署在网络边界上 19 | -------------------------------------------------------------------------------- /3/3.6_DDL.md: -------------------------------------------------------------------------------- 1 | # DDL支持度 2 | 3 | ## 以DATABASE为对象的DDL 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 51 | 52 | 53 | 54 | 55 |
| DDL类型 | Option | DDL语句示例 | 全量 | 增量 | 备注 |
| --- | --- | --- | --- | --- | --- |
| CREATE DATABASE | basic | CREATE DATABASE db_name;<br>CREATE DATABASE IF NOT EXISTS db_name;<br>CREATE SCHEMA db_name;<br>CREATE SCHEMA IF NOT EXISTS db_name; | 支持 | 支持 | 1. 支持server级默认字符集不一致情形 |
| CREATE DATABASE | character set and collate | CREATE SCHEMA db_name CHARACTER SET utf8 COLLATE utf8_general_ci;<br>CREATE SCHEMA db_name DEFAULT CHARACTER SET utf8mb4 DEFAULT COLLATE utf8mb4_general_ci;<br>CREATE SCHEMA db_name CHARACTER SET=latin1 COLLATE=latin1_swedish_ci;<br>CREATE DATABASE db_name COLLATE latin2_general_ci;<br>CREATE DATABASE db_name CHARACTER SET binary;<br>CREATE DATABASE db_name DEFAULT CHARACTER SET=gbk DEFAULT COLLATE=gbk_chinese_ci; | 支持 | 支持 | 1. 支持字符集latin1、latin2、gbk、utf8、utf8mb4、binary |
| ALTER DATABASE | | ALTER DATABASE db_name CHARACTER SET utf8 COLLATE utf8_general_ci | 支持 | 支持 | 支持字符集latin1、latin2、gbk、utf8、utf8mb4、binary |
| DROP DATABASE | | DROP DATABASE db_name;<br>DROP DATABASE IF EXISTS db_name;<br>DROP SCHEMA db_name;<br>DROP SCHEMA IF EXISTS db_name; | 支持 | 支持 | |

## 以TABLE为对象的DDL
56 | 57 | 58 | ## 以TABLE为对象的DDL 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 185 | 186 | 187 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 346 | 347 | 348 | 349 | 350 | 351 | 352 | 365 | 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | 380 | 381 | 382 | 383 | 384 | 385 | 386 | 387 | 388 | 389 | 390 | 391 | 392 | 393 | 394 | 395 | 396 | 397 | 398 | 399 | 400 | 401 | 402 | 403 | 404 | 405 | 406 | 407 | 408 | 409 | 410 | 411 | 412 | 413 | 414 | 415 | 416 | 417 | 418 | 419 | 420 | 421 | 422 | 423 | 424 | 425 | 426 | 427 | 428 | 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | 437 | 438 | 439 | 440 | 441 | 442 | 443 | 444 | 445 | 446 | 447 | 448 | 449 | 450 | 451 | 452 | 453 | 454 | 455 | 456 | 457 | 458 | 459 | 460 | 461 | 462 | 463 | 464 | 465 | 466 | 467 | 468 | 469 | 470 | 471 | 472 | 473 | 474 | 475 | 476 | 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | 488 | 489 | 490 | 491 | 492 | 493 | 494 | 495 | 496 | 497 | 498 | 499 | 500 | 501 | 502 | 503 | 504 | 505 | 506 | 508 | 509 | 510 | 511 | 512 | 513 | 514 | 515 | 516 | 517 | 518 | 519 | 520 | 521 | 523 | 524 | 525 | 526 | 527 | 528 | 529 | 531 | 532 | 533 | 534 | 535 | 536 | 537 | 539 | 540 | 541 | 542 | 543 | 544 | 545 | 547 | 548 | 549 | 550 | 551 | 552 | 553 | 554 | 559 | 560 | 561 | 564 | 565 | 566 | 567 | 568 | 569 | 570 | 572 | 573 | 574 | 575 | 576 | 577 | 578 | 579 | 580 | 581 | 582 | 583 | 584 | 585 | 586 | 587 | 588 | 589 | 590 | 591 | 592 | 593 | 594 | 595 | 596 | 597 | 598 | 599 | 600 | 601 | 602 | 603 | 604 | 605 | 606 | 607 | 608 | 624 | 625 | 626 | 627 | 628 | 629 | 630 | 643 | 644 | 645 | 646 | 647 | 648 | 649 | 663 | 664 | 665 | 666 | 667 | 668 | 669 | 682 | 683 | 684 | 685 | 686 | 688 | 689 | 690 | 691 | 692 | 693 | 694 | 695 | 696 | 697 | 698 | 699 | 700 | 701 | 702 | 703 | 704 | 705 | 706 | 707 | 708 | 709 | 710 | 712 | 713 | 714 | 715 | 716 | 717 | 718 | 720 | 721 | 722 | 723 | 724 |
| DDL类型 | 分类 | Option | DDL语句示例 | 全量 | 增量 | 备注 |
| --- | --- | --- | --- | --- | --- | --- |
| CREATE TABLE | column data types and attributes | basic | CREATE TABLE [IF NOT EXISTS] tbl_name (col_name column_definition); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | character set | CREATE TABLE [IF NOT EXISTS] tbl_name (col_name column_definition CHARACTER SET utf8 COLLATE utf8_bin); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | null | CREATE TABLE [IF NOT EXISTS] tbl_name (col_name column_definition NULL); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | not null | CREATE TABLE [IF NOT EXISTS] tbl_name (col_name column_definition NOT NULL); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | default | CREATE TABLE [IF NOT EXISTS] tbl_name (c CHAR(20) DEFAULT ''); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | auto_increment | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT PRIMARY KEY); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | comment | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) COMMENT ''); | 支持 | 支持 | |
| CREATE TABLE | column data types and attributes | generated always | CREATE TABLE [IF NOT EXISTS] tbl_name (sidea DOUBLE,sideb DOUBLE,sidec DOUBLE GENERATED ALWAYS AS (SQRT(sidea * sidea + sideb * sideb))); | 支持 | 支持 | 建表语句支持,但是数据复制有缺陷 https://github.com/actiontech/dtle/issues/787 |
| CREATE TABLE | indexes and foreign keys | check | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) CHECK(expr)); | 支持 | 支持 | |
| CREATE TABLE | indexes and foreign keys | primary key | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT PRIMARY KEY);<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, PRIMARY KEY (id));<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, c CHAR(20), PRIMARY KEY(id, c)); | 支持 | 支持 | |
| CREATE TABLE | indexes and foreign keys | key | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT KEY);<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, KEY index_name (key_part));<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, c CHAR(20) DEFAULT 't7', KEY (key_part, key_part)); | 支持 | 支持 | |
| CREATE TABLE | indexes and foreign keys | index | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, INDEX index_name (key_part));<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, c CHAR(20) DEFAULT 't10', INDEX(key_part, key_part)); | 支持 | 支持 | |
| CREATE TABLE | indexes and foreign keys | unique | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT UNIQUE KEY);<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, UNIQUE KEY index_name (key_part));<br>CREATE TABLE [IF NOT EXISTS] tbl_name (c CHAR(20), UNIQUE INDEX (key_part(prefix value)));<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT, c CHAR(20) DEFAULT 't10', CONSTRAINT UNIQUE INDEX (key_part, key_part)); | 支持 | 支持 | |
| CREATE TABLE | indexes and foreign keys | fulltext | CREATE TABLE [IF NOT EXISTS] tbl_name (c_varchar_1 varchar(255), c_varchar_2 varchar(255), FULLTEXT KEY index_name (key_part, key_part)); | 支持 | 支持 | |
| CREATE TABLE | indexes and foreign keys | foreign key | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT, parent_id INT, CONSTRAINT symbol FOREIGN KEY (col_name) REFERENCES tbl_name(key_part) ON DELETE reference_option ON UPDATE reference_option);<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT, parent_id INT, FOREIGN KEY index_name (col_name) REFERENCES tbl_name(key_part) ON DELETE reference_option ON UPDATE reference_option);<br>CREATE TABLE [IF NOT EXISTS] tbl_name (id INT, parent_id INT, FOREIGN KEY (col_name) REFERENCES tbl_name(key_part) ON DELETE reference_option ON UPDATE reference_option); | 支持 | 支持 | 应满足以下配置,否则在开启MTS的情况下不能保证数据一致性:<br>1. 目标端数据库@@foreign_key_checks = 1 (默认值为1)<br>2. dtle job 中ForeignKeyChecks = true (默认值为true) |
| CREATE TABLE | table options | engine | CREATE TABLE [IF NOT EXISTS] tbl_name (c CHAR(20)) ENGINE=InnoDB; | 支持 | 支持 | |
| CREATE TABLE | table options | auto_increment | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT PRIMARY KEY) AUTO_INCREMENT=100; | 支持 | 支持 | |
| CREATE TABLE | table options | character set | CREATE TABLE [IF NOT EXISTS] tbl_name (c CHAR(20)) CHARACTER SET=utf8; | 支持 | 支持 | |
| CREATE TABLE | table options | collate | CREATE TABLE [IF NOT EXISTS] tbl_name (c CHAR(20)) DEFAULT COLLATE=utf8_general_ci; | 支持 | 支持 | |
| CREATE TABLE | table options | checksum | CREATE TABLE [IF NOT EXISTS] tbl_name (c CHAR(20)) CHECKSUM=1; | 支持 | 支持 | |
| CREATE TABLE | table options | comment | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT PRIMARY KEY) COMMENT=''; | 支持 | 支持 | |
| CREATE TABLE | table options | compression | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT(11) AUTO_INCREMENT PRIMARY KEY) COMPRESSION='ZLIB'; | 支持 | 支持 | |
| CREATE TABLE | table partitioning | hash | CREATE TABLE [IF NOT EXISTS] tbl_name (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1);<br>CREATE TABLE [IF NOT EXISTS] tbl_name (col1 INT, col2 CHAR(5), col3 DATETIME) PARTITION BY HASH ( YEAR(col3) ); | 支持 | 支持 | |
| CREATE TABLE | table partitioning | key | CREATE TABLE [IF NOT EXISTS] tbl_name (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY KEY(col3) PARTITIONS 4; | 支持 | 支持 | |
| CREATE TABLE | table partitioning | linear key | CREATE TABLE [IF NOT EXISTS] tbl_name (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY LINEAR KEY(col3) PARTITIONS 5; | 支持 | 支持 | |
| CREATE TABLE | table partitioning | range | CREATE TABLE [IF NOT EXISTS] tbl_name (year_col INT, some_data INT) PARTITION BY RANGE (year_col) (PARTITION p0 VALUES LESS THAN (1991), PARTITION p1 VALUES LESS THAN (2020), PARTITION p5 VALUES LESS THAN MAXVALUE); | 支持 | 支持 | |
| CREATE TABLE | table partitioning | range columns | CREATE TABLE [IF NOT EXISTS] tbl_name (a INT NOT NULL, b INT NOT NULL) PARTITION BY RANGE COLUMNS(a,b) (PARTITION p1 VALUES LESS THAN (20,10), PARTITION p2 VALUES LESS THAN (50,MAXVALUE), PARTITION p3 VALUES LESS THAN (65,MAXVALUE), PARTITION p4 VALUES LESS THAN (MAXVALUE,MAXVALUE)); | 支持 | 支持 | |
| CREATE TABLE | table partitioning | list | CREATE TABLE [IF NOT EXISTS] tbl_name (id INT, name VARCHAR(35)) PARTITION BY LIST (id) (PARTITION r0 VALUES IN (1, 5, 9, 13, 17, 21), PARTITION r1 VALUES IN (2, 6, 10, 14, 18, 22), PARTITION r2 VALUES IN (3, 7, 11, 15, 19, 23), PARTITION r3 VALUES IN (4, 8, 12, 16, 20, 24)); | 支持 | 支持 | |
| CREATE TABLE | like statement | tracked to tracked | CREATE TABLE new_1 LIKE old_1; | 支持 | 支持 | 新表在目标端创建,后续数据传输正常<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_1"},{"table_name":"new_1"}]}]}` |
| CREATE TABLE | like statement | tracked to not tracked | CREATE TABLE new_2 LIKE old_2; | 支持 | 支持 | 新表不会在目标端创建<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_2"}]}]}` |
| CREATE TABLE | like statement | not tracked to tracked | CREATE TABLE new_3 LIKE old_3; | 支持 | 支持 | 新表不会在目标端创建,DTLE会报错重启(old_3不在目标端,无法执行该语句)<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"new_3"}]}]}` |
| CREATE TABLE | like statement | not tracked to not tracked | CREATE TABLE new_4 LIKE old_4; | 支持 | 支持 | 新表不会在目标端创建<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_1"}]}]}` |
| ALTER TABLE | table options | engine | ALTER TABLE tbl_name ENGINE=InnoDB; | / | 支持 | |
| ALTER TABLE | table options | auto_increment | ALTER TABLE tbl_name AUTO_INCREMENT=100; | / | 支持 | |
| ALTER TABLE | table options | character set | ALTER TABLE tbl_name CHARACTER SET=utf8; | / | 支持 | |
| ALTER TABLE | table options | collate | ALTER TABLE tbl_name COLLATE=utf8_general_ci; | / | 支持 | |
| ALTER TABLE | table options | checksum | ALTER TABLE tbl_name CHECKSUM=1; | / | 支持 | |
| ALTER TABLE | table options | comment | ALTER TABLE tbl_name COMMENT=''; | / | 支持 | |
| ALTER TABLE | table options | compression | ALTER TABLE tbl_name COMPRESSION='ZLIB'; | / | 支持 | |
| ALTER TABLE | adding and dropping columns | add | ALTER TABLE tbl_name ADD d CHAR(20); | / | 支持 | |
| ALTER TABLE | adding and dropping columns | add after | ALTER TABLE tbl_name ADD d CHAR(20) AFTER b; | / | 支持 | |
| ALTER TABLE | adding and dropping columns | add first | ALTER TABLE tbl_name ADD d CHAR(20) FIRST; | / | 支持 | |
| ALTER TABLE | adding and dropping columns | drop | ALTER TABLE tbl_name DROP b; | / | 支持 | |
| ALTER TABLE | adding and dropping columns | add multiple | ALTER TABLE tbl_name ADD (d CHAR(20), e INT(11)); | / | 支持 | |
| ALTER TABLE | adding and dropping columns | drop multiple | ALTER TABLE tbl_name DROP b, DROP COLUMN c; | / | 支持 | |
| ALTER TABLE | renaming, redefining, and reordering columns | change name | ALTER TABLE tbl_name CHANGE COLUMN b d INT(11) DEFAULT NULL; | / | 支持 | |
| ALTER TABLE | renaming, redefining, and reordering columns | change type | ALTER TABLE tbl_name CHANGE b b BIGINT DEFAULT NULL; | / | 支持 | |
| ALTER TABLE | renaming, redefining, and reordering columns | modify type | ALTER TABLE tbl_name MODIFY COLUMN b BIGINT DEFAULT NULL; | / | 支持 | |
| ALTER TABLE | renaming, redefining, and reordering columns | change multiple | ALTER TABLE tbl_name CHANGE b d INT(11) DEFAULT NULL, CHANGE c e BIGINT DEFAULT NULL; | / | 支持 | |
| ALTER TABLE | renaming, redefining, and reordering columns | modify multiple | ALTER TABLE tbl_name MODIFY b BIGINT DEFAULT NULL, MODIFY c BIGINT DEFAULT NULL; | / | 支持 | |
| ALTER TABLE | renaming, redefining, and reordering columns | modify character set | ALTER TABLE tbl_name MODIFY b TEXT CHARACTER SET utf8; | / | 支持 | |
| ALTER TABLE | primary keys and indexes | add primary key | ALTER TABLE tbl_name ADD CONSTRAINT PRIMARY KEY (key_part);<br>ALTER TABLE tbl_name ADD PRIMARY KEY (key_part, key_part); | / | 支持 | |
| ALTER TABLE | primary keys and indexes | drop primary key | ALTER TABLE tbl_name DROP PRIMARY KEY; | / | 支持 | |
| ALTER TABLE | primary keys and indexes | add key | ALTER TABLE tbl_name ADD KEY (key_part);<br>ALTER TABLE tbl_name ADD INDEX index_name (key_part, key_part); | / | 支持 | |
| ALTER TABLE | primary keys and indexes | drop key | ALTER TABLE tbl_name DROP PRIMARY KEY;<br>ALTER TABLE tbl_name DROP INDEX index_name; | / | 支持 | |
| ALTER TABLE | primary keys and indexes | add unique key | ALTER TABLE tbl_name ADD UNIQUE INDEX (key_part);<br>ALTER TABLE tbl_name ADD CONSTRAINT UNIQUE KEY index_name (key_part, key_part); | / | 支持 | |
| ALTER TABLE | primary keys and indexes | drop unique key | ALTER TABLE tbl_name DROP KEY id;<br>ALTER TABLE tbl_name DROP INDEX index_name; | / | 支持 | |
| ALTER TABLE | foreign keys | add foreign key | ALTER TABLE tbl_name ADD CONSTRAINT symbol FOREIGN KEY (col_name) REFERENCES tbl_name(key_part) ON DELETE reference_option ON UPDATE reference_option;<br>ALTER TABLE tbl_name ADD CONSTRAINT FOREIGN KEY index_name (col_name) REFERENCES tbl_name(key_part) ON DELETE reference_option ON UPDATE reference_option;<br>ALTER TABLE tbl_name ADD FOREIGN KEY (col_name) REFERENCES tbl_name(key_part) ON DELETE reference_option ON UPDATE reference_option;<br>ALTER TABLE tbl_name ADD FOREIGN KEY (col_name) REFERENCES tbl_name(key_part); | / | 支持 | 应满足以下配置,否则在开启MTS的情况下不能保证数据一致性:<br>1. 目标端数据库@@foreign_key_checks = 1 (默认值为1)<br>2. dtle job 中ForeignKeyChecks = true (默认值为true) |
| ALTER TABLE | foreign keys | drop foreign key | ALTER TABLE tbl_name DROP FOREIGN KEY fk_child_5_1; | / | 支持 | 无法和MTS同时使用: https://github.com/actiontech/dtle/issues/795#issuecomment-961786003 |
| ALTER TABLE | changing the character set | modify character set | ALTER TABLE tbl_name MODIFY b TEXT CHARACTER SET utf8; | / | 支持 | |
| ALTER TABLE | changing the character set | convert to | ALTER TABLE tbl_name CONVERT TO CHARACTER SET utf8; | / | 支持 | |
| ALTER TABLE | rename | rename index | ALTER TABLE tbl_name RENAME INDEX index_name_old to index_name_new; | / | 支持 | |
| ALTER TABLE | rename | rename key | ALTER TABLE tbl_name RENAME KEY index_name_old to index_name_new; | / | 支持 | |
| ALTER TABLE | rename | tracked to tracked | ALTER TABLE old_1 RENAME TO new_1; | / | 支持 | 重命名成功,后续数据传输正常<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_1"},{"table_name":"new_1"}]}]}` |
| ALTER TABLE | rename | tracked to not tracked | ALTER TABLE old_2 RENAME AS new_2; | / | 支持 | 重命名成功,后续数据不应传到目标端<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_2"}]}]}` |
| ALTER TABLE | rename | not tracked to tracked | ALTER TABLE old_3 RENAME AS new_3; | / | 支持 | 因目标端没有改名前的表,会有ERROR log(old_3不在目标端,无法执行该语句)<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"new_3"}]}]}` |
| ALTER TABLE | rename | not tracked to not tracked | ALTER TABLE old_4 RENAME TO new_4; | / | 支持 | 目标端不会有重命名后的表<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_1"}]}]}` |
| ALTER TABLE | table partitioning | ADD PARTITION | ALTER TABLE tbl_name PARTITION BY HASH(expr) PARTITIONS num;<br>ALTER TABLE tbl_name ADD PARTITION (PARTITION partition_names VALUES LESS THAN (MAXVALUE)); | / | 支持 | |
| ALTER TABLE | table partitioning | DROP PARTITION | ALTER TABLE tbl_name DROP PARTITION partition_names; | / | 支持 | |
| DROP TABLE | | basic | DROP TABLE tbl_name; | / | 支持 | |
| DROP TABLE | | if exists | DROP TABLE IF EXISTS tbl_name RESTRICT;<br>DROP TABLE IF EXISTS not_exists_tbl_name CASCADE; | / | 支持 | |
| DROP TABLE | | drop multiple | DROP TABLE IF EXISTS not_exists_tbl_name CASCADE;<br>DROP TABLE IF EXISTS tbl_name_1, tbl_name_2, not_exists_tbl_name; | / | 支持 | |
## 以INDEX为对象的DDL

| DDL类型 | Option | DDL语句示例 | 全量 | 增量 | 备注 |
| --- | --- | --- | --- | --- | --- |
| CREATE INDEX | basic | CREATE INDEX key_t1_1 ON t1 (id); | 支持 | 支持 | |
| CREATE INDEX | prefix key | CREATE INDEX key_t2_1 ON t2 (a(2));<br>CREATE UNIQUE INDEX key_t3_1 ON t3 (a(3)); | 支持 | 支持 | |
| CREATE INDEX | unique | CREATE UNIQUE INDEX key_t4_1 ON t4 (id); | 支持 | 支持 | |
| CREATE INDEX | unique not null | CREATE UNIQUE INDEX key_t5_1 ON t5 (id); | 支持 | 支持 | |
| CREATE INDEX | unique multiple | CREATE UNIQUE INDEX key_t6_1 ON t6 (a, b); | 支持 | 支持 | |
| CREATE INDEX | fulltext | CREATE FULLTEXT INDEX full_t7_1 ON t7 (a); | 支持 | 支持 | |
| CREATE INDEX | index_type | CREATE INDEX key_t8_1 ON t8 (id) USING BTREE; | 支持 | 支持 | |
| CREATE INDEX | comment | CREATE INDEX key_t9_1 ON t9 (id) COMMENT 'test comment'; | 支持 | 支持 | |
| DROP INDEX | | DROP INDEX key_t1_1 ON t1; | 支持 | 支持 | |

## 其他对象DDL
| DDL类型 | Option | DDL语句示例 | 全量 | 增量 | 备注 |
| --- | --- | --- | --- | --- | --- |
| RENAME | tracked to tracked | RENAME TABLE old_1 to new_1; | / | 支持 | 重命名成功,后续数据传输正常<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_1"},{"table_name":"new_1"}]}]}` |
| RENAME | tracked to not tracked | RENAME TABLE old_2 to new_2; | / | 支持 | 重命名成功,后续数据不应传到目标端<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_2"}]}]}` |
| RENAME | not tracked to tracked | RENAME TABLE old_3 to new_3; | / | 支持 | 因目标端没有改名前的表,会有ERROR log(old_3不在目标端,无法执行该语句)<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"new_3"}]}]}` |
| RENAME | not tracked to not tracked | RENAME TABLE old_4 to new_4; | / | 支持 | 目标端不会有重命名后的表<br>`{"replicate_do_db":[{"table_schema":"action_db_1","tables":[{"table_name":"old_1"}]}]}` |
| RENAME | rename multiple | RENAME TABLE old_5 to new_5, old_6 to new_6; | / | 支持 | |
| TRUNCATE | | TRUNCATE tbl_name;<br>TRUNCATE TABLE tbl_name; | / | 支持 | |
| VIEW | CREATE VIEW | CREATE VIEW view_name AS select_statement;<br>CREATE OR REPLACE VIEW view_name AS select_statement; | 不支持 | 不支持 | |
| VIEW | ALTER VIEW | ALTER VIEW view_name (column_list) AS select_statement; | / | 不支持 | |
| VIEW | DROP VIEW | DROP VIEW view_name RESTRICT;<br>DROP VIEW IF EXISTS view_name RESTRICT;<br>DROP VIEW IF EXISTS view_name CASCADE;<br>DROP VIEW view_name_1, view_name_2; | / | 不支持 | |
| FUNCTION | CREATE FUNCTION | CREATE FUNCTION sp_name() RETURNS type characteristic routine_body;<br>CREATE FUNCTION sp_name(func_parameters) RETURNS type characteristic RETURN routine_body; | 不支持 | 支持 | 1. [MySQL 5]目标端账户需要CREATE ROUTINE, SUPER权限<br>2. [MySQL 8]目标端账户需要CREATE ROUTINE, SET_USER_ID, SYSTEM_USER权限, SET GLOBAL log_bin_trust_function_creators = ON;<br>3. job配置ExpandSyntaxSupport=true<br>4. 全量不复制function |
| FUNCTION | ALTER FUNCTION | ALTER FUNCTION sp_name SQL SECURITY DEFINER;<br>ALTER FUNCTION sp_name COMMENT ''; | / | 支持 | |
| FUNCTION | DROP FUNCTION | DROP FUNCTION sp_name;<br>DROP FUNCTION IF EXISTS sp_name;<br>DROP FUNCTION IF EXISTS not_exists_sp_name; | / | 支持 | |
| PROCEDURE | CREATE PROCEDURE | CREATE PROCEDURE sp_name() characteristic routine_body;<br>CREATE PROCEDURE sp_name(proc_parameters) characteristic routine_body; | 不支持 | 支持 | 1. [MySQL 5]目标端账户需要CREATE ROUTINE, SUPER权限<br>2. [MySQL 8]目标端账户需要CREATE ROUTINE, SET_USER_ID, SYSTEM_USER权限, SET GLOBAL log_bin_trust_function_creators = ON;<br>3. job配置ExpandSyntaxSupport=true<br>4. 全量不复制创建procedure |
| PROCEDURE | ALTER PROCEDURE | ALTER PROCEDURE sp_name SQL SECURITY DEFINER;<br>ALTER PROCEDURE sp_name COMMENT ''; | / | 支持 | |
| PROCEDURE | DROP PROCEDURE | DROP PROCEDURE sp_name;<br>DROP PROCEDURE IF EXISTS sp_name;<br>DROP PROCEDURE IF EXISTS not_exists_sp_name; | / | 支持 | |
| EVENT | CREATE EVENT | CREATE EVENT event_name ON SCHEDULE schedule COMMENT '' DO event_body; | 不支持 | 不支持 | 1. 不支持复制event<br>2. 源端event产生的数据会复制到目标端 |
| EVENT | ALTER EVENT | ALTER EVENT event_name RENAME TO event_name_new; | / | 不支持 | |
| EVENT | DROP EVENT | DROP EVENT IF EXISTS not_exists_event_name;<br>DROP EVENT IF EXISTS event_name;<br>DROP EVENT event_name; | / | 不支持 | |
| TRIGGER | CREATE TRIGGER | CREATE TRIGGER trigger_name BEFORE INSERT on old FOR EACH ROW trigger_body;<br>CREATE TRIGGER trigger_name AFTER INSERT on old FOR EACH ROW trigger_body;<br>CREATE TRIGGER trigger_name BEFORE UPDATE on old FOR EACH ROW trigger_body;<br>CREATE TRIGGER trigger_name AFTER UPDATE on old FOR EACH ROW trigger_body;<br>CREATE TRIGGER trigger_name BEFORE DELETE on old FOR EACH ROW trigger_body;<br>CREATE TRIGGER trigger_name AFTER DELETE on old FOR EACH ROW trigger_body; | 不支持 | 不支持 | 1. 不支持复制trigger<br>2. 源端trigger产生的数据会复制到目标端 |
| TRIGGER | DROP TRIGGER | DROP TRIGGER trigger_name;<br>DROP TRIGGER IF EXISTS trigger_name;<br>DROP TRIGGER schema_name.trigger_name; | / | 不支持 | |

### 关于不支持 Trigger、Event

由于Trigger或Event可能会更改表数据,目标端存在Trigger或Event时,存在二次触发的问题(源端已经触发过一次),会引起数据不一致,故dtle不复制Trigger和Event。源端Trigger/Event变更的数据,会被写进binlog,并由dtle复制到目标端。

--------------------------------------------------------------------------------
/3/3.7_DCL.md:
--------------------------------------------------------------------------------

# DCL支持度

### 条件及限制
- 创建实例级别迁移
- "ExpandSyntaxSupport": true
- 增量部分DCL的操作会被支持
- 全量部分是否需要支持?即,创建job前,源端已存在的用户是否需要被迁移至目标端?[#358](https://github.com/actiontech/dtle/issues/358)
- 若需要执行grant和revoke,则回放用户需要有`grant option`,且回放用户需要有被赋权的权限

| DCL类型 | 语句示例 | 是否支持 |
| --- | --- | --- |
| CREATE | create user ...identified by ... | 支持 |
| ALTER | alter user ...identified by ... | 支持 |
| RENAME | rename user ... to ... | 支持 |
| SET PASSWORD | set password for ...='...'; | 支持 |
| GRANT | `grant all on *.* to 'test'@'%';` | 支持 |
| REVOKE | `revoke insert on *.* from 'test'@'%';` | 支持 |

实例级别job.json配置样例:

```
{
  "job_id": "dcl_expand_syntax_support_true",
  "src_task": {
    "task_name": "src",
    "mysql_src_task_config": {
      "expand_syntax_support": true
    },
    "replicate_do_db": [],
    ...
  },
  "dest_task": {
    "task_name": "dest",
    ...
  }
}
```

--------------------------------------------------------------------------------
/3/3.8_dtle_mapping.md:
--------------------------------------------------------------------------------

# dtle mapping

在job配置文件中,Table字段增加若干参数,详情参考[4.3 作业配置](../4/4.3_job_configuration.md),使用方法如下

## schema mapping

### 单库mapping
job.json中ReplicateDoDb配置:
```
"ReplicateDoDb":[
    {
        "TableSchema":"demo",
        "TableSchemaRename":"demoRename"
    }
],
```

### 单库mapping结果
```
src : demo
dest: demoRename
```

### 多库mapping
job.json中ReplicateDoDb配置:
```
"ReplicateDoDb":[
    {
        "TableSchemaRegex":"(\\w*)src(\\w*)",
        "TableSchemaRename":"rename${1}",
    }
],
```

### 多库mapping结果
```
src : test1src,test2src,test3src,cust
dest: renametest1, renametest2, renametest3
```

## table mapping

### 单表mapping
job.json中ReplicateDoDb配置:
```
"ReplicateDoDb":[
    {
        "TableSchema":"demo",
        "Tables":[
            {
                "TableName":"testDemo",
                "TableRename":"renameDemo"
            }
        ]
    }
],
```

### 单表mapping结果
```
src : demo.testDemo
dest: demo.renameDemo
```

### 多表mapping
job.json中ReplicateDoDb配置:
```
"ReplicateDoDb":[
    {
        "TableSchema":"demo",
        "Tables":[
            {
                "TableRegex":"(\\w*)Shard(\\w*)",
                "TableRename":"${1}Rename"
            }
        ]
    }
],
```

### 多表mapping结果
```
src : demo.test1Shard,demo.test2Shard,demo.customer,demo.test3Shard
dest: demo.test1Rename,demo.test2Rename,demo.test3Rename
```

## 列mapping

src tables

```sql
create table demo.a (id int primary key, a int);
create table demo.b (id int primary key, b int);
```

dst table

```sql
create table demo.colmap (id int primary key auto_increment, val int);
```

使用 `ColumnMapFrom` 和 `ColumnMapTo`参数,将表a和表b合并到表colmap。忽略原id,使用新的自增id作为主键。

注意:不支持自动创建目标表,需预先手动创建。
```json
"ReplicateDoDb": [{
    "TableSchema":"demo",
    "Tables":[{
        "TableName": "a",
        "TableRename": "colmap",
        "ColumnMapFrom": ["a"],
        "ColumnMapTo": ["val"]
    }, {
        "TableName": "b",
        "TableRename": "colmap",
        "ColumnMapFrom": ["b"],
        "ColumnMapTo": ["val"]
    }]
}],
"SkipCreateDbTable": true,
"DropTableIfExists": false,
```

参数说明
- ColumnMapFrom: 从源表中, 依照指定的顺序, 提取全部列或部分列.
- ColumnMapTo: 写入目标表时, 指定写入的列. 可为目标表全部列或部分列.
- From和To的列数量必须相等.
- 只填写ColumnMapFrom的用法现已deprecated.
- 对于TwoWaySync双向任务, 反向任务会交换正向任务的ColumnMapFrom/ColumnMapTo

暂不支持使用正则表达式匹配列。

--------------------------------------------------------------------------------
/3/3.9_binlog_relay.md:
--------------------------------------------------------------------------------

# Binlog Relay (中继)

## 背景
- 某些MySQL部署会定期清除binlog
- dtle增量复制依赖binlog,如果binlog被清除则复制会出错
- dtle全量标记增量开始位置, 若全量耗时较长, 开始增量时binlog极有可能被清除
- 需要在开始全量时将MySQL binlog暂存到dtle本地

## 使用
在job.json源端任务配置中将`BinlogRelay`设为`true`
```json
"Type": "Src",
"Config": {
    "BinlogRelay": true,
    "Gtid": "",
```

对于纯增量job,开启BinlogRelay时,必须用Gtid指定复制起点(进度),不能使用BinlogFile/Pos。

参数说明详见[作业配置](../4/4.3_job_configuration.md).

## 影响
binlog储存位置为 `nomad_data_dir/binlog/job_name/mysql_server_uuid`。一般情况下job被删除时会自动清除binlog目录。若未清除则需手动清除。
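手动清除可参考如下操作(仅为示意,假设 data_dir 为默认的 `/var/lib/nomad`,job名为 job_name;清除前务必确认该job已删除):

```
ls /var/lib/nomad/binlog/job_name/       # 确认遗留的relay目录
rm -rf /var/lib/nomad/binlog/job_name/   # 确认job已删除后再清除
```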
--------------------------------------------------------------------------------
/3/images/3.4.1_delay2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.1_delay2.png
--------------------------------------------------------------------------------
/3/images/3.4.2_add_cpu_panel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_add_cpu_panel.png
--------------------------------------------------------------------------------
/3/images/3.4.2_add_data_source.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_add_data_source.png
--------------------------------------------------------------------------------
/3/images/3.4.2_add_panel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_add_panel.png
--------------------------------------------------------------------------------
/3/images/3.4.2_add_prometheus_url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_add_prometheus_url.png
--------------------------------------------------------------------------------
/3/images/3.4.2_all_panel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_all_panel.png
--------------------------------------------------------------------------------
/3/images/3.4.2_choose_prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_choose_prometheus.png
--------------------------------------------------------------------------------
/3/images/3.4.2_job_sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_job_sample.png
--------------------------------------------------------------------------------
/3/images/3.4.2_prometheus_targets.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_prometheus_targets.png
--------------------------------------------------------------------------------
/3/images/3.4.2_set_unit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_set_unit.png
--------------------------------------------------------------------------------
/3/images/3.4.2_topu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4.2_topu.png
--------------------------------------------------------------------------------
/3/images/3.4_delay1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.4_delay1.png
--------------------------------------------------------------------------------
/3/images/3.5_deployment.graffle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.5_deployment.graffle
--------------------------------------------------------------------------------
/3/images/3.5_deployment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/3/images/3.5_deployment.png
--------------------------------------------------------------------------------
/4/4.0_installation.md:
--------------------------------------------------------------------------------

# 安装步骤

> 从 dtle 3.x 版本开始,dtle更改了架构,作为nomad插件运行(而非此前的单一二进制文件),并需要运行 consul 以储存任务元数据。
>
> dtle docker image 已包含nomad。consul可使用其官方image。
>
> 标准rpm安装包已集成 consul 和 nomad 及启动脚本和参考配置。

## 基于容器使用

```
docker pull consul:latest
docker pull actiontech/dtle:latest
```

使用方法参见 [快速开始](../2/2.0_mysql_replication_1_1.md) 一节

容器的版本列表参看[docker hub](https://hub.docker.com/r/actiontech/dtle/tags/)

## 基于rpm包的安装

从[此处](https://github.com/actiontech/dtle/releases)下载dtle的 rpm 安装包, 并执行以下命令可安装dtle

```
rpm -ivh --prefix /opt/dtle dtle-<version>.rpm
```

配置文件位于
- `/opt/dtle/etc/dtle/`

服务启动命令:

```
systemctl start dtle-consul dtle-nomad
systemctl enable dtle-consul dtle-nomad # 开机自动启动
```

日志文件位于
`/opt/dtle/var/log/nomad/` 39 | -------------------------------------------------------------------------------- /4/4.1_node_configuration.md: -------------------------------------------------------------------------------- 1 | # 节点配置 2 | 3 | 安装包默认将参考配置装在了如下位置(安装时未设置--prefix的情况) 4 | - /etc/consul 5 | - /etc/nomad 6 | 7 | 使用多节点部署时,请注意更改 `node_name`、`data_dir`、各类地址和端口, 避免冲突。 8 | 9 | 默认的启动脚本(systemd)使用单节点配置。 10 | 11 | - consul 全部配置 https://www.consul.io/docs/agent/options.html#configuration_files 12 | - nomad(本体)全部配置 https://www.nomadproject.io/docs/configuration/ 13 | 14 | nomad 分为 server 和 client。一个nomad进程可以同时作为server和client,也可以只担任一个角色。 15 | dtle 插件运行在 nomad client 中。 16 | 17 | ## nomad 中 dtle 插件的配置 18 | 19 | 参考样例配置中这一段 20 | ``` 21 | plugin "dtle" { 22 | config { 23 | ... 24 | } 25 | } 26 | ``` 27 | 28 | | 配置项 | 类型 | 默认值 | 强制要求 | 说明 | 29 | |---------------------------|------------|--------------------------|----------|------------------------------------------------------------------------------------------------------| 30 | | log_level | string | "INFO" | 否 | 日志级别(由于dtle plugin无法获取nomad日志级别,此处需额外设置) | 31 | | log_file | string | "/var/log/dtle/dtle.log" | 否 | 从4.22.09.0开始, dtle单独生成日志,不再和nomad合并。
每512MB进行rotate和压缩,生成文件形如dtle-2022-11-04T06-46-39.502.log.gz | 32 | | big_tx_max_jobs | int | 取决于启动时的可用内存 | 否 | 允许同时处理大事务的job数量。默认值:启动时可用内存/2G。该值至少为1。如有6G可用内存,则该值默认为3 | 33 | | nats_bind | string | "0.0.0.0:8193" | 否 | Nats (dtle使用的传输协议) 地址 | 34 | | nats_advertise | string | 127.0.0.1:8193 | 否 | Nats Advertise 地址, 其他节点使用此地址连接本节点。跨公网传输需要设成上层路由器地址并设置网络穿透 | 35 | | api_addr | string | "" (参考配置中开启) | 否 | 兼容层地址,可以在此地址使用dtle 2.x的HTTP API。参考值:"0.0.0.0:8190"。为空则关闭兼容层。 | 36 | | nomad_addr | string | "127.0.0.1:4646" | 否 | nomad 地址. 由于nomad插件API限制, dtle 无法自动获取该地址, 需要用户手动重复填写一遍. | 37 | | consul | string | "127.0.0.1:8500" | 否 | consul的地址, 同nomad本体配置中的. 应填写和最近nomad server关联的consul地址. dtle插件需要consul以储存任务信息 | 38 | | data_dir | string | "/var/lib/nomad" | 否 | 数据目录。目前用于存放binlog(job配置中BinlogRelay=true时) | 39 | | stats_collection_interval | int | 15 | 否 | 收集监控项的周期(秒) | 40 | | publish_metrics | bool | false | 否 | 是否输出监控项 | 41 | | rsa_private_key_path | string | "" | 否 | 指定rsa私钥文件的绝对路径,目前只在HTTP api中用于对mysql密码解码。(具体用法见[dtle 3.x HTTP API 说明](./4.4.1_dtle_http_api.md)) | 42 | | cert_file_path | string | "" | 否| 指定证书文件的绝对路径 | 43 | | key_file_path | string | "" | 否 | 指定私钥文件的绝对路径 | 44 | 45 | ## 关于 (Bind) Address 和 Advertise Address 46 | 47 | - bind address为,需要是本地网卡配置的地址 48 | - advertise addr为对外告知连接用的地址 49 | - 对于跨网段的nomad集群,需要配置上层路由地址并在各级路由配置NAT(端口映射) 50 | 51 | ## 修改日志级别 52 | 53 | 从4.22.09.0开始,动态修改日志级别直接调用API即可生效(不需要事先修改配置文件或重启dtle)。 54 | 55 | ```sh 56 | curl -XPOST http://127.0.0.1:8190/v2/log/level -d "dtle_log_level=INFO" 57 | ``` 58 | 59 | 注:dtle后续重启时,仍然使用配置文件中的日志级别。 60 | -------------------------------------------------------------------------------- /4/4.2_command.md: -------------------------------------------------------------------------------- 1 | # 命令说明 2 | 3 | dtle二进制文件仅作为nomad插件使用。各项功能通过`nomad`二进制执行。 4 | 5 | ## 启动nomad节点 6 | 7 | ``` 8 | nomad agent -config=/path/to/nomad.hcl 9 | ``` 10 | 11 | ## 集群相关 12 | ``` 13 | # 查看管理(server)节点 14 | nomad server members 15 | nomad server members -address=http://127.0.0.1:4646 16 | 17 | # 查看执行(client)节点,即运行dtle插件的节点 18 | nomad node status 19 | nomad node status -address=http://127.0.0.1:4646 20 | 21 | # 查看某个节点的状态 22 | nomad node status 23 | ``` 24 | 25 | 此时nomad命令作为HTTP客户端连接nomad agent, 如果agent不在默认地址,则需要指定 `-address=...`, 下同。 26 | 27 | ## job相关 28 | 29 | ``` 30 | # 增加 31 | nomad job run job.hcl 32 | nomad job run -address="http://127.0.0.1:4646" job.hcl 33 | 34 | # 删除 35 | nomad job stop -purge 36 | 37 | # 查看所有 38 | nomad job status 39 | 40 | # 查看某个 41 | nomad job status 42 | nomad job status -address=http://127.0.0.1:4646 43 | ``` 44 | 45 | ## 查看版本 46 | 47 | 查看nomad本体版本 48 | ``` 49 | nomad version 50 | ``` 51 | 52 | 查看某一节点的dtle插件版本 53 | 54 | ``` 55 | nomad node status -verbose | grep dtle 56 | ``` 57 | 58 | 输出 59 | ``` 60 | dtle true true Healthy 2020-10-09T14:05:00+08:00 61 | driver.dtle = 1 62 | driver.dtle.full_version = 9.9.9.9-binlog-provider-7d5a0766 63 | driver.dtle.version = 9.9.9.9 64 | ``` 65 | -------------------------------------------------------------------------------- /4/4.3.1_tuning.md: -------------------------------------------------------------------------------- 1 | # 性能调优 2 | 3 | 部分参数可能影响复制性能。 4 | 5 | ## nomad constraint 6 | 7 | 限制 task 在某个nomad client节点上执行。当源端目标端MySQL之间网络延迟很大时,应在各个主机/机房设立nomad client,并限制 task 在本地节点上执行,以充分利用dtle的压缩传输。 8 | 9 | ## ReplChanBufferSize 10 | 11 | 默认60,增量事物队列数量。增大可以降低可能的空等,但同时会占用更多内存。 12 | 13 | ## ChunkSize 14 | 15 | 默认2000。全量复制时每次选取的行数。增大可以增加吞吐量,但同时会占用更多内存。 16 | 17 | ## GroupMaxSize & GroupTimeout 18 | 19 | 
GroupMaxSize默认值1,即每个事务立刻发送。增大后将等待数据量达到设定值再打包发送多个事务。 20 | 可增加传输时压缩率,适合低带宽网络。 21 | 22 | 设定GroupTimeout可避免数据量不足时等待过久。默认值100(毫秒)。一般设成略小于 ping RTT 的时间值。 23 | 24 | ## 增量的并行回放(MTS)相关 25 | 26 | 推荐使用MySQL 5.7.22+ 和 MySQL 8.0 GA 后引入的 WriteSet MTS。在源端MySQL设置 27 | 28 | ```sql 29 | set global transaction_write_set_extraction = XXHASH64; 30 | set global binlog_transaction_dependency_tracking = WRITESET; 31 | -- will take effect for new session 32 | ``` 33 | 34 | 此后MySQL生成的binlog中将附带TX依赖信息,dtle回放时可以利用依赖信息进行调度。 35 | 36 | 在dtle dest task config中设置ParallelWorkers,控制增量并行回放线程数。参考值为8~64。 37 | 38 | 如果因版本和权限问题,不能在源端MySQL上设置WriteSet Tracking,则可以使用dtle的依赖计算功能(`UseMySQLDependency = false`)。 39 | 40 | ## 批量插入(bulk insert) 41 | 42 | 当源端使用批量插入,即 `insert into ... values (),(), ..., ()` 时,dtle会在目标端使用批量插入。 43 | 44 | dtle会使用两个固定数量(行数)的批量插入PreparedStatement,默认为4和8。超过8的会被分到下一批。小于4的会单独插入。 45 | 46 | 可用`BulkInsert1`和`BulkInsert2`调整批量插入使用的数量。 47 | -------------------------------------------------------------------------------- /4/4.3.2_job_sample.md: -------------------------------------------------------------------------------- 1 | # Job 示例 2 | 3 | ## 复制整个实例的所有数据库 4 | job.hcl 中ReplicateDoDb配置: 5 | ``` 6 | ReplicateDoDb = [] 7 | ``` 8 | 9 | ## 复制指定数据库 10 | ``` 11 | ReplicateDoDb = [{ 12 | TableSchema = "action_db_1" 13 | }] 14 | ``` 15 | 16 | ## 复制一个库中的多个表 17 | job.hcl 中ReplicateDoDb配置: 18 | ``` 19 | ReplicateDoDb = [{ 20 | TableSchema = "action_db_1" 21 | Tables = [{ 22 | TableName = "sbtest1" 23 | }, { 24 | TableName = "sbtest2" 25 | }, { 26 | TableName = "sbtest3" 27 | }] 28 | }] 29 | ``` 30 | 31 | ## 复制多个库中的多个表 32 | job.hcl 中ReplicateDoDb配置: 33 | ``` 34 | ReplicateDoDb = [{ 35 | TableSchema = "action_db_1" 36 | Tables = [{ 37 | TableName = "sbtest1" 38 | }, { 39 | TableName = "sbtest2" 40 | }, { 41 | TableName = "sbtest3" 42 | }] 43 | }, { 44 | TableSchema = "action_db_2" 45 | Tables = [{ 46 | TableName = "sbtest1" 47 | }, { 48 | TableName = "sbtest2" 49 | }, { 50 | TableName = "sbtest3" 51 | }] 52 | }] 53 | ``` 54 | 55 | ## 带where条件复制任务 56 | 参考[2.2.MySQL 的数据分散](../2/2.2_mysql_replication_1_n.md) 57 | 58 | ## 使用正则挑选复制库表 59 | 参考[3.8.dtle mapping 支持](../3/3.8_dtle_mapping.md) 60 | 61 | ## 忽略指定的库 62 | job.hcl通过以下配置忽略库db1及其中所有的表 63 | ``` 64 | ReplicateDoDb = [] 65 | ReplicateIgnoreDb = [{ 66 | TableSchema = "db1" 67 | }] 68 | ``` 69 | job.hcl通过以下配置在ReplicateDoDb指定的范围内忽略库db1及其下所有表,最终效果是没有要复制的库表 70 | ``` 71 | ReplicateDoDb = [{ 72 | TableSchema = "db1" 73 | Tables = [{ 74 | TableName = "tb1" 75 | }] 76 | }] 77 | ReplicateIgnoreDb = [{ 78 | TableSchema = "db1" 79 | }] 80 | ``` 81 | ## 忽略指定的表 82 | job.hcl通过以下配置在ReplicateDoDb指定的范围内忽略db1.tb1,最终复制库db1下除了tb1以外的表 83 | ``` 84 | ReplicateDoDb = [{ 85 | TableSchema = "db1" 86 | }] 87 | ReplicateIgnoreDb = [{ 88 | TableSchema = "db1" 89 | Tables = [{ 90 | TableName = "tb1" 91 | }] 92 | }] 93 | ``` 94 | job.hcl通过以下配置在ReplicateDoDb指定的范围内忽略db1.tb1,最终只复制库db1结构,但不复制db1下的任何表 95 | ``` 96 | ReplicateDoDb = [{ 97 | TableSchema = "db1" 98 | Tables = [{ 99 | TableName = "tb1" 100 | }] 101 | }] 102 | ReplicateIgnoreDb = [{ 103 | TableSchema = "db1" 104 | Tables = [{ 105 | TableName = "tb1" 106 | }] 107 | }] 108 | ``` 109 | ## 限定故障转移域 110 | 源端任务和目标端任务在指定的`datacenter`内进行故障转移。 111 | dtle配置文件: 112 | ``` 113 | name = "dtle-1" # rename for each node 114 | datacenter = "shanghai" 115 | ... 
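# 补充示意(假设性说明,非完整配置):多节点部署时,除 name/datacenter 外,
# 每个节点的 data_dir 及各类地址、端口也需互不相同(参见 4.1 节点配置),例如:
# data_dir = "/var/lib/nomad-1"   # 假设的示例路径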
116 | ``` 117 | job示例: 118 | ``` 119 | job "test_constraint" { 120 | # 此处表示该job可以运行在datacenter为"shanghai"和"beijing"的节点上 121 | datacenters = ["shanghai", "beijing"] 122 | 123 | group "Src" { 124 | constraint { 125 | attribute = "${node.datacenter}" 126 | operator = "=" 127 | value = "shanghai" 128 | } 129 | task "src" { 130 | driver = "dtle" 131 | config { 132 | ReplicateDoDb = [{ 133 | TableSchema = "test" 134 | }] 135 | ConnectionConfig = { 136 | ... 137 | } 138 | } 139 | } 140 | } 141 | group "Dest" { 142 | constraint { 143 | attribute = "${node.datacenter}" 144 | operator = "=" 145 | value = "beijing" 146 | } 147 | task "dest" { 148 | driver = "dtle" 149 | config { 150 | ConnectionConfig = { 151 | ... 152 | } 153 | } 154 | } 155 | } 156 | } 157 | ``` 158 | -------------------------------------------------------------------------------- /4/4.3_job_configuration.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # 作业(job)配置 4 | 5 | 作业配置一般采用 json (HTTP API 提交)或 hcl (nomad 命令行工具提交)文件。样例配置在 `/usr/share/dtle/scripts/` 中。 6 | 7 | nomad job 的完整配置参考 https://www.nomadproject.io/docs/job-specification/ 8 | 9 | nomad job 有group/task层级,一个group中的tasks会被放在同一个节点执行。dtle要求src和dest task分别放在src 和 dest group. 10 | task 中指定 driver = "dtle", 在config段落中填写dtle专有配置。 11 | 12 | 从4.22.11.0开始,dtle配置发生变化 13 | - 所有常规配置填在源端任务(src task) 14 | - 原两端的`ConnectionConfig`的分别重命名为`SrcConnectionConfig`和`DestConnectionConfig` 15 | - 目标端固定填写一个配置项 `DestType` 16 | 17 | ``` 18 | group "dest" { 19 | task "dest" { 20 | driver = "dtle" 21 | config { 22 | DestType = "mysql" # 或"kafka" 23 | } 24 | } 25 | } 26 | ``` 27 | 28 | 从3.x ~ 4.22.07.x升级到4.22.11后, 可使用 `/usr/share/dtle/scripts/dtle-7to11.py`更新现有job配置格式. 29 | ```sh 30 | ./dtle-7to11.py 'http://127.0.0.1:4646' 31 | ``` 32 | 33 | ## dtle 源端任务有如下配置项: 34 | 35 | | 参数名 | 必填? | 类型 | 默认值 | 说明 | 36 | |-----------------------|-------|----------|----------------|-----------------------------------------------------------------------------------------------------------------------------------| 37 | | Gtid | 否 | String | 默认为 全量+增量 任务 | MySQL的GTID集合(区间), 可取值:
1. 默认为空, 则为 <全量+增量> 复制任务
2. 已复制的GTID集合(不是点位), 将从未复制的GTID开始增量复制 | 38 | | GtidStart | 否 | String | | 增量复制开始的 GTID 点位. (将自动求差集获取上述 GTID 集合.) 需要保持 Gtid 为空 | 39 | | AutoGtid | 否 | Bool | false | 设为 true 后自动从当前 GTID 开始增量任务. 需要保持 Gtid 和 GtidStart 为空. | 40 | | BinlogRelay | 否 | Bool | false | 是否使用Binlog Relay(中继)机制. 即先将源端mysql binlog读到本地, 避免源端清除binlog导致任务失败. 注意: 如果使用带有BinlogRelay的纯增量复制, 必须用Gtid指定复制起点,不能使用BinlogFile/Pos。 | 41 | | BinlogFile | 否 | String | | 增量任务开始的Binlog文件(即源端mysql上 `show master status` 的结果). | 42 | | BinlogPos | 否 | Int | 0 | 增量任务开始的Binlog位置, 和BinlogFile配套使用. | 43 | | ReplicateDoDb | 否 | Object数组 | - | 如为空`[]`, 则复制整个数据库实例. 可填写多元素. 元素内容见下方说明 | 44 | | ReplicateIgnoreDb | 否 | Object数组 | - | 指定要忽略的库表,优先级高于ReplicateDoDb。如为空`[]`, 则完全执行ReplicateDoDb配置. 可填写多元素. 元素内容见下方说明 | 45 | | SrcConnectionConfig | 否 | Object | - | MySQL源端信息, 见下方 ConnectionConfig 说明。和 OracleConfig 二选一填写。 | 46 | | DestConnectionConfig | 否 | Object | - | MySQL目标端信息, 见下方 ConnectionConfig 说明。和 KafkaConfig 二选一填写。 | 47 | | SrcOracleConfig | 否 | Object | - | Oracle源端信息, 见下方 OracleConfig 说明。和 SrcConnectionConfig 二选一填写。 | 48 | | KafkaConfig | 否 | Object | - | Kafka目标端信息, 见下方 KafkaConfig 说明。和 DestConnectionConfig 二选一填写。 | 49 | | DropTableIfExists | 否 | Bool | false | 全量复制时, 在目标端删除参与复制的表, 之后由dtle自动创建表结构 (相关参数: `SkipCreateDbTable`). 如果开启此选项, 目标端数据库用户需要有相应表的`DROP`权限. | 50 | | SkipCreateDbTable | 否 | Bool | false | 不为目标库创建复制库和复制表. 如果关闭此选项, 目标端数据库用户需要有相应表的`CREATE`权限. | 51 | | ParallelWorkers | 否 | Int | 1 | 回放端的并发数. 当值大于1时, 目标端会进行并行回放 | 52 | | UseMySQLDependency | 否 | Bool | true | 默认使用MySQL的并行回放事务依赖关系检测。如果不能开启源端MySQL的WRITESET追踪,可将此设为false,使用dtle的依赖检测。 | 53 | | DependencyHistorySize | 否 | Int | 2500 | 使用dtle并行复制计算事务依赖时,保存的行数。增大可以潜在地增加并行度,但会更消耗内存。 | 54 | | ForeignKeyChecks | 否 | Bool | true | 3.21.10.0+. 默认开启目标端MySQL连接上的 `@@foreign_key_checks` | 55 | | ReplChanBufferSize | 否 | Int | 32 | 复制任务缓存的大小, 单位为事务组数。事务组大小和GroupMaxSize/GroupTimeout有关。 | 56 | | ChunkSize | 否 | Int | 2000 | 全量复制时, 每次读取-传输-写入的行数 | 57 | | DumpEntryLimit | 否 | Int | 67108864 (64M) | 复制时, 读取后分块发送的分块大小。空闲内存较小时需适当调小。适用于大全量/增量大事务 | 58 | | ExpandSyntaxSupport | 否 | Bool | false | 支持复制 用户权限/存储过程DDL/函数DDL | 59 | | GroupMaxSize | 否 | Int | 1 | 源端发送数据时, 等待数据包达到一定大小(`GroupMaxSize`字节)后发送该包. 单位为字节. 默认值1表示即刻发送数据 | 60 | | GroupTimeout | 否 | Int | 100 | 源端发送数据时, 等待数据包达到超时时间(`GroupTimeout`毫秒)发送该包. 单位为毫秒. | 61 | | SqlFilter | 否 | String数组 | [] | 是否跳过一些事件, 如 `["NoDMLDelete", "NoDDLDropSchema", "NoDDLDropTable", "NoDDLDropIndex", "NoDDLTruncate"]`。详见下文。 | 62 | | SlaveNetWriteTimeout | 否 | Int | 28800 (8小时) | 调整MySQL slave线程的超时时间。MySQL默认值为60,太短可能导致断连。太长则会导致异常连接回收不及时。 | 63 | | BulkInsert1 | 否 | Int | 4 | 批量插入第一级数量。见[性能调优](4.3.1_tuning.md) | 64 | | BulkInsert2 | 否 | Int | 8 | 批量插入第二级数量。 | 65 | | BulkInsert3 | 否 | Int | 128 | 批量插入第三级数量。 | 66 | | SetGtidNext | 否 | Bool | false | 目标端执行事务前执行`set gtid_next = ...`, 使源端目标端MySQL事务gtid相同。可用以避免循环复制。需要 `REPLICATION_APPLIER` (MySQL 8.0)或 `SUPER` 权限 | 67 | | TwoWaySync | 否 | Bool | false | 开启双向任务。 | 68 | | TwoWaySyncGtid | 否 | String | "" | 反向任务使用的Gtid。当值为"auto"时,从当前 GTID 开始增量。 | 69 | | RetryTxLimit | 否 | Int | 3 | 当执行发生某些错误时(如:deadlock),重试事务的次数 | 70 | 71 | ReplicateDoDb 每个元素有如下字段: 72 | 73 | | 参数名 | 必填? 
| 类型 | 默认值 | 说明 | 74 | |---------------------|-------|------------|--------|------------------------------------------------------------------------------------------------------| 75 | | TableSchema | 否 | String | - | 数据库名 | 76 | | TableSchemaRegex | 否 | String | - | 数据库映射正则表达式,可用于多个数据库重命名 | 77 | | TableSchemaRename | 否 | String | - | 重命名后的数据库名称,当进行多数据库重命名时,支持正则表达式,使用见[demo](../3/3.8_dtle_mapping.md) | 78 | | Tables | 否 | Object数组 | - | 可配置多张表, 类型为Table. 若不配置, 则复制指定数据库中的所有表 | 79 | | Table.TableName | 否 | String | - | 表名 | 80 | | Table.Where | 否 | String | - | 只复制满足该条件的数据行. 语法为SQL表达式, 返回值应为布尔值. 可以引用表中的列名. | 81 | | Table.TableRegex | 否 | String | - | 表名映射匹配正则表达式,用于多个表同时重命名. | 82 | | Table.TableRename | 否 | String | - | 重命名后的表名,当进行多表重命名时,支持支持正则表达,见[demo](../3/3.8_dtle_mapping.md) | 83 | | Table.ColumnMapFrom | 否 | String数组 | - | 列映射(暂不支持正则表达式)。见[demo](../3/3.8_dtle_mapping.md) | 84 | | Table.ColumnMapTo | 否 | String数组 | - | 列映射(暂不支持正则表达式)。见[demo](../3/3.8_dtle_mapping.md) | 85 | 86 | 注:hcl格式中`${SOME_TEXT}`会被认为是变量引用。正则替换中输入此类文字时,则需使用双$符号:`$${SOME_TEXT}`。 87 | 88 | ReplicateIgnoreDb 每个元素有如下字段: 89 | 90 | | 参数名 | 必填? | 类型 | 默认值 | 说明 | 91 | |-----------------|-------|------------|--------|-----------------------------------------------------------------| 92 | | TableSchema | 是 | String | - | 数据库名 | 93 | | Tables | 否 | Object数组 | - | 可配置多张表, 类型为Table. 若不配置, 则忽略指定数据库中的所有表 | 94 | | Table.TableName | 否 | String | - | 表名 | 95 | 96 | ConnectionConfig 有如下字段: 97 | 98 | | 参数名 | 必填? | 类型 | 默认值 | 说明 | 99 | | ------------- | ------------- | ------------------- | ------------- | ------------- | 100 | | Host | 是 | String | - | 数据源地址 | 101 | | Port | 是 | String | - | 数据源端口 | 102 | | User | 是 | String | - | 数据源用户名 | 103 | | Password | 是 | String | - | 数据源密码 | 104 | | Charset | 否 | String | utf8mb4 | 数据源的字符集 | 105 | 106 | KafkaConfig 有如下字段: 107 | 108 | | 参数名 | 必填? | 类型 | 默认值 | 说明 | 109 | |----------------------|-----|----------|------------------------|------------------------------------------------------------------------------------------| 110 | | Topic | 是 | String | - | Kafka Topic | 111 | | SchemaChangeTopic | 否 | String | "schema-changes.Topic" | Schema change (DDL) 消息使用的topic | 112 | | TopicWithSchemaTable | 否 | Bool | true | 默认最终topic为 `指定的Topic.库名.表名`, 如果不需要追加库表名,请设为false | 113 | | Brokers | 是 | String数组 | - | Kafka Brokers, 如 `["127.0.0.1:9192", "..."]` | 114 | | Converter | 否 | String | json | Kafka Converter。目前仅支持json | 115 | | MessageGroupMaxSize | 否 | int | 1 | 目标端向kafka发送消息时, 等待MySQL事务数据包达到一定大小(MessageGroupMaxSize字节)后将该包序列化并发送. 单位为字节. 默认值1表示即刻发送数据 | 116 | | MessageGroupTimeout | 否 | int | 100 | 目标端向kafka发送消息时, 等待数据包达到超时时间(MessageGroupTimeout毫秒)发送该包. 单位为毫秒. | 117 | | User | 否 | String | - | Kafka SASL.User | 118 | | Password | 否 | String | - | Kafka SASL.Password | 119 | 120 | OracleConfig 有如下字段: 121 | 122 | | 参数名 | 必填? 
| 类型 | 默认值 | 说明 | 123 | | ------------- | ------------- | ------------------- | ------------- | ------------- | 124 | | Host | 是 | String | - | 数据源地址 | 125 | | Port | 是 | String | - | 数据源端口 | 126 | | User | 是 | String | - | 数据源用户名 | 127 | | Password | 是 | String | - | 数据源密码 | 128 | | ServiceName | 否 | String | XE | 数据源服务名 | 129 | | Scn | 否 | int | 0 | 同步起点 | 130 | 131 | ### SqlFilter注意事项 132 | 全部的filter: 133 | - NoDML 134 | - NoDMLInsert, NoDMLDelete, NoDMLUpdate 135 | - NoDDL 136 | - NoDDLCreateSchema, NoDDLCreateTable 137 | - NoDDLDropSchema, NoDDLDropTable, NoDDLDropIndex, NoDDLTruncate 138 | - NoDDLAlterTable 139 | - NoDDLAlterTableAddColumn, NoDDLAlterTableDropColumn 140 | - NoDDLAlterTableModifyColumn, NoDDLAlterTableChangeColumn, NoDDLAlterTableAlterColumn 141 | 142 | SqlFilter只能简单过滤相关语句。不会自动转换后续语句。例如 143 | ```sql 144 | -- SqlFilter = ["NoDDLDropTable"] 145 | 146 | /** 源端 **/ 147 | -- 已有 table a.a (id int primary key) 148 | drop table a.a; 149 | create table a.a (id int primary key, val int); 150 | insert into a.a values (1, 11); 151 | 152 | /** 目标端 **/ 153 | -- 已有 table a.a (id int primary key) 154 | -- drop table 语句被过滤 155 | create table a.a (id int primary key, val int); 156 | -- 执行错误,目标表已存在 157 | insert into a.a values (1, 11); 158 | -- 执行错误,列数目不对 159 | ``` 160 | 161 | 用户需自行确保在发生过滤的情况下,后续DML/DDL能正确执行。 162 | 163 | ## nomad job 常用通用配置 164 | 165 | ### constraint 166 | 167 | job、group 或 task 级配置。配置后该job/group/task会绑定在指定的节点上执行 168 | ``` 169 | constraint { 170 | attribute = "${node.unique.name}" 171 | value = "nomad3" 172 | } 173 | ``` 174 | 175 | 完整参考 176 | - https://www.nomadproject.io/docs/job-specification/constraint 177 | - https://www.nomadproject.io/docs/runtime/interpolation#interpreted_node_vars 178 | 179 | ### resources 180 | task级配置,src/dest task需各自重复。默认值为 `cpu=100`,`memory=300`。 181 | 以默认值建立大量轻量级任务,会导致资源不够而pending,可适当调小。 182 | 183 | 任务的内存消耗和每行大小、事物大小、队列长度有关。注意真实资源消耗,避免OOM。 184 | 185 | ``` 186 | task "src" { 187 | resources { 188 | cpu = 100 # MHz 189 | memory = 300 # MB 190 | } 191 | } 192 | ``` 193 | 194 | ### restart & reschedule 195 | nomad job 默认有如下 [restart](https://www.nomadproject.io/docs/job-specification/restart) 和 [reschedule](https://www.nomadproject.io/docs/job-specification/reschedule) 配置 196 | 197 | ``` 198 | restart { # group or task level 199 | interval = "30m" 200 | attempts = 2 201 | delay = "15s" 202 | mode = "fail" # "fail" or "delay" 203 | # "delay" 意味着interval过后继续尝试 204 | # "fail" 则不再尝试 205 | } 206 | reschedule { # job or group level 207 | delay = "30s" 208 | delay_function = "exponential" 209 | max_delay = "1h" 210 | unlimited = true 211 | } 212 | ``` 213 | 214 | - 当task报错时,会根据restart配置,30分钟内在同一节点上重启最多两次 215 | - 即使失败的job被`stop -purge`再重新添加,也需要根据restart参数重启 216 | - 2次重启均失败后,会根据reschedule配置,在其他节点上执行 217 | 218 | 为了避免无限reschedule带来的问题,dtle安装包提供的样例job配置中(`/usr/share/dtle/scripts/example.job.*`),限制reschedule为每半小时1次: 219 | 220 | ``` 221 | reschedule { 222 | attempts = 1 223 | interval = "30m" 224 | unlimited = false 225 | } 226 | # 或json格式 227 | "Reschedule": { 228 | "Attempts": 1, 229 | "Interval": 1800000000000, 230 | "Unlimited": false 231 | } 232 | ``` 233 | -------------------------------------------------------------------------------- /4/4.4.1_http_api_oldv2.md: -------------------------------------------------------------------------------- 1 | # HTTP API 说明 (dtle 2.x 兼容层) 2 | 3 | dtle 3.x 作为nomad plugin发布。nomad自身API和dtle 2.x略有差异。我们提供了兼容层(开启方式见[节点配置](./4.1_node_configuration.md),api_addr),支持 dtle 2.x的HTTP API。 4 | 5 | ## 列出所有作业 6 | 7 | API: `GET 
/v1/jobs` 8 | 9 | 样例: 10 | 11 | ``` 12 | > curl -XGET "172.17.5.6:8190/v1/jobs" | jq 13 | < [ 14 | { 15 | "CreateIndex": 59, 16 | "ID": "8ce4b408-8c64-41b9-04d7-baf451348e89", 17 | "JobModifyIndex": 469, 18 | "JobSummary": { 19 | "Constraints": null, 20 | "CreateIndex": 59, 21 | "Datacenters": [ 22 | "dc1" 23 | ], 24 | "EnforceIndex": false, 25 | "Failover": false, 26 | "ID": "8ce4b408-8c64-41b9-04d7-baf451348e89", 27 | "JobModifyIndex": 469, 28 | "ModifyIndex": 469, 29 | "Name": "test1-2", 30 | "Orders": [], 31 | "Region": "global", 32 | "Status": "running", 33 | "StatusDescription": "", 34 | "Tasks": [ 35 | { 36 | "Config": { 37 | "ExpandSyntaxSupport": false, 38 | "NatsAddr": "127.0.0.1:8193", 39 | "ReplChanBufferSize": "60", 40 | "ReplicateDoDb": [ 41 | { 42 | "TableSchema": "db1", 43 | "Tables": [ 44 | { 45 | "TableName": "tb1" 46 | } 47 | ] 48 | } 49 | ], 50 | "ChunkSize": "2000", 51 | "ApproveHeterogeneous": true, 52 | "DropTableIfExists": false, 53 | "ConnectionConfig": { 54 | "Password": "*", 55 | "Host": "172.100.9.3", 56 | "Port": "3306", 57 | "User": "lx1" 58 | }, 59 | "TrafficAgainstLimits": 0, 60 | "Gtid": "8868d98f-af5e-11e8-9aa9-0242ac110002:1-171", 61 | "SkipCreateDbTable": false 62 | }, 63 | "ConfigLock": {}, 64 | "Constraints": null, 65 | "Driver": "MySQL", 66 | "Leader": false, 67 | "NodeID": "", 68 | "NodeName": "dtle", 69 | "Type": "Src" 70 | }, 71 | { 72 | "Config": { 73 | "NatsAddr": "127.0.0.1:8193", 74 | "Gtid": "8868d98f-af5e-11e8-9aa9-0242ac110002:1-171", 75 | "SkipCreateDbTable": false, 76 | "DropTableIfExists": false, 77 | "ExpandSyntaxSupport": false, 78 | "ReplChanBufferSize": "60", 79 | "ApproveHeterogeneous": true, 80 | "ConnectionConfig": { 81 | "User": "test1", 82 | "Password": "*", 83 | "Host": "172.100.9.6", 84 | "Port": "3306" 85 | } 86 | }, 87 | "ConfigLock": {}, 88 | "Constraints": null, 89 | "Driver": "MySQL", 90 | "Leader": true, 91 | "NodeID": "", 92 | "NodeName": "dtle", 93 | "Type": "Dest" 94 | } 95 | ], 96 | "Type": "synchronous" 97 | }, 98 | "ModifyIndex": 469, 99 | "Name": "test1-2", 100 | "Status": "running", 101 | "StatusDescription": "", 102 | "Type": "synchronous" 103 | }, 104 | { 105 | "CreateIndex": 66, 106 | "ID": "ae2a74ac-1ee7-7f82-4b52-2f1ca759eab5", 107 | "JobModifyIndex": 469, 108 | "JobSummary": { 109 | "Constraints": null, 110 | "CreateIndex": 66, 111 | "Datacenters": [ 112 | "dc1" 113 | ], 114 | "EnforceIndex": false, 115 | "Failover": false, 116 | "ID": "ae2a74ac-1ee7-7f82-4b52-2f1ca759eab5", 117 | "JobModifyIndex": 469, 118 | "ModifyIndex": 469, 119 | "Name": "test1-2", 120 | "Orders": [], 121 | "Region": "global", 122 | "Status": "running", 123 | "StatusDescription": "", 124 | "Tasks": [ 125 | { 126 | "Config": { 127 | "ConnectionConfig": { 128 | "Host": "172.100.9.3", 129 | "Port": "3306", 130 | "User": "lx1", 131 | "Password": "*" 132 | }, 133 | "TrafficAgainstLimits": 0, 134 | "NatsAddr": "127.0.0.1:8193", 135 | "ReplicateDoDb": [ 136 | { 137 | "Tables": [ 138 | { 139 | "TableName": "tb1" 140 | } 141 | ], 142 | "TableSchema": "db1" 143 | } 144 | ], 145 | "DropTableIfExists": false, 146 | "ExpandSyntaxSupport": false, 147 | "ChunkSize": "2000", 148 | "SkipCreateDbTable": false, 149 | "Gtid": "8868d98f-af5e-11e8-9aa9-0242ac110002:1-171", 150 | "ApproveHeterogeneous": true, 151 | "ReplChanBufferSize": "60" 152 | }, 153 | "ConfigLock": {}, 154 | "Constraints": null, 155 | "Driver": "MySQL", 156 | "Leader": false, 157 | "NodeID": "", 158 | "NodeName": "dtle", 159 | "Type": "Src" 160 | }, 161 | { 162 | "Config": { 163 | 
"DropTableIfExists": false, 164 | "ExpandSyntaxSupport": false, 165 | "NatsAddr": "127.0.0.1:8193", 166 | "ReplChanBufferSize": "60", 167 | "ApproveHeterogeneous": true, 168 | "ConnectionConfig": { 169 | "Port": "3306", 170 | "User": "test1", 171 | "Password": "*", 172 | "Host": "172.100.9.6" 173 | }, 174 | "Gtid": "8868d98f-af5e-11e8-9aa9-0242ac110002:1-171", 175 | "SkipCreateDbTable": false 176 | }, 177 | "ConfigLock": {}, 178 | "Constraints": null, 179 | "Driver": "MySQL", 180 | "Leader": true, 181 | "NodeID": "", 182 | "NodeName": "dtle", 183 | "Type": "Dest" 184 | } 185 | ], 186 | "Type": "synchronous" 187 | }, 188 | "ModifyIndex": 469, 189 | "Name": "test1-2", 190 | "Status": "running", 191 | "StatusDescription": "", 192 | "Type": "synchronous" 193 | } 194 | ] 195 | ``` 196 | 197 | ## 获取某个作业的信息 198 | 199 | API: `GET /v1/job/{ID}` 200 | 201 | 样例: 202 | 203 | ``` 204 | > curl -XGET "172.17.5.6:8190/v1/job/8ce4b408-8c64-41b9-04d7-baf451348e89" | jq 205 | < { 206 | "Constraints": null, 207 | "CreateIndex": 59, 208 | "Datacenters": [ 209 | "dc1" 210 | ], 211 | "EnforceIndex": false, 212 | "Failover": false, 213 | "ID": "8ce4b408-8c64-41b9-04d7-baf451348e89", 214 | "JobModifyIndex": 471, 215 | "ModifyIndex": 471, 216 | "Name": "test1-2", 217 | "Orders": [], 218 | "Region": "global", 219 | "Status": "running", 220 | "StatusDescription": "", 221 | "Tasks": [ 222 | { 223 | "Config": { 224 | "ConnectionConfig": { 225 | "Host": "172.100.9.3", 226 | "Port": "3306", 227 | "User": "lx1", 228 | "Password": "111111" 229 | }, 230 | "ChunkSize": "2000", 231 | "ApproveHeterogeneous": true, 232 | "DropTableIfExists": false, 233 | "ReplChanBufferSize": "60", 234 | "ReplicateDoDb": [ 235 | { 236 | "TableSchema": "db1", 237 | "Tables": [ 238 | { 239 | "TableName": "tb1" 240 | } 241 | ] 242 | } 243 | ], 244 | "NatsAddr": "127.0.0.1:8193", 245 | "TrafficAgainstLimits": 0, 246 | "Gtid": "8868d98f-af5e-11e8-9aa9-0242ac110002:1-171", 247 | "SkipCreateDbTable": false, 248 | "ExpandSyntaxSupport": false 249 | }, 250 | "ConfigLock": {}, 251 | "Constraints": null, 252 | "Driver": "MySQL", 253 | "Leader": false, 254 | "NodeID": "", 255 | "NodeName": "dtle", 256 | "Type": "Src" 257 | }, 258 | { 259 | "Config": { 260 | "Gtid": "8868d98f-af5e-11e8-9aa9-0242ac110002:1-171", 261 | "SkipCreateDbTable": false, 262 | "DropTableIfExists": false, 263 | "ExpandSyntaxSupport": false, 264 | "ReplChanBufferSize": "60", 265 | "ApproveHeterogeneous": true, 266 | "ConnectionConfig": { 267 | "Host": "172.100.9.6", 268 | "Port": "3306", 269 | "User": "test1", 270 | "Password": "111111" 271 | }, 272 | "NatsAddr": "127.0.0.1:8193" 273 | }, 274 | "ConfigLock": {}, 275 | "Constraints": null, 276 | "Driver": "MySQL", 277 | "Leader": true, 278 | "NodeID": "", 279 | "NodeName": "dtle", 280 | "Type": "Dest" 281 | } 282 | ], 283 | "Type": "synchronous" 284 | } 285 | ``` 286 | 287 | ## 获取 服务端集群 的leader节点信息 288 | 289 | API: `GET /v1/leader` 290 | 291 | 样例: 292 | 293 | ``` 294 | > curl -XGET "172.17.5.6:8190/v1/leader" 295 | < "172.17.5.6:8191" 296 | ``` 297 | 298 | ## 列出 服务端集群 的节点 299 | 300 | API: `GET /v1/members` 301 | 302 | 样例: 303 | 304 | ``` 305 | > curl -XGET "172.17.5.6:8190/v1/members" | jq 306 | < { 307 | "Members": [ 308 | { 309 | "Addr": "172.17.5.6", 310 | "DelegateCur": 4, 311 | "DelegateMax": 5, 312 | "DelegateMin": 2, 313 | "Name": "udp-6.global", 314 | "Port": 8192, 315 | "ProtocolCur": 2, 316 | "ProtocolMax": 5, 317 | "ProtocolMin": 1, 318 | "Status": "alive", 319 | "Tags": { 320 | "port": "8191", 321 | "bootstrap": "1", 322 | 
"role": "server", 323 | "region": "global", 324 | "dc": "dc1", 325 | "build": "9.9.9.9" 326 | } 327 | } 328 | ], 329 | "ServerDC": "dc1", 330 | "ServerName": "udp-6", 331 | "ServerRegion": "global" 332 | } 333 | ``` 334 | 335 | 336 | ## 列出所有节点 337 | 338 | API: `GET /v1/nodes` 339 | 340 | 样例: 341 | 342 | ``` 343 | > curl -XGET "172.17.5.6:8190/v1/nodes" | jq 344 | < [ 345 | { 346 | "CreateIndex": 4, 347 | "Datacenter": "dc1", 348 | "HTTPAddr": "172.17.5.6:8190", 349 | "ID": "8f05de6f-34ef-5989-9a20-c9f8bc9817b0", 350 | "ModifyIndex": 43, 351 | "Name": "udp-6", 352 | "Status": "ready", 353 | "StatusDescription": "" 354 | } 355 | ] 356 | ``` 357 | 358 | ## 列出某作业的所有任务执行 359 | 360 | API: `GET /job//allocations` 361 | 362 | 样例: 363 | 364 | ``` 365 | > curl -XGET "172.17.5.6:8190/v1/job/8ce4b408-8c64-41b9-04d7-baf451348e89/allocations" | jq 366 | < [ 367 | { 368 | "ClientDescription": "", 369 | "ClientStatus": "running", 370 | "CreateIndex": 61, 371 | "CreateTime": 1538993511593515300, 372 | "DesiredDescription": "", 373 | "DesiredStatus": "run", 374 | "EvalID": "f67f2020-a326-6d12-2239-6da8f9dd3e4e", 375 | "ID": "33a7a1bb-8eb4-11a2-bd2c-0c7e0457dfc2", 376 | "JobID": "8ce4b408-8c64-41b9-04d7-baf451348e89", 377 | "ModifyIndex": 63, 378 | "Name": "test1-2.Src", 379 | "NodeID": "b32743aa-5c69-0853-239c-a69237e97c45", 380 | "Task": "Src", 381 | "TaskStates": { 382 | "Src": { 383 | "Events": [ 384 | { 385 | "DiskLimit": 0, 386 | "DriverError": "", 387 | "DriverMessage": "", 388 | "ExitCode": 0, 389 | "FailedSibling": "", 390 | "FailsTask": false, 391 | "KillError": "", 392 | "KillReason": "", 393 | "KillTimeout": 0, 394 | "Message": "", 395 | "RestartReason": "", 396 | "SetupError": "", 397 | "StartDelay": 0, 398 | "TaskSignal": "", 399 | "TaskSignalReason": "", 400 | "Time": "2018-10-08T10:11:51.603015093Z", 401 | "Type": "Received" 402 | }, 403 | { 404 | "DiskLimit": 0, 405 | "DriverError": "", 406 | "DriverMessage": "", 407 | "ExitCode": 0, 408 | "FailedSibling": "", 409 | "FailsTask": false, 410 | "KillError": "", 411 | "KillReason": "", 412 | "KillTimeout": 0, 413 | "Message": "", 414 | "RestartReason": "", 415 | "SetupError": "", 416 | "StartDelay": 0, 417 | "TaskSignal": "", 418 | "TaskSignalReason": "", 419 | "Time": "2018-10-08T10:11:51.603322601Z", 420 | "Type": "Started" 421 | } 422 | ], 423 | "Failed": false, 424 | "FinishedAt": null, 425 | "StartedAt": "2018-10-08T10:11:51.603998869Z", 426 | "State": "running" 427 | } 428 | } 429 | }, 430 | { 431 | "ClientDescription": "", 432 | "ClientStatus": "running", 433 | "CreateIndex": 61, 434 | "CreateTime": 1538993511593515300, 435 | "DesiredDescription": "", 436 | "DesiredStatus": "run", 437 | "EvalID": "f67f2020-a326-6d12-2239-6da8f9dd3e4e", 438 | "ID": "83a46402-2c98-ffa1-b953-1cc038d91a7c", 439 | "JobID": "8ce4b408-8c64-41b9-04d7-baf451348e89", 440 | "ModifyIndex": 63, 441 | "Name": "test1-2.Dest", 442 | "NodeID": "b32743aa-5c69-0853-239c-a69237e97c45", 443 | "Task": "Dest", 444 | "TaskStates": { 445 | "Dest": { 446 | "Events": [ 447 | { 448 | "DiskLimit": 0, 449 | "DriverError": "", 450 | "DriverMessage": "", 451 | "ExitCode": 0, 452 | "FailedSibling": "", 453 | "FailsTask": false, 454 | "KillError": "", 455 | "KillReason": "", 456 | "KillTimeout": 0, 457 | "Message": "", 458 | "RestartReason": "", 459 | "SetupError": "", 460 | "StartDelay": 0, 461 | "TaskSignal": "", 462 | "TaskSignalReason": "", 463 | "Time": "2018-10-08T10:11:51.603156346Z", 464 | "Type": "Received" 465 | }, 466 | { 467 | "DiskLimit": 0, 468 | "DriverError": "", 469 | 
"DriverMessage": "", 470 | "ExitCode": 0, 471 | "FailedSibling": "", 472 | "FailsTask": false, 473 | "KillError": "", 474 | "KillReason": "", 475 | "KillTimeout": 0, 476 | "Message": "", 477 | "RestartReason": "", 478 | "SetupError": "", 479 | "StartDelay": 0, 480 | "TaskSignal": "", 481 | "TaskSignalReason": "", 482 | "Time": "2018-10-08T10:11:51.603388746Z", 483 | "Type": "Started" 484 | } 485 | ], 486 | "Failed": false, 487 | "FinishedAt": null, 488 | "StartedAt": "2018-10-08T10:11:51.603690587Z", 489 | "State": "running" 490 | } 491 | } 492 | } 493 | ] 494 | ``` 495 | 496 | ## 查看某个任务执行的状态 497 | 498 | API: `GET /agent/allocation//stats` 499 | 500 | 样例: 501 | 502 | ``` 503 | > curl -XGET "172.17.5.6:8190/v1/agent/allocation/33a7a1bb-8eb4-11a2-bd2c-0c7e0457dfc2/stats" | jq 504 | < { 505 | "Tasks": { 506 | "Src": { 507 | "Backlog": "0/60", 508 | "BufferStat": { 509 | "ApplierGroupTxQueueSize": 0, 510 | "ApplierTxQueueSize": 0, 511 | "ExtractorTxQueueSize": 0, 512 | "SendBySizeFull": 0, 513 | "SendByTimeout": 0 514 | }, 515 | "CurrentCoordinates": { 516 | "ExecutedGtidSet": "", 517 | "File": "1.000002", 518 | "GtidSet": "8868d98f-af5e-11e8-9aa9-0242ac110002:171", 519 | "Position": 44918, 520 | "ReadMasterLogPos": 0, 521 | "RelayMasterLogFile": "", 522 | "RetrievedGtidSet": "" 523 | }, 524 | "DelayCount": null, 525 | "ETA": "0s", 526 | "ExecMasterRowCount": 14, 527 | "ExecMasterTxCount": 111939, 528 | "MsgStat": { 529 | "InBytes": 0, 530 | "InMsgs": 4, 531 | "OutBytes": 3413, 532 | "OutMsgs": 4, 533 | "Reconnects": 0 534 | }, 535 | "ProgressPct": "100.0", 536 | "ReadMasterRowCount": 14, 537 | "ReadMasterTxCount": 111939, 538 | "Stage": "Master has sent all binlog to slave; waiting for more updates", 539 | "TableStats": null, 540 | "ThroughputStat": null, 541 | "Timestamp": 1539004705449136600 542 | } 543 | } 544 | } 545 | ``` 546 | 547 | ## 创建/更新 一个作业 548 | 549 | API: `POST /v1/jobs` 550 | 551 | 样例: **job1.json的内容说明参看[作业(job)配置](./4.3_job_configuration.md)** 552 | 553 | ``` 554 | > curl -H "Accept:application/json" -XPOST "172.17.5.6:8190/v1/jobs" -d @job1.json 555 | ``` 556 | 557 | ## 删除一个作业 558 | 559 | API: `DELETE /v1/job/{ID}` 560 | 561 | 样例: 562 | 563 | ``` 564 | > curl -H "Accept:application/json" -XDELETE "172.17.5.6:8190/v1/job/8ce4b408-8c64-41b9-04d7-baf451348e89" 565 | ``` 566 | 567 | ## 暂停一个作业 和 继续一个作业 568 | 由于nomad plugin API限制,无法实现。需要通过删除来暂停job,重新创建来继续job。见[HTTP API](./4.4_http_api.md) 569 | -------------------------------------------------------------------------------- /4/4.4_http_api.md: -------------------------------------------------------------------------------- 1 | # HTTP API 说明 2 | 3 | (适用dtle 3.x nomad 插件) 4 | 5 | nomad 默认开启一个web服务,可使用curl工具向其发送HTTP请求。 6 | 7 | ## 作业管理 8 | 9 | 完整可参考 10 | - https://www.nomadproject.io/api-docs/jobs 11 | - https://www.nomadproject.io/api-docs/allocations 12 | 13 | 常用如下: 14 | 15 | ### 前置知识:nomad 中 job、task、alloc的概念 16 | 17 | job包含多个task。一个dtle job有src和dest两个task。 18 | 19 | task在nomad节点上的执行,称为allocation。同一个task的多次执行(如失败-重试)会创建多个allocation。 20 | 21 | ### 列出所有job 22 | ``` 23 | curl -XGET 127.0.0.1:4646/v1/jobs | jq 24 | ``` 25 | 26 | ### 添加job 27 | ``` 28 | curl -XPOST -data @job.json 127.0.0.1:4646/v1/jobs | jq 29 | ``` 30 | 31 | job.json的内容说明参看 [作业(job)配置](./4.3_job_configuration.md) 32 | 33 | ### 获取某个job信息 34 | ``` 35 | curl -XGET 127.0.0.1:4646/v1/job/ | jq 36 | ``` 37 | 38 | ### 列出某job的所有allocation 39 | ``` 40 | curl -XGET "127.0.0.1:4646/v1/job//allocations | jq 41 | ``` 42 | 43 | ### 查看某个allocation的执行状态 44 | ``` 45 | curl -XGET 
"127.0.0.1:4646/v1/allocation/" | jq 46 | ``` 47 | 48 | ### 区别任务处于全量还是增量状态 49 | ``` 50 | curl -XGET "127.0.0.1:4646/v1/job//allocations" | jq '.' | grep job_stage 51 | ``` 52 | ``` 53 | "DisplayMessage": "job_stage_full", 54 | "DriverMessage": "job_stage_full", 55 | "DisplayMessage": "job_stage_incr", 56 | "DriverMessage": "job_stage_incr", 57 | ``` 58 | 59 | - 当结果中 **只** 出现 job_stage_full 时,任务处于全量阶段 60 | - 当结果出现 job_stage_incr 时,任务处于增量阶段 61 | 62 | ### 停止(删除)job 63 | ``` 64 | curl -XDELETE 127.0.0.1:4646/v1/job/my-job 65 | # DELETE后job信息仍会在nomad上保留一段时间供查询,直到nomad自动回收(gc) 66 | # 指定purge可立刻删除 67 | curl -XDELETE 127.0.0.1:4646/v1/job/my-job?purge=true 68 | ``` 69 | 70 | dtle 3.x 移除了暂停/恢复job的功能. 可使用删除/添加job来达成相同的效果。 71 | - 注意保留添加job时使用的job配置文件 72 | - job删除后进度(Gtid)仍然保存在consul kv中 73 | - 位置:`dtle//Gtid` 74 | - 再次添加job时,consul中保存的Gtid优先于job配置中的项目 75 | 76 | 如果要后续添加同名job,并且不想从consul保存的位置继续(而是从job配置中指定的位置开始),则 77 | 需要删除consul重的数据。见 [consul 上的 job 数据管理](../3/3.10_consul.md)。 78 | 79 | ## 节点管理 80 | 81 | 更多可参考 82 | - https://www.nomadproject.io/api-docs/nodes 83 | - https://www.nomadproject.io/api-docs/status 84 | 85 | 列出所有节点: 86 | 87 | ``` 88 | curl -XGET "127.0.0.1:4646/v1/nodes" | jq 89 | [{ 90 | "Address": "127.0.0.1", 91 | "Datacenter": "dc1", 92 | "Drivers": { 93 | "dtle": { 94 | "Attributes": { 95 | "driver.dtle": "true", 96 | "driver.dtle.version": "9.9.9.9", 97 | "driver.dtle.full_version": "9.9.9.9-master-eeb399e9" 98 | }, 99 | "Detected": true, 100 | "Healthy": true, 101 | } 102 | }, 103 | "ID": "0e70636d-b274-c139-185e-e37dcf7a4bca", 104 | "Name": "nomad0", 105 | "Status": "ready", 106 | "Version": "0.11.2" 107 | }] 108 | ``` 109 | 110 | 可以查看节点名、节点ID和dtle插件信息(部分项省略)。 111 | -------------------------------------------------------------------------------- /4/4.5_mysql_user_privileges.md: -------------------------------------------------------------------------------- 1 | # MySQL 用户权限说明 2 | 3 | dtle配置的MySQL用户, 在使用不同功能时, 需具有以下权限 4 | 5 | ## 源端用户 6 | 7 | | 权限 | 功能说明 | 8 | | ------------- | ------------- | 9 | | select | 全量复制时, 对目标表需要`select`权限 | 10 | | replication client | 全量/增量复制时, 需执行`show master status` 获取binlog信息 | 11 | | replication slave | 增量复制时, 需要模拟 MySQL 复制 | 12 | 13 | ## 目标端用户 14 | 15 | | 权限 | 功能说明 | 16 | | ------------- | ------------- | 17 | | alter | 复制时处理DDL语句 | 18 | | create | 复制时处理DDL语句; 自动创建表结构功能; 自动创建目标端的GTID元数据表 | 19 | | drop | 复制时处理DDL语句 | 20 | | index | 复制时处理DDL语句 | 21 | | references | 复制时处理DDL语句 | 22 | | insert | 复制时处理DML语句; 修改目标端的GTID元数据表 | 23 | | delete | 复制时处理DML语句; 修改目标端的GTID元数据表 | 24 | | update | 复制时处理DML语句 | 25 | | select | 查询目标端的GTID元数据表 | 26 | | trigger | 进行目标端触发器检查 | 27 | 28 | 如果job中设置`SetGtidNext=true`, 则需要 replication_applier (MySQL 8.0) 或 super 权限。 29 | -------------------------------------------------------------------------------- /4/4.6_dtle_2to3.md: -------------------------------------------------------------------------------- 1 | # 从dtle 2.x升级到dtle 3.x 2 | 3 | dtle 2.x 和 3.x 的数据文件不兼容,直接升级无法保留进行中的job。(若无需保留的job,则可直接升级。) 4 | 5 | ## 升级步骤 6 | 7 | 8 | 1. 确保dtle 2.x运行中。 9 | 2. 使用导出脚本( [点此下载](https://raw.githubusercontent.com/actiontech/dtle/master/scripts/dtle-job-2to3.py) )将现有job导出 10 | - 如 `./dtle-job-2to3.py 127.0.0.1:8190` 11 | - 将在当前目录得到一系列job json文件。**需手动填写文件里的密码**。 12 | - 导出脚本主要意义在于保存复制进度。 13 | 3. 卸载dtle 2.x并删除数据目录。 14 | 4. 安装dtle 3.x。配置/etc/dtle/nomad.hcl, 开启兼容层(设定`api_addr`、`nomad_addr`) 15 | 5. 
运行 dtle 3.x。将导出的job配置提交到 dtle 兼容层端口 16 | - 如 `curl -XPOST -d @job1.json 127.0.0.1:8190` 17 | - 不要提交到nomad原生端口 18 | 19 | ## dtle 3.x 和 2.x的显著差异 20 | 21 | - 作为nomad插件运行 22 | - 需要另外启动consul 23 | - job.json格式差异 24 | - 默认端口不同 25 | - 可使用hcl格式job配置文件 26 | - 查询任务进度 27 | - "暂停/恢复job"被"删除/添加job"代替 28 | - 恢复需要根据之前的job.json(或hcl)添加job 29 | - 会自动从consul中储存的Gtid继续复制 30 | - 如果要重建同名job(并放弃进度),除了在nomad上删除,还需要在consul上删除 31 | 32 | ### `allocation//stats` 接口变更 33 | 34 | 由于nomad没有提供合适的API [#5863](https://github.com/hashicorp/nomad/issues/5863) ,我们暂且借用nomad alloc signal接口返回的错误信息来传递stats。 35 | 36 | ``` 37 | $ nomad alloc signal -s stats b0a227c1 # 或使用curl访问HTTP API 38 | $ curl -XPOST -d '{"Signal": "stats" }' 127.0.0.1:4646/v1/client/allocation/b0a227c1-b910-0eb1-2bb9-b8bfe7607adc/signal 39 | 40 | Error signalling allocation: Unexpected response code: 500 (1 error occurred: 41 | * Failed to signal task: Dest, err: rpc error: code = Unknown desc = { 42 | "CurrentCoordinates":{"File":"bin.000075","Position":18716, 43 | "GtidSet":"acd7d195-06cd-11e9-928f-02000aba3e28:1-143962", 44 | "RelayMasterLogFile":"","ReadMasterLogPos":0,"RetrievedGtidSet":""}, 45 | "TableStats":null,"DelayCount":null,"ProgressPct":"0.0","ExecMasterRowCount":0, 46 | "ExecMasterTxCount":0,"ReadMasterRowCount":0,"ReadMasterTxCount":0,"ETA":"N/A", 47 | "Backlog":"","ThroughputStat":null,"MsgStat":{"InMsgs":2,"OutMsgs":2,"InBytes":299, 48 | "OutBytes":0,"Reconnects":0},"BufferStat":{"ExtractorTxQueueSize":0, 49 | "ApplierTxQueueSize":0,"ApplierGroupTxQueueSize":0,"SendByTimeout":0, 50 | "SendBySizeFull":0},"Stage":"Waiting for slave workers to process their queues", 51 | "Timestamp":1599130915717858000} 52 | ``` 53 | -------------------------------------------------------------------------------- /4/4.7_diagnosing.md: -------------------------------------------------------------------------------- 1 | # 问题诊断 FAQ 2 | 3 | ## 通用问题 4 | 5 | 1. dtle.gtid_executed 表中是乱码 6 | 7 | 该表用uuid以binary储存以提升性能。注意查询方式[gtid_executed表](../3/3.3_impact_on_dest.md) 8 | 9 | ## 协助诊断 10 | 11 | 遇到问题,首先确认使用了最新稳定版dtle。 12 | 13 | 将以下内容提供给爱可生工程师,我们将帮助您诊断故障。 14 | 15 | ### 通用 16 | - job配置 17 | - 复制阶段(全量/增量) 18 | - 日志(请用gzip压缩) 19 | - 堆栈/内存/运行状态/pprof信息:执行`kill -TTIN {dtle_pid}`,dtle会自动生成信息文件,存放在`/tmp/dtle_dump_[date-time]`目录下 20 | 21 | ### 服务无法启动,无日志输出,使用如下命令查看std日志 22 | - `journalctl _SYSTEMD_UNIT=dtle-consul.service` 23 | - `journalctl _SYSTEMD_UNIT=dtle-nomad.service` 24 | 25 | ### 复制停顿、不开始 26 | - 任务有无报错 27 | - 修改日志级别为Debug 28 | 29 | ### 性能低、延迟大 30 | - 确认日志级别为Info。Debug日志会大幅降低性能。 31 | - 网络(带宽/延迟) 32 | - 监控项: 队列 33 | - 数据产生量 34 | - 部署结构(节点、dtle/mysql所在) 35 | 36 | ### 数据不一致 37 | - 不一致的具体表现、特征 38 | - consul中保存的dtle进度(gtid) 39 | - 目标端 dtle.gtid_executed 表的内容 [方法参考](../3/3.3_impact_on_dest.md) 40 | - 源端 show master status 结果 41 | - 表结构、是否有无PK表 42 | - 复制过程中是否有DDL 43 | - 解析源端binlog, 查找不一致数据出现的位置 44 | - 如为双向复制,需确保[业务上无数据冲突](../2/2.3_dc_to_dc_bidirectional.md#数据冲突) 45 | 46 | ### binlog purged 47 | 48 | 即类似如下报错 49 | 50 | > ERROR 1236 (HY000): The slave is connecting using CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires. 
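出现该报错时,可先在源端 MySQL 上判断任务所需的 GTID 是否确已被清除。以下为一个判断示意(假设已从 consul 中取得该 job 已复制的 GTID 集合,占位符需自行替换):

```sql
-- 查看源端已清除(purge)的 GTID 集合
SELECT @@gtid_purged;
-- 判断被清除的 GTID 是否都已被本 job 复制过
-- (占位符为假设值,取自 consul 中保存的 job 进度)
-- 返回 1:被清除的部分均已复制,任务通常可继续增量
-- 返回 0:所需 binlog 已被清除,通常需要重建全量任务
SELECT GTID_SUBSET(@@gtid_purged, '<job已复制的GTID集合>');
```

排查时请一并收集以下信息: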
51 | 52 | - 目标端 dtle.gtid_executed 表的内容 [方法参考](../3/3.3_impact_on_dest.md) 53 | - consul中储存的job gtid 54 | - MySQL `show master status;`、`show binary logs;`和`select @@gtid_purged;` 的结果 55 | -------------------------------------------------------------------------------- /4/images/4.4.1_execute.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/4/images/4.4.1_execute.png -------------------------------------------------------------------------------- /4/images/4.4.1_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/4/images/4.4.1_login.png -------------------------------------------------------------------------------- /4/images/4.4.1_response.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/4/images/4.4.1_response.png -------------------------------------------------------------------------------- /4/images/4.4.1_swagger_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/4/images/4.4.1_swagger_ui.png -------------------------------------------------------------------------------- /4/images/4.4.1_try_it_out.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/4/images/4.4.1_try_it_out.png -------------------------------------------------------------------------------- /5/5.1_resource_estimation.md: -------------------------------------------------------------------------------- 1 | # 时间/资源估算 2 | 3 | ## ETA (预计完成时间) 估算 4 | 5 | ### 源端 6 | 7 | - 全量过程, 公式为: 8 | ``` 9 | 总时间 = 已用时间 / 发送到目标端的行数 * 总行数 10 | 其中,总行数 = (select count(*) ...) 11 | 预计完成时间 = 总时间 - 已用时间 12 | 即:预计完成时间 = 剩余行数 / 当前发送速率 13 | ``` 14 | - 增量过程, ETA 一直为`0s` 15 | 16 | ### 目标端 17 | 18 | - 全量过程. 公式为: 19 | ``` 20 | 总时间 = 已用时间 / 已写入目标端的行数 * 总行数 21 | 预计完成时间 = 总时间 - 已用时间 22 | 即:预计完成时间 = 剩余行数 / 当前写入速率 23 | ``` 24 | - 增量过程, ETA 一直为`0s` 25 | 26 | ## 内存占用估算 27 | 28 | ``` 29 | 内存占用估算 = RowSize * ChunkSize * QueueSize * 内存占用系数 30 | ``` 31 | 32 | 其中: 33 | - RowSize为数据行的平均大小 (字节) 34 | - ChunkSize为[配置项](../4/4.3_job_configuration.md) 35 | - QueueSize为传输队列长度, 硬编码为24 36 | - 内存占用系数经测量约为常量3.2 37 | 38 | ## 关于大事务 39 | 40 | 大事务指传输、处理数据量较大的事务,一般由DML组成。DDL事务(QueryEvent)不会太大,尽管某些DDL需要较长的执行时间。 41 | 42 | 对于一个多行的大事务,dtle会按行分开处理、传输并执行(但在目标端仍作为一个事务提交)。 43 | 44 | 当一个job处理大事务时,需要等待该段数据在目标端执行完毕才会获取下一批数据。 45 | 46 | 为了避免MySQL源端超时,等待时间的上限是 `@@net_write_timeout / 2`。 47 | -------------------------------------------------------------------------------- /5/5.2_architecture.md: -------------------------------------------------------------------------------- 1 | # dtle 架构 2 | 3 | nomad角色分为 server、client. 4 | - server数量应为1、3或5个 5 | - client数量不限 6 | - 至少需要1个server和1个client 7 | - 一个nomad进程可同时扮演 server 和 client 8 | 9 | 任务分为源端任务和目标端任务, 各由client执行. 通过网络压缩传输数据. 
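以下为一个最小化的 nomad 节点配置示意(假设性示例,名称、路径等均为假设值),对应上述“一个nomad进程可同时扮演 server 和 client”的部署形态:

```hcl
# 单节点同时启用 server 与 client(示意)
name       = "nomad-single"
datacenter = "dc1"
data_dir   = "/var/lib/nomad"

server {
  enabled          = true
  bootstrap_expect = 1  # 单 server;按上文,生产环境 server 数量应为 1、3 或 5 个
}

client {
  enabled = true  # dtle 插件运行在 client 上
}
```

架构示意图: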
10 | 11 | ![](dtle-architecture.png) 12 | -------------------------------------------------------------------------------- /5/5.3_kafka_message_format.md: -------------------------------------------------------------------------------- 1 | # Kafka 消息格式 2 | 3 | dtle Kafka 输出, 消息格式兼容 [Debezium](https://debezium.io) 4 | 5 | 其消息格式具体可参考 https://debezium.io/documentation/reference/1.8/tutorial.html 6 | 7 | 此处概要说明 8 | - 每行数据变更会有一个消息 9 | - 每个消息分为key和value 10 | - key是该次变更的主键 11 | - value是该次变更的整行数据 12 | - key和value各自又有schema和payload 13 | - payload是具体的数据 14 | - schema指明了数据的格式, 即payload的解读方式, 可以理解为“类定义” 15 | - 注意和SQL schema含义不同 16 | - 表结构会包含在 Kafka Connect schema 中 17 | 18 | ## DML 19 | 20 | ### Key 21 | 22 | 以下是一个消息的key. 只是简单的包含了主键. 23 | 24 | ```json 25 | { 26 | "schema": { 27 | "type": "struct", 28 | "name": "dbserver1.inventory.customers.Key" 29 | "optional": false, 30 | "fields": [ 31 | { 32 | "field": "id", 33 | "type": "int32", 34 | "optional": false 35 | } 36 | ] 37 | }, 38 | "payload": { 39 | "id": 1004 40 | } 41 | } 42 | ``` 43 | 44 | ### Value 45 | 46 | 以下是一个消息的value, 其类型为 `topic.schema.table.Envelope`, 拥有5个字段 47 | 48 | - `before`, 复杂类型 `topic.schema.table.Value`, 为该表的表结构. 49 | - `after`, 复杂类型, 同上 50 | - `source`, 复杂类型, 为该次变更的元数据 51 | - `op`: `string`. 用"c", "d", "u" 分别表达操作类型: 增、删、改 52 | - `ts_ms`: `int64`. dtle 处理该行变更的时间. 53 | 54 | ```json 55 | { 56 | "schema": { 57 | "type": "struct", 58 | "fields": [ 59 | { 60 | "type": "struct", 61 | "fields": [ 62 | { 63 | "type": "int32", 64 | "optional": false, 65 | "field": "id" 66 | }, 67 | { 68 | "type": "string", 69 | "optional": false, 70 | "field": "first_name" 71 | }, 72 | { 73 | "type": "string", 74 | "optional": false, 75 | "field": "last_name" 76 | }, 77 | { 78 | "type": "string", 79 | "optional": false, 80 | "field": "email" 81 | } 82 | ], 83 | "optional": true, 84 | "name": "dbserver1.inventory.customers.Value", 85 | "field": "before" 86 | }, 87 | { 88 | "type": "struct", 89 | "fields": [ 90 | { 91 | "type": "int32", 92 | "optional": false, 93 | "field": "id" 94 | }, 95 | { 96 | "type": "string", 97 | "optional": false, 98 | "field": "first_name" 99 | }, 100 | { 101 | "type": "string", 102 | "optional": false, 103 | "field": "last_name" 104 | }, 105 | { 106 | "type": "string", 107 | "optional": false, 108 | "field": "email" 109 | } 110 | ], 111 | "optional": true, 112 | "name": "dbserver1.inventory.customers.Value", 113 | "field": "after" 114 | }, 115 | { 116 | "type": "struct", 117 | "fields": [ 118 | { 119 | "type": "string", 120 | "optional": true, 121 | "field": "version" 122 | }, 123 | { 124 | "type": "string", 125 | "optional": false, 126 | "field": "name" 127 | }, 128 | { 129 | "type": "int64", 130 | "optional": false, 131 | "field": "server_id" 132 | }, 133 | { 134 | "type": "int64", 135 | "optional": false, 136 | "field": "ts_sec" 137 | }, 138 | { 139 | "type": "string", 140 | "optional": true, 141 | "field": "gtid" 142 | }, 143 | { 144 | "type": "string", 145 | "optional": false, 146 | "field": "file" 147 | }, 148 | { 149 | "type": "int64", 150 | "optional": false, 151 | "field": "pos" 152 | }, 153 | { 154 | "type": "int32", 155 | "optional": false, 156 | "field": "row" 157 | }, 158 | { 159 | "type": "boolean", 160 | "optional": true, 161 | "field": "snapshot" 162 | }, 163 | { 164 | "type": "int64", 165 | "optional": true, 166 | "field": "thread" 167 | }, 168 | { 169 | "type": "string", 170 | "optional": true, 171 | "field": "db" 172 | }, 173 | { 174 | "type": "string", 175 | "optional": true, 176 | "field": "table" 177 | } 178 | 
], 179 | "optional": false, 180 | "name": "io.debezium.connector.mysql.Source", 181 | "field": "source" 182 | }, 183 | { 184 | "type": "string", 185 | "optional": false, 186 | "field": "op" 187 | }, 188 | { 189 | "type": "int64", 190 | "optional": true, 191 | "field": "ts_ms" 192 | } 193 | ], 194 | "optional": false, 195 | "name": "dbserver1.inventory.customers.Envelope", 196 | "version": 1 197 | }, 198 | "payload": { 199 | "before": null, 200 | "after": { 201 | "id": 1004, 202 | "first_name": "Anne", 203 | "last_name": "Kretchmar", 204 | "email": "annek@noanswer.org" 205 | }, 206 | "source": { 207 | "version": "0.8.3.Final", 208 | "name": "dbserver1", 209 | "server_id": 0, 210 | "ts_sec": 0, 211 | "gtid": null, 212 | "file": "mysql-bin.000003", 213 | "pos": 154, 214 | "row": 0, 215 | "snapshot": true, 216 | "thread": null, 217 | "db": "inventory", 218 | "table": "customers" 219 | }, 220 | "op": "c", 221 | "ts_ms": 1486500577691 222 | } 223 | } 224 | ``` 225 | 226 | ## DDL (SchemaChangeTopic) 227 | 228 | dtle会将DDL写入SchemaChangeTopic。该topic值可配置. 229 | 230 | Schema change消息中,key永远为`null`, 仅 value部分有值: 231 | 232 | ```json 233 | { 234 | "source" : { 235 | "server" : "mysql2" 236 | }, 237 | "position" : { 238 | "ts_sec" : 1641807976, 239 | "file" : "bin.000022", 240 | "pos" : 439, 241 | "gtids" : "acd7d195-06cd-11e9-928f-02000aba3e28:1-175", 242 | "snapshot" : true 243 | }, 244 | "databaseName" : "a", 245 | "ddl" : "CREATE TABLE `a` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1", 246 | "tableChanges" : [ { 247 | "type" : "CREATE", 248 | "id" : "\"a\".\"a\"", 249 | "table" : { 250 | "defaultCharsetName" : "latin1", 251 | "primaryKeyColumnNames" : [ "id" ], 252 | "columns" : [ { 253 | "name" : "id", 254 | "jdbcType" : 4, 255 | "typeName" : "INT", 256 | "typeExpression" : "INT", 257 | "charsetName" : null, 258 | "length" : 11, 259 | "position" : 1, 260 | "optional" : false, 261 | "autoIncremented" : true, 262 | "generated" : true 263 | } ] 264 | } 265 | } ] 266 | } 267 | ``` 268 | 269 | 其中: 270 | - `position.snapshot==true`表明这是全量初始化时的表结构(通过`show create table`等生成)。 271 | - `position.snapshot==false`则表明:这是增量过程中执行的DDL。 272 | 273 | 注:`tableChanges`结构在dtle中尚未实现。 274 | 275 | ## MySQL数据类型到 “Kafka Connect schema types”的转换 276 | 见 https://debezium.io/docs/connectors/mysql/#data-types 277 | 278 | -------------------------------------------------------------------------------- /5/5.4_columns_mapping.md: -------------------------------------------------------------------------------- 1 | # Oracle MySQL 字段映射 2 | 3 | ## 字段类型 4 | 5 | | Oracle | MySQL | 全量支持| 增量支持| 限制 | 后期是否考虑优化/支持| 6 | | --- | --- | --- | --- | --- | --- | 7 | | BINARY_FLOAT| float | 否 |否(insert 支持) | mysql 不支持Inf/-Inf/Nan数据,MySQL float类型无法精确匹配) | 是 | 8 | | BINARY_DOUBLE | float |是| 是|mysql 不支持Inf/-Inf/Nan数据 | | 9 | | CHAR(n), CHARACTER(n) |CHAR(n), CHARACTER(n) |是| 是| || 10 | | DATE | datetime | 是 | 是|MySQL 最大长度限制为6,Oracle为9| | 11 | | DECIMAL(p,s), DEC(p,s) | DECIMAL(p,s), DEC(p,s) | 是| 是 | | | 12 | | DOUBLE PRECISION | DOUBLE PRECISION | 否 |是| || 13 | | FLOAT(p) | DOUBLE | 是 |是| || 14 | | INTEGER, INT | INT | 是 |是| [极值问题](https://github.com/actiontech/dtle/issues/825)|| 15 | | INTERVAL YEAR(p) TO MONTH | VARCHAR(30) | 是 |是| 部分结果异常|是| 16 | | INTERVAL DAY(p) TO SECOND(s) | VARCHAR(30) | 是 |是|同步结果以纳秒保存 |是| 17 | | NCHAR(n) | NCHAR(n)/NVARCHAR(n) | 是 |是| | | 18 | | NCHAR VARYING(n) | NCHAR VARYING(n) | 是 |是| | | 19 | | NUMBER(p,0), NUMBER(p) | 
TINYINT/SMALLINT/INT/BIGINT/DECIMAL(p) | 是 |是| | | 20 | | NUMBER(p,s) | DECIMAL(p,s) | 是| 是| | | 21 | | NUMBER, NUMBER(\*) | DOUBLE | 是| 是| | | 22 | | NUMERIC(p,s) | NUMERIC(p,s) | 是| 是| | | 23 | | NVARCHAR2(n) | NVARCHAR(n) | 是| 是| | | 24 | | RAW(n) | VARBINARY(n) | 是 |是| | | 25 | | REAL | DOUBLE | 是 |是| | | 26 | | ROWID | CHAR(100) | 是 |是| | | 27 | | SMALLINT | DECIMAL(38) | 是 |是| | | 28 | | TIMESTAMP(p) | datetime | 是 |是| | | 29 | | VARCHAR(n) | VARCHAR(n) | 是 |是| || 30 | | VARCHAR2(n) | VARCHAR(n) | 是 |是| || 31 | | BLOB | BLOB | 否 |否| 当前解析逻辑无法获取完整数据|| 32 | | CLOB | CLOB | 否|否 |当前解析逻辑无法获取完整数据| | 33 | | LONG | LONGTEXT | 否 | 否(insert 支持) | 不支持minus查询 | 是| 34 | | LONG RAW | LONGBLOB | 否 |否(insert 支持) | | 是| 35 | | NCLOB | NCLOB | 否 |否| 当前解析逻辑无法获取完整数据|是| 36 | | TIMESTAMP(p) WITH TIME ZONE | datetime | 否 | 否 | 时区未解析 | 是 | 37 | | BFILE | | 否 | 否 | [logminer不支持](https://docs.oracle.com/cd/B19306_01/server.102/b14215/logminer.htm#:~:text=set%20to%2010.2.0.0.-,Unsupported%20Datatypes%20and%20Table%20Storage%20Attributes,-LogMiner%20does%20not) | 38 | |UROWID(n) | | 否 | 否 | logminer解析异常| 否 | 39 | |XMLTYPE | | 否 | 否| [logminer不支持](https://docs.oracle.com/cd/B19306_01/server.102/b14215/logminer.htm#:~:text=set%20to%2010.2.0.0.-,Unsupported%20Datatypes%20and%20Table%20Storage%20Attributes,-LogMiner%20does%20not)| 否| 40 | 41 | 42 | 43 | 44 | 45 | ### 待支持 46 | 47 | | Oracle | MySQL | 是否支持|不支持原因 | 后期是否考虑支持 | 48 | | --- | --- | --- | --- | --- | 49 | 50 | 51 | ### 不支持 52 | 53 | | Oracle | 是否支持|不支持原因 | 54 | | --- | --- | --- | 55 | | BFILE | 否| [logminer不支持](https://docs.oracle.com/cd/B19306_01/server.102/b14215/logminer.htm#:~:text=set%20to%2010.2.0.0.-,Unsupported%20Datatypes%20and%20Table%20Storage%20Attributes,-LogMiner%20does%20not) | 56 | |UROWID(n) | 否 | logminer解析异常| 57 | |XMLTYPE | 否 | [logminer不支持](https://docs.oracle.com/cd/B19306_01/server.102/b14215/logminer.htm#:~:text=set%20to%2010.2.0.0.-,Unsupported%20Datatypes%20and%20Table%20Storage%20Attributes,-LogMiner%20does%20not)| 58 | -------------------------------------------------------------------------------- /5/dtle-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/5/dtle-architecture.png -------------------------------------------------------------------------------- /6/howto_contribute.md: -------------------------------------------------------------------------------- 1 | # 如何参与 2 | 3 | ## 提交缺陷 4 | 5 | 可直接在[github issues页面](https://github.com/actiontech/dtle/issues/) 新建 issue, 选择 `Bug Report` 模板, 按格式填写完成后提交即可 6 | 7 | ## 提交功能 8 | 9 | 可直接在[github issues页面](https://github.com/actiontech/dtle/issues/) 新建 issue, 选择 `Feature request` 模板, 按格式填写完成后提交即可 10 | 11 | ## 提交代码 12 | 13 | 按照github的[pull request流程](https://help.github.com/articles/creating-a-pull-request/)即可 14 | 15 | # 如何全职参与 16 | 17 | 本项目的维护方([上海爱可生信息技术股份有限公司](www.actionsky.com))一直在招聘 靠谱的研发工程师/靠谱的测试工程师. 如果通过dtle, 您对全职参与类似的项目有兴趣, 请联系[我们的研发团队](mailto:huangyan@actionsky.com). 
-------------------------------------------------------------------------------- /7/7_roadmap.md: -------------------------------------------------------------------------------- 1 | # 路线图 2 | - 生态 3 | - [ ] 支持MGR Primary切换 4 | - [ ] 对于 MySQL分布式中间件 (如dble) 提供数据扩容方案 5 | - [ ] 复制到Kafka的数据格式支持Avro 6 | - [ ] 支持更多种类的公有云间的数据迁移 7 | 8 | - ETL-E 9 | - [ ] WHERE 过滤条件 支持更丰富的函数 (目前仅支持关系符和简单函数) 10 | - [ ] *oracle* 11 | - [ ] 动态增减同步对象 12 | 13 | - ETL-T 14 | - [ ] 列名变换 15 | - [ ] WHERE 过滤条件 支持更丰富的函数 (目前仅支持关系符和简单函数) 16 | - [ ] 数据变换 17 | - [x] 库.表名变换 (2.19.11.0+) 18 | - [x] 列选择、列顺序变换 (3.20.08.0+) 19 | 20 | - ETL-L 21 | - [ ] 支持MGR Primary切换 actiontech/dtle#541 22 | - [x] MTS (actiontech/dtle#688) 23 | 24 | - 链路管理 25 | - [ ] 对链路提供限流参 26 | - [ ] 支持2G级别的大事务 27 | - [ ] 限流 28 | - [ ] 加密 29 | - [ ] IPv6 actiontech/dtle#600 30 | 31 | - 运维 32 | - [ ] 提供告警功能 33 | - [ ] DTLE 容灾 34 | 35 | - 非技术指标 36 | - [ ] 支持2G级别的大事务 37 | - [x] 一致性DDL元数据 (2.19.03.0+ actiontech/dtle#321) 38 | - [ ] 免一致性快照事务的全量复制 39 | - [ ] 全量复制也可断点续传 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. "Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. 
"Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. 
Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. 
Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. 
Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. 
331 | distinguishing version number.
332 | 
333 | 10.2. Effect of New Versions
334 | 
335 | You may distribute the Covered Software under the terms of the version
336 | of the License under which You originally received the Covered Software,
337 | or under the terms of any subsequent version published by the license
338 | steward.
339 | 
340 | 10.3. Modified Versions
341 | 
342 | If you create software not governed by this License, and you want to
343 | create a new license for such software, you may create and use a
344 | modified version of this License if you rename the license and remove
345 | any references to the name of the license steward (except to note that
346 | such modified license differs from this License).
347 | 
348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary
349 | Licenses
350 | 
351 | If You choose to distribute Source Code Form that is Incompatible With
352 | Secondary Licenses under the terms of this version of the License, the
353 | notice described in Exhibit B of this License must be attached.
354 | 
355 | Exhibit A - Source Code Form License Notice
356 | -------------------------------------------
357 | 
358 | This Source Code Form is subject to the terms of the Mozilla Public
359 | License, v. 2.0. If a copy of the MPL was not distributed with this
360 | file, You can obtain one at http://mozilla.org/MPL/2.0/.
361 | 
362 | If it is not possible or desirable to put the notice in a particular
363 | file, then You may include the notice in a location (such as a LICENSE
364 | file in a relevant directory) where a recipient would be likely to look
365 | for such a notice.
366 | 
367 | You may add additional accurate notices of copyright ownership.
368 | 
369 | Exhibit B - "Incompatible With Secondary Licenses" Notice
370 | ---------------------------------------------------------
371 | 
372 | This Source Code Form is "Incompatible With Secondary Licenses", as
373 | defined by the Mozilla Public License, v. 2.0.
374 | 
-------------------------------------------------------------------------------- /Makefile: --------------------------------------------------------------------------------
1 | # Build the gitbook site and publish it to the gh-pages branch (default target).
2 | default: gitbook_build publish
3 | # As above, plus regenerating and committing the PDF.
4 | default_with_pdf: default gitbook_pdf_commit
5 | publish: publish_prepare publish_push
6 | 
7 | # Serve a live preview at http://localhost:4000 using the gitbook docker image.
8 | gitbook_preview:
9 | 	docker run --rm -v "${PWD}":/gitbook -p 4000:4000 billryan/gitbook:zh-hans gitbook serve
10 | # Install the gitbook plugins and render the static site into _book/.
11 | gitbook_build:
12 | 	docker run --rm -v "${PWD}":/gitbook -p 4000:4000 billryan/gitbook:zh-hans /bin/bash -c "gitbook install && gitbook build"
13 | 
14 | # Render the whole book as a single PDF.
15 | gitbook_pdf:
16 | 	docker run --rm -v "${PWD}":/gitbook -p 4000:4000 billryan/gitbook:zh-hans gitbook pdf ./ ./dtle-manual.pdf
17 | 
18 | # Regenerate the PDF and commit it to the current branch.
19 | gitbook_pdf_commit: gitbook_pdf
20 | 	git add .
21 | 	git commit -a -m "Update pdf"
22 | 	git push
23 | 
24 | # Switch to gh-pages, copy the freshly built site over, and stage the changes.
25 | publish_prepare:
26 | 	git checkout gh-pages
27 | 	git pull origin gh-pages --rebase
28 | 	cp -R _book/* . || true
29 | 	# git clean -f node_modules
30 | 	git clean -fx _book
31 | 	git add .
32 | 	git commit -a -m "Update docs"
33 | 
34 | # Push gh-pages and switch back to master.
35 | publish_push:
36 | 	git push origin gh-pages
37 | 	git checkout master
38 | 
-------------------------------------------------------------------------------- /QR_code.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/QR_code.png
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # dtle Chinese Technical Reference Manual
2 | 
3 | ## Table of Contents
4 | See the table of contents in the left sidebar of the [gitbook](https://actiontech.github.io/dtle-docs-cn), or [SUMMARY.md](https://github.com/actiontech/dtle-docs-cn/blob/master/SUMMARY.md)
5 | 
6 | ## PDF Download
7 | [Download the PDF](https://github.com/actiontech/dtle-docs-cn/raw/pdf/dtle-manual.pdf)
8 | 
9 | ## Official Technical Support
10 | - Code repository on github: [github.com/actiontech/dtle](https://github.com/actiontech/dtle)
11 | - Documentation repository on github: [github.com/actiontech/dtle-docs-cn](https://github.com/actiontech/dtle-docs-cn)
12 | - Documentation on github pages: [actiontech.github.io/dtle-docs-cn](https://actiontech.github.io/dtle-docs-cn)
13 | - QQ group: 852990221
14 | - Website: [ActionTech Open Source Community](https://opensource.actionsky.com)
15 | - WeChat official account of the open source community:
16 | 
17 | ![QR_code](./QR_code.png)
18 | 
19 | ## Contact Us
20 | For commercial support of dtle, you can contact us:
21 | * Nationwide: 400-820-6580
22 | * North China: 86-13910506562, Mr. Wang
23 | * South China: 86-18503063188, Mr. Cao
24 | * East China: 86-18930110869, Mr. Liang
25 | * Southwest China: 86-13540040119, Mr. Hong
26 | 
-------------------------------------------------------------------------------- /SUMMARY.md: --------------------------------------------------------------------------------
1 | # Summary
2 | 
3 | * [0. Overview](0/0_overview.md)
4 | * 1. Applicable Scenarios
5 |     * [1.0. MySQL unidirectional replication / aggregation / distribution](1/1.0_mysql_replication.md)
6 |     * [1.1. Bidirectional replication across data centers](1/1.1_bidirectional_replication.md)
7 |     * [1.2. Data synchronization between public clouds](1/1.2_sync_between_cloud.md)
8 |     * [1.3. MySQL-to-Kafka data change notification](1/1.3_mysql_kafka.md)
9 |     * [1.4. Oracle-to-MySQL data synchronization](1/1.4_oracle_mysql.md)
10 | * 2. Quick Start
11 |     * [2.0. MySQL unidirectional replication](2/2.0_mysql_replication_1_1.md)
12 |     * [2.0.1. HTTP API, the nomad CLI and the web UI](2/2.0.1_interface.md)
13 |     * [2.1. MySQL aggregated replication](2/2.1_mysql_replication_n_1.md)
14 |     * [2.2. MySQL data distribution](2/2.2_mysql_replication_1_n.md)
15 |     * [2.3. MySQL bidirectional replication across data centers](2/2.3_dc_to_dc_bidirectional.md)
16 |     * [2.4. MySQL replication from Alibaba Cloud to JD Cloud](2/2.4_ali_to_jd.md)
17 |     * [2.5. MySQL-to-Kafka data change notification](2/2.5_mysql_kafka.md)
18 |     * [2.6. Oracle-to-MySQL data synchronization](2/2.6_oracle_mysql.md)
19 |     * [2.7. Multiple nomad server deployment](2/2.7_multi_node.md)
20 | * 3. Features
21 |     * [3.0. Feature/scenario mapping](3/3.0_function_scenario_mapping.md)
22 |     * [3.1. Limitations](3/3.1_limitation.md)
23 |     * [3.2. Port usage](3/3.2_ports.md)
24 |     * [3.3. Impact on the target database (the gtid_executed table)](3/3.3_impact_on_dest.md)
25 |     * [3.4. Metrics](3/3.4_metrics.md)
26 |     * [3.4.1. Delay monitoring and alerting](3/3.4.1_delay_alert.md)
27 |     * [3.4.2. Setting up a monitoring system](3/3.4.2_monitor.md)
28 |     * [3.5. Deployment structure](3/3.5_deployment.md)
29 |     * [3.6. DDL support](3/3.6_DDL.md)
30 |     * [3.7. DCL support](3/3.7_DCL.md)
31 |     * [3.8. dtle mapping support](3/3.8_dtle_mapping.md)
32 |     * [3.9. Binlog relay](3/3.9_binlog_relay.md)
33 |     * [3.10. Job data management on consul](3/3.10_consul.md)
34 |     * [3.11. Oracle-MySQL synchronization support](3/3.11_oracle_mysql.md)
35 | * 4. Installation/Configuration
36 |     * [4.0. Installation steps](4/4.0_installation.md)
37 |     * [4.1. Node configuration](4/4.1_node_configuration.md)
38 |     * [4.2. Commands](4/4.2_command.md)
39 |     * [4.3. Job configuration](4/4.3_job_configuration.md)
40 |     * [4.3.1. Performance tuning](4/4.3.1_tuning.md)
41 |     * [4.3.2. Job samples](4/4.3.2_job_sample.md)
42 |     * [4.4. HTTP API](4/4.4_http_api.md)
43 |     * [4.4.1. dtle 3.x HTTP API reference](4/4.4.1_dtle_http_api.md)
44 |     * [4.5. MySQL user privileges](4/4.5_mysql_user_privileges.md)
45 |     * [4.6. Upgrading dtle 2 to 3](4/4.6_dtle_2to3.md)
46 |     * [4.7. Troubleshooting FAQ](4/4.7_diagnosing.md)
47 | * 5. Design
48 |     * [5.1. Time/resource estimation](5/5.1_resource_estimation.md)
49 |     * [5.2. Architecture](5/5.2_architecture.md)
50 |     * [5.3. Kafka message format](5/5.3_kafka_message_format.md)
51 |     * [5.4. Oracle-MySQL column mapping](5/5.4_columns_mapping.md)
52 | * [6. How to contribute](6/howto_contribute.md)
53 | * [7. Roadmap](7/7_roadmap.md)
54 | 
-------------------------------------------------------------------------------- /book.json: --------------------------------------------------------------------------------
1 | {
2 |     "plugins": ["expandable-chapters"],
3 |     "pluginsConfig": {
4 |         "fontsettings": {
5 |             "theme": "sepia",
6 |             "family": "sans",
7 |             "size": 2
8 |         }
9 |     },
10 |     "styles": {
11 |         "website": "website.css"
12 |     },
13 |     "author": "ActionTech",
14 |     "title": "dtle manual"
15 | }
16 | 
-------------------------------------------------------------------------------- /dtle-manual.pdf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/actiontech/dtle-docs-cn/ed09c9816b6c10a6c71425620d25ebbe6c2ba6f6/dtle-manual.pdf
-------------------------------------------------------------------------------- /package-lock.json: --------------------------------------------------------------------------------
1 | {
2 |   "lockfileVersion": 1
3 | }
4 | 
-------------------------------------------------------------------------------- /website.css: --------------------------------------------------------------------------------
1 | /* fix https://github.com/GitbookIO/gitbook/issues/1626 */
2 | .markdown-section table {
3 |     table-layout: fixed;
4 |     display: block;
5 |     overflow-x: auto;
6 | }
7 | 
--------------------------------------------------------------------------------