├── .gitignore
├── Dockerfile
├── README.md
├── app-example.toml
├── binlog-payload
│   ├── parse.go
│   └── payload.go
├── ca-cert
├── conf
│   ├── config.go
│   └── config_test.go
├── docker-compose.yml
├── gkafka
│   └── gkafka.go
├── go-mysql-kafka.go
├── go.mod
├── go.sum
├── gredis
│   ├── gredis.go
│   └── gredis_test.go
├── holder
│   └── holder.go
├── img-assets
│   └── 01.png
├── mapper
│   └── mapper.go
├── pkg
│   └── file
│       └── file.go
└── sync_manager
    ├── mapper.go
    ├── master.go
    ├── metrics.go
    ├── river.go
    ├── sink.go
    └── sync.go

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
idea
.idea/*
.vscode
.vscode/*
app.toml

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM golang:1.12-alpine AS builder

RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN apk --update --no-cache add git mercurial subversion bzr ca-certificates
ENV GOPROXY=https://goproxy.io
WORKDIR /app
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -o go-mysql-kafka .


FROM alpine:3.10
WORKDIR /usr/local/bin
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN mkdir -p /usr/local/bin/config
COPY --from=builder /app/go-mysql-kafka /usr/local/bin/
COPY --from=builder /app/*.toml /usr/local/bin/
ENTRYPOINT ["go-mysql-kafka"]
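The Dockerfile above is a standard two-stage build. A plausible way to build and run it locally (the image tag is arbitrary; the `-cfg` flag and its `app.toml` default come from `go-mysql-kafka.go`):

```shell script
# build the static binary into a small alpine image
docker build -t go-mysql-kafka .

# run it with your own config mounted over the baked-in example
docker run --rm -p 8000:8000 \
  -v $(pwd)/app.toml:/usr/local/bin/app.toml \
  go-mysql-kafka -cfg app.toml

# or use the bundled compose file
docker-compose up -d
```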
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# go-mysql-kafka
Listens to the MySQL binlog, parses it into JSON, and delivers it to Kafka.

### How to use

##### Workflow
1. go-canal listens to the binlog of the source database

2. The database produces binlog events, which are filtered down to the watched tables

3. The binlog event is parsed into JSON

4. The JSON message is delivered to Kafka

5. Downstream consumers can forward the Kafka messages into any storage they like

![Workflow](img-assets/01.png)

##### Prerequisites
* redis
* kafka: 2.0+
* MySQL: 5.6+

##### Confirm that binlog is enabled on the database
```shell script
MySQL [(none)]> show variables like 'log_bin';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| log_bin       | ON    |
+---------------+-------+
1 row in set (0.01 sec)
```

```shell script
MySQL [(none)]> show master logs;
+------------------+-----------+
| Log_name         | File_size |
+------------------+-----------+
| mysql-bin.001403 |  25309244 |
| mysql-bin.001404 |  35811848 |
| mysql-bin.001405 |  46125076 |
| mysql-bin.001406 | 231169453 |
| mysql-bin.001407 |  20879902 |
| mysql-bin.001408 |  35196980 |
+------------------+-----------+
6 rows in set (0.01 sec)
```

##### Configure go-mysql-kafka
```toml
debug = true
env = "dev"
[sourceDB]
# <database host>
host = ""
port = 3306
username = "root"
# <database password>
password = ""
charset = ""
# server id like a slave; it also seeds the message-ID generator, so make sure it is unique if there are replicas
serverID = 130
flavor = "mysql"
mysqldump = ""
# minimal items to be inserted in one bulk
bulkSize = 128
flushBulkTime = 200
skipNoPkTable = false
skipMasterData = false
# store the binlog position in a local file; generally not recommended
dataDir = ""

# tables to watch
[[sourceDB.sources]]
schema = "otam"
tables = ["student_sign_in_copy"]

#[[sourceDB.sources]]
#schema = "fonzie"
#tables = ["t", "t_[0-9]{4}", "tfield", "tfilter"]

#[[sourceDB.rule]]
#schema = "test"
#table = "t"
#index = "test"
#type = "t"

[http]
statAddr = "127.0.0.1:8000"
statPath = "/metrics"

[redis]
# redis address
host = "xxxxxxxxxxxxxxx:6379"
# password
password = "xxxxxxxxxxxxxxx"
maxIdle = 30
maxActive = 30
idleTimeout = 200
# TTL of the binlog position in redis; this feature is currently unused
binlogTimeout = 900
# key prefix used in redis
binlogPrefix = "CONFIG:xxxxxxxx-mysql-sync"

# mapping for sharded schemas and tables
[mapper]
# only DRDS is supported for now; after DRDS sharding the schema looks like schemas_name.00001
schemas = ["xxxxxxxxxxxx"]

[kafka]
# kafka brokers
brokers = ["xxxxxxxxxxxx:9093"]
# kafka version
version = "2.1.0"
# skip certificate verification; only takes effect when saslEnable is on
insecureSkipVerify = true
# enable SASL/SSL access
saslEnable = false
# username
username = "alikafka_post-cn-xxxxxxx"
# password
password = "xxxxxxxxxxxxxx"
# CA certificate; Aliyun Kafka public-endpoint demo: https://github.com/AliwareMQ/aliware-kafka-demos
# docs: https://help.aliyun.com/document_detail/99958.html?spm=5176.11065259.1996646101.searchclickresult.5fcc31f8N8MlXQ
certFile = "ca-cert"




[kafka.producer]
# how many acknowledgements to wait for; NoResponse: 0, WaitForLocal: 1, WaitForAll: -1 (leader and followers must both confirm)
requiredAcks = 1
# the partitioner controls how messages are distributed across partitions
# Manual: always deliver to partition 0, usually for strict ordering
# RoundRobin: round-robin
# Random: random
# Hash: hash of the message key
# ReferenceHash:
# defaults to round-robin
PartitionerType = "default"
# whether to wait for success/error responses; only meaningful when requiredAcks above is not NoResponse
returnSuccesses = true
returnErrors = true
# number of retries for failed deliveries
retryMax = 5

# for kafka versions below 0.11 this must not be empty
[[kafka.producer.headers]]
key = "xxxxxxxxx"
value = "xxxxxxxxxxxxxx"

# the target topic defaults to the table name; map a table to a specific topic here
[[kafka.producer.mapper]]
sourceTable = "student_sign_in_copy"
topic = "student_sign_in_fonzie_copy"
```


### Credits && thanks
* [bailaohe](https://github.com/bailaohe)
* [go-mysql](https://github.com/siddontang/go-mysql)
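For reference, a delivered message body for a single-row UPDATE event looks roughly like this. Field names follow the `DBSyncPayload` struct in `binlog-payload/payload.go`; the row values here are invented, and the schema/position fields (`columns`, `pkColumn`, `logFile`, `logfileOffset`, `ts`) are currently left at their zero values by `ParsePayload`:

```json
{
  "eventType": "UPDATE",
  "db": "otam",
  "table": "student_sign_in_copy",
  "columns": null,
  "pkColumn": "",
  "logFile": "",
  "logfileOffset": "",
  "ts": 0,
  "rows": [
    {
      "snapshot": {"id": 1, "status": 2},
      "preUpdate": {"status": 1}
    }
  ],
  "columnsChanged": ["status"]
}
```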
--------------------------------------------------------------------------------
/app-example.toml:
--------------------------------------------------------------------------------
debug = true
env = "dev"
[sourceDB]
host = "xxxxxxxxx"
port = 3306
username = "xxxxxx"
password = "xxxxxx"
charset = ""
# server id like a slave; also seeds the generator for kafka message IDs
serverID = 130
flavor = "mysql"
mysqldump = ""
# minimal items to be inserted in one bulk
bulkSize = 128
flushBulkTime = 200
skipNoPkTable = false
skipMasterData = false

# tables to watch
[[sourceDB.sources]]
schema = "xxxxx"
tables = ["xxxxxx"]

#[[sourceDB.sources]]
#schema = "fonzie"
#tables = ["t", "t_[0-9]{4}", "tfield", "tfilter"]

#[[sourceDB.rule]]
#schema = "test"
#table = "t"
#index = "test"
#type = "t"

[http]
statAddr = "127.0.0.1:8000"
statPath = "/metrics"

[redis]
host = "127.0.0.1:6379"
password = "xxxxx"

db = 0
poolSize = 30
idleTimeout = 30
maxRetries = 5
dialTimeout = 10

# TTL of the binlog position in redis; this feature is currently unused
binlogTimeout = 900
binlogPrefix = "xxxxxxx"

# mapping for sharded schemas and tables
[mapper]
schemas = ["xxxxxx"]

[kafka]
# kafka brokers
brokers = ["xxxxxx:9092"]
# kafka version
version = "2.1.0"
# skip certificate verification
insecureSkipVerify = true
# enable SASL/SSL access
saslEnable = false
# username
username = "xxxxx"
# password
password = "xxxxxx"
# CA certificate
certFile = "ca-cert"




[kafka.producer]
# how many acknowledgements to wait for; NoResponse: 0, WaitForLocal: 1, WaitForAll: -1 (leader and followers must both confirm)
requiredAcks = 1
# the partitioner controls how messages are distributed across partitions
# Manual: always deliver to partition 0, usually for strict ordering
# RoundRobin: round-robin
# Random: random
# Hash: hash of the message key
# ReferenceHash:
# defaults to round-robin
PartitionerType = "default"
# whether to wait for success/error responses; only meaningful when requiredAcks above is not NoResponse
87 | returnSuccesses = true 88 | returnErrors = true 89 | # 消息投递失败重试次数 90 | retryMax = 5 91 | 92 | 93 | 94 | [[kafka.producer.headers]] 95 | key = "xxxxxx" 96 | value = "xxxxxx" 97 | 98 | # 默认目标topic是表名,如果要重命名就可以在这里配置对应关系 99 | [[kafka.producer.mapper]] 100 | sourceTable = "xxx" 101 | topic = "xxxx" 102 | -------------------------------------------------------------------------------- /binlog-payload/parse.go: -------------------------------------------------------------------------------- 1 | package blp 2 | 3 | import ( 4 | "github.com/siddontang/go-mysql/canal" 5 | "github.com/siddontang/go-mysql/schema" 6 | "reflect" 7 | "strings" 8 | ) 9 | 10 | // DATA_FORMAT the data format of timestamp 11 | //const DATE_FORMAT = "2006-01-02T15:04:05.000+08:00" 12 | 13 | func parseRowMap(columns *[]schema.TableColumn, row []interface{}) *map[string]interface{} { 14 | rowMap := make(map[string]interface{}) 15 | 16 | nCol := len(*columns) 17 | if len(row) < nCol { 18 | nCol = len(row) 19 | } 20 | 21 | for colId := 0; colId < nCol; colId++ { 22 | if row[colId] != nil && ((*columns)[colId].RawType == "json" || (*columns)[colId].RawType == "text") { 23 | rowMap[(*columns)[colId].Name] = string(row[colId].([]uint8)) 24 | } else { 25 | rowMap[(*columns)[colId].Name] = row[colId] 26 | } 27 | } 28 | return &rowMap 29 | } 30 | 31 | func parseColumns(columns *[]schema.TableColumn) *map[string]schema.TableColumn { 32 | metaMap := make(map[string]schema.TableColumn) 33 | 34 | nCol := len(*columns) 35 | 36 | for colId := 0; colId < nCol; colId++ { 37 | metaMap[(*columns)[colId].Name] = (*columns)[colId] 38 | } 39 | return &metaMap 40 | } 41 | 42 | func ParsePayload(e *canal.RowsEvent) *DBSyncPayload { 43 | var columnChanged []string 44 | var rowChanges []*RowChange 45 | if e.Action == canal.InsertAction { 46 | for _, row := range e.Rows { 47 | rowChanges = append(rowChanges, &RowChange{ 48 | PreUpdate: map[string]interface{}{}, 49 | Snapshot: *parseRowMap(&e.Table.Columns, row), 50 | }) 51 | } 52 | } else if e.Action == canal.DeleteAction { 53 | for _, row := range e.Rows { 54 | rowChanges = append(rowChanges, &RowChange{ 55 | PreUpdate: *parseRowMap(&e.Table.Columns, row), 56 | Snapshot: map[string]interface{}{}, 57 | }) 58 | } 59 | } else if e.Action == canal.UpdateAction { 60 | for i := 0; i < len(e.Rows); i += 2 { 61 | pre := e.Rows[i] 62 | post := e.Rows[i+1] 63 | 64 | beforeUpdate := *parseRowMap(&e.Table.Columns, pre) 65 | afterUpdate := *parseRowMap(&e.Table.Columns, post) 66 | 67 | if len(columnChanged) == 0 { 68 | for col := range afterUpdate { 69 | if afterUpdate[col] == nil || reflect.TypeOf(afterUpdate[col]).Comparable() { 70 | if afterUpdate[col] != beforeUpdate[col] { 71 | columnChanged = append(columnChanged, col) 72 | } 73 | } else { 74 | if !reflect.DeepEqual(afterUpdate[col], beforeUpdate[col]) { 75 | columnChanged = append(columnChanged, col) 76 | } 77 | } 78 | } 79 | } 80 | 81 | preUpdate := make(map[string]interface{}) 82 | for _, c := range columnChanged { 83 | preUpdate[c] = beforeUpdate[c] 84 | } 85 | 86 | rowChanges = append(rowChanges, &RowChange{ 87 | PreUpdate: preUpdate, 88 | Snapshot: afterUpdate, 89 | }) 90 | } 91 | } 92 | 93 | payload := &DBSyncPayload{ 94 | EventType: strings.ToUpper(e.Action), 95 | Db: e.Table.Schema, 96 | Table: e.Table.Name, 97 | //TODO temporally remove scheme information from payload 98 | //PKColumn: e.Table.GetPKColumn(0).Name, 99 | //Columns: *parseColumns(&e.Table.Columns), 100 | Rows: rowChanges, 101 | ColumnsChanged: columnChanged, 102 | } 103 | return 
payload 104 | } 105 | -------------------------------------------------------------------------------- /binlog-payload/payload.go: -------------------------------------------------------------------------------- 1 | package blp 2 | 3 | import "github.com/siddontang/go-mysql/schema" 4 | 5 | // DBSyncPayload the payload struct of database sync 6 | type DBSyncPayload struct { 7 | 8 | // binlog event type: UPDATE/INSERT/DELETE 9 | EventType string `json:"eventType"` 10 | 11 | // the database name 12 | Db string `json:"db"` 13 | // the table name 14 | Table string `json:"table"` 15 | // the table columns: name to column definition 16 | Columns map[string]schema.TableColumn `json:"columns"` 17 | // the primary key column name 18 | PKColumn string `json:"pkColumn"` 19 | 20 | // the binlog file name 21 | LogFile string `json:"logFile"` 22 | // the offset position in the binlog file 23 | LogFileOffset string `json:"logfileOffset"` 24 | 25 | // the timestamp of the binlog event 26 | Ts uint8 `json:"ts"` 27 | 28 | // the rows batch of binlog event 29 | Rows []*RowChange `json:"rows"` 30 | 31 | // the columns changed in this batch 32 | ColumnsChanged []string `json:"columnsChanged"` 33 | } 34 | 35 | // RowChange the struct to describe a single row change 36 | type RowChange struct { 37 | // the complete snapshot of the single row data 38 | // when INSERT & UPDATE: store the post update row data 39 | // when DELETE: store the pre update row data 40 | Snapshot map[string]interface{} `json:"snapshot"` 41 | 42 | // Only used when UPDATE, store the pre update value of columns 43 | // corresponding to *ColumnsChanged* in *DBSyncPayload* 44 | PreUpdate map[string]interface{} `json:"preUpdate"` 45 | } 46 | -------------------------------------------------------------------------------- /ca-cert: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDPDCCAqWgAwIBAgIJAMRsb0DLM1fsMA0GCSqGSIb3DQEBBQUAMHIxCzAJBgNV 3 | BAYTAkNOMQswCQYDVQQIEwJIWjELMAkGA1UEBxMCSFoxCzAJBgNVBAoTAkFCMRAw 4 | DgYDVQQDEwdLYWZrYUNBMSowKAYJKoZIhvcNAQkBFht6aGVuZG9uZ2xpdS5semRA 5 | YWxpYmFiYS5jb20wIBcNMTcwMzA5MTI1MDUyWhgPMjEwMTAyMTcxMjUwNTJaMHIx 6 | CzAJBgNVBAYTAkNOMQswCQYDVQQIEwJIWjELMAkGA1UEBxMCSFoxCzAJBgNVBAoT 7 | AkFCMRAwDgYDVQQDEwdLYWZrYUNBMSowKAYJKoZIhvcNAQkBFht6aGVuZG9uZ2xp 8 | dS5semRAYWxpYmFiYS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALZV 9 | bbIO1ULQQN853BTBgRfPiRJaAOWf38u8GC0TNp/E9qtI88A+79ywAP17k5WYJ7XS 10 | wXMOJ3h1qkQT2TYJVetZ6E69CUJq4BsOvNlNRvmnW6eFymh5QZsEz2MTooxJjVjC 11 | JQPlI2XRDjIrTVYEQWUDxj2JhB8VVPEed+6u4KQVAgMBAAGjgdcwgdQwHQYDVR0O 12 | BBYEFHFlOoiqQxXanVi2GUoDiKDD33ujMIGkBgNVHSMEgZwwgZmAFHFlOoiqQxXa 13 | nVi2GUoDiKDD33ujoXakdDByMQswCQYDVQQGEwJDTjELMAkGA1UECBMCSFoxCzAJ 14 | BgNVBAcTAkhaMQswCQYDVQQKEwJBQjEQMA4GA1UEAxMHS2Fma2FDQTEqMCgGCSqG 15 | SIb3DQEJARYbemhlbmRvbmdsaXUubHpkQGFsaWJhYmEuY29tggkAxGxvQMszV+ww 16 | DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQBTSz04p0AJXKl30sHw+UM/ 17 | /k1jGFJzI5p0Z6l2JzKQYPP3PfE/biE8/rmiGYEenNqWNy1ZSniEHwa8L/Ux98ci 18 | 4H0ZSpUrMo2+6bfuNW9X35CFPp5vYYJqftilJBKIJX3C3J1ruOuBR28UxE42xx4K 19 | pQ70wChNi914c4B+SxkGUg== 20 | -----END CERTIFICATE----- -------------------------------------------------------------------------------- /conf/config.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "github.com/BurntSushi/toml" 5 | "github.com/siddontang/go-mysql/schema" 6 | log "github.com/sirupsen/logrus" 7 | "io/ioutil" 8 | "time" 9 | ) 10 | 11 | type ConfigSet struct 
{ 12 | Debug bool `toml:"debug"` // 是否开启debug模式 13 | Env string `toml:"env"` // 运行环境 14 | SourceDB MysqlSet `toml:"sourceDB"` // 源数据库的配置 15 | Http HttpSet `toml:"http"` // http配置 16 | Redis RedisSet `toml:"redis"` // redis配置 17 | Mapper Mapper `toml:"mapper"` // 分表分库匹配规则 18 | Kafka KafkaSet `toml:"kafka"` 19 | } 20 | 21 | // 分表分库 22 | type Mapper struct { 23 | Schemas []string `toml:"schemas"` 24 | } 25 | 26 | type HttpSet struct { 27 | StatAddr string `toml:"statAddr"` 28 | StatPath string `toml:"statPath"` // metrics 访问路劲 29 | } 30 | 31 | type KafkaSet struct { 32 | Brokers []string `toml:"brokers"` 33 | Version string `toml:"version"` // kafka的版本 34 | Producer KafkaProducerSet `toml:"producer"` 35 | 36 | InsecureSkipVerify bool `toml:"insecureSkipVerify"` 37 | SaslEnable bool `toml:"saslEnable"` 38 | Username string `toml:"username"` 39 | Password string `toml:"password"` 40 | CertFile string `toml:"certFile"` 41 | } 42 | 43 | type KafkaProducerSet struct { 44 | RequiredAcks int `toml:"requiredAcks"` 45 | ReturnSuccesses bool `toml:"returnSuccesses"` 46 | ReturnErrors bool `toml:"returnErrors"` 47 | Async bool `toml:"async"` 48 | RetryMax int `toml:"retryMax"` 49 | Headers []KafkaHeader `toml:"headers"` 50 | TableMapperTopic []KafkaMapperTopic `toml:"mapper"` 51 | PartitionerType string `toml:"partitionerType"` 52 | } 53 | 54 | type KafkaMapperTopic struct { 55 | Topic string `toml:"topic"` 56 | SourceTable string `toml:"sourceTable"` 57 | } 58 | 59 | type KafkaHeader struct { 60 | Key string `toml:"key"` 61 | Value string `toml:"value"` 62 | } 63 | 64 | type MysqlSet struct { 65 | Host string `toml:"host"` 66 | Port int `toml:"port"` 67 | UserName string `toml:"username"` 68 | Password string `toml:"password"` 69 | Charset string `toml:"charset"` 70 | ServerID uint32 `toml:"serverID"` 71 | Flavor string `toml:"flavor"` // mysql or mariadb 72 | DumpExec string `toml:"mysqldump"` // if not set or empty, ignore mysqldump. 73 | BulkSize int `toml:"bulkSize"` // minimal items to be inserted in one bulk 74 | FlushBulkTime time.Duration `toml:"flushBulkTime"` 75 | SkipNoPkTable bool `toml:"skipNoPkTable"` 76 | SkipMasterData bool `toml:"skipMasterData"` 77 | DataDir string `toml:"DataDir"` // 保存binlog到本地文件这个方法没有测试过 78 | 79 | Sources []SourceConfig `toml:"sources"` 80 | //Rules []*RuleConfig `toml:"rule"` 81 | } 82 | 83 | type RedisSet struct { 84 | Host string `toml:"host"` 85 | Password string `toml:"password"` 86 | DB int `toml:"db"` 87 | PoolSize int `toml:"poolSize"` 88 | MaxRetries int `toml:"maxRetries"` 89 | IdleTimeout time.Duration `toml:"idleTimeout"` 90 | DialTimeout time.Duration `toml:"dialTimeout"` 91 | BinlogPrefix string `toml:"binlogPrefix"` 92 | BinlogTimeout time.Duration `toml:"binlogTimeout"` 93 | } 94 | 95 | type SourceConfig struct { 96 | Schema string `toml:"schema"` 97 | Tables []string `toml:"tables"` 98 | } 99 | 100 | type RuleConfig struct { 101 | Schema string `toml:"schema"` 102 | Table string `toml:"table"` 103 | Index string `toml:"index"` 104 | Type string `toml:"type"` 105 | Parent string `toml:"parent"` 106 | ID []string `toml:"id"` 107 | 108 | // Default, a MySQL table field name is mapped to Elasticsearch field name. 109 | // Sometimes, you want to use different name, e.g, the MySQL file name is title, 110 | // but in Elasticsearch, you want to name it my_title. 
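	// A hypothetical mapping in TOML (the rule section is commented out in the
	// example configs, so this is illustrative only):
	//   [sourceDB.rule.field]
	//   title = "my_title"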
	FieldMapping map[string]string `toml:"field"`

	// MySQL table information
	TableInfo *schema.Table

	// only MySQL fields in filter will be synced, default sync all fields
	Filter []string `toml:"filter"`

	// Elasticsearch pipeline
	// To pre-process documents before indexing
	Pipeline string `toml:"pipeline"`
}

var Config = &ConfigSet{}

func init() {
	log.SetFormatter(&log.TextFormatter{
		DisableColors: true,
		FullTimestamp: true,
	})
}

func Setup(cfg string) {
	configPath := cfg
	data, err := ioutil.ReadFile(configPath)
	if err != nil {
		log.Fatalf("read toml config err: %+v", err)
	}

	if _, err := toml.Decode(string(data), &Config); err != nil {
		log.Fatalf("decode toml config err: %+v", err)
	}
	if Config.Debug == true {
		log.SetLevel(log.DebugLevel)
	}

	// source DB config: the flush interval is configured in milliseconds
	Config.SourceDB.FlushBulkTime = Config.SourceDB.FlushBulkTime * time.Millisecond

	// redis durations are configured in seconds; scale each exactly once
	Config.Redis.IdleTimeout = Config.Redis.IdleTimeout * time.Second
	Config.Redis.DialTimeout = Config.Redis.DialTimeout * time.Second
	Config.Redis.BinlogTimeout = Config.Redis.BinlogTimeout * time.Second

	// async kafka delivery can deadlock at the moment, so it stays disabled
	Config.Kafka.Producer.Async = false

	//fmt.Printf("%+v", Config)
}

// validate the configuration
func (c *ConfigSet) checkConfig() {
	// kafka checks
	if len(c.Kafka.Brokers) < 1 {
		log.Fatalf("kafka brokers can not be empty")
	}

}
--------------------------------------------------------------------------------
/conf/config_test.go:
--------------------------------------------------------------------------------
package conf

import "testing"

func TestSetup(t *testing.T) {

}
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'

services:
  go-mysql-sync-to-kafka:
    image: go-mysql-kafka
    extra_hosts:
      - example.com:10.1.64.243
    ports:
      - 8000:8000
--------------------------------------------------------------------------------
/gkafka/gkafka.go:
--------------------------------------------------------------------------------
package gkafka

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"github.com/Shopify/sarama"
	"github.com/bwmarrin/snowflake"
	"github.com/pingcap/errors"
	"github.com/siddontang/go-mysql/canal"
	log "github.com/sirupsen/logrus"
	blp "go-mysql-kafka/binlog-payload"
	"go-mysql-kafka/conf"
	"io/ioutil"
	"strconv"
	"time"
)

type Kafka struct {
	c             *conf.ConfigSet
	producer      sarama.SyncProducer
	producerAsync sarama.AsyncProducer
	Async         bool
	idGen         *snowflake.Node
}

func NewKafka(c *conf.ConfigSet) (*Kafka, error) {
	PartitionerType := c.Kafka.Producer.PartitionerType
	kafkaVersion, err := sarama.ParseKafkaVersion(c.Kafka.Version)
	if err != nil {
		return nil, errors.Trace(err)
	}

	config := sarama.NewConfig()
	config.Version = kafkaVersion
	// whether to wait for success/error responses; only meaningful when RequiredAcks is not NoResponse
	config.Producer.Return.Successes =
c.Kafka.Producer.ReturnSuccesses 39 | config.Producer.Return.Errors = c.Kafka.Producer.ReturnErrors 40 | 41 | // 开启sasl认证 42 | if c.Kafka.SaslEnable == true { 43 | certBytes, err := ioutil.ReadFile(c.Kafka.CertFile) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | config.Net.SASL.Enable = true 49 | config.Net.SASL.User = c.Kafka.Username 50 | config.Net.SASL.Password = c.Kafka.Password 51 | config.Net.SASL.Handshake = true 52 | 53 | clientCertPool := x509.NewCertPool() 54 | if ok := clientCertPool.AppendCertsFromPEM(certBytes); !ok { 55 | return nil, fmt.Errorf("kafka producer failed to parse root certificate") 56 | } 57 | 58 | config.Net.TLS.Config = &tls.Config{ 59 | RootCAs: clientCertPool, 60 | InsecureSkipVerify: c.Kafka.InsecureSkipVerify, 61 | } 62 | config.Net.TLS.Enable = true 63 | } 64 | 65 | // 是否需要确认消息已经正常写入 66 | if c.Kafka.Producer.RequiredAcks == 0 { 67 | config.Producer.RequiredAcks = sarama.NoResponse 68 | } else if c.Kafka.Producer.RequiredAcks == 1 { 69 | config.Producer.RequiredAcks = sarama.WaitForLocal 70 | } else { 71 | config.Producer.RequiredAcks = sarama.WaitForAll 72 | } 73 | 74 | // 选择分区类型,用的最多的也就manual: 所有消息都投递到partitioner 0 上 75 | switch PartitionerType { 76 | case "Manual": 77 | config.Producer.Partitioner = sarama.NewManualPartitioner 78 | case "RoundRobin": 79 | config.Producer.Partitioner = sarama.NewRoundRobinPartitioner 80 | case "Random": 81 | config.Producer.Partitioner = sarama.NewRandomPartitioner 82 | case "Hash": 83 | config.Producer.Partitioner = sarama.NewHashPartitioner 84 | case "ReferenceHash": 85 | config.Producer.Partitioner = sarama.NewReferenceHashPartitioner 86 | default: 87 | config.Producer.Partitioner = sarama.NewRoundRobinPartitioner 88 | } 89 | 90 | // 验证配置的可行性 91 | if err = config.Validate(); err != nil { 92 | return nil, fmt.Errorf("kafka producer config invalidate. 
err: %v", err) 93 | } 94 | 95 | kafka := Kafka{ 96 | c: c, 97 | Async: c.Kafka.Producer.Async, 98 | } 99 | if c.Kafka.Producer.Async { 100 | kafka.producerAsync, err = sarama.NewAsyncProducer(c.Kafka.Brokers, config) 101 | if err != nil { 102 | return nil, errors.Trace(err) 103 | } 104 | } else { 105 | kafka.producer, err = sarama.NewSyncProducer(c.Kafka.Brokers, config) 106 | if err != nil { 107 | return nil, errors.Trace(err) 108 | } 109 | } 110 | 111 | // 初始化ID生成器 112 | kafka.idGen, err = snowflake.NewNode(int64(c.SourceDB.ServerID)) 113 | if err != nil { 114 | return nil, fmt.Errorf("id gen init err: %+v", err) 115 | } 116 | 117 | return &kafka, nil 118 | 119 | } 120 | 121 | // 分析binlog生成json 122 | func (k *Kafka) Parse(e *canal.RowsEvent) ([]interface{}, error) { 123 | now := time.Now() 124 | payload := blp.ParsePayload(e) 125 | payloadByte, err := json.Marshal(payload) 126 | if err != nil { 127 | return nil, err 128 | } 129 | 130 | var id = k.idGen.Generate().String() 131 | var hdrs []sarama.RecordHeader 132 | 133 | // 博士之前把解析出来的binlog存放在redis,这里就先不存储了 134 | 135 | topic := e.Table.Name 136 | 137 | // 判断topic是否需要特定,默认是表名 138 | for _, m := range k.c.Kafka.Producer.TableMapperTopic { 139 | if m.SourceTable == e.Table.Name && m.Topic != "" { 140 | topic = m.Topic 141 | break 142 | } 143 | } 144 | 145 | // 先填写自定义头部 146 | for _, h := range k.c.Kafka.Producer.Headers { 147 | hdrs = append(hdrs, sarama.RecordHeader{ 148 | Key: []byte(h.Key), 149 | Value: []byte(h.Value), 150 | }) 151 | } 152 | 153 | // 系统头部一个时间,一个事件ID 154 | hdrs = append(hdrs, []sarama.RecordHeader{ 155 | { 156 | Key: []byte("EventTriggerTime"), 157 | Value: []byte(strconv.FormatInt(now.Unix(), 10)), 158 | }, 159 | { 160 | Key: []byte("EventID"), 161 | Value: []byte(id), 162 | }, 163 | }...) 
	message := &sarama.ProducerMessage{
		Topic:   topic,
		Headers: hdrs,
		Value:   sarama.StringEncoder(string(payloadByte)),
	}
	return []interface{}{
		message,
	}, nil

}

// deliver the parsed messages to kafka
func (k *Kafka) Publish(reqs []interface{}) error {
	var msgs []*sarama.ProducerMessage

	if len(reqs) > 0 {
		for _, req := range reqs {
			msgs = append(msgs, req.(*sarama.ProducerMessage))
		}

		for _, msg := range msgs {
			// async writes are risky; use with care, kafka can fall over (´;︵;`)
			if k.Async == true {
				k.producerAsync.Input() <- msg
				//log.Infof("kafka async: %v", msg)
				//go k.asyncSendMessage()
			} else {
				// TODO: could be optimized into batched writes
				p, offset, err := k.producer.SendMessage(msg)
				if err != nil {
					return errors.Trace(err)
				}

				log.Debugf("sent to partition %d at offset %d", p, offset)
			}
		}
	}
	return nil
}

func (k *Kafka) SendMessageTest() {
	msg := sarama.ProducerMessage{
		Topic: "FonzieTestTopic",
	}
	msg.Value = sarama.StringEncoder("value")
	// sarama does not allow empty headers for older kafka versions
	msg.Headers = []sarama.RecordHeader{
		{
			Key:   []byte("hello"),
			Value: []byte("world"),
		},
	}
	p, offset, err := k.producer.SendMessage(&msg)
	if err != nil {
		fmt.Println(errors.Trace(err))
		log.Fatalf("test send message error: %v", err)
	}
	log.Infof("sent to partition %d at offset %d", p, offset)
}

//func (k *Kafka) asyncSendMessage() {
//	signals := make(chan os.Signal, 1)
//	signal.Notify(signals, os.Interrupt)
//
//	doneCh := make(chan struct{})
//	for {
//
//		select {
//		//case k.producerAsync.Input() <- msg:
//		case result := <-k.producerAsync.Successes():
//			if k.c.Debug == true {
//				log.Infof("message: %s sent to partition %d at offset %d\n", result.Value, result.Partition, result.Offset)
//			}
//			doneCh <- struct{}{}
//		case err := <-k.producerAsync.Errors():
//			log.Errorf("kafka async send message err: %+v", err)
//		case <-signals:
//			doneCh <- struct{}{}
//		}
//	}
//	<-doneCh
//
//}

func (k *Kafka) Close() {
	var err error
	if k.Async && k.producerAsync != nil {
		err = k.producerAsync.Close()
	} else if k.Async == false && k.producer != nil {
		err = k.producer.Close()
	} else {
		err = fmt.Errorf("did not close any sarama kafka")
	}

	if err != nil {
		log.Errorf("close kafka err: %+v", err)
	} else {
		log.Info("close kafka is ok")
	}
}
--------------------------------------------------------------------------------
/go-mysql-kafka.go:
--------------------------------------------------------------------------------
package main

import (
	"flag"
	log "github.com/sirupsen/logrus"
	"go-mysql-kafka/conf"
	"go-mysql-kafka/gkafka"
	"go-mysql-kafka/gredis"
	"go-mysql-kafka/holder"
	"go-mysql-kafka/mapper"
	"go-mysql-kafka/sync_manager"
	"os"
	"os/signal"
	"syscall"
)

var cfg = flag.String("cfg", "app.toml", "setting up the configuration file")

func main() {
	var err error
	flag.Parse()

	conf.Setup(*cfg)
	gredis.Setup()

	c := conf.Config

	// create a channel for OS signals
	sc
:= make(chan os.Signal, 1) 30 | signal.Notify(sc, 31 | os.Kill, 32 | os.Interrupt, 33 | syscall.SIGHUP, 34 | syscall.SIGINT, 35 | syscall.SIGQUIT, 36 | syscall.SIGILL, 37 | syscall.SIGTRAP, 38 | syscall.SIGABRT, 39 | syscall.SIGBUS, 40 | syscall.SIGFPE, 41 | syscall.SIGKILL, 42 | syscall.SIGSEGV, 43 | syscall.SIGPIPE, 44 | syscall.SIGALRM, 45 | syscall.SIGTERM) 46 | 47 | // 初始化存储binlog位置, 这里用的是redis存储 48 | positionHolder, err := holder.NewPosition(c) 49 | if err != nil { 50 | log.Fatalf("init position holder err: %+v", err) 51 | } 52 | kafkaProducer, err := gkafka.NewKafka(c) 53 | if err != nil { 54 | log.Fatalf("init kafka producer err: %+v", err) 55 | } 56 | 57 | //kafkaProducer.SendMessageTest() 58 | 59 | // 初始化分表分库的配置 60 | rowsMapper := mapper.NewDRDSMapper(c) 61 | 62 | var sm *sync_manager.SyncManager 63 | 64 | sm, err = sync_manager.NewSyncManager(c, positionHolder, rowsMapper, kafkaProducer) 65 | if err != nil { 66 | log.Fatalf("init sync manager err: %+v", err) 67 | } 68 | 69 | done := make(chan struct{}, 1) 70 | 71 | go func() { 72 | err = sm.Run() 73 | if err != nil { 74 | log.Fatalf("sync manager run err: %v", err) 75 | } 76 | 77 | done <- struct{}{} 78 | log.Infof("run end") 79 | 80 | }() 81 | 82 | // http服务 83 | st := &sync_manager.Stat{Sm: sm, C: c} 84 | go st.Run() 85 | 86 | select { 87 | case n := <-sc: 88 | log.Infof("receive signal %v, closing", n) 89 | //TODO 临时写一下,之后应该把多个manager等context归总到一个进行监听 90 | case <-sm.Ctx.Done(): 91 | log.Infof("context is done with %v, closing", sm.Ctx.Err()) 92 | } 93 | 94 | sm.Close() 95 | kafkaProducer.Close() 96 | gredis.Close() 97 | st.Close() 98 | <-done 99 | log.Infof("sync manager is stop") 100 | } 101 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module go-mysql-kafka 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/BurntSushi/toml v0.3.1 7 | github.com/Shopify/sarama v1.26.1 8 | github.com/bwmarrin/snowflake v0.3.0 9 | github.com/go-redis/redis v6.15.7+incompatible 10 | github.com/gomodule/redigo v2.0.0+incompatible 11 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 12 | github.com/modern-go/reflect2 v1.0.1 // indirect 13 | github.com/pingcap/errors v0.11.0 14 | github.com/prometheus/client_golang v1.5.1 15 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 16 | github.com/siddontang/go-mysql v0.0.0-20200311002057-7a62847fcdb5 17 | github.com/sirupsen/logrus v1.4.2 18 | ) 19 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 3 | github.com/Shopify/sarama v1.26.1 h1:3jnfWKD7gVwbB1KSy/lE0szA9duPuSFLViK0o/d3DgA= 4 | github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= 5 | github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 6 | github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 7 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 8 | github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 9 | github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 10 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 11 | github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 12 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 13 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 14 | github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0= 15 | github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= 16 | github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= 17 | github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 18 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 19 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 20 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 21 | github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= 22 | github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 23 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= 24 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 25 | github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= 26 | github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 27 | github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 28 | github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= 29 | github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 30 | github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 31 | github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 32 | github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 33 | github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U= 34 | github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= 35 | github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= 36 | github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 37 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 38 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 39 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 40 | github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 41 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 42 | github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= 43 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 44 | github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= 45 | 
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= 46 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 47 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 48 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 49 | github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= 50 | github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 51 | github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= 52 | github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= 53 | github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 54 | github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 55 | github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 56 | github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= 57 | github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= 58 | github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= 59 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 60 | github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 61 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 62 | github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 63 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 64 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 65 | github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 66 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 67 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 68 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 69 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 70 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 71 | github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 72 | github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= 73 | github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 74 | github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= 75 | github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4= 76 | github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= 77 | github.com/pingcap/parser v0.0.0-20190506092653-e336082eb825 h1:U9Kdnknj4n2v76Mg7wazevZ5N9U1OIaMwSNRVLEcLX0= 78 | github.com/pingcap/parser v0.0.0-20190506092653-e336082eb825/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= 79 | github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330 
h1:rRMLMjIMFulCX9sGKZ1hoov/iROMsKyC8Snc02nSukw= 80 | github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= 81 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 82 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 83 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 84 | github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 85 | github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 86 | github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= 87 | github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= 88 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 89 | github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 90 | github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= 91 | github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 92 | github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 93 | github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= 94 | github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= 95 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 96 | github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 97 | github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= 98 | github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 99 | github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= 100 | github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 101 | github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= 102 | github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= 103 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= 104 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= 105 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM= 106 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= 107 | github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q= 108 | github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4= 109 | github.com/siddontang/go-mysql v0.0.0-20200311002057-7a62847fcdb5 h1:oMe8o8CvdOdYXM0syanupGgzmcFy9POWsNYlxDpuQr8= 110 | github.com/siddontang/go-mysql v0.0.0-20200311002057-7a62847fcdb5/go.mod h1:+W4RCzesQDI11HvIkaDjS8yM36SpAnGNQ7jmTLn5BnU= 111 | github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 112 | github.com/sirupsen/logrus v1.4.1 
h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= 113 | github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= 114 | github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= 115 | github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 116 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 117 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 118 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 119 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 120 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 121 | github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= 122 | github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= 123 | golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 124 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 125 | golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= 126 | golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 127 | golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 128 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 129 | golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 130 | golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= 131 | golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 132 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 133 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 134 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 135 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 136 | golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 137 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 138 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 139 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 140 | golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= 141 | golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 142 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 143 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 144 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 145 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 146 | golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
--------------------------------------------------------------------------------
/gredis/gredis.go:
--------------------------------------------------------------------------------
package gredis

import (
	"github.com/go-redis/redis"
	log "github.com/sirupsen/logrus"
	"go-mysql-kafka/conf"
	"time"
)

var db *redis.Client

func Setup() {
	config := conf.Config.Redis
	db = redis.NewClient(&redis.Options{
		Addr:        config.Host,
		Password:    config.Password,
		DB:          config.DB,
		IdleTimeout: config.IdleTimeout,
		PoolSize:    config.PoolSize,
		MaxRetries:  config.MaxRetries,
	})

	pong, err := db.Ping().Result()
	if err != nil {
		log.Fatalf("redis connection failed, err: %v", err)
	}

	log.Infof("redis connected: %v", pong)

	go pingLoop()
}

func pingLoop() {
	for {
		_, err := db.Ping().Result()
		if err != nil {
			log.Errorf("redis connection failed, err: %v", err)
		}

		time.Sleep(30 * time.Second)
	}
}

func Close() {
	err := db.Close()
	if err != nil {
		log.Errorf("close redis err: %v", err)
		return
	}
	log.Info("close redis is ok")
}

// set a key in redis; timeout is given in seconds, a non-positive value means no TTL
func Set(key string, value interface{}, timeout time.Duration) error {
	t := timeout * time.Second
	err := db.Set(key, value, t).Err()
	if err != nil {
		return err
	}

	return nil
}

// check whether a redis key exists
func Exist(key string) (exist bool, err error) {
	_, err = db.Get(key).Result()
	if err == redis.Nil {
		return false, nil
	} else if err != nil {
		return false, err
	}

	return true, nil
}

// get a key as bytes
func Get(key string) (val []byte, err error) {
	return db.Get(key).Bytes()
}

// get a key as a string
func GetString(key string) (val string, err error) {
	return db.Get(key).Result()
}

// delete a key
func Delete(key string) error {
	return db.Del(key).Err()
}

// delete all keys matching a pattern
func LikeDelete(key string) error {
	keys, err := db.Keys(key).Result()
	if err != nil {
		return err
	}

	for _, key := range keys {
		err = Delete(key)
		if err != nil {
			return err
		}
	}
	return nil
}
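A minimal usage sketch of this wrapper (assuming `app.toml` points at a reachable redis; the key and value are made up):

```go
package main

import (
	log "github.com/sirupsen/logrus"
	"go-mysql-kafka/conf"
	"go-mysql-kafka/gredis"
)

func main() {
	conf.Setup("app.toml")
	gredis.Setup()
	defer gredis.Close()

	// Set scales its third argument by time.Second, so this is a 60s TTL
	if err := gredis.Set("demo:key", "demo-value", 60); err != nil {
		log.Fatalf("set err: %v", err)
	}

	if val, err := gredis.GetString("demo:key"); err == nil {
		log.Infof("got: %s", val)
	}
}
```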
--------------------------------------------------------------------------------
/gredis/gredis_test.go:
--------------------------------------------------------------------------------
package gredis

import (
	"testing"
)

func TestGet(t *testing.T) {

}
--------------------------------------------------------------------------------
/holder/holder.go:
--------------------------------------------------------------------------------
package holder

import (
	"fmt"
	"github.com/go-redis/redis"
	"github.com/pingcap/errors"
	"github.com/siddontang/go-mysql/mysql"
	"go-mysql-kafka/conf"
	"go-mysql-kafka/gredis"
	"strconv"
	"strings"
	"time"
)

// overrides the default functions for loading and storing the binlog position

type PositionHolder struct {
	// binlog TTL
	binlogTimeout time.Duration
	// redis key prefix
	prefix string
	// usually the environment name
	label string
}

func NewPosition(c *conf.ConfigSet) (*PositionHolder, error) {
	return &PositionHolder{
		binlogTimeout: c.Redis.BinlogTimeout,
		prefix:        c.Redis.BinlogPrefix,
		label:         c.Env,
	}, nil
}

func (p *PositionHolder) Save(pos *mysql.Position) error {
	key := fmt.Sprintf("%s:%s", p.prefix, p.label)
	value := fmt.Sprintf("%s:%v", pos.Name, pos.Pos)

	return gredis.Set(key, value, -1)

}

func (p *PositionHolder) Load() (*mysql.Position, error) {
	var pos mysql.Position
	key := fmt.Sprintf("%s:%s", p.prefix, p.label)
	posval, err := gredis.GetString(key)
	if err == redis.Nil {
		return nil, nil
	} else if err != nil {
		return nil, errors.Trace(err)
	}

	//posval := string(posvalByte)
	//re := regexp.MustCompile("\"")
	//posval = re.ReplaceAllString(posval, "")
	toks := strings.Split(posval, ":")
	if len(toks) == 2 {
		pos.Name = toks[0]
		rawPos, err := strconv.Atoi(toks[1])
		if err != nil {
			return nil, err
		}

		pos.Pos = uint32(rawPos)
		return &pos, nil
	}
	return nil, fmt.Errorf("cannot parse mysql position")
}
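With this holder the position is stored as a plain `file:offset` string under the key `<binlogPrefix>:<env>`, so it can be inspected directly; for example (key and values illustrative):

```shell script
$ redis-cli GET "CONFIG:xxxxxxxx-mysql-sync:dev"
"mysql-bin.001403:25309244"
```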
--------------------------------------------------------------------------------
/img-assets/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vperson/go-mysql-kafka/96faade0c24071c537572ec939b56ca2d5a9b4cd/img-assets/01.png
--------------------------------------------------------------------------------
/mapper/mapper.go:
--------------------------------------------------------------------------------
package mapper

import (
	"github.com/siddontang/go-mysql/canal"
	"go-mysql-kafka/conf"
	"regexp"
)

// handles the sharded schema/table mapping; only DRDS-style sharding is covered here

type MultiSourceMapper struct {
	c      *conf.ConfigSet
	tabPat *regexp.Regexp
}

func NewDRDSMapper(c *conf.ConfigSet) *MultiSourceMapper {
	return &MultiSourceMapper{
		c:      c,
		tabPat: regexp.MustCompile(`\d+$`),
	}
}

// shard-mapping logic (the body is currently commented out, so this is a no-op)
func (m *MultiSourceMapper) Transform(e *canal.RowsEvent) *canal.RowsEvent {
	//for _, name := range m.c.Mapper.Schemas {
	//	if strings.HasPrefix(e.Table.Schema, name) {
	//		e.Table.Schema = name
	//
	//		if m.tabPat.MatchString(e.Table.Name) {
	//			e.Table.Name = m.tabPat.ReplaceAllString(e.Table.Name, "")
	//		}
	//	}
	//}

	return e
}
--------------------------------------------------------------------------------
/pkg/file/file.go:
--------------------------------------------------------------------------------
package file

import "os"

// reports whether the file does not exist
func CheckNotExist(src string) bool {
	_, err := os.Stat(src)
	return os.IsNotExist(err)
}

// reports whether accessing the file fails with a permission error
func CheckPermission(src string) bool {
	_, err := os.Stat(src)
	return os.IsPermission(err)
}
--------------------------------------------------------------------------------
/sync_manager/mapper.go:
--------------------------------------------------------------------------------
package sync_manager

import "github.com/siddontang/go-mysql/canal"

type RowMapper interface {
	Transform(e *canal.RowsEvent) *canal.RowsEvent
}

// default example implementation
type DefaultRowMapper struct {
}

// reserved for shard mapping; performs no transformation
func (m *DefaultRowMapper) Transform(e *canal.RowsEvent) *canal.RowsEvent {
	return e
}
--------------------------------------------------------------------------------
/sync_manager/master.go:
--------------------------------------------------------------------------------
package sync_manager

import (
	"fmt"
	"github.com/pingcap/errors"
	"github.com/siddontang/go-mysql/mysql"
	"github.com/siddontang/go/ioutil2"
	log "github.com/sirupsen/logrus"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"
)

type masterInfo struct {
	sync.RWMutex

	Name string
	Pos  uint32

	filePath     string
	lastSaveTime time.Time

	holder PositionHolder
}

// default holder that persists the position to a local file
type FilePositionHolder struct {
	dataDir string
}

// Save the function to save the binlog position
func (h *FilePositionHolder) Save(pos *mysql.Position) error {
	if len(h.dataDir) == 0 {
		return nil
	}

	filePath := path.Join(h.dataDir, "master.info")

	posContent := fmt.Sprintf("%s:%v", pos.Name, pos.Pos)

	var err error
	if err = ioutil2.WriteFileAtomic(filePath, []byte(posContent), 0644); err != nil {
		log.Errorf("canal save master info to file %s err %v", filePath, err)
	}
	return err
}

// Load the function to retrieve the MySQL binlog position
func (h *FilePositionHolder) Load() (*mysql.Position, error) {
	var pos mysql.Position

	if err := os.MkdirAll(h.dataDir, 0755); err != nil {
		return nil, errors.Trace(err)
	}

	filePath := path.Join(h.dataDir, "master.info")
	f, err := os.Open(filePath)
	if err != nil && !os.IsNotExist(errors.Cause(err)) {
		return nil, errors.Trace(err)
	} else if os.IsNotExist(errors.Cause(err)) {
		return nil, nil
	}
	defer f.Close()

	bytes, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, err
	}

	toks := strings.Split(string(bytes), ":")
	if len(toks) == 2 {
		pos.Name = toks[0]

		rawPos, err := strconv.Atoi(toks[1])

		if err != nil {
			return nil, err
		}
		pos.Pos = uint32(rawPos)
		return &pos, errors.Trace(err)
	}
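	// fall through: the stored content was not in the expected name:pos form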
--------------------------------------------------------------------------------
 /sync_manager/master.go:
--------------------------------------------------------------------------------
1 | package sync_manager
2 | 
3 | import (
4 | 	"fmt"
5 | 	"io/ioutil"
6 | 	"os"
7 | 	"path"
8 | 	"strconv"
9 | 	"strings"
10 | 	"sync"
11 | 	"time"
12 | 
13 | 	"github.com/pingcap/errors"
14 | 	"github.com/siddontang/go-mysql/mysql"
15 | 	"github.com/siddontang/go/ioutil2"
16 | 	log "github.com/sirupsen/logrus"
17 | )
18 | 
19 | type masterInfo struct {
20 | 	sync.RWMutex
21 | 
22 | 	Name string
23 | 	Pos  uint32
24 | 
25 | 	filePath     string
26 | 	lastSaveTime time.Time
27 | 
28 | 	holder PositionHolder
29 | }
30 | 
31 | // FilePositionHolder is the default holder: it persists the binlog
32 | // position to a local file (<dataDir>/master.info).
33 | type FilePositionHolder struct {
34 | 	dataDir string
35 | }
36 | 
37 | // Save writes the binlog position to disk atomically.
38 | func (h *FilePositionHolder) Save(pos *mysql.Position) error {
39 | 	if len(h.dataDir) == 0 {
40 | 		return nil
41 | 	}
42 | 
43 | 	filePath := path.Join(h.dataDir, "master.info")
44 | 
45 | 	posContent := fmt.Sprintf("%s:%v", pos.Name, pos.Pos)
46 | 
47 | 	var err error
48 | 	if err = ioutil2.WriteFileAtomic(filePath, []byte(posContent), 0644); err != nil {
49 | 		log.Errorf("canal save master info to file %s err %v", filePath, err)
50 | 	}
51 | 	return err
52 | }
53 | 
54 | // Load reads the persisted MySQL binlog position; it returns (nil, nil)
55 | // when no position has been saved yet.
56 | func (h *FilePositionHolder) Load() (*mysql.Position, error) {
57 | 	var pos mysql.Position
58 | 
59 | 	if err := os.MkdirAll(h.dataDir, 0755); err != nil {
60 | 		return nil, errors.Trace(err)
61 | 	}
62 | 
63 | 	filePath := path.Join(h.dataDir, "master.info")
64 | 
65 | 	bytes, err := ioutil.ReadFile(filePath)
66 | 	if os.IsNotExist(err) {
67 | 		return nil, nil
68 | 	} else if err != nil {
69 | 		return nil, errors.Trace(err)
70 | 	}
71 | 
72 | 	toks := strings.SplitN(strings.TrimSpace(string(bytes)), ":", 2)
73 | 	if len(toks) == 2 {
74 | 		pos.Name = toks[0]
75 | 
76 | 		rawPos, err := strconv.Atoi(toks[1])
77 | 		if err != nil {
78 | 			return nil, errors.Trace(err)
79 | 		}
80 | 		pos.Pos = uint32(rawPos)
81 | 		return &pos, nil
82 | 	}
83 | 
84 | 	return nil, errors.New("cannot parse mysql position")
85 | }
86 | 
87 | // PositionHolder abstracts where the binlog position is persisted, so the
88 | // same sync manager can keep it in a file, Redis, or any other backend.
89 | type PositionHolder interface {
90 | 	// Load returns the stored binlog position, or nil if none exists.
91 | 	Load() (*mysql.Position, error)
92 | 	// Save persists the given binlog position.
93 | 	Save(pos *mysql.Position) error
94 | }
95 | 
96 | func (m *masterInfo) loadPos() error {
97 | 	m.lastSaveTime = time.Now()
98 | 
99 | 	pos, err := m.holder.Load()
100 | 	if err != nil {
101 | 		return errors.Trace(err)
102 | 	}
103 | 
104 | 	if pos != nil {
105 | 		m.Name = pos.Name
106 | 		m.Pos = pos.Pos
107 | 	}
108 | 
109 | 	return nil
110 | }
111 | 
112 | // Save records the position in memory and flushes it to the holder at
113 | // most once per second, to avoid hammering the backend.
114 | func (m *masterInfo) Save(pos mysql.Position) error {
115 | 	log.Infof("save position %+v", pos)
116 | 
117 | 	m.Lock()
118 | 	defer m.Unlock()
119 | 
120 | 	m.Name = pos.Name
121 | 	m.Pos = pos.Pos
122 | 
123 | 	now := time.Now()
124 | 	if now.Sub(m.lastSaveTime) < time.Second {
125 | 		return nil
126 | 	}
127 | 	m.lastSaveTime = now
128 | 
129 | 	err := m.holder.Save(&pos)
130 | 	return errors.Trace(err)
131 | }
132 | 
133 | func (m *masterInfo) Position() mysql.Position {
134 | 	m.RLock()
135 | 	defer m.RUnlock()
136 | 
137 | 	return mysql.Position{
138 | 		Name: m.Name,
139 | 		Pos:  m.Pos,
140 | 	}
141 | }
142 | 
143 | // Close flushes the final position directly through the holder, bypassing
144 | // the one-second save throttle so the last position is not lost on shutdown.
145 | func (m *masterInfo) Close() error {
146 | 	pos := m.Position()
147 | 
148 | 	return errors.Trace(m.holder.Save(&pos))
149 | }
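Because PositionHolder is an interface, the position can live anywhere. Below is a minimal Redis-backed sketch using the redigo client; the key layout (one string key holding "name:pos") and the pool wiring are assumptions, and the repository's own gredis package may wrap Redis differently:

```go
package sync_manager

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/gomodule/redigo/redis"
	"github.com/siddontang/go-mysql/mysql"
)

// RedisPositionHolder is a hypothetical holder that keeps the binlog
// position in a single Redis string key, formatted as "name:pos".
type RedisPositionHolder struct {
	pool *redis.Pool
	key  string // e.g. "<binlogPrefix>:position"
}

func (h *RedisPositionHolder) Save(pos *mysql.Position) error {
	conn := h.pool.Get()
	defer conn.Close()

	_, err := conn.Do("SET", h.key, fmt.Sprintf("%s:%d", pos.Name, pos.Pos))
	return err
}

func (h *RedisPositionHolder) Load() (*mysql.Position, error) {
	conn := h.pool.Get()
	defer conn.Close()

	val, err := redis.String(conn.Do("GET", h.key))
	if err == redis.ErrNil {
		return nil, nil // nothing saved yet: start from the current binlog
	} else if err != nil {
		return nil, err
	}

	toks := strings.SplitN(val, ":", 2)
	if len(toks) != 2 {
		return nil, fmt.Errorf("cannot parse position %q", val)
	}
	rawPos, err := strconv.Atoi(toks[1])
	if err != nil {
		return nil, err
	}
	return &mysql.Position{Name: toks[0], Pos: uint32(rawPos)}, nil
}
```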
--------------------------------------------------------------------------------
 /sync_manager/metrics.go:
--------------------------------------------------------------------------------
1 | package sync_manager
2 | 
3 | import (
4 | 	"bytes"
5 | 	"fmt"
6 | 	"net/http"
7 | 	"net/http/pprof"
8 | 	"time"
9 | 
10 | 	"github.com/prometheus/client_golang/prometheus"
11 | 	"github.com/prometheus/client_golang/prometheus/promauto"
12 | 	"github.com/prometheus/client_golang/prometheus/promhttp"
13 | 	log "github.com/sirupsen/logrus"
14 | 
15 | 	"go-mysql-kafka/conf"
16 | )
17 | 
18 | type Stat struct {
19 | 	C   *conf.ConfigSet
20 | 	Sm  *SyncManager
21 | 	srv *http.Server
22 | }
23 | 
24 | var (
25 | 	canalSyncState = promauto.NewGauge(
26 | 		prometheus.GaugeOpts{
27 | 			Name: "mysql2kafka_canal_state",
28 | 			Help: "The canal slave running state: 0=stopped, 1=ok",
29 | 		},
30 | 	)
31 | 	dbInsertNum = promauto.NewCounterVec(
32 | 		prometheus.CounterOpts{
33 | 			Name: "mysql_binlog_inserted_num",
34 | 			Help: "The number of rows inserted from mysql",
35 | 		}, []string{"table"},
36 | 	)
37 | 	dbUpdateNum = promauto.NewCounterVec(
38 | 		prometheus.CounterOpts{
39 | 			Name: "mysql_binlog_updated_num",
40 | 			Help: "The number of rows updated from mysql",
41 | 		}, []string{"table"},
42 | 	)
43 | 	dbDeleteNum = promauto.NewCounterVec(
44 | 		prometheus.CounterOpts{
45 | 			Name: "mysql_binlog_deleted_num",
46 | 			Help: "The number of rows deleted from mysql",
47 | 		}, []string{"table"},
48 | 	)
49 | )
50 | 
51 | // Run starts the stat/metrics HTTP server; it blocks until the server stops.
52 | func (s *Stat) Run() {
53 | 	mux := http.NewServeMux()
54 | 	mux.Handle("/stat", s)
55 | 	mux.Handle(s.C.Http.StatPath, promhttp.Handler())
56 | 	if s.C.Debug {
57 | 		mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
58 | 		mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
59 | 		mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
60 | 		mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
61 | 		mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
62 | 	}
63 | 
64 | 	// keep the server on the struct so Close() can actually shut it down
65 | 	s.srv = &http.Server{
66 | 		Addr:         s.C.Http.StatAddr,
67 | 		Handler:      mux,
68 | 		ReadTimeout:  60 * time.Second,
69 | 		WriteTimeout: 60 * time.Second,
70 | 	}
71 | 
72 | 	log.Infof("http listen : http://%s", s.C.Http.StatAddr)
73 | 	if err := s.srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
74 | 		log.Errorf("http listen err : %v", err)
75 | 		s.Sm.cancel()
76 | 	}
77 | }
78 | 
79 | // Close shuts the HTTP server down.
80 | func (s *Stat) Close() {
81 | 	if s.srv != nil {
82 | 		if err := s.srv.Close(); err != nil {
83 | 			log.Errorf("http close err: %v", err)
84 | 			return
85 | 		}
86 | 	}
87 | 
88 | 	log.Infof("http successfully closed")
89 | }
90 | 
91 | // ServeHTTP renders a plain-text status page at /stat.
92 | func (s *Stat) ServeHTTP(w http.ResponseWriter, r *http.Request) {
93 | 	var buf bytes.Buffer
94 | 
95 | 	sm := s.Sm
96 | 	rr, err := sm.canal.Execute("SHOW MASTER STATUS")
97 | 	if err != nil {
98 | 		w.WriteHeader(http.StatusInternalServerError)
99 | 		w.Write([]byte(fmt.Sprintf("execute sql error %v", err)))
100 | 		return
101 | 	}
102 | 
103 | 	binName, _ := rr.GetString(0, 0)
104 | 	binPos, _ := rr.GetUint(0, 1)
105 | 
106 | 	pos := sm.canal.SyncedPosition()
107 | 
108 | 	buf.WriteString(fmt.Sprintf("sync info of debug: %v\n", sm.c.Debug))
109 | 	buf.WriteString(fmt.Sprintf("source database addr %s:%d\n", sm.c.SourceDB.Host, sm.c.SourceDB.Port))
110 | 	for _, src := range s.C.SourceDB.Sources {
111 | 		buf.WriteString(fmt.Sprintf("\nDB: %s\n", src.Schema))
112 | 		for n, t := range src.Tables {
113 | 			buf.WriteString(fmt.Sprintf("Table%d: %s\n", n+1, t))
114 | 		}
115 | 	}
116 | 	buf.WriteString("-------------------------------------------------------------------------------\n")
117 | 	buf.WriteString(fmt.Sprintf("server_current_binlog:(%s, %d)\n", binName, binPos))
118 | 	buf.WriteString(fmt.Sprintf("read_binlog:%s\n", pos))
119 | 
120 | 	buf.WriteString(fmt.Sprintf("insert_num:%d\n", sm.InsertNum.Get()))
121 | 	buf.WriteString(fmt.Sprintf("update_num:%d\n", sm.UpdateNum.Get()))
122 | 	buf.WriteString(fmt.Sprintf("delete_num:%d\n", sm.DeleteNum.Get()))
123 | 	buf.WriteString(fmt.Sprintf("sync chan length: %d\n", len(sm.syncCh)))
124 | 	buf.WriteString("-------------------------------------------------------------------------------\n")
125 | 	buf.WriteString(fmt.Sprintf("kafka: %v", sm.c.Kafka.Brokers))
126 | 
127 | 	w.Write(buf.Bytes())
128 | }
--------------------------------------------------------------------------------
 /sync_manager/river.go:
--------------------------------------------------------------------------------
1 | package sync_manager
2 | 
3 | import (
4 | 	"context"
5 | 	"fmt"
6 | 	"net/http"
7 | 	"sync"
8 | 
9 | 	"github.com/pingcap/errors"
10 | 	"github.com/siddontang/go-mysql/canal"
11 | 	"github.com/siddontang/go/sync2"
12 | 	log "github.com/sirupsen/logrus"
13 | 
14 | 	"go-mysql-kafka/conf"
15 | )
16 | 
17 | type SyncManager struct {
18 | 	c *conf.ConfigSet
19 | 
20 | 	canal *canal.Canal
21 | 
22 | 	Ctx    context.Context
23 | 	cancel context.CancelFunc
24 | 
25 | 	wg sync.WaitGroup
26 | 
27 | 	master *masterInfo
28 | 
29 | 	// sink pushes parsed events to the target; being an interface, the
30 | 	// target can differ (kafka, stdout, ...).
31 | 	sink Sink
32 | 	// posHolder persists the binlog position.
33 | 	posHolder PositionHolder
34 | 	// rowMapper is reserved for sharded-table/sharded-database mapping.
35 | 	rowMapper RowMapper
36 | 
37 | 	// syncCh buffers parsed binlog events and position updates.
38 | 	syncCh chan interface{}
39 | 
40 | 	// http server (reserved, currently unused)
41 | 	http http.Server
42 | 
43 | 	InsertNum sync2.AtomicInt64
44 | 	UpdateNum sync2.AtomicInt64
45 | 	DeleteNum sync2.AtomicInt64
46 | }
47 | 
48 | func NewSyncManager(c *conf.ConfigSet, holder PositionHolder, rowMapper RowMapper, sink Sink) (*SyncManager, error) {
49 | 	sm := new(SyncManager)
50 | 
51 | 	sm.c = c
52 | 	sm.syncCh = make(chan interface{}, 4096)
53 | 	sm.Ctx, sm.cancel = context.WithCancel(context.Background())
54 | 
55 | 	sm.posHolder = holder
56 | 	sm.rowMapper = rowMapper
57 | 	sm.sink = sink
58 | 
59 | 	var err error
60 | 	// allocate the in-memory binlog position struct
61 | 	if err = sm.newMaster(); err != nil {
62 | 		return nil, errors.Trace(err)
63 | 	}
64 | 
65 | 	// load the last saved binlog position
66 | 	if err = sm.prepareMaster(); err != nil {
67 | 		return nil, errors.Trace(err)
68 | 	}
69 | 
70 | 	// build the canal configuration
71 | 	if err = sm.newCanal(); err != nil {
72 | 		return nil, errors.Trace(err)
73 | 	}
74 | 
75 | 	// register our handler with go-canal
76 | 	if err = sm.prepareCanal(); err != nil {
77 | 		return nil, errors.Trace(err)
78 | 	}
79 | 
80 | 	// We must use the FULL binlog row image so every row event carries
81 | 	// all columns (binlog_row_image=FULL on the source MySQL).
82 | 	if err = sm.canal.CheckBinlogRowImage("FULL"); err != nil {
83 | 		return nil, errors.Trace(err)
84 | 	}
85 | 
86 | 	return sm, nil
87 | }
88 | 
89 | func (s *SyncManager) newMaster() error {
90 | 	s.master = &masterInfo{}
91 | 
92 | 	return nil
93 | }
94 | 
95 | func (s *SyncManager) newCanal() error {
96 | 	cfg := canal.NewDefaultConfig()
97 | 	cfg.Addr = fmt.Sprintf("%s:%d", s.c.SourceDB.Host, s.c.SourceDB.Port)
98 | 	cfg.User = s.c.SourceDB.UserName
99 | 	cfg.Password = s.c.SourceDB.Password
100 | 	cfg.Charset = s.c.SourceDB.Charset
101 | 	cfg.Flavor = s.c.SourceDB.Flavor
102 | 
103 | 	cfg.ServerID = s.c.SourceDB.ServerID
104 | 	cfg.Dump.ExecutionPath = ""
105 | 	cfg.Dump.DiscardErr = false
106 | 	cfg.Dump.SkipMasterData = s.c.SourceDB.SkipMasterData
107 | 
108 | 	if s.c.SourceDB.DumpExec != "" {
109 | 		cfg.Dump.ExecutionPath = s.c.SourceDB.DumpExec
110 | 	}
111 | 	//cfg.SemiSyncEnabled = false
112 | 
113 | 	// only replicate the configured schema/table patterns
114 | 	for _, src := range s.c.SourceDB.Sources {
115 | 		for _, t := range src.Tables {
116 | 			cfg.IncludeTableRegex = append(cfg.IncludeTableRegex, src.Schema+"\\."+t)
117 | 		}
118 | 	}
119 | 
120 | 	var err error
121 | 	s.canal, err = canal.NewCanal(cfg)
122 | 	return errors.Trace(err)
123 | }
124 | 
125 | // prepareCanal registers the row/position event handler with go-canal.
126 | func (s *SyncManager) prepareCanal() error {
127 | 	s.canal.SetEventHandler(&eventHandler{s: s})
128 | 	return nil
129 | }
130 | 
131 | // prepareMaster wires up the position holder. Prefer a custom holder that
132 | // fits your deployment; the local-file default is a poor fit for Docker.
133 | func (s *SyncManager) prepareMaster() error {
134 | 	if s.posHolder == nil {
135 | 		s.posHolder = &FilePositionHolder{dataDir: s.c.SourceDB.DataDir}
136 | 	}
137 | 
138 | 	s.master.holder = s.posHolder
139 | 	return s.master.loadPos()
140 | }
141 | 
142 | func (s *SyncManager) Run() error {
143 | 	s.wg.Add(1)
144 | 	// mark the canal slave as running for prometheus
145 | 	canalSyncState.Set(1)
146 | 
147 | 	// consume the sync channel: batch events and persist positions
148 | 	go s.syncLoop()
149 | 
150 | 	// the position was loaded from the holder in NewSyncManager
151 | 	pos := s.master.Position()
152 | 	if err := s.canal.RunFrom(pos); err != nil {
153 | 		log.Errorf("start canal err: %+v", err)
154 | 		return errors.Trace(err)
155 | 	}
156 | 
157 | 	return nil
158 | }
159 | 
160 | func (s *SyncManager) Close() {
161 | 	log.Infof("closing sync manager")
162 | 	s.cancel()
163 | 	s.canal.Close()
164 | 	s.master.Close()
165 | 	s.wg.Wait()
166 | }
--------------------------------------------------------------------------------
 /sync_manager/sink.go:
--------------------------------------------------------------------------------
1 | package sync_manager
2 | 
3 | import "github.com/siddontang/go-mysql/canal"
4 | 
5 | // Sink turns row events into messages and delivers them to a target.
6 | type Sink interface {
7 | 	// Parse converts a binlog row event into zero or more messages.
8 | 	Parse(e *canal.RowsEvent) ([]interface{}, error)
9 | 	// Publish pushes a batch of parsed messages to the target.
10 | 	Publish([]interface{}) error
11 | }
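Any type satisfying this interface can act as a target. As a minimal sketch (not part of the repository), a sink that serializes each row event to JSON and prints it could look like the following; the message shape is an assumption, and the real Kafka delivery lives in the gkafka and binlog-payload packages:

```go
package sync_manager

import (
	"encoding/json"
	"fmt"

	"github.com/siddontang/go-mysql/canal"
)

// StdoutSink is a hypothetical sink for local debugging: Parse renders
// each row event as one JSON document, Publish prints the batch.
type StdoutSink struct{}

func (s *StdoutSink) Parse(e *canal.RowsEvent) ([]interface{}, error) {
	msg, err := json.Marshal(map[string]interface{}{
		"schema": e.Table.Schema,
		"table":  e.Table.Name,
		"action": e.Action,
		"rows":   e.Rows,
	})
	if err != nil {
		return nil, err
	}
	return []interface{}{string(msg)}, nil
}

func (s *StdoutSink) Publish(reqs []interface{}) error {
	for _, r := range reqs {
		fmt.Println(r)
	}
	return nil
}
```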
"github.com/siddontang/go-mysql/mysql" 8 | "github.com/siddontang/go-mysql/replication" 9 | log "github.com/sirupsen/logrus" 10 | "time" 11 | ) 12 | 13 | type eventHandler struct { 14 | s *SyncManager 15 | } 16 | 17 | type posSaver struct { 18 | pos mysql.Position 19 | force bool 20 | } 21 | 22 | // OnRotate the function to handle binlog position rotation 23 | func (e *eventHandler) OnRotate(roateEvent *replication.RotateEvent) error { 24 | pos := mysql.Position{ 25 | Name: string(roateEvent.NextLogName), 26 | Pos: uint32(roateEvent.Position), 27 | } 28 | 29 | e.s.syncCh <- posSaver{pos, true} 30 | 31 | return e.s.Ctx.Err() 32 | } 33 | 34 | func (e *eventHandler) OnTableChanged(schema string, table string) error { 35 | return nil 36 | } 37 | 38 | // OnDDL the function to handle DDL event 39 | func (e *eventHandler) OnDDL(nextPos mysql.Position, _ *replication.QueryEvent) error { 40 | e.s.syncCh <- posSaver{nextPos, true} 41 | return e.s.Ctx.Err() 42 | } 43 | 44 | // OnRow the function to handle row changed 45 | func (e *eventHandler) OnRow(rows *canal.RowsEvent) error { 46 | var ( 47 | reqs []interface{} 48 | err error 49 | //matchFlag = true 50 | ) 51 | 52 | // 处理分表分库逻辑 53 | if e.s.rowMapper != nil { 54 | rows = e.s.rowMapper.Transform(rows) 55 | } 56 | 57 | switch rows.Action { 58 | case canal.InsertAction: 59 | e.makeInsertRequest(canal.InsertAction, rows.Rows) 60 | case canal.DeleteAction: 61 | e.makeDeleteRequest(canal.DeleteAction, rows.Rows) 62 | case canal.UpdateAction: 63 | e.makeUpdateRequest(rows.Rows) 64 | default: 65 | err = errors.Errorf("invalid rows action %s", rows.Action) 66 | } 67 | 68 | reqs, err = e.s.sink.Parse(rows) 69 | if err != nil { 70 | e.s.cancel() 71 | return fmt.Errorf("make %s parse binlog err: %v, close sync", rows.Action, err) 72 | } 73 | 74 | if len(reqs) > 0 { 75 | e.s.syncCh <- reqs 76 | } 77 | return e.s.Ctx.Err() 78 | } 79 | 80 | // OnXID the function to handle XID event 81 | func (e *eventHandler) OnXID(nextPos mysql.Position) error { 82 | e.s.syncCh <- posSaver{nextPos, false} 83 | return e.s.Ctx.Err() 84 | } 85 | 86 | func (e *eventHandler) OnGTID(gtid mysql.GTIDSet) error { 87 | return nil 88 | } 89 | 90 | func (e *eventHandler) OnPosSynced(pos mysql.Position, set mysql.GTIDSet, force bool) error { 91 | return nil 92 | } 93 | 94 | func (e *eventHandler) String() string { 95 | return "EventHandler" 96 | } 97 | 98 | // 记录insert的行数 99 | func (e *eventHandler) makeInsertRequest(action string, rows [][]interface{}) error { 100 | return e.makeRequest(action, rows) 101 | } 102 | 103 | // 记录delete的行数 104 | func (e *eventHandler) makeDeleteRequest(action string, rows [][]interface{}) error { 105 | return e.makeRequest(action, rows) 106 | } 107 | 108 | // for insert and delete 109 | func (e *eventHandler) makeRequest(action string, rows [][]interface{}) error { 110 | realRows := int64(len(rows)) 111 | realRowsString := string(realRows) 112 | switch action { 113 | case canal.DeleteAction: 114 | e.s.DeleteNum.Add(realRows) 115 | dbDeleteNum.WithLabelValues(realRowsString).Inc() 116 | case canal.InsertAction: 117 | e.s.InsertNum.Add(realRows) 118 | dbInsertNum.WithLabelValues(realRowsString).Inc() 119 | default: 120 | log.Infof("make request no tasks to be processed: None") 121 | } 122 | return nil 123 | } 124 | 125 | // 统计binlog更新的行数 126 | func (e *eventHandler) makeUpdateRequest(rows [][]interface{}) error { 127 | if len(rows)%2 != 0 { 128 | return errors.Errorf("invalid update rows event, must have 2x rows, but %d", len(rows)) 129 | } 130 | realRows := 
--------------------------------------------------------------------------------
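For completeness, wiring the pieces together might look like the sketch below; the config loading is elided (see conf/config.go and go-mysql-kafka.go for the real entry point), and StdoutSink is the hypothetical sink sketched earlier:

```go
package main

import (
	log "github.com/sirupsen/logrus"

	"go-mysql-kafka/conf"
	"go-mysql-kafka/sync_manager"
)

func main() {
	// Assumed to be populated from app.toml; loading is elided here.
	var c *conf.ConfigSet

	// A nil holder falls back to FilePositionHolder(dataDir);
	// DefaultRowMapper passes events through unchanged.
	sm, err := sync_manager.NewSyncManager(c, nil,
		&sync_manager.DefaultRowMapper{}, &sync_manager.StdoutSink{})
	if err != nil {
		log.Fatalf("new sync manager err: %v", err)
	}
	defer sm.Close()

	// Run blocks: it starts syncLoop and replicates from the saved position.
	if err := sm.Run(); err != nil {
		log.Fatalf("run sync manager err: %v", err)
	}
}
```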