├── mysql
│   ├── README.md
│   ├── get_binlog_timestamp_info
│   │   ├── go.mod
│   │   ├── get_binlog_timestamp_info_single_thread.go
│   │   ├── go.sum
│   │   └── get_binlog_timestamp_info.go
│   ├── monitor_ddl_progress.sh
│   ├── mysql_native_password.py
│   ├── connection_test_benchemark.py
│   ├── mysql2mysql.py
│   ├── find_config_diff.py
│   ├── db_slowlog_digest.go
│   ├── generate_my_cnf.go
│   └── binlog_summary.py
├── redis
│   ├── redis_migrate
│   │   ├── README.md
│   │   ├── common
│   │   │   ├── util.go
│   │   │   └── ssh.go
│   │   ├── redis_migrate.go
│   │   ├── .idea
│   │   │   └── vcs.xml
│   │   ├── cmd
│   │   │   ├── root.go
│   │   │   ├── copyrdb.go
│   │   │   ├── copyslot.go
│   │   │   └── scpfile.go
│   │   └── redisUtil
│   │       ├── client.go
│   │       ├── single.go
│   │       └── cluster.go
│   ├── install_redis_cluster.sh
│   ├── migrate_redis_cluter.go
│   └── redis_mem_usage_analyzer.py
└── README.md
/mysql/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/redis/redis_migrate/README.md:
--------------------------------------------------------------------------------
1 | # redis_migrate
--------------------------------------------------------------------------------
/redis/redis_migrate/common/util.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 |
--------------------------------------------------------------------------------
/redis/redis_migrate/redis_migrate.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/slowtech/redis_migrate/cmd"
5 | )
6 |
7 | func main() {
8 | cmd.Execute()
9 | }
--------------------------------------------------------------------------------
/redis/redis_migrate/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="VcsDirectoryMappings">
4 |     <mapping directory="$PROJECT_DIR$" vcs="Git" />
5 |   </component>
6 | </project>
--------------------------------------------------------------------------------
/redis/redis_migrate/cmd/root.go:
--------------------------------------------------------------------------------
1 | package cmd
2 | 
3 | import (
4 | "fmt"
5 | "os"
6 | 
7 | "github.com/spf13/cobra"
8 | )
9 | 
10 | var rootCmd = &cobra.Command{
11 | Use: "redis_migrate",
12 | Long: `Migrate Redis Cluster by copying the RDB file`,
13 | Version: "0.1",
14 | }
15 | 
16 | func Execute() {
17 | if err := rootCmd.Execute(); err != nil {
18 | fmt.Println(err)
19 | os.Exit(1)
20 | }
21 | }
22 | 
--------------------------------------------------------------------------------
/redis/redis_migrate/redisUtil/client.go:
--------------------------------------------------------------------------------
1 | package redisUtil
2 |
3 | import (
4 | "github.com/go-redis/redis"
5 | "log"
6 | )
7 |
8 | func createClient(addr string) *redis.Client {
9 | client := redis.NewClient(&redis.Options{
10 | Addr: addr,
11 | Password: "",
12 | DB: 0,
13 | })
14 | _, err := client.Ping().Result()
15 | if err != nil {
16 | log.Fatalf("Can't establish connection to %s: %s", addr, err)
17 | }
18 | return client
19 | }
20 |
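21 | // Note: createClient assumes no AUTH password and logical DB 0; adjust the
22 | // redis.Options above if the target deployment differs.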
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dba-toolkit
2 | A collection of DBA tools
3 |
4 | ## monitor_ddl_progress.sh
5 | Monitors the progress of MySQL Online DDL. Simple and direct.
6 | 
7 | #### Usage
8 | Just pass the table's data directory and the table name, e.g.:
9 | ```
10 | # sh monitor_ddl_progress.sh /dbdata/mysql/3306/data/sbtest/ sbtest1
11 | Altering sbtest.sbtest1 ...
12 | Altering sbtest.sbtest1: 16% 00:01:08 remain
13 | Altering sbtest.sbtest1: 28% 00:01:03 remain
14 | Altering sbtest.sbtest1: 38% 00:01:01 remain
15 | Altering sbtest.sbtest1: 48% 00:00:47 remain
16 | Altering sbtest.sbtest1: 59% 00:00:39 remain
17 | Altering sbtest.sbtest1: 68% 00:00:33 remain
18 | Altering sbtest.sbtest1: 78% 00:00:23 remain
19 | Altering sbtest.sbtest1: 87% 00:00:12 remain
20 | Altering sbtest.sbtest1: 98% 00:00:01 remain
21 | Successfully altered sbtest.sbtest1
22 | ```
23 |
24 |
25 |
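26 | #### How it works
27 | The script compares the size of the intermediate `#sql*.ibd` file that MySQL
28 | creates while rebuilding the table with the size of the original `.ibd` file.
29 | You can watch the raw sizes yourself (a minimal sketch, assuming the same
30 | datadir layout as in the example above):
31 | ```
32 | # Both paths are illustrative; point them at your table's datadir
33 | watch -n 10 'ls -l /dbdata/mysql/3306/data/sbtest/sbtest1.ibd /dbdata/mysql/3306/data/sbtest/#sql*.ibd'
34 | ```
35 | 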
--------------------------------------------------------------------------------
/redis/redis_migrate/redisUtil/single.go:
--------------------------------------------------------------------------------
1 | package redisUtil
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "time"
7 | "path/filepath"
8 | )
9 |
10 | // BgSaveAndCheck triggers a BGSAVE and waits until it finishes, detected by
11 | // the LASTSAVE timestamp changing.
12 | func BgSaveAndCheck(addr string) {
13 | client := createClient(addr)
14 | lastSaveTime, err := client.LastSave().Result()
15 | if err != nil {
16 | log.Fatal(err)
17 | }
18 | fmt.Println(lastSaveTime)
19 | _, err = client.BgSave().Result()
20 | if err != nil {
21 | log.Fatal(err)
22 | }
23 | for {
24 | saveTime, _ := client.LastSave().Result()
25 | if saveTime != lastSaveTime {
26 | break
27 | }
28 | time.Sleep(time.Second * 1)
29 | }
30 | }
31 | 
32 | // GetRDBPath builds the RDB file path from the "dir" and "dbfilename" config
33 | // values; CONFIG GET returns a [key, value] pair, so index 1 holds the value.
34 | func GetRDBPath(addr string) string {
35 | client := createClient(addr)
36 | result, _ := client.ConfigGet("dir").Result()
37 | dir := result[1].(string)
38 | result, _ = client.ConfigGet("dbfilename").Result()
39 | dbfilename := result[1].(string)
40 | return filepath.Join(dir, dbfilename)
41 | }
42 | 
--------------------------------------------------------------------------------
/mysql/get_binlog_timestamp_info/go.mod:
--------------------------------------------------------------------------------
1 | module get_binlog_timestamp_info
2 |
3 | go 1.20
4 |
5 | require (
6 | github.com/go-mysql-org/go-mysql v1.7.0
7 | github.com/go-sql-driver/mysql v1.6.0
8 | github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07
9 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
10 | golang.org/x/net v0.0.0-20201021035429-f5854403a974
11 | )
12 |
13 | require (
14 | github.com/google/uuid v1.3.0 // indirect
15 | github.com/mattn/go-runewidth v0.0.9 // indirect
16 | github.com/olekukonko/tablewriter v0.0.5 // indirect
17 | github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 // indirect
18 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
19 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
20 | go.uber.org/atomic v1.7.0 // indirect
21 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f // indirect
22 | )
23 |
--------------------------------------------------------------------------------
/redis/redis_migrate/cmd/copyrdb.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "fmt"
5 | "github.com/spf13/cobra"
6 | )
7 |
8 | var (
9 | copyRDBCmd = &cobra.Command{
10 | Use: "copy",
11 | Short: "copy RDB from source Redis Cluster to dest Redis Cluster",
12 | Long: `copy RDB from source Redis Cluster to dest Redis Cluster`,
13 | Example: `
14 | $ redis_migrate copy --source 192.168.244.10:6379 --dest 192.168.244.20:6379
15 | `,
16 | Run: copyRDB,
17 | }
18 | runSource string
19 | runDest string
20 | )
21 |
22 | func init() {
23 | rootCmd.AddCommand(copyRDBCmd)
24 | copyRDBCmd.Flags().StringVarP(&runSource, "source", "", "", "The source Redis Cluster Address")
25 | copyRDBCmd.Flags().StringVarP(&runDest, "dest", "", "", "The dest Redis Cluster Address")
26 | copyRDBCmd.MarkFlagRequired("source")
27 | copyRDBCmd.MarkFlagRequired("dest")
28 | }
29 |
30 | func copyRDB(cmd *cobra.Command, args []string) {
31 | // Placeholder: the actual RDB copy logic is not implemented yet.
32 | fmt.Println("helloworld")
33 | }
34 | 
--------------------------------------------------------------------------------
/redis/redis_migrate/cmd/copyslot.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "github.com/slowtech/redis_migrate/redisUtil"
5 | "github.com/spf13/cobra"
6 | )
7 |
8 | var (
9 | copySlotCmd = &cobra.Command{
10 | Use: "prepare",
11 | Short: "Reset the dest Redis Cluster",
12 | Long: `Reset the dest Redis Cluster and rearrange its slots to match the source Redis Cluster`,
13 | Example: `
14 | $ redis_migrate prepare --source 192.168.244.10:6379 --dest 192.168.244.20:6379
15 | `,
16 | Run: copySlot,
17 | }
18 | source string
19 | dest string
20 | )
21 |
22 | func init() {
23 | rootCmd.AddCommand(copySlotCmd)
24 | copySlotCmd.Flags().StringVarP(&source, "source", "s", "", "The source Redis Cluster Address")
25 | copySlotCmd.Flags().StringVarP(&dest, "dest", "d", "", "The dest Redis Cluster Address")
26 | copySlotCmd.MarkFlagRequired("source")
27 | copySlotCmd.MarkFlagRequired("dest")
28 |
29 | }
30 |
31 | func copySlot(cmd *cobra.Command, args []string) {
32 | redisUtil.CopySlotInfo(source,dest)
33 | }
34 |
--------------------------------------------------------------------------------
/redis/redis_migrate/cmd/scpfile.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "fmt"
5 | "github.com/slowtech/redis_migrate/common"
6 | "github.com/spf13/cobra"
7 | )
8 |
9 | var (
10 | scpFileCmd = &cobra.Command{
11 | Use: "scp",
12 | Short: "scp the specified file to dest ip",
13 | Long: `scp the specified file to dest ip`,
14 | Example: `
15 | $ redis_migrate scp --sfile /opt/redis/data/dump_6379.rdb --dest 192.168.244.20 --dfile /opt/redis/data/dump_6379.rdb
16 | `,
17 | Run: scpFile,
18 | }
19 | sourceFile string
20 | destFile string
21 | sourceHost string
22 | destHost string
23 | )
24 |
25 | func init() {
26 | rootCmd.AddCommand(scpFileCmd)
27 | scpFileCmd.Flags().StringVarP(&destHost, "dest", "d", "", "The dest Host IP")
28 | scpFileCmd.Flags().StringVarP(&sourceFile, "sfile", "", "", "The file to scp in source Host")
29 | scpFileCmd.Flags().StringVarP(&destFile, "dfile", "", "", "The destination path where the file will be saved")
30 | scpFileCmd.MarkFlagRequired("dest")
31 | scpFileCmd.MarkFlagRequired("sfile")
32 | scpFileCmd.MarkFlagRequired("dfile")
33 | }
34 |
35 | func scpFile(cmd *cobra.Command, args []string) {
36 | var host common.Host
37 | // Credentials are hard-coded for the demo environment; adjust as needed.
38 | host.Init(destHost, "22", "root", "123456")
39 | host.Scp(sourceFile, destFile)
40 | fmt.Printf("Copied %s to %s:%s\n", sourceFile, destHost, destFile)
41 | }
42 |
43 |
44 |
--------------------------------------------------------------------------------
/redis/redis_migrate/common/ssh.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "golang.org/x/crypto/ssh"
7 | "io"
8 | "net"
9 | "os"
10 | "path"
11 | "strings"
12 | )
13 |
14 |
15 | type Host struct {
16 | Connection *ssh.Client
17 | }
18 |
19 | func (host *Host) Init(hostname string, port string, username string, password string) {
20 | config := &ssh.ClientConfig{
21 | User: username,
22 | Auth: []ssh.AuthMethod{ssh.Password(password)},
23 | HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
24 | return nil
25 | },
26 | }
27 | hostaddress := strings.Join([]string{hostname, port}, ":")
28 | var err error
29 | host.Connection, err = ssh.Dial("tcp", hostaddress, config)
30 | if err != nil {
31 | panic(err.Error())
32 | }
33 |
34 | }
35 |
36 | func (host *Host) Run(cmd string) {
37 | session, err := host.Connection.NewSession()
38 | if err != nil {
39 | panic(err.Error())
40 | }
41 | defer session.Close()
42 | fmt.Println(cmd)
43 | var buff bytes.Buffer
44 | session.Stdout = &buff
45 | if err := session.Run(cmd); err != nil {
46 | panic(err)
47 | }
48 | fmt.Println(buff.String())
49 | }
50 |
51 | func (host *Host) Scp(sourceFilePath string,destFilePath string) {
52 | session, err := host.Connection.NewSession()
53 | if err != nil {
54 | panic(err.Error())
55 | }
56 | defer session.Close()
57 |
58 | destFile:= path.Base(destFilePath)
59 | destDir := path.Dir(destFilePath)
60 |
61 | go func() {
62 | // Drive the remote "scp -t" sink: send the header "C<mode> <size> <name>",
63 | // then the raw file bytes, then a \x00 byte to mark end of file.
64 | buf := make([]byte, 1024)
65 | w, _ := session.StdinPipe()
66 | defer w.Close()
67 | f, err := os.Open(sourceFilePath)
68 | if err != nil {
69 | panic(err)
70 | }
71 | defer f.Close()
72 | fileInfo, _ := f.Stat()
73 | fmt.Fprintln(w, "C0644", fileInfo.Size(), destFile)
74 | for {
75 | n, err := f.Read(buf)
76 | if n > 0 {
77 | w.Write(buf[:n])
78 | }
79 | if err == io.EOF {
80 | fmt.Fprint(w, "\x00") // confirm end of file to the remote scp
81 | return
82 | }
83 | if err != nil {
84 | panic(err)
85 | }
86 | }
87 | }()
88 | if err := session.Run("/usr/bin/scp -qrt " + destDir); err != nil {
89 | fmt.Println(err)
90 | }
91 | }
92 | 
--------------------------------------------------------------------------------
/mysql/monitor_ddl_progress.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 2 ];then
4 | echo "Usage: sh $0 table_directory table_name"
5 | echo "Example: sh $0 /var/lib/mysql/db1 t1"
6 | exit
7 | fi
8 |
9 | table_directory=$1
10 | target_table=$2
11 |
12 | function get_file_size() {
13 | local file=$1
14 | file_size=`stat -c '%s' $file 2>/dev/null`
15 | echo $file_size
16 | }
17 |
18 | target_table_file="$table_directory"/"$target_table".ibd
19 |
20 | if [[ ! -f "$target_table_file" ]]
21 | then
22 | echo "The $target_table.ibd does not exist in $table_directory !!!"
23 | exit
24 | fi
25 |
26 | target_table_file_size=`get_file_size "$target_table_file"`
27 | db_name=`basename "$table_directory"`
28 |
29 | intermediate_table_file=`ls "$table_directory"/"#sql"*".ibd" 2>/dev/null`
30 |
31 | if [[ -z "$intermediate_table_file" ]]
32 | then
33 | echo "Cannot find the intermediate table for $target_table.ibd. Maybe the DDL has not started yet"
34 | exit
35 | fi
36 | last_intermediate_table_file_size=`get_file_size "$intermediate_table_file"`
37 |
38 | echo "Altering $db_name.$target_table ..."
39 |
40 | while true
41 | do
42 | sleep 10
43 | intermediate_table_file_size=`get_file_size "$intermediate_table_file"`
44 | if [[ -z "$intermediate_table_file_size" ]]
45 | then
46 | echo "Successfully altered $db_name.$target_table"
47 | exit
48 | fi
49 | percent=`echo "$intermediate_table_file_size*100/$target_table_file_size" | bc`
50 | if [[ "$percent" -gt 100 ]]
51 | then
52 | percent=100
53 | fi
54 | alter_speed=`echo "scale=2;($intermediate_table_file_size-$last_intermediate_table_file_size)/10" | bc`
55 | # Skip this round if the file size did not change, to avoid dividing by zero
56 | if [[ `echo "$alter_speed == 0" | bc` -eq 1 ]]
57 | then
58 | continue
59 | fi
60 | remain_second=`echo "($target_table_file_size-$intermediate_table_file_size)/$alter_speed" |bc `
61 | if [[ "$remain_second" -lt 0 ]]
62 | then
63 | remain_second=0
64 | fi
65 | remain_time=`date -u -d @$remain_second +"%T"`
66 | echo "Altering $db_name.$target_table: $percent% $remain_time remain"
67 | last_intermediate_table_file_size=$intermediate_table_file_size
68 | done
--------------------------------------------------------------------------------
/redis/install_redis_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | pkill redis-server
3 | sleep 3
4 | rm -rf /opt/redis/
5 | mkdir -p /opt/redis/data/
6 | mkdir -p /opt/redis/conf/
7 | mkdir -p /opt/redis/log
8 | cd /opt/redis/conf/
9 | cat > redis_6379.conf << EOF
10 | port 6379
11 | daemonize yes
12 | pidfile "/opt/redis/data/redis_6379.pid"
13 | loglevel notice
14 | logfile "/opt/redis/log/redis_6379.log"
15 | dbfilename "dump_6379.rdb"
16 | dir "/opt/redis/data"
17 | appendonly yes
18 | appendfilename "appendonly_6379.aof"
19 | cluster-enabled yes
20 | cluster-config-file /opt/redis/conf/nodes-6379.conf
21 | cluster-node-timeout 15000
22 | EOF
23 |
24 | cp redis_6379.conf redis_6380.conf
25 | cp redis_6379.conf redis_6381.conf
26 | cp redis_6379.conf redis_6382.conf
27 | cp redis_6379.conf redis_6383.conf
28 | cp redis_6379.conf redis_6384.conf
29 |
30 | sed -i 's/6379/6380/g' redis_6380.conf
31 | sed -i 's/6379/6381/g' redis_6381.conf
32 | sed -i 's/6379/6382/g' redis_6382.conf
33 | sed -i 's/6379/6383/g' redis_6383.conf
34 | sed -i 's/6379/6384/g' redis_6384.conf
35 |
36 | cd /opt/redis/conf
37 | redis-server redis_6379.conf
38 | redis-server redis_6380.conf
39 | redis-server redis_6381.conf
40 | redis-server redis_6382.conf
41 | redis-server redis_6383.conf
42 | redis-server redis_6384.conf
43 |
44 | redis-cli -p 6379 cluster meet 192.168.244.10 6380
45 | redis-cli -p 6379 cluster meet 192.168.244.10 6381
46 | redis-cli -p 6379 cluster meet 192.168.244.10 6382
47 | redis-cli -p 6379 cluster meet 192.168.244.10 6383
48 | redis-cli -p 6379 cluster meet 192.168.244.10 6384
49 |
50 | sleep 3
51 | echo "cluster replicate `redis-cli -p 6379 cluster nodes | grep 6379 | awk '{print $1}'`" | redis-cli -p 6382 -x
52 | echo "cluster replicate `redis-cli -p 6379 cluster nodes | grep 6380 | awk '{print $1}'`" | redis-cli -p 6383 -x
53 | echo "cluster replicate `redis-cli -p 6379 cluster nodes | grep 6381 | awk '{print $1}'`" | redis-cli -p 6384 -x
54 |
55 | redis-cli -p 6379 cluster addslots {0..5461}
56 |
57 | redis-cli -p 6380 cluster addslots {5462..10922}
58 |
59 | redis-cli -p 6381 cluster addslots {10923..16383}
60 | sleep 5
61 |
62 | redis-cli -p 6379 cluster nodes
63 |
64 |
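65 | # Note: on Redis 5+, an equivalent 3-master/3-replica cluster can be created in
66 | # one step (a sketch, assuming all six instances listen on 192.168.244.10):
67 | #
68 | #   redis-cli --cluster create 192.168.244.10:6379 192.168.244.10:6380 \
69 | #     192.168.244.10:6381 192.168.244.10:6382 192.168.244.10:6383 \
70 | #     192.168.244.10:6384 --cluster-replicas 1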
--------------------------------------------------------------------------------
/mysql/mysql_native_password.py:
--------------------------------------------------------------------------------
1 | import secrets
2 | import hashlib
3 |
4 | def compute_sha1_hash(data):
5 | # Create a SHA-1 digest object
6 | sha1 = hashlib.sha1()
7 |
8 | # Feed the data into the digest
9 | sha1.update(data)
10 |
11 | # Get the binary form of the digest
12 | digest = sha1.digest()
13 |
14 | return digest
15 |
16 | password = "123456".encode('utf-8')
17 | hash_stage1 = compute_sha1_hash(password)
18 | hash_stage2 = compute_sha1_hash(hash_stage1)
19 | print("hash_stage1: ", hash_stage1)
20 | print("hash_stage2: ", hash_stage2)
21 | print("authentication_string: *%s"%hash_stage2.hex().upper())
22 |
23 |
24 | def generate_user_salt(buffer_len):
25 | # Generate a random byte sequence
26 | random_bytes = secrets.token_bytes(buffer_len)
27 |
28 | # Decode the bytes into a valid UTF-8 string
29 | salt = random_bytes.decode('utf-8', errors='ignore')
30 |
31 | # Replace special characters so the salt contains neither '\0' nor '$'
32 | salt = salt.replace('\0', '\1').replace('$', '\2')
33 |
34 | return salt.encode('utf-8')
35 |
36 | buffer_len = 20
37 | generated_salt = generate_user_salt(buffer_len)
38 | print("salt: %s"%generated_salt)
39 |
40 | def scramble_411(password, seed):
41 | # Compute the SHA-1 hash of the password
42 | password_hash_stage1 = hashlib.sha1(password).digest()
43 |
44 | # Compute the SHA-1 hash of password_hash_stage1
45 | password_hash_stage2 = hashlib.sha1(password_hash_stage1).digest()
46 | # Hash the seed followed by password_hash_stage2
47 | md = hashlib.sha1()
48 | md.update(seed)
49 | md.update(password_hash_stage2)
50 | to_be_xored = md.digest()
51 |
52 | # XOR each byte of to_be_xored with the corresponding byte of password_hash_stage1
53 | reply = bytes(x ^ y for x, y in zip(to_be_xored, password_hash_stage1))
54 | return reply
55 |
56 | client_reply = scramble_411(password, generated_salt)
57 | print("client reply: ",client_reply)
58 |
59 | def compute_sha1_hash_multi(buf1, buf2):
60 | # Create a SHA-1 hash object
61 | sha1_context = hashlib.sha1()
62 |
63 | # Feed buf1 and buf2 into the hash
64 | sha1_context.update(buf1)
65 | sha1_context.update(buf2)
66 |
67 | # Get the final SHA-1 digest
68 | digest = sha1_context.digest()
69 | return digest
70 |
71 | def my_crypt(s1, s2):
72 | # Pair up corresponding bytes of s1 and s2 with zip,
73 | # then XOR each pair with the ^ operator
74 | result = bytes(a ^ b for a, b in zip(s1, s2))
75 |
76 | return result
77 |
78 | def check_scramble_sha1(client_reply, generated_salt, hash_stage2):
79 | buf=compute_sha1_hash_multi(generated_salt, hash_stage2)
80 | buf=my_crypt(buf, client_reply)
81 | hash_stage2_reassured=compute_sha1_hash(buf)
82 | print("hash_stage2_reassured: %s"%hash_stage2_reassured)
83 | if hash_stage2 == hash_stage2_reassured:
84 | print("passed")
85 | else:
86 | print("failed")
87 | check_scramble_sha1(client_reply, generated_salt, hash_stage2)
88 |
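89 | # Cross-check against a live server (assumption: a user created with the
90 | # mysql_native_password plugin and password '123456'): MySQL stores
91 | # '*' + HEX(SHA1(SHA1(password))), so it should equal the value printed above:
92 | #   SELECT user, authentication_string FROM mysql.user WHERE plugin = 'mysql_native_password';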
--------------------------------------------------------------------------------
/redis/migrate_redis_cluter.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/go-redis/redis"
6 | "log"
7 | "sort"
8 | "strings"
9 | )
10 |
11 | func createClient(addr string) *redis.Client {
12 | fmt.Println(addr)
13 | client := redis.NewClient(&redis.Options{
14 | Addr: addr,
15 | Password: "",
16 | DB: 0,
17 | })
18 | _, err := client.Ping().Result()
19 | if err != nil {
20 | log.Fatalf("Can't establish connection to %s: %s", addr, err)
21 | }
22 | return client
23 | }
24 |
25 | type SlotNodeMap struct {
26 | addr string
27 | start int
28 | end int
29 | }
30 |
31 | func GetSlotDistribute(addr string) []SlotNodeMap {
32 | client := createClient(addr)
33 | var slotNode []SlotNodeMap
34 | clusterSlot, err := client.ClusterSlots().Result()
35 | if err != nil {
36 | log.Fatal("Can't get the ClusterSlot info")
37 | }
38 | for _, each_node := range clusterSlot {
39 | slotNode = append(slotNode, SlotNodeMap{each_node.Nodes[0].Addr, each_node.Start, each_node.End})
40 | }
41 | sort.Slice(slotNode, func(i, j int) bool {
42 | return slotNode[i].addr < slotNode[j].addr
43 | })
44 | return slotNode
45 | }
46 |
47 | func GetMasterSlaveMap(addr string) map[string]string {
48 | client := createClient(addr)
49 | result, err := client.ClusterNodes().Result()
50 | if err != nil {
51 | log.Fatal("Can't get the ClusterNode info")
52 | }
53 | nodes := make(map[string]map[string]string)
54 | for _, line := range strings.Split(result, "\n") {
55 | if len(line) == 0 {
56 | continue
57 | }
58 | nodeInfo := strings.Split(line, " ")
59 | id := nodeInfo[0]
60 | addr := nodeInfo[1]
61 | masterFlag := nodeInfo[2]
62 | masterId := nodeInfo[3]
63 | nodes[id] = map[string]string{
64 | "addr": addr,
65 | "masterFlag": masterFlag,
66 | "masterId": masterId,
67 | }
68 | }
69 | masterSlaveMap := make(map[string]string)
70 | for _, node := range nodes {
71 | if node["masterFlag"] == "slave" {
72 | masterId := node["masterId"]
73 | masterAddr := nodes[masterId]["addr"]
74 | masterSlaveMap[masterAddr] = node["addr"]
75 | }
76 | }
77 | for master, slave := range masterSlaveMap {
78 | fmt.Println(master, slave)
79 | }
80 | return masterSlaveMap
81 | }
82 |
83 | func ClusterReset(masterSlaveMap map[string]string) {
84 | nodes := getNodes(masterSlaveMap)
85 | for _, each_node := range nodes {
86 | client := createClient(each_node)
87 | _, err := client.ClusterResetSoft().Result()
88 | if err != nil {
89 | log.Println(err)
90 | }
91 | }
92 | }
93 |
94 | func getNodes(masterSlaveMap map[string]string) []string {
95 | var nodes []string
96 | for master, slave := range masterSlaveMap {
97 | nodes = append(nodes, master, slave)
98 | }
99 | return nodes
100 | }
101 |
102 | func CreateCluster(masterSlaveMap map[string]string) {
103 | nodes := getNodes(masterSlaveMap)
104 | fmt.Println(nodes)
105 | }
106 | func main() {
107 | destAddr := "192.168.244.20:6379"
108 | masterSlaveMap := GetMasterSlaveMap(destAddr)
109 | CreateCluster(masterSlaveMap)
110 | //ClusterReset(masterSlaveMap)
111 | }
112 |
113 |
--------------------------------------------------------------------------------
/mysql/connection_test_benchemark.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import time
4 | import pymysql
5 |
6 | def timer(func):
7 | def wrapper(*args, **kwargs):
8 | start_time = time.time()
9 | result = func(*args, **kwargs)
10 | end_time = time.time()
11 | elapsed_time = end_time - start_time
12 | return result, elapsed_time
13 | return wrapper
14 |
15 | @timer
16 | def test_ping(unix_socket, user, password, database, num_iterations):
17 | connection = None
18 | try:
19 | connection = pymysql.connect(unix_socket=unix_socket, user=user, password=password, database=database, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
20 | for _ in range(num_iterations):
21 | connection.ping(reconnect=False)
22 | except pymysql.MySQLError as e:
23 | print(f"Error during ping: {e}")
24 | finally:
25 | if connection:
26 | connection.close()
27 |
28 | @timer
29 | def test_select(unix_socket, user, password, database, num_iterations, sql):
30 | connection = None
31 | try:
32 | connection = pymysql.connect(unix_socket=unix_socket, user=user, password=password, database=database, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
33 | with connection.cursor() as cursor:
34 | for _ in range(num_iterations):
35 | cursor.execute(sql)
36 | except pymysql.MySQLError as e:
37 | print(f"Error during {sql}: {e}")
38 | finally:
39 | if connection:
40 | connection.close()
41 |
42 | unix_socket = "/data/mysql/3306/data/mysql.sock"
43 | user = "root"
44 | password = "123456"
45 | database = "information_schema"
46 | num_iterations = 100000 # number of iterations per test
47 |
48 | # Benchmark PING
49 | result, elapsed_time = test_ping(unix_socket, user, password, database, num_iterations)
50 | print(f"PING time for {num_iterations} iterations: {elapsed_time:.5f} seconds")
51 |
52 | # Benchmark SELECT 1
53 | result, elapsed_time = test_select(unix_socket, user, password, database, num_iterations, "SELECT 1")
54 | print(f"SELECT 1 time for {num_iterations} iterations: {elapsed_time:.5f} seconds")
55 |
56 | # Benchmark SHOW FULL TABLES FROM `information_schema` LIKE 'PROBABLYNOT'
57 | result, elapsed_time = test_select(unix_socket, user, password, database, num_iterations, "SHOW FULL TABLES FROM `information_schema` LIKE 'PROBABLYNOT'")
58 | print(f"SHOW FULL TABLES time for {num_iterations} iterations: {elapsed_time:.5f} seconds")
59 |
60 | # Benchmark a JDBC-style metadata query against INFORMATION_SCHEMA.TABLES
61 | new_get_tables_sql = "SELECT TABLE_SCHEMA AS TABLE_CAT, NULL AS TABLE_SCHEM, TABLE_NAME, CASE WHEN TABLE_TYPE='BASE TABLE' THEN CASE WHEN TABLE_SCHEMA = 'mysql' OR TABLE_SCHEMA = 'performance_schema' THEN 'SYSTEM TABLE' ELSE 'TABLE' END WHEN TABLE_TYPE='TEMPORARY' THEN 'LOCAL_TEMPORARY' ELSE TABLE_TYPE END AS TABLE_TYPE, TABLE_COMMENT AS REMARKS, NULL AS TYPE_CAT, NULL AS TYPE_SCHEM, NULL AS TYPE_NAME, NULL AS SELF_REFERENCING_COL_NAME, NULL AS REF_GENERATION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'PROBABLYNOT' HAVING TABLE_TYPE IN ('TABLE',null,null,null,null) ORDER BY TABLE_TYPE, TABLE_SCHEMA, TABLE_NAME"
62 | result, elapsed_time = test_select(unix_socket, user, password, database, num_iterations, new_get_tables_sql)
63 | print(f"INFORMATION_SCHEMA.TABLES time for {num_iterations} iterations: {elapsed_time:.5f} seconds")
64 |
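65 | # Note: each test opens one connection and reuses it for all iterations, so the
66 | # timings measure per-statement round-trip cost rather than connection setup cost.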
--------------------------------------------------------------------------------
/redis/redis_migrate/redisUtil/cluster.go:
--------------------------------------------------------------------------------
1 | package redisUtil
2 |
3 | import (
4 | "time"
5 | "log"
6 | "strings"
7 | "sort"
8 | )
9 |
10 | type cluster map[string]map[string]string
11 |
12 | // cluster["192.168.244.20:6379"] = map[string]string{
13 | // "id": 90a2b0a0453847dcd29be0a6e4dc86a574383ee2,
14 | // "slave": 192.168.244.20:6382,
15 | // }
16 | // 192.168.244.20:6379 is the master addr
17 |
18 | func clusterMap(addr string) cluster {
19 | client := createClient(addr)
20 | defer client.Close()
21 | result, err := client.ClusterNodes().Result()
22 | if err != nil {
23 | log.Fatal("Can't get the ClusterNode info")
24 | }
25 | nodes := make(map[string]map[string]string)
26 | for _, line := range strings.Split(result, "\n") {
27 | if len(line) == 0 {
28 | continue
29 | }
30 | nodeInfo := strings.Split(line, " ")
31 | id := nodeInfo[0]
32 | addr := nodeInfo[1]
33 | masterFlag := nodeInfo[2]
34 | masterId := nodeInfo[3]
35 | nodes[id] = map[string]string{
36 | "addr": addr,
37 | "masterFlag": masterFlag,
38 | "masterId": masterId,
39 | }
40 | }
41 | clustermap := make(cluster)
42 | for _, node := range nodes {
43 | if node["masterFlag"] == "slave" {
44 | masterId := node["masterId"]
45 | masterAddr := nodes[masterId]["addr"]
46 | clustermap[masterAddr] = map[string]string{
47 | "id": masterId,
48 | "slave": node["addr"],
49 | }
50 | }
51 | }
52 | return clustermap
53 | }
54 |
55 | func (c cluster) resetClusterInfo() {
56 | nodes := c.getNodes()
57 | for _, each_node := range nodes {
58 | client := createClient(each_node)
59 | defer client.Close()
60 | _, err := client.ClusterResetSoft().Result()
61 | if err != nil {
62 | log.Fatalln(err)
63 | }
64 | }
65 | }
66 |
67 | func (c cluster) getNodes() []string {
68 | var nodes []string
69 | for k, v := range c {
70 | nodes = append(nodes, k, v["slave"])
71 | }
72 | return nodes
73 | }
74 |
75 | func (c cluster) createCluster() {
76 | nodes := c.getNodes()
77 | firstNode, otherNode := nodes[0], nodes[1:]
78 | client := createClient(firstNode)
79 | defer client.Close()
80 | for _, each_node := range otherNode {
81 | go func(node string) {
82 | ipPort := strings.Split(node, ":")
83 | ip, port := ipPort[0], ipPort[1]
84 | _, err := client.ClusterMeet(ip, port).Result()
85 | if err != nil {
86 | log.Fatalln(err)
87 | }
88 | }(each_node)
89 | }
90 | for _, each_node := range nodes {
91 | client := createClient(each_node)
92 | defer client.Close()
93 | for {
94 | result, err := client.ClusterNodes().Result()
95 | if err != nil {
96 | log.Fatal(err)
97 | }
98 | if strings.Count(result, "master") == len(nodes) {
99 | break
100 | }
101 | time.Sleep(time.Millisecond * 100)
102 | }
103 | }
104 | for _, v := range c {
105 | go func(master map[string]string) {
106 | slaveIp := master["slave"]
107 | masterId := master["id"]
108 | client := createClient(slaveIp)
109 | defer client.Close()
110 | for {
111 | _, err := client.ClusterReplicate(masterId).Result()
112 | if err != nil {
113 | time.Sleep(time.Millisecond * 100)
114 | continue
115 | }
116 | break
117 | }
118 | }(v)
119 | }
120 | for _, each_node := range nodes {
121 | client := createClient(each_node)
122 | defer client.Close()
123 | for _, v := range c {
124 | for {
125 | result, err := client.ClusterSlaves(v["id"]).Result()
126 | if err == nil && len(result) == 1 {
127 | break
128 | }
129 | time.Sleep(time.Millisecond * 10)
130 | }
131 | }
132 | }
133 | }
134 |
135 | type slotMap map[string]map[string]int
136 |
137 | func getSlotDistribute(addr string) slotMap {
138 | client := createClient(addr)
139 | defer client.Close()
140 | s := make(slotMap)
141 | clusterSlot, err := client.ClusterSlots().Result()
142 | if err != nil {
143 | log.Fatal("Can't get the ClusterSlot info")
144 | }
145 | for _, each_node := range clusterSlot {
146 | s[each_node.Nodes[0].Addr] = map[string]int{
147 | "start": each_node.Start,
148 | "end": each_node.End,
149 | }
150 | }
151 | //sort.Slice(slotNode, func(i, j int) bool {
152 | // return slotNode[i].addr < slotNode[j].addr
153 | //})
154 | return s
155 | }
156 |
157 | func addSlots(sourceAddr string, destAddr string) {
158 | links := linkMaster(sourceAddr, destAddr)
159 | var sourceMaster string
160 | for k := range links {
161 | sourceMaster = k
162 | break
163 | }
164 | slotmap := getSlotDistribute(sourceMaster)
165 | for source, dest := range links {
166 | client := createClient(dest)
167 | defer client.Close()
168 | _, err := client.ClusterAddSlotsRange(slotmap[source]["start"], slotmap[source]["end"]).Result()
169 | if err != nil {
170 | log.Fatal(err)
171 | }
172 | }
173 | }
174 |
175 | func resetCluster(addr string) {
176 | c := clusterMap(addr)
177 | c.resetClusterInfo()
178 | c.createCluster()
179 | }
180 |
181 | func getMaster(addr string) []string {
182 | c := clusterMap(addr)
183 | var master []string
184 | for k := range c {
185 | master = append(master, k)
186 | }
187 | sort.Slice(master, func(i, j int) bool {
188 | return master[i] < master[j]
189 | })
190 | return master
191 | }
192 |
193 | func linkMaster(sourceAddr string, destAddr string) map[string]string {
194 | sourceMasters := getMaster(sourceAddr)
195 | destMasters := getMaster(destAddr)
196 |
197 | if len(sourceMasters) != len(destMasters) {
198 | log.Fatal("The number of nodes is not equal")
199 | }
200 | masterLink := make(map[string]string)
201 | for i := 0; i < len(sourceMasters); i++ {
202 | masterLink[sourceMasters[i]] = destMasters[i]
203 | }
204 | return masterLink
205 | }
206 |
207 | // CopySlotInfo soft-resets the dest cluster, rebuilds its master/replica
208 | // topology, and copies the source cluster's slot layout onto it.
209 | func CopySlotInfo(sourceAddr string, destAddr string) {
210 | resetCluster(destAddr)
211 | addSlots(sourceAddr, destAddr)
212 | }
213 | 
--------------------------------------------------------------------------------
/redis/redis_mem_usage_analyzer.py:
--------------------------------------------------------------------------------
1 | import redis
2 | import time
3 | import sys
4 | import datetime
5 | import argparse
6 |
7 | def get_connection(host, port, password, tls=False):
8 | kwargs = {
9 | 'host': host,
10 | 'port': port,
11 | 'password': password,
12 | 'socket_timeout': 5,
13 | }
14 | if tls:
15 | kwargs['ssl'] = True
16 | return redis.Redis(**kwargs)
17 |
18 | def bytes_to_human(n):
19 | """Convert bytes to a human-readable format."""
20 | if n == 0:
21 | return '0B'
22 | sign = '-' if n < 0 else ''
23 | n = abs(n)
24 | units = ['B', 'K', 'M', 'G', 'T', 'P']
25 | unit = units[0]
26 | for u in units[1:]:
27 | if n < 1024:
28 | break
29 | n /= 1024
30 | unit = u
31 | return f"{sign}{n:.2f}".rstrip('0').rstrip('.') + unit
32 |
33 | def calculate_total_mem_hashtable(memory_stats):
34 | total_hashtable_main = 0
35 | total_hashtable_expires = 0
36 | total_hashtable_slot_to_keys = 0
37 |
38 | # Iterate over every db entry in the memory stats dict
39 | for db_key, db_stats in memory_stats.items():
40 | # Only process keys that start with 'db.'
41 | if db_key.startswith('db.'):
42 | # Accumulate the totals
43 | total_hashtable_main += db_stats.get('overhead.hashtable.main',0)
44 | total_hashtable_expires += db_stats.get('overhead.hashtable.expires',0)
45 | total_hashtable_slot_to_keys += db_stats.get('overhead.hashtable.slot-to-keys',0)
46 |
47 | return total_hashtable_main + total_hashtable_expires + total_hashtable_slot_to_keys
48 |
49 | def calculate_total_keys(info):
50 | total_keys = 0
51 | for key, value in info.items():
52 | if key.startswith('db'):
53 | total_keys += value.get('keys',0)
54 | return total_keys
55 |
56 | def calculate_total_mem_overhead(info, keys_to_sum):
57 | return sum(info.get(key, 0) for key in keys_to_sum if key != 'overhead_total')
58 |
59 | def print_diff(old_info, old_memory_stats, new_info, new_memory_stats, interval):
60 | """Compute and print the deltas between two memory snapshots, grouped by section."""
61 | groups = {
62 | 'Summary': ['used_memory', 'used_memory_dataset', 'used_memory_overhead'],
63 | 'Overhead': ['overhead_total','mem_clients_normal', 'mem_clients_slaves', 'mem_replication_backlog', 'mem_aof_buffer','used_memory_startup', 'mem_cluster_links','used_memory_scripts','mem_hashtable'],
64 | 'Evict & Fragmentation': ['maxmemory', 'mem_not_counted_for_evict', 'mem_counted_for_evict', 'maxmemory_policy', 'used_memory_peak','used_memory_rss','mem_fragmentation_bytes'],
65 | 'Others': ['keys', 'instantaneous_ops_per_sec','lazyfree_pending_objects'],
66 | }
67 | now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
68 | header = "{:<30} {:<20} {:<20} {:<20}".format(f'Metric({now})', 'Old Value', f'New Value(+{interval}s)', 'Change per second')
69 | print(header)
70 | print("="*90)
71 |
72 | old_info["mem_hashtable"]= calculate_total_mem_hashtable(old_memory_stats)
73 | new_info["mem_hashtable"]= calculate_total_mem_hashtable(new_memory_stats)
74 | old_info["overhead_total"] = calculate_total_mem_overhead(old_info,groups['Overhead'])
75 | new_info["overhead_total"] = calculate_total_mem_overhead(new_info,groups['Overhead'])
76 | old_info["keys"] =calculate_total_keys(old_info)
77 | new_info["keys"] =calculate_total_keys(new_info)
78 | group_num = len(groups) # used so the loop can skip the newline after the last group
79 | i=0
80 | for group_name, keys in groups.items():
81 | i=i+1
82 | if group_name != 'Overhead':
83 | print(f"{group_name}")
84 | print("-"*45)
85 | for key in keys:
86 | if key not in old_info and key !='mem_counted_for_evict':
87 | continue
88 | old_value = old_info.get(key,0)
89 | new_value = new_info.get(key,0)
90 | if key == 'mem_counted_for_evict':
91 | old_value = old_info.get('used_memory', 0) - old_info.get('mem_not_counted_for_evict', 0)
92 | new_value = new_info.get('used_memory', 0) - new_info.get('mem_not_counted_for_evict', 0)
93 | if key in ["maxmemory_policy", "instantaneous_ops_per_sec"]:
94 | diff = ""
95 | else:
96 | diff = (new_value - old_value)/interval
97 | if any(x in key for x in ['ratio', 'percentage']) or key in ["maxmemory_policy","instantaneous_ops_per_sec","keys", "lazyfree_pending_objects"]:
98 | # Non-byte metrics: print the raw values without unit conversion
99 | old_value_display = old_value
100 | new_value_display = new_value
101 | diff_display = diff
102 | else:
103 | # Convert byte-based metrics to a human-readable unit
104 | old_value_display = bytes_to_human(old_value)
105 | new_value_display = bytes_to_human(new_value)
106 | diff_display = bytes_to_human(diff)
107 | if key == "overhead_total":
108 | key = "Overhead(Total)"
109 | print(f"{key:<30} {old_value_display:<20} {new_value_display:<20} {diff_display:<20}")
110 | print("-"*45)
111 | else:
112 | print(f"{key:<30} {old_value_display:<20} {new_value_display:<20} {diff_display:<20}")
113 | if i != group_num:
114 | print()
115 |
116 | def get_redis_info(r):
117 | pipeline = r.pipeline()
118 | pipeline.info()
119 | pipeline.memory_stats()
120 | results = pipeline.execute()
121 | return results[0], results[1]
122 |
123 | def print_client_list(r):
124 | client_list = r.client_list()
125 | sorted_list = sorted(client_list, key=lambda x: int(x['tot-mem']), reverse=True)
126 | header = f"{'ID':<5} {'Address':<18} {'Name':<5} {'Age':<6} {'Command':<15} {'User':<8} {'Qbuf':<10} {'Omem':<10} {'Total Memory':<15}"
127 | print(header)
128 | print('-' * len(header))
129 | for client in sorted_list:
130 | line = (f"{client.get('id'):<5} "
131 | f"{client.get('addr'):<18} "
132 | f"{client.get('name'):<5} "
133 | f"{client.get('age'):<6} "
134 | f"{client.get('cmd'):<15} "
135 | f"{client.get('user'):<8} "
136 | f"{bytes_to_human(int(client.get('qbuf'))):<10} "
137 | f"{bytes_to_human(int(client.get('omem'))):<10} "
138 | f"{bytes_to_human(int(client.get('tot-mem'))):<15}")
139 | print(line)
140 |
141 | def main():
142 | parser = argparse.ArgumentParser(description='Monitor Redis memory usage and statistics.')
143 | parser.add_argument('-host', '--hostname', type=str, default='127.0.0.1', help='Server hostname (default: 127.0.0.1)')
144 | parser.add_argument('-p', '--port', type=int, default=6379, help='Server port (default: 6379)')
145 | parser.add_argument('-a', '--password', type=str, help='Password for Redis Auth')
146 | parser.add_argument('--tls', action='store_true', help='Enable TLS for Redis connection')
147 | parser.add_argument('-i', '--interval', type=int, default=3, help='Refresh interval in seconds (default: 3)')
148 | parser.add_argument('-c', '--client', action='store_true', help='Show client list info')
149 | args = parser.parse_args()
150 | print(args)
151 | try:
152 | r = get_connection(args.hostname, args.port, args.password, args.tls)
153 | except Exception as e:
154 | print(f"Failed to connect to Redis: {e}")
155 | return
156 |
157 | if args.client:
158 | print_client_list(r)
159 | return
160 |
161 | old_info, old_memory_stats = get_redis_info(r)
162 | while True:
163 | time.sleep(args.interval)
164 | new_info, new_memory_stats = get_redis_info(r)
165 | print_diff(old_info, old_memory_stats, new_info, new_memory_stats, args.interval)
166 | old_info, old_memory_stats = new_info, new_memory_stats
167 |
168 | if __name__ == "__main__":
169 | main()
170 |
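171 | # Example usage (a sketch):
172 | #   python redis_mem_usage_analyzer.py -host 127.0.0.1 -p 6379 -i 3
173 | #   python redis_mem_usage_analyzer.py -c   # one-shot client list sorted by tot-mem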
--------------------------------------------------------------------------------
/mysql/get_binlog_timestamp_info/get_binlog_timestamp_info_single_thread.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "database/sql"
5 | "flag"
6 | "fmt"
7 | "github.com/go-mysql-org/go-mysql/mysql"
8 | "github.com/go-mysql-org/go-mysql/replication"
9 | _ "github.com/go-sql-driver/mysql"
10 | "github.com/olekukonko/tablewriter"
11 | "github.com/siddontang/go-log/log"
12 | "golang.org/x/crypto/ssh/terminal"
13 | "golang.org/x/net/context"
14 | "os"
15 | "strconv"
16 | "strings"
17 | "time"
18 | )
19 |
20 | type BinlogInfo struct {
21 | LogName string
22 | FileSize string
23 | StartTime uint32
24 | EndTime uint32
25 | PreviousGTIDs string
26 | NextLogPreviousGTIDs string
27 | }
28 |
29 | func GetGTIDSubtract(gtid1, gtid2 string) (string, error) {
30 | // Parse both GTID sets
31 | parsedGTID1, err := mysql.ParseGTIDSet("mysql", gtid1)
32 | if err != nil {
33 | return "", fmt.Errorf("error parsing GTID1: %v", err)
34 | }
35 | m1 := *parsedGTID1.(*mysql.MysqlGTIDSet)
36 | parsedGTID2, err := mysql.ParseGTIDSet("mysql", gtid2)
37 | if err != nil {
38 | return "", fmt.Errorf("error parsing GTID2: %v", err)
39 | }
40 |
41 | m2 := *parsedGTID2.(*mysql.MysqlGTIDSet)
42 | // Compute the difference gtid1 - gtid2
43 | err = m1.Minus(m2)
44 | if err != nil {
45 | return "", fmt.Errorf("error calculating GTID difference: %v", err)
46 | }
47 |
48 | return m1.String(), nil
49 | }
50 |
51 | func ExtractGTIDSuffix(gtidStr string) string {
52 | if !strings.Contains(gtidStr, ",") && strings.Contains(gtidStr, ":") {
53 | parts := strings.Split(gtidStr, ":")
54 | if len(parts) == 2 {
55 | return parts[1]
56 | }
57 | }
58 | return gtidStr
59 | }
60 |
61 | func ConvertUnixTimestampToFormattedTime(unixTimestamp int64) (string, error) {
62 | // Convert the Unix timestamp to a time.Time
63 | t := time.Unix(unixTimestamp, 0)
64 |
65 | // Format with the default date-time layout
66 | formattedTime := t.Format("2006-01-02 15:04:05")
67 |
68 | return formattedTime, nil
69 | }
70 |
71 | // ConvertBytesToHumanReadable converts a uint64 byte count into a human-readable unit
72 | func ConvertBytesToHumanReadable(bytes uint64) string {
73 | const (
74 | kib = 1024
75 | mib = 1024 * kib
76 | gib = 1024 * mib
77 | )
78 |
79 | unit := "bytes"
80 | divisor := uint64(1)
81 |
82 | switch {
83 | case bytes >= gib:
84 | divisor = gib
85 | unit = "GB"
86 | case bytes >= mib:
87 | divisor = mib
88 | unit = "MB"
89 | case bytes >= kib:
90 | divisor = kib
91 | unit = "KB"
92 | }
93 |
94 | value := float64(bytes) / float64(divisor)
95 | format := "%.2f %s"
96 | result := fmt.Sprintf(format, value, unit)
97 | return result
98 | }
99 |
100 | func getBinaryLogs(dsn string) ([][]string, error) {
101 | // Connect to the MySQL server
102 | db, err := sql.Open("mysql", dsn)
103 | if err != nil {
104 | return nil, fmt.Errorf("error connecting to MySQL: %v", err)
105 | }
106 | defer db.Close()
107 |
108 | // Run the SQL query
109 | rows, err := db.Query("SHOW BINARY LOGS;")
110 | if err != nil {
111 | return nil, fmt.Errorf("error executing SHOW BINARY LOGS: %v", err)
112 | }
113 | defer rows.Close()
114 |
115 | // Slice holding [logName, fileSize] pairs
116 | var binaryLogs [][]string
117 |
118 | // Iterate over the result set and collect each log
119 | for rows.Next() {
120 | var logName, fileSize, encrypted string
121 | if err := rows.Scan(&logName, &fileSize, &encrypted); err != nil {
122 | return nil, fmt.Errorf("error scanning row: %v", err)
123 | }
124 | binaryLogs = append(binaryLogs, []string{logName, fileSize})
125 | }
126 |
127 | // Check for errors raised during iteration
128 | if err := rows.Err(); err != nil {
129 | return nil, fmt.Errorf("error during row iteration: %v", err)
130 | }
131 |
132 | // Return the collected binary logs
133 | return binaryLogs, nil
134 | }
135 |
136 | func getFormatAndPreviousGTIDs(cfg replication.BinlogSyncerConfig, binlogFilename string) (uint32, string, error) {
137 | // Create a BinlogSyncer instance
138 | syncer := replication.NewBinlogSyncer(cfg)
139 | defer syncer.Close()
140 |
141 | streamer, err := syncer.StartSync(mysql.Position{Name: binlogFilename, Pos: 4})
142 | if err != nil {
143 | return 0, "", fmt.Errorf("error starting binlog syncer: %v", err)
144 | }
145 |
146 | var formatTimestamp uint32
147 | var previousGTIDs string
148 |
149 | ctx := context.Background()
150 | for i := 0; i < 3; i++ {
151 | // Read the next event
152 | ev, err := streamer.GetEvent(ctx)
153 | if err != nil {
154 | return 0, "", fmt.Errorf("error getting binlog event: %v", err)
155 | }
156 |
157 | // Record the timestamp of the FORMAT_DESCRIPTION_EVENT
158 | if ev.Header.EventType == replication.FORMAT_DESCRIPTION_EVENT {
159 | formatTimestamp = ev.Header.Timestamp
160 | }
161 |
162 | // Record the PREVIOUS_GTIDS_EVENT contents and stop
163 | if ev.Header.EventType == replication.PREVIOUS_GTIDS_EVENT {
164 | previousGTIDsEvent := ev.Event.(*replication.PreviousGTIDsEvent)
165 | previousGTIDs = previousGTIDsEvent.GTIDSets
166 | break
167 | }
168 | }
169 |
170 | return formatTimestamp, previousGTIDs, nil
171 | }
172 |
173 | func main() {
174 | // Parse command line arguments
175 | host := flag.String("h", "localhost", "MySQL host")
176 | port := flag.Int("P", 3306, "MySQL port")
177 | user := flag.String("u", "root", "MySQL user")
178 | password := flag.String("p", "", "MySQL password")
179 | var verbose bool
180 | flag.BoolVar(&verbose, "v", false, "Enable verbose logging")
181 | flag.Parse()
182 | if *password == "" {
183 | fmt.Print("Enter MySQL password: ")
184 | bytePassword, err := terminal.ReadPassword(int(os.Stdin.Fd()))
185 | fmt.Println()
186 | if err != nil {
187 | log.Fatalf("Error: Failed to read the password - %v", err)
188 | }
189 | *password = string(bytePassword)
190 | }
191 |
192 | dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/mysql", *user, *password, *host, *port)
193 |
194 | // Fetch the list of binary log files
195 | binaryLogs, err := getBinaryLogs(dsn)
196 | if err != nil {
197 | fmt.Println("Error:", err)
198 | os.Exit(1)
199 | }
200 | if verbose {
201 | timestamp := time.Now().Format("2006/01/02 15:04:05")
202 | fmt.Printf("[%s] [info] get_binlog_timestamp_info.go SHOW BINARY LOGS done, %d binlogs to analyze\n", timestamp, len(binaryLogs))
203 |
204 | }
205 |
206 | cfg := replication.BinlogSyncerConfig{
207 | ServerID: 100,
208 | Flavor: "mysql",
209 | Host: *host,
210 | Port: uint16(*port),
211 | User: *user,
212 | Password: *password,
213 | }
214 | cfg.Logger = log.NewDefault(&log.NullHandler{})
215 |
216 | var binlogs []BinlogInfo
217 | var logEndTime uint32
218 | var nextLogPreviousGTIDs string
219 | for i := len(binaryLogs) - 1; i >= 0; i-- {
220 | log := binaryLogs[i]
221 | logName, fileSize := log[0], log[1]
222 | startTime, previousGTIDs, err := getFormatAndPreviousGTIDs(cfg, logName)
223 | if err != nil {
224 | fmt.Println("Error:", err)
225 | os.Exit(1)
226 | }
227 | if verbose {
228 | timestamp := time.Now().Format("2006/01/02 15:04:05")
229 | fmt.Printf("[%s] [info] get_binlog_timestamp_info.go %s done, still %d binlogs to analyze\n", timestamp, logName, i)
230 | }
231 | 
232 | binlogs = append(binlogs, BinlogInfo{logName, fileSize, startTime, logEndTime, previousGTIDs, nextLogPreviousGTIDs})
233 | logEndTime = startTime
234 | nextLogPreviousGTIDs = previousGTIDs
235 | }
236 | table := tablewriter.NewWriter(os.Stdout)
237 | table.SetAutoFormatHeaders(false)
238 | table.SetHeader([]string{"Log_name", "File_size", "Start_time", "End_time", "Duration", "GTID"})
239 |
240 | for i := len(binlogs) - 1; i >= 0; i-- {
241 | binlog := binlogs[i]
242 | fileSize, err := strconv.ParseUint(binlog.FileSize, 10, 64)
243 | if err != nil {
244 | fmt.Println("Error parsing string to uint64:", err)
245 | return
246 | }
247 | startUnixTimestamp := int64(binlog.StartTime)
248 | startTime := time.Unix(startUnixTimestamp, 0)
249 | startFormattedTime, err := ConvertUnixTimestampToFormattedTime(startUnixTimestamp)
250 | if err != nil {
251 | fmt.Println("Error:", err)
252 | return
253 | }
254 | endUnixTimestamp := int64(binlog.EndTime)
255 | endTime := time.Unix(endUnixTimestamp, 0)
256 | endFormattedTime, err := ConvertUnixTimestampToFormattedTime(endUnixTimestamp)
257 |
258 | if err != nil {
259 | fmt.Println("Error:", err)
260 | return
261 | }
262 |
263 | duration := endTime.Sub(startTime)
264 | durationFormatted := fmt.Sprintf("%02d:%02d:%02d", int(duration.Hours()), int(duration.Minutes())%60, int(duration.Seconds())%60)
265 |
266 | if endUnixTimestamp == 0 {
267 | endFormattedTime, durationFormatted = "", ""
268 | }
269 | gtidDifference, err := GetGTIDSubtract(binlog.NextLogPreviousGTIDs, binlog.PreviousGTIDs)
270 | if err != nil {
271 | fmt.Println("Error:", err)
272 | return
273 |
274 | }
275 |
276 | table.Append([]string{binlog.LogName, fmt.Sprintf("%d (%s)", fileSize, ConvertBytesToHumanReadable(fileSize)), startFormattedTime, endFormattedTime, durationFormatted, ExtractGTIDSuffix(gtidDifference)})
277 | }
278 | table.Render()
279 |
280 | }
281 |
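282 | // Example invocation (a sketch):
283 | //   go run get_binlog_timestamp_info_single_thread.go -h 127.0.0.1 -P 3306 -u root -v
284 | // The tool prints one row per binlog with its start/end timestamps, duration, and
285 | // the GTID range it contains (the next log's PREVIOUS_GTIDS minus its own).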
--------------------------------------------------------------------------------
/mysql/mysql2mysql.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import pymysql,argparse,time,json,sys
5 | from pymysqlreplication import BinLogStreamReader
6 |
7 | reload(sys)
8 | sys.setdefaultencoding('utf8')
9 |
10 | from pymysqlreplication.row_event import (
11 | DeleteRowsEvent,
12 | UpdateRowsEvent,
13 | WriteRowsEvent,
14 | )
15 | from pymysqlreplication.event import (RotateEvent,QueryEvent,XidEvent)
16 |
17 | class DbUtils:
18 | def __init__(self, host, user,passwd,port):
19 | self.conn=pymysql.connect(host,user,passwd,port=port,charset='utf8',autocommit=False)
20 | self.cursor=self.conn.cursor()
21 | def query(self,sql):
22 | self.cursor.execute(sql)
23 | result=self.cursor.fetchall()
24 | return result
25 | def execute(self,sql):
26 | binlog_file_pos=sql.pop()
27 | master_log_name, master_log_start_pos, master_log_end_pos = binlog_file_pos
28 | try:
29 | update_relay_info_sql="insert into mysql2mysql.relay_info (id, master_log_name, master_log_start_pos,master_log_end_pos) values (NULLIF(%s, 0), '%s'," \
30 | "%s,%s) on duplicate key update last_update=NOW(),master_log_name='%s',master_log_start_pos=%s,master_log_end_pos=%s "
31 | update_file_pos_state=update_relay_info_sql%(1,master_log_name, master_log_start_pos, master_log_end_pos,master_log_name, master_log_start_pos, master_log_end_pos)
32 | self.cursor.execute(update_file_pos_state)
33 | insert_file_pos_state=update_relay_info_sql%(0,master_log_name, master_log_start_pos, master_log_end_pos,master_log_name, master_log_start_pos, master_log_end_pos)
34 | self.cursor.execute(insert_file_pos_state)
35 | for each_sql in sql:
36 | self.cursor.execute(each_sql)
37 | self.conn.commit()
38 | except Exception,e:
39 | try:
40 | self.conn.rollback()
41 | except Exception,e1:
42 | print e1,sql
43 | raise Exception(e,sql)
44 |
45 | def get_binlog_file_pos(connection_values,role):
46 | db=DbUtils(**connection_values)
47 | if role == 'master':
48 | file_log_status = db.query('show master status')
49 | log_file,log_pos,_,_,_ = file_log_status[0]
50 | elif role == 'slave':
51 | file_log_status = db.query("select master_log_name,master_log_end_pos from mysql2mysql.relay_info where id=1")
52 | if not file_log_status:
53 | raise Exception("No record in mysql2mysql.relay_info")
54 | log_file, log_pos= file_log_status[0]
55 | return log_file,log_pos
56 |
57 | def compare_items(items):
58 | (k, v) = items
59 | if v is None:
60 | return '`%s` IS %%s' % k
61 | else:
62 | return '`%s`=%%s' % k
63 |
64 | def execute_sql_in_dest_db(dest_connection_info,transaction_sql):
65 | db=DbUtils(**dest_connection_info)
66 | db.execute(transaction_sql)
67 |
68 | def handle_binlog_event(source_connection_info,dest_connection_info,log_file,log_pos):
69 | stream = BinLogStreamReader(connection_settings=source_connection_info,
70 | server_id=100, blocking=True,
71 | only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, RotateEvent, QueryEvent,
72 | XidEvent],
73 | resume_stream=True,
74 | log_file=log_file, log_pos=log_pos)
75 | conn = pymysql.connect(**source_connection_info)
76 | cursor = conn.cursor()
77 | transaction_sql = []
78 | for binlog_event in stream:
79 | if isinstance(binlog_event, RotateEvent):
80 | log_file = binlog_event.next_binlog
81 | elif isinstance(binlog_event, QueryEvent) and binlog_event.query == 'BEGIN':
82 | transaction_start_pos = binlog_event.packet.log_pos
83 | elif isinstance(binlog_event, (DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent)):
84 | schema, table = binlog_event.schema, binlog_event.table
85 | for row in binlog_event.rows:
86 | if isinstance(binlog_event, DeleteRowsEvent):
87 | delete_sql_template = 'DELETE FROM `{0}`.`{1}` WHERE {2} LIMIT 1;'.format(
88 | schema, table, ' AND '.join(map(compare_items, row['values'].items())))
89 | delete_sql = cursor.mogrify(delete_sql_template, row['values'].values())
90 | transaction_sql.append(delete_sql)
91 |
92 | elif isinstance(binlog_event, UpdateRowsEvent):
93 | update_sql_template = 'UPDATE `{0}`.`{1}` SET {2} WHERE {3} LIMIT 1;'.format(
94 | schema, table,
95 | ', '.join(['`%s`=%%s' % k for k in row['after_values'].keys()]),
96 | ' AND '.join(map(compare_items, row['before_values'].items()))
97 | )
98 | values = list(row['after_values'].values()) + list(row['before_values'].values())
99 | update_sql = cursor.mogrify(update_sql_template, values)
100 | transaction_sql.append(update_sql)
101 |
102 | elif isinstance(binlog_event, WriteRowsEvent):
103 | insert_sql_template = 'INSERT INTO `{0}`.`{1}`({2}) VALUES ({3})'.format(
104 | schema, table,
105 | ', '.join(map(lambda key: '`%s`' % key, row['values'].keys())),
106 | ', '.join(['%s'] * len(row['values']))
107 | )
108 | insert_sql = cursor.mogrify(insert_sql_template, row['values'].values())
109 | transaction_sql.append(insert_sql)
110 | elif isinstance(binlog_event, XidEvent):
111 | transaction_end_pos = binlog_event.packet.log_pos
112 | #print '\n',log_file, transaction_start_pos, transaction_end_pos
113 | transaction_sql.append([log_file,transaction_start_pos, transaction_end_pos])
114 | execute_sql_in_dest_db(dest_connection_info,transaction_sql)
115 | transaction_sql = []
116 | # time.sleep(5)
117 | stream.close()
118 | cursor.close()
119 | conn.close()
120 |
121 | def parse_args():
122 | USAGE = "%(prog)s --source user:pass@host:port --dest user:pass@host:port " \
123 | "--start-file mysql-bin.000001 --start-pos 154"
124 | parser = argparse.ArgumentParser(usage=USAGE,version='0.1')
125 | parser.add_argument("--source", action="store", dest="source",type=str,
126 | help="connection information for source server in "
127 | "the form: [:]@[:]")
128 | parser.add_argument("--dest", action="store", dest="destination",type=str,
129 | help="connection information for destination server in "
130 | "the form: [:]@[:]")
131 | parser.add_argument("--start-file", dest='start_file', type=str,
132 | help='start binlog file to be parsed; if not given, get binlog file & pos from "show master status"')
133 | parser.add_argument('--start-pos', dest='start_pos', type=int,default=4,
134 | help='start position of the --start-file; if not given, default 4')
135 | parser.add_argument('-c','--continue',dest='continue_flag',action='store_true',default=False,
136 | help='get binlog file & position from dest db mysql2mysql.relay_info, default False')
137 | #args = parser.parse_args(r'--source root:123456@192.168.244.10:3306 --dest root:123456@192.168.244.20:3306'.split())
138 | args = parser.parse_args()
139 | if not args.source or not args.destination:
140 | parser.error("You must specify both --source and --dest.")
141 | if args.start_file and args.continue_flag:
142 | parser.error("You cannot use --start-file and -c together.")
143 | return args
144 |
145 | def parse_connection(connection_values):
146 | conn_format = connection_values.rsplit('@', 1)
147 | user,passwd=conn_format[0].split(":")
148 | host,port=conn_format[1].split(":")
149 | connection = {
150 | "user": user,
151 | "host": host,
152 | "port": int(port),
153 | "passwd": passwd
154 | }
155 | return connection
156 |
157 | def main():
158 | args=parse_args()
159 | source_connection_info=parse_connection(args.source)
160 | dest_connection_info=parse_connection(args.destination)
161 | if not args.start_file and not args.continue_flag:
162 | log_file,log_pos=get_binlog_file_pos(source_connection_info,'master')
163 | elif args.start_file:
164 | log_file,log_pos=args.start_file,args.start_pos
165 | elif args.continue_flag:
166 | log_file,log_pos=get_binlog_file_pos(dest_connection_info,'slave')
167 | handle_binlog_event(source_connection_info,dest_connection_info,log_file,log_pos)
168 |
169 | if __name__ == '__main__':
170 | main()
171 |
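172 | # Example usage (a sketch, mirroring the USAGE string above):
173 | #   python mysql2mysql.py --source root:123456@192.168.244.10:3306 \
174 | #     --dest root:123456@192.168.244.20:3306 --start-file mysql-bin.000001 --start-pos 154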
--------------------------------------------------------------------------------
/mysql/find_config_diff.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding:UTF-8 -*-
3 | import MySQLdb,re,prettytable,optparse,os,sys,subprocess,tempfile
4 |
5 | def get_variables(config_file):
6 | variables = {}
7 | with open(config_file) as f:
8 | mysqld_flag=0
9 | for line in f:
10 |
11 | # strip whitespace so blank lines can be filtered out later
12 | line=line.strip()
13 |
14 | if line.startswith('[mysqld]'):
15 | mysqld_flag = 1
16 | elif line.startswith('['):
17 | mysqld_flag=0
18 | if mysqld_flag==1 and line and not line.startswith('#') and not line.startswith('[mysqld]'):
19 |
20 | # drop any trailing comment after the option value
21 | if "#" in line:
22 | line= line.split('#')[0]
23 |
24 | # this check avoids misparsing options like "optimizer-trace-features greedy_search=on"
25 | if "=" in line:
26 | if len(re.split('=',line)[0].split()) == 1:
27 | line=line.replace('=',' ',1)
28 | if "(No default value)" in line:
29 | line_with_variables=line.split("(No default value)")
30 | variables[line_with_variables[0]] = ''
31 | else:
32 | line_with_variables=line.split()
33 | if len(line_with_variables) == 1:
34 | variables[line_with_variables[0]]=''
35 | else:
36 | variables[line_with_variables[0]] = line_with_variables[1]
37 | return variables
38 |
39 | def get_variables_from_instance(host,port,user,passwd):
40 | try:
41 | conn=MySQLdb.connect(host=host,port=port,user=user,passwd=passwd)
42 | cursor = conn.cursor()
43 | query='show global variables'
44 | cursor.execute(query)
45 | results=cursor.fetchall()
46 | variables=dict(results)
47 | return variables
48 | except Exception as e:
49 | print e
50 |
51 | def convert_variable_value(variable_value):
52 |     # paths are case-sensitive, so return path values unchanged
53 | if not '/' in variable_value:
54 | if variable_value.lower() in ['false','off','0']:
55 | variable_value='0'
56 | elif variable_value.lower() in ['true','on','1']:
57 | variable_value='1'
58 | elif re.search(r'^(\d+)G$',variable_value,re.IGNORECASE):
59 | variable_value=str(int(re.split('G|g',variable_value)[0])*1024*1024*1024)
60 | elif re.search(r'^(\d+)M$',variable_value,re.IGNORECASE):
61 | variable_value=str(int(re.split('M|m',variable_value)[0])*1024*1024)
62 | elif re.search(r'^(\d+)K$', variable_value, re.IGNORECASE):
63 | variable_value = str(int(re.split('K|k',variable_value)[0]) * 1024)
64 | variable_value=variable_value.lower()
65 | return variable_value
66 |
67 | def convert_variable_name(variables):
68 | convert_variables={}
69 | for variable_name,variable_value in variables.iteritems():
70 | new_variable_name=variable_name.replace('-','_')
71 | new_variable_name=new_variable_name.strip()
72 | convert_variables[new_variable_name]=variable_value
73 | return convert_variables
74 |
75 | def convert_connect_info(instance_info):
76 | connect_info={}
77 | instance_info_dict=dict(info.split('=') for info in instance_info.split(','))
78 | connect_info['host']=instance_info_dict.get('h')
79 | connect_info['port'] = int(instance_info_dict.get('P'))
80 | connect_info['user'] = instance_info_dict.get('u')
81 | connect_info['passwd'] = instance_info_dict.get('p')
82 | return connect_info
83 |
84 | def get_variables_from_mysqld_help(default):
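    # Dump mysqld's compiled-in defaults ("mysqld --no-defaults --verbose --help")
    # into a temp file shaped like a my.cnf so get_variables() can parse it.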
85 | if default == 'mysqld':
86 | command='mysqld --no-defaults --verbose --help'
87 | else:
88 | command=os.path.join(default,'mysqld --no-defaults --verbose --help')
89 | p=subprocess.Popen(command,shell=True,stdout=subprocess.PIPE)
90 | temp=tempfile.mkstemp()
91 | temp_file=temp[1]
92 | flag=0
93 | with open(temp_file,'w+') as f:
94 | for line in p.stdout:
95 | if line.startswith('---------'):
96 | f.write('[mysqld]\n')
97 | flag=1
98 | continue
99 | if flag == 1 and len(line) == 1:
100 | break
101 | if flag ==1:
102 | f.write(line)
103 | return temp
104 |
105 |
106 | def main():
107 |     usage = '''Four types of comparison are supported
108 |     1. Config file vs Config file
109 |        ./find_config_diff.py --f1 my_5.6.cnf --f2 my_5.7.cnf
110 |     2. Config file vs Instance variables
111 |        ./find_config_diff.py --f1 my.cnf --instance h=192.168.244.10,P=3306,u=root,p=123456
112 |     3. Instance variables vs Default variables
113 |        ./find_config_diff.py --instance h=192.168.244.10,P=3306,u=root,p=123456 --default=mysqld
114 |     4. Config file vs Default variables
115 |        ./find_config_diff.py --f1 my.cnf --default=/usr/local/mysql/bin
116 |     '''
117 | parser = optparse.OptionParser(usage)
118 | parser.add_option("--f1",action="store", help="The first config file")
119 | parser.add_option("--f2",action="store", help="The second config file")
120 | parser.add_option("--instance",action="store", help="Input the Connect info,like h=192.168.244.10,P=3306,u=root,p=123456")
121 |     parser.add_option("--default",action="store", help="Input the mysqld path, like '/usr/local/mysql/bin'. \
122 | You can also specify 'mysqld' if mysqld is in $PATH"
123 |     )
124 | # args = ['--f1', 'my.cnf','--instance','h=192.168.244.10,P=3306,u=root,p=123456']
125 | # args = ['--f1', 'my.cnf','--default','/usr/local/mysql/bin/']
126 | options, args = parser.parse_args()
127 | # (options, args) = parser.parse_args()
128 | config_file_one=options.f1
129 | config_file_two=options.f2
130 | instance=options.instance
131 | default=options.default
132 | if config_file_one and config_file_two:
133 | variables_one=get_variables(config_file_one)
134 | variables_two=get_variables(config_file_two)
135 | column_name=["Variable",config_file_one,config_file_two]
136 | elif config_file_one and instance:
137 | variables_one=get_variables(config_file_one)
138 | connect_info=convert_connect_info(instance)
139 | variables_two=get_variables_from_instance(**connect_info)
140 | column_name = ["Variable", config_file_one, "Instance"]
141 | elif config_file_one and default:
142 | variables_one=get_variables(config_file_one)
143 | temp=get_variables_from_mysqld_help(default)
144 | variables_two=get_variables(temp[1])
145 | os.close(temp[0])
146 | column_name = ["Variable", config_file_one, "Default"]
147 | elif instance and default:
148 | connect_info = convert_connect_info(instance)
149 | variables_one = get_variables_from_instance(**connect_info)
150 | temp=get_variables_from_mysqld_help(default)
151 | variables_two=get_variables(temp[1])
152 | os.close(temp[0])
153 |         column_name = ["Variable","Instance", "Default"]
154 |     else:
155 |         parser.error("Specify a valid combination: --f1 --f2, --f1 --instance, --f1 --default, or --instance --default")
156 |
155 | convert_variables_one=convert_variable_name(variables_one)
156 | convert_variables_two = convert_variable_name(variables_two)
157 | set_variables_one=set(convert_variables_one.keys())
158 | set_variables_two=set(convert_variables_two.keys())
159 |
160 | common_variables=set_variables_one & set_variables_two
161 |
162 | pt = prettytable.PrettyTable(column_name)
163 | pt.align='l'
164 | pt.padding_width = 1 # One space between column edges and contents (default)
165 | pt.max_width=40
166 | for each_variable in sorted(common_variables):
167 | if convert_variable_value(convert_variables_one[each_variable]) == convert_variable_value(convert_variables_two[each_variable]):
168 | pt.add_row([each_variable,convert_variables_one[each_variable],convert_variables_two[each_variable]])
169 |     row = '-' * 10
170 | pt.add_row([row,row,row])
171 |
172 | for each_variable in sorted(common_variables):
173 | if convert_variable_value(convert_variables_one[each_variable]) != convert_variable_value(convert_variables_two[each_variable]):
174 | pt.add_row([each_variable,convert_variables_one[each_variable],convert_variables_two[each_variable]])
175 | # print each_variable.ljust(25),convert_variables_one[each_variable].ljust(25),convert_variables_two[each_variable].ljust(25)
176 | if config_file_one and config_file_two:
177 | variables_one_only=set_variables_one - set_variables_two
178 |         row = '-' * 10
179 | pt.add_row([row,row,row])
180 | for each_variable in sorted(variables_one_only):
181 | pt.add_row([each_variable,convert_variables_one[each_variable],''])
182 | # print each_variable.ljust(30),convert_variables_one[each_variable].ljust(30)
183 | variables_two_only= set_variables_two - set_variables_one
184 | pt.add_row([row,row,row])
185 | for each_variable in sorted(variables_two_only):
186 | pt.add_row([each_variable, '',convert_variables_two[each_variable]])
187 | # print each_variable.ljust(30), '--'.ljust(30),convert_variables_two[each_variable].ljust(30)
188 | print pt
189 | #print pt.get_html_string()
190 | if __name__ == '__main__':
191 | main()
192 |
--------------------------------------------------------------------------------
/mysql/db_slowlog_digest.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "strings"
5 | "fmt"
6 | "os/exec"
7 | "regexp"
8 | "html/template"
9 | "os"
10 | "flag"
11 | "time"
12 | )
13 |
14 | const temp = `
15 | <!DOCTYPE html>
16 | <html>
17 | <head>
18 | <title>Slow Log</title>
19 | <style>
127 | </style>
128 | </head>
129 | <body>
130 | <div>
131 | <div>
132 | <h1>Slow Log</h1>
133 | <p>Generated at: {{.now}}</p>
134 |
135 | <table>
136 | <tr>
137 | <th>Rank</th>
138 | <th>Response time</th>
139 | <th>Response ratio</th>
140 | <th>Calls</th>
141 | <th>R/Call</th>
142 | <th>QueryId</th>
143 | <th>Example</th>
144 | <th>Remark</th>
145 | </tr>
146 |
147 | {{range .slowlogs}}
148 | <tr>
149 | <td>{{ .Rank}}</td>
150 | <td>{{ .Response_time}}</td>
151 | <td>{{ .Response_ratio}}</td>
152 | <td>{{ .Calls}}</td>
153 | <td>{{ .R_Call}}</td>
154 | <td>{{ .QueryId}}</td>
155 | <td>{{ .Example}}</td>
156 | <td></td>
157 | </tr>
158 | {{end}}
159 | </table>
160 | </div>
161 | </div>
162 | </body>
163 | </html>
164 | `
165 | var (
166 | help bool
167 | since string
168 | until string
169 | all bool
170 | pt string
171 | slowlog string
172 | yday bool
173 | )
174 |
175 | func init() {
176 | flag.BoolVar(&help,"help",false, "Display usage")
177 |     flag.StringVar(&since,"since","","Parse only queries newer than this value, YYYY-MM-DD [HH:MM:SS]")
178 |     flag.StringVar(&until,"until","","Parse only queries older than this value, YYYY-MM-DD [HH:MM:SS]")
179 | flag.BoolVar(&all,"all",false,"Parse the whole slowlog")
180 | flag.BoolVar(&yday,"yday",true,"Parse yesterday's slowlog")
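    // NOTE: --yday mirrors the default behavior (yesterday through today);
    // the flag is accepted but not otherwise consulted below.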
181 | flag.StringVar(&pt,"pt","","Absolute path for pt-query-digest. Example:/usr/local/percona-toolkit/bin/pt-query-digest")
182 | flag.StringVar(&slowlog,"slowlog","","Absolute path for slowlog. Example:/var/log/mysql/node1-slow.log")
183 | }
184 |
185 | func main() {
186 | flag.Parse()
187 | if help {
188 | fmt.Fprintf(os.Stdout, `db-slowlog-digest version: 1.0.0
189 | Usage:
190 | db-slowlog-digest --pt /usr/bin/pt-query-digest --slowlog /var/log/mysql/node1-slow.log
191 | Or
192 | db-slowlog-digest --pt /usr/bin/pt-query-digest --slowlog /var/log/mysql/node1-slow.log --all
193 | Or
194 | db-slowlog-digest --pt /usr/bin/pt-query-digest --slowlog /var/log/mysql/node1-slow.log --since "20180101" --until "20180108"
195 |
196 | Options:
197 | `)
198 | flag.PrintDefaults()
199 | return
200 | }
201 |
202 | if len(pt) ==0 || len(slowlog)==0 {
203 | fmt.Println("--pt and --slowlog are both required")
204 | return
205 | }
206 | if all && (len(since) !=0 || len(until) !=0) {
207 | fmt.Println("--all and --since(--until) are mutually exclusive")
208 | return
209 | }
210 |
211 | today := time.Now().Format("2006-01-02")
212 | yesterday := time.Now().AddDate(0,0,-1).Format("2006-01-02")
213 |
214 | parameter := make(map[string]string)
215 | if all {
216 | parameter["since"]=""
217 | parameter["until"]=""
218 | } else if len(since) !=0 || len(until) !=0 {
219 | if len(since) !=0 {
220 | parameter["since"]="--since "+since
221 | }
222 | if len(until) !=0 {
223 | parameter["until"]="--until "+until
224 | }
225 | } else {
226 | parameter["since"]="--since "+yesterday
227 | parameter["until"]="--until "+today
228 | }
229 | ptQueryDigestCmd := strings.Join([]string{"perl",pt,parameter["since"],parameter["until"],slowlog}," ")
230 | //fmt.Println(ptQueryDigestCmd)
231 | parseSlowLog(ptQueryDigestCmd)
232 | }
233 |
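// parseSlowLog runs the given pt-query-digest command, pulls each query's row
// from the "# Profile" section together with its example SQL, and renders the
// HTML report template to stdout.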
234 | func parseSlowLog(ptQueryDigestCmd string) {
235 | slowLog := execCmd("perl", ptQueryDigestCmd)
236 | lines := strings.Split(string(slowLog), "\n")
237 | linesNums := len(lines)
238 | profileFlag := false
239 | exampleFlag := false
240 | exampleSQL := []string{}
241 | slowLogProfile := [][]string{}
242 | exampleSQLs := make(map[string]string)
243 | var queryID string
244 | for k,line := range lines {
245 | if strings.Contains(line,"# Profile"){
246 | profileFlag = true
247 | continue
248 | } else if profileFlag && (len(line) == 0 || strings.HasPrefix(line,"# MISC 0xMISC")) {
249 | profileFlag = false
250 | continue
251 | }
252 | if profileFlag {
253 | if strings.HasPrefix(line, "# Rank") || strings.HasPrefix(line, "# ====") {
254 | continue
255 | }
256 | re, _ := regexp.Compile(" +")
257 | rowToArray := re.Split(line, 9)
258 | slowLogProfile = append(slowLogProfile, rowToArray)
259 | } else if strings.Contains(line,"concurrency, ID 0x"){
260 | re := regexp.MustCompile(`(?U)ID (0x.*) `)
261 | queryID = re.FindStringSubmatch(line)[1]
262 | exampleFlag = true
263 | exampleSQL = []string{}
264 | }else if exampleFlag && (! strings.HasPrefix(line,"#")) && len(line) !=0 {
265 | exampleSQL=append(exampleSQL,line)
266 | }else if exampleFlag && (len(line) == 0 || k == (linesNums-1)){
267 | exampleFlag = false
268 | exampleSQLs[queryID] = strings.Join(exampleSQL,"\n")
269 | }
270 | }
271 |
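	// Match each profile row's truncated query ID against the full IDs collected
	// from the example sections, then attach the full ID and example SQL.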
272 | for _,v := range slowLogProfile {
273 | for key := range exampleSQLs {
274 | miniQueryID := strings.Trim(v[2],".")
275 | if strings.Contains(key,miniQueryID) {
276 | v[8] = exampleSQLs[key]
277 | v[2] = key
278 | break
279 | }
280 | }
281 | }
282 |
283 | type slowlog struct {
284 | Rank string
285 | Response_time string
286 | Response_ratio string
287 | Calls string
288 | R_Call string
289 | QueryId string
290 | Example string
291 | }
292 |
293 | now := time.Now().Format("2006-01-02 15:04:05")
294 | slowlogs := []slowlog{}
295 | for _,value := range slowLogProfile {
296 | slowlogrecord := slowlog{value[1],value[3],value[4],value[5],value[6],value[2],value[8]}
297 | slowlogs = append(slowlogs,slowlogrecord)
298 | }
299 | var report = template.Must(template.New("slowlog").Parse(temp))
300 | report.Execute(os.Stdout,map[string]interface{}{"slowlogs":slowlogs,"now":now})
301 |
302 | }
303 |
304 |
305 | func execCmd(cmd_type string, cmd string) string {
306 |     // "shell" commands run via bash -c; anything else is executed directly.
307 |     var out []byte
308 |     var err error
309 |     if cmd_type == "shell" {
310 |         out, err = exec.Command("bash", "-c", cmd).Output()
311 |     } else {
312 |         parts := strings.Fields(cmd)
313 |         out, err = exec.Command(parts[0], parts[1:]...).Output()
314 |     }
315 |     if err != nil {
316 |         fmt.Println("Failed to execute command:", cmd)
317 |         os.Exit(1)
318 |     }
319 |     return string(out)
320 | }
321 |
--------------------------------------------------------------------------------
/mysql/generate_my_cnf.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "math/rand"
8 | "os"
9 | "regexp"
10 | "strconv"
11 | "strings"
12 | "text/template"
13 | "flag"
14 | "time"
15 | )
16 |
17 | const config = `
18 | [client]
19 | socket = {{.datadir}}/mysql/{{.port}}/data/mysql.sock
20 |
21 | [mysql]
22 | no-auto-rehash
23 |
24 | [mysqld]
25 | # General
26 | user = mysql
27 | port = {{.port}}
28 | basedir = {{.basedir}}
29 | datadir = {{.datadir}}/mysql/{{.port}}/data
30 | socket = {{.datadir}}/mysql/{{.port}}/data/mysql.sock
31 | pid_file = {{.datadir}}/mysql/{{.port}}/data/mysql.pid
32 | character_set_server = utf8mb4
33 | transaction_isolation = READ-COMMITTED
34 | sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'
35 | log_error = {{.datadir}}/mysql/{{.port}}/log/mysqld.err
36 | default_time_zone = '+8:00'{{if or (.mysqld57) (.mysqld80)}}
37 | log_timestamps = system{{end}}
38 | tmpdir = {{.datadir}}/mysql/{{.port}}/tmp
39 | secure_file_priv = {{.datadir}}/mysql/{{.port}}/tmp
40 |
41 | # Slow log
42 | slow_query_log = ON
43 | long_query_time = 0.5
44 | slow_query_log_file = {{.datadir}}/mysql/{{.port}}/slowlog/slow.log
45 |
46 | # Connection
47 | back_log = 2048
48 | max_connections = 500
49 | max_connect_errors = 10000
50 | interactive_timeout = 1800
51 | wait_timeout = 1800
52 | thread_cache_size = 128
53 | max_allowed_packet = 1G
54 | skip_name_resolve = ON
55 |
56 | # Session
57 | read_buffer_size = {{.read_buffer_size}}
58 | read_rnd_buffer_size = {{.read_rnd_buffer_size}}
59 | sort_buffer_size = {{.sort_buffer_size}}
60 | join_buffer_size = {{.join_buffer_size}}
61 |
62 | # InnoDB
63 | innodb_buffer_pool_size = {{.innodb_buffer_pool_size}}
64 | innodb_buffer_pool_instances = {{.innodb_buffer_pool_instances}}
65 | innodb_log_file_size = {{.innodb_log_file_size}}
66 | innodb_log_files_in_group = 2
67 | innodb_log_buffer_size = 16M
68 | innodb_flush_log_at_trx_commit = 1{{if or (.mysqld57) (.mysqld80)}}
69 | innodb_undo_tablespaces = 2
70 | innodb_max_undo_log_size = 1024M
71 | innodb_undo_log_truncate = 1
72 | innodb_page_cleaners = 8{{end}}
73 | innodb_io_capacity = {{.innodb_io_capacity}}
74 | innodb_io_capacity_max = {{.innodb_io_capacity_max}}
75 | innodb_data_file_path = ibdata1:1G:autoextend
76 | innodb_flush_method = O_DIRECT
77 | innodb_purge_threads = 4
78 | innodb_autoinc_lock_mode = 2
79 | innodb_buffer_pool_load_at_startup = 1
80 | innodb_buffer_pool_dump_at_shutdown = 1
81 | innodb_read_io_threads = 8
82 | innodb_write_io_threads = 8
83 | innodb_flush_neighbors = {{.innodb_flush_neighbors}}
84 | innodb_checksum_algorithm = crc32
85 | innodb_strict_mode = ON{{if or (.mysqld56) (.mysqld57)}}
86 | innodb_file_format = Barracuda
87 | innodb_large_prefix = ON{{end}}
88 | innodb_print_all_deadlocks = ON
89 | innodb_numa_interleave = ON
90 | innodb_open_files = 65535
91 | innodb_adaptive_hash_index = OFF
92 |
93 | # Replication
94 | server_id = {{.server_id}}
95 | log_bin = {{.datadir}}/mysql/{{.port}}/binlog/mysql-bin
96 | relay_log = {{.datadir}}/mysql/{{.port}}/relaylog/relay-bin
97 | sync_binlog = 1
98 | binlog_format = ROW
99 | master_info_repository = TABLE
100 | relay_log_info_repository = TABLE
101 | relay_log_recovery = ON
102 | log_slave_updates = ON{{if (.mysqld80)}}
103 | binlog_expire_logs_seconds = 604800{{else}}
104 | expire_logs_days = 7{{end}}
105 | slave_rows_search_algorithms = 'INDEX_SCAN,HASH_SCAN'
106 | skip_slave_start = ON
107 | slave_net_timeout = 60
108 | binlog_error_action = ABORT_SERVER
109 | super_read_only = ON
110 |
111 | # Semi-Sync Replication
112 | plugin_load = "validate_password.so;semisync_master.so;semisync_slave.so"
113 | rpl_semi_sync_master_enabled = ON
114 | rpl_semi_sync_slave_enabled = ON
115 | rpl_semi_sync_master_timeout = 1000
116 |
117 | # GTID
118 | gtid_mode = ON
119 | enforce_gtid_consistency = ON
120 | binlog_gtid_simple_recovery = ON
121 | {{if or (.mysqld57) (.mysqld80)}}
122 | # Multithreaded Replication
123 | slave-parallel-type = LOGICAL_CLOCK
124 | slave-parallel-workers = 8
125 | slave_preserve_commit_order = ON
126 | transaction_write_set_extraction = XXHASH64
127 | binlog_transaction_dependency_tracking = WRITESET_SESSION
128 | binlog_transaction_dependency_history_size = 25000{{end}}
129 | {{if or (.mysqld56) (.mysqld57)}}
130 | # Query Cache
131 | query_cache_type = 0
132 | query_cache_size = 0
133 | {{end}}
134 | # Others
135 | open_files_limit = 65535
136 | max_heap_table_size = 32M
137 | tmp_table_size = 32M
138 | table_open_cache = 65535
139 | table_definition_cache = 65535
140 | table_open_cache_instances = 64
141 | `
142 |
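// GenerateMyCnf renders the my.cnf template above, deriving the InnoDB buffer
// pool, redo log and per-session buffer sizes from the given total memory.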
143 | func GenerateMyCnf(args map[string]interface{}) (string) {
144 | serverId := getServerId()
145 |
146 | var totalMem int
147 | inputMem := args["memory"].(string)
148 | totalMem = formatMem(inputMem)
149 | var mycnfTemplate = template.Must(template.New("mycnf").Parse(config))
150 |
151 | dynamicvariables:= make(map[string]interface{})
152 | dynamicvariables["basedir"] = args["basedir"]
153 | dynamicvariables["datadir"] = args["datadir"]
154 | dynamicvariables["port"] = args["port"]
155 | dynamicvariables["innodb_buffer_pool_size"] = strconv.Itoa(getInnodbBufferPoolSize(totalMem)) + "M"
156 | dynamicvariables["server_id"] = serverId
157 | dynamicvariables["innodb_flush_neighbors"] = "0"
158 | dynamicvariables["innodb_io_capacity"] = "1000"
159 | dynamicvariables["innodb_io_capacity_max"] = "2500"
160 | if args["mysqld_version"] == "5.6" {
161 | dynamicvariables["mysqld56"] = true
162 | } else if args["mysqld_version"] == "5.7" {
163 | dynamicvariables["mysqld57"] = true
164 | } else {
165 | dynamicvariables["mysqld80"] = true
166 | }
167 | if args["ssd"] == false {
168 | dynamicvariables["innodb_flush_neighbors"] = "1"
169 | dynamicvariables["innodb_io_capacity"] = "200"
170 | dynamicvariables["innodb_io_capacity_max"] = "500"
171 | }
172 |
173 | //Assume read_rnd_buffer_size==sort_buffer_size==join_buffer_size==read_buffer_size*2
174 | read_buffer_size := getReadBufferSize(totalMem)
175 | dynamicvariables["read_buffer_size"] = strconv.Itoa(read_buffer_size) + "M"
176 | dynamicvariables["read_rnd_buffer_size"] = strconv.Itoa(read_buffer_size*2) + "M"
177 | dynamicvariables["sort_buffer_size"] = strconv.Itoa(read_buffer_size*2) + "M"
178 | dynamicvariables["join_buffer_size"] = strconv.Itoa(read_buffer_size*2) + "M"
179 | dynamicvariables["innodb_log_file_size"] = strconv.Itoa(getInnodbLogFileSize(totalMem)) + "M"
180 | b := bytes.NewBuffer(make([]byte, 0))
181 | w := bufio.NewWriter(b)
182 | mycnfTemplate.Execute(w, dynamicvariables)
183 | w.Flush()
184 |
185 | return b.String()
186 | }
187 |
188 | func getServerId() (string) {
189 | r := rand.New(rand.NewSource(time.Now().UnixNano()))
190 |     randNum := r.Intn(999999) + 1 // 1..999999; server_id 0 would make the server refuse replication
191 | return strconv.Itoa(randNum)
192 | }
193 |
194 | func getReadBufferSize(totalMem int) (read_buffer_size int) {
195 | innodb_buffer_pool_size := getInnodbBufferPoolSize(totalMem)
196 | freeSize := totalMem - innodb_buffer_pool_size
197 | //Assume read_rnd_buffer_size==sort_buffer_size==join_buffer_size==read_buffer_size*2
198 | //and max_connections=500
199 | if freeSize <= (2+4+4+4)*500 {
200 | read_buffer_size = 2
201 | } else if freeSize <= (4+8+8+8)*500 {
202 | read_buffer_size = 4
203 | } else if freeSize <= (8+16+16+16)*500 {
204 | read_buffer_size = 8
205 | } else {
206 | read_buffer_size = 16
207 | }
208 | return
209 | }
210 |
211 | func getInnodbBufferPoolSize(totalMem int) int {
212 | var innodb_buffer_pool_size int
213 |
214 | if totalMem < 1024 {
215 | innodb_buffer_pool_size = 128
216 | } else if totalMem <= 4*1024 {
217 | innodb_buffer_pool_size = totalMem / 2
218 | } else {
219 | innodb_buffer_pool_size = int(float32(totalMem) * 0.75)
220 | }
221 |
222 | return innodb_buffer_pool_size
223 | }
224 |
225 |
226 | func getInnodbLogFileSize(totalMem int) int {
227 | var innodb_log_file_size int
228 |
229 | if totalMem < 1024 {
230 | innodb_log_file_size = 48
231 | } else if totalMem <= 4*1024 {
232 | innodb_log_file_size = 128
233 | } else if totalMem <= 8*1024 {
234 | innodb_log_file_size = 512
235 | } else {
236 | innodb_log_file_size = 1024
237 | }
238 | return innodb_log_file_size
239 | }
240 |
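// formatMem normalizes the --memory input (e.g. "512M", "16G", "16GB") to megabytes.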
241 | func formatMem(inputMem string) (totalMem int) {
242 |     matched, _ := regexp.MatchString(`^(?i)\d+[MG]B?$`, inputMem)
243 | if ! matched {
244 | fmt.Println(`Valid units for --memory are "M","G"`)
245 | os.Exit(1)
246 | }
247 | inputMemLower := strings.ToLower(inputMem)
248 | if strings.Contains(inputMemLower, "m") {
249 | inputMemLower = strings.Split(inputMemLower, "m")[0]
250 |
251 | } else if strings.Contains(inputMemLower, "g") {
252 | inputMemLower = strings.Split(inputMemLower, "g")[0]
253 | temp, _ := strconv.Atoi(inputMemLower)
254 | inputMemLower = strconv.Itoa(temp * 1024)
255 | }
256 | totalMem, _ = strconv.Atoi(inputMemLower)
257 | return
258 | }
259 |
260 | var (
261 | help bool
262 | mysql_version string
263 | basedir string
264 | datadir string
265 | port int
266 | memory string
267 | ssd bool
268 | )
269 |
270 | func init() {
271 | flag.BoolVar(&help,"help",false, "Display usage")
272 | flag.StringVar(&mysql_version,"mysql_version","8.0","MySQL version")
273 | flag.StringVar(&basedir,"basedir","/usr/local/mysql","Path to installation directory")
274 | flag.StringVar(&datadir,"datadir","/data","Path to the database root directory")
275 | flag.IntVar(&port,"port",3306,"Port number to use for connection")
276 | flag.StringVar(&memory,"memory","","The size of the server memory")
277 | flag.BoolVar(&ssd,"ssd",false, "Is it ssd")
278 | }
279 |
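// Example invocation (illustrative; flags as defined in init above):
//   ./mysql_cnf_generator --mysql_version 5.7 --port 3306 --memory 16G --ssd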
280 | func main() {
281 | flag.Parse()
282 | if help {
283 |         fmt.Fprintf(os.Stdout, `mysql_cnf_generator version: 1.0.0
284 | Usage:
285 |     mysql_cnf_generator --mysql_version 5.7 --basedir /usr/local/mysql --datadir /data --port 3306 --memory 16G --ssd
286 | Options:
287 | `)
288 | flag.PrintDefaults()
289 | return
290 | }
291 |     if memory == "" {
292 |         fmt.Println("--memory is required, e.g. --memory 16G")
293 |         return
294 |     }
295 |     mycnf_args := make(map[string]interface{})
293 | mycnf_args["basedir"] = basedir
294 | mycnf_args["datadir"] = datadir
295 | mycnf_args["port"] = port
296 | mycnf_args["memory"] = memory
297 | mycnf_args["mysqld_version"] = mysql_version
298 | mycnf_args["ssd"] = ssd
299 |
300 | mycnf := GenerateMyCnf(mycnf_args)
301 | fmt.Println(mycnf)
302 |
303 | }
304 |
--------------------------------------------------------------------------------
/mysql/get_binlog_timestamp_info/go.sum:
--------------------------------------------------------------------------------
1 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
2 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
3 | github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
4 | github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
5 | github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
6 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
9 | github.com/go-mysql-org/go-mysql v1.7.0 h1:qE5FTRb3ZeTQmlk3pjE+/m2ravGxxRDrVDTyDe9tvqI=
10 | github.com/go-mysql-org/go-mysql v1.7.0/go.mod h1:9cRWLtuXNKhamUPMkrDVzBhaomGvqLRLtBiyjvjc4pk=
11 | github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
12 | github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
13 | github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
14 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
15 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
16 | github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
17 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
18 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
19 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
20 | github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
21 | github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
22 | github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
23 | github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
24 | github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
25 | github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
26 | github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
27 | github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
28 | github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
29 | github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 h1:+FZIDR/D97YOPik4N4lPDaUcLDF/EQPogxtlHB2ZZRM=
30 | github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
31 | github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
32 | github.com/pingcap/tidb/parser v0.0.0-20221126021158-6b02a5d8ba7d/go.mod h1:ElJiub4lRy6UZDb+0JHDkGEdr6aOli+ykhyej7VCLoI=
33 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
34 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
35 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
36 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
37 | github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
38 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
39 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
40 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
41 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
42 | github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
43 | github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
44 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
45 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
46 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
47 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
48 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
49 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
50 | github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
51 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
52 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
53 | go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
54 | go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
55 | go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
56 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
57 | go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
58 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
59 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
60 | go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
61 | go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
62 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
63 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
64 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
65 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
66 | golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
67 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
68 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
69 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
70 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
71 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
72 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
73 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
74 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
75 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
76 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
77 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
78 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
79 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
80 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
81 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
82 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
83 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
84 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
85 | golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
86 | golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
87 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
88 | golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
89 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
90 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
91 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
92 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
93 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
94 | gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
95 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
96 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
97 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
98 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
99 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
100 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
101 | modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
102 | modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=
103 | modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk=
104 | modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk=
105 | modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
106 | modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
107 | modernc.org/parser v1.0.0/go.mod h1:H20AntYJ2cHHL6MHthJ8LZzXCdDCHMWt1KZXtIMjejA=
108 | modernc.org/parser v1.0.2/go.mod h1:TXNq3HABP3HMaqLK7brD1fLA/LfN0KS6JxZn71QdDqs=
109 | modernc.org/scanner v1.0.1/go.mod h1:OIzD2ZtjYk6yTuyqZr57FmifbM9fIH74SumloSsajuE=
110 | modernc.org/sortutil v1.0.0/go.mod h1:1QO0q8IlIlmjBIwm6t/7sof874+xCfZouyqZMLIAtxM=
111 | modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
112 | modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
113 | modernc.org/y v1.0.1/go.mod h1:Ho86I+LVHEI+LYXoUKlmOMAM1JTXOCfj8qi1T8PsClE=
114 |
--------------------------------------------------------------------------------
/mysql/get_binlog_timestamp_info/get_binlog_timestamp_info.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "database/sql"
5 | "flag"
6 | "fmt"
7 | "github.com/go-mysql-org/go-mysql/mysql"
8 | "github.com/go-mysql-org/go-mysql/replication"
9 | _ "github.com/go-sql-driver/mysql"
10 | "github.com/olekukonko/tablewriter"
11 | "github.com/siddontang/go-log/log"
12 | "golang.org/x/crypto/ssh/terminal"
13 | "golang.org/x/net/context"
14 | "os"
15 | "strconv"
16 | "strings"
17 | "sync"
18 | "time"
19 | )
20 |
21 | type BinlogInfo struct {
22 | LogName string
23 | FileSize string
24 | StartTime uint32
25 | EndTime uint32
26 | PreviousGTIDs string
27 | NextLogPreviousGTIDs string
28 | }
29 |
30 | type ConcurrentResult struct {
31 | StartTime uint32
32 | PreviousGTIDs string
33 | Index int
34 | Err error
35 | }
36 |
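// GetGTIDSubtract returns gtid1 minus gtid2, i.e. the GTID set contained in
// gtid1 but not in gtid2.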
37 | func GetGTIDSubtract(gtid1, gtid2 string) (string, error) {
38 |     // Parse both GTID sets
39 | parsedGTID1, err := mysql.ParseGTIDSet("mysql", gtid1)
40 | if err != nil {
41 | return "", fmt.Errorf("error parsing GTID1: %v", err)
42 | }
43 | m1 := *parsedGTID1.(*mysql.MysqlGTIDSet)
44 | parsedGTID2, err := mysql.ParseGTIDSet("mysql", gtid2)
45 | if err != nil {
46 | return "", fmt.Errorf("error parsing GTID2: %v", err)
47 | }
48 |
49 | m2 := *parsedGTID2.(*mysql.MysqlGTIDSet)
50 |     // Compute the difference (gtid1 minus gtid2)
51 | err = m1.Minus(m2)
52 | if err != nil {
53 | return "", fmt.Errorf("error calculating GTID difference: %v", err)
54 | }
55 |
56 | return m1.String(), nil
57 | }
58 |
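// ExtractGTIDSuffix strips the server UUID when the set holds a single UUID,
// leaving just the interval part (e.g. "1-100").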
59 | func ExtractGTIDSuffix(gtidStr string) string {
60 | if !strings.Contains(gtidStr, ",") && strings.Contains(gtidStr, ":") {
61 | parts := strings.Split(gtidStr, ":")
62 | if len(parts) == 2 {
63 | return parts[1]
64 | }
65 | }
66 | return gtidStr
67 | }
68 |
69 | func ConvertUnixTimestampToFormattedTime(unixTimestamp int64) (string, error) {
70 |     // Convert the Unix timestamp to a time.Time
71 |     t := time.Unix(unixTimestamp, 0)
72 |
73 |     // Format it using the default datetime layout
74 | formattedTime := t.Format("2006-01-02 15:04:05")
75 |
76 | return formattedTime, nil
77 | }
78 |
79 | // ConvertBytesToHumanReadable converts a byte count into a human-readable unit
80 | func ConvertBytesToHumanReadable(bytes uint64) string {
81 | const (
82 | kib = 1024
83 | mib = 1024 * kib
84 | gib = 1024 * mib
85 | )
86 |
87 | unit := "bytes"
88 | divisor := uint64(1)
89 |
90 | switch {
91 | case bytes >= gib:
92 | divisor = gib
93 | unit = "GB"
94 | case bytes >= mib:
95 | divisor = mib
96 | unit = "MB"
97 | case bytes >= kib:
98 | divisor = kib
99 | unit = "KB"
100 | }
101 |
102 | value := float64(bytes) / float64(divisor)
103 | format := "%.2f %s"
104 | result := fmt.Sprintf(format, value, unit)
105 | return result
106 | }
107 |
108 | func getBinaryLogs(dsn string) ([][]string, error) {
109 |     // Connect to the MySQL server
110 | db, err := sql.Open("mysql", dsn)
111 | if err != nil {
112 | return nil, fmt.Errorf("error connecting to MySQL: %v", err)
113 | }
114 | defer db.Close()
115 |
116 |     // Run the SHOW BINARY LOGS query
117 | rows, err := db.Query("SHOW BINARY LOGS;")
118 | if err != nil {
119 | return nil, fmt.Errorf("error executing SHOW BINARY LOGS: %v", err)
120 | }
121 | defer rows.Close()
122 |
123 |     // Slice holding [Log_name, File_size] pairs
124 |     var binaryLogs [][]string
125 |
126 |     // Iterate over the result set and collect each row
127 | for rows.Next() {
128 | columns, err := rows.Columns()
129 | if err != nil {
130 | return nil, fmt.Errorf("error fetching columns: %v", err)
131 | }
132 |
133 |         // Create a slice matching the column count to receive the scanned values
134 | values := make([]interface{}, len(columns))
135 | for i := range values {
136 |             values[i] = new(string) // use *string to receive each column as text
137 | }
138 |
139 |         // Scan the row into the value slice
140 | if err := rows.Scan(values...); err != nil {
141 | return nil, fmt.Errorf("error scanning row: %v", err)
142 | }
143 |
144 |         // Keep the first two columns (Log_name, File_size)
145 | logName := *(values[0].(*string))
146 | fileSize := *(values[1].(*string))
147 |
148 | binaryLogs = append(binaryLogs, []string{logName, fileSize})
149 | }
150 |
151 |     // Check for errors encountered during iteration
152 | if err := rows.Err(); err != nil {
153 | return nil, fmt.Errorf("error during row iteration: %v", err)
154 | }
155 |
156 |     // Return the collected binlog list
157 | return binaryLogs, nil
158 | }
159 |
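// getFormatAndPreviousGTIDs reads the first events of the given binlog to grab
// its start timestamp (FORMAT_DESCRIPTION_EVENT) and its PREVIOUS_GTIDS set.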
160 | func getFormatAndPreviousGTIDs(host string, port int, user string, password string, binlogFilename string, index int) (uint32, string, error) {
161 |     // Create a BinlogSyncer that connects like a replica
162 | cfg := replication.BinlogSyncerConfig{
163 | ServerID: uint32(index + 33061),
164 | Flavor: "mysql",
165 | Host: host,
166 | Port: uint16(port),
167 | User: user,
168 | Password: password,
169 | }
170 |
171 | cfg.Logger = log.NewDefault(&log.NullHandler{})
172 |
173 | syncer := replication.NewBinlogSyncer(cfg)
174 | defer syncer.Close()
175 |
176 | streamer, err := syncer.StartSync(mysql.Position{Name: binlogFilename, Pos: 4})
177 | if err != nil {
178 | return 0, "", fmt.Errorf("error starting binlog syncer: %v", err)
179 | }
180 |
181 | var formatTimestamp uint32
182 | var previousGTIDs string
183 |
184 | ctx := context.Background()
185 | for i := 0; i < 3; i++ {
186 |         // Read the next event
187 | ev, err := streamer.GetEvent(ctx)
188 | if err != nil {
189 | return 0, "", fmt.Errorf("error getting binlog event: %v", err)
190 | }
191 |
192 |         // Record the timestamp of the FORMAT_DESCRIPTION_EVENT
193 | if ev.Header.EventType == replication.FORMAT_DESCRIPTION_EVENT {
194 | formatTimestamp = ev.Header.Timestamp
195 | }
196 |
197 |         // Record the PREVIOUS_GTIDS_EVENT content and stop reading
198 | if ev.Header.EventType == replication.PREVIOUS_GTIDS_EVENT {
199 | previousGTIDsEvent := ev.Event.(*replication.PreviousGTIDsEvent)
200 | previousGTIDs = previousGTIDsEvent.GTIDSets
201 | break
202 | }
203 | }
204 |
205 | return formatTimestamp, previousGTIDs, nil
206 | }
207 |
208 | func main() {
209 | // Parse command line arguments
210 | host := flag.String("h", "localhost", "MySQL host")
211 | port := flag.Int("P", 3306, "MySQL port")
212 | user := flag.String("u", "root", "MySQL user")
213 | password := flag.String("p", "", "MySQL password")
214 | var verbose bool
215 | flag.BoolVar(&verbose, "v", false, "Enable verbose logging")
216 | numParallel := flag.Int("n", 5, "Number of goroutines to run concurrently")
217 | flag.Parse()
218 | if *password == "" {
219 | fmt.Print("Enter MySQL password: ")
220 | bytePassword, err := terminal.ReadPassword(int(os.Stdin.Fd()))
221 | fmt.Println()
222 | if err != nil {
223 | log.Fatalf("Error: Failed to read the password - %v", err)
224 | }
225 | *password = string(bytePassword)
226 | }
227 |
228 | dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/mysql", *user, *password, *host, *port)
229 |
230 |     // Fetch the list of binlogs via SHOW BINARY LOGS
231 | binaryLogs, err := getBinaryLogs(dsn)
232 | if err != nil {
233 | fmt.Println("Error:", err)
234 | os.Exit(1)
235 | }
236 | if verbose {
237 | timestamp := time.Now().Format("2006/01/02 15:04:05")
238 | fmt.Printf("[%s] [info] get_binlog_timestamp_info.go SHOW BINARY LOGS done, %d binlogs to analyze\n", timestamp, len(binaryLogs))
239 |
240 | }
241 |
242 | // Create wait group and result channel
243 | var wg sync.WaitGroup
244 | ch := make(chan ConcurrentResult, len(binaryLogs))
245 |
246 | // Limit parallelism
247 | sem := make(chan struct{}, *numParallel)
248 |
249 | // Iterate over binary logs and fetch format timestamp and previous GTIDs concurrently
250 | for i := len(binaryLogs) - 1; i >= 0; i-- {
251 | sem <- struct{}{}
252 | wg.Add(1)
253 | go func(index int) {
254 | defer func() {
255 | <-sem
256 | wg.Done()
257 | }()
258 | logName := binaryLogs[index][0]
259 |             startTime, previousGTIDs, err := getFormatAndPreviousGTIDs(*host, *port, *user, *password, logName, index)
260 | ch <- ConcurrentResult{StartTime: startTime, PreviousGTIDs: previousGTIDs, Index: index, Err: err}
261 | }(i)
262 | }
263 |
264 | // Wait for all goroutines to finish
265 | wg.Wait()
266 | close(ch)
267 |
268 | // Collect results from channel
269 | results := make([]ConcurrentResult, len(binaryLogs))
270 | for r := range ch {
271 | results[r.Index] = r
272 | }
273 | originalBinlogs := make([]BinlogInfo, len(binaryLogs))
274 |     for _, result := range results {
275 |         // Surface any error returned by a worker goroutine
276 |         if result.Err != nil {
277 |             fmt.Println("Error:", result.Err)
278 |             os.Exit(1)
279 |         }
280 |         logName := binaryLogs[result.Index][0]
281 |         fileSize := binaryLogs[result.Index][1]
277 | binlog := BinlogInfo{
278 | LogName: logName,
279 | FileSize: fileSize,
280 | StartTime: result.StartTime,
281 | PreviousGTIDs: result.PreviousGTIDs,
282 | }
283 | // fmt.Println(result.Index, logName,fileSize, result.StartTime, result.PreviousGTIDs)
284 | originalBinlogs[result.Index] = binlog
285 | }
286 |
287 | var logEndTime uint32
288 | var nextLogPreviousGTIDs string
289 | var processedBinlogs []BinlogInfo
290 | for i := len(binaryLogs) - 1; i >= 0; i-- {
291 | log := originalBinlogs[i]
292 | logName, fileSize, startTime, previousGTIDs := log.LogName, log.FileSize, log.StartTime, log.PreviousGTIDs
293 | if verbose {
294 | timestamp := time.Now().Format("2006/01/02 15:04:05")
295 | fmt.Printf("[%s] [info] get_binlog_timestamp_info.go %s done, still %d binlogs to analyze\n", timestamp, logName, i)
296 | }
297 | processedBinlogs = append(processedBinlogs, BinlogInfo{logName, fileSize, startTime, logEndTime, previousGTIDs, nextLogPreviousGTIDs})
298 | logEndTime = startTime
299 | nextLogPreviousGTIDs = previousGTIDs
300 |     }
306 | table := tablewriter.NewWriter(os.Stdout)
307 | table.SetAutoFormatHeaders(false)
308 | table.SetHeader([]string{"Log_name", "File_size", "Start_time", "End_time", "Duration", "GTID"})
309 |
310 | for i := len(processedBinlogs) - 1; i >= 0; i-- {
311 | binlog := processedBinlogs[i]
312 | fileSize, err := strconv.ParseUint(binlog.FileSize, 10, 64)
313 | if err != nil {
314 | fmt.Println("Error parsing string to uint64:", err)
315 | return
316 | }
317 | startUnixTimestamp := int64(binlog.StartTime)
318 | startTime := time.Unix(startUnixTimestamp, 0)
319 | startFormattedTime, err := ConvertUnixTimestampToFormattedTime(startUnixTimestamp)
320 | if err != nil {
321 | fmt.Println("Error:", err)
322 | return
323 | }
324 | endUnixTimestamp := int64(binlog.EndTime)
325 | endTime := time.Unix(endUnixTimestamp, 0)
326 | endFormattedTime, err := ConvertUnixTimestampToFormattedTime(endUnixTimestamp)
327 |
328 | if err != nil {
329 | fmt.Println("Error:", err)
330 | return
331 | }
332 |
333 | duration := endTime.Sub(startTime)
334 | durationFormatted := fmt.Sprintf("%02d:%02d:%02d", int(duration.Hours()), int(duration.Minutes())%60, int(duration.Seconds())%60)
335 |
336 | if endUnixTimestamp == 0 {
337 | endFormattedTime, durationFormatted = "", ""
338 | }
339 | gtidDifference, err := GetGTIDSubtract(binlog.NextLogPreviousGTIDs, binlog.PreviousGTIDs)
340 | if err != nil {
341 | fmt.Println("Error:", err)
342 | return
343 |
344 | }
345 |
346 | table.Append([]string{binlog.LogName, fmt.Sprintf("%d (%s)", fileSize, ConvertBytesToHumanReadable(fileSize)), startFormattedTime, endFormattedTime, durationFormatted, ExtractGTIDSuffix(gtidDifference)})
347 | }
348 | table.Render()
349 |
350 | }
351 |
--------------------------------------------------------------------------------
/mysql/binlog_summary.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # @Time : 2021-04-18
4 | # @Author: Victor
5 | # @Blog : https://www.cnblogs.com/ivictor
6 |
7 | from __future__ import print_function
8 | import os
9 | import re
10 | import sqlite3
11 | import argparse
12 | import datetime
13 |
14 | SQLITE_DB_FILE = r'/tmp/Victor&sqlite3.db'
15 |
16 | class SQLite():
17 | def __init__(self,db_file):
18 | self.db_file=db_file
19 | def __enter__(self):
20 | self.conn = sqlite3.connect(self.db_file)
21 | self.cursor=self.conn.cursor()
22 | return self
23 | def execute(self,sql):
24 | self.cursor.execute(sql)
25 | result=self.cursor.fetchall()
26 | return result
27 | def executemany(self,sql,paras):
28 | self.cursor.executemany(sql, paras)
29 | def commit(self):
30 | self.conn.commit()
31 | def __exit__(self,exc_type, exc_value, traceback):
32 | self.cursor.close()
33 | self.conn.close()
34 |
35 | def init_sqlite_table():
36 |     dml_info_create_sql="create table dml_info (id integer primary key autoincrement,transaction_name varchar(10), \
37 |         schema_table_name varchar(50), dml_type varchar(10), dml_time datetime)"
38 |     transaction_info_create_sql="create table transaction_info (id integer primary key autoincrement, \
39 |         transaction_name varchar(10),transaction_begin_time datetime,transaction_commit_time datetime, \
40 |         transaction_begin_log_pos integer,transaction_commit_log_pos integer)"
41 | with SQLite(SQLITE_DB_FILE) as db:
42 | db.execute(dml_info_create_sql)
43 | db.execute(transaction_info_create_sql)
44 | db.commit()
45 |
46 |
47 | def parse_binlog_text_file(binlog_text_file):
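    # Expects the text produced by mysqlbinlog (run with -v/-vv so row events
    # are decoded), not the raw binary log itself.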
48 | transaction_number=1
49 | transaction_name='t1'
50 | with open(binlog_text_file) as f:
51 | dml_info_records = []
52 | transaction_info_records=[]
53 | use_database=""
54 | for line in f:
55 | dml_flag = 0
56 | match_sub_strings=["use","# at","server id","BEGIN","insert","delete","update","DELETE","INSERT","UPDATE","COMMIT"]
57 | if not any(each_str in line for each_str in match_sub_strings):
58 | continue
59 | if "server id" in line:
60 | if "Query" not in line and "Xid" not in line:
61 | continue
62 | if re.match(r'# at \d+',line):
63 | start_log_pos=line.split()[2]
64 | elif "server id" in line:
65 | m=re.match(r'#(.*) server id.*end_log_pos (\d+)',line)
66 | # dml_time is binlog event begin time
67 | dml_time=m.group(1)
68 | dml_time=datetime.datetime.strptime(dml_time,'%y%m%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')
69 | end_log_pos=m.group(2)
70 | elif re.match(r'^BEGIN\n$',line):
71 | transaction_begin_time=dml_time
72 | transaction_begin_log_pos=start_log_pos
73 | transaction_name="t%d"%(transaction_number)
74 | transaction_number=transaction_number+1
75 | elif re.match('use',line) and line.strip().endswith('/*!*/;'):
76 | use_database=re.split('`|`',line)[1]
77 | elif re.match('### (DELETE|INSERT|UPDATE)',line):
78 | line_split=line.split()
79 | schema_table_name=line_split[-1].replace('`','').strip('\n')
80 | dml_type=line_split[1]
81 | dml_flag=1
82 | elif re.match('insert|delete|update',line,re.I):
83 | if re.match('insert',line,re.I):
84 | m= re.search(r'(into)(.*?)(values|\(|\n|partition|select)',line,re.I)
85 | table=m.group(2).strip()
86 | dml_type='INSERT'
87 | elif re.match('delete',line,re.I):
88 | m=re.search(r'(from)(.*?)(partition|where|limit|\n)',line,re.I)
89 | table=m.group(2).strip()
90 | dml_type='DELETE'
91 | else:
92 | m=re.search(r'(update|LOW_PRIORITY|IGNORE)(.*?)(set|\n)',line,re.I)
93 | table=m.group(2).strip()
94 | dml_type='UPDATE'
95 | schema_table_name=table
96 | if '.' not in schema_table_name:
97 | if use_database != "":
98 | schema_table_name = use_database + '.' + table
99 | else:
100 | schema_table_name=table
101 | dml_flag=1
102 | elif 'COMMIT/*!*/;' in line:
103 | transaction_commit_time=dml_time
104 | transaction_commit_log_pos=end_log_pos
105 | transaction_info_records.append([transaction_name,transaction_begin_time,transaction_commit_time,transaction_begin_log_pos,transaction_commit_log_pos])
106 | if dml_flag ==1:
107 | dml_info_records.append([transaction_name,schema_table_name,dml_type,dml_time])
108 | if len(dml_info_records) % 10000 ==0:
109 | with SQLite(SQLITE_DB_FILE) as db:
110 | db.executemany("insert into dml_info(transaction_name,schema_table_name,dml_type,dml_time) values (?,?,?,?)",dml_info_records)
111 | db.commit()
112 | dml_info_records=[]
113 | if len(transaction_info_records) % 10000 ==0:
114 | with SQLite(SQLITE_DB_FILE) as db:
115 | db.executemany("insert into transaction_info(transaction_name,transaction_begin_time,transaction_commit_time, \
116 | transaction_begin_log_pos,transaction_commit_log_pos) values (?,?,?,?,?)", transaction_info_records)
117 | db.commit()
118 | transaction_info_records=[]
119 | with SQLite(SQLITE_DB_FILE) as db:
120 | db.executemany("insert into dml_info(transaction_name,schema_table_name,dml_type,dml_time) values (?,?,?,?)",dml_info_records)
121 | db.executemany("insert into transaction_info(transaction_name,transaction_begin_time,transaction_commit_time, \
122 | transaction_begin_log_pos,transaction_commit_log_pos) values (?,?,?,?,?)", transaction_info_records)
123 | db.commit()
124 |
125 | def query_and_print(col_name,sql,print_flag=True):
126 | with SQLite(SQLITE_DB_FILE) as db:
127 | query_result=db.execute(sql)
128 | if not print_flag:
129 | return query_result
130 | else:
131 | for each_col in col_name:
132 | print(each_col.ljust(18),end=' ')
133 | print()
134 | for each_row in query_result:
135 | for each_col in each_row:
136 | print(str(each_col).ljust(18),end=' ')
137 | print()
138 |
139 | def operation_per_second(start_datetime,stop_datetime,limit):
140 | if start_datetime:
141 | get_opr_sql = "select schema_table_name,upper(dml_type),count(*) times from dml_info \
142 | where dml_time BETWEEN '%s' and '%s' group by schema_table_name,dml_type order by 3 desc"%(start_datetime, stop_datetime)
143 | else:
144 | get_opr_sql = "select schema_table_name,upper(dml_type),count(*) times from dml_info group by schema_table_name,dml_type order by 3 desc"
145 | if limit:
146 | get_opr_sql = '%s limit %d'%(get_opr_sql,limit)
147 | query_and_print(("TABLE_NAME","DML_TYPE","NUMS"),get_opr_sql)
148 |
149 | def transaction_per_second(start_datetime,stop_datetime,limit):
150 | if start_datetime:
151 | get_tps_sql="select transaction_commit_time, count(*) from transaction_info \
152 | where transaction_commit_time BETWEEN '%s' and '%s' group by transaction_commit_time order by 1"%(start_datetime, stop_datetime)
153 | else:
154 | get_tps_sql = "select transaction_commit_time, count(*) from transaction_info group by transaction_commit_time order by 1"
155 | if limit:
156 | get_tps_sql = '%s limit %d'%(get_tps_sql,limit)
157 | query_and_print(("COMMIT_TIME","TPS"),get_tps_sql)
158 |
159 | def get_transaction_info(start_datetime,stop_datetime,sort_condition,extend,limit):
160 | if start_datetime:
161 | get_transaction_sql = "select transaction_name,transaction_begin_time,transaction_commit_time,transaction_begin_log_pos, \
162 | transaction_commit_log_pos,strftime('%%s',transaction_commit_time)-strftime('%%s',transaction_begin_time),\
163 | transaction_commit_log_pos-transaction_begin_log_pos from transaction_info where transaction_commit_time \
164 | BETWEEN '%s' and '%s'"%(start_datetime,stop_datetime)
165 | else:
166 | get_transaction_sql = "select transaction_name,transaction_begin_time,transaction_commit_time,transaction_begin_log_pos, \
167 | transaction_commit_log_pos,strftime('%s',transaction_commit_time)-strftime('%s',transaction_begin_time),\
168 | transaction_commit_log_pos-transaction_begin_log_pos from transaction_info"
169 | if sort_condition == "time":
170 | get_transaction_sql = '%s order by 6 desc'%(get_transaction_sql)
171 | elif sort_condition == "size":
172 | get_transaction_sql = '%s order by 7 desc'%(get_transaction_sql)
173 | if limit:
174 | get_transaction_sql = '%s limit %d'%(get_transaction_sql,limit)
175 | col_names=("TRANS_NAME","BEGIN_TIME","COMMIT_TIME","BEGIN_LOG_POS","COMMIT_LOG_POS","DURATION_TIME","SIZE")
176 | if not extend:
177 | query_and_print(col_names,get_transaction_sql)
178 | else:
179 | transaction_info=query_and_print(col_names,get_transaction_sql,False)
180 | get_opr_sql="select transaction_name,schema_table_name,upper(dml_type),count(*) times from dml_info \
181 | group by 1,2,3"
182 | opr_info=query_and_print([],get_opr_sql,False)
183 | opr_info_dict={}
184 | for each_opr in opr_info:
185 | if each_opr[0] in opr_info_dict:
186 | opr_info_dict[each_opr[0]].append([each_opr[1],each_opr[2],each_opr[3]])
187 | else:
188 | opr_info_dict[each_opr[0]]=[[each_opr[1],each_opr[2],each_opr[3]]]
189 | print("TRANS_NAME".ljust(15),"BEGIN_TIME".ljust(20),"COMMIT_TIME".ljust(20),"BEGIN_LOG_POS".ljust(15), \
190 | "COMMIT_LOG_POS".ljust(15),"DURATION_TIME".ljust(15),"SIZE")
191 | for each_transaction_info in transaction_info:
192 | transaction_name=each_transaction_info[0]
193 | print(each_transaction_info[0].ljust(15),each_transaction_info[1].ljust(20),each_transaction_info[2].ljust(20), \
194 | str(each_transaction_info[3]).ljust(15),str(each_transaction_info[4]).ljust(15), \
195 | str(each_transaction_info[5]).ljust(15),each_transaction_info[6])
196 | for each_opr in opr_info_dict[transaction_name]:
197 | print("├── ",each_opr[0].ljust(41),each_opr[1].ljust(15),each_opr[2])
198 |
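# Example (illustrative): decode the binlog to text first, then summarize it:
#   mysqlbinlog -vv mysql-bin.000001 > /tmp/binlog.txt
#   ./binlog_summary.py -f /tmp/binlog.txt -c tps --limit 10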
199 | def main():
200 | parser = argparse.ArgumentParser()
201 | parser.add_argument("-f", "--file",dest="binlog_text_file", help="Binlog text file, not the Raw binary file")
202 | parser.add_argument("--new", action='store_true', help="Make a fresh start")
203 | parser.add_argument("-c","--command",dest='command_type',choices=['tps', 'opr', 'transaction'], help="Command type: [tps, opr, transaction],tps: transaction per second, opr: dml per table, transaction: show transaction info")
204 | parser.add_argument("--start", dest='start_datetime', help="Start datetime, for example: 2004-12-25 11:25:56")
205 | parser.add_argument("--stop", dest='stop_datetime', help="Stop datetime, for example: 2004-12-25 11:25:56")
206 | parser.add_argument("--sort",dest='sort_condition', help="Sort condition: time or size, you can use it when command type is transaction")
207 | parser.add_argument("-e","--extend",action='store_true', help="Show transaction info in detail,you can use it when command type is transaction")
208 | parser.add_argument("--limit",type=int, dest='limit', help="Limit the number of rows to display")
209 | args = parser.parse_args()
210 |     if (args.start_datetime and not args.stop_datetime) or (args.stop_datetime and not args.start_datetime):
211 |         print("you have to specify both the start_datetime and the stop_datetime")
212 |         exit()
213 |     if args.new and os.path.exists(SQLITE_DB_FILE):
214 |         os.remove(SQLITE_DB_FILE)
215 |     if not os.path.exists(SQLITE_DB_FILE):
216 |         if not args.binlog_text_file:
217 |             parser.error("-f/--file is required when parsing a binlog from scratch")
218 |         init_sqlite_table()
219 |         parse_binlog_text_file(args.binlog_text_file)
218 | if args.command_type == "opr":
219 | operation_per_second(args.start_datetime, args.stop_datetime, args.limit)
220 | if args.command_type == "tps":
221 | transaction_per_second(args.start_datetime, args.stop_datetime, args.limit)
222 | if args.command_type == "transaction":
223 | get_transaction_info(args.start_datetime, args.stop_datetime, args.sort_condition, args.extend, args.limit)
224 |
225 | if __name__ == '__main__':
226 | main()
227 |
--------------------------------------------------------------------------------