├── .gitignore ├── Documentation.txt ├── MANIFEST ├── README.md ├── bin ├── mtls-auto-fill ├── mtls-backup ├── mtls-big-files ├── mtls-delete-rows ├── mtls-expired-tables ├── mtls-fake-mysqld ├── mtls-file-stat ├── mtls-file-truncate ├── mtls-http ├── mtls-kill-all-conections ├── mtls-log ├── mtls-monitor ├── mtls-multi-session ├── mtls-perf-bench ├── mtls-ps-mem ├── mtls-random-passwd ├── mtls-rows-diff ├── mtls-sql-distribution ├── mtlsanalysis ├── mtlsbackup ├── mtlsbigfiles ├── mtlsdeleterows ├── mtlshttp ├── mtlslog └── mtlsmonitor ├── build ├── lib │ └── mtls │ │ ├── __init__.py │ │ ├── base.py │ │ ├── innodb_statu.py │ │ ├── inspection.py │ │ ├── mgr.py │ │ ├── replication.py │ │ ├── statu.py │ │ └── variable.py └── scripts-3.6 │ ├── mtlsbackup │ ├── mtlslog │ └── mtlsmonitor ├── conf └── mtlsbackup.cnf ├── imgs ├── cpu.png ├── ibrw.png ├── mem.png ├── mp-wechat.jpg ├── mtlshttp.png ├── net.png ├── reads.png ├── rs.png ├── t.png └── writes.png ├── mtls ├── __init__.py ├── base.py ├── binlog.py ├── core.py ├── innodb_statu.py ├── inspection.py ├── kits │ ├── __init__.py │ └── fileformat.py ├── mgr.py ├── ps │ ├── __init__.py │ └── mem.py ├── replication.py ├── sessions.py ├── statu.py ├── values.py └── variable.py ├── mtlsmonitor ├── requirements.txt ├── setup.py └── tests ├── __init__.py ├── test_base.py └── test_variable.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.DS_Store 2 | *.vscode 3 | *.pyc 4 | *.retry 5 | dist* -------------------------------------------------------------------------------- /Documentation.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/Documentation.txt -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | # file 
GENERATED by distutils, do NOT edit 2 | setup.py 3 | bin/mtls-auto-fill 4 | bin/mtls-backup 5 | bin/mtls-big-files 6 | bin/mtls-delete-rows 7 | bin/mtls-expired-tables 8 | bin/mtls-fake-mysqld 9 | bin/mtls-file-stat 10 | bin/mtls-file-truncate 11 | bin/mtls-http 12 | bin/mtls-kill-all-conections 13 | bin/mtls-log 14 | bin/mtls-monitor 15 | bin/mtls-multi-session 16 | bin/mtls-perf-bench 17 | bin/mtls-ps-mem 18 | bin/mtls-random-passwd 19 | bin/mtls-rows-diff 20 | bin/mtls-sql-distribution 21 | bin/mtlsanalysis 22 | bin/mtlsbackup 23 | bin/mtlsbigfiles 24 | bin/mtlsdeleterows 25 | bin/mtlshttp 26 | bin/mtlslog 27 | bin/mtlsmonitor 28 | mtls/__init__.py 29 | mtls/base.py 30 | mtls/binlog.py 31 | mtls/core.py 32 | mtls/innodb_statu.py 33 | mtls/inspection.py 34 | mtls/mgr.py 35 | mtls/replication.py 36 | mtls/sessions.py 37 | mtls/statu.py 38 | mtls/values.py 39 | mtls/variable.py 40 | mtls/kits/__init__.py 41 | mtls/kits/fileformat.py 42 | mtls/ps/__init__.py 43 | mtls/ps/mem.py 44 | -------------------------------------------------------------------------------- /bin/mtls-auto-fill: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #-*- coding: UTF-8 -*- 3 | 4 | """ 5 | 自动的向给定的库表中完全随机的填充数据 6 | """ 7 | 8 | import os 9 | import sys 10 | import logging 11 | import argparse 12 | import concurrent.futures 13 | from mysql import connector 14 | from datetime import datetime 15 | from collections import namedtuple 16 | from mtls.values import InsertSQL,TableMeta 17 | from mysql.connector.errors import IntegrityError 18 | from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor 19 | 20 | 21 | name = os.path.basename(__file__) 22 | 23 | InsertStat = namedtuple('InsertStat','start_at end_at rows cost_time') 24 | 25 | def parser_cmd_args(): 26 | """ 27 | 处理命令行参数 28 | """ 29 | args = argparse.ArgumentParser(name) 30 | args.add_argument("--host",type=str,default="127.0.0.1",help="MySQL 主机 IP ") 31 | 
def insert(host="127.0.0.1",port=3306,user="apuser",password="mtls@0352",ssl_disabled=False,database="tempdb",table="t",rows=100):
    """Insert `rows` rows of random data into database.table.

    Returns
    -------
    InsertStat namedtuple on success, [] when the table metadata could not
    be read, None when the insert loop itself failed.
    """
    t_meta = TableMeta(host, port, user, password, database, table)

    # BUG FIX: the original guard was `t_meta.err != None and len(t_meta.meta)`,
    # which only bailed out when there was an error AND metadata was present —
    # the opposite of the intent. Bail out on error, or on empty metadata.
    if t_meta.err is not None:
        logging.exception(t_meta.err)
        return []
    if len(t_meta.meta) == 0:
        logging.error("no column metadata for %s.%s", database, table)
        return []

    # table exists and its metadata is readable
    meta = [_ for _ in t_meta.meta]
    ist = InsertSQL(database, table, meta)

    cnx = None
    start_at = datetime.now()
    try:
        cnx = connector.connect(host=host, port=port, user=user,
                                password=password, ssl_disabled=ssl_disabled)
        cursor = cnx.cursor()

        for i in range(rows):
            sql, args, *_ = ist[i]
            try:
                cursor.execute(sql, args)
                cnx.commit()
            except IntegrityError:
                # duplicate primary key — random data, safe to ignore
                pass

    except Exception as err:
        logging.exception(str(err))
        return None
    finally:
        if hasattr(cnx, 'close'):
            cnx.close()
    end_at = datetime.now()

    # microsecond precision
    cost_time = (end_at - start_at).total_seconds()
    stat = InsertStat(start_at=start_at, end_at=end_at, rows=rows, cost_time=cost_time)
    return stat
def main():
    """Entry point: run the insert workload in 1..N processes, then print a report."""
    args = parser_cmd_args()
    stats = []
    if args.process > 1:
        # multi-process load test
        with ProcessPoolExecutor(max_workers=args.process) as e:
            # BUG FIX: the original submit() omitted args.ssl_disabled, which
            # shifted every later positional argument by one (args.database was
            # passed as ssl_disabled, args.table as database, args.rows as
            # table, and rows silently kept its default of 100).
            futures = [
                e.submit(insert, args.host, args.port, args.user, args.password,
                         args.ssl_disabled, args.database, args.table, args.rows)
                for _ in range(args.process)
            ]

            for future in concurrent.futures.as_completed(futures):
                # .result() also re-raises any worker exception
                result = future.result()
                if result is not None:
                    stats.append(result)
    else:
        # single-process load test
        tmp = insert(args.host, args.port, args.user, args.password,
                     args.ssl_disabled, args.database, args.table, args.rows)
        stats.append(tmp)

    print("\nReport:")
    create_report(stats)
    print("Compelete.\n")
| """ 23 | user = "backup" 24 | password = "DX3906" 25 | host = "127.0.0.1" 26 | port = 3306 27 | full_backup_days = "7" 28 | diff_backup_days = "1,2,3,4,5,6" 29 | backup_tool = "xtrabackup" 30 | backup_data_dir = "/database/backups/3306/data/" 31 | backup_log_dir = "database/backups/3306/log/" 32 | backup_temp_dir = "/database/backups/3306/temp/" 33 | current = None 34 | backup_types = { 35 | "full_backup":"FULLBACKUP", 36 | "diff_backup":"DIFFBACKUP", 37 | "increment_backup":"INCREMNETBACKUP"} 38 | 39 | def __init__(self,mtlsconf): 40 | """ 41 | mtlsconf 是经过configparser处理过的字典 42 | """ 43 | self.user = mtlsconf['global']['user'] if 'user' in mtlsconf['global'] else self.user 44 | self.password = mtlsconf['global']['password'] if 'password' in mtlsconf['global'] else self.password 45 | self.host = mtlsconf['global']['host'] if 'host' in mtlsconf['global'] else self.host 46 | self.port = int(mtlsconf['global']['port']) if 'port' in mtlsconf['global'] else self.port 47 | self.full_backup_days = mtlsconf['global']['full_backup_days'] if 'full_backup_days' in mtlsconf['global'] else self.full_backup_days 48 | self.diff_backup_days = mtlsconf['global']['diff_backup_days'] if 'diff_backup_days' in mtlsconf['global'] else self.diff_backup_days 49 | self.backup_tool = mtlsconf['global']['backup_tool'] if 'backup_tool' in mtlsconf['global'] else self.backup_tool 50 | self.backup_data_dir = mtlsconf['global']['backup_data_dir'] if 'backup_data_dir' in mtlsconf['global'] else self.backup_data_dir 51 | self.backup_log_dir = mtlsconf['global']['backup_log_dir'] if 'backup_log_dir' in mtlsconf['global'] else self.backup_log_dir 52 | self.backup_temp_dir = mtlsconf['global']['backup_temp_dir'] if 'backup_temp_dir' in mtlsconf['global'] else self.backup_temp_dir 53 | self.current = datetime.now() 54 | #开始检查环境信息 55 | self.preExec() 56 | 57 | def directorCheck(self,path): 58 | """ 59 | 在备份之前对目录进行检查 60 | """ 61 | logging.info("开始检查 {path} ".format(path=path)) 62 | if not 
    def preExec(self):
        """Check (and if needed create) the backup environment before running."""
        # ensure data / log / temp directories all exist
        self.directorCheck(self.backup_data_dir)
        self.directorCheck(self.backup_log_dir)
        self.directorCheck(self.backup_temp_dir)

    def backupDecisionByWeekDay(self):
        """Decide full vs differential backup from today's weekday.

        The checks are SUBSTRING tests against the config strings
        full_backup_days / diff_backup_days (e.g. "1,2,3,4,5,6").

        NOTE(review): datetime.weekday() returns 0-6 (Monday=0), but the
        default full_backup_days is "7", which can therefore never match —
        the decision then returns None and backup()'s else branch falls back
        to a full backup. Presumably isoweekday() (1-7, Sunday=7) was
        intended — confirm before changing.
        """
        weekday = str(self.current.weekday())
        if weekday in self.full_backup_days:
            logging.info("今天星期 {0} 根据配置文件中的备份计划,决定进行全备".format(weekday))
            return self.backup_types['full_backup']
        elif weekday in self.diff_backup_days:
            logging.info("今天星期 {0} 根据配置文件中的备份计划,决定进行差异备份".format(weekday))
            return self.backup_types['diff_backup']

    @property
    def strCurrent(self):
        """Current timestamp as a string, e.g. 2018-07-26T16:42:00."""
        return self.current.isoformat(timespec='seconds')

    @property
    def latestBackupSet(self):
        """Return the newest backup set directory name, or None if none exist.

        NOTE(review): this takes the LAST entry of os.listdir(), whose order
        is unspecified by the OS; it only works because the set names are
        ISO timestamps AND listdir happens to return them sorted on the
        target platform. clearnBackupSets() sorts explicitly — verify this
        should too.
        """
        # every sub-directory of backup_data_dir is considered a backup set
        backup_sets = [backup_set for backup_set in os.listdir(self.backup_data_dir) if os.path.isdir(os.path.join(self.backup_data_dir,backup_set))]

        if len(backup_sets) >=1:
            return backup_sets[-1]
        else:
            return None

    def fullbackup(self):
        # subclasses (Xtrabackup, ...) must implement the full-backup strategy
        raise NotImplementedError("请在子类中实现全备功能")

    def diffbackup(self):
        # subclasses must implement the differential-backup strategy
        raise NotImplementedError("请在子类中实现差异备份功能")

    def backup(self):
        """Dispatch to full or differential backup based on today's schedule."""
        decision=self.backupDecisionByWeekDay()

        if decision == self.backup_types['full_backup']:
            self.fullbackup()
        elif decision == self.backup_types['diff_backup']:
            self.diffbackup()
        else:
            # no schedule matched (decision is None) — default to a full backup
            self.fullbackup()
class Xtrabackup(BackupTool):
    """Backup driver built on Percona xtrabackup (full + differential)."""

    full_backup_script = None
    diff_backup_script = None

    def __init__(self, mtlsconf):
        super().__init__(mtlsconf)
        # shell-command templates, formatted with .format(self=self) at run time
        self.full_backup_script = mtlsconf['xtrabackup']['full_backup_script']
        self.diff_backup_script = mtlsconf['xtrabackup']['diff_backup_script']

    @property
    def isLatestFullBackupSuccess(self):
        """Return True when the newest backup set's full backup completed OK.

        Side effect: on success, reads to_lsn from xtrabackup_checkpoints
        into self.tolsn (consumed by the diff-backup command template).
        """
        logging.info("准备检查最近一次的全备是否成功...")
        latest = self.latestBackupSet
        # no backup set at all -> there cannot be a successful full backup
        if latest is None:
            # logging.warn is deprecated -> logging.warning
            logging.warning("没有可用的备份集(全备))")
            return False

        logging.info("检查最后一个备份集{0}的可用性".format(latest))
        xtrabackup_log = os.path.join(self.backup_data_dir, latest, latest + '-full.log')
        logging.info("检查{0}".format(xtrabackup_log))
        if not os.path.isfile(xtrabackup_log):
            logging.warning("{0} 不存在或它并不是一个文件".format(xtrabackup_log))
            return False

        with open(xtrabackup_log) as xlf:
            lines = [line for line in xlf]
        # BUG FIX: the original indexed [-1] unconditionally, raising
        # IndexError on an empty log file.
        if not lines or 'completed OK!' not in lines[-1]:
            logging.warning("检查到最后一个全备 没有备份成功")
            return False

        logging.warning("检查到最后一个全备 备份成功")
        xtrabackup_checkpoints = os.path.join(self.backup_log_dir, latest, 'xtrabackup_checkpoints')
        # BUG FIX: guard against a missing checkpoints file / missing to_lsn
        # line instead of crashing with FileNotFoundError / IndexError.
        if not os.path.isfile(xtrabackup_checkpoints):
            logging.warning("checkpoints file {0} not found".format(xtrabackup_checkpoints))
            return False
        with open(xtrabackup_checkpoints) as xcf:
            to_lsn_lines = [line for line in xcf if 'to_lsn' in line]
        if not to_lsn_lines:
            logging.warning("no to_lsn entry in {0}".format(xtrabackup_checkpoints))
            return False
        *_, tolsn = to_lsn_lines[0].split(' ')
        self.tolsn = tolsn.strip()
        logging.info("从xtrabackup_checkpoints文件中读到tolsn={0}".format(self.tolsn))
        return True

    def clearnBackupSets(self):
        """Remove all but the newest backup set, plus their LSN directories."""
        backup_sets = [b for b in os.listdir(self.backup_data_dir)
                       if os.path.isdir(os.path.join(self.backup_data_dir, b))]
        if len(backup_sets) >= 2:
            logging.info("备份集的数量为{0}大于2 准备清理备份集".format(len(backup_sets)))
            backup_sets = sorted(backup_sets)
            # delete every set except the newest
            for backup_set in backup_sets[0:-1]:
                doomed = os.path.join(self.backup_data_dir, backup_set)
                logging.info("清理备份集 {0}".format(doomed))
                shutil.rmtree(doomed)

            # delete LSN bookkeeping belonging to the deleted sets: every lsn
            # directory whose (timestamp) name sorts <= the newest deleted set
            newest_removed = backup_sets[-2]
            lsns = [lsn for lsn in os.listdir(self.backup_log_dir)
                    if os.path.isdir(os.path.join(self.backup_log_dir, lsn)) and lsn <= newest_removed]
            for lsn in lsns:
                doomed = os.path.join(self.backup_log_dir, lsn)
                logging.info("清理lsn日志 {0}".format(doomed))
                shutil.rmtree(doomed)

    def fullbackup(self):
        """Run a full backup into a fresh backup-set directory."""
        # keep only the most recent old backup set
        self.clearnBackupSets()

        # backup-set directory, e.g. /database/backups/3306/data/2018-07-26T16:42:00/
        full_backup_dir = os.path.join(self.backup_data_dir, self.strCurrent)
        full_backup_file = os.path.join(full_backup_dir, self.strCurrent + '-full.xbstream')
        full_backup_log_file = os.path.join(full_backup_dir, self.strCurrent + '-full.log')

        # exposed as attributes so the command template can reference {self.<attr>}
        self.full_backup_file = full_backup_file
        self.full_backup_log_file = full_backup_log_file

        # directory holding the LSN bookkeeping for this run
        self.lsndir = os.path.join(self.backup_log_dir, self.strCurrent)
        os.makedirs(self.lsndir)

        logging.info("创建用于保存全备的目录 {0}".format(full_backup_dir))
        os.makedirs(full_backup_dir)

        # render the command template against this instance's attributes
        full_backup_script = self.full_backup_script.format(self=self)
        logging.info("使用如下命令对MySQL数据库进行全备 {full_backup_script}".format(full_backup_script=full_backup_script))
        os.system(full_backup_script)

    def diffbackup(self):
        """Run a differential backup on top of the latest successful full backup."""
        logging.info("进入差异备份流程")
        # a diff builds on a full backup, so verify the last full first
        if self.isLatestFullBackupSuccess:
            # directory holding the LSN bookkeeping for this run
            self.lsndir = os.path.join(self.backup_log_dir, self.strCurrent)
            os.makedirs(self.lsndir)

            # the diff lands inside the existing (newest) backup-set directory
            diff_backup_dir = os.path.join(self.backup_data_dir, self.latestBackupSet)
            self.diff_backup_file = os.path.join(diff_backup_dir, self.strCurrent + '-diff.xbstream')
            self.diff_backup_log_file = os.path.join(diff_backup_dir, self.strCurrent + '-diff.log')

            diff_backup_script = self.diff_backup_script.format(self=self)
            logging.info("使用如下命令对MySQL数据库进行差异备 {0} ".format(diff_backup_script))
            os.system(diff_backup_script)
        else:
            # no usable full backup — fall back to a full backup
            self.fullbackup()


class Meb(BackupTool):
    # MySQL Enterprise Backup driver — not implemented yet.
    pass


class MysqlDump(BackupTool):
    # mysqldump driver — not implemented yet.
    pass


# maps the config value `backup_tool` to its implementation class
backup_tools_map = {
    'xtrabackup': Xtrabackup
}
class FileStat(object):
    """A (path, size) record; ordering compares file sizes only."""

    __slots__ = ('file_path', 'file_size')

    def __init__(self, file_path, file_szie):
        self.file_path = file_path
        self.file_size = file_szie

    def __eq__(self, other):
        return self.file_size == other.file_size

    def __le__(self, other):
        return self.file_size <= other.file_size

    def __lt__(self, other):
        return self.file_size < other.file_size

    def __str__(self):
        return f"{self.file_path} {fileformat(self.file_size)}"


class BigFiles(object):
    """Collect the `limit` largest files under `topdir` (kept in ascending size order)."""

    def __init__(self, topdir, limit):
        self._files = []
        self.limit = limit
        self.walktree(topdir)

    def append(self, filestat):
        """Insert one FileStat, evicting the smallest entry when over the limit."""
        self._files.append(filestat)
        self._files.sort()
        if len(self._files) > self.limit:
            # drop the smallest (first) element
            self._files = self._files[1:]

    def walktree(self, topdir):
        """Depth-first walk recording every regular file's size."""
        for entry in os.listdir(topdir):
            full_path = os.path.join(topdir, entry)
            try:
                info = os.stat(full_path)
                if stat.S_ISDIR(info.st_mode):
                    self.walktree(full_path)
                else:
                    self.append(FileStat(full_path, info.st_size))
            except FileNotFoundError:
                # file vanished between listdir and stat — ignore it
                continue

    def __str__(self):
        if not self._files:
            return "empty dir"

        widest_path = max([len(f.file_path) for f in self._files])
        widest_size = max([len(fileformat(f.file_size)) for f in self._files])
        head = "*" * (widest_path + widest_size + 6) + '\n'
        row_format = "|{{0:<{0}}} | {{1:>{1}}} \n".format(widest_path, widest_size)
        body = row_format.format("FILE PATH", "FILE SIZE|") + head
        for f in self._files:
            body = body + row_format.format(f.file_path, fileformat(f.file_size))
        return head + body
def batch_delete_rows(args:argparse.ArgumentParser):
    """
    Batch-delete rows: append `limit N` to every statement in the SQL file
    and run each one repeatedly until it affects zero rows.
    """
    # the SQL file must exist
    if not os.path.isfile(args.sql_file):
        logging.error(f"file '{args.sql_file}' not exists.")
        sys.exit(1)

    # load and rewrite the SQL statements from the file
    sqls = []
    try:
        with open(args.sql_file, 'r', encoding=args.encoding) as file_obj:
            for line in file_obj:
                line = line.strip()
                # BUG FIX: blank lines used to become a bare " limit N;"
                # statement that was then executed; skip them instead.
                if not line:
                    continue
                # BUG FIX: the original used line.replace(';', ...), which
                # rewrote EVERY semicolon on the line; only strip a trailing one.
                if line.endswith(';'):
                    line = line[:-1]
                sqls.append(line + f" limit {args.rows};")
    except UnicodeDecodeError as err:
        logging.error(str(err))
        sys.exit(2)

    if args.action == 'view':
        # review mode: print the rewritten statements and exit successfully
        for sql in sqls:
            logging.info(f"formatted sql statement : {sql}")
        sys.exit(0)

    # connect and execute each statement until it deletes nothing
    cnx = None
    try:
        cnx = connector.connect(host=args.host, port=args.port, user=args.user, password=args.password)
        cursor = cnx.cursor()
        for sql in sqls:
            while True:
                cursor.execute(sql)
                cnx.commit()
                if cursor.rowcount == 0:
                    break
                logging.info(f"{cursor.rowcount} row(s) affected by {sql} ")
                # throttle between batches to limit replication/lock pressure
                time.sleep(args.sleep_time)
        logging.info("compelete")

    except connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            logging.error(f" access denied user={args.user} password={args.password} host={args.host} port={args.port}")
        else:
            logging.error(str(err))
    finally:
        if cnx is not None and hasattr(cnx, 'close'):
            cnx.close()
def main():
    """List tables whose data files have (not) been modified within --not-used-days.

    Black-list mode (default) prints tables NOT touched since the baseline;
    otherwise tables touched after the baseline are printed.
    """
    args = parser_cmd_args()
    baseline = datetime.now() - timedelta(days=args.not_used_days)
    datadir = args.datadir

    logging.info(f"分析数据目录({datadir})")
    if args.black_list_mode:
        logging.info(f"准备过虑出最近修改日期(mtime) < {baseline.isoformat()}")
    else:
        logging.info(f"准备过虑出最近修改日期(mtime) > {baseline.isoformat()}")

    if not os.path.isdir(datadir):
        logging.error(f"dir '{datadir}' not exits or permission denied ")
        # BUG FIX: was the bare builtin exit(1); sys.exit is the proper
        # way to terminate a script (exit() is meant for interactive use).
        sys.exit(1)

    # first level of the datadir: one sub-directory per schema
    schemas = []
    for item in os.listdir(datadir):
        if os.path.isdir(os.path.join(datadir, item)):
            schemas.append(item)
    # system schemas are never reported
    schemas = [schema for schema in schemas
               if schema not in ('mysql', 'information_schema', 'sys', 'performance_schema')]

    expired_tables = []
    # second level: the data files (.ibd for InnoDB, .MYD for MyISAM)
    for schema in schemas:
        for item in os.listdir(os.path.join(datadir, schema)):
            path = os.path.join(datadir, schema, item)
            if os.path.isfile(path) and (path.endswith('.ibd') or path.endswith('.MYD')):
                # use st_mtime directly instead of the fragile tuple-unpacking
                # of os.stat() (which also truncated the timestamp to seconds)
                mtime = datetime.fromtimestamp(os.stat(path).st_mtime)
                if args.black_list_mode and mtime < baseline:
                    table, *_ = item.split('.')
                    expired_tables.append(f"{schema}.{table}")
                elif not args.black_list_mode and mtime > baseline:
                    table, *_ = item.split('.')
                    expired_tables.append(f"{schema}.{table}")

    for table in expired_tables:
        print(table)
socket.socket(socket.AF_INET, socket.SOCK_STREAM) 46 | 47 | # 地址重用与端口重用 48 | server_socket.setsockopt( 49 | socket.SOL_SOCKET, socket.SO_REUSEADDR, True) 50 | server_socket.setsockopt( 51 | socket.SOL_SOCKET, socket.SO_REUSEPORT, True) 52 | 53 | # 启动监听队列长度设置为 5 54 | server_socket.bind((self.host, self.port)) 55 | server_socket.listen(5) 56 | 57 | while True: 58 | client = None 59 | try: 60 | # 循环接收客户端的连接 61 | client, _ = server_socket.accept() 62 | 63 | # 对消息进行编码 64 | bytes_message = self.message.encode('utf8') + b'\x00' 65 | payload_len = 9 + len(bytes_message) 66 | 67 | # payload-len seqence-id error-packet-content 68 | packet = struct.pack(" argparse.ArgumentParser: 91 | """ 92 | 处理命令行参数 93 | """ 94 | parser = argparse.ArgumentParser(name) 95 | parser.add_argument('--host', type=str, 96 | default='0.0.0.0', help='Fake-MySQL-Server 要绑定的IP') 97 | parser.add_argument('--port', type=int, default=3306, 98 | help="Fake-MySQL-Server 要监听的端口") 99 | parser.add_argument('--message', type=str, 100 | default="MySQL 服务器今天不上班!", help="要发送给客户端的信息") 101 | args = parser.parse_args() 102 | return args 103 | 104 | 105 | if __name__ == "__main__": 106 | args = parser_cmd_args() 107 | fake = FakeMySQLServer(args.host, args.port, args.message) 108 | fake.run_server() 109 | -------------------------------------------------------------------------------- /bin/mtls-file-stat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | 4 | import os 5 | import re 6 | import sys 7 | import stat 8 | import logging 9 | import argparse 10 | from datetime import datetime 11 | 12 | name = os.path.basename(__file__) 13 | logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s",level=logging.INFO) 14 | 15 | def parser_cmd_args() -> argparse.ArgumentParser: 16 | """ 17 | 处理命令行参数 18 | """ 19 | 20 | parser = argparse.ArgumentParser(name) 21 | parser.add_argument('topdir',type=str,default="/tmp/") 22 | 
class FileStat(object):
    """Container for one file's path plus its ctime/mtime/atime as datetimes."""

    __slots__ = ('filepath', 'ctime', 'mtime', 'atime')

    def __init__(self, filepath, ctime, mtime, atime):
        self.filepath = filepath
        # timestamps arrive as epoch seconds (from os.stat) and are stored as datetimes
        self.ctime = datetime.fromtimestamp(ctime)
        self.mtime = datetime.fromtimestamp(mtime)
        self.atime = datetime.fromtimestamp(atime)

    def __str__(self):
        return "{0:<48} | {1:20} | {2:20} | {3:20}\n".format(self.filepath,self.mtime.isoformat(),self.atime.isoformat(),self.ctime.isoformat())


class FileStatGather(object):
    """Recursively collect files under topdir whose chosen timestamp is older than baseline."""

    def __init__(self, topdir, order_by, baseline):
        """
        topdir   -- directory to scan (process exits with code 3 if not a directory)
        order_by -- 'atime' | 'mtime' | 'ctime': which timestamp to filter and sort on
        baseline -- ISO-format datetime string; only files older than this are kept
        """
        if not os.path.isdir(topdir):
            logging.error(f"{topdir} 不是目录,{name} 准备退出")
            sys.exit(3)
        # BUG FIX: the original read the module-global `args.baseline` here,
        # silently ignoring the `baseline` parameter and raising NameError
        # whenever the class is used outside the command-line script.
        self.baseline = datetime.fromisoformat(baseline).timestamp()
        self.order_by = order_by
        self.file_stats = []
        self.topdir = topdir
        self.walktree(topdir)

    def walktree(self, top_dir):
        """Scan one directory level, recursing into sub-directories."""
        logging.info(f"准备扫描目录 {top_dir}")
        try:
            for item in os.listdir(top_dir):
                pathname = os.path.join(top_dir, item)
                if os.path.isfile(pathname):
                    *_, atime, mtime, ctime = os.stat(pathname)
                    # keep the file only when the selected timestamp is older
                    # than the baseline
                    if self.order_by == 'mtime' and mtime < self.baseline:
                        self.file_stats.append(FileStat(pathname, atime=atime, mtime=mtime, ctime=ctime))
                    elif self.order_by == 'ctime' and ctime < self.baseline:
                        self.file_stats.append(FileStat(pathname, atime=atime, mtime=mtime, ctime=ctime))
                    elif self.order_by == 'atime' and atime < self.baseline:
                        self.file_stats.append(FileStat(pathname, atime=atime, mtime=mtime, ctime=ctime))
                elif os.path.isdir(pathname):
                    self.walktree(pathname)
                else:
                    # neither regular file nor directory (socket, fifo, ...) — skip
                    pass
        except Exception as err:
            # unreadable directories are logged and skipped, not fatal
            logging.warning(str(err))

    def __str__(self):
        s = "\n\n"
        s = s + f"{self.topdir} 目录下文件统计信息明细 (order by {self.order_by} 小于 {datetime.fromtimestamp(self.baseline).isoformat()}):\n"
        s = s + '-' * 116 + '\n'
        s = s + "{0:<48} | {1:20} | {2:20} | {3:20}\n".format("file-path","mtime","atime","ctime")
        s = s + '-' * 116 + '\n'

        # drop system-schema tables, .pem files and redo/undo-style
        # six-digit-suffixed files from the report
        _tmp = []
        for item in self.file_stats:
            *_, databasename, filename = item.filepath.split('/')
            if databasename not in ('performance_schema', 'mysql', 'information_schema', 'sys') and not filename.endswith('pem') and not re.search(r"([0-9]{6,6})$", filename):
                _tmp.append(item)
        self.file_stats = _tmp

        # sort by the selected timestamp
        if self.order_by == 'atime':
            self.file_stats.sort(key=lambda s: s.atime)
        elif self.order_by == 'ctime':
            self.file_stats.sort(key=lambda s: s.ctime)
        else:
            self.file_stats.sort(key=lambda s: s.mtime)

        for i in self.file_stats:
            s = s + str(i)

        return s
def truncate_file(args:argparse.ArgumentParser):
    """
    Shrink args.file down to zero bytes in steps of --chunk MB, sleeping
    --sleep-time seconds between steps so the filesystem is not hammered.
    Exits(1) when the file is missing or a truncate step fails.
    """
    if not os.path.isfile(args.file):
        logging.error(f"file {args.file} not exists or permission denied .")
        sys.exit(1)

    # bytes removed per step
    chunk = int(args.chunk) * 1024 * 1024
    size = os.stat(args.file).st_size
    logging.info(f"file {args.file} size {size}(byte) chunck size {chunk}(byte)")

    try:
        while True:
            # never truncate below zero
            target_size = max(size - chunk, 0)
            os.truncate(args.file,target_size)
            logging.info(f"truncate file to {target_size} byte(s)")
            if target_size == 0:
                break
            time.sleep(args.sleep_time)
            # re-read the size: the file may be growing concurrently
            size = os.stat(args.file).st_size
    except Exception as err:
        logging.error(str(err))
        sys.exit(1)

    logging.info("compelete")
socket.socket(socket.AF_INET,socket.SOCK_STREAM) as server: 22 | html=""" 23 | 24 | block http server 25 | 26 | 27 |

{0}

28 | 29 | 30 | """.format(msg) 31 | length_html = len(html.encode('utf8')) 32 | head = 'HTTP/1.0 200 OK\r\nDate: Mon, 1 Jan 2049 01:01:01 GMT\r\nContent-Type: text/html; charset=UTF-8\r\nContent-Length: {0}\r\n\r\n'.format(length_html) 33 | response = (head + html).encode('utf8') 34 | server.bind((ip,port)) 35 | print("{0} | server binds on {1}:{2}".format(datetime.now(),ip,port)) 36 | server.listen(5) 37 | while True: 38 | cscok,addr = server.accept() 39 | print("{0} | accept a client from {1}".format(datetime.now(),addr)) 40 | request = cscok.recv(4096) 41 | cscok.send(response) 42 | cscok.close() 43 | print("{0} | response sended.".format(datetime.now())) 44 | except KeyboardInterrupt as err: 45 | sys.exit() 46 | 47 | def main_loop(poll): 48 | """定义主事件循环 49 | """ 50 | while True: 51 | events = poll.poll() 52 | for fileno,event in events: 53 | yield fileno,event 54 | 55 | def aio_http_server(ip='127.0.0.1',port=8080,msg='mtls-http is working ...'): 56 | """基于异步IO的http服务端 57 | """ 58 | logging.info("异步 http 服务器启动监听") 59 | html=f""" 60 | 61 | aio http server 62 | 63 | 64 |

{msg}

65 |

异步 http 服务器启动时间: {datetime.now().isoformat(' ')}

66 | 67 | 68 | """ 69 | logging.info("静态 html 模板生成完成") 70 | length_html = len(html.encode('utf8')) 71 | head = f"HTTP/1.0 200 OK\r\nDate: {datetime.now().isoformat(' ')}\r\nContent-Type: text/html; charset=UTF-8\r\nContent-Length: {length_html}\r\n\r\n" 72 | rspns = (head + html).encode('utf8') 73 | logging.info("TCP 字节流准备完成") 74 | try: 75 | poll = select.poll() 76 | with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as serversock: 77 | serversock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,True) 78 | serversock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,True) 79 | serversock.bind((ip,port)) 80 | serversock.listen(16) 81 | logging.info(f"异常IO套节字准备完成 http://{ip}:{port}") 82 | 83 | poll.register(serversock.fileno(),select.POLLIN) 84 | logging.info(f"等待客户端的连接请求") 85 | serversock_fileno = serversock.fileno() 86 | connections = {} 87 | requests = {} 88 | responses = {} 89 | addres = {} 90 | for fileno,event in main_loop(poll): 91 | if fileno == serversock_fileno: 92 | clientsock,client_addr = serversock.accept() 93 | logging.info(f"{client_addr[0]}:{client_addr[1]} 发来连接请求") 94 | clientsock.setblocking(False) 95 | poll.register(clientsock.fileno(),select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL) 96 | connections[clientsock.fileno()] = clientsock 97 | requests[clientsock.fileno()] = b'' 98 | responses[clientsock.fileno()] = rspns 99 | addres[clientsock.fileno()]=client_addr 100 | 101 | elif event & select.POLLHUP: 102 | addr = addres[fileno] 103 | logging.info(f"客户端{addr[0]}:{addr[1]}的连接断开") 104 | del connections[fileno] 105 | del requests[fileno] 106 | del responses[fileno] 107 | del addres[fileno] 108 | poll.unregister(fileno) 109 | elif event & select.POLLIN: 110 | addr = addres[fileno] 111 | logging.info(f"{addr[0]}:{addr[1]} 发来信息") 112 | data = connections[fileno].recv(4096) 113 | requests[fileno] += data 114 | 115 | if b'\r\n\r\n' in requests[fileno]: 116 | logging.info(f"{addr[0]}:{addr[1]} 发来的信息接收完成") 117 | requests[fileno] = b'' 
118 | poll.modify(fileno,select.POLLOUT | select.POLLERR | select.POLLHUP | select.POLLNVAL) 119 | elif event & select.POLLOUT: 120 | addr = addres[fileno] 121 | logging.info(f"发送响应报文到 {addr[0]}:{addr[1]}") 122 | send_len = connections[fileno].send(responses[fileno]) 123 | responses[fileno] = responses[fileno][send_len:] 124 | if len(responses[fileno]) == 0: 125 | poll.modify(fileno,select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL) 126 | responses[fileno] = rspns 127 | elif event & (select.POLLERR | select.POLLHUP | select.POLLNVAL): 128 | logging.info("POLL.OTHER ") 129 | connections[fileno].close() 130 | del connections[fileno] 131 | del requests[fileno] 132 | del responses[fileno] 133 | del addres[fileno] 134 | logging.info(f"当前服务端依然保持着 {len(connections)} 个连接") 135 | except KeyboardInterrupt as err: 136 | logging.error(str(err)) 137 | sys.exit() 138 | finally: 139 | poll.unregister(serversock_fileno) 140 | 141 | servers = { 142 | 'block':block_http_server, 143 | 'aio':aio_http_server 144 | } 145 | 146 | if __name__ == "__main__": 147 | parse = argparse.ArgumentParser(name) 148 | parse.add_argument('--ip',default='127.0.0.1',help='listening ip') 149 | parse.add_argument('--port',default=8080,type=int,help='listening port') 150 | parse.add_argument('--message',default='mtls-http is working ...',help='display message') 151 | parse.add_argument('--server-type',default='aio',help='http server type',choices=('aio','block')) 152 | args = parse.parse_args() 153 | servers[args.server_type](args.ip,args.port,args.message) 154 | 155 | -------------------------------------------------------------------------------- /bin/mtls-kill-all-conections: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import logging 6 | import argparse 7 | import mysql.connector 8 | 9 | """ 10 | kill 所有数据库上的连接 11 | """ 12 | 13 | name = os.path.basename(__file__) 14 | 
def kill_all_connections(args):
    """
    Kill every connection on the target MySQL instance except our own
    (and, when --target-user is given, only that user's sessions).
    Daemon and Binlog Dump threads are always left alone.
    """
    cnx = None
    try:
        cnx = mysql.connector.connect(host=args.host,port=args.port,user=args.user,password=args.password)
        cnx.autocommit = True
        cursor = cnx.cursor()

        # find our own connection id so we never kill ourselves
        cursor.execute("select connection_id();")
        self_connection_id,*_ = cursor.fetchone()

        # list every candidate connection on the instance
        if args.target_user is None:
            cursor.execute("select id from information_schema.processlist where command not in ('Daemon','Binlog Dump') ;")
        else:
            cursor.execute("select id from information_schema.processlist where command not in ('Daemon','Binlog Dump') and user=%s;",(args.target_user,))
        victims = [conn_id for conn_id,*_ in cursor.fetchall() if conn_id != self_connection_id]

        # kill them one by one; a failed KILL is logged but never fatal
        for processlist in victims:
            try:
                cursor.execute(f"kill {processlist};")
                logging.info(f"kill {processlist};")
            except mysql.connector.Error as err:
                logging.error(f"kill {processlist} failed '{str(err)}' ")

    except mysql.connector.Error as err:
        logging.error(str(err))
    finally:
        if cnx != None and hasattr(cnx,'close'):
            cnx.close()
def hot_client(args,slow_log_handler):
    """
    Count how often each client host appears in the slow log's
    '# User@Host:' header lines and print the top-N table.
    """
    counts = Counter()
    charset = args.charset
    for raw_line in slow_log_handler:
        decoded = raw_line.decode(charset)
        if '# User@Host:' not in decoded:
            continue
        # the host is the last whitespace separated token, wrapped in [ ]
        last_token = decoded.split()[-1]
        counts[last_token[1:-1]] += 1
    # formatted output
    print("{0:<32} {1}".format("client_host_ip".upper(),"counter".upper()))
    print("-"*48)
    for host_ip,counter in counts.most_common(args.top):
        print("{0:<32} {1}".format(host_ip,counter))
operations[args.operation](args,slow_log_handler) 124 | 125 | 126 | -------------------------------------------------------------------------------- /bin/mtls-monitor: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #-*- coding: UTF-8 -*- 3 | 4 | from mtls import base,statu,variable,mgr,replication,binlog,innodb_statu 5 | import argparse 6 | 7 | #--------------------------------------- 8 | #monitor.py 用于实现对 mysql 的监控 9 | #--------------------------------------- 10 | 11 | #定义最基本的mysql监控项 12 | basic_items={ 13 | #定义mysql绝大多数variable 主要用于信息收集 对于性能监控的意思不大 但是有助于分析问题 14 | 'ServerID':variable.ServerID, 15 | 'BaseDir':variable.BaseDir, 16 | 'DataDir':variable.DataDir, 17 | 'Port':variable.Port, 18 | 'CharacterSetServer':variable.CharacterSetServer, 19 | 'Socket':variable.Socket, 20 | 'ReadOnly':variable.ReadOnly, 21 | 'SkipNameResolve': variable.SkipNameResolve, 22 | 'LowerCaseTableNames':variable.LowerCaseTableNames, 23 | 'ThreadCacheSize':variable.ThreadCacheSize, 24 | 'TableOpenCache':variable.TableOpenCache, 25 | 'TableDefinitionCache':variable.TableDefinitionCache, 26 | 'TableOpenCacheInstances':variable.TableOpenCacheInstances, 27 | 'MaxConnections':variable.MaxConnections, 28 | 'BinlogFormat':variable.BinlogFormat, 29 | 'LogBin':variable.LogBin, 30 | 'BinlogRowsQueryLogEvents':variable.BinlogRowsQueryLogEvents, 31 | 'LogSlaveUpdates':variable.LogSlaveUpdates, 32 | 'ExpireLogsDays':variable.ExpireLogsDays, 33 | 'BinlogCacheSize':variable.BinlogCacheSize, 34 | 'SyncBinlog':variable.SyncBinlog, 35 | 'ErrorLog':variable.ErrorLog, 36 | 'GtidMode':variable.GtidMode, 37 | 'EnforceGtidConsistency':variable.EnforceGtidConsistency, 38 | 'MasterInfoRepository': variable.MasterInfoRepository, 39 | 'RelayLogInfoRepository':variable.RelayLogInfoRepository, 40 | 'SlaveParallelType':variable.SlaveParallelType, 41 | 'SlaveParallelWorkers':variable.SlaveParallelWorkers, 42 | 
'InnodbDataFilePath':variable.InnodbDataFilePath, 43 | 'InnodbTempDataFilePath':variable.InnodbTempDataFilePath, 44 | 'InnodbBufferPoolFilename':variable.InnodbBufferPoolFilename, 45 | 'InnodbLogGroupHomeDir':variable.InnodbLogGroupHomeDir, 46 | 'InnodbLogFilesInGroup':variable.InnodbLogFilesInGroup, 47 | 'InnodbLogFileSize':variable.InnodbLogFileSize, 48 | 'InnodbFileformat':variable.InnodbFileformat, 49 | 'InnodbFilePerTable':variable.InnodbFilePerTable, 50 | 'InnodbOnlineAlterLogMaxSize':variable.InnodbOnlineAlterLogMaxSize, 51 | 'InnodbOpenFiles':variable.InnodbOpenFiles, 52 | 'InnodbPageSize':variable.InnodbPageSize, 53 | 'InnodbThreadConcurrency':variable.InnodbThreadConcurrency, 54 | 'InnodbReadIoThreads':variable.InnodbReadIoThreads, 55 | 'InnodbWriteIoThreads':variable.InnodbWriteIoThreads, 56 | 'InnodbPurgeThreads':variable.InnodbPurgeThreads, 57 | 'InnodbLockWaitTimeout':variable.InnodbLockWaitTimeout, 58 | 'InnodbSpinWaitDelay':variable.InnodbSpinWaitDelay, 59 | 'InnodbAutoincLockMode':variable.InnodbAutoincLockMode, 60 | 'InnodbStatsAutoRecalc':variable.InnodbStatsAutoRecalc, 61 | 'InnodbStatsPersistent':variable.InnodbStatsPersistent, 62 | 'InnodbStatsPersistentSamplePages':variable.InnodbStatsPersistentSamplePages, 63 | 'InnodbBufferPoolInstances':variable.InnodbBufferPoolInstances, 64 | 'InnodbAdaptiveHashIndex': variable.InnodbAdaptiveHashIndex, 65 | 'InnodbChangeBuffering':variable.InnodbChangeBuffering, 66 | 'InnodbChangeBufferMaxSize':variable.InnodbChangeBufferMaxSize, 67 | 'InnodbFlushNeighbors':variable.InnodbFlushNeighbors, 68 | 'InnodbFlushMethod':variable.InnodbFlushMethod, 69 | 'InnodbDoublewrite':variable.InnodbDoublewrite, 70 | 'InnodbLogBufferSize':variable.InnodbLogBufferSize, 71 | 'InnodbFlushLogAtTimeout':variable.InnodbFlushLogAtTimeout, 72 | 'InnodbFlushLogAtTrxCommit':variable.InnodbFlushLogAtTrxCommit, 73 | 'InnodbBufferPoolSize':variable.InnodbBufferPoolSize, 74 | 'Autocommit':variable.Autocommit, 75 | 
'InnodbOldBlocksPct':variable.InnodbOldBlocksPct, 76 | 'InnodbOldBlocksTime':variable.InnodbOldBlocksTime, 77 | 'InnodbReadAheadThreshold':variable.InnodbReadAheadThreshold, 78 | 'InnodbRandomReadAhead':variable.InnodbRandomReadAhead, 79 | 'InnodbBufferPoolDumpPct':variable.InnodbBufferPoolDumpPct, 80 | 'InnodbBufferPoolDumpAtShutdown':variable.InnodbBufferPoolDumpAtShutdown, 81 | 'InnodbBufferPoolLoadAtStartup':variable.InnodbBufferPoolLoadAtStartup, 82 | 'QueryCacheLimit':variable.QueryCacheLimit, 83 | 'QueryCacheMinResUnit':variable.QueryCacheMinResUnit, 84 | 'QueryCacheSize':variable.QueryCacheSize, 85 | 'QueryCacheType':variable.QueryCacheType, 86 | #Binlog写入性能 87 | 'BinlogFile':binlog.BinlogFile, 88 | 'BinlogPosition':binlog.BinlogPosition, 89 | 'BinlogDoDB':binlog.BinlogDoDB, 90 | 'BinlogIgnoreDB':binlog.BinlogIgnoreDB, 91 | #Innodb LSN、CheckPoint、LogFlushUpTo ... 92 | 'LogSequenceNumber':innodb_statu.LogSequenceNumber, 93 | 'LogFlushedUpTo':innodb_statu.LogFlushedUpTo, 94 | 'PagesFlushedUpTo':innodb_statu.PagesFlushedUpTo, 95 | 'LastCheckpointAt':innodb_statu.LastCheckpointAt, 96 | 97 | #定义mysql绝大多数status 主要用于性能监控 98 | 'AbortedClients':statu.AbortedClients, 99 | 'AbortedConnects':statu.AbortedConnects, 100 | 'BinlogCacheDiskUse':statu.BinlogCacheDiskUse, 101 | 'BinlogCacheUse':statu.BinlogCacheUse, 102 | 'BinlogStmtCacheDiskUse':statu.BinlogStmtCacheDiskUse, 103 | 'BinlogStmtCacheUse':statu.BinlogStmtCacheUse, 104 | 'BytesReceived':statu.BytesReceived, 105 | 'BytesSent':statu.BytesSent, 106 | 'ComBegin':statu.ComBegin, 107 | 'ComCallProcedure':statu.ComCallProcedure, 108 | 'ComChangeMaster':statu.ComChangeMaster, 109 | 'ComCommit':statu.ComCommit, 110 | 'ComDelete':statu.ComDelete, 111 | 'ComDeleteMulti':statu.ComDeleteMulti, 112 | 'ComInsert':statu.ComInsert, 113 | 'ComInsertSelect':statu.ComInsertSelect, 114 | 'ComSelect':statu.ComSelect, 115 | 'SelectFullJoin':statu.SelectFullJoin, 116 | 'SelectFullRangeJoin':statu.SelectFullRangeJoin, 117 | 
'SelectRange':statu.SelectRange, 118 | 'SelectRangeCheck':statu.SelectRangeCheck, 119 | 'SelectScan':statu.SelectScan, 120 | 'ComUpdate':statu.ComUpdate, 121 | 'ComUpdateMulti':statu.ComUpdateMulti, 122 | 'Connections':statu.Connections, 123 | #DDL操作 124 | 'CreatedTmpDiskTables':statu.CreatedTmpDiskTables, 125 | 'CreatedTmpFiles':statu.CreatedTmpFiles, 126 | 'CreatedTmpTables':statu.CreatedTmpTables, 127 | 'ComCreateTable':statu.ComCreateTable, 128 | 'ComDropTable':statu.ComDropTable, 129 | 'ComRenameTable':statu.ComRenameTable, 130 | 'ComAlterTable':statu.ComAlterTable, 131 | ## Index 132 | 'ComCreateIndex':statu.ComCreateIndex, 133 | 'ComDropIndex':statu.ComDropIndex, 134 | ## User 135 | 'ComCreateUser':statu.ComCreateUser, 136 | 'ComAlterUser':statu.ComAlterUser, 137 | 'ComDropUser':statu.ComDropUser, 138 | ## Function 139 | 'ComCreateFunction':statu.ComCreateFunction, 140 | 'ComAlterFunction':statu.ComAlterFunction, 141 | 'ComDropFunction':statu.ComDropFunction, 142 | ## DB 143 | 'ComCreateDb':statu.ComCreateDb, 144 | 'ComAlterDb':statu.ComAlterDb, 145 | 'ComDropDb':statu.ComDropDb, 146 | ## Procedure 147 | 'ComCreateProcedure':statu.ComCreateProcedure, 148 | 'ComAlterProcedure':statu.ComAlterProcedure, 149 | 'ComDropProcedure':statu.ComDropProcedure, 150 | ## View 151 | 'ComCreateView':statu.ComCreateView, 152 | 'ComDropView':statu.ComDropView, 153 | ## Role 154 | 'ComCreateRole':statu.ComCreateRole, 155 | 'ComDropRole':statu.ComDropRole, 156 | ## Trigger 157 | 'ComCreateTrigger':statu.ComCreateTrigger, 158 | 'ComDropTrigger':statu.ComDropTrigger, 159 | 160 | 'InnodbBufferPoolDumpStatus':statu.InnodbBufferPoolDumpStatus, 161 | 'InnodbBufferPoolLoadStatus':statu.InnodbBufferPoolLoadStatus, 162 | 'InnodbBufferPoolResizeStatus':statu.InnodbBufferPoolResizeStatus, 163 | 'InnodbBufferPoolBytesData':statu.InnodbBufferPoolBytesData, 164 | 'InnodbBufferPoolPagesData':statu.InnodbBufferPoolPagesData, 165 | 'InnodbBufferPoolPagesDirty':statu.InnodbBufferPoolPagesDirty, 
166 | 'InnodbBufferPoolBytesDirty':statu.InnodbBufferPoolBytesDirty, 167 | 'InnodbBufferPoolPagesFlushed':statu.InnodbBufferPoolPagesFlushed, 168 | 'InnodbBufferPoolPagesFree':statu.InnodbBufferPoolPagesFree, 169 | 'InnodbBufferPoolPagesMisc':statu.InnodbBufferPoolPagesMisc, 170 | 'InnodbBufferPoolPagesTotal':statu.InnodbBufferPoolPagesTotal, 171 | 'InnodbBufferPoolReadAhead':statu.InnodbBufferPoolReadAhead, 172 | 'InnodbBufferPoolReadAheadEvicted':statu.InnodbBufferPoolReadAheadEvicted, 173 | 'InnodbBufferPoolReadRequests':statu.InnodbBufferPoolReadRequests, 174 | 'InnodbBufferPoolReads':statu.InnodbBufferPoolReads, 175 | 'InnodbBufferPoolWaitFree':statu.InnodbBufferPoolWaitFree, 176 | 'InnodbBufferPoolWriteRequests':statu.InnodbBufferPoolWriteRequests, 177 | 'InnodbDataFsyncs':statu.InnodbDataFsyncs, 178 | 'InnodbDataPendingFsyncs':statu.InnodbDataPendingFsyncs, 179 | 'InnodbDataPendingReads':statu.InnodbDataPendingReads, 180 | 'InnodbDataPendingWrites':statu.InnodbDataPendingWrites, 181 | 'InnodbDataRead':statu.InnodbDataRead, 182 | 'InnodbDataReads':statu.InnodbDataReads, 183 | 'InnodbDataWrites':statu.InnodbDataWrites, 184 | 'InnodbDataWritten':statu.InnodbDataWritten, 185 | 'InnodbDblwrPagesWritten':statu.InnodbDblwrPagesWritten, 186 | 'InnodbDblwrWrites':statu.InnodbDblwrWrites, 187 | 'InnodbLogWaits':statu.InnodbLogWaits, 188 | 'InnodbLogWriteRequests':statu.InnodbLogWriteRequests, 189 | 'InnodbLogWrites':statu.InnodbLogWrites, 190 | 'InnodbOsLogFsyncs':statu.InnodbOsLogFsyncs, 191 | 'InnodbOsLogPendingFsyncs':statu.InnodbOsLogPendingFsyncs, 192 | 'InnodbOsLogPendingWrites':statu.InnodbOsLogPendingWrites, 193 | 'InnodbOsLogWritten':statu.InnodbOsLogWritten, 194 | 'InnodbPagesCreated':statu.InnodbPagesCreated, 195 | 'InnodbPagesRead':statu.InnodbPagesRead, 196 | 'InnodbPagesWritten':statu.InnodbPagesWritten, 197 | 'InnodbRowLockCurrentWaits':statu.InnodbRowLockCurrentWaits, 198 | 'InnodbRowLockTime':statu.InnodbRowLockTime, 199 | 
'InnodbRowLockTimeAvg':statu.InnodbRowLockTimeAvg, 200 | 'InnodbRowLockTimeMax':statu.InnodbRowLockTimeMax, 201 | 'InnodbRowLockWaits':statu.InnodbRowLockWaits, 202 | 'InnodbRowsDeleted':statu.InnodbRowsDeleted, 203 | 'InnodbRowsInserted':statu.InnodbRowsInserted, 204 | 'InnodbRowsRead':statu.InnodbRowsRead, 205 | 'InnodbRowsUpdated':statu.InnodbRowsUpdated, 206 | 'InnodbAvailableUndoLogs':statu.InnodbAvailableUndoLogs, 207 | 'OpenTableDefinitions':statu.OpenTableDefinitions, 208 | 'OpenTables':statu.OpenTables, 209 | 'OpenedTableDefinitions':statu.OpenedTableDefinitions, 210 | 'OpenedTables':statu.OpenedTables, 211 | 'QcacheFreeBlocks':statu.QcacheFreeBlocks, 212 | 'QcacheFreeMemory':statu.QcacheFreeMemory, 213 | 'QcacheHits':statu.QcacheHits, 214 | 'QcacheInserts':statu.QcacheInserts, 215 | 'QcacheLowmemPrunes':statu.QcacheLowmemPrunes, 216 | 'QcacheNotCached':statu.QcacheNotCached, 217 | 'QcacheQueriesInCache':statu.QcacheQueriesInCache, 218 | 'QcacheTotalBlocks':statu.QcacheTotalBlocks, 219 | 'SlowQueries':statu.SlowQueries, 220 | 'TableLocksImmediate':statu.TableLocksImmediate, 221 | 'TableLocksWaited':statu.TableLocksWaited, 222 | 'TableOpenCacheOverflows':statu.TableOpenCacheOverflows, 223 | 'ThreadsCached':statu.ThreadsCached, 224 | 'ThreadsConnected':statu.ThreadsConnected, 225 | 'ThreadsCreated':statu.ThreadsCreated, 226 | 'ThreadsRunning':statu.ThreadsRunning, 227 | 'Uptime':statu.Uptime, 228 | #为了支持zabbix的自动发现 返回数据类似:{"data": [{"{#MYSQLPORT}": 3306}]} 229 | 'MySQLDiscovery':base.mysql_discovery, 230 | 'DiskDiscovery':base.disk_discovery, 231 | 232 | } 233 | 234 | #定义mysql主从复制时用到的监控项 235 | repl_items={} 236 | 237 | #定义mysql-group-replication时用到的监控项 238 | mgr_items={ 239 | 'MgrTotalMemberCount':mgr.MgrTotalMemberCount, 240 | 'MgrOnLineMemberCount':mgr.MgrOnLineMemberCount, 241 | 'MgrMemberState':mgr.MgrMemberState, 242 | 'MgrCountTransactionsInQueue':mgr.MgrCountTransactionsInQueue, 243 | 'MgrCountTransactionsChecked':mgr.MgrCountTransactionsChecked, 244 
def export_zabbix_agent_config_file():
    """
    Print a zabbix agent UserParameter line for every registered monitor
    item so the agent config can be generated automatically.  The pseudo
    item 'export' itself is excluded.
    """
    fmt="UserParameter=mysql{0}[*],/usr/local/mtls/monitor.py -u=$1 -p=$2 -s=$3 -P=$4 {0} 2>>/var/log/mtls/monitor.log"
    for item_name in monitor_item_names:
        if item_name == 'export':
            continue
        print(fmt.format(item_name))
def parser_cmd_args():
    """
    Parse the command line options of mtls-multi-session and return the
    resulting namespace.
    """
    parser = argparse.ArgumentParser(name)
    parser.add_argument("--host", type=str, default="127.0.0.1", help="MySQL 主机 IP ")
    parser.add_argument("--port", type=int, default=3306, help="MySQL 端口")
    parser.add_argument("--user", type=str, default="appuser", help="用户名")
    parser.add_argument("--password", type=str, default="mtls@0352", help="密码")
    parser.add_argument("--sql", type=str, default="select 'mysqltools-python' as 'this is ' ;", help="sql statement")
    parser.add_argument("--sessions", type=int, default=7)
    parser.add_argument("--duration", type=int, default=60)
    return parser.parse_args()
def check_python_version() -> None:
    """
    Abort with exit code 1 unless we are running under Python 3.x;
    the tool relies on python-3-only syntax.
    """
    if sys.version_info.major > 2:
        return
    print("only support python-3.x",file=sys.stderr)
    sys.exit(1)
def config_logger(args:argparse.ArgumentParser) -> None:
    """
    Configure this tool's logger: level taken from --log-level
    (info/debug/error), output formatted to stderr.
    """
    # map the --log-level choice onto the logging module's constants
    levels = {"debug": logging.DEBUG, "info": logging.INFO, "error": logging.ERROR}
    logger = logging.getLogger(name)
    if args.log_level in levels:
        logger.setLevel(levels[args.log_level])

    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(process)d %(threadName)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
87 | if args.auto_primary_key == True: 88 | columns.append("id int not null auto_increment primary key") 89 | # 检查 int 字段的数量 90 | if args.ints >= 1: 91 | for i in range(args.ints): 92 | columns.append(f"i{i} int not null") 93 | # 检查 varchar 字段的数量 94 | if args.varchars >= 1: 95 | for c in range(args.varchars): 96 | columns.append(f"c{c} varchar({args.varchar_length}) not null") 97 | # 检查 float 字段的数量 98 | if args.floats >= 1: 99 | for f in range(args.floats): 100 | columns.append(f"f{f} float not null") 101 | # 检查 double 字段的数量 102 | if args.doubles >= 1: 103 | for d in range(args.doubles): 104 | columns.append(f"d{d} double not null") 105 | # 检查 decimal 字段的数量 106 | if args.decimals >= 1: 107 | if args.decimal_precision < args.decimal_scale: 108 | logger.error("decimal-precision argument must big then decimal-scale") 109 | sys.exit(2) 110 | if args.decimal_precision <=0: 111 | logger.error("decimal-precision argument must big then 0") 112 | sys.exit(3) 113 | for d in range(args.decimals): 114 | columns.append(f"dm{d} decimal({args.decimal_precision},{args.decimal_scale}) not null") 115 | # 检查 text 字段的数量 116 | if args.texts >=1: 117 | for t in range(args.texts): 118 | columns.append(f"t{t} text not null") 119 | 120 | # 如果没有指定任何类型的列,那么直接退出 121 | if len(columns) == 1 and args.auto_primary_key==True: 122 | logger.error(f"do not have any columns in table {args.database}.{args.table}") 123 | sys.exit(1) 124 | 125 | # 拼接 SQL 126 | sql = f"create table {args.database}.{args.table} ( {','.join(columns)});" 127 | 128 | # 执行 SQL 语句创建表 129 | logger.info(f"create table sql statement: {sql}") 130 | cnx = None 131 | try: 132 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password,database=args.database) 133 | cursor = cnx.cursor() 134 | cursor.execute(sql) 135 | cnx.commit() 136 | except connector.Error as err: 137 | if err.errno == errorcode.ER_ACCESS_DENIED_ERROR: 138 | logger.error(f"host={args.host} port={args.port} 
user={args.user},passwrod={args.password}") 139 | finally: 140 | if cnx != None and hasattr(cnx,'close'): 141 | cnx.close() 142 | 143 | logger.info(f"complete") 144 | 145 | 146 | def get_int_value(): 147 | """ 148 | 整数生成器 149 | """ 150 | while True: 151 | yield random.randint(1,200000000) 152 | get_int_value = get_int_value() 153 | 154 | 155 | def get_str_value(length=64): 156 | """ 157 | 随机字符串生成器 158 | """ 159 | s = string.ascii_letters + string.digits 160 | ss = [ i for i in s + s] 161 | if len(ss) <= length: 162 | raise RuntimeError(f"varchar length must less then {length}") 163 | while True: 164 | random.shuffle(ss) 165 | yield ''.join(ss[:length]) 166 | get_str_value = get_str_value() 167 | 168 | 169 | def get_float_value(): 170 | """ 171 | 浮点数生成器 172 | """ 173 | while True: 174 | yield '{0:.2f}'.format(random.random() * 10000000000) 175 | get_float_value = get_float_value() 176 | 177 | 178 | # 类型与生成器的对应关系 179 | value_generates = { 180 | 'ints': get_float_value, 181 | 'varchar': get_str_value, 182 | 'float': get_float_value, 183 | 'double': get_float_value, 184 | 'decimal': get_float_value, 185 | } 186 | 187 | def insert_sql(args:argparse.ArgumentParser) -> tuple: 188 | """ 189 | """ 190 | logger = logging.getLogger(name) 191 | sql = None 192 | columns = [] 193 | if args.ints >=1: 194 | for i in range(args.ints): 195 | columns.append( (f'i{i}',get_int_value) ) 196 | 197 | if args.varchars >=1: 198 | for v in range(args.varchars): 199 | columns.append( (f"c{v}",get_str_value) ) 200 | 201 | if args.floats >= 1: 202 | for f in range(args.floats): 203 | columns.append( (f"f{f}",get_float_value) ) 204 | 205 | if args.doubles >= 1: 206 | for d in range(args.doubles): 207 | columns.append( (f"d{d}",get_float_value) ) 208 | 209 | if args.decimals >= 1: 210 | for dm in range(args.decimals): 211 | columns.append( (f"dm{dm}",get_float_value) ) 212 | 213 | if args.texts >= 1: 214 | for t in range(args.texts): 215 | columns.append( (f"t{t}",get_str_value) ) 216 | 217 | if 
len(columns) == 0: 218 | logger.error(f"columns counts equal 0") 219 | sys.exit(2) 220 | 221 | # 拼接出 SQL 模板 222 | names = [n for n,v in columns] 223 | s = ['%s' for n in names] 224 | sql = f"insert into {args.database}.{args.table} ({','.join(names)}) values({','.join(s)})" 225 | 226 | # 计算出动态的 SQL 参数 227 | sql_args = [v for n,v in columns] 228 | 229 | 230 | return (sql,sql_args) 231 | 232 | def insert(args,rows): 233 | """ 234 | 执行 insert 操作 235 | """ 236 | logger = logging.getLogger(name) 237 | sql,value_generates = insert_sql(args) 238 | cnx = None 239 | logger.info(f"sql statement: {sql}") 240 | #print(sql) 241 | try: 242 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password,database=args.database) 243 | cursor = cnx.cursor() 244 | for r in range(rows): 245 | values = [next(v) for v in value_generates] 246 | #logger.debug(values) 247 | cursor.execute(sql,values) 248 | cnx.commit() 249 | #logger.debug(f"{sql},{values}") 250 | except connector.Error as err: 251 | logger.info(str(err)) 252 | exit(2) 253 | finally: 254 | if cnx and hasattr(cnx,'close'): 255 | cnx.close() 256 | 257 | def drop(args): 258 | """ 259 | 删除表 260 | """ 261 | logger = logging.getLogger(name) 262 | cnx = None 263 | try: 264 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password,database=args.database) 265 | cursor = cnx.cursor() 266 | cursor.execute(f"drop table {args.database}.{args.table};") 267 | cnx.commit() 268 | except connector.Error as err: 269 | logger.error(str(err)) 270 | sys.exit(3) 271 | finally: 272 | if cnx and hasattr(cnx,'close'): 273 | cnx.close() 274 | 275 | 276 | 277 | 278 | def main(): 279 | check_python_version() 280 | args = parse_cmd_arags() 281 | config_logger(args) 282 | logger = logging.getLogger(name) 283 | 284 | if args.action == 'create': 285 | create(args) 286 | elif args.action == 'insert': 287 | start = time.time() 288 | logger.info(f"start time = {start}") 289 | 
logger.info("****") 290 | logger.info("****") 291 | 292 | # 多进程压力测试 293 | if args.parallel > 1: 294 | # 计算出每一个进程要执行的插入行数 295 | batch_size = int(args.rows / args.parallel) 296 | # 创建进程池 297 | with ProcessPoolExecutor(max_workers=args.parallel) as e: 298 | futures = [e.submit(insert,args,batch_size) for i in range(args.parallel)] 299 | for future in concurrent.futures.as_completed(futures): 300 | # 取得“期物的值”以此来触发执行 301 | _ = future.result() 302 | else: 303 | # 单进程压力测试 304 | insert(args,args.rows) 305 | # 完成后统计结果 306 | stop = time.time() 307 | duration = "{0:.2f}".format((stop - start)) 308 | tps = "{0:.2f}".format(args.rows/(stop - start)) 309 | logger.info("****") 310 | logger.info("****") 311 | logger.info(f"stop time = {stop}") 312 | logger.info(f"TPS:{tps} duration {duration}(s)") 313 | 314 | elif args.action == 'drop': 315 | drop(args) 316 | 317 | 318 | if __name__ == "__main__": 319 | main() 320 | 321 | -------------------------------------------------------------------------------- /bin/mtls-ps-mem: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #-*- coding: UTF-8 -*- 3 | 4 | """ 5 | 每秒打印一下进程的内存使用情况 6 | """ 7 | 8 | import os 9 | import datetime 10 | import argparse 11 | from mtls.ps.mem import mem_info_generator 12 | from mtls.ps.mem import global_connection_memory_generator 13 | from mtls.kits import fileformat 14 | 15 | 16 | name = os.path.basename(__file__) 17 | 18 | def parser_cmd_args(): 19 | """ 20 | 处理命令行参数 21 | """ 22 | args = argparse.ArgumentParser(name) 23 | args.add_argument('--user',type=str,default='') 24 | args.add_argument('--password',type=str,default='') 25 | args.add_argument('--port',type=int,default=3306) 26 | args.add_argument("pid",type=int) 27 | return args.parse_args() 28 | 29 | 30 | def main(): 31 | args = parser_cmd_args() 32 | pid = args.pid 33 | if args.user != '': 34 | for mem_info in mem_info_generator(pid): 35 | print(f"{datetime.datetime.now().isoformat(' 
','seconds')} pid={pid} vms={fileformat(mem_info.vms)} rss={fileformat(mem_info.rss)} dirty={fileformat(mem_info.dirty)}") 36 | else: 37 | g = global_connection_memory_generator() 38 | next(g) 39 | for mem_info in mem_info_generator(pid): 40 | global_conn_mem_size = next(g) 41 | print(f"{datetime.datetime.now().isoformat(' ','seconds')} pid={pid} vms={fileformat(mem_info.vms)} rss={fileformat(mem_info.rss)} global_conn_mem={fileformat(global_conn_mem_size)}") 42 | 43 | 44 | if __name__ == "__main__": 45 | main() -------------------------------------------------------------------------------- /bin/mtls-random-passwd: -------------------------------------------------------------------------------- 1 | #!/usr/bin/evn python3 2 | """随机生成高强度的密码串 3 | """ 4 | 5 | import os 6 | import string 7 | import random 8 | import argparse 9 | 10 | 11 | def main(): 12 | """ 13 | """ 14 | # 处理参数 15 | name, *_ = os.path.basename(__file__).split('.') 16 | parser = argparse.ArgumentParser(name) 17 | parser.add_argument('--batch', '-b', type=int, default=1, 18 | help='一次随机产生多少密码(默认一个密码)') 19 | parser.add_argument('length', type=int, default=8, 20 | help='密码的长度') 21 | args = parser.parse_args() 22 | 23 | batch = args.batch if args.batch >= 1 else 1 24 | length = args.length if args.length >= 8 and args.length <= 32 else 8 25 | 26 | # 组合母串 27 | s = string.digits * 4 + string.ascii_letters + '+-*![{}]|_%$^&()~' * 2 28 | 29 | # 重生随机密码串 30 | for i in range(batch): 31 | print(''.join(random.sample(s, length))) 32 | 33 | 34 | if __name__ == "__main__": 35 | main() 36 | -------------------------------------------------------------------------------- /bin/mtls-rows-diff: -------------------------------------------------------------------------------- 1 | #!/usr/bin/evn python3 2 | # encoding:utf8 3 | 4 | """ 5 | 比较两个文件中不同的行 6 | """ 7 | 8 | import os 9 | import sys 10 | import argparse 11 | 12 | 13 | def parser_args(): 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('-s', 
'--source', type=str, 16 | default='/tmp/source.txt', help='源文件路径') 17 | parser.add_argument('-t', '--target', type=str, 18 | default='/tmp/target.txt', help='目标文件路径') 19 | return parser.parse_args() 20 | 21 | 22 | def diff(source_file: str, target_file: str) -> set: 23 | """ 24 | 计算 source_file 中有,但是 target_file 中没有的行 25 | 26 | Parmter 27 | ------ 28 | source_file: str 源文件全路径 29 | target_file: str 目标文件全路径 30 | """ 31 | if not os.path.isfile(source_file): 32 | print(f"{source_file} is not a file or not exists.") 33 | sys.exit(0) 34 | 35 | if not os.path.isfile(target_file): 36 | print(f"{target_file} is not a file or not exists.") 37 | sys.exit(0) 38 | 39 | with open(source_file, 'r') as source: 40 | source_rows = set() 41 | for line in source: 42 | source_rows.update(set(line.strip())) 43 | 44 | with open(target_file) as target: 45 | target_rows = set() 46 | for line in target: 47 | target_rows.update(set(line.strip())) 48 | 49 | d = source_rows - target_rows 50 | 51 | for line in d: 52 | print(d) 53 | 54 | 55 | def main(): 56 | args = parser_args() 57 | diff(args.source, args.target) 58 | 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /bin/mtls-sql-distribution: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import re 4 | import os 5 | import sys 6 | import time 7 | import argparse 8 | import logging 9 | import mysql.connector 10 | from collections import Counter 11 | 12 | 13 | """ 14 | 读取慢查询文件分析它里面的 SQL 分布情况 15 | 16 | 17 | """ 18 | 19 | 20 | name = os.path.basename(__file__) 21 | logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s",level=logging.INFO) 22 | 23 | def parser_cmd_args() -> argparse.ArgumentParser: 24 | """ 25 | 处理命令行参数 26 | """ 27 | parser = argparse.ArgumentParser(name) 28 | parser.add_argument('--limit',type=int,default=7) 29 | 
parser.add_argument('sqlfile',type=str,default='slow.log',help='slow query log file') 30 | args = parser.parse_args() 31 | #print(args) 32 | return args 33 | 34 | class BaseAnalyze(object): 35 | """ 36 | 所有分析实现的基类 37 | """ 38 | def analyze(self,line:str)->None: 39 | """ 40 | 读入一行数据统计信息 41 | """ 42 | raise NotImplementedError("请在子类中实现 analyze 方法") 43 | 44 | def __str__(self): 45 | """ 46 | 格式化输出 47 | """ 48 | raise NotImplementedError("请在子类中实现 __str__ 方法") 49 | 50 | class SqlDistributionAnalyze(BaseAnalyze): 51 | def __init__(self): 52 | self._sql_type_counter = Counter({ 53 | 'select':0, 54 | 'insert':0, 55 | 'update':0, 56 | 'delete':0 57 | }) 58 | self.select = re.compile(rb'select ([\w\W]*) from ',re.IGNORECASE) 59 | self.insert = re.compile(rb"insert into ([\w\W]*) values",re.IGNORECASE) 60 | self.delete = re.compile(rb"delete from ([\w\W]*)",re.IGNORECASE) 61 | self.update = re.compile(rb"update ([\w\W]*) set",re.IGNORECASE) 62 | 63 | def analyze(self,line): 64 | """ 65 | 分析输入的 line 是 insert,update,delete,select 的哪一种,并统计它们的次数 66 | """ 67 | 68 | if self.select.match(line): 69 | self._sql_type_counter.update({'select':1}) 70 | return 71 | 72 | if self.insert.match(line): 73 | self._sql_type_counter.update({'insert':1}) 74 | return 75 | 76 | if self.delete.match(line): 77 | self._sql_type_counter.update({'delete':1}) 78 | return 79 | 80 | if self.update.match(line): 81 | self._sql_type_counter.update({'update':1}) 82 | return 83 | 84 | def __str__(self): 85 | """ 86 | 格式化输出 87 | """ 88 | s = "-"*48+'\n' 89 | s = s + "{0:<47}\n".format("SQL出现频率如下:") 90 | s = s + "-"*48+'\n' 91 | for k in self._sql_type_counter: 92 | t = "{0:<24}|{1:<23}\n".format(k,self._sql_type_counter[k]) 93 | s = s + t 94 | s = s + "-"*48+'\n\n' 95 | return s 96 | 97 | class TableDistributionAnalyze(BaseAnalyze): 98 | """ 99 | 统计表的分布情况 100 | """ 101 | def __init__(self,limit=7): 102 | self._table_counter = Counter() 103 | self.limit = limit 104 | self.select = re.compile(rb"select ([\w\W]*) 
from ([.\w]*)",re.IGNORECASE) 105 | self.insert = re.compile(rb"insert([\s]*)into([\s]*)([.\w]*)",re.IGNORECASE) 106 | self.update = re.compile(rb"update([\s]*)([.\w]*)([\s]*)set",re.IGNORECASE) 107 | self.delete = re.compile(rb"delete([\s]*)from([\s]*)([.\w]*)",re.IGNORECASE) 108 | 109 | 110 | def analyze(self,line): 111 | """ 112 | 分析 SQL 语句所操作的表 113 | """ 114 | p = self.select.match(line) 115 | if p and p.group(2) != b'': 116 | self._table_counter.update({ 117 | p.group(2):1 118 | }) 119 | return 120 | 121 | p = self.insert.match(line) 122 | if p and p.group(3) != b'': 123 | self._table_counter.update({ 124 | p.group(3):1 125 | }) 126 | return 127 | 128 | p = self.update.match(line) 129 | if p and p.group(2) != b'': 130 | self._table_counter.update({ 131 | p.group(2):1 132 | }) 133 | return 134 | 135 | p = self.delete.match(line) 136 | if p and p.group(3) != b'': 137 | self._table_counter.update({ 138 | p.group(3):1 139 | }) 140 | 141 | def __str__(self): 142 | s = "-"*48+'\n' 143 | s = s + "{0:<47}\n".format("表名出现频率如下:") 144 | s = s + "-"*48+'\n' 145 | common = self._table_counter.most_common(self.limit) 146 | for k,v in common: 147 | t = "{0:<40}|{1:<7}\n".format(k.decode('utf8'),v) 148 | s = s + t 149 | s = s + "-"*48+'\n' 150 | return s 151 | 152 | #class RowsAnalyze(BaseAnalyze): 153 | ##Query_time: 0.101302 Lock_time: 0.000084 Rows_sent: 37 Rows_examined: 269513 154 | # pass 155 | 156 | class main(): 157 | starting = time.time() 158 | # 处理命令行参数 159 | args = parser_cmd_args() 160 | 161 | # 检查文件是否存在 162 | if not os.path.isfile(args.sqlfile): 163 | logging.error(f"file {args.sqlfile} not exists.") 164 | sys.exit(1) 165 | 166 | # 全部以二进制的形式打开、避免出现解码问题 167 | distributions = [SqlDistributionAnalyze(),TableDistributionAnalyze(args.limit)] 168 | with open(args.sqlfile,'rb') as sql_file_obj: 169 | for line in sql_file_obj: 170 | if line.startswith(b'#') or line.startswith(b'SET'): 171 | continue 172 | for dis in distributions: 173 | dis.analyze(line) 174 | 175 | for 
dis in distributions: 176 | print(dis) 177 | 178 | ending = time.time() 179 | print(f"日志分析用时 {(ending - starting):.2f} s") 180 | 181 | 182 | 183 | 184 | if __name__ == "__main__": 185 | main() 186 | 187 | 188 | 189 | 190 | -------------------------------------------------------------------------------- /bin/mtlsanalysis: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #-*- coding: UTF-8 -*- 3 | 4 | import argparse 5 | from collections import Counter 6 | from mysql import connector 7 | 8 | 9 | def all_connections(args): 10 | """返回当前数据库实例中所有的连接 11 | """ 12 | c = Counter() 13 | cnx = None 14 | try: 15 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password) 16 | cursor = cnx.cursor() 17 | cursor.execute("select user,host,state from information_schema.processlist;") 18 | for user,host,state in cursor.fetchall(): 19 | if ':' in host: 20 | host,_ = host.split(':') 21 | c.update({host:1}) 22 | except Exception as e: 23 | print(e) 24 | exit() 25 | finally: 26 | if cnx != None: 27 | cnx.close() 28 | 29 | #格式化输出 30 | print("{0:<32} {1}".format("client_host_ip".upper(),"counter".upper())) 31 | print("-"*48) 32 | for host_ip,counter in c.most_common(args.top): 33 | print("{0:<32} {1}".format(host_ip,counter)) 34 | 35 | def active_connections(args): 36 | """返回当前实例中所有的非sleep状态的连接 37 | """ 38 | c = Counter() 39 | cnx = None 40 | try: 41 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password) 42 | cursor = cnx.cursor() 43 | cursor.execute("select user,host,state from information_schema.processlist;") 44 | for user,host,state in cursor.fetchall(): 45 | if ':' in host: 46 | host,_ = host.split(':') 47 | if 'sleep' not in state.lower() and state != '': 48 | c.update({host:1}) 49 | except Exception as e: 50 | print(e) 51 | exit() 52 | finally: 53 | if cnx != None: 54 | cnx.close() 55 | 56 | #格式化输出 57 | print("{0:<32} 
{1}".format("client_host_ip".upper(),"counter".upper())) 58 | print("-"*48) 59 | for host_ip,counter in c.most_common(args.top): 60 | print("{0:<32} {1}".format(host_ip,counter)) 61 | 62 | def all_user(args): 63 | """与all_connection类似,不同的是以user为维度进行聚合 64 | """ 65 | c = Counter() 66 | cnx = None 67 | try: 68 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password) 69 | cursor = cnx.cursor() 70 | cursor.execute("select user,host,state from information_schema.processlist;") 71 | for user,host,state in cursor.fetchall(): 72 | c.update({user:1}) 73 | except Exception as e: 74 | print(e) 75 | exit() 76 | finally: 77 | if cnx != None: 78 | cnx.close() 79 | 80 | #格式化输出 81 | print("{0:<32} {1}".format("client_host_ip".upper(),"counter".upper())) 82 | print("-"*48) 83 | for host_ip,counter in c.most_common(args.top): 84 | print("{0:<32} {1}".format(host_ip,counter)) 85 | 86 | def active_user(args): 87 | """以活跃用户为维度为当前连接进行聚合 88 | """ 89 | c = Counter() 90 | cnx = None 91 | try: 92 | cnx = connector.connect(host=args.host,port=args.port,user=args.user,password=args.password) 93 | cursor = cnx.cursor() 94 | cursor.execute("select user,host,state from information_schema.processlist;") 95 | for user,host,state in cursor.fetchall(): 96 | if 'sleep' not in state.lower() and state != '': 97 | c.update({user:1}) 98 | except Exception as e: 99 | print(e) 100 | exit() 101 | finally: 102 | if cnx != None: 103 | cnx.close() 104 | 105 | #格式化输出 106 | print("{0:<32} {1}".format("client_host_ip".upper(),"counter".upper())) 107 | print("-"*48) 108 | for host_ip,counter in c.most_common(args.top): 109 | print("{0:<32} {1}".format(host_ip,counter)) 110 | 111 | operations = { 112 | 'all_conn':all_connections, 113 | 'active_conn':active_connections, 114 | 'all_user':all_user, 115 | 'active_user':active_user, 116 | } 117 | 118 | if __name__ == "__main__": 119 | parser=argparse.ArgumentParser() 120 | parser.add_argument('--host',help='Connect to 
host',default='127.0.0.1') 121 | parser.add_argument('--port',help='Port number to use for connection',default=3306,type=int) 122 | parser.add_argument('--user',help='User for login if not current user',default='root') 123 | parser.add_argument('--password',default='Password to use when connecting to server') 124 | parser.add_argument('--top',default=7,type=int) 125 | parser.add_argument('operation',choices=operations.keys()) 126 | args=parser.parse_args() 127 | operations[args.operation](args) -------------------------------------------------------------------------------- /bin/mtlsbackup: -------------------------------------------------------------------------------- 1 | mtls-backup -------------------------------------------------------------------------------- /bin/mtlsbigfiles: -------------------------------------------------------------------------------- 1 | mtls-big-files -------------------------------------------------------------------------------- /bin/mtlsdeleterows: -------------------------------------------------------------------------------- 1 | mtls-delete-rows -------------------------------------------------------------------------------- /bin/mtlshttp: -------------------------------------------------------------------------------- 1 | mtls-http -------------------------------------------------------------------------------- /bin/mtlslog: -------------------------------------------------------------------------------- 1 | mtls-log -------------------------------------------------------------------------------- /bin/mtlsmonitor: -------------------------------------------------------------------------------- 1 | mtls-monitor -------------------------------------------------------------------------------- /build/lib/mtls/__init__.py: -------------------------------------------------------------------------------- 1 | name = "mysqltools-python" 2 | -------------------------------------------------------------------------------- 
/build/lib/mtls/base.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | """ 3 | 定义基类: 4 | ConnectorBase 代表一个到Mysql数据库的连接 5 | VariableBase 代表一个查询global variable的连接 6 | StatuBase 代表一个查询global statu 的连接 7 | """ 8 | 9 | __all__ = ['ConnectorBase','VariableBase','StatuBase','PsBase','ShowSlave'] 10 | 11 | import mysql.connector 12 | import logging 13 | 14 | 15 | class ConnectorBase(object): 16 | """ConnectorBase代表一个与数据库之间的连接 17 | ConnectorBase实例的.cursor属性代表着一个连接到数据库的cursor对象 18 | """ 19 | user='mtsuser' 20 | password='mts10352' 21 | host='127.0.0.1' 22 | port=3306 23 | _cnx=None 24 | _cursor=None 25 | _logger=None 26 | 27 | def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352',database='information_schema',*args,**kws): 28 | self.host=host 29 | self.port=port 30 | self.user=user 31 | self.password=password 32 | self.database=database 33 | self._cnx=None 34 | self._cursor=None 35 | self._logger=None 36 | 37 | 38 | @property 39 | def cursor(self): 40 | """返回cursor对象、cursor对象可以完成对数据库的操作 41 | """ 42 | if self._cursor != None: 43 | return self._cursor 44 | else: 45 | try: 46 | self._cnx=mysql.connector.connect(user=self.user,password=self.password,host=self.host,port=self.port,database=self.database) 47 | self._cursor=self._cnx.cursor() 48 | return self._cursor 49 | except Exception as e: 50 | error_message=str(e) 51 | self.logger.info(e) 52 | self.logger.info("exit") 53 | self.close() 54 | exit() 55 | 56 | #def format_string_value(self,raw_value): 57 | # if isinstance(raw_value,str): 58 | # return raw_value 59 | # else: 60 | # self.logger.info(raw_value) 61 | # return 'invalidate str value' 62 | 63 | #def format_byte_value(self,raw_value): 64 | # if isinstance(raw_value,int): 65 | # kb_raw_value=raw_value/1024 66 | # if kb_raw_value >1024: 67 | # mb_raw_value=kb_raw_value/1024 68 | # if mb_raw_value>1024: 69 | # gb_raw_value=mb_raw_value/1024 70 | # if gb_raw_value >1024: 71 | # return 
"{0}TB".format(gb_raw_value/1024) 72 | # else: 73 | # return "{0}GB".format(gb_raw_value) 74 | # else: 75 | # return "{0}MB".format(mb_raw_value) 76 | # else: 77 | # return "{0}KB".format(kb_raw_value) 78 | # else: 79 | # return "invalidate byte value" 80 | 81 | #def format_intger_value(self,raw_value): 82 | # return int(raw_value) 83 | 84 | #def format_bool_value(self,raw_value): 85 | # if raw_value in ['off',0]: 86 | # return 'OFF' 87 | # else: 88 | # return 'ON' 89 | 90 | @property 91 | def logger(self): 92 | """返回logger对象方便日志的输出 93 | """ 94 | if self._logger != None: 95 | return self._logger 96 | else: 97 | self._logger=logging.getLogger("mts.base.{0}".format(self.__class__)) 98 | stream_handler=logging.StreamHandler() 99 | formater=logging.Formatter("%(asctime)-24s %(levelname)-8s %(name)-24s : %(message)s") 100 | stream_handler.setFormatter(formater) 101 | self._logger.addHandler(stream_handler) 102 | self._logger.setLevel(logging.DEBUG) 103 | return self._logger 104 | 105 | def __str__(self): 106 | """自定义ConnectorBase对象的字符表示 107 | """ 108 | obj_str="{0.__class__} instance (host={0.host},port={0.port},user={0.user},password={0.password} )".format(self) 109 | return obj_str 110 | 111 | def __del__(self): 112 | """资源回收 113 | """ 114 | #Object 类中没有__del__相关的方法 115 | #super(ConnectorBase,self).__del__() 116 | if self._cnx != None: 117 | self._cnx.close() 118 | 119 | def close(self): 120 | if self._cnx != None: 121 | self._cnx.close() 122 | 123 | 124 | class VariableBase(ConnectorBase): 125 | """定义一个用于查询variable的类,类字符variable_name用于指定variable的名字,variable_type用于指定variable对应值的类型 126 | """ 127 | variable_name=None 128 | variable_type="string" 129 | _variable_types=("string","byte","intger","percent","bool") 130 | _value=None 131 | 132 | def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352',database='information_schema', 133 | variable_name=None,variable_type="string",*args,**kws): 134 | 
super(VariableBase,self).__init__(host,port,user,password) 135 | if variable_name != None: 136 | self.variable_name=variable_name 137 | self.variable_type=variable_type 138 | 139 | 140 | def _get_value(self): 141 | try: 142 | self.cursor.execute("select @@{0} ;".format(self.variable_name)) 143 | tmp_value=self.cursor.fetchone() 144 | if tmp_value != None and len(tmp_value)==1: 145 | return tmp_value[0] 146 | else: 147 | self.logger.info("variable {0} has a bad value {1}".format(self.variable_name,tmp_value)) 148 | self.close() 149 | exit() 150 | except Exception as e: 151 | errore_message=str(e) 152 | self.logger.info(errore_message) 153 | self.logger.info("exit") 154 | self.close() 155 | exit() 156 | 157 | 158 | @property 159 | def value(self): 160 | #format_mapper={'string':self.format_string_value, 161 | # 'byte' :self.format_byte_value, 162 | # 'intger':self.format_intger_value, 163 | # 'bool' :self.format_bool_value, 164 | #} 165 | if self._value == None: 166 | self._value=self._get_value() 167 | return self._value 168 | #return format_mapper[self.variable_type](self._value) 169 | 170 | #@property 171 | #def original_value(self): 172 | # return self._get_value() 173 | 174 | 175 | class StatuBase(ConnectorBase): 176 | statu_name="uptime" 177 | statu_type="intger" 178 | _statu_types=("string","byte","intger","percent","bool") 179 | _value=None 180 | 181 | def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352', 182 | statu_name=None,statu_type="intger",*args,**kw): 183 | super(StatuBase,self).__init__(host,port,user,password) 184 | if statu_name != None: 185 | self.statu_name=statu_name 186 | self.statu_type=statu_type 187 | self._value=None 188 | 189 | #def format_byte_value(self,raw_value): 190 | # """ 191 | # 由于statu 是由show global status like 'xxx' 得到的,所以它返回的是str,对于字节类型的statu,转换一下才行 192 | # """ 193 | # return super(StatuBase,self).format_byte_value(int(self._value)) 194 | 195 | def _get_value(self): 196 | if self._value != None: 197 | 
return self._value 198 | else: 199 | try: 200 | self.cursor.execute("show global status like '{0}' ;".format(self.statu_name)) 201 | name_and_value=self.cursor.fetchone() 202 | if name_and_value == None: 203 | self.logger.info("get a None value for statu {0} ".format(self.statu_name)) 204 | self.close() 205 | exit() 206 | name,value=name_and_value 207 | self._value=value 208 | return self._value 209 | except Exception as e: 210 | error_message=str(e) 211 | self.logger.info(error_message) 212 | self.close() 213 | exit() 214 | 215 | @property 216 | def value(self): 217 | if self._value == None: 218 | self._value = self._get_value() 219 | return self._value 220 | #format_mapper={'string':self.format_string_value, 221 | # 'intger':self.format_intger_value, 222 | # 'byte' :self.format_byte_value,} 223 | #return format_mapper[self.statu_type](self._get_value()) 224 | 225 | #@property 226 | #def original_value(self): 227 | # return self._get_value() 228 | 229 | 230 | class PsBase(ConnectorBase): 231 | """ 232 | 所有与performance_schema操作相关的基类 233 | """ 234 | 235 | 236 | class ShowSlave(ConnectorBase): 237 | """通过show slave status 提取信息 238 | """ 239 | #mysql-8.0.11 版本下('Waiting for master to send event', '127.0.0.1', 'repl', 3307, 60, 'mysql-bin.000001', 151, 'sqlstudio-relay-bin.000002', 357, 'mysql-bin.000001', 'Yes', 'Yes', '', '', '', '', '', '', 0, '', 0, 151, 561, 'None', '', 0, 'No', '', '', '', '', '', 0, 'No', 0, '', 0, '', '', 375, '2c9732e2-8740-11e8-9514-000c29cb87a3', 'mysql.slave_master_info', 0, None, 'Slave has read all relay log; waiting for more updates', 86400, '', '', '', '', '', '', '8e64b57f-83eb-11e8-be2f-000c29cb87a3:1', 1, '', '', '', '', 0) 240 | show_slave_name=None 241 | dimensions ={ 242 | 'Slave_IO_State':0, 243 | 'Master_Host':1, 244 | 'Master_User':2, 245 | 'Master_Port':3, 246 | 'Connect_Retry':4, 247 | 'Master_Log_File':5, 248 | 'Read_Master_Log_Pos':6, 249 | 'Relay_Log_File':7, 250 | 'Relay_Log_Pos':8, 251 | 'Relay_Master_Log_File':9, 252 | 
'Slave_IO_Running':10, 253 | 'Slave_SQL_Running':11, 254 | 'Replicate_Do_DB':12, 255 | 'Replicate_Ignore_DB':13, 256 | 'Replicate_Do_Table':14, 257 | 'Replicate_Ignore_Table':15, 258 | 'Replicate_Wild_Do_Table':16, 259 | 'Replicate_Wild_Ignore_Table':17, 260 | 'Last_Errno':18, 261 | 'Last_Error':19, 262 | 'Skip_Counter':20, 263 | 'Exec_Master_Log_Pos':21, 264 | 'Relay_Log_Space':22, 265 | 'Until_Condition':23, 266 | 'Until_Log_File':24, 267 | 'Until_Log_Pos':25, 268 | 'Master_SSL_Allowed':26, 269 | 'Master_SSL_CA_File':27, 270 | 'Master_SSL_CA_Path':28, 271 | 'Master_SSL_Cert':29, 272 | 'Master_SSL_Cipher':30, 273 | 'Master_SSL_Key':31, 274 | 'Seconds_Behind_Master':32, 275 | 'Master_SSL_Verify_Server_Cert':33, 276 | 'Last_IO_Errno':34, 277 | 'Last_IO_Error':35, 278 | 'Last_SQL_Errno':36, 279 | 'Last_SQL_Error':37, 280 | 'Replicate_Ignore_Server_Ids':38, 281 | 'Master_Server_Id':39, 282 | 'Master_UUID':40, 283 | 'Master_Info_File':41, 284 | 'SQL_Delay':42, 285 | 'SQL_Remaining_Delay':43, 286 | 'Slave_SQL_Running_State':44, 287 | 'Master_Retry_Count':45, 288 | 'Master_Bind':46, 289 | 'Last_IO_Error_Timestamp':47, 290 | 'Last_SQL_Error_Timestamp':48, 291 | 'Master_SSL_Crl':49, 292 | 'Master_SSL_Crlpath':50, 293 | 'Retrieved_Gtid_Set':51, 294 | 'Executed_Gtid_Set':52, 295 | 'Auto_Position':53, 296 | 'Replicate_Rewrite_DB':54, 297 | 'Channel_Name':55, 298 | 'Master_TLS_Version':56, 299 | 'Master_public_key_path':57, 300 | 'Get_master_public_key':58 301 | } 302 | dimensions_55 = { 303 | 'Slave_IO_State': 0, 304 | 'Master_Host': 1, 305 | 'Master_User': 2, 306 | 'Master_Port': 3, 307 | 'Connect_Retry': 4, 308 | 'Master_Log_File': 5, 309 | 'Read_Master_Log_Pos': 6, 310 | 'Relay_Log_File': 7, 311 | 'Relay_Log_Pos': 8, 312 | 'Relay_Master_Log_File': 9, 313 | 'Slave_IO_Running': 10, 314 | 'Slave_SQL_Running': 11, 315 | 'Replicate_Do_DB': 12, 316 | 'Replicate_Ignore_DB': 13, 317 | 'Replicate_Do_Table': 14, 318 | 'Replicate_Ignore_Table': 15, 319 | 'Replicate_Wild_Do_Table': 
16, 320 | 'Replicate_Wild_Ignore_Table': 17, 321 | 'Last_Errno': 18, 322 | 'Last_Error': 19, 323 | 'Skip_Counter': 20, 324 | 'Exec_Master_Log_Pos': 21, 325 | 'Relay_Log_Space': 22, 326 | 'Until_Condition': 23, 327 | 'Until_Log_File': 24, 328 | 'Until_Log_Pos': 25, 329 | 'Master_SSL_Allowed': 26, 330 | 'Master_SSL_CA_File': 27, 331 | 'Master_SSL_CA_Path': 28, 332 | 'Master_SSL_Cert': 29, 333 | 'Master_SSL_Cipher': 30, 334 | 'Master_SSL_Key': 31, 335 | 'Seconds_Behind_Master': 32, 336 | 'Master_SSL_Verify_Server_Cert': 33, 337 | 'Last_IO_Errno': 34, 338 | 'Last_IO_Error': 35, 339 | 'Last_SQL_Errno': 36, 340 | 'Last_SQL_Error': 37, 341 | 'Replicate_Ignore_Server_Ids': 38, 342 | 'Master_Server_Id': 39, 343 | 'Cur_Exec_Relay_Log_File': 40, 344 | 'Cur_Exec_Relay_Log_Pos': 41, 345 | 'Wanted_Purge_Relay_Log': 42, 346 | 'Purged_Relay_Log': 43 347 | } 348 | 349 | def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352',*args,**kw): 350 | super().__init__(host,port,user,password) 351 | self._value=None 352 | 353 | def _get_value(self): 354 | if self._value != None: 355 | return self._value 356 | else: 357 | try: 358 | #适配mysql-5.5.x版本的show slave status; 359 | self.cursor.execute("select @@version") 360 | mysql_version,*_ = self.cursor.fetchone() 361 | if '5.5' in mysql_version: 362 | self.dimensions = self.dimensions_55 363 | 364 | self.cursor.execute("show slave status") 365 | data = self.cursor.fetchone() 366 | if data == None: 367 | self._value = "this node is master" 368 | return self._value 369 | index = self.dimensions[self.show_slave_name] 370 | self._value = data[index] 371 | return self._value 372 | except Exception as e: 373 | error_message=str(e) 374 | self.logger.info(error_message) 375 | self.close() 376 | exit() 377 | 378 | @property 379 | def value(self): 380 | if self._value == None: 381 | self._value = self._get_value() 382 | return self._value 383 | 384 | #@property 385 | #def original_value(self): 386 | # return self._get_value() 
# ===========================================================================
# build/lib/mtls/innodb_statu.py
# ===========================================================================
from .base import ConnectorBase


class InnodbStatuBase(ConnectorBase):
    """Base class for helpers built on ``SHOW ENGINE INNODB STATUS``."""

    def show_engine_innodb_status(self):
        # BUG FIX: the statement used to read "show innodb egine status ;",
        # which is invalid MySQL ("egine" typo and wrong keyword order).
        # The correct syntax is SHOW ENGINE INNODB STATUS.
        self.cursor.execute("show engine innodb status ;")


# ===========================================================================
# build/lib/mtls/inspection.py
# ===========================================================================
# -*- coding:utf8 -*-

import logging

from . import variable
from . import base
from . import statu


class Inspection(base.ConnectorBase):
    """
    Base class of all inspection items.

    ``value`` and ``suggestion`` are abstract: a concrete inspection item
    must override them.  Calling either on the base class logs a message,
    closes the connection and terminates the process.
    """
    name = None
    _statu = None
    _variable = None

    @property
    def value(self):
        self.logger.info("mts.inspection.Inspection.value function is abstract")
        self.close()
        exit()

    @property
    def suggestion(self):
        self.logger.info("mts.inspection.Inspection.suggestion function is abstract")
        self.close()
        exit()

    @property
    def logger(self):
        # Lazily build one logger per concrete class and cache it so the
        # stream handler is attached only once per instance.
        # NOTE(review): relies on the base class providing a ``_logger``
        # attribute (initially None) -- confirm in mtls.base.ConnectorBase.
        if self._logger is not None:
            return self._logger
        self._logger = logging.getLogger("mts.inspection.{0}".format(self.__class__))
        stream_handler = logging.StreamHandler()
        formater = logging.Formatter("%(asctime)-24s %(levelname)-8s %(name)-24s : %(message)s")
        stream_handler.setFormatter(formater)
        self._logger.addHandler(stream_handler)
        self._logger.setLevel(logging.DEBUG)
        return self._logger


# ===========================================================================
# build/lib/mtls/mgr.py
# ===========================================================================
# -*- coding:utf8 -*-
"""
All mysql-group-replication (MGR) related query helpers.
"""

from .base import PsBase


class MgrBase(PsBase):
    """
    Base class of every MGR related scalar query.

    Subclasses override ``scalar_stmt`` with a statement returning a single
    scalar, and optionally ``format_type`` to choose how the raw value is
    rendered ('raw_format' or 'to_string_format').
    """

    scalar_stmt = "select 'this is test info for MgrBase class' as msg ;"
    format_type = "raw_format"

    def raw_format(self, raw_data):
        """Return the value exactly as fetched from the server."""
        return raw_data

    def to_string_format(self, raw_data):
        """Decode a bytes value fetched from the server into str (utf8)."""
        return raw_data.decode('utf8')

    @property
    def original_value(self):
        # Run the scalar statement and render its single-column result.
        self.cursor.execute(self.scalar_stmt)
        node_count = self.cursor.fetchone()
        if len(node_count) == 1:
            formats = {
                'raw_format': self.raw_format,
                'to_string_format': self.to_string_format,
            }
            return formats[self.format_type](node_count[0])
        else:
            self.logger.error("get unexpected value ' {0} ' for MgrNodeCount".format(node_count))
            exit()

    @property
    def value(self):
        return self.original_value


class MgrTotalMemberCount(MgrBase):
    """Number of members in the mysql-group-replication cluster."""
    scalar_stmt = "select count(*) from performance_schema.replication_group_members ;"


class MgrOnLineMemberCount(MgrBase):
    """Number of members currently in the ONLINE state."""
    scalar_stmt = "select count(*) from performance_schema.replication_group_members where member_state='ONLINE' ;"


class MgrMemberState(MgrBase):
    """member_state of the current node."""
    scalar_stmt = """select member_state 
                from performance_schema.replication_group_members 
                where member_id=@@server_uuid;"""

    format_type = "to_string_format"


class MgrCountTransactionsInQueue(MgrBase):
    """Number of transactions waiting for conflict detection."""
    scalar_stmt = """select count_transactions_in_queue 
                from performance_schema.replication_group_member_stats 
                where member_id=@@server_uuid;"""


class MgrCountTransactionsChecked(MgrBase):
    """Number of transactions that already went through conflict detection."""
    scalar_stmt = """select count_transactions_checked 
                from performance_schema.replication_group_member_stats 
                where member_id=@@server_uuid;"""


class MgrCountConflictsDetected(MgrBase):
    """Number of transactions that failed conflict detection."""
    scalar_stmt = """
    select count_conflicts_detected 
    from performance_schema.replication_group_member_stats 
    where member_id=@@server_uuid;
    """


class MgrTransactionsCommittedAllMembers(MgrBase):
    """GTID set of transactions committed on all members."""
    scalar_stmt = """ select transactions_committed_all_members 
    from performance_schema.replication_group_member_stats 
    where member_id=@@server_uuid;
    """


# ===========================================================================
# build/lib/mtls/replication.py
# ===========================================================================
#-*- coding:utf8 -*-

"""
Map each replication related item of ``show global status`` and
``show slave status`` to a class of its own, for convenience.
"""
from .base import StatuBase, ShowSlave

__all__ = ['RplSemiSyncMasterClients', 'RplSemiSyncMasterStatus', 'RplSemiSyncMasterNoTx',
           'RplSemiSyncMasterYesTx', 'RplSemiSyncSlaveStatus', 'SlaveIORunning', 'SlaveSQLRunning',
           'SecondsBehindMaster']


class RplSemiSyncMasterClients(StatuBase):
    """
    The number of semisynchronous slaves
    """
    statu_name = "Rpl_semi_sync_master_clients"


class RplSemiSyncMasterStatus(StatuBase):
    """
    The value is 1 if the plugin has been enabled It is 0 if the plugin is not enabled
    """
    statu_name = "Rpl_semi_sync_master_status"


class RplSemiSyncMasterNoTx(StatuBase):
    """
    The number of commits that were not acknowledged successfully by a slave
    """
    statu_name = "Rpl_semi_sync_master_no_tx"


class RplSemiSyncMasterYesTx(StatuBase):
    """
    The number of commits that were acknowledged successfully by a slave
    """
    statu_name = "Rpl_semi_sync_master_yes_tx"


class RplSemiSyncSlaveStatus(StatuBase):
    """
    Whether semisynchronous replication currently is operational on the slave. This is 1 if the plugin has been enabled and the slave I/O thread is running, 0 otherwise.
    """
    statu_name = "Rpl_semi_sync_slave_status"


class SlaveIORunning(ShowSlave):
    """
    1  --> the IO thread is running (Yes)
    0  --> any other state
    -2 --> this node is a master (no slave status at all)
    """
    show_slave_name = "Slave_IO_Running"

    def _get_value(self):
        value = super()._get_value()
        if value == 'this node is master':
            return -2  # master: return -2 directly (not cached on purpose)
        if value.upper() == 'YES':
            self._value = 1
            return self._value
        else:
            self._value = 0
            return self._value


class SlaveSQLRunning(ShowSlave):
    """
    1  --> the SQL thread is running (Yes)
    0  --> any other state
    -2 --> this node is a master
    """
    show_slave_name = "Slave_SQL_Running"

    def _get_value(self):
        value = super()._get_value()
        if value == 'this node is master':
            return -2  # master: return -2 directly
        if value.upper() == 'YES':
            self._value = 1
            return self._value
        else:
            self._value = 0
            return self._value


class SecondsBehindMaster(ShowSlave):
    """
    Replication delay in seconds; -2 for a master, -1 when the value is
    not an integer (e.g. NULL while the slave threads are down).
    """
    show_slave_name = "Seconds_Behind_Master"

    def _get_value(self):
        value = super()._get_value()
        if value == 'this node is master':
            return -2  # master: return -2 directly
        else:
            try:
                self._value = int(value)
            except Exception as e:
                return -1
            else:
                return self._value


# ===========================================================================
# build/lib/mtls/variable.py
# ===========================================================================
#-*- coding:utf8 -*-
"""
Define one class per commonly used MySQL variable, for convenience.

NOTE(review): the literal "intger" (sic) is used consistently project-wide
as the variable_type tag; it is kept as-is because VariableBase (not visible
here) presumably dispatches on this exact spelling -- confirm before fixing
the typo globally.
"""

from .base import VariableBase

#----------------------------------------------------
# global configuration variables
#----------------------------------------------------
class ServerID(VariableBase):
    variable_name = "server_id"
    variable_type = "intger"

class BaseDir(VariableBase):
    variable_name = "basedir"
    variable_type = "string"

class DataDir(VariableBase):
    variable_name = "datadir"
    variable_type = "string"

class Port(VariableBase):
    variable_name = "port"
    variable_type = "intger"

class CharacterSetServer(VariableBase):
    variable_name = "character_set_server"
    variable_type = "string"

class Socket(VariableBase):
    variable_name = "socket"
    variable_type = "string"

class ReadOnly(VariableBase):
    variable_name = "read_only"
    variable_type = "intger"

class SkipNameResolve(VariableBase):
    variable_name = "skip_name_resolve"
    variable_type = "intger"

class LowerCaseTableNames(VariableBase):
    variable_name = "lower_case_table_names"
    variable_type = "intger"

class ThreadCacheSize(VariableBase):
    variable_name = "thread_cache_size"
    variable_type = "intger"

class TableOpenCache(VariableBase):
    variable_name = "table_open_cache"
    variable_type = "intger"

class TableDefinitionCache(VariableBase):
    variable_name = "table_definition_cache"
    variable_type = "intger"

class TableOpenCacheInstances(VariableBase):
    variable_name = "table_open_cache_instances"
    variable_type = "intger"

class MaxConnections(VariableBase):
    variable_name = "max_connections"
    variable_type = "intger"


#----------------------------------------------------
# binlog configuration variables
#----------------------------------------------------
class BinlogFormat(VariableBase):
    variable_name = "binlog_format"
    variable_type = "string"

class LogBin(VariableBase):
    variable_name = "log_bin"
    variable_type = "string"

    @property
    def value(self):
        # NOTE(review): presumably _get_value returns 0 when log_bin is
        # disabled, which is normalized to the string 'OFF' -- confirm
        # against VariableBase._get_value.
        if self._value == None:
            self._value = self._get_value()
            if self._value == 0:
                self._value = 'OFF'
            return self._value
        else:
            return self._value


class BinlogRowsQueryLogEvents(VariableBase):
    variable_name = "binlog_rows_query_log_events"
    variable_type = "bool"

class LogSlaveUpdates(VariableBase):
    variable_name = "log_slave_updates"
    variable_type = "bool"

class ExpireLogsDays(VariableBase):
    variable_name = "expire_logs_days"
    variable_type = "intger"

class BinlogCacheSize(VariableBase):
    variable_name = "binlog_cache_size"
    variable_type = "byte"

class SyncBinlog(VariableBase):
    variable_name = "sync_binlog"
    variable_type = "intger"


#----------------------------------------------------
# error log configuration variables
#----------------------------------------------------
class ErrorLog(VariableBase):
    variable_name = "log_error"
    variable_type = "string"

#----------------------------------------------------
# gtid configuration variables
#----------------------------------------------------
class GtidMode(VariableBase):
    variable_name = "gtid_mode"
    variable_type = "bool"

class EnforceGtidConsistency(VariableBase):
    variable_name = "enforce_gtid_consistency"
    variable_type = "bool"

#----------------------------------------------------
# replication configuration variables
#----------------------------------------------------
class MasterInfoRepository(VariableBase):
    variable_name = "master_info_repository"
    variable_type = "string"

class RelayLogInfoRepository(VariableBase):
    variable_name = "relay_log_info_repository"
    variable_type = "string"

class SlaveParallelType(VariableBase):
    variable_name = "slave_parallel_type"
    variable_type = "string"

class SlaveParallelWorkers(VariableBase):
    variable_name = "slave_parallel_workers"
    variable_type = "intger"

#----------------------------------------------------
# innodb configuration variables
#----------------------------------------------------
class InnodbDataFilePath(VariableBase):
    variable_name = "innodb_data_file_path"

class InnodbTempDataFilePath(VariableBase):
    variable_name = "innodb_temp_data_file_path"

class InnodbBufferPoolFilename(VariableBase):
    # BUG FIX: the name used to carry a trailing space
    # ("innodb_buffer_pool_filename "), so the
    # SHOW GLOBAL VARIABLES LIKE pattern could never match.
    variable_name = "innodb_buffer_pool_filename"

class InnodbLogGroupHomeDir(VariableBase):
    variable_name = "innodb_log_group_home_dir"

class InnodbLogFilesInGroup(VariableBase):
    variable_name = "innodb_log_files_in_group"
    variable_type = "intger"

class InnodbLogFileSize(VariableBase):
    variable_name = "innodb_log_file_size"
    variable_type = "byte"

class InnodbFileformat(VariableBase):
    variable_name = "innodb_file_format"

class InnodbFilePerTable(VariableBase):
    variable_name = "innodb_file_per_table"
    variable_type = "bool"

class InnodbOnlineAlterLogMaxSize(VariableBase):
    variable_name = "innodb_online_alter_log_max_size"
    variable_type = "byte"

class InnodbOpenFiles(VariableBase):
    variable_name = "innodb_open_files"
    variable_type = "intger"

class InnodbPageSize(VariableBase):
    variable_name = "innodb_page_size"
    variable_type = "byte"

class InnodbThreadConcurrency(VariableBase):
    variable_name = "innodb_thread_concurrency"
    variable_type = "intger"

class InnodbReadIoThreads(VariableBase):
    variable_name = "innodb_read_io_threads"
    variable_type = "intger"

class InnodbWriteIoThreads(VariableBase):
    variable_name = "innodb_write_io_threads"
    variable_type = "intger"

class InnodbPurgeThreads(VariableBase):
    variable_name = "innodb_purge_threads"
    variable_type = "intger"

class InnodbLockWaitTimeout(VariableBase):
    variable_name = "innodb_lock_wait_timeout"
    variable_type = "intger"

class InnodbSpinWaitDelay(VariableBase):
    variable_name = "innodb_spin_wait_delay"
    variable_type = "intger"

class InnodbAutoincLockMode(VariableBase):
    variable_name = "innodb_autoinc_lock_mode"
    variable_type = "intger"

class InnodbStatsAutoRecalc(VariableBase):
    variable_name = "innodb_stats_auto_recalc"
    variable_type = "intger"

class InnodbStatsPersistent(VariableBase):
    variable_name = "innodb_stats_persistent"
    variable_type = "intger"

class InnodbStatsPersistentSamplePages(VariableBase):
    variable_name = "innodb_stats_persistent_sample_pages"
    variable_type = "intger"

class InnodbBufferPoolInstances(VariableBase):
    variable_name = "innodb_buffer_pool_instances"
    variable_type = "intger"

class InnodbAdaptiveHashIndex(VariableBase):
    variable_name = "innodb_adaptive_hash_index"
    variable_type = "bool"

class InnodbChangeBuffering(VariableBase):
    variable_name = "innodb_change_buffering"

class InnodbChangeBufferMaxSize(VariableBase):
    variable_name = "innodb_change_buffer_max_size"
    variable_type = "intger"

class InnodbFlushNeighbors(VariableBase):
    variable_name = "innodb_flush_neighbors"
    variable_type = "bool"

class InnodbFlushMethod(VariableBase):
    variable_name = "innodb_flush_method"

class InnodbDoublewrite(VariableBase):
    variable_name = "innodb_doublewrite"
    variable_type = "bool"

class InnodbLogBufferSize(VariableBase):
    variable_name = "innodb_log_buffer_size"
    variable_type = "byte"

class InnodbFlushLogAtTimeout(VariableBase):
    variable_name = "innodb_flush_log_at_timeout"
    variable_type = "intger"

class InnodbFlushLogAtTrxCommit(VariableBase):
    variable_name = "innodb_flush_log_at_trx_commit"
    variable_type = "intger"

class InnodbBufferPoolSize(VariableBase):
    variable_name = "innodb_buffer_pool_size"
    variable_type = "byte"

class Autocommit(VariableBase):
    variable_name = "autocommit"
    variable_type = "bool"

    def _get_value(self):
        """
        mysql-connector forces autocommit=true on its own session, so
        _get_value is overridden here to read the GLOBAL autocommit
        setting instead of the session one.
        """
        try:
            self.cursor.execute("show global variables like '{0}';".format(self.variable_name))
            tmp_value = self.cursor.fetchone()
            if tmp_value is not None and len(tmp_value) == 2:
                return tmp_value[1]
            else:
                self.logger.info("variable {0} has a bad value {1}".format(self.variable_name, tmp_value))
                self.close()
                exit()
        except Exception as e:
            error_message = str(e)
            self.logger.info(error_message)
            self.logger.info("exit")
            self.close()
            exit()

class InnodbOldBlocksPct(VariableBase):
    variable_name = "innodb_old_blocks_pct"
    variable_type = "intger"

class InnodbOldBlocksTime(VariableBase):
    variable_name = "innodb_old_blocks_time"
    variable_type = "intger"

class InnodbReadAheadThreshold(VariableBase):
    variable_name = "innodb_read_ahead_threshold"
    variable_type = "intger"

class InnodbRandomReadAhead(VariableBase):
    variable_name = "innodb_random_read_ahead"
    variable_type = "bool"

class InnodbBufferPoolDumpPct(VariableBase):
    variable_name = "innodb_buffer_pool_dump_pct"
    variable_type = "intger"

class InnodbBufferPoolDumpAtShutdown(VariableBase):
    variable_name = "innodb_buffer_pool_dump_at_shutdown"
    variable_type = "bool"

class InnodbBufferPoolLoadAtStartup(VariableBase):
    variable_name = "innodb_buffer_pool_load_at_startup"
    variable_type = "bool"

#----------------------------------------------------
## query cache variables
#----------------------------------------------------
class QueryCacheLimit(VariableBase):
    variable_name = "query_cache_limit"
    variable_type = "int"

class QueryCacheMinResUnit(VariableBase):
    variable_name = "query_cache_min_res_unit"
    variable_type = "int"

class QueryCacheSize(VariableBase):
    variable_name = "query_cache_size"
    variable_type = "int"

class QueryCacheType(VariableBase):
    variable_name = "query_cache_type"
    variable_type = "str"

class Version(VariableBase):
    variable_name = "version"
    variable_type = "str"
# ===========================================================================
# build/scripts-3.6/mtlsbackup
# (original shebang: #!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3)
# ===========================================================================
#-*- coding: UTF-8 -*-

"""
Unified backup driver that wraps the mysqldump, xtrabackup and meb tools.
"""

__all__ = ['Meb', 'Xtrabackup', 'MysqlDump']
__author__ = '蒋乐哥哥'
__version__ = '0.1'


# BUG FIX: the original import line listed argparse twice
# (import os,argparse,logging,configparser,argparse,shutil).
import argparse
import configparser
import logging
import os
import shutil
from datetime import datetime

logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(filename)s] [%(levelname)s] %(message)s', )


class BackupTool(object):
    """
    Base class of every backup tool.

    Class attributes are defaults; they are overridden from the [global]
    section of the config file passed to __init__.
    """
    user = "backup"
    password = "DX3906"
    host = "127.0.0.1"
    port = 3306
    # ISO day-of-week numbers (1=Monday .. 7=Sunday) on which a full /
    # differential backup runs, as comma-free / comma-separated digit strings.
    full_backup_days = "7"
    diff_backup_days = "1,2,3,4,5,6"
    backup_tool = "xtrabackup"
    backup_data_dir = "/database/backups/3306/data/"
    # BUG FIX: default was "database/backups/3306/log/" -- a relative path
    # missing the leading '/', inconsistent with the sibling directories.
    backup_log_dir = "/database/backups/3306/log/"
    backup_temp_dir = "/database/backups/3306/temp/"
    current = None
    # NOTE(review): "INCREMNETBACKUP" is misspelled but kept verbatim --
    # it is a runtime tag other code may compare against.
    backup_types = {
        "full_backup": "FULLBACKUP",
        "diff_backup": "DIFFBACKUP",
        "increment_backup": "INCREMNETBACKUP"}

    def __init__(self, mtlsconf):
        """
        mtlsconf is the mapping produced by configparser.
        """
        self.user = mtlsconf['global']['user'] if 'user' in mtlsconf['global'] else self.user
        self.password = mtlsconf['global']['password'] if 'password' in mtlsconf['global'] else self.password
        self.host = mtlsconf['global']['host'] if 'host' in mtlsconf['global'] else self.host
        self.port = int(mtlsconf['global']['port']) if 'port' in mtlsconf['global'] else self.port
        self.full_backup_days = mtlsconf['global']['full_backup_days'] if 'full_backup_days' in mtlsconf['global'] else self.full_backup_days
        self.diff_backup_days = mtlsconf['global']['diff_backup_days'] if 'diff_backup_days' in mtlsconf['global'] else self.diff_backup_days
        self.backup_tool = mtlsconf['global']['backup_tool'] if 'backup_tool' in mtlsconf['global'] else self.backup_tool
        self.backup_data_dir = mtlsconf['global']['backup_data_dir'] if 'backup_data_dir' in mtlsconf['global'] else self.backup_data_dir
        self.backup_log_dir = mtlsconf['global']['backup_log_dir'] if 'backup_log_dir' in mtlsconf['global'] else self.backup_log_dir
        self.backup_temp_dir = mtlsconf['global']['backup_temp_dir'] if 'backup_temp_dir' in mtlsconf['global'] else self.backup_temp_dir
        self.current = datetime.now()
        # check the environment (backup directories) before any backup runs
        self.preExec()

    def directorCheck(self, path):
        """
        Ensure a backup directory exists, creating it when missing.
        """
        logging.info("开始检查 {path} ".format(path=path))
        if not os.path.exists(path):
            # directory does not exist: create it (logging.warn is
            # deprecated -- use logging.warning)
            logging.warning("目录 {path} 不存在,准备创建... ".format(path=path))
            os.makedirs(path)
            logging.info("目录 {path} 创建完成 ...".format(path=path))

    def preExec(self):
        """
        Pre-flight environment check: make sure all three backup
        directories exist.
        """
        self.directorCheck(self.backup_data_dir)
        self.directorCheck(self.backup_log_dir)
        self.directorCheck(self.backup_temp_dir)

    def backupDecisionByWeekDay(self):
        """
        Decide, from today's day of week, whether to run a full or a
        differential backup.  Returns one of the backup_types values, or
        None when the day appears in neither configured list.

        BUG FIX: the config uses ISO day numbers 1..7 (default full day is
        "7" = Sunday), but the original code called datetime.weekday(),
        which yields 0..6 -- so day "7" could never match and Sunday's full
        backup never triggered.  isoweekday() yields 1..7 as the config
        expects.
        """
        weekday = str(self.current.isoweekday())
        if weekday in self.full_backup_days:
            logging.info("今天星期 {0} 根据配置文件中的备份计划,决定进行全备".format(weekday))
            return self.backup_types['full_backup']
        elif weekday in self.diff_backup_days:
            logging.info("今天星期 {0} 根据配置文件中的备份计划,决定进行差异备份".format(weekday))
            return self.backup_types['diff_backup']

    @property
    def strCurrent(self):
        """
        Current timestamp as a string (e.g. 2018-07-26T16:42:00).
        """
        return self.current.isoformat(timespec='seconds')

    @property
    def latestBackupSet(self):
        """
        Name of the newest backup set, or None when no backup set exists.
        """
        backup_sets = [backup_set for backup_set in os.listdir(self.backup_data_dir)
                       if os.path.isdir(os.path.join(self.backup_data_dir, backup_set))]
        # the directory names are ISO timestamps, so the last one listed is
        # taken as the newest usable backup set
        if len(backup_sets) >= 1:
            return backup_sets[-1]
        else:
            return None

    def fullbackup(self):
        raise NotImplementedError("请在子类中实现全备功能")

    def diffbackup(self):
        raise NotImplementedError("请在子类中实现差异备份功能")

    def backup(self):
        """
        Dispatch to full or differential backup; default to a full backup
        when the plan yields no decision.
        """
        decision = self.backupDecisionByWeekDay()

        if decision == self.backup_types['full_backup']:
            self.fullbackup()
        elif decision == self.backup_types['diff_backup']:
            self.diffbackup()
        else:
            self.fullbackup()


class Xtrabackup(BackupTool):
    """
    Wrapper around xtrabackup.
    """
    full_backup_script = None
    diff_backup_script = None

    def __init__(self, mtlsconf):
        super().__init__(mtlsconf)
        self.full_backup_script = mtlsconf['xtrabackup']['full_backup_script']
        self.diff_backup_script = mtlsconf['xtrabackup']['diff_backup_script']

    @property
    def isLatestFullBackupSuccess(self):
        """
        Whether the most recent full backup completed successfully.

        Side effect: on success, reads to_lsn from xtrabackup_checkpoints
        and stores it on self.tolsn for the differential run.
        """
        logging.info("准备检查最近一次的全备是否成功...")
        # no backup set at all -> there cannot be a successful full backup
        if super().latestBackupSet is None:
            logging.warning("没有可用的备份集(全备))")
            return False
        else:
            # build the path to the latest backup set's full-backup log
            logging.info("检查最后一个备份集{0}的可用性".format(self.latestBackupSet))
            xtrabackup_log = os.path.join(self.backup_data_dir, self.latestBackupSet, self.latestBackupSet + '-full.log')
            logging.info("检查{0}".format(xtrabackup_log))
            if (not os.path.exists(xtrabackup_log)) or (not os.path.isfile(xtrabackup_log)):
                logging.warning("{0} 不存在或它并不是一个文件".format(xtrabackup_log))
                return False

            with open(xtrabackup_log) as xlf:
                last = [line for line in xlf][-1]
            if 'completed OK!' in last:
                logging.warning("检查到最后一个全备 备份成功")
                xtrabackup_checkpoints = os.path.join(self.backup_log_dir, self.latestBackupSet, 'xtrabackup_checkpoints')
                with open(xtrabackup_checkpoints) as xcf:
                    line = [line for line in xcf if 'to_lsn' in line][0]
                    *_, tolsn = line.split(' ')
                    self.tolsn = tolsn.strip()
                    logging.info("从xtrabackup_checkpoints文件中读到tolsn={0}".format(self.tolsn))
                return True
            else:
                logging.warning("检查到最后一个全备 没有备份成功")
                return False

    def clearnBackupSets(self):
        """
        Purge old backup sets (a full backup plus its differentials),
        keeping only the newest one, together with their lsn logs.
        """
        backup_sets = [backup_set for backup_set in os.listdir(self.backup_data_dir)
                       if os.path.isdir(os.path.join(self.backup_data_dir, backup_set))]
        if len(backup_sets) >= 2:
            logging.info("备份集的数量为{0}大于2 准备清理备份集".format(len(backup_sets)))
            backup_sets = sorted(backup_sets)
            for backup_set in backup_sets[0:-1]:
                temp = os.path.join(self.backup_data_dir, backup_set)
                logging.info("清理备份集 {0}".format(temp))
                shutil.rmtree(temp)

            # purge the lsn logs belonging to the removed sets
            # NOTE(review): relies on the loop variable backup_set keeping
            # its last value (the newest removed set) after the loop.
            lsns = [lsn for lsn in os.listdir(self.backup_log_dir)
                    if os.path.isdir(os.path.join(self.backup_log_dir, lsn)) and lsn <= backup_set]
            for lsn in lsns:
                temp = os.path.join(self.backup_log_dir, lsn)
                logging.info("清理lsn日志 {0}".format(temp))
                shutil.rmtree(temp)

    def fullbackup(self):
        """
        Run a full backup.
        """
        self.clearnBackupSets()

        # path that will hold the full backup
        # (/database/backups/3306/data/2018-07-26T16:42:00/)
        full_backup_dir = os.path.join(self.backup_data_dir, self.strCurrent)

        full_backup_file = os.path.join(self.backup_data_dir, self.strCurrent, self.strCurrent + '-full.xbstream')
        full_backup_log_file = os.path.join(self.backup_data_dir, self.strCurrent, self.strCurrent + '-full.log')

        self.full_backup_file = full_backup_file
        self.full_backup_log_file = full_backup_log_file

        # directory that keeps the lsn files
        self.lsndir = os.path.join(self.backup_log_dir, self.strCurrent)
        os.makedirs(self.lsndir)

        # create the directory holding the full backup
        logging.info("创建用于保存全备的目录 {0}".format(full_backup_dir))
        os.makedirs(full_backup_dir)

        # render the command template against this instance's attributes
        full_backup_script = self.full_backup_script.format(self=self)

        logging.info("使用如下命令对MySQL数据库进行全备 {full_backup_script}".format(full_backup_script=full_backup_script))

        # execute the full backup
        os.system(full_backup_script)

    def diffbackup(self):
        """
        Run a differential backup on top of the latest full backup; fall
        back to a full backup when no successful full backup exists.
        """
        logging.info("进入差异备份流程")
        # a differential backup requires a successful full backup first
        if self.isLatestFullBackupSuccess:
            # directory that keeps the lsn files
            self.lsndir = os.path.join(self.backup_log_dir, self.strCurrent)
            os.makedirs(self.lsndir)

            # the differential backup lives inside the backup-set directory
            diff_backup_dir = os.path.join(self.backup_data_dir, self.latestBackupSet)
            self.diff_backup_file = os.path.join(diff_backup_dir, self.strCurrent + '-diff.xbstream')
            self.diff_backup_log_file = os.path.join(diff_backup_dir, self.strCurrent + '-diff.log')

            # render the command template against this instance's attributes
            diff_backup_script = self.diff_backup_script.format(self=self)

            logging.info("使用如下命令对MySQL数据库进行差异备 {0} ".format(diff_backup_script))

            os.system(diff_backup_script)
        else:
            # no usable full backup: take a full backup instead
            self.fullbackup()


class Meb(BackupTool):
    pass


class MysqlDump(BackupTool):
    pass


backup_tools_map = {
    'xtrabackup': Xtrabackup
}


def main(mtlsconf):
    """Load the config file, build the configured tool and run one backup."""
    logging.info("read config file {0}".format(mtlsconf))
    config = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
    config.read(mtlsconf)
    tool_name = config['global']['backup_tool']
    tool = backup_tools_map[tool_name](config)
    tool.backup()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--conf', default='/etc/mtlsbackup.cnf', help='mtlsbackup.py config file')
    args = parser.parse_args()
    main(args.conf)


# ===========================================================================
# build/scripts-3.6/mtlslog
# (original shebang: #!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3)
# ===========================================================================
#-*- coding: UTF-8 -*-

import re
from collections import Counter


def log_slice(args, slow_log_handler):
    """Print the slice of the slow log between --starttime and --endtime."""
    _found = False
    _charset = args.charset
    _starttime = args.starttime.encode(_charset)
    _endtime = args.endtime.encode(_charset)
    # scan the whole file looking for the target window
    for line in slow_log_handler:
        if _found:
            # inside the window: print, and stop once the end marker passes
            print(line.decode(_charset), end='')
            if _endtime in line:
                break
            continue
        # not inside yet: look for the start marker
        if _starttime in line:
            _found = True
            print(line.decode(_charset), end='')


def hot_table(args, slow_log_handler):
    """Count the tables referenced by the slow queries."""
    _charset = args.charset
    c = Counter()
    for line in slow_log_handler:
        table_name = re.search(r" \S*\.\S* ", line.decode(_charset))
        if table_name is not None:
            # skip decimal literals that also match the dotted pattern
            _is_digist = re.search(r"[0-9]{1,10}\.[0-9]{1,10}", table_name.group())
            if _is_digist is None:
                c.update({table_name.group(): 1})
    # formatted output
    print("{0:<32} {1}".format("table_name".upper(), "counter".upper()))
    print("-" * 48)
    for tbl, counter in c.most_common(args.top):
        print("{0:<32} {1}".format(tbl, counter))


def hot_uid(args, slow_log_handler):
    """Count the FUId values appearing in the slow queries."""
    c = Counter()
    _charset = args.charset
    for line in slow_log_handler:
        # strip all single and double quotes first
        _line = line.decode(_charset).replace(r"'", "").replace(r'"', '')
        try:
            all_fuid = re.findall(r"FUId = \d*", _line)
            if len(all_fuid) > 0:
                fuid = [target for target in all_fuid if len(target) > 7][0]
                c.update({fuid: 1})
        except Exception as e:
            print(e)
            exit()
    # formatted output
    print("{0:<32} {1}".format("FUId".upper(), "counter".upper()))
    print("-" * 48)
    for uid, counter in c.most_common(args.top):
        print("{0:<32} {1}".format(uid, counter))


def hot_client(args, slow_log_handler):
    """Count the client hosts of the connections issuing slow queries."""
    c = Counter()
    _charset = args.charset
    for line in slow_log_handler:
        _line = line.decode(_charset)
        if '# User@Host:' in _line:
            *_, _host = _line.split()
            host = _host[1:-1]
            c.update({host: 1})
    # formatted output
    print("{0:<32} {1}".format("client_host_ip".upper(), "counter".upper()))
    print("-" * 48)
    for host_ip, counter in c.most_common(args.top):
        print("{0:<32} {1}".format(host_ip, counter))


operations = {
    'log_slice': log_slice,
    'hot_table': hot_table,
    'hot_uid': hot_uid,
    'hot_client': hot_client
}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--slow-log-file', help='slow log file absolute path')
    parser.add_argument('--starttime', help='slow log start time flag')
    parser.add_argument('--endtime', help='slow log end time flag')
    parser.add_argument('--charset', default='latin1')
    parser.add_argument('--top', default=7, type=int)

    parser.add_argument('operation', choices=operations.keys())
    args = parser.parse_args()
    with open(args.slow_log_file, 'br') as slow_log_handler:
        operations[args.operation](args, slow_log_handler)

# (next file in the dump: build/scripts-3.6/mtlsmonitor)
-------------------------------------------------------------------------------- 1 | #!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3 2 | #-*- coding: UTF-8 -*- 3 | 4 | from mtls import statu,variable,mgr,replication 5 | import argparse 6 | 7 | #--------------------------------------- 8 | #monitor.py 用于实现对 mysql 的监控 9 | #--------------------------------------- 10 | 11 | #定义最基本的mysql监控项 12 | basic_items={ 13 | #定义mysql绝大多数variable 主要用于信息收集 对于性能监控的意思不大 但是有助于分析问题 14 | 'ServerID':variable.ServerID, 15 | 'BaseDir':variable.BaseDir, 16 | 'DataDir':variable.DataDir, 17 | 'Port':variable.Port, 18 | 'CharacterSetServer':variable.CharacterSetServer, 19 | 'Socket':variable.Socket, 20 | 'ReadOnly':variable.ReadOnly, 21 | 'SkipNameResolve': variable.SkipNameResolve, 22 | 'LowerCaseTableNames':variable.LowerCaseTableNames, 23 | 'ThreadCacheSize':variable.ThreadCacheSize, 24 | 'TableOpenCache':variable.TableOpenCache, 25 | 'TableDefinitionCache':variable.TableDefinitionCache, 26 | 'TableOpenCacheInstances':variable.TableOpenCacheInstances, 27 | 'MaxConnections':variable.MaxConnections, 28 | 'BinlogFormat':variable.BinlogFormat, 29 | 'LogBin':variable.LogBin, 30 | 'BinlogRowsQueryLogEvents':variable.BinlogRowsQueryLogEvents, 31 | 'LogSlaveUpdates':variable.LogSlaveUpdates, 32 | 'ExpireLogsDays':variable.ExpireLogsDays, 33 | 'BinlogCacheSize':variable.BinlogCacheSize, 34 | 'SyncBinlog':variable.SyncBinlog, 35 | 'ErrorLog':variable.ErrorLog, 36 | 'GtidMode':variable.GtidMode, 37 | 'EnforceGtidConsistency':variable.EnforceGtidConsistency, 38 | 'MasterInfoRepository': variable.MasterInfoRepository, 39 | 'RelayLogInfoRepository':variable.RelayLogInfoRepository, 40 | 'SlaveParallelType':variable.SlaveParallelType, 41 | 'SlaveParallelWorkers':variable.SlaveParallelWorkers, 42 | 'InnodbDataFilePath':variable.InnodbDataFilePath, 43 | 'InnodbTempDataFilePath':variable.InnodbTempDataFilePath, 44 | 'InnodbBufferPoolFilename':variable.InnodbBufferPoolFilename, 45 | 
'InnodbLogGroupHomeDir':variable.InnodbLogGroupHomeDir, 46 | 'InnodbLogFilesInGroup':variable.InnodbLogFilesInGroup, 47 | 'InnodbLogFileSize':variable.InnodbLogFileSize, 48 | 'InnodbFileformat':variable.InnodbFileformat, 49 | 'InnodbFilePerTable':variable.InnodbFilePerTable, 50 | 'InnodbOnlineAlterLogMaxSize':variable.InnodbOnlineAlterLogMaxSize, 51 | 'InnodbOpenFiles':variable.InnodbOpenFiles, 52 | 'InnodbPageSize':variable.InnodbPageSize, 53 | 'InnodbThreadConcurrency':variable.InnodbThreadConcurrency, 54 | 'InnodbReadIoThreads':variable.InnodbReadIoThreads, 55 | 'InnodbWriteIoThreads':variable.InnodbWriteIoThreads, 56 | 'InnodbPurgeThreads':variable.InnodbPurgeThreads, 57 | 'InnodbLockWaitTimeout':variable.InnodbLockWaitTimeout, 58 | 'InnodbSpinWaitDelay':variable.InnodbSpinWaitDelay, 59 | 'InnodbAutoincLockMode':variable.InnodbAutoincLockMode, 60 | 'InnodbStatsAutoRecalc':variable.InnodbStatsAutoRecalc, 61 | 'InnodbStatsPersistent':variable.InnodbStatsPersistent, 62 | 'InnodbStatsPersistentSamplePages':variable.InnodbStatsPersistentSamplePages, 63 | 'InnodbBufferPoolInstances':variable.InnodbBufferPoolInstances, 64 | 'InnodbAdaptiveHashIndex': variable.InnodbAdaptiveHashIndex, 65 | 'InnodbChangeBuffering':variable.InnodbChangeBuffering, 66 | 'InnodbChangeBufferMaxSize':variable.InnodbChangeBufferMaxSize, 67 | 'InnodbFlushNeighbors':variable.InnodbFlushNeighbors, 68 | 'InnodbFlushMethod':variable.InnodbFlushMethod, 69 | 'InnodbDoublewrite':variable.InnodbDoublewrite, 70 | 'InnodbLogBufferSize':variable.InnodbLogBufferSize, 71 | 'InnodbFlushLogAtTimeout':variable.InnodbFlushLogAtTimeout, 72 | 'InnodbFlushLogAtTrxCommit':variable.InnodbFlushLogAtTrxCommit, 73 | 'InnodbBufferPoolSize':variable.InnodbBufferPoolSize, 74 | 'Autocommit':variable.Autocommit, 75 | 'InnodbOldBlocksPct':variable.InnodbOldBlocksPct, 76 | 'InnodbOldBlocksTime':variable.InnodbOldBlocksTime, 77 | 'InnodbReadAheadThreshold':variable.InnodbReadAheadThreshold, 78 | 
'InnodbRandomReadAhead':variable.InnodbRandomReadAhead, 79 | 'InnodbBufferPoolDumpPct':variable.InnodbBufferPoolDumpPct, 80 | 'InnodbBufferPoolDumpAtShutdown':variable.InnodbBufferPoolDumpAtShutdown, 81 | 'InnodbBufferPoolLoadAtStartup':variable.InnodbBufferPoolLoadAtStartup, 82 | 'QueryCacheLimit':variable.QueryCacheLimit, 83 | 'QueryCacheMinResUnit':variable.QueryCacheMinResUnit, 84 | 'QueryCacheSize':variable.QueryCacheSize, 85 | 'QueryCacheType':variable.QueryCacheType, 86 | 87 | #定义mysql绝大多数status 主要用于性能监控 88 | 'AbortedClients':statu.AbortedClients, 89 | 'AbortedConnects':statu.AbortedConnects, 90 | 'BinlogCacheDiskUse':statu.BinlogCacheDiskUse, 91 | 'BinlogCacheUse':statu.BinlogCacheUse, 92 | 'BinlogStmtCacheDiskUse':statu.BinlogStmtCacheDiskUse, 93 | 'BinlogStmtCacheUse':statu.BinlogStmtCacheUse, 94 | 'BytesReceived':statu.BytesReceived, 95 | 'BytesSent':statu.BytesSent, 96 | 'ComBegin':statu.ComBegin, 97 | 'ComCallProcedure':statu.ComCallProcedure, 98 | 'ComChangeMaster':statu.ComChangeMaster, 99 | 'ComCommit':statu.ComCommit, 100 | 'ComDelete':statu.ComDelete, 101 | 'ComDeleteMulti':statu.ComDeleteMulti, 102 | 'ComInsert':statu.ComInsert, 103 | 'ComInsertSelect':statu.ComInsertSelect, 104 | 'ComSelect':statu.ComSelect, 105 | 'SelectFullJoin':statu.SelectFullJoin, 106 | 'SelectFullRangeJoin':statu.SelectFullRangeJoin, 107 | 'SelectRange':statu.SelectRange, 108 | 'SelectRangeCheck':statu.SelectRangeCheck, 109 | 'SelectScan':statu.SelectScan, 110 | 'ComUpdate':statu.ComUpdate, 111 | 'ComUpdateMulti':statu.ComUpdateMulti, 112 | 'Connections':statu.Connections, 113 | 'CreatedTmpDiskTables':statu.CreatedTmpDiskTables, 114 | 'CreatedTmpFiles':statu.CreatedTmpFiles, 115 | 'CreatedTmpTables':statu.CreatedTmpTables, 116 | 'InnodbBufferPoolDumpStatus':statu.InnodbBufferPoolDumpStatus, 117 | 'InnodbBufferPoolLoadStatus':statu.InnodbBufferPoolLoadStatus, 118 | 'InnodbBufferPoolResizeStatus':statu.InnodbBufferPoolResizeStatus, 119 | 
'InnodbBufferPoolBytesData':statu.InnodbBufferPoolBytesData, 120 | 'InnodbBufferPoolPagesData':statu.InnodbBufferPoolPagesData, 121 | 'InnodbBufferPoolPagesDirty':statu.InnodbBufferPoolPagesDirty, 122 | 'InnodbBufferPoolBytesDirty':statu.InnodbBufferPoolBytesDirty, 123 | 'InnodbBufferPoolPagesFlushed':statu.InnodbBufferPoolPagesFlushed, 124 | 'InnodbBufferPoolPagesFree':statu.InnodbBufferPoolPagesFree, 125 | 'InnodbBufferPoolPagesMisc':statu.InnodbBufferPoolPagesMisc, 126 | 'InnodbBufferPoolPagesTotal':statu.InnodbBufferPoolPagesTotal, 127 | 'InnodbBufferPoolReadAhead':statu.InnodbBufferPoolReadAhead, 128 | 'InnodbBufferPoolReadAheadEvicted':statu.InnodbBufferPoolReadAheadEvicted, 129 | 'InnodbBufferPoolReadRequests':statu.InnodbBufferPoolReadRequests, 130 | 'InnodbBufferPoolReads':statu.InnodbBufferPoolReads, 131 | 'InnodbBufferPoolWaitFree':statu.InnodbBufferPoolWaitFree, 132 | 'InnodbBufferPoolWriteRequests':statu.InnodbBufferPoolWriteRequests, 133 | 'InnodbDataFsyncs':statu.InnodbDataFsyncs, 134 | 'InnodbDataPendingFsyncs':statu.InnodbDataPendingFsyncs, 135 | 'InnodbDataPendingReads':statu.InnodbDataPendingReads, 136 | 'InnodbDataPendingWrites':statu.InnodbDataPendingWrites, 137 | 'InnodbDataRead':statu.InnodbDataRead, 138 | 'InnodbDataReads':statu.InnodbDataReads, 139 | 'InnodbDataWrites':statu.InnodbDataWrites, 140 | 'InnodbDataWritten':statu.InnodbDataWritten, 141 | 'InnodbDblwrPagesWritten':statu.InnodbDblwrPagesWritten, 142 | 'InnodbDblwrWrites':statu.InnodbDblwrWrites, 143 | 'InnodbLogWaits':statu.InnodbLogWaits, 144 | 'InnodbLogWriteRequests':statu.InnodbLogWriteRequests, 145 | 'InnodbLogWrites':statu.InnodbLogWrites, 146 | 'InnodbOsLogFsyncs':statu.InnodbOsLogFsyncs, 147 | 'InnodbOsLogPendingFsyncs':statu.InnodbOsLogPendingFsyncs, 148 | 'InnodbOsLogPendingWrites':statu.InnodbOsLogPendingWrites, 149 | 'InnodbOsLogWritten':statu.InnodbOsLogWritten, 150 | 'InnodbPagesCreated':statu.InnodbPagesCreated, 151 | 'InnodbPagesRead':statu.InnodbPagesRead, 152 | 
'InnodbPagesWritten':statu.InnodbPagesWritten, 153 | 'InnodbRowLockCurrentWaits':statu.InnodbRowLockCurrentWaits, 154 | 'InnodbRowLockTime':statu.InnodbRowLockTime, 155 | 'InnodbRowLockTimeAvg':statu.InnodbRowLockTimeAvg, 156 | 'InnodbRowLockTimeMax':statu.InnodbRowLockTimeMax, 157 | 'InnodbRowLockWaits':statu.InnodbRowLockWaits, 158 | 'InnodbRowsDeleted':statu.InnodbRowsDeleted, 159 | 'InnodbRowsInserted':statu.InnodbRowsInserted, 160 | 'InnodbRowsRead':statu.InnodbRowsRead, 161 | 'InnodbRowsUpdated':statu.InnodbRowsUpdated, 162 | 'InnodbAvailableUndoLogs':statu.InnodbAvailableUndoLogs, 163 | 'OpenTableDefinitions':statu.OpenTableDefinitions, 164 | 'OpenTables':statu.OpenTables, 165 | 'OpenedTableDefinitions':statu.OpenedTableDefinitions, 166 | 'OpenedTables':statu.OpenedTables, 167 | 'QcacheFreeBlocks':statu.QcacheFreeBlocks, 168 | 'QcacheFreeMemory':statu.QcacheFreeMemory, 169 | 'QcacheHits':statu.QcacheHits, 170 | 'QcacheInserts':statu.QcacheInserts, 171 | 'QcacheLowmemPrunes':statu.QcacheLowmemPrunes, 172 | 'QcacheNotCached':statu.QcacheNotCached, 173 | 'QcacheQueriesInCache':statu.QcacheQueriesInCache, 174 | 'QcacheTotalBlocks':statu.QcacheTotalBlocks, 175 | 'SlowQueries':statu.SlowQueries, 176 | 'TableLocksImmediate':statu.TableLocksImmediate, 177 | 'TableLocksWaited':statu.TableLocksWaited, 178 | 'TableOpenCacheOverflows':statu.TableOpenCacheOverflows, 179 | 'ThreadsCached':statu.ThreadsCached, 180 | 'ThreadsConnected':statu.ThreadsConnected, 181 | 'ThreadsCreated':statu.ThreadsCreated, 182 | 'ThreadsRunning':statu.ThreadsRunning, 183 | 'Uptime':statu.Uptime 184 | } 185 | 186 | #定义mysql主从复制时用到的监控项 187 | repl_items={} 188 | 189 | #定义mysql-group-replication时用到的监控项 190 | mgr_items={ 191 | 'MgrTotalMemberCount':mgr.MgrTotalMemberCount, 192 | 'MgrOnLineMemberCount':mgr.MgrOnLineMemberCount, 193 | 'MgrMemberState':mgr.MgrMemberState, 194 | 'MgrCountTransactionsInQueue':mgr.MgrCountTransactionsInQueue, 195 | 
def export_zabbix_agent_config_file():
    """Print one zabbix ``UserParameter`` line per defined monitor item.

    Deployment helper: this script is meant to back zabbix custom keys, so
    every name in the module-level ``monitor_item_names`` list (except the
    pseudo item ``export`` itself) is rendered as a zabbix key definition
    on stdout.
    """
    template = ("UserParameter=mysql{0}[*],/usr/local/mtls/monitor.py "
                "-u=$1 -p=$2 -s=$3 -P=$4 {0} 2>>/var/log/mtls/monitor.log")
    for item_name in monitor_item_names:
        if item_name != 'export':
            print(template.format(item_name))
parser.add_argument('-d','--database',default='information_schema',help='current database default information_schema') 238 | parser.add_argument('monitor_item_name',choices=monitor_item_names) 239 | args=parser.parse_args() 240 | if args.monitor_item_name =='export': 241 | export_zabbix_agent_config_file() 242 | exit() 243 | m=monitor_items[args.monitor_item_name](host=args.host,port=args.port,user=args.user,password=args.password,database=args.database) 244 | print(m.value) 245 | 246 | 247 | -------------------------------------------------------------------------------- /conf/mtlsbackup.cnf: -------------------------------------------------------------------------------- 1 | [global] 2 | backup_tool=xtrabackup #备份工具xtrabackup,mysqldump,meb 之一 3 | user=backup #备份用户(mysql级别) 静态值请不要修改 4 | password=DX3906 #密码 静态值请不要修改 5 | host=127.0.0.1 #主机 静态值请不要修改 6 | port=3306 #端口 静态值请不要修改 7 | full_backup_days=6 #指定哪些天做全备 6-->周日 5-->周六 4-->周五... ... 8 | diff_backup_days=0,1,2,3,4,5 #指定哪些天做差异备 6-->周日 5-->周六 4-->周五... ... 
9 | backup_data_dir=/database/backups/3306/data/ #备份保存的路径 10 | backup_log_dir=/database/backups/3306/log/ #使用xtrackup备份时check_point文件的目录 11 | backup_temp_dir=/database/backups/3306/temp/ #xtrabackup的工作目录 12 | 13 | [xtrabackup] 14 | full_backup_script=/usr/local/xtrabackup/bin/xtrabackup --defaults-file=/etc/my.cnf --host={self.host} --port={self.port} --user={self.user} --password={self.password} --no-version-check --compress --compress-threads=4 --use-memory=200M --stream=xbstream --parallel=8 --backup --extra-lsndir={self.lsndir} --target-dir={self.backup_temp_dir} > {self.full_backup_file} 2>{self.full_backup_log_file} & 15 | diff_backup_script=/usr/local/xtrabackup/bin/xtrabackup --defaults-file=/etc/my.cnf --host={self.host} --port={self.port} --user={self.user} --password={self.password} --no-version-check --compress --compress-threads=4 --use-memory=200M --stream=xbstream --parallel=8 --backup --extra-lsndir={self.lsndir} --target-dir={self.backup_temp_dir} --incremental --incremental-lsn={self.tolsn} > {self.diff_backup_file} 2>{self.diff_backup_log_file} & 16 | 17 | -------------------------------------------------------------------------------- /imgs/cpu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/cpu.png -------------------------------------------------------------------------------- /imgs/ibrw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/ibrw.png -------------------------------------------------------------------------------- /imgs/mem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/mem.png 
-------------------------------------------------------------------------------- /imgs/mp-wechat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/mp-wechat.jpg -------------------------------------------------------------------------------- /imgs/mtlshttp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/mtlshttp.png -------------------------------------------------------------------------------- /imgs/net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/net.png -------------------------------------------------------------------------------- /imgs/reads.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/reads.png -------------------------------------------------------------------------------- /imgs/rs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/rs.png -------------------------------------------------------------------------------- /imgs/t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neeky/mysqltools-python/3b632790c690dac1d021d081923c8bb523d3301b/imgs/t.png -------------------------------------------------------------------------------- /imgs/writes.png: -------------------------------------------------------------------------------- 
def is_mysql_port(port):
    """Probe ``127.0.0.1:port`` and report whether a MySQL server answers.

    Returns *port* when the listening service speaks the MySQL protocol,
    ``None`` otherwise (connection refused, timeout, or a non-MySQL
    greeting).
    """
    client_socket = None
    try:
        client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        # fix: arm the timeout BEFORE connect()/recv() — the original set it
        # only after connect, so a silent or unroutable service could hang
        # the probe indefinitely
        client_socket.settimeout(0.1)
        client_socket.connect(('127.0.0.1',port))
        # the MySQL protocol is server-greets-first: the server pushes its
        # handshake packet as soon as the TCP connection is established
        message = client_socket.recv(1024)
        message = message.decode('latin-1').lower()
        if 'password' in message:
            return port
        return None
    except Exception:
        return None
    finally:
        # fix: socket creation itself may have failed, leaving None here —
        # the original unconditionally called close() and raised
        if client_socket is not None:
            client_socket.close()
class ConnectorBase(object):
    """A lazily-connected handle to a MySQL server.

    The ``.cursor`` property opens the connection on first access and
    returns a cursor object; ``.logger`` exposes a configured logger.
    Connection failures are logged and terminate the process, matching the
    CLI-tool style of the rest of this package.
    """
    # class-level defaults kept for backward compatibility with code that
    # reads them off the class before an instance exists
    user='mtsuser'
    password='mts10352'
    host='127.0.0.1'
    port=3306
    _cnx=None
    _cursor=None
    _logger=None

    def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352',database='information_schema',*args,**kws):
        self.host=host
        self.port=port
        self.user=user
        self.password=password
        self.database=database
        self._cnx=None
        self._cursor=None
        self._logger=None

    @property
    def cursor(self):
        """Return a cursor, connecting to the server on first use.

        On connection errors the problem is logged, resources are released
        and the process exits.
        """
        if self._cursor is not None:
            return self._cursor
        try:
            self._cnx=mysql.connector.connect(user=self.user,password=self.password,host=self.host,port=self.port,database=self.database)
            self._cursor=self._cnx.cursor()
            return self._cursor
        except Exception as e:
            self.logger.info(str(e))
            self.logger.info("exit")
            self.close()
            exit()

    @property
    def logger(self):
        """Return (and lazily configure) this instance's logger."""
        if self._logger is not None:
            return self._logger
        self._logger=logging.getLogger("mts.base.{0}".format(self.__class__))
        stream_handler=logging.StreamHandler()
        formater=logging.Formatter("%(asctime)-24s %(levelname)-8s %(name)-24s : %(message)s")
        stream_handler.setFormatter(formater)
        self._logger.addHandler(stream_handler)
        self._logger.setLevel(logging.DEBUG)
        return self._logger

    def __str__(self):
        """Human readable representation (NOTE: includes the password)."""
        obj_str="{0.__class__} instance (host={0.host},port={0.port},user={0.user},password={0.password} )".format(self)
        return obj_str

    def __del__(self):
        # fix: never raise out of a destructor — at interpreter shutdown the
        # modules used by the close path may already be torn down
        try:
            self.close()
        except Exception:
            pass

    def close(self):
        """Release the cursor and the connection; safe to call repeatedly."""
        # fix: the original leaked the cursor object and could double-close
        # the connection; reset both to None after closing
        if self._cursor is not None:
            try:
                self._cursor.close()
            except Exception:
                pass
            self._cursor=None
        if self._cnx is not None:
            self._cnx.close()
            self._cnx=None
class StatuBase(ConnectorBase):
    """Fetch a single value via ``show global status like '<name>'``.

    Subclasses (or callers) set ``statu_name``; the raw string value is
    cached on first access through the ``.value`` property.
    """
    statu_name="uptime"
    statu_type="intger"
    # accepted statu_type labels (informational only; no formatting is done)
    _statu_types=("string","byte","intger","percent","bool")
    _value=None

    def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352',
        statu_name=None,statu_type="intger",*args,**kw):
        super(StatuBase,self).__init__(host,port,user,password)
        if statu_name is not None:
            self.statu_name=statu_name
        self.statu_type=statu_type
        self._value=None

    def _get_value(self):
        """Query the server once and cache the raw status value.

        Logs and exits the process when the status name yields no row or
        the query fails — matching the CLI-tool error style of this module.
        """
        if self._value is not None:
            return self._value
        try:
            self.cursor.execute("show global status like '{0}' ;".format(self.statu_name))
            name_and_value=self.cursor.fetchone()
            if name_and_value is None:
                self.logger.info("get a None value for statu {0} ".format(self.statu_name))
                self.close()
                exit()
            _name,value=name_and_value
            self._value=value
            return self._value
        except Exception as e:
            self.logger.info(str(e))
            self.close()
            exit()

    @property
    def value(self):
        # lazily resolved and cached on first access
        if self._value is None:
            self._value = self._get_value()
        return self._value
'Last_SQL_Error':37, 304 | 'Replicate_Ignore_Server_Ids':38, 305 | 'Master_Server_Id':39, 306 | 'Master_UUID':40, 307 | 'Master_Info_File':41, 308 | 'SQL_Delay':42, 309 | 'SQL_Remaining_Delay':43, 310 | 'Slave_SQL_Running_State':44, 311 | 'Master_Retry_Count':45, 312 | 'Master_Bind':46, 313 | 'Last_IO_Error_Timestamp':47, 314 | 'Last_SQL_Error_Timestamp':48, 315 | 'Master_SSL_Crl':49, 316 | 'Master_SSL_Crlpath':50, 317 | 'Retrieved_Gtid_Set':51, 318 | 'Executed_Gtid_Set':52, 319 | 'Auto_Position':53, 320 | 'Replicate_Rewrite_DB':54, 321 | 'Channel_Name':55, 322 | 'Master_TLS_Version':56, 323 | 'Master_public_key_path':57, 324 | 'Get_master_public_key':58 325 | } 326 | dimensions_55 = { 327 | 'Slave_IO_State': 0, 328 | 'Master_Host': 1, 329 | 'Master_User': 2, 330 | 'Master_Port': 3, 331 | 'Connect_Retry': 4, 332 | 'Master_Log_File': 5, 333 | 'Read_Master_Log_Pos': 6, 334 | 'Relay_Log_File': 7, 335 | 'Relay_Log_Pos': 8, 336 | 'Relay_Master_Log_File': 9, 337 | 'Slave_IO_Running': 10, 338 | 'Slave_SQL_Running': 11, 339 | 'Replicate_Do_DB': 12, 340 | 'Replicate_Ignore_DB': 13, 341 | 'Replicate_Do_Table': 14, 342 | 'Replicate_Ignore_Table': 15, 343 | 'Replicate_Wild_Do_Table': 16, 344 | 'Replicate_Wild_Ignore_Table': 17, 345 | 'Last_Errno': 18, 346 | 'Last_Error': 19, 347 | 'Skip_Counter': 20, 348 | 'Exec_Master_Log_Pos': 21, 349 | 'Relay_Log_Space': 22, 350 | 'Until_Condition': 23, 351 | 'Until_Log_File': 24, 352 | 'Until_Log_Pos': 25, 353 | 'Master_SSL_Allowed': 26, 354 | 'Master_SSL_CA_File': 27, 355 | 'Master_SSL_CA_Path': 28, 356 | 'Master_SSL_Cert': 29, 357 | 'Master_SSL_Cipher': 30, 358 | 'Master_SSL_Key': 31, 359 | 'Seconds_Behind_Master': 32, 360 | 'Master_SSL_Verify_Server_Cert': 33, 361 | 'Last_IO_Errno': 34, 362 | 'Last_IO_Error': 35, 363 | 'Last_SQL_Errno': 36, 364 | 'Last_SQL_Error': 37, 365 | 'Replicate_Ignore_Server_Ids': 38, 366 | 'Master_Server_Id': 39, 367 | 'Cur_Exec_Relay_Log_File': 40, 368 | 'Cur_Exec_Relay_Log_Pos': 41, 369 | 
class ShowMaster(ConnectorBase):
    """Extract one field from the output of ``show master status``.

    Subclasses set ``show_master_name`` to one of the keys in
    ``dimensions``; ``.value`` returns that column, cached after the first
    query.
    """
    show_master_name = None
    # column name -> position in the "show master status" result row
    dimensions = {
        'File':0,
        'Position':1,
        'Binlog_Do_DB':2,
        'Binlog_Ignore_DB':3,
        'Executed_Gtid_Set':4,
    }

    def __init__(self,host='127.0.0.1',port=3306,user='mtsuser',password='mts10352',*args,**kw):
        super().__init__(host,port,user,password)
        self._value=None

    def _get_value(self):
        """Run ``show master status`` once and cache the requested column.

        Logs and exits on any failure, including the previously-unhandled
        case where binary logging is disabled and the statement returns no
        row (the original crashed into the except branch with a raw
        ``TypeError`` message).
        """
        if self._value is not None:
            return self._value
        try:
            self.cursor.execute("show master status")
            data = self.cursor.fetchone()
            if data is None:
                # fix: no row means binary logging is off — report that
                # clearly instead of logging "'NoneType' is not subscriptable"
                self.logger.info("show master status returned no row (is log_bin enabled?)")
                self.close()
                exit()
            index = self.dimensions[self.show_master_name]
            self._value = data[index]
            return self._value
        except Exception as e:
            self.logger.info(str(e))
            self.close()
            exit()

    @property
    def value(self):
        # lazily resolved and cached on first access
        if self._value is None:
            self._value = self._get_value()
        return self._value
def check_python_version() -> None:
    """Abort unless running under Python 3.

    Writes a short notice to stderr and exits with status 1 when the
    interpreter's major version is 2 or lower; returns None otherwise.
    """
    major = sys.version_info.major
    if major > 2:
        return
    print("only support python-3.x", file=sys.stderr)
    sys.exit(1)
class Inspection(base.ConnectorBase):
    """
    Base class for all inspection items.

    Subclasses are expected to override ``value`` and ``suggestion``; the
    versions defined here log that the item is abstract and terminate.
    """
    name = None
    _statu = None
    _variable = None

    @property
    def value(self):
        """Abstract: the measured value of this inspection item."""
        self.logger.info("mts.inspection.Inspection.value function is abstract")
        self.close()
        exit()

    @property
    def suggestion(self):
        """Abstract: the tuning suggestion for this inspection item."""
        self.logger.info("mts.inspection.Inspection.suggestion function is abstract")
        self.close()
        exit()

    @property
    def logger(self):
        """Lazily created per-class logger, cached on first access.

        NOTE(review): reads ``self._logger`` before ever assigning it, so it
        assumes ``base.ConnectorBase`` initialises ``_logger`` -- confirm.
        """
        # guard clause instead of if/else; `is not None` instead of `!= None`
        if self._logger is not None:
            return self._logger
        self._logger = logging.getLogger("mts.inspection.{0}".format(self.__class__))
        stream_handler = logging.StreamHandler()
        # fixed local name typo: formater -> formatter
        formatter = logging.Formatter("%(asctime)-24s %(levelname)-8s %(name)-24s : %(message)s")
        stream_handler.setFormatter(formatter)
        self._logger.addHandler(stream_handler)
        self._logger.setLevel(logging.DEBUG)
        return self._logger
def fileformat(size, base=1000):
    """Convert a size in bytes to a human friendly string.

    With the default ``base=1000`` decimal suffixes (KB, MB, ...) are used;
    with ``base=1024`` binary (IEC) suffixes (KiB, MiB, ...) are used.

    BUG FIX: the original doctests claimed ``fileformat(1024)`` returned
    ``1.0 KiB`` although the default base is 1000 (which yields ``1.0 KB``),
    so ``doctest.testmod()`` always failed; the examples below match the
    actual behaviour.

    >>> print(fileformat(1024))
    1.0 KB
    >>> print(fileformat(1024, base=1024))
    1.0 KiB
    >>> print(fileformat(1024 * 1024, base=1024))
    1.0 MiB
    >>> print(fileformat(0))
    0.0 B
    >>> print(fileformat(6))
    6.0 B
    """
    if base not in (1000, 1024):
        # only decimal (1000) and binary (1024) progressions make sense
        raise ValueError("the base argument must be 1000 or 1024 .")

    size = float(size)
    if base == 1024:
        suffix = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        suffix = ['KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']

    if size < base:
        return f"{size} B"

    for i, suf in enumerate(suffix):
        unit = base ** (i + 2)
        if size < unit:
            return "{0:.1f} {1}".format(size * base / unit, suf)
    # larger than the biggest suffix: express it in the last unit anyway
    return "{0:.1f} {1}".format(size * base / unit, suf)
as msg ;"
    # name of the formatter method applied to the fetched scalar value
    format_type="raw_format"

class MgrTotalMemberCount(MgrBase):
    """
    Total number of nodes in the mysql-group-replication cluster.
    """
    scalar_stmt="select count(*) from performance_schema.replication_group_members ;"

class MgrOnLineMemberCount(MgrBase):
    """
    Number of nodes currently in the ONLINE state in the
    mysql-group-replication cluster.
    """
    scalar_stmt="select count(*) from performance_schema.replication_group_members where member_state='ONLINE' ;"

class MgrMemberState(MgrBase):
    """
    member_state of the current node.
    """
    scalar_stmt="""select member_state
                   from performance_schema.replication_group_members
                   where member_id=@@server_uuid;"""

    format_type="to_string_format"

class MgrCountTransactionsInQueue(MgrBase):
    """
    Number of transactions waiting for conflict detection.
    """
    scalar_stmt="""select count_transactions_in_queue
                   from performance_schema.replication_group_member_stats
                   where member_id=@@server_uuid;"""

class MgrCountTransactionsChecked(MgrBase):
    """
    Number of transactions that have finished conflict detection.
    """
    scalar_stmt="""select count_transactions_checked
                   from performance_schema.replication_group_member_stats
                   where member_id=@@server_uuid;"""

class MgrCountConflictsDetected(MgrBase):
    """
    Number of transactions that failed conflict detection.
    """
    scalar_stmt="""
    select count_conflicts_detected
    from performance_schema.replication_group_member_stats
    where member_id=@@server_uuid;
    """

class MgrTransactionsCommittedAllMembers(MgrBase):
    # GTID set of the transactions committed on every member of the group
    scalar_stmt=""" select transactions_committed_all_members
                    from performance_schema.replication_group_member_stats
                    where member_id=@@server_uuid;
    """
# -*- coding:utf8 -*-
"""
Helpers for sampling the memory usage of a MySQL server process.
"""
import time
import psutil
from collections import namedtuple
from mysql import connector


def mem_info_generator(pid):
    """Yield psutil memory-info snapshots for process *pid*, one per second."""
    process = psutil.Process(pid)
    while True:
        yield process.memory_info()
        time.sleep(1)


class MySQLMemCostGather(object):
    """Read the 'Global_connection_memory' status counter from a MySQL server."""
    def __init__(self,user,password,host="127.0.0.1",port=3306):
        # NOTE(review): the connection and cursor are never closed explicitly;
        # release relies on garbage collection / process exit.
        self.cnx = connector.connect(host=host,port=port,user=user,password=password)
        self.cursor = self.cnx.cursor()

    def global_connection_memory(self):
        # the status row comes back as a (name, value) pair; return the number
        self.cursor.execute("show global status like 'Global_connection_memory'")
        _,value = self.cursor.fetchone()
        return int(value)

def global_connection_memory_generator(port=3306,user="root",password="dbma@0352"):
    """Yield the server's global connection memory on every next() call.

    NOTE(review): any error (bad credentials, server down, status variable
    missing) is silently swallowed and the generator degrades to yielding 0
    forever -- confirm this best-effort behaviour is intended.  The default
    credentials are hard-coded; callers should always pass their own.
    """
    try:
        gather = MySQLMemCostGather(user,password,port=port)
        while True:
            yield gather.global_connection_memory()
            #time.sleep(1)
    except Exception as err:
        while True:
            yield 0
class RplSemiSyncMasterNoTx(StatuBase):
    """Count of commits that no slave acknowledged successfully."""
    statu_name = "Rpl_semi_sync_master_no_tx"

class RplSemiSyncMasterYesTx(StatuBase):
    """Count of commits that a slave acknowledged successfully."""
    statu_name = "Rpl_semi_sync_master_yes_tx"

class RplSemiSyncSlaveStatus(StatuBase):
    """1 when the semi-sync plugin is enabled and the slave I/O thread runs, else 0."""
    statu_name = "Rpl_semi_sync_slave_status"

class SlaveIORunning(ShowSlave):
    """Normalised Slave_IO_Running: 1 for YES, 0 otherwise, -2 on a master."""
    show_slave_name = "Slave_IO_Running"

    def _get_value(self):
        raw = super()._get_value()
        if raw == 'this node is master':
            return -2  # a master has no slave threads at all
        self._value = 1 if raw.upper() == 'YES' else 0
        return self._value

class SlaveSQLRunning(ShowSlave):
    """Normalised Slave_SQL_Running: 1 for YES, 0 otherwise, -2 on a master."""
    show_slave_name = "Slave_SQL_Running"

    def _get_value(self):
        raw = super()._get_value()
        if raw == 'this node is master':
            return -2  # a master has no slave threads at all
        self._value = 1 if raw.upper() == 'YES' else 0
        return self._value

class SecondsBehindMaster(ShowSlave):
    """Replication delay in seconds; -2 on a master, -1 when not parseable."""
    show_slave_name = "Seconds_Behind_Master"

    def _get_value(self):
        raw = super()._get_value()
        if raw == 'this node is master':
            return -2
        try:
            self._value = int(raw)
        except Exception:
            return -1
        return self._value
class MySQLSession(Thread):
    """A daemon thread that keeps one MySQL session busy with a fixed SQL statement."""

    def __init__(self, user_name, user_password, host="127.0.0.1", port=3306, sql="select 'mysqltools-python' as softname ;"):
        Thread.__init__(self)
        self.user_name = user_name
        self.user_password = user_password
        self.host = host
        self.port = port
        self.sql = sql

        self._conn = None
        self.daemon = True

        # BUG FIX: always assign the flag.  The original only set it when the
        # statement started with "select", so run() crashed with
        # AttributeError for any DML statement.
        self.is_select_sql = sql.lower().startswith("select")

    def run(self):
        # The connection is deliberately never closed: the session is held
        # open until the (daemon) thread dies with the process.
        cnx = connector.connect(host=self.host, port=self.port, user=self.user_name, password=self.user_password)
        cursor = cnx.cursor()
        while True:
            cursor.execute(self.sql)
            if self.is_select_sql:
                _ = cursor.fetchall()  # drain the result set before the next execute
            else:
                cnx.commit()
            time.sleep(0.5)


def create_sessions(user_name, user_password, host="127.0.0.1", port=3306, sql="select 'mysqltools-python' as softname ;", count=7):
    """Start *count* MySQLSession daemon threads and return them as a list.

    BUG FIX: the original built the list but never returned it; returning it
    lets callers join/inspect the sessions and is backward compatible with
    callers that ignore the result.
    """
    sessions = []
    for _ in range(count):
        session = MySQLSession(user_name, user_password, host, port, sql)
        sessions.append(session)
        session.start()
    return sessions
class Number(object):
    """
    Base class for all random numeric generators.

    Instances behave like endless pseudo-sequences: indexing, ``next()`` and
    iteration all yield fresh random integers in [min_value, max_value].
    """
    min_value = 0
    max_value = 127

    def __init__(self, min_value=None, max_value=None):
        """
        Paramter
        --------
        min_value: int

        max_value: int

        """
        self.min_value = min_value if min_value is not None else self.__class__.min_value
        self.max_value = max_value if max_value is not None else self.__class__.max_value

    def __getitem__(self, index):
        # index is ignored: every access draws a fresh random value
        return random.randint(self.min_value, self.max_value)

    def __len__(self):
        return self.max_value - self.min_value

    def __next__(self):
        return self[0]

    def __iter__(self):
        return self

TinyInt = Number

class Bool(Number):
    """Random boolean generator (parity of a random integer)."""
    def __getitem__(self, index):
        return (Number.__getitem__(self, index) % 2) == 0

class SmallInt(Number):
    """Random SMALLINT generator."""
    min_value = 0
    max_value = 32767

class MediumInt(Number):
    """Random MEDIUMINT generator."""
    max_value = 8388607

class Int(Number):
    """Random INT generator."""
    max_value = 2147483647

class BigInt(Number):
    """Random BIGINT generator."""
    max_value = 9223372036854775807

class Float(Int):
    """Random FLOAT generator: a random INT plus a random fraction."""
    def __getitem__(self, index):
        return Int.__getitem__(self, 0) + random.random()

Double = Float


class Char(object):
    """
    Random value generator for CHAR / VARCHAR columns.

    Generated strings contain only ASCII letters and digits and are capped
    at 24 characters regardless of the declared column length.
    """
    letters = [item for item in string.ascii_letters + string.digits]

    def __init__(self, max_length=None):
        """
        Paramter
        --------
        max_length: int
            requested string length; values above 24 are clamped to 24.
        """
        self.max_length = max_length if max_length is not None else 24
        self.letters = copy.copy(self.__class__.letters)

        # never generate more than 24 characters
        if self.max_length > 24:
            self.max_length = 24

    def __getitem__(self, index):
        # a shuffled prefix of the alphabet: random and duplicate-free
        random.shuffle(self.letters)
        return ''.join(self.letters[0:self.max_length])

    def __len__(self):
        return self.max_length

    def __next__(self):
        return self[0]

    def __iter__(self):
        return self

VarChar = Char


class Uuid(object):
    """Random UUID4 string generator (36-character canonical form)."""
    def __getitem__(self, index):
        return str(uuid.uuid4())

    def __len__(self):
        return 128

    def __next__(self):
        return self[0]

    def __iter__(self):
        return self


class DateTime(Number):
    """Random datetime generator ranging from the recent past up to 2038."""
    def __init__(self):
        now = int(datetime.now().timestamp())
        delta = int(now / 10)  # fixed local name typo: detal -> delta
        self.min_value = now - delta
        self.max_value = 2145888000  # stays inside the 32-bit timestamp range

    def __getitem__(self, index) -> datetime:
        tmp_second = random.randint(self.min_value, self.max_value)
        return datetime.fromtimestamp(tmp_second)


class Date(DateTime):
    """Random date generator (the date part of a random datetime)."""
    def __getitem__(self, index):
        # BUG FIX: the original called self.__class__.__getitem__, i.e.
        # itself, recursing until RecursionError; delegate to DateTime.
        return super().__getitem__(0).date()


class Time(DateTime):
    """Random time generator (the time part of a random datetime)."""
    def __getitem__(self, index):
        # BUG FIX: same infinite recursion as Date; use the parent class.
        return super().__getitem__(0).time()

Timestamp = DateTime
class TableMeta(object):
    """
    Query the column metadata of a given table and map every fillable
    column to a matching random value generator.
    """

    def __init__(self,host='127.0.0.1',port=3306,user='appuser',password="123456",database="tempdb",table="t"):
        """
        Connect to the server, read information_schema.columns for the given
        (database, table) and fill self.meta with (name, generator) pairs.
        On any error: log it, remember it in self.err, leave meta empty.
        """
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        self.table = table

        self.cnx = None
        self.meta = []   # list of (column_name, value_generator) pairs
        self.err = None  # last exception raised while reading metadata
        try:
            self.cnx = connector.connect(host=self.host,port=self.port,user=self.user,password=self.password)
            self.cursor = self.cnx.cursor()
            self.cursor.execute("select table_schema,table_name,column_name,column_type,extra from information_schema.columns where table_schema= %s and table_name= %s ;",(self.database,self.table))
            for _,_,column_name,column_type,extra in self.cursor.fetchall():
                _name,_type = self._parser_meta(column_name,column_type,extra)
                if _name is not None:
                    self.meta.append((_name,_type))

        except Exception as err:
            self.err = err
            logging.error(str(err))
            logging.exception(str(err))
        finally:
            # self.cnx may still be None when connect() itself failed
            if hasattr(self.cnx,'close'):
                self.cnx.close()

    def _parser_meta(self,column_name,column_type,extra):
        """
        Map one column description to a (column_name, generator) pair;
        returns (None, None) for columns that must not be filled.

        NOTE(review): the b'...' prefixes assume the connector returns
        column_type as bytes while column_name is str -- confirm against
        the mysql-connector version in use.
        """
        # auto-increment columns are left for MySQL itself to fill
        if extra == 'auto_increment':
            return (None,None)

        if column_type.startswith(b'int'):
            return(column_name,Int())

        # tinyint(1) is conventionally a boolean; tested before plain tinyint
        if column_type.startswith(b'tinyint(1)'):
            return(column_name,Bool())

        if column_type.startswith(b'tinyint'):
            return(column_name,TinyInt())

        if column_type.startswith(b'smallint'):
            return(column_name,SmallInt())

        if column_type.startswith(b'mediumint'):
            return(column_name,MediumInt())

        if column_type.startswith(b'bigint'):
            return(column_name,BigInt())

        if column_type.startswith(b'float') or column_type.startswith(b'double'):
            return(column_name,Float())

        # *uuid char/varchar columns of at least 36 characters get real UUIDs
        if column_name.endswith('uuid') and (column_type.startswith(b'varchar') or column_type.startswith(b'char')):
            _,n = column_type.split(b'(')
            number,_ = n.split(b')')
            length = int(number.decode('utf8'))
            if length >= 36:
                return (column_name,Uuid())

        # plain varchar | char columns
        if column_type.startswith(b'varchar') or column_type.startswith(b'char'):
            _,n = column_type.split(b'(')
            number,_ = n.split(b")")
            length = int(number.decode('utf8'))

            return(column_name,Char(length))

        # date / time types; 'datetime' and 'timestamp' must be tested
        # before 'date' and 'time' because of the shared prefixes
        if column_type.startswith(b'datetime'):
            return(column_name,DateTime())

        if column_type.startswith(b'timestamp'):
            return(column_name,Timestamp())

        if column_type.startswith(b'date'):
            return(column_name,Date())

        if column_type.startswith(b'time'):
            return(column_name,Time())

        # should be unreachable: every supported type is handled above
        return None,None

    def __getitem__(self,index):
        return self.meta[index]

    def __len__(self):
        return len(self.meta)

    def __iter__(self):
        # NOTE(review): returns the generator produced by __next__, so a
        # direct next(instance) yields a generator rather than an item --
        # quirky, but 'for ... in instance' does iterate the meta pairs.
        return next(self)

    def __next__(self):
        yield from self.meta

    def __del__(self):
        # best-effort close in case the connection outlived __init__
        if hasattr(self.cnx,'close'):
            self.cnx.close()

class DMLSQL(object):
    """
    Generate DML SQL statements that cursor.execute can run directly.
    """
class DMLSQL(object):
    """
    Generate DML SQL statements (plus parameter lists) that can be passed
    straight to ``cursor.execute``.
    """
    def __init__(self, database: str = "tmpdb", table: str = "t", meta=None):
        """
        Paramter
        --------
        database / table: str
            target of the generated statements
        meta:
            iterable of (column_name, value_generator) pairs (e.g. TableMeta)
        """
        self.database = database
        self.table = table
        self.meta = meta
        self.sql = None  # cached SQL text, built lazily by __str__
        self.gens = []
        for _, gen in self.meta:
            self.gens.append(gen)

    def _values(self):
        """Draw one fresh value from every column generator."""
        return [next(_) for _ in self.gens]

    def __str__(self):
        """Subclasses must render the SQL text."""
        raise NotImplementedError()

    def __getitem__(self, index):
        """Return a (sql, parameters) pair ready for cursor.execute."""
        tmp_sql = str(self)
        tmp_value = self._values()
        return (tmp_sql, tmp_value)


class InsertSQL(DMLSQL):
    """
    Parameterised INSERT statement generator, e.g.
    insert into tempdb.t(c1,c2,x) values (%s,%s,%s);
    """
    def __str__(self):
        # identity comparison with None (was `!= None`); cached after first build
        if self.sql is not None:
            return self.sql

        cols = [_[0] for _ in self.meta if _[0] is not None]
        vls = ("%s," * len(cols))[0:-1]
        cols = ','.join(cols)
        sql = f"insert into {self.database}.{self.table}({cols}) values ({vls});"
        self.sql = sql
        return sql
CharacterSetServer(VariableBase):
    variable_name="character_set_server"
    variable_type="string"

# NOTE(review): variable_type "intger" is misspelled consistently across the
# whole file; VariableBase presumably matches this exact spelling, so it must
# not be corrected in isolation -- confirm before fixing.

class Socket(VariableBase):
    variable_name="socket"
    variable_type="string"

class ReadOnly(VariableBase):
    variable_name="read_only"
    variable_type="intger"

class SkipNameResolve(VariableBase):
    variable_name="skip_name_resolve"
    variable_type="intger"

class LowerCaseTableNames(VariableBase):
    variable_name="lower_case_table_names"
    variable_type="intger"

class ThreadCacheSize(VariableBase):
    variable_name="thread_cache_size"
    variable_type="intger"

class TableOpenCache(VariableBase):
    variable_name="table_open_cache"
    variable_type="intger"

class TableDefinitionCache(VariableBase):
    variable_name="table_definition_cache"
    variable_type="intger"

class TableOpenCacheInstances(VariableBase):
    variable_name="table_open_cache_instances"
    variable_type="intger"

class MaxConnections(VariableBase):
    variable_name="max_connections"
    variable_type="intger"


#----------------------------------------------------
# binlog related variables
#----------------------------------------------------
class BinlogFormat(VariableBase):
    variable_name="binlog_format"
    variable_type="string"

class LogBin(VariableBase):
    variable_name="log_bin"
    variable_type="string"

    @property
    def value(self):
        # normalise the raw value: a numeric 0 is reported as the string 'OFF'
        if self._value == None:
            self._value=self._get_value()
            if self._value==0:
                self._value='OFF'
            return self._value
        else:
            return self._value


class BinlogRowsQueryLogEvents(VariableBase):
    variable_name="binlog_rows_query_log_events"
    variable_type="bool"

class LogSlaveUpdates(VariableBase):
    variable_name="log_slave_updates"
    variable_type="bool"

class ExpireLogsDays(VariableBase):
variable_name="expire_logs_days" 100 | variable_type="intger" 101 | 102 | class BinlogCacheSize(VariableBase): 103 | variable_name="binlog_cache_size" 104 | variable_type="byte" 105 | class SyncBinlog(VariableBase): 106 | variable_name="sync_binlog" 107 | variable_type="intger" 108 | 109 | 110 | #---------------------------------------------------- 111 | #error log 配置相关的variable 112 | #---------------------------------------------------- 113 | class ErrorLog(VariableBase): 114 | variable_name="log_error" 115 | variable_type="string" 116 | #---------------------------------------------------- 117 | #gtid配置相关的variable 118 | #---------------------------------------------------- 119 | class GtidMode(VariableBase): 120 | variable_name="gtid_mode" 121 | variable_type="bool" 122 | class EnforceGtidConsistency(VariableBase): 123 | variable_name="enforce_gtid_consistency" 124 | variable_type="bool" 125 | #---------------------------------------------------- 126 | #replication配置相关的variable 127 | #---------------------------------------------------- 128 | class MasterInfoRepository(VariableBase): 129 | variable_name="master_info_repository" 130 | variable_type="string" 131 | class RelayLogInfoRepository(VariableBase): 132 | variable_name="relay_log_info_repository" 133 | variable_type="string" 134 | class SlaveParallelType(VariableBase): 135 | variable_name="slave_parallel_type" 136 | variable_type="string" 137 | class SlaveParallelWorkers(VariableBase): 138 | variable_name="slave_parallel_workers" 139 | variable_type="intger" 140 | #---------------------------------------------------- 141 | #innodb配置相关的variable 142 | #---------------------------------------------------- 143 | class InnodbDataFilePath(VariableBase): 144 | variable_name="innodb_data_file_path" 145 | class InnodbTempDataFilePath(VariableBase): 146 | variable_name="innodb_temp_data_file_path" 147 | class InnodbBufferPoolFilename(VariableBase): 148 | variable_name="innodb_buffer_pool_filename " 149 | class 
InnodbLogGroupHomeDir(VariableBase): 150 | variable_name="innodb_log_group_home_dir" 151 | class InnodbLogFilesInGroup(VariableBase): 152 | variable_name="innodb_log_files_in_group" 153 | variable_type="intger" 154 | class InnodbLogFileSize(VariableBase): 155 | variable_name="innodb_log_file_size" 156 | variable_type="byte" 157 | class InnodbFileformat(VariableBase): 158 | variable_name="innodb_file_format" 159 | class InnodbFilePerTable(VariableBase): 160 | variable_name="innodb_file_per_table" 161 | variable_type="bool" 162 | class InnodbOnlineAlterLogMaxSize(VariableBase): 163 | variable_name="innodb_online_alter_log_max_size" 164 | variable_type="byte" 165 | class InnodbOpenFiles(VariableBase): 166 | variable_name="innodb_open_files" 167 | variable_type="intger" 168 | class InnodbPageSize(VariableBase): 169 | variable_name="innodb_page_size" 170 | variable_type="byte" 171 | class InnodbThreadConcurrency(VariableBase): 172 | variable_name="innodb_thread_concurrency" 173 | variable_type="intger" 174 | class InnodbReadIoThreads(VariableBase): 175 | variable_name="innodb_read_io_threads" 176 | variable_type="intger" 177 | class InnodbWriteIoThreads(VariableBase): 178 | variable_name="innodb_write_io_threads" 179 | variable_type="intger" 180 | class InnodbPurgeThreads(VariableBase): 181 | variable_name="innodb_purge_threads" 182 | variable_type="intger" 183 | class InnodbLockWaitTimeout(VariableBase): 184 | variable_name="innodb_lock_wait_timeout" 185 | variable_type="intger" 186 | class InnodbSpinWaitDelay(VariableBase): 187 | variable_name="innodb_spin_wait_delay" 188 | variable_type="intger" 189 | class InnodbAutoincLockMode(VariableBase): 190 | variable_name="innodb_autoinc_lock_mode" 191 | variable_type="intger" 192 | class InnodbStatsAutoRecalc(VariableBase): 193 | variable_name="innodb_stats_auto_recalc" 194 | variable_type="intger" 195 | class InnodbStatsPersistent(VariableBase): 196 | variable_name="innodb_stats_persistent" 197 | variable_type="intger" 198 
class InnodbStatsPersistentSamplePages(VariableBase):
    variable_name="innodb_stats_persistent_sample_pages"
    variable_type="intger"

class InnodbBufferPoolInstances(VariableBase):
    variable_name="innodb_buffer_pool_instances"
    variable_type="intger"

class InnodbAdaptiveHashIndex(VariableBase):
    variable_name="innodb_adaptive_hash_index"
    variable_type="bool"

class InnodbChangeBuffering(VariableBase):
    variable_name="innodb_change_buffering"

class InnodbChangeBufferMaxSize(VariableBase):
    variable_name="innodb_change_buffer_max_size"
    variable_type="intger"

class InnodbFlushNeighbors(VariableBase):
    variable_name="innodb_flush_neighbors"
    variable_type="bool"

class InnodbFlushMethod(VariableBase):
    variable_name="innodb_flush_method"

class InnodbDoublewrite(VariableBase):
    variable_name="innodb_doublewrite"
    variable_type="bool"

class InnodbLogBufferSize(VariableBase):
    variable_name="innodb_log_buffer_size"
    variable_type="byte"

class InnodbFlushLogAtTimeout(VariableBase):
    variable_name="innodb_flush_log_at_timeout"
    variable_type="intger"

class InnodbFlushLogAtTrxCommit(VariableBase):
    variable_name="innodb_flush_log_at_trx_commit"
    variable_type="intger"

class InnodbBufferPoolSize(VariableBase):
    variable_name="innodb_buffer_pool_size"
    variable_type="byte"

class Autocommit(VariableBase):
    variable_name="autocommit"
    variable_type="bool"

    def _get_value(self):
        """
        mysql-connector forces autocommit=true on its own session, so this
        override reads the GLOBAL value rather than the session value.
        """
        try:
            self.cursor.execute("show global variables like '{0}';".format(self.variable_name))
            tmp_value = self.cursor.fetchone()
            # identity comparison with None (was `!= None`)
            if tmp_value is not None and len(tmp_value) == 2:
                return tmp_value[1]
            # unexpected row shape: log and terminate like the other helpers
            self.logger.info("variable {0} has a bad value {1}".format(self.variable_name, tmp_value))
            self.close()
            exit()
        except Exception as e:
            error_message = str(e)  # fixed local name typo: errore_message
            self.logger.info(error_message)
            self.logger.info("exit")
            self.close()
            exit()

class InnodbOldBlocksPct(VariableBase):
    variable_name="innodb_old_blocks_pct"
    variable_type="intger"

class InnodbOldBlocksTime(VariableBase):
    variable_name="innodb_old_blocks_time"
    variable_type="intger"

class InnodbReadAheadThreshold(VariableBase):
    variable_name="innodb_read_ahead_threshold"
    variable_type="intger"

class InnodbRandomReadAhead(VariableBase):
    variable_name="innodb_random_read_ahead"
    variable_type="bool"

class InnodbBufferPoolDumpPct(VariableBase):
    variable_name="innodb_buffer_pool_dump_pct"
    variable_type="intger"

class InnodbBufferPoolDumpAtShutdown(VariableBase):
    variable_name="innodb_buffer_pool_dump_at_shutdown"
    variable_type="bool"

class InnodbBufferPoolLoadAtStartup(VariableBase):
    variable_name="innodb_buffer_pool_load_at_startup"
    variable_type="bool"


#----------------------------------------------------
# query cache related variables
#----------------------------------------------------
class QueryCacheLimit(VariableBase):
    variable_name="query_cache_limit"
    variable_type="int"

class QueryCacheMinResUnit(VariableBase):
    variable_name="query_cache_min_res_unit"
    variable_type="int"

class QueryCacheSize(VariableBase):
    variable_name="query_cache_size"
    variable_type="int"

class QueryCacheType(VariableBase):
    variable_name="query_cache_type"
    variable_type="str"

class Version(VariableBase):
    variable_name="version"
    variable_type="str"
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# consider migrating to setuptools before supporting 3.12+.
from distutils.core import setup

# legacy entry-point names, kept installed for backward compatibility
olds_scripts = ['bin/mtlsmonitor', 'bin/mtlsbackup', 'bin/mtlslog', 'bin/mtlsanalysis',
                'bin/mtlshttp', 'bin/mtlsbigfiles', 'bin/mtlsdeleterows']

# current mtls-* command line tools
news_scripts = ['bin/mtls-big-files', 'bin/mtls-delete-rows', 'bin/mtls-file-truncate',
                'bin/mtls-http', 'bin/mtls-log', 'bin/mtls-monitor', 'bin/mtls-backup',
                'bin/mtls-perf-bench', 'bin/mtls-kill-all-conections', 'bin/mtls-sql-distribution',
                'bin/mtls-file-stat', 'bin/mtls-expired-tables', 'bin/mtls-random-passwd',
                'bin/mtls-rows-diff', 'bin/mtls-fake-mysqld','bin/mtls-auto-fill','bin/mtls-multi-session',
                'bin/mtls-ps-mem']

scripts = olds_scripts + news_scripts

setup(name='mysqltools-python',
      version='2.22.07.20',
      scripts=scripts,
      packages=['mtls', 'mtls.kits','mtls.ps'],
      maintainer='Neeky',
      maintainer_email='neeky@live.com',
      url='https://github.com/Neeky/mysqltools-python',
      )
mysql_user='monitor'
5 | mysql_password='monitor0352'  # NOTE(review): hard-coded local test credentials — acceptable for a test fixture, never reuse in production
6 | mysql_host='127.0.0.1'
7 | mysql_port=3306
-------------------------------------------------------------------------------- /tests/test_variable.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests for each variable defined in mtls.variable.
3 | """
4 | 
5 | # -*- coding: utf8 -*-
6 | # NOTE(review): a PEP 263 coding cookie only takes effect on line 1 or 2 of the file;
7 | # here (after the docstring) it is inert — harmless, since Python 3 defaults to UTF-8.
6 | 
7 | import sys
8 | import os
9 | import unittest
10 | cwd = os.getcwd()  # make the repo root importable so "from mtls import ..." works when run from the checkout
11 | sys.path.append(cwd)
12 | 
13 | from mtls import variable,replication
14 | from test_base import mysql_host,mysql_port,mysql_user,mysql_password
15 | 
16 | class MtlsTest(unittest.TestCase):
17 | """
18 | Exercises the value retrieval of every variable defined in mtls.variable,
to check that each returned value has the expected type.
19 | """
20 | def get_variable(self,cls):
21 | # Instantiate the variable class against the shared test server and return its parsed value.
21 | v = cls(host=mysql_host,port=mysql_port,user=mysql_user,password=mysql_password)
22 | return v.value
23 | 
24 | def is_int(self,cls,msg=''):
25 | # Assert the variable's value is an int, printing it for inspection.
25 | v = self.get_variable(cls)
26 | print("{cls.variable_name:32} = {v}".format(cls=cls,v=v))  # NOTE(review): ":32" vs ":<32" below — identical output for str (left-align is the default), just a style inconsistency
27 | self.assertEqual(type(v),int,msg)
28 | 
29 | def is_str(self,cls,msg=''):
30 | # Assert the variable's value is a str, printing it for inspection.
30 | v = self.get_variable(cls)
31 | print("{cls.variable_name:<32} = {v}".format(cls=cls,v=v))
32 | self.assertEqual(type(v),str,msg)
33 | 
34 | 
35 | def test_ServerID(self):
36 | self.is_int(variable.ServerID)
37 | 
38 | def test_BaseDir(self):
39 | self.is_str(variable.BaseDir)
40 | 
41 | def test_DataDir(self):
42 | self.is_str(variable.DataDir)
43 | 
44 | def test_Port(self):
45 | self.is_int(variable.Port)
46 | 
47 | def test_CharacterSetServer(self):
48 | self.is_str(variable.CharacterSetServer)
49 | 
50 | def test_Socket(self):
51 | self.is_str(variable.Socket)
52 | 
53 | def test_ReadOnly(self):
54 | self.is_int(variable.ReadOnly)
55 | 
56 | def test_SkipNameResolve(self):
57 | self.is_int(variable.SkipNameResolve)
58 | 
59 | def test_LowerCaseTableNames(self):
60 | self.is_int(variable.LowerCaseTableNames)
61 | 
62 | def test_ThreadCacheSize(self):
63 | self.is_int(variable.ThreadCacheSize)
64 | 
65 | def 
test_TableOpenCache(self):
66 | self.is_int(variable.TableOpenCache)
67 | 
68 | def test_TableDefinitionCache(self):
69 | self.is_int(variable.TableDefinitionCache)
70 | 
71 | def test_TableOpenCacheInstances(self):
72 | self.is_int(variable.TableOpenCacheInstances)
73 | 
74 | def test_MaxConnections(self):
75 | self.is_int(variable.MaxConnections)
76 | 
77 | def test_BinlogFormat(self):  # binlog / GTID / replication settings start here
78 | self.is_str(variable.BinlogFormat)
79 | 
80 | def test_LogBin(self):
81 | self.is_int(variable.LogBin)
82 | 
83 | def test_BinlogRowsQueryLogEvents(self):
84 | self.is_int(variable.BinlogRowsQueryLogEvents)
85 | 
86 | def test_LogSlaveUpdates(self):
87 | self.is_int(variable.LogSlaveUpdates)
88 | 
89 | def test_ExpireLogsDays(self):
90 | self.is_int(variable.ExpireLogsDays)
91 | 
92 | def test_BinlogCacheSize(self):
93 | self.is_int(variable.BinlogCacheSize)
94 | 
95 | def test_SyncBinlog(self):
96 | self.is_int(variable.SyncBinlog)
97 | 
98 | def test_ErrorLog(self):
99 | self.is_str(variable.ErrorLog)
100 | 
101 | def test_GtidMode(self):
102 | self.is_str(variable.GtidMode)
103 | 
104 | def test_EnforceGtidConsistency(self):
105 | self.is_str(variable.EnforceGtidConsistency)
106 | 
107 | def test_MasterInfoRepository(self):
108 | self.is_str(variable.MasterInfoRepository)
109 | 
110 | def test_RelayLogInfoRepository(self):
111 | self.is_str(variable.RelayLogInfoRepository)
112 | 
113 | def test_SlaveParallelType(self):
114 | self.is_str(variable.SlaveParallelType)
115 | 
116 | def test_SlaveParallelWorkers(self):
117 | self.is_int(variable.SlaveParallelWorkers)
118 | 
119 | def test_InnodbDataFilePath(self):  # InnoDB file layout settings start here
120 | self.is_str(variable.InnodbDataFilePath)
121 | 
122 | def test_InnodbTempDataFilePath(self):
123 | self.is_str(variable.InnodbTempDataFilePath)
124 | 
125 | def test_InnodbBufferPoolFilename(self):
126 | self.is_str(variable.InnodbBufferPoolFilename)
127 | 
128 | def test_InnodbLogGroupHomeDir(self):
129 | self.is_str(variable.InnodbLogGroupHomeDir)
130 | 
131 | def 
test_InnodbLogFilesInGroup(self):
132 | self.is_int(variable.InnodbLogFilesInGroup)
133 | 
134 | def test_InnodbLogFileSize(self):
135 | self.is_int(variable.InnodbLogFileSize)
136 | 
137 | def test_InnodbFileformat(self):
138 | v = self.get_variable(variable.Version)
139 | if v.startswith('8'):
140 | # The innodb_file_format variable no longer exists in MySQL 8.0.x, so skip the check there.
141 | pass
142 | else:
143 | self.is_str(variable.InnodbFileformat)
144 | 
145 | def test_InnodbFilePerTable(self):
146 | self.is_int(variable.InnodbFilePerTable)
147 | 
148 | def test_InnodbOnlineAlterLogMaxSize(self):
149 | self.is_int(variable.InnodbOnlineAlterLogMaxSize)
150 | 
151 | def test_InnodbOpenFiles(self):
152 | self.is_int(variable.InnodbOpenFiles)
153 | 
154 | def test_InnodbPageSize(self):
155 | self.is_int(variable.InnodbPageSize)
156 | 
157 | def test_InnodbThreadConcurrency(self):
158 | self.is_int(variable.InnodbThreadConcurrency)
159 | 
160 | def test_InnodbReadIoThreads(self):
161 | self.is_int(variable.InnodbReadIoThreads)
162 | 
163 | def test_InnodbWriteIoThreads(self):
164 | self.is_int(variable.InnodbWriteIoThreads)
165 | 
166 | def test_InnodbPurgeThreads(self):
167 | self.is_int(variable.InnodbPurgeThreads)
168 | 
169 | def test_InnodbLockWaitTimeout(self):
170 | self.is_int(variable.InnodbLockWaitTimeout)
171 | 
172 | def test_InnodbSpinWaitDelay(self):
173 | self.is_int(variable.InnodbSpinWaitDelay)
174 | 
175 | def test_InnodbAutoincLockMode(self):
176 | self.is_int(variable.InnodbAutoincLockMode)
177 | 
178 | def test_InnodbStatsAutoRecalc(self):
179 | self.is_int(variable.InnodbStatsAutoRecalc)
180 | 
181 | def test_InnodbStatsPersistent(self):
182 | self.is_int(variable.InnodbStatsPersistent)
183 | 
184 | def test_InnodbStatsPersistentSamplePages(self):
185 | self.is_int(variable.InnodbStatsPersistentSamplePages)
186 | 
187 | def test_InnodbBufferPoolInstances(self):
188 | self.is_int(variable.InnodbBufferPoolInstances)
189 | 
190 | def test_InnodbAdaptiveHashIndex(self):
191 | 
self.is_int(variable.InnodbAdaptiveHashIndex)
192 | 
193 | def test_InnodbChangeBuffering(self):
194 | self.is_str(variable.InnodbChangeBuffering)
195 | 
196 | def test_InnodbChangeBufferMaxSize(self):
197 | self.is_int(variable.InnodbChangeBufferMaxSize)
198 | 
199 | def test_InnodbFlushNeighbors(self):
200 | self.is_int(variable.InnodbFlushNeighbors)
201 | 
202 | def test_InnodbFlushMethod(self):
203 | self.is_str(variable.InnodbFlushMethod)
204 | 
205 | def test_InnodbDoublewrite(self):
206 | self.is_int(variable.InnodbDoublewrite)
207 | 
208 | def test_InnodbLogBufferSize(self):
209 | self.is_int(variable.InnodbLogBufferSize)
210 | 
211 | def test_InnodbFlushLogAtTimeout(self):
212 | self.is_int(variable.InnodbFlushLogAtTimeout)
213 | 
214 | def test_InnodbFlushLogAtTrxCommit(self):
215 | self.is_int(variable.InnodbFlushLogAtTrxCommit)
216 | 
217 | def test_InnodbBufferPoolSize(self):
218 | self.is_int(variable.InnodbBufferPoolSize)
219 | 
220 | def test_Autocommit(self):
221 | self.is_str(variable.Autocommit)
222 | 
223 | def test_InnodbOldBlocksPct(self):
224 | self.is_int(variable.InnodbOldBlocksPct)
225 | 
226 | def test_InnodbOldBlocksTime(self):
227 | self.is_int(variable.InnodbOldBlocksTime)
228 | 
229 | def test_InnodbReadAheadThreshold(self):
230 | self.is_int(variable.InnodbReadAheadThreshold)
231 | 
232 | def test_InnodbRandomReadAhead(self):
233 | self.is_int(variable.InnodbRandomReadAhead)
234 | 
235 | def test_InnodbBufferPoolDumpPct(self):
236 | self.is_int(variable.InnodbBufferPoolDumpPct)
237 | 
238 | def test_InnodbBufferPoolDumpAtShutdown(self):
239 | self.is_int(variable.InnodbBufferPoolDumpAtShutdown)
240 | 
241 | def test_InnodbBufferPoolLoadAtStartup(self):
242 | self.is_int(variable.InnodbBufferPoolLoadAtStartup)
243 | 
244 | def test_QueryCacheLimit(self):
245 | # The query cache was removed in MySQL 8.0, so these checks only run on pre-8 servers.
245 | if not self.get_variable(variable.Version).startswith('8'):
246 | self.is_int(variable.QueryCacheLimit)
247 | 
248 | def test_QueryCacheMinResUnit(self):
249 | if not 
self.get_variable(variable.Version).startswith('8'): 250 | self.is_int(variable.QueryCacheMinResUnit) 251 | 252 | def test_QueryCacheSize(self): 253 | if not self.get_variable(variable.Version).startswith('8'): 254 | self.is_int(variable.QueryCacheSize) 255 | 256 | def test_QueryCacheType(self): 257 | if not self.get_variable(variable.Version).startswith('8'): 258 | self.is_int(variable.QueryCacheType) 259 | 260 | def test_Version(self): 261 | self.is_str(variable.Version) 262 | 263 | 264 | 265 | if __name__=="__main__": 266 | unittest.main() 267 | 268 | 269 | 270 | 271 | 272 | --------------------------------------------------------------------------------