├── DateTypeTestFile ├── MySQL.sql └── oracle.sql ├── Dockerfile ├── LICENSE ├── MySQL ├── binlogEventInfo.go ├── binlogHeadInfo.go ├── binlogPrepare.go ├── my_data_fix_sql.go ├── my_global_point.go ├── my_query_table_date.go └── my_scheme_table_column.go ├── Oracle ├── instantclient_11_2 │ └── instantclient_11_2.tar.xz ├── or_data_fix_sql.go ├── or_global_point.go ├── or_query_table_date.go └── or_scheme_table_column.go ├── README.md ├── actions ├── TerminalResultOutput.go ├── binlogPrepare.go ├── checkSum.go ├── differencesDataDispos.go ├── incDataDispos.go ├── p_introduce.go ├── rapirDML.go ├── schema_tab_struct.go ├── schema_table_access_permissions.go ├── table_count_check.go ├── table_index_dispos.go ├── table_no_Index_dispos.go ├── table_query_concurrency.go ├── table_sample_check.go └── tmp_file_io_operate.go ├── build-arm.sh ├── build-x86.sh ├── dataDispos ├── DBqueryDispos.go └── dataMerge.go ├── dbExec ├── DataFixSql.go ├── TableQueryAllColumn.go ├── connection.go ├── global_consistency_snapshot.go ├── query_table_date.go └── schem_Table_Column.go ├── docs ├── gc.conf.example └── gt-checksum-manual.md ├── gc.conf ├── gc.conf-simple ├── global ├── IncDatadispos.go ├── ThreadPool.go ├── dbConnPool.go ├── globalVariables.go ├── goRoutingPool.go ├── log.go ├── mysqlBinlogDispos.go └── tmpTableDateFile.go ├── go-log ├── log │ ├── LICENSE │ ├── README.md │ ├── doc.go │ ├── filehandler.go │ ├── handler.go │ ├── log.go │ ├── log_test.go │ └── logger.go ├── loggers │ └── loggers.go └── logrus │ ├── .gitignore │ ├── .golangci.yml │ ├── .travis.yml │ ├── CHANGELOG.md │ ├── LICENSE │ ├── README.md │ ├── alt_exit.go │ ├── alt_exit_test.go │ ├── appveyor.yml │ ├── buffer_pool.go │ ├── doc.go │ ├── entry.go │ ├── entry_test.go │ ├── example_basic_test.go │ ├── example_custom_caller_test.go │ ├── example_default_field_value_test.go │ ├── example_function_test.go │ ├── example_global_hook_test.go │ ├── example_hook_test.go │ ├── exported.go │ ├── formatter.go │ ├── 
formatter_bench_test.go │ ├── go.mod │ ├── go.sum │ ├── hook_test.go │ ├── hooks.go │ ├── hooks │ ├── syslog │ │ ├── README.md │ │ ├── syslog.go │ │ └── syslog_test.go │ ├── test │ │ ├── test.go │ │ └── test_test.go │ └── writer │ │ ├── README.md │ │ ├── writer.go │ │ └── writer_test.go │ ├── internal │ └── testutils │ │ └── testutils.go │ ├── json_formatter.go │ ├── json_formatter_test.go │ ├── level_test.go │ ├── logger.go │ ├── logger_bench_test.go │ ├── logger_test.go │ ├── logrus.go │ ├── logrus_test.go │ ├── terminal_check_appengine.go │ ├── terminal_check_bsd.go │ ├── terminal_check_js.go │ ├── terminal_check_no_terminal.go │ ├── terminal_check_notappengine.go │ ├── terminal_check_solaris.go │ ├── terminal_check_unix.go │ ├── terminal_check_windows.go │ ├── text_formatter.go │ ├── text_formatter_test.go │ ├── travis │ ├── cross_build.sh │ └── install.sh │ ├── writer.go │ └── writer_test.go ├── go.mod ├── go.sum ├── greatdbCheck.go ├── inputArg ├── checkParameter.go ├── flagHelp.go ├── getConf.go ├── inputInit.go └── p_intorduce.go └── relnotes └── CHANGELOG.zh-CN.md /DateTypeTestFile/MySQL.sql: -------------------------------------------------------------------------------- 1 | create database pcms; 2 | use pcms; 3 | #测试数值类型 4 | create table testInt( 5 | f1 TINYINT, 6 | f2 SMALLINT, 7 | f3 MEDIUMINT, 8 | f4 INT, 9 | f5 INT(5) ZEROFILL, 10 | f6 INT UNSIGNED, 11 | f7 BIGINT 12 | ) CHARACTER SET 'utf8'; 13 | alter table testint add index idx_1(f1); 14 | insert into testInt(f1,f2,f3,f4,f5,f6,f7) values(1,2,3,4,5,6,7); 15 | 16 | create table testFlod( 17 | f1 FLOAT, 18 | f2 FLOAT(5,2), 19 | f3 DOUBLE, 20 | f4 DOUBLE(5,3) 21 | ) CHARACTER SET 'utf8'; 22 | alter table testflod add index idx_1(f1); 23 | insert into testFlod(f1,f2,f3,f4) values(123.45,123.45,123.45,12.456); 24 | 25 | #测试二进制类型 26 | create table testBit( 27 | f1 BIT, 28 | f2 BIT(5), 29 | F3 bit(64) 30 | ); 31 | alter table testbit add index idx_1(f1); 32 | insert into testBit values(1,31,65); 33 | 
select * from testBit; #from bin,oct,hex bin转换为二进制,oct8进制,hex16进制 34 | #测试时间类型 35 | create table testTime( 36 | f1 YEAR, 37 | f2 YEAR(4), 38 | f3 date, 39 | f4 time, 40 | f5 datetime, 41 | f6 timestamp 42 | )CHARACTER SET 'utf8'; 43 | alter table testtime add index idx_1(f1); 44 | insert into testTime(f1,f2,f3,f4,f5,f6) values('2022',2022,'2022-07-12','2 12:30:29','2022-07-12 14:53:00','2022-07-12 14:54:00'); 45 | 46 | #测试字符串类型 47 | create table testString( 48 | f1 char, 49 | f2 char(5), 50 | f3 varchar(10), 51 | f4 tinytext, 52 | f5 text, 53 | f6 mediumtext, 54 | f7 longtext, 55 | f8 enum('a','b','c','d'), 56 | f9 set('aa','bb','cc','dd') 57 | )CHARACTER SET 'utf8'; 58 | alter table teststring add index idx_1(f1); 59 | insert into testString(f1,f2,f3,f4,f5,f6,f7,f8,f9) values('1','abcde','ab123','1adf','aaadfaewrwer','aa','aasdfasdfafdafasdfasf','d','aa,bb'); 60 | 61 | #测试二进制字符串类型 62 | create table testBin( 63 | f1 binary, 64 | f2 binary(3), 65 | f3 varbinary(10), 66 | f4 tinyblob, 67 | f5 blob, 68 | f6 mediumblob, 69 | f7 longblob 70 | )character set 'utf8'; 71 | alter table testbin add index idx_1(f1); 72 | insert into testBin(f1,f2,f3,f4,f5,f6,f7) values('a','abc','ab','01010101','0x9023123123','adfasdfasdfasdfasdf','aasdfasdfasdfasdfasf'); 73 | 74 | #索引列为null或为''的处理 75 | 76 | 77 | #触发器的处理 78 | 79 | //测试表及测试数据 80 | CREATE TABLE account (acct_num INT, amount DECIMAL(10,2)); 81 | INSERT INTO account VALUES(137,14.98),(141,1937.50),(97,-100.00); 82 | 83 | //创建影子表 84 | CREATE TABLE tmp_account (acct_num INT, amount DECIMAL(10,2),sql_text varchar(100)); 85 | 86 | //监控insert 87 | DELIMITER || 88 | create trigger accountInsert BEFORE insert 89 | on xxx for each row 90 | BEGIN 91 | INSERT INTO tmp_account values(new.acct_num,new.amount,"insert"); 92 | end || 93 | delimiter; 94 | 95 | //监控delete 96 | DELIMITER || 97 | create trigger accountDelete BEFORE delete 98 | on xxx for each row 99 | BEGIN 100 | insert into tmp_account values(old.acct_num,old.amount,"delete") 101 
| end || 102 | delimiter; 103 | 104 | //监控update 105 | DELIMITER || 106 | create trigger accountUpdate BEFORE update 107 | on xxx for each row 108 | BEGIN 109 | insert into tmp_account values(old.acct_num,old.amount,"update_delete") 110 | insert into tmp_account values(new.acct_num,new.account,"update_insert") 111 | end || 112 | delimiter; 113 | 114 | 115 | //测试步骤 116 | //insert 测试 117 | insert into account values (150,33.32); 118 | select * from tmp_account where acct_num=150; 119 | 120 | //update 测试 121 | insert into account values(200,13.23); 122 | update account set acct_num = 201 where amount = 13.23; 123 | select * from tmp_account 124 | 125 | //delete 测试 126 | insert into account values(300,14.23); 127 | delete from account where acct_num = 300; 128 | select * from tmp_account 129 | 130 | 131 | //分区 132 | CREATE TABLE range_Partition_Table( 133 | range_key_column DATETIME, 134 | NAME VARCHAR(20), 135 | ID INT 136 | ) PARTITION BY RANGE(to_days(range_key_column))( 137 | PARTITION PART_202007 VALUES LESS THAN (to_days('2020-07-1')), 138 | PARTITION PART_202008 VALUES LESS THAN (to_days('2020-08-1')), 139 | PARTITION PART_202009 VALUES LESS THAN (to_days('2020-09-1')) 140 | ); 141 | 142 | CREATE TABLE PCMS.CUSTOMER( 143 | CUSTOMER_ID INT NOT NULL PRIMARY KEY, 144 | FIRST_NAME VARCHAR(30) NOT NULL, 145 | LAST_NAME VARCHAR(30) NOT NULL, 146 | PHONE VARCHAR(15) NOT NULL, 147 | EMAIL VARCHAR(80), 148 | STATUS CHAR(1) 149 | )PARTITION BY RANGE (CUSTOMER_ID)( 150 | PARTITION CUS_PART1 VALUES LESS THAN (100000), 151 | PARTITION CUS_PART2 VALUES LESS THAN (200000) 152 | ) 153 | CREATE TABLE PCMS.CUSTOMER1( 154 | CUSTOMER_ID VARCHAR(30) NOT NULL, 155 | FIRST_NAME VARCHAR(30) NOT NULL, 156 | LAST_NAME VARCHAR(30) NOT NULL, 157 | PHONE VARCHAR(15) NOT NULL, 158 | EMAIL VARCHAR(80), 159 | `STATUS` CHAR(1) 160 | )PARTITION BY RANGE COLUMNS (CUSTOMER_ID)( 161 | PARTITION CUS_PART1 VALUES LESS THAN ('100000'), 162 | PARTITION CUS_PART2 VALUES LESS THAN ('200000') 163 | ) 164 
| 165 | CREATE TABLE list_Partition_Table( 166 | NAME VARCHAR(10), 167 | DATA VARCHAR(20) 168 | )PARTITION BY LIST COLUMNS (NAME)( 169 | PARTITION PART_01 VALUES IN ('ME','PE','QC','RD'), 170 | PARTITION PART_02 VALUES IN ('SMT','SALE') 171 | ); 172 | 173 | 174 | 175 | CREATE TABLE hash_Partition_Table( 176 | hash_key_column INT(30), 177 | DATA VARCHAR(20) 178 | ) PARTITION BY HASH (hash_key_column) 179 | PARTITIONS 4; 180 | 181 | 182 | CREATE TABLE range_hash_Partition_Table (id INT, purchased DATE) 183 | PARTITION BY RANGE( YEAR(purchased) ) 184 | SUBPARTITION BY HASH( TO_DAYS(purchased) ) 185 | SUBPARTITIONS 2 ( 186 | PARTITION p0 VALUES LESS THAN (1990), 187 | PARTITION p1 VALUES LESS THAN (2000), 188 | PARTITION p2 VALUES LESS THAN MAXVALUE 189 | ); 190 | 191 | 192 | CREATE TABLE tb_dept1 ( 193 | id INT(11) PRIMARY KEY, 194 | name VARCHAR(22) NOT NULL, 195 | location VARCHAR(50) 196 | ); 197 | 198 | CREATE TABLE tb_emp6( 199 | id INT(11) PRIMARY KEY, 200 | name VARCHAR(25), 201 | deptId INT(11), 202 | salary FLOAT, 203 | CONSTRAINT fk_emp_dept1 204 | FOREIGN KEY(deptId) REFERENCES tb_dept1(id) 205 | ); 206 | 207 | //存储函数 208 | DELIMITER $$ 209 | CREATE FUNCTION FUN_getAgeStr(age int) RETURNS varchar(20) 210 | BEGIN 211 | declare results varchar(20); 212 | IF age<16 then 213 | set results = '小屁孩'; 214 | ELSEIF age <22 THEN 215 | set results = '小鲜肉'; 216 | ELSEIF age <30 THEN 217 | set results = '小青年'; 218 | ELSE 219 | SET results = '大爷'; 220 | END IF; 221 | RETURN results; 222 | end $$ 223 | DELIMITER ; 224 | 225 | //触发器 226 | CREATE TABLE test1(a1 int); 227 | CREATE TABLE test2(a2 int); 228 | DELIMITER $ 229 | CREATE TRIGGER tri_test 230 | BEFORE INSERT ON test1 231 | FOR EACH ROW BEGIN 232 | INSERT INTO test2 SET a2=NEW.a1; 233 | END$ 234 | DELIMITER ; 235 | 236 | /* 237 | 索引 238 | */ 239 | create table IndexT( 240 | `id` int(11) NOT NULL, 241 | `tenantry_id` bigint(20) NOT NULL COMMENT '商品id', 242 | `code` varchar(64) NOT NULL COMMENT '商品编码(货号)', 243 | 
`goods_name` varchar(50) NOT NULL COMMENT '商品名称', 244 | `props_name` varchar(100) NOT NULL COMMENT '商品名称描述字符串,格式:p1:v1;p2:v2,例如:品牌:盈讯;型号:F908', 245 | `price` decimal(10,2) NOT NULL COMMENT '商品定价', 246 | `price_url` varchar(1000) NOT NULL COMMENT '商品主图片地址', 247 | `create_time` datetime NOT NULL COMMENT '商品创建时间', 248 | `modify_time` datetime DEFAULT NULL COMMENT '商品最近修改时间', 249 | `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '标记逻辑删除', 250 | PRIMARY KEY (`id`), 251 | KEY `idx_2` (`tenantry_id`,`code`), 252 | KEY `idx_3` (`code`,`tenantry_id`) 253 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='商品信息表'; -------------------------------------------------------------------------------- /DateTypeTestFile/oracle.sql: -------------------------------------------------------------------------------- 1 | //分区 2 | CREATE TABLE range_Partition_Table( 3 | range_key_column DATE, 4 | NAME VARCHAR2(20), 5 | ID integer 6 | ) PARTITION BY RANGE(range_key_column)( 7 | PARTITION PART_202007 VALUES LESS THAN (TO_DATE('2020-07-1 00:00:00','yyyy-mm-dd hh24:mi:ss')), 8 | PARTITION PART_202008 VALUES LESS THAN (TO_DATE('2020-08-1 00:00:00','yyyy-mm-dd hh24:mi:ss')), 9 | PARTITION PART_202009 VALUES LESS THAN (TO_DATE('2020-09-1 00:00:00','yyyy-mm-dd hh24:mi:ss')) 10 | ); 11 | 12 | CREATE TABLE "PCMS"."CUSTOMER"( 13 | CUSTOMER_ID NUMBER NOT NULL PRIMARY KEY, 14 | FIRST_NAME VARCHAR2(30) NOT NULL, 15 | LAST_NAME VARCHAR2(30) NOT NULL, 16 | PHONE VARCHAR2(15) NOT NULL, 17 | EMAIL VARCHAR2(80), 18 | STATUS CHAR(1) 19 | )PARTITION BY RANGE ("CUSTOMER_ID")( 20 | PARTITION CUS_PART1 VALUES LESS THAN (100000), 21 | PARTITION CUS_PART2 VALUES LESS THAN (200000) 22 | ) 23 | 24 | CREATE TABLE list_Partition_Table( 25 | NAME VARCHAR2(10), 26 | DATA VARCHAR2(20) 27 | )PARTITION BY LIST(NAME)( 28 | PARTITION PART_01 VALUES('ME','PE','QC','RD'), 29 | PARTITION PART_02 VALUES('SMT','SALE') 30 | ); 31 | 32 | CREATE TABLE hash_Partition_Table( 33 | hash_key_column VARCHAR2(30), 34 | DATA VARCHAR2(20) 35 | ) 
PARTITION BY HASH(hash_key_column)( 36 | PARTITION PART_0001, 37 | PARTITION PART_0002, 38 | PARTITION PART_0003, 39 | PARTITION PART_0004, 40 | PARTITION PART_0005 41 | ); 42 | 43 | CREATE TABLE range_hash_Partition_Table( 44 | range_column_key DATE, 45 | hash_column_key INT, 46 | DATA VARCHAR2(20) 47 | ) PARTITION BY RANGE(range_column_key) 48 | SUBPARTITION BY HASH(hash_column_key) SUBPARTITIONS 2 49 | ( 50 | PARTITION PART_202008 VALUES LESS THAN (TO_DATE('2020-08-01','yyyy-mm-dd'))( 51 | SUBPARTITION SUB_1, 52 | SUBPARTITION SUB_2, 53 | SUBPARTITION SUB_3 54 | ), 55 | PARTITION PART_202009 VALUES LESS THAN (TO_DATE('2020-09-01','yyyy-mm-dd'))( 56 | SUBPARTITION SUB_4, 57 | SUBPARTITION SUB_5 58 | ) 59 | ); 60 | 61 | 62 | //外键 63 | CREATE TABLE "PCMS"."tb_dept1" ( 64 | "ID" NUMBER(11) NOT NULL, 65 | "NAME" VARCHAR2(22) NOT NULL, 66 | "LOCATION" VARCHAR2(50), 67 | PRIMARY KEY ("ID") 68 | ) 69 | DROP TABLE "PCMS"."tb_emp6"; 70 | CREATE TABLE "PCMS"."tb_emp6" ( 71 | "id" NUMBER(11,0) NOT NULL, 72 | "name" VARCHAR2(25 BYTE), 73 | "deptId" NUMBER(11,0), 74 | "salary" FLOAT(126) 75 | ) 76 | ALTER TABLE "PCMS"."tb_emp6" ADD CONSTRAINT "SYS_C0011130" PRIMARY KEY ("id"); 77 | ALTER TABLE "PCMS"."tb_emp6" ADD CONSTRAINT "SYS_C0011129" CHECK ("id" IS NOT NULL) NOT DEFERRABLE INITIALLY IMMEDIATE NORELY VALIDATE; 78 | ALTER TABLE "PCMS"."tb_emp6" ADD CONSTRAINT "fk_emp_dept1" FOREIGN KEY ("deptId") REFERENCES "PCMS"."tb_dept1" ("ID") NOT DEFERRABLE INITIALLY IMMEDIATE NORELY VALIDATE; 79 | 80 | //存储函数 81 | CREATE OR REPLACE FUNCTION FUN_getAgeStr(age int) 82 | RETURN varchar2 IS 83 | results varchar2(20); 84 | BEGIN 85 | IF age<16 then 86 | results := '小屁孩'; 87 | ELSIF age <22 THEN 88 | results := '小鲜肉'; 89 | ELSIF age <30 THEN 90 | results := '小青年'; 91 | ELSE 92 | results := '大爷'; 93 | END IF; 94 | RETURN results; 95 | end; 96 | 97 | //存储过程 98 | 99 | CREATE TABLE "PCMS"."info" ( 100 | "ID" NUMBER NOT NULL, 101 | "AGE" NUMBER NOT NULL, 102 | "ADDRESS" VARCHAR2(20) NOT NULL, 
103 | "SALARY" NUMBER(10,2) NOT NULL, 104 | PRIMARY KEY ("ID") 105 | ) 106 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(1,'ZHANG',32,'Beijing',2000.00); 107 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(2,'LI',25,'Shanghai',1500.00); 108 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(3,'PENG',23,'Hangzhou',2000.00); 109 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(4,'LIN',25,'Henan',6500.00); 110 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(5,'WANG',27,'Hunan',8500.00); 111 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(6,'WANG',22,'Hunan',4500.00); 112 | INSERT INTO "info"(ID,NAME,AGE,ADDRESS,SALARY) VALUES(7,'GAO',24,'Hebei',10000.00); 113 | 114 | 115 | CREATE OR REPLACE procedure countproc(sid IN INT,num OUT INT) is 116 | begin 117 | select count(*) into num from PCMS."info" where salary > 5000; 118 | end; 119 | 120 | //触发器 121 | CREATE TABLE "test1"(a1 NUMBER); 122 | CREATE TABLE "test2"(a2 NUMBER); 123 | CREATE OR REPLACE TRIGGER tri_test 124 | BEFORE INSERT ON "test1" 125 | FOR EACH ROW 126 | BEGIN 127 | INSERT INTO "test2"(a2) values(:NEW.a1); 128 | commit; 129 | END; 130 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ## Build 2 | # This dockerfile uses the linux image 3 | # VERSION 1.2.1 4 | # Author: greatsql 5 | # Command format: Instruction [arguments / command] … 6 | 7 | FROM golang:latest AS builder 8 | 9 | LABEL gt-checksum="greatsql" 10 | MAINTAINER greatsql 11 | 12 | ENV GO111MODULE=on \ 13 | GOOS=linux \ 14 | GOPROXY="https://goproxy.io" \ 15 | GOPRIVATE="github.com/marvinhosea/*" 16 | 17 | WORKDIR /go/release 18 | 19 | COPY . . 
20 | 21 | ARG VERSION 22 | 23 | RUN go mod tidy 24 | RUN go build -o gt-checksum greatdbCheck.go 25 | RUN mkdir -p ./gt-checksum-${VERSION} && cp -rf docs gc.conf gc.conf-simple gt-checksum Oracle/instantclient_11_2 README.md relnotes gt-checksum-${VERSION} 26 | 27 | FROM scratch AS exporter 28 | 29 | ARG VERSION 30 | 31 | COPY --from=builder /go/release/gt-checksum-${VERSION} ./gt-checksum-${VERSION} 32 | 33 | # DOCKER_BUILDKIT=1 docker build --build-arg VERSION=v1.2.1 -f Dockerfile -o ./ . 34 | -------------------------------------------------------------------------------- /MySQL/binlogPrepare.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | "time" 7 | ) 8 | 9 | type IncDataBinlogPrepareStruct struct{} 10 | 11 | type IncDataBinlogPrepareInterface interface { 12 | OneEventSql(block chan struct{}, trxEvent chan map[string][]string, dQ chan struct{}) 13 | BinlogStreamer() interface{} 14 | BinlogStreamerClose(cfg interface{}) 15 | } 16 | 17 | type OracleIncDataBinlogPrepareStruct struct { 18 | } 19 | 20 | //func (my *MySQLIncDataBinlogPrepareStruct) aa() { 21 | // ev := my.getEvent() 22 | //} 23 | 24 | //func (or *OracleIncDataBinlogPrepareStruct) aa() *replication.BinlogStreamer{} 25 | 26 | func (idbps IncDataBinlogPrepareStruct) IncBinlogPrepareInit(dbDrive, jdbcUrl string, gs map[string]string, checkTableMap map[string]int) IncDataBinlogPrepareInterface { 27 | var incDbps IncDataBinlogPrepareInterface 28 | if dbDrive == "mysql" { 29 | userPassword := strings.Split(strings.Split(jdbcUrl, "@")[0], ":") 30 | tmpa := strings.Split(jdbcUrl, "@")[1] 31 | rightKindex := strings.Index(tmpa, ")") 32 | leftKindex := strings.Index(tmpa, "(") + 1 33 | hostPort := strings.Split(tmpa[leftKindex:rightKindex], ":") 34 | port, _ := strconv.Atoi(hostPort[1]) 35 | binlogFile := gs["file"] 36 | pos, _ := strconv.Atoi(gs["position"]) 37 | server_id := uint32(time.Now().Unix()) 38 | 
incDbps = MySQLIncDataBinlogPrepareStruct{ 39 | Mytype: dbDrive, 40 | User: userPassword[0], 41 | Password: userPassword[1], 42 | Host: hostPort[0], 43 | Port: uint16(port), 44 | BinlogFile: binlogFile, 45 | Pos: uint32(pos), 46 | ServerId: server_id, 47 | TableList: checkTableMap, 48 | } 49 | } 50 | //if dbDrive == "oracle" { 51 | // incDbps = &OracleIncDataBinlogPrepareStruct{} 52 | //} 53 | return incDbps 54 | } 55 | func IncDataBinlog() IncDataBinlogPrepareStruct { 56 | return IncDataBinlogPrepareStruct{} 57 | } 58 | -------------------------------------------------------------------------------- /Oracle/instantclient_11_2/instantclient_11_2.tar.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GreatSQL/gt-checksum/d8fe5d8fc9e7daf0e8d26bd92f08f6944200640b/Oracle/instantclient_11_2/instantclient_11_2.tar.xz -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![](https://img.shields.io/badge/GreatSQL-官网-orange.svg)](https://greatsql.cn/) 2 | [![](https://img.shields.io/badge/GreatSQL-论坛-brightgreen.svg)](https://greatsql.cn/forum.php) 3 | [![](https://img.shields.io/badge/GreatSQL-博客-brightgreen.svg)](https://greatsql.cn/home.php?mod=space&uid=10&do=blog&view=me&from=space) 4 | [![](https://img.shields.io/badge/License-Apache_v2.0-blue.svg)](https://gitee.com/GreatSQL/GreatSQL/blob/master/LICENSE) 5 | [![](https://img.shields.io/badge/release-1.2.1-blue.svg)](https://gitee.com/GreatSQL/gt-checksum/releases/tag/1.2.1) 6 | 7 | # 关于 gt-checksum 8 | gt-checksum是GreatSQL社区开源的一款静态数据库校验修复工具,支持MySQL、Oracle等主流数据库。 9 | 10 | # 特性 11 | --- 12 | MySQL DBA最常用的数据校验&修复工具应该是Percona Toolkit中的pt-table-checksum和pt-table-sync这两个工具,不过这两个工具并不支持MySQL MGR架构,以及国内常见的上云下云业务场景,还有MySQL、Oracle间的异构数据库等多种场景。 13 | 14 | GreatSQL开源的gt-checksum工具可以满足上述多种业务需求场景,解决这些痛点。 15 | 16 | gt-checksum工具支持以下几种常见业务需求场景: 
17 | 1. **MySQL主从复制**:主从复制中断后较长时间才发现,且主从间差异的数据量太多,这时候通常基本上只能重建复制从库,如果利用pt-table-checksum先校验主从数据一致性后,再利用pt-table-sync工具修复差异数据,这个过程要特别久,时间代价太大。 18 | 2. **MySQL MGR组复制**:MySQL MGR因故崩溃整个集群报错退出,或某个节点异常退出,在恢复MGR集群时一般要面临着先检查各节点间数据一致性的需求,这时通常为了省事会选择其中一个节点作为主节点,其余从节点直接复制数据重建,这个过程要特别久,时间代价大。 19 | 3. **上云下云业务场景**:目前上云下云的业务需求很多,在这个过程中要进行大量的数据迁移及校验工作,如果出现字符集改变导致特殊数据出现乱码或其他的情况,如果数据迁移工具在迁移过程中出现bug或者数据异常而又迁移成功,此时都需要在迁移结束后进行一次数据校验才放心。 20 | 4. **异构迁移场景**:有时我们会遇到异构数据迁移场景,例如从Oracle迁移到MySQL,通常存在字符集不同,以及数据类型不同等情况,也需要在迁移结束后进行一次数据校验才放心。 21 | 5. **定期校验场景**:作为DBA在维护高可用架构中为了保证主节点出现异常后能够快速放心切换,就需要保证各节点间的数据一致性,需要定期执行数据校验工作。 22 | 23 | 以上这些场景,都可以利用gt-chcksum工具来满足。 24 | 25 | # 下载 26 | --- 27 | 可以 [这里](https://gitee.com/GreatSQL/gt-checksum/releases) 下载预编译好的二进制文件包,已经在Ubuntu、CentOS、RHEL等多个下测试通过。 28 | 29 | 如果需要校验Oracle数据库,则还需要先下载Oracle数据库相应版本的驱动程序,并配置驱动程序使之生效。例如:待校验的数据库为Oracle 11-2,则要下载Oracle 11-2的驱动程序,并使之生效,否则连接Oracle会报错。详细方法请见下方内容:[**下载配置Oracle驱动程序**](#%E4%B8%8B%E8%BD%BD%E9%85%8D%E7%BD%AEoracle%E9%A9%B1%E5%8A%A8%E7%A8%8B%E5%BA%8F)。 30 | 31 | # 快速运行 32 | --- 33 | ```shell 34 | # 不带任何参数 35 | shell> ./gt-checksum 36 | If no parameters are loaded, view the command with --help or -h 37 | 38 | # 查看版本号 39 | shell> ./gt-checksum -v 40 | gt-checksum version 1.2.1 41 | 42 | # 查看使用帮助 43 | shell> ./gt-checksum -h 44 | NAME: 45 | gt-checksum - A opensource table and data checksum tool by GreatSQL 46 | 47 | USAGE: 48 | gt-checksum [global options] command [command options] [arguments...] 49 | ... 
50 | 51 | # 数据库授权 52 | # 想要运行gt-checksum工具,需要至少授予以下几个权限 53 | # MySQL端 54 | # 1.全局权限 55 | # a.`REPLICATION CLIENT` 56 | # b.`SESSION_VARIABLES_ADMIN`,如果是MySQL 8.0版本的话,MySQL 5.7版本不做这个要求 57 | # 2.校验数据对象 58 | # a.如果`datafix=file`,则只需要`SELECT`权限 59 | # b.如果`datafix=table`,则需要`SELECT、INSERT、DELETE`权限,如果还需要修复表结构不一致的情况,则需要`ALTER`权限 60 | # 61 | # 假设现在要对db1.t1做校验和修复,则可授权如下 62 | 63 | mysql> GRANT REPLICATION CLIENT, SESSION_VARIABLES_ADMIN ON *.* to ...; 64 | mysql> GRANT SELECT, INSERT, DELETE ON db1.t1 to ...; 65 | 66 | # Oracle端 67 | # 1.全局权限 68 | # a.`SELECT ANY DICTIONARY` 69 | # 2.校验数据对象 70 | # a.如果`datafix=file`,则只需要`SELECT ANY TABLE`权限 71 | # b.如果`datafix=table`,则需要`SELECT ANY TABLE、INSERT ANY TABLE、DELETE ANY TABLE`权限 72 | 73 | # 指定配置文件,开始执行数据校验,示例: 74 | shell> ./gt-checksum -f ./gc.conf 75 | -- gt-checksum init configuration files -- 76 | -- gt-checksum init log files -- 77 | -- gt-checksum init check parameter -- 78 | -- gt-checksum init check table name -- 79 | -- gt-checksum init check table column -- 80 | -- gt-checksum init check table index column -- 81 | -- gt-checksum init source and dest transaction snapshoot conn pool -- 82 | -- gt-checksum init cehck table query plan and check data -- 83 | begin checkSum index table db1.t1 84 | [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████]113% task: 678/600 85 | table db1.t1 checksum complete 86 | 87 | ** gt-checksum Overview of results ** 88 | Check time: 73.81s (Seconds) 89 | Schema Table IndexCol checkMod Rows Differences Datafix 90 | db1 t1 ol_w_id,ol_d_id,ol_o_id,ol_number rows 5995934,5995918 yes file 91 | 92 | 93 | # 使用命令行传参方式执行数据校验 94 | shell> ./gt-checksum -S type=mysql,user=checksum,passwd=Checksum@123,host=172.16.0.1,port=3306,charset=utf8 -D type=mysql,user=checksum,passwd=Checksum@123,host=172.16.0.2,port=3306,charset=utf8 -t test.t2 -nit yes 95 | -- gt-checksum init configuration files -- 96 | -- gt-checksum init log files -- 97 | -- 
gt-checksum init check parameter -- 98 | -- gt-checksum init check table name -- 99 | -- gt-checksum init check table column -- 100 | -- gt-checksum init check table index column -- 101 | -- gt-checksum init source and dest transaction snapshoot conn pool -- 102 | -- gt-checksum init cehck table query plan and check data -- 103 | begin checkSum index table SCOTT.A5 104 | [█ ]100% task: 1/1 105 | table SCOTT.A5 checksum complete 106 | 107 | ** gt-checksum Overview of results ** 108 | Check time: 0.29s (Seconds) 109 | Schema Table IndexCol checkMod Rows Differences Datafix 110 | test t2 id rows 10,10 no file 111 | ``` 112 | 113 | # 下载配置Oracle驱动程序 114 | --- 115 | 如果需要校验Oracle数据库,则还需要先下载Oracle数据库相应版本的驱动程序。例如:待校验的数据库为Oracle 11-2,则要下载Oracle 11-2的驱动程序,并使之生效,否则连接Oracle会报错。 116 | 117 | ## 下载Oracle Instant Client 118 | 从 [https://www.oracle.com/database/technologies/instant-client/downloads.html](https://www.oracle.com/database/technologies/instant-client/downloads.html) 下载免费的Basic或Basic Light软件包。 119 | 120 | - oracle basic client, instantclient-basic-linux.x64-11.2.0.4.0.zip 121 | 122 | - oracle sqlplus, instantclient-sqlplus-linux.x64-11.2.0.4.0.zip 123 | 124 | - oracle sdk, instantclient-sdk-linux.x64-11.2.0.4.0.zip 125 | 126 | ## 配置oracle client并生效 127 | ```shell 128 | shell> unzip instantclient-basic-linux.x64-11.2.0.4.0.zip 129 | shell> unzip instantclient-sqlplus-linux.x64-11.2.0.4.0.zip 130 | shell> unzip instantclient-sdk-linux.x64-11.2.0.4.0.zip 131 | shell> mv instantclient_11_2 /usr/local 132 | shell> echo "export LD_LIBRARY_PATH=/usr/local/instantclient_11_2:$LD_LIBRARY_PATH" >> /etc/profile 133 | shell> source /etc/profile 134 | ``` 135 | 136 | # 源码编译 137 | gt-checksum工具采用GO语言开发,您可以自行编译生成二进制文件。 138 | 139 | 编译环境要求使用golang 1.17及以上版本。 140 | 141 | 请参考下面方法下载源码并进行编译: 142 | ```shell 143 | shell> git clone https://gitee.com/GreatSQL/gt-checksum.git 144 | shell> go build -o gt-checksum gt-checksum.go 145 | shell> chmod +x gt-checksum 146 | shell> mv gt-checksum 
/usr/local/bin 147 | ``` 148 | 149 | 也可以直接利用Docker环境编译,在已经准备好Docker运行环境的基础上,执行如下操作即可: 150 | ```shell 151 | shell> git clone https://gitee.com/GreatSQL/gt-checksum.git 152 | shell> cd gt-checksum 153 | shell> DOCKER_BUILDKIT=1 docker build --build-arg VERSION=v1.2.1 -f Dockerfile -o ./ . 154 | shell> cd gt-checksum-v1.2.1 155 | shell> ./gt-checksum -v 156 | gt-checksum version 1.2.1 157 | ``` 158 | 这就编译完成并可以开始愉快地玩耍了。 159 | 160 | # 使用文档 161 | --- 162 | - [gt-checksum manual](https://gitee.com/GreatSQL/gt-checksum/blob/master/docs/gt-checksum-manual.md) 163 | 164 | 165 | # 版本历史 166 | --- 167 | - [版本历史](https://gitee.com/GreatSQL/gt-checksum/blob/master/relnotes/CHANGELOG.zh-CN.md) 168 | 169 | 170 | # 已知缺陷 171 | --- 172 | 截止最新的1.2.1版本中,当表中有多行数据是完全重复的话,可能会导致校验结果不准确,详见 [已知缺陷](https://gitee.com/GreatSQL/gt-checksum/blob/master/docs/gt-checksum-manual.md#已知缺陷) 。 173 | 174 | # 问题反馈 175 | --- 176 | - [问题反馈 gitee](https://gitee.com/GreatSQL/gt-checksum/issues) 177 | 178 | 179 | # 联系我们 180 | --- 181 | 182 | 扫码关注微信公众号 183 | 184 | ![输入图片说明](https://images.gitee.com/uploads/images/2021/0802/141935_2ea2c196_8779455.jpeg "greatsql社区-wx-qrcode-0.5m.jpg") 185 | -------------------------------------------------------------------------------- /actions/binlogPrepare.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "github.com/go-mysql-org/go-mysql/mysql" 8 | "github.com/go-mysql-org/go-mysql/replication" 9 | ) 10 | 11 | func PareBinlog() { 12 | cfg := replication.BinlogSyncerConfig{ 13 | ServerID: 1613306, 14 | Flavor: "mysql", 15 | Host: "172.16.50.162", 16 | Port: 3306, 17 | User: "pcms", 18 | Password: "pcms@123", 19 | } 20 | syncer := replication.NewBinlogSyncer(cfg) 21 | // Start sync with specified binlog file and position 22 | // or you can start a gtid replication like 23 | 24 | //streamer, _ := syncer.StartSyncGTID() 25 | // the mysql GTID set likes this 
"de278ad0-2106-11e4-9f8e-6edd0ca20947:1-2" 26 | // the mariadb GTID set likes this "0-1-100" 27 | streamer, _ := syncer.StartSync(mysql.Position{"mysql-bin.000007", 651861}) 28 | for { 29 | ev, _ := streamer.GetEvent(context.Background()) 30 | var a string 31 | buf := bytes.NewBufferString(a) 32 | ev.Dump(buf) 33 | fmt.Println("--------") 34 | fmt.Println(buf) 35 | fmt.Println("--------") 36 | 37 | } 38 | 39 | } 40 | -------------------------------------------------------------------------------- /actions/checkSum.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "crypto/md5" 5 | "crypto/sha1" 6 | "fmt" 7 | "io" 8 | "math/rand" 9 | "os" 10 | "time" 11 | ) 12 | 13 | type CheckSumTypeStruct struct{} 14 | 15 | /* 16 | 对字符串进行MD5哈希 17 | */ 18 | func (csts CheckSumTypeStruct) CheckMd5(data string) string { 19 | t := md5.New() 20 | io.WriteString(t, data) 21 | return fmt.Sprintf("%x", t.Sum(nil)) 22 | } 23 | 24 | /* 25 | 对字符串进行SHA1哈希 26 | */ 27 | func (csts CheckSumTypeStruct) CheckSha1(data string) string { 28 | t := sha1.New() 29 | io.WriteString(t, data) 30 | return fmt.Sprintf("%x", t.Sum(nil)) 31 | } 32 | 33 | func (csts CheckSumTypeStruct) Arrcmap(src, dest []string) []string { 34 | msrc := make(map[string]byte) //按源数组建索引 35 | mall := make(map[string]byte) //源+目所有元素建索引 36 | var set []string //交集 37 | //1、源数组建立map 38 | for _, v := range src { 39 | msrc[v] = 0 40 | mall[v] = 0 41 | } 42 | for _, v := range dest { 43 | l := len(mall) 44 | mall[v] = 1 45 | if l != len(mall) { 46 | l = len(mall) 47 | } else { 48 | set = append(set, v) 49 | } 50 | } 51 | return set 52 | } 53 | 54 | /* 55 | 数据校验并输出差异性数据 56 | */ 57 | func (csts CheckSumTypeStruct) Arrcmp(src []string, dest []string) ([]string, []string) { //对比数据 58 | msrc := make(map[string]byte) //按目数组建索引 59 | mall := make(map[string]byte) //源+目所有元素建索引 并集 60 | var set []string //交集 61 | //1.目数组建立map 62 | for _, v := range dest { 63 | if v != "" { 
64 | msrc[v] = 0 65 | mall[v] = 0 66 | } 67 | } 68 | //2.源数组中,存不进去,即重复元素,所有存不进去的集合就是并集 69 | for _, v := range src { 70 | if v != "" { 71 | if val, ok := mall[v]; ok && val == 0 { 72 | set = append(set, v) 73 | } 74 | mall[v] = 1 75 | } 76 | } 77 | //3.遍历交集,在并集中找,找到就从并集中删,删完后就是补集(即并-交=所有变化的元素) 78 | for _, v := range set { 79 | delete(mall, v) 80 | } 81 | //4.此时,mall是补集,所有元素去源中找,找到就是删除的,找不到的必定能在目数组中找到,即新加的 82 | var added, deleted []string 83 | for v, _ := range mall { 84 | _, exist := msrc[v] 85 | if exist { 86 | deleted = append(deleted, v) 87 | } else { 88 | added = append(added, v) 89 | } 90 | } 91 | return added, deleted 92 | } 93 | 94 | /* 95 | 根据两个切片找到相同的字符 96 | */ 97 | func (csts CheckSumTypeStruct) Arrsame(src, dest []string) string { 98 | msrc := make(map[string]byte) //按源数组建索引 99 | mall := make(map[string]byte) //源+目所有元素建索引 100 | var set string //交集 101 | //1、源数组建立map 102 | for _, v := range src { 103 | msrc[v] = 0 104 | mall[v] = 0 105 | } 106 | for _, v := range dest { 107 | l := len(mall) 108 | mall[v] = 1 109 | if l != len(mall) { 110 | l = len(mall) 111 | } else { 112 | set = v 113 | } 114 | } 115 | return set 116 | } 117 | 118 | var defaultLetters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") 119 | 120 | // RandomString returns a random string with a fixed length 121 | //func (csts CheckSumTypeStruct) RandomString(n int, allowedChars ...[]rune) string { 122 | func (csts CheckSumTypeStruct) RandomString(n int, allowedChars ...[]rune) string { 123 | var letters []rune 124 | if len(allowedChars) == 0 { 125 | letters = defaultLetters 126 | } else { 127 | letters = allowedChars[0] 128 | } 129 | b := make([]rune, n) 130 | rand.Seed(time.Now().UnixNano()) 131 | for i := range b { 132 | b[i] = letters[rand.Intn(len(letters))] 133 | } 134 | return string(b) 135 | } 136 | 137 | /* 138 | 校验两个文件的md5值,是否一致 139 | */ 140 | func (csts CheckSumTypeStruct) FileMd5(f1 string) string { 141 | f, err := os.Open(f1) 142 | if err != nil { 143 | 
fmt.Println("Open", err) 144 | //return "", err 145 | } 146 | defer f.Close() 147 | md5hash := md5.New() 148 | if _, err = io.Copy(md5hash, f); err != nil { 149 | fmt.Println("Copy", err) 150 | //return "", err 151 | } 152 | md5Val := fmt.Sprintf("%x", md5hash.Sum(nil)) 153 | return md5Val 154 | } 155 | 156 | func CheckSum() *CheckSumTypeStruct { 157 | return &CheckSumTypeStruct{} 158 | } 159 | -------------------------------------------------------------------------------- /actions/differencesDataDispos.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | ) 8 | 9 | //解析binlog event生成回滚的sql语句 10 | var rollbackSQL = func(sl []string) []string { 11 | var newDelS []string 12 | for _, i := range sl { 13 | if strings.HasPrefix(i, "insert") { 14 | ii := strings.Replace(strings.Replace(i, "insert into", "delete from", 1), "values", "where", 1) 15 | newDelS = append(newDelS, ii) 16 | } 17 | if strings.HasPrefix(i, "update") { 18 | schemaTable := strings.TrimSpace(strings.Split(strings.Split(i, "where")[0], "update")[1]) 19 | e := strings.Split(strings.Split(i, "where")[1], "/*columnModify*/") 20 | oldrow := strings.Replace(e[0], "(", "", 1) 21 | newrow := strings.Replace(e[1], ");", "", 1) 22 | delSql := fmt.Sprintf("delete from %s where %s;", schemaTable, newrow) 23 | addSql := fmt.Sprintf("insert into %s values (%s);", schemaTable, oldrow) 24 | newDelS = append(newDelS, delSql, addSql) 25 | } 26 | if strings.HasPrefix(i, "delete") { 27 | ii := strings.Replace(strings.Replace(i, "delete from", "insert into", 1), "where", "values", 1) 28 | newDelS = append(newDelS, ii) 29 | } 30 | } 31 | return newDelS 32 | } 33 | 34 | //解析binlog event生成正序的sql语句 35 | var positiveSequenceSQL = func(sl []string) []string { 36 | var newDelS []string 37 | for _, i := range sl { 38 | if i != "" && strings.HasPrefix(i, "insert into") { 39 | newDelS = append(newDelS, i) 40 | } 41 | if i != 
"" && strings.HasPrefix(i, "delete") { 42 | newDelS = append(newDelS, i) 43 | } 44 | if i != "" && strings.HasPrefix(i, "update") { 45 | schemaTable := strings.TrimSpace(strings.Split(strings.Split(i, "where")[0], "update")[1]) 46 | e := strings.Split(i, "/*columnModify*/") 47 | delSql := fmt.Sprintf("delete from %s);", strings.Replace(e[0], "update ", "", 1)) 48 | newDelS = append(newDelS, delSql) 49 | addSql := fmt.Sprintf("insert into %s values (%s", schemaTable, e[1]) 50 | newDelS = append(newDelS, addSql) 51 | } 52 | } 53 | return newDelS 54 | } 55 | 56 | /* 57 | 针对全量、增量数据的差异做处理,生成add和delete 58 | */ 59 | func DifferencesDataDispos(SourceItemAbnormalDataChan chan SourceItemAbnormalDataStruct, addChan chan string, delChan chan string) { 60 | for { 61 | select { 62 | case aa := <-SourceItemAbnormalDataChan: 63 | addS, delS := CheckSum().Arrcmp(aa.sourceSqlGather, aa.destSqlGather) 64 | if len(addS) == 0 && len(delS) > 0 { 65 | sort.Slice(delS, func(i, j int) bool { 66 | return delS[i] > delS[j] 67 | }) 68 | dels := rollbackSQL(delS) 69 | fmt.Println(dels) 70 | } else if len(addS) > 0 && len(delS) > 0 { //针对目标端需要删除的事务进行回滚,针对事务生成回滚sql 71 | //此处需要将多余参数按照事务的方式进行倒叙 72 | sort.Slice(delS, func(i, j int) bool { 73 | return delS[i] > delS[j] 74 | }) 75 | dels := rollbackSQL(delS) 76 | fmt.Println(dels) 77 | adds := positiveSequenceSQL(addS) 78 | fmt.Println(adds) 79 | 80 | } else if len(addS) > 0 && len(delS) == 0 { 81 | fmt.Println("--1--", addS) 82 | adds := positiveSequenceSQL(addS) 83 | fmt.Println(adds) 84 | } 85 | } 86 | } 87 | } 88 | 89 | /* 90 | 针对差异数据,生成修复语句,并根据修复方式进行处理,通过对字符串做hash值,使用map进行group by去重 91 | */ 92 | func DataFixSql(addChan chan string, delChan chan string) { 93 | var ( 94 | delHashMap = make(map[string]string) 95 | addHashMap = make(map[string]string) 96 | ) 97 | 98 | for { 99 | select { 100 | case del := <-delChan: 101 | delStr := CheckSum().CheckSha1(del) 102 | if _, ok := delHashMap[delStr]; !ok { 103 | delHashMap[delStr] = "" 104 | } 105 | case 
add := <-addChan: 106 | addStr := CheckSum().CheckSha1(add) 107 | if _, ok := delHashMap[addStr]; !ok { 108 | addHashMap[addStr] = "" 109 | } 110 | } 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /actions/incDataDispos.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "crypto/md5" 5 | "encoding/json" 6 | "fmt" 7 | dbExec "gt-checksum/MySQL" 8 | "strings" 9 | ) 10 | 11 | //增量数据校验结构 12 | type IncDataDisposStruct struct { 13 | //mytype string //mysql体系的类型 是MySQL或miriadb 14 | //host string //数据库连接地址 15 | //user string //数据库user 16 | //password string //数据库password 17 | //port uint16 //数据库的端口 18 | //serverId uint32 //伪装slave的server id 19 | //binlogFile string //读取的增量binlog file文件 20 | //pos uint32 //读取的增量binlog pos点 21 | 22 | sdrive string //源驱动类型 23 | ddrive string //目驱动类型 24 | sJdbcUrl string 25 | dJdbcUrl string // 26 | sgs map[string]string //源端一致性点 27 | dgs map[string]string //目标端一致性点 28 | tableList map[string]int //校验的表列表 29 | //previousGtide string //当前的gtid集合 30 | } 31 | 32 | //存放异常数据的结构定义 33 | 34 | type SourceItemAbnormalDataStruct struct { 35 | sourceTrxType string //源端事务类型 36 | sourceSqlType string //源端sql类型 37 | sourceSqlGather []string 38 | destTrxType string //目标端事务类型 39 | destSqlType string //目标端sql类型 40 | destSqlGather []string //目标端sql集合 41 | } 42 | 43 | /* 44 | 根据源目标端的一致性点,读取源目标端的Event,解析sql语句,进行binlog event比对,出现差异则进行处理 45 | */ 46 | func (idds IncDataDisposStruct) Aa(fullDataCompletionStatus chan struct{}, cqMq chan SourceItemAbnormalDataStruct) { 47 | var ( 48 | sblockDone, dblockDone = make(chan struct{}, 1), make(chan struct{}, 1) 49 | s, d = make(chan map[string][]string, 1), make(chan map[string][]string, 1) 50 | strxEvent, dtrxEvent = make(map[string][]string, 1), make(map[string][]string, 1) 51 | trxCount uint64 = 1 52 | str, dtr []byte 53 | err error 54 | z = func(s chan map[string][]string, st 
*map[string][]string) { 55 | for { 56 | select { 57 | case *st = <-s: 58 | } 59 | } 60 | } 61 | //检测全量数据返回子线程退出信号 62 | x = func(f chan struct{}, d chan struct{}) { 63 | for { 64 | select { 65 | case _, ok := <-f: 66 | if ok { 67 | d <- struct{}{} 68 | f <- struct{}{} 69 | } 70 | } 71 | } 72 | } 73 | e = dbExec.IncDataBinlog().IncBinlogPrepareInit(idds.sdrive, idds.sJdbcUrl, idds.sgs, idds.tableList) 74 | f = dbExec.IncDataBinlog().IncBinlogPrepareInit(idds.ddrive, idds.dJdbcUrl, idds.dgs, idds.tableList) 75 | szQ, dzQ = make(chan struct{}, 1), make(chan struct{}, 1) 76 | ) 77 | 78 | //监测源目端binlog event的变化,以事务为单位 79 | go e.OneEventSql(sblockDone, s, szQ) 80 | go f.OneEventSql(dblockDone, d, dzQ) 81 | 82 | //读取源目端binlog event的事务 83 | go z(s, &strxEvent) 84 | go z(d, &dtrxEvent) 85 | 86 | //监测全量数据是否处理完成 87 | go x(fullDataCompletionStatus, szQ) 88 | go x(fullDataCompletionStatus, dzQ) 89 | 90 | for { 91 | _, ok := strxEvent["quit"] 92 | _, ok1 := dtrxEvent["quit"] 93 | if len(strxEvent) == 1 && len(dtrxEvent) == 1 && trxCount == 1 && !ok && !ok1 { //判断两端库的起点是否相同 94 | str, err = json.Marshal(strxEvent) 95 | if err != nil { 96 | fmt.Println(err) 97 | } 98 | dtr, err = json.Marshal(dtrxEvent) 99 | if err != nil { 100 | fmt.Println(err) 101 | } 102 | if md5.Sum(str) == md5.Sum(dtr) { 103 | strxEvent = make(map[string][]string, 1) 104 | dtrxEvent = make(map[string][]string, 1) 105 | str, dtr = []byte{}, []byte{} 106 | <-sblockDone 107 | <-dblockDone 108 | trxCount++ 109 | } else { 110 | for k, v := range dtrxEvent { 111 | for _, i := range v { 112 | var aa = SourceItemAbnormalDataStruct{ 113 | destTrxType: k, 114 | destSqlGather: []string{i}, 115 | } 116 | if strings.HasPrefix(i, "delete") { 117 | aa.destSqlType = "delete" 118 | } 119 | if strings.HasPrefix(i, "update") { 120 | aa.destSqlType = "update" 121 | } 122 | if strings.HasPrefix(i, "insert into") { 123 | aa.destSqlType = "insert" 124 | } 125 | aa.destSqlGather = []string{i} 126 | cqMq <- aa 127 | } 128 | } 129 | 
dtrxEvent = make(map[string][]string, 1) 130 | dtr = []byte{} 131 | <-dblockDone 132 | } 133 | } 134 | if len(strxEvent) == 1 && len(dtrxEvent) == 1 && trxCount > 1 && !ok1 && !ok { 135 | str, err = json.Marshal(strxEvent) 136 | if err != nil { 137 | fmt.Println(err) 138 | } 139 | dtr, err = json.Marshal(dtrxEvent) 140 | if err != nil { 141 | fmt.Println(err) 142 | } 143 | if md5.Sum(str) == md5.Sum(dtr) { 144 | strxEvent = make(map[string][]string, 1) 145 | dtrxEvent = make(map[string][]string, 1) 146 | str, dtr = []byte{}, []byte{} 147 | <-sblockDone 148 | <-dblockDone 149 | trxCount++ 150 | } else { 151 | var sk, dk string 152 | var sv, dv []string 153 | for k, v := range dtrxEvent { 154 | dk = k 155 | dv = v 156 | } 157 | for k, v := range strxEvent { 158 | sk = k 159 | sv = v 160 | } 161 | aa := SourceItemAbnormalDataStruct{ 162 | sourceTrxType: sk, 163 | sourceSqlGather: sv, 164 | destTrxType: dk, 165 | destSqlGather: dv, 166 | } 167 | 168 | cqMq <- aa 169 | dtrxEvent, strxEvent = make(map[string][]string, 1), make(map[string][]string, 1) 170 | str, dtr = []byte{}, []byte{} 171 | aa = SourceItemAbnormalDataStruct{} 172 | <-sblockDone 173 | <-dblockDone 174 | trxCount++ 175 | } 176 | } 177 | 178 | //读取源目端binlog的线程停止 179 | if ok && ok1 { 180 | fmt.Println("---退出__-") 181 | break 182 | } 183 | } 184 | } 185 | func IncDataDisops(sdbdrive, ddbdrive, sjdbcUrl, djdbcurl string, sgs, dgs map[string]string, tableList []string) *IncDataDisposStruct { 186 | checkTableMap := make(map[string]int, len(tableList)) 187 | for i := range tableList { 188 | checkTableMap[tableList[i]] = 0 189 | } 190 | return &IncDataDisposStruct{ 191 | sdrive: sdbdrive, 192 | ddrive: ddbdrive, 193 | sJdbcUrl: sjdbcUrl, 194 | dJdbcUrl: djdbcurl, 195 | sgs: sgs, 196 | dgs: dgs, 197 | tableList: checkTableMap, 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /actions/p_introduce.go: 
-------------------------------------------------------------------------------- 1 | package actions 2 | 3 | /* 4 | actions包为库表的具体操作,包含 5 | 创建人:梁行 6 | 创建时间:2022-10-13 7 | */ 8 | -------------------------------------------------------------------------------- /actions/rapirDML.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "gt-checksum/global" 7 | "os" 8 | "strings" 9 | ) 10 | 11 | type rapirSqlStruct struct { 12 | Drive string 13 | JDBC string 14 | } 15 | 16 | func isFile(file string) *os.File { 17 | sfile, err := os.Open(file) 18 | if err != nil && os.IsNotExist(err) { 19 | sfile, err = os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_CREATE, 0666) 20 | } else { 21 | os.Remove(file) 22 | sfile, err = os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0666) 23 | } 24 | if err != nil { 25 | fmt.Println("actions open datafix file fail. error msg is :", err) 26 | global.Wlog.Error("actions open datafix file fail. error msg is :", err) 27 | os.Exit(1) 28 | } 29 | return sfile 30 | } 31 | 32 | /* 33 | 向目标端执行修复sql语句 34 | */ 35 | func (rs rapirSqlStruct) execRapirSql(sqlstr []string, dbType string, logThreadSeq int64) error { 36 | //执行sql语句不记录binlog 37 | var ( 38 | vlog string 39 | ) 40 | vlog = fmt.Sprintf("(%d) Execute the repair statement on the target side for the current table.", logThreadSeq) 41 | global.Wlog.Info(vlog) 42 | db := dbOpenTest(rs.Drive, rs.JDBC) 43 | ctx := context.Background() 44 | conn, err := db.Conn(ctx) 45 | if err != nil { 46 | vlog = fmt.Sprintf("(%d) database create session connection fail. Error Info: %s", logThreadSeq, err) 47 | global.Wlog.Error(vlog) 48 | return err 49 | } 50 | defer conn.Close() 51 | if dbType == "mysql" { 52 | sql1 := "set session sql_log_bin=off" 53 | if _, err1 := conn.ExecContext(ctx, sql1); err1 != nil { 54 | vlog = fmt.Sprintf("(%d) actions prepare dataFix SQL fail. 
sql is:{%s}, error info is : {%s}", logThreadSeq, "set session sql_log_bin=off", err1) 55 | global.Wlog.Error(vlog) 56 | return err1 57 | } 58 | sql2 := "set autocommit = 0;" 59 | if _, err1 := conn.ExecContext(ctx, sql2); err1 != nil { 60 | vlog = fmt.Sprintf("(%d) actions prepare dataFix SQL fail. sql is:{%s}, error info is : {%s}", logThreadSeq, "set session sql_log_bin=off", err1) 61 | global.Wlog.Error(vlog) 62 | return err1 63 | } 64 | 65 | } 66 | for _, i := range sqlstr { 67 | if strings.HasPrefix(strings.ToUpper(i), "ALTER TABLE") { 68 | if _, err = db.Exec(i); err != nil { 69 | vlog = fmt.Sprintf("(%d) commit dataFix SQL fail. error info is {%s}", logThreadSeq, err) 70 | global.Wlog.Error(vlog) 71 | return err 72 | } 73 | } else { 74 | if _, err = conn.ExecContext(ctx, i); err != nil { 75 | vlog = fmt.Sprintf("(%d) prepare dataFix SQL fail.start rollback! sql is {%s}, error info is {%s}.", logThreadSeq, i, err) 76 | global.Wlog.Error(vlog) 77 | conn.ExecContext(ctx, "rollback") 78 | return err 79 | } 80 | } 81 | 82 | } 83 | vlog = fmt.Sprintf("(%d) start commit dataFix SQL.", logThreadSeq) 84 | global.Wlog.Info(vlog) 85 | if _, err = conn.ExecContext(ctx, "commit"); err != nil { 86 | vlog = fmt.Sprintf("(%d) commit dataFix SQL fail. error info is {%s}", logThreadSeq, err) 87 | global.Wlog.Error(vlog) 88 | return err 89 | } 90 | defer db.Close() 91 | return nil 92 | } 93 | 94 | /* 95 | 生成修复sql语句,并写入到文件中 96 | */ 97 | func (rs rapirSqlStruct) SqlFile(sfile *os.File, sql []string, logThreadSeq int64) error { //在/tmp/下创建数据修复文件,将在目标端数据修复的语句写入到文件中 98 | var ( 99 | vlog string 100 | sqlCommit []string 101 | ) 102 | vlog = fmt.Sprintf("(%d) Start writing repair statements to the repair file.", logThreadSeq) 103 | global.Wlog.Info(vlog) 104 | if strings.HasPrefix(strings.ToUpper(strings.Join(sql, ";")), "ALTER TABLE") { 105 | sqlCommit = sql 106 | } else { 107 | sqlCommit = []string{"begin;"} 108 | sqlCommit = append(sqlCommit, sql...) 
109 | sqlCommit = append(sqlCommit, "commit;") 110 | } 111 | _, err := FileOperate{File: sfile, BufSize: 1024 * 4 * 1024, SqlType: "sql"}.ConcurrencyWriteFile(sqlCommit) 112 | if err != nil { 113 | return err 114 | } 115 | vlog = fmt.Sprintf("(%d) Write the repair statement to the repair file successfully.", logThreadSeq) 116 | global.Wlog.Info(vlog) 117 | return nil 118 | } 119 | func ApplyDataFix(fixSql []string, datafixType string, sfile *os.File, ddrive, jdbc string, logThreadSeq int64) error { 120 | var ( 121 | err error 122 | rapirdml = rapirSqlStruct{ 123 | Drive: ddrive, 124 | JDBC: jdbc, 125 | } 126 | ) 127 | if datafixType == "file" { 128 | if err = rapirdml.SqlFile(sfile, fixSql, logThreadSeq); err != nil { 129 | return err 130 | } 131 | } 132 | if datafixType == "table" { 133 | if err = rapirdml.execRapirSql(fixSql, ddrive, logThreadSeq); err != nil { 134 | return err 135 | } 136 | } 137 | return nil 138 | } 139 | -------------------------------------------------------------------------------- /actions/schema_table_access_permissions.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "fmt" 5 | "gt-checksum/dbExec" 6 | "gt-checksum/global" 7 | ) 8 | 9 | /* 10 | 检查当前用户对该库表是否有响应的权限(权限包括:查询权限,flush_tables,session_variables_admin) 11 | */ 12 | func (stcls *schemaTable) GlobalAccessPriCheck(logThreadSeq, logThreadSeq2 int64) bool { 13 | var ( 14 | vlog string 15 | err error 16 | StableList, DtableList bool 17 | ) 18 | vlog = fmt.Sprintf("(%d) Start to get the source and target Global Access Permissions information and check whether they are consistent", logThreadSeq) 19 | global.Wlog.Info(vlog) 20 | tc := dbExec.TableColumnNameStruct{Schema: stcls.schema, Table: stcls.table, Drive: stcls.sourceDrive, Datafix: stcls.datefix} 21 | vlog = fmt.Sprintf("(%d) Start to get the source Global Access Permissions information and check whether they are consistent", logThreadSeq) 22 | 
global.Wlog.Debug(vlog) 23 | if StableList, err = tc.Query().GlobalAccessPri(stcls.sourceDB, logThreadSeq2); err != nil { 24 | return false 25 | } 26 | vlog = fmt.Sprintf("(%d) The Global Access Permission verification of the source DB is completed, and the status of the global access permission is {%v}.", logThreadSeq, StableList) 27 | global.Wlog.Debug(vlog) 28 | tc.Drive = stcls.destDrive 29 | vlog = fmt.Sprintf("(%d) Start to get the dest Global Access Permissions information and check whether they are consistent", logThreadSeq) 30 | global.Wlog.Debug(vlog) 31 | 32 | if DtableList, err = tc.Query().GlobalAccessPri(stcls.destDB, logThreadSeq2); err != nil { 33 | return false 34 | } 35 | vlog = fmt.Sprintf("(%d) The Global Access Permission verification of the dest DB is completed, and the status of the global access permission is {%v}.", logThreadSeq, DtableList) 36 | global.Wlog.Debug(vlog) 37 | if StableList && DtableList { 38 | vlog = fmt.Sprintf("(%d) The verification of the global access permission of the source and destination is completed", logThreadSeq) 39 | global.Wlog.Info(vlog) 40 | return true 41 | } 42 | vlog = fmt.Sprintf("(%d) Some global access permissions are missing at the source and destination, and verification cannot continue.", logThreadSeq) 43 | global.Wlog.Error(vlog) 44 | return false 45 | } 46 | func (stcls *schemaTable) TableAccessPriCheck(checkTableList []string, logThreadSeq, logThreadSeq2 int64) ([]string, []string, error) { 47 | var ( 48 | vlog string 49 | err error 50 | StableList, DtableList map[string]int 51 | newCheckTableList, abnormalTableList []string 52 | ) 53 | vlog = fmt.Sprintf("(%d) Start to get the source and target table access permissions information and check whether they are consistent", logThreadSeq) 54 | global.Wlog.Info(vlog) 55 | tc := dbExec.TableColumnNameStruct{Schema: stcls.schema, Table: stcls.table, Drive: stcls.sourceDrive} 56 | vlog = fmt.Sprintf("(%d) Start to get the source table access permissions 
information and check whether they are consistent", logThreadSeq) 57 | global.Wlog.Debug(vlog) 58 | if StableList, err = tc.Query().TableAccessPriCheck(stcls.sourceDB, checkTableList, stcls.datefix, logThreadSeq2); err != nil { 59 | return nil, nil, err 60 | } 61 | if len(StableList) == 0 { 62 | vlog = fmt.Sprintf("(%d) Complete the verification table permission verification of the source DB, the current verification table with permission is {%v}.", logThreadSeq, StableList) 63 | global.Wlog.Error(vlog) 64 | } else { 65 | vlog = fmt.Sprintf("(%d) Complete the verification table permission verification of the source DB, the current verification table with permission is {%v}.", logThreadSeq, StableList) 66 | global.Wlog.Debug(vlog) 67 | } 68 | 69 | tc.Drive = stcls.destDrive 70 | vlog = fmt.Sprintf("(%d) Start to get the dest table access permissions information and check whether they are consistent", logThreadSeq) 71 | global.Wlog.Debug(vlog) 72 | if DtableList, err = tc.Query().TableAccessPriCheck(stcls.destDB, checkTableList, stcls.datefix, logThreadSeq2); err != nil { 73 | return nil, nil, err 74 | } 75 | if len(DtableList) == 0 { 76 | vlog = fmt.Sprintf("(%d) Complete the verification table permission verification of the source DB, the current verification table with permission is {%v}.", logThreadSeq, DtableList) 77 | global.Wlog.Error(vlog) 78 | } else { 79 | vlog = fmt.Sprintf("(%d) Complete the verification table permission verification of the source DB, the current verification table with permission is {%v}.", logThreadSeq, DtableList) 80 | global.Wlog.Debug(vlog) 81 | } 82 | vlog = fmt.Sprintf("(%d) Start processing the difference of the table to be checked at the source and target.", logThreadSeq) 83 | global.Wlog.Debug(vlog) 84 | for k, _ := range StableList { 85 | if _, ok := DtableList[k]; ok { 86 | newCheckTableList = append(newCheckTableList, k) 87 | } else { 88 | abnormalTableList = append(abnormalTableList, k) 89 | } 90 | } 91 | vlog = 
fmt.Sprintf("(%d) The difference processing of the table to be checked at the source and target ends is completed. normal table message is {%s} num [%d] abnormal table message is {%s} num [%d]", logThreadSeq, newCheckTableList, len(newCheckTableList), abnormalTableList, len(abnormalTableList)) 92 | global.Wlog.Info(vlog) 93 | return newCheckTableList, abnormalTableList, nil 94 | } 95 | -------------------------------------------------------------------------------- /actions/table_count_check.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "fmt" 5 | "gt-checksum/dbExec" 6 | "gt-checksum/global" 7 | "math/rand" 8 | "os" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | func (sp *SchedulePlan) getErr(msg string, err error) { 14 | if err != nil { 15 | fmt.Println(err, ":", msg) 16 | os.Exit(1) 17 | } 18 | } 19 | 20 | /* 21 | 使用count(1)的方式进行校验 22 | */ 23 | func (sp *SchedulePlan) DoCountDataCheck() { 24 | var ( 25 | schema, table string 26 | stmpTableCount, dtmpTableCount uint64 27 | err error 28 | vlog string 29 | ) 30 | rand.Seed(time.Now().UnixNano()) 31 | logThreadSeq := rand.Int63() 32 | vlog = fmt.Sprintf("(%d) Start the table validation for the total number of rows ...", logThreadSeq) 33 | global.Wlog.Info(vlog) 34 | for k, v := range sp.tableIndexColumnMap { 35 | if sp.checkNoIndexTable == "no" && len(v) == 0 { 36 | continue 37 | } 38 | ki := strings.Split(k, "/*indexColumnType*/")[0] 39 | if strings.Contains(ki, "/*greatdbSchemaTable*/") { 40 | schema = strings.Split(ki, "/*greatdbSchemaTable*/")[0] 41 | table = strings.Split(ki, "/*greatdbSchemaTable*/")[1] 42 | } 43 | vlog = fmt.Sprintf("(%d) Check table %s.%s initialization single check row number.", logThreadSeq, schema, table) 44 | global.Wlog.Debug(vlog) 45 | 46 | sdb := sp.sdbPool.Get(logThreadSeq) 47 | //查询原目标端的表总行数,并生成调度计划 48 | idxc := dbExec.IndexColumnStruct{Schema: schema, Table: table, ColumnName: sp.columnName, Drivce: 
sp.sdrive} 49 | stmpTableCount, err = idxc.TableIndexColumn().TmpTableIndexColumnRowsCount(sdb, logThreadSeq) 50 | if err != nil { 51 | return 52 | } 53 | sp.sdbPool.Put(sdb, logThreadSeq) 54 | 55 | ddb := sp.ddbPool.Get(logThreadSeq) 56 | idxc.Drivce = sp.ddrive 57 | dtmpTableCount, err = idxc.TableIndexColumn().TmpTableIndexColumnRowsCount(ddb, logThreadSeq) 58 | if err != nil { 59 | return 60 | } 61 | sp.ddbPool.Put(ddb, logThreadSeq) 62 | 63 | //输出校验结果信息 64 | var pods = Pod{ 65 | Schema: schema, 66 | Table: table, 67 | CheckObject: sp.checkObject, 68 | CheckMod: sp.checkMod, 69 | } 70 | vlog = fmt.Sprintf("(%d) Start to verify the total number of rows of table %s.%s source and target ...", logThreadSeq, schema, table) 71 | global.Wlog.Debug(vlog) 72 | if stmpTableCount == dtmpTableCount { 73 | vlog = fmt.Sprintf("(%d) Verify that the total number of rows at the source and destination of table %s.%s is consistent", logThreadSeq, schema, table) 74 | global.Wlog.Debug(vlog) 75 | pods.Differences = "no" 76 | pods.Rows = fmt.Sprintf("%d,%d", stmpTableCount, dtmpTableCount) 77 | } else { 78 | vlog = fmt.Sprintf("(%d) Verify that the total number of rows at the source and destination of table %s.%s is inconsistent.", logThreadSeq, schema, table) 79 | global.Wlog.Debug(vlog) 80 | pods.Differences = "yes" 81 | pods.Rows = fmt.Sprintf("%d,%d", stmpTableCount, dtmpTableCount) 82 | } 83 | measuredDataPods = append(measuredDataPods, pods) 84 | vlog = fmt.Sprintf("(%d) Check table %s.%s The total number of rows at the source and target end has been checked.", logThreadSeq, schema, table) 85 | global.Wlog.Debug(vlog) 86 | } 87 | vlog = fmt.Sprintf("(%d) The total number of rows in the check table has been checked !!!", logThreadSeq) 88 | global.Wlog.Info(vlog) 89 | } 90 | -------------------------------------------------------------------------------- /actions/table_query_concurrency.go: -------------------------------------------------------------------------------- 1 | 
package actions 2 | 3 | import ( 4 | "fmt" 5 | "gt-checksum/global" 6 | "gt-checksum/inputArg" 7 | "math/rand" 8 | "os" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | type SchedulePlan struct { 14 | singleIndexChanRowCount, jointIndexChanRowCount, mqQueueDepth int 15 | schema, table string //待校验库名、表名 16 | columnName []string //待校验表的列名,有可能是多个 17 | tmpTableDataFileDir string //临时表文件生成的相对路径 18 | tableIndexColumnMap map[string][]string 19 | sdbPool, ddbPool *global.Pool 20 | datafixType string 21 | datafixSql string 22 | sdrive, ddrive string 23 | sfile *os.File 24 | checkMod, checkObject string 25 | checkNoIndexTable string //是否检查无索引表 26 | tableAllCol map[string]global.TableAllColumnInfoS 27 | ratio int 28 | file *os.File 29 | TmpFileName string 30 | bar *Bar 31 | fixTrxNum int 32 | chanrowCount, concurrency int //单次并发一次校验的行数 33 | TmpTablePath string 34 | smtype string //是源端还是目标端 35 | indexColumnType string 36 | pods *Pod 37 | tableMaxRows uint64 38 | sampDataGroupNumber int64 39 | djdbc string 40 | } 41 | 42 | /* 43 | 差异数据信息结构体 44 | */ 45 | type DifferencesDataStruct struct { 46 | Schema string //存在差异数据的库 47 | Table string //存在差异数据的表 48 | Spoint string //校验开始时的源端全局一致性点 49 | Dpoint string //校验开始时的目端全局一致性点 50 | //TableColumnInfo []map[string]string //该表的所有列信息,包括列类型 51 | TableColumnInfo global.TableAllColumnInfoS //该表的所有列信息,包括列类型 52 | SqlWhere map[string]string //差异数据查询的where 条件 53 | indexColumnType string //索引列类型 54 | } 55 | 56 | /* 57 | 查询索引列信息,并发执行调度生成 58 | */ 59 | func (sp *SchedulePlan) Schedulingtasks() { 60 | sp.bar = &Bar{} 61 | rand.Seed(time.Now().UnixNano()) 62 | for k, v := range sp.tableIndexColumnMap { 63 | //是否校验无索引表 64 | if sp.checkNoIndexTable == "no" && len(v) == 0 { 65 | continue 66 | } 67 | sp.file, _ = os.OpenFile(sp.TmpFileName, os.O_CREATE|os.O_RDWR, 0777) 68 | if strings.Contains(k, "/*indexColumnType*/") { 69 | ki := strings.Split(k, "/*indexColumnType*/")[0] 70 | sp.indexColumnType = strings.Split(k, "/*indexColumnType*/")[1] 71 | if 
strings.Contains(ki, "/*greatdbSchemaTable*/") { 72 | sp.schema = strings.Split(ki, "/*greatdbSchemaTable*/")[0] 73 | sp.table = strings.Split(ki, "/*greatdbSchemaTable*/")[1] 74 | } 75 | } else { 76 | if strings.Contains(k, "/*greatdbSchemaTable*/") { 77 | sp.schema = strings.Split(k, "/*greatdbSchemaTable*/")[0] 78 | sp.table = strings.Split(k, "/*greatdbSchemaTable*/")[1] 79 | } 80 | } 81 | if len(v) == 0 { //校验无索引表 82 | if sp.singleIndexChanRowCount <= sp.jointIndexChanRowCount { 83 | sp.chanrowCount = sp.singleIndexChanRowCount 84 | } else { 85 | sp.chanrowCount = sp.jointIndexChanRowCount 86 | } 87 | logThreadSeq := rand.Int63() 88 | sp.SingleTableCheckProcessing(sp.chanrowCount, logThreadSeq) 89 | } else { //校验有索引的表 90 | if len(v) > 1 { //根据索引列数量觉得chanrowCount数 91 | sp.chanrowCount = sp.jointIndexChanRowCount 92 | } else { 93 | sp.chanrowCount = sp.singleIndexChanRowCount 94 | } 95 | sp.columnName = v 96 | fmt.Println(fmt.Sprintf("begin checkSum index table %s.%s", sp.schema, sp.table)) 97 | sp.doIndexDataCheck() 98 | fmt.Println() 99 | fmt.Println(fmt.Sprintf("table %s.%s checksum complete", sp.schema, sp.table)) 100 | } 101 | sp.file.Close() 102 | os.Remove(sp.TmpFileName) 103 | } 104 | } 105 | 106 | func CheckTableQuerySchedule(sdb, ddb *global.Pool, tableIndexColumnMap map[string][]string, tableAllCol map[string]global.TableAllColumnInfoS, m inputArg.ConfigParameter) *SchedulePlan { 107 | return &SchedulePlan{ 108 | concurrency: m.SecondaryL.RulesV.ParallelThds, 109 | sdbPool: sdb, 110 | ddbPool: ddb, 111 | singleIndexChanRowCount: m.SecondaryL.RulesV.ChanRowCount, 112 | jointIndexChanRowCount: m.SecondaryL.RulesV.ChanRowCount, 113 | tableIndexColumnMap: tableIndexColumnMap, 114 | tableAllCol: tableAllCol, 115 | datafixType: m.SecondaryL.RepairV.Datafix, 116 | datafixSql: m.SecondaryL.RepairV.FixFileName, 117 | sdrive: m.SecondaryL.DsnsV.SrcDrive, 118 | ddrive: m.SecondaryL.DsnsV.DestDrive, 119 | mqQueueDepth: m.SecondaryL.RulesV.QueueSize, 120 | 
checkNoIndexTable: m.SecondaryL.SchemaV.CheckNoIndexTable, 121 | checkMod: m.SecondaryL.RulesV.CheckMode, 122 | ratio: m.SecondaryL.RulesV.Ratio, 123 | sfile: m.SecondaryL.RepairV.FixFileFINE, 124 | checkObject: m.SecondaryL.RulesV.CheckObject, 125 | TmpFileName: m.NoIndexTableTmpFile, 126 | fixTrxNum: m.SecondaryL.RepairV.FixTrxNum, 127 | djdbc: m.SecondaryL.DsnsV.DestJdbc, 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /actions/tmp_file_io_operate.go: -------------------------------------------------------------------------------- 1 | package actions 2 | 3 | import ( 4 | "bufio" 5 | "crypto/md5" 6 | "encoding/hex" 7 | "fmt" 8 | "gt-checksum/global" 9 | "io" 10 | "math" 11 | "os" 12 | "strings" 13 | "sync" 14 | ) 15 | 16 | /* 17 | 文件io操作,并行写入和读取文件 18 | */ 19 | 20 | var ( 21 | mutex sync.Mutex 22 | ) 23 | 24 | type FileOperate struct { 25 | File *os.File 26 | BufSize int 27 | SqlType string 28 | fileName string 29 | } 30 | 31 | /* 32 | 文件并发写入 33 | */ 34 | func (f FileOperate) ConcurrencyWriteFile(writeString []string) ([]string, error) { 35 | var ( 36 | c string 37 | md5Slice []string 38 | event string 39 | vlog string 40 | ) 41 | bufWriter := bufio.NewWriterSize(f.File, f.BufSize) 42 | event = fmt.Sprintf("[%s]", "write_file") 43 | for _, i := range writeString { 44 | sum := md5.Sum([]byte(i)) 45 | sumS := hex.EncodeToString(sum[:]) 46 | md5Slice = append(md5Slice, sumS) 47 | if f.SqlType == "sql" { 48 | c = fmt.Sprintf("%s \n", i) 49 | } else { 50 | c = fmt.Sprintf("%s %s %s \n", sumS, f.SqlType, i) 51 | } 52 | mutex.Lock() 53 | vlog = fmt.Sprintf("() %s Start to write data to file %s, the written content is {%v}", event, f.fileName, c) 54 | global.Wlog.Debug(vlog) 55 | wc, err := bufWriter.WriteString(c) 56 | bufWriter.Flush() 57 | if err != nil { 58 | vlog = fmt.Sprintf("() %s File %s failed to write content %s, the error message is {%v}", event, f.fileName, c, err) 59 | global.Wlog.Error(vlog) 60 | return 
nil, err 61 | } 62 | if wc != len(c) { 63 | vlog = fmt.Sprintf("() %s The number of written bytes of file %s does not match the number of successful bytes, the number of written bytes is {%v}, and the number of successful bytes is {%v}", event, f.fileName, len(c), wc) 64 | global.Wlog.Error(vlog) 65 | return nil, err 66 | } 67 | mutex.Unlock() 68 | vlog = fmt.Sprintf("() %s The data in file %s is successfully written.", event, f.fileName) 69 | global.Wlog.Debug(vlog) 70 | } 71 | 72 | return md5Slice, nil 73 | } 74 | 75 | func ProcessChunk(chunk []byte, linesPool *sync.Pool, stringPool *sync.Pool, m map[string]string, c chan<- map[string]string) { 76 | var ( 77 | wg2 sync.WaitGroup 78 | ) 79 | logs := stringPool.Get().(string) 80 | logs = string(chunk) 81 | linesPool.Put(chunk) 82 | logsSlice := strings.Split(logs, "\n") 83 | stringPool.Put(logs) 84 | chunkSize := 300 85 | n := len(logsSlice) 86 | noOfThread := n / chunkSize 87 | if n%chunkSize != 0 { 88 | noOfThread++ 89 | } 90 | for i := 0; i < (noOfThread); i++ { 91 | wg2.Add(1) 92 | go func(s int, e int) { 93 | defer wg2.Done() //to avaoid deadlocks 94 | for i := s; i < e; i++ { 95 | text := logsSlice[i] 96 | if len(text) == 0 { 97 | continue 98 | } 99 | logSlice := strings.SplitN(text, " ", 3) 100 | md5Sum := logSlice[0] 101 | sqlType := logSlice[1] 102 | if v, ok := m[md5Sum]; ok && v == sqlType { 103 | //fmt.Println(logSlice[2]) 104 | c <- map[string]string{logSlice[2]: v} 105 | } 106 | } 107 | }(i*chunkSize, int(math.Min(float64((i+1)*chunkSize), float64(len(logsSlice))))) 108 | } 109 | wg2.Wait() 110 | logsSlice = nil 111 | } 112 | 113 | //func ProcessStatus(chunk []byte, linesPool *sync.Pool, stringPool *sync.Pool, m string) bool { 114 | // 115 | // return exist 116 | //} 117 | 118 | func (f FileOperate) ConcurrencyReadFile(F map[string]string, c chan map[string]string) error { 119 | var err error 120 | //sync pools to reuse the memory and decrease the preassure on //Garbage Collector 121 | linesPool := 
sync.Pool{New: func() interface{} { 122 | lines := make([]byte, 500*1024) 123 | return lines 124 | }} 125 | stringPool := sync.Pool{New: func() interface{} { 126 | lines := "" 127 | return lines 128 | }} 129 | //slicePool := sync.Pool{New: func() interface{} { 130 | // lines := make([]string, 100) 131 | // return lines 132 | //}} 133 | file, _ := os.Open(f.fileName) 134 | bufReader := bufio.NewReader(file) 135 | var wg sync.WaitGroup //wait group to keep track off all threads 136 | for { 137 | var n int 138 | var nextUntillNewline []byte 139 | buf := linesPool.Get().([]byte) 140 | n, err = bufReader.Read(buf) 141 | buf = buf[:n] 142 | if n == 0 { 143 | if err == io.EOF { 144 | break 145 | } 146 | if err != nil { 147 | fmt.Println("--error--", err) 148 | break 149 | } 150 | close(c) 151 | return err 152 | } 153 | nextUntillNewline, err = bufReader.ReadBytes('\n') //read entire line 154 | if err != io.EOF { 155 | buf = append(buf, nextUntillNewline...) 156 | } 157 | wg.Add(1) 158 | go func() { 159 | //process each chunk concurrently 160 | //start -> log start time, end -> log end time 161 | ProcessChunk(buf, &linesPool, &stringPool, F, c) 162 | wg.Done() 163 | }() 164 | } 165 | wg.Wait() 166 | return nil 167 | } 168 | 169 | //写文件内容之前需要判断一下文件内容中是否存在,不存在则写入 170 | func (f FileOperate) ReadWriteFile(F ...interface{}) ([]string, []string) { 171 | var err error 172 | var exist, noexit []string 173 | //sync pools to reuse the memory and decrease the preassure on //Garbage Collector 174 | linesPool := sync.Pool{New: func() interface{} { 175 | lines := make([]byte, 500*1024) 176 | return lines 177 | }} 178 | stringPool := sync.Pool{New: func() interface{} { 179 | lines := "" 180 | return lines 181 | }} 182 | //slicePool := sync.Pool{New: func() interface{} { 183 | // lines := make([]string, 100) 184 | // return lines 185 | //}} 186 | fp, _ := os.Open(f.fileName) 187 | bufReader := bufio.NewReader(fp) 188 | defer fp.Close() 189 | var wg sync.WaitGroup //wait group to keep 
track off all threads 190 | for { 191 | var n int 192 | var nextUntillNewline []byte 193 | buf := linesPool.Get().([]byte) 194 | n, err = bufReader.Read(buf) 195 | buf = buf[:n] 196 | if n == 0 { 197 | if err == io.EOF { 198 | break 199 | } 200 | if err != nil { 201 | fmt.Println("--error--", err) 202 | break 203 | } 204 | return exist, noexit 205 | } 206 | nextUntillNewline, err = bufReader.ReadBytes('\n') //read entire line 207 | if err != io.EOF { 208 | buf = append(buf, nextUntillNewline...) 209 | } 210 | wg.Add(1) 211 | go func() { 212 | //process each chunk concurrently 213 | //start -> log start time, end -> log end time 214 | var ( 215 | wg2 sync.WaitGroup 216 | ) 217 | logs := stringPool.Get().(string) 218 | logs = string(buf) 219 | linesPool.Put(buf) 220 | logsSlice := strings.Split(logs, "\n") 221 | stringPool.Put(logs) 222 | chunkSize := 300 223 | n := len(logsSlice) 224 | noOfThread := n / chunkSize 225 | if n%chunkSize != 0 { 226 | noOfThread++ 227 | } 228 | for i := 0; i < (noOfThread); i++ { 229 | wg2.Add(1) 230 | go func(s int, e int) { 231 | defer wg2.Done() //to avaoid deadlocks 232 | for i := s; i < e; i++ { 233 | text := logsSlice[i] 234 | if len(text) == 0 { 235 | continue 236 | } 237 | logSlice := strings.SplitN(text, " ", 3) 238 | exist, noexit = nil, nil 239 | for _, vv := range F { 240 | for _, vvi := range vv.([]map[string]string) { 241 | if strings.Split(logSlice[0], ",")[0] == vvi["columnName"] { 242 | fmt.Println("del--:", fmt.Sprintf("%s,%s)", vvi["columnName"], vvi["count"])) 243 | exist = append(exist, fmt.Sprintf("%s,%s)", vvi["columnName"], vvi["count"])) 244 | } else { 245 | fmt.Println("add--:", fmt.Sprintf("%s,%s)", vvi["columnName"], vvi["count"])) 246 | noexit = append(noexit, fmt.Sprintf("%s,%s)", vvi["columnName"], vvi["count"])) 247 | } 248 | } 249 | 250 | } 251 | } 252 | }(i*chunkSize, int(math.Min(float64((i+1)*chunkSize), float64(len(logsSlice))))) 253 | } 254 | wg2.Wait() 255 | logsSlice = nil 256 | wg.Done() 257 | }() 
258 | } 259 | wg.Wait() 260 | return exist, noexit 261 | } 262 | -------------------------------------------------------------------------------- /build-arm.sh: -------------------------------------------------------------------------------- 1 | set -x 2 | 3 | export PATH=$PATH:/usr/local/go/bin 4 | export GO111MODULE=on 5 | export GOPROXY=https://goproxy.cn 6 | export CXXFLAGS="-stdlib=libstdc++" CC=/usr/bin/gcc CXX=/usr/bin/g++ 7 | 8 | vs=`cat ./inputArg/flagHelp.go| grep "app.Version"|awk -F "=" '{print $2}'|sed 's/\"//g'|sed 's/\/\/版本//g'|sed 's/ //g'` 9 | OracleDrive="instantclient_11_2" 10 | if [ ! -d "/usr/lcoal/$OracleDrive" ];then 11 | cp -rpf Oracle/$OracleDrive /usr/lcoal/ 12 | fi 13 | export LD_LIBRARY_PATH=/usr/local/$OracleDrive:$LD_LIBRARY_PATH 14 | 15 | go build -o gt-checksum greatdbCheck.go 16 | mkdir gt-checksum-${vs}-linux-aarch64 17 | cp -rpf Oracle/${OracleDrive} gt-checksum gc.conf gc.conf-simple relnotes docs README.md gt-checksum-${vs}-linux-aarch64 18 | tar zcf gt-checksum-${vs}-linux-aarch64.tar.gz gt-checksum-${vs}-linux-aarch64 19 | mkdir binary 20 | mv gt-checksum-${vs}-linux-aarch64.tar.gz binary 21 | -------------------------------------------------------------------------------- /build-x86.sh: -------------------------------------------------------------------------------- 1 | set -x 2 | 3 | export PATH=$PATH:/usr/local/go/bin 4 | export GO111MODULE=on 5 | export GOPROXY=https://goproxy.cn 6 | export CXXFLAGS="-stdlib=libstdc++" CC=/usr/bin/gcc CXX=/usr/bin/g++ 7 | 8 | vs=`cat ./inputArg/flagHelp.go| grep "app.Version"|awk -F "=" '{print $2}'|sed 's/\"//g'|sed 's/\/\/版本//g'|sed 's/ //g'` 9 | OracleDrive="instantclient_11_2" 10 | if [ ! 
-d "/usr/lcoal/${OracleDrive}" ];then 11 | cp -rpf Oracle/${OracleDrive} /usr/lcoal/ 12 | fi 13 | export LD_LIBRARY_PATH=/usr/local/${OracleDrive}:$LD_LIBRARY_PATH 14 | 15 | go build -o gt-checksum greatdbCheck.go 16 | chmod +x gt-checksum 17 | mkdir gt-checksum-${vs}-linux-x86-64 18 | cp -rpf Oracle/${OracleDrive} gt-checksum gc.conf gc.conf-simple relnotes docs README.md gt-checksum-${vs}-linux-x86-64 19 | tar zcf gt-checksum-${vs}-linux-x86-64.tar.gz gt-checksum-${vs}-linux-x86-64 20 | mkdir binary 21 | mv gt-checksum-${vs}-linux-x86-64.tar.gz binary -------------------------------------------------------------------------------- /dataDispos/dataMerge.go: -------------------------------------------------------------------------------- 1 | package dataDispos 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | type DataInfo struct { 9 | ChanQueueDepth int 10 | } 11 | 12 | /* 13 | 两个管道的正序数据进行合并排序到一个管道 14 | */ 15 | func (sp *DataInfo) ChangeMerge(ch1 <-chan map[string]interface{}, ch2 <-chan map[string]interface{}) chan map[string]interface{} { 16 | var cc = make(chan map[string]interface{}, sp.ChanQueueDepth) 17 | go func() { 18 | var ok1, ok2 bool 19 | var v1, v2 map[string]interface{} 20 | var c1, c2 = true, true 21 | for { 22 | if c1 { 23 | v1, ok1 = <-ch1 24 | } 25 | if c2 { 26 | v2, ok2 = <-ch2 27 | } 28 | if ok1 || ok2 { 29 | if ok1 && ok2 { 30 | var v11, v22 string 31 | for k, _ := range v1 { 32 | v11 = fmt.Sprintf("%v", k) 33 | } 34 | for k, _ := range v2 { 35 | v22 = fmt.Sprintf("%v", k) 36 | } 37 | if strings.Compare(v11, v22) == -1 { 38 | c1 = true 39 | c2 = false 40 | cc <- v1 41 | } else if strings.Compare(v11, v22) == 0 { 42 | c1 = true 43 | c2 = true 44 | cc <- v1 45 | } else { 46 | c1 = false 47 | c2 = true 48 | cc <- v2 49 | } 50 | } else if ok1 && !ok2 { 51 | c1 = true 52 | c2 = false 53 | cc <- v1 54 | } else if !ok1 && ok2 { 55 | c1 = false 56 | c2 = true 57 | cc <- v2 58 | } 59 | } else { 60 | cc <- map[string]interface{}{"END": "0"} 61 | 
close(cc) 62 | break 63 | } 64 | } 65 | }() 66 | return cc 67 | } 68 | 69 | /* 70 | 两个管道的条件,按照指定字符进行先后顺序进行合并,先梳理delete,再梳理insert 71 | */ 72 | func (sp *DataInfo) Merge(ch1 <-chan map[string]interface{}, ch2 <-chan map[string]interface{}, beginST, endST string) chan map[string]interface{} { 73 | var cc = make(chan map[string]interface{}, sp.ChanQueueDepth) 74 | 75 | go func() { 76 | var ok1, ok2 bool 77 | var v1, v2 map[string]interface{} 78 | var c1, c2 = true, true 79 | for { 80 | if c1 { 81 | v1, ok1 = <-ch1 82 | } 83 | if c2 { 84 | v2, ok2 = <-ch2 85 | } 86 | if ok1 || ok2 { 87 | if ok1 && ok2 { 88 | var v11, v22 string 89 | for k, _ := range v1 { 90 | v11 = fmt.Sprintf("%v", k) 91 | } 92 | for k, _ := range v2 { 93 | v22 = fmt.Sprintf("%v", k) 94 | } 95 | if strings.HasPrefix(strings.TrimSpace(v11), beginST) && strings.HasPrefix(strings.TrimSpace(v22), beginST) { 96 | c1 = true 97 | c2 = true 98 | cc <- v1 99 | cc <- v2 100 | 101 | } 102 | if strings.HasPrefix(strings.TrimSpace(v11), beginST) && !strings.HasPrefix(strings.TrimSpace(v22), beginST) { 103 | c1 = true 104 | c2 = false 105 | cc <- v1 106 | } 107 | if strings.Compare(v11, v22) == -1 { 108 | c1 = true 109 | c2 = false 110 | cc <- v1 111 | } else if strings.Compare(v11, v22) == 0 { 112 | c1 = true 113 | c2 = true 114 | cc <- v1 115 | } else { 116 | c1 = false 117 | c2 = true 118 | cc <- v2 119 | } 120 | } else if ok1 && !ok2 { 121 | c1 = true 122 | c2 = false 123 | cc <- v1 124 | } else if !ok1 && ok2 { 125 | c1 = false 126 | c2 = true 127 | cc <- v2 128 | } 129 | } else { 130 | cc <- map[string]interface{}{"END": "0"} 131 | close(cc) 132 | break 133 | } 134 | } 135 | }() 136 | return cc 137 | } 138 | -------------------------------------------------------------------------------- /dbExec/DataFixSql.go: -------------------------------------------------------------------------------- 1 | package dbExec 2 | 3 | import ( 4 | "database/sql" 5 | mysql "gt-checksum/MySQL" 6 | oracle "gt-checksum/Oracle" 7 | 
) 8 | 9 | type DataAbnormalFixStruct struct { 10 | Schema string 11 | Table string 12 | RowData string 13 | SourceDevice string 14 | DestDevice string 15 | Sqlwhere string 16 | IndexColumnType string 17 | ColData []map[string]string 18 | IndexType string 19 | IndexColumn []string 20 | DatafixType string 21 | } 22 | type DataAbnormalFixInterface interface { 23 | FixInsertSqlExec(db *sql.DB, sourceDrive string, logThreadSeq int64) (string, error) 24 | FixDeleteSqlExec(db *sql.DB, sourceDrive string, logThreadSeq int64) (string, error) 25 | FixAlterIndexSqlExec(e, f []string, si map[string][]string, sourceDrive string, logThreadSeq int64) []string 26 | FixAlterColumnSqlDispos(alterType string, columnDataType []string, columnSeq int, lastColumn, curryColumn string, logThreadSeq int64) string 27 | FixAlterColumnSqlGenerate(modifyColumn []string, logThreadSeq int64) []string 28 | } 29 | 30 | func (dafs DataAbnormalFixStruct) DataAbnormalFix() DataAbnormalFixInterface { 31 | var tqaci DataAbnormalFixInterface 32 | if dafs.DestDevice == "mysql" { 33 | tqaci = &mysql.MysqlDataAbnormalFixStruct{ 34 | Schema: dafs.Schema, 35 | Table: dafs.Table, 36 | Sqlwhere: dafs.Sqlwhere, 37 | RowData: dafs.RowData, 38 | SourceDevice: dafs.SourceDevice, 39 | IndexColumnType: dafs.IndexColumnType, 40 | ColData: dafs.ColData, 41 | IndexType: dafs.IndexType, 42 | IndexColumn: dafs.IndexColumn, 43 | } 44 | } 45 | if dafs.DestDevice == "godror" { 46 | tqaci = &oracle.OracleDataAbnormalFixStruct{ 47 | Schema: dafs.Schema, 48 | Table: dafs.Table, 49 | Sqlwhere: dafs.Sqlwhere, 50 | RowData: dafs.RowData, 51 | SourceDevice: dafs.SourceDevice, 52 | IndexColumnType: dafs.IndexColumnType, 53 | ColData: dafs.ColData, 54 | IndexType: dafs.IndexType, 55 | IndexColumn: dafs.IndexColumn, 56 | DatafixType: dafs.DatafixType, 57 | } 58 | } 59 | return tqaci 60 | } 61 | -------------------------------------------------------------------------------- /dbExec/TableQueryAllColumn.go: 
-------------------------------------------------------------------------------- 1 | package dbExec 2 | 3 | // 4 | //import ( 5 | // "database/sql" 6 | // "fmt" 7 | // "gt-checksum/global" 8 | // "strings" 9 | //) 10 | // 11 | //type TableQueryAllColumnStruct struct{} 12 | // 13 | //type TableQueryAllColumnInterface interface { 14 | // GeneratingQuerySql() string 15 | // GeneratingQueryCriteria(db *sql.DB) (string, error) 16 | //} 17 | // 18 | //type MySQLAllColumnStruct struct { 19 | // schema string 20 | // table string 21 | // tableColumn []map[string]string 22 | // sqlwhere string 23 | // colData []map[string]interface{} 24 | //} 25 | //type OracleAllColumnStruct struct { 26 | // schema string 27 | // table string 28 | // tableColumn []map[string]string 29 | // sqlwhere string 30 | // colData []map[string]interface{} 31 | //} 32 | // 33 | //func (my *MySQLAllColumnStruct) performQueryConditions(db *sql.DB, sqlstr string) (string, error) { 34 | // var rows *sql.Rows 35 | // var rowDataString []string 36 | // rows, err := db.Query(sqlstr) 37 | // if err != nil { 38 | // fmt.Println(err) 39 | // } 40 | // global.Wlog.Debug("GreatdbCheck exec sql: \"", sqlstr, "\" at the MySQL") 41 | // columns, err := rows.Columns() 42 | // if err != nil { 43 | // global.Wlog.Error("GreatdbCheck exec sql fail. sql: ", sqlstr, "error info: ", err) 44 | // return "", err 45 | // } 46 | // valuePtrs := make([]interface{}, len(columns)) 47 | // values := make([]interface{}, len(columns)) 48 | // for rows.Next() { 49 | // var tmpaaS []string 50 | // for i := 0; i < len(columns); i++ { 51 | // valuePtrs[i] = &values[i] 52 | // } 53 | // rows.Scan(valuePtrs...) 
54 | // for i := range columns { 55 | // var v interface{} 56 | // val := values[i] 57 | // b, ok := val.([]byte) 58 | // if ok { 59 | // v = string(b) 60 | // } else { 61 | // v = val 62 | // } 63 | // tmpaaS = append(tmpaaS, fmt.Sprintf("%v", v)) 64 | // } 65 | // tmpaa := strings.Join(tmpaaS, "/*go actions columnData*/") 66 | // rowDataString = append(rowDataString, tmpaa) 67 | // } 68 | // rows.Close() 69 | // return strings.Join(rowDataString, "/*go actions rowData*/"), nil 70 | //} 71 | // 72 | ///* 73 | // 该函数用于需要查询源目表端数据库校验块数据,查询数据生成带有greatdbCheck标识的数据块 74 | //*/ 75 | //func (my *MySQLAllColumnStruct) GeneratingQueryCriteria(db *sql.DB) (string, error) { 76 | // var columnNameSeq []string 77 | // //查询该表的列名和列信息 78 | // var sqlStr string 79 | // //处理mysql查询时间列时数据带时区问题 2021-01-23 10:16:29 +0800 CST 80 | // for i := range my.tableColumn { 81 | // var tmpcolumnName string 82 | // tmpcolumnName = my.tableColumn[i]["columnName"] 83 | // if strings.ToUpper(my.tableColumn[i]["dataType"]) == "DATETIME" { 84 | // tmpcolumnName = fmt.Sprintf("date_format(%s,'%%Y-%%m-%%d %%H:%%i:%%s')", my.tableColumn[i]["columnName"]) 85 | // } 86 | // if strings.Contains(strings.ToUpper(my.tableColumn[i]["dataType"]), "TIMESTAMP") { 87 | // tmpcolumnName = fmt.Sprintf("date_format(%s,'%%Y-%%m-%%d %%H:%%i:%%s')", my.tableColumn[i]["columnName"]) 88 | // } 89 | // columnNameSeq = append(columnNameSeq, tmpcolumnName) 90 | // } 91 | // queryColumn := strings.Join(columnNameSeq, ",") 92 | // sqlStr = fmt.Sprintf("select %s from `%s`.`%s` where %s", queryColumn, my.schema, my.table, my.sqlwhere) 93 | // fmt.Println(sqlStr) 94 | // //rowData, err := my.performQueryConditions(db, sqlStr) 95 | // //if err != nil { 96 | // // return "", err 97 | // //} 98 | // rowData := "" 99 | // return rowData, nil 100 | //} 101 | // 102 | ///* 103 | // 该函数用于需要查询源目表端数据库校验块数据,查询数据生成带有greatdbCheck标识的数据块 104 | //*/ 105 | //func (my *MySQLAllColumnStruct) GeneratingQuerySql() (string) { 106 | // var 
columnNameSeq []string 107 | // //处理mysql查询时间列时数据带时区问题 2021-01-23 10:16:29 +0800 CST 108 | // for i := range my.tableColumn { 109 | // var tmpcolumnName string 110 | // tmpcolumnName = my.tableColumn[i]["columnName"] 111 | // if strings.ToUpper(my.tableColumn[i]["dataType"]) == "DATETIME" { 112 | // tmpcolumnName = fmt.Sprintf("date_format(%s,'%%Y-%%m-%%d %%H:%%i:%%s')", my.tableColumn[i]["columnName"]) 113 | // } 114 | // if strings.Contains(strings.ToUpper(my.tableColumn[i]["dataType"]), "TIMESTAMP") { 115 | // tmpcolumnName = fmt.Sprintf("date_format(%s,'%%Y-%%m-%%d %%H:%%i:%%s')", my.tableColumn[i]["columnName"]) 116 | // } 117 | // columnNameSeq = append(columnNameSeq, tmpcolumnName) 118 | // } 119 | // queryColumn := strings.Join(columnNameSeq, ",") 120 | // return fmt.Sprintf("select %s from `%s`.`%s` where %s", queryColumn, my.schema, my.table, my.sqlwhere) 121 | //} 122 | // 123 | //func (or *OracleAllColumnStruct) GeneratingQueryCriteria(db *sql.DB) (string, error) { 124 | // return "", nil 125 | //} 126 | // 127 | // 128 | //func (or *OracleAllColumnStruct) GeneratingQuerySql() (string) { 129 | // return "" 130 | //} 131 | // 132 | //func (tqacs TableQueryAllColumnStruct) IndexColumnExec(dname, tname string, tableColumn []map[string]string, sqlwhere string, dbDevice string) TableQueryAllColumnInterface { 133 | // var tqaci TableQueryAllColumnInterface 134 | // if dbDevice == "mysql" { 135 | // tqaci = &MySQLAllColumnStruct{ 136 | // schema: dname, 137 | // table: tname, 138 | // tableColumn: tableColumn, 139 | // sqlwhere: sqlwhere, 140 | // } 141 | // } 142 | // if dbDevice == "oracle" { 143 | // tqaci = &OracleAllColumnStruct{} 144 | // } 145 | // return tqaci 146 | //} 147 | //func Tqacs() *TableQueryAllColumnStruct { 148 | // return &TableQueryAllColumnStruct{} 149 | //} 150 | -------------------------------------------------------------------------------- /dbExec/connection.go: 
-------------------------------------------------------------------------------- 1 | package dbExec 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | _ "github.com/go-sql-driver/mysql" 7 | _ "github.com/godror/godror" 8 | "gt-checksum/global" 9 | "os" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | type DBConnStruct struct { 15 | DBDevice string 16 | JDBC string 17 | MaxIdleConns int 18 | MaxOpenConns int 19 | ConnMaxIdleTime time.Duration 20 | ConnMaxLifetime time.Duration 21 | } 22 | 23 | /* 24 | 连接数据库,返回连接内存地址 25 | */ 26 | func (dbs *DBConnStruct) openDb() (*sql.DB, error) { 27 | db, err := sql.Open(dbs.DBDevice, dbs.JDBC) 28 | if err != nil { 29 | global.Wlog.Info("(0) database open fail. Error Info: ", err) 30 | return nil, err 31 | } 32 | 33 | if err = db.Ping(); err != nil { 34 | global.Wlog.Error("(0) database connection fail. Error Info: ", err) 35 | return nil, err 36 | } 37 | db.SetMaxIdleConns(dbs.MaxIdleConns) 38 | db.SetMaxOpenConns(dbs.MaxOpenConns) 39 | db.SetConnMaxLifetime(-1) 40 | db.SetConnMaxIdleTime(-1) 41 | return db, nil 42 | } 43 | 44 | func (dbs *DBConnStruct) OpenDB() (*sql.DB, error) { 45 | defer func() { 46 | if err := recover(); err != nil { 47 | fmt.Println("database create session connection fail, Check the database connection information.") 48 | os.Exit(0) 49 | } 50 | }() 51 | return dbs.openDb() 52 | } 53 | func (dbs *DBConnStruct) QPrepareRow(db *sql.DB, sqlStr string) (*sql.Rows, error) { 54 | global.Wlog.Info("begin prepare sql \"", sqlStr, "\"") 55 | var sqlRows *sql.Rows 56 | stmt, err := db.Prepare(sqlStr) 57 | if err != err { 58 | global.Wlog.Error("sql prepare fail. sql: ", sqlStr, " Error info: ", err) 59 | return nil, err 60 | } 61 | if strings.HasPrefix(strings.ToUpper(sqlStr), "SELECT") { 62 | sqlRows, err = stmt.Query() 63 | if err != nil { 64 | global.Wlog.Error("select sql exec fail. 
sql: ", sqlStr, " Error info: ", err) 65 | return nil, err 66 | } 67 | } else { 68 | if _, err = stmt.Exec(); err != nil { 69 | global.Wlog.Error("transaction sql exec fail. sql: ", sqlStr, " Error info: ", err) 70 | return nil, err 71 | } 72 | } 73 | stmt.Close() 74 | global.Wlog.Info("sql exec successful. sql info: ", sqlStr) 75 | return sqlRows, nil 76 | } 77 | 78 | /* 79 | 查询数据库,返回数据库接口切片,或返回json(包含列名) 80 | */ 81 | func (dbs *DBConnStruct) QMapData(db *sql.DB, sqlStr string) ([]map[string]interface{}, error) { 82 | var ( 83 | sqlRows *sql.Rows 84 | err error 85 | ) 86 | if sqlRows, err = dbs.QPrepareRow(db, sqlStr); err != nil { 87 | return nil, err 88 | } 89 | // 获取列名 90 | columns, err := sqlRows.Columns() 91 | 92 | if err != nil { 93 | errInfo := fmt.Sprintf("get database table column name fail. Error info: %s.", err) 94 | global.Wlog.Error(errInfo) 95 | return nil, err 96 | } 97 | // 定义一个切片,长度是字段的个数,切片里面的元素类型是sql.RawBytes 98 | //values := make([]sql.RawBytes,len(columns)) 99 | //定义一个切片,元素类型是interface{}接口 100 | //scanArgs := make([]interface{},len(values)) 101 | valuePtrs := make([]interface{}, len(columns)) 102 | tableData := make([]map[string]interface{}, 0) 103 | values := make([]interface{}, len(columns)) 104 | for sqlRows.Next() { 105 | for i := 0; i < len(columns); i++ { 106 | valuePtrs[i] = &values[i] 107 | } 108 | sqlRows.Scan(valuePtrs...) 
109 | entry := make(map[string]interface{}) 110 | for i, col := range columns { 111 | var v interface{} 112 | val := values[i] 113 | b, ok := val.([]byte) 114 | if ok { 115 | v = string(b) 116 | } else { 117 | v = val 118 | } 119 | entry[col] = v 120 | } 121 | tableData = append(tableData, entry) 122 | } 123 | return tableData, nil 124 | } 125 | 126 | func GetDBexec(jdbcurl, dbDevice string) *DBConnStruct { 127 | return &DBConnStruct{ 128 | JDBC: jdbcurl, 129 | DBDevice: dbDevice, 130 | MaxOpenConns: 1000, 131 | MaxIdleConns: 100, 132 | ConnMaxIdleTime: 100, 133 | ConnMaxLifetime: 0, 134 | } 135 | } 136 | 137 | /* 138 | 长事务会话执行 139 | */ 140 | func (dbs *DBConnStruct) LongSessionExec(db *sql.DB, sqlstr string) error { 141 | global.Wlog.Debug("GreatdbCheck executes \"", sqlstr, "\" at the MySQL") 142 | _, err := db.Exec(sqlstr) 143 | if err != nil { 144 | global.Wlog.Error("exec sql fail. sql: ", sqlstr, "error info: ", err) 145 | return err 146 | } 147 | return nil 148 | } 149 | 150 | /* 151 | 长会话连接查询、返回单行int类型,用于查询数据建库数值列,并返回值 152 | */ 153 | func (dbs *DBConnStruct) LSQInt(db *sql.DB, sqlstr string) (int, error) { 154 | var tmpTableCount int 155 | global.Wlog.Debug("GreatdbCheck prepare sql: \"", sqlstr, "\" at the MySQL") 156 | stamt, err := db.Prepare(sqlstr) 157 | if err != nil { 158 | global.Wlog.Error("GreatdbCheck parpare sql fail. sql: ", sqlstr, "error info: ", err) 159 | return 0, err 160 | } 161 | global.Wlog.Debug("GreatdbCheck exec sql: \"", sqlstr, "\" at the MySQL") 162 | rows, err := stamt.Query() 163 | if err != nil { 164 | global.Wlog.Error("GreatdbCheck exec sql fail. 
sql: ", sqlstr, "error info: ", err) 165 | return 0, err 166 | } 167 | for rows.Next() { 168 | rows.Scan(&tmpTableCount) 169 | } 170 | return tmpTableCount, nil 171 | } 172 | 173 | /* 174 | 长会话连接查询、返回多行string类型,用于查询数据建库数值列,并返回值 175 | */ 176 | func (dbs *DBConnStruct) LSQSEInt(db *sql.DB, sqlstr string) ([]string, error) { 177 | var tmpTableCount []string 178 | global.Wlog.Debug("GreatdbCheck prepare sql: \"", sqlstr, "\" at the MySQL") 179 | stmat, err := db.Prepare(sqlstr) 180 | if err != err { 181 | global.Wlog.Error("GreatdbCheck parpare sql fail. sql: ", sqlstr, "error info: ", err) 182 | return tmpTableCount, err 183 | } 184 | global.Wlog.Debug("GreatdbCheck exec sql: \"", sqlstr, "\" at the MySQL") 185 | rows, err := stmat.Query() 186 | var num string 187 | if err != err { 188 | global.Wlog.Error("GreatdbCheck exec sql fail. sql: ", sqlstr, "error info: ", err) 189 | return tmpTableCount, err 190 | } else { 191 | for rows.Next() { 192 | rows.Scan(&num) 193 | tmpTableCount = append(tmpTableCount, num) 194 | } 195 | } 196 | return tmpTableCount, nil 197 | } 198 | 199 | func (dbs *DBConnStruct) DbSqlExecString(db *sql.DB, sqlstr string) (string, error) { 200 | var ( 201 | rows *sql.Rows 202 | rowDataString []string 203 | err error 204 | columns []string 205 | ) 206 | global.Wlog.Debug("GreatdbCheck prepare sql: \"", sqlstr, "\" at the MySQL") 207 | //stmt, err = db.Prepare(sqlstr) 208 | //if err != nil { 209 | // global.Wlog.Error("GreatdbCheck parpare sql fail. sql: ", sqlstr, "error info: ", err) 210 | //} 211 | //rows, err = stmt.Query() 212 | rows, err = db.Query(sqlstr) 213 | if err != nil { 214 | fmt.Println(err) 215 | rows, err = db.Query(sqlstr) 216 | } 217 | global.Wlog.Debug("GreatdbCheck exec sql: \"", sqlstr, "\" at the MySQL") 218 | columns, err = rows.Columns() 219 | if err != nil { 220 | global.Wlog.Error("GreatdbCheck exec sql fail. 
sql: ", sqlstr, "error info: ", err) 221 | return "", err 222 | } 223 | valuePtrs := make([]interface{}, len(columns)) 224 | values := make([]interface{}, len(columns)) 225 | for rows.Next() { 226 | var tmpaaS []string 227 | for i := 0; i < len(columns); i++ { 228 | valuePtrs[i] = &values[i] 229 | } 230 | rows.Scan(valuePtrs...) 231 | for i := range columns { 232 | var v interface{} 233 | val := values[i] 234 | b, ok := val.([]byte) 235 | if ok { 236 | v = string(b) 237 | } else { 238 | v = val 239 | } 240 | tmpaaS = append(tmpaaS, fmt.Sprintf("%v", v)) 241 | } 242 | tmpaa := strings.Join(tmpaaS, "/*go actions columnData*/") 243 | rowDataString = append(rowDataString, tmpaa) 244 | } 245 | rows.Close() 246 | return strings.Join(rowDataString, "/*go actions rowData*/"), nil 247 | } 248 | 249 | func DBexec() *DBConnStruct { 250 | return &DBConnStruct{} 251 | } 252 | -------------------------------------------------------------------------------- /dbExec/global_consistency_snapshot.go: -------------------------------------------------------------------------------- 1 | package dbExec 2 | 3 | import ( 4 | mysql "gt-checksum/MySQL" 5 | oracle "gt-checksum/Oracle" 6 | "gt-checksum/global" 7 | ) 8 | 9 | type GlobalSNStruct struct { 10 | mysql DBGlobalCS 11 | oracle DBGlobalCS 12 | } 13 | 14 | /* 15 | 全局一致性接口,初始化连接池、获取全局一致性位点 16 | */ 17 | type DBGlobalCS interface { 18 | GlobalCN(logThreadSeq int) (map[string]string, error) //全局一致性位点 19 | NewConnPool(logThreadSeq int) (*global.Pool, bool) //连接池 20 | } 21 | 22 | func (gs GlobalSNStruct) GcnObject(poolMin int, jdbc, dbDevice string) DBGlobalCS { 23 | var dbcs DBGlobalCS 24 | if dbDevice == "mysql" { 25 | dbcs = &mysql.GlobalCS{ 26 | Jdbc: jdbc, 27 | ConnPoolMin: poolMin, 28 | Drive: dbDevice, 29 | } 30 | } 31 | if dbDevice == "godror" { 32 | dbcs = &oracle.GlobalCS{ 33 | Jdbc: jdbc, 34 | ConnPoolMin: poolMin, 35 | Drive: dbDevice, 36 | } 37 | } 38 | return dbcs 39 | 40 | } 41 | 42 | func GCN() *GlobalSNStruct { 43 | return 
&GlobalSNStruct{} 44 | } 45 | -------------------------------------------------------------------------------- /dbExec/query_table_date.go: -------------------------------------------------------------------------------- 1 | package dbExec 2 | 3 | import ( 4 | "database/sql" 5 | mysql "gt-checksum/MySQL" 6 | oracle "gt-checksum/Oracle" 7 | ) 8 | 9 | type IndexColumnStruct struct { 10 | Drivce string 11 | Schema string 12 | Table string 13 | TmpTableFileName string 14 | ColumnName []string 15 | ChanrowCount int 16 | TableColumn []map[string]string 17 | Sqlwhere string 18 | ColData []map[string]string 19 | BeginSeq string 20 | RowDataCh int64 21 | SelectColumn map[string]string 22 | } 23 | 24 | type TableIndexColumner interface { 25 | TmpTableIndexColumnSelectDispos(logThreadSeq int64) map[string]string 26 | TmpTableIndexColumnRowsCount(db *sql.DB, logThreadSeq int64) (uint64, error) 27 | TmpTableColumnGroupDataDispos(db *sql.DB, where string, columnName string, logThreadSeq int64) (chan map[string]interface{}, error) 28 | TableRows(db *sql.DB, logThreadSeq int64) (uint64, error) 29 | QueryTableIndexColumnInfo(db *sql.DB, logThreadSeq int64) ([]map[string]interface{}, error) 30 | IndexDisposF(queryData []map[string]interface{}, logThreadSeq int64) (map[string][]string, map[string][]string, map[string][]string) 31 | NoIndexOrderBySingerColumn(orderCol []map[string]string) []string 32 | NoIndexGeneratingQueryCriteria(db *sql.DB, beginSeq uint64, chanrowCount int, logThreadSeq int64) (string, error) 33 | GeneratingQuerySql(db *sql.DB, logThreadSeq int64) (string, error) 34 | GeneratingQueryCriteria(db *sql.DB, logThreadSeq int64) (string, error) 35 | } 36 | 37 | func (qticis *IndexColumnStruct) TableIndexColumn() TableIndexColumner { 38 | var aa TableIndexColumner 39 | if qticis.Drivce == "mysql" { 40 | aa = &mysql.QueryTable{ 41 | Schema: qticis.Schema, 42 | Table: qticis.Table, 43 | ColumnName: qticis.ColumnName, 44 | ChanrowCount: qticis.ChanrowCount, 45 | 
TableColumn: qticis.TableColumn, 46 | Sqlwhere: qticis.Sqlwhere, 47 | ColData: qticis.ColData, 48 | SelectColumn: qticis.SelectColumn, 49 | BeginSeq: qticis.BeginSeq, 50 | RowDataCh: qticis.RowDataCh, 51 | } 52 | } 53 | if qticis.Drivce == "godror" { 54 | aa = &oracle.QueryTable{ 55 | Schema: qticis.Schema, 56 | Table: qticis.Table, 57 | ColumnName: qticis.ColumnName, 58 | ChanrowCount: qticis.ChanrowCount, 59 | TableColumn: qticis.TableColumn, 60 | Sqlwhere: qticis.Sqlwhere, 61 | ColData: qticis.ColData, 62 | SelectColumn: qticis.SelectColumn, 63 | BeginSeq: qticis.BeginSeq, 64 | RowDataCh: qticis.RowDataCh, 65 | } 66 | } 67 | return aa 68 | } 69 | -------------------------------------------------------------------------------- /dbExec/schem_Table_Column.go: -------------------------------------------------------------------------------- 1 | package dbExec 2 | 3 | import ( 4 | "database/sql" 5 | "gt-checksum/MySQL" 6 | "gt-checksum/Oracle" 7 | ) 8 | 9 | type TableColumnNameStruct struct { 10 | Schema string 11 | Table string 12 | IgnoreTable string 13 | Drive string 14 | Db *sql.DB 15 | Datafix string 16 | LowerCaseTableNames string 17 | } 18 | 19 | type QueryTableColumnNameInterface interface { 20 | TableColumnName(db *sql.DB, logThreadSeq int64) ([]map[string]interface{}, error) 21 | GlobalAccessPri(db *sql.DB, logThreadSeq int64) (bool, error) 22 | TableAccessPriCheck(db *sql.DB, checkTableList []string, datefix string, logThreadSeq int64) (map[string]int, error) 23 | DatabaseNameList(db *sql.DB, logThreadSeq int64) (map[string]int, error) 24 | TableAllColumn(db *sql.DB, logThreadSeq int64) ([]map[string]interface{}, error) 25 | TableIndexChoice(queryData []map[string]interface{}, logThreadSeq int64) map[string][]string 26 | Trigger(db *sql.DB, logThreadSeq int64) (map[string]string, error) 27 | Proc(db *sql.DB, logThreadSeq int64) (map[string]string, error) 28 | Func(db *sql.DB, logThreadSeq int64) (map[string]string, error) 29 | Struct(db *sql.DB) 
(map[string]string, error) 30 | Foreign(db *sql.DB, logThreadSeq int64) (map[string]string, error) 31 | Partitions(db *sql.DB, logThreadSeq int64) (map[string]string, error) 32 | } 33 | 34 | func (tcns *TableColumnNameStruct) Query() QueryTableColumnNameInterface { 35 | var aa QueryTableColumnNameInterface 36 | if tcns.Drive == "mysql" { 37 | aa = &mysql.QueryTable{ 38 | Schema: tcns.Schema, 39 | Table: tcns.Table, 40 | Db: tcns.Db, 41 | LowerCaseTableNames: tcns.LowerCaseTableNames, 42 | } 43 | } 44 | if tcns.Drive == "godror" { 45 | aa = &oracle.QueryTable{ 46 | Schema: tcns.Schema, 47 | Table: tcns.Table, 48 | Db: tcns.Db, 49 | LowerCaseTableNames: tcns.LowerCaseTableNames, 50 | } 51 | } 52 | return aa 53 | } 54 | -------------------------------------------------------------------------------- /docs/gc.conf.example: -------------------------------------------------------------------------------- 1 | ; gt-checksum 配置文件参考 2 | 3 | ; 定义源、目标数据源 4 | ; 目前只支持MySQL、Oracle两种数据源 5 | 6 | [DSNs] 7 | ;oracle的连接串格式为:oracle|user/password@ip:port/sid 8 | ;例如:srcDSN = oracle|scott/tiger@172.16.0.1:1521/helowin 9 | 10 | ;mysql的连接串格式为:mysql|usr:password@tcp(ip:port)/dbname?charset=xxx 11 | ;例如:dstDSN = mysql|pcms:abc123@tcp(172.16.0.1:3306)/information_schema?charset=utf8 12 | 13 | srcDSN = mysql|pcms:abc123@tcp(172.16.0.1:3306)/information_schema?charset=utf8 14 | dstDSN = mysql|pcms:abc123@tcp(172.16.0.2:3306)/information_schema?charset=utf8 15 | 16 | ; 定义校验数据对象 17 | [Schema] 18 | ; 配置参数中,table=*.*表示匹配所有库(MySQL不包含 information_schema\mysql\performance_schema\sys),库表都支持模糊匹配(无论是table还是ignoreTable),%代表模糊,*代表所有,包含的模糊规则:%schema.xxx,%schema%.xxx schema%.xxx schema.%table schema.table% schema.%table% schema.table 其中如果设置了*.*,则不能在输入其他的值,例如:*.*,pcms%.*,则是错误的,会报table设置错误,table和ignoreTable的值相同,也会报错 19 | 20 | ; 选项tables用来定义校验数据表对象,支持通配符"%"和"*" 21 | ; 例如: 22 | ; *.* 表示所有库表对象(MySQL不包含 information_schema\mysql\performance_schema\sys) 23 | ; test.* 表示test库下的所有表 24 | ; test.t% 
表示test库下所有表名中包含字母"t"开头的表 25 | ; db%.* 表示所有库名中包含字母"db"开头的数据库中的所有表 26 | ; %db.* 表示所有库名中包含字母"db"结尾的数据库中的所有表 27 | ; 28 | ; 如果已经设置为 "*.*",则不能再增加其他的规则,例如:设置 "*.*,pcms%.*" 则会报告规则错误 29 | ; 如果 table 和 ignore-tables 设置的值相同的话也会报告规则错误 30 | tables = test.* 31 | 32 | ; 选项 ignore-tables 用来定义忽略的数据对象规则,也支持通配符"%"和"*",具体用法参考上面的案例 33 | ; ignore-tables = db1.* 34 | ignore-tables = 35 | 36 | ; 设置是否检查没有索引的表,可设置为:yes/no,默认值为:no 37 | ; checkNoIndexTable = yes | no 38 | checkNoIndexTable = yes 39 | 40 | ; 设置是否忽略表名大小写,可设置为:yes/no,默认值为:no 41 | ; yes => 会按照配置的大小写进行匹配 42 | ; no => 统一用大写表名 43 | ; lowerCaseTableNames = yes | no 44 | lowerCaseTableNames = no 45 | 46 | ; 设置日志文件名及等级 47 | [Logs] 48 | ; 设置日志文件名,可以指定为绝对路径或相对路径 49 | log = ./gt-checksum.log 50 | 51 | ; 设置日志等级,支持 debug/info/warn/error 几个等级,默认值为:info 52 | ; logLevel = info | debug | warn | error 53 | logLevel = info 54 | 55 | ; 其他校验规则 56 | [Rules] 57 | ; 数据校验并行线程数 58 | parallel-thds = 10 59 | 60 | ; 设置单列索引每次检索多少条数据进行校验,默认值:1000,建议范围:1000 - 5000 61 | ; 注:该值设置太大时有可能会造成SQL查询效率反倒下降的情况发生,一般建议设置不超过5000 62 | singleIndexChanRowCount = 10000 63 | 64 | ; 设置多列索引每次检索多少条数据进行校验,默认值:1000,建议范围:1000 - 5000 65 | ; 注:该值设置太大时有可能会造成SQL查询效率反倒下降的情况发生,一般建议设置不超过5000 66 | jointIndexChanRowCount = 10000 67 | 68 | ; 设置校验队列深度,默认值:100 69 | queue-size = 100 70 | 71 | ; 设置数据校验模式,支持 count/rows/sample 三种模式,默认值为:rows 72 | ; count 表示只校验源、目标表的数据量 73 | ; rows 表示逐行校验源、目标数据 74 | ; sample 表示只进行抽样数据校验,配合参数ratio设置采样率 75 | ; checkMode = rows | count | sample 76 | checkMode = rows 77 | 78 | ; 当 checkMode = sample 时,设置数据采样率,设置范围1-100,用百分比表示,1表示1%,100表示100%,默认值:10 79 | ; ratio = 10 80 | 81 | ; 设置数据校验对象,支持 data/struct/index/partitions/foreign/trigger/func/proc,默认值为:data 82 | ; 分别表示:行数据/表结构/索引/分区/外键/触发器/存储函数/存储过程 83 | ; checkObject = data | struct | index | partitions | foreign | trigger | func | proc 84 | checkObject = data 85 | 86 | ;设置表结构校验规则,当checkObject为struct时才会生效 87 | [Struct] 88 | ; 设置struct校验时的校验模式,可设置为:strict/loose,为strict时,则会严格匹配列的所有属性,为loose时,则为宽松模式只匹配列名,默认值为:strict 89 | ; 
ScheckMod = strict | loose 90 | ScheckMod = strict 91 | 92 | ; 设置struct校验时是否校验列的顺序,可设置为:yes/no,设置为yes,则会按照源端的列的正序进行校验,默认值为:yes 93 | ; ScheckOrder = yes | no 94 | ScheckOrder = yes 95 | 96 | ; 设置修复列的属性及顺序的依据原则,可设置为src/dst,设置为src则按照源端的列属性进行修复,默认值为:src 97 | ; 当缺少列时,修复语句会按照源端的列数据类型生成 98 | ; ScheckFixRule = src | dst 99 | ScheckFixRule = src 100 | 101 | ; 设置日志文件名及等级 102 | 103 | ; 设置数据修复方案 104 | [Repair] 105 | ; 数据修复方式,支持 file/table 两种方式 106 | ; file,生成数据修复SQL文件 107 | ; table 直接在线修复数据 108 | ; datafix = file | table 109 | datafix = file 110 | 111 | ; 修复事务数,即单个事务包含多少个dml语句,默认值为:100 112 | fixTrxNum = 100 113 | 114 | ; 当 datafix = file 时,设置生成的SQL文件名,可以指定为绝对路径或相对路径 115 | ; 当 datafix = table 时,可以不用设置 fixFileName 参数 116 | fixFileName = ./gt-checksum-DataFix.sql 117 | -------------------------------------------------------------------------------- /gc.conf: -------------------------------------------------------------------------------- 1 | ; gt-checksum 配置文件参考 2 | 3 | ; 定义源、目标数据源 4 | ; 目前只支持MySQL、Oracle两种数据源 5 | 6 | [DSNs] 7 | ;oracle的连接串格式为:oracle|user/password@ip:port/sid 8 | ;例如:srcDSN = oracle|scott/tiger@172.16.0.1:1521/helowin 9 | 10 | ;mysql的连接串格式为:mysql|usr:password@tcp(ip:port)/dbname?charset=xxx 11 | srcDSN = mysql|u1:p1@tcp(172.17.0.1:3307)/information_schema?charset=utf8mb4 12 | dstDSN = mysql|u1:p1@tcp(172.17.0.2:3307)/information_schema?charset=utf8mb4 13 | 14 | ; 定义校验数据对象 15 | [Schema] 16 | ; 选项tables用来定义校验数据表对象,支持通配符"%"和"*" 17 | ; 例如: 18 | ; *.* 表示所有库表对象(MySQL不包含 information_schema\mysql\performance_schema\sys) 19 | ; test.* 表示test库下的所有表 20 | ; test.t% 表示test库下所有表名中包含字母"t"开头的表 21 | ; db%.* 表示所有库名中包含字母"db"开头的数据库中的所有表 22 | ; %db.* 表示所有库名中包含字母"db"结尾的数据库中的所有表 23 | ; 24 | ; 如果已经设置为 "*.*",则不能再增加其他的规则,例如:设置 "*.*,pcms%.*" 则会报告规则错误 25 | ; 如果 table 和 ignore-tables 设置的值相同的话也会报告规则错误 26 | 27 | tables = db1.t1 28 | ignore-tables = 29 | 30 | ; 设置是否检查没有索引的表,可设置为:yes/no,默认值为:no 31 | checkNoIndexTable = no 32 | 33 | ; 设置是否忽略表名大小写,可设置为:yes/no 34 | ; 
当为no时,统一使用大写表名;当为yes时,会按照配置的大小写进行匹配 35 | ; 默认值为:no 36 | lowerCaseTableNames = no 37 | 38 | ; 其他校验规则 39 | [Rules] 40 | ; 数据校验并行线程数 41 | parallel-thds = 10 42 | 43 | ; 设置每次检索多少条数据进行校验,默认值:10000 44 | chanRowCount = 10000 45 | 46 | ; 设置校验队列深度,默认值:100 47 | queue-size = 100 48 | 49 | ; 设置数据校验模式,支持 count/rows/sample 三种模式,默认值为:rows 50 | ; count 表示只校验源、目标表的数据量 51 | ; rows 表示逐行校验源、目标数据 52 | ; sample 表示只进行抽样数据校验,配合参数ratio设置采样率 53 | checkMode = rows 54 | 55 | ; 当 checkMode = sample 时,设置数据采样率,设置范围1-100,用百分比表示,1表示1%,100表示100%,默认值:10 56 | ratio = 10 57 | 58 | ; 设置数据校验对象,支持 data/struct/index/partitions/foreign/trigger/func/proc,默认值为:data 59 | ; 分别表示:行数据/表结构/索引/分区/外键/触发器/存储函数/存储过程 60 | checkObject = data 61 | 62 | ;设置表结构校验规则,当checkObject为struct时才会生效 63 | [Struct] 64 | ; 设置struct校验时的校验模式,可设置为:strict/loose,为strict时,则会严格匹配列的所有属性,为loose时,则为宽松模式只匹配列名,默认值为:strict 65 | ScheckMod = strict 66 | 67 | ; 设置struct校验时是否校验列的顺序,可设置为:yes/no,设置为yes,则会按照源端的列的正序进行校验,默认值为:yes 68 | ScheckOrder = yes 69 | 70 | ; 设置修复列的属性及顺序的依据原则,可设置为src/dst,设置为src则按照源端的列属性进行修复,默认值为:src 71 | ; 当缺少列时,修复语句会按照源端的列数据类型生成 72 | ScheckFixRule = src 73 | 74 | ; 设置日志文件名及等级 75 | [Logs] 76 | ; 设置日志文件名,可以指定为绝对路径或相对路径 77 | log = ./gt-checksum.log 78 | 79 | ; 设置日志等级,支持 debug/info/warn/error 几个等级,默认值为:info 80 | logLevel = info 81 | 82 | ; 设置数据修复方案 83 | [Repair] 84 | ; 数据修复方式,支持 file/table 两种方式 85 | ; file,生成数据修复SQL文件 86 | ; table 直接在线修复数据 87 | datafix = file 88 | 89 | ; 修复事务数,即单个事务包含多少个dml语句,默认值为:100 90 | fixTrxNum = 100 91 | 92 | ; 当 datafix = file 时,设置生成的SQL文件名,可以指定为绝对路径或相对路径 93 | ; 当 datafix = table 时,可以不用设置 fixFileName 参数 94 | fixFileName = ./gt-checksum-DataFix.sql 95 | -------------------------------------------------------------------------------- /gc.conf-simple: -------------------------------------------------------------------------------- 1 | ; 2 | ; gc.cnf-simple 3 | ; 4 | ; 极简配置文件模板,只需要最少的几个参数即可 5 | ; 6 | [DSNs] 7 | ;oracle的连接串为 oracle|user/password@ip:port/sid 8 | ;mysql的连接串为 
mysql|usr:password@tcp(ip:port)/dbname?charset=xxx 9 | srcDSN = mysql|u1:p1@tcp(172.17.0.1:3307)/information_schema?charset=utf8mb4 10 | dstDSN = mysql|u1:p1@tcp(172.17.0.2:3307)/information_schema?charset=utf8mb4 11 | 12 | [Schema] 13 | ; 选项tables用来定义校验数据表对象,支持通配符"%"和"*" 14 | ; 例如: 15 | ; *.* 表示所有库表对象(MySQL不包含 information_schema\mysql\performance_schema\sys) 16 | ; test.* 表示test库下的所有表 17 | ; test.t% 表示test库下所有表名中包含字母"t"开头的表 18 | ; db%.* 表示所有库名中包含字母"db"开头的数据库中的所有表 19 | ; %db.* 表示所有库名中包含字母"db"结尾的数据库中的所有表 20 | ; 21 | ; 如果已经设置为 "*.*",则不能再增加其他的规则,例如:设置 "*.*,pcms%.*" 则会报告规则错误 22 | ; 如果 table 和 ignore-tables 设置的值相同的话也会报告规则错误 23 | tables = db1.t1 -------------------------------------------------------------------------------- /global/IncDatadispos.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "github.com/go-mysql-org/go-mysql/mysql" 8 | "github.com/go-mysql-org/go-mysql/replication" 9 | "strings" 10 | ) 11 | 12 | type IncDataDisposInterface interface { 13 | readIncData() //读取增量数据 14 | } 15 | type MySQLIncDataDisposStruct struct {} 16 | type OraceIncDataDisposStruct struct {} 17 | type IncDataDisposFunStruct struct {} 18 | 19 | 20 | //var binlogInfo = make(map[string]string) 21 | func (my MySQLIncDataDisposStruct) binlogGtidDispos(pos *bytes.Buffer){ 22 | tmpa := fmt.Sprintf("%v",pos) 23 | //fmt.Println(tmpa) 24 | tmpb := strings.Split(tmpa,"\n") 25 | fmt.Println(tmpb) 26 | //var tmpc []string 27 | //for i := range tmpb{ 28 | // fmt.Println(tmpb[i]) 29 | //} 30 | 31 | } 32 | func (my MySQLIncDataDisposStruct) readIncData(){ 33 | cfg := replication.BinlogSyncerConfig{ 34 | ServerID: 1613306, 35 | Flavor: "mysql", 36 | Host:"172.16.50.162", 37 | Port: 3306, 38 | User: "pcms", 39 | Password: "pcms@123", 40 | } 41 | syncer := replication.NewBinlogSyncer(cfg) 42 | streamer, _ := syncer.StartSync(mysql.Position{ "mysql-bin.000007", 653178}) 43 | for { 44 | // 
fmt.Println(streamer.DumpEvents()) 45 | ev, _ := streamer.GetEvent(context.Background()) 46 | var a string 47 | buf := bytes.NewBufferString(a) 48 | ev.Dump(buf) 49 | my.binlogGtidDispos(buf) 50 | fmt.Println("------------------") 51 | } 52 | 53 | 54 | } 55 | func (my OraceIncDataDisposStruct) readIncData(){ 56 | 57 | } 58 | func (idd IncDataDisposFunStruct) IncDataDispos(iddi IncDataDisposInterface){ 59 | iddi.readIncData() 60 | } 61 | -------------------------------------------------------------------------------- /global/ThreadPool.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | //定义一个任务接口 8 | type Job interface { 9 | DoDataCheck() 10 | //Func() 11 | } 12 | 13 | //定义一个任务队列或者队列池子 14 | type JobQueue chan Job 15 | 16 | //定义一个工作结构体,里面包含任务池子 17 | type Worker struct { 18 | JobChan JobQueue //每一个worker对象具有JobQueue(队列)属性。 19 | } 20 | 21 | //定义一个工作池子,里面包含工作池子的大小,任务池子,以及工作池子的队列 22 | type WorkerPool struct { //线程池: 23 | Workerlen int //线程池的大小 24 | JobQueue JobQueue //Job队列,接收外部的数据 25 | WorkerQueue chan JobQueue //worker队列:处理任务的Go程队列 26 | } 27 | 28 | //定义一个函数,返回一个任务初始化结构体 29 | func NewWorker() Worker { 30 | return Worker{JobChan: make(chan Job)} 31 | } 32 | 33 | //定义一个方法,名字叫run,将任务队列里面的任务加到工作池子中,并执行 34 | //启动参与程序运行的Go程数量 35 | func (w Worker) Run(wq chan JobQueue) { 36 | go func() { 37 | for { 38 | wq <- w.JobChan //处理任务的Go程队列数量有限,每运行1个,向队列中添加1个,队列剩余数量少1个 (JobChain入队列) 39 | select { 40 | case job := <-w.JobChan: 41 | //defer func() { 42 | // if err := recover(); err != nil { 43 | // fmt.Println("携程报错:", err) 44 | // w.JobChan <- job 45 | // } 46 | //}() 47 | job.DoDataCheck() //执行操作 48 | //job.Func() 49 | } 50 | } 51 | }() 52 | } 53 | 54 | //初始化工作池子 55 | func NewWorkerPool(workerlen int) *WorkerPool { 56 | return &WorkerPool{ 57 | Workerlen: workerlen, 58 | JobQueue: make(JobQueue), 59 | WorkerQueue: make(chan JobQueue, workerlen), 60 | } 61 | } 62 | 63 | func (wp *WorkerPool) 
Run() { 64 | defer func() { 65 | if err := recover(); err != nil { 66 | fmt.Println(err) 67 | } 68 | }() 69 | //初始化worker(多个Go程) 70 | for i := 0; i < wp.Workerlen; i++ { 71 | worker := NewWorker() 72 | worker.Run(wp.WorkerQueue) //开启每一个Go程 73 | } 74 | 75 | // 循环获取可用的worker,往worker中写job 76 | go func() { 77 | for { 78 | select { 79 | //将JobQueue中的数据存入WorkerQueue 80 | case job := <-wp.JobQueue: //线程池中有需要待处理的任务(数据来自于请求的任务) :读取JobQueue中的内容 81 | worker := <-wp.WorkerQueue //队列中有空闲的Go程 :读取WorkerQueue中的内容,类型为:JobQueue 82 | worker <- job //空闲的Go程执行任务 :整个job入队列(channel) 类型为:传递的参数(Score结构体) 83 | } 84 | } 85 | }() 86 | } 87 | func (wp *WorkerPool) Close() { 88 | 89 | } 90 | 91 | ////定义一个实现Job接口的数据 92 | //type Score struct { 93 | // Num int 94 | //} 95 | // 96 | ////定义对数据的处理 97 | //func (s *Score) Do() { 98 | // fmt.Println("num:", s.Num) 99 | // time.Sleep(500 * time.Millisecond) //模拟执行的耗时任务 100 | //} 101 | 102 | //func main() { 103 | // num := 100 * 2 //开启 2万个线程 104 | // // debug.SetMaxThreads(num + 1000) //设置最大线程数 105 | // // 注册工作池,传入任务 106 | // // 参数1 worker并发个数 107 | // p := NewWorkerPool(num) 108 | // p.Run() 109 | // dataNum := 10 110 | // for i := 1; i <= dataNum; i++ { 111 | // sc := &Score{Num: i} 112 | // p.JobQueue <- sc //数据传进去会被自动执行Do()方法,具体对数据的处理自己在Do()方法中定义 113 | // } 114 | //} 115 | -------------------------------------------------------------------------------- /global/dbConnPool.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "sync" 7 | ) 8 | 9 | type Pool struct { 10 | mu sync.Mutex 11 | minConn int // 最小连接数 12 | maxConn int // 最大连接数 13 | numConn int // 池已申请的连接数 14 | conns chan *sql.DB //当前池中空闲连接实例 15 | close bool 16 | drive string //数据库类型 17 | } 18 | 19 | // 初始化池实例 20 | func NewPool(min int, db []*sql.DB, logThreadSeq int, drive string) *Pool { 21 | var ( 22 | vlog string 23 | ) 24 | defer func() { 25 | if err := recover(); err != nil { 26 | 
fmt.Println(err) 27 | } 28 | }() 29 | p := &Pool{ 30 | minConn: min, 31 | maxConn: min, 32 | numConn: 1, 33 | conns: make(chan *sql.DB, min), 34 | close: false, 35 | drive: drive, 36 | } 37 | vlog = fmt.Sprintf("(%d) Start adding session connections to the %s DB connection pool ...", logThreadSeq, p.drive) 38 | Wlog.Debug(vlog) 39 | for i := 0; i < min; i++ { 40 | p.conns <- db[i] 41 | //p.conns <- dbconn 42 | } 43 | vlog = fmt.Sprintf("(%d) %s DB Connection pool join session connection action completed !!!", logThreadSeq, p.drive) 44 | Wlog.Debug(vlog) 45 | vlog = fmt.Sprintf("(%d) The current number of %s DB session connection pools is [%d]", logThreadSeq, p.drive, len(p.conns)) 46 | Wlog.Debug(vlog) 47 | return p 48 | } 49 | 50 | type DBConn struct { 51 | db *sql.DB 52 | idleTime int // 标记该数据库连接空闲时间 53 | } 54 | 55 | // 新建数据库连接 56 | //func NewDBConn(dbcon *sql.DB) *DBConn { 57 | // return &DBConn{ 58 | // db: dbcon, 59 | // idleTime: 0, 60 | // } 61 | //} 62 | 63 | // 从池中取出连接 64 | func (p *Pool) Get(logThreadSeq int64) *sql.DB { 65 | var ( 66 | vlog string 67 | d *sql.DB 68 | ) 69 | defer func() { 70 | if err := recover(); err != nil { 71 | fmt.Println(err) 72 | } 73 | }() 74 | vlog = fmt.Sprintf("(%d) Get a session connection from the %s DB session connection pool ...", logThreadSeq, p.drive) 75 | Wlog.Debug(vlog) 76 | if p.close { 77 | close(p.conns) 78 | return nil 79 | } 80 | p.mu.Lock() 81 | defer p.mu.Unlock() 82 | if p.numConn >= p.minConn || len(p.conns) > 0 { // 保证了池申请连接数量不超过最大连接数 83 | if p.numConn >= p.minConn { 84 | vlog = fmt.Sprintf("(%d) The current %s DB session connection pool is full. 
use session [%d], total session [%d], no memory available, please wait...", logThreadSeq, p.drive, p.numConn, p.minConn) 85 | Wlog.Warn(vlog) 86 | } 87 | d = <-p.conns // 若池中没有可取的连接,则等待其他请求返回连接至池中再取 88 | } 89 | p.numConn++ 90 | vlog = fmt.Sprintf("(%d) Obtain a connection successfully, the current %s DB connection pool status, the number of applied connections is [%d], and the remaining number is [%d].", logThreadSeq, p.drive, p.minConn-len(p.conns), len(p.conns)) 91 | Wlog.Debug(vlog) 92 | return d 93 | } 94 | 95 | // 将连接返回池中 96 | func (p *Pool) Put(d *sql.DB, logThreadSeq int64) { 97 | var ( 98 | vlog string 99 | ) 100 | defer func() { 101 | if err := recover(); err != nil { 102 | fmt.Println(err) 103 | } 104 | }() 105 | vlog = fmt.Sprintf("(%d) Put a session connection into the %s DB session connection pool ...", logThreadSeq, p.drive) 106 | Wlog.Debug(vlog) 107 | if p.close { 108 | return 109 | } 110 | p.mu.Lock() 111 | defer p.mu.Unlock() 112 | p.conns <- d 113 | p.numConn-- 114 | vlog = fmt.Sprintf("(%d) The connection is put in successfully, the %s DB current connection pool status, the number of applied connections is [%d], and the remaining number is [%d].", logThreadSeq, p.drive, p.minConn-len(p.conns), len(p.conns)) 115 | Wlog.Debug(vlog) 116 | } 117 | 118 | // 关闭池 119 | func (p *Pool) Close(logThreadSeq int) { 120 | var ( 121 | vlog string 122 | ) 123 | p.mu.Lock() 124 | defer p.mu.Unlock() 125 | vlog = fmt.Sprintf("(%d) Start closing the %s DB session connection pool ...", logThreadSeq, p.drive) 126 | Wlog.Debug(vlog) 127 | close(p.conns) 128 | for d := range p.conns { 129 | d.Close() 130 | } 131 | vlog = fmt.Sprintf("(%d) %s DB Session connection pool closed successfully.", logThreadSeq, p.drive) 132 | Wlog.Debug(vlog) 133 | p.close = true 134 | } 135 | -------------------------------------------------------------------------------- /global/globalVariables.go: -------------------------------------------------------------------------------- 1 | package 
global 2 | 3 | import ( 4 | "gt-checksum/go-log/log" 5 | ) 6 | 7 | /* 8 | 初始化日志文件 9 | */ 10 | var Wlog *log.Logger 11 | 12 | type TableAllColumnInfoS struct { 13 | SColumnInfo, DColumnInfo []map[string]string //表的所有列信息 14 | } 15 | -------------------------------------------------------------------------------- /global/goRoutingPool.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | // 4 | //import ( 5 | // "errors" 6 | // "sync" 7 | // "time" 8 | //) 9 | // 10 | //// goroutine pool 11 | //type GoroutinePool struct { 12 | // c chan struct{} 13 | // wg *sync.WaitGroup 14 | //} 15 | // 16 | //// 采用有缓冲channel实现,当channel满的时候阻塞 17 | //func NewGoroutinePool(maxSize int) *GoroutinePool { 18 | // if maxSize <= 0 { 19 | // panic("max size too small") 20 | // } 21 | // return &GoroutinePool{ 22 | // c: make(chan struct{}, maxSize), 23 | // wg: new(sync.WaitGroup), 24 | // } 25 | //} 26 | // 27 | //// add 28 | //func (g *GoroutinePool) Add(delta int) { 29 | // g.wg.Add(delta) 30 | // for i := 0; i < delta; i++ { 31 | // g.c <- struct{}{} 32 | // } 33 | // 34 | //} 35 | // 36 | //// done 37 | //func (g *GoroutinePool) Done() { 38 | // <-g.c 39 | // g.wg.Done() 40 | //} 41 | // 42 | //// wait 43 | //func (g *GoroutinePool) Wait() { 44 | // g.wg.Wait() 45 | //} 46 | // 47 | //func testGoroutineWithTimeOut() error { 48 | // done := make(chan struct{}) 49 | // // 新增阻塞chan 50 | // errChan := make(chan error) 51 | // var err error 52 | // pool := NewGoroutinePool(10) 53 | // for i := 0; i < 10; i++ { 54 | // pool.Add(1) 55 | // go func() { 56 | // pool.Done() 57 | // if err != nil { 58 | // errChan <- errors.New("error") 59 | // } 60 | // }() 61 | // } 62 | // go func() { 63 | // pool.Wait() 64 | // close(done) 65 | // }() 66 | // 67 | // select { 68 | // // 错误快返回,适用于get接口 69 | // case err := <-errChan: 70 | // return err 71 | // case <-done: 72 | // case <-time.After(500 * time.Millisecond): 73 | // } 74 | // return nil 75 
| //} 76 | -------------------------------------------------------------------------------- /global/log.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "fmt" 7 | rotatelogs "github.com/lestrrat-go/file-rotatelogs" /* 引入日志回滚功能 */ 8 | "github.com/rifflock/lfshook" /* logrus本地文件系统钩子 */ 9 | "github.com/sirupsen/logrus" /* logrus日志包 */ 10 | "os" 11 | "path" 12 | "path/filepath" 13 | "strings" 14 | "time" 15 | ) 16 | 17 | /* 创建logrus 日志实例 */ 18 | var Logger = logrus.New() 19 | 20 | /* 定义日志级别 */ 21 | const LOG_TRACE = 0 22 | const LOG_DEBUG = 1 23 | const LOG_INFO = 2 24 | const LOG_WARN = 3 25 | const LOG_ERROR = 4 26 | const LOG_FATAL = 5 27 | const LOG_PANIC = 6 28 | 29 | //Custom log format definition 30 | /*logrus原生支持两种日志格式,一种是text,另一种是json格式,同时也支持自定义格式,当前根据自己需求进行自定义*/ 31 | /*首先定义一个结构体*/ 32 | type wlLogFormatter struct{} 33 | 34 | /*定义一个方法*/ 35 | func (s *wlLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { 36 | var b *bytes.Buffer 37 | if entry.Buffer != nil { 38 | b = entry.Buffer 39 | } else { 40 | b = &bytes.Buffer{} 41 | } 42 | 43 | var msg string 44 | timestamp := time.Now().Local().Format("2006/01/02 15:04:05") 45 | //HasCaller()为true才会有调用信息 46 | if entry.HasCaller() { 47 | fName := filepath.Base(entry.Caller.File) 48 | msg = fmt.Sprintf("%s [%s] [%s:%d %s] %s\n", timestamp, strings.ToUpper(entry.Level.String()), fName, entry.Caller.Line, entry.Caller.Function, entry.Message) 49 | } else { 50 | msg = fmt.Sprintf("%s [%s] %s\n", timestamp, strings.ToUpper(entry.Level.String()), entry.Message) 51 | } 52 | b.WriteString(msg) 53 | return b.Bytes(), nil 54 | } 55 | 56 | /* 使用闭包特性,初始化带回滚功能的logrus日志环境 */ 57 | func LoggerToFile() func(int, ...interface{}) { 58 | /* 日志路径和名称 */ 59 | logFilePath := "E:\\万里开源\\goProject\\tableCheckSum\\src\\wl-table-checkSum" 60 | logFileName := "table-checksum" 61 | partFileName := path.Join(logFilePath, logFileName) 62 | /* 
禁止日志打印到标准输出stdout */ 63 | devnull, err := os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, os.ModeAppend) 64 | if err != nil { 65 | fmt.Printf("LoggerToFile open os.DevNull failed: ", err) 66 | } 67 | writernull := bufio.NewWriter(devnull) 68 | /*设置日志的输出方式,默认为两种,一种是os.stid,一种是io.write */ 69 | Logger.SetOutput(writernull) 70 | /* */ 71 | Logger.SetReportCaller(true) 72 | /* 设置日志输出格式 */ 73 | Logger.SetFormatter(&wlLogFormatter{}) 74 | /* 设置默认日志级别为 INFO */ 75 | Logger.SetLevel(logrus.InfoLevel) 76 | 77 | /* 创建日志回滚实例,日志名称格式,日志回滚模式(日志每20M回滚,保留10个日志文件) */ 78 | logWriter, err := rotatelogs.New( 79 | partFileName+".%Y%m%d.log", 80 | rotatelogs.WithLinkName(logFileName+".log"), /* 链接文件,链接到当前实际的日志文件 */ 81 | //WithRotationTime设置日志分割时间,多长时间切割一次 82 | rotatelogs.WithRotationTime(time.Hour*24), 83 | //rotatelogs.WithRotationTime(time.Second), 84 | //WithMaxAge和WithRotationCount二者只能设置一个, 85 | //WithMaxAge设置文件清理前的最长保存时间 86 | //WithRotationCount设置文件清理前最多保存的个数 87 | //rotatelogs.WithMaxAge(time.Hour*24) 88 | rotatelogs.WithRotationSize(50*1024*1024), 89 | rotatelogs.WithRotationCount(10), 90 | ) 91 | /* 日志输出到本地文件系统,不同级别都输出到相同的日志中 */ 92 | writeMap := lfshook.WriterMap{ 93 | logrus.InfoLevel: logWriter, 94 | logrus.FatalLevel: logWriter, 95 | logrus.DebugLevel: logWriter, 96 | logrus.WarnLevel: logWriter, 97 | logrus.ErrorLevel: logWriter, 98 | logrus.PanicLevel: logWriter, 99 | } 100 | /* 创建新的lfs钩子 */ 101 | lfHook := lfshook.NewHook(writeMap, &wlLogFormatter{}) 102 | 103 | /* logrus实例添加lfshook钩子 */ 104 | Logger.AddHook(lfHook) 105 | 106 | /* 返回日志函数实例,这里可以根据level参数,实现不同级别的日志输出控制 */ 107 | return func(level int, args ...interface{}) { 108 | loginfo := fmt.Sprintf("%v", args) 109 | switch level { 110 | case 0: 111 | Logger.Trace(loginfo) 112 | case 1: 113 | Logger.Debug(loginfo) 114 | case 2: 115 | Logger.Info(loginfo) 116 | case 3: 117 | Logger.Warn(loginfo) 118 | case 4: 119 | Logger.Error(loginfo) 120 | case 5: 121 | Logger.Fatal(loginfo) 122 | case 6: 123 | Logger.Panic(loginfo) 124 | 
} 125 | } 126 | } 127 | 128 | /* 创建一个日志函数实例(闭包) */ 129 | var testLog = LoggerToFile() 130 | 131 | type loggsStruct struct{} 132 | type loggsInter interface { 133 | TraceWrite() 134 | DebugWrite() 135 | InfoWrite() 136 | WarnWrite() 137 | ErrorWrite() 138 | FatalWrite() 139 | PanicWrite() 140 | } 141 | 142 | func (log *loggsStruct) TraceWrite(msg string) { 143 | testLog(LOG_TRACE, msg) 144 | } 145 | func (log *loggsStruct) DebugWrite(msg string) { 146 | testLog(LOG_DEBUG, msg) 147 | } 148 | func (log *loggsStruct) InfoWrite(msg string) { 149 | testLog(LOG_INFO, msg) 150 | } 151 | func (log *loggsStruct) WarnWrite(msg string) { 152 | testLog(LOG_WARN, msg) 153 | } 154 | func (log *loggsStruct) ErrorWrite(msg string) { 155 | testLog(LOG_ERROR, msg) 156 | } 157 | func (log *loggsStruct) FatalWrite(msg string) { 158 | testLog(LOG_FATAL, msg) 159 | } 160 | func (log *loggsStruct) PanicWrite(msg string) { 161 | testLog(LOG_PANIC, msg) 162 | } 163 | func Glog() loggsStruct { 164 | var log = loggsStruct{} 165 | return log 166 | } 167 | -------------------------------------------------------------------------------- /global/mysqlBinlogDispos.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | import "strings" 4 | 5 | type binlogInfoStruct struct{ 6 | evetRotate RotateEventStruct 7 | formatdesEvent FormatDescriptionEventStruct 8 | PreviousGTIDsEvent PreviousGTIDsEventStruct 9 | GtidEvent GtidEventStruct 10 | QueryEvent QueryEventStruct 11 | TableMapEvent TableMapEventStruct 12 | WriteRowsEventV2 WriteRowsEventV2Struct 13 | UpdateRowsEventV2 UpdateRowsEventV2Struct 14 | DeleteRowsEventV2 DeleteRowsEventV2Struct 15 | XidEvent XidEventStruct 16 | } 17 | 18 | type RotateEventStruct struct { 19 | Datetime string 20 | LogPosition string 21 | EventSize string 22 | Position string 23 | NextLogName string 24 | } 25 | type FormatDescriptionEventStruct struct { 26 | Datetime string 27 | LogPosition string 28 | EventSize string 
29 | Version string 30 | ServerVersion string 31 | ChecksumAlgorithm string 32 | } 33 | type PreviousGTIDsEventStruct struct { 34 | Datetime string 35 | LogPosition string 36 | EventSize string 37 | GtidEvent string 38 | } 39 | type GtidEventStruct struct{ 40 | Datetime string 41 | LogPosition string 42 | EventSize string 43 | GtidNext string 44 | LastCommitted string 45 | sequenceNumber string 46 | transactionLength string 47 | } 48 | type QueryEventStruct struct{ 49 | Datetime string 50 | LogPosition string 51 | EventSize string 52 | Executiontime string 53 | ErrorCode string 54 | Schema string 55 | Query string 56 | } 57 | type TableMapEventStruct struct { 58 | Datetime string 59 | LogPosition string 60 | EventSize string 61 | TableID string 62 | TableIDSize string 63 | Schema string 64 | Table string 65 | } 66 | type WriteRowsEventV2Struct struct { 67 | Datetime string 68 | LogPosition string 69 | EventSize string 70 | TableID string 71 | ColumnCount string 72 | Values string 73 | } 74 | type UpdateRowsEventV2Struct struct { 75 | Datetime string 76 | LogPosition string 77 | EventSize string 78 | TableID string 79 | ColumnCount string 80 | Values string 81 | } 82 | type DeleteRowsEventV2Struct struct { 83 | Datetime string 84 | LogPosition string 85 | EventSize string 86 | TableID string 87 | ColumnCount string 88 | Values string 89 | } 90 | 91 | type XidEventStruct struct { 92 | Datetime string 93 | LogPosition string 94 | EventSize string 95 | Xid string 96 | } 97 | 98 | 99 | func (binlog *binlogInfoStruct) EventFilter(tmpb []string) *binlogInfoStruct{ 100 | var binlogInfo = &binlogInfoStruct{} 101 | dmlStatus := true 102 | for i := range tmpb { 103 | if strings.HasPrefix(tmpb[i],"=== RotateEvent ==="){ 104 | } 105 | if strings.HasPrefix(tmpb[i],"=== FormatDescriptionEvent ==="){ 106 | } 107 | if strings.HasPrefix(tmpb[i],"=== PreviousGTIDsEvent ==="){ 108 | 109 | } 110 | 111 | if strings.HasPrefix(tmpb[i],"=== GTIDEvent ==="){ 112 | if 
strings.HasPrefix(tmpb[i], "GTID_NEXT") { 113 | binlogInfo.GtidEvent.GtidNext = strings.Split(tmpb[i], ": ")[1] 114 | } 115 | } 116 | if strings.HasPrefix(tmpb[i],"=== QueryEvent ==="){ 117 | if strings.HasPrefix(tmpb[i], "Query") { 118 | if strings.Split(tmpb[i], ": ")[1] == "BEGIN"{ 119 | dmlStatus = true 120 | }else{ 121 | dmlStatus = false 122 | binlogInfo.GtidEvent.GtidNext = strings.Split(tmpb[i], ": ")[1] 123 | } 124 | } 125 | } 126 | if dmlStatus && strings.HasPrefix(tmpb[i],"=== TableMapEvent ==="){ 127 | 128 | } 129 | if dmlStatus && strings.HasPrefix(tmpb[i],"=== WriteRowsEventV2 ==="){ 130 | 131 | } 132 | if dmlStatus && strings.HasPrefix(tmpb[i],"=== UpdateRowsEventV2 ==="){ 133 | 134 | } 135 | if dmlStatus && strings.HasPrefix(tmpb[i],"=== DeleteRowsEventV2 ==="){ 136 | 137 | } 138 | 139 | //if strings.HasPrefix(tmpb[i], "Log position") { 140 | // binl.binlogPos = strings.Split(tmpb[i], ": ")[1] 141 | //} 142 | //if strings.HasPrefix(tmpb[i], "Next log name") { 143 | // binl.binlogFile = strings.Split(tmpb[i], ": ")[1] 144 | //} 145 | //if strings.HasPrefix(tmpb[i], "Date") { 146 | // binl.dateTime = strings.Split(tmpb[i], ": ")[1] 147 | //} 148 | //if strings.HasPrefix(tmpb[i], "Version") { 149 | // binl.version = strings.Split(tmpb[i], ": ")[1] 150 | //} 151 | //if strings.HasPrefix(tmpb[i], "Server version") { 152 | // binl.serverVersion = strings.Split(tmpb[i], ": ")[1] 153 | //} 154 | //if strings.HasPrefix(tmpb[i], "Checksum algorithm") { 155 | // binl.checksumAlgorithm = strings.Split(tmpb[i], ": ")[1] 156 | //} 157 | 158 | } 159 | return binlogInfo 160 | } -------------------------------------------------------------------------------- /global/tmpTableDateFile.go: -------------------------------------------------------------------------------- 1 | package global 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os" 8 | "strings" 9 | "sync" 10 | ) 11 | 12 | /* 13 | 生成调度任务,查询表数据时生成的结构文件 14 | */ 15 | type TableDateFileStruct struct { 16 | 
FileName string 17 | } 18 | 19 | var TableDataFile = &TableDateFileStruct{} 20 | var fileSync sync.Mutex 21 | 22 | /* 23 | 判断文件目录是否存在,不存在则创建文件目录 24 | */ 25 | func (tds TableDateFileStruct) TmpIsDir(dirName string, dirAction string) { 26 | _, exist := os.Stat(dirName) 27 | 28 | if dirAction == "create" { 29 | if os.IsNotExist(exist) { 30 | os.Mkdir(dirName, os.ModePerm) 31 | } 32 | } 33 | if dirAction == "drop" { 34 | if !os.IsExist(exist) { 35 | os.RemoveAll(dirName) 36 | } 37 | } 38 | 39 | } 40 | 41 | /* 42 | 针对每个表创建一个临时文件,临时文件中的内容为每个表的查询的数据索引列条件值 43 | */ 44 | func (tds TableDateFileStruct) WriteFile(file *os.File, writeMapString []string) error { 45 | //写入数据 46 | write := bufio.NewWriter(file) 47 | //fileSync.Lock() 48 | //for _, v := range writeMapString { 49 | // for is, i := range v { 50 | for is, i := range writeMapString { 51 | write.WriteString(i) 52 | if is < len(i)-1 { 53 | write.WriteString("/*greatdbCheckColumnSplict*/") 54 | } 55 | Wlog.Debug(fmt.Sprintf("GreatdbCheck writes data \"%s\" to file %s.", i, tds.FileName)) 56 | } 57 | write.WriteString("\n") 58 | //} 59 | 60 | //把缓冲区清空,立即将最后的数据写入文件 61 | //fileSync.Unlock() 62 | write.Flush() 63 | Wlog.Debug(fmt.Sprintf("GreatdbCheck refreshes the data in the disk cache to the disk file and continues to drop the disk")) 64 | return nil 65 | } 66 | func (tds TableDateFileStruct) ReadFile(from, to, rownum int, fseek int64) ([]string, int, int64, error) { 67 | var cur_offset int64 68 | var aa string 69 | file, err := os.OpenFile(tds.FileName, os.O_RDONLY, 0666) 70 | if err != nil { 71 | Wlog.Error(fmt.Sprintf("GreatdbCheck Failed to open file %s, error message:%s", tds.FileName, err)) 72 | return []string{}, 0, 0, err 73 | } 74 | //延迟关闭文件:在函数return前执行的程序 75 | defer func() { 76 | file.Close() 77 | Wlog.Debug(fmt.Sprintf("actions colse file %s file.", tds.FileName)) 78 | }() 79 | file.Seek(fseek, io.SeekStart) 80 | //创建文件读取器 81 | reader := bufio.NewReader(file) 82 | cur_offset = fseek 83 | for { 84 | if rownum >= 
from && rownum <= to { 85 | data, err := reader.ReadBytes('\n') 86 | cur_offset += int64(len(data)) 87 | line := strings.TrimSpace(string(data)) 88 | aa += line 89 | if err != nil { 90 | if err == io.EOF { 91 | Wlog.Debug(fmt.Sprintf("GreatdbCheck has read the end of file %s and will stop reading data from the file.", tds.FileName)) 92 | break 93 | } else { 94 | //读取异常,打印异常并结束 95 | Wlog.Error(fmt.Sprintf("GreatdbCheck Fails to read file %s, and the actions stops reading file A. Error message: %s", tds.FileName, err)) 96 | return []string{}, 0, 0, err 97 | } 98 | } 99 | } 100 | if rownum > to { 101 | break 102 | } 103 | aa += "," 104 | rownum++ 105 | } 106 | 107 | bb := strings.Split(aa, ",") 108 | return bb[:len(bb)-1], rownum, cur_offset, nil 109 | } 110 | func (tds TableDateFileStruct) FindFile(from, to int) (bool, error) { 111 | f, err := os.Open(tds.FileName) 112 | if err != nil { 113 | return false, err 114 | } 115 | defer f.Close() 116 | n := 0 117 | scanner := bufio.NewScanner(f) 118 | for scanner.Scan() { 119 | n++ 120 | if n < from { 121 | fmt.Println("-----", string(scanner.Bytes())) 122 | continue 123 | } 124 | if n > to { 125 | break 126 | } 127 | } 128 | return false, scanner.Err() 129 | } 130 | 131 | //删除文件 132 | func (tds TableDateFileStruct) RmFile(f ...string) error { 133 | for _, i := range f { 134 | err := os.Remove(i) //删除文件test.txt 135 | if err != nil { 136 | //如果删除失败则输出 file remove Error! 137 | Wlog.Error(fmt.Sprintf("GreatdbCheck Failed to delete file %s, error message: %s", i, err)) 138 | //输出错误详细信息 139 | return err 140 | } else { 141 | //如果删除成功则输出 file remove OK! 
142 | Wlog.Debug(fmt.Sprintf("GreatdbCheck Successfully delete file %s.", i)) 143 | } 144 | } 145 | return nil 146 | } 147 | -------------------------------------------------------------------------------- /go-log/log/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 siddontang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /go-log/log/README.md: -------------------------------------------------------------------------------- 1 | ## go-log 2 | 3 | a golang log lib supports level and multi handlers 4 | 5 | ## Use 6 | 7 | import "github.com/siddontang/go-log/log" 8 | 9 | //log with different level 10 | log.Info("hello world") 11 | log.Error("hello world") 12 | 13 | //create a logger with specified handler 14 | h := NewStreamHandler(os.Stdout) 15 | l := log.NewDefault(h) 16 | l.Info("hello world") 17 | 18 | ## go-doc 19 | 20 | [![GoDoc](https://godoc.org/github.com/siddontang/go-log?status.png)](https://godoc.org/github.com/siddontang/go-log) 21 | -------------------------------------------------------------------------------- /go-log/log/doc.go: -------------------------------------------------------------------------------- 1 | // Package log supplies more advanced features than go orign log package. 2 | // 3 | // It supports log different level: trace, debug, info, warn, error, fatal. 4 | // 5 | // It also supports different log handlers which you can log to stdout, file, socket, etc... 6 | // 7 | // Use 8 | // 9 | // import "github.com/siddontang/go-log/log" 10 | // 11 | // //log with different level 12 | // log.Info("hello world") 13 | // log.Error("hello world") 14 | // 15 | // //create a logger with specified handler 16 | // h := NewStreamHandler(os.Stdout) 17 | // l := log.NewDefault(h) 18 | // l.Info("hello world") 19 | // 20 | package log 21 | -------------------------------------------------------------------------------- /go-log/log/filehandler.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "time" 8 | ) 9 | 10 | // FileHandler writes log to a file. 
11 | type FileHandler struct { 12 | fd *os.File 13 | } 14 | 15 | // NewFileHandler creates a FileHander 16 | func NewFileHandler(fileName string, flag int) (*FileHandler, error) { 17 | dir := path.Dir(fileName) 18 | os.Mkdir(dir, 0777) 19 | 20 | f, err := os.OpenFile(fileName, flag, 0755) 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | h := new(FileHandler) 26 | 27 | h.fd = f 28 | 29 | return h, nil 30 | } 31 | 32 | // Write implements Handler interface 33 | func (h *FileHandler) Write(b []byte) (n int, err error) { 34 | return h.fd.Write(b) 35 | } 36 | 37 | // Close implements Handler interface 38 | func (h *FileHandler) Close() error { 39 | return h.fd.Close() 40 | } 41 | 42 | // RotatingFileHandler writes log a file, if file size exceeds maxBytes, 43 | // it will backup current file and open a new one. 44 | // 45 | // max backup file number is set by backupCount, it will delete oldest if backups too many. 46 | type RotatingFileHandler struct { 47 | fd *os.File 48 | 49 | fileName string 50 | maxBytes int 51 | curBytes int 52 | backupCount int 53 | } 54 | 55 | // NewRotatingFileHandler creates a RotatingFileHandler 56 | func NewRotatingFileHandler(fileName string, maxBytes int, backupCount int) (*RotatingFileHandler, error) { 57 | dir := path.Dir(fileName) 58 | os.MkdirAll(dir, 0777) 59 | 60 | h := new(RotatingFileHandler) 61 | 62 | if maxBytes <= 0 { 63 | return nil, fmt.Errorf("invalid max bytes") 64 | } 65 | 66 | h.fileName = fileName 67 | h.maxBytes = maxBytes 68 | h.backupCount = backupCount 69 | 70 | var err error 71 | h.fd, err = os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | f, err := h.fd.Stat() 77 | if err != nil { 78 | return nil, err 79 | } 80 | h.curBytes = int(f.Size()) 81 | 82 | return h, nil 83 | } 84 | 85 | // Write implements Handler interface 86 | func (h *RotatingFileHandler) Write(p []byte) (n int, err error) { 87 | h.doRollover() 88 | n, err = 
h.fd.Write(p) 89 | h.curBytes += n 90 | return 91 | } 92 | 93 | // Close implements Handler interface 94 | func (h *RotatingFileHandler) Close() error { 95 | if h.fd != nil { 96 | return h.fd.Close() 97 | } 98 | return nil 99 | } 100 | 101 | func (h *RotatingFileHandler) doRollover() { 102 | if h.curBytes < h.maxBytes { 103 | return 104 | } 105 | 106 | f, err := h.fd.Stat() 107 | if err != nil { 108 | return 109 | } 110 | 111 | if h.maxBytes <= 0 { 112 | return 113 | } else if f.Size() < int64(h.maxBytes) { 114 | h.curBytes = int(f.Size()) 115 | return 116 | } 117 | 118 | if h.backupCount > 0 { 119 | h.fd.Close() 120 | 121 | for i := h.backupCount - 1; i > 0; i-- { 122 | sfn := fmt.Sprintf("%s.%d", h.fileName, i) 123 | dfn := fmt.Sprintf("%s.%d", h.fileName, i+1) 124 | 125 | os.Rename(sfn, dfn) 126 | } 127 | 128 | dfn := fmt.Sprintf("%s.1", h.fileName) 129 | os.Rename(h.fileName, dfn) 130 | 131 | h.fd, _ = os.OpenFile(h.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) 132 | h.curBytes = 0 133 | f, err := h.fd.Stat() 134 | if err != nil { 135 | return 136 | } 137 | h.curBytes = int(f.Size()) 138 | } 139 | } 140 | 141 | // TimeRotatingFileHandler writes log to a file, 142 | // it will backup current and open a new one, with a period time you sepecified. 143 | // 144 | // refer: http://docs.python.org/2/library/logging.handlers.html. 145 | // same like python TimedRotatingFileHandler. 
146 | type TimeRotatingFileHandler struct { 147 | fd *os.File 148 | 149 | baseName string 150 | interval int64 151 | suffix string 152 | rolloverAt int64 153 | } 154 | 155 | // TimeRotating way 156 | const ( 157 | WhenSecond = iota 158 | WhenMinute 159 | WhenHour 160 | WhenDay 161 | ) 162 | 163 | // NewTimeRotatingFileHandler creates a TimeRotatingFileHandler 164 | func NewTimeRotatingFileHandler(baseName string, when int8, interval int) (*TimeRotatingFileHandler, error) { 165 | dir := path.Dir(baseName) 166 | os.MkdirAll(dir, 0777) 167 | 168 | h := new(TimeRotatingFileHandler) 169 | 170 | h.baseName = baseName 171 | 172 | switch when { 173 | case WhenSecond: 174 | h.interval = 1 175 | h.suffix = "2006-01-02_15-04-05" 176 | case WhenMinute: 177 | h.interval = 60 178 | h.suffix = "2006-01-02_15-04" 179 | case WhenHour: 180 | h.interval = 3600 181 | h.suffix = "2006-01-02_15" 182 | case WhenDay: 183 | h.interval = 3600 * 24 184 | h.suffix = "2006-01-02" 185 | default: 186 | return nil, fmt.Errorf("invalid when_rotate: %d", when) 187 | } 188 | 189 | h.interval = h.interval * int64(interval) 190 | 191 | var err error 192 | h.fd, err = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) 193 | if err != nil { 194 | return nil, err 195 | } 196 | 197 | fInfo, _ := h.fd.Stat() 198 | h.rolloverAt = fInfo.ModTime().Unix() + h.interval 199 | 200 | return h, nil 201 | } 202 | 203 | func (h *TimeRotatingFileHandler) doRollover() { 204 | //refer http://hg.python.org/cpython/file/2.7/Lib/logging/handlers.py 205 | now := time.Now() 206 | 207 | if h.rolloverAt <= now.Unix() { 208 | fName := h.baseName + now.Format(h.suffix) 209 | h.fd.Close() 210 | e := os.Rename(h.baseName, fName) 211 | if e != nil { 212 | panic(e) 213 | } 214 | 215 | h.fd, _ = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) 216 | 217 | h.rolloverAt = time.Now().Unix() + h.interval 218 | } 219 | } 220 | 221 | // Write implements Handler interface 222 | func (h 
*TimeRotatingFileHandler) Write(b []byte) (n int, err error) { 223 | h.doRollover() 224 | return h.fd.Write(b) 225 | } 226 | 227 | // Close implements Handler interface 228 | func (h *TimeRotatingFileHandler) Close() error { 229 | return h.fd.Close() 230 | } 231 | -------------------------------------------------------------------------------- /go-log/log/handler.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | //Handler writes logs to somewhere 8 | type Handler interface { 9 | Write(p []byte) (n int, err error) 10 | Close() error 11 | } 12 | 13 | // StreamHandler writes logs to a specified io Writer, maybe stdout, stderr, etc... 14 | type StreamHandler struct { 15 | w io.Writer 16 | } 17 | 18 | // NewStreamHandler creates a StreamHandler 19 | func NewStreamHandler(w io.Writer) (*StreamHandler, error) { 20 | h := new(StreamHandler) 21 | h.w = w 22 | return h, nil 23 | } 24 | 25 | // Write implements Handler interface 26 | func (h *StreamHandler) Write(b []byte) (n int, err error) { 27 | return h.w.Write(b) 28 | } 29 | 30 | // Close implements Handler interface 31 | func (h *StreamHandler) Close() error { 32 | return nil 33 | } 34 | 35 | // NullHandler does nothing, it discards anything. 
// NullHandler swallows every record: handy for disabling logging entirely
// while keeping the Handler plumbing in place.
type NullHandler struct{}

// NewNullHandler creates a NullHandler; the returned error is always nil.
func NewNullHandler() (*NullHandler, error) {
	return &NullHandler{}, nil
}

// Write implements Handler interface: the bytes are discarded but reported
// as fully written.
func (h *NullHandler) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close implements Handler interface; there is nothing to release.
func (h *NullHandler) Close() error {
	return nil
}
// Panicf records the log with error level and then panics with the formatted
// message. (The previous comment said "fatal level", but the code emits
// LevelError; Fatal* are the functions that log at fatal level and exit.)
func Panicf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	// calldepth 2 — presumably attributes the log site to Panicf's caller;
	// verify against Logger.Output.
	logger.Output(2, LevelError, msg)
	panic(msg)
}

// Panicln records the log with error level (Sprintln formatting, trailing
// newline included in the panic value) and then panics.
func Panicln(args ...interface{}) {
	msg := fmt.Sprintln(args...)
	logger.Output(2, LevelError, msg)
	panic(msg)
}
log with info level 110 | func Info(args ...interface{}) { 111 | logger.Output(2, LevelInfo, fmt.Sprint(args...)) 112 | } 113 | 114 | // Infof records the log with info level 115 | func Infof(format string, args ...interface{}) { 116 | logger.Output(2, LevelInfo, fmt.Sprintf(format, args...)) 117 | } 118 | 119 | // Infoln records the log with info level 120 | func Infoln(args ...interface{}) { 121 | logger.Output(2, LevelInfo, fmt.Sprintln(args...)) 122 | } 123 | 124 | // Warn records the log with warn level 125 | func Warn(args ...interface{}) { 126 | logger.Output(2, LevelWarn, fmt.Sprint(args...)) 127 | } 128 | 129 | // Warnf records the log with warn level 130 | func Warnf(format string, args ...interface{}) { 131 | logger.Output(2, LevelWarn, fmt.Sprintf(format, args...)) 132 | } 133 | 134 | // Warnln records the log with warn level 135 | func Warnln(args ...interface{}) { 136 | logger.Output(2, LevelWarn, fmt.Sprintln(args...)) 137 | } 138 | 139 | func NewWlog(logfile, logLevel string) *Logger { 140 | //var logFile string 141 | //if runtime.GOOS == "windows"{ 142 | // logFile = fmt.Sprintf("%s\\%s",logpath,logfile) 143 | //} 144 | //if runtime.GOOS == "linux"{ 145 | // logFile = fmt.Sprintf("%s/%s",logpath,logfile) 146 | //} 147 | _, err := os.Open(logfile) 148 | if err != nil && os.IsNotExist(err) { 149 | _, err = os.OpenFile(logfile, os.O_WRONLY|os.O_CREATE, 0666) 150 | if err != nil { 151 | fmt.Println(err) 152 | } 153 | } 154 | fp, err := os.OpenFile(logfile, os.O_RDWR|os.O_APPEND, 0666) 155 | if err != nil { 156 | fmt.Println("open log file or create log file fail. Errof info: ", err) 157 | os.Exit(1) 158 | } 159 | h, err := NewStreamHandler(fp) 160 | if err != nil { 161 | Errorln("create log file StreamHandler fail. 
Errof info: ", err) 162 | os.Exit(1) 163 | } 164 | wlog := NewDefault(h) 165 | wlog.SetLevelByName(logLevel) 166 | return wlog 167 | } 168 | -------------------------------------------------------------------------------- /go-log/log/log_test.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func TestStdStreamLog(t *testing.T) { 9 | h, _ := NewStreamHandler(os.Stdout) 10 | s := NewDefault(h) 11 | s.Info("hello world") 12 | 13 | s.Close() 14 | 15 | Info("hello world") 16 | 17 | defer func() { 18 | recover() 19 | }() 20 | 21 | Panic("hello wrold") 22 | } 23 | 24 | func TestRotatingFileLog(t *testing.T) { 25 | path := "./test_log" 26 | os.RemoveAll(path) 27 | 28 | os.Mkdir(path, 0777) 29 | fileName := path + "/test" 30 | 31 | h, err := NewRotatingFileHandler(fileName, 10, 2) 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | 36 | buf := make([]byte, 10) 37 | 38 | h.Write(buf) 39 | 40 | h.Write(buf) 41 | 42 | if _, err := os.Stat(fileName + ".1"); err != nil { 43 | t.Fatal(err) 44 | } 45 | 46 | if _, err := os.Stat(fileName + ".2"); err == nil { 47 | t.Fatal(err) 48 | } 49 | 50 | h.Write(buf) 51 | if _, err := os.Stat(fileName + ".2"); err != nil { 52 | t.Fatal(err) 53 | } 54 | 55 | h.Close() 56 | 57 | os.RemoveAll(path) 58 | } 59 | -------------------------------------------------------------------------------- /go-log/loggers/loggers.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) 2017 Birkir A. 
// Standard is the interface used by Go's standard library's log package.
//
// Mirroring the stdlib contract, implementations are expected to have the
// Fatal* methods terminate the process, the Panic* methods panic after
// logging, and the Print* methods simply record the message.
type Standard interface {
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})

	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Panicln(args ...interface{})

	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})
}
// Advanced is an interface with commonly used log level methods.
// It embeds Standard, adding the Debug/Error/Info/Warn severity families
// in the same three flavours (Sprint/Sprintf/Sprintln style).
type Advanced interface {
	Standard

	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Debugln(args ...interface{})

	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Errorln(args ...interface{})

	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Infoln(args ...interface{})

	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Warnln(args ...interface{})
}

// Contextual is an interface that allows context addition to a log statement
// before calling the final print (message/level) method. The With* methods
// return an Advanced logger carrying the extra field(s).
type Contextual interface {
	Advanced

	WithField(key string, value interface{}) Advanced
	WithFields(fields ...interface{}) Advanced
}
14 | check-blank: false 15 | 16 | lll: 17 | line-length: 100 18 | tab-width: 4 19 | 20 | prealloc: 21 | simple: false 22 | range-loops: false 23 | for-loops: false 24 | 25 | whitespace: 26 | multi-if: false # Enforces newlines (or comments) after every multi-line if statement 27 | multi-func: false # Enforces newlines (or comments) after every multi-line function signature 28 | 29 | linters: 30 | enable: 31 | - megacheck 32 | - govet 33 | disable: 34 | - maligned 35 | - prealloc 36 | disable-all: false 37 | presets: 38 | - bugs 39 | - unused 40 | fast: false 41 | -------------------------------------------------------------------------------- /go-log/logrus/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go_import_path: github.com/sirupsen/logrus 3 | git: 4 | depth: 1 5 | env: 6 | - GO111MODULE=on 7 | go: 1.15.x 8 | os: linux 9 | install: 10 | - ./travis/install.sh 11 | script: 12 | - cd ci 13 | - go run mage.go -v -w ../ crossBuild 14 | - go run mage.go -v -w ../ lint 15 | - go run mage.go -v -w ../ test 16 | -------------------------------------------------------------------------------- /go-log/logrus/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Simon Eskildsen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /go-log/logrus/alt_exit.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | // The following code was sourced and modified from the 4 | // https://github.com/tebeka/atexit package governed by the following license: 5 | // 6 | // Copyright (c) 2012 Miki Tebeka . 7 | // 8 | // Permission is hereby granted, free of charge, to any person obtaining a copy of 9 | // this software and associated documentation files (the "Software"), to deal in 10 | // the Software without restriction, including without limitation the rights to 11 | // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 12 | // the Software, and to permit persons to whom the Software is furnished to do so, 13 | // subject to the following conditions: 14 | // 15 | // The above copyright notice and this permission notice shall be included in all 16 | // copies or substantial portions of the Software. 17 | // 18 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 20 | // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
// handlers holds the registered atexit callbacks, in invocation order.
var handlers = []func(){}

// reportHandlerPanic is installed via defer around each exit handler so a
// panicking handler cannot prevent the remaining ones from running.
func reportHandlerPanic() {
	if err := recover(); err != nil {
		fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
	}
}

// runHandler invokes one exit handler, swallowing (and reporting) any panic.
func runHandler(handler func()) {
	defer reportHandlerPanic()
	handler()
}

// runHandlers invokes every registered handler in registration order.
func runHandlers() {
	for i := range handlers {
		runHandler(handlers[i])
	}
}

// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
	runHandlers()
	os.Exit(code)
}

// RegisterExitHandler appends a Logrus Exit handler to the list of handlers.
// Call logrus.Exit to invoke all handlers; they are also invoked when any
// Fatal log entry is made. Useful for graceful shutdown work such as closing
// database connections or sending an alert that the application is closing.
func RegisterExitHandler(handler func()) {
	handlers = append(handlers, handler)
}
74 | func DeferExitHandler(handler func()) { 75 | handlers = append([]func(){handler}, handlers...) 76 | } 77 | -------------------------------------------------------------------------------- /go-log/logrus/alt_exit_test.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "io/ioutil" 5 | "log" 6 | "os" 7 | "os/exec" 8 | "path/filepath" 9 | "runtime" 10 | "strings" 11 | "testing" 12 | "time" 13 | ) 14 | 15 | func TestRegister(t *testing.T) { 16 | current := len(handlers) 17 | 18 | var results []string 19 | 20 | h1 := func() { results = append(results, "first") } 21 | h2 := func() { results = append(results, "second") } 22 | 23 | RegisterExitHandler(h1) 24 | RegisterExitHandler(h2) 25 | 26 | if len(handlers) != current+2 { 27 | t.Fatalf("expected %d handlers, got %d", current+2, len(handlers)) 28 | } 29 | 30 | runHandlers() 31 | 32 | if len(results) != 2 { 33 | t.Fatalf("expected 2 handlers to be run, ran %d", len(results)) 34 | } 35 | 36 | if results[0] != "first" { 37 | t.Fatal("expected handler h1 to be run first, but it wasn't") 38 | } 39 | 40 | if results[1] != "second" { 41 | t.Fatal("expected handler h2 to be run second, but it wasn't") 42 | } 43 | } 44 | 45 | func TestDefer(t *testing.T) { 46 | current := len(handlers) 47 | 48 | var results []string 49 | 50 | h1 := func() { results = append(results, "first") } 51 | h2 := func() { results = append(results, "second") } 52 | 53 | DeferExitHandler(h1) 54 | DeferExitHandler(h2) 55 | 56 | if len(handlers) != current+2 { 57 | t.Fatalf("expected %d handlers, got %d", current+2, len(handlers)) 58 | } 59 | 60 | runHandlers() 61 | 62 | if len(results) != 2 { 63 | t.Fatalf("expected 2 handlers to be run, ran %d", len(results)) 64 | } 65 | 66 | if results[0] != "second" { 67 | t.Fatal("expected handler h2 to be run first, but it wasn't") 68 | } 69 | 70 | if results[1] != "first" { 71 | t.Fatal("expected handler h1 to be run second, but it wasn't") 
72 | } 73 | } 74 | 75 | func TestHandler(t *testing.T) { 76 | testprog := testprogleader 77 | testprog = append(testprog, getPackage()...) 78 | testprog = append(testprog, testprogtrailer...) 79 | tempDir, err := ioutil.TempDir("", "test_handler") 80 | if err != nil { 81 | log.Fatalf("can't create temp dir. %q", err) 82 | } 83 | defer os.RemoveAll(tempDir) 84 | 85 | gofile := filepath.Join(tempDir, "gofile.go") 86 | if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { 87 | t.Fatalf("can't create go file. %q", err) 88 | } 89 | 90 | outfile := filepath.Join(tempDir, "outfile.out") 91 | arg := time.Now().UTC().String() 92 | err = exec.Command("go", "run", gofile, outfile, arg).Run() 93 | if err == nil { 94 | t.Fatalf("completed normally, should have failed") 95 | } 96 | 97 | data, err := ioutil.ReadFile(outfile) 98 | if err != nil { 99 | t.Fatalf("can't read output file %s. %q", outfile, err) 100 | } 101 | 102 | if string(data) != arg { 103 | t.Fatalf("bad data. Expected %q, got %q", data, arg) 104 | } 105 | } 106 | 107 | // getPackage returns the name of the current package, which makes running this 108 | // test in a fork simpler 109 | func getPackage() []byte { 110 | pc, _, _, _ := runtime.Caller(0) 111 | fullFuncName := runtime.FuncForPC(pc).Name() 112 | idx := strings.LastIndex(fullFuncName, ".") 113 | return []byte(fullFuncName[:idx]) // trim off function details 114 | } 115 | 116 | var testprogleader = []byte(` 117 | // Test program for atexit, gets output file and data as arguments and writes 118 | // data to output file in atexit handler. 
var (
	// bufferPool is the active allocation strategy for scratch buffers.
	bufferPool BufferPool
)

// BufferPool abstracts how scratch *bytes.Buffer values are recycled.
type BufferPool interface {
	Put(*bytes.Buffer)
	Get() *bytes.Buffer
}

// defaultPool adapts a sync.Pool to the BufferPool interface.
type defaultPool struct {
	pool *sync.Pool
}

// Put hands buf back to the underlying sync.Pool.
func (p *defaultPool) Put(buf *bytes.Buffer) {
	p.pool.Put(buf)
}

// Get returns a pooled (or freshly allocated) buffer.
func (p *defaultPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

// getBuffer fetches a scratch buffer from the active pool.
func getBuffer() *bytes.Buffer {
	return bufferPool.Get()
}

// putBuffer resets buf before returning it, so getBuffer callers always
// start from an empty buffer.
func putBuffer(buf *bytes.Buffer) {
	buf.Reset()
	bufferPool.Put(buf)
}

// SetBufferPool allows to replace the default logrus buffer pool
// to better meets the specific needs of an application.
func SetBufferPool(bp BufferPool) {
	bufferPool = bp
}

func init() {
	backing := &sync.Pool{
		New: func() interface{} {
			return new(bytes.Buffer)
		},
	}
	SetBufferPool(&defaultPool{pool: backing})
}
22 | 23 | defer func() { 24 | err := recover() 25 | if err != nil { 26 | entry := err.(*Entry) 27 | log.WithFields(Fields{ 28 | "omg": true, 29 | "err_animal": entry.Data["animal"], 30 | "err_size": entry.Data["size"], 31 | "err_level": entry.Level, 32 | "err_message": entry.Message, 33 | "number": 100, 34 | }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code 35 | } 36 | }() 37 | 38 | log.WithFields(Fields{ 39 | "animal": "walrus", 40 | "number": 0, 41 | }).Trace("Went to the beach") 42 | 43 | log.WithFields(Fields{ 44 | "animal": "walrus", 45 | "number": 8, 46 | }).Debug("Started observing beach") 47 | 48 | log.WithFields(Fields{ 49 | "animal": "walrus", 50 | "size": 10, 51 | }).Info("A group of walrus emerges from the ocean") 52 | 53 | log.WithFields(Fields{ 54 | "omg": true, 55 | "number": 122, 56 | }).Warn("The group's number increased tremendously!") 57 | 58 | log.WithFields(Fields{ 59 | "temperature": -4, 60 | }).Debug("Temperature changes") 61 | 62 | log.WithFields(Fields{ 63 | "animal": "orca", 64 | "size": 9009, 65 | }).Panic("It's over 9000!") 66 | 67 | // Output: 68 | // level=trace msg="Went to the beach" animal=walrus number=0 69 | // level=debug msg="Started observing beach" animal=walrus number=8 70 | // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 71 | // level=warning msg="The group's number increased tremendously!" number=122 omg=true 72 | // level=debug msg="Temperature changes" temperature=-4 73 | // level=panic msg="It's over 9000!" animal=orca size=9009 74 | // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" 
// DefaultFieldHook injects a field keyed "aDefaultField" into every entry it
// fires on; GetValue supplies the field's value at fire time. It is used as a
// logrus hook (see ExampleDefaultFieldHook, which registers it via AddHook).
type DefaultFieldHook struct {
	GetValue func() string
}

// Levels reports that this hook fires for every log level.
func (h *DefaultFieldHook) Levels() []Level {
	return AllLevels
}

// Fire stamps the entry's data map with the current GetValue() result.
// It never returns an error.
func (h *DefaultFieldHook) Fire(e *Entry) error {
	e.Data["aDefaultField"] = h.GetValue()
	return nil
}
-------------------------------------------------------------------------------- /go-log/logrus/example_function_test.go: -------------------------------------------------------------------------------- 1 | package logrus_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestLogger_LogFn(t *testing.T) { 10 | log.SetFormatter(&log.JSONFormatter{}) 11 | log.SetLevel(log.WarnLevel) 12 | 13 | notCalled := 0 14 | log.InfoFn(func() []interface{} { 15 | notCalled++ 16 | return []interface{}{ 17 | "Hello", 18 | } 19 | }) 20 | assert.Equal(t, 0, notCalled) 21 | 22 | called := 0 23 | log.ErrorFn(func() []interface{} { 24 | called++ 25 | return []interface{}{ 26 | "Oopsi", 27 | } 28 | }) 29 | assert.Equal(t, 1, called) 30 | } 31 | -------------------------------------------------------------------------------- /go-log/logrus/example_global_hook_test.go: -------------------------------------------------------------------------------- 1 | package logrus_test 2 | 3 | import ( 4 | "os" 5 | ) 6 | 7 | var ( 8 | mystring string 9 | ) 10 | 11 | type GlobalHook struct { 12 | } 13 | 14 | func (h *GlobalHook) Levels() []Level { 15 | return AllLevels 16 | } 17 | 18 | func (h *GlobalHook) Fire(e *Entry) error { 19 | e.Data["mystring"] = mystring 20 | return nil 21 | } 22 | 23 | func ExampleGlobalHook() { 24 | l := New() 25 | l.Out = os.Stdout 26 | l.Formatter = &TextFormatter{DisableTimestamp: true, DisableColors: true} 27 | l.AddHook(&GlobalHook{}) 28 | mystring = "first value" 29 | l.Info("first log") 30 | mystring = "another value" 31 | l.Info("second log") 32 | // Output: 33 | // level=info msg="first log" mystring="first value" 34 | // level=info msg="second log" mystring="another value" 35 | } 36 | -------------------------------------------------------------------------------- /go-log/logrus/example_hook_test.go: -------------------------------------------------------------------------------- 1 | // +build !windows 2 | 3 | package 
// Default key names for the default fields
const (
	// defaultTimestampFormat is the layout used when no custom format is set.
	defaultTimestampFormat = time.RFC3339
	// FieldKeyMsg is the serialized key for the log message.
	FieldKeyMsg = "msg"
	// FieldKeyLevel is the serialized key for the severity name.
	FieldKeyLevel = "level"
	// FieldKeyTime is the serialized key for the timestamp.
	FieldKeyTime = "time"
	// FieldKeyLogrusError is the serialized key for internal logrus errors.
	FieldKeyLogrusError = "logrus_error"
	// FieldKeyFunc is the serialized key for the calling function
	// (only emitted when caller reporting is enabled).
	FieldKeyFunc = "func"
	// FieldKeyFile is the serialized key for the calling file
	// (only emitted when caller reporting is enabled).
	FieldKeyFile = "file"
)
20 | // * `entry.Data["time"]`. The timestamp. 21 | // * `entry.Data["level"]. The level the entry was logged at. 22 | // 23 | // Any additional fields added with `WithField` or `WithFields` are also in 24 | // `entry.Data`. Format is expected to return an array of bytes which are then 25 | // logged to `logger.Out`. 26 | type Formatter interface { 27 | Format(*Entry) ([]byte, error) 28 | } 29 | 30 | // This is to not silently overwrite `time`, `msg`, `func` and `level` fields when 31 | // dumping it. If this code wasn't there doing: 32 | // 33 | // logrus.WithField("level", 1).Info("hello") 34 | // 35 | // Would just silently drop the user provided level. Instead with this code 36 | // it'll logged as: 37 | // 38 | // {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} 39 | // 40 | // It's not exported because it's still using Data in an opinionated way. It's to 41 | // avoid code duplication between the two default formatters. 42 | func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { 43 | timeKey := fieldMap.resolve(FieldKeyTime) 44 | if t, ok := data[timeKey]; ok { 45 | data["fields."+timeKey] = t 46 | delete(data, timeKey) 47 | } 48 | 49 | msgKey := fieldMap.resolve(FieldKeyMsg) 50 | if m, ok := data[msgKey]; ok { 51 | data["fields."+msgKey] = m 52 | delete(data, msgKey) 53 | } 54 | 55 | levelKey := fieldMap.resolve(FieldKeyLevel) 56 | if l, ok := data[levelKey]; ok { 57 | data["fields."+levelKey] = l 58 | delete(data, levelKey) 59 | } 60 | 61 | logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) 62 | if l, ok := data[logrusErrKey]; ok { 63 | data["fields."+logrusErrKey] = l 64 | delete(data, logrusErrKey) 65 | } 66 | 67 | // If reportCaller is not set, 'func' will not conflict. 
68 | if reportCaller { 69 | funcKey := fieldMap.resolve(FieldKeyFunc) 70 | if l, ok := data[funcKey]; ok { 71 | data["fields."+funcKey] = l 72 | } 73 | fileKey := fieldMap.resolve(FieldKeyFile) 74 | if l, ok := data[fileKey]; ok { 75 | data["fields."+fileKey] = l 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /go-log/logrus/formatter_bench_test.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | // smallFields is a small size data set for benchmarking 10 | var smallFields = Fields{ 11 | "foo": "bar", 12 | "baz": "qux", 13 | "one": "two", 14 | "three": "four", 15 | } 16 | 17 | // largeFields is a large size data set for benchmarking 18 | var largeFields = Fields{ 19 | "foo": "bar", 20 | "baz": "qux", 21 | "one": "two", 22 | "three": "four", 23 | "five": "six", 24 | "seven": "eight", 25 | "nine": "ten", 26 | "eleven": "twelve", 27 | "thirteen": "fourteen", 28 | "fifteen": "sixteen", 29 | "seventeen": "eighteen", 30 | "nineteen": "twenty", 31 | "a": "b", 32 | "c": "d", 33 | "e": "f", 34 | "g": "h", 35 | "i": "j", 36 | "k": "l", 37 | "m": "n", 38 | "o": "p", 39 | "q": "r", 40 | "s": "t", 41 | "u": "v", 42 | "w": "x", 43 | "y": "z", 44 | "this": "will", 45 | "make": "thirty", 46 | "entries": "yeah", 47 | } 48 | 49 | var errorFields = Fields{ 50 | "foo": fmt.Errorf("bar"), 51 | "baz": fmt.Errorf("qux"), 52 | } 53 | 54 | func BenchmarkErrorTextFormatter(b *testing.B) { 55 | doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) 56 | } 57 | 58 | func BenchmarkSmallTextFormatter(b *testing.B) { 59 | doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) 60 | } 61 | 62 | func BenchmarkLargeTextFormatter(b *testing.B) { 63 | doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) 64 | } 65 | 66 | func BenchmarkSmallColoredTextFormatter(b *testing.B) { 67 | doBenchmark(b, 
&TextFormatter{ForceColors: true}, smallFields) 68 | } 69 | 70 | func BenchmarkLargeColoredTextFormatter(b *testing.B) { 71 | doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) 72 | } 73 | 74 | func BenchmarkSmallJSONFormatter(b *testing.B) { 75 | doBenchmark(b, &JSONFormatter{}, smallFields) 76 | } 77 | 78 | func BenchmarkLargeJSONFormatter(b *testing.B) { 79 | doBenchmark(b, &JSONFormatter{}, largeFields) 80 | } 81 | 82 | func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { 83 | logger := New() 84 | 85 | entry := &Entry{ 86 | Time: time.Time{}, 87 | Level: InfoLevel, 88 | Message: "message", 89 | Data: fields, 90 | Logger: logger, 91 | } 92 | var d []byte 93 | var err error 94 | for i := 0; i < b.N; i++ { 95 | d, err = formatter.Format(entry) 96 | if err != nil { 97 | b.Fatal(err) 98 | } 99 | b.SetBytes(int64(len(d))) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /go-log/logrus/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/sirupsen/logrus 2 | 3 | require ( 4 | github.com/davecgh/go-spew v1.1.1 // indirect 5 | github.com/pmezard/go-difflib v1.0.0 // indirect 6 | github.com/stretchr/testify v1.2.2 7 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 8 | ) 9 | 10 | go 1.13 11 | -------------------------------------------------------------------------------- /go-log/logrus/go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 5 | github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 6 | 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 7 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= 8 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 9 | -------------------------------------------------------------------------------- /go-log/logrus/hook_test.go: -------------------------------------------------------------------------------- 1 | package logrus_test 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | 13 | "github.com/sirupsen/logrus/hooks/test" 14 | . "github.com/sirupsen/logrus/internal/testutils" 15 | ) 16 | 17 | type TestHook struct { 18 | Fired bool 19 | } 20 | 21 | func (hook *TestHook) Fire(entry *Entry) error { 22 | hook.Fired = true 23 | return nil 24 | } 25 | 26 | func (hook *TestHook) Levels() []Level { 27 | return []Level{ 28 | TraceLevel, 29 | DebugLevel, 30 | InfoLevel, 31 | WarnLevel, 32 | ErrorLevel, 33 | FatalLevel, 34 | PanicLevel, 35 | } 36 | } 37 | 38 | func TestHookFires(t *testing.T) { 39 | hook := new(TestHook) 40 | 41 | LogAndAssertJSON(t, func(log *Logger) { 42 | log.Hooks.Add(hook) 43 | assert.Equal(t, hook.Fired, false) 44 | 45 | log.Print("test") 46 | }, func(fields Fields) { 47 | assert.Equal(t, hook.Fired, true) 48 | }) 49 | } 50 | 51 | type ModifyHook struct { 52 | } 53 | 54 | func (hook *ModifyHook) Fire(entry *Entry) error { 55 | entry.Data["wow"] = "whale" 56 | return nil 57 | } 58 | 59 | func (hook *ModifyHook) Levels() []Level { 60 | return []Level{ 61 | TraceLevel, 62 | DebugLevel, 63 | InfoLevel, 64 | WarnLevel, 65 | ErrorLevel, 66 | FatalLevel, 67 | PanicLevel, 68 | } 69 | } 70 | 71 | func TestHookCanModifyEntry(t *testing.T) { 72 | hook := new(ModifyHook) 73 | 74 | LogAndAssertJSON(t, func(log *Logger) { 75 | log.Hooks.Add(hook) 76 | 
log.WithField("wow", "elephant").Print("test") 77 | }, func(fields Fields) { 78 | assert.Equal(t, fields["wow"], "whale") 79 | }) 80 | } 81 | 82 | func TestCanFireMultipleHooks(t *testing.T) { 83 | hook1 := new(ModifyHook) 84 | hook2 := new(TestHook) 85 | 86 | LogAndAssertJSON(t, func(log *Logger) { 87 | log.Hooks.Add(hook1) 88 | log.Hooks.Add(hook2) 89 | 90 | log.WithField("wow", "elephant").Print("test") 91 | }, func(fields Fields) { 92 | assert.Equal(t, fields["wow"], "whale") 93 | assert.Equal(t, hook2.Fired, true) 94 | }) 95 | } 96 | 97 | type SingleLevelModifyHook struct { 98 | ModifyHook 99 | } 100 | 101 | func (h *SingleLevelModifyHook) Levels() []Level { 102 | return []Level{InfoLevel} 103 | } 104 | 105 | func TestHookEntryIsPristine(t *testing.T) { 106 | l := New() 107 | b := &bytes.Buffer{} 108 | l.Formatter = &JSONFormatter{} 109 | l.Out = b 110 | l.AddHook(&SingleLevelModifyHook{}) 111 | 112 | l.Error("error message") 113 | data := map[string]string{} 114 | err := json.Unmarshal(b.Bytes(), &data) 115 | require.NoError(t, err) 116 | _, ok := data["wow"] 117 | require.False(t, ok) 118 | b.Reset() 119 | 120 | l.Info("error message") 121 | data = map[string]string{} 122 | err = json.Unmarshal(b.Bytes(), &data) 123 | require.NoError(t, err) 124 | _, ok = data["wow"] 125 | require.True(t, ok) 126 | b.Reset() 127 | 128 | l.Error("error message") 129 | data = map[string]string{} 130 | err = json.Unmarshal(b.Bytes(), &data) 131 | require.NoError(t, err) 132 | _, ok = data["wow"] 133 | require.False(t, ok) 134 | b.Reset() 135 | } 136 | 137 | type ErrorHook struct { 138 | Fired bool 139 | } 140 | 141 | func (hook *ErrorHook) Fire(entry *Entry) error { 142 | hook.Fired = true 143 | return nil 144 | } 145 | 146 | func (hook *ErrorHook) Levels() []Level { 147 | return []Level{ 148 | ErrorLevel, 149 | } 150 | } 151 | 152 | func TestErrorHookShouldntFireOnInfo(t *testing.T) { 153 | hook := new(ErrorHook) 154 | 155 | LogAndAssertJSON(t, func(log *Logger) { 156 | 
log.Hooks.Add(hook) 157 | log.Info("test") 158 | }, func(fields Fields) { 159 | assert.Equal(t, hook.Fired, false) 160 | }) 161 | } 162 | 163 | func TestErrorHookShouldFireOnError(t *testing.T) { 164 | hook := new(ErrorHook) 165 | 166 | LogAndAssertJSON(t, func(log *Logger) { 167 | log.Hooks.Add(hook) 168 | log.Error("test") 169 | }, func(fields Fields) { 170 | assert.Equal(t, hook.Fired, true) 171 | }) 172 | } 173 | 174 | func TestAddHookRace(t *testing.T) { 175 | var wg sync.WaitGroup 176 | wg.Add(2) 177 | hook := new(ErrorHook) 178 | LogAndAssertJSON(t, func(log *Logger) { 179 | go func() { 180 | defer wg.Done() 181 | log.AddHook(hook) 182 | }() 183 | go func() { 184 | defer wg.Done() 185 | log.Error("test") 186 | }() 187 | wg.Wait() 188 | }, func(fields Fields) { 189 | // the line may have been logged 190 | // before the hook was added, so we can't 191 | // actually assert on the hook 192 | }) 193 | } 194 | 195 | func TestAddHookRace2(t *testing.T) { 196 | t.Parallel() 197 | 198 | for i := 0; i < 3; i++ { 199 | testname := fmt.Sprintf("Test %d", i) 200 | t.Run(testname, func(t *testing.T) { 201 | t.Parallel() 202 | 203 | _ = test.NewGlobal() 204 | Info(testname) 205 | }) 206 | } 207 | } 208 | 209 | type HookCallFunc struct { 210 | F func() 211 | } 212 | 213 | func (h *HookCallFunc) Levels() []Level { 214 | return AllLevels 215 | } 216 | 217 | func (h *HookCallFunc) Fire(e *Entry) error { 218 | h.F() 219 | return nil 220 | } 221 | 222 | func TestHookFireOrder(t *testing.T) { 223 | checkers := []string{} 224 | h := LevelHooks{} 225 | h.Add(&HookCallFunc{F: func() { checkers = append(checkers, "first hook") }}) 226 | h.Add(&HookCallFunc{F: func() { checkers = append(checkers, "second hook") }}) 227 | h.Add(&HookCallFunc{F: func() { checkers = append(checkers, "third hook") }}) 228 | 229 | if err := h.Fire(InfoLevel, &Entry{}); err != nil { 230 | t.Error("unexpected error:", err) 231 | } 232 | require.Equal(t, []string{"first hook", "second hook", "third hook"}, 
checkers) 233 | } 234 | -------------------------------------------------------------------------------- /go-log/logrus/hooks.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | // A hook to be fired when logging on the logging levels returned from 4 | // `Levels()` on your implementation of the interface. Note that this is not 5 | // fired in a goroutine or a channel with workers, you should handle such 6 | // functionality yourself if your call is non-blocking and you don't wish for 7 | // the logging calls for levels returned from `Levels()` to block. 8 | type Hook interface { 9 | Levels() []Level 10 | Fire(*Entry) error 11 | } 12 | 13 | // Internal type for storing the hooks on a logger instance. 14 | type LevelHooks map[Level][]Hook 15 | 16 | // Add a hook to an instance of logger. This is called with 17 | // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. 18 | func (hooks LevelHooks) Add(hook Hook) { 19 | for _, level := range hook.Levels() { 20 | hooks[level] = append(hooks[level], hook) 21 | } 22 | } 23 | 24 | // Fire all the hooks for the passed level. Used by `entry.log` to fire 25 | // appropriate hooks for a log entry. 
26 | func (hooks LevelHooks) Fire(level Level, entry *Entry) error { 27 | for _, hook := range hooks[level] { 28 | if err := hook.Fire(entry); err != nil { 29 | return err 30 | } 31 | } 32 | 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/syslog/README.md: -------------------------------------------------------------------------------- 1 | # Syslog Hooks for Logrus :walrus: 2 | 3 | ## Usage 4 | 5 | ```go 6 | import ( 7 | "log/syslog" 8 | "github.com/sirupsen/logrus" 9 | lSyslog "github.com/sirupsen/logrus/hooks/syslog" 10 | ) 11 | 12 | func main() { 13 | log := logrus.New() 14 | hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") 15 | 16 | if err == nil { 17 | log.Hooks.Add(hook) 18 | } 19 | } 20 | ``` 21 | 22 | If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. 23 | 24 | ```go 25 | import ( 26 | "log/syslog" 27 | "github.com/sirupsen/logrus" 28 | lSyslog "github.com/sirupsen/logrus/hooks/syslog" 29 | ) 30 | 31 | func main() { 32 | log := logrus.New() 33 | hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "") 34 | 35 | if err == nil { 36 | log.Hooks.Add(hook) 37 | } 38 | } 39 | ``` 40 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/syslog/syslog.go: -------------------------------------------------------------------------------- 1 | // +build !windows,!nacl,!plan9 2 | 3 | package syslog 4 | 5 | import ( 6 | "fmt" 7 | "log/syslog" 8 | "os" 9 | 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // SyslogHook to send logs via syslog. 14 | type SyslogHook struct { 15 | Writer *syslog.Writer 16 | SyslogNetwork string 17 | SyslogRaddr string 18 | } 19 | 20 | // Creates a hook to be added to an instance of logger. 
This is called with 21 | // `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` 22 | // `if err == nil { log.Hooks.Add(hook) }` 23 | func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { 24 | w, err := syslog.Dial(network, raddr, priority, tag) 25 | return &SyslogHook{w, network, raddr}, err 26 | } 27 | 28 | func (hook *SyslogHook) Fire(entry *logrus.Entry) error { 29 | line, err := entry.String() 30 | if err != nil { 31 | fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) 32 | return err 33 | } 34 | 35 | switch entry.Level { 36 | case logrus.PanicLevel: 37 | return hook.Writer.Crit(line) 38 | case logrus.FatalLevel: 39 | return hook.Writer.Crit(line) 40 | case logrus.ErrorLevel: 41 | return hook.Writer.Err(line) 42 | case logrus.WarnLevel: 43 | return hook.Writer.Warning(line) 44 | case logrus.InfoLevel: 45 | return hook.Writer.Info(line) 46 | case logrus.DebugLevel, logrus.TraceLevel: 47 | return hook.Writer.Debug(line) 48 | default: 49 | return nil 50 | } 51 | } 52 | 53 | func (hook *SyslogHook) Levels() []logrus.Level { 54 | return logrus.AllLevels 55 | } 56 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/syslog/syslog_test.go: -------------------------------------------------------------------------------- 1 | // +build !windows,!nacl,!plan9 2 | 3 | package syslog 4 | 5 | import ( 6 | "log/syslog" 7 | "testing" 8 | 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | func TestLocalhostAddAndPrint(t *testing.T) { 13 | log := logrus.New() 14 | hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") 15 | 16 | if err != nil { 17 | t.Errorf("Unable to connect to local syslog.") 18 | } 19 | 20 | log.Hooks.Add(hook) 21 | 22 | for _, level := range hook.Levels() { 23 | if len(log.Hooks[level]) != 1 { 24 | t.Errorf("SyslogHook was not added. 
The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) 25 | } 26 | } 27 | 28 | log.Info("Congratulations!") 29 | } 30 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/test/test.go: -------------------------------------------------------------------------------- 1 | // The Test package is used for testing logrus. 2 | // It provides a simple hooks which register logged messages. 3 | package test 4 | 5 | import ( 6 | "io/ioutil" 7 | "sync" 8 | 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | // Hook is a hook designed for dealing with logs in test scenarios. 13 | type Hook struct { 14 | // Entries is an array of all entries that have been received by this hook. 15 | // For safe access, use the AllEntries() method, rather than reading this 16 | // value directly. 17 | Entries []logrus.Entry 18 | mu sync.RWMutex 19 | } 20 | 21 | // NewGlobal installs a test hook for the global logger. 22 | func NewGlobal() *Hook { 23 | 24 | hook := new(Hook) 25 | logrus.AddHook(hook) 26 | 27 | return hook 28 | 29 | } 30 | 31 | // NewLocal installs a test hook for a given local logger. 32 | func NewLocal(logger *logrus.Logger) *Hook { 33 | 34 | hook := new(Hook) 35 | logger.Hooks.Add(hook) 36 | 37 | return hook 38 | 39 | } 40 | 41 | // NewNullLogger creates a discarding logger and installs the test hook. 42 | func NewNullLogger() (*logrus.Logger, *Hook) { 43 | 44 | logger := logrus.New() 45 | logger.Out = ioutil.Discard 46 | 47 | return logger, NewLocal(logger) 48 | 49 | } 50 | 51 | func (t *Hook) Fire(e *logrus.Entry) error { 52 | t.mu.Lock() 53 | defer t.mu.Unlock() 54 | t.Entries = append(t.Entries, *e) 55 | return nil 56 | } 57 | 58 | func (t *Hook) Levels() []logrus.Level { 59 | return logrus.AllLevels 60 | } 61 | 62 | // LastEntry returns the last entry that was logged or nil. 
63 | func (t *Hook) LastEntry() *logrus.Entry { 64 | t.mu.RLock() 65 | defer t.mu.RUnlock() 66 | i := len(t.Entries) - 1 67 | if i < 0 { 68 | return nil 69 | } 70 | return &t.Entries[i] 71 | } 72 | 73 | // AllEntries returns all entries that were logged. 74 | func (t *Hook) AllEntries() []*logrus.Entry { 75 | t.mu.RLock() 76 | defer t.mu.RUnlock() 77 | // Make a copy so the returned value won't race with future log requests 78 | entries := make([]*logrus.Entry, len(t.Entries)) 79 | for i := 0; i < len(t.Entries); i++ { 80 | // Make a copy, for safety 81 | entries[i] = &t.Entries[i] 82 | } 83 | return entries 84 | } 85 | 86 | // Reset removes all Entries from this test hook. 87 | func (t *Hook) Reset() { 88 | t.mu.Lock() 89 | defer t.mu.Unlock() 90 | t.Entries = make([]logrus.Entry, 0) 91 | } 92 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/test/test_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/sirupsen/logrus" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestAllHooks(t *testing.T) { 14 | assert := assert.New(t) 15 | 16 | logger, hook := NewNullLogger() 17 | assert.Nil(hook.LastEntry()) 18 | assert.Equal(0, len(hook.Entries)) 19 | 20 | logger.Error("Hello error") 21 | assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) 22 | assert.Equal("Hello error", hook.LastEntry().Message) 23 | assert.Equal(1, len(hook.Entries)) 24 | 25 | logger.Warn("Hello warning") 26 | assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) 27 | assert.Equal("Hello warning", hook.LastEntry().Message) 28 | assert.Equal(2, len(hook.Entries)) 29 | 30 | hook.Reset() 31 | assert.Nil(hook.LastEntry()) 32 | assert.Equal(0, len(hook.Entries)) 33 | 34 | hook = NewGlobal() 35 | 36 | logrus.Error("Hello error") 37 | assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) 38 | 
assert.Equal("Hello error", hook.LastEntry().Message) 39 | assert.Equal(1, len(hook.Entries)) 40 | } 41 | 42 | func TestLoggingWithHooksRace(t *testing.T) { 43 | 44 | rand.Seed(time.Now().Unix()) 45 | unlocker := rand.Int() % 100 46 | 47 | assert := assert.New(t) 48 | logger, hook := NewNullLogger() 49 | 50 | var wgOne, wgAll sync.WaitGroup 51 | wgOne.Add(1) 52 | wgAll.Add(100) 53 | 54 | for i := 0; i < 100; i++ { 55 | go func(i int) { 56 | logger.Info("info") 57 | wgAll.Done() 58 | if i == unlocker { 59 | wgOne.Done() 60 | } 61 | }(i) 62 | } 63 | 64 | wgOne.Wait() 65 | 66 | assert.Equal(logrus.InfoLevel, hook.LastEntry().Level) 67 | assert.Equal("info", hook.LastEntry().Message) 68 | 69 | wgAll.Wait() 70 | 71 | entries := hook.AllEntries() 72 | assert.Equal(100, len(entries)) 73 | } 74 | 75 | func TestFatalWithAlternateExit(t *testing.T) { 76 | assert := assert.New(t) 77 | 78 | logger, hook := NewNullLogger() 79 | logger.ExitFunc = func(code int) {} 80 | 81 | logger.Fatal("something went very wrong") 82 | assert.Equal(logrus.FatalLevel, hook.LastEntry().Level) 83 | assert.Equal("something went very wrong", hook.LastEntry().Message) 84 | assert.Equal(1, len(hook.Entries)) 85 | } 86 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/writer/README.md: -------------------------------------------------------------------------------- 1 | # Writer Hooks for Logrus 2 | 3 | Send logs of given levels to any object with `io.Writer` interface. 
4 | 5 | ## Usage 6 | 7 | If you want for example send high level logs to `Stderr` and 8 | logs of normal execution to `Stdout`, you could do it like this: 9 | 10 | ```go 11 | package main 12 | 13 | import ( 14 | "io/ioutil" 15 | "os" 16 | 17 | log "github.com/sirupsen/logrus" 18 | "github.com/sirupsen/logrus/hooks/writer" 19 | ) 20 | 21 | func main() { 22 | log.SetOutput(ioutil.Discard) // Send all logs to nowhere by default 23 | 24 | log.AddHook(&writer.Hook{ // Send logs with level higher than warning to stderr 25 | Writer: os.Stderr, 26 | LogLevels: []log.Level{ 27 | log.PanicLevel, 28 | log.FatalLevel, 29 | log.ErrorLevel, 30 | log.WarnLevel, 31 | }, 32 | }) 33 | log.AddHook(&writer.Hook{ // Send info and debug logs to stdout 34 | Writer: os.Stdout, 35 | LogLevels: []log.Level{ 36 | log.InfoLevel, 37 | log.DebugLevel, 38 | }, 39 | }) 40 | log.Info("This will go to stdout") 41 | log.Warn("This will go to stderr") 42 | } 43 | ``` 44 | -------------------------------------------------------------------------------- /go-log/logrus/hooks/writer/writer.go: -------------------------------------------------------------------------------- 1 | package writer 2 | 3 | import ( 4 | "io" 5 | 6 | log "github.com/sirupsen/logrus" 7 | ) 8 | 9 | // Hook is a hook that writes logs of specified LogLevels to specified Writer 10 | type Hook struct { 11 | Writer io.Writer 12 | LogLevels []log.Level 13 | } 14 | 15 | // Fire will be called when some logging function is called with current hook 16 | // It will format log entry to string and write it to appropriate writer 17 | func (hook *Hook) Fire(entry *log.Entry) error { 18 | line, err := entry.Bytes() 19 | if err != nil { 20 | return err 21 | } 22 | _, err = hook.Writer.Write(line) 23 | return err 24 | } 25 | 26 | // Levels define on which log levels this hook would trigger 27 | func (hook *Hook) Levels() []log.Level { 28 | return hook.LogLevels 29 | } 30 | 
-------------------------------------------------------------------------------- /go-log/logrus/hooks/writer/writer_test.go: -------------------------------------------------------------------------------- 1 | package writer 2 | 3 | import ( 4 | "bytes" 5 | "io/ioutil" 6 | "testing" 7 | 8 | log "github.com/sirupsen/logrus" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestDifferentLevelsGoToDifferentWriters(t *testing.T) { 13 | var a, b bytes.Buffer 14 | 15 | log.SetFormatter(&log.TextFormatter{ 16 | DisableTimestamp: true, 17 | DisableColors: true, 18 | }) 19 | log.SetOutput(ioutil.Discard) // Send all logs to nowhere by default 20 | 21 | log.AddHook(&Hook{ 22 | Writer: &a, 23 | LogLevels: []log.Level{ 24 | log.WarnLevel, 25 | }, 26 | }) 27 | log.AddHook(&Hook{ // Send info and debug logs to stdout 28 | Writer: &b, 29 | LogLevels: []log.Level{ 30 | log.InfoLevel, 31 | }, 32 | }) 33 | log.Warn("send to a") 34 | log.Info("send to b") 35 | 36 | assert.Equal(t, a.String(), "level=warning msg=\"send to a\"\n") 37 | assert.Equal(t, b.String(), "level=info msg=\"send to b\"\n") 38 | } 39 | -------------------------------------------------------------------------------- /go-log/logrus/internal/testutils/testutils.go: -------------------------------------------------------------------------------- 1 | package testutils 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "strconv" 7 | "strings" 8 | "testing" 9 | 10 | . 
"github.com/sirupsen/logrus" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { 16 | var buffer bytes.Buffer 17 | var fields Fields 18 | 19 | logger := New() 20 | logger.Out = &buffer 21 | logger.Formatter = new(JSONFormatter) 22 | 23 | log(logger) 24 | 25 | err := json.Unmarshal(buffer.Bytes(), &fields) 26 | require.Nil(t, err) 27 | 28 | assertions(fields) 29 | } 30 | 31 | func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { 32 | var buffer bytes.Buffer 33 | 34 | logger := New() 35 | logger.Out = &buffer 36 | logger.Formatter = &TextFormatter{ 37 | DisableColors: true, 38 | } 39 | 40 | log(logger) 41 | 42 | fields := make(map[string]string) 43 | for _, kv := range strings.Split(strings.TrimRight(buffer.String(), "\n"), " ") { 44 | if !strings.Contains(kv, "=") { 45 | continue 46 | } 47 | kvArr := strings.Split(kv, "=") 48 | key := strings.TrimSpace(kvArr[0]) 49 | val := kvArr[1] 50 | if kvArr[1][0] == '"' { 51 | var err error 52 | val, err = strconv.Unquote(val) 53 | require.NoError(t, err) 54 | } 55 | fields[key] = val 56 | } 57 | assertions(fields) 58 | } 59 | -------------------------------------------------------------------------------- /go-log/logrus/json_formatter.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "runtime" 8 | ) 9 | 10 | type fieldKey string 11 | 12 | // FieldMap allows customization of the key names for default fields. 13 | type FieldMap map[fieldKey]string 14 | 15 | func (f FieldMap) resolve(key fieldKey) string { 16 | if k, ok := f[key]; ok { 17 | return k 18 | } 19 | 20 | return string(key) 21 | } 22 | 23 | // JSONFormatter formats logs into parsable json 24 | type JSONFormatter struct { 25 | // TimestampFormat sets the format used for marshaling timestamps. 
26 | // The format to use is the same than for time.Format or time.Parse from the standard 27 | // library. 28 | // The standard Library already provides a set of predefined format. 29 | TimestampFormat string 30 | 31 | // DisableTimestamp allows disabling automatic timestamps in output 32 | DisableTimestamp bool 33 | 34 | // DisableHTMLEscape allows disabling html escaping in output 35 | DisableHTMLEscape bool 36 | 37 | // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. 38 | DataKey string 39 | 40 | // FieldMap allows users to customize the names of keys for default fields. 41 | // As an example: 42 | // formatter := &JSONFormatter{ 43 | // FieldMap: FieldMap{ 44 | // FieldKeyTime: "@timestamp", 45 | // FieldKeyLevel: "@level", 46 | // FieldKeyMsg: "@message", 47 | // FieldKeyFunc: "@caller", 48 | // }, 49 | // } 50 | FieldMap FieldMap 51 | 52 | // CallerPrettyfier can be set by the user to modify the content 53 | // of the function and file keys in the json data when ReportCaller is 54 | // activated. If any of the returned value is the empty string the 55 | // corresponding key will be removed from json fields. 
56 | CallerPrettyfier func(*runtime.Frame) (function string, file string) 57 | 58 | // PrettyPrint will indent all json logs 59 | PrettyPrint bool 60 | } 61 | 62 | // Format renders a single log entry 63 | func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { 64 | data := make(Fields, len(entry.Data)+4) 65 | for k, v := range entry.Data { 66 | switch v := v.(type) { 67 | case error: 68 | // Otherwise errors are ignored by `encoding/json` 69 | // https://github.com/sirupsen/logrus/issues/137 70 | data[k] = v.Error() 71 | default: 72 | data[k] = v 73 | } 74 | } 75 | 76 | if f.DataKey != "" { 77 | newData := make(Fields, 4) 78 | newData[f.DataKey] = data 79 | data = newData 80 | } 81 | 82 | prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) 83 | 84 | timestampFormat := f.TimestampFormat 85 | if timestampFormat == "" { 86 | timestampFormat = defaultTimestampFormat 87 | } 88 | 89 | if entry.err != "" { 90 | data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err 91 | } 92 | if !f.DisableTimestamp { 93 | data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) 94 | } 95 | data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message 96 | data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() 97 | if entry.HasCaller() { 98 | funcVal := entry.Caller.Function 99 | fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) 100 | if f.CallerPrettyfier != nil { 101 | funcVal, fileVal = f.CallerPrettyfier(entry.Caller) 102 | } 103 | if funcVal != "" { 104 | data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal 105 | } 106 | if fileVal != "" { 107 | data[f.FieldMap.resolve(FieldKeyFile)] = fileVal 108 | } 109 | } 110 | 111 | var b *bytes.Buffer 112 | if entry.Buffer != nil { 113 | b = entry.Buffer 114 | } else { 115 | b = &bytes.Buffer{} 116 | } 117 | 118 | encoder := json.NewEncoder(b) 119 | encoder.SetEscapeHTML(!f.DisableHTMLEscape) 120 | if f.PrettyPrint { 121 | encoder.SetIndent("", " ") 122 | } 123 | if err := encoder.Encode(data); 
err != nil { 124 | return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) 125 | } 126 | 127 | return b.Bytes(), nil 128 | } 129 | -------------------------------------------------------------------------------- /go-log/logrus/level_test.go: -------------------------------------------------------------------------------- 1 | package logrus_test 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestLevelJsonEncoding(t *testing.T) { 12 | type X struct { 13 | Level Level 14 | } 15 | 16 | var x X 17 | x.Level = WarnLevel 18 | var buf bytes.Buffer 19 | enc := json.NewEncoder(&buf) 20 | require.NoError(t, enc.Encode(x)) 21 | dec := json.NewDecoder(&buf) 22 | var y X 23 | require.NoError(t, dec.Decode(&y)) 24 | } 25 | 26 | func TestLevelUnmarshalText(t *testing.T) { 27 | var u Level 28 | for _, level := range AllLevels { 29 | t.Run(level.String(), func(t *testing.T) { 30 | require.NoError(t, u.UnmarshalText([]byte(level.String()))) 31 | require.Equal(t, level, u) 32 | }) 33 | } 34 | t.Run("invalid", func(t *testing.T) { 35 | require.Error(t, u.UnmarshalText([]byte("invalid"))) 36 | }) 37 | } 38 | 39 | func TestLevelMarshalText(t *testing.T) { 40 | levelStrings := []string{ 41 | "panic", 42 | "fatal", 43 | "error", 44 | "warning", 45 | "info", 46 | "debug", 47 | "trace", 48 | } 49 | for idx, val := range AllLevels { 50 | level := val 51 | t.Run(level.String(), func(t *testing.T) { 52 | var cmp Level 53 | b, err := level.MarshalText() 54 | require.NoError(t, err) 55 | require.Equal(t, levelStrings[idx], string(b)) 56 | err = cmp.UnmarshalText(b) 57 | require.NoError(t, err) 58 | require.Equal(t, level, cmp) 59 | }) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /go-log/logrus/logger_bench_test.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "io/ioutil" 5 | 
"os" 6 | "testing" 7 | ) 8 | 9 | func BenchmarkDummyLogger(b *testing.B) { 10 | nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) 11 | if err != nil { 12 | b.Fatalf("%v", err) 13 | } 14 | defer nullf.Close() 15 | doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) 16 | } 17 | 18 | func BenchmarkDummyLoggerNoLock(b *testing.B) { 19 | nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) 20 | if err != nil { 21 | b.Fatalf("%v", err) 22 | } 23 | defer nullf.Close() 24 | doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) 25 | } 26 | 27 | func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { 28 | logger := Logger{ 29 | Out: out, 30 | Level: InfoLevel, 31 | Formatter: formatter, 32 | } 33 | entry := logger.WithFields(fields) 34 | b.RunParallel(func(pb *testing.PB) { 35 | for pb.Next() { 36 | entry.Info("aaa") 37 | } 38 | }) 39 | } 40 | 41 | func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { 42 | logger := Logger{ 43 | Out: out, 44 | Level: InfoLevel, 45 | Formatter: formatter, 46 | } 47 | logger.SetNoLock() 48 | entry := logger.WithFields(fields) 49 | b.RunParallel(func(pb *testing.PB) { 50 | for pb.Next() { 51 | entry.Info("aaa") 52 | } 53 | }) 54 | } 55 | 56 | func BenchmarkLoggerJSONFormatter(b *testing.B) { 57 | doLoggerBenchmarkWithFormatter(b, &JSONFormatter{}) 58 | } 59 | 60 | func BenchmarkLoggerTextFormatter(b *testing.B) { 61 | doLoggerBenchmarkWithFormatter(b, &TextFormatter{}) 62 | } 63 | 64 | func doLoggerBenchmarkWithFormatter(b *testing.B, f Formatter) { 65 | b.SetParallelism(100) 66 | log := New() 67 | log.Formatter = f 68 | log.Out = ioutil.Discard 69 | b.RunParallel(func(pb *testing.PB) { 70 | for pb.Next() { 71 | log. 72 | WithField("foo1", "bar1"). 73 | WithField("foo2", "bar2"). 
74 | Info("this is a dummy log") 75 | } 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /go-log/logrus/logger_test.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestFieldValueError(t *testing.T) { 14 | buf := &bytes.Buffer{} 15 | l := &Logger{ 16 | Out: buf, 17 | Formatter: new(JSONFormatter), 18 | Hooks: make(LevelHooks), 19 | Level: DebugLevel, 20 | } 21 | l.WithField("func", func() {}).Info("test") 22 | fmt.Println(buf.String()) 23 | var data map[string]interface{} 24 | if err := json.Unmarshal(buf.Bytes(), &data); err != nil { 25 | t.Error("unexpected error", err) 26 | } 27 | _, ok := data[FieldKeyLogrusError] 28 | require.True(t, ok, `cannot found expected "logrus_error" field: %v`, data) 29 | } 30 | 31 | func TestNoFieldValueError(t *testing.T) { 32 | buf := &bytes.Buffer{} 33 | l := &Logger{ 34 | Out: buf, 35 | Formatter: new(JSONFormatter), 36 | Hooks: make(LevelHooks), 37 | Level: DebugLevel, 38 | } 39 | l.WithField("str", "str").Info("test") 40 | fmt.Println(buf.String()) 41 | var data map[string]interface{} 42 | if err := json.Unmarshal(buf.Bytes(), &data); err != nil { 43 | t.Error("unexpected error", err) 44 | } 45 | _, ok := data[FieldKeyLogrusError] 46 | require.False(t, ok) 47 | } 48 | 49 | func TestWarninglnNotEqualToWarning(t *testing.T) { 50 | buf := &bytes.Buffer{} 51 | bufln := &bytes.Buffer{} 52 | 53 | formatter := new(TextFormatter) 54 | formatter.DisableTimestamp = true 55 | formatter.DisableLevelTruncation = true 56 | 57 | l := &Logger{ 58 | Out: buf, 59 | Formatter: formatter, 60 | Hooks: make(LevelHooks), 61 | Level: DebugLevel, 62 | } 63 | l.Warning("hello,", "world") 64 | 65 | l.SetOutput(bufln) 66 | l.Warningln("hello,", "world") 67 | 68 | 
assert.NotEqual(t, buf.String(), bufln.String(), "Warning() and Wantingln() should not be equal") 69 | } 70 | -------------------------------------------------------------------------------- /go-log/logrus/logrus.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strings" 7 | ) 8 | 9 | // Fields type, used to pass to `WithFields`. 10 | type Fields map[string]interface{} 11 | 12 | // Level type 13 | type Level uint32 14 | 15 | // Convert the Level to a string. E.g. PanicLevel becomes "panic". 16 | func (level Level) String() string { 17 | if b, err := level.MarshalText(); err == nil { 18 | return string(b) 19 | } else { 20 | return "unknown" 21 | } 22 | } 23 | 24 | // ParseLevel takes a string level and returns the Logrus log level constant. 25 | func ParseLevel(lvl string) (Level, error) { 26 | switch strings.ToLower(lvl) { 27 | case "panic": 28 | return PanicLevel, nil 29 | case "fatal": 30 | return FatalLevel, nil 31 | case "error": 32 | return ErrorLevel, nil 33 | case "warn", "warning": 34 | return WarnLevel, nil 35 | case "info": 36 | return InfoLevel, nil 37 | case "debug": 38 | return DebugLevel, nil 39 | case "trace": 40 | return TraceLevel, nil 41 | } 42 | 43 | var l Level 44 | return l, fmt.Errorf("not a valid logrus Level: %q", lvl) 45 | } 46 | 47 | // UnmarshalText implements encoding.TextUnmarshaler. 
48 | func (level *Level) UnmarshalText(text []byte) error { 49 | l, err := ParseLevel(string(text)) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | *level = l 55 | 56 | return nil 57 | } 58 | 59 | func (level Level) MarshalText() ([]byte, error) { 60 | switch level { 61 | case TraceLevel: 62 | return []byte("trace"), nil 63 | case DebugLevel: 64 | return []byte("debug"), nil 65 | case InfoLevel: 66 | return []byte("info"), nil 67 | case WarnLevel: 68 | return []byte("warning"), nil 69 | case ErrorLevel: 70 | return []byte("error"), nil 71 | case FatalLevel: 72 | return []byte("fatal"), nil 73 | case PanicLevel: 74 | return []byte("panic"), nil 75 | } 76 | 77 | return nil, fmt.Errorf("not a valid logrus level %d", level) 78 | } 79 | 80 | // A constant exposing all logging levels 81 | var AllLevels = []Level{ 82 | PanicLevel, 83 | FatalLevel, 84 | ErrorLevel, 85 | WarnLevel, 86 | InfoLevel, 87 | DebugLevel, 88 | TraceLevel, 89 | } 90 | 91 | // These are the different logging levels. You can set the logging level to log 92 | // on your instance of logger, obtained with `logrus.New()`. 93 | const ( 94 | // PanicLevel level, highest level of severity. Logs and then calls panic with the 95 | // message passed to Debug, Info, ... 96 | PanicLevel Level = iota 97 | // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the 98 | // logging level is set to Panic. 99 | FatalLevel 100 | // ErrorLevel level. Logs. Used for errors that should definitely be noted. 101 | // Commonly used for hooks to send errors to an error tracking service. 102 | ErrorLevel 103 | // WarnLevel level. Non-critical entries that deserve eyes. 104 | WarnLevel 105 | // InfoLevel level. General operational entries about what's going on inside the 106 | // application. 107 | InfoLevel 108 | // DebugLevel level. Usually only enabled when debugging. Very verbose logging. 109 | DebugLevel 110 | // TraceLevel level. Designates finer-grained informational events than the Debug. 
111 | TraceLevel 112 | ) 113 | 114 | // Won't compile if StdLogger can't be realized by a log.Logger 115 | var ( 116 | _ StdLogger = &log.Logger{} 117 | _ StdLogger = &Entry{} 118 | _ StdLogger = &Logger{} 119 | ) 120 | 121 | // StdLogger is what your logrus-enabled library should take, that way 122 | // it'll accept a stdlib logger and a logrus logger. There's no standard 123 | // interface, this is the closest we get, unfortunately. 124 | type StdLogger interface { 125 | Print(...interface{}) 126 | Printf(string, ...interface{}) 127 | Println(...interface{}) 128 | 129 | Fatal(...interface{}) 130 | Fatalf(string, ...interface{}) 131 | Fatalln(...interface{}) 132 | 133 | Panic(...interface{}) 134 | Panicf(string, ...interface{}) 135 | Panicln(...interface{}) 136 | } 137 | 138 | // The FieldLogger interface generalizes the Entry and Logger types 139 | type FieldLogger interface { 140 | WithField(key string, value interface{}) *Entry 141 | WithFields(fields Fields) *Entry 142 | WithError(err error) *Entry 143 | 144 | Debugf(format string, args ...interface{}) 145 | Infof(format string, args ...interface{}) 146 | Printf(format string, args ...interface{}) 147 | Warnf(format string, args ...interface{}) 148 | Warningf(format string, args ...interface{}) 149 | Errorf(format string, args ...interface{}) 150 | Fatalf(format string, args ...interface{}) 151 | Panicf(format string, args ...interface{}) 152 | 153 | Debug(args ...interface{}) 154 | Info(args ...interface{}) 155 | Print(args ...interface{}) 156 | Warn(args ...interface{}) 157 | Warning(args ...interface{}) 158 | Error(args ...interface{}) 159 | Fatal(args ...interface{}) 160 | Panic(args ...interface{}) 161 | 162 | Debugln(args ...interface{}) 163 | Infoln(args ...interface{}) 164 | Println(args ...interface{}) 165 | Warnln(args ...interface{}) 166 | Warningln(args ...interface{}) 167 | Errorln(args ...interface{}) 168 | Fatalln(args ...interface{}) 169 | Panicln(args ...interface{}) 170 | 171 | // 
IsDebugEnabled() bool 172 | // IsInfoEnabled() bool 173 | // IsWarnEnabled() bool 174 | // IsErrorEnabled() bool 175 | // IsFatalEnabled() bool 176 | // IsPanicEnabled() bool 177 | } 178 | 179 | // Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is 180 | // here for consistancy. Do not use. Use Logger or Entry instead. 181 | type Ext1FieldLogger interface { 182 | FieldLogger 183 | Tracef(format string, args ...interface{}) 184 | Trace(args ...interface{}) 185 | Traceln(args ...interface{}) 186 | } 187 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_appengine.go: -------------------------------------------------------------------------------- 1 | // +build appengine 2 | 3 | package logrus 4 | 5 | import ( 6 | "io" 7 | ) 8 | 9 | func checkIfTerminal(w io.Writer) bool { 10 | return true 11 | } 12 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_bsd.go: -------------------------------------------------------------------------------- 1 | // +build darwin dragonfly freebsd netbsd openbsd 2 | // +build !js 3 | 4 | package logrus 5 | 6 | import "golang.org/x/sys/unix" 7 | 8 | const ioctlReadTermios = unix.TIOCGETA 9 | 10 | func isTerminal(fd int) bool { 11 | _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) 12 | return err == nil 13 | } 14 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_js.go: -------------------------------------------------------------------------------- 1 | // +build js 2 | 3 | package logrus 4 | 5 | func isTerminal(fd int) bool { 6 | return false 7 | } 8 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_no_terminal.go: -------------------------------------------------------------------------------- 1 | // +build js nacl plan9 2 | 3 | package logrus 4 | 5 | import ( 6 | "io" 
7 | ) 8 | 9 | func checkIfTerminal(w io.Writer) bool { 10 | return false 11 | } 12 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_notappengine.go: -------------------------------------------------------------------------------- 1 | // +build !appengine,!js,!windows,!nacl,!plan9 2 | 3 | package logrus 4 | 5 | import ( 6 | "io" 7 | "os" 8 | ) 9 | 10 | func checkIfTerminal(w io.Writer) bool { 11 | switch v := w.(type) { 12 | case *os.File: 13 | return isTerminal(int(v.Fd())) 14 | default: 15 | return false 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_solaris.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "golang.org/x/sys/unix" 5 | ) 6 | 7 | // IsTerminal returns true if the given file descriptor is a terminal. 8 | func isTerminal(fd int) bool { 9 | _, err := unix.IoctlGetTermio(fd, unix.TCGETA) 10 | return err == nil 11 | } 12 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_unix.go: -------------------------------------------------------------------------------- 1 | // +build linux aix zos 2 | // +build !js 3 | 4 | package logrus 5 | 6 | import "golang.org/x/sys/unix" 7 | 8 | const ioctlReadTermios = unix.TCGETS 9 | 10 | func isTerminal(fd int) bool { 11 | _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) 12 | return err == nil 13 | } 14 | -------------------------------------------------------------------------------- /go-log/logrus/terminal_check_windows.go: -------------------------------------------------------------------------------- 1 | // +build !appengine,!js,windows 2 | 3 | package logrus 4 | 5 | import ( 6 | "io" 7 | "os" 8 | 9 | "golang.org/x/sys/windows" 10 | ) 11 | 12 | func checkIfTerminal(w io.Writer) bool { 13 | switch v := w.(type) { 14 | case *os.File: 15 | 
handle := windows.Handle(v.Fd()) 16 | var mode uint32 17 | if err := windows.GetConsoleMode(handle, &mode); err != nil { 18 | return false 19 | } 20 | mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING 21 | if err := windows.SetConsoleMode(handle, mode); err != nil { 22 | return false 23 | } 24 | return true 25 | } 26 | return false 27 | } 28 | -------------------------------------------------------------------------------- /go-log/logrus/travis/cross_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$TRAVIS_GO_VERSION" =~ ^1\.13\. ]] && [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$GO111MODULE" == "on" ]]; then 4 | $(go env GOPATH)/bin/gox -build-lib 5 | fi 6 | -------------------------------------------------------------------------------- /go-log/logrus/travis/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Install golanci 1.32.2 6 | if [[ "$TRAVIS_GO_VERSION" =~ ^1\.15\. ]]; then 7 | curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(go env GOPATH)/bin v1.32.2 8 | fi 9 | -------------------------------------------------------------------------------- /go-log/logrus/writer.go: -------------------------------------------------------------------------------- 1 | package logrus 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "runtime" 7 | ) 8 | 9 | // Writer at INFO level. See WriterLevel for details. 10 | func (logger *Logger) Writer() *io.PipeWriter { 11 | return logger.WriterLevel(InfoLevel) 12 | } 13 | 14 | // WriterLevel returns an io.Writer that can be used to write arbitrary text to 15 | // the logger at the given log level. Each line written to the writer will be 16 | // printed in the usual way using formatters and hooks. The writer is part of an 17 | // io.Pipe and it is the callers responsibility to close the writer when done. 
18 | // This can be used to override the standard library logger easily. 19 | func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { 20 | return NewEntry(logger).WriterLevel(level) 21 | } 22 | 23 | func (entry *Entry) Writer() *io.PipeWriter { 24 | return entry.WriterLevel(InfoLevel) 25 | } 26 | 27 | func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { 28 | reader, writer := io.Pipe() 29 | 30 | var printFunc func(args ...interface{}) 31 | 32 | switch level { 33 | case TraceLevel: 34 | printFunc = entry.Trace 35 | case DebugLevel: 36 | printFunc = entry.Debug 37 | case InfoLevel: 38 | printFunc = entry.Info 39 | case WarnLevel: 40 | printFunc = entry.Warn 41 | case ErrorLevel: 42 | printFunc = entry.Error 43 | case FatalLevel: 44 | printFunc = entry.Fatal 45 | case PanicLevel: 46 | printFunc = entry.Panic 47 | default: 48 | printFunc = entry.Print 49 | } 50 | 51 | go entry.writerScanner(reader, printFunc) 52 | runtime.SetFinalizer(writer, writerFinalizer) 53 | 54 | return writer 55 | } 56 | 57 | func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { 58 | scanner := bufio.NewScanner(reader) 59 | for scanner.Scan() { 60 | printFunc(scanner.Text()) 61 | } 62 | if err := scanner.Err(); err != nil { 63 | entry.Errorf("Error while reading from Writer: %s", err) 64 | } 65 | reader.Close() 66 | } 67 | 68 | func writerFinalizer(writer *io.PipeWriter) { 69 | writer.Close() 70 | } 71 | -------------------------------------------------------------------------------- /go-log/logrus/writer_test.go: -------------------------------------------------------------------------------- 1 | package logrus_test 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | ) 7 | 8 | func ExampleLogger_Writer_httpServer() { 9 | logger := New() 10 | w := logger.Writer() 11 | defer w.Close() 12 | 13 | srv := http.Server{ 14 | // create a stdlib log.Logger that writes to 15 | // logrus.Logger. 
16 | ErrorLog: log.New(w, "", 0), 17 | } 18 | 19 | if err := srv.ListenAndServe(); err != nil { 20 | logger.Fatal(err) 21 | } 22 | } 23 | 24 | func ExampleLogger_Writer_stdlib() { 25 | logger := New() 26 | logger.Formatter = &JSONFormatter{} 27 | 28 | // Use logrus for standard log output 29 | // Note that `log` here references stdlib's log 30 | // Not logrus imported under the name `log`. 31 | log.SetOutput(logger.Writer()) 32 | } 33 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module gt-checksum 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/go-mysql-org/go-mysql v1.4.0 7 | github.com/go-sql-driver/mysql v1.6.0 8 | github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible 9 | github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 10 | github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 11 | github.com/satori/go.uuid v1.2.0 12 | github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 13 | github.com/sirupsen/logrus v1.8.1 14 | ) 15 | 16 | require ( 17 | github.com/BurntSushi/toml v0.3.1 // indirect 18 | github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect 19 | github.com/fatih/color v1.13.0 // indirect 20 | github.com/go-logfmt/logfmt v0.5.1 // indirect 21 | github.com/go-logr/logr v1.2.3 // indirect 22 | github.com/godror/godror v0.33.3 // indirect 23 | github.com/godror/knownpb v0.1.0 // indirect 24 | github.com/gosuri/uitable v0.0.4 // indirect 25 | github.com/jonboulle/clockwork v0.2.2 // indirect 26 | github.com/karalabe/xgo v0.0.0-20191115072854-c5ccff8648a7 // indirect 27 | github.com/lestrrat-go/strftime v1.0.5 // indirect 28 | github.com/mattn/go-colorable v0.1.9 // indirect 29 | github.com/mattn/go-isatty v0.0.14 // indirect 30 | github.com/mattn/go-runewidth v0.0.13 // indirect 31 | github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 // indirect 32 | 
github.com/pingcap/parser v0.0.0-20210415081931-48e7f467fd74 // indirect 33 | github.com/pkg/errors v0.9.1 // indirect 34 | github.com/rivo/uniseg v0.2.0 // indirect 35 | github.com/russross/blackfriday/v2 v2.0.1 // indirect 36 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect 37 | github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect 38 | github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect 39 | github.com/urfave/cli v1.22.9 // indirect 40 | go.uber.org/atomic v1.7.0 // indirect 41 | go.uber.org/multierr v1.6.0 // indirect 42 | go.uber.org/zap v1.16.0 // indirect 43 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect 44 | golang.org/x/text v0.3.6 // indirect 45 | google.golang.org/protobuf v1.27.1 // indirect 46 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 47 | gopkg.in/ini.v1 v1.66.6 // indirect 48 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 49 | ) 50 | 51 | replace github.com/go-mysql-org/go-mysql => github.com/siddontang/go-mysql v1.4.0 // indirect 52 | -------------------------------------------------------------------------------- /greatdbCheck.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "gt-checksum/actions" 6 | "gt-checksum/dbExec" 7 | "gt-checksum/global" 8 | "gt-checksum/inputArg" 9 | "os" 10 | "time" 11 | ) 12 | 13 | var err error 14 | 15 | func main() { 16 | //获取当前时间 17 | beginTime := time.Now() 18 | 19 | //获取配置文件 20 | m := inputArg.ConfigInit(0) 21 | if !actions.SchemaTableInit(m).GlobalAccessPriCheck(1, 2) { 22 | fmt.Println("gt-checksum report: Missing global permissions, please check the log for details.") 23 | os.Exit(1) 24 | } 25 | //获取待校验表信息 26 | var tableList []string 27 | if tableList, err = actions.SchemaTableInit(m).SchemaTableFilter(3, 4); err != nil || len(tableList) == 0 { 28 | fmt.Println("gt-checksum report: check table is empty,please check the log 
for details!") 29 | os.Exit(1) 30 | } 31 | 32 | switch m.SecondaryL.RulesV.CheckObject { 33 | case "struct": 34 | if err = actions.SchemaTableInit(m).Struct(tableList, 5, 6); err != nil { 35 | fmt.Println("-- gt-checksum report: The table Struct verification failed, please refer to the log file for details, enable debug to get more information -- ") 36 | os.Exit(1) 37 | } 38 | case "index": 39 | if err = actions.SchemaTableInit(m).Index(tableList, 7, 8); err != nil { 40 | fmt.Println("-- gt-checksum report: The table Index verification failed, please refer to the log file for details, enable debug to get more information -- ") 41 | os.Exit(1) 42 | } 43 | case "partitions": 44 | //9、10 45 | actions.SchemaTableInit(m).Partitions(tableList, 9, 10) 46 | case "foreign": 47 | //11、12 48 | actions.SchemaTableInit(m).Foreign(tableList, 11, 12) 49 | case "func": 50 | //13、14 51 | actions.SchemaTableInit(m).Func(tableList, 13, 14) 52 | case "proc": 53 | //15、16 54 | actions.SchemaTableInit(m).Proc(tableList, 15, 16) 55 | case "trigger": 56 | //17、18 57 | // 部分ok,异构数据库需要部分内容进行手动验证,例如:触发器结构体中包含的sql语句不一致的情况 58 | actions.SchemaTableInit(m).Trigger(tableList, 17, 18) 59 | case "data": 60 | //校验表结构 61 | tableList, _, err = actions.SchemaTableInit(m).TableColumnNameCheck(tableList, 9, 10) 62 | if err != nil { 63 | fmt.Println("-- gt-checksum report: The table structure verification failed, please refer to the log file for details, enable debug to get more information -- ") 64 | os.Exit(1) 65 | } else if len(tableList) == 0 { 66 | fmt.Println("gt-checksum report: No checklist, please check the log for details.") 67 | os.Exit(1) 68 | } 69 | //19、20 70 | if tableList, _, err = actions.SchemaTableInit(m).TableAccessPriCheck(tableList, 19, 20); err != nil { 71 | fmt.Println("-- gt-checksum report: The table access permissions query failed, please refer to the log file for details, enable debug to get more information -- ") 72 | os.Exit(1) 73 | } else if len(tableList) == 0 { 74 | 
fmt.Println("gt-checksum report: Insufficient permissions for the verification table, please check the log for details.") 75 | os.Exit(1) 76 | } 77 | 78 | //根据要校验的表,获取该表的全部列信息 79 | fmt.Println("-- gt-checksum init check table column --") 80 | tableAllCol := actions.SchemaTableInit(m).SchemaTableAllCol(tableList, 21, 22) 81 | //根据要校验的表,筛选查询数据时使用到的索引列信息 82 | fmt.Println("-- gt-checksum init check table index column --") 83 | tableIndexColumnMap := actions.SchemaTableInit(m).TableIndexColumn(tableList, 23, 24) 84 | //获取全局一致 x性位点 85 | //fmt.Println("-- GreatdbCheck Obtain global consensus sites --") 86 | //sglobalSites, err := dbExec.GCN().GcnObject(m.PoolMin, m.PoolMax, m.SourceJdbc, m.SourceDrive).GlobalCN(25) 87 | //if err != nil { 88 | // os.Exit(1) 89 | //} 90 | //dglobalSites, err := dbExec.GCN().GcnObject(m.PoolMin, m.PoolMax, m.DestJdbc, m.DestDrive).GlobalCN(26) 91 | //if err != nil { 92 | // os.Exit(1) 93 | //} 94 | //fmt.Println(sglobalSites, dglobalSites) 95 | 96 | //var SourceItemAbnormalDataChan = make(chan actions.SourceItemAbnormalDataStruct, 100) 97 | //var addChan, delChan = make(chan string, 100), make(chan string, 100) 98 | 99 | // 开启差异数据修复的线程 100 | //go actions.DifferencesDataDispos(SourceItemAbnormalDataChan, addChan, delChan) 101 | //go actions.DataFixSql(addChan, delChan) 102 | 103 | //开始进行增量校验 104 | //if m.IncCheckSwitch == "yesno" { 105 | // fmt.Println("-- GreatdbCheck begin cehck table incerment date --") 106 | // actions.IncDataDisops(m.SourceDrive, m.DestDrive, m.SourceJdbc, m.DestJdbc, sglobalSites, dglobalSites, tableList).Aa(fullDataCompletionStatus, SourceItemAbnormalDataChan) 107 | //} 108 | 109 | //初始化数据库连接池 110 | fmt.Println("-- gt-checksum init source and dest transaction snapshoot conn pool --") 111 | sdc, _ := dbExec.GCN().GcnObject(m.ConnPoolV.PoolMin, m.SecondaryL.DsnsV.SrcJdbc, m.SecondaryL.DsnsV.SrcDrive).NewConnPool(27) 112 | ddc, _ := dbExec.GCN().GcnObject(m.ConnPoolV.PoolMin, m.SecondaryL.DsnsV.DestJdbc, 
m.SecondaryL.DsnsV.DestDrive).NewConnPool(28) 113 | 114 | //针对待校验表生成查询条件计划清单 115 | fmt.Println("-- gt-checksum init cehck table query plan and check data --") 116 | switch m.SecondaryL.RulesV.CheckMode { 117 | case "rows": 118 | actions.CheckTableQuerySchedule(sdc, ddc, tableIndexColumnMap, tableAllCol, *m).Schedulingtasks() 119 | case "count": 120 | actions.CheckTableQuerySchedule(sdc, ddc, tableIndexColumnMap, tableAllCol, *m).DoCountDataCheck() 121 | case "sample": 122 | actions.CheckTableQuerySchedule(sdc, ddc, tableIndexColumnMap, tableAllCol, *m).DoSampleDataCheck() 123 | } 124 | //关闭连接池连接 125 | sdc.Close(27) 126 | ddc.Close(28) 127 | default: 128 | fmt.Println("-- gt-checksum report: checkObject parameter selection error, please refer to the log file for details, enable debug to get more information -- ") 129 | os.Exit(1) 130 | } 131 | global.Wlog.Info("gt-checksum check object {", m.SecondaryL.RulesV.CheckObject, "} complete !!!") 132 | //输出结果信息 133 | fmt.Println("") 134 | fmt.Println("** gt-checksum Overview of results **") 135 | fmt.Println("Check time: ", fmt.Sprintf("%.2fs", time.Since(beginTime).Seconds()), "(Seconds)") 136 | actions.CheckResultOut(m) 137 | } 138 | -------------------------------------------------------------------------------- /inputArg/inputInit.go: -------------------------------------------------------------------------------- 1 | package inputArg 2 | 3 | import ( 4 | "fmt" 5 | "gopkg.in/ini.v1" 6 | "gt-checksum/global" 7 | "gt-checksum/go-log/log" 8 | "os" 9 | "runtime" 10 | "strings" 11 | ) 12 | 13 | type FirstLevel struct { 14 | DSNs *ini.Section 15 | Schema *ini.Section 16 | Rules *ini.Section 17 | Struct *ini.Section 18 | Logs *ini.Section 19 | Repair *ini.Section 20 | } 21 | type DSNsS struct { 22 | SrcDSN string 23 | DstDSN string 24 | SrcDrive string 25 | SrcJdbc string 26 | DestDrive string 27 | DestJdbc string 28 | } 29 | type SchemaS struct { 30 | Tables string 31 | IgnoreTables string 32 | CheckNoIndexTable string 33 | 
LowerCaseTableNames string 34 | } 35 | type RulesS struct { 36 | ParallelThds int 37 | ChanRowCount int 38 | QueueSize int 39 | CheckMode string 40 | Ratio int 41 | CheckObject string 42 | } 43 | type StructS struct { 44 | ScheckMod string 45 | ScheckOrder string 46 | ScheckFixRule string 47 | } 48 | type LogS struct { 49 | LogFile string 50 | LogLevel string 51 | } 52 | type RepairS struct { 53 | Datafix string 54 | FixTrxNum int 55 | FixFileName string 56 | FixFileFINE *os.File 57 | } 58 | type SecondaryLevel struct { 59 | DsnsV DSNsS 60 | SchemaV SchemaS 61 | RulesV RulesS 62 | StructV StructS 63 | LogV LogS 64 | RepairV RepairS 65 | } 66 | type ConnPool struct { 67 | PoolMin int 68 | PoolMax int 69 | } 70 | type ConfigParameter struct { 71 | FirstL FirstLevel 72 | SecondaryL SecondaryLevel 73 | ConfFine *ini.File 74 | ConnPoolV ConnPool 75 | ParametersSwitch bool 76 | Config string //配置文件信息 77 | LogThreadSeq int64 78 | NoIndexTableTmpFile string 79 | } 80 | 81 | var rc ConfigParameter 82 | 83 | func init() { 84 | rc.cliHelp() 85 | fmt.Println("-- gt-checksum init configuration files -- ") 86 | if rc.Config != "" { 87 | if !strings.Contains(rc.Config, "/") { 88 | sysType := runtime.GOOS 89 | if sysType == "linux" { 90 | rc.Config = fmt.Sprintf("./%s", rc.Config) 91 | } else if sysType == "windows" { 92 | rc.Config = fmt.Sprintf(".\\%s", rc.Config) 93 | } 94 | } 95 | rc.getConfig() 96 | } 97 | //初始化日志文件 98 | fmt.Println("-- gt-checksum init log files -- ") 99 | global.Wlog = log.NewWlog(rc.SecondaryL.LogV.LogFile, rc.SecondaryL.LogV.LogLevel) 100 | fmt.Println("-- gt-checksum init check parameter --") 101 | rc.checkPar() 102 | } 103 | 104 | func ConfigInit(logThreadSeq int64) *ConfigParameter { 105 | rc.LogThreadSeq = logThreadSeq 106 | return &rc 107 | } 108 | -------------------------------------------------------------------------------- /inputArg/p_intorduce.go: -------------------------------------------------------------------------------- 1 | package 
inputArg 2 | 3 | /* 4 | inputArg 包主要是为了处理入参的输入处理 5 | 创建者:梁行 6 | 创建时间:2022/10/13 7 | */ 8 | -------------------------------------------------------------------------------- /relnotes/CHANGELOG.zh-CN.md: -------------------------------------------------------------------------------- 1 | ## 1.2.1(2023.3.20) 2 | 新增表结构校验、列类型校验等新特性及修复数个bug。 3 | `gt-checksum` 修复bug及新增功能如下: 4 | - 新增表结构的校验,并生成修复语句,支持对象包括如下(源目标端校验表都存在): 5 | - 支持列的数据类型的校验及修复 6 | - 支持列的字符集及校验级的校验及修复(MySQL支持字符串校验,oracle不校验) 7 | - 支持列是否允许null的校验及修复 8 | - 支持列的默认值是否一致的校验及修复 9 | - 支持列的乱序的验证及修复 10 | - 支持列数据存在多列、少列的验证及修复 11 | - 支持列的comment的校验及修复 12 | - 支持宽松模式和严谨模式校验 13 | - 支持校验列时是按正序校验还是乱序校验 14 | - 支持修复语句列属性的指定依据,是按源端校验还是目标端校验 15 | - 修复索引校验并生成修复语句时出现的空指针错误 16 | - 修复因为8.0数据库查询条件没有产生where关键字导致的sql执行失败 17 | - 优化代码(参数input输入部分),精简代码,并结构化处理 18 | - 修复因数据库开启lowerCaseTableNames不生效导致无法校验区分大小写的表 19 | 20 | ## 1.2.0(2023.3.6) 21 | gt-checksum正式发布,版本号1.2.0,可以满足绝大多数场景下的数据校验&修复需求,尤其是MySQL、Oracle间的异构数据库场景。 22 | 23 | `gt-checksum` 工具主要功能特性有: 24 | - 支持主从复制、MGR以及MySQL、Oracle间的数据校验&修复; 25 | - 数据库名、表名设置支持多种正则表达式 26 | - 支持多种字符集 27 | - 支持设置表名大小写敏感 28 | - 支持多种数据校验模式,数据、表结构、索引、分区、外键、存储过程等 29 | - 支持多种数据校验方式,全量校验,抽样校验和行数校验 30 | - 支持多种数据修复模式,校验完毕后直接修复或是生成修复SQL文件再自行手动处理 31 | - 支持校验无索引表 32 | - 支持并发多线程校验 33 | - 更好支持大表数据校验,效率更高,且基本不会发生OOM等问题 34 | --------------------------------------------------------------------------------