├── .gitignore
├── ChangeLog
├── LICENSE
├── README.md
├── build.sh
├── resources
└── dataflow.png
└── src
└── full_check
├── checker
├── base.go
├── full_value_verifier.go
├── key_outline_verifier.go
└── value_outline_verifier.go
├── client
├── address.go
├── client.go
└── db.go
├── common
├── cluster.go
├── command.go
├── common.go
├── helper.go
├── keytype.go
├── log.go
├── mix.go
├── speed.go
├── trieTree.go
└── trieTree_test.go
├── configure
└── conf.go
├── full_check
├── full_check.go
├── full_check_test.go
└── scan.go
├── go.mod
├── go.sum
├── main.go
└── metric
├── counter.go
├── metric.go
└── stat.go
/.gitignore:
--------------------------------------------------------------------------------
1 | pkg
2 | .gopath
3 | .idea
4 | *.iml
5 | logs
6 | *.pprof
7 | *.output
8 | *.data
9 | *.sw[ap]
10 | *.yml
11 | tags
12 | result.db.*
13 | *.tar.gz
14 |
15 | dump.data
16 | runtime.trace
17 |
18 | bin/redis-full-check
19 |
20 | .DS_Store
21 |
22 | data
23 |
24 | .cache/
25 | diagnostic/
26 | *.pid
27 | src/vendor/*
28 | !src/vendor/vendor.json
29 |
--------------------------------------------------------------------------------
/ChangeLog:
--------------------------------------------------------------------------------
1 | 2023-04-27 Alibaba Cloud.
2 | * VERSION: 1.4.10
3 | * BUGFIX: Fix CGO_ENABLED=0 error
4 | * IMPROVE: Modify the license from GPLv3 to Apache2.0
5 | 2023-02-06 Alibaba Cloud.
6 | * VERSION: 1.4.9
 7 |     * IMPROVE: sourcepassword and targetpassword can contain
8 | username, format: username:password.
 9 |     * IMPROVE: use go modules.
10 | 2020-02-13 Alibaba Cloud.
11 | * VERSION: 1.4.8
12 | * BUGFIX: fetch newest redis-go-cluster driver which solve MOVED problem
13 | in redis-shake.
14 |     * IMPROVE: polish error log.
15 | * IMPROVE: validate the input address. if type is cluster, the input list
16 | should be either all masters or all slaves, can't be mix.
17 | 2019-12-03 Alibaba Cloud.
18 | * VERSION: 1.4.7
19 | * BUGFIX: merge PR of solving problem that key difference results are not
20 |       verified. Thanks @shuff1e(sfxu@foxmail.com). see #74.
21 | 2019-11-26 Alibaba Cloud.
22 | * VERSION: 1.4.6
23 | * BUGFIX: update redis-go-cluster to solve \r\n bug. see #73.
24 | 2019-11-12 Alibaba Cloud.
25 | * VERSION: 1.4.5
26 | * BUGFIX: update redis-go-cluster to solve the MOVED error when
27 | redis type is cluster and key is unicode encoding. Thanks
28 | @shuff1e(sfxu@foxmail.com). see#68.
29 | 2019-09-26 Alibaba Cloud.
30 | * VERSION: 1.4.4
31 | * BUGFIX: conflicts number is 0 in diff file and console print.
32 | 2019-08-13 Alibaba Cloud.
33 | * VERSION: 1.4.3
34 | * BUGFIX: single db use cluster client.
35 | * IMPROVE: add log level configuration and print more log info.
36 | * BUGFIX: key conflicts stat inaccurate.
37 | 2019-08-10 Alibaba Cloud.
38 | * VERSION: 1.4.2
39 | * BUGFIX: judge the source is type cluster or db.
40 | 2019-08-07 Alibaba Cloud.
41 | * VERSION: 1.4.1
42 | * BUGFIX: cluster connection panic. see #50, #52.
43 | * IMPROVE: cluster connection auto-detection. see #49.
44 | 2019-07-23 Alibaba Cloud.
45 | * VERSION: 1.4.0
46 | * FEATURE: support cluster.
47 | 2019-06-23 Alibaba Cloud.
48 | * VERSION: 1.2.4
49 | * BUGFIX: fix bug of stat print.
50 |     * IMPROVE: support tencent cloud redis proxy.
51 | 2019-04-04 Alibaba Cloud.
52 | * VERSION: 1.2.3
53 | * BUGFIX: ignore key when type changed between run `scan` and the
54 | following comparison.
55 | * BUGFIX: retry when connection meets net error.
56 | * THANKS: fengweiyuan
57 | 2019-03-28 Alibaba Cloud.
58 | * VERSION: 1.2.2
59 | * BUGFIX: set length to 0 when key is expired on the source, this is used
60 | to fix the bug when source side is slave and keys doesn't be delete when
61 | expired.
62 | 2019-03-25 Alibaba Cloud.
63 | * VERSION: 1.2.1
64 | * BUGFIX: input commands is 0 in `PipeRawCommand`.
65 | 2019-03-14 Alibaba Cloud.
66 | * VERSION: 1.2.0
67 | * FEATURE: polish code structure and support 5.0 stream. Different
68 | comparemodes in stream type have different result: comparemode=1,
69 | compare full value including stream list(`xrange`) and groups info(`xinfo groups`
70 | and `xpending`); comparemode=2, only compare stream list length; comparemode=3,
71 | only compare key existence; comparemode=4, switch from comparemode=1 to
72 | comparemode=2 when current key is a big key(exceed the `bigkeythreshold`).
73 | 2019-03-07 Alibaba Cloud.
74 | * VERSION: 1.0.5
75 | * BUGFIX: all keys are filtered when input filter is empty, bug from v1.0.3.
76 | 2019-03-04 Alibaba Cloud.
77 | * VERSION: 1.0.4
78 | * BUGFIX: divisor is zero when parallel is big
79 | 2019-03-01 Alibaba Cloud.
80 | * version: 1.0.3
81 | * IMPROVE: improve filter list performance.
82 | * THANKS: glowwormX
83 | 2019-02-27 Alibaba Cloud.
84 | * VERSION: 1.0.2
85 | * IMPROVE: add filter list.
86 | 2019-02-27 Alibaba Cloud.
87 | * VERSION: 1.0.1
88 | * IMPROVE: add comparemode=4 to using value length comparison
89 | method for big key only in full-comparison(comparemode=2).
90 | 2019-02-15 Alibaba Cloud.
91 | * VERSION: 1.0.0
92 | * REDIS-FULL-CHECK: initial release.
93 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Redis-full-check is used to compare whether two redis have the same data. We also offer a data synchronization tool called [redis-shake](https://github.com/aliyun/redis-shake) for syncing data from one redis to another redis.
2 | Thanks to the Douyu's WSD team for the support.
3 |
4 | * [中文文档](https://yq.aliyun.com/articles/690463)
5 | * [Download redis-full-check.tar.gz](https://github.com/alibaba/RedisFullCheck/releases)
6 | * [第一次使用,如何进行配置](https://github.com/alibaba/RedisFullCheck/wiki/%E7%AC%AC%E4%B8%80%E6%AC%A1%E4%BD%BF%E7%94%A8%EF%BC%8C%E5%A6%82%E4%BD%95%E8%BF%9B%E8%A1%8C%E9%85%8D%E7%BD%AE%EF%BC%9F)
7 |
8 | # redis-full-check
9 | ---
10 | Redis-full-check is developed and maintained by NoSQL Team in Alibaba-Cloud Database department.
11 | Redis-full-check performs full data verification by comparing the data of the source database and the destination database. The entire check process consists of multiple comparisons; in every comparison, redis-full-check fetches data from the two databases and then compares them, and the inconsistent data is put into a sqlite3 db for the next comparison. By this iterative comparing method, the difference continues to converge. The following figure shows the dataflow. In every comparison, which is the yellow box, redis-full-check fetches all keys first. After that, it runs the comparison and stores the difference result (key and field) into the sqlite3 db, which is the position that keys and fields are fetched from in the next round instead of the source database.
12 | 
13 | Redis-full-check fetches keys from source and then checks these keys exist on the target. So if one key exists on the target but lack on the source, redis-full-check can't find it. If you want to know whether the data in the source and destination databases are exactly the same, you need to set up a bidirectional link:
14 |
15 | * source->RedisFullCheck->target
16 | * target->RedisFullCheck->source
17 |
18 | # supports
19 | standalone, cluster, proxy(aliyun-cluster, tencent-cluster). Redis version from 2.x to 7.x (Don't support Redis Modules).
20 |
21 | # Code branch rules
22 | Version rules: a.b.c.
23 |
24 | * a: major version
25 | * b: minor version. even number means stable version.
26 | * c: bugfix version
27 |
28 | | branch name | rules |
29 | | - | :- |
30 | | master | master branch; pushing code directly is not allowed. stores the latest stable version. develop branch will merge into this branch once a new version is created. |
31 | | **develop**(main branch) | develop branch. all the following branches fork from this. |
32 | | feature-\* | new feature branch. forked from develop branch and then merge back after finish developing, testing, and code review. |
33 | | bugfix-\* | bugfix branch. forked from develop branch and then merge back after finish developing, testing, and code review. |
34 | | improve-\* | improvement branch. forked from develop branch and then merge back after finish developing, testing, and code review. |
35 |
36 | Tag rules:
37 | Add tag when releasing: "release-v{version}-{date}". for example: "release-v1.0.2-20180628"
38 |
39 | # Parameters
40 | ```
41 | Usage:
42 | redis-full-check [OPTIONS]
43 |
44 | Application Options:
45 | -s, --source=SOURCE Set host:port of source redis.
46 | -p, --sourcepassword=Password Set source redis password (format: password or username:password)
47 | --sourceauthtype=AUTH-TYPE useless for opensource redis, valid value:auth/adminauth (default: auth)
48 | -t, --target=TARGET Set host:port of target redis.
49 | -a, --targetpassword=Password Set target redis password (format: password or username:password)
50 | --targetauthtype=AUTH-TYPE useless for opensource redis, valid value:auth/adminauth (default: auth)
51 | -d, --db=Sqlite3-DB-FILE sqlite3 db file for store result. If exist, it will be removed and a new file is created. (default: result.db)
52 | --comparetimes=COUNT Total compare count, at least 1. In the first round, all keys will be compared. The subsequent rounds of the comparison
53 | will be done on the previous results. (default: 3)
54 | -m, --comparemode= compare mode, 1: compare full value, 2: only compare value length, 3: only compare keys outline, 4: compare full value,
55 | but only compare value length when meets big key (default: 2)
56 | --id= used in metric, run id (default: unknown)
57 | --jobid= used in metric, job id (default: unknown)
58 | --taskid= used in metric, task id (default: unknown)
59 | -q, --qps= max qps limit (default: 15000)
60 | --interval=Second The time interval for each round of comparison(Second) (default: 5)
61 | --batchcount=COUNT the count of key/field per batch compare, valid value [1, 10000] (default: 256)
62 | --parallel=COUNT concurrent goroutine number for comparison, valid value [1, 100] (default: 5)
63 | --log=FILE log file, if not specified, log is put to console
64 | --result=FILE store all diff result, format is 'db diff-type key field'
65 | --metric=FILE metrics file
66 | --bigkeythreshold=COUNT
67 | -f, --filterlist=FILTER if the filter list isn't empty, all elements in list will be synced. The input should be split by '|'. The end of the
68 | string is followed by a * to indicate a prefix match, otherwise it is a full match. e.g.: 'abc*|efg|m*' matches 'abc',
69 |                                      'abc1', 'efg', 'm', 'mxyz', but 'efgh' and 'p' aren't
70 | -v, --version
71 |
72 | Help Options:
73 | -h, --help Show this help message
74 | ```
75 |
76 | # Usage
77 | ---
78 | Run `./bin/redis-full-check.darwin64` or `redis-full-check.linux64` which is built in OSX and Linux respectively, however, the binaries aren't always the newest version.
79 | Or you can build redis-full-check yourself according to the following steps:
80 | * git clone https://github.com/alibaba/RedisFullCheck.git
81 | * cd RedisFullCheck/
82 | * ./build.sh
83 | * ./bin/redis-full-check -s $(source_redis_ip_port) -p $(source_password) -t $(target_redis_ip_port) -a $(target_password) # these parameters should be given by users
84 |
85 | Here comes the sqlite3 example to display the conflict result:
86 | ```
87 | $ sqlite3 result.db.3 # result.db.x shows the x-round comparison conflict result. len == -1 means inconsistent key type.
88 |
89 | sqlite> select * from key;
90 | id key type conflict_type db source_len target_len
91 | ---------- --------------- ---------- ------------- ---------- ---------- ----------
92 | 1 keydiff1_string string value 1 6 6
93 | 2 keydiff_hash hash value 0 2 1
94 | 3 keydiff_string string value 0 6 6
95 | 4 key_string_diff string value 0 6 6
96 | 5 keylack_string string lack_target 0 6 0
97 | sqlite>
98 |
99 | sqlite> select * from field;
100 | id field conflict_type key_id
101 | ---------- ---------- ------------- ----------
102 | 1 k1 lack_source 2
103 | 2 k2 value 2
104 | 3 k3 lack_target 2
105 | ```
106 |
107 | # Shake series tool
108 | ---
109 | We also provide some tools for synchronization in Shake series.
110 |
111 | * [MongoShake](https://github.com/aliyun/MongoShake): mongodb data synchronization tool.
112 | * [RedisShake](https://github.com/aliyun/RedisShake): redis data synchronization tool.
113 | * [RedisFullCheck](https://github.com/aliyun/RedisFullCheck): redis data synchronization verification tool.
114 |
115 | # License
116 | - On `20230427` and later, we distribute this library under the new [Apache2.0](https://www.apache.org/licenses/LICENSE-2.0) protocol, `1.4.10` is the first version to support the Apache2.0 protocol.
117 | - Prior to 20230427, it was distributed under the [GPLV3.0](https://www.gnu.org/licenses/gpl-3.0.html) protocol.
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Build redis-full-check and place the resulting binary under ./bin/.
# Embeds branch, commit id, go version, and build time into main.VERSION.

set -o errexit

# The version string sits on the second line of the ChangeLog ("* VERSION: x.y.z").
fullcheckVersion=$(head -n 2 ChangeLog | grep VERSION | awk '{print $3}')

# older version Git don't support --short !
if [ -d ".git" ]; then
    #branch=`git symbolic-ref --short -q HEAD`
    branch=$(git symbolic-ref -q HEAD | awk -F'/' '{print $3;}')
    cid=$(git rev-parse HEAD)
else
    branch="unknown"
    cid="0.0"
fi
branch="$branch,$cid"

# make sure we're in the directory where the script lives
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

GOPATH=$(pwd)
export GOPATH

info="main.VERSION=$branch"
# golang version, e.g. "go1.20.3" -> bigVersion=1, midVersion=20
goversion=$(go version | awk -F' ' '{print $3;}')
info="$info,$goversion"
bigVersion=$(echo "$goversion" | awk -F'[o.]' '{print $2}')
midVersion=$(echo "$goversion" | awk -F'[o.]' '{print $3}')
# Require go >= 1.9. Quote the operands (they may be empty if `go version`
# output is unexpected) and avoid the obsolescent -a/-o test operators.
if [ "$bigVersion" -lt 1 ] || { [ "$bigVersion" -eq 1 ] && [ "$midVersion" -lt 9 ]; }; then
    echo "go version[$goversion] must >= 1.9"
    exit 1
fi

t=$(date "+%Y-%m-%d_%H:%M:%S")
info="$info,$t"

output="$(pwd)/bin/"
rm -rf "${output}"

echo "[ BUILD RELEASE ]"
run_builder='go build -v'

#cd src/full_check
#goos=(windows darwin linux)
#for g in "${goos[@]}"; do
#    export GOOS=$g
#    echo "try build goos=$g"
#    $run_builder -ldflags "-X $info" -o "$output/redis-full-check.$g"
#    unset GOOS
#    echo "build successfully!"
#done
#cp $output/../ChangeLog $output
#cd $output
#tar -cvzf redis-full-check-"$fullcheckVersion".tar.gz redis-full-check.darwin redis-full-check.linux redis-full-check.windows ChangeLog

cd src/full_check
$run_builder -ldflags "-X $info" -o "$output/redis-full-check"
echo "build successfully!"
--------------------------------------------------------------------------------
/resources/dataflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tair-opensource/RedisFullCheck/fd5c1d56401b135380551b4a5b84f940730d8e69/resources/dataflow.png
--------------------------------------------------------------------------------
/src/full_check/checker/base.go:
--------------------------------------------------------------------------------
1 | package checker
2 |
3 | import (
4 | "full_check/common"
5 | "sync"
6 | "full_check/metric"
7 | "full_check/client"
8 | )
9 |
// FullCheckParameter bundles the run-wide settings for one full check
// (values correspond to the command-line flags documented in the README).
type FullCheckParameter struct {
	SourceHost   client.RedisHost // source redis endpoint and credentials
	TargetHost   client.RedisHost // target redis endpoint and credentials
	ResultDBFile string           // sqlite3 db file used to store conflict results
	CompareCount int              // total number of comparison rounds (--comparetimes)
	Interval     int              // time interval between comparison rounds, in seconds
	BatchCount   int              // count of keys/fields compared per pipelined batch
	Parallel     int              // number of concurrent comparison goroutines
	FilterTree   *common.Trie     // key filter list; nil/empty means no filtering
}
20 |
// VerifierBase carries the state shared by every verifier implementation:
// the conflict-metric sink and the check parameters.
type VerifierBase struct {
	Stat  *metric.Stat        // conflict counters, updated via IncrKeyStat/IncrFieldStat
	Param *FullCheckParameter // run-wide comparison settings
}
25 |
// IncrKeyStat bumps the conflict-key counter for the key's type and its
// recorded conflict type by one.
func (p *VerifierBase) IncrKeyStat(oneKeyInfo *common.Key) {
	p.Stat.ConflictKey[oneKeyInfo.Tp.Index][oneKeyInfo.ConflictType].Inc(1)
}
29 |
// IncrFieldStat bumps the conflict-field counter for the key's type and the
// given field-level conflict type by one.
func (p *VerifierBase) IncrFieldStat(oneKeyInfo *common.Key, conType common.ConflictType) {
	p.Stat.ConflictField[oneKeyInfo.Tp.Index][conType].Inc(1)
}
33 |
34 | func (p *VerifierBase) FetchTypeAndLen(keyInfo []*common.Key, sourceClient, targetClient *client.RedisClient) {
35 | // fetch type
36 | sourceKeyTypeStr, err := sourceClient.PipeTypeCommand(keyInfo)
37 | if err != nil {
38 | panic(common.Logger.Critical(err))
39 | }
40 | for i, t := range sourceKeyTypeStr {
41 | keyInfo[i].Tp = common.NewKeyType(t)
42 | // fmt.Printf("key:%v, type:%v cmd:%v\n", string(keyInfo[i].Key), t, keyInfo[i].Tp.FetchLenCommand)
43 | }
44 |
45 | var wg sync.WaitGroup
46 | wg.Add(1)
47 | // fetch len
48 | go func() {
49 | sourceKeyLen, err := sourceClient.PipeLenCommand(keyInfo)
50 | if err != nil {
51 | panic(common.Logger.Critical(err))
52 | }
53 | for i, keylen := range sourceKeyLen {
54 | keyInfo[i].SourceAttr.ItemCount = keylen
55 | }
56 | wg.Done()
57 | }()
58 |
59 | wg.Add(1)
60 | go func() {
61 | targetKeyLen, err := targetClient.PipeLenCommand(keyInfo)
62 | if err != nil {
63 | panic(common.Logger.Critical(err))
64 | }
65 | for i, keylen := range targetKeyLen {
66 | keyInfo[i].TargetAttr.ItemCount = keylen
67 | }
68 | wg.Done()
69 | }()
70 |
71 | wg.Wait()
72 | }
73 |
74 | func (p *VerifierBase) RecheckTTL(keyInfo []*common.Key, client *client.RedisClient) {
75 | reCheckKeys := make([]*common.Key, 0, len(keyInfo))
76 | for _, key := range keyInfo {
77 | if key.TargetAttr.ItemCount == 0 && key.SourceAttr.ItemCount > 0 {
78 | reCheckKeys = append(reCheckKeys, key)
79 | }
80 | }
81 | if len(reCheckKeys) != 0 {
82 | p.recheckTTL(reCheckKeys, client)
83 | }
84 | }
85 |
86 | func (p *VerifierBase) recheckTTL(keyInfo []*common.Key, client *client.RedisClient) {
87 | keyExpire, err := client.PipeTTLCommand(keyInfo)
88 | if err != nil {
89 | panic(common.Logger.Critical(err))
90 | }
91 | for i, expire := range keyExpire {
92 | if expire {
93 | keyInfo[i].SourceAttr.ItemCount = 0
94 | }
95 | }
96 | }
97 |
// IVerifier is implemented by each comparison strategy (full value, key
// outline, value outline). VerifyOneGroupKeyInfo compares one batch of keys
// between source and target and sends every conflicting key to conflictKey.
type IVerifier interface {
	VerifyOneGroupKeyInfo(keyInfo []*common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient,
		targetClient *client.RedisClient)
}
102 |
// ValueOutlineVerifier is the verifier for outline (length-based) value
// comparison; it embeds VerifierBase for the shared stat/parameter plumbing.
// Its VerifyOneGroupKeyInfo implementation lives in value_outline_verifier.go.
type ValueOutlineVerifier struct {
	VerifierBase
}
--------------------------------------------------------------------------------
/src/full_check/checker/full_value_verifier.go:
--------------------------------------------------------------------------------
1 | package checker
2 |
3 | import (
4 | "full_check/common"
5 | "bytes"
6 | "full_check/metric"
7 | "full_check/client"
8 | "strconv"
9 | "reflect"
10 | "math"
11 | )
12 |
const (
	// StreamSegment bounds how many stream entries are handled per step when
	// comparing stream-type keys. NOTE(review): its use site is beyond this
	// chunk of the file — confirm against the stream-comparison code.
	StreamSegment = 5000
)
16 |
// FullValueVerifier performs full-value comparison: it fetches the complete
// value of each key from both sides and compares them element by element.
type FullValueVerifier struct {
	VerifierBase
	ignoreBigKey bool // only compare value length for big key when this parameter is enabled.
}
21 |
22 | func NewFullValueVerifier(stat *metric.Stat, param *FullCheckParameter, ignoreBigKey bool) *FullValueVerifier {
23 | return &FullValueVerifier{
24 | VerifierBase: VerifierBase{stat, param},
25 | ignoreBigKey: ignoreBigKey,
26 | }
27 | }
28 |
// VerifyOneGroupKeyInfo verifies one batch of keys against the target.
// Keys seen for the first time (ConflictType == EndConflict) are screened by
// type/length and dispatched to a full comparison; keys that conflicted in an
// earlier round are re-verified with a narrower strategy that depends on the
// previous conflict type. Conflicting keys are sent to conflictKey.
func (p *FullValueVerifier) VerifyOneGroupKeyInfo(keyInfo []*common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
	// fetch type and length for keys that do not carry a type yet
	noTypeKeyInfo := make([]*common.Key, 0, len(keyInfo))
	for i := 0; i < len(keyInfo); i++ {
		if keyInfo[i].Tp == common.EndKeyType {
			noTypeKeyInfo = append(noTypeKeyInfo, keyInfo[i])
		}
	}
	if len(noTypeKeyInfo) != 0 {
		p.FetchTypeAndLen(noTypeKeyInfo, sourceClient, targetClient)
	}

	// re-check ttl on the source side when key missing on the target side
	p.RecheckTTL(keyInfo, sourceClient)

	// compare, filter
	fullCheckFetchAllKeyInfo := make([]*common.Key, 0, len(keyInfo))
	retryNewVerifyKeyInfo := make([]*common.Key, 0, len(keyInfo))
	for i := 0; i < len(keyInfo); i++ {
		/************ keys being compared for the first time ***********/
		if keyInfo[i].ConflictType == common.EndConflict { // from the second round on, conflictType is never EndConflict
			// the key was deleted on the source while fetching its type:
			// treat it as consistent
			if keyInfo[i].Tp == common.NoneKeyType {
				keyInfo[i].ConflictType = common.NoneConflict
				p.IncrKeyStat(keyInfo[i])
				continue
			}

			// the source-side type changed between the type fetch and a later
			// scan round; this situation is not handled, just skipped
			if keyInfo[i].SourceAttr.ItemCount == common.TypeChanged {
				continue
			}

			// key lack in the target redis
			if keyInfo[i].TargetAttr.ItemCount == 0 {
				if keyInfo[i].TargetAttr.ItemCount != keyInfo[i].SourceAttr.ItemCount {
					keyInfo[i].ConflictType = common.LackTargetConflict
					p.IncrKeyStat(keyInfo[i])
					conflictKey <- keyInfo[i]
					continue
				} else if keyInfo[i].SourceAttr.ItemCount == 0 {
					// no need to check anymore
					continue
				}
			}

			// type mismatch: ItemCount == -1 means the key's type on the
			// target redis differs from the source
			if keyInfo[i].TargetAttr.ItemCount == common.TypeChanged {
				keyInfo[i].ConflictType = common.TypeConflict
				p.IncrKeyStat(keyInfo[i])
				conflictKey <- keyInfo[i]
				continue
			}

			// string with mismatching strlen: filter out early as a conflict
			if keyInfo[i].Tp == common.StringKeyType && keyInfo[i].SourceAttr.ItemCount != keyInfo[i].TargetAttr.ItemCount {
				keyInfo[i].ConflictType = common.ValueConflict
				p.IncrKeyStat(keyInfo[i])
				conflictKey <- keyInfo[i]
				continue
			}

			// oversized hash/list/set/zset keys get special per-type handling
			if keyInfo[i].Tp != common.StringKeyType &&
				(keyInfo[i].SourceAttr.ItemCount > common.BigKeyThreshold ||
				keyInfo[i].TargetAttr.ItemCount > common.BigKeyThreshold) {
				if p.ignoreBigKey {
					// big-key value compare disabled: only compare item counts
					if keyInfo[i].SourceAttr.ItemCount != keyInfo[i].TargetAttr.ItemCount {
						keyInfo[i].ConflictType = common.ValueConflict
						p.IncrKeyStat(keyInfo[i])
						conflictKey <- keyInfo[i]
					} else {
						keyInfo[i].ConflictType = common.NoneConflict
						p.IncrKeyStat(keyInfo[i])
					}
					continue
				}

				switch keyInfo[i].Tp {
				case common.HashKeyType:
					fallthrough
				case common.SetKeyType:
					fallthrough
				case common.ZsetKeyType:
					// incremental SCAN-based fetch to avoid loading the whole value
					sourceValue, err := sourceClient.FetchValueUseScan_Hash_Set_SortedSet(keyInfo[i], p.Param.BatchCount)
					if err != nil {
						panic(common.Logger.Error(err))
					}
					targetValue, err := targetClient.FetchValueUseScan_Hash_Set_SortedSet(keyInfo[i], p.Param.BatchCount)
					if err != nil {
						panic(common.Logger.Error(err))
					}
					p.Compare_Hash_Set_SortedSet(keyInfo[i], conflictKey, sourceValue, targetValue)
				case common.ListKeyType:
					p.CheckFullBigValue_List(keyInfo[i], conflictKey, sourceClient, targetClient)
				case common.StreamKeyType:
					p.CompareStream(keyInfo[i], conflictKey, sourceClient, targetClient)
				}
				continue
			}

			// special handle for stream type
			if keyInfo[i].Tp == common.StreamKeyType {
				p.CompareStream(keyInfo[i], conflictKey, sourceClient, targetClient)
				continue
			}

			// everything else goes to CheckFullValueFetchAll: pipeline +
			// fetching the whole value at once, then compare
			fullCheckFetchAllKeyInfo = append(fullCheckFetchAllKeyInfo, keyInfo[i])

			continue
		} else {
			/************ keys already compared in an earlier round ***********/
			// these 3 conflict types are re-verified from scratch
			if keyInfo[i].ConflictType == common.LackSourceConflict ||
				keyInfo[i].ConflictType == common.LackTargetConflict ||
				keyInfo[i].ConflictType == common.TypeConflict {
				keyInfo[i].Tp = common.EndKeyType // re-fetch type and length
				keyInfo[i].ConflictType = common.EndConflict // use the first-round comparison path
				retryNewVerifyKeyInfo = append(retryNewVerifyKeyInfo, keyInfo[i])
				continue
			}

			if keyInfo[i].ConflictType == common.ValueConflict {
				if keyInfo[i].Tp != common.StringKeyType &&
					(keyInfo[i].SourceAttr.ItemCount > common.BigKeyThreshold ||
					keyInfo[i].TargetAttr.ItemCount > common.BigKeyThreshold) &&
					p.ignoreBigKey {
					// big-key value compare disabled: only compare item counts
					if keyInfo[i].SourceAttr.ItemCount != keyInfo[i].TargetAttr.ItemCount {
						keyInfo[i].ConflictType = common.ValueConflict
						p.IncrKeyStat(keyInfo[i])
						conflictKey <- keyInfo[i]
					} else {
						keyInfo[i].ConflictType = common.NoneConflict
						p.IncrKeyStat(keyInfo[i])
					}
					continue
				}

				switch keyInfo[i].Tp {
				// string and list must re-compare every field value:
				// lpush/lpop shift list elements, so all positions are re-checked
				case common.StringKeyType:
					fullCheckFetchAllKeyInfo = append(fullCheckFetchAllKeyInfo, keyInfo[i])
				case common.ListKeyType:
					if keyInfo[i].SourceAttr.ItemCount > common.BigKeyThreshold ||
						keyInfo[i].TargetAttr.ItemCount > common.BigKeyThreshold {
						p.CheckFullBigValue_List(keyInfo[i], conflictKey, sourceClient, targetClient)
					} else {
						fullCheckFetchAllKeyInfo = append(fullCheckFetchAllKeyInfo, keyInfo[i])
					}
				// hash/set/zset only re-compare fields that conflicted last round
				case common.HashKeyType:
					p.CheckPartialValueHash(keyInfo[i], conflictKey, sourceClient, targetClient)
				case common.SetKeyType:
					p.CheckPartialValueSet(keyInfo[i], conflictKey, sourceClient, targetClient)
				case common.ZsetKeyType:
					p.CheckPartialValueSortedSet(keyInfo[i], conflictKey, sourceClient, targetClient)
				case common.StreamKeyType:
					p.CompareStream(keyInfo[i], conflictKey, sourceClient, targetClient)
				}
				continue
			}
		}
	} // end of for i := 0; i < len(keyInfo); i++

	if len(fullCheckFetchAllKeyInfo) != 0 {
		p.CheckFullValueFetchAll(fullCheckFetchAllKeyInfo, conflictKey, sourceClient, targetClient)
	}
	if len(retryNewVerifyKeyInfo) != 0 {
		// recurse: retried keys go through the first-round path again
		p.VerifyOneGroupKeyInfo(retryNewVerifyKeyInfo, conflictKey, sourceClient, targetClient)
	}

}
205 |
206 | func (p *FullValueVerifier) CheckFullValueFetchAll(keyInfo []*common.Key, conflictKey chan<- *common.Key,
207 | sourceClient, targetClient *client.RedisClient) {
208 | // fetch value
209 | sourceReply, err := sourceClient.PipeValueCommand(keyInfo)
210 | if err != nil {
211 | panic(common.Logger.Critical(err))
212 | }
213 |
214 | targetReply, err := targetClient.PipeValueCommand(keyInfo)
215 | if err != nil {
216 | panic(common.Logger.Critical(err))
217 | }
218 |
219 | // compare value
220 | for i, oneKeyInfo := range keyInfo {
221 | switch oneKeyInfo.Tp {
222 | case common.StringKeyType:
223 | var sourceValue, targetValue []byte
224 | if sourceReply[i] != nil {
225 | sourceValue = sourceReply[i].([]byte)
226 | }
227 | if targetReply[i] != nil {
228 | targetValue = targetReply[i].([]byte)
229 | }
230 | p.Compare_String(oneKeyInfo, conflictKey, sourceValue, targetValue)
231 | p.IncrKeyStat(oneKeyInfo)
232 | case common.HashKeyType:
233 | fallthrough
234 | case common.ZsetKeyType:
235 | sourceValue, targetValue := common.ValueHelper_Hash_SortedSet(sourceReply[i]), common.ValueHelper_Hash_SortedSet(targetReply[i])
236 | p.Compare_Hash_Set_SortedSet(oneKeyInfo, conflictKey, sourceValue, targetValue)
237 | case common.ListKeyType:
238 | sourceValue, targetValue := common.ValueHelper_List(sourceReply[i]), common.ValueHelper_List(targetReply[i])
239 | p.Compare_List(oneKeyInfo, conflictKey, sourceValue, targetValue)
240 | case common.SetKeyType:
241 | sourceValue, targetValue := common.ValueHelper_Set(sourceReply[i]), common.ValueHelper_Set(targetReply[i])
242 | p.Compare_Hash_Set_SortedSet(oneKeyInfo, conflictKey, sourceValue, targetValue)
243 | }
244 | }
245 | }
246 |
247 | func (p *FullValueVerifier) CheckPartialValueHash(oneKeyInfo *common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
248 | sourceValue, targetValue := make(map[string][]byte), make(map[string][]byte)
249 | for fieldIndex := 0; fieldIndex < len(oneKeyInfo.Field); {
250 | args := make([]interface{}, 0, p.Param.BatchCount)
251 | args = append(args, oneKeyInfo.Key)
252 | for count := 0; count < p.Param.BatchCount && fieldIndex < len(oneKeyInfo.Field); count, fieldIndex = count+1, fieldIndex+1 {
253 | args = append(args, oneKeyInfo.Field[fieldIndex].Field)
254 | }
255 |
256 | sourceReply, err := sourceClient.Do("hmget", args...)
257 | if err != nil {
258 | panic(common.Logger.Error(err))
259 | }
260 | targetReply, err := targetClient.Do("hmget", args...)
261 | if err != nil {
262 | panic(common.Logger.Error(err))
263 | }
264 | sendField := args[1:]
265 |
266 | tmpSourceValue, tmpTargetValue := sourceReply.([]interface{}), targetReply.([]interface{})
267 | for i := 0; i < len(sendField); i++ {
268 | fieldStr := string(sendField[i].([]byte))
269 | if tmpSourceValue[i] != nil {
270 | sourceValue[fieldStr] = tmpSourceValue[i].([]byte)
271 | }
272 | if tmpTargetValue[i] != nil {
273 | targetValue[fieldStr] = tmpTargetValue[i].([]byte)
274 | }
275 | }
276 | } // end of for fieldIndex := 0; fieldIndex < len(oneKeyInfo.Field)
277 | p.Compare_Hash_Set_SortedSet(oneKeyInfo, conflictKey, sourceValue, targetValue)
278 | }
279 |
280 | func (p *FullValueVerifier) CheckPartialValueSet(oneKeyInfo *common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
281 | sourceValue, targetValue := make(map[string][]byte), make(map[string][]byte)
282 | for fieldIndex := 0; fieldIndex < len(oneKeyInfo.Field); {
283 | sendField := make([][]byte, 0, p.Param.BatchCount)
284 | for count := 0; count < p.Param.BatchCount && fieldIndex < len(oneKeyInfo.Field); count, fieldIndex = count+1, fieldIndex+1 {
285 | sendField = append(sendField, oneKeyInfo.Field[fieldIndex].Field)
286 | }
287 | tmpSourceValue, err := sourceClient.PipeSismemberCommand(oneKeyInfo.Key, sendField)
288 | if err != nil {
289 | panic(common.Logger.Error(err))
290 | }
291 | tmpTargetValue, err := targetClient.PipeSismemberCommand(oneKeyInfo.Key, sendField)
292 | if err != nil {
293 | panic(common.Logger.Error(err))
294 | }
295 | for i := 0; i < len(sendField); i++ {
296 | fieldStr := string(sendField[i])
297 | sourceNum := tmpSourceValue[i].(int64)
298 | if sourceNum != 0 {
299 | sourceValue[fieldStr] = nil
300 | }
301 | targetNum := tmpTargetValue[i].(int64)
302 | if targetNum != 0 {
303 | targetValue[fieldStr] = nil
304 | }
305 | }
306 | } // for fieldIndex := 0; fieldIndex < len(oneKeyInfo.Field);
307 | p.Compare_Hash_Set_SortedSet(oneKeyInfo, conflictKey, sourceValue, targetValue)
308 | }
309 |
310 | func (p *FullValueVerifier) CheckPartialValueSortedSet(oneKeyInfo *common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
311 | sourceValue, targetValue := make(map[string][]byte), make(map[string][]byte)
312 | for fieldIndex := 0; fieldIndex < len(oneKeyInfo.Field); {
313 | sendField := make([][]byte, 0, p.Param.BatchCount)
314 | for count := 0; count < p.Param.BatchCount && fieldIndex < len(oneKeyInfo.Field); count, fieldIndex = count+1, fieldIndex+1 {
315 | sendField = append(sendField, oneKeyInfo.Field[fieldIndex].Field)
316 | }
317 |
318 | tmpSourceValue, err := sourceClient.PipeZscoreCommand(oneKeyInfo.Key, sendField)
319 | if err != nil {
320 | panic(common.Logger.Error(err))
321 | }
322 | tmpTargetValue, err := targetClient.PipeZscoreCommand(oneKeyInfo.Key, sendField)
323 | if err != nil {
324 | panic(common.Logger.Error(err))
325 | }
326 |
327 | for i := 0; i < len(sendField); i++ {
328 | fieldStr := string(sendField[i])
329 | if tmpSourceValue[i] != nil {
330 | sourceValue[fieldStr] = tmpSourceValue[i].([]byte)
331 | }
332 | if tmpTargetValue[i] != nil {
333 | targetValue[fieldStr] = tmpTargetValue[i].([]byte)
334 | }
335 | }
336 | }
337 | p.Compare_Hash_Set_SortedSet(oneKeyInfo, conflictKey, sourceValue, targetValue)
338 | }
339 |
// CheckFullBigValue_List compares a big list window by window with LRANGE,
// stopping at the first mismatching index. Only the first difference is
// reported for lists (an lpush/lpop shifts every later position, so listing
// them all would be noise).
func (p *FullValueVerifier) CheckFullBigValue_List(oneKeyInfo *common.Key, conflictKey chan<- *common.Key,
	sourceClient *client.RedisClient, targetClient *client.RedisClient) {
	conflictField := make([]common.Field, 0, oneKeyInfo.SourceAttr.ItemCount/100+1)
	// window size: 10x the batch count, capped at 10240 elements per LRANGE
	oneCmpCount := p.Param.BatchCount * 10
	if oneCmpCount > 10240 {
		oneCmpCount = 10240
	}

	startIndex := 0
	for {
		sourceReply, err := sourceClient.Do("lrange", oneKeyInfo.Key, startIndex, startIndex+oneCmpCount-1)
		if err != nil {
			panic(common.Logger.Critical(err))
		}
		sourceValue := sourceReply.([]interface{})

		targetReply, err := targetClient.Do("lrange", oneKeyInfo.Key, startIndex, startIndex+oneCmpCount-1)
		if err != nil {
			panic(common.Logger.Error(err))
		}
		targetValue := targetReply.([]interface{})

		// compare element-wise over the common prefix of the two windows
		minLen := common.Min(len(sourceValue), len(targetValue))
		for i := 0; i < minLen; i++ {
			if bytes.Equal(sourceValue[i].([]byte), targetValue[i].([]byte)) == false {
				field := common.Field{
					Field:        []byte(strconv.FormatInt(int64(startIndex+i), 10)),
					ConflictType: common.ValueConflict,
				}
				conflictField = append(conflictField, field)
				p.IncrFieldStat(oneKeyInfo, common.ValueConflict)
			} else {
				p.IncrFieldStat(oneKeyInfo, common.NoneConflict)
			}
		}
		// lists only report the first mismatching position
		if len(conflictField) != 0 {
			break
		}
		// a short window means the source or target list was fully consumed
		if minLen < oneCmpCount {
			break
		}
		startIndex += oneCmpCount
	} // end for{}

	if len(conflictField) != 0 {
		// lists only report the first mismatching position
		oneKeyInfo.Field = conflictField[0:1]
		oneKeyInfo.ConflictType = common.ValueConflict
		conflictKey <- oneKeyInfo
	} else {
		oneKeyInfo.Field = nil
		oneKeyInfo.ConflictType = common.NoneConflict
	}
	p.IncrKeyStat(oneKeyInfo)
}
397 |
398 | func (p *FullValueVerifier) Compare_String(oneKeyInfo *common.Key, conflictKey chan<- *common.Key, sourceValue, targetValue []byte) {
399 | if len(sourceValue) == 0 {
400 | if len(targetValue) == 0 {
401 | oneKeyInfo.ConflictType = common.NoneConflict
402 | } else {
403 | oneKeyInfo.ConflictType = common.LackSourceConflict
404 | }
405 | } else if len(targetValue) == 0 {
406 | if len(sourceValue) == 0 {
407 | oneKeyInfo.ConflictType = common.NoneConflict
408 | } else {
409 | oneKeyInfo.ConflictType = common.LackTargetConflict
410 | }
411 | } else if bytes.Equal(sourceValue, targetValue) == false {
412 | oneKeyInfo.ConflictType = common.ValueConflict
413 | } else {
414 | oneKeyInfo.ConflictType = common.NoneConflict
415 | }
416 | if oneKeyInfo.ConflictType != common.NoneConflict {
417 | conflictKey <- oneKeyInfo
418 | }
419 | }
420 |
421 | func (p *FullValueVerifier) Compare_Hash_Set_SortedSet(oneKeyInfo *common.Key, conflictKey chan<- *common.Key, sourceValue, targetValue map[string][]byte) {
422 | conflictField := make([]common.Field, 0, len(sourceValue)/50+1)
423 | for k, v := range sourceValue {
424 | vTarget, ok := targetValue[k]
425 | if ok == false {
426 | conflictField = append(conflictField, common.Field{
427 | Field: []byte(k),
428 | ConflictType: common.LackTargetConflict})
429 | p.IncrFieldStat(oneKeyInfo, common.LackTargetConflict)
430 | } else {
431 | delete(targetValue, k)
432 | if bytes.Equal(v, vTarget) == false {
433 | conflictField = append(conflictField, common.Field{
434 | Field: []byte(k),
435 | ConflictType: common.ValueConflict})
436 | p.IncrFieldStat(oneKeyInfo, common.ValueConflict)
437 | } else {
438 | p.IncrFieldStat(oneKeyInfo, common.NoneConflict)
439 | }
440 | }
441 | }
442 |
443 | for k, _ := range targetValue {
444 | conflictField = append(conflictField, common.Field{
445 | Field: []byte(k),
446 | ConflictType: common.LackSourceConflict})
447 | p.IncrFieldStat(oneKeyInfo, common.LackSourceConflict)
448 | }
449 |
450 | if len(conflictField) != 0 {
451 | oneKeyInfo.Field = conflictField
452 | oneKeyInfo.ConflictType = common.ValueConflict
453 | conflictKey <- oneKeyInfo
454 | } else {
455 | oneKeyInfo.ConflictType = common.NoneConflict
456 | }
457 | p.IncrKeyStat(oneKeyInfo)
458 | }
459 |
460 | func (p *FullValueVerifier) Compare_List(oneKeyInfo *common.Key, conflictKey chan<- *common.Key, sourceValue, targetValue [][]byte) {
461 | minLen := common.Min(len(sourceValue), len(targetValue))
462 |
463 | oneKeyInfo.ConflictType = common.NoneConflict
464 | for i := 0; i < minLen; i++ {
465 | if bytes.Equal(sourceValue[i], targetValue[i]) == false {
466 | // list 只保存第一个不一致的field
467 | oneKeyInfo.Field = make([]common.Field, 1)
468 | oneKeyInfo.Field[0] = common.Field{
469 | Field: []byte(strconv.FormatInt(int64(i), 10)),
470 | ConflictType: common.ValueConflict}
471 | oneKeyInfo.ConflictType = common.ValueConflict
472 | conflictKey <- oneKeyInfo
473 | break
474 | }
475 | }
476 | p.IncrKeyStat(oneKeyInfo)
477 | }
478 |
479 | /*
480 | * In this function, I separate comparison into the following steps:
481 | * 1. compare groups info(`xinfo groups ${stream_name}`)
482 | * 2. compare all elements in stream(`xrange ${stream_name} - + count ${number}`)
483 | * 3. compare all elements in PEL(`xpending ${stream_name} ${group} - + ${number}`)
484 | */
485 | func (p *FullValueVerifier) CompareStream(oneKeyInfo *common.Key, conflictKey chan<- *common.Key,
486 | sourceClient, targetClient *client.RedisClient) {
487 | // 1. fetch source and target groups info
488 | sourceGroupsInfo, err := sourceClient.Do("XINFO", "GROUPS", oneKeyInfo.Key)
489 | if err != nil {
490 | panic(common.Logger.Error(err))
491 | }
492 |
493 | targetGroupsInfo, err := targetClient.Do("XINFO", "GROUPS", oneKeyInfo.Key)
494 | if err != nil {
495 | panic(common.Logger.Error(err))
496 | }
497 |
498 | if reflect.DeepEqual(sourceGroupsInfo, targetGroupsInfo) == false {
499 | oneKeyInfo.ConflictType = common.ValueConflict
500 | p.IncrKeyStat(oneKeyInfo)
501 | conflictKey <- oneKeyInfo
502 | return
503 | }
504 |
505 | // get groups and pending length which will be used in step 3
506 | type groupOutline struct {
507 | name string
508 | pendingLength int64
509 | }
510 | groupsBasic := make([]groupOutline, 0, len(sourceGroupsInfo.([]interface{})))
511 | for _, ele := range sourceGroupsInfo.([]interface{}) {
512 | line := ele.([]interface{})
513 | /*
514 | * 1) 1) "name"
515 | * 2) "cg1"
516 | * 3) "consumers"
517 | * 4) (integer) 2
518 | * 5) "pending"
519 | * 6) (integer) 7
520 | * 7) "last-delivered-id"
521 | * 8) "1552541825298-0"
522 | */
523 | groupsBasic = append(groupsBasic, groupOutline{
524 | name: string(line[1].([]byte)),
525 | pendingLength: line[5].(int64),
526 | })
527 | // fmt.Println(groupsBasic[len(groupsBasic) - 1].name, groupsBasic[len(groupsBasic) - 1].pendingLength)
528 | }
529 |
530 | // 2. compare all elements in stream
531 | length := oneKeyInfo.SourceAttr.ItemCount
532 | step := int64(math.Max(float64(StreamSegment), float64(length) / 20))
533 | for sum, startTs := int64(0), "0-0"; sum < length; sum += step {
534 | // fetch all elements in stream
535 | // 1. from source
536 | sourceXrange, err := sourceClient.Do("XRANGE", oneKeyInfo.Key, startTs, "+", "COUNT", step)
537 | if err != nil {
538 | panic(common.Logger.Error(err))
539 | }
540 |
541 | // 2. from target
542 | targetXrange, err := targetClient.Do("XRANGE", oneKeyInfo.Key, startTs, "+", "COUNT", step)
543 | if err != nil {
544 | panic(common.Logger.Error(err))
545 | }
546 |
547 | // 3. deep comparison
548 | if reflect.DeepEqual(sourceXrange, targetXrange) == false {
549 | oneKeyInfo.ConflictType = common.ValueConflict
550 | p.IncrKeyStat(oneKeyInfo)
551 | conflictKey <- oneKeyInfo
552 | return
553 | }
554 |
555 | // 4. get last ts in this batch
556 | length := len(sourceXrange.([]interface{}))
557 | if length > 0 {
558 | last := sourceXrange.([]interface{})[length-1]
559 | lastTs := last.([]interface{})[0].([]byte)
560 | startTs = string(lastTs)
561 | }
562 | }
563 |
564 | // 3. compare all elements in PEL
565 | for _, groupEle := range groupsBasic {
566 | step := int64(math.Max(float64(StreamSegment), float64(length) / 20))
567 | for sum, startTs := int64(0), "0-0"; sum < length; sum += step {
568 | sourceXpending, err := sourceClient.Do("XPENDING", oneKeyInfo.Key, groupEle.name, startTs,
569 | "+", step)
570 | if err != nil {
571 | panic(common.Logger.Error(err))
572 | }
573 |
574 | targetXpending, err := targetClient.Do("XPENDING", oneKeyInfo.Key, groupEle.name, startTs,
575 | "+", step)
576 | if err != nil {
577 | panic(common.Logger.Error(err))
578 | }
579 |
580 | sourceXpendingArray := sourceXpending.([]interface{})
581 | targetXpendingArray := targetXpending.([]interface{})
582 |
583 | if len(sourceXpendingArray) != len(targetXpendingArray) {
584 | oneKeyInfo.ConflictType = common.ValueConflict
585 | conflictKey <- oneKeyInfo
586 | return
587 | }
588 |
589 | // fmt.Println("aa ", len(sourceXpendingArray), len(targetXpendingArray))
590 |
591 | for i := 0; i < len(sourceXpendingArray); i++ {
592 | // only compare timestamp and consumer
593 | /*
594 | * 1) 1) "1552541673724-0"
595 | * 2) "Bob"
596 | * 3) (integer) 349116818
597 | * 4) (integer) 1
598 | */
599 | s := sourceXpendingArray[i].([]interface{})
600 | t := targetXpendingArray[i].([]interface{})
601 | if reflect.DeepEqual(s[0], t[0]) == false || reflect.DeepEqual(s[1], t[1]) == false {
602 | oneKeyInfo.ConflictType = common.ValueConflict
603 | p.IncrKeyStat(oneKeyInfo)
604 | conflictKey <- oneKeyInfo
605 | return
606 | }
607 | }
608 |
609 | // set last-ts
610 | last := sourceXpendingArray[len(sourceXpendingArray) - 1]
611 | lastTs := last.([]interface{})[0].([]byte)
612 | startTs = string(lastTs)
613 | }
614 | }
615 | }
--------------------------------------------------------------------------------
/src/full_check/checker/key_outline_verifier.go:
--------------------------------------------------------------------------------
1 | package checker
2 |
3 | import (
4 | "full_check/common"
5 | "sync"
6 | "full_check/metric"
7 | "full_check/client"
8 | )
9 |
// KeyOutlineVerifier only checks key existence: source-side type plus a
// target-side EXISTS flag, without comparing any values.
type KeyOutlineVerifier struct {
	VerifierBase
}
13 |
14 | func NewKeyOutlineVerifier(stat *metric.Stat, param *FullCheckParameter) *KeyOutlineVerifier {
15 | return &KeyOutlineVerifier{VerifierBase{stat, param}}
16 | }
17 |
18 | func (p *KeyOutlineVerifier) FetchKeys(keyInfo []*common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
19 | // fetch type
20 | var wg sync.WaitGroup
21 | wg.Add(1)
22 | go func() {
23 | sourceKeyTypeStr, err := sourceClient.PipeTypeCommand(keyInfo)
24 | if err != nil {
25 | panic(common.Logger.Critical(err))
26 | }
27 | for i, t := range sourceKeyTypeStr {
28 | keyInfo[i].Tp = common.NewKeyType(t)
29 | /*
30 | * Bugfix: see https://github.com/alibaba/RedisFullCheck/issues/74.
31 | * It will skip the conflict key check because keyInfo[i].SourceAttr.ItemCount is zero here.
32 | * Unlike the FetchTypeAndLen method in full_value_verifier, which will assign a non -zero value of keylen to keyInfo[i].SourceAttr.ItemCount
33 | * Refer to the VerifyOneGroupKeyInfo method for details.
34 | */
35 | keyInfo[i].SourceAttr.ItemCount = 1
36 | }
37 | wg.Done()
38 | }()
39 |
40 | wg.Add(1)
41 | go func() {
42 | targetKeyTypeStr, err := targetClient.PipeExistsCommand(keyInfo)
43 | if err != nil {
44 | panic(common.Logger.Critical(err))
45 | }
46 | for i, t := range targetKeyTypeStr {
47 | keyInfo[i].TargetAttr.ItemCount = t
48 | }
49 | wg.Done()
50 | }()
51 |
52 | wg.Wait()
53 | }
54 |
55 | func (p *KeyOutlineVerifier) VerifyOneGroupKeyInfo(keyInfo []*common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
56 | p.FetchKeys(keyInfo, sourceClient, targetClient)
57 |
58 | // re-check ttl on the source side when key missing on the target side
59 | p.RecheckTTL(keyInfo, sourceClient)
60 |
61 | // compare, filter
62 | for i := 0; i < len(keyInfo); i++ {
63 | // 在fetch type和之后的轮次扫描之间源端类型更改,不处理这种错误
64 | if keyInfo[i].SourceAttr.ItemCount == common.TypeChanged {
65 | continue
66 | }
67 |
68 | // key lack in target redis
69 | if keyInfo[i].TargetAttr.ItemCount == 0 &&
70 | keyInfo[i].TargetAttr.ItemCount != keyInfo[i].SourceAttr.ItemCount {
71 | keyInfo[i].ConflictType = common.LackTargetConflict
72 | p.IncrKeyStat(keyInfo[i])
73 | conflictKey <- keyInfo[i]
74 | }
75 | } // end of for i := 0; i < len(keyInfo); i++
76 | }
77 |
--------------------------------------------------------------------------------
/src/full_check/checker/value_outline_verifier.go:
--------------------------------------------------------------------------------
1 | package checker
2 |
3 | import (
4 | "full_check/metric"
5 | "full_check/common"
6 | "full_check/client"
7 | )
8 |
9 | func NewValueOutlineVerifier(stat *metric.Stat, param *FullCheckParameter) *ValueOutlineVerifier {
10 | return &ValueOutlineVerifier{VerifierBase{stat, param}}
11 | }
12 |
13 | func (p *ValueOutlineVerifier) VerifyOneGroupKeyInfo(keyInfo []*common.Key, conflictKey chan<- *common.Key, sourceClient *client.RedisClient, targetClient *client.RedisClient) {
14 | p.FetchTypeAndLen(keyInfo, sourceClient, targetClient)
15 |
16 | // re-check ttl on the source side when key missing on the target side
17 | p.RecheckTTL(keyInfo, sourceClient)
18 |
19 | // compare, filter
20 | for i := 0; i < len(keyInfo); i++ {
21 | // 取type时,source redis上key已经被删除,认为是没有不一致
22 | if keyInfo[i].Tp == common.NoneKeyType {
23 | keyInfo[i].ConflictType = common.NoneConflict
24 | p.IncrKeyStat(keyInfo[i])
25 | continue
26 | }
27 |
28 | // 在fetch type和之后的轮次扫描之间源端类型更改,不处理这种错误
29 | if keyInfo[i].SourceAttr.ItemCount == common.TypeChanged {
30 | continue
31 | }
32 |
33 | // key lack in target redis
34 | if keyInfo[i].TargetAttr.ItemCount == 0 && keyInfo[i].TargetAttr.ItemCount != keyInfo[i].SourceAttr.ItemCount {
35 | keyInfo[i].ConflictType = common.LackTargetConflict
36 | p.IncrKeyStat(keyInfo[i])
37 | conflictKey <- keyInfo[i]
38 | continue
39 | }
40 |
41 | // type mismatch, ItemCount == -1,表明key在target redis上的type与source不同
42 | if keyInfo[i].TargetAttr.ItemCount == common.TypeChanged {
43 | keyInfo[i].ConflictType = common.TypeConflict
44 | p.IncrKeyStat(keyInfo[i])
45 | conflictKey <- keyInfo[i]
46 | continue
47 | }
48 |
49 | // string, strlen mismatch, 先过滤一遍
50 | if keyInfo[i].SourceAttr.ItemCount != keyInfo[i].TargetAttr.ItemCount {
51 | keyInfo[i].ConflictType = common.ValueConflict
52 | p.IncrKeyStat(keyInfo[i])
53 | conflictKey <- keyInfo[i]
54 | continue
55 | }
56 | } // end of for i := 0; i < len(keyInfo); i++
57 | }
--------------------------------------------------------------------------------
/src/full_check/client/address.go:
--------------------------------------------------------------------------------
1 | package client
2 |
3 | import (
4 | "strings"
5 | "fmt"
6 |
7 | "full_check/common"
8 | )
9 |
const (
	// AddressSplitter separates an optional role prefix from the node list,
	// e.g. "master@host1:6379;host2:6379".
	AddressSplitter        = "@"
	// AddressClusterSplitter separates nodes inside a cluster address list.
	AddressClusterSplitter = ";"

	RoleMaster = "master"
	RoleSlave  = "slave"
)
17 |
18 | func HandleAddress(address, password, authType string) ([]string, error) {
19 | if strings.Contains(address, AddressSplitter) {
20 | arr := strings.Split(address, AddressSplitter)
21 | if len(arr) != 2 {
22 | return nil, fmt.Errorf("redis address[%v] length[%v] != 2", address, len(arr))
23 | }
24 |
25 | if arr[0] != RoleMaster && arr[0] != RoleSlave && arr[0] != "" {
26 | return nil, fmt.Errorf("unknown role type[%v], should be 'master' or 'slave'", arr[0])
27 | }
28 |
29 | clusterList := strings.Split(arr[1], AddressClusterSplitter)
30 |
31 | role := arr[0]
32 | if role == "" {
33 | role = RoleMaster
34 | }
35 |
36 | return fetchNodeList(clusterList[0], password, authType, role)
37 | } else {
38 | clusterList := strings.Split(address, AddressClusterSplitter)
39 | if len(clusterList) <= 1 {
40 | return clusterList, nil
41 | }
42 |
43 | // fetch master
44 | masterList, err := fetchNodeList(clusterList[0], password, authType, common.TypeMaster)
45 | if err != nil {
46 | return nil, err
47 | }
48 | // compare master list equal
49 | if common.CompareUnorderedList(masterList, clusterList) {
50 | return clusterList, nil
51 | }
52 |
53 | slaveList, err := fetchNodeList(clusterList[0], password, authType, common.TypeSlave)
54 | if err != nil {
55 | return nil, err
56 | }
57 | // compare slave list equal
58 | if common.CompareUnorderedList(slaveList, clusterList) {
59 | return clusterList, nil
60 | }
61 |
62 | return nil, fmt.Errorf("if type isn't cluster, should only used 1 node. if type is cluster, " +
63 | "input list should be all master or all slave: 'master1;master2;master3...' or " +
64 | "'slave1;slave2;slave3...'")
65 | }
66 | }
67 |
68 | func fetchNodeList(oneNode, password, authType, role string) ([]string, error) {
69 | // create client to fetch
70 | client, err := NewRedisClient(RedisHost{
71 | Addr: []string{oneNode},
72 | Password: password,
73 | Authtype: authType,
74 | }, 0)
75 | if err != nil {
76 | return nil, fmt.Errorf("fetch cluster info failed[%v]", err)
77 | }
78 |
79 | if addressList, err := common.GetAllClusterNode(client.conn, role, "address"); err != nil {
80 | return nil, fmt.Errorf("fetch cluster node failed[%v]", err)
81 | } else {
82 | return addressList, nil
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/src/full_check/client/client.go:
--------------------------------------------------------------------------------
1 | package client
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net"
7 | "strconv"
8 | "strings"
9 | "time"
10 | "errors"
11 |
12 | "full_check/common"
13 |
14 | "github.com/gomodule/redigo/redis"
15 | redigoCluster "github.com/najoast/redis-go-cluster"
16 | "reflect"
17 | )
18 |
var (
	// emptyError signals that a pipeline helper was invoked with zero commands.
	emptyError = errors.New("empty")
)
22 |
// RedisHost describes one redis endpoint (or a cluster's seed node list)
// together with its credentials and connection options.
type RedisHost struct {
	Addr         []string
	Password     string // may be "user:pass"; split on ':' before AUTH
	TimeoutMs    uint64 // 0 means no dial/read/write timeout
	Role         string // "source" or "target"
	Authtype     string // "auth" or "adminauth"
	DBType       int
	DBFilterList map[int]struct{} // whitelist
}
32 |
// String returns a human-readable identity: the role plus the address list.
func (p RedisHost) String() string {
	return fmt.Sprintf("%s redis addr: %s", p.Role, p.Addr)
}
36 |
// IsCluster reports whether this host is configured as a redis cluster.
func (p RedisHost) IsCluster() bool {
	return p.DBType == common.TypeCluster
}
40 |
// RedisClient is a lazily-connecting wrapper around a redigo connection
// (single node or cluster) that reconnects and retries on network errors.
type RedisClient struct {
	redisHost RedisHost
	db        int32      // logical db selected after connecting (non-cluster only)
	conn      redis.Conn // nil until Connect; reset to nil on network errors
}
46 |
// String delegates to the underlying host's description.
func (p RedisClient) String() string {
	return p.redisHost.String()
}
50 |
51 | func NewRedisClient(redisHost RedisHost, db int32) (RedisClient, error) {
52 | rc := RedisClient{
53 | redisHost: redisHost,
54 | db: db,
55 | }
56 |
57 | // send ping command first
58 | ret, err := rc.Do("ping")
59 | if err == nil && ret.(string) != "PONG" {
60 | return RedisClient{}, fmt.Errorf("ping return invaild[%v]", ret)
61 | }
62 | return rc, err
63 | }
64 |
65 | func (p *RedisClient) CheckHandleNetError(err error) bool {
66 | if err == io.EOF { // 对方断开网络
67 | if p.conn != nil {
68 | p.conn.Close()
69 | p.conn = nil
70 | // 网络相关错误1秒后重试
71 | time.Sleep(time.Second)
72 | }
73 | return true
74 | } else if _, ok := err.(net.Error); ok {
75 | if p.conn != nil {
76 | p.conn.Close()
77 | p.conn = nil
78 | // 网络相关错误1秒后重试
79 | time.Sleep(time.Second)
80 | }
81 | return true
82 | }
83 | return false
84 | }
85 |
86 | func (p *RedisClient) Connect() error {
87 | if p.conn != nil {
88 | return nil
89 | }
90 |
91 | var err error
92 | if p.redisHost.IsCluster() == false {
93 | // single db or proxy
94 | if p.redisHost.TimeoutMs == 0 {
95 | p.conn, err = redis.Dial("tcp", p.redisHost.Addr[0])
96 | } else {
97 | p.conn, err = redis.DialTimeout("tcp", p.redisHost.Addr[0], time.Millisecond*time.Duration(p.redisHost.TimeoutMs),
98 | time.Millisecond*time.Duration(p.redisHost.TimeoutMs), time.Millisecond*time.Duration(p.redisHost.TimeoutMs))
99 | }
100 | } else {
101 | // cluster
102 | cluster, err := redigoCluster.NewCluster(
103 | &redigoCluster.Options{
104 | StartNodes: p.redisHost.Addr,
105 | ConnTimeout: time.Duration(p.redisHost.TimeoutMs) * time.Millisecond,
106 | ReadTimeout: 0,
107 | WriteTimeout: 0,
108 | KeepAlive: 16,
109 | AliveTime: 60 * time.Second,
110 | Password: p.redisHost.Password,
111 | })
112 | if err == nil {
113 | p.conn = common.NewClusterConn(cluster, 0)
114 | }
115 | }
116 | if err != nil {
117 | return err
118 | }
119 |
120 | if len(p.redisHost.Password) != 0 {
121 | var args []interface{}
122 | for _, arg := range strings.Split(p.redisHost.Password, ":") {
123 | args = append(args, arg)
124 | }
125 | if _, err := p.conn.Do(p.redisHost.Authtype, args...); err != nil {
126 | return err
127 | }
128 | }
129 |
130 | if p.redisHost.DBType != common.TypeCluster {
131 | _, err = p.conn.Do("select", p.db)
132 | if err != nil {
133 | return err
134 | }
135 | }
136 |
137 | if p.conn == nil {
138 | return fmt.Errorf("connect host[%v] failed: unknown", p.redisHost.Addr)
139 | }
140 | return nil
141 | }
142 |
143 | func (p *RedisClient) Do(commandName string, args ...interface{}) (interface{}, error) {
144 | var err error
145 | var result interface{}
146 | for tryCount := 0; tryCount < common.MaxRetryCount; tryCount++ {
147 | if p.conn == nil {
148 | err = p.Connect()
149 | if err != nil {
150 | if p.CheckHandleNetError(err) {
151 | continue
152 | }
153 | return nil, err
154 | }
155 | }
156 |
157 | result, err = p.conn.Do(commandName, args...)
158 | if err != nil {
159 | if p.CheckHandleNetError(err) {
160 | continue
161 | }
162 | return nil, err
163 | }
164 | break
165 | } // end for {}
166 | return result, err
167 | }
168 |
169 | func (p *RedisClient) Close() {
170 | if p.conn != nil {
171 | p.conn.Close()
172 | p.conn = nil
173 | }
174 | }
175 |
// combine is one pipelined request: a command name plus its arguments.
// It is the unit consumed by PipeRawCommand and the Pipe*Command helpers.
type combine struct {
	command string
	params  []interface{}
}

// String renders the command and its parameters as one space-separated
// line, e.g. "zrange key 0 -1", for use in error messages.
// BUGFIX: parameters are not always []byte — PipeValueCommand passes string
// literals like "0", "-1" and "WITHSCORES" — so the old unconditional
// ele.([]byte) assertion could panic while formatting an error. Handle
// []byte and string explicitly and fall back to %v for anything else.
func (c combine) String() string {
	all := make([]string, 0, len(c.params)+1)
	all = append(all, c.command)
	for _, ele := range c.params {
		switch v := ele.(type) {
		case []byte:
			all = append(all, string(v))
		case string:
			all = append(all, v)
		default:
			all = append(all, fmt.Sprintf("%v", v))
		}
	}
	return strings.Join(all, " ")
}
189 |
190 | func (p *RedisClient) PipeRawCommand(commands []combine, specialErrorPrefix string) ([]interface{}, error) {
191 | if len(commands) == 0 {
192 | common.Logger.Warnf("input commands length is 0")
193 | return nil, emptyError
194 | }
195 |
196 | result := make([]interface{}, len(commands))
197 | var err error
198 | begin:
199 | for tryCount := 0; tryCount < common.MaxRetryCount; tryCount++ {
200 | if p.conn == nil {
201 | err = p.Connect()
202 | if err != nil {
203 | if p.CheckHandleNetError(err) {
204 | continue
205 | }
206 | common.Logger.Errorf("connect failed[%v]", err)
207 | return nil, err
208 | }
209 | }
210 |
211 | for _, ele := range commands {
212 | err = p.conn.Send(ele.command, ele.params...)
213 | if err != nil {
214 | if p.CheckHandleNetError(err) {
215 | continue begin
216 | }
217 | common.Logger.Errorf("send command[%v] failed[%v]", ele.command, err)
218 | return nil, err
219 | }
220 | }
221 | err = p.conn.Flush()
222 | if err != nil {
223 | if p.CheckHandleNetError(err) {
224 | continue
225 | }
226 | common.Logger.Errorf("flush failed[%v]", err)
227 | return nil, err
228 | }
229 |
230 | for i := 0; i < len(commands); i++ {
231 | reply, err := p.conn.Receive()
232 | if err != nil {
233 | if p.CheckHandleNetError(err) {
234 | continue begin
235 | }
236 | // 此处处理不太好,但是别人代码写死了,我只能这么改了
237 | if strings.HasPrefix(err.Error(), specialErrorPrefix) {
238 | // this error means the type between initial 'scan' and the following round comparison
239 | // is different. we should marks this.
240 | result[i] = common.TypeChanged
241 | continue
242 | }
243 | common.Logger.Errorf("receive command[%v] failed[%v]", commands[i], err)
244 | return nil, err
245 | }
246 | result[i] = reply
247 | }
248 | break
249 | } // end for {}
250 | return result, nil
251 | }
252 |
253 | func (p *RedisClient) PipeTypeCommand(keyInfo []*common.Key) ([]string, error) {
254 | commands := make([]combine, len(keyInfo))
255 | for i, key := range keyInfo {
256 | commands[i] = combine{
257 | command: "type",
258 | params: []interface{}{key.Key},
259 | }
260 | }
261 |
262 | result := make([]string, len(keyInfo))
263 | if ret, err := p.PipeRawCommand(commands, ""); err != nil {
264 | if err != emptyError {
265 | common.Logger.Errorf("run PipeRawCommand with commands[%v] failed[%v]", commands, err)
266 | return nil, err
267 | }
268 | } else {
269 | for i, ele := range ret {
270 | if v, ok := ele.(string); ok {
271 | result[i] = v
272 | } else {
273 | err := fmt.Errorf("run PipeRawCommand with commands[%s] return element[%v] isn't type string[%v]",
274 | printCombinList(commands), ele, reflect.TypeOf(ele))
275 | common.Logger.Error(err)
276 | return nil, err
277 | }
278 | }
279 | }
280 | return result, nil
281 | }
282 |
283 | func (p *RedisClient) PipeExistsCommand(keyInfo []*common.Key) ([]int64, error) {
284 | commands := make([]combine, len(keyInfo))
285 | for i, key := range keyInfo {
286 | commands[i] = combine{
287 | command: "exists",
288 | params: []interface{}{key.Key},
289 | }
290 | }
291 |
292 | result := make([]int64, len(keyInfo))
293 | if ret, err := p.PipeRawCommand(commands, ""); err != nil {
294 | if err != emptyError {
295 | return nil, err
296 | }
297 | } else {
298 | for i, ele := range ret {
299 | if v, ok := ele.(int64); ok {
300 | result[i] = v
301 | } else {
302 | err := fmt.Errorf("run PipeRawCommand with commands[%s] return element[%v] isn't type int64[%v]",
303 | printCombinList(commands), ele, reflect.TypeOf(ele))
304 | common.Logger.Error(err)
305 | return nil, err
306 | }
307 | }
308 | }
309 | return result, nil
310 | }
311 |
312 | func (p *RedisClient) PipeLenCommand(keyInfo []*common.Key) ([]int64, error) {
313 | commands := make([]combine, len(keyInfo))
314 | for i, key := range keyInfo {
315 | commands[i] = combine{
316 | command: key.Tp.FetchLenCommand,
317 | params: []interface{}{key.Key},
318 | }
319 | }
320 |
321 | result := make([]int64, len(keyInfo))
322 | if ret, err := p.PipeRawCommand(commands, "WRONGTYPE"); err != nil {
323 | if err != emptyError {
324 | return nil, err
325 | }
326 | } else {
327 | for i, ele := range ret {
328 | if v, ok := ele.(int64); ok {
329 | result[i] = v
330 | } else {
331 | err := fmt.Errorf("run PipeRawCommand with commands[%s] return element[%v] isn't type int64[%v]",
332 | printCombinList(commands), ele, reflect.TypeOf(ele))
333 | common.Logger.Error(err)
334 | return nil, err
335 | }
336 | }
337 | }
338 | return result, nil
339 | }
340 |
341 | func (p *RedisClient) PipeTTLCommand(keyInfo []*common.Key) ([]bool, error) {
342 | commands := make([]combine, len(keyInfo))
343 | for i, key := range keyInfo {
344 | commands[i] = combine{
345 | command: "ttl",
346 | params: []interface{}{key.Key},
347 | }
348 | }
349 |
350 | result := make([]bool, len(keyInfo))
351 | if ret, err := p.PipeRawCommand(commands, ""); err != nil {
352 | if err != emptyError {
353 | return nil, err
354 | }
355 | } else {
356 | for i, ele := range ret {
357 | if v, ok := ele.(int64); ok {
358 | result[i] = v == 0
359 | } else {
360 | err := fmt.Errorf("run PipeRawCommand with commands[%s] return element[%v] isn't type int64[%v]",
361 | printCombinList(commands), ele, reflect.TypeOf(ele))
362 | common.Logger.Error(err)
363 | return nil, err
364 | }
365 | }
366 | }
367 | return result, nil
368 | }
369 |
370 | func (p *RedisClient) PipeValueCommand(keyInfo []*common.Key) ([]interface{}, error) {
371 | commands := make([]combine, len(keyInfo))
372 | for i, key := range keyInfo {
373 | switch key.Tp {
374 | case common.StringKeyType:
375 | commands[i] = combine{
376 | command: "get",
377 | params: []interface{}{key.Key},
378 | }
379 | case common.HashKeyType:
380 | commands[i] = combine{
381 | command: "hgetall",
382 | params: []interface{}{key.Key},
383 | }
384 | case common.ListKeyType:
385 | commands[i] = combine{
386 | command: "lrange",
387 | params: []interface{}{key.Key, "0", "-1"},
388 | }
389 | case common.SetKeyType:
390 | commands[i] = combine{
391 | command: "smembers",
392 | params: []interface{}{key.Key},
393 | }
394 | case common.ZsetKeyType:
395 | commands[i] = combine{
396 | command: "zrange",
397 | params: []interface{}{key.Key, "0", "-1", "WITHSCORES"},
398 | }
399 | default:
400 | commands[i] = combine{
401 | command: "get",
402 | params: []interface{}{key.Key},
403 | }
404 | }
405 | }
406 |
407 | if ret, err := p.PipeRawCommand(commands, ""); err != nil && err != emptyError {
408 | return nil, err
409 | } else {
410 | return ret, nil
411 | }
412 | }
413 |
414 | func (p *RedisClient) PipeSismemberCommand(key []byte, field [][]byte) ([]interface{}, error) {
415 | commands := make([]combine, len(field))
416 | for i, ele := range field {
417 | commands[i] = combine{
418 | command: "SISMEMBER",
419 | params: []interface{}{key, ele},
420 | }
421 | }
422 |
423 | if ret, err := p.PipeRawCommand(commands, ""); err != nil && err != emptyError {
424 | return nil, err
425 | } else {
426 | return ret, nil
427 | }
428 | }
429 |
430 | func (p *RedisClient) PipeZscoreCommand(key []byte, field [][]byte) ([]interface{}, error) {
431 | commands := make([]combine, len(field))
432 | for i, ele := range field {
433 | commands[i] = combine{
434 | command: "ZSCORE",
435 | params: []interface{}{key, ele},
436 | }
437 | }
438 |
439 | if ret, err := p.PipeRawCommand(commands, ""); err != nil && err != emptyError {
440 | return nil, err
441 | } else {
442 | return ret, nil
443 | }
444 | }
445 |
// FetchValueUseScan_Hash_Set_SortedSet fetches the full value of one
// hash/set/zset key by iterating hscan/sscan/zscan with the given per-round
// COUNT. Hash and zset replies arrive as alternating field/value (or
// member/score) pairs and are folded into a field -> value map; set members
// become map keys with nil values. Any other key type is an error.
func (p *RedisClient) FetchValueUseScan_Hash_Set_SortedSet(oneKeyInfo *common.Key, onceScanCount int) (map[string][]byte, error) {
	var scanCmd string
	switch oneKeyInfo.Tp {
	case common.HashKeyType:
		scanCmd = "hscan"
	case common.SetKeyType:
		scanCmd = "sscan"
	case common.ZsetKeyType:
		scanCmd = "zscan"
	default:
		return nil, fmt.Errorf("key type %s is not hash/set/zset", oneKeyInfo.Tp)
	}
	cursor := 0
	value := make(map[string][]byte)
	for {
		reply, err := p.Do(scanCmd, oneKeyInfo.Key, cursor, "count", onceScanCount)
		if err != nil {
			return nil, err
		}

		// a *scan reply is a 2-element array: [next-cursor, [elements...]]
		replyList, ok := reply.([]interface{})
		if ok == false || len(replyList) != 2 {
			return nil, fmt.Errorf("%s %s %d count %d failed, result: %+v", scanCmd, string(oneKeyInfo.Key),
				cursor, onceScanCount, reply)
		}

		cursorBytes, ok := replyList[0].([]byte)
		if ok == false {
			return nil, fmt.Errorf("%s %s %d count %d failed, result: %+v", scanCmd, string(oneKeyInfo.Key),
				cursor, onceScanCount, reply)
		}

		// the cursor comes back as a decimal string
		cursor, err = strconv.Atoi(string(cursorBytes))
		if err != nil {
			return nil, err
		}

		keylist, ok := replyList[1].([]interface{})
		if ok == false {
			panic(common.Logger.Criticalf("%s %s failed, result: %+v", scanCmd, string(oneKeyInfo.Key), reply))
		}
		switch oneKeyInfo.Tp {
		case common.HashKeyType:
			fallthrough
		case common.ZsetKeyType:
			// alternating field, value/score entries
			for i := 0; i < len(keylist); i += 2 {
				value[string(keylist[i].([]byte))] = keylist[i+1].([]byte)
			}
		case common.SetKeyType:
			for i := 0; i < len(keylist); i++ {
				value[string(keylist[i].([]byte))] = nil
			}
		default:
			// unreachable in practice: the type was validated above
			return nil, fmt.Errorf("key type %s is not hash/set/zset", oneKeyInfo.Tp)
		}

		// cursor 0 means the server-side iteration is complete
		if cursor == 0 {
			break
		}
	} // end for{}
	return value, nil
}
508 |
509 | func printCombinList(input []combine) string {
510 | ret := make([]string, 0, len(input))
511 | for _, ele := range input {
512 | ret = append(ret, ele.String())
513 | }
514 | return strings.Join(ret, "; ")
515 | }
516 |
--------------------------------------------------------------------------------
/src/full_check/client/db.go:
--------------------------------------------------------------------------------
1 | package client
2 |
3 | import (
4 | "strconv"
5 | "fmt"
6 |
7 | "full_check/common"
8 |
9 | "github.com/gomodule/redigo/redis"
10 | )
11 |
12 | /*
13 | * Get base db info.
14 | * Return:
15 | * map[int32]int64: logical db node map.
16 | * []string: physical db nodes.
17 | */
18 | func (p *RedisClient) FetchBaseInfo(isCluster bool) (map[int32]int64, []string, error) {
19 | var logicalDBMap map[int32]int64
20 |
21 | if !isCluster {
22 | // get keyspace
23 | keyspaceContent, err := p.Do("info", "Keyspace")
24 | if err != nil {
25 | return nil, nil, fmt.Errorf("get keyspace failed[%v]", err)
26 | }
27 |
28 | // parse to map
29 | logicalDBMap, err = common.ParseKeyspace(keyspaceContent.([]byte))
30 | if err != nil {
31 | return nil, nil, fmt.Errorf("parse keyspace failed[%v]", err)
32 | }
33 |
34 | // set to 1 logical db if source is tencentProxy and map length is null
35 | if len(logicalDBMap) == 0 && p.redisHost.DBType == common.TypeTencentProxy {
36 | logicalDBMap[0] = 0
37 | }
38 | } else {
39 | // is cluster
40 | logicalDBMap = make(map[int32]int64)
41 | logicalDBMap[0] = 0
42 | }
43 |
44 | // remove db that isn't in DBFilterList(white list)
45 | if len(p.redisHost.DBFilterList) != 0 {
46 | for key := range logicalDBMap {
47 | if _, ok := p.redisHost.DBFilterList[int(key)]; !ok {
48 | delete(logicalDBMap, key)
49 | }
50 | }
51 | }
52 |
53 |
54 | physicalDBList := make([]string, 0)
55 | // get db list
56 | switch p.redisHost.DBType {
57 | case common.TypeAliyunProxy:
58 | info, err := redis.Bytes(p.Do("info", "Cluster"))
59 | if err != nil {
60 | return nil, nil, fmt.Errorf("get cluster info failed[%v]", err)
61 | }
62 |
63 | result := common.ParseInfo(info)
64 | if count, err := strconv.ParseInt(result["nodecount"], 10, 0); err != nil {
65 | return nil, nil, fmt.Errorf("parse node count failed[%v]", err)
66 | } else if count <= 0 {
67 | return nil, nil, fmt.Errorf("source node count[%v] illegal", count)
68 | } else {
69 | for id := int64(0); id < count; id++ {
70 | physicalDBList = append(physicalDBList, fmt.Sprintf("%v", id))
71 | }
72 | }
73 | case common.TypeTencentProxy:
74 | var err error
75 | physicalDBList, err = common.GetAllClusterNode(p.conn, "master", "id")
76 | if err != nil {
77 | return nil, nil, fmt.Errorf("get tencent cluster node failed[%v]", err)
78 | }
79 | case common.TypeDB:
80 | // do nothing
81 | physicalDBList = append(physicalDBList, "meaningless")
82 | case common.TypeCluster:
83 | // equal to the source ip list
84 | physicalDBList = p.redisHost.Addr
85 | default:
86 | return nil, nil, fmt.Errorf("unknown redis db type[%v]", p.redisHost.DBType)
87 | }
88 |
89 | return logicalDBMap, physicalDBList, nil
90 | }
91 |
--------------------------------------------------------------------------------
/src/full_check/common/cluster.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | redigoCluster "github.com/najoast/redis-go-cluster"
5 | redigo "github.com/gomodule/redigo/redis"
6 | )
7 |
const(
	// RecvChanSize is the default capacity of ClusterConn.recvChan, i.e.
	// how many pipelined replies may be buffered between Flush and Receive.
	RecvChanSize = 4096
)
11 |
/* ClusterConn implements redigo.Conn (https://github.com/garyburd/redigo)
 * on top of redis-go-cluster (https://github.com/chasex/redis-go-cluster).
 * The embedded driver does not itself satisfy redigo.Conn, so "Err",
 * "Send", "Flush" and "Receive" are implemented here: Send buffers commands
 * into batcher, Flush runs the batch and queues the replies into recvChan,
 * and Receive pops them one at a time.
 */
type ClusterConn struct {
	client *redigoCluster.Cluster
	recvChan chan reply
	batcher *redigoCluster.Batch
}
22 |
// reply pairs one command's answer with its error, for queueing in
// ClusterConn.recvChan between Flush and Receive.
type reply struct {
	answer interface{}
	err error
}
27 |
28 | func NewClusterConn(clusterClient *redigoCluster.Cluster, recvChanSize int) redigo.Conn {
29 | if recvChanSize == 0 {
30 | recvChanSize = RecvChanSize
31 | }
32 |
33 | return &ClusterConn{
34 | client: clusterClient,
35 | recvChan: make(chan reply, recvChanSize),
36 | }
37 | }
38 |
// Close shuts down the underlying cluster client; the returned error is
// always nil (the driver's Close reports nothing).
func (cc *ClusterConn) Close() error {
	cc.client.Close()
	return nil
}
43 |
// Err always reports nil: failures surface through Do/Flush/Receive rather
// than through a sticky connection error.
func (cc *ClusterConn) Err() error {
	return nil
}
47 |
// Do forwards a single, non-pipelined command to the cluster client.
func (cc *ClusterConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
	return cc.client.Do(commandName, args...)
}
51 |
52 | // just add into batcher
53 | func (cc *ClusterConn) Send(commandName string, args ...interface{}) error {
54 | if cc.batcher == nil {
55 | cc.batcher = cc.client.NewBatch()
56 | }
57 | return cc.batcher.Put(commandName, args...)
58 | }
59 |
// Flush runs the accumulated batch and queues every reply into recvChan
// for Receive to drain; the batcher is reset whether or not the batch
// succeeds.
func (cc *ClusterConn) Flush() error {
	ret, err := cc.client.RunBatch(cc.batcher)
	defer func() {
		cc.batcher = nil // reset batcher
	}()

	if err != nil {
		// NOTE(review): this error entry stays queued even though the error
		// is also returned; a caller that retries after a failed Flush may
		// later Receive this stale entry first — confirm this is intended.
		cc.recvChan <- reply{
			answer: nil,
			err: err,
		}

		return err
	}

	// for redis-go-cluster driver, "Receive" function returns all the replies
	// once flushed. However, this action is different with redigo driver that
	// "Receive" only returns 1 reply each time.

	retLength := len(ret)
	availableSize := cap(cc.recvChan) - len(cc.recvChan)
	if availableSize < retLength {
		// the sends below will block until Receive drains some entries
		Logger.Warnf("available channel size[%v] less than current returned batch size[%v]", availableSize, retLength)
	}
	// Logger.Debugf("cluster flush batch with size[%v], return replies size[%v]", cc.batcher.GetBatchSize(), retLength)

	for _, ele := range ret {
		cc.recvChan <- reply{
			answer: ele,
			err: err,
		}
	}

	return err
}
96 |
// Receive pops one queued reply, blocking until Flush has produced it.
func (cc *ClusterConn) Receive() (reply interface{}, err error) {
	ret := <- cc.recvChan
	return ret.answer, ret.err
}
--------------------------------------------------------------------------------
/src/full_check/common/command.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "strconv"
7 |
8 | redigo "github.com/gomodule/redigo/redis"
9 | )
10 |
// ClusterNodeInfo is one parsed line of "cluster nodes" output; all fields
// are kept as raw strings.
type ClusterNodeInfo struct {
	Id string
	Address string // host:port, with the "@cport" cluster-bus suffix stripped
	Flags string // role, e.g. "master"/"slave" (a "myself," prefix is removed)
	Master string // master id for replicas, "-" for masters
	PingSent string
	PongRecv string
	ConfigEpoch string
	LinkStat string // link state, e.g. "connected"
	Slot string
}
22 |
// ParseKeyspace turns the payload of "INFO Keyspace" into a db-index ->
// key-count map. The content must begin with "# Keyspace"; data lines look
// like "db0:keys=18,expires=0,avg_ttl=0".
func ParseKeyspace(content []byte) (map[int32]int64, error) {
	if !bytes.HasPrefix(content, []byte("# Keyspace")) {
		return nil, fmt.Errorf("invalid info Keyspace: %s", string(content))
	}

	reply := make(map[int32]int64)
	for _, line := range bytes.Split(content, []byte("\n")) {
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, []byte("db")) {
			continue
		}
		// line "db0:keys=18,expires=0,avg_ttl=0"
		items := bytes.Split(line, []byte(":"))
		db, err := strconv.Atoi(string(items[0][2:]))
		if err != nil {
			return nil, err
		}
		nums := bytes.Split(items[1], []byte(","))
		if !bytes.HasPrefix(nums[0], []byte("keys=")) {
			return nil, fmt.Errorf("invalid info Keyspace: %s", string(content))
		}
		keysNum, err := strconv.ParseInt(string(nums[0][5:]), 10, 0)
		if err != nil {
			return nil, err
		}
		reply[int32(db)] = keysNum
	}
	return reply, nil
}
52 |
53 | /*
54 | * 10.1.1.1:21331> cluster nodes
55 | * d49a4c7b516b8da222d46a0a589b77f381285977 10.1.1.1:21333@31333 master - 0 1557996786000 3 connected 10923-16383
56 | * f23ba7be501b2dcd4d6eeabd2d25551513e5c186 10.1.1.1:21336@31336 slave d49a4c7b516b8da222d46a0a589b77f381285977 0 1557996785000 6 connected
57 | * 75fffcd521738606a919607a7ddd52bcd6d65aa8 10.1.1.1:21331@31331 myself,master - 0 1557996784000 1 connected 0-5460
58 | * da3dd51bb9cb5803d99942e0f875bc5f36dc3d10 10.1.1.1:21332@31332 master - 0 1557996786260 2 connected 5461-10922
59 | * eff4e654d3cc361a8ec63640812e394a8deac3d6 10.1.1.1:21335@31335 slave da3dd51bb9cb5803d99942e0f875bc5f36dc3d10 0 1557996787261 5 connected
60 | * 486e081f8d47968df6a7e43ef9d3ba93b77d03b2 10.1.1.1:21334@31334 slave 75fffcd521738606a919607a7ddd52bcd6d65aa8 0 1557996785258 4 connected
61 | */
62 | func ParseClusterNode(content []byte) []*ClusterNodeInfo {
63 | lines := bytes.Split(content, []byte("\n"))
64 | ret := make([]*ClusterNodeInfo, 0, len(lines))
65 | for _, line := range lines {
66 | if bytes.Compare(line, []byte{}) == 0 {
67 | continue
68 | }
69 |
70 | items := bytes.Split(line, []byte(" "))
71 |
72 | address := bytes.Split(items[1], []byte{'@'})
73 | flag := bytes.Split(items[2], []byte{','})
74 | var role string
75 | if len(flag) > 1 {
76 | role = string(flag[1])
77 | } else {
78 | role = string(flag[0])
79 | }
80 | var slot string
81 | if len(items) > 7 {
82 | slot = string(items[7])
83 | }
84 | ret = append(ret, &ClusterNodeInfo{
85 | Id: string(items[0]),
86 | Address: string(address[0]),
87 | Flags: role,
88 | Master: string(items[3]),
89 | PingSent: string(items[4]),
90 | PongRecv: string(items[5]),
91 | ConfigEpoch: string(items[6]),
92 | LinkStat: string(items[7]),
93 | Slot: slot,
94 | })
95 | }
96 | return ret
97 | }
98 |
99 | // needMaster: true(master), false(slave)
100 | func ClusterNodeChoose(input []*ClusterNodeInfo, role string) []*ClusterNodeInfo {
101 | ret := make([]*ClusterNodeInfo, 0, len(input))
102 | for _, ele := range input {
103 | if ele.Flags == TypeMaster && role == TypeMaster ||
104 | ele.Flags == TypeSlave && role == TypeSlave ||
105 | role == TypeAll {
106 | ret = append(ret, ele)
107 | }
108 | }
109 | return ret
110 | }
111 |
112 | // return id list if choose == "id", otherwise address
113 | func GetAllClusterNode(client redigo.Conn, role string, choose string) ([]string, error) {
114 | ret, err := client.Do("cluster", "nodes")
115 | if err != nil {
116 | return nil, err
117 | }
118 |
119 | nodeList := ParseClusterNode(ret.([]byte))
120 | nodeListChoose := ClusterNodeChoose(nodeList, role)
121 |
122 | result := make([]string, 0, len(nodeListChoose))
123 | for _, ele := range nodeListChoose {
124 | if choose == "id" {
125 | result = append(result, ele.Id)
126 | } else {
127 | result = append(result, ele.Address)
128 | }
129 | }
130 |
131 | return result, nil
132 | }
133 |
// CompareUnorderedList reports whether a and b contain exactly the same
// elements regardless of order.
// BUGFIX: the old set-based implementation ignored multiplicity, so e.g.
// ["x","y"] vs ["x","x"] compared equal. Counting occurrences fixes that
// while returning the same answer for duplicate-free inputs.
func CompareUnorderedList(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}

	counts := make(map[string]int, len(a))
	for _, x := range a {
		counts[x]++
	}
	for _, x := range b {
		if counts[x] == 0 {
			return false
		}
		counts[x]--
	}
	return true
}
--------------------------------------------------------------------------------
/src/full_check/common/common.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "github.com/cihub/seelog"
5 | "fmt"
6 | )
7 |
const (
	MaxRetryCount = 20 // client attribute: max reconnect/retry attempts per operation
	StatRollFrequency = 2 // client attribute

	TypeChanged int64 = -1 // marks the given key type is change, e.g. from string to list

	// db type of one endpoint
	TypeDB = 0 // standalone db
	TypeCluster = 1 // open-source redis cluster
	TypeAliyunProxy = 2 // aliyun proxy
	TypeTencentProxy = 3 // tencent cloud proxy

	// node roles used when filtering "cluster nodes" output
	TypeMaster = "master"
	TypeSlave = "slave"
	TypeAll = "all"

	Splitter = ";" // separator for multi-value options, e.g. the db filter list
)
26 |
var (
	// BigKeyThreshold: keys above this size get special (incremental)
	// handling. NOTE(review): inferred from the name and default value —
	// confirm against the checker code that reads it.
	BigKeyThreshold int64 = 16384
	// Logger is the process-wide seelog logger, initialized via InitLog.
	Logger seelog.LoggerInterface
)
31 |
32 | /*
33 | * @Vinllen Chen. check filter hit the key.
34 | * return: true/false. true means pass.
35 | * Actually, it's better to use trie tree instead of for-loop brute way. The reason I choose this is because
36 | * input filterList is not long in general, and I'm a lazy guy~.
37 | */
38 | func CheckFilter(filterTree *Trie, keyBytes []byte) bool {
39 | if filterTree == nil { // all pass when filter list is empty
40 | return true
41 | }
42 | return filterTree.Search(keyBytes)
43 | }
44 |
// HandleLogLevel maps a user-facing log level to the comma-separated seelog
// level list that enables it and everything more severe. An empty level
// means "info"; unknown levels are an error.
func HandleLogLevel(logLevel string) (string, error) {
	levels := map[string]string{
		"debug": "debug,info,warn,error,critical",
		"":      "info,warn,error,critical",
		"info":  "info,warn,error,critical",
		"warn":  "warn,error,critical",
		"error": "error,critical",
	}
	if v, ok := levels[logLevel]; ok {
		return v, nil
	}
	return "", fmt.Errorf("unknown log level[%v]", logLevel)
}
--------------------------------------------------------------------------------
/src/full_check/common/helper.go:
--------------------------------------------------------------------------------
1 | package common
2 |
// ValueHelper_Hash_SortedSet converts a flat field/value reply (as returned
// by hgetall or "zrange ... WITHSCORES") into a field -> value map.
// A nil or empty reply yields nil.
func ValueHelper_Hash_SortedSet(reply interface{}) map[string][]byte {
	if reply == nil {
		return nil
	}

	items := reply.([]interface{})
	if len(items) == 0 {
		return nil
	}
	value := make(map[string][]byte, len(items)/2)
	for i := 0; i < len(items); i += 2 {
		value[string(items[i].([]byte))] = items[i+1].([]byte)
	}
	return value
}
18 |
// ValueHelper_Set converts an smembers reply into a member -> nil map (the
// map is used as a set). A nil or empty reply yields nil.
// BUGFIX: the nil guard matches sibling ValueHelper_Hash_SortedSet; the old
// code panicked on the type assertion when the reply was nil.
func ValueHelper_Set(reply interface{}) map[string][]byte {
	if reply == nil {
		return nil
	}

	tmpValue := reply.([]interface{})
	if len(tmpValue) == 0 {
		return nil
	}
	value := make(map[string][]byte, len(tmpValue))
	for i := 0; i < len(tmpValue); i++ {
		value[string(tmpValue[i].([]byte))] = nil
	}
	return value
}
30 |
// ValueHelper_List converts an lrange reply into a slice of raw elements,
// preserving order. A nil or empty reply yields nil.
// BUGFIX: added the nil guard for consistency with
// ValueHelper_Hash_SortedSet; the old code panicked on a nil reply.
func ValueHelper_List(reply interface{}) [][]byte {
	if reply == nil {
		return nil
	}

	tmpValue := reply.([]interface{})
	if len(tmpValue) == 0 {
		return nil
	}
	value := make([][]byte, len(tmpValue))
	for i, ele := range tmpValue {
		value[i] = ele.([]byte)
	}
	return value
}
--------------------------------------------------------------------------------
/src/full_check/common/keytype.go:
--------------------------------------------------------------------------------
1 | package common
2 |
// KeyTypeIndex enumerates the redis value types this tool understands.
type KeyTypeIndex int

const (
	StringTypeIndex KeyTypeIndex = iota
	HashTypeIndex
	ListTypeIndex
	SetTypeIndex
	ZsetTypeIndex
	StreamTypeIndex
	NoneTypeIndex
	EndKeyTypeIndex // sentinel: one past the last real type
)

// String returns the redis type name for the index; any out-of-range value
// (including EndKeyTypeIndex) reads "none".
func (p KeyTypeIndex) String() string {
	names := [...]string{"string", "hash", "list", "set", "zset", "stream", "none"}
	if p >= 0 && int(p) < len(names) {
		return names[p]
	}
	return "none"
}
36 |
// KeyType describes one redis value type: its display name, its index, and
// the command used to fetch the value's length (e.g. "hlen" for a hash).
type KeyType struct {
	Name string // type
	Index KeyTypeIndex
	FetchLenCommand string
}

// String returns the type's display name.
func (p KeyType) String() string {
	return p.Name
}
46 |
// Shared singleton descriptors, one per redis type. Key.Tp is compared
// against these by pointer identity elsewhere, so always reuse them (via
// NewKeyType) rather than constructing fresh KeyType values.
var StringKeyType = &KeyType{
	Name: "string",
	Index: StringTypeIndex,
	FetchLenCommand: "strlen",
}

var HashKeyType = &KeyType{
	Name: "hash",
	Index: HashTypeIndex,
	FetchLenCommand: "hlen",
}

var ListKeyType = &KeyType{
	Name: "list",
	Index: ListTypeIndex,
	FetchLenCommand: "llen",
}

var SetKeyType = &KeyType{
	Name: "set",
	Index: SetTypeIndex,
	FetchLenCommand: "scard",
}

var ZsetKeyType = &KeyType{
	Name: "zset",
	Index: ZsetTypeIndex,
	FetchLenCommand: "zcard",
}

var StreamKeyType = &KeyType{
	Name: "stream",
	Index: StreamTypeIndex,
	FetchLenCommand: "xlen",
}

var NoneKeyType = &KeyType{
	Name: "none",
	Index: NoneTypeIndex,
	FetchLenCommand: "strlen",
}

// EndKeyType is the sentinel for unrecognized type names.
var EndKeyType = &KeyType{
	Name: "unknown",
	Index: EndKeyTypeIndex,
	FetchLenCommand: "unknown",
}
94 |
95 | func NewKeyType(a string) *KeyType {
96 | switch a {
97 | case "string":
98 | return StringKeyType
99 | case "hash":
100 | return HashKeyType
101 | case "list":
102 | return ListKeyType
103 | case "set":
104 | return SetKeyType
105 | case "zset":
106 | return ZsetKeyType
107 | case "stream":
108 | return StreamKeyType
109 | case "none":
110 | return NoneKeyType
111 | default:
112 | return EndKeyType
113 | }
114 | }
115 |
// Field is one hash/set/zset member together with its comparison verdict.
type Field struct {
	Field []byte
	ConflictType ConflictType
}

// Attribute carries per-key metadata gathered during a comparison round.
type Attribute struct {
	ItemCount int64 // the length of value
}

// Key is one key under comparison: its name, logical db, type, overall
// verdict, per-side attributes, and (for container types) the fields that
// were found to differ.
type Key struct {
	Key []byte
	Db int32
	Tp *KeyType
	ConflictType ConflictType
	SourceAttr Attribute
	TargetAttr Attribute

	Field []Field
}
135 |
// ConflictType classifies the comparison result for one key or field.
type ConflictType int

const (
	TypeConflict       ConflictType = iota // key types differ
	ValueConflict                          // values differ
	LackSourceConflict                     // missing on the source
	LackTargetConflict                     // missing on the target
	NoneConflict                           // equal
	EndConflict                            // sentinel / unknown
)

// String returns the name used in the result output.
func (p ConflictType) String() string {
	switch p {
	case TypeConflict:
		return "type"
	case ValueConflict:
		return "value"
	case LackSourceConflict:
		return "lack_source"
	case LackTargetConflict:
		return "lack_target"
	case NoneConflict:
		return "equal"
	}
	return "unknown_conflict"
}

// NewConflictType parses a name produced by String; unknown names map to
// EndConflict.
func NewConflictType(a string) ConflictType {
	for _, ct := range []ConflictType{TypeConflict, ValueConflict, LackSourceConflict, LackTargetConflict, NoneConflict} {
		if ct.String() == a {
			return ct
		}
	}
	return EndConflict
}
--------------------------------------------------------------------------------
/src/full_check/common/log.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "github.com/cihub/seelog"
5 | )
6 |
// InitLog builds the seelog logger. With an empty logFile it uses the
// console template, otherwise the file template; logLevel is the
// comma-separated list produced by HandleLogLevel.
// NOTE(review): the seelog XML config templates inside the backtick
// strings appear to have been stripped in this view of the file — confirm
// their contents against the repository before relying on this function.
func InitLog(logFile string, logLevel string) (seelog.LoggerInterface, error) {
	var logConfig string
	if len(logFile) == 0 {
		logConfig = `









`
	} else {
		logConfig = `









`
	}
	return seelog.LoggerFromConfigAsBytes([]byte(logConfig))
}
36 |
--------------------------------------------------------------------------------
/src/full_check/common/mix.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "bytes"
5 | "strings"
6 | "strconv"
7 | )
8 |
// Min returns the smaller of a and b.
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
15 |
// ParseInfo converts an INFO-style payload ("key:value\r\n" lines) into a
// map[string]string. Only the first colon splits a line; lines without a
// colon are skipped. For example,
// "opapply_source_count:1\r\nopapply_source_0:server_id=3171317,applied_opid=1\r\n"
// becomes {"opapply_source_count": "1", "opapply_source_0": "server_id=3171317,applied_opid=1"}.
func ParseInfo(content []byte) map[string]string {
	result := make(map[string]string, 10)
	for _, line := range bytes.Split(content, []byte("\r\n")) {
		kv := bytes.SplitN(line, []byte(":"), 2)
		if len(kv) == 2 {
			result[string(kv[0])] = string(kv[1])
		}
	}
	return result
}
30 |
31 | func FilterDBList(dbs string) map[int]struct{} {
32 | ret := make(map[int]struct{})
33 | // empty
34 | if dbs == "-1" {
35 | return ret
36 | }
37 |
38 | // empty
39 | dbList := strings.Split(dbs, Splitter)
40 | if len(dbList) == 0 {
41 | return ret
42 | }
43 |
44 | for _, ele := range dbList {
45 | val, err := strconv.Atoi(ele)
46 | if err != nil {
47 | panic(err)
48 | }
49 |
50 | ret[val] = struct{}{}
51 | }
52 | return ret
53 | }
--------------------------------------------------------------------------------
/src/full_check/common/speed.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import "time"
4 |
// Qos is a one-second token bucket used to cap qps: each permitted
// operation takes one token from Bucket, and a background goroutine
// (timer) refills up to `limit` tokens every second.
type Qos struct {
	Bucket chan struct{}

	limit int // qps
	close bool // set by Close to stop the refill goroutine (read without synchronization)
}
11 |
12 | func StartQoS(limit int) *Qos {
13 | q := new(Qos)
14 | q.limit = limit
15 | q.Bucket = make(chan struct{}, limit)
16 |
17 | go q.timer()
18 | return q
19 | }
20 |
21 | func (q *Qos) timer() {
22 | for range time.NewTicker(1 * time.Second).C {
23 | if q.close {
24 | return
25 | }
26 | for i := 0; i < q.limit; i++ {
27 | select {
28 | case q.Bucket <- struct{}{}:
29 | default:
30 | // break if bucket if full
31 | break
32 | }
33 | }
34 | }
35 | }
36 |
// Close signals the refill goroutine to exit at its next tick. The flag is
// written without synchronization — NOTE(review): confirm callers do not
// need a stronger stop guarantee.
func (q *Qos) Close() {
	q.close = true
}
40 |
--------------------------------------------------------------------------------
/src/full_check/common/trieTree.go:
--------------------------------------------------------------------------------
1 | package common
2 |
const (
	// Star is the wildcard byte: a pattern stores everything before the
	// first '*' and matches any continuation of that prefix.
	Star = byte('*')
)

// TrieNode is one byte-edge node of the pattern trie. isEnd marks that an
// exact pattern terminates here; isStar that a wildcard pattern does.
type TrieNode struct {
	children map[byte]*TrieNode
	isEnd    bool
	isStar   bool // is ending by *
}

func newTrieNode() *TrieNode {
	return &TrieNode{children: map[byte]*TrieNode{}}
}

// Trie is a byte-level prefix tree of key patterns supporting a trailing
// '*' wildcard.
type Trie struct {
	root *TrieNode
}

// NewTrie returns an empty pattern trie.
func NewTrie() *Trie {
	return &Trie{root: newTrieNode()}
}

// Insert adds word to the trie. Everything from the first '*' onward is
// dropped, and the node reached so far is flagged as a wildcard terminator.
func (trie *Trie) Insert(word []byte) {
	cur := trie.root
	for _, b := range word {
		if b == Star {
			cur.isStar = true
			break
		}
		next, exists := cur.children[b]
		if !exists {
			next = newTrieNode()
			cur.children[b] = next
		}
		cur = next
	}
	cur.isEnd = true
}

// Search reports whether word matches any inserted pattern, either exactly
// or via a trailing-'*' wildcard prefix.
func (trie *Trie) Search(word []byte) bool {
	cur := trie.root
	for _, b := range word {
		if cur.isStar {
			return true
		}
		next, exists := cur.children[b]
		if !exists {
			return false
		}
		cur = next
	}
	return cur.isEnd || cur.isStar
}
55 |
--------------------------------------------------------------------------------
/src/full_check/common/trieTree_test.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestTrie(t *testing.T) {
11 | var nr int
12 | {
13 | nr++
14 | fmt.Printf("TestTrie case %d.\n", nr)
15 |
16 | trie := NewTrie()
17 | assert.Equal(t, false, trie.Search([]byte("abc")), "should be equal")
18 | assert.Equal(t, false, trie.Search([]byte("df")), "should be equal")
19 | assert.Equal(t, false, trie.Search([]byte("")), "should be equal")
20 | }
21 |
22 | {
23 | nr++
24 | fmt.Printf("TestTrie case %d.\n", nr)
25 | trie := NewTrie()
26 | insertList := []string{"abc", "adf" ,"bdf*", "m*"}
27 | for _, element := range insertList {
28 | trie.Insert([]byte(element))
29 | }
30 |
31 | assert.Equal(t, true, trie.Search([]byte("abc")), "should be equal")
32 | assert.Equal(t, false, trie.Search([]byte("abcd")), "should be equal")
33 | assert.Equal(t, false, trie.Search([]byte("adff")), "should be equal")
34 | assert.Equal(t, true, trie.Search([]byte("m")), "should be equal")
35 | assert.Equal(t, true, trie.Search([]byte("m1")), "should be equal")
36 | assert.Equal(t, false, trie.Search([]byte("")), "should be equal")
37 | }
38 |
39 | {
40 | nr++
41 | fmt.Printf("TestTrie case %d.\n", nr)
42 | trie := NewTrie()
43 | insertList := []string{"*"}
44 | for _, element := range insertList {
45 | trie.Insert([]byte(element))
46 | }
47 |
48 | assert.Equal(t, true, trie.Search([]byte("abc")), "should be equal")
49 | assert.Equal(t, true, trie.Search([]byte("abcd")), "should be equal")
50 | assert.Equal(t, true, trie.Search([]byte("adff")), "should be equal")
51 | assert.Equal(t, true, trie.Search([]byte("m")), "should be equal")
52 | assert.Equal(t, true, trie.Search([]byte("m1")), "should be equal")
53 | assert.Equal(t, true, trie.Search([]byte("")), "should be equal")
54 | }
55 | }
--------------------------------------------------------------------------------
/src/full_check/configure/conf.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
// Opts holds every command-line option of redis-full-check. The struct tags
// follow the go-flags convention (short/long flag name, default value, help
// text). The struct is populated once by the flag parser at startup and is
// read globally afterwards.
var Opts struct {
	SourceAddr         string `short:"s" long:"source" value-name:"SOURCE" description:"Set host:port of source redis. If db type is cluster, split by semicolon(;'), e.g., 10.1.1.1:1000;10.2.2.2:2000;10.3.3.3:3000. We also support auto-detection, so \"master@10.1.1.1:1000\" or \"slave@10.1.1.1:1000\" means choose master or slave. Only need to give a role in the master or slave."`
	SourcePassword     string `short:"p" long:"sourcepassword" value-name:"Password" description:"Set source redis password (format: password or username:password)"`
	SourceAuthType     string `long:"sourceauthtype" value-name:"AUTH-TYPE" default:"auth" description:"useless for opensource redis, valid value:auth/adminauth" `
	SourceDBType       int    `long:"sourcedbtype" default:"0" description:"0: db, 1: cluster 2: aliyun proxy, 3: tencent proxy"`
	SourceDBFilterList string `long:"sourcedbfilterlist" default:"-1" description:"db white list that need to be compared, -1 means fetch all, \"0;5;15\" means fetch db 0, 5, and 15"`
	TargetAddr         string `short:"t" long:"target" value-name:"TARGET" description:"Set host:port of target redis. If db type is cluster, split by semicolon(;'), e.g., 10.1.1.1:1000;10.2.2.2:2000;10.3.3.3:3000. We also support auto-detection, so \"master@10.1.1.1:1000\" or \"slave@10.1.1.1:1000\" means choose master or slave. Only need to give a role in the master or slave."`
	TargetPassword     string `short:"a" long:"targetpassword" value-name:"Password" description:"Set target redis password (format: password or username:password)"`
	TargetAuthType     string `long:"targetauthtype" value-name:"AUTH-TYPE" default:"auth" description:"useless for opensource redis, valid value:auth/adminauth" `
	TargetDBType       int    `long:"targetdbtype" default:"0" description:"0: db, 1: cluster 2: aliyun proxy 3: tencent proxy"`
	TargetDBFilterList string `long:"targetdbfilterlist" default:"-1" description:"db white list that need to be compared, -1 means fetch all, \"0;5;15\" means fetch db 0, 5, and 15"`
	ResultDBFile       string `short:"d" long:"db" value-name:"Sqlite3-DB-FILE" default:"result.db" description:"sqlite3 db file for store result. If exist, it will be removed and a new file is created."`
	ResultFile         string `long:"result" value-name:"FILE" description:"store all diff result into the file, format is 'db\tdiff-type\tkey\tfield'"`
	CompareTimes       string `long:"comparetimes" value-name:"COUNT" default:"3" description:"Total compare count, at least 1. In the first round, all keys will be compared. The subsequent rounds of the comparison will be done on the previous results."`
	CompareMode        int    `short:"m" long:"comparemode" default:"2" description:"compare mode, 1: compare full value, 2: only compare value length, 3: only compare keys outline, 4: compare full value, but only compare value length when meets big key"`
	Id                 string `long:"id" default:"unknown" description:"used in metric, run id, useless for open source"`
	JobId              string `long:"jobid" default:"unknown" description:"used in metric, job id, useless for open source"`
	TaskId             string `long:"taskid" default:"unknown" description:"used in metric, task id, useless for open source"`
	Qps                int    `short:"q" long:"qps" default:"15000" description:"max batch qps limit: e.g., if qps is 10, full-check fetches 10 * $batch keys every second"`
	Interval           int    `long:"interval" value-name:"Second" default:"5" description:"The time interval for each round of comparison(Second)"`
	BatchCount         string `long:"batchcount" value-name:"COUNT" default:"256" description:"the count of key/field per batch compare, valid value [1, 10000]"`
	Parallel           int    `long:"parallel" value-name:"COUNT" default:"5" description:"concurrent goroutine number for comparison, valid value [1, 100]"`
	LogFile            string `long:"log" value-name:"FILE" description:"log file, if not specified, log is put to console"`
	LogLevel           string `long:"loglevel" value-name:"LEVEL" description:"log level: 'debug', 'info', 'warn', 'error', default is 'info'"`
	MetricPrint        bool   `long:"metric" value-name:"BOOL" description:"print metric in log"`
	BigKeyThreshold    int64  `long:"bigkeythreshold" value-name:"COUNT" default:"16384"`
	FilterList         string `short:"f" long:"filterlist" value-name:"FILTER" default:"" description:"if the filter list isn't empty, all elements in list will be synced. The input should be split by '|'. The end of the string is followed by a * to indicate a prefix match, otherwise it is a full match. e.g.: 'abc*|efg|m*' matches 'abc', 'abc1', 'efg', 'm', 'mxyz', but 'efgh', 'p' aren't'"`
	SystemProfile      uint   `long:"systemprofile" value-name:"SYSTEM-PROFILE" default:"20445" description:"port that used to print golang inner head and stack message"`
	Version            bool   `short:"v" long:"version"`
}
33 |
--------------------------------------------------------------------------------
/src/full_check/full_check/full_check.go:
--------------------------------------------------------------------------------
1 | package full_check
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "database/sql"
7 | "encoding/json"
8 | "fmt"
9 | "os"
10 | _ "path"
11 | "strconv"
12 | "sync"
13 | "time"
14 |
15 | "full_check/common"
16 | "full_check/metric"
17 | "full_check/checker"
18 | "full_check/configure"
19 | "full_check/client"
20 |
21 | _ "github.com/mattn/go-sqlite3"
22 | )
23 |
// CheckType selects the comparison strategy; values mirror the
// --comparemode command-line option (see conf.Opts.CompareMode).
type CheckType int

const (
	FullValue            = 1 // compare complete values key by key
	ValueLengthOutline   = 2 // compare value lengths only
	KeyOutline           = 3 // compare key existence/outline only
	FullValueWithOutline = 4 // full value, but length-only for big keys
)
32 |
// FullCheck drives a multi-round comparison between a source and a target
// redis. It embeds the shared check parameters and owns the per-round sqlite
// result databases, the live statistics, and the verifier strategy chosen at
// construction time.
type FullCheck struct {
	checker.FullCheckParameter

	stat                 metric.Stat
	currentDB            int32
	times                int          // current comparison round, 1..CompareCount
	db                   [100]*sql.DB // one sqlite handle per round; indexed by round number
	sourcePhysicalDBList []string
	sourceLogicalDBMap   map[int32]int64 // logical db -> key count (inaccurate for cluster)

	totalConflict      int64
	totalKeyConflict   int64
	totalFieldConflict int64

	verifier checker.IVerifier // comparison strategy selected by NewFullCheck
}
49 |
50 | func NewFullCheck(f checker.FullCheckParameter, checktype CheckType) *FullCheck {
51 | var verifier checker.IVerifier
52 |
53 | fullcheck := &FullCheck{
54 | FullCheckParameter: f,
55 | }
56 |
57 | switch checktype {
58 | case ValueLengthOutline:
59 | verifier = checker.NewValueOutlineVerifier(&fullcheck.stat, &fullcheck.FullCheckParameter)
60 | case KeyOutline:
61 | verifier = checker.NewKeyOutlineVerifier(&fullcheck.stat, &fullcheck.FullCheckParameter)
62 | case FullValue:
63 | verifier = checker.NewFullValueVerifier(&fullcheck.stat, &fullcheck.FullCheckParameter, false)
64 | case FullValueWithOutline:
65 | verifier = checker.NewFullValueVerifier(&fullcheck.stat, &fullcheck.FullCheckParameter, true)
66 | default:
67 | panic(fmt.Sprintf("no such check type : %d", checktype))
68 | }
69 |
70 | fullcheck.verifier = verifier
71 | return fullcheck
72 | }
73 |
// PrintStat reports the statistics of the current comparison round. With
// --metric set it logs a JSON metric (and a second, final metric when the
// last round completes); otherwise it logs a human-readable multi-line
// summary. finished marks the final report of the current round.
func (p *FullCheck) PrintStat(finished bool) {
	var buf bytes.Buffer

	var metricStat *metric.Metric
	var finishPercent int64
	if p.SourceHost.IsCluster() == false {
		// percentage across all rounds, assuming each round scans the same db
		finishPercent = p.stat.Scan.Total() * 100 * int64(p.times) / (p.sourceLogicalDBMap[p.currentDB] * int64(p.CompareCount))
	} else {
		// meaningless for cluster
		finishPercent = -1
	}

	if p.times == 1 {
		// first round: the per-db key count is known, include it in the metric
		metricStat = &metric.Metric{
			CompareTimes: p.times,
			Db: p.currentDB,
			DbKeys: p.sourceLogicalDBMap[p.currentDB],
			Process: finishPercent, // meaningless for cluster
			OneCompareFinished: finished,
			AllFinished: false,
			Timestamp: time.Now().Unix(),
			DateTime: time.Now().Format("2006-01-02T15:04:05Z"),
			Id: conf.Opts.Id,
			JobId: conf.Opts.JobId,
			TaskId: conf.Opts.TaskId}
		fmt.Fprintf(&buf, "times:%d, db:%d, dbkeys:%d, finish:%d%%, finished:%v\n", p.times, p.currentDB,
			p.sourceLogicalDBMap[p.currentDB], finishPercent, finished)
	} else {
		// later rounds only re-check previous conflicts, so DbKeys is omitted
		metricStat = &metric.Metric{
			CompareTimes: p.times,
			Db: p.currentDB,
			Process: finishPercent, // meaningless for cluster
			OneCompareFinished: finished,
			AllFinished: false,
			Timestamp: time.Now().Unix(),
			DateTime: time.Now().Format("2006-01-02T15:04:05Z"),
			Id: conf.Opts.Id,
			JobId: conf.Opts.JobId,
			TaskId: conf.Opts.TaskId}
		fmt.Fprintf(&buf, "times:%d, db:%d, finished:%v\n", p.times, p.currentDB, finished)
	}

	// totals are recomputed from scratch on every call
	p.totalConflict = int64(0)
	p.totalKeyConflict = int64(0)
	p.totalFieldConflict = int64(0)

	// fmt.Fprintf(&buf, "--- key scan ---\n")
	fmt.Fprintf(&buf, "KeyScan:%v\n", p.stat.Scan)
	metricStat.KeyScan = p.stat.Scan.Json()
	metricStat.KeyMetric = make(map[string]map[string]*metric.CounterStat)

	// keys that compared equal, per key type
	// fmt.Fprintf(&buf, "--- key equal ---\n")
	for i := common.KeyTypeIndex(0); i < common.EndKeyTypeIndex; i++ {
		metricStat.KeyMetric[i.String()] = make(map[string]*metric.CounterStat)
		if p.stat.ConflictKey[i][common.NoneConflict].Total() != 0 {
			metricStat.KeyMetric[i.String()]["equal"] = p.stat.ConflictKey[i][common.NoneConflict].Json()
			if p.times == p.CompareCount {
				fmt.Fprintf(&buf, "KeyEqualAtLast|%s|%s|%v\n", i, common.NoneConflict,
					p.stat.ConflictKey[i][common.NoneConflict])
			} else {
				fmt.Fprintf(&buf, "KeyEqualInProcess|%s|%s|%v\n", i, common.NoneConflict,
					p.stat.ConflictKey[i][common.NoneConflict])
			}
		}
	}
	// conflicting keys, per key type and conflict type; only the last round
	// contributes to the final conflict totals
	// fmt.Fprintf(&buf, "--- key conflict ---\n")
	for i := common.KeyTypeIndex(0); i < common.EndKeyTypeIndex; i++ {
		for j := common.ConflictType(0); j < common.NoneConflict; j++ {
			// fmt.Println(i, j, p.stat.ConflictKey[i][j].Total())
			if p.stat.ConflictKey[i][j].Total() != 0 {
				metricStat.KeyMetric[i.String()][j.String()] = p.stat.ConflictKey[i][j].Json()
				if p.times == p.CompareCount {
					fmt.Fprintf(&buf, "KeyConflictAtLast|%s|%s|%v\n", i, j, p.stat.ConflictKey[i][j])
					p.totalKeyConflict += p.stat.ConflictKey[i][j].Total()
				} else {
					fmt.Fprintf(&buf, "KeyConflictInProcess|%s|%s|%v\n", i, j, p.stat.ConflictKey[i][j])
				}
			}
		}
	}

	metricStat.FieldMetric = make(map[string]map[string]*metric.CounterStat)
	// fields that compared equal, per key type
	// fmt.Fprintf(&buf, "--- field equal ---\n")
	for i := common.KeyTypeIndex(0); i < common.EndKeyTypeIndex; i++ {
		metricStat.FieldMetric[i.String()] = make(map[string]*metric.CounterStat)
		if p.stat.ConflictField[i][common.NoneConflict].Total() != 0 {
			metricStat.FieldMetric[i.String()]["equal"] = p.stat.ConflictField[i][common.NoneConflict].Json()
			if p.times == p.CompareCount {
				fmt.Fprintf(&buf, "FieldEqualAtLast|%s|%s|%v\n", i, common.NoneConflict,
					p.stat.ConflictField[i][common.NoneConflict])
			} else {
				fmt.Fprintf(&buf, "FieldEqualInProcess|%s|%s|%v\n", i, common.NoneConflict,
					p.stat.ConflictField[i][common.NoneConflict])
			}
		}
	}
	// conflicting fields, per key type and conflict type
	// fmt.Fprintf(&buf, "--- field conflict ---\n")
	for i := common.KeyTypeIndex(0); i < common.EndKeyTypeIndex; i++ {
		for j := common.ConflictType(0); j < common.NoneConflict; j++ {
			if p.stat.ConflictField[i][j].Total() != 0 {
				metricStat.FieldMetric[i.String()][j.String()] = p.stat.ConflictField[i][j].Json()
				if p.times == p.CompareCount {
					fmt.Fprintf(&buf, "FieldConflictAtLast|%s|%s|%v\n", i, j, p.stat.ConflictField[i][j])
					p.totalFieldConflict += p.stat.ConflictField[i][j].Total()
				} else {
					fmt.Fprintf(&buf, "FieldConflictInProcess|%s|%s|%v\n", i, j, p.stat.ConflictField[i][j])
				}
			}
		}
	}

	p.totalConflict = p.totalKeyConflict + p.totalFieldConflict
	if conf.Opts.MetricPrint {
		// marshal errors are deliberately ignored: a metric line is best-effort
		metricstr, _ := json.Marshal(metricStat)
		common.Logger.Info(string(metricstr))
		// fmt.Println(string(metricstr))

		if p.times == p.CompareCount && finished {
			// emit one extra, final metric with the aggregated totals
			metricStat.AllFinished = true
			metricStat.Process = int64(100)
			metricStat.TotalConflict = p.totalConflict
			metricStat.TotalKeyConflict = p.totalKeyConflict
			metricStat.TotalFieldConflict = p.totalFieldConflict

			metricstr, _ := json.Marshal(metricStat)
			common.Logger.Info(string(metricstr))
			// fmt.Println(string(metricstr))
		}
	} else {
		common.Logger.Infof("stat:\n%s", string(buf.Bytes()))
	}
}
206 |
// IncrScanStat adds a to the scanned-keys counter; used by scan code to
// report progress into the shared statistics.
func (p *FullCheck) IncrScanStat(a int) {
	p.stat.Scan.Inc(a)
}
210 |
// Start runs the whole comparison. It creates one sqlite result db per round,
// fetches the source db/key layout, then runs CompareCount rounds: round one
// scans every key from the source redis, later rounds re-check only the
// conflicts recorded in the previous round's tables. Per db and per round it
// wires up a pipeline of scanner -> Parallel verifiers -> one conflict writer.
func (p *FullCheck) Start() {
	var err error

	for i := 1; i <= p.CompareCount; i++ {
		// init sqlite db
		os.Remove(p.ResultDBFile + "." + strconv.Itoa(i))
		p.db[i], err = sql.Open("sqlite3", p.ResultDBFile+"."+strconv.Itoa(i))
		if err != nil {
			panic(common.Logger.Critical(err))
		}
		// defer in a loop: all handles intentionally stay open until Start
		// returns, because later rounds read the earlier rounds' tables
		defer p.db[i].Close()
	}

	sourceClient, err := client.NewRedisClient(p.SourceHost, 0)
	if err != nil {
		panic(common.Logger.Errorf("create redis client with host[%v] db[%v] error[%v]",
			p.SourceHost, 0, err))
	}

	p.sourceLogicalDBMap, p.sourcePhysicalDBList, err = sourceClient.FetchBaseInfo(conf.Opts.SourceDBType == 1)
	if err != nil {
		panic(common.Logger.Critical(err))
	}

	common.Logger.Infof("sourceDbType=%v, p.sourcePhysicalDBList=%v", p.FullCheckParameter.SourceHost.DBType,
		p.sourcePhysicalDBList)

	sourceClient.Close()
	for db, keyNum := range p.sourceLogicalDBMap {
		if p.SourceHost.IsCluster() == true {
			common.Logger.Infof("db=%d:keys=%d(inaccurate for type cluster)", db, keyNum)
		} else {
			common.Logger.Infof("db=%d:keys=%d", db, keyNum)
		}
	}

	for p.times = 1; p.times <= p.CompareCount; p.times++ {
		p.CreateDbTable(p.times)
		if p.times != 1 {
			// give the replication link time to catch up between rounds
			common.Logger.Infof("wait %d seconds before start", p.Interval)
			time.Sleep(time.Second * time.Duration(p.Interval))
		}
		common.Logger.Infof("---------------- start %dth time compare", p.times)

		for db := range p.sourceLogicalDBMap {
			p.currentDB = db
			p.stat.Reset(false)
			// init stat timer: periodically rotates and prints statistics
			tickerStat := time.NewTicker(time.Second * common.StatRollFrequency)
			ctxStat, cancelStat := context.WithCancel(context.Background()) // cancelled explicitly below
			go func(ctx context.Context) {
				defer func() {
					tickerStat.Stop()
				}()

				for range tickerStat.C {
					select { // check whether the round has finished
					case <-ctx.Done():
						return
					default:
					}
					p.stat.Rotate()
					p.PrintStat(false)
				}
			}(ctxStat)

			common.Logger.Infof("start compare db %d", p.currentDB)
			keys := make(chan []*common.Key, 1024)
			conflictKey := make(chan *common.Key, 1024)
			var wg, wg2 sync.WaitGroup
			// start scan, get all keys
			if p.times == 1 {
				wg.Add(1)
				go func() {
					defer wg.Done()
					p.ScanFromSourceRedis(keys)
				}()
			} else {
				// later rounds re-read the previous round's conflict tables
				wg.Add(1)
				go func() {
					defer wg.Done()
					p.ScanFromDB(keys)
				}()
			}

			// start check
			wg.Add(p.Parallel)
			for i := 0; i < p.Parallel; i++ {
				go func() {
					defer wg.Done()
					p.VerifyAllKeyInfo(keys, conflictKey)
				}()
			}

			// start write conflictKey
			wg2.Add(1)
			go func() {
				defer wg2.Done()
				p.WriteConflictKey(conflictKey)
			}()

			// scanners + verifiers first, then close the conflict channel so
			// the writer can drain and exit
			wg.Wait()
			close(conflictKey)
			wg2.Wait()
			cancelStat() // stop stat goroutine
			p.PrintStat(true)
		} // for db, keyNum := range dbNums

		// do not reset when run the final time
		if p.times < p.CompareCount {
			p.stat.Reset(true)
		}
	} // end for

	p.stat.Reset(false)
	common.Logger.Infof("--------------- finished! ----------------\nall finish successfully, totally %d key(s) and %d field(s) conflict",
		p.stat.TotalConflictKeys, p.stat.TotalConflictFields)
}
329 |
330 | func (p *FullCheck) GetCurrentResultTable() (key string, field string) {
331 | if p.times != p.CompareCount {
332 | return fmt.Sprintf("key_%d", p.times), fmt.Sprintf("field_%d", p.times)
333 | } else {
334 | return "key", "field"
335 | }
336 | }
337 |
// GetLastResultTable returns the key/field table names written by the
// previous round (p.times-1); used when re-checking earlier conflicts.
func (p *FullCheck) GetLastResultTable() (key string, field string) {
	return fmt.Sprintf("key_%d", p.times-1), fmt.Sprintf("field_%d", p.times-1)
}
341 |
// CreateDbTable creates the sqlite tables for one round: a key-conflict table
// and a field-conflict table (named by GetCurrentResultTable), plus the
// FINAL_RESULT table that accumulates last-round conflicts.
// NOTE(review): the times parameter selects the db handle while the table
// names derive from p.times — callers must pass times == p.times (Start does).
func (p *FullCheck) CreateDbTable(times int) {
	/** create table **/
	conflictKeyTableName, conflictFieldTableName := p.GetCurrentResultTable()

	// one row per conflicting key
	conflictKeyTableSql := fmt.Sprintf(`
CREATE TABLE %s(
   id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
   key TEXT NOT NULL,
   type TEXT NOT NULL,
   conflict_type TEXT NOT NULL,
   db INTEGER NOT NULL,
   source_len INTEGER NOT NULL,
   target_len INTEGER NOT NULL
);
`, conflictKeyTableName)
	_, err := p.db[times].Exec(conflictKeyTableSql)
	if err != nil {
		panic(common.Logger.Errorf("exec sql %s failed: %s", conflictKeyTableSql, err))
	}
	// one row per conflicting field, linked to its key row via key_id
	conflictFieldTableSql := fmt.Sprintf(`
CREATE TABLE %s(
   id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
   field TEXT NOT NULL,
   conflict_type TEXT NOT NULL,
   key_id INTEGER NOT NULL
);
`, conflictFieldTableName)
	_, err = p.db[times].Exec(conflictFieldTableSql)
	if err != nil {
		panic(common.Logger.Errorf("exec sql %s failed: %s", conflictFieldTableSql, err))
	}

	// IF NOT EXISTS: this table is shared across rounds within one db file
	conflictResultSql := fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(
	InstanceA TEXT NOT NULL,
	InstanceB TEXT NOT NULL,
	Key TEXT NOT NULL,
	Schema TEXT NOT NULL,
	InconsistentType TEXT NOT NULL,
	Extra TEXT NOT NULL
);`, "FINAL_RESULT")
	_, err = p.db[times].Exec(conflictResultSql)
	if err != nil {
		panic(common.Logger.Errorf("exec sql %s failed: %s", conflictResultSql, err))
	}
}
388 |
// VerifyAllKeyInfo consumes batches of keys from allKeys, compares them
// between source and target through the configured verifier, and emits
// mismatches on conflictKey. Start runs several instances in parallel; each
// owns its own pair of redis connections. Throughput is throttled to
// conf.Opts.Qps batches per second via a token bucket.
func (p *FullCheck) VerifyAllKeyInfo(allKeys <-chan []*common.Key, conflictKey chan<- *common.Key) {
	sourceClient, err := client.NewRedisClient(p.SourceHost, p.currentDB)
	if err != nil {
		panic(common.Logger.Errorf("create redis client with host[%v] db[%v] error[%v]",
			p.SourceHost, p.currentDB, err))
	}
	defer sourceClient.Close()

	targetClient, err := client.NewRedisClient(p.TargetHost, p.currentDB)
	if err != nil {
		panic(common.Logger.Errorf("create redis client with host[%v] db[%v] error[%v]",
			p.TargetHost, p.currentDB, err))
	}
	defer targetClient.Close()

	// limit qps
	qos := common.StartQoS(conf.Opts.Qps)
	for keyInfo := range allKeys {
		<-qos.Bucket // one token per batch
		p.verifier.VerifyOneGroupKeyInfo(keyInfo, conflictKey, &sourceClient, &targetClient)
	} // for oneGroupKeys := range allKeys

	// NOTE(review): each parallel worker creates its own QoS instance, so the
	// effective limit is Qps per worker, not Qps overall — confirm intent.
	qos.Close()
}
413 |
// WriteConflictKey drains conflictKey and persists every conflict into the
// current round's sqlite tables and — during the last round — into
// FINAL_RESULT and the optional flat result file. Inserts are committed in
// transactions of 1000 rows.
// NOTE(review): Begin/Commit/OpenFile errors are partly ignored here
// (tx, _ := ...; resultfile, _ := ...); a failed Begin would nil-panic later.
func (p *FullCheck) WriteConflictKey(conflictKey <-chan *common.Key) {
	conflictKeyTableName, conflictFieldTableName := p.GetCurrentResultTable()

	var resultfile *os.File
	if len(conf.Opts.ResultFile) > 0 {
		resultfile, _ = os.OpenFile(conf.Opts.ResultFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		defer resultfile.Close()
	}

	tx, _ := p.db[p.times].Begin()
	statInsertKey, err := tx.Prepare(fmt.Sprintf("insert into %s (key, type, conflict_type, db, source_len, target_len) values(?,?,?,?,?,?)", conflictKeyTableName))
	if err != nil {
		panic(common.Logger.Error(err))
	}
	statInsertField, err := tx.Prepare(fmt.Sprintf("insert into %s (field, conflict_type, key_id) values (?,?,?)", conflictFieldTableName))
	if err != nil {
		panic(common.Logger.Error(err))
	}

	count := 0
	for oneKeyInfo := range conflictKey {
		// every 1000 rows: commit the open transaction and start a new one
		// (the count==0 first pass closes/recommits the freshly made tx,
		// which is harmless)
		if count%1000 == 0 {
			var err error
			statInsertKey.Close()
			statInsertField.Close()
			e := tx.Commit()
			if e != nil {
				common.Logger.Error(e.Error())
			}

			tx, _ = p.db[p.times].Begin()
			statInsertKey, err = tx.Prepare(fmt.Sprintf("insert into %s (key, type, conflict_type, db, source_len, target_len) values(?,?,?,?,?,?)", conflictKeyTableName))
			if err != nil {
				panic(common.Logger.Error(err))
			}

			statInsertField, err = tx.Prepare(fmt.Sprintf("insert into %s (field, conflict_type, key_id) values (?,?,?)", conflictFieldTableName))
			if err != nil {
				panic(common.Logger.Error(err))
			}
		}
		count += 1

		result, err := statInsertKey.Exec(string(oneKeyInfo.Key), oneKeyInfo.Tp.Name, oneKeyInfo.ConflictType.String(), p.currentDB, oneKeyInfo.SourceAttr.ItemCount, oneKeyInfo.TargetAttr.ItemCount)
		if err != nil {
			panic(common.Logger.Error(err))
		}
		if len(oneKeyInfo.Field) != 0 {
			// field-level conflicts reference the key row just inserted
			lastId, _ := result.LastInsertId()
			for i := 0; i < len(oneKeyInfo.Field); i++ {
				_, err = statInsertField.Exec(string(oneKeyInfo.Field[i].Field), oneKeyInfo.Field[i].ConflictType.String(), lastId)
				if err != nil {
					panic(common.Logger.Error(err))
				}

				if p.times == p.CompareCount {
					// NOTE(review): the FINAL_RESULT statement is re-prepared
					// for every field — could be hoisted out of the loop
					finalstat, err := tx.Prepare(fmt.Sprintf("insert into FINAL_RESULT (InstanceA, InstanceB, Key, Schema, InconsistentType, Extra) VALUES(?, ?, ?, ?, ?, ?)"))
					if err != nil {
						panic(common.Logger.Error(err))
					}
					// defer finalstat.Close()
					_, err = finalstat.Exec("", "", string(oneKeyInfo.Key), strconv.Itoa(int(p.currentDB)),
						oneKeyInfo.Field[i].ConflictType.String(),
						string(oneKeyInfo.Field[i].Field))
					if err != nil {
						panic(common.Logger.Error(err))
					}

					finalstat.Close()

					if len(conf.Opts.ResultFile) != 0 {
						resultfile.WriteString(fmt.Sprintf("%d\t%s\t%s\t%s\n", int(p.currentDB), oneKeyInfo.Field[i].ConflictType.String(), string(oneKeyInfo.Key), string(oneKeyInfo.Field[i].Field)))
					}
				}
			}
		} else {
			// key-level conflict only (no per-field detail)
			if p.times == p.CompareCount {
				finalstat, err := tx.Prepare(fmt.Sprintf("insert into FINAL_RESULT (InstanceA, InstanceB, Key, Schema, InconsistentType, Extra) VALUES(?, ?, ?, ?, ?, ?)"))
				if err != nil {
					panic(common.Logger.Error(err))
				}
				// defer finalstat.Close()
				_, err = finalstat.Exec("", "", string(oneKeyInfo.Key), strconv.Itoa(int(p.currentDB)), oneKeyInfo.ConflictType.String(), "")
				if err != nil {
					panic(common.Logger.Error(err))
				}
				finalstat.Close()

				if len(conf.Opts.ResultFile) != 0 {
					resultfile.WriteString(fmt.Sprintf("%d\t%s\t%s\t%s\n", int(p.currentDB), oneKeyInfo.ConflictType.String(), string(oneKeyInfo.Key), ""))
				}
			}
		}
	}
	statInsertKey.Close()
	statInsertField.Close()
	tx.Commit()
}
512 |
--------------------------------------------------------------------------------
/src/full_check/full_check/full_check_test.go:
--------------------------------------------------------------------------------
1 | package full_check
2 |
3 | import (
4 | "database/sql"
5 | "fmt"
6 | "github.com/gomodule/redigo/redis"
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/suite"
9 | "os"
10 | "os/exec"
11 | "testing"
12 | )
13 |
// RedisFullCheckTestSuite is an integration test suite. It expects two local
// redis instances: the source on 127.0.0.1:6000 and the target on
// 127.0.0.1:7000, plus a ./full_check binary in the working directory.
type RedisFullCheckTestSuite struct {
	suite.Suite
}
17 |
// SetupTest flushes both redis instances and seeds them with deliberately
// mismatched fixtures: for every data type there is an equal key, a key
// missing on one side, a length difference and a same-length value
// difference, so each compare mode has something to detect.
func (suite *RedisFullCheckTestSuite) SetupTest() {
	// seed the source instance
	conn, err := redis.Dial("tcp", "127.0.0.1:6000")
	if err != nil {
		panic(err.Error())
	}

	conn.Do("FLUSHALL")

	// same value
	conn.Do("SET", "SameValue", "val")
	// lack key
	conn.Do("SET", "LackKeyA", "valA")
	// different length
	conn.Do("SET", "DiffLength", "valA")
	// same length & different value
	conn.Do("SET", "SameLength", "valA")
	// type
	conn.Do("SADD", "TypeError", "a", "b")

	// set same
	conn.Do("SADD", "SetSame", "a")
	// set different field
	conn.Do("SADD", "SetDiffField", "a", "b")
	// set different fields
	conn.Do("SADD", "SetDiffFields", "a", "b")

	// hash same
	conn.Do("HSET", "HashSame", "a", "b")
	// hash different field
	conn.Do("HSET", "HashDiffField", "a", "b")
	// hash different fields
	conn.Do("HSET", "HashDiffFields", "a", "b")
	conn.Do("HSET", "HashDiffFields", "b", "c")

	// zset same
	conn.Do("ZADD", "ZsetSame", "1", "a")
	// zset different field
	conn.Do("ZADD", "ZsetDiffField", "2", "a")
	// zset different fields
	conn.Do("ZADD", "ZsetDiffFields", "2", "a")
	conn.Do("ZADD", "ZsetDiffFields", "2", "b")

	// list same
	conn.Do("LPUSH", "ListSame", "a")
	// list different field
	conn.Do("LPUSH", "ListDiffField", "c")
	// list different fields
	conn.Do("LPUSH", "ListDiffFields", "a")

	conn.Close()

	// seed the target instance with intentional deviations
	conn, err = redis.Dial("tcp", "127.0.0.1:7000")
	if err != nil {
		panic(err.Error())
	}

	conn.Do("FLUSHALL")

	// same value
	conn.Do("SET", "SameValue", "val")
	// different length
	conn.Do("SET", "DiffLength", "valAA")
	// same length & different value
	conn.Do("SET", "SameLength", "valB")
	// lack key
	conn.Do("SET", "LackKeyB", "valB")
	// type
	conn.Do("HSET", "TypeError", "a", "b")

	// set same
	conn.Do("SADD", "SetSame", "a")
	// set different field
	conn.Do("SADD", "SetDiffField", "c", "b")
	// set different fields
	conn.Do("SADD", "SetDiffFields", "a")

	// hash same
	conn.Do("HSET", "HashSame", "a", "b")
	// hash different field
	conn.Do("HSET", "HashDiffField", "b", "d")
	// hash different fields
	conn.Do("HSET", "HashDiffFields", "a", "b")

	// zset same
	conn.Do("ZADD", "ZsetSame", "1", "a")
	// zset different field
	conn.Do("ZADD", "ZsetDiffField", "2", "b")
	// zset different fields
	conn.Do("ZADD", "ZsetDiffFields", "2", "a")

	// list same
	conn.Do("LPUSH", "ListSame", "a")
	// list different field
	conn.Do("LPUSH", "ListDiffField", "b")
	// list different fields
	conn.Do("LPUSH", "ListDiffFields", "3", "a")

	conn.Close()

}
118 |
// TestValueLength runs the full_check binary in compare mode 2 (value length
// only) against the seeded instances, then asserts the rows recorded in the
// FINAL_RESULT table of the last round's result db. Length-only mode must
// miss same-length value differences (SameLength, HashDiffField, ...).
func (suite *RedisFullCheckTestSuite) TestValueLength() {
	cmd := exec.Command("/bin/bash", "-c", "./full_check -s 127.0.0.1:6000 -p '' -t 127.0.0.1:7000 -a '' --comparetimes=3 --comparemode=2 --interval=1 --log FFFF")
	if err := cmd.Run(); err != nil {
		panic(err)
	}

	// the last of the 3 rounds writes result.db.3
	db, err := sql.Open("sqlite3", "result.db.3")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	statem, err := db.Prepare(fmt.Sprintf("SELECT * FROM FINAL_RESULT WHERE Key=?"))
	if err != nil {
		panic(err)
	}
	defer statem.Close()

	// checkKey args (presumably): statement, key, expected conflict type,
	// expected field, expected row count — checkKey is defined elsewhere
	suite.checkKey(statem, "LackKeyA", "lack_target", "", 1)
	suite.checkKey(statem, "DiffLength", "value", "", 1)
	suite.checkKey(statem, "SameLength", "value", "", 0)
	suite.checkKey(statem, "TypeError", "type", "", 1)

	suite.checkKey(statem, "HashDiffFields", "value", "", 1)
	suite.checkKey(statem, "HashSame", "", "", 0)
	suite.checkKey(statem, "HashDiffField", "", "", 0)

	suite.checkKey(statem, "ZsetDiffFields", "value", "", 1)
	suite.checkKey(statem, "ZsetSame", "", "", 0)
	suite.checkKey(statem, "ZsetDiffField", "", "", 0)

	suite.checkKey(statem, "SetDiffFields", "value", "", 1)
	suite.checkKey(statem, "SetSame", "", "", 0)
	suite.checkKey(statem, "SetDiffField", "", "", 0)

	suite.checkKey(statem, "ListDiffFields", "value", "", 1)
	suite.checkKey(statem, "ListSame", "", "", 0)
	suite.checkKey(statem, "ListDiffField", "", "", 0)
}
158 |
// TestFullValueCheck runs the full_check binary in compare mode 1 (full
// value) and asserts FINAL_RESULT: unlike length-only mode it must also flag
// same-length value differences and per-field mismatches.
func (suite *RedisFullCheckTestSuite) TestFullValueCheck() {
	cmd := exec.Command("/bin/bash", "-c", "./full_check -s 127.0.0.1:6000 -p '' -t 127.0.0.1:7000 -a '' --comparetimes=3 --comparemode=1 --interval=1 --log FFFF")
	if err := cmd.Run(); err != nil {
		panic(err)
	}

	db, err := sql.Open("sqlite3", "result.db.3")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	statem, err := db.Prepare(fmt.Sprintf("SELECT * FROM FINAL_RESULT WHERE Key=?"))
	if err != nil {
		panic(err)
	}
	defer statem.Close()

	suite.checkKey(statem, "LackKeyA", "lack_target", "", 1)
	suite.checkKey(statem, "DiffLength", "value", "", 1)
	suite.checkKey(statem, "SameLength", "value", "", 1)
	suite.checkKey(statem, "TypeError", "type", "", 1)

	suite.checkKey(statem, "HashDiffFields", "lack_target", "b", 1)
	suite.checkKey(statem, "HashSame", "", "", 0)
	suite.checkKey(statem, "HashDiffField", "lack_target", "a", 1)

	// ???  (original author's open question about expected zset behavior)
	suite.checkKey(statem, "ZsetDiffFields", "lack_target", "b", 1)
	suite.checkKey(statem, "ZsetSame", "", "", 0)
	suite.checkKey(statem, "ZsetDiffField", "lack_target", "a", 1)

	suite.checkKey(statem, "SetDiffFields", "lack_target", "b", 1)
	suite.checkKey(statem, "SetSame", "", "", 0)
	suite.checkKey(statem, "SetDiffField", "lack_target", "a", 1)

	// ???  (original author's open question about expected list behavior)
	suite.checkKey(statem, "ListDiffFields", "lack_target", "", 0)
	suite.checkKey(statem, "ListSame", "", "", 0)
	suite.checkKey(statem, "ListDiffField", "value", "0", 1)
}
200 |
201 | func (suite *RedisFullCheckTestSuite) TestKeyOutline() {
202 | cmd := exec.Command("/bin/bash", "-c", "./full_check -s 127.0.0.1:6000 -p '' -t 127.0.0.1:7000 -a '' --comparetimes=3 --comparemode=3 --interval=1 --log FFFF")
203 | if err := cmd.Run(); err != nil {
204 | panic(err)
205 | }
206 |
207 | db, err := sql.Open("sqlite3", "result.db.3")
208 | if err != nil {
209 | panic(err)
210 | }
211 | defer db.Close()
212 |
213 | statem, err := db.Prepare(fmt.Sprintf("SELECT * FROM FINAL_RESULT WHERE Key=?"))
214 | if err != nil {
215 | panic(err)
216 | }
217 | defer statem.Close()
218 |
219 | suite.checkKey(statem, "LackKeyA", "lack_target", "", 1)
220 | suite.checkKey(statem, "DiffLength", "value", "", 0)
221 | suite.checkKey(statem, "SameLength", "value", "", 0)
222 | suite.checkKey(statem, "TypeError", "type", "", 0)
223 |
224 | suite.checkKey(statem, "HashDiffFields", "value", "", 0)
225 | suite.checkKey(statem, "HashSame", "", "", 0)
226 | suite.checkKey(statem, "HashDiffField", "", "", 0)
227 |
228 | suite.checkKey(statem, "ZsetDiffFields", "value", "", 0)
229 | suite.checkKey(statem, "ZsetSame", "", "", 0)
230 | suite.checkKey(statem, "ZsetDiffField", "", "", 0)
231 |
232 | suite.checkKey(statem, "SetDiffFields", "value", "", 0)
233 | suite.checkKey(statem, "SetSame", "", "", 0)
234 | suite.checkKey(statem, "SetDiffField", "", "", 0)
235 |
236 | suite.checkKey(statem, "ListDiffFields", "value", "", 0)
237 | suite.checkKey(statem, "ListSame", "", "", 0)
238 | suite.checkKey(statem, "ListDiffField", "", "", 0)
239 | }
240 |
241 | func (suite *RedisFullCheckTestSuite) checkKey(statem *sql.Stmt, key string, inconsistentType string, field string, num int) {
242 | rows, err := statem.Query(key)
243 | if err != nil {
244 | panic(err)
245 | }
246 |
247 | count := 0
248 | for rows.Next() {
249 | var InstanceA, InstanceB, Key, Schema, InconsistentType, Extra string
250 | err := rows.Scan(&InstanceA, &InstanceB, &Key, &Schema, &InconsistentType, &Extra)
251 | if err != nil {
252 | panic(err)
253 | }
254 |
255 | assert.Equal(suite.T(), key, Key)
256 | assert.Equal(suite.T(), "0", Schema)
257 | assert.Equal(suite.T(), inconsistentType, InconsistentType)
258 | assert.Equal(suite.T(), field, Extra)
259 |
260 | count += 1
261 | break
262 | }
263 |
264 | assert.Equal(suite.T(), count, num)
265 | }
266 |
// TestFullCheck wires the RedisFullCheck suite into the standard `go test`
// runner via testify's suite package.
func TestFullCheck(t *testing.T) {
	suite.Run(t, new(RedisFullCheckTestSuite))
}
270 |
// TestMain is the test binary entry point; os.Exit propagates m.Run's
// status code as the process exit code.
func TestMain(m *testing.M) {
	os.Exit(m.Run())
}
274 |
--------------------------------------------------------------------------------
/src/full_check/full_check/scan.go:
--------------------------------------------------------------------------------
1 | package full_check
2 |
3 | import (
4 | "strconv"
5 | "fmt"
6 |
7 | "full_check/common"
8 | "full_check/client"
9 |
10 | "github.com/jinzhu/copier"
11 | "sync"
12 | )
13 |
14 | func (p *FullCheck) ScanFromSourceRedis(allKeys chan<- []*common.Key) {
15 | var wg sync.WaitGroup
16 |
17 | wg.Add(len(p.sourcePhysicalDBList))
18 | for idx := 0; idx < len(p.sourcePhysicalDBList); idx++ {
19 | // use goroutine to run db concurrently
20 | go func(index int) {
21 | defer wg.Done()
22 | cursor := 0
23 | var sourceClient client.RedisClient
24 | var err error
25 |
26 | // build client
27 | if p.SourceHost.IsCluster() {
28 | var singleHost client.RedisHost
29 | copier.Copy(&singleHost, &p.SourceHost)
30 | // set single host address
31 | singleHost.Addr = []string{singleHost.Addr[index]}
32 | singleHost.DBType = common.TypeDB
33 | // build client by single db
34 | if sourceClient, err = client.NewRedisClient(singleHost, p.currentDB); err != nil {
35 | panic(common.Logger.Critical(err))
36 | }
37 | } else {
38 | sourceClient, err = client.NewRedisClient(p.SourceHost, p.currentDB)
39 | if err != nil {
40 | panic(common.Logger.Errorf("create redis client with host[%v] db[%v] error[%v]",
41 | p.SourceHost, p.currentDB, err))
42 | }
43 | }
44 | defer sourceClient.Close()
45 |
46 | common.Logger.Infof("build connection[%v]", sourceClient.String())
47 |
48 | for {
49 | var reply interface{}
50 | var err error
51 |
52 | switch p.SourceHost.DBType {
53 | case common.TypeDB:
54 | fallthrough
55 | case common.TypeCluster:
56 | reply, err = sourceClient.Do("scan", cursor, "count", p.BatchCount)
57 | case common.TypeAliyunProxy:
58 | reply, err = sourceClient.Do("iscan", index, cursor, "count", p.BatchCount)
59 | case common.TypeTencentProxy:
60 | reply, err = sourceClient.Do("scan", cursor, "count", p.BatchCount, p.sourcePhysicalDBList[index])
61 | }
62 | if err != nil {
63 | panic(common.Logger.Critical(err))
64 | }
65 |
66 | replyList, ok := reply.([]interface{})
67 | if ok == false || len(replyList) != 2 {
68 | panic(common.Logger.Criticalf("scan %d count %d failed, result: %+v", cursor, p.BatchCount, reply))
69 | }
70 |
71 | bytes, ok := replyList[0].([]byte)
72 | if ok == false {
73 | panic(common.Logger.Criticalf("scan %d count %d failed, result: %+v", cursor, p.BatchCount, reply))
74 | }
75 |
76 | cursor, err = strconv.Atoi(string(bytes))
77 | if err != nil {
78 | panic(common.Logger.Critical(err))
79 | }
80 |
81 | keylist, ok := replyList[1].([]interface{})
82 | if ok == false {
83 | panic(common.Logger.Criticalf("scan failed, result: %+v", reply))
84 | }
85 | keysInfo := make([]*common.Key, 0, len(keylist))
86 | for _, value := range keylist {
87 | bytes, ok = value.([]byte)
88 | if ok == false {
89 | panic(common.Logger.Criticalf("scan failed, result: %+v", reply))
90 | }
91 |
92 | // check filter list
93 | if common.CheckFilter(p.FilterTree, bytes) == false {
94 | continue
95 | }
96 |
97 | keysInfo = append(keysInfo, &common.Key{
98 | Key: bytes,
99 | Tp: common.EndKeyType,
100 | ConflictType: common.EndConflict,
101 | })
102 | // common.Logger.Debugf("read key: %v", string(bytes))
103 | }
104 | p.IncrScanStat(len(keysInfo))
105 | allKeys <- keysInfo
106 |
107 | if cursor == 0 {
108 | break
109 | }
110 | } // end for{}
111 | }(idx)
112 | } // end fo for idx := 0; idx < p.sourcePhysicalDBList; idx++
113 |
114 | wg.Wait()
115 | close(allKeys)
116 | }
117 |
// ScanFromDB replays the conflict keys recorded by the previous compare
// round: it pages through the prior round's key table (and, for non-string
// keys, the matching field table) and feeds the reconstructed keys into
// allKeys. The channel is closed when the table is exhausted.
func (p *FullCheck) ScanFromDB(allKeys chan<- []*common.Key) {
	conflictKeyTableName, conflictFieldTableName := p.GetLastResultTable()

	// page by id so each query returns at most BatchCount rows
	keyQuery := fmt.Sprintf("select id,key,type,conflict_type,source_len,target_len from %s where id>? and db=%d limit %d",
		conflictKeyTableName, p.currentDB, p.BatchCount)
	keyStatm, err := p.db[p.times-1].Prepare(keyQuery)
	if err != nil {
		panic(common.Logger.Error(err))
	}
	defer keyStatm.Close()

	fieldQuery := fmt.Sprintf("select field,conflict_type from %s where key_id=?", conflictFieldTableName)
	fieldStatm, err := p.db[p.times-1].Prepare(fieldQuery)
	if err != nil {
		panic(common.Logger.Error(err))
	}
	defer fieldStatm.Close()

	var startId int64 = 0
	for {
		rows, err := keyStatm.Query(startId)
		if err != nil {
			panic(common.Logger.Error(err))
		}
		keyInfo := make([]*common.Key, 0, p.BatchCount)
		for rows.Next() {
			var key, keytype, conflictType string
			var id, source_len, target_len int64
			err = rows.Scan(&id, &key, &keytype, &conflictType, &source_len, &target_len)
			if err != nil {
				panic(common.Logger.Error(err))
			}
			oneKeyInfo := &common.Key{
				Key: []byte(key),
				Tp: common.NewKeyType(keytype),
				ConflictType: common.NewConflictType(conflictType),
				SourceAttr: common.Attribute{ItemCount: source_len},
				TargetAttr: common.Attribute{ItemCount: target_len},
			}
			// reject rows whose stored type/conflict no longer parses
			if oneKeyInfo.Tp == common.EndKeyType {
				panic(common.Logger.Errorf("invalid type from table %s: key=%s type=%s ", conflictKeyTableName, key, keytype))
			}
			if oneKeyInfo.ConflictType == common.EndConflict {
				panic(common.Logger.Errorf("invalid conflict_type from table %s: key=%s conflict_type=%s ", conflictKeyTableName, key, conflictType))
			}

			// string keys have no per-field conflicts; everything else may
			if oneKeyInfo.Tp != common.StringKeyType {
				oneKeyInfo.Field = make([]common.Field, 0, 10)
				rowsField, err := fieldStatm.Query(id)
				if err != nil {
					panic(common.Logger.Error(err))
				}
				for rowsField.Next() {
					var field, conflictType string
					err = rowsField.Scan(&field, &conflictType)
					if err != nil {
						panic(common.Logger.Error(err))
					}
					oneField := common.Field{
						Field: []byte(field),
						ConflictType: common.NewConflictType(conflictType),
					}
					if oneField.ConflictType == common.EndConflict {
						panic(common.Logger.Errorf("invalid conflict_type from table %s: field=%s type=%s ", conflictFieldTableName, field, conflictType))
					}
					oneKeyInfo.Field = append(oneKeyInfo.Field, oneField)
				}
				if err := rowsField.Err(); err != nil {
					panic(common.Logger.Error(err))
				}
				rowsField.Close()
			}
			keyInfo = append(keyInfo, oneKeyInfo)
			// advance the paging cursor to the highest id seen so far
			if startId < id {
				startId = id
			}
		} // rows.Next
		if err := rows.Err(); err != nil {
			panic(common.Logger.Error(err))
		}
		rows.Close()
		// finished: the previous round has no more conflict keys
		if len(keyInfo) == 0 {
			close(allKeys)
			break
		}
		p.IncrScanStat(len(keyInfo))
		allKeys <- keyInfo
	} // for{}
}
--------------------------------------------------------------------------------
/src/full_check/go.mod:
--------------------------------------------------------------------------------
1 | module full_check
2 |
3 | go 1.17
4 |
5 | require (
6 | github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575
7 | github.com/gomodule/redigo v1.8.9
8 | github.com/gugemichael/nimo4go v0.0.0-20210413043712-ccb2ff0d7b40
9 | github.com/jessevdk/go-flags v1.5.0
10 | github.com/jinzhu/copier v0.3.5
11 | github.com/mattn/go-sqlite3 v1.14.16
12 | github.com/najoast/redis-go-cluster v1.0.0
13 | github.com/stretchr/testify v1.8.1
14 | github.com/vinllen/redis-go-cluster v1.0.0
15 | )
16 |
17 | require (
18 | github.com/davecgh/go-spew v1.1.1 // indirect
19 | github.com/pmezard/go-difflib v1.0.0 // indirect
20 | golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 // indirect
21 | gopkg.in/yaml.v3 v3.0.1 // indirect
22 | )
23 |
--------------------------------------------------------------------------------
/src/full_check/go.sum:
--------------------------------------------------------------------------------
1 | github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=
2 | github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
5 | github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
6 | github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
7 | github.com/gugemichael/nimo4go v0.0.0-20210413043712-ccb2ff0d7b40 h1:6TWAiHVyKs75ZHEn7XtVv7SO7M4rHwvY/5Tf7xdJBkc=
8 | github.com/gugemichael/nimo4go v0.0.0-20210413043712-ccb2ff0d7b40/go.mod h1:ibO7uKpO8fOH/bKD4trmwm5tHhHKiAjC0u288Rd+GnI=
9 | github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
10 | github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
11 | github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
12 | github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
13 | github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
14 | github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
15 | github.com/najoast/redis-go-cluster v1.0.0 h1:GJhtiwitgaQ0Kc9ZcRE9FJCcu1GLCIIW7u7vpRrgE6k=
16 | github.com/najoast/redis-go-cluster v1.0.0/go.mod h1:lGMMsVLZW+0gAuA+oo1YrFTZjjaIhkmhR6cA77/etiw=
17 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
18 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
19 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
20 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
21 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
22 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
23 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
24 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
25 | github.com/vinllen/redis-go-cluster v1.0.0/go.mod h1:xig5hQAOZX1K+KNUVDqAbhTRzMTPcb257nJl7OCHrI4=
26 | golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc=
27 | golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
28 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
29 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
30 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
31 |
--------------------------------------------------------------------------------
/src/full_check/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "strconv"
7 | "strings"
8 |
9 | "full_check/configure"
10 | "full_check/full_check"
11 | "full_check/checker"
12 | "full_check/client"
13 | "full_check/common"
14 |
15 | "github.com/jessevdk/go-flags"
16 | "github.com/gugemichael/nimo4go"
17 | )
18 |
// VERSION is the release string printed by --version; "$" is a placeholder
// presumably replaced at build time (see build.sh) — TODO confirm.
var VERSION = "$"
20 |
21 | func main() {
22 | // parse conf.Opts
23 | args, err := flags.Parse(&conf.Opts)
24 |
25 | if conf.Opts.Version {
26 | fmt.Println(VERSION)
27 | os.Exit(0)
28 | }
29 |
30 | // 若err != nil, 会自动打印错误到 stderr
31 | if err != nil {
32 | if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
33 | os.Exit(0)
34 | } else {
35 | fmt.Fprintf(os.Stderr, "flag err %s\n", flagsErr)
36 | os.Exit(1)
37 | }
38 | }
39 |
40 | if conf.Opts.SourceAddr == "" || conf.Opts.TargetAddr == "" {
41 | fmt.Fprintf(os.Stderr, "-s, --source or -t, --target not specified\n")
42 | os.Exit(1)
43 | }
44 |
45 | if len(args) != 0 {
46 | fmt.Fprintf(os.Stderr, "unexpected args %+v", args)
47 | os.Exit(1)
48 | }
49 |
50 | // init log
51 | logLevel, err := common.HandleLogLevel(conf.Opts.LogLevel)
52 | if err != nil {
53 | fmt.Fprintln(os.Stderr, err)
54 | os.Exit(1)
55 | }
56 |
57 | nimo.Profiling(int(conf.Opts.SystemProfile))
58 |
59 | common.Logger, err = common.InitLog(conf.Opts.LogFile, logLevel)
60 | if err != nil {
61 | fmt.Fprintln(os.Stderr, "init log failed: ", err)
62 | os.Exit(1)
63 | }
64 | common.Logger.Info("init log success")
65 | defer common.Logger.Flush()
66 |
67 | compareCount, err := strconv.Atoi(conf.Opts.CompareTimes)
68 | if err != nil || compareCount < 1 {
69 | panic(common.Logger.Errorf("invalid option cmpcount %s, expect int >=1", conf.Opts.CompareTimes))
70 | }
71 | if conf.Opts.Interval < 0 {
72 | panic(common.Logger.Errorf("invalid option interval %d, expect int >=0", conf.Opts.Interval))
73 | }
74 | batchCount, err := strconv.Atoi(conf.Opts.BatchCount)
75 | if err != nil || batchCount < 1 || batchCount > 10000 {
76 | panic(common.Logger.Errorf("invalid option batchcount %s, expect int 1<=batchcount<=10000", conf.Opts.BatchCount))
77 | }
78 | parallel := conf.Opts.Parallel
79 | if parallel < 1 || parallel > 100 {
80 | panic(common.Logger.Errorf("invalid option parallel %d, expect 1<=parallel<=100", conf.Opts.Parallel))
81 | }
82 | qps := conf.Opts.Qps
83 | if qps < 1 || qps > 5000000 {
84 | panic(common.Logger.Errorf("invalid option qps %d, expect 1<=qps<=5000000", conf.Opts.Qps))
85 | }
86 | if conf.Opts.SourceAuthType != "auth" && conf.Opts.SourceAuthType != "adminauth" {
87 | panic(common.Logger.Errorf("invalid sourceauthtype %s, expect auth/adminauth", conf.Opts.SourceAuthType))
88 | }
89 | if conf.Opts.TargetAuthType != "auth" && conf.Opts.TargetAuthType != "adminauth" {
90 | panic(common.Logger.Errorf("invalid targetauthtype %s, expect auth/adminauth", conf.Opts.TargetAuthType))
91 | }
92 | if conf.Opts.CompareMode < full_check.FullValue || conf.Opts.CompareMode > full_check.FullValueWithOutline {
93 | panic(common.Logger.Errorf("invalid compare mode %d", conf.Opts.CompareMode))
94 | }
95 | if conf.Opts.BigKeyThreshold < 0 {
96 | panic(common.Logger.Errorf("invalid big key threshold: %d", conf.Opts.BigKeyThreshold))
97 | } else if conf.Opts.BigKeyThreshold == 0 {
98 | common.BigKeyThreshold = 16384
99 | } else {
100 | common.BigKeyThreshold = conf.Opts.BigKeyThreshold
101 | }
102 |
103 | sourceAddressList, err := client.HandleAddress(conf.Opts.SourceAddr, conf.Opts.SourcePassword, conf.Opts.SourceAuthType)
104 | if err != nil {
105 | panic(common.Logger.Errorf("source address[%v] illegal[%v]", conf.Opts.SourceAddr, err))
106 | } else if len(sourceAddressList) > 1 && conf.Opts.SourceDBType != 1 {
107 | panic(common.Logger.Errorf("looks like the source is cluster? please set sourcedbtype"))
108 | } else if len(sourceAddressList) == 0 {
109 | panic(common.Logger.Errorf("input source address is empty"))
110 | }
111 |
112 | targetAddressList, err := client.HandleAddress(conf.Opts.TargetAddr, conf.Opts.TargetPassword, conf.Opts.TargetAuthType)
113 | if err != nil {
114 | panic(common.Logger.Errorf("target address[%v] illegal[%v]", conf.Opts.TargetAddr, err))
115 | } else if len(targetAddressList) > 1 && conf.Opts.TargetDBType != 1 {
116 | panic(common.Logger.Errorf("looks like the target is cluster? please set targetdbtype"))
117 | } else if len(targetAddressList) == 0 {
118 | panic(common.Logger.Errorf("input target address is empty"))
119 | }
120 |
121 | // filter list
122 | var filterTree *common.Trie
123 | if len(conf.Opts.FilterList) != 0 {
124 | filterTree = common.NewTrie()
125 | filterList := strings.Split(conf.Opts.FilterList, "|")
126 | for _, filter := range filterList {
127 | if filter == "" {
128 | panic(common.Logger.Errorf("invalid input filter list: %v", filterList))
129 | }
130 | filterTree.Insert([]byte(filter))
131 | }
132 | common.Logger.Infof("filter list enabled: %v", filterList)
133 | }
134 |
135 | // remove result file if has
136 | if len(conf.Opts.ResultFile) > 0 {
137 | os.Remove(conf.Opts.ResultFile)
138 | }
139 |
140 | fullCheckParameter := checker.FullCheckParameter{
141 | SourceHost: client.RedisHost{
142 | Addr: sourceAddressList,
143 | Password: conf.Opts.SourcePassword,
144 | TimeoutMs: 0,
145 | Role: "source",
146 | Authtype: conf.Opts.SourceAuthType,
147 | DBType: conf.Opts.SourceDBType,
148 | DBFilterList: common.FilterDBList(conf.Opts.SourceDBFilterList),
149 | },
150 | TargetHost: client.RedisHost{
151 | Addr: targetAddressList,
152 | Password: conf.Opts.TargetPassword,
153 | TimeoutMs: 0,
154 | Role: "target",
155 | Authtype: conf.Opts.TargetAuthType,
156 | DBType: conf.Opts.TargetDBType,
157 | DBFilterList: common.FilterDBList(conf.Opts.TargetDBFilterList),
158 | },
159 | ResultDBFile: conf.Opts.ResultDBFile,
160 | CompareCount: compareCount,
161 | Interval: conf.Opts.Interval,
162 | BatchCount: batchCount,
163 | Parallel: parallel,
164 | FilterTree: filterTree,
165 | }
166 |
167 | common.Logger.Info("configuration: ", conf.Opts)
168 | common.Logger.Info("---------")
169 |
170 | fullCheck := full_check.NewFullCheck(fullCheckParameter, full_check.CheckType(conf.Opts.CompareMode))
171 | fullCheck.Start()
172 | }
173 |
--------------------------------------------------------------------------------
/src/full_check/metric/counter.go:
--------------------------------------------------------------------------------
1 | package metric
2 |
3 | import (
4 | "fmt"
5 | "sync/atomic"
6 | "full_check/common"
7 | )
8 |
// CounterStat is the JSON-serializable snapshot of an AtomicSpeedCounter.
type CounterStat struct {
	Total int64 `json:"total"` // cumulative count since start/reset
	Speed int64 `json:"speed"` // rate computed at the last Rotate()
}
13 |
// AtomicSpeedCounter tracks a cumulative total plus a rolling interval sum
// from which a per-period speed is derived on each Rotate().
// NOTE(review): total/intervalSum are updated with sync/atomic in Inc and
// Rotate but read plainly elsewhere — confirm single-reader assumptions.
type AtomicSpeedCounter struct {
	total int64 // cumulative count
	intervalSum int64 // count accumulated in the current interval
	lastSpeed int64 // speed computed by the most recent Rotate()
}
19 |
20 | func (p *AtomicSpeedCounter) Inc(i int) {
21 | atomic.AddInt64(&p.total, int64(i))
22 | atomic.AddInt64(&p.intervalSum, int64(i))
23 | }
24 |
25 | // return previous intervalSum
26 | func (p *AtomicSpeedCounter) Rotate() int64 {
27 | old := atomic.SwapInt64(&p.intervalSum, 0)
28 | p.lastSpeed = (old + common.StatRollFrequency - 1) / common.StatRollFrequency
29 |
30 | return old
31 | }
32 |
33 | func (p *AtomicSpeedCounter) Reset() {
34 | p.total = 0
35 | p.intervalSum = 0
36 | p.lastSpeed = 0
37 | }
38 |
39 | func (p *AtomicSpeedCounter) Total() int64 {
40 | return p.total
41 | }
42 |
43 | func (p *AtomicSpeedCounter) Speed() int64 {
44 | return p.lastSpeed
45 | }
46 |
47 | func (p *AtomicSpeedCounter) String() string {
48 | return fmt.Sprintf("total:%d,speed:%d", p.total, p.lastSpeed)
49 | }
50 |
51 | func (p *AtomicSpeedCounter) Json() *CounterStat {
52 | return &CounterStat{Total: p.total, Speed: p.lastSpeed}
53 | }
54 |
--------------------------------------------------------------------------------
/src/full_check/metric/metric.go:
--------------------------------------------------------------------------------
1 | package metric
2 |
// Metric is the periodic progress report emitted for one compare round,
// serialized to JSON for log/monitoring consumers.
type Metric struct {
	DateTime string `json:"datetime"` // human-readable timestamp
	Timestamp int64 `json:"timestamp"` // unix timestamp of the sample
	CompareTimes int `json:"comparetimes"` // which compare round this sample belongs to
	Id string `json:"id"`
	JobId string `json:"jobid"`
	TaskId string `json:"taskid"`
	Db int32 `json:"db"` // logical DB currently being checked
	DbKeys int64 `json:"dbkeys"` // total keys in that DB
	Process int64 `json:"process"` // scan progress
	OneCompareFinished bool `json:"has_finished"` // current round done
	AllFinished bool `json:"all_finished"` // every round done
	KeyScan *CounterStat `json:"key_scan"` // scan counter snapshot
	TotalConflict int64 `json:"total_conflict"` // key + field conflicts
	TotalKeyConflict int64 `json:"total_key_conflict"`
	TotalFieldConflict int64 `json:"total_field_conflict"`
	KeyMetric map[string]map[string]*CounterStat `json:"key_stat"` // key-type -> conflict-type -> stat
	FieldMetric map[string]map[string]*CounterStat `json:"field_stat"` // key-type -> conflict-type -> stat
}
22 |
// MetricItem is a single (key-type, conflict-type) counter entry in
// flattened, serializable form.
type MetricItem struct {
	Type string `json:"type"` // key type name
	Conflict string `json:"conflict"` // conflict type name
	Stat *CounterStat `json:"stat"` // counter snapshot
}
--------------------------------------------------------------------------------
/src/full_check/metric/stat.go:
--------------------------------------------------------------------------------
1 | package metric
2 |
3 | import (
4 | "full_check/common"
5 | )
6 |
// Stat aggregates the scan counter and the per-(key-type, conflict-type)
// conflict counters for one compare round.
type Stat struct {
	Scan AtomicSpeedCounter // keys scanned from the source
	ConflictField [common.EndKeyTypeIndex][common.EndConflict]AtomicSpeedCounter // field-level conflicts
	ConflictKey [common.EndKeyTypeIndex][common.EndConflict]AtomicSpeedCounter // key-level conflicts

	// running totals rolled up from the counters by Reset(false)
	TotalConflictFields int64
	TotalConflictKeys int64
}
15 |
16 | func (p *Stat) Rotate() {
17 | p.Scan.Rotate()
18 | for keyType := common.KeyTypeIndex(0); keyType < common.EndKeyTypeIndex; keyType++ {
19 | for conType := common.ConflictType(0); conType < common.EndConflict; conType++ {
20 | p.ConflictField[keyType][conType].Rotate()
21 | p.ConflictKey[keyType][conType].Rotate()
22 | }
23 | }
24 | }
25 |
26 | func (p *Stat) Reset(clear bool) {
27 | p.Scan.Reset()
28 | if clear {
29 | p.TotalConflictFields = 0
30 | p.TotalConflictKeys = 0
31 | return
32 | }
33 | for keyType := common.KeyTypeIndex(0); keyType < common.EndKeyTypeIndex; keyType++ {
34 | for conType := common.ConflictType(0); conType < common.EndConflict; conType++ {
35 | if conType < common.NoneConflict {
36 | keyConflict := p.ConflictKey[keyType][conType].Total()
37 | fieldConflict := p.ConflictField[keyType][conType].Total()
38 | if keyConflict != 0 {
39 | p.TotalConflictKeys += keyConflict
40 | common.Logger.Debugf("key conflict: keyType[%v] conType[%v]", keyType, conType)
41 | }
42 | if fieldConflict != 0 {
43 | p.TotalConflictFields += fieldConflict
44 | common.Logger.Debugf("field conflict: keyType[%v] conType[%v]", keyType, conType)
45 | }
46 | }
47 |
48 | p.ConflictField[keyType][conType].Reset()
49 | p.ConflictKey[keyType][conType].Reset()
50 | }
51 | }
52 | }
--------------------------------------------------------------------------------