├── LICENSE
├── README.md
├── bin
│   └── migrate
├── log
│   └── ms.log
├── migrate
└── src
    └── migrate.go

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.
      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# mongo-migrate Usage Guide

---

## Features
Migrates the data of one collection from one MongoDB cluster, replica set, or standalone instance to another cluster, replica set, or standalone instance.
If the source collection is sharded, the destination collection is sharded automatically, and chunks are split and randomly moved (moveChunk) according to the source collection's chunks.
If the source collection has indexes, the indexes are copied as well.
If the source is a replica set, the oplog is used for incremental migration.

## Parameters
    --src "ip:port"                 source address
    --srcDB "db name"               name of the database to migrate
    --srcColl "collection name"     name of the collection to migrate
    --srcUserName "user name"       source auth user name
    --srcPassWord "password"        source auth password
    --dest "ip:port"                destination address
    --destDB "db name"              destination database name
    --destColl "collection name"    destination collection name
    --destUserName "user name"      destination auth user name
    --destPassWord "password"       destination auth password
    --findAndInsertWorkerNum 10     number of concurrent read/write goroutines used while copying data

    --writeAck 1            same as the w parameter in other drivers
    --writeMode "Majority"  see the MongoDB documentation if needed
    --journal true          whether to wait for journaling
    --fsync false           whether to wait for fsync on each write

## Known bugs
1. Text indexes cannot be created correctly when copying indexes.

## Range migration
Range migration is meant for collections with large data volumes, and its use comes with several restrictions. Related parameters:

    --minKey                    lower bound of the range (inclusive)
    --maxKey                    upper bound of the range (exclusive)
    --keyType "int or string"   type of the key; only numbers and strings are supported

The range applies to the first key of the shard key used when running shardCollection.
Behavior of range migration:

* If the destination ns is already sharded, neither sharding **nor** chunk pre-allocation is performed.
* If an int range of 0 - 100 is specified, documents whose key is not of int type cannot be migrated.
* The int type covers both int64 and float64, corresponding to writing { key: 1 } and { key: NumberLong(1) } directly in MongoDB.

Extra parameter: --withOutKeyType string
It is normally combined with range migration. Typical scenario:
Given a collection where the first key of the shard key is numeric in 99% of the documents, first migrate that 99% with range migrations (0-100, 100-200, ...),
then run a non-range migration with --withOutKeyType "int" to migrate the remaining data; see the examples below.
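## Examples
A plain full migration might be invoked as follows. The addresses, database, and collection names here are placeholders; the flags are the ones documented above:

    ./bin/migrate --src "10.0.0.1:27017" --srcDB "orders" --srcColl "items" \
        --dest "10.0.0.2:27017" --destDB "orders" --destColl "items" \
        --findAndInsertWorkerNum 10 --writeAck 1 --journal true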
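For the two-step scenario described under range migration, the numeric 99% is moved first in ranges, and the rest is swept up with --withOutKeyType (again, addresses and names are placeholders):

    # step 1: migrate documents whose first shard-key value is an int in [0, 100)
    ./bin/migrate --src "10.0.0.1:27017" --dest "10.0.0.2:27017" \
        --srcDB "orders" --srcColl "items" --destDB "orders" --destColl "items" \
        --minKey 0 --maxKey 100 --keyType "int"

    # step 2: migrate the remaining documents whose key is not an int
    ./bin/migrate --src "10.0.0.1:27017" --dest "10.0.0.2:27017" \
        --srcDB "orders" --srcColl "items" --destDB "orders" --destColl "items" \
        --withOutKeyType "int"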
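The four write-concern flags map one-to-one onto the `Safe` struct of the mgo driver, which is how src/migrate.go applies them to its destination sessions. A minimal sketch of that mapping, using a placeholder address:

```go
package main

import (
	"log"

	"gopkg.in/mgo.v2"
)

func main() {
	// Placeholder address; the Safe fields mirror the CLI flags above.
	session, err := mgo.Dial("mongodb://10.0.0.2:27017")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
	session.EnsureSafe(&mgo.Safe{
		W:     1,          // --writeAck
		WMode: "majority", // --writeMode (takes precedence over W when set)
		J:     true,       // --journal
		FSync: false,      // --fsync
	})
}
```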
--------------------------------------------------------------------------------
/bin/migrate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xbsura/mongo-migrate/51596e2cfce55d438401c0fac1626f4d6447a748/bin/migrate
--------------------------------------------------------------------------------
/log/ms.log:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xbsura/mongo-migrate/51596e2cfce55d438401c0fac1626f4d6447a748/log/ms.log
--------------------------------------------------------------------------------
/migrate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xbsura/mongo-migrate/51596e2cfce55d438401c0fac1626f4d6447a748/migrate
--------------------------------------------------------------------------------
/src/migrate.go:
--------------------------------------------------------------------------------
package main

import (
	"flag"
	"io"
	"log"
	"math/rand"
	"os"
	"os/signal"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// GenMongoDBUri builds a mongodb uri from an address, user name and password.
func GenMongoDBUri(addr, userName, passWord string) string {
	var mongoDBUri string
	if userName == "" || passWord == "" {
		mongoDBUri = "mongodb://" + addr
	} else {
		mongoDBUri = "mongodb://" + userName + ":" + passWord + "@" + addr
	}
	return mongoDBUri
}

var DoShardCollection bool // if true, shard the dest ns and pre-split/move chunks on the dest mongos
var DoOplogSync bool       // if true, apply the oplog while copying data
var connTimeOut time.Duration

var chunkQueueLock sync.Mutex // a channel read has no timeout or non-blocking get, so take this lock and check for emptiness before reading

var logFile *os.File
var logger *log.Logger
var oplogSyncTs map[string]bson.MongoTimestamp // the ts each replica set has synced to while replaying the oplog

type InitCollection struct {
	src                string
	dest               string
	srcDB              string
	srcColl            string
	srcUserName        string
	srcPassWord        string
	destDB             string
	destColl           string
	destUserName       string
	destPassWord       string
	writeAck           int
	writeMode          string
	journal            bool
	fsync              bool
	srcClient          *mgo.Session
	destClient         *mgo.Session
	srcDBConn          *mgo.Database
	srcCollConn        *mgo.Collection
	destDBConn         *mgo.Database
	destCollConn       *mgo.Collection
	srcIsMongos        bool
	srcIsSharded       bool
	destIsMongos       bool
	srcOplogNodes      map[string]string
	srcShardKey        bson.D
	srcChunks          []bson.M
	srcBalancerStopped bool
	destBalancerStopped bool
	minKey             string
	maxKey             string
	keyType            string
	queryKey           string
	withOutKeyType     string
	skipPreAlloc       bool
	isTokumx           bool
	destShards         []string
}

func NewInitCollection(src, dest, srcDB, srcColl, srcUserName, srcPassWord, destDB, destColl, destUserName, destPassWord string, writeAck int, writeMode string, journal, fsync bool, minKey, maxKey, keyType, withOutKeyType string, isTokumx bool) *InitCollection {
	// named fields instead of a positional literal: the original positional
	// form silently swapped journal and fsync between the call site and the
	// struct
	initColl := &InitCollection{
		src: src, dest: dest,
		srcDB: srcDB, srcColl: srcColl, srcUserName: srcUserName, srcPassWord: srcPassWord,
		destDB: destDB, destColl: destColl, destUserName: destUserName, destPassWord: destPassWord,
		writeAck: writeAck, writeMode: writeMode, journal: journal, fsync: fsync,
		minKey: minKey, maxKey: maxKey, keyType: keyType, withOutKeyType: withOutKeyType,
		isTokumx: isTokumx,
	}
	initColl.srcOplogNodes = make(map[string]string)
	return initColl
}

// documentLegal reports whether a document (or the document embedded in an
// oplog entry) falls inside the configured key range and key-type filters.
func (initColl *InitCollection) documentLegal(result bson.M, resType string) bool {
	var isValueString, isValueFloat64 bool
	var valueString string
	var valueFloat64 float64

	oResult := result

	if resType == "oplog" {
		oResult, _ = result["o"].(bson.M)
	}

	if initColl.withOutKeyType != "" {
		queryKeyValue, isQueryKeyExist := oResult[initColl.queryKey]
		if !isQueryKeyExist {
			return true
		}
		queryKeyValueType := reflect.TypeOf(queryKeyValue).String()
		if queryKeyValueType == "int64" || queryKeyValueType == "float64" {
			queryKeyValueType = "int"
		}
		if strings.Contains(initColl.withOutKeyType, queryKeyValueType) {
			return false
		}
	}

	valueString, isValueString = oResult[initColl.queryKey].(string)
	if valueInt64, isValueInt64 := oResult[initColl.queryKey].(int64); isValueInt64 {
		valueFloat64 = float64(valueInt64)
		isValueFloat64 = true
	}
	if !isValueFloat64 {
		valueFloat64, isValueFloat64 = oResult[initColl.queryKey].(float64)
	}

	if initColl.keyType == "string" {
		if !isValueString {
			return false
		}
		if initColl.minKey != "" && valueString < initColl.minKey {
			return false
		}
		if initColl.maxKey != "" && valueString >= initColl.maxKey {
			return false
		}
	}

	if initColl.keyType == "int" {
		if !isValueFloat64 {
			return false
		}
		if initColl.minKey != "" {
			intMinKey, _ := strconv.Atoi(initColl.minKey)
			if valueFloat64 < float64(intMinKey) {
				return false
			}
		}
		if initColl.maxKey != "" {
			intMaxKey, _ := strconv.Atoi(initColl.maxKey)
			if valueFloat64 >= float64(intMaxKey) {
				return false
			}
		}
	}

	return true
}

func (initColl *InitCollection) InitConn() {
	srcMongoUri := GenMongoDBUri(initColl.src, initColl.srcUserName, initColl.srcPassWord)
	destMongoUri := GenMongoDBUri(initColl.dest, initColl.destUserName, initColl.destPassWord)

	var err error

	initColl.srcClient, err = mgo.DialWithTimeout(srcMongoUri, connTimeOut)
	if err != nil {
		logger.Panicln("connect src failed.")
	} else {
		logger.Println("connect src success.")
	}

	initColl.destClient, err = mgo.DialWithTimeout(destMongoUri, connTimeOut)
	if err != nil {
		logger.Panicln("connect dest failed.")
	} else {
		logger.Println("connect dest success.")
	}

	initColl.srcDBConn = initColl.srcClient.DB(initColl.srcDB)
	initColl.srcCollConn = initColl.srcDBConn.C(initColl.srcColl)

	initColl.destDBConn = initColl.destClient.DB(initColl.destDB)
	initColl.destCollConn = initColl.destDBConn.C(initColl.destColl)
}

func (initColl *InitCollection) GetSrcDestType() {
	command := bson.M{"isMaster": 1}
	result := bson.M{}
	initColl.srcDBConn.Run(command, &result)
	if result["msg"] == "isdbgrid" {
		initColl.srcIsMongos = true
		logger.Println("src is mongos.")
	} else {
		logger.Println("src is not mongos, may be mongod.")
	}
	initColl.destDBConn.Run(command, &result)
	if result["msg"] == "isdbgrid" {
		initColl.destIsMongos = true
		logger.Println("dest is mongos.")
		destShardsIter := initColl.destClient.DB("config").C("shards").Find(bson.M{}).Iter()
		for destShardsIter.Next(&result) {
			if destShardId, ok := result["_id"].(string); ok {
				initColl.destShards = append(initColl.destShards, destShardId)
			}
		}
		logger.Println("dest shards:", initColl.destShards)
	} else {
		logger.Println("dest is not mongos, may be mongod.")
	}
}

// shard the dest ns only if src and dest are BOTH mongos AND the src ns is sharded
func (initColl *InitCollection) ShouldDoShardCollection() {
	if initColl.srcIsMongos && initColl.destIsMongos {
		command := bson.M{"collStats": initColl.srcColl}
		result := bson.M{}
		initColl.srcDBConn.Run(command, &result)
		if srcIsSharded, _ := result["sharded"].(bool); srcIsSharded {
			DoShardCollection = true
			initColl.srcIsSharded = true
			query := bson.M{"_id": initColl.srcDB + "." + initColl.srcColl}
			var result bson.D
			initColl.srcClient.DB("config").C("collections").Find(query).One(&result)
			for _, doc := range result {
				if doc.Name == "key" {
					if key, ok := doc.Value.(bson.D); ok {
						initColl.srcShardKey = key
						logger.Println("src ns is sharded, and its shard key is", initColl.srcShardKey)
						break
					}
				}
			}
		} else {
			logger.Println("will not shard dest ns: src ns is not sharded.")
		}
	} else {
		DoShardCollection = false
		logger.Println("will not shard dest ns: src or dest is not mongos.")
	}
}

// select a node with no slaveDelay, preferring a secondary when possible
func (initColl *InitCollection) SelectOplogSyncNode(nodes string) string {
	var selectHost string
	var hosts string
	if strings.Contains(nodes, "/") {
		hosts = strings.Split(nodes, "/")[1]
	} else {
		hosts = nodes
	}

	var host string
	if strings.Contains(hosts, ",") {
		host = strings.Split(hosts, ",")[1]
	} else {
		host = hosts
	}

	mongoUri := GenMongoDBUri(host, initColl.srcUserName, initColl.srcPassWord)

	mongoClient, _ := mgo.Dial(mongoUri)

	var replConf, replStatus bson.M
	command := bson.M{"replSetGetStatus": 1}

	replConfMap := make(map[interface{}]bson.M)
	replStatusMap := make(map[interface{}]interface{})

	mongoClient.DB("local").C("system.replset").Find(bson.M{}).One(&replConf)
	mongoClient.DB("admin").Run(command, &replStatus)

	if confMembers, isConfMembersLegal := replConf["members"].([]interface{}); isConfMembersLegal {
		for _, confMember := range confMembers {
			if bsonConfMember, isBsonConfMember := confMember.(bson.M); isBsonConfMember {
				hostAndDelay := bson.M{"host": bsonConfMember["host"], "slaveDelay": bsonConfMember["slaveDelay"]}
				replConfMap[bsonConfMember["_id"]] = hostAndDelay
			}
		}
	}

	if statusMembers, isStatusMembersLegal := replStatus["members"].([]interface{}); isStatusMembersLegal {
		for _, statusMember := range statusMembers {
			if bsonStatusMember, isBsonStatusMember := statusMember.(bson.M); isBsonStatusMember {
				replStatusMap[bsonStatusMember["_id"]] = bsonStatusMember["state"]
			}
		}
	}

	logger.Println("replStatus:", replStatusMap)
	logger.Println("replConf:", replConfMap)

	// state 1 is PRIMARY, state 2 is SECONDARY: keep the latest non-delayed
	// member seen, and stop once a secondary has been examined
	for id, state := range replStatusMap {
		if state == 1 || state == 2 {
			if replConfMap[id]["slaveDelay"] == 0 {
				if host, ok := replConfMap[id]["host"].(string); ok {
					selectHost = host
				}
			}
			if state == 2 {
				break
			}
		}
	}

	logger.Println("oplog sync node selected:", selectHost)

	mongoClient.Close()

	return selectHost
}

// if at least one replica set serves the src ns, do oplog sync
func (initColl *InitCollection) ShouldDoOplogSync() {
	var query bson.M
	var result bson.M
	if initColl.srcIsMongos {
		if initColl.srcIsSharded {
			query = bson.M{}
		} else {
			databaseDB := initColl.srcClient.DB("config").C("databases")
			databaseDB.Find(bson.M{"_id": initColl.srcDB}).One(&result)
			primaryShard := result["primary"]
			query = bson.M{"_id": primaryShard}
		}
		logger.Println("oplog sync check for src ns uses query", query)
		shardsColl := initColl.srcClient.DB("config").C("shards")
		shards := shardsColl.Find(query).Iter()
		var valueId, valueHost string
		var ok bool
		for shards.Next(&result) {
			if valueHost, ok = result["host"].(string); ok {
				if strings.Contains(valueHost, "/") {
					DoOplogSync = true
				}
				if valueId, ok = result["_id"].(string); ok {
					initColl.srcOplogNodes[valueId] = initColl.SelectOplogSyncNode(valueHost)
				}
			}
		}
	} else {
		count, _ := initColl.srcClient.DB("local").C("system.replset").Find(bson.M{}).Count()
		if count != 0 {
			DoOplogSync = true
			initColl.srcOplogNodes["shard"] = initColl.SelectOplogSyncNode(initColl.src)
		} else {
			DoOplogSync = false
		}
	}

	logger.Println("src oplog nodes:", initColl.srcOplogNodes)

	if DoOplogSync {
		logger.Println("will do oplog sync, as at least one replica set is deployed.")
	} else {
		logger.Println("will not do oplog sync, as no replica set was found.")
	}
}

// when moving a chunk, pick a random dest shard name
func (initColl *InitCollection) GetRandomShard() string {
	var randomShardId string
	destShardsNum := len(initColl.destShards)
	randomNum := rand.Intn(destShardsNum)
	randomShardId = initColl.destShards[randomNum]
	return randomShardId
}

func (initColl *InitCollection) SetStepSign() {
	initColl.GetSrcDestType()
	initColl.ShouldDoShardCollection()
	initColl.ShouldDoOplogSync()
}

// shard the dest ns
func (initColl *InitCollection) ShardDestCollection() {
	destNs := initColl.destDB + "." + initColl.destColl
	logger.Println("start sharding dest collection")
	var result, command bson.D
	command = bson.D{{"enableSharding", initColl.destDB}}
	initColl.destClient.DB("admin").Run(command, &result)
	command = bson.D{{"shardCollection", destNs}, {"key", initColl.srcShardKey}}
	err := initColl.destClient.DB("admin").Run(command, &result)
	if err != nil {
		if strings.Contains(err.Error(), "already") {
			logger.Println("dest ns already sharded, skip pre-alloc...")
			initColl.skipPreAlloc = true
		} else {
			logger.Panicln("shard dest collection failed:", err)
		}
	}
}

// pre-split and move chunks
func (initColl *InitCollection) PreAllocChunks() {
	logger.Println("start pre-splitting and moving chunks")
	rand.Seed(time.Now().UnixNano())
	var result, chunk bson.M
	var command bson.D
	var randomShard string
	var chunkMin bson.M
	var isChunkLegal bool
	var err error
	destNs := initColl.destDB + "." + initColl.destColl
	srcNs := initColl.srcDB + "." + initColl.srcColl
	query := bson.M{"ns": srcNs}
	logger.Println("chunks query:", query)
	chunksColl := initColl.srcClient.DB("config").C("chunks")
	srcChunksIter := chunksColl.Find(query).Iter()
	var chunkSimple bson.M
	if initColl.withOutKeyType == "" {
		for srcChunksIter.Next(&chunk) {
			if !initColl.chunkRangeOk(chunk) {
				continue
			}
			if chunkMin, isChunkLegal = chunk["min"].(bson.M); isChunkLegal {
				if !initColl.skipPreAlloc {
					command = bson.D{{"split", destNs}, {"middle", chunkMin}}
					err = initColl.destClient.DB("admin").Run(command, &result)
					if err != nil {
						logger.Println("command is:", command)
						logger.Println("split chunk failed, err is:", err)
					} else {
						logger.Println("split chunk success")
					}
					randomShard = initColl.GetRandomShard()
					command = bson.D{{"moveChunk", destNs}, {"find", chunkMin}, {"to", randomShard}}
					err = initColl.destClient.DB("admin").Run(command, &result)
					if err != nil {
						logger.Println("move chunk to", randomShard, "failed, err is:", err)
					} else {
						logger.Println("move chunk to", randomShard, "success")
					}
				} else {
					logger.Println("dest ns already sharded, skip pre-alloc...")
				}
			}
			chunkSimple = bson.M{"min": chunk["min"], "max": chunk["max"]}
			initColl.srcChunks = append(initColl.srcChunks, chunkSimple)
		}
		logger.Println("pre-split and move chunks finished.")
	} else {
		var srcChunksSingle []bson.M
		chunkSimple = bson.M{}
		srcChunksSingle = append(srcChunksSingle, chunkSimple)
		logger.Println("srcChunksSingle:", srcChunksSingle)
		initColl.srcChunks = srcChunksSingle
	}
}

func (initColl *InitCollection) StopBalancer() {
	query := bson.M{"_id": "balancer"}
	updateDocument := bson.M{"$set": bson.M{"stopped": true}}
	var result bson.M
	var srcBalancerStopped, destBalancerStopped, ok bool
	if initColl.srcIsMongos {
		initColl.srcClient.DB("config").C("settings").Find(query).One(&result)
		if srcBalancerStopped, ok = result["stopped"].(bool); ok {
			initColl.srcBalancerStopped = srcBalancerStopped
		} else {
			initColl.srcBalancerStopped = false
		}
	}
	if initColl.destIsMongos {
		initColl.destClient.DB("config").C("settings").Find(query).One(&result)
		if destBalancerStopped, ok = result["stopped"].(bool); ok {
			initColl.destBalancerStopped = destBalancerStopped
		} else {
			initColl.destBalancerStopped = false
		}
	}

	// the original code logged "src" while updating the dest client and vice
	// versa; the log lines are paired with the matching client here
	logger.Println("stop src balancer...")
	initColl.srcClient.DB("config").C("settings").Update(query, updateDocument)

	logger.Println("stop dest balancer...")
	initColl.destClient.DB("config").C("settings").Update(query, updateDocument)

	logger.Println("src balancer originally stopped:", initColl.srcBalancerStopped)
	logger.Println("dest balancer originally stopped:", initColl.destBalancerStopped)
}

func (initColl *InitCollection) ResetBalancer() {
	query := bson.M{"_id": "balancer"}
	if initColl.srcIsMongos {
		srcBalancerDocument := bson.M{"stopped": initColl.srcBalancerStopped}
		initColl.srcClient.DB("config").C("settings").Update(query, srcBalancerDocument)
	}
	if initColl.destIsMongos {
		destBalancerDocument := bson.M{"stopped": initColl.destBalancerStopped}
		initColl.destClient.DB("config").C("settings").Update(query, destBalancerDocument)
	}
}

func (initColl *InitCollection) GetQueryKey() {
	if initColl.srcIsSharded {
		logger.Println("select the first key of the shard key as the query condition.")
		initColl.queryKey = initColl.srcShardKey[0].Name
	} else {
		initColl.queryKey = ""
	}
	logger.Println("query condition key is:", initColl.queryKey)
}

func (initColl *InitCollection) chunkRangeOk(chunk bson.M) bool {
	return true
}

func (initColl *InitCollection) Run() {
	logger.Println("pre-checking conn status.")
	initColl.InitConn()
	logger.Println("setting migrate steps.")
	initColl.SetStepSign()
	initColl.StopBalancer()
	initColl.ShardDestCollection()
	initColl.GetQueryKey()
	initColl.PreAllocChunks()
}

type CopyData struct {
	initColl   *InitCollection
	workerNum  int
	queryChunk []bson.M
}

func NewCopyData(initColl *InitCollection, findAndInsertWorkerNum int) *CopyData {
	return &CopyData{workerNum: findAndInsertWorkerNum, initColl: initColl}
}

// find-and-insert runs many goroutines; each goroutine copies one range of
// data, using the first key of the shard key as the query condition

// one goroutine: find and insert data
func (copyData *CopyData) RangeCopy(chunkQueue chan bson.M, ch chan int) {
	srcMongoUri := GenMongoDBUri(copyData.initColl.src, copyData.initColl.srcUserName, copyData.initColl.srcPassWord)
	destMongoUri := GenMongoDBUri(copyData.initColl.dest, copyData.initColl.destUserName, copyData.initColl.destPassWord)
	srcClient, _ := mgo.Dial(srcMongoUri)
	destClient, _ := mgo.Dial(destMongoUri)
	destClient.EnsureSafe(&mgo.Safe{W: copyData.initColl.writeAck, WMode: copyData.initColl.writeMode, FSync: copyData.initColl.fsync, J: copyData.initColl.journal})
	srcCollConn := srcClient.DB(copyData.initColl.srcDB).C(copyData.initColl.srcColl)
	destCollConn := destClient.DB(copyData.initColl.destDB).C(copyData.initColl.destColl)
	var query bson.M
	for {
		chunkQueueLock.Lock() // a channel read cannot time out or be non-blocking, so take the lock and check for emptiness first
		if len(chunkQueue) == 0 {
			chunkQueueLock.Unlock()
			break
		}
		query = <-chunkQueue
		chunkQueueLock.Unlock()
		logger.Println("copy data query:", query)
		documentsIter := srcCollConn.Find(query).Iter()
		var document bson.M

		for documentsIter.Next(&document) {
			if !copyData.initColl.documentLegal(document, "document") {
				continue
			}
			destCollConn.Insert(document)
		}
	}
	ch <- 1
}

// build the query queue and start the copy goroutines
func (copyData *CopyData) StartCopyData() {
	chunkQueue := make(chan bson.M, len(copyData.queryChunk))
	tmpChunkFilter := make(map[interface{}]bool) // avoid copying the same chunk twice (as _id is unique this would not cause errors, but it wastes time)
	for _, queryRange := range copyData.queryChunk {
		var query bson.M
		if copyData.initColl.withOutKeyType == "" {
			query = bson.M{copyData.initColl.queryKey: bson.M{"$gte": queryRange["min"], "$lt": queryRange["max"]}}
		} else {
			query = bson.M{}
		}
		logger.Println("chunk query:", query)
		if !tmpChunkFilter[queryRange["min"]] {
			chunkQueue <- query
			tmpChunkFilter[queryRange["min"]] = true
		}
	}

	chs := make([]chan int, copyData.workerNum)

	for i := 0; i < copyData.workerNum; i++ {
		chs[i] = make(chan int)
		go copyData.RangeCopy(chunkQueue, chs[i])
	}
	for {
		logger.Println("chunks copying or copied:", len(copyData.queryChunk)-len(chunkQueue), ", total chunk num is", len(copyData.queryChunk), ", progress is", (len(copyData.queryChunk)-len(chunkQueue))*100.0/len(copyData.queryChunk), "%")
		if len(chunkQueue) != 0 {
			time.Sleep(10 * time.Second)
		} else {
			logger.Println("all chunks are being copied; please wait until all copy goroutines finish.")
			break
		}
	}

	finishedGoroutineNum := 0

	for _, ch := range chs {
		<-ch
		finishedGoroutineNum++
		logger.Println("copy goroutines finished:", finishedGoroutineNum, ", total goroutine num is", copyData.workerNum, ", progress is", finishedGoroutineNum*100.0/copyData.workerNum, "%")
	}

	logger.Println("copy data finished.")
}

func (copyData *CopyData) GetQueryRange() {
	if copyData.initColl.srcIsSharded && copyData.initColl.withOutKeyType == "" {
		logger.Println("src chunks:", copyData.initColl.srcChunks)
		for _, chunk := range copyData.initColl.srcChunks {
			if minChunk, isMinChunkLegal := chunk["min"].(bson.M); isMinChunkLegal {
				if maxChunk, isMaxChunkLegal := chunk["max"].(bson.M); isMaxChunkLegal {
					minQueryKey := minChunk[copyData.initColl.queryKey]
					maxQueryKey := maxChunk[copyData.initColl.queryKey]
					copyData.queryChunk = append(copyData.queryChunk, bson.M{"min": minQueryKey, "max": maxQueryKey})
				}
			}
		}
	} else {
		copyData.queryChunk = append(copyData.queryChunk, bson.M{})
	}
	logger.Println("get query key ranges finished; copying with multiple connections will be faster.")
}

// *bug:* text index creation fails.
func (copyData *CopyData) BuildIndexes() {
	logger.Println("start building indexes")
	indexes, _ := copyData.initColl.srcCollConn.Indexes()
	var err error
	for _, index := range indexes {
		err = copyData.initColl.destCollConn.EnsureIndex(index)
		if err != nil {
			logger.Println("build index failed, please check it yourself. Failed index is:", index)
		} else {
			logger.Println("build index:", index, "success")
		}
	}
	logger.Println("build indexes finished.")
}

// find the smallest ts per replica set and save it (so the oplog can be replayed many times).
func (copyData *CopyData) SaveLastOpTs() {
	logger.Println("save the last ts in the oplog; it is used when syncing the oplog.")
	chs := make([]chan int, len(copyData.initColl.srcOplogNodes))
	i := 0
	for shard, oplogNode := range copyData.initColl.srcOplogNodes {
		chs[i] = make(chan int)
		go copyData.GetLastOpTs(chs[i], shard, oplogNode)
		i++
	}

	for _, ts := range chs {
		<-ts
	}

	for shardAndNode, ts := range oplogSyncTs {
		logger.Println("saved last op ts for", shardAndNode, "is:", ts>>32)
	}
}

func (copyData *CopyData) GetLastOpTs(ch chan int, shard string, node string) {
	mongoUri := GenMongoDBUri(node, copyData.initColl.srcUserName, copyData.initColl.srcPassWord)
	mongoClient, _ := mgo.Dial(mongoUri)
	var result bson.M
	mongoClient.DB("local").C("oplog.rs").Find(bson.M{}).Sort("-$natural").Limit(1).One(&result)
	if lastOpTs, ok := result["ts"].(bson.MongoTimestamp); ok {
		oplogSyncTs[shard+"/"+node] = lastOpTs
	}
	ch <- 1
}

func (copyData *CopyData) Run() {
	copyData.BuildIndexes()
	copyData.GetQueryRange()
	if !copyData.initColl.isTokumx {
		copyData.SaveLastOpTs()
	}
	copyData.StartCopyData()
}

func (copyData *CopyData) LoadProgress() {

}

func (copyData *CopyData) DumpProgress() {

}

func (copyData *CopyData) DumpFailDocuments() {

}

type OplogSync struct {
	initColl *InitCollection
}

func NewOplogSync(initColl *InitCollection) *OplogSync {
	return &OplogSync{initColl}
}

// ApplyOp replays one oplog entry against the dest collection.
func (oplogSync *OplogSync) ApplyOp(oplog bson.M) {
	op := oplog["op"]
	switch op {
	case "i":
		oplogSync.initColl.destCollConn.Insert(oplog["o"])
	case "u":
		oplogSync.initColl.destCollConn.Update(oplog["o2"], oplog["o"])
	case "d":
		oplogSync.initColl.destCollConn.Remove(oplog["o"])
	}
}

func (oplogSync *OplogSync) StartOplogSync(shard string, node string) {
	mongoUri := GenMongoDBUri(node, oplogSync.initColl.srcUserName, oplogSync.initColl.srcPassWord)
	mongoClient, _ := mgo.Dial(mongoUri)
	// apply the write concern to the dest session, where the ops are replayed
	// (the original applied it to the src client, which performs no writes)
	oplogSync.initColl.destClient.EnsureSafe(&mgo.Safe{W: oplogSync.initColl.writeAck, WMode: oplogSync.initColl.writeMode, FSync: oplogSync.initColl.fsync, J: oplogSync.initColl.journal})
	var result bson.M

	// ok to use OPLOG_REPLAY here now
	startLastOpTs := oplogSyncTs[shard+"/"+node]
	oplogDB := mongoClient.DB("local").C("oplog.rs")
	oplogQuery := bson.M{"ts": bson.M{"$gte": startLastOpTs}}
	oplogIter := oplogDB.Find(oplogQuery).LogReplay().Sort("$natural").Tail(-1)
	srcNs := oplogSync.initColl.srcDB + "." + oplogSync.initColl.srcColl
	for oplogIter.Next(&result) {
		if ts, ok := result["ts"].(bson.MongoTimestamp); ok {
			oplogSyncTs[shard+"/"+node] = ts
		}

		// skip ops caused by chunk migration inside the src cluster
		if result["fromMigrate"] == true {
			continue
		}

		if result["ns"] != srcNs {
			continue
		}

		if !oplogSync.initColl.documentLegal(result, "oplog") {
			continue
		}

		oplogSync.ApplyOp(result)
	}
}

func (oplogSync *OplogSync) Run() {
	isResetBalancer := false
	for shard, oplogNode := range oplogSync.initColl.srcOplogNodes {
		go oplogSync.StartOplogSync(shard, oplogNode)
	}
	for {
		shouldResetBalancer := true
		for shardAndNode, ts := range oplogSyncTs {
			node := strings.Split(shardAndNode, "/")[1]
			srcMongoUri := GenMongoDBUri(node, oplogSync.initColl.srcUserName, oplogSync.initColl.srcPassWord)
			mongoClient, _ := mgo.Dial(srcMongoUri)
			var latestOplog bson.M
			mongoClient.DB("local").C("oplog.rs").Find(bson.M{}).Sort("-$natural").Limit(1).One(&latestOplog)
			if latestOplogTs, ok := latestOplog["ts"].(bson.MongoTimestamp); ok {
				delay := latestOplogTs>>32 - ts>>32
				logger.Print("node:", shardAndNode, " delay is:", delay)
				if delay > 100 {
					shouldResetBalancer = false
				}
			}
			mongoClient.Close()
		}
		if shouldResetBalancer && !isResetBalancer {
			logger.Println("oplog sync almost finished, resetting balancer state...")
			oplogSync.initColl.ResetBalancer()
			isResetBalancer = true
		}
		time.Sleep(10 * time.Second)
	}
}

func (oplogSync *OplogSync) LoadProgress() {

}

func (oplogSync *OplogSync) DumpProgress() {

}

func cleanJob() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	for sig := range c {
		logger.Println("got ctrl-c, exiting...", sig)
		logger.Println("===================================end this job.==================================")
		os.Exit(0)
	}
}

func main() {
	var src, dest, srcDB, srcColl, destDB, destColl, srcUserName, srcPassWord, destUserName, destPassWord string
	var findAndInsertWorkerNum int

	var writeAck int
	var writeMode string
	var journal, fsync bool

	flag.StringVar(&src, "src", "127.0.0.1:27017", "src, ip:port")
	flag.StringVar(&dest, "dest", "127.0.0.1:27017", "dest, ip:port")

	flag.StringVar(&srcDB, "srcDB", "test", "db of src being migrated")
	flag.StringVar(&srcColl, "srcColl", "test", "collection of src being migrated")
	flag.StringVar(&srcUserName, "srcUserName", "", "src auth user name")
	flag.StringVar(&srcPassWord, "srcPassWord", "", "src auth password")

	flag.StringVar(&destDB, "destDB", "test", "db of dest being migrated")
	flag.StringVar(&destColl, "destColl", "test", "collection of dest being migrated")
	flag.StringVar(&destUserName, "destUserName", "", "dest auth user name")
	flag.StringVar(&destPassWord, "destPassWord", "", "dest auth password")

	flag.IntVar(&writeAck, "writeAck", 1, "write acknowledged")
	flag.StringVar(&writeMode, "writeMode", "", "write mode, e.g. Majority")
	flag.BoolVar(&journal, "journal", true, "whether to wait for journaling")
	flag.BoolVar(&fsync, "fsync", false, "whether to wait for fsync on each write")

	flag.IntVar(&findAndInsertWorkerNum, "findAndInsertWorkerNum", 10, "find and insert worker num")

	var argsMinKey, argsMaxKey, keyType string

	flag.StringVar(&argsMinKey, "minKey", "", "min query key")
	flag.StringVar(&argsMaxKey, "maxKey", "", "max query key")
	flag.StringVar(&keyType, "keyType", "", "min and max query key type")

	var withOutKeyType string

	flag.StringVar(&withOutKeyType, "withOutKeyType", "", "migrate only documents whose key is NOT of this type (used after range migration)")

	var isTokumx bool
	flag.BoolVar(&isTokumx, "isTokumx", false, "if src is tokumx, skip oplog replay.")

	flag.Parse()

	logFile, _ = os.OpenFile("log/ms.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	writers := []io.Writer{
		logFile,
		os.Stdout,
	}
	fileAndStdoutWriter := io.MultiWriter(writers...)
	logger = log.New(fileAndStdoutWriter, "\r\n", log.Ldate|log.Ltime|log.Lshortfile)
	connTimeOut = 5 * time.Second
	oplogSyncTs = make(map[string]bson.MongoTimestamp)

	go cleanJob()

	logger.Println("===================================start one new job.==================================")
	// init step
	logger.Println("start init collection")
	initColl := NewInitCollection(src, dest, srcDB, srcColl, srcUserName, srcPassWord, destDB, destColl, destUserName, destPassWord, writeAck, writeMode, journal, fsync, argsMinKey, argsMaxKey, keyType, withOutKeyType, isTokumx)
	initColl.Run()

	// copy data step
	logger.Println("start copy data")
	copyData := NewCopyData(initColl, findAndInsertWorkerNum)
	copyData.Run()

	// oplog sync step
	if !isTokumx {
		logger.Println("start sync oplog")
		oplogSync := NewOplogSync(initColl)
		oplogSync.Run()
	} else {
		logger.Println("skip sync oplog.")
	}
}
--------------------------------------------------------------------------------