├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── cmd ├── decode.go ├── dump.go ├── main.go ├── restore.go ├── sync.go └── utils.go └── wangdoujia_license /.gitignore: -------------------------------------------------------------------------------- 1 | *.rdb 2 | *.swp 3 | *.log 4 | *.tmp 5 | *.out 6 | /bin/* 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 reborndb 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: build 2 | 3 | build: 4 | @mkdir -p bin 5 | go build -o bin/redis-port ./cmd 6 | 7 | clean: 8 | rm -rf bin 9 | 10 | gotest: 11 | go test -cover -v ./... 
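# make (or make build) compiles bin/redis-port from ./cmd; make gotest runs the test suite with coverage; make clean removes bin/.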
12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | redis-port 2 | =========== 3 | 4 | parse a redis rdb file and sync data between redis master and slave 5 | 6 | * **DECODE** dumped payload to a human-readable format (JSON with base64-encoded keys and values) 7 | 8 | ```sh 9 | redis-port decode [--ncpu=N] [--input=INPUT] [--output=OUTPUT] 10 | ``` 11 | 12 | * **RESTORE** rdb file to target redis 13 | 14 | ```sh 15 | redis-port restore [--ncpu=N] [--input=INPUT] --target=TARGET [--extra] 16 | ``` 17 | 18 | * **DUMP** rdb file from master redis 19 | 20 | ```sh 21 | redis-port dump [--ncpu=N] --from=MASTER [--output=OUTPUT] [--extra] 22 | ``` 23 | 24 | * **SYNC** data from master to slave 25 | 26 | ```sh 27 | redis-port sync [--ncpu=N] --from=MASTER --target=TARGET [--sockfile=FILE [--filesize=SIZE]] [--filterdb=DB] 28 | ``` 29 | 30 | Options 31 | ------- 32 | + -n _N_, --ncpu=_N_ 33 | 34 | > set runtime.GOMAXPROCS to _N_ 35 | 36 | + -i _INPUT_, --input=_INPUT_ 37 | 38 | > use _INPUT_ as input file; if it is not given, redis-port reads from stdin ('/dev/stdin') 39 | 40 | + -o _OUTPUT_, --output=_OUTPUT_ 41 | 42 | > use _OUTPUT_ as output file; if it is not given, redis-port writes to stdout ('/dev/stdout') 43 | 44 | + -f _MASTER_, --from=_MASTER_ 45 | 46 | > specify the master redis 47 | 48 | + -t _TARGET_, --target=_TARGET_ 49 | 50 | > specify the slave redis (or target redis) 51 | 52 | + -e, --extra 53 | 54 | > also dump or restore the redis commands that follow the rdb payload (replication backlog) 55 | 56 | + --filterdb=DB 57 | 58 | > filter the specified db number, default is '*' 59 | 60 | Examples 61 | ------- 62 | 63 | * **DECODE** 64 | 65 | ```sh 66 | $ cat dump.rdb | ./redis-port decode 2>/dev/null 67 | {"db":0,"type":"string","expireat":0,"key":"a","key64":"YQ==","value64":"MTAwMDA="} 68 | {"db":0,"type":"string","expireat":0,"key":"b","key64":"Yg==","value64":"aGVsbG8ud29ybGQ="} 69 | {"db":0,"type":"hash","expireat":0,"key":"c","key64":"Yw==","field":"c1","field64":"YzE=","value64":"MTAw"} 70 | {"db":0,"type":"hash","expireat":0,"key":"c","key64":"Yw==","field":"c2","field64":"YzI=","value64":"dGVzdC5zdHJpbmc="} 71 | {"db":0,"type":"list","expireat":0,"key":"d","key64":"ZA==","index":0,"value64":"bDE="} 72 | {"db":0,"type":"list","expireat":0,"key":"d","key64":"ZA==","index":1,"value64":"bDI="} 73 | {"db":0,"type":"zset","expireat":0,"key":"e","key64":"ZQ==","member":"e1","member64":"ZTE=","score":1.000000} 74 | {"db":0,"type":"zset","expireat":0,"key":"e","key64":"ZQ==","member":"e2","member64":"ZTI=","score":2.000000} 75 | ... ... 76 | ``` 77 | 78 | * **RESTORE** 79 | 80 | ```sh 81 | $ ./redis-port restore -i dump.rdb -t 127.0.0.1:6379 -n 8 82 | 2014/10/28 15:08:26 [ncpu=8] restore from 'dump.rdb' to '127.0.0.1:6379' 83 | 2014/10/28 15:08:27 total = 280149161 - 14267777 [ 5%] 84 | 2014/10/28 15:08:28 total = 280149161 - 27325530 [ 9%] 85 | 2014/10/28 15:08:29 total = 280149161 - 40670677 [ 14%] 86 | ... ... 87 | 2014/10/28 15:08:47 total = 280149161 - 278070563 [ 99%] 88 | 2014/10/28 15:08:47 total = 280149161 - 280149161 [100%] 89 | 2014/10/28 15:08:47 done 90 | ``` 91 | 92 | * **DUMP** 93 | 94 | ```sh 95 | $ ./redis-port dump -f 127.0.0.1:6379 -o save.rdb 96 | 2014/10/28 15:12:05 [ncpu=1] dump from '127.0.0.1:6379' to 'save.rdb' 97 | 2014/10/28 15:12:06 - 98 | 2014/10/28 15:12:07 - 99 | ... ...
100 | 2014/10/28 15:12:10 total = 278110192 - 0 [ 0%] 101 | 2014/10/28 15:12:11 total = 278110192 - 278110192 [100%] 102 | 2014/10/28 15:12:11 done 103 | 104 | $ ./redis-port dump -f 127.0.0.1:6379 | tee save.rdb | ./redis-port decode -o save.log -n 8 2>/dev/null 105 | 2014/10/28 15:12:55 [ncpu=1] dump from '127.0.0.1:6379' to '/dev/stdout' 106 | 2014/10/28 15:12:56 - 107 | ... ... 108 | 2014/10/28 15:13:10 total = 278110192 - 264373070 [ 0%] 109 | 2014/10/28 15:13:11 total = 278110192 - 278110192 [100%] 110 | 2014/10/28 15:13:11 done 111 | ``` 112 | 113 | * **SYNC** 114 | 115 | ```sh 116 | $ ./redis-port sync -f 127.0.0.1:6379 -t 127.0.0.1:6380 -n 8 117 | 2014/10/28 15:15:41 [ncpu=8] sync from '127.0.0.1:6379' to '127.0.0.1:6380' 118 | 2014/10/28 15:15:42 - 119 | 2014/10/28 15:15:43 - 120 | 2014/10/28 15:15:44 - 121 | 2014/10/28 15:15:46 total = 278110192 - 9380927 [ 3%] 122 | 2014/10/28 15:15:47 total = 278110192 - 18605075 [ 6%] 123 | ... ... [ ] 124 | 2014/10/28 15:16:14 total = 278110192 - 269990892 [ 97%] 125 | 2014/10/28 15:16:15 total = 278110192 - 278110192 [100%] 126 | 2014/10/28 15:16:15 done 127 | 2014/10/28 15:16:17 pipe: send = 0 recv = 0 128 | 2014/10/28 15:16:18 pipe: send = 0 recv = 0 129 | ... ... 130 | ``` 131 | -------------------------------------------------------------------------------- /cmd/decode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Reborndb Org. All Rights Reserved. 2 | // Licensed under the MIT (MIT-LICENSE.txt) license. 3 | 4 | package main 5 | 6 | import ( 7 | "bufio" 8 | "bytes" 9 | "encoding/base64" 10 | "encoding/json" 11 | "fmt" 12 | "io" 13 | "os" 14 | "time" 15 | 16 | "github.com/reborndb/go/atomic2" 17 | "github.com/reborndb/go/io/ioutils" 18 | "github.com/reborndb/go/log" 19 | "github.com/reborndb/go/redis/rdb" 20 | ) 21 | 22 | type cmdDecode struct { 23 | nread, nsave, nobjs atomic2.Int64 24 | } 25 | 26 | func (cmd *cmdDecode) Main() { 27 | input, output := args.input, args.output 28 | if len(input) == 0 { 29 | input = "/dev/stdin" 30 | } 31 | if len(output) == 0 { 32 | output = "/dev/stdout" 33 | } 34 | 35 | log.Infof("decode from '%s' to '%s'\n", input, output) 36 | 37 | var readin io.ReadCloser 38 | var nsize int64 39 | if input != "/dev/stdin" { 40 | readin, nsize = openReadFile(input) 41 | defer readin.Close() 42 | } else { 43 | readin, nsize = os.Stdin, 0 44 | } 45 | 46 | var saveto io.WriteCloser 47 | if output != "/dev/stdout" { 48 | saveto = openWriteFile(output) 49 | defer saveto.Close() 50 | } else { 51 | saveto = os.Stdout 52 | } 53 | 54 | reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize) 55 | writer := bufio.NewWriterSize(ioutils.NewCountWriter(saveto, &cmd.nsave), WriterBufferSize) 56 | 57 | ipipe := newRDBLoader(reader, args.parallel*32) 58 | opipe := make(chan string, cap(ipipe)) 59 | 60 | go func() { 61 | defer close(opipe) 62 | group := make(chan int, args.parallel) 63 | for i := 0; i < cap(group); i++ { 64 | go func() { 65 | defer func() { 66 | group <- 0 67 | }() 68 | cmd.decoderMain(ipipe, opipe) 69 | }() 70 | } 71 | for i := 0; i < cap(group); i++ { 72 | <-group 73 | } 74 | }() 75 | 76 | wait := make(chan struct{}) 77 | go func() { 78 | defer close(wait) 79 | for s := range opipe { 80 | if _, err := writer.WriteString(s); err != nil { 81 | log.PanicError(err, "write string failed") 82 | } 83 | flushWriter(writer) 84 | } 85 | }() 86 | 87 | for done := false; !done; { 88 | select { 89 | case <-wait: 90 | done = true 91 | 
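// wake up at least once per second so progress counters are logged even while decoding is still in flight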
case <-time.After(time.Second): 92 | } 93 | n, w, o := cmd.nread.Get(), cmd.nsave.Get(), cmd.nobjs.Get() 94 | if nsize != 0 { 95 | p := 100 * n / nsize 96 | log.Infof("total = %d - %12d [%3d%%] write=%-12d objs=%d\n", nsize, n, p, w, o) 97 | } else { 98 | log.Infof("total = %12d write=%-12d objs=%d\n", n, w, o) 99 | } 100 | } 101 | log.Info("done") 102 | } 103 | 104 | func (cmd *cmdDecode) decoderMain(ipipe <-chan *rdb.BinEntry, opipe chan<- string) { 105 | toText := func(p []byte) string { 106 | var b bytes.Buffer 107 | for _, c := range p { 108 | switch { 109 | case c >= '#' && c <= '~': 110 | b.WriteByte(c) 111 | default: 112 | b.WriteByte('.') 113 | } 114 | } 115 | return b.String() 116 | } 117 | toBase64 := func(p []byte) string { 118 | return base64.StdEncoding.EncodeToString(p) 119 | } 120 | toJson := func(o interface{}) string { 121 | b, err := json.Marshal(o) 122 | if err != nil { 123 | log.PanicError(err, "encode to json failed") 124 | } 125 | return string(b) 126 | } 127 | for e := range ipipe { 128 | o, err := rdb.DecodeDump(e.Value) 129 | if err != nil { 130 | log.PanicError(err, "decode failed") 131 | } 132 | var b bytes.Buffer 133 | switch obj := o.(type) { 134 | default: 135 | log.Panicf("unknown object %v", o) 136 | case rdb.String: 137 | o := &struct { 138 | DB uint32 `json:"db"` 139 | Type string `json:"type"` 140 | ExpireAt uint64 `json:"expireat"` 141 | Key string `json:"key"` 142 | Key64 string `json:"key64"` 143 | Value64 string `json:"value64"` 144 | }{ 145 | e.DB, "string", e.ExpireAt, toText(e.Key), toBase64(e.Key), 146 | toBase64(obj), 147 | } 148 | fmt.Fprintf(&b, "%s\n", toJson(o)) 149 | case rdb.List: 150 | for i, ele := range obj { 151 | o := &struct { 152 | DB uint32 `json:"db"` 153 | Type string `json:"type"` 154 | ExpireAt uint64 `json:"expireat"` 155 | Key string `json:"key"` 156 | Key64 string `json:"key64"` 157 | Index int `json:"index"` 158 | Value64 string `json:"value64"` 159 | }{ 160 | e.DB, "list", e.ExpireAt, toText(e.Key), toBase64(e.Key), 161 | i, toBase64(ele), 162 | } 163 | fmt.Fprintf(&b, "%s\n", toJson(o)) 164 | } 165 | case rdb.Hash: 166 | for _, ele := range obj { 167 | o := &struct { 168 | DB uint32 `json:"db"` 169 | Type string `json:"type"` 170 | ExpireAt uint64 `json:"expireat"` 171 | Key string `json:"key"` 172 | Key64 string `json:"key64"` 173 | Field string `json:"field"` 174 | Field64 string `json:"field64"` 175 | Value64 string `json:"value64"` 176 | }{ 177 | e.DB, "hash", e.ExpireAt, toText(e.Key), toBase64(e.Key), 178 | toText(ele.Field), toBase64(ele.Field), toBase64(ele.Value), 179 | } 180 | fmt.Fprintf(&b, "%s\n", toJson(o)) 181 | } 182 | case rdb.Set: 183 | for _, mem := range obj { 184 | o := &struct { 185 | DB uint32 `json:"db"` 186 | Type string `json:"type"` 187 | ExpireAt uint64 `json:"expireat"` 188 | Key string `json:"key"` 189 | Key64 string `json:"key64"` 190 | Member string `json:"member"` 191 | Member64 string `json:"member64"` 192 | }{ 193 | e.DB, "set", e.ExpireAt, toText(e.Key), toBase64(e.Key), 194 | toText(mem), toBase64(mem), 195 | } 196 | fmt.Fprintf(&b, "%s\n", toJson(o)) 197 | } 198 | case rdb.ZSet: 199 | for _, ele := range obj { 200 | o := &struct { 201 | DB uint32 `json:"db"` 202 | Type string `json:"type"` 203 | ExpireAt uint64 `json:"expireat"` 204 | Key string `json:"key"` 205 | Key64 string `json:"key64"` 206 | Member string `json:"member"` 207 | Member64 string `json:"member64"` 208 | Score float64 `json:"score"` 209 | }{ 210 | e.DB, "zset", e.ExpireAt, toText(e.Key), toBase64(e.Key), 211 | 
toText(ele.Member), toBase64(ele.Member), ele.Score, 212 | } 213 | fmt.Fprintf(&b, "%s\n", toJson(o)) 214 | } 215 | } 216 | cmd.nobjs.Incr() 217 | opipe <- b.String() 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /cmd/dump.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Reborndb Org. All Rights Reserved. 2 | // Licensed under the MIT (MIT-LICENSE.txt) license. 3 | 4 | package main 5 | 6 | import ( 7 | "bufio" 8 | "io" 9 | "net" 10 | "os" 11 | "time" 12 | 13 | "github.com/reborndb/go/atomic2" 14 | "github.com/reborndb/go/io/ioutils" 15 | "github.com/reborndb/go/log" 16 | ) 17 | 18 | type cmdDump struct { 19 | ndump atomic2.Int64 20 | } 21 | 22 | func (cmd *cmdDump) Main() { 23 | from, output := args.from, args.output 24 | if len(from) == 0 { 25 | log.Panic("invalid argument: from") 26 | } 27 | if len(output) == 0 { 28 | output = "/dev/stdout" 29 | } 30 | 31 | log.Infof("dump from '%s' to '%s'\n", from, output) 32 | 33 | var dumpto io.WriteCloser 34 | if output != "/dev/stdout" { 35 | dumpto = openWriteFile(output) 36 | defer dumpto.Close() 37 | } else { 38 | dumpto = os.Stdout 39 | } 40 | 41 | master, nsize := cmd.SendCmd(from) 42 | defer master.Close() 43 | 44 | log.Infof("rdb file = %d\n", nsize) 45 | 46 | reader := bufio.NewReaderSize(master, ReaderBufferSize) 47 | writer := bufio.NewWriterSize(ioutils.NewCountWriter(dumpto, &cmd.ndump), WriterBufferSize) 48 | 49 | cmd.DumpRDBFile(reader, writer, nsize) 50 | 51 | if !args.extra { 52 | return 53 | } 54 | 55 | cmd.DumpCommand(reader, writer) 56 | } 57 | 58 | func (cmd *cmdDump) SendCmd(master string) (net.Conn, int64) { 59 | c, wait := openSyncConn(master, args.auth) 60 | for { 61 | select { 62 | case nsize := <-wait: 63 | if nsize == 0 { 64 | log.Info("+") 65 | } else { 66 | return c, nsize 67 | } 68 | case <-time.After(time.Second): 69 | log.Info("-") 70 | } 71 | } 72 | } 73 | 74 | func (cmd *cmdDump) DumpRDBFile(reader *bufio.Reader, writer *bufio.Writer, nsize int64) { 75 | var nread atomic2.Int64 76 | wait := make(chan struct{}) 77 | go func() { 78 | defer close(wait) 79 | p := make([]byte, WriterBufferSize) 80 | for nsize != nread.Get() { 81 | cnt := iocopy(reader, writer, p, int(nsize-nread.Get())) 82 | nread.Add(int64(cnt)) 83 | } 84 | flushWriter(writer) 85 | }() 86 | 87 | for done := false; !done; { 88 | select { 89 | case <-wait: 90 | done = true 91 | case <-time.After(time.Second): 92 | } 93 | n := nread.Get() 94 | p := 100 * n / nsize 95 | log.Infof("total = %d - %12d [%3d%%]\n", nsize, n, p) 96 | } 97 | log.Info("dump: rdb done") 98 | } 99 | 100 | func (cmd *cmdDump) DumpCommand(reader *bufio.Reader, writer *bufio.Writer) { 101 | go func() { 102 | p := make([]byte, ReaderBufferSize) 103 | for { 104 | iocopy(reader, writer, p, len(p)) 105 | flushWriter(writer) 106 | } 107 | }() 108 | 109 | for { 110 | time.Sleep(time.Second) 111 | log.Infof("dump: size = %d\n", cmd.ndump.Get()) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Reborndb Org. All Rights Reserved. 2 | // Licensed under the MIT (MIT-LICENSE.txt) license. 
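// main.go parses the command line with docopt, sets runtime.GOMAXPROCS and the parallel worker count, then dispatches to the decode/restore/dump/sync subcommands implemented in the other files of this package.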
3 | 4 | package main 5 | 6 | import ( 7 | "runtime" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | "github.com/docopt/docopt-go" 13 | "github.com/reborndb/go/bytesize" 14 | "github.com/reborndb/go/errors" 15 | "github.com/reborndb/go/log" 16 | ) 17 | 18 | var args struct { 19 | input string 20 | output string 21 | parallel int 22 | 23 | from string 24 | target string 25 | extra bool 26 | 27 | sockfile string 28 | filesize int64 29 | 30 | auth string 31 | 32 | shift time.Duration 33 | } 34 | 35 | const ( 36 | ReaderBufferSize = bytesize.MB * 32 37 | WriterBufferSize = bytesize.MB * 8 38 | ) 39 | 40 | func parseInt(s string, min, max int) (int, error) { 41 | n, err := strconv.Atoi(s) 42 | if err != nil { 43 | return 0, err 44 | } 45 | if n >= min && n <= max { 46 | return n, nil 47 | } 48 | return 0, errors.Errorf("out of range [%d,%d], got %d", min, max, n) 49 | } 50 | 51 | const ( 52 | MinDB = 0 53 | MaxDB = 1023 54 | ) 55 | 56 | var acceptDB = func(db uint32) bool { 57 | return db >= MinDB && db <= MaxDB 58 | } 59 | 60 | func main() { 61 | usage := ` 62 | Usage: 63 | redis-port decode [--ncpu=N] [--parallel=M] [--input=INPUT] [--output=OUTPUT] 64 | redis-port restore [--ncpu=N] [--parallel=M] [--input=INPUT] --target=TARGET [--extra] [--faketime=FAKETIME] [--filterdb=DB] 65 | redis-port dump [--ncpu=N] [--parallel=M] --from=MASTER [--output=OUTPUT] [--extra] [--password=PASSWORD] 66 | redis-port sync [--ncpu=N] [--parallel=M] --from=MASTER --target=TARGET [--sockfile=FILE [--filesize=SIZE]] [--filterdb=DB] [--password=PASSWORD] 67 | 68 | Options: 69 | -P PASSWORD, --password Set master's auth code. 70 | -n N, --ncpu=N Set runtime.GOMAXPROCS to N. 71 | -p M, --parallel=M Set the number of parallel routines to M. 72 | -i INPUT, --input=INPUT Set input file, default is stdin ('/dev/stdin'). 73 | -o OUTPUT, --output=OUTPUT Set output file, default is stdout ('/dev/stdout'). 74 | -f MASTER, --from=MASTER Set host:port of master redis. 75 | -t TARGET, --target=TARGET Set host:port of slave redis. 76 | --faketime=FAKETIME Set current system time to adjust key's expire time. 77 | --sockfile=FILE Use FILE as socket buffer, default is disabled. 78 | --filesize=SIZE Set FILE size, default value is 1gb. 79 | -e, --extra Set true to send/receive following redis commands, default is false. 80 | --filterdb=DB Filter db = DB, default is *.
81 | ` 82 | d, err := docopt.Parse(usage, nil, true, "", false) 83 | if err != nil { 84 | log.PanicError(err, "parse arguments failed") 85 | } 86 | 87 | if s, ok := d["--ncpu"].(string); ok && s != "" { 88 | n, err := parseInt(s, 1, 1024) 89 | if err != nil { 90 | log.PanicErrorf(err, "parse --ncpu failed") 91 | } 92 | runtime.GOMAXPROCS(n) 93 | } 94 | ncpu := runtime.GOMAXPROCS(0) 95 | 96 | if s, ok := d["--parallel"].(string); ok && s != "" { 97 | n, err := parseInt(s, 1, 1024) 98 | if err != nil { 99 | log.PanicErrorf(err, "parse --parallel failed") 100 | } 101 | args.parallel = n 102 | } 103 | if ncpu > args.parallel { 104 | args.parallel = ncpu 105 | } 106 | if args.parallel == 0 { 107 | args.parallel = 4 108 | } 109 | 110 | args.input, _ = d["--input"].(string) 111 | args.output, _ = d["--output"].(string) 112 | 113 | args.target, _ = d["--target"].(string) 114 | args.from, _ = d["--from"].(string) 115 | 116 | args.extra, _ = d["--extra"].(bool) 117 | args.sockfile, _ = d["--sockfile"].(string) 118 | 119 | args.auth, _ = d["--password"].(string) 120 | 121 | if s, ok := d["--faketime"].(string); ok && s != "" { 122 | switch s[0] { 123 | case '-', '+': 124 | d, err := time.ParseDuration(strings.ToLower(s)) 125 | if err != nil { 126 | log.PanicError(err, "parse --faketime failed") 127 | } 128 | args.shift = d 129 | case '@': 130 | n, err := strconv.ParseInt(s[1:], 10, 64) 131 | if err != nil { 132 | log.PanicError(err, "parse --faketime failed") 133 | } 134 | args.shift = time.Duration(n*int64(time.Millisecond) - time.Now().UnixNano()) 135 | default: 136 | t, err := time.Parse("2006-01-02 15:04:05", s) 137 | if err != nil { 138 | log.PanicError(err, "parse --faketime failed") 139 | } 140 | args.shift = time.Duration(t.UnixNano() - time.Now().UnixNano()) 141 | } 142 | } 143 | 144 | if s, ok := d["--filterdb"].(string); ok && s != "" && s != "*" { 145 | n, err := parseInt(s, MinDB, MaxDB) 146 | if err != nil { 147 | log.PanicError(err, "parse --filterdb failed") 148 | } 149 | u := uint32(n) 150 | acceptDB = func(db uint32) bool { 151 | return db == u 152 | } 153 | } 154 | 155 | if s, ok := d["--filesize"].(string); ok && s != "" { 156 | if len(args.sockfile) == 0 { 157 | log.Panic("please specify --sockfile first") 158 | } 159 | n, err := bytesize.Parse(s) 160 | if err != nil { 161 | log.PanicError(err, "parse --filesize failed") 162 | } 163 | if n <= 0 { 164 | log.Panicf("parse --filesize = %d, invalid number", n) 165 | } 166 | args.filesize = n 167 | } else { 168 | args.filesize = bytesize.GB 169 | } 170 | 171 | log.Infof("set ncpu = %d, parallel = %d\n", ncpu, args.parallel) 172 | 173 | switch { 174 | case d["decode"].(bool): 175 | new(cmdDecode).Main() 176 | case d["restore"].(bool): 177 | new(cmdRestore).Main() 178 | case d["dump"].(bool): 179 | new(cmdDump).Main() 180 | case d["sync"].(bool): 181 | new(cmdSync).Main() 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /cmd/restore.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Reborndb Org. All Rights Reserved. 2 | // Licensed under the MIT (MIT-LICENSE.txt) license. 
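// restore.go streams an rdb file into the target redis, replaying each accepted entry via restoreRdbEntry (see utils.go); with --extra it then keeps forwarding the commands that follow the rdb payload.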
3 | 4 | package main 5 | 6 | import ( 7 | "bufio" 8 | "io" 9 | "os" 10 | "time" 11 | 12 | "github.com/reborndb/go/atomic2" 13 | "github.com/reborndb/go/io/ioutils" 14 | "github.com/reborndb/go/log" 15 | redis "github.com/reborndb/go/redis/resp" 16 | ) 17 | 18 | type cmdRestore struct { 19 | nread, nobjs atomic2.Int64 20 | } 21 | 22 | func (cmd *cmdRestore) Main() { 23 | input, target := args.input, args.target 24 | if len(target) == 0 { 25 | log.Panic("invalid argument: target") 26 | } 27 | if len(input) == 0 { 28 | input = "/dev/stdin" 29 | } 30 | 31 | log.Infof("restore from '%s' to '%s'\n", input, target) 32 | 33 | var readin io.ReadCloser 34 | var nsize int64 35 | if input != "/dev/stdin" { 36 | readin, nsize = openReadFile(input) 37 | defer readin.Close() 38 | } else { 39 | readin, nsize = os.Stdin, 0 40 | } 41 | 42 | reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize) 43 | 44 | cmd.RestoreRDBFile(reader, target, nsize) 45 | 46 | if !args.extra { 47 | return 48 | } 49 | 50 | if nsize != 0 && nsize == cmd.nread.Get() { 51 | return 52 | } 53 | 54 | cmd.RestoreCommand(reader, target, nsize) 55 | } 56 | 57 | func (cmd *cmdRestore) RestoreRDBFile(reader *bufio.Reader, target string, nsize int64) { 58 | pipe := newRDBLoader(reader, args.parallel*32) 59 | wait := make(chan struct{}) 60 | go func() { 61 | defer close(wait) 62 | group := make(chan int, args.parallel) 63 | for i := 0; i < cap(group); i++ { 64 | go func() { 65 | defer func() { 66 | group <- 0 67 | }() 68 | c := openRedisConn(target) 69 | defer c.Close() 70 | var lastdb uint32 = 0 71 | for e := range pipe { 72 | if !acceptDB(e.DB) { 73 | continue 74 | } 75 | if e.DB != lastdb { 76 | lastdb = e.DB 77 | selectDB(c, lastdb) 78 | } 79 | restoreRdbEntry(c, e) 80 | cmd.nobjs.Incr() 81 | } 82 | }() 83 | } 84 | for i := 0; i < cap(group); i++ { 85 | <-group 86 | } 87 | }() 88 | 89 | for done := false; !done; { 90 | select { 91 | case <-wait: 92 | done = true 93 | case <-time.After(time.Second): 94 | } 95 | n, o := cmd.nread.Get(), cmd.nobjs.Get() 96 | if nsize != 0 { 97 | p := 100 * n / nsize 98 | log.Infof("total = %d - %12d [%3d%%] objs=%d\n", nsize, n, p, o) 99 | } else { 100 | log.Infof("total = %12d objs=%d\n", n, o) 101 | } 102 | } 103 | log.Info("restore: rdb done") 104 | } 105 | 106 | func (cmd *cmdRestore) RestoreCommand(reader *bufio.Reader, slave string, nsize int64) { 107 | var forward, nbypass atomic2.Int64 108 | c := openNetConn(slave) 109 | defer c.Close() 110 | 111 | writer := bufio.NewWriterSize(c, WriterBufferSize) 112 | defer flushWriter(writer) 113 | 114 | discard := bufio.NewReaderSize(c, ReaderBufferSize) 115 | 116 | go func() { 117 | var bypass bool = false 118 | for { 119 | resp := redis.MustDecode(reader) 120 | if cmd, args, err := redis.ParseArgs(resp); err != nil { 121 | log.PanicError(err, "parse command arguments failed") 122 | } else if cmd != "ping" { 123 | if cmd == "select" { 124 | if len(args) != 1 { 125 | log.Panicf("select command len(args) = %d", len(args)) 126 | } 127 | s := string(args[0]) 128 | n, err := parseInt(s, MinDB, MaxDB) 129 | if err != nil { 130 | log.PanicErrorf(err, "parse db = %s failed", s) 131 | } 132 | bypass = !acceptDB(uint32(n)) 133 | } 134 | if bypass { 135 | nbypass.Incr() 136 | continue 137 | } 138 | } 139 | redis.MustEncode(writer, resp) 140 | flushWriter(writer) 141 | forward.Incr() 142 | redis.MustDecode(discard) 143 | } 144 | }() 145 | 146 | for { 147 | lastForward := forward.Get() 148 | lastByPass := nbypass.Get() 149 | 
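// sleep one second, then report how many commands were forwarded or bypassed during that interval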
time.Sleep(time.Second) 150 | log.Infof("restore: +forward=%-6d +bypass=%-6d\n", forward.Get()-lastForward, nbypass.Get()-lastByPass) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /cmd/sync.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Reborndb Org. All Rights Reserved. 2 | // Licensed under the MIT (MIT-LICENSE.txt) license. 3 | 4 | package main 5 | 6 | import ( 7 | "bufio" 8 | "io" 9 | "io/ioutil" 10 | "net" 11 | "os" 12 | "sync" 13 | "time" 14 | 15 | "github.com/reborndb/go/atomic2" 16 | "github.com/reborndb/go/io/ioutils" 17 | "github.com/reborndb/go/io/pipe" 18 | "github.com/reborndb/go/log" 19 | redis "github.com/reborndb/go/redis/resp" 20 | ) 21 | 22 | type cmdSync struct { 23 | nread, nrecv, nobjs atomic2.Int64 24 | } 25 | 26 | func (cmd *cmdSync) Main() { 27 | from, target := args.from, args.target 28 | if len(from) == 0 { 29 | log.Panic("invalid argument: from") 30 | } 31 | if len(target) == 0 { 32 | log.Panic("invalid argument: target") 33 | } 34 | 35 | log.Infof("sync from '%s' to '%s'\n", from, target) 36 | 37 | var sockfile *os.File 38 | if len(args.sockfile) != 0 { 39 | f, err := pipe.OpenFile(args.sockfile, false) 40 | if err != nil { 41 | log.PanicError(err, "open sockbuff file failed") 42 | } 43 | sockfile = f 44 | } 45 | 46 | master, nsize := cmd.SendCmd(from) 47 | defer master.Close() 48 | 49 | log.Infof("rdb file = %d\n", nsize) 50 | 51 | var input io.Reader 52 | if sockfile != nil { 53 | r, w := pipe.PipeFile(ReaderBufferSize, int(args.filesize), sockfile) 54 | defer r.Close() 55 | go func() { 56 | defer w.Close() 57 | p := make([]byte, ReaderBufferSize) 58 | for { 59 | iocopy(master, w, p, len(p)) 60 | } 61 | }() 62 | input = r 63 | } else { 64 | input = master 65 | } 66 | 67 | reader := bufio.NewReaderSize(ioutils.NewCountReader(input, &cmd.nread), ReaderBufferSize) 68 | 69 | cmd.SyncRDBFile(reader, target, nsize) 70 | cmd.SyncCommand(reader, target) 71 | } 72 | 73 | func (cmd *cmdSync) SendCmd(master string) (net.Conn, int64) { 74 | c, wait := openSyncConn(master, args.auth) 75 | for { 76 | select { 77 | case nsize := <-wait: 78 | if nsize == 0 { 79 | log.Info("+") 80 | } else { 81 | return c, nsize 82 | } 83 | case <-time.After(time.Second): 84 | log.Info("-") 85 | } 86 | } 87 | } 88 | 89 | func (cmd *cmdSync) SyncRDBFile(reader *bufio.Reader, slave string, nsize int64) { 90 | pipe := newRDBLoader(reader, args.parallel*32) 91 | wait := make(chan struct{}) 92 | go func() { 93 | defer close(wait) 94 | group := make(chan int, args.parallel) 95 | for i := 0; i < cap(group); i++ { 96 | go func() { 97 | defer func() { 98 | group <- 0 99 | }() 100 | c := openRedisConn(slave) 101 | defer c.Close() 102 | var lastdb uint32 = 0 103 | for e := range pipe { 104 | if !acceptDB(e.DB) { 105 | continue 106 | } 107 | if e.DB != lastdb { 108 | lastdb = e.DB 109 | selectDB(c, lastdb) 110 | } 111 | restoreRdbEntry(c, e) 112 | cmd.nobjs.Incr() 113 | } 114 | }() 115 | } 116 | for i := 0; i < cap(group); i++ { 117 | <-group 118 | } 119 | }() 120 | 121 | for done := false; !done; { 122 | select { 123 | case <-wait: 124 | done = true 125 | case <-time.After(time.Second): 126 | } 127 | n, o := cmd.nread.Get(), cmd.nobjs.Get() 128 | p := 100 * n / nsize 129 | log.Infof("total=%d - %12d [%3d%%] objs=%d\n", nsize, n, p, o) 130 | } 131 | log.Info("sync rdb done") 132 | } 133 | 134 | func (cmd *cmdSync) SyncCommand(reader *bufio.Reader, slave string) { 135 | var forward, nbypass 
atomic2.Int64 136 | c := openNetConn(slave) 137 | defer c.Close() 138 | 139 | writer := bufio.NewWriterSize(c, WriterBufferSize) 140 | defer flushWriter(writer) 141 | 142 | go func() { 143 | p := make([]byte, ReaderBufferSize) 144 | for { 145 | cnt := iocopy(c, ioutil.Discard, p, len(p)) 146 | cmd.nrecv.Add(int64(cnt)) 147 | } 148 | }() 149 | 150 | var mu sync.Mutex 151 | go func() { 152 | for { 153 | time.Sleep(time.Second) 154 | mu.Lock() 155 | flushWriter(writer) 156 | mu.Unlock() 157 | } 158 | }() 159 | 160 | go func() { 161 | var bypass bool = false 162 | for { 163 | resp := redis.MustDecode(reader) 164 | if cmd, args, err := redis.ParseArgs(resp); err != nil { 165 | log.PanicError(err, "parse command arguments failed") 166 | } else if cmd != "ping" { 167 | if cmd == "select" { 168 | if len(args) != 1 { 169 | log.Panicf("select command len(args) = %d", len(args)) 170 | } 171 | s := string(args[0]) 172 | n, err := parseInt(s, MinDB, MaxDB) 173 | if err != nil { 174 | log.PanicErrorf(err, "parse db = %s failed", s) 175 | } 176 | bypass = !acceptDB(uint32(n)) 177 | } 178 | if bypass { 179 | nbypass.Incr() 180 | continue 181 | } 182 | } 183 | mu.Lock() 184 | redis.MustEncode(writer, resp) 185 | mu.Unlock() 186 | forward.Incr() 187 | } 188 | }() 189 | 190 | for { 191 | lastForward := forward.Get() 192 | lastByPass := nbypass.Get() 193 | lastRead := cmd.nread.Get() 194 | lastRecv := cmd.nrecv.Get() 195 | time.Sleep(time.Second) 196 | log.Infof("sync: +forward=%-6d +bypass=%-6d +read=%-9d +recv=%-9d\n", forward.Get()-lastForward, nbypass.Get()-lastByPass, cmd.nread.Get()-lastRead, cmd.nrecv.Get()-lastRecv) 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /cmd/utils.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Reborndb Org. All Rights Reserved. 2 | // Licensed under the MIT (MIT-LICENSE.txt) license. 
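// utils.go collects the shared helpers: connection and file openers, the SYNC handshake (openSyncConn), the selectDB/restoreRdbEntry wrappers, buffered copy and flush utilities, and the rdb entry loader (newRDBLoader).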
3 | 4 | package main 5 | 6 | import ( 7 | "bufio" 8 | "fmt" 9 | "io" 10 | "net" 11 | "os" 12 | "strconv" 13 | "strings" 14 | "time" 15 | 16 | redigo "github.com/garyburd/redigo/redis" 17 | "github.com/reborndb/go/io/ioutils" 18 | "github.com/reborndb/go/log" 19 | "github.com/reborndb/go/redis/rdb" 20 | ) 21 | 22 | func openRedisConn(target string) redigo.Conn { 23 | return redigo.NewConn(openNetConn(target), 0, 0) 24 | } 25 | 26 | func openNetConn(target string) net.Conn { 27 | c, err := net.Dial("tcp", target) 28 | if err != nil { 29 | log.PanicErrorf(err, "cannot connect to '%s'", target) 30 | } 31 | return c 32 | } 33 | 34 | func openReadFile(name string) (*os.File, int64) { 35 | f, err := os.Open(name) 36 | if err != nil { 37 | log.PanicErrorf(err, "cannot open file-reader '%s'", name) 38 | } 39 | s, err := f.Stat() 40 | if err != nil { 41 | log.PanicErrorf(err, "cannot stat file-reader '%s'", name) 42 | } 43 | return f, s.Size() 44 | } 45 | 46 | func openWriteFile(name string) *os.File { 47 | f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) 48 | if err != nil { 49 | log.PanicErrorf(err, "cannot open file-writer '%s'", name) 50 | } 51 | return f 52 | } 53 | 54 | func openSyncConn(target string, authCode string) (net.Conn, chan int64) { 55 | c := openNetConn(target) 56 | 57 | // send auth to master 58 | if len(authCode) > 0 { 59 | cmd := fmt.Sprintf("*2\r\n$4\r\nauth\r\n$%d\r\n%s\r\n", len(authCode), authCode) 60 | if _, err := ioutils.WriteFull(c, []byte(cmd)); err != nil { 61 | log.PanicError(err, "write auth command failed") 62 | } 63 | 64 | resp := make([]byte, 5) 65 | if _, err := io.ReadFull(c, resp); err != nil { 66 | log.PanicError(err, "read auth response failed") 67 | } 68 | 69 | if string(resp) != "+OK\r\n" { 70 | log.Panic("auth failed") 71 | } 72 | 73 | } 74 | 75 | if _, err := ioutils.WriteFull(c, []byte("*1\r\n$4\r\nsync\r\n")); err != nil { 76 | log.PanicError(err, "write sync command failed") 77 | } 78 | size := make(chan int64) 79 | go func() { 80 | var rsp string 81 | for { 82 | b := []byte{0} 83 | if _, err := c.Read(b); err != nil { 84 | log.PanicErrorf(err, "read sync response = '%s'", rsp) 85 | } 86 | if len(rsp) == 0 && b[0] == '\n' { 87 | size <- 0 88 | continue 89 | } 90 | rsp += string(b) 91 | if strings.HasSuffix(rsp, "\r\n") { 92 | break 93 | } 94 | } 95 | if rsp[0] != '$' { 96 | log.Panicf("invalid sync response, rsp = '%s'", rsp) 97 | } 98 | n, err := strconv.Atoi(rsp[1 : len(rsp)-2]) 99 | if err != nil || n <= 0 { 100 | log.PanicErrorf(err, "invalid sync response = '%s', n = %d", rsp, n) 101 | } 102 | size <- int64(n) 103 | }() 104 | return c, size 105 | } 106 | 107 | func selectDB(c redigo.Conn, db uint32) { 108 | s, err := redigo.String(c.Do("select", db)) 109 | if err != nil { 110 | log.PanicError(err, "select command error") 111 | } 112 | if s != "OK" { 113 | log.Panicf("select command response = '%s', should be 'OK'", s) 114 | } 115 | } 116 | 117 | func restoreRdbEntry(c redigo.Conn, e *rdb.BinEntry) { 118 | var ttlms uint64 119 | if e.ExpireAt != 0 { 120 | now := uint64(time.Now().Add(args.shift).UnixNano()) 121 | now /= uint64(time.Millisecond) 122 | if now >= e.ExpireAt { 123 | ttlms = 1 124 | } else { 125 | ttlms = e.ExpireAt - now 126 | } 127 | } 128 | s, err := redigo.String(c.Do("slotsrestore", e.Key, ttlms, e.Value)) 129 | if err != nil { 130 | log.PanicError(err, "restore command error") 131 | } 132 | if s != "OK" { 133 | log.Panicf("restore command response = '%s', should be 'OK'", s) 134 | } 135 | } 136 | 137 | func 
iocopy(r io.Reader, w io.Writer, p []byte, max int) int { 138 | if max <= 0 || len(p) == 0 { 139 | log.Panicf("invalid max = %d, len(p) = %d", max, len(p)) 140 | } 141 | if len(p) > max { 142 | p = p[:max] 143 | } 144 | if n, err := r.Read(p); err != nil { 145 | log.PanicError(err, "read error") 146 | } else { 147 | p = p[:n] 148 | } 149 | if _, err := ioutils.WriteFull(w, p); err != nil { 150 | log.PanicError(err, "write full error") 151 | } 152 | return len(p) 153 | } 154 | 155 | func flushWriter(w *bufio.Writer) { 156 | if err := w.Flush(); err != nil { 157 | log.PanicError(err, "flush error") 158 | } 159 | } 160 | 161 | func newRDBLoader(reader *bufio.Reader, size int) chan *rdb.BinEntry { 162 | pipe := make(chan *rdb.BinEntry, size) 163 | go func() { 164 | defer close(pipe) 165 | l := rdb.NewLoader(reader) 166 | if err := l.Header(); err != nil { 167 | log.PanicError(err, "parse rdb header error") 168 | } 169 | for { 170 | if entry, err := l.NextBinEntry(); err != nil { 171 | log.PanicError(err, "parse rdb entry error") 172 | } else { 173 | if entry != nil { 174 | pipe <- entry 175 | } else { 176 | if err := l.Footer(); err != nil { 177 | log.PanicError(err, "parse rdb checksum error") 178 | } 179 | return 180 | } 181 | } 182 | } 183 | }() 184 | return pipe 185 | } 186 | -------------------------------------------------------------------------------- /wangdoujia_license: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Wandoujia Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | --------------------------------------------------------------------------------