├── tests
│   ├── __init__.py
│   ├── pip_requirement.txt
│   ├── base.py
│   ├── test_iptable_block.py
│   ├── test1.py
│   ├── test_log_rotate.py
│   ├── test2.py
│   └── test_down_1.py
├── .gitignore
├── Makefile
├── src
│   ├── cmem
│   │   ├── cmem_test.go
│   │   └── cmem.go
│   ├── proxy
│   │   ├── config.go
│   │   ├── config_test.go
│   │   └── proxy.go
│   └── memcache
│       ├── host_test.go
│       ├── client_test.go
│       ├── server_test.go
│       ├── proxy_test.go
│       ├── hash_test.go
│       ├── hash.go
│       ├── flowctrl.go
│       ├── stats.go
│       ├── log.go
│       ├── store.go
│       ├── protocol_test.go
│       ├── rclient.go
│       ├── schedule_test.go
│       ├── store_test.go
│       ├── server.go
│       ├── host.go
│       ├── client.go
│       ├── schedule.go
│       └── protocol.go
├── conf
│   ├── example.yaml
│   └── example.ini
├── README.md
└── static
    ├── header.html
    ├── info.html
    ├── stats.html
    ├── matrix.html
    ├── index.html
    ├── server.html
    └── mfs.css
/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .*.swp 2 | /bin/ 3 | /pkg/ 4 | *.test 5 | *.a 6 | *.out 7 | *.log 8 | /src/github.com/ 9 | tags 10 | *.pyc 11 | -------------------------------------------------------------------------------- /tests/pip_requirement.txt: -------------------------------------------------------------------------------- 1 | PyYAML 2 | python-libmemcached 3 | -e git+http://code.dapps.douban.com/douban-corelib.git#egg=DoubanCoreLib-dev 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: install 2 | 3 | GOPATH:=$(CURDIR) 4 | export GOPATH 5 | 6 | dep: 7 | go get gopkg.in/yaml.v2 8 | go install gopkg.in/yaml.v2 9 | 10 | install:dep 11 | go install proxy 12 | 13 | test: 14 | go test memcache 15 | 16 | debug:dep 17 | go install proxy 18 | -------------------------------------------------------------------------------- /src/cmem/cmem_test.go: -------------------------------------------------------------------------------- 1 | package cmem 2 | 3 | import ( 4 | "runtime" 5 | "testing" 6 | ) 7 | 8 | func TestCmem(t *testing.T) { 9 | size := uintptr(1024 * 1024 * 10) 10 | s := Alloc(size) 11 | Free(s, size) 12 | if Alloced() != 0 { 13 | runtime.Gosched() 14 | if Alloced() != 0 { 15 | t.Error("memory leak") 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/proxy/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | type Eye struct { 4 | Servers []string 5 | Port int 6 | WebPort int 7 | Threads int 8 | N int 9 | W int 10 | R int 11 | Buckets int 12 | Slow int 13 | Listen string 14 | Proxies []string 15 | AccessLog string 16 | ErrorLog string 17 | Basepath string 18 | Readonly bool 19 | } 20 | -------------------------------------------------------------------------------- /conf/example.yaml: -------------------------------------------------------------------------------- 1 | servers: 2 | - localhost:7900 0 1 2 3 | - 127.0.0.1:7900 A -1 B 4 | port: 7905 5 | webport: 7908 6 | threads: 8 7 | n: 3 8 | w: 2 9 | r: 1 10 | buckets: 16 11 | slow: 200 12 | listen: 0.0.0.0 13 | proxies: 14 | - localhost:7905 15 | accesslog: /log/beansproxy/beansproxy.log 16 | errorlog: /log/beansproxy/beansproxy_error.log 17 | basepath: /var/lib/beanseye 18 | readonly: false 19 | 
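Each `servers` entry above pairs a node address with the hex digits of the buckets it serves (the tests in tests/ additionally use digits prefixed with `-` for a temporary fallback node). For illustration, here is a minimal sketch, not part of the repository, of loading such a YAML file into the Eye-style struct from src/proxy/config.go with the gopkg.in/yaml.v2 package that the Makefile's `dep` target installs; `Conf` is a shortened, hypothetical stand-in for Eye:

``` go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"gopkg.in/yaml.v2"
)

// Conf mirrors a subset of the Eye struct in src/proxy/config.go.
type Conf struct {
	Servers []string
	Port    int
	WebPort int
	N, W, R int
	Buckets int
}

func main() {
	raw, err := ioutil.ReadFile("conf/example.yaml")
	if err != nil {
		log.Fatal(err)
	}
	var c Conf
	if err := yaml.Unmarshal(raw, &c); err != nil {
		log.Fatal(err)
	}
	// Each servers entry is "host:port" followed by the hex bucket
	// digits that node owns, e.g. "localhost:7900 0 1 2".
	fmt.Printf("%d servers, port=%d, N=%d W=%d R=%d, buckets=%d\n",
		len(c.Servers), c.Port, c.N, c.W, c.R, c.Buckets)
}
```

yaml.v2 matches keys against lowercased field names by default, which is why `webport: 7908` fills `WebPort` without any struct tags.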
-------------------------------------------------------------------------------- /src/memcache/host_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestHost(t *testing.T) { 8 | host := NewHost("localhost") 9 | testStore(t, host) 10 | st, err := host.Stat(nil) 11 | if err != nil { 12 | t.Error("stat fail", err) 13 | } 14 | if up, ok := st["uptime"]; !ok || len(up) < 1 { 15 | t.Error("no uptime in stat", st) 16 | } 17 | testFailStore(t, NewHost("localhost:11911")) 18 | } 19 | -------------------------------------------------------------------------------- /conf/example.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | server_port=7900 # default port 3 | servers=localhost,localhost:7901 # beansdb nodes, used by proxy and monitor 4 | 5 | [proxy] 6 | port=7905 # proxy port for accessing 7 | N=3 # nodes selected to write 8 | W=1 # min successful writes required to return success 9 | R=1 # min successful reads required to return success (not used) 10 | #accesslog=/var/log/beanseye/access.log # access log 11 | 12 | [monitor] 13 | port=7908 # monitor port for web 14 | proxy=localhost:7905 # proxy list to monitor 15 | -------------------------------------------------------------------------------- /src/memcache/client_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import "testing" 4 | 5 | var config map[string][]int = map[string][]int{ 6 | "localhost": []int{0}, 7 | "localhost:11599": []int{0}, 8 | } 9 | var badconfig map[string][]int = map[string][]int{ 10 | "localhost:11599": []int{0}, 11 | } 12 | 13 | func TestClient(t *testing.T) { 14 | client := NewClient(NewManualScheduler(config)) 15 | client.W = 1 16 | testStore(t, client) 17 | 18 | client = NewClient(NewManualScheduler(badconfig)) 19 | client.W = 1 20 | testFailStore(t, client) 21 | } 22 | -------------------------------------------------------------------------------- /src/cmem/cmem.go: -------------------------------------------------------------------------------- 1 | package cmem 2 | 3 | /* 4 | #include <stdlib.h> 5 | */ 6 | import "C" 7 | import "unsafe" 8 | 9 | var alloced int64 10 | var alloc_ch chan int64 11 | 12 | func init() { 13 | alloc_ch = make(chan int64, 10) 14 | go func() { 15 | for i := range alloc_ch { 16 | alloced += i 17 | } 18 | }() 19 | } 20 | 21 | func Alloc(size uintptr) *byte { 22 | alloc_ch <- int64(size) 23 | return (*byte)(C.malloc(C.size_t(size))) 24 | } 25 | 26 | func Free(ptr *byte, size uintptr) { 27 | alloc_ch <- -int64(size) 28 | C.free(unsafe.Pointer(ptr)) 29 | } 30 | 31 | func Alloced() int64 { 32 | return alloced 33 | } 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # What is beanseye? 2 | 3 | Beanseye is a proxy and monitor for beansdb, written in Go. 4 | 5 | # How to build 6 | 7 | Install Go first, then: 8 | ``` bash 9 | $ git clone URL 10 | $ cd beanseye 11 | $ make 12 | ``` 13 | 14 | # How to run 15 | 16 | Prepare your configuration according to conf/example.yaml (see also conf/example.ini): 17 | ``` bash 18 | $ ./bin/proxy -conf conf/example.yaml -basepath the_path_has_static 19 | ``` 20 | 21 | # Proxy 22 | 23 | You can access the whole beansdb cluster through localhost:7905 24 | as configured, by any memcached client. 
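For example, a minimal smoke test in Go that speaks the plain memcached text protocol (a sketch, not part of the repository; it assumes a proxy is already listening on localhost:7905 as configured above):

``` go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
)

func main() {
	// connect to the beanseye proxy like any other memcached server
	conn, err := net.Dial("tcp", "localhost:7905")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	r := bufio.NewReader(conn)

	// set <key> <flags> <exptime> <bytes>\r\n<data>\r\n -> STORED
	fmt.Fprint(conn, "set hello 0 0 5\r\nworld\r\n")
	if line, err := r.ReadString('\n'); err == nil {
		fmt.Print(line) // expect "STORED"
	}

	// get <key> -> VALUE <key> <flags> <bytes>\r\n<data>\r\nEND
	fmt.Fprint(conn, "get hello\r\n")
	for {
		line, err := r.ReadString('\n')
		if err != nil || line == "END\r\n" {
			break
		}
		fmt.Print(line)
	}
}
```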
25 | 26 | # Monitor 27 | 28 | There is a web monitor on http://localhost:7908/ by default. -------------------------------------------------------------------------------- /static/header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 16 | 18 | 19 |
17 |
20 | -------------------------------------------------------------------------------- /static/info.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 |
Info
versionserversproxiesbucketstotal recordsuniq recordschunks
{{len .server_stats}}{{len .proxy_stats}}16{{.total_records|num}}{{.uniq_records|num}}
22 | -------------------------------------------------------------------------------- /static/stats.html: -------------------------------------------------------------------------------- 1 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | {{range $i,$n := .bucket_stats}} 13 | 14 | {{end}} 15 | 16 | {{range $i, $st := .stats}} 17 | 18 | 19 | 20 | {{range .stat}} 21 | 22 | {{end}} 23 | 24 | {{end}} 25 |
serverbuckets
{{$i}}
{{$i}}{{.name}}{{if .}}{{.| size}}{{end}}
26 |
27 | -------------------------------------------------------------------------------- /src/memcache/server_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestServer(t *testing.T) { 10 | s, _ := StartServer("localhost:11299") 11 | time.Sleep(1e8) 12 | client := NewClient(NewManualScheduler(map[string][]int{"localhost:11299": []int{0}})) 13 | client.W = 1 14 | 15 | testStore(t, client) 16 | s.Shutdown() 17 | } 18 | 19 | func TestShutdown(t *testing.T) { 20 | addr := "localhost:11298" 21 | s, _ := StartServer(addr) 22 | go func() { 23 | time.Sleep(1e8) 24 | s.Shutdown() 25 | }() 26 | if _, err := net.Dial("tcp", addr); err != nil { 27 | t.Error("server fail") 28 | } 29 | time.Sleep(2e8) // wait for close 30 | if _, err := net.Dial("tcp", addr); err == nil { 31 | t.Error("server not shundown") 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /src/memcache/proxy_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | // "os" 7 | ) 8 | 9 | func testProxy(t *testing.T, auto bool) { 10 | //AccessLog = log.New(os.Stdout, nil, "", log.Ldate|log.Ltime) 11 | s1, _ := StartServer("localhost:11297") 12 | defer s1.Shutdown() 13 | s2, _ := StartServer("localhost:11296") 14 | defer s2.Shutdown() 15 | 16 | config := map[string][]int{ 17 | "localhost:11296": []int{0}, 18 | "localhost:11297": []int{0}, 19 | } 20 | addr := "localhost:11295" 21 | p := NewProxy(config, auto) 22 | p.Listen(addr) 23 | defer p.Shutdown() 24 | go func() { 25 | p.Serve() 26 | }() 27 | time.Sleep(1e8) 28 | 29 | client := NewHost(addr) 30 | testStore(t, client) 31 | } 32 | 33 | func TestProxy(t *testing.T) { 34 | testProxy(t, false) 35 | time.Sleep(1e9) 36 | testProxy(t, true) 37 | } 38 | -------------------------------------------------------------------------------- /static/matrix.html: -------------------------------------------------------------------------------- 1 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | {{range $i,$n := .bucket_stats}} 14 | 15 | {{end}} 16 | 17 | 18 | {{range .server_stats}} 19 | 20 | 21 | {{if .buckets}} 22 | {{range .buckets}} 23 | 24 | {{end}} 25 | 28 | {{end}} 29 |
All records in buckets
serverbuckets
{{$i}}all
{{.name}}{{if .}}{{.}}{{end}}{{sum .buckets | size}} 26 | {{end}} 27 |
30 |
31 | -------------------------------------------------------------------------------- /src/memcache/hash_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | type TestCase struct { 8 | name string 9 | value string 10 | hash uint32 11 | } 12 | 13 | var tests []TestCase = []TestCase{ 14 | TestCase{"md5", "", 3649838548}, 15 | TestCase{"crc32", "", 0}, 16 | TestCase{"fnv1a", "", 2166136261}, 17 | TestCase{"fnv1a1", "", 2166136261}, 18 | 19 | TestCase{"md5", "hello", 708854109}, 20 | TestCase{"crc32", "hello", 907060870}, 21 | TestCase{"fnv1a", "hello", 1335831723}, 22 | TestCase{"fnv1a1", "hello", 1335831723}, 23 | 24 | TestCase{"md5", "你好", 2674444926}, 25 | TestCase{"crc32", "你好", 1352841281}, 26 | TestCase{"fnv1a", "你好", 2257816995}, 27 | TestCase{"fnv1a1", "你好", 718964643}, 28 | } 29 | 30 | func Test(t *testing.T) { 31 | for _, c := range tests { 32 | h := hashMethods[c.name]([]byte(c.value)) 33 | if c.hash != h { 34 | t.Errorf("test hash faile: %s(%s)(%d) != %d", c.name, c.value, h, c.hash) 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /static/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Beansdb Info 6 | 7 | 8 | 9 | 10 | 13 | 14 |
15 | 16 | {{if in .sections "IN"}} 17 | {{template "info.html" .}}
18 | {{template "matrix.html" .}}
19 | {{end}} 20 | 21 | {{if in .sections "SS"}} 22 | {{template "server.html" .proxy_stats}}
23 | {{template "server.html" .server_stats}}
24 | {{end}} 25 | 26 | {{if in .sections "ST"}} 27 | {{template "stats.html" .}}
28 | {{end}} 29 | 30 |
31 | 32 | 33 | -------------------------------------------------------------------------------- /src/proxy/config_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/douban/goyaml" 6 | "io/ioutil" 7 | "testing" 8 | "os" 9 | ) 10 | 11 | func Test_GetEyeConfig(t *testing.T) { 12 | eye := &Eye{ 13 | Servers: []string{"localhost:7900 0 1 2", "127.0.0.1:7900 A -1 B"}, 14 | Port: 7905, 15 | WebPort: 7908, 16 | Threads: 8, 17 | N: 3, 18 | W: 2, 19 | R: 1, 20 | Buckets: 16, 21 | Listen: "0.0.0.0", 22 | Slow: 200, 23 | Proxies: []string{"localhost:7905"}, 24 | AccessLog: "/log/beansproxy/beansproxy.log", 25 | ErrorLog: "/log/beansproxy/beansproxy_error.log", 26 | Basepath: "/var/lib/beanseye", 27 | Readonly: false, 28 | } 29 | 30 | content1, _ := goyaml.Marshal(eye) 31 | ioutil.WriteFile("./example.yaml", content1, os.ModePerm) 32 | 33 | var new_eye Eye 34 | content, _ := ioutil.ReadFile("./example.yaml") 35 | 36 | goyaml.Unmarshal(content, &new_eye) 37 | fmt.Println("eye") 38 | fmt.Println(*eye) 39 | fmt.Println("new_eye") 40 | fmt.Println(new_eye) 41 | 42 | } 43 | -------------------------------------------------------------------------------- /src/memcache/hash.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "crypto/md5" 5 | "hash/crc32" 6 | "hash/fnv" 7 | ) 8 | 9 | type HashMethod func(v []byte) (h uint32) 10 | 11 | func md5hash(src []byte) uint32 { 12 | hash := md5.New() 13 | hash.Write(src) 14 | s := hash.Sum(nil) 15 | return (uint32(s[3]) << 24) | (uint32(s[2]) << 16) | (uint32(s[1]) << 8) | uint32(s[0]) 16 | } 17 | 18 | func crc32hash(s []byte) uint32 { 19 | hash := crc32.NewIEEE() 20 | hash.Write(s) 21 | return hash.Sum32() 22 | } 23 | 24 | func fnv1a(s []byte) uint32 { 25 | h := fnv.New32a() 26 | h.Write(s) 27 | return h.Sum32() 28 | } 29 | 30 | const FNV1A_PRIME uint32 = 0x01000193 31 | const FNV1A_INIT uint32 = 0x811c9dc5 32 | 33 | // Bugy version of fnv1a 34 | func fnv1a1(s []byte) uint32 { 35 | h := FNV1A_INIT 36 | for _, c := range s { 37 | h ^= uint32(int8(c)) 38 | h *= FNV1A_PRIME 39 | } 40 | return h 41 | } 42 | 43 | var hashMethods = map[string]HashMethod{ 44 | "fnv1a": fnv1a, 45 | "fnv1a1": fnv1a1, 46 | "crc32": crc32hash, 47 | "md5": md5hash, 48 | } 49 | -------------------------------------------------------------------------------- /static/server.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | {{range $i,$st := .}} 24 | 25 | 26 | 27 | {{if .uptime }} 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | {{end}} 45 | 46 | {{end}} 47 | -------------------------------------------------------------------------------- /src/memcache/flowctrl.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | type FlowServerConn struct { 10 | conn *ServerConn 11 | stamp time.Time 12 | } 13 | 14 | func (c *FlowServerConn) Close() { 15 | c.conn.Close() 16 | } 17 | 18 | func NewFlowServerConn(c *ServerConn) *FlowServerConn { 19 | return &FlowServerConn{conn: c, stamp: time.Now()} 20 | } 21 | 22 | type FlowController struct { 23 | max_qps, curr_qps int 24 | conn_timeout float32 25 | flow, pipe chan *FlowServerConn 26 | timeout 
time.Duration 27 | } 28 | 29 | func NewFlowController(maxqps int, timeout float32) *FlowController { 30 | f := new(FlowController) 31 | f.max_qps = maxqps 32 | f.curr_qps = 0 33 | f.conn_timeout = timeout 34 | f.flow = make(chan *FlowServerConn, 1+int(f.conn_timeout*float32(f.max_qps))) 35 | f.pipe = nil 36 | duration_string := fmt.Sprintf("%fs", timeout) 37 | f.timeout, _ = time.ParseDuration(duration_string) 38 | return f 39 | } 40 | 41 | func (f *FlowController) Put(c *ServerConn) (ok bool, err error) { 42 | flow_c := NewFlowServerConn(c) 43 | select { 44 | case f.flow <- flow_c: 45 | default: 46 | return false, errors.New("channel is full") 47 | } 48 | return true, nil 49 | } 50 | 51 | func (f *FlowController) CanTransmitNow() bool { 52 | return true 53 | } 54 | 55 | func (f *FlowController) WhenToTransmit() time.Duration { 56 | r, _ := time.ParseDuration("1s") 57 | return r 58 | } 59 | 60 | func (f *FlowController) transmit() { 61 | tran := func() { 62 | conn := <-f.flow // blocking mode is ok 63 | if time.Now().Sub(conn.stamp) > f.timeout { 64 | // timeout , Close and drop 65 | conn.Close() 66 | return 67 | } 68 | select { 69 | // do not block in sending procedure 70 | case f.pipe <- conn: 71 | return 72 | default: 73 | // ignore the pipe full time 74 | // because caller handle it. 75 | conn.Close() 76 | return 77 | } 78 | } 79 | for { 80 | if f.CanTransmitNow() { 81 | tran() 82 | } else { 83 | to_next := f.WhenToTransmit() 84 | time.AfterFunc(to_next, tran) 85 | } 86 | } 87 | } 88 | 89 | func (f *FlowController) Bind(output chan *FlowServerConn) bool { 90 | if f.pipe == nil { 91 | f.pipe = output 92 | go f.transmit() 93 | return true 94 | } else { 95 | return false 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/memcache/stats.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "cmem" 5 | "os" 6 | "runtime" 7 | "syscall" 8 | "time" 9 | ) 10 | 11 | type Stats struct { 12 | start time.Time 13 | curr_item, total_items int64 14 | cmd_get, cmd_set, cmd_delete int64 15 | get_hits, get_misses int64 16 | threads int64 17 | curr_connections, total_connections int64 18 | bytes_read, bytes_written int64 19 | stat map[string]int64 20 | } 21 | 22 | func NewStats() *Stats { 23 | s := new(Stats) 24 | s.stat = make(map[string]int64) 25 | s.start = time.Now() 26 | return s 27 | } 28 | 29 | func (s *Stats) UpdateStat(key string, value int64) { 30 | oldv, ok := s.stat[key] 31 | if !ok { 32 | oldv = 0 33 | } 34 | s.stat[key] = oldv + value 35 | } 36 | 37 | func mem_in_go(include_zero bool) runtime.MemProfileRecord { 38 | var p []runtime.MemProfileRecord 39 | n, ok := runtime.MemProfile(nil, include_zero) 40 | for { 41 | // Allocate room for a slightly bigger profile, 42 | // in case a few more entries have been added 43 | // since the call to MemProfile. 44 | p = make([]runtime.MemProfileRecord, n+50) 45 | n, ok = runtime.MemProfile(p, include_zero) 46 | if ok { 47 | p = p[0:n] 48 | break 49 | } 50 | // Profile grew; try again. 
51 | } 52 | 53 | var total runtime.MemProfileRecord 54 | for i := range p { 55 | r := &p[i] 56 | total.AllocBytes += r.AllocBytes 57 | total.AllocObjects += r.AllocObjects 58 | total.FreeBytes += r.FreeBytes 59 | total.FreeObjects += r.FreeObjects 60 | } 61 | return total 62 | } 63 | 64 | func (s *Stats) Stats() map[string]int64 { 65 | st := make(map[string]int64) 66 | st["cmd_get"] = s.cmd_get 67 | st["cmd_set"] = s.cmd_set 68 | st["cmd_delete"] = s.cmd_delete 69 | st["get_hits"] = s.get_hits 70 | st["get_misses"] = s.get_misses 71 | st["curr_connections"] = s.curr_connections 72 | st["total_connections"] = s.total_connections 73 | st["bytes_read"] = s.bytes_read 74 | st["bytes_written"] = s.bytes_written 75 | for k, v := range s.stat { 76 | st[k] = v 77 | } 78 | 79 | t := time.Now() 80 | st["time"] = int64(t.Second()) 81 | st["uptime"] = int64(t.Sub(s.start).Seconds()) 82 | st["pid"] = int64(os.Getpid()) 83 | st["threads"] = int64(runtime.NumGoroutine()) 84 | rusage := syscall.Rusage{} 85 | syscall.Getrusage(0, &rusage) 86 | st["rusage_user"] = int64(rusage.Utime.Sec) 87 | st["rusage_system"] = int64(rusage.Stime.Sec) 88 | 89 | var memstat runtime.MemStats 90 | runtime.ReadMemStats(&memstat) 91 | st["rusage_maxrss"] = int64(memstat.Sys/1024) + cmem.Alloced()/1024 92 | return st 93 | } 94 | -------------------------------------------------------------------------------- /src/memcache/log.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "sync" 7 | "sync/atomic" 8 | "unsafe" 9 | ) 10 | var AccessLogPath string 11 | var ErrorLogPath string 12 | var AccessLog *log.Logger = nil 13 | var ErrorLog *log.Logger = nil 14 | var AccessFd *os.File = nil 15 | var ErrorFd *os.File = nil 16 | var lock *sync.Mutex = new(sync.Mutex) 17 | 18 | func openLogWithFd(fd *os.File) *log.Logger { 19 | return log.New(fd, "", log.Ldate|log.Ltime|log.Lmicroseconds) 20 | } 21 | 22 | func openLog(path string) (logger *log.Logger, fd *os.File, err error) { 23 | if fd, err = os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644); err == nil { 24 | logger = openLogWithFd(fd) 25 | } 26 | return 27 | } 28 | 29 | func OpenAccessLog(access_log_path string) (success bool, err error) { 30 | lock.Lock() 31 | defer lock.Unlock() 32 | success = false 33 | if AccessLog == nil { 34 | if AccessLog, AccessFd, err = openLog(access_log_path); err == nil { 35 | success = true 36 | } 37 | } else { 38 | // start swap exist logger and new logger, and Close the older fd in later, if it is Stdout, leave it 39 | access_log, access_file, e := openLog(access_log_path) 40 | err = e 41 | if err == nil { 42 | success = true 43 | access_log = (*log.Logger)(atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&AccessLog)), unsafe.Pointer(access_log))) 44 | access_file = (*os.File)(atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&AccessFd)), unsafe.Pointer(access_file))) 45 | if e = access_file.Close(); e != nil { 46 | log.Println("close the old accesslog fd failure with, ", e) 47 | } 48 | } else { 49 | log.Println("open " + access_log_path + " failed: " + err.Error()) 50 | } 51 | } 52 | return 53 | } 54 | 55 | func OpenErrorLog(error_log_path string) (success bool, err error) { 56 | lock.Lock() 57 | defer lock.Unlock() 58 | success = false 59 | if ErrorLog == nil { 60 | if ErrorLog, ErrorFd, err = openLog(error_log_path); err == nil { 61 | success = true 62 | } 63 | } else { 64 | // start swap exist logger and new logger, and Close the older fd in 
later, if it is Stdout, leave it 65 | error_log, error_file, e := openLog(error_log_path) 66 | err = e 67 | if err == nil { 68 | success = true 69 | error_log = (*log.Logger)(atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&ErrorLog)), unsafe.Pointer(error_log))) 70 | error_file = (*os.File)(atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&ErrorFd)), unsafe.Pointer(error_file))) 71 | if e = error_file.Close(); e != nil { 72 | log.Println("close the old errorlog fd failure with, ", e) 73 | } 74 | } else { 75 | log.Println("open " + error_log_path + " failed: " + err.Error()) 76 | } 77 | } 78 | return 79 | } 80 | -------------------------------------------------------------------------------- /src/memcache/store.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "math/rand" 5 | "strconv" 6 | "sync" 7 | ) 8 | 9 | type Storage interface { 10 | Get(key string) (*Item, error) 11 | GetMulti(keys []string) (map[string]*Item, error) 12 | Set(key string, item *Item, noreply bool) (bool, error) 13 | Append(key string, value []byte) (bool, error) 14 | Incr(key string, value int) (int, error) 15 | Delete(key string) (bool, error) 16 | Len() int 17 | } 18 | 19 | type DistributeStorage interface { 20 | Get(key string) (*Item, []string, error) 21 | GetMulti(keys []string) (map[string]*Item, []string, error) 22 | Set(key string, item *Item, noreply bool) (bool, []string, error) 23 | Append(key string, value []byte) (bool, []string, error) 24 | Incr(key string, value int) (int, []string, error) 25 | Delete(key string) (bool, []string, error) 26 | Len() int 27 | } 28 | 29 | type mapStore struct { 30 | lock sync.Mutex 31 | data map[string]*Item 32 | } 33 | 34 | func NewMapStore() *mapStore { 35 | s := new(mapStore) 36 | s.data = make(map[string]*Item) 37 | return s 38 | } 39 | 40 | func (s *mapStore) Get(key string) (*Item, error) { 41 | s.lock.Lock() 42 | defer s.lock.Unlock() 43 | 44 | r, _ := s.data[key] 45 | return r, nil 46 | } 47 | 48 | func (s *mapStore) GetMulti(keys []string) (map[string]*Item, error) { 49 | s.lock.Lock() 50 | defer s.lock.Unlock() 51 | 52 | rs := make(map[string]*Item, len(keys)) 53 | for _, key := range keys { 54 | r, _ := s.data[key] 55 | if r != nil { 56 | rs[key] = r 57 | } 58 | } 59 | return rs, nil 60 | } 61 | 62 | func (s *mapStore) Set(key string, item *Item, noreply bool) (bool, error) { 63 | s.lock.Lock() 64 | defer s.lock.Unlock() 65 | 66 | item.Cas = rand.Int() 67 | it := *item 68 | item.alloc = nil 69 | s.data[key] = &it 70 | return true, nil 71 | } 72 | 73 | func (s *mapStore) Append(key string, value []byte) (suc bool, err error) { 74 | s.lock.Lock() 75 | defer s.lock.Unlock() 76 | 77 | r, ok := s.data[key] 78 | if ok && r.Flag == 0 { 79 | r.Body = append(r.Body, value...) 
80 | s.data[key] = r 81 | return true, nil 82 | } 83 | return false, nil 84 | } 85 | 86 | func (s *mapStore) Incr(key string, v int) (n int, err error) { 87 | s.lock.Lock() 88 | defer s.lock.Unlock() 89 | r, ok := s.data[key] 90 | if ok { 91 | n, err = strconv.Atoi(string(r.Body)) 92 | if err != nil { 93 | return 94 | } 95 | n += v 96 | r.Body = []byte(strconv.Itoa(n)) 97 | } else { 98 | n = 0 99 | } 100 | return 101 | } 102 | 103 | func (s *mapStore) Delete(key string) (r bool, err error) { 104 | s.lock.Lock() 105 | defer s.lock.Unlock() 106 | _, ok := s.data[key] 107 | if ok { 108 | delete(s.data, key) 109 | r = true 110 | } 111 | return 112 | } 113 | 114 | func (s *mapStore) Len() int { 115 | return len(s.data) 116 | } 117 | -------------------------------------------------------------------------------- /tests/base.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding:utf-8 3 | 4 | import unittest 5 | import subprocess 6 | import shlex 7 | import time 8 | import os 9 | import sys 10 | import string 11 | import random 12 | import yaml 13 | import shutil 14 | from douban.beansdb import MCStore 15 | 16 | def start_svc(cmd): 17 | print "start", cmd 18 | p = subprocess.Popen(isinstance(cmd, (tuple, list,)) and cmd or shlex.split(cmd) , close_fds=True) 19 | time.sleep(0.2) 20 | if p.poll() is not None: 21 | raise Exception("cannot start %s" % (cmd)) 22 | return p 23 | 24 | def stop_svc(popen): 25 | if popen.poll() is not None: 26 | return 27 | popen.terminate() 28 | popen.wait() 29 | 30 | 31 | class BeansdbInstance: 32 | 33 | def __init__(self, base_path, port): 34 | self.port = port 35 | self.popen = None 36 | self.db_home = os.path.join(base_path, "beansdb_%s" % (self.port)) 37 | if not os.path.exists(self.db_home): 38 | os.makedirs(self.db_home) 39 | self.cmd = "/usr/bin/beansdb -p %s -H %s -T 1 -L /dev/null" % (self.port, self.db_home) 40 | 41 | 42 | def start(self): 43 | assert self.popen is None 44 | self.popen = start_svc(self.cmd) 45 | 46 | def stop(self): 47 | print "stop", self.cmd 48 | if self.popen: 49 | stop_svc(self.popen) 50 | self.popen = None 51 | 52 | def clean(self): 53 | if self.popen: 54 | self.stop() 55 | if os.path.exists(self.db_home): 56 | shutil.rmtree(self.db_home) 57 | 58 | 59 | def random_string(n): 60 | s = string.ascii_letters + string.digits 61 | result = "" 62 | for i in xrange(n): 63 | result += random.choice(s) 64 | return result 65 | 66 | 67 | class TestBeanseyeBase(unittest.TestCase): 68 | 69 | data_base_path = os.path.join("/tmp", "beanseye_test") 70 | code_base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 71 | #NOTE: should override accesslog in tests 72 | # accesslog = os.path.join(self.data_base_path, 'beansproxy.log') 73 | # errorlog = os.path.join(self.data_base_path, 'beansproxy.log') 74 | 75 | 76 | def _init_dir(self): 77 | if not os.path.exists(self.data_base_path): 78 | os.makedirs(self.data_base_path) 79 | 80 | 81 | def _start_proxy(self, config): 82 | config_file = self._gen_proxy_config(config) 83 | proxy_bin = os.path.join(self.code_base_path, "bin", "proxy") 84 | cmd = "%s -conf %s -basepath %s" % (proxy_bin, config_file, self.code_base_path) 85 | return start_svc(cmd) 86 | 87 | def _gen_proxy_config(self, config): 88 | config['basepath'] = self.code_base_path 89 | config['accesslog'] = self.accesslog 90 | config['errorlog'] = self.errorlog 91 | config_file = os.path.join(self.data_base_path, 'beanseye_conf.yaml') 92 | with open(config_file, 'w') as 
f: 93 | yaml.safe_dump(config, f, default_flow_style=False, canonical=False) 94 | return config_file 95 | 96 | def _assert_data(self, addr, key, data, mesg=None): 97 | store = MCStore(addr) 98 | self.assertEqual(store.get(key), data, mesg) 99 | 100 | 101 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 : 102 | -------------------------------------------------------------------------------- /src/memcache/protocol_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "fmt" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | type reqTest struct { 12 | cmd string 13 | anwser string 14 | } 15 | 16 | var reqTests = []reqTest{ 17 | reqTest{ 18 | "get abc cdf \r\n", 19 | "END\r\n", 20 | }, 21 | reqTest{ 22 | "get \r\n", 23 | "CLIENT_ERROR invalid cmd\r\n", 24 | }, 25 | reqTest{ 26 | "get " + strings.Repeat("a", 300) + " \r\n", 27 | "CLIENT_ERROR key too long\r\n", 28 | }, 29 | reqTest{ 30 | "set abc 2 3 2 noreply\r\nok\r\n", 31 | "", 32 | }, 33 | reqTest{ 34 | "set abc a 3 2 noreply\r\nok\r\n", 35 | "CLIENT_ERROR strconv.ParseInt: parsing \"a\": invalid syntax\r\n", 36 | }, 37 | reqTest{ 38 | "set abc 3 3 3 2 noreply\r\nok\r\n", 39 | "CLIENT_ERROR invalid cmd\r\n", 40 | }, 41 | reqTest{ 42 | "set abc a 3 2 noreply\r\nok\r\n", 43 | "CLIENT_ERROR strconv.ParseInt: parsing \"a\": invalid syntax\r\n", 44 | }, 45 | reqTest{ 46 | "set abc 3 3 10\r\nok\r\n", 47 | "CLIENT_ERROR unexpected EOF\r\n", 48 | }, 49 | reqTest{ 50 | "set cdf 0 0 2\r\nok\r\n", 51 | "STORED\r\n", 52 | }, 53 | reqTest{ 54 | "get cdf\r\n", 55 | "VALUE cdf 0 2\r\nok\r\nEND\r\n", 56 | }, 57 | reqTest{ 58 | "get abc \r\n", 59 | "VALUE abc 2 2\r\nok\r\nEND\r\n", 60 | }, 61 | // reqTest{ 62 | // "get abc cdf\r\n", 63 | // "VALUE cdf 0 2\r\nok\r\nVALUE abc 2 2\r\nok\r\nEND\r\n", 64 | // }, 65 | // reqTest{ 66 | // "cas abc -5 10 0 134020434\r\n\r\n", 67 | // "STORED\r\n", 68 | // }, 69 | reqTest{ 70 | "delete abc\r\n", 71 | "DELETED\r\n", 72 | }, 73 | reqTest{ 74 | "delete abc noreply\r\n", 75 | "", 76 | }, 77 | reqTest{ 78 | "append cdf 0 0 2\r\n 2\r\n", 79 | "STORED\r\n", 80 | }, 81 | // reqTest{ 82 | // "prepend cdf 0 0 2\r\n1 \r\n", 83 | // "STORED", 84 | // }, 85 | reqTest{ 86 | "get cdf\r\n", 87 | "VALUE cdf 0 4\r\nok 2\r\nEND\r\n", 88 | }, 89 | reqTest{ 90 | "append ap 0 0 2\r\nap\r\n", 91 | "NOT_STORED\r\n", 92 | }, 93 | 94 | reqTest{ 95 | "set n 4 0 1\r\n5\r\n", 96 | "STORED\r\n", 97 | }, 98 | reqTest{ 99 | "incr n 3\r\n", 100 | "8\r\n", 101 | }, 102 | reqTest{ 103 | "incr nn 7\r\n", 104 | "NOT_FOUND\r\n", 105 | }, 106 | 107 | reqTest{ 108 | "stats curr_items cmd_get cmd_set get_hits get_misses\r\n", 109 | "STAT curr_items 2\r\n" + 110 | "STAT cmd_get 5\r\n" + 111 | "STAT cmd_set 7\r\n" + 112 | "STAT get_hits 3\r\n" + 113 | "STAT get_misses 2\r\n" + 114 | "END\r\n", 115 | }, 116 | reqTest{ 117 | "flush_all\r\n", 118 | "OK\r\n", 119 | }, 120 | reqTest{ 121 | "verbosity 1\r\n", 122 | "OK\r\n", 123 | }, 124 | reqTest{ 125 | "version\r\n", 126 | "VERSION " + VERSION + "\r\n", 127 | }, 128 | 129 | reqTest{ 130 | "quit\r\n", 131 | "", 132 | }, 133 | reqTest{ 134 | "error\r\n", 135 | "CLIENT_ERROR unknown command: error\r\n", 136 | }, 137 | } 138 | 139 | func TestRequest(t *testing.T) { 140 | store := NewMapStore() 141 | stats := NewStats() 142 | 143 | for i, test := range reqTests { 144 | buf := bytes.NewBufferString(test.cmd) 145 | req := new(Request) 146 | e := req.Read(bufio.NewReader(buf)) 147 | var resp *Response 148 | if e != nil { 
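// a request that fails to parse never reaches the store; it is answered
// directly with a CLIENT_ERROR response, matching the expectations in reqTests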
149 | resp = &Response{status: "CLIENT_ERROR", msg: e.Error()} 150 | } else { 151 | resp = req.Process(store, stats) 152 | } 153 | 154 | r := make([]byte, 0) 155 | wr := bytes.NewBuffer(r) 156 | if resp != nil { 157 | resp.Write(wr) 158 | } 159 | ans := wr.String() 160 | if test.anwser != ans { 161 | fmt.Print(req, resp) 162 | t.Errorf("test %d: expect %s[%d], bug got %s[%d]\n", i, 163 | test.anwser, len(test.anwser), ans, len(ans)) 164 | } 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /tests/test_iptable_block.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding:utf-8 3 | 4 | import os 5 | import sys 6 | import time 7 | from base import TestBeanseyeBase, BeansdbInstance, random_string, stop_svc 8 | import unittest 9 | from douban.beansdb import BeansDBProxy, MCStore 10 | import subprocess 11 | 12 | class Test4(TestBeanseyeBase): 13 | 14 | proxy_addr = 'localhost:7905' 15 | backend1_addr = 'localhost:57901' 16 | backend2_addr = 'localhost:57902' 17 | backend3_addr = 'localhost:57903' 18 | backend4_addr = 'localhost:57904' 19 | 20 | data_base_path = os.path.join("/tmp", "beanseye_test") 21 | accesslog = os.path.join(data_base_path, 'beansproxy_test4.log') 22 | errorlog = os.path.join(data_base_path, 'beansproxy_error_test4.log') 23 | 24 | def setUp(self): 25 | self._init_dir() 26 | if os.path.exists(self.accesslog): 27 | os.remove(self.accesslog) 28 | if os.path.exists(self.errorlog): 29 | os.remove(self.errorlog) 30 | 31 | self.backend1 = BeansdbInstance(self.data_base_path, 57901) 32 | self.backend2 = BeansdbInstance(self.data_base_path, 57902) 33 | self.backend3 = BeansdbInstance(self.data_base_path, 57903) 34 | self.backend4 = BeansdbInstance(self.data_base_path, 57904) 35 | self.backend1.start() 36 | self.backend2.start() 37 | self.backend3.start() 38 | self.backend4.start() 39 | # start 3 backend, no additional temporary node 40 | proxy_conf = { 41 | 'servers': [ 42 | self.backend1_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 43 | self.backend2_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 44 | self.backend3_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 45 | self.backend4_addr + ' -F -E -D -C -B -A -9 -8 -7 -6 -5 -4 -3 -2 -1 -0', 46 | ], 47 | 'port': 7905, 48 | 'webport': 7908, 49 | 'threads': 8, 50 | 'n': 3, 51 | 'w': 2, 52 | 'r': 1, 53 | 'buckets': 16, 54 | 'slow': 100, 55 | 'listen': '0.0.0.0', 56 | 'proxies': [self.proxy_addr], 57 | } 58 | self.proxy_p = self._start_proxy(proxy_conf) 59 | 60 | def _iptable_block(self, addr): 61 | ip, port = addr.split(":") 62 | cmd = "sudo iptables -I INPUT 1 -i lo -p tcp --dport %s -j DROP" % (port) 63 | print cmd 64 | os.system(cmd) 65 | 66 | def _iptable_unblock(self, addr): 67 | ip, port = addr.split(":") 68 | cmd = "sudo iptables -i lo -D INPUT -p tcp --dport %s -j DROP" % (port) 69 | print cmd 70 | os.system(cmd) 71 | 72 | def test_iptable_silence(self): 73 | """ test wether a node slow will affect response time """ 74 | proxy = BeansDBProxy([self.proxy_addr]) 75 | key3 = 'key3' 76 | i = 0 77 | start_time = time.time() 78 | for i in range(20000): 79 | data3 = random_string(10) 80 | proxy.set(key3, data3) 81 | self.assertEqual(proxy.get(key3), data3) 82 | self.assertEqual(proxy.get(key3), data3) 83 | print "avg get&set time", (time.time() - start_time) / 2000 84 | 85 | i = 0 86 | self._iptable_block(self.backend1_addr) 87 | start_time = time.time() 88 | for i in range(20000): 89 | data3 = random_string(10) 90 | 
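# backend1's port is now black-holed by iptables (packets silently dropped, no
# RST), so requests routed to it must wait out the connect timeout; compare
# this round's timing with the unblocked run above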
proxy.set(key3, data3) 91 | self.assertEqual(proxy.get(key3), data3) 92 | self.assertEqual(proxy.get(key3), data3) 93 | print "avg get&set time", (time.time() - start_time) / 2000 94 | self._iptable_unblock(self.backend1_addr) 95 | 96 | 97 | 98 | def tearDown(self): 99 | stop_svc(self.proxy_p) 100 | self.backend1.stop() 101 | self.backend2.stop() 102 | self.backend3.stop() 103 | self.backend4.stop() 104 | self.backend1.clean() 105 | self.backend2.clean() 106 | self.backend3.clean() 107 | self.backend4.clean() 108 | 109 | if __name__ == '__main__': 110 | unittest.main() 111 | 112 | 113 | 114 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 : 115 | -------------------------------------------------------------------------------- /tests/test1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding:utf-8 3 | 4 | import os 5 | import sys 6 | import time 7 | from base import TestBeanseyeBase, BeansdbInstance, random_string, stop_svc 8 | import unittest 9 | from douban.beansdb import BeansDBProxy, MCStore 10 | 11 | 12 | class Test1(TestBeanseyeBase): 13 | 14 | proxy_addr = 'localhost:7905' 15 | backend1_addr = 'localhost:57901' 16 | backend2_addr = 'localhost:57902' 17 | backend3_addr = 'localhost:57903' 18 | data_base_path = os.path.join("/tmp", "beanseye_test") 19 | accesslog = os.path.join(data_base_path, 'beansproxy_test1.log') 20 | errorlog = os.path.join(data_base_path, 'beansproxy_error_test1.log') 21 | 22 | 23 | def setUp(self): 24 | self._init_dir() 25 | if os.path.exists(self.accesslog): 26 | os.remove(self.accesslog) 27 | if os.path.exists(self.errorlog): 28 | os.remove(self.errorlog) 29 | self.backend1 = BeansdbInstance(self.data_base_path, 57901) 30 | self.backend2 = BeansdbInstance(self.data_base_path, 57902) 31 | self.backend3 = BeansdbInstance(self.data_base_path, 57903) 32 | self.backend1.start() 33 | self.backend2.start() 34 | self.backend3.start() 35 | # start 3 backend, no additional temporary node 36 | proxy_conf = { 37 | 'servers': [ 38 | self.backend1_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 39 | self.backend2_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 40 | self.backend3_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 41 | ], 42 | 'port': 7905, 43 | 'webport': 7908, 44 | 'threads': 8, 45 | 'n': 3, 46 | 'w': 2, 47 | 'r': 1, 48 | 'buckets': 16, 49 | 'slow': 100, 50 | 'listen': '0.0.0.0', 51 | 'proxies': [self.proxy_addr], 52 | } 53 | self.proxy_p = self._start_proxy(proxy_conf) 54 | 55 | def test1(self): 56 | data1 = random_string(10) 57 | data2 = random_string(10) 58 | time.sleep(1) 59 | 60 | print "test normal write" 61 | proxy = BeansDBProxy([self.proxy_addr]) 62 | proxy.delete('key1') 63 | proxy.set('key1', data1) 64 | self._assert_data(self.backend1_addr, 'key1', data1) 65 | self._assert_data(self.backend2_addr, 'key1', data1) 66 | self._assert_data(self.backend3_addr, 'key1', data1) 67 | self.assert_(MCStore(self.backend2_addr).exists('key1')) 68 | 69 | print "down backend2, proxy.get should be ok" 70 | self.backend2.stop() 71 | proxy.delete('key2') 72 | self.assert_(not proxy.exists('key2')) 73 | self.assert_(not MCStore(self.backend1_addr).exists('key2')) 74 | self.assert_(not MCStore(self.backend3_addr).exists('key2')) 75 | proxy.set('key2', data2) 76 | self.assertEqual(proxy.get('key2'), data2) 77 | 78 | self.assert_(proxy.exists('key2')) 79 | self.assert_(MCStore(self.backend3_addr).exists('key2')) 80 | self.assert_(MCStore(self.backend1_addr).exists('key2')) 81 | 
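# with backend2 down, W=2 is still satisfied by backend1 and backend3, so the
# write above succeeded and key2 is readable from every node except backend2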
self._assert_data(self.proxy_addr, 'key2', data2) 82 | self._assert_data(self.backend1_addr, 'key2', data2) 83 | with self.assertRaises(Exception) as exc: 84 | MCStore(self.backend2_addr).get('key2') 85 | 86 | self._assert_data(self.backend3_addr, 'key2', data2) 87 | 88 | print "down backend1, proxy.get/set should fail" 89 | self.backend1.stop() 90 | self.assertEqual(proxy.get('key1'), data1) 91 | with self.assertRaises(Exception) as exc: 92 | MCStore(self.backend1_addr).get('key2') 93 | MCStore(self.backend2_addr).get('key2') 94 | with self.assertRaises(Exception) as exc: 95 | proxy.set('key2', data2) 96 | 97 | 98 | 99 | 100 | def tearDown(self): 101 | stop_svc(self.proxy_p) 102 | self.backend1.stop() 103 | self.backend2.stop() 104 | self.backend3.stop() 105 | self.backend1.clean() 106 | self.backend2.clean() 107 | self.backend3.clean() 108 | 109 | if __name__ == '__main__': 110 | unittest.main() 111 | 112 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 : 113 | -------------------------------------------------------------------------------- /tests/test_log_rotate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding:utf-8 3 | 4 | import os 5 | import sys 6 | import time 7 | import signal 8 | from base import TestBeanseyeBase, BeansdbInstance, random_string, stop_svc 9 | import unittest 10 | from douban.beansdb import BeansDBProxy, MCStore 11 | import subprocess 12 | 13 | 14 | class TestLogRotate(TestBeanseyeBase): 15 | 16 | proxy_addr = 'localhost:7905' 17 | backend1_addr = 'localhost:57901' 18 | backend2_addr = 'localhost:57902' 19 | backend3_addr = 'localhost:57903' 20 | data_base_path = os.path.join("/tmp", "beanseye_test") 21 | accesslog = os.path.join(data_base_path, 'beansproxy_testlog.log') 22 | accesslog_bak = os.path.join(data_base_path, 'beansproxy_testlog.log_bak') 23 | errorlog = os.path.join(data_base_path, 'beansproxy_error_testlog.log') 24 | errorlog_bak = os.path.join(data_base_path, 'beansproxy_error_testlog.log_bak') 25 | 26 | 27 | def setUp(self): 28 | self._init_dir() 29 | if os.path.exists(self.accesslog): 30 | os.remove(self.accesslog) 31 | if os.path.exists(self.errorlog): 32 | os.remove(self.errorlog) 33 | self.backend1 = BeansdbInstance(self.data_base_path, 57901) 34 | self.backend2 = BeansdbInstance(self.data_base_path, 57902) 35 | self.backend3 = BeansdbInstance(self.data_base_path, 57903) 36 | self.backend1.start() 37 | self.backend2.start() 38 | self.backend3.start() 39 | # start 3 backend, no additional temporary node 40 | proxy_conf = { 41 | 'servers': [ 42 | self.backend1_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 43 | self.backend2_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 44 | self.backend3_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 45 | ], 46 | 'port': 7905, 47 | 'webport': 7908, 48 | 'threads': 8, 49 | 'n': 3, 50 | 'w': 2, 51 | 'r': 1, 52 | 'buckets': 16, 53 | 'slow': 100, 54 | 'listen': '0.0.0.0', 55 | 'proxies': [self.proxy_addr], 56 | } 57 | self.proxy_p = self._start_proxy(proxy_conf) 58 | 59 | def test1(self): 60 | data1 = random_string(10) 61 | time.sleep(1) 62 | 63 | print "test normal write" 64 | proxy = BeansDBProxy([self.proxy_addr]) 65 | proxy.delete('key1') 66 | proxy.set('key1', data1) 67 | self._assert_data(self.backend1_addr, 'key1', data1) 68 | self._assert_data(self.backend2_addr, 'key1', data1) 69 | self._assert_data(self.backend3_addr, 'key1', data1) 70 | self.assert_(MCStore(self.backend2_addr).exists('key1')) 71 | 72 | cmd = "ls -l /proc/%s/fd" % 
(self.proxy_p.pid) 73 | print cmd 74 | print subprocess.check_output(cmd, shell=True) 75 | 76 | print "move log" 77 | if os.path.exists(self.accesslog_bak): 78 | os.remove(self.accesslog_bak) 79 | if os.path.exists(self.errorlog_bak): 80 | os.remove(self.errorlog_bak) 81 | os.rename(self.accesslog, self.accesslog_bak) 82 | os.rename(self.errorlog, self.errorlog_bak) 83 | print "write more data to see if new log not exists" 84 | data1 = random_string(10) 85 | proxy.set('key1', data1) 86 | self.assert_(not os.path.exists(self.accesslog)) 87 | self.assert_(not os.path.exists(self.errorlog)) 88 | 89 | time.sleep(5) 90 | 91 | print "send SIGINT signal, should re-open log file" 92 | os.kill(self.proxy_p.pid, signal.SIGINT) 93 | 94 | cmd = "ls -l /proc/%s/fd" % (self.proxy_p.pid) 95 | print subprocess.check_output(cmd, shell=True) 96 | 97 | s = os.stat(self.accesslog) 98 | self.assert_(os.path.exists(self.accesslog)) 99 | self.assert_(os.path.exists(self.errorlog)) 100 | print "see if write to new accesslog" 101 | proxy.get('key1') 102 | time.sleep(1) 103 | s_new = os.stat(self.accesslog) 104 | print s_new.st_size, s.st_size 105 | self.assert_(s_new.st_size > s.st_size) 106 | 107 | 108 | 109 | 110 | 111 | def tearDown(self): 112 | stop_svc(self.proxy_p) 113 | self.backend1.stop() 114 | self.backend2.stop() 115 | self.backend3.stop() 116 | self.backend1.clean() 117 | self.backend2.clean() 118 | self.backend3.clean() 119 | 120 | if __name__ == '__main__': 121 | unittest.main() 122 | 123 | 124 | 125 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 : 126 | -------------------------------------------------------------------------------- /tests/test2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding:utf-8 3 | 4 | import os 5 | import sys 6 | import time 7 | from base import TestBeanseyeBase, BeansdbInstance, random_string, stop_svc 8 | import unittest 9 | from douban.beansdb import BeansDBProxy, MCStore 10 | 11 | 12 | class Test2(TestBeanseyeBase): 13 | 14 | proxy_addr = 'localhost:7905' 15 | backend1_addr = 'localhost:57901' 16 | backend2_addr = 'localhost:57902' 17 | backend3_addr = 'localhost:57903' 18 | backend4_addr = 'localhost:57904' 19 | 20 | data_base_path = os.path.join("/tmp", "beanseye_test") 21 | accesslog = os.path.join(data_base_path, 'beansproxy_test2.log') 22 | errorlog = os.path.join(data_base_path, 'beansproxy_error_test2.log') 23 | 24 | def setUp(self): 25 | self._init_dir() 26 | if os.path.exists(self.accesslog): 27 | os.remove(self.accesslog) 28 | if os.path.exists(self.errorlog): 29 | os.remove(self.errorlog) 30 | 31 | self.backend1 = BeansdbInstance(self.data_base_path, 57901) 32 | self.backend2 = BeansdbInstance(self.data_base_path, 57902) 33 | self.backend3 = BeansdbInstance(self.data_base_path, 57903) 34 | self.backend4 = BeansdbInstance(self.data_base_path, 57904) 35 | self.backend1.start() 36 | self.backend2.start() 37 | self.backend3.start() 38 | self.backend4.start() 39 | # start 3 backend, no additional temporary node 40 | proxy_conf = { 41 | 'servers': [ 42 | self.backend1_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 43 | self.backend2_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 44 | self.backend3_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 45 | self.backend4_addr + ' -F -E -D -C -B -A -9 -8 -7 -6 -5 -4 -3 -2 -1 -0', 46 | ], 47 | 'port': 7905, 48 | 'webport': 7908, 49 | 'threads': 8, 50 | 'n': 3, 51 | 'w': 2, 52 | 'r': 1, 53 | 'buckets': 16, 54 | 'slow': 100, 55 | 'listen': '0.0.0.0', 56 | 
'proxies': [self.proxy_addr], 57 | } 58 | self.proxy_p = self._start_proxy(proxy_conf) 59 | 60 | def test2(self): 61 | data1 = random_string(10) 62 | data2 = random_string(10) 63 | time.sleep(1) 64 | 65 | print "test normal write" 66 | proxy = BeansDBProxy([self.proxy_addr]) 67 | proxy.delete('key1') 68 | proxy.set('key1', data1) 69 | self._assert_data(self.backend1_addr, 'key1', data1) 70 | self._assert_data(self.backend2_addr, 'key1', data1) 71 | self._assert_data(self.backend3_addr, 'key1', data1) 72 | self._assert_data(self.backend4_addr, 'key1', None, "temporary node should not have the key when all primary nodes is good") 73 | 74 | proxy.delete('key2') 75 | print "down backend1 and backend2, proxy.get should be ok" 76 | self.backend1.stop() 77 | self.backend2.stop() 78 | proxy.set('key2', data2) 79 | self.assertEqual(proxy.get('key2'), data2) 80 | self._assert_data(self.proxy_addr, 'key2', data2) 81 | with self.assertRaises(Exception) as exc: 82 | MCStore(self.backend1_addr).get('key2') 83 | MCStore(self.backend2_addr).get('key2') 84 | self._assert_data(self.backend3_addr, 'key2', data2) 85 | #"temporary node should have the key when primary nodes < 2" 86 | self._assert_data(self.backend4_addr, 'key2', data2) 87 | print "test delete under bad sistuation, will raise error according to current behavior" 88 | with self.assertRaises(Exception) as exc: 89 | proxy.delete('key2') 90 | self._assert_data(self.backend3_addr, 'key2', None) 91 | self._assert_data(self.backend4_addr, 'key2', None) 92 | 93 | 94 | print "start backend2, (backend1 still down), test delete" 95 | self.backend2.start() 96 | time.sleep(10) 97 | proxy.delete('key2') 98 | self._assert_data(self.proxy_addr, 'key2', None) 99 | self._assert_data(self.backend2_addr, 'key2', None) 100 | self._assert_data(self.backend3_addr, 'key2', None) 101 | self._assert_data(self.backend4_addr, 'key2', None) 102 | 103 | 104 | 105 | def tearDown(self): 106 | stop_svc(self.proxy_p) 107 | self.backend1.stop() 108 | self.backend2.stop() 109 | self.backend3.stop() 110 | self.backend4.stop() 111 | self.backend1.clean() 112 | self.backend2.clean() 113 | self.backend3.clean() 114 | self.backend4.clean() 115 | 116 | if __name__ == '__main__': 117 | unittest.main() 118 | 119 | 120 | 121 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 : 122 | -------------------------------------------------------------------------------- /src/memcache/rclient.go: -------------------------------------------------------------------------------- 1 | /* 2 | * memcache readonly client 3 | */ 4 | 5 | package memcache 6 | 7 | import ( 8 | "errors" 9 | "math" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | type RClient struct { 15 | scheduler Scheduler 16 | N, W, R int 17 | } 18 | 19 | func NewRClient(sch Scheduler, N, W, R int) (c *RClient) { 20 | c = new(RClient) 21 | c.scheduler = sch 22 | c.N = N 23 | c.W = W 24 | c.R = R 25 | return c 26 | } 27 | 28 | func (c *RClient) Get(key string) (r *Item, targets []string, err error) { 29 | hosts := c.scheduler.GetHostsByKey(key) 30 | cnt := 0 31 | for _, host := range hosts { 32 | st := time.Now() 33 | r, err = host.Get(key) 34 | if err == nil { 35 | cnt++ 36 | if r != nil { 37 | t := float64(time.Now().Sub(st)) / 1e9 38 | c.scheduler.Feedback(host, key, 1 - float64(math.Sqrt(t)*t)) 39 | // got the right rval 40 | targets = []string{host.Addr} 41 | err = nil 42 | //return r, nil 43 | return 44 | } 45 | } else if err.Error() != "wait for retry" { 46 | c.scheduler.Feedback(host, key, -5) 47 | } else { 48 | 
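// a "wait for retry" error is treated as a soft failure and penalized
// less (-2) than a hard error (-5), so such hosts recover their rank sooner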
c.scheduler.Feedback(host, key, -2) 49 | } 50 | 51 | if cnt >= c.R { 52 | // because hosts are sorted 53 | err = nil 54 | } 55 | } 56 | // here is a failure exit 57 | return 58 | } 59 | 60 | func (c *RClient) getMulti(keys []string) (rs map[string]*Item, targets []string, err error) { 61 | need := len(keys) 62 | rs = make(map[string]*Item, need) 63 | hosts := c.scheduler.GetHostsByKey(keys[0]) 64 | suc := 0 65 | for _, host := range hosts { 66 | st := time.Now() 67 | r, er := host.GetMulti(keys) 68 | if er == nil { 69 | suc += 1 70 | if r != nil { 71 | targets = append(targets, host.Addr) 72 | t := float64(time.Now().Sub(st)) / 1e9 73 | c.scheduler.Feedback(host, keys[0], 1 - float64(math.Sqrt(t)*t)) 74 | } 75 | } else if er.Error() != "wait for retry" { // failed 76 | c.scheduler.Feedback(host, keys[0], -5) 77 | } else { 78 | c.scheduler.Feedback(host, keys[0], -2) 79 | } 80 | err = er 81 | if er != nil { 82 | continue 83 | } 84 | 85 | for k, v := range r { 86 | rs[k] = v 87 | } 88 | 89 | if len(rs) == need { 90 | break 91 | } 92 | 93 | new_keys := []string{} 94 | for _, k := range keys { 95 | if _, ok := rs[k]; !ok { 96 | new_keys = append(new_keys, k) 97 | } 98 | } 99 | keys = new_keys 100 | if len(keys) == 0 { 101 | break // repeated keys 102 | } 103 | } 104 | if suc > c.R { 105 | err = nil 106 | } 107 | return 108 | 109 | } 110 | 111 | func (c *RClient) GetMulti(keys []string) (rs map[string]*Item, targets []string, err error) { 112 | var lock sync.Mutex 113 | rs = make(map[string]*Item, len(keys)) 114 | 115 | gs := c.scheduler.DivideKeysByBucket(keys) 116 | reply := make(chan bool, len(gs)) 117 | for _, ks := range gs { 118 | if len(ks) > 0 { 119 | go func(keys []string) { 120 | r, t, e := c.getMulti(keys) 121 | if e != nil { 122 | err = e 123 | } else { 124 | for k, v := range r { 125 | lock.Lock() 126 | rs[k] = v 127 | targets = append(targets, t...) 
128 | lock.Unlock() 129 | } 130 | } 131 | reply <- true 132 | }(ks) 133 | } else { 134 | reply <- true 135 | } 136 | } 137 | // wait for complete 138 | for _, _ = range gs { 139 | <-reply 140 | } 141 | return 142 | } 143 | 144 | func (c *RClient) Set(key string, item *Item, noreply bool) (ok bool, targets []string, final_err error) { 145 | ok = false 146 | final_err = errors.New("Access Denied for ReadOnly") 147 | return 148 | } 149 | 150 | func (c *RClient) Append(key string, value []byte) (ok bool, targets []string, final_err error) { 151 | ok = false 152 | final_err = errors.New("Access Denied for ReadOnly") 153 | return 154 | } 155 | 156 | func (c *RClient) Incr(key string, value int) (result int, target []string, err error) { 157 | result = 0 158 | err = errors.New("Access Denied for ReadOnly") 159 | return 160 | } 161 | 162 | func (c *RClient) Delete(key string) (r bool, targets []string, err error) { 163 | r = false 164 | err = errors.New("Access Denied for ReadOnly") 165 | return 166 | } 167 | 168 | func (c *RClient) Len() int { 169 | return 0 170 | } 171 | -------------------------------------------------------------------------------- /tests/test_down_1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding:utf-8 3 | 4 | import os 5 | import sys 6 | import time 7 | from base import TestBeanseyeBase, BeansdbInstance, random_string, stop_svc 8 | import unittest 9 | from douban.beansdb import BeansDBProxy, MCStore 10 | 11 | class Test3(TestBeanseyeBase): 12 | 13 | proxy_addr = 'localhost:7905' 14 | backend1_addr = 'localhost:57901' 15 | backend2_addr = 'localhost:57902' 16 | backend3_addr = 'localhost:57903' 17 | backend4_addr = 'localhost:57904' 18 | 19 | data_base_path = os.path.join("/tmp", "beanseye_test") 20 | accesslog = os.path.join(data_base_path, 'beansproxy_test3.log') 21 | errorlog = os.path.join(data_base_path, 'beansproxy_error_test3.log') 22 | 23 | def setUp(self): 24 | self._init_dir() 25 | if os.path.exists(self.accesslog): 26 | os.remove(self.accesslog) 27 | if os.path.exists(self.errorlog): 28 | os.remove(self.errorlog) 29 | 30 | self.backend1 = BeansdbInstance(self.data_base_path, 57901) 31 | self.backend2 = BeansdbInstance(self.data_base_path, 57902) 32 | self.backend3 = BeansdbInstance(self.data_base_path, 57903) 33 | self.backend4 = BeansdbInstance(self.data_base_path, 57904) 34 | self.backend1.start() 35 | self.backend2.start() 36 | self.backend3.start() 37 | self.backend4.start() 38 | # start 3 backend, no additional temporary node 39 | proxy_conf = { 40 | 'servers': [ 41 | self.backend1_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 42 | self.backend2_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 43 | self.backend3_addr + ' F E D C B A 9 8 7 6 5 4 3 2 1 0', 44 | self.backend4_addr + ' -F -E -D -C -B -A -9 -8 -7 -6 -5 -4 -3 -2 -1 -0', 45 | ], 46 | 'port': 7905, 47 | 'webport': 7908, 48 | 'threads': 8, 49 | 'n': 3, 50 | 'w': 2, 51 | 'r': 1, 52 | 'buckets': 16, 53 | 'slow': 100, 54 | 'listen': '0.0.0.0', 55 | 'proxies': [self.proxy_addr], 56 | } 57 | self.proxy_p = self._start_proxy(proxy_conf) 58 | 59 | 60 | def test3(self): 61 | """ test wether will fallback only down 2 primary node """ 62 | self.skipTest("disable acorrding to current behavior") 63 | return 64 | proxy = BeansDBProxy([self.proxy_addr]) 65 | self.backend1.stop() 66 | # self.backend2.stop() 67 | key3 = 'key3' 68 | i = 0 69 | store4 = MCStore(self.backend4_addr) 70 | ts_start = time.time() 71 | fallbacked = False 72 | while i < 2000: 
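# keep rewriting the same key until it shows up on backend4, the temporary
# node with the negative bucket assignments, which proves the proxy fell back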
73 | i += 1 74 | data3 = random_string(10) 75 | proxy.set(key3, data3) 76 | self.assertEqual(proxy.get(key3), data3) 77 | # time.sleep(0.1) 78 | data3_ = store4.get(key3) 79 | if data3_ is None: 80 | print "store4 get nothing yet, round=", i 81 | else: 82 | print "fallbacked to store4 after %s tries" % (i) 83 | fallbacked = True 84 | self.assertEqual(data3_, data3) 85 | break 86 | ts_stop = time.time() 87 | if not fallbacked: 88 | self.fail("still not fallback to backend 4") 89 | print "%s seconds passed" % (ts_stop - ts_start) 90 | self.backend1.start() 91 | self.assert_(proxy.exists("key3")) 92 | store1 = MCStore(self.backend1_addr) 93 | self.assert_(store1.get("key3") is None) 94 | data3 = random_string(10) 95 | ts_recover_start = time.time() 96 | i = 0 97 | recovered = False 98 | while i < 2000: 99 | #data3 = random_string(10) 100 | i += 1 101 | # time.sleep(0.1) 102 | proxy.set(key3, data3) 103 | self.assertEqual(proxy.get(key3), data3) 104 | data3_ = store1.get(key3) 105 | if data3_ is None: 106 | print "store1 get nothing yet, round=", i 107 | else: 108 | print "recover to store1 after %s tries, %s sec" % (i, time.time() - ts_recover_start) 109 | recovered = True 110 | self.assertEqual(data3_, data3) 111 | break 112 | if not recovered: 113 | self.fail("still not fallback to backend 1") 114 | 115 | 116 | 117 | def tearDown(self): 118 | stop_svc(self.proxy_p) 119 | self.backend1.stop() 120 | self.backend2.stop() 121 | self.backend3.stop() 122 | self.backend4.stop() 123 | self.backend1.clean() 124 | self.backend2.clean() 125 | self.backend3.clean() 126 | self.backend4.clean() 127 | 128 | if __name__ == '__main__': 129 | unittest.main() 130 | 131 | 132 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 : 133 | -------------------------------------------------------------------------------- /src/memcache/schedule_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import "testing" 4 | 5 | type testCase struct { 6 | key string 7 | hosts []string 8 | } 9 | 10 | func testScheduler(t *testing.T, schd Scheduler, cases []testCase, in_order bool) { 11 | for i, c := range cases { 12 | hosts := schd.GetHostsByKey(c.key) 13 | t.Log(i, c.key, c.hosts, hosts) 14 | if len(hosts) != len(c.hosts) { 15 | t.Errorf("case #%d: key %s: number of hosts not match, %d <> %d", i, c.key, len(hosts), len(c.hosts)) 16 | continue 17 | } 18 | if in_order || len(hosts) == 1 { 19 | for j, h := range hosts { 20 | if h.Addr != c.hosts[j] { 21 | t.Errorf("key %s: expect %s but %s", c.key, c.hosts[j], h.Addr) 22 | } 23 | } 24 | } else { 25 | m := make(map[string]bool, len(c.hosts)) 26 | for _, h := range c.hosts { 27 | m[h] = true 28 | } 29 | for _, h := range hosts { 30 | if _, ok := m[h.Addr]; !ok { 31 | t.Errorf("key %s: host %s is not expected", c.key, h.Addr) 32 | } 33 | } 34 | } 35 | } 36 | } 37 | 38 | var mhosts = map[string][]int{ 39 | "host1": {0, 1}, 40 | "host2": {2, 3}, 41 | "host3": {1, 3}, 42 | } 43 | 44 | var mtests = []testCase{ 45 | testCase{"1key1", []string{"host2", "host3"}}, 46 | testCase{"2key2", []string{"host1"}}, 47 | testCase{"3key3", []string{"host1"}}, 48 | testCase{"4key4", []string{"host1", "host3"}}, 49 | } 50 | 51 | func TestManualScheduler(t *testing.T) { 52 | schd := NewManualScheduler(mhosts) 53 | testScheduler(t, schd, mtests, false) 54 | } 55 | 56 | func TestAutoScheduler(t *testing.T) { 57 | } 58 | 59 | var modhosts = []string{ 60 | "host0:11211", "host0:11212", "host0:11213", "host0:11214", 61 | "host1:11211", 
"host1:11212", "host1:11213", "host1:11214", 62 | } 63 | 64 | var modtests = []testCase{ 65 | testCase{"0:key:0", []string{"host1:11211"}}, 66 | testCase{"1:key:1", []string{"host1:11213"}}, 67 | testCase{"2:key:2", []string{"host1:11213"}}, 68 | testCase{"3:key:3", []string{"host0:11211"}}, 69 | } 70 | 71 | func TestModScheduler(t *testing.T) { 72 | schd := NewModScheduler(modhosts, "md5") 73 | testScheduler(t, schd, modtests, true) 74 | } 75 | 76 | var chthosts = []string{ 77 | "host0:11211", "host0:11212", "host0:11213", "host0:11214", 78 | "host1:11211", "host1:11212", "host1:11213", "host1:11214", 79 | "host2:11211", "host2:11212", "host2:11213", "host2:11214", 80 | "host3:11211", "host3:11212", "host3:11213", "host3:11214", 81 | "host4:11211", "host4:11212", "host4:11213", "host4:11214", 82 | "host5:11211", "host5:11212", "host5:11213", "host5:11214", 83 | "host6:11211", "host6:11212", "host6:11213", "host6:11214", 84 | "host7:11211", "host7:11212", "host7:11213", "host7:11214", 85 | "host8:11211", "host8:11212", "host8:11213", "host8:11214", 86 | "host9:11211", "host9:11212", "host9:11213", "host9:11214", 87 | } 88 | 89 | var chtests = []testCase{ 90 | testCase{"key:0", []string{"host2:11214"}}, 91 | testCase{"key:1", []string{"host5:11213"}}, 92 | testCase{"key:2", []string{"host0:11213"}}, 93 | testCase{"key:3", []string{"host6:11212"}}, 94 | testCase{"key:4", []string{"host4:11214"}}, 95 | testCase{"key:5", []string{"host2:11213"}}, 96 | testCase{"key:6", []string{"host1:11211"}}, 97 | testCase{"key:7", []string{"host0:11214"}}, 98 | testCase{"key:8", []string{"host0:11214"}}, 99 | testCase{"key:9", []string{"host5:11212"}}, 100 | testCase{"key:10", []string{"host4:11214"}}, 101 | testCase{"key:11", []string{"host2:11212"}}, 102 | testCase{"key:12", []string{"host7:11212"}}, 103 | testCase{"key:13", []string{"host8:11213"}}, 104 | testCase{"key:14", []string{"host8:11212"}}, 105 | testCase{"key:15", []string{"host2:11213"}}, 106 | testCase{"key:16", []string{"host8:11212"}}, 107 | testCase{"key:17", []string{"host1:11212"}}, 108 | testCase{"key:18", []string{"host2:11214"}}, 109 | testCase{"key:19", []string{"host8:11212"}}, 110 | testCase{"key:20", []string{"host1:11211"}}, 111 | testCase{"key:21", []string{"host1:11211"}}, 112 | testCase{"key:22", []string{"host6:11211"}}, 113 | testCase{"key:23", []string{"host1:11213"}}, 114 | testCase{"key:24", []string{"host3:11212"}}, 115 | testCase{"key:25", []string{"host8:11213"}}, 116 | testCase{"key:26", []string{"host7:11213"}}, 117 | testCase{"key:27", []string{"host3:11212"}}, 118 | testCase{"key:28", []string{"host0:11212"}}, 119 | testCase{"key:29", []string{"host2:11212"}}, 120 | testCase{"key:30", []string{"host8:11213"}}, 121 | testCase{"key:31", []string{"host8:11211"}}, 122 | testCase{"key:32", []string{"host7:11213"}}, 123 | testCase{"key:33", []string{"host1:11213"}}, 124 | testCase{"key:34", []string{"host9:11212"}}, 125 | testCase{"key:35", []string{"host4:11214"}}, 126 | testCase{"key:36", []string{"host3:11213"}}, 127 | testCase{"key:37", []string{"host7:11211"}}, 128 | testCase{"key:38", []string{"host3:11212"}}, 129 | testCase{"key:39", []string{"host9:11213"}}, 130 | testCase{"key:40", []string{"host1:11212"}}, 131 | testCase{"key:41", []string{"host3:11214"}}, 132 | testCase{"key:42", []string{"host0:11211"}}, 133 | testCase{"key:43", []string{"host9:11213"}}, 134 | testCase{"key:44", []string{"host1:11212"}}, 135 | testCase{"key:45", []string{"host4:11213"}}, 136 | testCase{"key:46", []string{"host5:11212"}}, 137 
| testCase{"key:47", []string{"host4:11214"}}, 138 | testCase{"key:48", []string{"host8:11213"}}, 139 | testCase{"key:49", []string{"host3:11214"}}, 140 | } 141 | 142 | func TestConsistantHashScheduler(t *testing.T) { 143 | schd := NewConsistantHashScheduler(chthosts, "md5") 144 | testScheduler(t, schd, chtests, true) 145 | } 146 | -------------------------------------------------------------------------------- /static/mfs.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin:0; 3 | border:0; 4 | padding:0; 5 | height:100%; 6 | max-height:100%; 7 | background:#fff; 8 | font-family:arial, verdana, sans-serif; 9 | font-size:12px; 10 | overflow:hidden; 11 | } 12 | 13 | /* for internet explorer */ 14 | * html body { 15 | padding:54px 0 0 0; 16 | } 17 | 18 | #container { 19 | font-family:"times new roman", serif; 20 | font-size: 1.2em; 21 | position:fixed; 22 | top:54px; 23 | left:0; 24 | bottom:0; 25 | right:0; 26 | overflow:auto; 27 | background:#888; 28 | padding:15px 2px 15px 2px; 29 | color:#000; 30 | text-align:center; 31 | } 32 | 33 | * html #container { 34 | height:100%; 35 | width:100%; 36 | } 37 | 38 | #header { 39 | position:absolute; 40 | top:0; 41 | left:0; 42 | width:100%; 43 | height:54px; 44 | overflow:hidden; 45 | background:#444; 46 | color:#FFF 47 | } 48 | * html #header {height:54px;} 49 | 50 | #footer { 51 | position:absolute; 52 | bottom:0; 53 | left:0; 54 | width:100%; 55 | height:19px; 56 | overflow:hidden; 57 | text-align:right; 58 | padding:0; 59 | background:#444; 60 | border-top:1px solid #FFF; 61 | color:#FFF 62 | } 63 | * html #footer {height:20px;} 64 | 65 | a:link {color: #000;} 66 | a:active {color: #000;} 67 | a:visited {color: #000;} 68 | 69 | a.VISIBLELINK {color: #008;} 70 | 71 | span.IGNORE {color: #888;} 72 | span.OVERGOAL {color: #00E;} 73 | span.NORMAL {color: #080;} 74 | span.UNDERGOAL {color: #EA0;} 75 | span.ENDANGERED {color: #F50;} 76 | span.MISSING {color: #F00;} 77 | 78 | span.NOTEXCEEDED {color: #080;} 79 | span.EXCEEDED {color: #F00;} 80 | span.SEXCEEDED {color: #E80;} 81 | 82 | table {border: 1px solid black; border-spacing: 0px; border-collapse:collapse;} 83 | td {white-space:nowrap; border: 1px solid black; border-spacing: 0px; border-collapse:collapse; font-family: sans-serif; font-size: 12px;} 84 | th {white-space:nowrap; border: 1px solid black; border-spacing: 0px; border-collapse:collapse; font-family: sans-serif; font-size: 12px;} 85 | 86 | table.HDR {border-style: none; width: 100%; height: 54px; border-collapse:separate;} 87 | table.HDR td.LOGO {border-bottom: 1px solid #FFF; border-right: none; border-top: none; border-left: none; vertical-align: middle; padding: 0px 30px 0px 10px;} 88 | table.HDR td.MENU {border-style: none; vertical-align: bottom; padding: 0px 0px 0px 0px; } 89 | table.HDR td.FILLER {border-bottom: 1px solid #FFF; border-right: none; border-top: none; border-left: none; vertical-align: middle; text-align: right; padding: 0px 10px 0px 30px; width: 100%;} 90 | 91 | table.MENU {border-style: none; border-collapse:separate;} 92 | table.MENU a:link {color:#FFF; text-decoration:none;} 93 | table.MENU a:active {color:#FFF; text-decoration:none;} 94 | table.MENU a:visited {color:#FFF; text-decoration:none;} 95 | table.MENU td {padding: 8px; font-family: sans-serif; font-size: 16px; text-align:center; white-space: nowrap; color:#FFF} 96 | table.MENU td.UU {border-bottom: 1px solid #FFF; border-right: none; border-top: 1px solid #000; border-left: 1px solid #000; 
background-color: #666;} 97 | table.MENU td.SU {border-bottom: 1px solid #FFF; border-right: none; border-top: 1px solid #000; border-left: 1px solid #FFF; background-color: #666;} 98 | table.MENU td.US {border-bottom: 1px solid #888; border-right: none; border-top: 1px solid #FFF; border-left: 1px solid #FFF; background-color: #888;} 99 | table.MENU td.SS {border-bottom: 1px solid #888; border-right: none; border-top: 1px solid #FFF; border-left: 1px solid #AAA; background-color: #888;} 100 | table.MENU td.LUU {border-bottom: 1px solid #FFF; border-right: 1px solid #000; border-top: 1px solid #000; border-left: 1px solid #000; background-color: #666;} 101 | table.MENU td.LSU {border-bottom: 1px solid #FFF; border-right: 1px solid #000; border-top: 1px solid #000; border-left: 1px solid #FFF; background-color: #666;} 102 | table.MENU td.LUS {border-bottom: 1px solid #888; border-right: 1px solid #FFF; border-top: 1px solid #FFF; border-left: 1px solid #FFF; background-color: #888;} 103 | table.MENU td.LSS {border-bottom: 1px solid #888; border-right: 1px solid #FFF; border-top: 1px solid #FFF; border-left: 1px solid #AAA; background-color: #888;} 104 | 105 | table.FR {width: 1010px; margin-left:auto; margin-right:auto;} 106 | table.FR td, table.FR tr.C1 td {background-color: #E8E8E8;} 107 | table.FR tr.C2 td {background-color: #D6D6D6;} 108 | table.FR tr:first-child th {background-color: #A0A0A0;} 109 | table.FR th {background-color: #C0C0C0;} 110 | table.FR th.SMPROGBAR {width: 102px;} 111 | table.FR th.PROGBAR {width: 202px;} 112 | table.FR th.PERC4 {width: 4%;} 113 | table.FR th.PERC96 {width: 96%;} 114 | table.FR th.PERC8 {width: 8%;} 115 | table.FR td.REL {background-color: #D0D0F0;} 116 | table.FR td.PR {background-color: #7878B8;} 117 | 118 | table.CR {width: 1010px; margin-left:auto; margin-right:auto;} 119 | table.CR td, table.CR tr.C1 td {background-color: #E8E8E8;} 120 | table.CR tr.C2 td {background-color: #E0E0E0;} 121 | table.CR tr.C3 td {background-color: #D0D0D0;} 122 | table.CR tr.C4 td {background-color: #D8D8D8;} 123 | table.CR tr:first-child th {background-color: #A0A0A0;} 124 | table.CR th {background-color: #C0C0C0;} 125 | 126 | table.BOTMENU {width: 1000px; margin-left:auto; margin-right:auto;} 127 | 128 | .smbox {position:relative; border:1px solid #000; width:100px; background:#fff; height:14px;} 129 | .smbox .progress {background:#0F0; float:left; height:14px;} 130 | .smbox .value {width:100%; position:absolute; left:0; top:0; text-align:center;} 131 | 132 | .box {position:relative; border:1px solid #000; width:200px; background:#fff; height:14px;} 133 | .box .progress {background:#0F0; float:left; height:14px;} 134 | .box .value {width:100%; position:absolute; left:0; top:0; text-align:center;} 135 | -------------------------------------------------------------------------------- /src/memcache/store_test.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "testing" 7 | ) 8 | 9 | func testDistributeStore(t *testing.T, dclient DistributeStorage) { 10 | key := "dtest" 11 | r, hs, _ := dclient.Get(key) 12 | if r != nil && len(hs) > 0 { 13 | t.Errorf("get should return nil and []") 14 | } 15 | // set 16 | v := []byte("value") 17 | flag := 2 18 | ok, hs, _ := dclient.Set(key, &Item{Body: v, Flag: flag}, false) 19 | if !ok || len(hs) == 0 { 20 | t.Errorf("set failed") 21 | } 22 | v2, h, _ := dclient.Get(key) 23 | if v2 == nil || !bytes.Equal(v, v2.Body) || len(h) == 0 { 24 | 
t.Errorf("should return same value and a get host") 25 | } else { 26 | in_host := false 27 | for _, set_h := range hs { 28 | if set_h == h { 29 | in_host = true 30 | break 31 | } 32 | } 33 | if !in_host { 34 | t.Errorf("get should from set hosts") 35 | } 36 | } 37 | if v2.Flag != flag { 38 | t.Errorf("should return flag 2") 39 | } 40 | // set with noreply 41 | v = []byte("value 2") 42 | flag = 3 43 | key2 := "test2" 44 | ok, hs, _ = dclient.Set(key2, &Item{Body: v, Flag: flag}, true) 45 | if !ok || len(hs) == 0 { 46 | t.Errorf("set with ply failed") 47 | } 48 | v2, h, _ = dclient.Get(key2) 49 | if v2 == nil || !bytes.Equal(v, v2.Body) || len(h) == 0 { 50 | t.Errorf("should return same value") 51 | } else { 52 | in_host := false 53 | for _, set_h := range hs { 54 | if set_h == h { 55 | in_host = true 56 | break 57 | } 58 | } 59 | if !in_host { 60 | t.Errorf("get should from set hosts") 61 | } 62 | } 63 | // get_multi 64 | items, hhs, _ := dclient.GetMulti([]string{"test", "test", "test2", "test3"}) 65 | if len(items) != 2 || len(hhs) == 0 { 66 | t.Errorf("get_multi should return 2 values, but got %d", len(items)) 67 | } 68 | keys := make([]string, 102) 69 | for i := 0; i < 100; i++ { 70 | keys[i] = fmt.Sprintf("__t%d", i) 71 | dclient.Set(keys[i], &Item{Body: v}, true) 72 | } 73 | items, hhs, _ = dclient.GetMulti(keys) 74 | if len(items) != 100 || len(hhs) == 0 { 75 | t.Errorf("get_multi should return 100 values, but got %d", len(items)) 76 | } 77 | // get large obj 78 | v = make([]byte, 1024*1000) 79 | if ok, hhs, _ := dclient.Set("test_large", &Item{Body: v, Flag: flag}, false);!ok || len(hhs) == 0 { 80 | t.Errorf("set large value failed") 81 | } 82 | v2, _ = dclient.Get("test_large") 83 | if v2 == nil || !bytes.Equal(v, v2.Body) { 84 | t.Errorf("should return same large value") 85 | } 86 | // append 87 | } 88 | 89 | func testStore(t *testing.T, client Storage) { 90 | key := "test" 91 | r, _ := client.Get("test") 92 | if r != nil { 93 | t.Errorf("get should return nil") 94 | } 95 | // set 96 | v := []byte("value") 97 | flag := 2 98 | if ok, _ := client.Set("test", &Item{Body: v, Flag: flag}, false); !ok { 99 | t.Errorf("set failed") 100 | } 101 | v2, _ := client.Get("test") 102 | if !bytes.Equal(v, v2.Body) { 103 | t.Errorf("should return value") 104 | } 105 | if v2.Flag != flag { 106 | t.Errorf("should return flag 2") 107 | } 108 | // set with noreply 109 | v = []byte("value 2") 110 | flag = 3 111 | if ok, _ := client.Set("test2", &Item{Body: v, Flag: flag}, true); !ok { 112 | t.Errorf("set failed") 113 | } 114 | v2, _ = client.Get("test2") 115 | if v2 == nil || !bytes.Equal(v, v2.Body) { 116 | t.Errorf("should return value") 117 | } 118 | // get_multi 119 | items, _ := client.GetMulti([]string{"test", "test", "test2", "test3"}) 120 | if len(items) != 2 { 121 | t.Errorf("get_multi should return 2 values, but got %d", len(items)) 122 | } 123 | keys := make([]string, 102) 124 | for i := 0; i < 100; i++ { 125 | keys[i] = fmt.Sprintf("__t%d", i) 126 | client.Set(keys[i], &Item{Body: v}, true) 127 | } 128 | items, _ = client.GetMulti(keys) 129 | if len(items) != 100 { 130 | t.Errorf("get_multi should return 100 values, but got %d", len(items)) 131 | } 132 | // get large obj 133 | v = make([]byte, 1024*1000) 134 | if ok, _ := client.Set("test_large", &Item{Body: v, Flag: flag}, false); !ok { 135 | t.Errorf("set large value failed") 136 | } 137 | v2, _ = client.Get("test_large") 138 | if v2 == nil || !bytes.Equal(v, v2.Body) { 139 | t.Errorf("should return large value") 140 | } 141 | // append 
142 | client.Set(key, &Item{Body: []byte("value")}, false) 143 | if ok, _ := client.Append("test", []byte(" good")); !ok { 144 | t.Error("append failed") 145 | } 146 | v2, _ = client.Get("test") 147 | if v2 == nil || string(v2.Body) != "value good" { 148 | t.Errorf("get after append: %v", v2) 149 | } 150 | // incr 151 | client.Set("test", &Item{Body: []byte("3"), Flag: 4}, false) 152 | if v, _ := client.Incr("test", 5); v != 8 { 153 | t.Errorf("incr failed: %d!=8", v) 154 | } 155 | // delete 156 | if ok, _ := client.Delete("test"); !ok { 157 | t.Errorf("delete failed") 158 | } 159 | v2, _ = client.Get("test") 160 | if v2 != nil { 161 | t.Errorf("get should return []") 162 | } 163 | } 164 | 165 | func testFailStore(t *testing.T, store Storage) { 166 | _, err := store.Get("key") 167 | if err == nil { 168 | t.Error("Get() should raise error") 169 | } 170 | _, err = store.GetMulti([]string{"key"}) 171 | if err == nil { 172 | t.Error("GetMulti() should raise error") 173 | } 174 | _, err = store.Set("key", &Item{}, false) 175 | if err == nil { 176 | t.Error("Set() should raise error") 177 | } 178 | _, err = store.Append("key", nil) 179 | if err == nil { 180 | t.Error("Append() should raise error") 181 | } 182 | _, err = store.Incr("key", 1) 183 | if err == nil { 184 | t.Error("Incr() should raise error") 185 | } 186 | _, err = store.Delete("key") 187 | if err == nil { 188 | t.Error("Delete() should raise error") 189 | } 190 | } 191 | 192 | func TestStore(t *testing.T) { 193 | store := NewMapStore() 194 | testStore(t, store) 195 | } 196 | -------------------------------------------------------------------------------- /src/memcache/server.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net" 9 | "os" 10 | "os/signal" 11 | "strings" 12 | "sync" 13 | "syscall" 14 | "time" 15 | ) 16 | 17 | var SlowCmdTime = time.Millisecond * 100 // 100ms 18 | 19 | type ServerConn struct { 20 | RemoteAddr string 21 | rwc io.ReadWriteCloser // i/o connection 22 | closeAfterReply bool 23 | } 24 | 25 | func newServerConn(conn net.Conn) *ServerConn { 26 | c := new(ServerConn) 27 | c.RemoteAddr = conn.RemoteAddr().String() 28 | c.rwc = conn 29 | return c 30 | } 31 | 32 | func (c *ServerConn) Close() { 33 | if c.rwc != nil { 34 | c.rwc.Close() 35 | c.rwc = nil 36 | } 37 | } 38 | 39 | func (c *ServerConn) Shutdown() { 40 | c.closeAfterReply = true 41 | } 42 | 43 | func (c *ServerConn) Serve(store DistributeStorage, stats *Stats) (e error) { 44 | rbuf := bufio.NewReader(c.rwc) 45 | wbuf := bufio.NewWriter(c.rwc) 46 | 47 | req := new(Request) 48 | for { 49 | e = req.Read(rbuf) 50 | if e != nil { 51 | break 52 | } 53 | 54 | t := time.Now() 55 | var err error 56 | resp, hosts, err := req.Process(store, stats) 57 | if resp == nil { 58 | break 59 | } 60 | dt := time.Since(t) 61 | if dt > SlowCmdTime { 62 | stats.UpdateStat("slow_cmd", 1) 63 | } 64 | 65 | if !resp.noreply { 66 | if resp.Write(wbuf) != nil || wbuf.Flush() != nil { 67 | break 68 | } 69 | } 70 | 71 | if AccessLog != nil { 72 | key := strings.Join(req.Keys, ":") 73 | size := 0 74 | switch req.Cmd { 75 | case "get", "gets": 76 | for _, v := range resp.items { 77 | size += len(v.Body) 78 | } 79 | case "set", "add", "replace": 80 | size = len(req.Item.Body) 81 | } 82 | if err != nil { 83 | size = -1 84 | } 85 | if len(hosts) == 0 { 86 | hosts = append(hosts, "NoWhere") 87 | } 88 | var hosts_str string 89 | if req.Cmd == "get" && size == 0 { 90 | 
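// a "get" that returned no bytes is logged as FAILED together with the hosts that were tried; successful requests log the serving hosts instead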
hosts_str = fmt.Sprintf("FAILED with %s", strings.Join(hosts, ",")) 91 | } else { 92 | hosts_str = fmt.Sprintf("from %s", strings.Join(hosts, ",")) 93 | } 94 | AccessLog.Printf("%s %s %s %d %s %dms", c.RemoteAddr, req.Cmd, key, size, hosts_str, dt.Nanoseconds()/1e6) 95 | } 96 | 97 | req.Clear() 98 | resp.CleanBuffer() 99 | 100 | if c.closeAfterReply { 101 | break 102 | } 103 | } 104 | c.Close() 105 | return 106 | } 107 | 108 | type Server struct { 109 | sync.Mutex 110 | addr string 111 | l net.Listener 112 | store DistributeStorage 113 | conns map[string]*ServerConn 114 | stats *Stats 115 | stop bool 116 | } 117 | 118 | func NewServer(store DistributeStorage) *Server { 119 | s := new(Server) 120 | s.store = store 121 | s.conns = make(map[string]*ServerConn, 1024) 122 | s.stats = NewStats() 123 | return s 124 | } 125 | 126 | func (s *Server) Listen(addr string) (e error) { 127 | s.addr = addr 128 | s.l, e = net.Listen("tcp", addr) 129 | return 130 | } 131 | 132 | func (s *Server) Serve() (e error) { 133 | if s.l == nil { 134 | return errors.New("no listener") 135 | } 136 | 137 | // trap signals (SIGINT reopens the log files; note SIGKILL and SIGSTOP cannot actually be caught) 138 | sch := make(chan os.Signal, 10) 139 | signal.Notify(sch, syscall.SIGTERM, syscall.SIGKILL, syscall.SIGINT, 140 | syscall.SIGHUP, syscall.SIGSTOP, syscall.SIGQUIT) 141 | go func(ch <-chan os.Signal) { 142 | for { 143 | sig := <-ch 144 | if sig == syscall.SIGINT { // Ctrl+C 145 | OpenAccessLog(AccessLogPath) 146 | OpenErrorLog(ErrorLogPath) 147 | } else { 148 | ErrorLog.Print("signal received " + sig.String()) 149 | AccessFd.Close() 150 | ErrorFd.Close() 151 | s.Shutdown() 152 | break 153 | } 154 | } 155 | }(sch) 156 | 157 | // log.Print("start serving at ", s.addr, "...\n") 158 | for { 159 | rw, e := s.l.Accept() 160 | if e != nil { 161 | ErrorLog.Print("Accept failed: ", e) 162 | return e 163 | } 164 | if s.stop { 165 | break 166 | } 167 | c := newServerConn(rw) 168 | go func() { 169 | s.Lock() 170 | s.conns[c.RemoteAddr] = c 171 | s.stats.curr_connections++ 172 | s.stats.total_connections++ 173 | s.Unlock() 174 | 175 | c.Serve(s.store, s.stats) 176 | 177 | s.Lock() 178 | s.stats.curr_connections-- 179 | delete(s.conns, c.RemoteAddr) 180 | s.Unlock() 181 | }() 182 | } 183 | s.l.Close() 184 | // wait for connections to close 185 | for i := 0; i < 20; i++ { 186 | s.Lock() 187 | if len(s.conns) == 0 { 188 | s.Unlock(); return nil // release the lock before returning 189 | } 190 | s.Unlock() 191 | time.Sleep(1e8) 192 | } 193 | ErrorLog.Print("shutdown ", s.addr, "\n") 194 | return nil 195 | } 196 | 197 | func (s *Server) Shutdown() { 198 | s.stop = true 199 | 200 | // try to connect 201 | net.Dial("tcp", s.addr) 202 | 203 | // notify conns 204 | s.Lock() 205 | defer s.Unlock() 206 | if len(s.conns) > 0 { 207 | // log.Print("have ", len(s.conns), " active connections") 208 | for _, conn := range s.conns { 209 | // log.Print(s) 210 | conn.Shutdown() 211 | } 212 | } 213 | //s.Unlock() 214 | } 215 | 216 | /* 217 | func StartServer(addr string) (*Server, error) { 218 | store := NewMapStore() 219 | s := NewServer(store) 220 | e := s.Listen(addr) 221 | if e != nil { 222 | return nil, e 223 | } 224 | go func() { 225 | s.Serve() 226 | }() 227 | return s, nil 228 | } 229 | */ 230 | -------------------------------------------------------------------------------- /src/memcache/host.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "net" 8 | "strconv" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | var MaxFreeConns = 20 14 | var ConnectTimeout
time.Duration = time.Millisecond * 300 15 | var ReadTimeout time.Duration = time.Millisecond * 2000 16 | var WriteTimeout time.Duration = time.Millisecond * 2000 17 | 18 | type Host struct { 19 | Addr string 20 | nextDial time.Time 21 | conns chan net.Conn 22 | offset int 23 | } 24 | 25 | func NewHost(addr string) *Host { 26 | host := &Host{Addr: addr} 27 | host.conns = make(chan net.Conn, MaxFreeConns) 28 | return host 29 | } 30 | 31 | // Given a string of the form "host", "host:port", or "[ipv6::address]:port", 32 | // return true if the string includes a port. 33 | func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } 34 | 35 | func (host *Host) Close() { 36 | if host.conns == nil { 37 | return 38 | } 39 | ch := host.conns 40 | host.conns = nil 41 | close(ch) 42 | 43 | for c := range ch { // drain and close all pooled connections 44 | c.Close() 45 | } 46 | } 47 | 48 | func (host *Host) createConn() (net.Conn, error) { 49 | now := time.Now() 50 | if host.nextDial.After(now) { 51 | return nil, errors.New("wait for retry") 52 | } 53 | 54 | addr := host.Addr 55 | if !hasPort(addr) { 56 | addr = addr + ":11211" 57 | } 58 | conn, err := net.DialTimeout("tcp", addr, ConnectTimeout) 59 | if err != nil { 60 | host.nextDial = now.Add(time.Second * 5) 61 | return nil, err 62 | } 63 | return conn, nil 64 | } 65 | 66 | func (host *Host) getConn() (c net.Conn, err error) { 67 | if host.conns == nil { 68 | return nil, errors.New("host closed") 69 | } 70 | select { 71 | case c = <-host.conns: 72 | default: 73 | c, err = host.createConn() 74 | } 75 | return 76 | } 77 | 78 | func (host *Host) releaseConn(conn net.Conn) { 79 | if host.conns == nil { 80 | conn.Close() 81 | return 82 | } 83 | select { 84 | case host.conns <- conn: 85 | default: 86 | conn.Close() 87 | } 88 | } 89 | 90 | func (host *Host) execute(req *Request) (resp *Response, err error) { 91 | var conn net.Conn 92 | conn, err = host.getConn() 93 | if err != nil { 94 | return 95 | } 96 | 97 | err = req.Write(conn) 98 | if err != nil { 99 | ErrorLog.Print(host.Addr, " write request failed:", err) 100 | conn.Close() 101 | return 102 | } 103 | 104 | resp = new(Response) 105 | if req.NoReply { 106 | host.releaseConn(conn) 107 | resp.status = "STORED" 108 | return 109 | } 110 | 111 | reader := bufio.NewReader(conn) 112 | err = resp.Read(reader) 113 | if err != nil { 114 | ErrorLog.Print(host.Addr, " read response failed:", err) 115 | conn.Close() 116 | return 117 | } 118 | 119 | if err := req.Check(resp); err != nil { 120 | ErrorLog.Print(host.Addr, " unexpected response", req, resp, err) 121 | conn.Close() 122 | return nil, err 123 | } 124 | 125 | host.releaseConn(conn) 126 | return 127 | } 128 | 129 | func (host *Host) executeWithTimeout(req *Request, timeout time.Duration) (resp *Response, err error) { 130 | done := make(chan bool, 1) 131 | now := time.Now() 132 | isTimeout := false 133 | go func() { 134 | resp, err = host.execute(req) 135 | done <- true 136 | if isTimeout && err == nil { 137 | ErrorLog.Printf("request %v to host %s returned after timeout, took %d ms", req, host.Addr, time.Since(now)/1e6) 138 | } 139 | }() 140 | 141 | select { 142 | case <-done: 143 | case <-time.After(timeout): 144 | isTimeout = true 145 | err = fmt.Errorf("request %v timeout", req) 146 | ErrorLog.Printf("request %v to host %s timeout", req, host.Addr) 147 | } 148 | return 149 | } 150 | 151 | func (host *Host) Get(key string) (*Item, error) { 152 | req := &Request{Cmd: "get", Keys: []string{key}} 153 | resp, err := host.executeWithTimeout(req,
ReadTimeout) 154 | if err != nil { 155 | return nil, err 156 | } 157 | item, _ := resp.items[key] 158 | return item, nil 159 | } 160 | 161 | func (host *Host) GetMulti(keys []string) (map[string]*Item, error) { 162 | req := &Request{Cmd: "get", Keys: keys} 163 | resp, err := host.execute(req) 164 | if err != nil { 165 | return nil, err 166 | } 167 | return resp.items, nil 168 | } 169 | 170 | func (host *Host) store(cmd string, key string, item *Item, noreply bool) (bool, error) { 171 | req := &Request{Cmd: cmd, Keys: []string{key}, Item: item, NoReply: noreply} 172 | resp, err := host.executeWithTimeout(req, WriteTimeout) 173 | return err == nil && resp.status == "STORED", err 174 | } 175 | 176 | func (host *Host) Set(key string, item *Item, noreply bool) (bool, error) { 177 | return host.store("set", key, item, noreply) 178 | } 179 | 180 | func (host *Host) Append(key string, value []byte) (bool, error) { 181 | req := &Request{Cmd: "append", Keys: []string{key}, Item: &Item{Body: value}} 182 | resp, err := host.execute(req) 183 | return err == nil && resp.status == "STORED", err 184 | } 185 | 186 | func (host *Host) Incr(key string, value int) (int, error) { 187 | req := &Request{Cmd: "incr", Keys: []string{key}, Item: &Item{Body: []byte(strconv.Itoa(value))}} 188 | resp, err := host.execute(req) 189 | if err != nil { 190 | return 0, err 191 | } 192 | return strconv.Atoi(resp.msg) 193 | } 194 | 195 | func (host *Host) Delete(key string) (bool, error) { 196 | req := &Request{Cmd: "delete", Keys: []string{key}} 197 | resp, err := host.execute(req) 198 | return err == nil && resp.status == "DELETED", err 199 | } 200 | 201 | func (host *Host) Stat(keys []string) (map[string]string, error) { 202 | req := &Request{Cmd: "stats", Keys: keys} 203 | resp, err := host.execute(req) 204 | if err != nil { 205 | return nil, err 206 | } 207 | st := make(map[string]string) 208 | for key, item := range resp.items { 209 | st[key] = string(item.Body) 210 | } 211 | return st, nil 212 | } 213 | 214 | func (host *Host) Len() int { 215 | return 0 216 | } 217 | -------------------------------------------------------------------------------- /src/memcache/client.go: -------------------------------------------------------------------------------- 1 | /* 2 | * memcache client 3 | */ 4 | 5 | package memcache 6 | 7 | import ( 8 | "errors" 9 | "math" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | // Client of memcached 15 | type Client struct { 16 | scheduler Scheduler 17 | N, W, R int 18 | } 19 | 20 | func NewClient(sch Scheduler, N, W, R int) (c *Client) { 21 | c = new(Client) 22 | c.scheduler = sch 23 | c.N = N 24 | c.W = W 25 | c.R = R 26 | return c 27 | } 28 | 29 | func (c *Client) Get(key string) (r *Item, targets []string, err error) { 30 | hosts := c.scheduler.GetHostsByKey(key) 31 | cnt := 0 32 | for _, host := range hosts[:c.N] { 33 | st := time.Now() 34 | r, err = host.Get(key) 35 | if err == nil { 36 | cnt++ 37 | if r != nil { 38 | t := float64(time.Now().Sub(st)) / 1e9 39 | c.scheduler.Feedback(host, key, 1 - float64(math.Sqrt(t)*t)) 40 | // got the right rval 41 | targets = []string{host.Addr} 42 | err = nil 43 | //return r, nil 44 | return 45 | } else { 46 | targets = append(targets, host.Addr) 47 | } 48 | } else if err.Error() != "wait for retry" { 49 | c.scheduler.Feedback(host, key, -5) 50 | } else { 51 | c.scheduler.Feedback(host, key, -2) 52 | } 53 | } 54 | 55 | if cnt >= c.R { 56 | // because hosts are sorted 57 | err = nil 58 | } 59 | // here is a failure exit 60 | return 61 | } 62 | 63 | func (c 
*Client) getMulti(keys []string) (rs map[string]*Item, targets []string, err error) { 64 | need := len(keys) 65 | rs = make(map[string]*Item, need) 66 | hosts := c.scheduler.GetHostsByKey(keys[0]) 67 | suc := 0 68 | for _, host := range hosts[:c.N] { 69 | st := time.Now() 70 | r, er := host.GetMulti(keys) 71 | if er == nil { 72 | suc += 1 73 | if r != nil { 74 | targets = append(targets, host.Addr) 75 | t := float64(time.Now().Sub(st)) / 1e9 76 | c.scheduler.Feedback(host, keys[0], 1 - float64(math.Sqrt(t)*t)) 77 | } 78 | } else if er.Error() != "wait for retry" { // failed 79 | c.scheduler.Feedback(host, keys[0], -5) 80 | } else { 81 | c.scheduler.Feedback(host, keys[0], -2) 82 | } 83 | err = er 84 | if er != nil { 85 | continue 86 | } 87 | 88 | for k, v := range r { 89 | rs[k] = v 90 | } 91 | 92 | if len(rs) == need { 93 | break 94 | } 95 | 96 | new_keys := []string{} 97 | for _, k := range keys { 98 | if _, ok := rs[k]; !ok { 99 | new_keys = append(new_keys, k) 100 | } 101 | } 102 | keys = new_keys 103 | if len(keys) == 0 { 104 | break // repeated keys 105 | } 106 | } 107 | if suc >= c.R { // consistent with Get: R successful reads are enough 108 | err = nil 109 | } 110 | return 111 | } 112 | 113 | func (c *Client) GetMulti(keys []string) (rs map[string]*Item, targets []string, err error) { 114 | var lock sync.Mutex 115 | rs = make(map[string]*Item, len(keys)) 116 | 117 | gs := c.scheduler.DivideKeysByBucket(keys) 118 | reply := make(chan bool, len(gs)) 119 | for _, ks := range gs { 120 | if len(ks) > 0 { 121 | go func(keys []string) { 122 | r, t, e := c.getMulti(keys) 123 | lock.Lock() 124 | if e != nil { 125 | err = e 126 | } else { 127 | for k, v := range r { 128 | rs[k] = v 129 | } 130 | targets = append(targets, t...) 131 | } 132 | lock.Unlock() 133 | reply <- true 134 | }(ks) 135 | } else { 136 | reply <- true 137 | } 138 | } 139 | // wait for complete 140 | for range gs { 141 | <-reply 142 | } 143 | return 144 | } 145 | 146 | func (c *Client) Set(key string, item *Item, noreply bool) (ok bool, targets []string, final_err error) { 147 | suc := 0 148 | for i, host := range c.scheduler.GetHostsByKey(key) { 149 | if ok, err := host.Set(key, item, noreply); err == nil && ok { 150 | suc++ 151 | targets = append(targets, host.Addr) 152 | } else if err != nil && err.Error() != "wait for retry" { // err may be nil when the store merely reports !ok 153 | c.scheduler.Feedback(host, key, -10) 154 | } 155 | 156 | if suc >= c.W && (i+1) >= c.N { 157 | // at least try N backends, and succeed W backends 158 | break 159 | } 160 | } 161 | if suc < c.W { 162 | ok = false 163 | final_err = errors.New("write failed") 164 | return 165 | } 166 | ok = true 167 | return 168 | } 169 | 170 | func (c *Client) Append(key string, value []byte) (ok bool, targets []string, final_err error) { 171 | suc := 0 172 | for i, host := range c.scheduler.GetHostsByKey(key) { 173 | if ok, err := host.Append(key, value); err == nil && ok { 174 | suc++ 175 | targets = append(targets, host.Addr) 176 | } else if err != nil && err.Error() != "wait for retry" { 177 | c.scheduler.Feedback(host, key, -5) 178 | } 179 | 180 | if suc >= c.W && (i+1) >= c.N { 181 | // at least try N backends, and succeed W backends 182 | break 183 | } 184 | } 185 | if suc < c.W { 186 | ok = false 187 | final_err = errors.New("write failed") 188 | return 189 | } 190 | ok = true 191 | return 192 | } 193 | 194 | func (c *Client) Incr(key string, value int) (result int, targets []string, err error) { 195 | //result := 0 196 | suc := 0 197 | for i, host := range c.scheduler.GetHostsByKey(key) { 198 | r, e := host.Incr(key, value) 199 | if e != nil { 200 | err = e 201 | continue
202 | } 203 | if r > 0 { 204 | suc++ 205 | targets = append(targets, host.Addr) 206 | } 207 | if r > result { 208 | result = r 209 | } 210 | if suc >= c.W && (i+1) >= c.N { 211 | // at least try N backends, and succeed W backends 212 | break 213 | } 214 | } 215 | if result > 0 { 216 | err = nil 217 | } 218 | //return result, err // maximize 219 | return 220 | } 221 | 222 | func (c *Client) Delete(key string) (r bool, targets []string, err error) { 223 | suc := 0 224 | err_count := 0 225 | failed_hosts := make([]string, 0, 2) 226 | for i, host := range c.scheduler.GetHostsByKey(key) { 227 | ok, er := host.Delete(key) 228 | 229 | if ok { 230 | suc++ 231 | targets = append(targets, host.Addr) 232 | } else if er != nil { 233 | err = er 234 | err_count++ 235 | failed_hosts = append(failed_hosts, host.Addr) 236 | if i >= c.N { 237 | continue 238 | } 239 | if er.Error() != "wait for retry" { 240 | c.scheduler.Feedback(host, key, -10) 241 | } 242 | } 243 | 244 | if suc >= c.N { 245 | break 246 | } 247 | } 248 | if err_count > 0 { 249 | ErrorLog.Printf("key: %s delete failed on %v, the last error is %s", key, failed_hosts, err) 250 | } 251 | if err_count < 2 { 252 | err = nil 253 | } 254 | r = (suc > 0) 255 | return 256 | } 257 | 258 | func (c *Client) Len() int { 259 | return 0 260 | } 261 | -------------------------------------------------------------------------------- /src/proxy/proxy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "flag" 7 | "fmt" 8 | "gopkg.in/yaml.v2" 9 | "io" 10 | "io/ioutil" 11 | "log" 12 | "math" 13 | . "memcache" 14 | "net" 15 | "net/http" 16 | _ "net/http/pprof" 17 | "os" 18 | "runtime" 19 | "strconv" 20 | "strings" 21 | "text/template" 22 | "time" 23 | "sort" 24 | ) 25 | 26 | var conf *string = flag.String("conf", "conf/example.yaml", "config path") 27 | 28 | //var debug *bool = flag.Bool("debug", false, "debug info") 29 | var allocLimit *int = flag.Int("alloc", 1024*4, "cmem alloc limit") 30 | var basepath = flag.String("basepath", "", "base path") 31 | 32 | var eyeconfig Eye 33 | 34 | type gzipResponseWriter struct { 35 | io.Writer 36 | http.ResponseWriter 37 | } 38 | 39 | func (w gzipResponseWriter) Write(b []byte) (int, error) { 40 | return w.Writer.Write(b) 41 | } 42 | 43 | func makeGzipHandler(fn http.HandlerFunc) http.HandlerFunc { 44 | return func(w http.ResponseWriter, r *http.Request) { 45 | if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { 46 | fn(w, r) 47 | return 48 | } 49 | w.Header().Set("Content-Encoding", "gzip") 50 | gz := gzip.NewWriter(w) 51 | defer gz.Close() 52 | fn(gzipResponseWriter{Writer: gz, ResponseWriter: w}, r) 53 | } 54 | } 55 | 56 | func in(s, subs interface{}) (bool, error) { 57 | return strings.Contains(s.(string), subs.(string)), nil 58 | } 59 | 60 | func timer(v interface{}) string { 61 | if v == nil { 62 | return "" 63 | } 64 | t := v.(uint64) 65 | switch { 66 | case t > 3600*24*2: 67 | return fmt.Sprintf("%d day", t/3600/24) 68 | case t > 3600*2: 69 | return fmt.Sprintf("%d hour", t/3600) 70 | case t > 60*2: 71 | return fmt.Sprintf("%d min", t/60) 72 | default: 73 | return fmt.Sprintf("%d sec", t) 74 | } 75 | return "" 76 | } 77 | 78 | func sum(l interface{}) uint64 { 79 | if li, ok := l.([]uint64); ok { 80 | s := uint64(0) 81 | for _, n := range li { 82 | s += n 83 | } 84 | return s 85 | } 86 | return 0 87 | } 88 | 89 | func sizer(v interface{}) string { 90 | var n float64 91 | switch i := v.(type) { 92 |
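// normalize any numeric type to float64 before picking a unit suffix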
case int: 93 | n = float64(i) 94 | case uint: 95 | n = float64(i) 96 | case int64: 97 | n = float64(i) 98 | case uint64: 99 | n = float64(i) 100 | case float64: 101 | n = float64(i) 102 | case float32: 103 | n = float64(i) 104 | default: 105 | return "0" 106 | } 107 | if math.IsInf(n, 0) { 108 | return "Inf" 109 | } 110 | unit := 0 111 | var units = []string{"", "K", "M", "G", "T", "P"} 112 | for n > 1024.0 { 113 | n /= 1024 114 | unit += 1 115 | } 116 | s := fmt.Sprintf("%2.1f", n) 117 | if strings.HasSuffix(s, ".0") || len(s) >= 4 { 118 | s = s[:len(s)-2] 119 | } 120 | return s + units[unit] 121 | } 122 | 123 | func number(v interface{}) string { 124 | var n float64 125 | switch i := v.(type) { 126 | case int: 127 | n = float64(i) 128 | case uint: 129 | n = float64(i) 130 | case int64: 131 | n = float64(i) 132 | case uint64: 133 | n = float64(i) 134 | case float64: 135 | n = float64(i) 136 | case float32: 137 | n = float64(i) 138 | default: 139 | return "0" 140 | } 141 | if math.IsInf(n, 0) { 142 | return "Inf" 143 | } 144 | unit := 0 145 | var units = []string{"", "k", "m", "b"} 146 | for n > 1000.0 { 147 | n /= 1000 148 | unit += 1 149 | } 150 | s := fmt.Sprintf("%2.1f", n) 151 | if strings.HasSuffix(s, ".0") || len(s) >= 4 { 152 | s = s[:len(s)-2] 153 | } 154 | return s + units[unit] 155 | } 156 | 157 | var tmpls *template.Template 158 | var SECTIONS = [][]string{{"IN", "Info"}, {"SS", "Server"}, {"ST", "Status"}} 159 | 160 | var server_stats []map[string]interface{} 161 | var proxy_stats []map[string]interface{} 162 | var total_records, uniq_records uint64 163 | var bucket_stats []string 164 | var schd Scheduler 165 | 166 | func update_stats(servers []string, hosts []*Host, server_stats []map[string]interface{}, isNode bool) { 167 | if hosts == nil { 168 | hosts = make([]*Host, len(servers)) 169 | for i, s := range servers { 170 | hosts[i] = NewHost(s) 171 | } 172 | } 173 | 174 | // call self after 10 seconds 175 | time.AfterFunc(time.Second*10, func() { 176 | update_stats(servers, hosts, server_stats, isNode) 177 | }) 178 | 179 | defer func() { 180 | if err := recover(); err != nil { 181 | log.Print("update stats failed: ", err) 182 | } 183 | }() 184 | 185 | for i, h := range hosts { 186 | t, err := h.Stat(nil) 187 | if err != nil { 188 | server_stats[i] = map[string]interface{}{"name": h.Addr} 189 | continue 190 | } 191 | 192 | st := make(map[string]interface{}) 193 | st["name"] = h.Addr 194 | //log.Print(h.Addr, t) 195 | for k, v := range t { 196 | switch k { 197 | case "version", "pid": 198 | st[k] = v 199 | case "rusage_maxrss": 200 | if n, e := strconv.ParseInt(v, 10, 64); e == nil { 201 | st[k] = uint64(n) * 1024 202 | } 203 | case "rusage_user", "rusage_system": 204 | st[k], _ = strconv.ParseFloat(v, 64) 205 | default: 206 | var e error 207 | st[k], e = strconv.ParseUint(v, 10, 64) 208 | if e != nil { 209 | println("conv to ui64 failed", v) 210 | st[k] = 0 211 | } 212 | } 213 | } 214 | 215 | ST := func(name string) uint64 { 216 | if v, ok := st[name]; ok && v != nil { 217 | return v.(uint64) 218 | } 219 | return 0 220 | } 221 | 222 | st["slow_cmd"] = ST("slow_cmd") 223 | st["hit"] = ST("get_hits") * 100 / (ST("cmd_get") + 1) 224 | st["getset"] = float32(ST("cmd_get")) / float32(ST("cmd_set")+100.0) 225 | st["slow"] = ST("slow_cmd") * 100 / (ST("cmd_get") + ST("cmd_set") + ST("cmd_delete") + 1) 226 | if maxrss, ok := st["rusage_maxrss"]; ok { 227 | st["mpr"] = maxrss.(uint64) / (st["total_items"].(uint64) + st["curr_items"].(uint64) + 1000) 228 | } 229 | old := server_stats[i]
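// everything below derives per-interval "curr_*" rates by diffing against the previous 10-second sample kept in server_stats[i]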
230 | keys := []string{"uptime", "cmd_get", "cmd_set", "cmd_delete", "slow_cmd", "get_hits", "get_misses", "bytes_read", "bytes_written"} 231 | if old != nil && len(old) > 2 { 232 | for _, k := range keys { 233 | if v, ok := st[k]; ok { 234 | if ov, ok := old[k]; ok { 235 | st["curr_"+k] = v.(uint64) - ov.(uint64) 236 | } else { 237 | log.Print("stat ", k, " missing from previous sample") 238 | } 239 | } else { 240 | log.Print("stat ", k, " missing") 241 | } 242 | } 243 | } else { 244 | for _, k := range keys { 245 | st["curr_"+k] = uint64(0) 246 | } 247 | st["curr_uptime"] = uint64(1) 248 | } 249 | st["curr_hit"] = st["curr_get_hits"].(uint64) * 100 / (st["curr_cmd_get"].(uint64) + 1) 250 | st["curr_getset"] = float32(st["curr_cmd_get"].(uint64)) / float32(st["curr_cmd_set"].(uint64)+1.0) 251 | st["curr_slow"] = st["curr_slow_cmd"].(uint64) * 100 / (st["curr_cmd_get"].(uint64) + st["curr_cmd_set"].(uint64) + st["curr_cmd_delete"].(uint64) + 1) 252 | keys = []string{"cmd_get", "cmd_set", "cmd_delete", "bytes_read", "bytes_written"} 253 | dt := float32(st["curr_uptime"].(uint64)) 254 | for _, k := range keys { 255 | st["curr_"+k] = float32(st["curr_"+k].(uint64)) / dt 256 | } 257 | 258 | if isNode { 259 | rs, err := h.Get("@") 260 | if err != nil || rs == nil { 261 | server_stats[i] = st 262 | continue 263 | } 264 | bs := make([]uint64, 16) 265 | for i, line := range bytes.SplitN(rs.Body, []byte("\n"), 17) { 266 | if bytes.Count(line, []byte(" ")) < 2 || line[1] != '/' { 267 | continue 268 | } 269 | vv := bytes.SplitN(line, []byte(" "), 3) 270 | cnt, _ := strconv.ParseUint(string(vv[2]), 10, 64) 271 | bs[i] = cnt 272 | } 273 | st["buckets"] = bs 274 | } 275 | server_stats[i] = st 276 | } 277 | if isNode { 278 | total := uint64(0) 279 | utotal := uint64(0) 280 | cnt := make([]int, 16) 281 | m := make([]uint64, 16) 282 | for i := 0; i < 16; i++ { 283 | for _, st := range server_stats { 284 | if st == nil || st["buckets"] == nil { 285 | continue 286 | } 287 | n := st["buckets"].([]uint64)[i] 288 | total += n 289 | if n > m[i] { 290 | m[i] = n 291 | } 292 | } 293 | utotal += m[i] 294 | for _, st := range server_stats { 295 | if st == nil || st["buckets"] == nil { 296 | continue 297 | } 298 | n := st["buckets"].([]uint64)[i] 299 | if n > m[i]*98/100 { 300 | cnt[i] += 1 301 | } 302 | } 303 | } 304 | for i := 0; i < 16; i++ { 305 | switch cnt[i] { 306 | case 2: 307 | bucket_stats[i] = "warning" 308 | case 1: 309 | bucket_stats[i] = "dangerous" 310 | case 0: 311 | bucket_stats[i] = "invalid" 312 | } 313 | } 314 | for _, st := range server_stats { 315 | if st == nil || st["buckets"] == nil { 316 | continue 317 | } 318 | for i, c := range st["buckets"].([]uint64) { 319 | if c > m[i]*98/100 && cnt[i] < 3 { 320 | if cnt[i] == 1 { 321 | st["status"] = "dangerous" 322 | } else { 323 | st["status"] = "warning" 324 | } 325 | break 326 | } 327 | if c > 0 && c < m[i]-5 { 328 | st["status"] = "warning" 329 | } 330 | } 331 | } 332 | total_records = total 333 | uniq_records = utotal 334 | } 335 | } 336 | 337 | func Init(basepath string) { 338 | funcs := make(template.FuncMap) 339 | funcs["in"] = in 340 | funcs["sum"] = sum 341 | funcs["size"] = sizer 342 | funcs["num"] = number 343 | funcs["time"] = timer 344 | 345 | if !bytes.HasSuffix([]byte(basepath), []byte("/")) { 346 | basepath = basepath + "/" 347 | } 348 | 349 | tmpls = new(template.Template) 350 | tmpls = tmpls.Funcs(funcs) 351 | tmpls = template.Must(tmpls.ParseFiles(basepath+"static/index.html", 352 | basepath+"static/header.html", basepath+"static/info.html", 353 |
basepath+"static/matrix.html", basepath+"static/server.html", 354 | basepath+"static/stats.html")) 355 | } 356 | 357 | func Status(w http.ResponseWriter, req *http.Request) { 358 | w.Header().Set("Content-Type", "text/html; charset=utf-8") 359 | 360 | sections := req.FormValue("sections") 361 | if len(sections) == 0 { 362 | sections = "IN|SS" 363 | } 364 | all_sections := [][]string{} 365 | last := "U" 366 | for _, s := range SECTIONS { 367 | now := "U" 368 | if strings.Contains(sections, s[0]) { 369 | now = "S" 370 | } 371 | all_sections = append(all_sections, []string{last + now, s[0], s[1]}) 372 | last = now 373 | } 374 | 375 | data := make(map[string]interface{}) 376 | data["sections"] = sections 377 | data["all_sections"] = all_sections 378 | data["server_stats"] = server_stats 379 | data["proxy_stats"] = proxy_stats 380 | data["bucket_stats"] = bucket_stats 381 | data["total_records"] = total_records 382 | data["uniq_records"] = uniq_records 383 | 384 | st := schd.Stats() 385 | stats := make([]map[string]interface{}, len(server_stats)) 386 | for i, _ := range stats { 387 | d := make(map[string]interface{}) 388 | name := server_stats[i]["name"].(string) 389 | d["name"] = name 390 | d["stat"] = st[name] 391 | stats[i] = d 392 | } 393 | data["stats"] = stats 394 | 395 | err := tmpls.ExecuteTemplate(w, "index.html", data) 396 | if err != nil { 397 | println("render", err.Error()) 398 | } 399 | } 400 | 401 | func min(a, b int) int { 402 | if a < b { 403 | return a 404 | } 405 | return b 406 | } 407 | 408 | func main() { 409 | flag.Parse() 410 | //c, err := config.ReadDefault(*conf) 411 | content, err := ioutil.ReadFile(*conf) 412 | if err != nil { 413 | log.Fatal("read config failed", *conf, err.Error()) 414 | } 415 | 416 | if err := yaml.Unmarshal(content, &eyeconfig); err != nil { 417 | log.Fatal("unmarshal yaml format config failed") 418 | } 419 | if *basepath == "" { 420 | if eyeconfig.Basepath == "" { 421 | curr_path, err1 := os.Getwd() 422 | if err1 != nil { 423 | log.Fatal("Cannot get pwd") 424 | return 425 | } 426 | *basepath = curr_path 427 | } else { 428 | *basepath = eyeconfig.Basepath 429 | } 430 | } 431 | Init(*basepath) 432 | 433 | if eyeconfig.Threads > 0 { 434 | runtime.GOMAXPROCS(eyeconfig.Threads) 435 | } 436 | 437 | if len(eyeconfig.Servers) == 0 { 438 | log.Fatal("no servers in conf") 439 | } 440 | server_configs := make(map[string][]string, len(eyeconfig.Servers)) 441 | for _, server := range eyeconfig.Servers { 442 | fields := strings.Split(server, " ") 443 | server_configs[fields[0]] = fields[1:] 444 | } 445 | servers := make([]string, 0, len(server_configs)) 446 | for server, _ := range server_configs { 447 | servers = append(servers, server) 448 | } 449 | sort.Sort(sort.StringSlice(servers)) 450 | 451 | if eyeconfig.WebPort <= 0 { 452 | log.Print("error webport in conf: ", eyeconfig.WebPort) 453 | } else if eyeconfig.Buckets <= 0 { 454 | log.Print("error buckets in conf: ", eyeconfig.Buckets) 455 | } else { 456 | server_stats = make([]map[string]interface{}, len(servers)) 457 | bucket_stats = make([]string, eyeconfig.Buckets) 458 | go update_stats(servers, nil, server_stats, true) 459 | 460 | if len(eyeconfig.Proxies) > 0 { 461 | proxy_stats = make([]map[string]interface{}, len(eyeconfig.Proxies)) 462 | go update_stats(eyeconfig.Proxies, nil, proxy_stats, false) 463 | } 464 | 465 | http.Handle("/", http.HandlerFunc(makeGzipHandler(Status))) 466 | http.Handle("/static/", http.FileServer(http.Dir(*basepath))) 467 | go func() { 468 | if len(eyeconfig.Listen) == 0 { 
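// an empty listen address defaults to all interfaces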
469 | eyeconfig.Listen = "0.0.0.0" 470 | } 471 | addr := fmt.Sprintf("%s:%d", eyeconfig.Listen, eyeconfig.WebPort) 472 | lt, e := net.Listen("tcp", addr) 473 | if e != nil { 474 | log.Println("monitor listen failed on ", addr, e); return // don't call http.Serve with a nil listener 475 | } 476 | log.Println("monitor listen on ", addr) 477 | http.Serve(lt, nil) 478 | }() 479 | } 480 | 481 | AllocLimit = *allocLimit 482 | 483 | var success bool 484 | 485 | if len(eyeconfig.AccessLog) > 0 { 486 | AccessLogPath = eyeconfig.AccessLog 487 | if success, err = OpenAccessLog(eyeconfig.AccessLog); !success { 488 | log.Fatalf("open AccessLog file in path: %s with error : %s", eyeconfig.AccessLog, err.Error()) 489 | } 490 | } 491 | 492 | if len(eyeconfig.ErrorLog) > 0 { 493 | ErrorLogPath = eyeconfig.ErrorLog 494 | if success, err = OpenErrorLog(eyeconfig.ErrorLog); !success { 495 | log.Fatalf("open ErrorLog file in path: %s with error : %s", eyeconfig.ErrorLog, err.Error()) 496 | } 497 | } 498 | 499 | slow := eyeconfig.Slow 500 | if slow == 0 { 501 | slow = 100 502 | } 503 | SlowCmdTime = time.Duration(int64(slow) * 1e6) 504 | 505 | readonly := eyeconfig.Readonly 506 | 507 | n := len(servers) 508 | if eyeconfig.N == 0 { 509 | eyeconfig.N = 3 510 | } 511 | N := min(eyeconfig.N, n) 512 | 513 | if eyeconfig.W == 0 { 514 | eyeconfig.W = 2 515 | } 516 | W := min(eyeconfig.W, n-1) 517 | 518 | if eyeconfig.R == 0 { 519 | eyeconfig.R = 1 520 | } 521 | R := eyeconfig.R 522 | 523 | //schd = NewAutoScheduler(servers, 16) 524 | schd = NewManualScheduler(server_configs, eyeconfig.Buckets, N) 525 | 526 | var client DistributeStorage 527 | if readonly { 528 | client = NewRClient(schd, N, W, R) 529 | } else { 530 | client = NewClient(schd, N, W, R) 531 | } 532 | 533 | http.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { 534 | }) 535 | 536 | proxy := NewServer(client) 537 | if eyeconfig.Port <= 0 { 538 | log.Fatal("invalid proxy port in config: ", eyeconfig.Port) 539 | } 540 | addr := fmt.Sprintf("%s:%d", eyeconfig.Listen, eyeconfig.Port) 541 | if e := proxy.Listen(addr); e != nil { 542 | log.Fatal("proxy listen failed: ", e.Error()) 543 | } 544 | 545 | log.Println("proxy listen on ", addr) 546 | proxy.Serve() 547 | log.Print("shut down gracefully.") 548 | } 549 | -------------------------------------------------------------------------------- /src/memcache/schedule.go: -------------------------------------------------------------------------------- 1 | package memcache 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math" 7 | "sort" 8 | "strconv" 9 | "strings" 10 | "time" 11 | "math/rand" 12 | ) 13 | 14 | // Scheduler routes requests to nodes 15 | type Scheduler interface { 16 | Feedback(host *Host, key string, adjust float64) // feedback for auto routing 17 | GetHostsByKey(key string) []*Host // route a key to hosts 18 | DivideKeysByBucket(keys []string) [][]string // route some keys to group of hosts 19 | Stats() map[string][]float64 // internal status 20 | } 21 | 22 | type emptyScheduler struct{} 23 | 24 | func (c emptyScheduler) Feedback(host *Host, key string, adjust float64) {} 25 | 26 | func (c emptyScheduler) Stats() map[string][]float64 { return nil } 27 | 28 | // route requests by hash modulo the host count 29 | type ModScheduler struct { 30 | hosts []*Host 31 | hashMethod HashMethod 32 | emptyScheduler 33 | } 34 | 35 | func NewModScheduler(hosts []string, hashname string) Scheduler { 36 | var c ModScheduler 37 | c.hosts = make([]*Host, len(hosts)) 38 | c.hashMethod = hashMethods[hashname] 39 | for i, h := range hosts { 40 | c.hosts[i] = NewHost(h) 41 | }
42 | return &c 43 | } 44 | 45 | func (c *ModScheduler) GetHostsByKey(key string) []*Host { 46 | h := c.hashMethod([]byte(key)) 47 | r := make([]*Host, 1) 48 | r[0] = c.hosts[h%uint32(len(c.hosts))] 49 | return r 50 | } 51 | 52 | func (c *ModScheduler) DivideKeysByBucket(keys []string) [][]string { 53 | n := len(c.hosts) 54 | rs := make([][]string, n) 55 | for _, key := range keys { 56 | h := c.hashMethod([]byte(key)) % uint32(n) 57 | rs[h] = append(rs[h], key) 58 | } 59 | return rs 60 | } 61 | 62 | // internal status 63 | func (c *ModScheduler) Stats() map[string][]float64 { 64 | r := make(map[string][]float64) 65 | for i, h := range c.hosts { 66 | r[h.Addr] = make([]float64, len(c.hosts)) 67 | r[h.Addr][i] = 1 68 | } 69 | return r 70 | } 71 | 72 | type uint64Slice []uint64 73 | 74 | func (l uint64Slice) Len() int { 75 | return len(l) 76 | } 77 | 78 | func (l uint64Slice) Less(i, j int) bool { 79 | return l[i] < l[j] 80 | } 81 | 82 | func (l uint64Slice) Swap(i, j int) { 83 | l[i], l[j] = l[j], l[i] 84 | } 85 | 86 | // route requests by consistent hashing 87 | type ConsistantHashScheduler struct { 88 | hosts []*Host 89 | index []uint64 90 | hashMethod HashMethod 91 | emptyScheduler 92 | } 93 | 94 | const VIRTUAL_NODES = 100 95 | 96 | func NewConsistantHashScheduler(hosts []string, hashname string) Scheduler { 97 | var c ConsistantHashScheduler 98 | c.hosts = make([]*Host, len(hosts)) 99 | c.index = make([]uint64, len(hosts)*VIRTUAL_NODES) 100 | c.hashMethod = hashMethods[hashname] 101 | for i, h := range hosts { 102 | c.hosts[i] = NewHost(h) 103 | for j := 0; j < VIRTUAL_NODES; j++ { 104 | v := c.hashMethod([]byte(fmt.Sprintf("%s-%d", h, j))) 105 | ps := strings.SplitN(h, ":", 2) 106 | host := ps[0] 107 | port := ps[1] 108 | if port == "11211" { // hosts on the default port hash by bare hostname 109 | v = c.hashMethod([]byte(fmt.Sprintf("%s-%d", host, j))) 110 | } 111 | c.index[i*VIRTUAL_NODES+j] = (uint64(v) << 32) + uint64(i) 112 | } 113 | } 114 | sort.Sort(uint64Slice(c.index)) 115 | if !sort.IsSorted(uint64Slice(c.index)) { 116 | panic("sort failed") 117 | } 118 | return &c 119 | } 120 | 121 | func (c *ConsistantHashScheduler) getHostIndex(key string) int { 122 | h := uint64(c.hashMethod([]byte(key))) << 32 123 | N := len(c.index) 124 | i := sort.Search(N, func(k int) bool { return c.index[k] >= h }) 125 | if i == N { 126 | i = 0 127 | } 128 | return int(c.index[i] & 0xffffffff) 129 | } 130 | 131 | func (c *ConsistantHashScheduler) GetHostsByKey(key string) []*Host { 132 | r := make([]*Host, 1) 133 | i := c.getHostIndex(key) 134 | r[0] = c.hosts[i] 135 | return r 136 | } 137 | 138 | func (c *ConsistantHashScheduler) DivideKeysByBucket(keys []string) [][]string { 139 | n := len(c.hosts) 140 | rs := make([][]string, n) 141 | for _, key := range keys { 142 | i := c.getHostIndex(key) 143 | rs[i] = append(rs[i], key) 144 | } 145 | return rs 146 | } 147 | 148 | // route requests by hand-written configuration 149 | type ManualScheduler struct { 150 | N int 151 | hosts []*Host 152 | buckets [][]int 153 | backups [][]int 154 | bucketWidth int 155 | stats [][]float64 156 | hashMethod HashMethod 157 | feedChan chan *Feedback 158 | } 159 | 160 | // each config string is a hex bucket id; a leading '-' means the node serves that bucket as a backup 161 | func NewManualScheduler(config map[string][]string, bs, n int) *ManualScheduler { 162 | defer func() { 163 | if r := recover(); r != nil { 164 | ErrorLog.Fatalln("NewManualScheduler panicked; a node probably claims a bucket id beyond the configured bucket count") 165 | } 166 | }() 167 | c := new(ManualScheduler) 168 | c.hosts =
make([]*Host, len(config)) 169 | c.buckets = make([][]int, bs) 170 | c.backups = make([][]int, bs) 171 | c.stats = make([][]float64, bs) 172 | c.N = n 173 | 174 | no := 0 175 | for addr, serve_to := range config { 176 | host := NewHost(addr) 177 | host.offset = no 178 | c.hosts[no] = host 179 | for _, bucket_str := range serve_to { 180 | if strings.HasPrefix(bucket_str, "-") { 181 | if bucket, e := strconv.ParseInt(bucket_str[1:], 16, 16); e == nil { 182 | //c.buckets[bucket] = append(c.buckets[bucket], no) 183 | c.backups[bucket] = append(c.backups[bucket], no) 184 | 185 | } else { 186 | ErrorLog.Println("parse bucket config failed: not a hex number") 187 | } 188 | } else { 189 | if bucket, e := strconv.ParseInt(bucket_str, 16, 16); e == nil { 190 | c.buckets[bucket] = append(c.buckets[bucket], no) 191 | } else { 192 | ErrorLog.Println("parse bucket config failed: not a hex number") 193 | } 194 | } 195 | } 196 | no++ 197 | } 198 | // set c.stats according to c.buckets 199 | for b := 0; b < bs; b++ { 200 | c.stats[b] = make([]float64, len(c.hosts)) 201 | } 202 | c.hashMethod = fnv1a1 203 | c.bucketWidth = calBitWidth(bs) 204 | c.feedChan = make(chan *Feedback, 256) // created before the goroutines below start sending 205 | go c.procFeedback() 206 | go func() { 207 | for { 208 | c.try_reward() 209 | time.Sleep(5 * 1e9) 210 | } 211 | }() 212 | return c 213 | } 214 | 215 | func fastdivideKeysByBucket(hash_func HashMethod, bs int, bw int, keys []string) [][]string { 216 | rs := make([][]string, bs) 217 | //bw := calBitWidth(bs) 218 | for _, key := range keys { 219 | b := getBucketByKey(hash_func, bw, key) 220 | rs[b] = append(rs[b], key) 221 | } 222 | return rs 223 | } 224 | 225 | 226 | /* 227 | func (c *ManualScheduler) dump_scores() { 228 | for i, bucket := range c.buckets { 229 | scores := make([]string, len(bucket)) 230 | stats := c.stats[i] 231 | for j, n := range bucket { 232 | addr := c.hosts[n].Addr 233 | scores[j] = fmt.Sprintf("%s:%f", addr[:strings.Index(addr, ":")], stats[n]) 234 | } 235 | ErrorLog.Printf( "Bucket %X Score: %v", i, scores) 236 | } 237 | } 238 | */ 239 | 240 | func (c *ManualScheduler) try_reward() { 241 | //c.dump_scores() 242 | for i, bucket := range c.buckets { 243 | // randomly reward the 2nd and 3rd nodes 244 | second_node := bucket[1] // assumes each bucket has at least 2 serving nodes 245 | if _, err := c.hosts[second_node].Get("@"); err == nil { 246 | var second_reward float64 = 0.0 247 | second_stat := c.stats[i][second_node] 248 | if second_stat < 0 { 249 | second_reward = 0 - second_stat 250 | } else { 251 | second_reward = float64(rand.Intn(10)) 252 | } 253 | c.feedChan <- &Feedback{hostIndex: second_node, bucketIndex: i, adjust: second_reward} 254 | } else { 255 | ErrorLog.Printf("beansdb server %s (bucket %X second node) down during try_reward, err = %s", c.hosts[second_node].Addr, i, err) 256 | } 257 | 258 | if c.N > 2 { 259 | third_node := bucket[2] 260 | if _, err := c.hosts[third_node].Get("@"); err == nil { 261 | var third_reward float64 = 0.0 262 | third_stat := c.stats[i][third_node] 263 | if third_stat < 0 { 264 | third_reward = 0 - third_stat 265 | } else { 266 | third_reward = float64(rand.Intn(16)) 267 | } 268 | c.feedChan <- &Feedback{hostIndex: third_node, bucketIndex: i, adjust: third_reward} 269 | } else { 270 | ErrorLog.Printf("beansdb server %s (bucket %X third node) down during try_reward, err = %s", c.hosts[third_node].Addr, i, err) 271 | } 272 | } 273 | } 274 | } 275 | 276 | func (c *ManualScheduler) procFeedback() { 277 | // feedChan is created in NewManualScheduler, before any sender starts 278 | for { 279 | fb := <-c.feedChan 280 | c.feedback(fb.hostIndex, fb.bucketIndex,
fb.adjust) 281 | } 282 | } 283 | 284 | func (c *ManualScheduler) feedback(i, bucket_index int, adjust float64) { 285 | 286 | stats := c.stats[bucket_index] 287 | old := stats[i] 288 | stats[i] += adjust 289 | 290 | // try to reduce the bucket's stats 291 | if stats[i] > 100 { 292 | for index := 0; index < len(stats); index++ { 293 | stats[index] = stats[index] / 2 294 | } 295 | } 296 | bucket := make([]int, c.N) 297 | copy(bucket, c.buckets[bucket_index]) 298 | 299 | k := 0 300 | // find the position 301 | for k = 0; k < c.N; k++ { 302 | if bucket[k] == i { 303 | break 304 | } 305 | } 306 | 307 | if stats[i]-old > 0 { 308 | for k > 0 && stats[bucket[k]] > stats[bucket[k-1]] { 309 | swap(bucket, k, k-1) 310 | k-- 311 | } 312 | } else { 313 | for k < c.N-1 && stats[bucket[k]] < stats[bucket[k+1]] { 314 | swap(bucket, k, k+1) 315 | k++ 316 | } 317 | } 318 | // write the reordered bucket back 319 | c.buckets[bucket_index] = bucket 320 | } 321 | 322 | func (c *ManualScheduler) GetHostsByKey(key string) (hosts []*Host) { 323 | i := getBucketByKey(c.hashMethod, c.bucketWidth, key) 324 | hosts = make([]*Host, c.N+len(c.backups[i])) 325 | for j, offset := range c.buckets[i] { 326 | hosts[j] = c.hosts[offset] 327 | } 328 | // set the backup nodes in pos after N - 1 329 | for j, offset := range c.backups[i] { 330 | hosts[c.N+j] = c.hosts[offset] 331 | } 332 | return 333 | } 334 | 335 | func (c *ManualScheduler) DivideKeysByBucket(keys []string) [][]string { 336 | return fastdivideKeysByBucket(c.hashMethod, len(c.buckets), c.bucketWidth, keys) 337 | } 338 | 339 | func (c *ManualScheduler) Feedback(host *Host, key string, adjust float64) { 340 | index := getBucketByKey(c.hashMethod, c.bucketWidth, key) 341 | c.feedChan <- &Feedback{hostIndex: host.offset, bucketIndex: index, adjust: adjust} 342 | } 343 | 344 | func (c *ManualScheduler) Stats() map[string][]float64 { 345 | r := make(map[string][]float64, len(c.hosts)) 346 | for _, h := range c.hosts { 347 | r[h.Addr] = make([]float64, len(c.buckets)) 348 | } 349 | for i, st := range c.stats { 350 | for j, w := range st { 351 | r[c.hosts[j].Addr][i] = w 352 | } 353 | } 354 | return r 355 | } 356 | 357 | type Feedback struct { 358 | hostIndex int 359 | bucketIndex int 360 | adjust float64 361 | } 362 | 363 | // route requests by auto-discovered information, used with beansdb 364 | type AutoScheduler struct { 365 | n int 366 | hosts []*Host 367 | buckets [][]int 368 | stats [][]float64 369 | last_check time.Time 370 | hashMethod HashMethod 371 | feedChan chan *Feedback 372 | bucketWidth int 373 | } 374 | 375 | func NewAutoScheduler(config []string, bs int) *AutoScheduler { 376 | c := new(AutoScheduler) 377 | c.n = len(config) 378 | c.hosts = make([]*Host, c.n) 379 | c.buckets = make([][]int, bs) 380 | c.stats = make([][]float64, bs) 381 | for i := 0; i < bs; i++ { 382 | c.buckets[i] = make([]int, c.n) 383 | c.stats[i] = make([]float64, c.n) 384 | } 385 | for i, addr := range config { 386 | c.hosts[i] = NewHost(addr) 387 | for j := 0; j < bs; j++ { 388 | c.buckets[j][i] = i 389 | c.stats[j][i] = 0 390 | } 391 | } 392 | c.hashMethod = fnv1a1 393 | c.bucketWidth = calBitWidth(c.n) 394 | c.feedChan = make(chan *Feedback, 1024) // created before procFeedback and any Feedback caller 395 | go c.procFeedback() 396 | c.check() 397 | go func() { 398 | for { 399 | c.check() 400 | time.Sleep(10 * 1e9) 401 | } 402 | }() 403 | return c 404 | } 405 | 406 | func calBitWidth(number int) int { 407 | width := 0 408 | for number > 1 { 409 | width++ 410 | number /= 2 411 | } 412 | return width 413 | } 414 | 415 | func getBucketByKey(hash_func HashMethod, bucketWidth int, key string)
int { 416 | if len(key) > bucketWidth/4 && key[0] == '@' { 417 | return hextoi(key[1 : bucketWidth/4+1]) 418 | } 419 | if len(key) >= 1 && key[0] == '?' { 420 | key = key[1:] 421 | } 422 | h := hash_func([]byte(key)) 423 | return (int)(h >> (uint)(32-bucketWidth)) 424 | } 425 | 426 | func (c *AutoScheduler) GetHostsByKey(key string) []*Host { 427 | i := getBucketByKey(c.hashMethod, c.bucketWidth, key) 428 | //host_ids := c.GetBucketSnapshot(i) 429 | host_ids := c.buckets[i] 430 | cnt := len(host_ids) 431 | hosts := make([]*Host, cnt) 432 | for j := 0; j < cnt; j++ { 433 | hosts[j] = c.hosts[host_ids[j]] 434 | } 435 | return hosts 436 | } 437 | 438 | func divideKeysByBucket(hash_func HashMethod, bs int, keys []string) [][]string { 439 | rs := make([][]string, bs) 440 | bw := calBitWidth(bs) 441 | for _, key := range keys { 442 | b := getBucketByKey(hash_func, bw, key) 443 | rs[b] = append(rs[b], key) 444 | } 445 | return rs 446 | } 447 | 448 | 449 | func (c *AutoScheduler) DivideKeysByBucket(keys []string) [][]string { 450 | return divideKeysByBucket(c.hashMethod, len(c.buckets), keys) 451 | } 452 | 453 | func (c *AutoScheduler) Stats() map[string][]float64 { 454 | r := make(map[string][]float64) 455 | for _, h := range c.hosts { 456 | r[h.Addr] = make([]float64, len(c.buckets)) 457 | } 458 | for i, st := range c.stats { 459 | for j, w := range st { 460 | r[c.hosts[j].Addr][i] = w 461 | } 462 | } 463 | return r 464 | } 465 | 466 | func swap(a []int, j, k int) { 467 | a[j], a[k] = a[k], a[j] 468 | } 469 | 470 | func abs(x float64) float64 { 471 | if x < 0 { 472 | return -x 473 | } 474 | return x 475 | } 476 | 477 | func (c *AutoScheduler) hostIndex(host *Host) int { 478 | for i, h := range c.hosts { 479 | if h == host { 480 | return i 481 | } 482 | } 483 | return -1 484 | } 485 | 486 | func (c *AutoScheduler) procFeedback() { 487 | c.feedChan = make(chan *Feedback, 1024) 488 | for { 489 | fb := <-c.feedChan 490 | c.feedback(fb.hostIndex, fb.bucketIndex, fb.adjust) 491 | } 492 | } 493 | 494 | func (c *AutoScheduler) Feedback(host *Host, key string, adjust float64) { 495 | index := getBucketByKey(c.hashMethod, c.bucketWidth, key) 496 | i := c.hostIndex(host) 497 | if i < 0 { 498 | return 499 | } 500 | //c.feedback(i, index, adjust) 501 | c.feedChan <- &Feedback{hostIndex: i, bucketIndex: index, adjust: adjust} 502 | } 503 | 504 | func (c *AutoScheduler) feedback(i, index int, adjust float64) { 505 | stats := c.stats[index] 506 | old := stats[i] 507 | if adjust >= 0 { 508 | //log.Print("reset ", index, " ", c.hosts[i].Addr, " ", stats[i], adjust) 509 | stats[i] = (stats[i] + adjust) / 2 510 | } else { 511 | stats[i] += adjust 512 | } 513 | buckets := make([]int, len(c.hosts)) 514 | copy(buckets, c.buckets[index]) 515 | k := 0 516 | for k = 0; k < len(c.hosts); k++ { 517 | if buckets[k] == i { 518 | break 519 | } 520 | } 521 | if stats[i]-old > 0 { 522 | for k > 0 && stats[buckets[k]] > stats[buckets[k-1]] { 523 | swap(buckets, k, k-1) 524 | k-- 525 | } 526 | } else { 527 | for k < len(c.hosts)-1 && stats[buckets[k]] < stats[buckets[k+1]] { 528 | swap(buckets, k, k+1) 529 | k++ 530 | } 531 | } 532 | // set it to origin 533 | c.buckets[index] = buckets 534 | } 535 | 536 | func hextoi(hex string) int { 537 | r := rune(0) 538 | for _, c := range hex { 539 | r *= 16 540 | switch { 541 | case c >= '0' && c <= '9': 542 | r += c - '0' 543 | case c >= 'A' && c <= 'F': 544 | r += 10 + c - 'A' 545 | case c >= 'a' && c <= 'f': 546 | r += 10 + c - 'a' 547 | } 548 | } 549 | return int(r) 550 | } 551 | 552 | 
func (c *AutoScheduler) listHost(host *Host, dir string) {
553 | rs, err := host.Get(dir)
554 | if err != nil || rs == nil {
555 | return
556 | }
557 | for _, line := range bytes.SplitN(rs.Body, []byte("\n"), 17) {
558 | if bytes.Count(line, []byte(" ")) < 2 || line[1] != '/' {
559 | continue
560 | }
561 | vv := bytes.SplitN(line, []byte(" "), 3)
562 | cnt, _ := strconv.ParseFloat(string(vv[2]), 64)
563 | adjust := math.Sqrt(cnt)
564 | c.Feedback(host, dir+string(vv[0]), adjust)
565 | }
566 | }
567 | 
568 | func (c *AutoScheduler) check() {
569 | defer func() {
570 | if e := recover(); e != nil {
571 | ErrorLog.Print("error in check(): ", e)
572 | }
573 | }()
574 | bs := len(c.buckets)
575 | bucketWidth := 0
576 | for bs > 1 {
577 | bucketWidth++
578 | bs /= 2
579 | }
580 | count := 1 << (uint)(bucketWidth-4)
581 | w := bucketWidth/4 - 1
582 | format := fmt.Sprintf("@%%0%dx", w)
583 | for _, host := range c.hosts {
584 | if w < 1 {
585 | c.listHost(host, "@")
586 | } else {
587 | for i := 0; i < count; i++ {
588 | key := fmt.Sprintf(format, i)
589 | c.listHost(host, key)
590 | }
591 | }
592 | }
593 | 
594 | c.last_check = time.Now()
595 | }
596 | 
--------------------------------------------------------------------------------
/src/memcache/protocol.go:
--------------------------------------------------------------------------------
1 | package memcache
2 | 
3 | import (
4 | "bufio"
5 | "cmem"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "reflect"
10 | "runtime"
11 | "strconv"
12 | "strings"
13 | "unsafe"
14 | )
15 | 
16 | const VERSION = "0.1.0"
17 | 
18 | const (
19 | MaxKeyLength = 200
20 | MaxBodyLength = 1024 * 1024 * 50
21 | )
22 | 
23 | var AllocLimit = 1024 * 4
24 | 
25 | type Item struct {
26 | Flag int
27 | Exptime int
28 | Cas int
29 | Body []byte
30 | alloc *byte
31 | }
32 | 
33 | func (it *Item) String() (s string) {
34 | return fmt.Sprintf("Item(Flag:%d, Exptime:%d, Length:%d, Cas:%d, Body:%v)",
35 | it.Flag, it.Exptime, len(it.Body), it.Cas, it.Body)
36 | }
37 | 
38 | type Request struct {
39 | Cmd string // get, set, delete, quit, etc.
40 | Keys []string // keys
41 | Item *Item
42 | NoReply bool
43 | }
44 | 
45 | func (req *Request) String() (s string) {
46 | return fmt.Sprintf("Request(Cmd:%s, Keys:%v, Item:%v, NoReply: %t)",
47 | req.Cmd, req.Keys, req.Item, req.NoReply)
48 | }
49 | 
50 | func (req *Request) Clear() {
51 | req.NoReply = false
52 | if req.Item != nil && req.Item.alloc != nil {
53 | cmem.Free(req.Item.alloc, uintptr(cap(req.Item.Body)))
54 | req.Item.Body = nil
55 | req.Item.alloc = nil
56 | req.Item = nil
57 | }
58 | }
59 | 
60 | func WriteFull(w io.Writer, buf []byte) error {
61 | n, e := w.Write(buf)
62 | for e != nil && n > 0 { // retry as long as the failed write made progress
63 | buf = buf[n:]
64 | n, e = w.Write(buf)
65 | }
66 | return e
67 | }
68 | 
69 | func (req *Request) Write(w io.Writer) (e error) {
70 | 
71 | switch req.Cmd {
72 | 
73 | case "get", "gets", "delete", "quit", "version", "stats", "flush_all":
74 | io.WriteString(w, req.Cmd)
75 | for _, key := range req.Keys {
76 | io.WriteString(w, " "+key)
77 | }
78 | if req.NoReply {
79 | io.WriteString(w, " noreply")
80 | }
81 | _, e = io.WriteString(w, "\r\n")
82 | 
83 | case "set", "add", "replace", "cas", "prepend", "append":
84 | noreply := ""
85 | if req.NoReply {
86 | noreply = " noreply"
87 | }
88 | item := req.Item
89 | if req.Cmd == "cas" {
90 | fmt.Fprintf(w, "%s %s %d %d %d %d%s\r\n", req.Cmd, req.Keys[0], item.Flag,
91 | item.Exptime, item.Cas, len(item.Body), noreply)
92 | } else {
93 | fmt.Fprintf(w, "%s %s %d %d %d%s\r\n", req.Cmd, req.Keys[0], item.Flag,
94 | item.Exptime, len(item.Body), noreply)
95 | }
96 | if e = WriteFull(w, item.Body); e != nil {
97 | return e
98 | }
99 | e = WriteFull(w, []byte("\r\n"))
100 | 
101 | case "incr", "decr":
102 | io.WriteString(w, req.Cmd)
103 | fmt.Fprintf(w, " %s %s", req.Keys[0], string(req.Item.Body))
104 | if req.NoReply {
105 | io.WriteString(w, " noreply")
106 | }
107 | _, e = io.WriteString(w, "\r\n")
108 | 
109 | default:
110 | ErrorLog.Print("unknown request cmd: ", req.Cmd)
111 | return errors.New("unknown cmd: " + req.Cmd)
112 | }
113 | 
114 | return e
115 | }
116 | 
117 | func (req *Request) Read(b *bufio.Reader) (e error) {
118 | var s string
119 | if s, e = b.ReadString('\n'); e != nil {
120 | return e
121 | }
122 | if !strings.HasSuffix(s, "\r\n") {
123 | return errors.New("not a complete command")
124 | }
125 | parts := strings.Fields(s)
126 | if len(parts) < 1 {
127 | return errors.New("invalid cmd")
128 | }
129 | 
130 | req.Cmd = parts[0]
131 | switch req.Cmd {
132 | 
133 | case "get", "gets":
134 | if len(parts) < 2 {
135 | return errors.New("invalid cmd")
136 | }
137 | req.Keys = parts[1:]
138 | 
139 | case "set", "add", "replace", "cas", "append", "prepend":
140 | if len(parts) < 5 || len(parts) > 7 {
141 | return errors.New("invalid cmd")
142 | }
143 | req.Keys = parts[1:2]
144 | req.Item = &Item{}
145 | item := req.Item
146 | item.Flag, e = strconv.Atoi(parts[2])
147 | if e != nil {
148 | return e
149 | }
150 | item.Exptime, e = strconv.Atoi(parts[3])
151 | if e != nil {
152 | return e
153 | }
154 | length, e := strconv.Atoi(parts[4])
155 | if e != nil {
156 | return e
157 | }
158 | if length > MaxBodyLength {
159 | return errors.New("body too large")
160 | }
161 | if req.Cmd == "cas" {
162 | if len(parts) < 6 {
163 | return errors.New("invalid cmd")
164 | }
165 | if item.Cas, e = strconv.Atoi(parts[5]); e != nil { return e }
166 | if len(parts) > 6 && parts[6] != "noreply" {
167 | return errors.New("invalid cmd")
168 | }
169 | req.NoReply = len(parts) > 6 && parts[6] == "noreply"
170 | } else {
171 | if len(parts) > 5 && parts[5] != "noreply" {
172 | return errors.New("invalid cmd")
173 | }
174 | req.NoReply = len(parts) > 5 && parts[5] == "noreply"
175 | }
176 | 
177 | // FIXME
178 | if length > AllocLimit {
179 | item.alloc = cmem.Alloc(uintptr(length))
180 | item.Body = (*[1 << 30]byte)(unsafe.Pointer(item.alloc))[:length]
181 | (*reflect.SliceHeader)(unsafe.Pointer(&item.Body)).Cap = length
182 | runtime.SetFinalizer(item, func(item *Item) {
183 | if item.alloc != nil {
184 | //log.Print("free by finalizer: ", cap(item.Body))
185 | cmem.Free(item.alloc, uintptr(cap(item.Body)))
186 | item.Body = nil
187 | item.alloc = nil
188 | }
189 | })
190 | } else {
191 | item.Body = make([]byte, length)
192 | }
193 | if _, e = io.ReadFull(b, item.Body); e != nil {
194 | return e
195 | }
196 | b.ReadByte() // \r
197 | b.ReadByte() // \n
198 | 
199 | case "delete":
200 | if len(parts) < 2 || len(parts) > 4 {
201 | return errors.New("invalid cmd")
202 | }
203 | req.Keys = parts[1:2]
204 | req.NoReply = len(parts) > 2 && parts[len(parts)-1] == "noreply"
205 | 
206 | case "incr", "decr":
207 | if len(parts) < 3 || len(parts) > 4 {
208 | return errors.New("invalid cmd")
209 | }
210 | req.Keys = parts[1:2]
211 | req.Item = &Item{Body: []byte(parts[2])}
212 | req.NoReply = len(parts) > 3 && parts[3] == "noreply"
213 | 
214 | case "stats":
215 | req.Keys = parts[1:]
216 | 
217 | case "quit", "version", "flush_all":
218 | case "verbosity":
219 | if len(parts) >= 2 {
220 | req.Keys = parts[1:]
221 | }
222 | 
223 | default:
224 | ErrorLog.Print("unknown command: ", req.Cmd)
225 | return errors.New("unknown command: " + req.Cmd)
226 | }
227 | 
228 | return
229 | }
230 | 
231 | type Response struct {
232 | status string
233 | msg string
234 | cas bool
235 | noreply bool
236 | items map[string]*Item
237 | }
238 | 
239 | func (resp *Response) String() (s string) {
240 | return fmt.Sprintf("Response(Status:%s, msg:%s, Items:%v)",
241 | resp.status, resp.msg, resp.items)
242 | }
243 | 
244 | func (resp *Response) Read(b *bufio.Reader) error {
245 | resp.items = make(map[string]*Item, 1)
246 | for {
247 | s, e := b.ReadString('\n')
248 | if e != nil {
249 | ErrorLog.Print("read response line failed: ", e)
250 | return e
251 | }
252 | parts := strings.Fields(s)
253 | if len(parts) < 1 {
254 | return errors.New("invalid response")
255 | }
256 | 
257 | resp.status = parts[0]
258 | switch resp.status {
259 | 
260 | case "VALUE":
261 | if len(parts) < 4 {
262 | return errors.New("invalid response")
263 | }
264 | 
265 | key := parts[1]
266 | // check key length
267 | flag, e1 := strconv.Atoi(parts[2])
268 | if e1 != nil {
269 | return errors.New("invalid response")
270 | }
271 | length, e2 := strconv.Atoi(parts[3])
272 | if e2 != nil {
273 | return errors.New("invalid response")
274 | }
275 | if length > MaxBodyLength {
276 | return errors.New("body too large")
277 | }
278 | 
279 | item := &Item{Flag: flag}
280 | if len(parts) == 5 {
281 | cas, e := strconv.Atoi(parts[4])
282 | if e != nil {
283 | return errors.New("invalid response")
284 | }
285 | item.Cas = cas
286 | }
287 | 
288 | // FIXME
289 | if length > AllocLimit {
290 | item.alloc = cmem.Alloc(uintptr(length))
291 | item.Body = (*[1 << 30]byte)(unsafe.Pointer(item.alloc))[:length]
292 | (*reflect.SliceHeader)(unsafe.Pointer(&item.Body)).Cap = length
293 | runtime.SetFinalizer(item, func(item *Item) {
294 | if item.alloc != nil {
295 | //log.Print("free by finalizer: ", cap(item.Body))
296 | cmem.Free(item.alloc, uintptr(cap(item.Body)))
297 | item.Body = nil
298 | item.alloc = nil
299 | }
300 | })
301 | } else {
302 | item.Body = 
make([]byte, length)
303 | }
304 | if _, e = io.ReadFull(b, item.Body); e != nil {
305 | return e
306 | }
307 | b.ReadByte() // \r
308 | b.ReadByte() // \n
309 | resp.items[key] = item
310 | continue
311 | 
312 | case "STAT":
313 | if len(parts) != 3 {
314 | return errors.New("invalid response")
315 | }
316 | var item Item
317 | item.Body = []byte(parts[2])
318 | resp.items[parts[1]] = &item
319 | continue
320 | 
321 | case "END":
322 | case "STORED", "NOT_STORED", "DELETED", "NOT_FOUND":
323 | case "OK":
324 | 
325 | case "ERROR", "SERVER_ERROR", "CLIENT_ERROR":
326 | if len(parts) > 1 {
327 | resp.msg = parts[1]
328 | }
329 | ErrorLog.Print("error: ", resp)
330 | 
331 | default:
332 | // try to convert to int
333 | _, err := strconv.Atoi(resp.status)
334 | if err == nil {
335 | // response from incr, decr
336 | resp.msg = resp.status
337 | resp.status = "INCR"
338 | } else {
339 | ErrorLog.Print("unknown status: ", s, resp.status)
340 | return errors.New("unknown response: " + resp.status)
341 | }
342 | }
343 | break
344 | }
345 | return nil
346 | }
347 | 
348 | func (resp *Response) Write(w io.Writer) error {
349 | if resp.noreply {
350 | return nil
351 | }
352 | 
353 | switch resp.status {
354 | case "VALUE":
355 | for key, item := range resp.items {
356 | if resp.cas {
357 | fmt.Fprintf(w, "VALUE %s %d %d %d\r\n", key, item.Flag,
358 | len(item.Body), item.Cas)
359 | } else {
360 | fmt.Fprintf(w, "VALUE %s %d %d\r\n", key, item.Flag,
361 | len(item.Body))
362 | }
363 | if e := WriteFull(w, item.Body); e != nil {
364 | return e
365 | }
366 | WriteFull(w, []byte("\r\n"))
367 | }
368 | io.WriteString(w, "END\r\n")
369 | 
370 | case "STAT":
371 | io.WriteString(w, resp.msg)
372 | io.WriteString(w, "END\r\n")
373 | 
374 | case "INCR", "DECR":
375 | io.WriteString(w, resp.msg)
376 | io.WriteString(w, "\r\n")
377 | 
378 | default:
379 | io.WriteString(w, resp.status)
380 | if resp.msg != "" {
381 | io.WriteString(w, " "+resp.msg)
382 | }
383 | io.WriteString(w, "\r\n")
384 | }
385 | return nil
386 | }
387 | 
388 | func (resp *Response) CleanBuffer() {
389 | for _, item := range resp.items {
390 | if item.alloc != nil {
391 | cmem.Free(item.alloc, uintptr(cap(item.Body)))
392 | item.alloc = nil
393 | }
394 | runtime.SetFinalizer(item, nil)
395 | }
396 | resp.items = nil
397 | }
398 | 
399 | func writeLine(w io.Writer, s string) {
400 | io.WriteString(w, s)
401 | io.WriteString(w, "\r\n")
402 | }
403 | 
404 | func (req *Request) Process(store DistributeStorage, stat *Stats) (resp *Response, targets []string, err error) {
405 | resp = new(Response)
406 | resp.noreply = req.NoReply
407 | 
408 | //var err error
409 | switch req.Cmd {
410 | 
411 | case "get", "gets":
412 | for _, k := range req.Keys {
413 | if len(k) > MaxKeyLength {
414 | resp.status = "CLIENT_ERROR"
415 | resp.msg = "key too long"
416 | return
417 | }
418 | }
419 | 
420 | resp.status = "VALUE"
421 | resp.cas = req.Cmd == "gets"
422 | if len(req.Keys) > 1 {
423 | resp.items, targets, err = store.GetMulti(req.Keys)
424 | if err != nil {
425 | resp.status = "SERVER_ERROR"
426 | resp.msg = err.Error()
427 | return
428 | }
429 | stat.cmd_get += int64(len(req.Keys))
430 | stat.get_hits += int64(len(resp.items))
431 | stat.get_misses += int64(len(req.Keys) - len(resp.items))
432 | bytes := int64(0)
433 | for _, item := range resp.items {
434 | bytes += int64(len(item.Body))
435 | }
436 | stat.bytes_written += bytes
437 | } else {
438 | stat.cmd_get++
439 | key := req.Keys[0]
440 | var item *Item
441 | item, targets, err = store.Get(key)
442 | if err != nil {
443 | resp.status = "SERVER_ERROR"
444 | resp.msg = err.Error()
445 | return
446 | }
447 | if item == nil {
448 | stat.get_misses++
449 | } else {
450 | resp.items = make(map[string]*Item, 1)
451 | resp.items[key] = item
452 | stat.get_hits++
453 | stat.bytes_written += int64(len(item.Body))
454 | }
455 | }
456 | 
457 | case "set", "add", "replace", "cas":
458 | key := req.Keys[0]
459 | var suc bool
460 | suc, targets, err = store.Set(key, req.Item, req.NoReply)
461 | if err != nil {
462 | resp.status = "SERVER_ERROR"
463 | resp.msg = err.Error()
464 | break
465 | }
466 | 
467 | stat.cmd_set++
468 | stat.bytes_read += int64(len(req.Item.Body))
469 | if suc {
470 | resp.status = "STORED"
471 | } else {
472 | resp.status = "NOT_STORED"
473 | }
474 | 
475 | case "append":
476 | key := req.Keys[0]
477 | var suc bool
478 | suc, targets, err = store.Append(key, req.Item.Body)
479 | if err != nil {
480 | resp.status = "SERVER_ERROR"
481 | resp.msg = err.Error()
482 | return
483 | }
484 | 
485 | stat.cmd_set++
486 | stat.bytes_read += int64(len(req.Item.Body))
487 | if suc {
488 | resp.status = "STORED"
489 | } else {
490 | resp.status = "NOT_STORED"
491 | }
492 | 
493 | case "incr":
494 | stat.cmd_set++
495 | stat.bytes_read += int64(len(req.Item.Body))
496 | resp.noreply = req.NoReply
497 | key := req.Keys[0]
498 | add, err := strconv.Atoi(string(req.Item.Body))
499 | if err != nil {
500 | resp.status = "CLIENT_ERROR"
501 | resp.msg = "invalid number"
502 | break
503 | }
504 | var result int
505 | result, targets, err = store.Incr(key, add)
506 | if err != nil {
507 | resp.status = "SERVER_ERROR"
508 | resp.msg = err.Error()
509 | break
510 | }
511 | 
512 | if result > 0 {
513 | resp.status = "INCR"
514 | resp.msg = strconv.Itoa(result)
515 | } else {
516 | resp.status = "NOT_FOUND"
517 | }
518 | 
519 | case "delete":
520 | key := req.Keys[0]
521 | var suc bool
522 | suc, targets, err = store.Delete(key)
523 | if err != nil {
524 | resp.status = "SERVER_ERROR"
525 | resp.msg = err.Error()
526 | break
527 | }
528 | if suc {
529 | resp.status = "DELETED"
530 | } else {
531 | resp.status = "NOT_FOUND"
532 | }
533 | stat.cmd_delete++
534 | 
535 | case "stats":
536 | st := stat.Stats()
537 | n := int64(store.Len())
538 | st["curr_items"] = n
539 | st["total_items"] = n
540 | resp.status = "STAT"
541 | var ss []string
542 | if len(req.Keys) > 0 {
543 | ss = make([]string, len(req.Keys))
544 | for i, k := range req.Keys {
545 | v := st[k]
546 | ss[i] = fmt.Sprintf("STAT %s %d\r\n", k, v)
547 | }
548 | 
549 | } else {
550 | ss = make([]string, len(st))
551 | cnt := 0
552 | for k, v := range st {
553 | ss[cnt] = fmt.Sprintf("STAT %s %d\r\n", k, v)
554 | cnt++
555 | }
556 | }
557 | resp.msg = strings.Join(ss, "")
558 | 
559 | case "version":
560 | resp.status = "VERSION"
561 | resp.msg = VERSION
562 | 
563 | case "verbosity", "flush_all":
564 | resp.status = "OK"
565 | 
566 | case "quit":
567 | resp = nil
568 | return
569 | 
570 | default:
571 | // unknown command: return no response, as with quit
572 | resp = nil
573 | return
574 | 
575 | 
576 | }
577 | return
578 | }
579 | 
580 | func contain(vs []string, v string) bool {
581 | for _, i := range vs {
582 | if i == v {
583 | return true
584 | }
585 | }
586 | return false
587 | }
588 | 
589 | func (req *Request) Check(resp *Response) error {
590 | switch req.Cmd {
591 | case "get", "gets":
592 | if resp.items != nil {
593 | for key := range resp.items {
594 | if !contain(req.Keys, key) {
595 | 
ErrorLog.Print("unexpected key in response: ", key) 596 | return errors.New("unexpected key in response: " + key) 597 | } 598 | } 599 | } 600 | 601 | case "incr", "decr": 602 | if !contain([]string{"INCR", "DECR", "NOT_FOUND"}, resp.status) { 603 | return errors.New("unexpected status: " + resp.status) 604 | } 605 | 606 | case "set", "add", "replace", "append", "prepend": 607 | if !contain([]string{"STORED", "NOT_STORED", "EXISTS", "NOT_FOUND"}, 608 | resp.status) { 609 | return errors.New("unexpected status: " + resp.status) 610 | } 611 | } 612 | return nil 613 | } 614 | --------------------------------------------------------------------------------
/static/stats.html (identified by its content): a Go HTML template whose markup was stripped in this dump, so only the table text survives. It renders the "Active servers (stats)" table, one row per server, with the columns:

# | host | version | uptime | mem | thd | conn | mpr | records | space | get | set | delete | slow | get/set | hit | read | write

Each row fills those cells with {{$i}}, {{.name}}, {{.version}}, {{.uptime|time}}, {{.rusage_maxrss|size}}, {{.threads}}, {{.curr_connections|num}}, {{.mpr|size}}, {{.total_items|num}} / {{.curr_items|num}}, {{.total_space|size}} / {{.avail_space|size}}, {{.cmd_get|num}} / {{.curr_cmd_get|num}}, {{.cmd_set|num}} / {{.curr_cmd_set|num}}, {{.cmd_delete|num}} / {{.curr_cmd_delete|num}}, {{.slow}}% / {{.curr_slow}}%, {{.getset|num}} / {{.curr_getset|num}}, {{.hit}}% / {{.curr_hit}}%, {{.bytes_written|size}} / {{.curr_bytes_written|size}}, and {{.bytes_read|size}} / {{.curr_bytes_read|size}}; the mpr, records, and space cells are wrapped in {{if .version}}...{{end}} so they are blank for servers that are down.
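
For reference, this is the wire format Request.Write in protocol.go produces for a storage command. A minimal sketch, assuming the GOPATH=$(CURDIR) layout set up by the Makefile so the package imports as plain "memcache" (the same convention protocol.go uses to import "cmem"):

``` go
package main

import (
	"bytes"
	"fmt"

	"memcache" // assumes the repo's GOPATH layout from the Makefile
)

func main() {
	// A 5-byte set, framed as memcached text protocol:
	// "set <key> <flags> <exptime> <bytes>\r\n<body>\r\n"
	req := &memcache.Request{
		Cmd:  "set",
		Keys: []string{"some_key"},
		Item: &memcache.Item{Flag: 0, Exptime: 0, Body: []byte("hello")},
	}
	var buf bytes.Buffer
	if err := req.Write(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String()) // "set some_key 0 0 5\r\nhello\r\n"
}
```

The same framing is what Request.Read parses on the server side, so any standard memcached client can talk to the proxy, as the README notes.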