├── tests
│   ├── __init__.py
│   ├── pip-req.txt
│   ├── README.md
│   ├── run_test.sh
│   ├── utils.py
│   ├── abnormal_cmd_test.py
│   ├── dbclient.py
│   ├── server_down_test.py
│   ├── base.py
│   ├── basic_test.py
│   ├── gen_config.py
│   └── switch_storage_test.py
├── deb-req.d
│   └── dev.txt
├── pip-req.d
│   └── dev.txt
├── conf
│   ├── table_dispatcher_cfg.cql
│   ├── route.yaml
│   └── proxy.yaml
├── main.go
├── templates
│   ├── css
│   │   ├── starter-template.css
│   │   ├── custom.css
│   │   └── bootstrap-sortable.css
│   ├── stats.html
│   ├── js
│   │   ├── nav.js
│   │   └── bootstrap-sortable.js
│   ├── score.html
│   ├── buckets.html
│   ├── bucketinfo.html
│   └── base.html
├── utils
│   └── path.go
├── .gitignore
├── dbtest
│   └── conf
│       ├── route.yaml
│       └── proxy.yaml
├── dstore
│   ├── host_test.go
│   ├── utils.go
│   ├── utils_test.go
│   ├── read_only_scheduler_test.go
│   ├── consistent_test.go
│   ├── ringqueue.go
│   ├── metrics.go
│   ├── read_only_scheduler.go
│   ├── bucket_test.go
│   ├── consistent.go
│   ├── bucket.go
│   ├── host.go
│   ├── store_test.go
│   └── scheduler.go
├── Makefile
├── .doubanpde
│   ├── scripts
│   │   ├── bdb
│   │   │   ├── gobeansproxy
│   │   │   │   ├── 57980
│   │   │   │   │   └── conf
│   │   │   │   │       ├── route.yaml
│   │   │   │   │       └── global.yaml
│   │   │   │   ├── 57981
│   │   │   │   │   └── conf
│   │   │   │   │       ├── route.yaml
│   │   │   │   │       └── global.yaml
│   │   │   │   ├── 57982
│   │   │   │   │   └── conf
│   │   │   │   │       ├── route.yaml
│   │   │   │   │       └── global.yaml
│   │   │   │   ├── 57983
│   │   │   │   │   └── conf
│   │   │   │   │       ├── route.yaml
│   │   │   │   │       └── global.yaml
│   │   │   │   ├── dstore-only
│   │   │   │   │   └── conf
│   │   │   │   │       ├── route.yaml
│   │   │   │   │       └── proxy.yaml
│   │   │   │   └── prefix-switch-cfg
│   │   │   │       └── conf
│   │   │   │           ├── route.yaml
│   │   │   │           └── proxy.yaml
│   │   │   └── rivenbeansproxy
│   │   │       └── conf
│   │   │           └── proxy.yaml
│   │   └── cassandra
│   │       └── init_kv.cql
│   ├── Makefile
│   └── pde.yaml
├── README.md
├── config
│   ├── config_test.go
│   ├── default.go
│   └── config.go
├── .github
│   └── workflows
│       └── go.yml
├── cassandra
│   ├── prefix_table_finder_test.go
│   ├── logger.go
│   ├── udt.go
│   ├── prefix_cfg.go
│   ├── prefix_table_finder.go
│   ├── cstar.go
│   └── prefix_switch.go
├── LICENSE
├── go.mod
├── gobeansproxy
│   ├── gobeansproxy.go
│   └── web.go
└── go.sum
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/pip-req.txt:
--------------------------------------------------------------------------------
1 | PyYAML
2 | libmc>=0.5.6
3 | nose3
4 |
--------------------------------------------------------------------------------
/deb-req.d/dev.txt:
--------------------------------------------------------------------------------
1 | less
2 | python3-pip
3 | python-is-python3
4 |
--------------------------------------------------------------------------------
/pip-req.d/dev.txt:
--------------------------------------------------------------------------------
1 | cqlsh
2 |
3 | pytest
4 | requests
5 | pyyaml
6 |
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | Integrated tests for gobeansproxy.
--------------------------------------------------------------------------------
/conf/table_dispatcher_cfg.cql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS YOURKEYSPACE.YOURTABLE (
2 | prefix blob PRIMARY KEY,
3 | value text,
4 | );
5 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/douban/gobeansproxy/gobeansproxy"
5 | )
6 |
7 | func main() {
8 | gobeansproxy.Main()
9 | }
10 |
--------------------------------------------------------------------------------
/templates/css/starter-template.css:
--------------------------------------------------------------------------------
1 | body {
2 | padding-top: 50px;
3 | }
4 | .starter-template {
5 | padding: 40px 15px;
6 | text-align: center;
7 | }
8 |
9 | .alert {
10 | margin-left: 18px
11 | }
12 |
--------------------------------------------------------------------------------
/utils/path.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "path"
5 | "runtime"
6 | )
7 |
8 | func GetProjectHomeDir() string {
9 | if _, filename, _, ok := runtime.Caller(1); ok {
10 | return path.Dir(path.Dir(filename))
11 | }
12 | return ""
13 | }
14 |
--------------------------------------------------------------------------------
/templates/css/custom.css:
--------------------------------------------------------------------------------
1 | .table-buckets > tbody > tr:nth-of-type(6n+1) {
2 | background: #ccc;
3 | }
4 |
5 | .table-buckets > tbody > tr:nth-of-type(6n+2) {
6 | background: #ccc;
7 | }
8 |
9 | .table-buckets > tbody > tr:nth-of-type(6n+3) {
10 | background: #ccc;
11 | }
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .tags
2 | .tags_sorted_by_file
3 | *.py[c|o|~]
4 | *.log
5 | unittest.xml
6 | venv/
7 | vendor/
8 | .idea/
9 | main
10 | vendor/
11 |
12 | # add by pdectl
13 | .doubanpde/*
14 | !.doubanpde/pde.yaml
15 | !.doubanpde/pdectl-*
16 | .doubanpde/pdectl-*/*
17 | !.doubanpde/pdectl-*/Dockerfile.tpl
18 |
--------------------------------------------------------------------------------
/conf/route.yaml:
--------------------------------------------------------------------------------
1 | numbucket: 16
2 | backup:
3 | - "127.0.0.1:7983"
4 | main:
5 | - addr: 127.0.0.1:7980
6 | buckets: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f]
7 | - addr: 127.0.0.1:7981
8 | buckets: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f]
9 | - addr: 127.0.0.1:7982
10 | buckets: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f]
11 |
--------------------------------------------------------------------------------
/dbtest/conf/route.yaml:
--------------------------------------------------------------------------------
1 | numbucket: 16
2 | backup:
3 | - "127.0.0.1:7983"
4 | main:
5 | - addr: 127.0.0.1:7980
6 | buckets: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f]
7 | - addr: 127.0.0.1:7981
8 | buckets: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f]
9 | - addr: 127.0.0.1:7982
10 | buckets: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f]
11 |
--------------------------------------------------------------------------------
/dstore/host_test.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "path"
5 | "testing"
6 |
7 | "github.com/douban/gobeansproxy/config"
8 | "github.com/douban/gobeansproxy/utils"
9 | )
10 |
11 | func TestHost(t *testing.T) {
12 | homeDir := utils.GetProjectHomeDir()
13 | confdir := path.Join(homeDir, "conf")
14 | proxyConf := &config.Proxy
15 | proxyConf.Load(confdir)
16 | }
17 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all:install
2 |
3 | export PYTHONPATH=.
4 |
5 | .PHONY: test
6 | test:
7 | go version
8 | go test github.com/douban/gobeansproxy/config
9 | go test github.com/douban/gobeansproxy/dstore
10 |
11 | template:
12 | rm -r /var/lib/gobeansproxy/templates
13 | cp -r templates /var/lib/gobeansproxy/
14 |
15 | pytest: install
16 | ./tests/run_test.sh
17 |
18 | install:
19 | go install ./
20 |
--------------------------------------------------------------------------------
/dstore/utils.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | func deduplicateKeys(keys []string) []string {
4 | dedup := make(map[string]struct{}, len(keys))
5 |
6 | for _, k := range keys {
7 | if _, ok := dedup[k]; ok {
8 | continue
9 | } else {
10 | dedup[k] = struct{}{}
11 | }
12 | }
13 |
14 | dedupKs := make([]string, len(dedup))
15 | i := 0
16 | for k := range dedup {
17 | dedupKs[i] = k
18 | i++
19 | }
20 | return dedupKs
21 | }
22 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57980/conf/route.yaml:
--------------------------------------------------------------------------------
1 | backup:
2 | - 127.0.0.1:57983
3 | main:
4 | - addr: 127.0.0.1:57980
5 | buckets: &id001
6 | - '0'
7 | - '1'
8 | - '2'
9 | - '3'
10 | - '4'
11 | - '5'
12 | - '6'
13 | - '7'
14 | - '8'
15 | - '9'
16 | - a
17 | - b
18 | - c
19 | - d
20 | - e
21 | - f
22 | - addr: 127.0.0.1:57981
23 | buckets: *id001
24 | - addr: 127.0.0.1:57982
25 | buckets: *id001
26 | numbucket: 16
27 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57981/conf/route.yaml:
--------------------------------------------------------------------------------
1 | backup:
2 | - 127.0.0.1:57983
3 | main:
4 | - addr: 127.0.0.1:57980
5 | buckets: &id001
6 | - '0'
7 | - '1'
8 | - '2'
9 | - '3'
10 | - '4'
11 | - '5'
12 | - '6'
13 | - '7'
14 | - '8'
15 | - '9'
16 | - a
17 | - b
18 | - c
19 | - d
20 | - e
21 | - f
22 | - addr: 127.0.0.1:57981
23 | buckets: *id001
24 | - addr: 127.0.0.1:57982
25 | buckets: *id001
26 | numbucket: 16
27 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57982/conf/route.yaml:
--------------------------------------------------------------------------------
1 | backup:
2 | - 127.0.0.1:57983
3 | main:
4 | - addr: 127.0.0.1:57980
5 | buckets: &id001
6 | - '0'
7 | - '1'
8 | - '2'
9 | - '3'
10 | - '4'
11 | - '5'
12 | - '6'
13 | - '7'
14 | - '8'
15 | - '9'
16 | - a
17 | - b
18 | - c
19 | - d
20 | - e
21 | - f
22 | - addr: 127.0.0.1:57981
23 | buckets: *id001
24 | - addr: 127.0.0.1:57982
25 | buckets: *id001
26 | numbucket: 16
27 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57983/conf/route.yaml:
--------------------------------------------------------------------------------
1 | backup:
2 | - 127.0.0.1:57983
3 | main:
4 | - addr: 127.0.0.1:57980
5 | buckets: &id001
6 | - '0'
7 | - '1'
8 | - '2'
9 | - '3'
10 | - '4'
11 | - '5'
12 | - '6'
13 | - '7'
14 | - '8'
15 | - '9'
16 | - a
17 | - b
18 | - c
19 | - d
20 | - e
21 | - f
22 | - addr: 127.0.0.1:57981
23 | buckets: *id001
24 | - addr: 127.0.0.1:57982
25 | buckets: *id001
26 | numbucket: 16
27 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/dstore-only/conf/route.yaml:
--------------------------------------------------------------------------------
1 | backup:
2 | - 127.0.0.1:57983
3 | main:
4 | - addr: 127.0.0.1:57980
5 | buckets: &id001
6 | - '0'
7 | - '1'
8 | - '2'
9 | - '3'
10 | - '4'
11 | - '5'
12 | - '6'
13 | - '7'
14 | - '8'
15 | - '9'
16 | - a
17 | - b
18 | - c
19 | - d
20 | - e
21 | - f
22 | - addr: 127.0.0.1:57981
23 | buckets: *id001
24 | - addr: 127.0.0.1:57982
25 | buckets: *id001
26 | numbucket: 16
27 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/prefix-switch-cfg/conf/route.yaml:
--------------------------------------------------------------------------------
1 | backup:
2 | - 127.0.0.1:57983
3 | main:
4 | - addr: 127.0.0.1:57980
5 | buckets: &id001
6 | - '0'
7 | - '1'
8 | - '2'
9 | - '3'
10 | - '4'
11 | - '5'
12 | - '6'
13 | - '7'
14 | - '8'
15 | - '9'
16 | - a
17 | - b
18 | - c
19 | - d
20 | - e
21 | - f
22 | - addr: 127.0.0.1:57981
23 | buckets: *id001
24 | - addr: 127.0.0.1:57982
25 | buckets: *id001
26 | numbucket: 16
27 |
--------------------------------------------------------------------------------
/templates/stats.html:
--------------------------------------------------------------------------------
1 | {{ define "body" }}
2 | Interval Stats
3 | config
4 | request
5 | buffer
6 | memstat
7 | rusage
8 | score
9 | score (in json)
10 | route
11 | route version
12 | buckets
13 | Debug PProf
14 | pprof
15 | {{ end }}
16 |
--------------------------------------------------------------------------------
/templates/js/nav.js:
--------------------------------------------------------------------------------
1 | (function() {
2 | function hrefToLevels(href) {
3 | href = href.split('?')[0];
4 | if (href.slice(-1) == '/') {
5 | return href.slice(0, -1);
6 | } else {
7 | return href;
8 | }
9 | }
10 |
11 | var urlLevels = hrefToLevels(location.pathname);
12 |
13 | $('.navbar-fixed-top .navbar-nav li a').each(function(i, n) {
14 | var hrefLevels = hrefToLevels($(n).attr('href'));
15 | if (urlLevels.lastIndexOf(hrefLevels, 0) === 0) {
16 | $(n).parent().addClass('active');
17 | }
18 | });
19 | })();
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GoBeansProxy 
2 |
3 | A proxy for [Gobeansdb](https://github.com/douban/gobeansdb).
4 |
5 | ## Prepare
6 |
7 | Supported Go version: > 1.20.0
8 |
9 | ## Install
10 |
11 | ```
12 | $ git clone https://github.com/douban/gobeansproxy.git
13 | $ cd gobeansproxy
14 | $ go mod tidy
15 | $ make
16 | ```
17 |
18 | ## Test
19 |
20 | ```
21 | $ make test # unit test
22 | ```
23 |
24 | ## Run
25 |
26 | ```
27 | # Run with conf
28 | $ ${GOPATH}/bin/gobeansproxy -confdir pathToConfDir
29 |
30 | # Others
31 | $ ${GOPATH}/bin/gobeansproxy -h
32 | ```
33 |
--------------------------------------------------------------------------------
/templates/score.html:
--------------------------------------------------------------------------------
1 | {{ define "body" }}
2 |
3 |
4 | Score Stats
5 |
6 |
7 | | Bucket |
8 | Host |
9 | Score |
10 |
11 |
12 |
13 | {{ range $bucket, $hosts := .stats }}
14 | {{ range $hostAddr, $score := $hosts }}
15 |
16 | | {{ $bucket }} |
17 | {{ $hostAddr }} |
18 | {{ $score }} |
19 |
20 | {{ end }}
21 | {{ end }}
22 |
23 |
24 |
25 | {{ end }}
26 |
--------------------------------------------------------------------------------
/templates/buckets.html:
--------------------------------------------------------------------------------
1 | {{ define "body" }}
2 |
3 | buckets
4 |
5 |
6 |
7 | | Bucket |
8 | Host |
9 | Percentage |
10 |
11 |
12 |
13 | {{ range $bucket, $hosts := .buckets }}
14 | {{ range $hostAddr, $percentage := $hosts }}
15 |
16 | |
17 | {{ $bucket }}
18 | |
19 | {{ $hostAddr }} |
20 | {{ $percentage }} |
21 |
22 | {{ end }}
23 | {{ end }}
24 |
25 |
26 |
27 | {{ end }}
28 |
--------------------------------------------------------------------------------
/config/config_test.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "path"
5 | "testing"
6 |
7 | "github.com/douban/gobeansproxy/utils"
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestLoadConfig(t *testing.T) {
12 | homeDir := utils.GetProjectHomeDir()
13 | confdir := path.Join(homeDir, "conf")
14 |
15 | proxyCfg := new(ProxyConfig)
16 | proxyCfg.Load(confdir)
17 |
18 | assert := assert.New(t)
19 | assert.Equal("127.0.0.1", proxyCfg.Hostname)
20 | assert.Equal(7905, proxyCfg.Port)
21 | assert.Equal(250, proxyCfg.MaxKeyLen)
22 |
23 | assert.Equal(3, proxyCfg.N)
24 | assert.Equal(1, proxyCfg.R)
25 | assert.Equal(20, proxyCfg.MaxFreeConnsPerHost)
26 | assert.Equal(300, proxyCfg.ConnectTimeoutMs)
27 | assert.Equal(2000, proxyCfg.ReadTimeoutMs)
28 |
29 | assert.Equal("127.0.0.1:7980", Route.Main[0].Addr)
30 | }
31 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57980/conf/global.yaml:
--------------------------------------------------------------------------------
1 | hstore:
2 | data:
3 | check_vhash: true
4 | datafile_max_str: 4000M
5 | flush_interval: 60
6 | flush_wake_str: 10M
7 | no_gc_days: 7
8 | hint:
9 | hint_index_interval_str: 32K
10 | hint_merge_interval: 5
11 | hint_no_merged: true
12 | hint_split_cap_str: 1M
13 | htree:
14 | tree_height: 3
15 | local:
16 | home: /data
17 | mc:
18 | body_big_str: 5M
19 | body_c_str: 0K
20 | body_max_str: 50M
21 | flush_max_str: 100M
22 | max_key_len: 250
23 | max_req: 16
24 | server:
25 | accesslog: /tmp/access.log
26 | errorlog: /tmp/error.log
27 | hostname: 127.0.0.1
28 | listen: 0.0.0.0
29 | port: 57980
30 | threads: 4
31 | webport: 57990
32 | zk: 'NO'
33 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57981/conf/global.yaml:
--------------------------------------------------------------------------------
1 | hstore:
2 | data:
3 | check_vhash: true
4 | datafile_max_str: 4000M
5 | flush_interval: 60
6 | flush_wake_str: 10M
7 | no_gc_days: 7
8 | hint:
9 | hint_index_interval_str: 32K
10 | hint_merge_interval: 5
11 | hint_no_merged: true
12 | hint_split_cap_str: 1M
13 | htree:
14 | tree_height: 3
15 | local:
16 | home: /data
17 | mc:
18 | body_big_str: 5M
19 | body_c_str: 0K
20 | body_max_str: 50M
21 | flush_max_str: 100M
22 | max_key_len: 250
23 | max_req: 16
24 | server:
25 | accesslog: /tmp/access.log
26 | errorlog: /tmp/error.log
27 | hostname: 127.0.0.1
28 | listen: 0.0.0.0
29 | port: 57981
30 | threads: 4
31 | webport: 57991
32 | zk: 'NO'
33 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57982/conf/global.yaml:
--------------------------------------------------------------------------------
1 | hstore:
2 | data:
3 | check_vhash: true
4 | datafile_max_str: 4000M
5 | flush_interval: 60
6 | flush_wake_str: 10M
7 | no_gc_days: 7
8 | hint:
9 | hint_index_interval_str: 32K
10 | hint_merge_interval: 5
11 | hint_no_merged: true
12 | hint_split_cap_str: 1M
13 | htree:
14 | tree_height: 3
15 | local:
16 | home: /data
17 | mc:
18 | body_big_str: 5M
19 | body_c_str: 0K
20 | body_max_str: 50M
21 | flush_max_str: 100M
22 | max_key_len: 250
23 | max_req: 16
24 | server:
25 | accesslog: /tmp/access.log
26 | errorlog: /tmp/error.log
27 | hostname: 127.0.0.1
28 | listen: 0.0.0.0
29 | port: 57982
30 | threads: 4
31 | webport: 57992
32 | zk: 'NO'
33 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/57983/conf/global.yaml:
--------------------------------------------------------------------------------
1 | hstore:
2 | data:
3 | check_vhash: true
4 | datafile_max_str: 4000M
5 | flush_interval: 60
6 | flush_wake_str: 10M
7 | no_gc_days: 7
8 | hint:
9 | hint_index_interval_str: 32K
10 | hint_merge_interval: 5
11 | hint_no_merged: true
12 | hint_split_cap_str: 1M
13 | htree:
14 | tree_height: 3
15 | local:
16 | home: /data
17 | mc:
18 | body_big_str: 5M
19 | body_c_str: 0K
20 | body_max_str: 50M
21 | flush_max_str: 100M
22 | max_key_len: 250
23 | max_req: 16
24 | server:
25 | accesslog: /tmp/access.log
26 | errorlog: /tmp/error.log
27 | hostname: 127.0.0.1
28 | listen: 0.0.0.0
29 | port: 57983
30 | threads: 4
31 | webport: 57993
32 | zk: 'NO'
33 |
--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
1 | on: [push, pull_request]
2 | name: GoBeansProxy Test
3 | jobs:
4 | test:
5 | strategy:
6 | matrix:
7 | go-version: [1.20.x, 1.21.x]
8 | platform: [ubuntu-latest]
9 | runs-on: ${{ matrix.platform }}
10 | steps:
11 | - name: Install Go
12 | uses: actions/setup-go@v1
13 | with:
14 | go-version: ${{ matrix.go-version }}
15 |
16 | - name: Checkout code
17 | uses: actions/checkout@v1
18 | with:
19 | fetch-depth: 1
20 | path: go/src/github.com/douban/gobeansproxy
21 |
22 | - name: Test
23 | run: |
24 | go mod tidy
25 | go install github.com/douban/gobeansdb@latest
26 | make test
27 | env:
28 | GOPATH: /home/runner/work/gobeansproxy/go/
29 |
30 | - name: Install
31 | run: make install
32 |
--------------------------------------------------------------------------------
/.doubanpde/Makefile:
--------------------------------------------------------------------------------
1 | SHELL := /bin/bash
2 | PROJECT_DIR := /home/project
3 |
4 | env:
5 | dpi -y -D "-y"
6 | mkdir -p /tmp/gobeansproxy_prefix/proxy/
7 |
8 | build:
9 | go build -o gobeansproxy_bin
10 |
11 | start-proxy: build
12 | ./gobeansproxy_bin -confdir .doubanpde/scripts/bdb/gobeansproxy/prefix-switch-cfg/conf/
13 |
14 | start-riven-proxy: build
15 | ./gobeansproxy_bin -confdir .doubanpde/scripts/bdb/rivenbeansproxy/conf/
16 |
17 | start-proxy-gc-trace: build
18 | GODEBUG=gctrace=1 ./gobeansproxy_bin -confdir .doubanpde/scripts/bdb/gobeansproxy/prefix-switch-cfg/conf/
19 |
20 | start-proxy-valgrind: build
21 | G_SLICE=always-malloc G_DEBUG=gc-friendly valgrind -v --tool=memcheck --leak-check=full --num-callers=40 --error-limit=no --log-file=valgrind.log ./gobeansproxy_bin -confdir .doubanpde/scripts/bdb/gobeansproxy/prefix-switch-cfg/conf/
22 |
23 | tail-log:
24 | tail -f /tmp/gobeansproxy_prefix/proxy/*.log
25 |
26 | cqlsh:
27 | cqlsh -u cassandra -p cassandra
28 |
--------------------------------------------------------------------------------
/config/default.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | dbcfg "github.com/douban/gobeansdb/config"
5 | )
6 |
7 | var (
8 | DefaultServerConfig = dbcfg.ServerConfig{
9 | Hostname: "127.0.0.1",
10 | Listen: "0.0.0.0",
11 | Port: 7905,
12 | WebPort: 7908,
13 | Threads: 8,
14 | ZKServers: nil,
15 | ErrorLog: "./proxy-error.log",
16 | AccessLog: "./proxy-access.log",
17 | StaticDir: "/var/lib/gobeansproxy",
18 | }
19 |
20 | DefaultDStoreConfig = DStoreConfig{
21 | N: 3,
22 | W: 2,
23 | R: 1,
24 | MaxFreeConnsPerHost: 20,
25 | ConnectTimeoutMs: 300,
26 | WriteTimeoutMs: 2000,
27 | DialFailSilenceMs: 5000,
28 | ResTimeSeconds: 10,
29 | ErrorSeconds: 10,
30 | MaxConnectErrors: 10,
31 | 		ScoreDeviation:      10000, // 10000 microseconds -> 10 milliseconds
32 | ItemSizeStats: 4096,
33 | ResponseTimeMin: 4000,
34 | Enable: true,
35 | }
36 | )
37 |
--------------------------------------------------------------------------------
/dbtest/conf/proxy.yaml:
--------------------------------------------------------------------------------
1 | # for doubandb proxy
2 | # ~/go/src/github.com/dispensable/gobeansproxy/dbtest/conf
3 | proxy:
4 | listen: 0.0.0.0
5 | port: 7905
6 | webport: 7908
7 | threads: 8
8 | errorlog: "~/go/src/github.com/dispensable/gobeansproxy/dbtest/log/proxy-error.log"
9 | accesslog: "~/go/src/github.com/dispensable/gobeansproxy/dbtest/log/proxy-access.log"
10 | hostname: 127.0.0.1
11 | staticdir: ~/go/src/github.com/dispensable/gobeansproxy/dbtest/staticdir/
12 | zkservers: []
13 | zkpath: "/gobeansproxy/test"
14 | mc:
15 | max_key_len: 250
16 | max_req: 16
17 | body_max_str: 50M
18 | body_big_str: 5M
19 | body_c_str: 0K
20 | dstore:
21 | n: 3
22 | w: 2
23 | r: 1
24 | max_free_conns_per_host: 20
25 | connect_timeout_ms: 300
26 | write_timeout_ms: 2000
27 | read_timeout_ms: 2000
28 | dial_fail_silence_ms: 5000
29 | response_time_seconds: 10
30 | error_seconds: 10
31 | max_connect_errors: 10
32 | score_deviation: 10000
33 | item_size_stats: 4096
34 | response_time_min: 4000
35 |
--------------------------------------------------------------------------------
/dstore/utils_test.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | )
7 |
8 | func TestDeduplicateKeys(t *testing.T) {
9 | test := []string{"a", "b", "c", "d", "a"}
10 | dtk := deduplicateKeys(test)
11 | if (len(dtk) != 4) {
12 | t.Errorf("string slice should be deduplicated: %s", dtk)
13 | }
14 | test2 := []string{"a", "n"}
15 | dtk2 := deduplicateKeys(test2)
16 | if (len(dtk2) != 2) {
17 | t.Errorf("string slice %s has no duplications", test2)
18 | }
19 | t.Logf("after dedup: %s | %s", dtk, dtk2)
20 | }
21 |
22 | func BenchmarkDeduplicateKeys(b *testing.B) {
23 | test := []string{
24 | "/frodo_feed/title_vecs/3055:4601087161",
25 | "/frodo_feed/title_vecs/3055:4601087162",
26 | "/frodo_feed/title_vecs/3055:4601087161",
27 | "/frodo_feed/title_vecs/3055:4601087165",
28 | "/frodo_feed/title_vecs/3055:4601087161",
29 | }
30 |
31 | for j := 0; j < 200; j++ {
32 | test = append(test, fmt.Sprintf("/frodo_feed/title_vecs/3055:460108716%d", j))
33 | }
34 |
35 | for i := 0; i < b.N; i++ {
36 | deduplicateKeys(test)
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/cassandra/init_kv.cql:
--------------------------------------------------------------------------------
1 | -- DEFAULT USER: cassandra
2 | -- default pass: cassandra
3 | -- change cassandra password:
4 | -- ALTER USER cassandra WITH PASSWORD 'verysecretpass';
5 |
6 | -- create a user for doubandb
7 | CREATE USER IF NOT EXISTS doubandb_test WITH PASSWORD 'doubandb_test';
8 | CREATE ROLE IF NOT EXISTS doubandb_admin;
9 | -- Create a keyspace
10 | CREATE KEYSPACE IF NOT EXISTS doubandb WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : '3' };
11 | GRANT ALL ON KEYSPACE doubandb TO doubandb_admin;
12 | GRANT doubandb_admin TO doubandb_test;
13 |
14 | CREATE TYPE IF NOT EXISTS doubandb.bdbvalue (
15 | rtime timestamp,
16 | flag int,
17 | exptime int,
18 | cas int,
19 | body blob
20 | );
21 | -- Create a table
22 | CREATE TABLE IF NOT EXISTS doubandb.kvstore (
23 | key blob PRIMARY KEY,
24 | value doubandb.bdbvalue,
25 | ) WITH compression = {'class': 'ZstdCompressor'};
26 |
27 | -- insert a @ value for test
28 | --INSERT INTO doubandb.kvstore (key, value)
29 | -- VALUES ('@', {rtime: '2023-06-21 08:01:14.247000+0000', flag: 0, exptime: 0, cas: 0, body: null});
30 |
--------------------------------------------------------------------------------
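A minimal gocql connection sketch against the keyspace, user and consistency level set up above; the host and credentials mirror the example proxy.yaml files, and the count query is only there to prove the session works (the proxy's real session setup lives in cassandra/cstar.go, not shown here):

```go
package main

import (
	"fmt"
	"log"

	"github.com/gocql/gocql"
)

func main() {
	cluster := gocql.NewCluster("127.0.0.1:9042")
	cluster.Keyspace = "doubandb"
	cluster.Consistency = gocql.LocalOne
	cluster.Authenticator = gocql.PasswordAuthenticator{
		Username: "doubandb_test",
		Password: "doubandb_test",
	}

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatalf("connect cassandra: %s", err)
	}
	defer session.Close()

	// Count rows in the kvstore table created by init_kv.cql.
	var cnt int
	if err := session.Query("SELECT count(*) FROM kvstore").Scan(&cnt); err != nil {
		log.Fatalf("query kvstore: %s", err)
	}
	fmt.Printf("kvstore rows: %d\n", cnt)
}
```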
/cassandra/prefix_table_finder_test.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/douban/gobeansproxy/config"
8 | )
9 |
10 | var (
11 | cstarCfgTest = &config.CassandraStoreCfg{
12 | TableToKeyPrefix: map[string][]string{
13 | "a": []string{
14 | "/a",
15 | "/a/b/c",
16 | "/d/e/ffff",
17 | "/d/f/eeee",
18 | },
19 |
20 | "and": []string{
21 | "/and/anding",
22 | "/a/kkkk",
23 | },
24 | },
25 | DefaultTable: "misc",
26 | }
27 | )
28 |
29 | func TestKeyTableFinder(t *testing.T) {
30 | tree, err := NewKeyTableFinder(cstarCfgTest)
31 | if err != nil {
32 | t.Fatalf("init keytable finder err %s", err)
33 | }
34 |
35 |
36 | testData := map[string]string{
37 | "/a/fff/": "a",
38 | "/and/anding/kkk/fff": "and",
39 | "/d/e/ffff/fkljwe": "a",
40 | "iamnoting": "misc",
41 | "/a/kkkk/defa": "and",
42 | }
43 |
44 | for k, v := range testData {
45 | if tree.GetTableByKey(k) != v {
46 | t.Fatalf("%s table find err, should be: %s", k, v)
47 | }
48 | }
49 | }
50 |
51 | func BenchmarkKeyTableFinder(b *testing.B) {
52 | f, err := NewKeyTableFinder(cstarCfgTest)
53 | if err != nil {
54 | b.Failed()
55 | }
56 |
57 | for n := 0; n < b.N; n++ {
58 | k := fmt.Sprintf("send_me_toMisc_%d", n)
59 | m := f.GetTableByKey(k)
60 | if m != "misc" {
61 | panic(fmt.Sprintf("expect misc but got: %s, key: %s", m, k))
62 | }
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/tests/run_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | virtualenv venv
4 | source venv/bin/activate
5 | venv/bin/python venv/bin/pip install -r tests/pip-req.txt
6 |
7 | # echo ">> test beansdb rw ..."
8 | # export GOBEANSPROXY_TEST_BR=1 GOBEANSPROXY_TEST_BW=1
9 | # export GOBEANSPROXY_TEST_CR=0 GOBEANSPROXY_TEST_CW=0
10 | # venv/bin/python venv/bin/nosetests --with-xunit --xunit-file=unittest.xml
11 |
12 | echo ">> test beansdb/cstar dual write, bdb read ..."
13 | export GOBEANSPROXY_TEST_BR=1 GOBEANSPROXY_TEST_BW=1
14 | export GOBEANSPROXY_TEST_CR=0 GOBEANSPROXY_TEST_CW=1
15 | venv/bin/python \
16 | venv/bin/nosetests \
17 | --with-xunit -v \
18 | --xunit-file="unittest-br${GOBEANSPROXY_TEST_BR}-bw${GOBEANSPROXY_TEST_BW}-cr${GOBEANSPROXY_TEST_CR}-cw${GOBEANSPROXY_TEST_CW}.xml"
19 |
20 | echo ">> test beansdb/cstar dual write. cstar read ..."
21 | export GOBEANSPROXY_TEST_BR=0 GOBEANSPROXY_TEST_BW=1
22 | export GOBEANSPROXY_TEST_CR=1 GOBEANSPROXY_TEST_CW=1
23 | venv/bin/python \
24 | venv/bin/nosetests \
25 | --with-xunit -v \
26 | --xunit-file="unittest-br${GOBEANSPROXY_TEST_BR}-bw${GOBEANSPROXY_TEST_BW}-cr${GOBEANSPROXY_TEST_CR}-cw${GOBEANSPROXY_TEST_CW}.xml"
27 |
28 | echo ">> test cstar rw ..."
29 | export GOBEANSPROXY_TEST_BR=0 GOBEANSPROXY_TEST_BW=0
30 | export GOBEANSPROXY_TEST_CR=1 GOBEANSPROXY_TEST_CW=1
31 | venv/bin/python \
32 | venv/bin/nosetests \
33 | --with-xunit -v \
34 | --xunit-file="unittest-br${GOBEANSPROXY_TEST_BR}-bw${GOBEANSPROXY_TEST_BW}-cr${GOBEANSPROXY_TEST_CR}-cw${GOBEANSPROXY_TEST_CW}.xml"
35 |
36 | deactivate
37 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2018, Douban Inc.
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/dstore/read_only_scheduler_test.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "testing"
5 |
6 | dbcfg "github.com/douban/gobeansdb/config"
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 |
11 | func TestDivideKeyByHosts(t *testing.T) {
12 | route := new(dbcfg.RouteTable)
13 | route.Main = append(
14 | route.Main, dbcfg.Server{Addr: "127.0.0.1:7700"},
15 | dbcfg.Server{Addr: "127.0.0.1:7701"}, dbcfg.Server{Addr: "127.0.0.1:7702"},
16 | )
17 | InitGlobalManualScheduler(route, 1, NoBucketsRounRobinROSchduler)
18 |
19 | rrKeyHostCnt := map[string]int{
20 | "127.0.0.1:7700": 0,
21 | "127.0.0.1:7701": 0,
22 | "127.0.0.1:7702": 0,
23 | }
24 | for i := 1; i < 100; i++ {
25 | testKeys := []string{}
26 | for j := 0; j < i; j++ {
27 | hosts := globalScheduler.GetHostsByKey("j")
28 | assert.True(t, len(hosts) == 1, "rrr scheduler only return one host for one key")
29 | rrKeyHostCnt[hosts[0].Addr] += 1
30 | testKeys = append(testKeys, "")
31 | }
32 | result := globalScheduler.DivideKeysByBucket(testKeys)
33 | assert.Equal(t, len(route.Main), len(result), "keys should be split part max")
34 | totalK := 0
35 | for _, k := range result {
36 | totalK += len(k)
37 | }
38 | assert.Equal(t, len(testKeys), totalK, "all key must parted")
39 | assert.True(t, len(testKeys[len(testKeys)-1]) - len(testKeys[0]) < 3, "keys cap diff should less than server nums")
40 | }
41 | assert.True(t, rrKeyHostCnt["127.0.0.1:7700"] - rrKeyHostCnt["127.0.0.1:7701"] < 3, "rr should be balanced")
42 | assert.True(t, rrKeyHostCnt["127.0.0.1:7700"] - rrKeyHostCnt["127.0.0.1:7702"] < 3, "rr should be balanced")
43 | }
44 |
--------------------------------------------------------------------------------
/templates/bucketinfo.html:
--------------------------------------------------------------------------------
1 | {{ define "body" }}
2 |
3 | Bucket Info
4 | {{ range $addr, $host:= .bucketinfo}}
5 | {{ range $score, $consistent:= $host}}
6 |
7 |
8 |
9 |
10 | | Host |
11 | Score |
12 | Percentage |
13 |
14 |
15 |
16 | {{ range $arc, $resstats := $consistent}}
17 |
18 | | {{ $addr}} |
19 | {{ $score}} |
20 | {{ $arc}} |
21 |
22 |
23 | | average |
24 | Count |
25 | SumTime |
26 |
27 |
28 | {{ range $_, $response := $resstats }}
29 |
30 | {{ if gt $response.Count 0 }}
31 | | {{divide $response.Sum $response.Count }} |
32 | {{ else }}
33 | 0 |
34 | {{ end }}
35 | {{ $response.Count }} |
36 | {{ $response.Sum }} |
37 |
38 | {{ end }}
39 | {{ end }}
40 |
41 |
42 |
43 | {{ end }}
44 | {{ end }}
45 |
46 | {{ end }}
47 |
--------------------------------------------------------------------------------
/templates/base.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Gobeansproxy
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
34 |
35 | {{ template "body" . }}
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/douban/gobeansproxy
2 |
3 | require (
4 | github.com/acomagu/trie/v2 v2.0.0
5 | github.com/douban/gobeansdb v1.1.3
6 | github.com/gocql/gocql v1.5.2
7 | github.com/sirupsen/logrus v1.9.3
8 | github.com/stretchr/testify v1.8.0
9 | gopkg.in/natefinch/lumberjack.v2 v2.2.1
10 | gopkg.in/yaml.v2 v2.4.0
11 | gopkg.in/yaml.v3 v3.0.1
12 | )
13 |
14 | require (
15 | github.com/beorn7/perks v1.0.1 // indirect
16 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
17 | github.com/davecgh/go-spew v1.1.1 // indirect
18 | github.com/golang/protobuf v1.5.3 // indirect
19 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
20 | github.com/kr/text v0.2.0 // indirect
21 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
22 | github.com/pmezard/go-difflib v1.0.0 // indirect
23 | github.com/prometheus/client_model v0.3.0 // indirect
24 | github.com/prometheus/common v0.42.0 // indirect
25 | github.com/prometheus/procfs v0.10.1 // indirect
26 | github.com/rogpeppe/go-internal v1.11.0 // indirect
27 | github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da // indirect
28 | github.com/spaolacci/murmur3 v1.1.0 // indirect
29 | golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 // indirect
30 | golang.org/x/sys v0.10.0 // indirect
31 | google.golang.org/protobuf v1.30.0 // indirect
32 | gopkg.in/inf.v0 v0.9.1 // indirect
33 | )
34 |
35 | require (
36 | github.com/golang/snappy v0.0.4 // indirect
37 | github.com/prometheus/client_golang v1.16.0
38 | golang.org/x/sync v0.3.0
39 | )
40 |
41 | go 1.20
42 |
43 | // for local dev
44 | // replace github.com/douban/gobeansdb => ../gobeansdb
45 |
46 | // replace github.com/douban/gobeansproxy => ../gobeansproxy
47 |
--------------------------------------------------------------------------------
/cassandra/logger.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path/filepath"
7 |
8 | "github.com/douban/gobeansproxy/config"
9 | logrus "github.com/sirupsen/logrus"
10 | rotateLogger "gopkg.in/natefinch/lumberjack.v2"
11 | )
12 |
13 | var (
14 | log = logrus.New()
15 | dumpLogger = logrus.New()
16 | )
17 |
18 | func setLogLevel(logLevel string) {
19 | l, err := logrus.ParseLevel(logLevel)
20 | if err != nil {
21 | 		log.Warnf("log level %s not supported, using info level", logLevel); l = logrus.InfoLevel // fall back instead of the zero (panic) level
22 | }
23 | log.SetLevel(l)
24 | log.SetFormatter(&logrus.TextFormatter{
25 | DisableColors: false,
26 | FullTimestamp: true,
27 | })
28 | }
29 |
30 | type DualWriteErrorMgr struct {
31 | EFile string
32 | ELogger *logrus.Logger
33 | }
34 |
35 | func NewDualWErrMgr(ecfg *config.DualWErrCfg, logger *logrus.Logger) (*DualWriteErrorMgr, error) {
36 | if logger == nil {
37 | logger = dumpLogger
38 | }
39 |
40 | setLogLevel(ecfg.LoggerLevel)
41 |
42 | // check if target folder exists
43 | if stat, err := os.Stat(ecfg.DumpToDir); err != nil || !stat.IsDir() {
44 | return nil, fmt.Errorf("%s is not a dir or not exists", ecfg.DumpToDir)
45 | }
46 |
47 | // set dump Logger
48 | logger.SetFormatter(&logrus.JSONFormatter{})
49 | dumpFile := filepath.Join(ecfg.DumpToDir, ecfg.FName)
50 | logger.SetOutput(&rotateLogger.Logger{
51 | Filename: dumpFile,
52 | MaxSize: ecfg.RotateSize,
53 | Compress: ecfg.Compress,
54 | MaxAge: ecfg.MaxAges,
55 | MaxBackups: ecfg.MaxBackups,
56 | })
57 |
58 | return &DualWriteErrorMgr{
59 | EFile: dumpFile,
60 | ELogger: logger,
61 | }, nil
62 | }
63 |
64 | func (e *DualWriteErrorMgr) HandleErr(key, op string, err error) {
65 | e.ELogger.WithFields(logrus.Fields{
66 | "key": key,
67 | "op": op,
68 | }).Error(err)
69 | }
70 |
--------------------------------------------------------------------------------
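A hypothetical usage sketch of NewDualWErrMgr. Only the config.DualWErrCfg fields referenced above are assumed; the values mirror the dual_write_err_cfg blocks in the example proxy.yaml files, and the key/operation passed to HandleErr are made up for illustration:

```go
package main

import (
	"fmt"
	stdlog "log"

	"github.com/douban/gobeansproxy/cassandra"
	"github.com/douban/gobeansproxy/config"
)

func main() {
	// NewDualWErrMgr requires DumpToDir to already exist as a directory.
	ecfg := &config.DualWErrCfg{
		DumpToDir:   "/tmp/gobeansproxy_prefix/proxy/",
		FName:       "dual_write_err.log",
		LoggerLevel: "info",
		RotateSize:  100, // MB, handed to lumberjack's MaxSize
		Compress:    true,
		MaxAges:     7,
		MaxBackups:  100,
	}

	mgr, err := cassandra.NewDualWErrMgr(ecfg, nil) // nil selects the package-level dump logger
	if err != nil {
		stdlog.Fatalf("init dual-write error manager: %s", err)
	}

	// Record a failed dual write; the entry lands in dual_write_err.log as JSON.
	mgr.HandleErr("/test_prefix_c/some_key", "set", fmt.Errorf("cassandra write timeout"))
}
```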
/dstore/consistent_test.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | // Basic functionality.
12 | func TestConsistent(t *testing.T) {
13 | assert := assert.New(t)
14 |
15 | hashs := NewPartition(100, 3)
16 |
17 | 	// Counters.
18 | counter := make(map[int]int)
19 | loop := 1000000
20 | threshold := loop / 3 / 10 * 4
21 |
22 | 	// Tally how sequential keys are distributed across offsets.
23 | for i := 0; i < loop; i++ {
24 | s := hashs.offsetGet(fmt.Sprintf("key%d", i))
25 | counter[s] += 1
26 | }
27 |
28 | 	// Check that every count exceeds the minimum threshold.
29 | for _, v := range counter {
30 | assert.True(v > threshold)
31 | }
32 | }
33 |
34 | // Partial failure (offset removal).
35 | func TestConsistentRemove(t *testing.T) {
36 | assert := assert.New(t)
37 | hashs := NewPartition(100, 3)
38 | hashs.remove(0)
39 | loop := 1000000
40 | for i := 0; i < loop; i++ {
41 | s := hashs.offsetGet(fmt.Sprintf("key%d", i))
42 | assert.NotEqual(s, 0)
43 | }
44 | t.Log(hashs)
45 | hashs.remove(0)
46 | t.Log(hashs)
47 | }
48 |
49 | func TestConsistentBalance(t *testing.T) {
50 | assert := assert.New(t)
51 | hashs := NewPartition(100, 3)
52 | hashs.reBalance(0, 2, 100)
53 | assert.Equal(hashs.offsets[2], 99)
54 | }
55 |
56 | func TestConsistentMulti(t *testing.T) {
57 | assert := assert.New(t)
58 |
59 | hashs1 := NewPartition(100, 3)
60 |
61 | hashs2 := NewPartition(100, 3)
62 |
63 | 	// Repeated lookups return the same offset.
64 | s1 := hashs1.offsetGet("abc")
65 | s2 := hashs1.offsetGet("abc")
66 | assert.Equal(s1, s2)
67 |
68 | s1 = hashs1.offsetGet("abc")
69 | s2 = hashs2.offsetGet("abc")
70 | assert.Equal(s1, s2)
71 |
72 | }
73 |
74 | // Hash function performance.
75 | func BenchmarkConsistentHash(b *testing.B) {
76 | h := NewPartition(100, 3)
77 |
78 | b.ResetTimer()
79 | for i := 0; i < b.N; i++ {
80 | h.hash("a" + strconv.Itoa(i))
81 | }
82 | b.StopTimer()
83 | }
84 |
--------------------------------------------------------------------------------
/cassandra/udt.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | mc "github.com/douban/gobeansdb/memcache"
8 | "github.com/gocql/gocql"
9 | )
10 |
11 | type BDBValue struct {
12 | ReceiveTime time.Time `cql:"rtime"`
13 | Flag int `cql:"flag"`
14 | Exptime int `cql:"exptime"`
15 | Cas int `cql:"cas"`
16 | Body []byte `cql:"body"`
17 | }
18 |
19 | func NewBDBValue(item *mc.Item) *BDBValue {
20 | return &BDBValue{
21 | ReceiveTime: item.ReceiveTime,
22 | Flag: item.Flag,
23 | Exptime: item.Exptime,
24 | Cas: item.Cas,
25 | Body: item.CArray.Body,
26 | }
27 | }
28 |
29 | func (b *BDBValue) ToMCItem() (*mc.Item, error) {
30 | item := &mc.Item{
31 | ReceiveTime: b.ReceiveTime,
32 | Flag: b.Flag,
33 | Exptime: b.Exptime,
34 | Cas: b.Cas,
35 | }
36 | ok := item.Alloc(len(b.Body))
37 | if !ok {
38 | logger.Errorf("Alloc mem err for len %d", len(b.Body))
39 | return nil, fmt.Errorf("alloc mem error")
40 | }
41 | copy(item.CArray.Body, b.Body)
42 | return item, nil
43 | }
44 |
45 | func (b BDBValue) MarshalUDT(name string, info gocql.TypeInfo) ([]byte, error) {
46 | switch name {
47 | case "rtime":
48 | return gocql.Marshal(info, b.ReceiveTime)
49 | case "flag":
50 | return gocql.Marshal(info, b.Flag)
51 | case "exptime":
52 | return gocql.Marshal(info, b.Exptime)
53 | case "cas":
54 | return gocql.Marshal(info, b.Cas)
55 | case "body":
56 | return gocql.Marshal(info, b.Body)
57 | default:
58 | return nil, fmt.Errorf("unknown column for position: %q", name)
59 | }
60 | }
61 |
62 | func (b *BDBValue) UnmarshalUDT(name string, info gocql.TypeInfo, data []byte) error {
63 | switch name {
64 | case "rtime":
65 | return gocql.Unmarshal(info, data, &b.ReceiveTime)
66 | case "flag":
67 | return gocql.Unmarshal(info, data, &b.Flag)
68 | case "exptime":
69 | return gocql.Unmarshal(info, data, &b.Exptime)
70 | case "cas":
71 | return gocql.Unmarshal(info, data, &b.Cas)
72 | case "body":
73 | return gocql.Unmarshal(info, data, &b.Body)
74 | default:
75 | return fmt.Errorf("unknown column for position: %q", name)
76 | }
77 |
78 | }
79 |
--------------------------------------------------------------------------------
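A sketch of writing and reading a BDBValue through gocql, which invokes MarshalUDT/UnmarshalUDT automatically because the type satisfies gocql.UDTMarshaler and gocql.UDTUnmarshaler. It assumes a *gocql.Session already bound to the doubandb keyspace from init_kv.cql; the key and body are illustrative:

```go
package example // hypothetical helper, for illustration only

import (
	"fmt"
	"log"
	"time"

	"github.com/douban/gobeansproxy/cassandra"
	"github.com/gocql/gocql"
)

// roundTrip writes one value for key and reads it back as a memcache item.
func roundTrip(session *gocql.Session, key []byte) error {
	val := &cassandra.BDBValue{
		ReceiveTime: time.Now(),
		Body:        []byte("hello"),
	}

	// gocql calls MarshalUDT for every field of the bdbvalue UDT.
	if err := session.Query(
		"INSERT INTO kvstore (key, value) VALUES (?, ?)", key, val,
	).Exec(); err != nil {
		return fmt.Errorf("write %q: %w", key, err)
	}

	// UnmarshalUDT fills the struct back in on read.
	got := &cassandra.BDBValue{}
	if err := session.Query(
		"SELECT value FROM kvstore WHERE key = ?", key,
	).Scan(got); err != nil {
		return fmt.Errorf("read %q: %w", key, err)
	}

	item, err := got.ToMCItem()
	if err != nil {
		return err
	}
	log.Printf("flag=%d, body=%d bytes", item.Flag, len(item.CArray.Body))
	return nil
}
```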
/dstore/ringqueue.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "errors"
5 | "sync"
6 | "time"
7 | )
8 |
9 | const (
10 | 	TIMEINTERVAL = 30 * 1000 * 1000 * 1000 // backend dial timeout is 3s; slot data older than 30s is discarded (value in nanoseconds)
11 | QUEUECAP = 60
12 | )
13 |
14 | const (
15 | latencyDataType = iota
16 | errorDataType
17 | )
18 |
19 | type Response struct {
20 | ReqTime time.Time
21 | Count int
22 | Sum float64
23 | }
24 |
25 | type RingQueue struct {
26 | resData *[QUEUECAP]Response
27 | errData *[QUEUECAP]Response
28 | sync.RWMutex
29 | }
30 |
31 | var (
32 | ErrQueueFull = errors.New("queue full")
33 | ErrQueueEmpty = errors.New("queue empty")
34 | )
35 |
36 | func NewRingQueue() *RingQueue {
37 | return &RingQueue{
38 | resData: &[QUEUECAP]Response{},
39 | errData: &[QUEUECAP]Response{},
40 | }
41 | }
42 |
43 | func (q *RingQueue) Push(start time.Time, ResTime float64, dataType int) error {
44 | second := start.Second()
45 | var data *[QUEUECAP]Response
46 | switch dataType {
47 | case latencyDataType:
48 | data = q.resData
49 | case errorDataType:
50 | data = q.errData
51 | }
52 |
53 | 	// TODO: use separate locks for errData / resData
54 | q.Lock()
55 | defer q.Unlock()
56 | 	if start.Sub(data[second].ReqTime) > TIMEINTERVAL { // stale slot: reset, then accumulate below
57 | 		data[second].Sum = 0
58 | 		data[second].Count = 0
59 | data[second].ReqTime = start
60 | }
61 | data[second].Sum += ResTime
62 | data[second].ReqTime = start
63 | data[second].Count++
64 |
65 | return nil
66 | }
67 |
68 | func (q *RingQueue) Get(num, dataType int) (responses []Response) {
69 | now := time.Now()
70 | second := now.Second()
71 | offset := second - num
72 |
73 | var data *[QUEUECAP]Response
74 | switch dataType {
75 | case latencyDataType:
76 | data = q.resData
77 | case errorDataType:
78 | data = q.errData
79 | }
80 | q.RLock()
81 | defer q.RUnlock()
82 | if offset > 0 {
83 | return data[offset:second]
84 | } else {
85 | return append(data[len(q.resData)+offset:], data[0:second]...)
86 | }
87 | }
88 |
89 | func (q *RingQueue) clear() {
90 | q.Lock()
91 | 	defer q.Unlock()
92 | for i := 0; i < QUEUECAP; i++ {
93 | q.errData[i] = Response{}
94 | q.resData[i] = Response{}
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
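A small in-package sketch (written as a test, not part of the repo) showing how RingQueue aggregates one-second slots and how recent slots are read back, which is the data the host scoring code consumes:

```go
package dstore

import (
	"testing"
	"time"
)

// TestRingQueueSketch is illustrative only: push a few latency samples and
// read back the aggregates for the most recent 10 one-second slots.
func TestRingQueueSketch(t *testing.T) {
	q := NewRingQueue()

	// Record three requests; the queue only sums and counts whatever unit
	// the caller uses consistently (microseconds here).
	for i := 0; i < 3; i++ {
		q.Push(time.Now(), 1500, latencyDataType)
	}

	for _, r := range q.Get(10, latencyDataType) {
		if r.Count > 0 {
			t.Logf("slot %v: count=%d avg=%.1f", r.ReqTime, r.Count, r.Sum/float64(r.Count))
		}
	}
}
```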
/gobeansproxy/gobeansproxy.go:
--------------------------------------------------------------------------------
1 | package gobeansproxy
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "log"
7 | "runtime"
8 |
9 | dbcfg "github.com/douban/gobeansdb/config"
10 | "github.com/douban/gobeansdb/loghub"
11 | mc "github.com/douban/gobeansdb/memcache"
12 |
13 | "github.com/douban/gobeansproxy/config"
14 | "github.com/douban/gobeansproxy/dstore"
15 | )
16 |
17 | var (
18 | server *mc.Server
19 | proxyConf = &config.Proxy
20 | logger = loghub.ErrorLogger
21 | accessLogger = loghub.AccessLogger
22 | )
23 |
24 | func Main() {
25 | 	var version = flag.Bool("version", false, "print version of gobeansproxy")
26 | var confdir = flag.String("confdir", "", "path of proxy config dir")
27 | var dumpconf = flag.Bool("dumpconf", false, "print configuration")
28 | flag.Parse()
29 |
30 | if *version {
31 | fmt.Println("gobeansproxy version", config.Version)
32 | return
33 | } else {
34 | log.Printf("gobeansproxy version %s", config.Version)
35 | }
36 |
37 | proxyConf.InitDefault()
38 | if *confdir != "" {
39 | log.Printf("use confdir %s", *confdir)
40 | proxyConf.Load(*confdir)
41 | }
42 | log.Printf("server port: %d, web port: %d", proxyConf.Port, proxyConf.WebPort)
43 |
44 | if *dumpconf {
45 | config.DumpConfig(proxyConf)
46 | return
47 | }
48 |
49 | runtime.GOMAXPROCS(proxyConf.Threads)
50 |
51 | loghub.InitLogger(proxyConf.ErrorLog, proxyConf.AccessLog, proxyConf.AnalysisLog)
52 | logger.Infof("start gobeansproxy")
53 | logger.Infof("gobeansproxy version %s starting at %d, config: %#v",
54 | config.Version, proxyConf.Port, proxyConf)
55 | logger.Infof("route table: %#v", config.Route)
56 |
57 | if proxyConf.DStoreConfig.Enable {
58 | dstore.InitGlobalManualScheduler(config.Route, proxyConf.N, proxyConf.Scheduler)
59 | }
60 | storage := new(dstore.Storage)
61 | err := storage.InitStorageEngine(proxyConf)
62 | if err != nil {
63 | log.Fatalf("Init storage engine err: %s", err)
64 | }
65 | addr := fmt.Sprintf("%s:%d", proxyConf.Listen, proxyConf.Port)
66 | server = mc.NewServer(storage)
67 | server.Listen(addr)
68 |
69 | logger.Infof("ready")
70 | log.Printf("ready")
71 |
72 | server.HandleSignals(proxyConf.ErrorLog, proxyConf.AccessLog, proxyConf.AnalysisLog)
73 | dbcfg.AllowReload = true
74 | startWeb()
75 | server.Serve()
76 | }
77 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import errno
3 | import yaml
4 | try:
5 | from yaml import CLoader as Loader
6 | except ImportError:
7 | from yaml import Loader
8 | import string
9 | import socket
10 | import random
11 | import urllib.request, urllib.error, urllib.parse
12 | import time
13 | import shlex
14 | import subprocess
15 |
16 |
17 | def mkdir_p(path):
18 | "like `mkdir -p`"
19 | try:
20 | os.makedirs(path)
21 | except OSError as exc:
22 | if exc.errno == errno.EEXIST and os.path.isdir(path):
23 | pass
24 | else:
25 | raise
26 |
27 |
28 | def random_string(n):
29 | s = string.ascii_letters
30 | result = ""
31 | for _ in range(n):
32 | result += random.choice(s)
33 | return result
34 |
35 |
36 | def gethttp(addr, path):
37 | url = "http://%s/%s" % (addr, path)
38 | response = urllib.request.urlopen(url)
39 | return response.read()
40 |
41 |
42 | def start_cmd(cmd):
43 | print("start", cmd)
44 | log_file = '/tmp/beansdb/log.txt'
45 | mkdir_p(os.path.dirname(log_file))
46 | with open(log_file, 'a') as f:
47 | p = subprocess.Popen(
48 | cmd if isinstance(cmd, (tuple, list)) else shlex.split(cmd),
49 | stderr=f,
50 | )
51 | time.sleep(0.2)
52 | if p.poll() is not None:
53 | raise Exception("cannot start %s" % (cmd))
54 | return p
55 |
56 | def stop_cmd(popen):
57 | if popen.poll() is not None:
58 | return
59 | popen.terminate()
60 | popen.wait()
61 |
62 |
63 | def get_server_addr(conf_dir, server_name):
64 | if server_name == 'gobeansdb':
65 | conf_file = 'global.yaml'
66 | key = 'server'
67 | elif server_name == 'gobeansproxy':
68 | conf_file = 'proxy.yaml'
69 | key = 'proxy'
70 | else:
71 | raise ValueError(server_name)
72 | conf = load_yaml(os.path.join(conf_dir, conf_file))
73 | port = conf[key]['port']
74 | webport = conf[key]['webport']
75 | host = conf[key]['hostname'] or socket.gethostname()
76 |
77 | def port_to_addr(port):
78 | return '%s:%s' % (host, port)
79 |
80 | return list(map(port_to_addr, [port, webport]))
81 |
82 |
83 | def load_yaml(filepath):
84 | with open(filepath) as f:
85 | return yaml.load(f, Loader=Loader)
86 |
--------------------------------------------------------------------------------
/cassandra/prefix_cfg.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/douban/gobeansproxy/config"
7 | )
8 |
9 | type PrefixDisPatcher interface {
10 | LoadStaticCfg(string) (*config.CassandraStoreCfg, error)
11 | LoadCfg(*config.CassandraStoreCfg, *CassandraStore) error
12 | Upsert(*config.CassandraStoreCfg, map[string][]string, *CassandraStore) error
13 | DeletePrefix(*config.CassandraStoreCfg, string, *CassandraStore) error
14 | GetCurrentMap() map[string]string
15 | }
16 |
17 | type DisPatcherCfg config.PrefixDisPatcherCfg
18 |
19 | func (config *DisPatcherCfg) LoadFromDB(
20 | cqlStore *CassandraStore) (prefixKeys [][]rune, vstatus []string, err error) {
21 | r := cqlStore.session.Query(
22 | fmt.Sprintf(
23 | "select prefix, value from %s.%s",
24 | config.CfgFromCstarKeySpace,
25 | config.CfgFromCstarTable,
26 | ),
27 | ).Iter().Scanner()
28 |
29 | for r.Next() {
30 | var (
31 | prefix string
32 | value string
33 | )
34 |
35 | err := r.Scan(&prefix, &value)
36 | if err != nil {
37 | return nil, nil, fmt.Errorf("load cfg from c* table err: %s", err)
38 | }
39 |
40 | prefixKeys = append(prefixKeys, []rune(prefix))
41 | vstatus = append(vstatus, value)
42 | }
43 |
44 | if err := r.Err(); err != nil {
45 | return nil, nil, fmt.Errorf("load cfg from c* iter err: %s", err)
46 | }
47 |
48 | return prefixKeys, vstatus, err
49 | }
50 |
51 | func (c *DisPatcherCfg) SaveToDB(m map[string][]string, cqlStore *CassandraStore) error {
52 | for value, prefix := range m {
53 | for _, p := range prefix {
54 | err := cqlStore.session.Query(
55 | fmt.Sprintf(
56 | "insert into %s.%s (prefix, value) values (?, ?)",
57 | c.CfgFromCstarKeySpace, c.CfgFromCstarTable,
58 | ), p, value,
59 | ).Exec()
60 |
61 | if err != nil {
62 | 				return fmt.Errorf("insert %s -> %s err: %s", p, value, err)
63 | }
64 | }
65 | }
66 | return nil
67 | }
68 |
69 | func (c *DisPatcherCfg) DeletePrefixCfg(prefix string, cqlStore *CassandraStore) error {
70 | err := cqlStore.session.Query(
71 | fmt.Sprintf(
72 | "delete from %s.%s where prefix = ?",
73 | c.CfgFromCstarKeySpace, c.CfgFromCstarTable,
74 | ), prefix,
75 | ).Exec()
76 |
77 | if err != nil {
78 | return fmt.Errorf("delete cfg prefix %s err: %s", prefix, err)
79 | }
80 | return nil
81 | }
82 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/dstore-only/conf/proxy.yaml:
--------------------------------------------------------------------------------
1 | cassandra:
2 | default_key_space: doubandb
3 | default_table: kvstore
4 | enable: false
5 | hosts:
6 | - 127.0.0.1:9042
7 | timeout_ms: 1000
8 | connect_timeout_ms: 3000
9 | write_timeout_ms: 1000
10 | retry_num: 3
11 | reconnect_interval_sec: 180
12 | max_conn_for_getm: 10
13 | num_conns: 10
14 | username: "doubandb_test"
15 | password: "doubandb_test"
16 | consistency: "local_one"
17 | prefix_table_dispatcher_cfg:
18 | # if not enable will use default keyspace and table
19 | enable: true
20 | static:
21 | # dispatch prefix1 key to table table_name1
22 | kvstore_prefix_a:
23 | - "/prefix_a"
24 | cfg_table: bdb_prefix_table_finder
25 | cfg_keyspace: doubandb
26 | prefix_rw_dispatcher_cfg:
27 | enable: true
28 | static:
29 | # dispatch prefix /test_prefix_c/ to dual write
30 | br1w1cr0w1:
31 | - "/test_prefix_c/"
32 | - "/test_prefix_d/"
33 | br0w0cr1w1:
34 | - "test_"
35 | cfg_table: bdb_prefix_rw_switcher
36 | cfg_keyspace: doubandb
37 | default_storage: "br1w1cr0w0"
38 | dual_write_err_cfg:
39 | dump_to_dir: /tmp/log/gobeansproxy/proxy/
40 | log_file_name: dual_write_err.log
41 | logger_level: "INFO"
42 | rotate_size_mb: 100
43 | compress: true
44 | max_ages: 7
45 | max_backups: 100
46 | dstore:
47 | enable: true
48 | connect_timeout_ms: 300
49 | dial_fail_silence_ms: 5000
50 | error_seconds: 10
51 | item_size_stats: 4096
52 | max_connect_errors: 3
53 | max_free_conns_per_host: 20
54 | n: 3
55 | r: 1
56 | read_timeout_ms: 2000
57 | response_time_min: 4000
58 | response_time_seconds: 10
59 | score_deviation: 10
60 | w: 2
61 | write_timeout_ms: 2000
62 | mc:
63 | body_big_str: 5M
64 | body_c_str: 0K
65 | body_max_str: 50M
66 | max_key_len: 250
67 | max_req: 16
68 | proxy:
69 | accesslog: /tmp/log/gobeansproxy/proxy/access.log
70 | errorlog: /tmp/log/gobeansproxy/proxy/error.log
71 | hostname: 127.0.0.1
72 | listen: 0.0.0.0
73 | port: 47907
74 | staticdir: /var/lib/gobeansproxy
75 | threads: 8
76 | webport: 47910
77 | zkpath: /gobeansproxy/test
78 | zkservers: []
79 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/rivenbeansproxy/conf/proxy.yaml:
--------------------------------------------------------------------------------
1 | cassandra:
2 | default_key_space: doubandb
3 | default_table: kvstore
4 | enable: true
5 | hosts:
6 | - 127.0.0.1:9042
7 | timeout_ms: 1000
8 | connect_timeout_ms: 3000
9 | write_timeout_ms: 1000
10 | retry_num: 3
11 | reconnect_interval_sec: 180
12 | max_conn_for_getm: 10
13 | num_conns: 10
14 | username: "doubandb_test"
15 | password: "doubandb_test"
16 | consistency: "local_one"
17 | prefix_table_dispatcher_cfg:
18 | # if not enable will use default keyspace and table
19 | enable: true
20 | static:
21 | # dispatch prefix1 key to table table_name1
22 | kvstore_ark:
23 | - "/ark"
24 | cfg_table: bdb_prefix_table_finder
25 | cfg_keyspace: doubandb
26 | prefix_rw_dispatcher_cfg:
27 | enable: false
28 | static:
29 | # dispatch prefix /test_prefix_c/ to dual write
30 | br1w1cr0w1:
31 | - "/test_prefix_c/"
32 | - "/test_prefix_d/"
33 | - "/arkark/"
34 | br0w0cr1w1:
35 | - "test_"
36 | cfg_table: bdb_prefix_rw_switcher
37 | cfg_keyspace: doubandb
38 | default_storage: "br0w0cr1w0"
39 | dual_write_err_cfg:
40 | dump_to_dir: /tmp/gobeansproxy_prefix/proxy/
41 | log_file_name: dual_write_err.log
42 | logger_level: "INFO"
43 | rotate_size_mb: 100
44 | compress: true
45 | max_ages: 7
46 | max_backups: 100
47 | dstore:
48 | enable: false
49 | connect_timeout_ms: 300
50 | dial_fail_silence_ms: 5000
51 | error_seconds: 10
52 | item_size_stats: 4096
53 | max_connect_errors: 3
54 | max_free_conns_per_host: 20
55 | n: 3
56 | r: 1
57 | read_timeout_ms: 2000
58 | response_time_min: 4000
59 | response_time_seconds: 10
60 | score_deviation: 10
61 | w: 2
62 | write_timeout_ms: 2000
63 | mc:
64 | body_big_str: 5M
65 | body_c_str: 0K
66 | body_max_str: 50M
67 | max_key_len: 250
68 | max_req: 16
69 | proxy:
70 | accesslog: /tmp/gobeansproxy_prefix/proxy/access.log
71 | errorlog: /tmp/gobeansproxy_prefix/proxy/error.log
72 | hostname: 127.0.0.1
73 | listen: 0.0.0.0
74 | port: 47907
75 | staticdir: /var/lib/gobeansproxy
76 | threads: 8
77 | webport: 47910
78 | zkpath: /gobeansproxy/test
79 | zkservers:
80 | - zk1:2181
81 |
--------------------------------------------------------------------------------
/.doubanpde/scripts/bdb/gobeansproxy/prefix-switch-cfg/conf/proxy.yaml:
--------------------------------------------------------------------------------
1 | cassandra:
2 | default_key_space: doubandb
3 | default_table: kvstore
4 | enable: true
5 | hosts:
6 | - 127.0.0.1:9042
7 | timeout_ms: 1000
8 | connect_timeout_ms: 3000
9 | write_timeout_ms: 1000
10 | retry_num: 3
11 | reconnect_interval_sec: 180
12 | max_conn_for_getm: 10
13 | num_conns: 10
14 | username: "doubandb_test"
15 | password: "doubandb_test"
16 | consistency: "local_one"
17 | prefix_table_dispatcher_cfg:
18 | # if not enable will use default keyspace and table
19 | enable: true
20 | static:
21 | # dispatch prefix1 key to table table_name1
22 | kvstore_ark:
23 | - "/ark"
24 | cfg_table: bdb_prefix_table_finder
25 | cfg_keyspace: doubandb
26 | prefix_rw_dispatcher_cfg:
27 | enable: true
28 | static:
29 | # dispatch prefix /test_prefix_c/ to dual write
30 | br1w1cr0w1:
31 | - "/test_prefix_c/"
32 | - "/test_prefix_d/"
33 | - "/arkark/"
34 | br0w0cr1w1:
35 | - "test_"
36 | cfg_table: bdb_prefix_rw_switcher
37 | cfg_keyspace: doubandb
38 | default_storage: "br0w1cr1w1"
39 | dual_write_err_cfg:
40 | dump_to_dir: /tmp/gobeansproxy_prefix/proxy/
41 | log_file_name: dual_write_err.log
42 | logger_level: "INFO"
43 | rotate_size_mb: 100
44 | compress: true
45 | max_ages: 7
46 | max_backups: 100
47 | dstore:
48 | enable: true
49 | connect_timeout_ms: 300
50 | dial_fail_silence_ms: 5000
51 | error_seconds: 10
52 | item_size_stats: 4096
53 | max_connect_errors: 3
54 | max_free_conns_per_host: 20
55 | n: 3
56 | r: 1
57 | read_timeout_ms: 2000
58 | response_time_min: 4000
59 | response_time_seconds: 10
60 | score_deviation: 10
61 | w: 2
62 | write_timeout_ms: 2000
63 | mc:
64 | body_big_str: 5M
65 | body_c_str: 0K
66 | body_max_str: 50M
67 | max_key_len: 250
68 | max_req: 16
69 | proxy:
70 | accesslog: /tmp/gobeansproxy_prefix/proxy/access.log
71 | errorlog: /tmp/gobeansproxy_prefix/proxy/error.log
72 | hostname: 127.0.0.1
73 | listen: 0.0.0.0
74 | port: 47907
75 | staticdir: /var/lib/gobeansproxy
76 | threads: 8
77 | webport: 47910
78 | zkpath: /gobeansproxy/test
79 | zkservers:
80 | - zk1:2181
81 |
--------------------------------------------------------------------------------
/dstore/metrics.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "github.com/prometheus/client_golang/prometheus"
5 | )
6 |
7 | var (
8 | totalReqs *prometheus.CounterVec
9 | errorReqs *prometheus.CounterVec
10 | rrrStoreReqs *prometheus.CounterVec
11 | rrrStoreErr *prometheus.CounterVec
12 | rrrStoreLag *prometheus.GaugeVec
13 | cmdReqDurationSeconds *prometheus.HistogramVec
14 | cmdE2EDurationSeconds *prometheus.HistogramVec
15 | BdbProxyPromRegistry *prometheus.Registry
16 | )
17 |
18 | func init() {
19 | BdbProxyPromRegistry = prometheus.NewRegistry()
20 | totalReqs = prometheus.NewCounterVec(
21 | prometheus.CounterOpts{
22 | Namespace: "gobeansproxy",
23 | Name: "total_reqs",
24 | Help: "total requests counter",
25 | },
26 |
27 | []string{"cmd", "store"},
28 | )
29 | BdbProxyPromRegistry.MustRegister(totalReqs)
30 |
31 | errorReqs = prometheus.NewCounterVec(
32 | prometheus.CounterOpts{
33 | Namespace: "gobeansproxy",
34 | Name: "error_reqs",
35 | Help: "error requests counter",
36 | },
37 |
38 | []string{"cmd", "store"},
39 | )
40 | BdbProxyPromRegistry.MustRegister(errorReqs)
41 |
42 | cmdE2EDurationSeconds = prometheus.NewHistogramVec(
43 | prometheus.HistogramOpts{
44 | Namespace: "gobeansproxy",
45 | Name: "cmd_e2e_duration_seconds",
46 | Help: "cmd e2e duration",
47 | Buckets: []float64{
48 | 0.001, 0.003, 0.005,
49 | 0.01, 0.03, 0.05, 0.07,
50 | 0.1, 0.3, 0.5, 0.7,
51 | 1, 2, 5,
52 | },
53 | },
54 |
55 | []string{"cmd"},
56 | )
57 | BdbProxyPromRegistry.MustRegister(cmdE2EDurationSeconds)
58 |
59 | rrrStoreReqs = prometheus.NewCounterVec(
60 | prometheus.CounterOpts{
61 | Namespace: "gobeansproxy",
62 | Name: "rrr_store_reqs",
63 | Help: "read only rr backends req counter",
64 | },
65 | []string{"host"},
66 | )
67 | BdbProxyPromRegistry.MustRegister(rrrStoreReqs)
68 |
69 | rrrStoreErr = prometheus.NewCounterVec(
70 | prometheus.CounterOpts{
71 | Namespace: "gobeansproxy",
72 | Name: "rrr_store_conn_err",
73 | Help: "store connection error counter",
74 | },
75 | []string{"host", "conn"},
76 | )
77 | BdbProxyPromRegistry.MustRegister(rrrStoreErr)
78 |
79 | rrrStoreLag = prometheus.NewGaugeVec(
80 | prometheus.GaugeOpts{
81 | Namespace: "gobeansproxy",
82 | Name: "rrr_store_lag_ms",
83 | Help: "round robin read only sch store lag",
84 | },
85 | []string{"host"},
86 | )
87 | BdbProxyPromRegistry.MustRegister(rrrStoreLag)
88 | }
89 |
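
All of these collectors are registered on the dedicated BdbProxyPromRegistry rather than the global default registry, so a scraper only sees them if that registry is exposed over HTTP. A minimal sketch of doing that with promhttp; the proxy's own web server does its own wiring, and the port here is an arbitrary choice for illustration:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/douban/gobeansproxy/dstore"
)

func main() {
	// Serve only the proxy's dedicated registry, not the global default one.
	http.Handle("/metrics", promhttp.HandlerFor(
		dstore.BdbProxyPromRegistry,
		promhttp.HandlerOpts{},
	))
	// Port 9090 is an arbitrary choice for this sketch.
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```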
--------------------------------------------------------------------------------
/tests/abnormal_cmd_test.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import telnetlib
3 | from tests.base import BaseTest
4 |
5 |
6 | class AbnormalCmdTest(BaseTest):
7 | def setUp(self):
8 | BaseTest.setUp(self)
9 | self.invalid_key = '/this/is/a/bad/key/%s' % chr(15)
10 |
11 | def run_cmd_by_telnet(self, cmd, expected, timeout=2):
12 | addr, port = self.proxy.addr.split(':')
13 | t = telnetlib.Telnet(addr, port)
14 | t.write(f'{cmd}\r\n'.encode())
15 | out = t.read_until(b'\n', timeout=timeout)
16 | t.write(b'quit\n')
17 | t.close()
18 | r = out.strip(b'\r\n')
19 | self.assertEqual(r, expected.encode())
20 |
21 | def test_get(self):
22 | # get not exist key
23 | cmd = 'get /test/get'
24 | self.run_cmd_by_telnet(cmd, 'END')
25 |
26 | # invalid key
27 | cmd = 'get %s' % self.invalid_key
28 | self.run_cmd_by_telnet(cmd, 'END')
29 | self.checkCounterZero()
30 |
31 | def test_set(self):
32 | # invalid key
33 | cmd = 'set %s 0 0 3\r\naaa' % self.invalid_key
34 | if self.bdb_write_enable:
35 | self.run_cmd_by_telnet(cmd, 'SERVER_ERROR write failed')
36 | elif self.cstar_write_enable:
37 | self.run_cmd_by_telnet(cmd, 'STORED')
38 |
39 | cmd = 'set /test/set 0 0 3\r\naaaa'
40 | self.run_cmd_by_telnet(cmd, 'CLIENT_ERROR bad data chunk')
41 |
42 | self.checkCounterZero()
43 |
44 | def test_delete(self):
45 | key = '/delete/not/exist/key'
46 | cmd = 'delete %s' % key
47 | if self.bdb_write_enable and not self.cstar_write_enable:
48 | self.run_cmd_by_telnet(cmd, 'NOT FOUND')
49 |
50 | if self.cstar_write_enable:
51 |             # cstar will delete a key even if it does not exist
52 |             # it just writes a tombstone in memory
53 | self.run_cmd_by_telnet(cmd, 'DELETED')
54 |
55 | cmd = 'delete %s' % self.invalid_key
56 | if self.bdb_write_enable and not self.cstar_write_enable:
57 | self.run_cmd_by_telnet(cmd, 'NOT FOUND')
58 |
59 | if self.cstar_write_enable:
60 |             # cstar will delete a key even if it does not exist
61 |             # it just writes a tombstone in memory
62 | self.run_cmd_by_telnet(cmd, 'DELETED')
63 | self.checkCounterZero()
64 |
65 | def test_get_meta_by_key(self):
66 | key = '/get_meta_by_key/not/exist/key'
67 | cmd = 'get ?%s' % key
68 | self.run_cmd_by_telnet(cmd, 'END')
69 |
70 | cmd = 'get ?%s' % self.invalid_key
71 | self.run_cmd_by_telnet(cmd, 'END')
72 | self.checkCounterZero()
73 |
--------------------------------------------------------------------------------
/conf/proxy.yaml:
--------------------------------------------------------------------------------
1 | # for doubandb proxy
2 | proxy:
3 | listen: 0.0.0.0
4 | port: 7905
5 | webport: 7908
6 | threads: 8
7 | errorlog: "./proxy-error.log"
8 | accesslog: "./proxy-access.log"
9 | hostname: 127.0.0.1
10 | staticdir: /var/lib/gobeansproxy
11 |   zkservers: []
12 | zkpath: "/gobeansproxy/test"
13 | mc:
14 | max_key_len: 250
15 | max_req: 16
16 | body_max_str: 50M
17 | body_big_str: 5M
18 | body_c_str: 0K
19 | dstore:
20 | n: 3
21 | w: 2
22 | r: 1
23 | max_free_conns_per_host: 20
24 | connect_timeout_ms: 300
25 | write_timeout_ms: 2000
26 | read_timeout_ms: 2000
27 | dial_fail_silence_ms: 5000
28 | response_time_seconds: 10
29 | error_seconds: 10
30 | max_connect_errors: 10
31 | score_deviation: 10000
32 | item_size_stats: 4096
33 | response_time_min: 4000
34 | enable: true
35 | cassandra:
36 | enable: true
37 | default_key_space: dbname
38 | default_table: tablename
39 | hosts:
40 | - cassandra:9042
41 | timeout_ms: 1000
42 | connect_timeout_ms: 3000
43 | write_timeout_ms: 1000
44 | retry_num: 3
45 | reconnect_interval_sec: 180
46 | max_conn_for_getm: 10
47 | num_conns: 10
48 | username: ""
49 | # plaintext password only for test usage
50 | # please use password_file in prod env
51 | password: ""
52 | password_file: ""
53 | # local_one only for test usage
54 | # default: quorum
55 | # consistency: "local_one"
56 | prefix_table_dispatcher_cfg:
58 |     # if not enabled, the default keyspace and table are used
58 | enable: false
59 | static:
60 | # dispatch prefix1 key to table table_name1
61 | table_name1:
62 | - "prefix1"
63 | cfg_table: cassandra_cfg_table_name
64 | cfg_keyspace: cassandra_cfg_keyspace
65 | prefix_rw_dispatcher_cfg:
66 | enable: true
67 | static:
68 | # dispatch prefix /test_prefix_c/ to dual write
69 | br1w1cr0w1:
70 | - "/test_prefix_c/"
71 | - "/test_prefix_d/"
72 | br0w0cr1w1:
73 | - "test_"
74 | cfg_table: cassandra_cfg_table_name
75 | cfg_keyspace: cassandra_cfg_keyspace
76 |     # if a key does not match any rw dispatcher config
77 |     # it will fall back to this default storage rw cfg
78 | # br1w1cr0w0: only use beansdb as backend
79 | # br1w1cr0w1: dual write and read from beansdb
80 | # br0w1cr1w1: dual write and read from c*
81 | # br0w0cr1w1: only use c* for rw backend
82 | default_storage: "br1w1cr0w0"
83 | # dual write error log config
84 | dual_write_err_cfg:
85 | dump_to_dir: /var/gobeansproxy/log/
86 | log_file_name: dual_write_err.log
87 | logger_level: "INFO"
88 | rotate_size_mb: 100
89 | compress: true
90 | max_ages: 7
91 | max_backups: 100
92 |
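
The storage codes above encode per-backend read/write flags: `br`/`bw` are the beansdb read/write bits and `cr`/`cw` the Cassandra ones, so `br1w1cr0w1` means dual write and read from beansdb, while `br0w0cr1w1` means Cassandra only. Keys are dispatched by their longest matching configured prefix and fall back to `default_storage`. A rough, illustrative sketch of that matching rule under the static config above; the proxy's own dispatcher (cassandra/prefix_switch.go) is more involved:

```go
package main

import (
	"fmt"
	"strings"
)

// storage codes as documented in proxy.yaml
var prefixToStorage = map[string]string{
	"/test_prefix_c/": "br1w1cr0w1", // dual write, read from beansdb
	"/test_prefix_d/": "br1w1cr0w1",
	"test_":           "br0w0cr1w1", // Cassandra only
}

const defaultStorage = "br1w1cr0w0" // beansdb only

// resolve picks the longest configured prefix that matches the key.
func resolve(key string) string {
	best, storage := "", defaultStorage
	for prefix, s := range prefixToStorage {
		if strings.HasPrefix(key, prefix) && len(prefix) > len(best) {
			best, storage = prefix, s
		}
	}
	return storage
}

func main() {
	fmt.Println(resolve("/test_prefix_c/42")) // br1w1cr0w1
	fmt.Println(resolve("test_user:1"))       // br0w0cr1w1
	fmt.Println(resolve("/unrelated/key"))    // br1w1cr0w0 (fallback)
}
```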
--------------------------------------------------------------------------------
/tests/dbclient.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import libmc
3 |
4 |
5 | def connect(server, **kwargs):
6 | comp_threshold = kwargs.pop('comp_threshold', 0)
7 | prefix = kwargs.pop('prefix', None)
8 |
9 | c = libmc.Client([server],
10 | do_split=0,
11 | comp_threshold=comp_threshold,
12 | prefix=prefix)
13 | c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s
14 |     c.config(libmc.MC_POLL_TIMEOUT, 10000) # 10s
15 | c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s
16 | return c
17 |
18 |
19 | class MCStore(object):
20 |
21 | IGNORED_LIBMC_RET = frozenset([
22 | libmc.MC_RETURN_OK,
23 | libmc.MC_RETURN_INVALID_KEY_ERR
24 | ])
25 |
26 | def __init__(self, addr, **kwargs):
27 | self.addr = addr
28 | self.mc = connect(addr, **kwargs)
29 |
30 | def __repr__(self):
31 |         return '<MCStore %s>' % repr(self.addr)
32 |
33 | def __str__(self):
34 | return self.addr
35 |
36 | def set(self, key, data, rev=0):
37 | return bool(self.mc.set(key, data, rev))
38 |
39 | def set_raw(self, key, data, rev=0, flag=0):
40 | if rev < 0:
41 | raise Exception(rev)
42 | return self.mc.set_raw(key, data, rev, flag)
43 |
44 | def set_multi(self, values, return_failure=False):
45 | return self.mc.set_multi(values, return_failure=return_failure)
46 |
47 | def _check_last_error(self):
48 | last_err = self.mc.get_last_error()
49 | if last_err not in self.IGNORED_LIBMC_RET:
50 | raise IOError(last_err, self.mc.get_last_strerror())
51 |
52 | def get(self, key):
53 | try:
54 | r = self.mc.get(key)
55 | if r is None:
56 | self._check_last_error()
57 | return r
58 | except ValueError:
59 | self.mc.delete(key)
60 |
61 | def get_raw(self, key):
62 | r, flag = self.mc.get_raw(key)
63 | if r is None:
64 | self._check_last_error()
65 | return r, flag
66 |
67 | def get_multi(self, keys):
68 | r = self.mc.get_multi(keys)
69 | self._check_last_error()
70 | return r
71 |
72 | def delete(self, key):
73 | return bool(self.mc.delete(key))
74 |
75 | def delete_multi(self, keys, return_failure=False):
76 | return self.mc.delete_multi(keys, return_failure=return_failure)
77 |
78 | def exists(self, key):
79 | meta_info = self.mc.get('?' + key)
80 | if meta_info:
81 | version = meta_info.split(' ')[0]
82 | return int(version) > 0
83 | return False
84 |
85 | def incr(self, key, value):
86 | return self.mc.incr(key, int(value))
87 |
--------------------------------------------------------------------------------
/templates/css/bootstrap-sortable.css:
--------------------------------------------------------------------------------
1 | table.sortable span.sign {
2 | display: block;
3 | position: absolute;
4 | top: 50%;
5 | right: 5px;
6 | font-size: 12px;
7 | margin-top: -10px;
8 | color: #bfbfc1;
9 | }
10 |
11 | table.sortable th:after {
12 | display: block;
13 | position: absolute;
14 | top: 50%;
15 | right: 5px;
16 | font-size: 12px;
17 | margin-top: -10px;
18 | color: #bfbfc1;
19 | }
20 |
21 | table.sortable th.arrow:after {
22 | content: '';
23 | }
24 |
25 | table.sortable span.arrow, span.reversed, th.arrow.down:after, th.reversedarrow.down:after, th.arrow.up:after, th.reversedarrow.up:after {
26 | border-style: solid;
27 | border-width: 5px;
28 | font-size: 0;
29 | border-color: #ccc transparent transparent transparent;
30 | line-height: 0;
31 | height: 0;
32 | width: 0;
33 | margin-top: -2px;
34 | }
35 |
36 | table.sortable span.arrow.up, th.arrow.up:after {
37 | border-color: transparent transparent #ccc transparent;
38 | margin-top: -7px;
39 | }
40 |
41 | table.sortable span.reversed, th.reversedarrow.down:after {
42 | border-color: transparent transparent #ccc transparent;
43 | margin-top: -7px;
44 | }
45 |
46 | table.sortable span.reversed.up, th.reversedarrow.up:after {
47 | border-color: #ccc transparent transparent transparent;
48 | margin-top: -2px;
49 | }
50 |
51 | table.sortable span.az:before, th.az.down:after {
52 | content: "a .. z";
53 | }
54 |
55 | table.sortable span.az.up:before, th.az.up:after {
56 | content: "z .. a";
57 | }
58 |
59 | table.sortable th.az.nosort:after, th.AZ.nosort:after, th._19.nosort:after, th.month.nosort:after {
60 | content: "..";
61 | }
62 |
63 | table.sortable span.AZ:before, th.AZ.down:after {
64 | content: "A .. Z";
65 | }
66 |
67 | table.sortable span.AZ.up:before, th.AZ.up:after {
68 | content: "Z .. A";
69 | }
70 |
71 | table.sortable span._19:before, th._19.down:after {
72 | content: "1 .. 9";
73 | }
74 |
75 | table.sortable span._19.up:before, th._19.up:after {
76 | content: "9 .. 1";
77 | }
78 |
79 | table.sortable span.month:before, th.month.down:after {
80 | content: "jan .. dec";
81 | }
82 |
83 | table.sortable span.month.up:before, th.month.up:after {
84 | content: "dec .. jan";
85 | }
86 |
87 | table.sortable thead th:not([data-defaultsort=disabled]) {
88 | cursor: pointer;
89 | position: relative;
90 | top: 0;
91 | left: 0;
92 | }
93 |
94 | table.sortable thead th:hover:not([data-defaultsort=disabled]) {
95 | background: #efefef;
96 | }
97 |
98 | table.sortable thead th div.mozilla {
99 | position: relative;
100 | }
101 |
--------------------------------------------------------------------------------
/dstore/read_only_scheduler.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "time"
7 | "sync/atomic"
8 | dbcfg "github.com/douban/gobeansdb/config"
9 | )
10 |
11 | type RRReadScheduler struct {
12 | hosts []*Host
13 | current atomic.Int32
14 | totalHostsI32 int32
15 | totalHosts int
16 | totalHostsF64 float64
17 | quit bool
18 | }
19 |
20 |
21 | func NewRRReadScheduler(route *dbcfg.RouteTable) *RRReadScheduler {
22 | rrsche := new(RRReadScheduler)
23 | rrsche.hosts = make([]*Host, len(route.Main))
24 | for idx, server := range route.Main {
25 | host := NewHost(server.Addr)
26 | rrsche.hosts[idx] = host
27 | }
28 | rrsche.totalHosts = len(rrsche.hosts)
29 | rrsche.totalHostsI32 = int32(rrsche.totalHosts)
30 | rrsche.totalHostsF64 = float64(rrsche.totalHosts)
31 | return rrsche
32 | }
33 |
34 | func (sch *RRReadScheduler) GetHostsByKey(key string) (hosts []*Host) {
35 | next := sch.current.Add(1) % sch.totalHostsI32
36 | sch.current.Store(next)
37 | rrrStoreReqs.WithLabelValues(sch.hosts[next].Addr).Inc()
38 | return sch.hosts[next:next+1]
39 | }
40 |
41 | func (sch *RRReadScheduler) FeedbackError(host *Host, key string, startTime time.Time, errorCode float64) {
42 | rrrStoreErr.WithLabelValues(host.Addr, fmt.Sprintf("%f", errorCode)).Inc()
43 | return
44 | }
45 |
46 |
47 | func (sch *RRReadScheduler) FeedbackLatency(host *Host, key string, startTime time.Time, timeUsed time.Duration) {
48 | rrrStoreLag.WithLabelValues(host.Addr).Set(float64(timeUsed.Milliseconds()))
49 | return
50 | }
51 |
52 | // route some keys to group of hosts
53 | func (sch *RRReadScheduler) DivideKeysByBucket(keys []string) [][]string {
54 | numKeysPer := int(math.Round(float64(len(keys)) / sch.totalHostsF64))
55 | rs := make([][]string, len(sch.hosts))
56 | maxEndIdx := len(sch.hosts) - 1
57 |
58 | startIdx := 0
59 | partIdx := 0
60 | for {
61 | endIdx := startIdx + numKeysPer
62 | if endIdx >= len(keys) || partIdx == maxEndIdx {
63 | endIdx = len(keys)
64 | rs[partIdx] = keys[startIdx:endIdx]
65 | break
66 | }
67 | rs[partIdx] = keys[startIdx:endIdx]
68 | partIdx += 1
69 | startIdx = endIdx
70 | }
71 | return rs
72 | }
73 |
74 | // internal status
75 | func (sch *RRReadScheduler) Stats() map[string]map[string]float64 {
76 | return nil
77 | }
78 |
79 | // get latencies of hosts in the bucket
80 | func (sch *RRReadScheduler) LatenciesStats() map[string]map[string][QUEUECAP]Response {
81 | return nil
82 | }
83 |
84 | // get percentage of hosts in the bucket
85 | func (sch *RRReadScheduler) Partition() map[string]map[string]int {
86 | return nil
87 | }
88 |
89 | // return average latency and arc(percentage)
90 | func (sch *RRReadScheduler) GetBucketInfo(bucketID int64) map[string]map[string]map[string][]Response {
91 | return nil
92 | }
93 |
94 | func (sch *RRReadScheduler) Close() {
95 | sch.quit = true
96 | }
97 |
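
GetHostsByKey ignores the key entirely and just advances the round-robin counter, while DivideKeysByBucket hands each host roughly len(keys)/len(hosts) keys and lets the last host absorb the remainder. A standalone sketch of that splitting arithmetic with hypothetical keys and three hosts; it mirrors the loop above rather than calling it:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	keys := []string{"k1", "k2", "k3", "k4", "k5", "k6", "k7", "k8"}
	numHosts := 3

	// round(len(keys)/hosts) keys per host; the last host takes the rest.
	per := int(math.Round(float64(len(keys)) / float64(numHosts)))
	start := 0
	for i := 0; i < numHosts; i++ {
		end := start + per
		if end >= len(keys) || i == numHosts-1 {
			end = len(keys)
		}
		fmt.Printf("host %d -> %v\n", i, keys[start:end])
		start = end
	}
	// host 0 -> [k1 k2 k3]
	// host 1 -> [k4 k5 k6]
	// host 2 -> [k7 k8]
}
```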
--------------------------------------------------------------------------------
/dstore/bucket_test.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "path"
5 | "sync"
6 | "testing"
7 | "time"
8 |
9 | "github.com/douban/gobeansproxy/config"
10 | "github.com/douban/gobeansproxy/utils"
11 | )
12 |
13 | func TestAddResTime(t *testing.T) {
14 | homeDir := utils.GetProjectHomeDir()
15 | confdir := path.Join(homeDir, "conf")
16 | proxyConf := &config.Proxy
17 | proxyConf.Load(confdir)
18 |
19 | testHost := []struct {
20 | Addr string
21 | resTime []float64
22 | }{
23 | {
24 | "127.0.0.1:1234",
25 | []float64{1.1, 1.3, 1.4, 2},
26 | },
27 | {
28 | "10.0.0.1:1234",
29 | []float64{1.4, 1, 6, 1.9, 2.1},
30 | },
31 | {
32 | "10.0.0.2:1234",
33 | []float64{1.4, 1, 6, 1.9, 2.1},
34 | },
35 | }
36 | hosts := []*Host{}
37 | for _, addr := range testHost {
38 | host := NewHost(addr.Addr)
39 | hosts = append(hosts, host)
40 | }
41 | bucket := newBucket(0, hosts...)
42 | var wg sync.WaitGroup
43 | for _, host := range testHost {
44 | wg.Add(1)
45 | go func(addr string, resTime []float64) {
46 | for _, score := range resTime {
47 | now := time.Now()
48 | bucket.addLatency(addr, now, score)
49 | time.Sleep(1 * time.Second)
50 | }
51 | wg.Done()
52 | }(host.Addr, host.resTime)
53 | }
54 | wg.Wait()
55 | bucket.ReBalance()
56 | for _, h := range bucket.hostsList {
57 | if h.score == 0 {
58 | t.Errorf("the host %s got score %f", h.host.Addr, h.score)
59 | }
60 | }
61 | }
62 |
63 | func TestDownHost(t *testing.T) {
64 | homeDir := utils.GetProjectHomeDir()
65 | confdir := path.Join(homeDir, "conf")
66 | proxyConf := &config.Proxy
67 | proxyConf.Load(confdir)
68 |
69 | testHost := []struct {
70 | Addr string
71 | resTime []float64
72 | status bool
73 | }{
74 | {
75 | "127.0.0.1:1234",
76 | []float64{1.1, 1.3, 1.4, 2},
77 | true,
78 | },
79 | {
80 | "10.0.0.1:1234",
81 | []float64{1.4, 1, 6, 1.9, 2.1},
82 | true,
83 | },
84 | {
85 | "10.0.0.2:1234",
86 | []float64{1.4, 1, 6, 1.9, 2.1},
87 | false,
88 | },
89 | }
90 | hosts := []*Host{}
91 | for _, addr := range testHost {
92 | host := NewHost(addr.Addr)
93 | hosts = append(hosts, host)
94 | }
95 | bucket := newBucket(0, hosts...)
96 | var wg sync.WaitGroup
97 | for _, host := range testHost {
98 | wg.Add(1)
99 | go func(addr string, resTime []float64) {
100 | for _, score := range resTime {
101 | now := time.Now()
102 | bucket.addLatency(addr, now, score)
103 | time.Sleep(1 * time.Second)
104 | }
105 | wg.Done()
106 | }(host.Addr, host.resTime)
107 | }
108 | wg.Wait()
109 |
110 | for _, host := range testHost {
111 | if !host.status {
112 | bucket.downHost(host.Addr)
113 | }
114 | }
115 | bucket.ReBalance()
116 | for _, h := range bucket.hostsList {
117 | if !h.status && h.score > 0 {
118 | 			t.Errorf("the host %s is down, but got score %f", h.host.Addr, h.score)
119 | } else if h.score == 0 && h.status {
120 | t.Errorf("the host %s got score %f", h.host.Addr, h.score)
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/tests/server_down_test.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from tests.base import BaseTest
4 | from tests.dbclient import MCStore
5 |
6 |
7 | class KeyVersionTest(BaseTest):
8 | def setUp(self):
9 | BaseTest.setUp(self)
10 |
11 | # def tearDown(self):
12 | # import time
13 | # time.sleep(600)
14 | # super(self).tearDown()
15 |
16 | def _assert_val(self, addr, key, val, msg=None):
17 | store = MCStore(addr)
18 | self.assertEqual(store.get(key), val, msg)
19 |
20 | @BaseTest.require_rw_enable(
21 | br=(True, False), bw=(True,), cr=(False,), cw=(False, True)
22 | )
23 | def test_normal(self):
24 | proxy = MCStore(self.proxy.addr)
25 | key = '/test/normal/key'
26 | val = 'val1'
27 |
28 | self.assertTrue(proxy.set(key, val))
29 | self.assertEqual(proxy.get(key), val)
30 | for db in self.dbs:
31 | self._assert_val(db.addr, key, val)
32 |
33 | @BaseTest.require_rw_enable(
34 | br=(True, False), bw=(True,), cr=(False,), cw=(False, True)
35 | )
36 | def test_one_server_down(self):
37 | proxy = MCStore(self.proxy.addr)
38 | key = '/test/one/server/down'
39 | val = 'val'
40 | bad_server_idx = 0
41 |
42 | self.dbs[bad_server_idx].stop()
43 | self.assertTrue(proxy.set(key, val))
44 | self.assertEqual(proxy.get(key), val)
45 | for i, db in enumerate(self.dbs):
46 | if i != bad_server_idx:
47 | self._assert_val(db.addr, key, val)
48 |
49 |         # the backup nodes were not written to
50 | for db in self.backup_dbs:
51 | self._assert_val(db.addr, key, None)
52 |
53 | @BaseTest.require_rw_enable(
54 | br=(True, False), bw=(True,), cr=(False,), cw=(False, True)
55 | )
56 | def test_two_server_down(self):
57 | proxy = MCStore(self.proxy.addr)
58 | key = '/test/two/server/down'
59 | val = 'val'
60 | bad_server_idxs = [0, 1]
61 |
62 | for i in bad_server_idxs:
63 | self.dbs[i].stop()
64 | self.assertTrue(proxy.set(key, val))
65 | self.assertEqual(proxy.get(key), val)
66 | for i, db in enumerate(self.dbs):
67 | if i not in bad_server_idxs:
68 | self._assert_val(db.addr, key, val)
69 |
70 |         # the backup nodes were written; this assumes the test config has only one backup node
71 | for db in self.backup_dbs:
72 | self._assert_val(db.addr, key, val)
73 |
74 | @BaseTest.require_rw_enable(
75 | br=(True, False), bw=(True,), cr=(False,), cw=(False, True)
76 | )
77 | def test_three_server_down(self):
78 | proxy = MCStore(self.proxy.addr)
79 | key = '/test/three/server/down'
80 | val = 'val'
81 | bad_server_idxs = [0, 1, 2]
82 |
83 | for i in bad_server_idxs:
84 | self.dbs[i].stop()
85 | self.assertFalse(proxy.set(key, val))
86 | with self.assertRaises(IOError):
87 | proxy.get(key)
88 |
89 | for i, db in enumerate(self.dbs):
90 | if i not in bad_server_idxs:
91 | self._assert_val(db.addr, key, val)
92 |
93 |         # the backup nodes were written
94 | for db in self.backup_dbs:
95 | self._assert_val(db.addr, key, val)
96 |
--------------------------------------------------------------------------------
/.doubanpde/pde.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | app: "gobeansproxy"
6 | createId: "{{ uuid }}"
7 | createdBy: pdectl
8 | createdByUser: wangqiang
9 | runByUser: '{{ .CliArgs.String "username" }}'
10 | runByPdectlVersion: "{{ .CliArgs.App.Version }}"
11 | runnerAddress: "{{ .RunnerAddress }}"
12 | createdTime: "{{ .CreatedTime }}"
13 | pdeVersion: "v0.1.4"
14 | useWebEditor: "false"
15 | webEditorPort: 0
16 | webEditorType: ""
17 | name: "gobeansproxy"
18 | annotations:
19 | pdectl.douban.com/cfg/exec-cmd: '{{ .CliArgs.String "exec-default-cmd" }}'
20 | spec:
21 | containers:
22 | - name: "main"
23 | env:
24 | - name: HOSTNAME
25 | value: "gobeansproxy-main"
26 | - name: SCRIBE_HOST
27 | value: 10.0.2.2
28 | image: "docker.douban/sa/pde-go-cli:latest-1.20-v2"
29 | ports:
30 | volumeMounts:
31 | # mount go path src to container go path
32 | - mountPath: /go/src/
33 | name: go-path-src
34 | # mount code folder
35 | - mountPath: /home/project/
36 | name: code
37 | - mountPath: /root/
38 | name: userhome
39 | - mountPath: '/home/{{ .CliArgs.String "username" }}'
40 | name: userhome
41 | - mountPath: /fuse:rslave
42 | name: fuse
43 | - mountPath: /etc/douban/
44 | name: etc-douban
45 | readOnly: true
46 | - mountPath: /etc/localtime
47 | name: etc-localtime
48 | readOnly: true
49 | - mountPath: /var/run/nscd/
50 | name: var-run-nscd
51 | readOnly: true
52 | workingDir: /home/project
53 | # - name: mc
54 | # image: docker.douban/memcached:latest
55 | # workingDir: /
56 | {{- range (mkSlice 57980 57981 57982 57983) }}
57 | - name: beansdb-{{ . }}
58 | image: docker.douban/platform/gobeansdb:latest
59 | workingDir: /data/
60 | volumeMounts:
61 | - mountPath: /data
62 | name: beansdb-{{ . }}-data-dir
63 | - mountPath: /gobeansdb/default_beansdb_cfg/
64 | name: beansdb-{{ . }}-cfg-dir
65 | {{- end }}
66 | - name: cassandra
67 | image: docker.douban/dba/cassandra:4.1.2
68 | workingDir: /
69 | volumeMounts:
70 | - mountPath: /var/lib/cassandra/
71 | name: cassandra-data-dir
72 | # - mountPath: /tmp/cassandra/
73 | # name: cassandra-cfg
74 | # command:
75 | # - "/bin/bash"
76 | # args:
77 | # - "-c"
78 | # - >
79 | # cp -rfv /tmp/cassandra/cassandra.yaml /etc/cassandra/ &&
80 | # /usr/local/bin/docker-entrypoint.sh cassandra -f
81 | restartPolicy: Never
82 | volumes:
83 | - hostPath:
84 | path: '{{ expandEnvVar "$GOPATH/src" }}'
85 | type: Directory
86 | name: go-path-src
87 | {{- $env := . }}
88 | {{- range (mkSlice 57980 57981 57982 57983) }}
89 | - hostPath:
90 | path: '{{ $env.CliArgs.String "project-dir" }}/.doubanpde/data/beansdb-{{ . }}/'
91 | type: DirectoryOrCreate
92 | name: beansdb-{{ . }}-data-dir
93 | - hostPath:
94 | path: '{{ $env.CliArgs.String "project-dir" }}/.doubanpde/scripts/bdb/gobeansproxy/{{ . }}/conf/'
95 | type: Directory
96 | name: beansdb-{{ . }}-cfg-dir
97 | {{- end }}
98 | - hostPath:
99 | path: '{{ .CliArgs.String "project-dir" }}/.doubanpde/data/cassandra/'
100 | type: DirectoryOrCreate
101 | name: cassandra-data-dir
102 | - hostPath:
103 | path: '{{ .CliArgs.String "project-dir" }}/.doubanpde/scripts/cassandra/'
104 | name: cassandra-cfg
105 | - hostPath:
106 | path: '{{ .CliArgs.String "project-dir" }}'
107 | type: Directory
108 | name: code
109 | - hostPath:
110 | path: '{{ expandEnvVar "$HOME/" }}'
111 | type: Directory
112 | name: userhome
113 | - hostPath:
114 | path: /fuse
115 | type: Directory
116 | name: fuse
117 | - hostPath:
118 | path: /etc/douban/
119 | name: etc-douban
120 | - hostPath:
121 | path: /etc/localtime
122 | name: etc-localtime
123 | - hostPath:
124 | path: /var/run/nscd/
125 | name: var-run-nscd
126 |
127 |
--------------------------------------------------------------------------------
/dstore/consistent.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "hash/fnv"
5 | "sync"
6 | )
7 |
8 | const (
9 | 	// keep at least MINKEYS/count of the keys on any single node
10 | MINKEYS = 1
11 | )
12 |
13 | // a variant of consistent hashing
14 | type Partition struct {
15 | sync.RWMutex
16 |
17 | count int
18 | offsets []int
19 | }
20 |
21 | /* --- Partition -------------------------------------------------------------- */
22 | func NewPartition(count int, nodesNum int) *Partition {
23 | partition := &Partition{
24 | count: count,
25 | offsets: []int{},
26 | }
27 | lenNodes := partition.count / nodesNum
28 |
29 | for i := 0; i < nodesNum; i++ {
30 | partition.offsets = append(partition.offsets, lenNodes*i)
31 | }
32 | return partition
33 | }
34 |
35 | // hash function
36 | func (partition *Partition) hash(key string) int {
37 | hash := fnv.New32a()
38 | hash.Write([]byte(key))
39 | return int(hash.Sum32()) % partition.count
40 | }
41 |
42 | func (partition *Partition) getPre(index int) (pre int) {
43 | return (index - 1 + 3) % 3
44 | }
45 |
46 | func (partition *Partition) getNext(index int) (next int) {
47 | return (index + 1 + 3) % 3
48 | }
49 |
50 | func (partition *Partition) remove(host int) {
51 | partition.Lock()
52 | defer partition.Unlock()
53 | 	// TODO: only three nodes are allowed
54 | preIndex := partition.getPre(host) //(host - 1 + 3) % 3
55 | offsetsPre := partition.offsets[preIndex] //partition.offsets[preIndex]
56 | offsetsCount := 0
57 | if offsetsPre > partition.offsets[host] {
58 | offsetsCount = (partition.offsets[host] + partition.count - offsetsPre)
59 | } else {
60 | offsetsCount = (partition.offsets[host] - offsetsPre)
61 | }
62 | prevalue := partition.offsets[preIndex] + (offsetsCount / 2)
63 | partition.offsets[preIndex] = partition.clearOffset(prevalue)
64 | partition.offsets[host] = partition.offsets[preIndex]
65 | }
66 |
67 | // get the arc length of a segment
68 | func (partition *Partition) getArc(index int) int {
69 | indexPre := partition.getPre(index)
70 | arc := partition.offsets[index] - partition.offsets[indexPre]
71 | if arc < 0 {
72 | arc += partition.count
73 | }
74 | return arc
75 | }
76 |
77 | func (partition *Partition) reBalance(indexFrom, indexTo int, step int) {
78 | partition.Lock()
79 | defer partition.Unlock()
80 | 	// the next node after the "from" node
81 | fromNext := partition.getNext(indexFrom) //(indexFrom + 1 + 3) % 3
82 |
83 | if indexTo == fromNext {
84 | fromPre := partition.getPre(indexFrom) //(indexFrom - 1 + 3) % 3
85 | step = partition.clearStep(indexFrom, fromPre, step)
86 | value := partition.offsets[indexFrom] - step
87 | partition.offsets[indexFrom] = partition.clearOffset(value)
88 | } else {
89 | toNext := partition.getNext(indexTo) //(indexTo + 1 + 3) % 3
90 | step = partition.clearStep(toNext, indexTo, step)
91 | value := partition.offsets[indexTo] + step
92 | partition.offsets[indexTo] = partition.clearOffset(value)
93 | }
94 | }
95 |
96 | func (partition *Partition) clearStep(modify, indexPre, step int) int {
97 | interval := partition.offsets[modify] - partition.offsets[indexPre] - MINKEYS
98 | if interval < 0 {
99 | interval += partition.count
100 | }
101 | if step > interval {
102 | step = interval
103 | }
104 | return step
105 | }
106 |
107 | func (partition *Partition) clearOffset(offset int) int {
108 | if offset < 0 {
109 | offset += partition.count
110 | } else if offset > partition.count {
111 | offset = offset % partition.count
112 | }
113 | return offset
114 | }
115 |
116 | // find the node index that matches the key
117 | func (partition *Partition) offsetGet(key string) int {
118 | partition.RLock()
119 | defer partition.RUnlock()
120 |
121 | // A 0
122 | // |
123 | // -------
124 | // / \ B
125 | // / \
126 | // / \
127 | // | | -- 1
128 | // \ /
129 | // \ /
130 | // \ /
131 | // 2-- ---------
132 | // C
133 | // A: 2<-A->0
134 | // B: 0<-B->1
135 | // C: 1<-C->2
136 |
137 | index := partition.hash(key)
138 | // like offset[0] == 98, offset[1] == 32, offset [2] ==66
139 | // [98, 32, 66]
140 | // [98, 32, 98]
141 | // [98, 32, 32]
142 | if partition.offsets[0] > partition.offsets[1] {
143 | if index < partition.offsets[1] {
144 | return 1
145 | } else if index < partition.offsets[2] {
146 | return 2
147 | } else if index < partition.offsets[0] {
148 | return 0
149 | } else {
150 | return 1
151 | }
152 |
153 | // like offset 0 == 23, offset 1 == 88, offset 2 == 1
154 | // [23, 88, 1]
155 | // [32, 88 ,32]
156 | // [88, 88, 32]
157 | } else if partition.offsets[2] < partition.offsets[1] {
158 | if index < partition.offsets[2] {
159 | return 2
160 | } else if index < partition.offsets[0] {
161 | return 0
162 | } else if index < partition.offsets[1] {
163 | return 1
164 | } else {
165 | return 2
166 | }
167 |
168 | // offset 0 = 3 ,offset 1 = 34, offset 2 == 67
169 | // [3, 34, 67]
170 | // [3, 3, 67]
171 | // [3, 67, 67]
172 | } else {
173 | for i, value := range partition.offsets {
174 | if index < value {
175 | return i
176 | }
177 | }
178 | return 0
179 | }
180 | }
181 |
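
A worked example helps with the offset bookkeeping: with CONSISTENTLEN = 100 (the value the buckets use) and three nodes, NewPartition starts with offsets [0, 33, 66], hash(key) lands somewhere on the 0..99 ring, and offsetGet walks the boundaries to find the owner. The snippet below is a hypothetical test-style sketch; it would have to live inside package dstore, next to consistent_test.go, because reBalance and remove are unexported:

```go
package dstore

import "testing"

// Hypothetical sketch, not part of consistent_test.go: how the ring
// boundaries move for a three-node partition.
func TestPartitionWorkedExample(t *testing.T) {
	p := NewPartition(CONSISTENTLEN, 3)
	t.Logf("offsets: %v", p.offsets) // [0 33 66]
	// node 1 owns hashes [0,33), node 2 owns [33,66), node 0 owns [66,100)

	// Shift one position of load from node 1 to node 2: the shared
	// boundary stored in offsets[1] moves back by step.
	p.reBalance(1, 2, 1)
	t.Logf("offsets: %v", p.offsets) // [0 32 66]

	// Removing node 2 splits its arc between the remaining nodes.
	p.remove(2)
	t.Logf("offsets: %v", p.offsets) // [0 49 49]: node 2 now owns nothing
}
```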
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "log"
5 | "path"
6 |
7 | dbcfg "github.com/douban/gobeansdb/config"
8 | dbutils "github.com/douban/gobeansdb/utils"
9 | )
10 |
11 | const (
12 | Version = "v2.1.1"
13 | )
14 |
15 | var (
16 | Proxy ProxyConfig
17 | Route *dbcfg.RouteTable
18 | )
19 |
20 | type ProxyConfig struct {
21 | dbcfg.ServerConfig `yaml:"proxy,omitempty"`
22 | dbcfg.MCConfig `yaml:"mc,omitempty"`
23 | DStoreConfig `yaml:"dstore,omitempty"`
24 | CassandraStoreCfg `yaml:"cassandra,omitempty"`
25 | Confdir string
26 | }
27 |
28 | type DStoreConfig struct {
29 | N int `yaml:",omitempty"`
30 | W int `yaml:",omitempty"`
31 | R int `yaml:",omitempty"`
32 | MaxFreeConnsPerHost int `yaml:"max_free_conns_per_host,omitempty"`
33 | ConnectTimeoutMs int `yaml:"connect_timeout_ms,omitempty"`
34 | DialFailSilenceMs int `yaml:"dial_fail_silence_ms,omitempty"`
35 | WriteTimeoutMs int `yaml:"write_timeout_ms,omitempty"`
36 | ReadTimeoutMs int `yaml:"read_timeout_ms,omitempty"`
37 | ResTimeSeconds int `yaml:"response_time_seconds,omitempty"`
38 | ErrorSeconds int `yaml:"error_seconds,omitempty"`
39 | MaxConnectErrors int `yaml:"max_connect_errors,omitempty"`
40 | ScoreDeviation float64 `yaml:"score_deviation,omitempty"`
41 | ItemSizeStats int `yaml:"item_size_stats,omitempty"`
42 | ResponseTimeMin float64 `yaml:"response_time_min,omitempty"`
43 | Enable bool `yaml:"enable"`
44 | Scheduler string `yaml:"scheduler,omitempty"`
45 | }
46 |
47 | type DualWErrCfg struct {
48 | DumpToDir string `yaml:"dump_to_dir"`
49 | FName string `yaml:"log_file_name"`
50 | LoggerLevel string `yaml:"logger_level"`
51 | RotateSize int `yaml:"rotate_size_mb"`
52 | Compress bool `yaml:"compress"`
53 | MaxAges int `yaml:"max_ages"`
54 | MaxBackups int `yaml:"max_backups"`
55 | }
56 |
57 | type PrefixDisPatcherCfg struct {
58 | StaticCfg map[string][]string `yaml:"static"`
59 | CfgFromCstarTable string `yaml:"cfg_table"`
60 | CfgFromCstarKeySpace string `yaml:"cfg_keyspace"`
61 | Enable bool `yaml:"enable"`
62 | }
63 |
64 | type CassandraStoreCfg struct {
65 | Enable bool `yaml:"enable"`
66 | Hosts []string `yaml:"hosts"`
67 | DefaultKeySpace string `yaml:"default_key_space"`
68 | DefaultTable string `yaml:"default_table"`
69 | CstarTimeoutMs int `yaml:"timeout_ms"`
70 | CstarConnectTimeoutMs int `yaml:"connect_timeout_ms"`
71 | CstarWriteTimeoutMs int `yaml:"write_timeout_ms"`
72 | MaxConnForGetm int `yaml:"max_conn_for_getm"`
73 | // ref: https://pkg.go.dev/github.com/gocql/gocql?utm_source=godoc#ClusterConfig
74 | ReconnectIntervalSec int `yaml:"reconnect_interval_sec"`
75 | RetryNum int `yaml:"retry_num"`
76 | NumConns int `yaml:"num_conns"`
77 | Username string `yaml:"username"`
78 | Password string `yaml:"password"`
79 | PasswordFile string `yaml:"password_file"`
80 | Consistency string `yaml:"consistency,omitempty"`
81 | PrefixTableDispatcherCfg PrefixDisPatcherCfg `yaml:"prefix_table_dispatcher_cfg"`
82 | PrefixRWDispatcherCfg PrefixDisPatcherCfg `yaml:"prefix_rw_dispatcher_cfg"`
83 | SwitchToKeyDefault string `yaml:"default_storage"`
84 | DualWErrCfg DualWErrCfg `yaml:"dual_write_err_cfg"`
85 | }
86 |
87 | func (c *ProxyConfig) InitDefault() {
88 | c.ServerConfig = DefaultServerConfig
89 | c.MCConfig = dbcfg.DefaultMCConfig
90 | c.DStoreConfig = DefaultDStoreConfig
91 | }
92 |
93 | func (c *ProxyConfig) ConfigPackages() {
94 | dbcfg.ServerConf = c.ServerConfig
95 | dbcfg.MCConf = c.MCConfig
96 | dbcfg.Version = Version
97 | }
98 |
99 | func (c *ProxyConfig) Load(confdir string) {
100 | if confdir != "" {
101 | var err error
102 |
103 | // proxy
104 | proxyPath := path.Join(confdir, "proxy.yaml")
105 | if err = dbcfg.LoadYamlConfig(c, proxyPath); err != nil {
106 | log.Fatalf("bad config %s: %s", proxyPath, err.Error())
107 | }
108 |
109 | // route
110 | if c.DStoreConfig.Enable {
111 | routePath := path.Join(confdir, "route.yaml")
112 | var route *dbcfg.RouteTable
113 |
114 | if len(c.ZKServers) > 0 {
115 | route, err = dbcfg.LoadRouteTableZK(routePath, c.ZKPath, c.ZKServers)
116 | if err != nil {
117 | log.Printf("fail to load route table from zk: %s, err: %s", c.ZKPath, err.Error())
118 | }
119 | }
120 |
121 | if len(c.ZKServers) == 0 || err != nil {
122 | route, err = dbcfg.LoadRouteTableLocal(routePath)
123 | }
124 | if err != nil {
125 | log.Fatalf("fail to load route table: %s", err.Error())
126 | }
127 |
128 | Route = route
129 | checkConfig(c, Route)
130 | }
131 | }
132 | c.Confdir = confdir
133 | dbutils.InitSizesPointer(c)
134 | c.ConfigPackages()
135 | }
136 |
137 | func checkConfig(proxy *ProxyConfig, route *dbcfg.RouteTable) {
138 | if route == nil {
139 | log.Fatal("empty route config")
140 | }
141 | }
142 |
143 | func DumpConfig(config interface{}) {
144 | dbcfg.DumpConfig(config)
145 | }
146 |
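
Load is the only entry point: it reads proxy.yaml, optionally route.yaml (from ZooKeeper first when zkservers is set, falling back to the local file), and then pushes the server/mc sections into the gobeansdb config package. A minimal sketch of consuming it from a tool, assuming a hypothetical conf directory:

```go
package main

import (
	"fmt"

	"github.com/douban/gobeansproxy/config"
)

func main() {
	// Hypothetical path; Load will log.Fatalf on a malformed proxy.yaml.
	confdir := "/etc/gobeansproxy/conf"

	proxyConf := &config.Proxy
	proxyConf.Load(confdir)

	fmt.Println("dstore enabled:   ", proxyConf.DStoreConfig.Enable)
	fmt.Println("cassandra enabled:", proxyConf.CassandraStoreCfg.Enable)
	fmt.Println("default storage:  ", proxyConf.CassandraStoreCfg.SwitchToKeyDefault)
}
```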
--------------------------------------------------------------------------------
/tests/base.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | import os
4 | import time
5 | import uuid
6 | import json
7 | import shutil
8 | import unittest
9 |
10 | from tests.dbclient import MCStore
11 | from tests.utils import (
12 | start_cmd, stop_cmd, get_server_addr, load_yaml, gethttp
13 | )
14 | from tests import gen_config
15 |
16 |
17 | GOBEANSDB_CMD = "gobeansdb"
18 | GOBEANSPROXY_CMD = f"{os.environ['GOPATH']}/bin/gobeansproxy"
19 | GOBEANSDB_READ_ENABLE = os.environ.get("GOBEANSPROXY_TEST_BR") == "1"
20 | GOBEANSDB_WRITE_ENABLE = os.environ.get("GOBEANSPROXY_TEST_BW") == "1"
21 | CSTAR_READ_ENABLE = os.environ.get("GOBEANSPROXY_TEST_CR") == "1"
22 | CSTAR_WRITE_ENABLE = os.environ.get("GOBEANSPROXY_TEST_CW") == "1"
23 |
24 |
25 | class BaseTest(unittest.TestCase):
26 | def setUp(self):
27 | root_dir = '/tmp/gobeansproxy_%s/' % uuid.uuid4()
28 | self.bdb_read_enable = GOBEANSDB_READ_ENABLE
29 | self.bdb_write_enable = GOBEANSDB_WRITE_ENABLE
30 | self.cstar_read_enable = CSTAR_READ_ENABLE
31 | self.cstar_write_enable = CSTAR_WRITE_ENABLE
32 | gen_config.gen_conf(
33 | root_dir,
34 | bdb_read_enable=self.bdb_read_enable,
35 | bdb_write_enable=self.bdb_write_enable,
36 | cstar_read_enable=self.cstar_read_enable,
37 | cstar_write_enable=self.cstar_write_enable
38 | )
39 |
40 | self.dbs = [GobeansdbInstance(os.path.join(root_dir, str(port), 'conf'))
41 | for (port, _) in gen_config.MAIN_PORT_PAIRS]
42 | for db in self.dbs:
43 | db.start(self.bdb_read_enable)
44 |
45 | self.backup_dbs = [GobeansdbInstance(os.path.join(root_dir, str(port), 'conf'))
46 | for (port, _) in gen_config.BACKUP_PORT_PAIRS]
47 | for db in self.backup_dbs:
48 | db.start(self.bdb_read_enable)
49 |
50 | self.proxy = GobeansproxyInstance(
51 | os.path.join(root_dir, 'proxy', 'conf'))
52 | self.proxy.start(self.bdb_read_enable)
53 |
54 | def tearDown(self):
55 | # time.sleep(1000)
56 | self.proxy.clean()
57 | for db in self.dbs:
58 | db.clean()
59 | for db in self.backup_dbs:
60 | db.clean()
61 |
62 | def checkCounterZero(self):
63 | if not (self.bdb_read_enable or self.bdb_write_enable):
64 | return
65 | time.sleep(0.5)
66 | content = gethttp(self.proxy.webaddr, 'buffer')
67 | buffers = json.loads(content)
68 | self.assertEqual(len(buffers), 4)
69 | for _, v in list(buffers.items()):
70 | self.assertEqual(v['Count'], 0, content)
71 | self.assertEqual(v['Size'], 0, content)
72 |
73 | @classmethod
74 |     def require_rw_enable(cls, br, bw, cr, cw):
75 | def wrap(func):
76 | def check_rw_func(*args, **kwargs):
77 | if not (GOBEANSDB_READ_ENABLE in br and \
78 | GOBEANSDB_WRITE_ENABLE in bw and \
79 | CSTAR_READ_ENABLE in cr and \
80 | CSTAR_WRITE_ENABLE in cw):
81 | return
82 | return func(*args, **kwargs)
83 | return check_rw_func
84 |
85 | return wrap
86 |
87 |
88 |
89 | class BaseServerInstance(object):
90 | def __init__(self, conf_dir, bin, server_name):
91 | self.popen = None
92 | self.cmd = '%s -confdir %s' % (bin, conf_dir)
93 | self.addr, self.webaddr = get_server_addr(conf_dir, server_name)
94 |
95 | def start(self, bdb_read_enable=True):
96 | assert self.popen is None
97 | self.popen = start_cmd(self.cmd)
98 | try_times = 0
99 | while True:
100 | try:
101 | store = MCStore(self.addr)
102 | if bdb_read_enable:
103 | store.get("@")
104 | else:
105 | store.set("test", "test")
106 | return
107 | except IOError:
108 | try_times += 1
109 | if try_times > 20:
110 |                     raise Exception("connect error for addr: %s" % self.addr)
111 | time.sleep(0.5)
112 |
113 | def stop(self):
114 | print('stop', self.cmd)
115 | if self.popen:
116 | stop_cmd(self.popen)
117 | self.popen = None
118 |
119 | def clean(self):
120 | if self.popen:
121 | self.stop()
122 | self.clean_data()
123 |
124 | def clean_data(self):
125 | pass
126 |
127 |
128 | class GobeansproxyInstance(BaseServerInstance):
129 | def __init__(self, conf_dir):
130 | super(GobeansproxyInstance, self).__init__(
131 | conf_dir, GOBEANSPROXY_CMD, 'gobeansproxy')
132 |
133 |
134 | class GobeansdbInstance(BaseServerInstance):
135 | def __init__(self, conf_dir):
136 | super(GobeansdbInstance, self).__init__(
137 | conf_dir, GOBEANSDB_CMD, 'gobeansdb')
138 | conf = load_yaml(os.path.join(conf_dir, 'global.yaml'))
139 | self.db_home = conf['hstore']['local']['home']
140 |
141 | def clean_data(self):
142 | if os.path.exists(self.db_home):
143 | shutil.rmtree(self.db_home)
144 |
--------------------------------------------------------------------------------
/tests/basic_test.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | import os
4 |
5 | from tests.base import BaseTest
6 | from tests.dbclient import MCStore
7 | from tests.utils import random_string
8 |
9 |
10 | VERSION, HASH, FLAG, SIZE, TIMESTAMP, CHUNKID, OFFSET = list(range(7))
11 |
12 | class KeyVersionTest(BaseTest):
13 | def setUp(self):
14 | BaseTest.setUp(self)
15 |
16 | self.last_pos = 0
17 | self.last_size = 0
18 |
19 | def update_pos(self, size):
20 | self.last_pos += self.last_size
21 | self.last_size = size
22 |
23 | def get_meta(self, store, key):
24 | meta = store.get("??" + key)
25 | if meta:
26 | meta = meta.split()
27 | assert(len(meta) == 7)
28 | return tuple([int(meta[i]) for i in [VERSION, CHUNKID, OFFSET]])
29 |
30 | @BaseTest.require_rw_enable(br=(True,), bw=(True,), cr=(False,), cw=(True, False))
31 | def test_incr(self):
32 | store = MCStore(self.proxy.addr)
33 | key = 'key1'
34 | store.incr(key, 10)
35 | self.assertEqual(store.get(key), 10)
36 | self.checkCounterZero()
37 |
38 | @BaseTest.require_rw_enable(br=(True,), bw=(True,), cr=(False,), cw=(False,))
39 | def test_set_version(self):
40 | store = MCStore(self.proxy.addr)
41 | key = 'key1'
42 | store.set(key, 'aaa')
43 | self.update_pos(256)
44 |
45 | self.assertEqual(store.get(key), 'aaa')
46 | self.assertEqual(self.get_meta(store, key), (1, 0, self.last_pos))
47 |
48 | store.set_raw(key, b'bbb', rev=3)
49 | self.update_pos(256)
50 | self.assertEqual(self.get_meta(store, key), (3, 0, self.last_pos))
51 |
52 | store.set_raw(key, b'bbb', rev=4)
53 | self.assertEqual(self.get_meta(store, key), (4, 0, self.last_pos))
54 |
55 | store.set_raw(key, b'ccc', rev=2)
56 | self.assertEqual(store.get(key), b'bbb')
57 | self.assertEqual(self.get_meta(store, key), (4, 0, self.last_pos))
58 |
59 | self.checkCounterZero()
60 |
61 | @BaseTest.require_rw_enable(br=(True,), bw=(True,), cr=(False,), cw=(False,))
62 | def test_delete_version(self):
63 | store = MCStore(self.proxy.addr)
64 | key = 'key1'
65 |
66 | store.set(key, 'aaa')
67 | self.update_pos(256)
68 | self.assertEqual(self.get_meta(store, key), (1, 0, self.last_pos))
69 |
70 | store.delete(key)
71 | self.update_pos(256)
72 | self.assertEqual(store.get(key), None)
73 |
74 | self.assertEqual(self.get_meta(store, key), (-2, 0, self.last_pos))
75 | self.checkCounterZero()
76 |
77 | store.set(key, 'bbb')
78 | self.update_pos(256)
79 | self.assertEqual(store.get(key), 'bbb')
80 | self.assertEqual(self.get_meta(store, key), (3, 0, self.last_pos))
81 | self.checkCounterZero()
82 |
83 | def test_special_key(self):
84 | store = MCStore(self.proxy.addr)
85 | kvs = [('a' * 200, 1), ('a', list(range(1000)))]
86 | for k, v in kvs:
87 | self.assertTrue(store.set(k, v))
88 | self.assertEqual(store.get(k), v)
89 |
90 | # restart
91 | self.proxy.stop()
92 | self.proxy.start()
93 | store = MCStore(self.proxy.addr)
94 | for (k, v) in kvs:
95 | v2 = store.get(k)
96 | self.assertEqual(v2, v, "key %s, value %s, not %s" % (k, v, v2))
97 | if not self.cstar_write_enable:
98 | self.checkCounterZero()
99 |
100 | def test_big_v(self):
101 | store = MCStore(self.proxy.addr)
102 | key = 'largekeykk'
103 | size = 10 * 1024 * 1024
104 | string_large = random_string(size // 10) * 10
105 |
106 | self.assertTrue(store.set(key, string_large))
107 | self.assertEqual(store.get(key), string_large)
108 |
109 | def test_env(self):
110 | self.assertEqual(
111 | os.environ.get("GOBEANSPROXY_TEST_BR") == "1",
112 | self.bdb_read_enable
113 | )
114 | self.assertEqual(
115 | os.environ.get("GOBEANSPROXY_TEST_BW") == "1",
116 | self.bdb_write_enable
117 | )
118 | self.assertEqual(
119 | os.environ.get("GOBEANSPROXY_TEST_CR") == "1",
120 | self.cstar_read_enable
121 | )
122 | self.assertEqual(
123 | os.environ.get("GOBEANSPROXY_TEST_CW") == "1",
124 | self.cstar_write_enable
125 | )
126 |
127 | @BaseTest.require_rw_enable(br=(True,), bw=(True,), cr=(False,), cw=(False,))
128 | def test_big_value(self):
129 | store = MCStore(self.proxy.addr)
130 | key = 'largekey'
131 | size = 10 * 1024 * 1024
132 | string_large = random_string(size // 10) * 10
133 |
134 | self.assertTrue(store.set(key, string_large))
135 | self.assertEqual(store.get(key), string_large)
136 |
137 | rsize = (((size + len(key) + 24) >> 8) + 1) << 8
138 | self.update_pos(rsize)
139 |
140 | self.assertEqual(self.get_meta(store, key), (1, 0, self.last_pos))
141 |
142 | self.assertTrue(store.set(key, 'aaa'))
143 | self.update_pos(256)
144 | self.assertEqual(self.get_meta(store, key), (2, 0, self.last_pos))
145 |
146 | self.checkCounterZero()
147 |
--------------------------------------------------------------------------------
/dstore/bucket.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "sort"
5 | "time"
6 | )
7 |
8 | const RINGLEN = 60
9 | const CONSISTENTLEN = 100
10 |
11 | type HostInBucket struct {
12 | status bool
13 | score float64
14 | host *Host
15 | lantency *RingQueue
16 | }
17 |
18 | type Bucket struct {
19 | ID int
20 | hostsList []*HostInBucket
21 | partition *Partition
22 | }
23 |
24 | type ByName []*HostInBucket
25 |
26 | func (b ByName) Len() int {
27 | return len(b)
28 | }
29 |
30 | func (b ByName) Swap(i, j int) {
31 | b[i], b[j] = b[j], b[i]
32 | }
33 |
34 | func (b ByName) Less(i, j int) bool {
35 | return b[i].host.Addr < b[j].host.Addr
36 | }
37 |
38 | func newBucket(id int, hosts ...*Host) *Bucket {
39 | bucket := new(Bucket)
40 | bucket.ID = id
41 | bucket.hostsList = []*HostInBucket{}
42 | for _, host := range hosts {
43 | bucket.hostsList = append(bucket.hostsList, newHostInBucket(host))
44 | }
45 | sort.Sort(ByName(bucket.hostsList))
46 | bucket.partition = NewPartition(CONSISTENTLEN, len(bucket.hostsList))
47 | return bucket
48 | }
49 |
50 | func newHostInBucket(host *Host) *HostInBucket {
51 | return &HostInBucket{
52 | status: true,
53 | score: 0,
54 | host: host,
55 | lantency: NewRingQueue(),
56 | }
57 | }
58 |
59 | // get host by key
60 | func (bucket *Bucket) GetHosts(key string) (hosts []*Host) {
61 | hostIndex := bucket.partition.offsetGet(key)
62 | for i, host := range bucket.hostsList {
63 | if i != hostIndex {
64 | hosts = append(hosts, host.host)
65 | } else {
66 | hosts = append([]*Host{host.host}, hosts...)
67 | }
68 | }
69 | return
70 | }
71 |
72 | func (bucket *Bucket) ReBalance() {
73 | bucket.reScore()
74 | bucket.balance()
75 | }
76 |
77 | func (bucket *Bucket) reScore() {
78 | for _, host := range bucket.hostsList {
79 | var Sum float64
80 | var count int
81 | 		// when the host is down
82 | if host.status == false {
83 | host.score = 0
84 | } else {
85 | latencies := host.lantency.Get(proxyConf.ResTimeSeconds, latencyDataType)
86 | for _, latency := range latencies {
87 | Sum += latency.Sum
88 | count += latency.Count
89 | }
90 | if count > 0 {
91 | host.score = Sum / float64(count)
92 | } else {
93 | host.score = 0
94 | }
95 | }
96 | }
97 | }
98 |
99 | func (bucket *Bucket) balance() {
100 | fromHost, toHost := bucket.getModify()
101 | // TODO
102 | if bucket.needBalance(fromHost, toHost) {
103 | var offsetOld, offsetNew []int
104 | offsetOld = bucket.partition.offsets
105 | bucket.partition.reBalance(fromHost, toHost, 1)
106 | offsetNew = bucket.partition.offsets
107 | logger.Debugf("bucket %d BALANCE: from host-%s-%d to host-%s-%d, make offsets %v to %v ", bucket.ID, bucket.hostsList[fromHost].host.Addr, fromHost, bucket.hostsList[toHost].host.Addr, toHost, offsetOld, offsetNew)
108 | }
109 | }
110 |
111 | func (bucket *Bucket) needBalance(fromIndex, toIndex int) bool {
112 | return bucket.roundScore(fromIndex)-bucket.roundScore(toIndex) > proxyConf.ScoreDeviation
113 | }
114 |
115 | func (bucket *Bucket) roundScore(hostIndex int) float64 {
116 | 	// when the score is less than ResponseTimeMin, use ResponseTimeMin
117 | if v := bucket.hostsList[hostIndex].score; v < proxyConf.ResponseTimeMin {
118 | return proxyConf.ResponseTimeMin
119 | } else {
120 | return v
121 | }
122 | }
123 |
124 | func (bucket *Bucket) getModify() (fromHost, toHost int) {
125 | var maxScore float64
126 | var minScore float64
127 | count := 0
128 | for i, host := range bucket.hostsList {
129 | 		// do nothing when the host is down
130 | if host.status == false {
131 | continue
132 | }
133 | if count == 0 {
134 | minScore = host.score
135 | maxScore = host.score
136 | fromHost = i
137 | toHost = i
138 | count++
139 | continue
140 | }
141 | if host.score > maxScore {
142 | maxScore = host.score
143 | fromHost = i
144 | }
145 | if host.score < minScore {
146 | minScore = host.score
147 | toHost = i
148 | }
149 | }
150 | return
151 | }
152 |
153 | // return false if there are too many connection errors
154 | func (bucket *Bucket) isHostAlive(addr string) bool {
155 | _, host := bucket.getHostByAddr(addr)
156 | errs := host.lantency.Get(proxyConf.ErrorSeconds, errorDataType)
157 | count := 0
158 | for _, err := range errs {
159 | count += err.Count
160 | }
161 | return count < proxyConf.MaxConnectErrors
162 | }
163 |
164 | func (bucket *Bucket) riseHost(addr string) {
165 | 	// TODO: clear historical errors
166 | 	// the response time history also needs to be cleared
167 | // TODO Lock
168 | _, hostBucket := bucket.getHostByAddr(addr)
169 | if hostBucket.status == false {
170 | hostBucket.status = true
171 | hostBucket.lantency.clear()
172 | }
173 | }
174 |
175 | func (bucket *Bucket) addLatency(host string, startTime time.Time, latency float64) {
176 | 	// TODO: this filtering runs on every add
177 | _, hostBucket := bucket.getHostByAddr(host)
178 | if latency > 0 && !hostBucket.isAlive() {
179 | bucket.riseHost(host)
180 | }
181 | hostBucket.lantency.Push(startTime, latency, latencyDataType)
182 | }
183 |
184 | func (bucket *Bucket) addConErr(host string, startTime time.Time, error float64) {
185 | _, hostBucket := bucket.getHostByAddr(host)
186 | if hostBucket.isAlive() {
187 | hostBucket.lantency.Push(startTime, error, errorDataType)
188 | hostisalive := bucket.isHostAlive(host)
189 | if !hostisalive {
190 | bucket.downHost(host)
191 | logger.Errorf("host %s is removed from partition", host)
192 | }
193 | }
194 | }
195 |
196 | func (bucket *Bucket) getHostByAddr(addr string) (int, *HostInBucket) {
197 | for i, host := range bucket.hostsList {
198 | if host.host.Addr == addr {
199 | return i, host
200 | }
201 | }
202 | return -1, &HostInBucket{}
203 | }
204 |
205 | func (bucket *Bucket) downHost(addr string) {
206 | index, host := bucket.getHostByAddr(addr)
207 | host.down()
208 | bucket.partition.remove(index)
209 | }
210 |
211 | func (hb *HostInBucket) down() {
212 | hb.status = false
213 | hb.lantency.clear()
214 | }
215 |
216 | func (hb *HostInBucket) isAlive() bool {
217 | return hb.status
218 | }
219 |
--------------------------------------------------------------------------------
/cassandra/prefix_table_finder.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "path/filepath"
7 | "sync"
8 |
9 | "github.com/acomagu/trie/v2"
10 | "gopkg.in/yaml.v3"
11 |
12 | "github.com/douban/gobeansproxy/config"
13 | )
14 |
15 | var (
16 | selectQTpl string
17 | insertQTpl string
18 | deleteQTpl string
19 | )
20 |
21 | type KeyTableFinder struct {
22 | trie *trie.Tree[rune, string]
23 | defaultT string
24 | lock sync.RWMutex
25 | currentMap map[string]string
26 | }
27 |
28 | func getTableTrieFromCfg(
29 | ccfg *config.CassandraStoreCfg, cqlStore *CassandraStore) (
30 | *trie.Tree[rune, string], map[string]string, error) {
31 | if !ccfg.PrefixTableDispatcherCfg.Enable {
32 | logger.Infof("table switcher disabled, skip init ...")
33 | return nil, nil, nil
34 | }
35 |
36 | tcfg := ccfg.PrefixTableDispatcherCfg
37 | s2k := tcfg.StaticCfg
38 |
39 | var prefixTrie *trie.Tree[rune, string]
40 |
41 | keysString := [][]rune{}
42 | vStrStatus := []string{}
43 | dedup := map[string]struct{}{}
44 |
45 | if tcfg.CfgFromCstarTable != "" && tcfg.CfgFromCstarKeySpace != "" {
46 | c := DisPatcherCfg(tcfg)
47 | pkeys, pvalues, err := c.LoadFromDB(cqlStore)
48 | if err != nil {
49 | return nil, nil, err
50 | }
51 |
52 | keysString = append(keysString, pkeys...)
53 | vStrStatus = append(vStrStatus, pvalues...)
54 | }
55 |
56 | if len(s2k) > 0 {
57 | for s, kprefixs := range s2k {
58 | for _, prefix := range kprefixs {
59 | keysString = append(keysString, []rune(prefix))
60 | vStrStatus = append(vStrStatus, s)
61 | }
62 | }
63 | }
64 |
65 | // check duplicate
66 | if len(vStrStatus) != len(keysString) {
67 | return nil, nil, fmt.Errorf("value list len not match with prefix list len")
68 | }
69 |
70 | duplicateKeys := []string{}
71 | loadedMap := map[string]string{}
72 | for idx, k := range keysString {
73 | ks := string(k)
74 | loadedMap[ks] = vStrStatus[idx]
75 | if _, ok := dedup[ks]; !ok {
76 | dedup[ks] = struct{}{}
77 | } else {
78 | duplicateKeys = append(duplicateKeys, ks)
79 | }
80 | }
81 | if len(duplicateKeys) > 0 {
82 | return nil, nil, fmt.Errorf("prefix cfg duplicate: %v", duplicateKeys)
83 | }
84 | logger.Infof("Loading from cfg: %v", loadedMap)
85 |
86 | if len(keysString) == len(vStrStatus) && len(keysString) > 0 {
87 | tr := trie.New[rune, string](keysString, vStrStatus)
88 | prefixTrie = &tr
89 | } else {
90 | prefixTrie = nil
91 | }
92 |
93 | return prefixTrie, loadedMap, nil
94 | }
95 |
96 | func NewKeyTableFinder(config *config.CassandraStoreCfg, cqlStore *CassandraStore) (*KeyTableFinder, error) {
97 | f := new(KeyTableFinder)
98 | t, nowMap, err := getTableTrieFromCfg(config, cqlStore)
99 | if err != nil {
100 | return nil, err
101 | }
102 | f.trie = t
103 | f.defaultT = config.DefaultTable
104 | f.currentMap = nowMap
105 |
106 | // init sql str
107 | selectQTpl = fmt.Sprintf(
108 | "select value from %s.%%s where key = ?",
109 | config.DefaultKeySpace,
110 | )
111 | insertQTpl = fmt.Sprintf(
112 | "insert into %s.%%s (key, value) values (?, ?)",
113 | config.DefaultKeySpace,
114 | )
115 | deleteQTpl = fmt.Sprintf(
116 | "delete from %s.%%s where key = ?",
117 | config.DefaultKeySpace,
118 | )
119 |
120 | return f, nil
121 | }
122 |
123 | func (f *KeyTableFinder) GetTableByKey(key string) string {
124 | if f.trie == nil {
125 | return f.defaultT
126 | }
127 |
128 | var v string
129 | var match bool
130 |
131 | f.lock.RLock()
132 | defer f.lock.RUnlock()
133 |
134 | n := *(f.trie)
135 |
136 | for _, c := range key {
137 | if n = n.TraceOne(c); n == nil {
138 | break
139 | }
140 |
141 | if vv, ok := n.Terminal(); ok {
142 | v = vv
143 | match = true
144 | }
145 | }
146 |
147 | if match {
148 | return v
149 | } else {
150 | return f.defaultT
151 | }
152 | }
153 |
154 | func (f *KeyTableFinder) GetSqlTpl(sqlType string, key string) string {
155 | switch sqlType {
156 | case "select":
157 | return fmt.Sprintf(selectQTpl, f.GetTableByKey(key))
158 | case "delete":
159 | return fmt.Sprintf(deleteQTpl, f.GetTableByKey(key))
160 | default:
161 | return fmt.Sprintf(insertQTpl, f.GetTableByKey(key))
162 | }
163 | }
164 |
165 | func (f *KeyTableFinder) LoadStaticCfg(cfgDir string) (*config.CassandraStoreCfg, error) {
166 | cfg := struct {
167 | CassandraCfg config.CassandraStoreCfg `yaml:"cassandra"`
168 | }{}
169 |
170 | configF, err := ioutil.ReadFile(filepath.Join(cfgDir, "proxy.yaml"))
171 | if err != nil {
172 | return nil, err
173 | }
174 | err = yaml.Unmarshal(configF, &cfg)
175 | if err != nil {
176 | return nil, err
177 | }
178 |
179 | return &cfg.CassandraCfg, nil
180 | }
181 |
182 | func (f *KeyTableFinder) LoadCfg(cfg *config.CassandraStoreCfg, cqlStore *CassandraStore) error {
183 | if !cfg.PrefixTableDispatcherCfg.Enable {
184 | return fmt.Errorf("you can't disable key table finder online")
185 | }
186 | pTrie, nowMap, err := getTableTrieFromCfg(cfg, cqlStore)
187 | if err != nil {
188 | logger.Errorf("reloading c* cfg err: %s", err)
189 | return err
190 | }
191 | logger.Infof("reloading c* cfg for prefix switch to: %v", nowMap)
192 |
193 | defaultS := cfg.DefaultTable
194 | logger.Infof("reloading c* cfg for prefix default store to: %s", cfg.DefaultTable)
195 |
196 |
197 | f.lock.Lock()
198 | defer f.lock.Unlock()
199 | f.trie = pTrie
200 | f.defaultT = defaultS
201 | f.currentMap = nowMap
202 | cqlStore.staticTable = !cfg.PrefixTableDispatcherCfg.Enable
203 | return nil
204 | }
205 |
206 | func (f *KeyTableFinder) Upsert(cfg *config.CassandraStoreCfg, data map[string][]string, cqlStore *CassandraStore) error {
207 | dispatcherCfg := DisPatcherCfg(cfg.PrefixTableDispatcherCfg)
208 | return dispatcherCfg.SaveToDB(data, cqlStore)
209 | }
210 |
211 | func (f *KeyTableFinder) DeletePrefix(cfg *config.CassandraStoreCfg, prefix string, cqlStore *CassandraStore) error {
212 | dispatcherCfg := DisPatcherCfg(cfg.PrefixTableDispatcherCfg)
213 | return dispatcherCfg.DeletePrefixCfg(prefix, cqlStore)
214 | }
215 |
216 | func (f *KeyTableFinder) GetCurrentMap() map[string]string {
217 | return f.currentMap
218 | }
219 |
--------------------------------------------------------------------------------
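The tail of the table finder above resolves a key to a table by walking a rune trie for the longest matching prefix, falling back to the default table. Below is a minimal, self-contained sketch of that lookup (not part of the repo): the prefixes, table names, and the resolve helper are hypothetical; only the acomagu/trie/v2 calls mirror the code above.

package main

import (
	"fmt"

	"github.com/acomagu/trie/v2"
)

func main() {
	// hypothetical prefix -> table rules
	prefixes := [][]rune{[]rune("/movie/"), []rune("/music/")}
	tables := []string{"movie_kv", "music_kv"}
	tr := trie.New[rune, string](prefixes, tables)

	resolve := func(key, defaultTable string) string {
		n := tr
		table := defaultTable
		for _, c := range key {
			if n = n.TraceOne(c); n == nil {
				break
			}
			if v, ok := n.Terminal(); ok {
				table = v // remember the longest prefix matched so far
			}
		}
		return table
	}

	fmt.Println(resolve("/movie/1234", "kvstore")) // movie_kv
	fmt.Println(resolve("/book/5678", "kvstore"))  // kvstore (default table)
}

With these rules a key under /movie/ is dispatched to movie_kv, while anything without a configured prefix falls back to the default table, which is what GetTableByKey does for the real configuration.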
/dstore/host.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "bufio"
5 | "errors"
6 | "fmt"
7 | "net"
8 | "strconv"
9 | "strings"
10 | "sync"
11 | "time"
12 |
13 | mc "github.com/douban/gobeansdb/memcache"
14 | )
15 |
16 | const (
17 | WAIT_FOR_RETRY = "wait for retry"
18 | )
19 |
20 | type Host struct {
21 | // Addr is host:port pair
22 | Addr string
23 |
24 | // Index is the index of host in Scheduler.hosts
25 | Index int
26 |
27 | // nextDial is the next time to reconnect
28 | nextDial time.Time
29 |
30 | // conns is a free list of connections
31 | conns chan net.Conn
32 |
33 | sync.Mutex
34 | }
35 |
36 | func NewHost(addr string) *Host {
37 | host := new(Host)
38 | host.Addr = addr
39 | host.conns = make(chan net.Conn, proxyConf.MaxFreeConnsPerHost)
40 | return host
41 | }
42 |
43 | func isWaitForRetry(err error) bool {
44 | return err != nil && strings.HasPrefix(err.Error(), WAIT_FOR_RETRY)
45 | }
46 |
47 | func (host *Host) Close() {
48 | if host.conns == nil {
49 | return
50 | }
51 | ch := host.conns
52 | host.conns = nil
53 | close(ch)
54 |
55 | for c := range ch { // drain and close any idle connections left in the free list
56 | c.Close()
57 | }
58 | }
59 |
60 | func (host *Host) isSilence(now time.Time) (time.Time, bool) {
61 | if host.nextDial.After(now) {
62 | return host.nextDial, true
63 | }
64 | return now, false
65 | }
66 |
67 | func (host *Host) createConn() (net.Conn, error) {
68 | now := time.Now()
69 | if nextDial, isSilence := host.isSilence(now); isSilence {
70 | return nil, fmt.Errorf("%s: next try %s", WAIT_FOR_RETRY, nextDial.Format("2006-01-02T15:04:05.999"))
71 | }
72 |
73 | conn, err := net.DialTimeout("tcp", host.Addr, time.Duration(proxyConf.ConnectTimeoutMs)*time.Millisecond)
74 | if err != nil {
75 | host.nextDial = now.Add(time.Millisecond * time.Duration(proxyConf.DialFailSilenceMs))
76 | return nil, err
77 | }
78 | return conn, nil
79 | }
80 |
81 | func (host *Host) getConn() (c net.Conn, err error) {
82 | if host.conns == nil {
83 | return nil, errors.New("host closed")
84 | }
85 | select {
86 | // Grab a connection if available; create if not.
87 | case c = <-host.conns:
88 | // Got one; nothing more to do.
89 | default:
90 | // None free, so create a new one.
91 | c, err = host.createConn()
92 | }
93 | return
94 | }
95 |
96 | func (host *Host) releaseConn(conn net.Conn) {
97 | if host.conns == nil {
98 | conn.Close()
99 | return
100 | }
101 | select {
102 | // Reuse connection if there's room.
103 | case host.conns <- conn:
104 | // Connection on free list; nothing more to do.
105 | default:
106 | // Free list full, just carry on.
107 | conn.Close()
108 | }
109 | }
110 |
111 | func (host *Host) executeWithTimeout(req *mc.Request, timeout time.Duration) (resp *mc.Response, err error) {
112 | conn, err := host.getConn()
113 | if err != nil {
114 | return
115 | }
116 | conn.SetDeadline(time.Now().Add(timeout))
117 |
118 | var reason string
119 |
120 | defer func() {
121 | if err != nil {
122 | logger.Errorf("error occurred on %s, reason: %s, err: %s", host.Addr, reason, err.Error())
123 | if resp != nil {
124 | resp.CleanBuffer()
125 | }
126 | conn.Close()
127 | } else {
128 | host.releaseConn(conn)
129 | }
130 | }()
131 |
132 | err = req.Write(conn)
133 | if err != nil {
134 | reason = "write request failed"
135 | return
136 | }
137 |
138 | resp = new(mc.Response)
139 | if req.NoReply {
140 | resp.Status = "STORED"
141 | return
142 | }
143 |
144 | reader := bufio.NewReader(conn)
145 | if err = resp.Read(reader); err != nil {
146 | reason = "read response failed"
147 | return nil, err
148 | }
149 |
150 | if err = req.Check(resp); err != nil {
151 | reason = fmt.Sprintf("unexpected response %v %v",
152 | req, resp)
153 | return nil, err
154 | }
155 | return
156 | }
157 |
158 | func (host *Host) Len() int {
159 | return 0
160 | }
161 |
162 | func (host *Host) store(cmd string, key string, item *mc.Item, noreply bool) (bool, error) {
163 | req := &mc.Request{Cmd: cmd, Keys: []string{key}, Item: item, NoReply: noreply}
164 | resp, err := host.executeWithTimeout(req, time.Duration(proxyConf.WriteTimeoutMs)*time.Millisecond)
165 | return err == nil && resp.Status == "STORED", err
166 | }
167 |
168 | func (host *Host) Set(key string, item *mc.Item, noreply bool) (bool, error) {
169 | return host.store("set", key, item, noreply)
170 | }
171 |
172 | func (host *Host) Get(key string) (*mc.Item, error) {
173 | req := &mc.Request{Cmd: "get", Keys: []string{key}}
174 | resp, err := host.executeWithTimeout(req, time.Duration(proxyConf.ReadTimeoutMs)*time.Millisecond)
175 | if err != nil {
176 | return nil, err
177 | }
178 | item := resp.Items[key]
179 | return item, nil
180 | }
181 |
182 | func (host *Host) GetMulti(keys []string) (map[string]*mc.Item, error) {
183 | req := &mc.Request{Cmd: "get", Keys: keys}
184 | resp, err := host.executeWithTimeout(req, time.Duration(proxyConf.ReadTimeoutMs)*time.Millisecond)
185 | if err != nil {
186 | return nil, err
187 | }
188 | return resp.Items, nil
189 | }
190 |
191 | func (host *Host) Append(key string, value []byte) (bool, error) {
192 | flag := 0
193 | item := newItem(flag, value)
194 | req := &mc.Request{Cmd: "append", Keys: []string{key}, Item: item}
195 | resp, err := host.executeWithTimeout(req, time.Duration(proxyConf.ReadTimeoutMs)*time.Millisecond)
196 | item.Free()
197 | if err == nil {
198 | return resp.Status == "STORED", nil
199 | } else {
200 | return false, err
201 | }
202 | }
203 |
204 | func (host *Host) Incr(key string, value int) (int, error) {
205 | flag := 0
206 | item := newItem(flag, []byte(strconv.Itoa(value)))
207 | req := &mc.Request{Cmd: "incr", Keys: []string{key}, Item: item}
208 | resp, err := host.executeWithTimeout(req, time.Duration(proxyConf.ReadTimeoutMs)*time.Millisecond)
209 | item.Free()
210 | if err != nil {
211 | return 0, err
212 | }
213 | return strconv.Atoi(resp.Msg)
214 | }
215 |
216 | func (host *Host) Delete(key string) (bool, error) {
217 | req := &mc.Request{Cmd: "delete", Keys: []string{key}}
218 | resp, err := host.executeWithTimeout(req, time.Duration(proxyConf.ReadTimeoutMs)*time.Millisecond)
219 | if err == nil {
220 | return resp.Status == "DELETED", nil
221 | } else {
222 | return false, err
223 | }
224 | }
225 |
226 | func (host *Host) Process(key string, args []string) (string, string) {
227 | return "", ""
228 | }
229 |
--------------------------------------------------------------------------------
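getConn/releaseConn above implement a free list of connections on a buffered channel: reuse an idle connection when one is available, dial otherwise, and put a connection back only while there is room. A self-contained sketch of the same pattern (not part of the repo; the pool type and addresses below are hypothetical):

package main

import (
	"fmt"
	"net"
	"time"
)

type pool struct {
	addr string
	free chan net.Conn
}

func (p *pool) get() (net.Conn, error) {
	select {
	case c := <-p.free: // reuse an idle connection
		return c, nil
	default: // none idle: dial a new one
		return net.DialTimeout("tcp", p.addr, 300*time.Millisecond)
	}
}

func (p *pool) put(c net.Conn) {
	select {
	case p.free <- c: // keep it for reuse
	default: // free list full: drop it
		c.Close()
	}
}

func main() {
	p := &pool{addr: "127.0.0.1:7980", free: make(chan net.Conn, 4)}
	c, err := p.get()
	if err != nil {
		fmt.Println("dial failed (expected if nothing listens on 7980):", err)
		return
	}
	p.put(c)
}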
/dstore/store_test.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "errors"
5 | "flag"
6 | "fmt"
7 | "io/ioutil"
8 | "net"
9 | "os"
10 | "os/exec"
11 | "os/user"
12 | "path"
13 | "path/filepath"
14 | "testing"
15 | "time"
16 |
17 | dbcfg "github.com/douban/gobeansdb/gobeansdb"
18 | mc "github.com/douban/gobeansdb/memcache"
19 | yaml "gopkg.in/yaml.v2"
20 |
21 | "github.com/douban/gobeansproxy/config"
22 | "github.com/douban/gobeansproxy/utils"
23 | "github.com/stretchr/testify/assert"
24 | )
25 |
26 | var testDataDir = flag.String("testDataDir", "/tmp/gobeansdbproxy/bdb/data/", "this dir will be used by gobeansdb and proxy")
27 |
28 |
29 | func setupSuite(tb testing.TB) func(tb testing.TB) {
30 | user, err := user.Current()
31 | if err != nil {
32 | tb.Fatalf("get username err: %s", err)
33 | }
34 | gopath := os.Getenv("GOPATH")
35 | gobeansdbBin := filepath.Join(gopath, "bin", "gobeansdb")
36 |
37 | if _, err := os.Stat(gobeansdbBin); errors.Is(err, os.ErrNotExist) {
38 | tb.Fatalf("gobeansdb binary not exists, %s", gobeansdbBin)
39 | }
40 |
41 | projDir := utils.GetProjectHomeDir()
42 |
43 | allGobeansdb := []*exec.Cmd{}
44 | for _, p := range []string{"57980", "57981", "57982", "57983"} {
45 | conn, _ := net.DialTimeout("tcp", net.JoinHostPort("localhost", p), time.Second)
46 | if conn != nil {
47 | conn.Close()
48 | tb.Logf("%s port already listening, skip starting ...", p)
49 | continue
50 | }
51 |
52 | // modify the config so a developer can run the test outside the container
53 | gobeansdbCfg := fmt.Sprintf("%s/.doubanpde/scripts/bdb/gobeansproxy/%s/conf/", projDir, p)
54 | cfgParsed := dbcfg.DBConfig{}
55 | yfile, err := ioutil.ReadFile(filepath.Join(gobeansdbCfg, "global.yaml"))
56 | if err != nil {
57 | tb.Fatal(err)
58 | }
59 | err = yaml.Unmarshal(yfile, &cfgParsed)
60 | if err != nil {
61 | tb.Fatalf("load cfg %s err: %s", gobeansdbCfg, err)
62 | }
63 | dataPath := filepath.Join(*testDataDir, p, user.Username, "data")
64 | logPath := filepath.Join(*testDataDir, p, user.Username, "log")
65 | for _, pp := range []string{dataPath, logPath} {
66 | err = os.MkdirAll(pp, os.ModePerm)
67 | if err != nil {
68 | tb.Fatalf("create dir %s err: %s", pp, err)
69 | }
70 | }
71 | cfgParsed.ServerConfig.AccessLog = filepath.Join(logPath, "access.log")
72 | cfgParsed.ServerConfig.ErrorLog = filepath.Join(logPath, "error.log")
73 | cfgParsed.HStoreConfig.DBLocalConfig.Home = dataPath
74 | gobeansdbTestCfg := fmt.Sprintf("%s/.doubanpde/scripts/bdb/gobeansproxy/%s/testconf/", projDir, p)
75 | err = os.MkdirAll(gobeansdbTestCfg, os.ModePerm)
76 | if err != nil {
77 | tb.Fatalf("create dir %s err: %s", gobeansdbTestCfg, err)
78 | }
79 | c, err := yaml.Marshal(cfgParsed)
80 | if err != nil {
81 | tb.Fatalf("marshal cfg err: %s", err)
82 | }
83 |
84 | dbGlobalCfg := filepath.Join(gobeansdbTestCfg, "global.yaml")
85 | f, err := os.OpenFile(dbGlobalCfg, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
86 | if err != nil {
87 | tb.Fatal(err)
88 | }
89 | defer f.Close()
90 | _, err = f.Write(c)
91 | if err != nil {
92 | tb.Fatal(err)
93 | }
94 | routeCfg := filepath.Join(gobeansdbTestCfg, "route.yaml")
95 | rcfg, err := ioutil.ReadFile(filepath.Join(gobeansdbCfg, "route.yaml"))
96 | if err != nil {
97 | tb.Fatal(err)
98 | }
99 | err = ioutil.WriteFile(routeCfg, rcfg, 0644)
100 | if err != nil {
101 | tb.Fatal(err)
102 | }
103 |
104 | cmd := exec.Command(
105 | gobeansdbBin,
106 | "-confdir",
107 | gobeansdbTestCfg,
108 | )
109 | if err := cmd.Start(); err != nil {
110 | tb.Fatalf("failed to start %s gobeansdb: %s", p, err)
111 | }
112 | tb.Logf("start %s with pid: %d", cmd, cmd.Process.Pid)
113 | allGobeansdb = append(allGobeansdb, cmd)
114 | }
115 | // wait a while for the gobeansdb servers to start
116 | time.Sleep(time.Second * 5)
117 |
118 | return func(tb testing.TB) {
119 | for _, execCmd := range allGobeansdb {
120 | if err := execCmd.Process.Kill(); err != nil {
121 | tb.Fatalf("failed to kill process %s: %s", execCmd, err)
122 | }
123 | }
124 | }
125 | }
126 |
127 | func testClientSet(t *testing.T, c mc.StorageClient, key string, val []byte) {
128 | assert := assert.New(t)
129 | flag := 2
130 | ok, err := clientSet(c, key, val, flag)
131 | setHosts := c.GetSuccessedTargets()
132 | c.Clean()
133 | assert.True(ok)
134 | assert.Nil(err)
135 | assert.True(len(setHosts) > 0)
136 |
137 | v, err := c.Get(key)
138 | getHosts := c.GetSuccessedTargets()
139 | c.Clean()
140 |
141 | assert.Equal(val, v.Body)
142 | assert.Equal(flag, v.Flag)
143 | assert.Equal(2, len(getHosts))
144 | assert.True(hasIntersection(setHosts, getHosts))
145 | }
146 |
147 | func clientSet(c mc.StorageClient, key string, val []byte, flag int) (bool, error) {
148 | item := newItem(flag, val)
149 | defer item.Free()
150 | noreply := false
151 | return c.Set(key, item, noreply)
152 | }
153 |
154 | func hasIntersection(arr1 []string, arr2 []string) bool {
155 | for _, i := range arr1 {
156 | for _, j := range arr2 {
157 | if i == j {
158 | return true
159 | }
160 | }
161 | }
162 | return false
163 | }
164 |
165 | func testFailStoreClient(t *testing.T, c mc.StorageClient) {
166 | assert := assert.New(t)
167 | key := "/test/fail/client"
168 |
169 | _, err := c.Get(key)
170 | assert.NotNil(err)
171 |
172 | _, err = c.Set("key", &mc.Item{}, false)
173 | assert.NotNil(err)
174 |
175 | _, err = c.GetMulti([]string{"key"})
176 | assert.NotNil(err)
177 | }
178 |
179 | func testStoreClient(t *testing.T, c mc.StorageClient) {
180 | assert := assert.New(t)
181 | key1 := "/test/client/1"
182 |
183 | r, _ := c.Get(key1)
184 | assert.Nil(r)
185 | assert.True(len(c.GetSuccessedTargets()) > 2)
186 | c.Clean()
187 |
188 | // set
189 | key2 := "/test/client/2"
190 | val2 := []byte("value 2")
191 | testClientSet(t, c, key2, val2)
192 |
193 | key3 := "/test/client/3"
194 | val3 := []byte("value 3")
195 | testClientSet(t, c, key3, val3)
196 |
197 | // get multi
198 | items, _ := c.GetMulti([]string{key1, key2, key3})
199 | c.Clean()
200 | assert.Equal(2, len(items))
201 |
202 | keyNum := 100
203 | keys := make([]string, keyNum)
204 | flagm := 3
205 | valm := []byte("value multi")
206 | for i := 0; i < keyNum; i++ {
207 | keys[i] = fmt.Sprintf("/test/client/multi_%d", i)
208 | ok, _ := clientSet(c, keys[i], valm, flagm)
209 | c.Clean()
210 | assert.True(ok)
211 | }
212 | items, err := c.GetMulti(keys)
213 | c.Clean()
214 | assert.Nil(err)
215 | assert.Equal(keyNum, len(items))
216 |
217 | // large obj
218 | key4 := "/test/client/4"
219 | val4 := make([]byte, 1024*1000)
220 | testClientSet(t, c, key4, val4)
221 |
222 | // delete
223 | key6 := "/test/client/6"
224 | val6 := []byte("value 6")
225 | testClientSet(t, c, key6, val6)
226 | ok, _ := c.Delete(key6)
227 | assert.True(ok)
228 | v6, _ := c.Get(key6)
229 | assert.Nil(v6)
230 | }
231 |
232 | func TestDStoreOnly(t *testing.T) {
233 | teardown := setupSuite(t)
234 | defer teardown(t)
235 |
236 | homeDir := utils.GetProjectHomeDir()
237 | confdir := path.Join(homeDir, ".doubanpde", "scripts", "bdb", "gobeansproxy", "dstore-only", "conf")
238 | proxyConf := &config.Proxy
239 | proxyConf.Load(confdir)
240 |
241 | InitGlobalManualScheduler(config.Route, proxyConf.N, BucketsManualSchduler)
242 | storage := new(Storage)
243 | storage.InitStorageEngine(proxyConf)
244 | c := NewStorageClient(proxyConf.N, proxyConf.W, proxyConf.R, storage.cstar, storage.PSwitcher, storage.dualWErrHandler)
245 |
246 | testStoreClient(t, c)
247 | }
248 |
--------------------------------------------------------------------------------
/tests/gen_config.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 |
4 | import os
5 | import yaml
6 | import copy
7 | from os.path import join
8 | from tests.utils import mkdir_p
9 |
10 | gobeansdb_conf_tmpl = {
11 | 'hstore': {
12 | 'data': {
13 | 'check_vhash': True,
14 | 'datafile_max_str': '4000M',
15 | 'flush_interval': 60,
16 | 'flush_wake_str': '10M',
17 | 'no_gc_days': 7,
18 | },
19 | 'hint': {
20 | 'hint_index_interval_str': '32K',
21 | 'hint_merge_interval': 5,
22 | 'hint_no_merged': True,
23 | 'hint_split_cap_str': '1M',
24 | },
25 | 'htree': {
26 | 'tree_height': 3,
27 | },
28 | 'local': {
29 | 'home': '/var/lib/beansdb'
30 | }
31 | },
32 | 'mc': {
33 | 'body_big_str': '5M',
34 | 'body_c_str': '0K',
35 | 'flush_max_str': '100M',
36 | 'body_max_str': '50M',
37 | 'max_key_len': 250,
38 | 'max_req': 16
39 | },
40 | 'server': {
41 | 'hostname': '127.0.0.1',
42 | 'listen': '0.0.0.0',
43 | 'errorlog': '/var/log/gobeansdb/error.log',
44 | 'accesslog': '',
45 | 'port': 7900,
46 | 'threads': 4,
47 | 'webport': 7903,
48 | 'zk': 'NO'
49 | }
50 | }
51 |
52 | route_conf_tmpl = {
53 | 'backup': ['127.0.0.1:7983'],
54 | 'main': [
55 | {
56 | 'addr': '127.0.0.1:7980',
57 | 'buckets': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a',
58 | 'b', 'c', 'd', 'e', 'f']
59 | }, {
60 | 'addr': '127.0.0.1:7981',
61 | 'buckets': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a',
62 | 'b', 'c', 'd', 'e', 'f']
63 | }, {
64 | 'addr': '127.0.0.1:7982',
65 | 'buckets': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a',
66 | 'b', 'c', 'd', 'e', 'f']
67 | }
68 | ],
69 | 'numbucket': 16
70 | }
71 |
72 | proxy_conf_tmpl = {
73 | 'dstore': {
74 | 'connect_timeout_ms': 300,
75 | 'dial_fail_silence_ms': 5000,
76 | 'max_free_conns_per_host': 20,
77 | 'n': 3,
78 | 'w': 2,
79 | 'r': 1,
80 | 'read_timeout_ms': 2000,
81 | 'write_timeout_ms': 2000,
82 | 'response_time_seconds': 10,
83 | 'error_seconds': 10,
84 | 'max_connect_errors': 3,
85 | 'score_deviation': 10,
86 | 'item_size_stats': 4096,
87 | 'response_time_min': 4000,
88 | 'enable_write': True,
89 | 'enable_read': False,
90 | },
91 | 'mc': {
92 | 'body_big_str': '5M',
93 | 'body_c_str': '0K',
94 | 'body_max_str': '50M',
95 | 'max_key_len': 250,
96 | 'max_req': 16
97 | },
98 | 'proxy': {
99 | 'hostname': '127.0.0.1',
100 | 'listen': '0.0.0.0',
101 | 'errorlog': '/var/log/gobeansproxy/error.log',
102 | 'accesslog': '/var/log/gobeansproxy/access.log',
103 | 'staticdir': '/var/lib/gobeansproxy',
104 | 'port': 7905,
105 | 'threads': 8,
106 | 'webport': 7908,
107 | 'zkservers': ["zk1:2181"],
108 | 'zkpath': "/gobeansproxy/test",
109 | },
110 | 'cassandra': {
111 | 'enable_read': True,
112 | 'enable_write': True,
113 | 'hosts': ["127.0.0.1:9042"],
114 | 'default_key_space': 'doubandb',
115 | 'default_table': 'kvstore',
116 | 'timeout_sec': 5,
117 | },
118 | }
119 |
120 | ### NOTE: the port numbers here must match those in gobeansproxy/conf/route.yaml
121 |
122 | # (serverport, webport)
123 | MAIN_PORT_PAIRS = [(7980, 7990), (7981, 7991), (7982, 7992), ]
124 |
125 | BACKUP_PORT_PAIRS = [(7983, 7993), ]
126 |
127 | PROXY_PORT_PAIRS = (7905, 7908)
128 |
129 |
130 | def main():
131 | import argparse
132 | parser = argparse.ArgumentParser()
133 | parser.add_argument('-d', '--root-dir', help="root directory")
134 | args = parser.parse_args()
135 | gen_conf(
136 | os.path.abspath(args.root_dir), MAIN_PORT_PAIRS, BACKUP_PORT_PAIRS,
137 | PROXY_PORT_PAIRS)
138 |
139 |
140 | def gen_conf(root_dir,
141 | main_port_pairs=MAIN_PORT_PAIRS,
142 | backup_port_pairs=BACKUP_PORT_PAIRS,
143 | proxy_port_pairs=PROXY_PORT_PAIRS,
144 | bdb_read_enable=True, bdb_write_enable=True,
145 | cstar_read_enable=False, cstar_write_enable=False):
146 | ports = [x[0] for x in main_port_pairs]
147 | backup_ports = [x[0] for x in backup_port_pairs]
148 | route_conf = gen_route_conf(ports, backup_ports)
149 |
150 | ############# proxy
151 | # root_dir/proxy/conf/*.yaml
152 | # root_dir/proxy/*.log
153 | proxy_dir = gen_dir(root_dir, 'proxy')
154 | proxy_conf_dir = gen_dir(proxy_dir, 'conf')
155 |
156 | proxy_conf = gen_proxy_conf(
157 | proxy_dir, proxy_port_pairs[0], proxy_port_pairs[1],
158 | bdb_read_enable, bdb_write_enable,
159 | cstar_read_enable, cstar_write_enable
160 | )
161 | yaml_dump(proxy_conf, join(proxy_conf_dir, 'proxy.yaml'))
162 | yaml_dump(route_conf, join(proxy_conf_dir, 'route.yaml'))
163 |
164 | for (port, webport) in (main_port_pairs + backup_port_pairs):
165 | gen_gobeansdb_conf(root_dir, route_conf, port, webport)
166 |
167 |
168 | def gen_gobeansdb_conf(root_dir, route_conf, port, webport):
169 | ############# server
170 | # root_dir//conf/*.yaml
171 | # root_dir//data/
172 | # root_dir//*.log
173 | server_dir = gen_dir(root_dir, str(port))
174 | server_conf_dir = gen_dir(server_dir, 'conf')
175 | server_data_dir = gen_dir(server_dir, 'data')
176 |
177 | server_conf = gen_server_conf(server_data_dir, server_dir, port, webport)
178 | yaml_dump(server_conf, join(server_conf_dir, 'global.yaml'))
179 | yaml_dump(route_conf, join(server_conf_dir, 'route.yaml'))
180 |
181 |
182 | def gen_dir(*args):
183 | dir_ = join(*args)
184 | mkdir_p(dir_)
185 | return dir_
186 |
187 |
188 | def yaml_dump(conf, filename):
189 | with open(filename, "w") as f:
190 | yaml.dump(conf, stream=f, indent=4, default_flow_style=False)
191 |
192 |
193 | def gen_server_conf(homedir, logdir, port, webport):
194 | tmpl = copy.deepcopy(gobeansdb_conf_tmpl)
195 | tmpl['hstore']['local']['home'] = homedir
196 | tmpl['server']['errorlog'] = os.path.join(logdir, 'error.log')
197 | tmpl['server']['accesslog'] = os.path.join(logdir, 'access.log')
198 | tmpl['server']['port'] = port
199 | tmpl['server']['webport'] = webport
200 | return tmpl
201 |
202 |
203 | def gen_route_conf(ports, backup_ports, numbucket=16):
204 | tmpl = copy.deepcopy(route_conf_tmpl)
205 | host = '127.0.0.1'
206 | buckets = ['%x' % i for i in range(numbucket)]
207 | tmpl['backup'] = ['%s:%s' % (host, p) for p in backup_ports]
208 | tmpl['main'] = [{'addr': '%s:%s' % (host, p),
209 | 'buckets': buckets} for p in ports]
210 | return tmpl
211 |
212 |
213 | def gen_proxy_conf(
214 | logdir, port, webport,
215 | bdb_read_enable=True, bdb_write_enable=True,
216 | cstar_read_enable=False, cstar_write_enable=False):
217 | tmpl = copy.deepcopy(proxy_conf_tmpl)
218 | tmpl['proxy']['errorlog'] = os.path.join(logdir, 'error.log')
219 | tmpl['proxy']['accesslog'] = os.path.join(logdir, 'access.log')
220 | tmpl['proxy']['port'] = port
221 | tmpl['proxy']['webport'] = webport
222 |
223 | assert (bdb_read_enable or cstar_read_enable) \
224 | and (bdb_write_enable or cstar_write_enable), \
225 | 'must enable at least one engine'
226 |
227 | tmpl['cassandra']['enable_read'] = cstar_read_enable
228 | tmpl['cassandra']['enable_write'] = cstar_write_enable
229 | tmpl['dstore']['enable_read'] = bdb_read_enable
230 | tmpl['dstore']['enable_write'] = bdb_write_enable
231 | return tmpl
232 |
233 |
234 | if __name__ == '__main__':
235 | main()
236 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/acomagu/trie/v2 v2.0.0 h1:4/Vt77FUj6qtYl7IN/2QMyl22ztBT0Cr3wg3kL/9mHg=
2 | github.com/acomagu/trie/v2 v2.0.0/go.mod h1:trIf+o9oABbDJULhZ+jUiE5HjfO29H30dQV5PV+P8DA=
3 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
4 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
5 | github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
6 | github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
7 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
8 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
9 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
10 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
11 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
12 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
13 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
14 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
15 | github.com/douban/gobeansdb v1.1.3 h1:ZfAUkOSS9QGk2XKQOWLfmqPHMYrI4CcGOlnvRQ5OAAk=
16 | github.com/douban/gobeansdb v1.1.3/go.mod h1:pVfoirQu5pt26ig5w5yGrLXrVRzbn2/mcJu6uo/NZU4=
17 | github.com/gocql/gocql v1.5.2 h1:WnKf8xRQImcT/KLaEWG2pjEeryDB7K0qQN9mPs1C58Q=
18 | github.com/gocql/gocql v1.5.2/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
19 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
20 | github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
21 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
22 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
23 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
24 | github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
25 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
26 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
27 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
28 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
29 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
30 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
31 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
32 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
33 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
34 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
35 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
36 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
37 | github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
38 | github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
39 | github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
40 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
41 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
42 | github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
43 | github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
44 | github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
45 | github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
46 | github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
47 | github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
48 | github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
49 | github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
50 | github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
51 | github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
52 | github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=
53 | github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
54 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
55 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
56 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
57 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
58 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
59 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
60 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
61 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
62 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
63 | github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
64 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
65 | golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw=
66 | golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
67 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
68 | golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
69 | golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
70 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
71 | golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
72 | golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
73 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
74 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
75 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
76 | google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
77 | google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
78 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
79 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
80 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
81 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
82 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
83 | gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
84 | gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
85 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
86 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
87 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
88 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
89 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
90 |
--------------------------------------------------------------------------------
/cassandra/cstar.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "strings"
8 | "sync"
9 | "time"
10 | "unicode"
11 |
12 | "github.com/douban/gobeansdb/loghub"
13 | mc "github.com/douban/gobeansdb/memcache"
14 | "github.com/douban/gobeansproxy/config"
15 | "github.com/gocql/gocql"
16 | "golang.org/x/sync/errgroup"
17 | )
18 |
19 | const (
20 | MAX_KEY_LEN = 250
21 | )
22 |
23 | var (
24 | logger = loghub.ErrorLogger
25 | proxyConf = &config.Proxy
26 | selectQ string
27 | insertQ string
28 | deleteQ string
29 | )
30 |
31 | type CassandraStore struct {
32 | cluster *gocql.ClusterConfig
33 | session *gocql.Session
34 | keyTableFinder *KeyTableFinder
35 | staticTable bool
36 | ClusterName string
37 | }
38 |
39 | func NewCassandraStore(cstarCfg *config.CassandraStoreCfg) (*CassandraStore, error) {
40 | cluster := gocql.NewCluster(cstarCfg.Hosts...)
41 | if cstarCfg.Username != "" {
42 | password := cstarCfg.Password
43 | if cstarCfg.PasswordFile != "" {
44 | data, err := os.ReadFile(cstarCfg.PasswordFile)
45 | if err != nil {
46 | return nil, err
47 | }
48 | password = strings.TrimSuffix(string(data), "\n")
49 | }
50 |
51 | cluster.Authenticator = gocql.PasswordAuthenticator{
52 | Username: cstarCfg.Username,
53 | Password: password,
54 | }
55 | }
56 | cluster.Keyspace = cstarCfg.DefaultKeySpace
57 |
58 | switch cstarCfg.Consistency {
59 | case "local_one":
60 | cluster.Consistency = gocql.LocalOne
61 | default:
62 | cluster.Consistency = gocql.Quorum
63 | }
64 |
65 | cluster.ReconnectInterval = time.Duration(cstarCfg.ReconnectIntervalSec) * time.Second
66 | cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: cstarCfg.RetryNum}
67 | cluster.Timeout = time.Duration(cstarCfg.CstarTimeoutMs) * time.Millisecond
68 | cluster.ConnectTimeout = time.Duration(cstarCfg.CstarConnectTimeoutMs) * time.Millisecond
69 | cluster.WriteTimeout = time.Duration(cstarCfg.CstarWriteTimeoutMs) * time.Millisecond
70 | cluster.NumConns = cstarCfg.NumConns
71 |
72 | // cluster.SocketKeepalive = 600 * time.Second
73 | session, err := cluster.CreateSession()
74 | selectQ = fmt.Sprintf(
75 | "select value from %s.%s where key = ?",
76 | cstarCfg.DefaultKeySpace, cstarCfg.DefaultTable,
77 | )
78 | insertQ = fmt.Sprintf(
79 | "insert into %s.%s (key, value) values (?, ?)",
80 | cstarCfg.DefaultKeySpace, cstarCfg.DefaultTable,
81 | )
82 | deleteQ = fmt.Sprintf(
83 | "delete from %s.%s where key = ?",
84 | cstarCfg.DefaultKeySpace, cstarCfg.DefaultTable,
85 | )
86 |
87 | if err != nil {
88 | return nil, err
89 | } else {
90 | cqlStore := &CassandraStore{
91 | cluster: cluster,
92 | session: session,
93 | }
94 |
95 | cqlStore.ClusterName, err = cqlStore.GetClusterName()
96 | if err != nil {
97 | return nil, err
98 | }
99 | ktFinder, err := NewKeyTableFinder(cstarCfg, cqlStore)
100 | if err != nil {
101 | return nil, err
102 | }
103 | cqlStore.keyTableFinder = ktFinder
104 | cqlStore.staticTable = !cstarCfg.PrefixTableDispatcherCfg.Enable
105 | return cqlStore, nil
106 | }
107 | }
108 |
109 | func (c *CassandraStore) Close() {
110 | c.session.Close()
111 | }
112 |
113 | func (c *CassandraStore) GetClusterName() (string, error) {
114 | var name string
115 | err := c.session.Query("select cluster_name from system.local").Scan(&name)
116 | if err != nil {
117 | return "", err
118 | } else {
119 | return name, nil
120 | }
121 | }
122 |
123 | func (c *CassandraStore) Get(key string) (*mc.Item, error) {
124 | var q string
125 | if c.staticTable {
126 | q = selectQ
127 | } else {
128 | q = c.keyTableFinder.GetSqlTpl("select", key)
129 | }
130 |
131 | value := &BDBValue{}
132 | query := c.session.Query(q, key)
133 | defer query.Release()
134 | err := query.Scan(&value)
135 | if err == gocql.ErrNotFound {
136 | // https://github.com/douban/gobeansdb/blob/master/memcache/protocol.go#L499
137 | // just return nil for not found
138 | return nil, nil
139 | }
140 |
141 | if err != nil {
142 | return nil, err
143 | } else {
144 | item, err := value.ToMCItem()
145 | if err != nil {
146 | return nil, err
147 | }
148 | return item, nil
149 | }
150 | }
151 |
152 | func (c *CassandraStore) GetMulti(keys []string, result map[string]*mc.Item) error {
153 | // not using IN for this reason
154 | // https://stackoverflow.com/questions/26999098/is-the-in-relation-in-cassandra-bad-for-queries
155 |
156 | lock := sync.Mutex{}
157 |
158 | ctx := context.Background()
159 | g, ctx := errgroup.WithContext(ctx)
160 | g.SetLimit(proxyConf.CassandraStoreCfg.MaxConnForGetm)
161 |
162 | for _, key := range keys {
163 | key := key // https://golang.org/doc/faq#closures_and_goroutines
164 | g.Go(func() error {
165 | item, err := c.Get(key)
166 | if item != nil {
167 | lock.Lock()
168 | defer lock.Unlock()
169 | result[key] = item
170 | } else {
171 | if err != nil {
172 | return err
173 | }
174 | // item is nil with no error: key not found, nothing to add
175 | return nil
176 | }
177 | return nil
178 | })
179 | }
180 |
181 | if err := g.Wait(); err != nil {
182 | logger.Errorf("getm %s err: %s", keys, err)
183 | }
184 |
185 | return nil
186 | }
187 |
188 | func (c *CassandraStore) SetWithValue(key string, v *BDBValue) (ok bool, err error) {
189 | var q string
190 |
191 | if c.staticTable {
192 | q = insertQ
193 | } else {
194 | q = c.keyTableFinder.GetSqlTpl("insert", key)
195 | }
196 |
197 | query := c.session.Query(
198 | q,
199 | key,
200 | v,
201 | )
202 | defer query.Release()
203 | err = query.Exec()
204 |
205 | if err != nil {
206 | logger.Debugf("Set key %s err: %s", key, err)
207 | return false, err
208 | }
209 |
210 | return true, nil
211 | }
212 |
213 | func (c *CassandraStore) Set(key string, item *mc.Item) (ok bool, err error) {
214 | var q string
215 |
216 | if c.staticTable {
217 | q = insertQ
218 | } else {
219 | q = c.keyTableFinder.GetSqlTpl("insert", key)
220 | }
221 |
222 | v := NewBDBValue(item)
223 | query := c.session.Query(
224 | q,
225 | key,
226 | v,
227 | )
228 | defer query.Release()
229 | err = query.Exec()
230 |
231 | if err != nil {
232 | logger.Debugf("Set key %s err: %s", key, err)
233 | return false, err
234 | }
235 | return true, nil
236 | }
237 |
238 | func (c *CassandraStore) Delete(key string) (bool, error) {
239 | var q string
240 |
241 | if c.staticTable {
242 | q = deleteQ
243 | } else {
244 | q = c.keyTableFinder.GetSqlTpl("delete", key)
245 | }
246 |
247 | query := c.session.Query(
248 | q,
249 | key,
250 | )
251 | defer query.Release()
252 | err := query.Exec()
253 |
254 | return err == nil, err
255 | }
256 |
257 | func (c *CassandraStore) GetMeta(key string, extended bool) (*mc.Item, error) {
258 | item, err := c.Get(key)
259 | if err != nil {
260 | return nil, err
261 | }
262 |
263 | if item == nil {
264 | return nil, err
265 | }
266 |
267 | // we fake beansdb metadata
268 | // in douban-beansdb this metadata is used to check whether a record exists
269 | var body string
270 | if extended {
271 | body = fmt.Sprintf(
272 | "%d %d %d %d %d %d %d",
273 | 1, 0, item.Flag, len(item.Body), item.ReceiveTime.Unix(), 0, 0,
274 | )
275 | } else {
276 | body = fmt.Sprintf(
277 | "%d %d %d %d %d",
278 | 1, 0, item.Flag, len(item.Body), item.ReceiveTime.Unix(),
279 | )
280 | }
281 | defer item.CArray.Free()
282 |
283 | result := new(mc.Item)
284 | result.Body = []byte(body)
285 | result.Flag = 0
286 | return result, nil
287 | }
288 |
289 | func (c *CassandraStore) GetPrefixTableFinder() *KeyTableFinder {
290 | return c.keyTableFinder
291 | }
292 |
293 | func IsValidKeyString(key string) bool {
294 | length := len(key)
295 | if length == 0 || length > MAX_KEY_LEN {
296 | logger.Warnf("bad key len=%d", length)
297 | return false
298 | }
299 |
300 | if key[0] <= ' ' || key[0] == '?' || key[0] == '@' {
301 | logger.Warnf("bad key len=%d key[0]=%x", length, key[0])
302 | return false
303 | }
304 |
305 | for _, r := range key {
306 | if unicode.IsControl(r) || unicode.IsSpace(r) {
307 | logger.Warnf("bad key len=%d %s", length, key)
308 | return false
309 | }
310 | }
311 | return true
312 | }
313 |
--------------------------------------------------------------------------------
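A minimal sketch (not part of the repo) of wiring up the store above and doing a set/get round trip. It assumes a local Cassandra at 127.0.0.1:9042 with the doubandb keyspace and kvstore table already created (see conf/table_dispatcher_cfg.cql); the timeout, retry, and connection numbers below are illustrative, in the proxy they come from proxy.yaml.

package main

import (
	"fmt"

	mc "github.com/douban/gobeansdb/memcache"

	"github.com/douban/gobeansproxy/cassandra"
	"github.com/douban/gobeansproxy/config"
)

func main() {
	cfg := &config.CassandraStoreCfg{
		Enable:                true,
		Hosts:                 []string{"127.0.0.1:9042"},
		DefaultKeySpace:       "doubandb",
		DefaultTable:          "kvstore",
		CstarTimeoutMs:        1000,
		CstarConnectTimeoutMs: 1000,
		CstarWriteTimeoutMs:   1000,
		ReconnectIntervalSec:  60,
		RetryNum:              1,
		NumConns:              2,
	}

	store, err := cassandra.NewCassandraStore(cfg)
	if err != nil {
		panic(err)
	}
	defer store.Close()

	// store a memcache item and read it back
	item := &mc.Item{Flag: 0, Body: []byte("hello")}
	if ok, err := store.Set("/test/key", item); !ok {
		panic(err)
	}
	got, err := store.Get("/test/key")
	fmt.Println(got, err)
}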
/tests/switch_storage_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | import requests
4 | import yaml
5 |
6 | from yaml import load, dump
7 | try:
8 | from yaml import CLoader as Loader, CDumper as Dumper
9 | except ImportError:
10 | from yaml import Loader, Dumper
11 |
12 |
13 | from .dbclient import MCStore as store
14 |
15 | store_addr = os.getenv("GOBEANSPROXY_ADDR")
16 | store_api = os.getenv("GOBEANSPROXY_WEB")
17 | store_proxy_cfg = os.getenv("GOBEANSPROXY_PROXY_CFG") or \
18 | '/home/project/.doubanpde/scripts/bdb/gobeansproxy/prefix-switch-cfg/conf/proxy.yaml'
19 |
20 | p_status_brw = 'br1w1cr0w0'
21 | p_status_brw_cw = 'br1w1cr0w1'
22 | p_status_bw_crw = 'br0w1cr1w1'
23 | p_status_crw = 'br0w0cr1w1'
24 |
25 | order_of_status = {
26 | p_status_brw: 0,
27 | p_status_brw_cw: 1,
28 | p_status_bw_crw: 2,
29 | p_status_crw: 3,
30 | }
31 |
32 |
33 | class TestSwitchStorage:
34 |
35 | def setup_method(self):
36 | self.client = store(store_addr or "127.0.0.1:47907")
37 | self.prefix = "/__test_proxy/"
38 | self.prefix_wont_switch = "/__test_proxy_no_switch/"
39 | self.key_max = 100
40 | self.web_addr = store_api or "http://localhost:47910/cstar-cfg?config=rwswitcher"
41 | self.web_req = requests.Session()
42 | self.store_proxy_cfg_backup = store_proxy_cfg + '.backup'
43 | # back up the proxy config so teardown can restore it
44 | with open(store_proxy_cfg, 'r+') as f:
45 | with open(self.store_proxy_cfg_backup, 'w') as b:
46 | b.write(f.read())
47 | self.status = p_status_brw
48 | self.switch_store(p_status_brw)
49 |
50 | def format_key(self, k):
51 | return f"{self.prefix}{k}"
52 |
53 | def teardown_method(self):
54 | self.web_req.close()
55 | with open(self.store_proxy_cfg_backup, 'r+') as f:
56 | with open(store_proxy_cfg, 'w') as o:
57 | o.write(f.read())
58 | self.trigger_reload()
59 |
60 | @pytest.mark.parametrize("test_kv", [
61 | (1, 1),
62 | ("str", "str"),
63 | ("list", ["0", 1, 2]),
64 | ("dict", {"1": 1, "2": 2, "3": 3}),
65 | ("中文", "中文str"),
66 | ("bytes", b'abcde'),
67 | ("nesting", [{"abc中文": ["1", "2", "fff"]}]),
68 | ])
69 | def test_curd_value(self, test_kv):
70 | k, v = test_kv
71 | key = self.format_key(k)
72 | assert self.client.set(key, v)
73 | assert self.client.get(key) == v
74 | assert self.client.delete(key)
75 | assert self.client.get(key) is None
76 |
77 | @pytest.mark.parametrize("test_kvs", [
78 | (
79 | (1, 1),
80 | ("str", "str"),
81 | ("list", ["0", 1, 2]),
82 | ("dict", {"1": 1, "2": 2, "3": 3}),
83 | ("中文", "中文str"),
84 | ("bytes", b'abcde'),
85 | ("nesting", [{"abc中文": ["1", "2", "fff"]}]),
86 | ("bool", True),
87 | ),
88 | ])
89 | def test_getm_value(self, test_kvs):
90 | getm_prefix = '__test_proxy_getm/'
91 | r = {f'{getm_prefix}{k}': v for k, v in test_kvs}
92 | assert len(r) == len(test_kvs)
93 | keys = list(r.keys())
94 |
95 | for k, v in r.items():
96 | assert self.client.set(k, v)
97 |
98 | result = self.client.get_multi(keys)
99 | assert len(keys) == len(result)
100 | for k, v in result.items():
101 | assert r[k] == v
102 | assert self.client.delete(k)
103 |
104 | def trigger_reload(self):
105 | resp = self.web_req.post(self.web_addr)
106 | assert resp.json().get('message') == "success", 'failed, resp: {}'.format(resp.json())
107 |
108 | def update_rw_dispatch_cfg(self, switch_to, prefixes):
109 | data = {
110 | "prefix": {
111 | switch_to: prefixes
112 | }
113 | }
114 | resp = self.web_req.put(self.web_addr, json=data)
115 | assert 'error' not in resp.json()
116 |
117 | def clean_rw_dispatch_cfg(self, prefix):
118 | data = {
119 | "prefix": prefix
120 | }
121 | resp = self.web_req.delete(self.web_addr, json=data)
122 | assert 'error' not in resp.json()
123 |
124 | def switch_store(self, switch_to, use_static_cfg=True):
125 | assert switch_to in (p_status_brw, p_status_brw_cw,
126 | p_status_bw_crw, p_status_crw)
127 | if self.status == switch_to:
128 | return
129 |
130 | self.clean_rw_dispatch_cfg(self.prefix)
131 | with open(store_proxy_cfg, 'r+') as f:
132 | data = load(f, Loader=Loader)
133 | if use_static_cfg:
134 | scfg = {switch_to: [self.prefix]}
135 | else:
136 | # clear the static cfg, otherwise it would
137 | # conflict with the db cfg items
138 | scfg = {}
139 |
140 | data['cassandra']['prefix_rw_dispatcher_cfg']['static'] = scfg
141 |
142 | f.seek(0, 0)
143 | f.truncate()
144 |
145 | f.write(dump(data, Dumper=Dumper))
146 | if use_static_cfg:
147 | self.trigger_reload()
148 | else:
149 | # using put api for cfg update
150 | self.update_rw_dispatch_cfg(switch_to, [self.prefix])
151 | self.status = switch_to
152 |
153 | def test_switch_store(self):
154 |
155 | switch_to = [
156 | # bdb -> cassandra
157 | (
158 | p_status_brw_cw,
159 | p_status_bw_crw,
160 | p_status_crw,
161 | p_status_brw
162 | ),
163 |
164 | # bdb -> cassandra dual write -> bdb -> cassandra
165 | (
166 | p_status_brw_cw,
167 | p_status_bw_crw,
168 | p_status_brw_cw,
169 | p_status_bw_crw,
170 | p_status_crw,
171 | p_status_brw
172 | ),
173 | ]
174 |
175 | key = self.format_key('switch_test')
176 | value = 'value'
177 |
178 | no_switch_key = f'{self.prefix_wont_switch}notme'
179 | no_switch_value = "static"
180 |
181 | assert self.client.set(key, value)
182 | assert self.client.set(no_switch_key, no_switch_value)
183 |
184 | for use_static_cfg in (True, False):
185 | for stages in switch_to:
186 | last_stage = None
187 | for idx, stage in enumerate(stages):
188 | last_stage = self.status
189 | self.switch_store(stage, use_static_cfg)
190 |
191 | # ensure we can still get values
192 | # when changing from crw back to a bdb-read status the old value is not visible
193 | if stage in (p_status_brw, p_status_brw_cw) and last_stage == p_status_crw:
194 | assert self.client.get(key) != value, f'stages: {stages} -> stage: {stage} error'
195 | else:
196 | assert self.client.get(key) == value, f'stages: {stages} -> stage: {stage} error'
197 | assert self.client.get(no_switch_key) == no_switch_value, f'stages: {stages} -> stage: {stage} error'
198 |
199 | # ensure we can set to new value
200 | value = f'value_on_{stage}'
201 | assert self.client.set(key, value), f'stages: {stages} -> stage: {stage} error'
202 | assert self.client.get(key) == value, f'stages: {stages} -> stage: {stage} error'
203 | no_switch_value = f'static_on_{stage}'
204 | assert self.client.set(no_switch_key, no_switch_value), f'stages: {stages} -> stage: {stage} error'
205 | assert self.client.get(no_switch_key) == no_switch_value, f'stages: {stages} -> stage: {stage} error'
206 |
207 | # ensure we can delete value
208 | assert self.client.delete(key), f'stages: {stages} -> stage: {stage} error'
209 | assert self.client.get(key) is None, f'stages: {stages} -> stage: {stage} error'
210 | assert self.client.set(key, value), f'stages: {stages} -> stage: {stage} error'
211 | assert self.client.get(key) == value, f'stages: {stages} -> stage: {stage} error'
212 |
213 | self.switch_store(p_status_brw)
214 | assert self.client.delete(key), f'stages: {stages} -> stage: {stage} error'
215 | assert self.client.delete(no_switch_key), f'stages: {stages} -> stage: {stage} error'
216 |
--------------------------------------------------------------------------------
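The test above drives the switch through the proxy's web endpoint: update_rw_dispatch_cfg PUTs a prefix -> status mapping to /cstar-cfg?config=rwswitcher. A sketch of that call outside the test suite (not part of the repo; the address is the one the test assumes via GOBEANSPROXY_WEB, and the prefix/status values are hypothetical):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// route keys under /__test_proxy/ to "bdb r/w + c* shadow write"
	payload := map[string]any{
		"prefix": map[string][]string{
			"br1w1cr0w1": {"/__test_proxy/"},
		},
	}
	body, _ := json.Marshal(payload)

	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:47910/cstar-cfg?config=rwswitcher",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}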
/cassandra/prefix_switch.go:
--------------------------------------------------------------------------------
1 | package cassandra
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "path/filepath"
7 | "sync"
8 |
9 | "github.com/acomagu/trie/v2"
10 | "github.com/douban/gobeansproxy/config"
11 | "gopkg.in/yaml.v3"
12 | )
13 |
14 | type PrefixSwitchStatus int
15 |
16 | const (
17 | // bdb r/w c* disable
18 | PrefixSwitchBrw PrefixSwitchStatus = 0
19 | // bdb r/w c* w
20 | PrefixSwitchBrwCw PrefixSwitchStatus = 1
21 | // bdb w c* r/w
22 | PrefixSwitchBwCrw PrefixSwitchStatus = 2
23 | // c* rw bdb disable
24 | PrefixSwitchCrw PrefixSwitchStatus = 3
25 | // c* read only bdb disable
26 | PrefixSwitchCr PrefixSwitchStatus = 4
27 |
28 | statusBrw string = "br1w1cr0w0"
29 | statusBrwCw string = "br1w1cr0w1"
30 | statusBwCrw string = "br0w1cr1w1"
31 | statusCrw string = "br0w0cr1w1"
32 | statusCr string = "br0w0cr1w0"
33 | )
34 |
35 | var (
36 | allowRWStatus = map[string]bool{
37 | statusBrw: true,
38 | statusBrwCw: true,
39 | statusBwCrw: true,
40 | statusCrw: true,
41 | statusCr: true,
42 | }
43 | )
44 |
45 | type PrefixSwitcher struct {
46 | trie *trie.Tree[rune, PrefixSwitchStatus]
47 | defaultT PrefixSwitchStatus
48 | lock sync.RWMutex
49 | currentTrieMap map[string]string
50 | cstarEnabled bool
51 | bdbEnabled bool
52 | }
53 |
54 | func (s PrefixSwitchStatus) IsReadOnBeansdb() bool {
55 | return s == PrefixSwitchBrw || s == PrefixSwitchBrwCw
56 | }
57 |
58 | func (s PrefixSwitchStatus) IsReadOnCstar() bool {
59 | return s == PrefixSwitchCrw || s == PrefixSwitchCr || s == PrefixSwitchBwCrw
60 | }
61 |
62 | func (s PrefixSwitchStatus) IsWriteOnBeansdb() bool {
63 | return s == PrefixSwitchBrw || s == PrefixSwitchBrwCw || s == PrefixSwitchBwCrw
64 | }
65 |
66 | func (s PrefixSwitchStatus) IsWriteOnCstar() bool {
67 | return s == PrefixSwitchCrw || s == PrefixSwitchBrwCw || s == PrefixSwitchBwCrw
68 | }
69 |
70 | func strToSwitchStatus(s string) (PrefixSwitchStatus, error) {
71 | switch s {
72 | case statusBrw:
73 | return PrefixSwitchBrw, nil
74 | case statusBrwCw:
75 | return PrefixSwitchBrwCw, nil
76 | case statusBwCrw:
77 | return PrefixSwitchBwCrw, nil
78 | case statusCrw:
79 | return PrefixSwitchCrw, nil
80 | case statusCr:
81 | return PrefixSwitchCr, nil
82 | default:
83 | return -1, fmt.Errorf("unsupported switch status %q", s)
84 | }
85 | }
86 |
87 | func GetPrefixSwitchTrieFromCfg(
88 | cfg *config.CassandraStoreCfg, cqlStore *CassandraStore) (
89 | *trie.Tree[rune, PrefixSwitchStatus], map[string]string, error) {
90 | if !cfg.PrefixRWDispatcherCfg.Enable {
91 | logger.Infof("rw switcher disabled, skip init ...")
92 | return nil, nil, nil
93 | }
94 |
95 | s2k := cfg.PrefixRWDispatcherCfg.StaticCfg
96 |
97 | keysString := [][]rune{}
98 | vStatus := []PrefixSwitchStatus{}
99 | vStrStatus := []string{}
100 | dedup := map[string]struct{}{}
101 |
102 | if cfg.PrefixRWDispatcherCfg.CfgFromCstarTable != "" &&
103 | cfg.PrefixRWDispatcherCfg.CfgFromCstarKeySpace != "" {
104 | c := DisPatcherCfg(cfg.PrefixRWDispatcherCfg)
105 | pkeys, pvalues, err := c.LoadFromDB(cqlStore)
106 | if err != nil {
107 | return nil, nil, err
108 | }
109 |
110 | keysString = append(keysString, pkeys...)
111 | vStrStatus = append(vStrStatus, pvalues...)
112 | }
113 |
114 | if len(s2k) > 0 {
115 | for s, kprefixs := range s2k {
116 | for _, prefix := range kprefixs {
117 | keysString = append(keysString, []rune(prefix))
118 | vStrStatus = append(vStrStatus, s)
119 | }
120 | }
121 | }
122 |
123 | // check duplicate
124 | if len(vStrStatus) != len(keysString) {
125 | return nil, nil, fmt.Errorf("value list length does not match prefix list length")
126 | }
127 |
128 | duplicateKeys := []string{}
129 | loadedMap := map[string]string{}
130 | for idx, k := range keysString {
131 | ks := string(k)
132 | loadedMap[ks] = vStrStatus[idx]
133 | if _, ok := dedup[ks]; !ok {
134 | dedup[ks] = struct{}{}
135 | } else {
136 | duplicateKeys = append(duplicateKeys, ks)
137 | }
138 | }
139 | if len(duplicateKeys) > 0 {
140 | return nil, nil, fmt.Errorf("prefix cfg duplicate: %v", duplicateKeys)
141 | }
142 |
143 | // now init real value
144 | for _, sv := range vStrStatus {
145 | rv, err := strToSwitchStatus(sv)
146 | if err != nil {
147 | return nil, nil, fmt.Errorf("parse value %s to status err: %s", sv, err)
148 | }
149 | vStatus = append(vStatus, rv)
150 | }
151 |
152 | logger.Infof("Loading from cfg: %v", loadedMap)
153 | if len(keysString) == len(vStatus) && len(keysString) > 0 {
154 | tr := trie.New[rune, PrefixSwitchStatus](keysString, vStatus)
155 | return &tr, loadedMap, nil
156 | } else {
157 | return nil, loadedMap, nil
158 | }
159 | }
160 |
161 | func NewPrefixSwitcher(cfg *config.ProxyConfig, cqlStore *CassandraStore) (*PrefixSwitcher, error) {
162 | f := new(PrefixSwitcher)
163 |
164 | if !cfg.CassandraStoreCfg.Enable {
165 | f.defaultT = PrefixSwitchBrw
166 | f.cstarEnabled = false
167 | return f, nil
168 | }
169 |
170 | prefixTrie, nowMap, err := GetPrefixSwitchTrieFromCfg(&cfg.CassandraStoreCfg, cqlStore)
171 | if err != nil {
172 | return nil, err
173 | }
174 |
175 | f.trie = prefixTrie
176 | f.cstarEnabled = true
177 |
178 | defaultS, err := strToSwitchStatus(cfg.SwitchToKeyDefault)
179 | if err != nil {
180 | return nil, err
181 | }
182 |
183 | f.defaultT = defaultS
184 | f.currentTrieMap = nowMap
185 | f.bdbEnabled = cfg.DStoreConfig.Enable
186 | return f, nil
187 | }
188 |
189 | // matchStatus returns the status of the longest matching prefix of key.
190 | // Callers must hold s.lock to guard against concurrent trie updates.
191 | func (s *PrefixSwitcher) matchStatus(key string) PrefixSwitchStatus {
192 | if s.trie == nil {
193 | return s.defaultT
194 | }
195 |
196 | var v PrefixSwitchStatus
197 | var match bool
198 |
199 | n := *(s.trie)
200 |
201 | for _, c := range key {
202 | if n = n.TraceOne(c); n == nil {
203 | break
204 | }
205 |
206 | if vv, ok := n.Terminal(); ok {
207 | v = vv
208 | match = true
209 | }
210 | }
211 |
212 | if match {
213 | return v
214 | } else {
215 | return s.defaultT
216 | }
217 | }
218 |
219 | func (s *PrefixSwitcher) GetStatus(key string) PrefixSwitchStatus {
220 | if !s.bdbEnabled && s.cstarEnabled {
221 | return PrefixSwitchCrw
222 | }
223 |
224 | if !s.cstarEnabled {
225 | return PrefixSwitchBrw
226 | }
227 |
228 | s.lock.RLock()
229 | defer s.lock.RUnlock()
230 | return s.matchStatus(key)
231 | }
232 |
233 | // ReadEnabledOn checks the key prefix and reports whether reads are enabled on bdb and on c*
234 | func (s *PrefixSwitcher) ReadEnabledOn(key string) (bool, bool) {
235 | if !s.bdbEnabled && s.cstarEnabled {
236 | return false, true
237 | }
238 |
239 | if !s.cstarEnabled {
240 | return true, false
241 | }
242 |
243 | status := s.GetStatus(key)
244 | return status.IsReadOnBeansdb(), status.IsReadOnCstar()
245 | }
246 |
247 | // ReadEnableOnKeys splits keys into those read from bdb and those read from c*
248 | func (s *PrefixSwitcher) ReadEnableOnKeys(keys []string) (bkeys []string, ckeys []string) {
249 | if !s.bdbEnabled && s.cstarEnabled {
250 | ckeys = keys
251 | return
252 | }
253 |
254 | if !s.cstarEnabled {
255 | bkeys = keys
256 | return
257 | }
258 |
259 | s.lock.RLock()
260 | defer s.lock.RUnlock()
261 |
262 | for _, k := range keys {
263 | status := s.matchStatus(k)
264 | if status.IsReadOnBeansdb() {
265 | bkeys = append(bkeys, k)
266 | // guard against an inconsistent status:
267 | // read can only be enabled on one backend
268 | continue
269 | }
270 |
271 | if status.IsReadOnCstar() {
272 | ckeys = append(ckeys, k)
273 | }
274 | }
275 | return
276 | }
277 |
278 | // WriteEnabledOn checks the key prefix and reports whether writes are enabled on bdb and on c*
279 | func (s *PrefixSwitcher) WriteEnabledOn(key string) (bool, bool) {
280 | if !s.bdbEnabled && s.cstarEnabled {
281 | return false, true
282 | }
283 |
284 | if !s.cstarEnabled {
285 | return true, false
286 | }
287 | status := s.GetStatus(key)
288 | return status.IsWriteOnBeansdb(), status.IsWriteOnCstar()
289 | }
290 |
291 | func (s *PrefixSwitcher) LoadStaticCfg(cfgDir string) (*config.CassandraStoreCfg, error) {
292 | cfg := struct {
293 | CassandraCfg config.CassandraStoreCfg `yaml:"cassandra"`
294 | }{}
295 |
296 | configF, err := ioutil.ReadFile(filepath.Join(cfgDir, "proxy.yaml"))
297 | if err != nil {
298 | return nil, err
299 | }
300 | err = yaml.Unmarshal(configF, &cfg)
301 | if err != nil {
302 | return nil, err
303 | }
304 |
305 | return &cfg.CassandraCfg, nil
306 | }
307 |
308 | func (s *PrefixSwitcher) LoadCfg(cfg *config.CassandraStoreCfg, cqlStore *CassandraStore) error {
309 | if !cfg.Enable {
310 | logger.Errorf("You can't use prefix switcher when c* backend disabled")
311 | return fmt.Errorf("can't load prefix switcher cfg when cassandra backend disabled")
312 | }
313 |
314 | if !cfg.PrefixRWDispatcherCfg.Enable {
315 | logger.Errorf("You can't disable rw dispatcher online")
316 | return fmt.Errorf("you can't disable the rw dispatcher online")
317 | }
318 |
319 | pTrie, nowMap, err := GetPrefixSwitchTrieFromCfg(cfg, cqlStore)
320 | if err != nil {
321 | logger.Errorf("reloading c* cfg err: %s", err)
322 | return err
323 | }
324 | logger.Infof("reloading c* cfg for prefix switch to: %v", nowMap)
325 |
326 | defaultS, err := strToSwitchStatus(cfg.SwitchToKeyDefault)
327 | if err != nil {
328 | logger.Errorf("default switch storage parse err: %s", err)
329 | return err
330 | }
331 | logger.Infof("reloading c* cfg for prefix default store to: %s", cfg.SwitchToKeyDefault)
332 | 
333 | s.lock.Lock()
334 | defer s.lock.Unlock()
335 | s.trie = pTrie
336 | s.defaultT = defaultS
337 | s.currentTrieMap = nowMap
338 | return nil
339 | }
340 |
341 | func (s *PrefixSwitcher) Upsert(cfg *config.CassandraStoreCfg, data map[string][]string, cqlStore *CassandraStore) error {
342 | for rwStatus := range data {
343 | if _, ok := allowRWStatus[rwStatus]; !ok {
344 | return fmt.Errorf("%s is not a valid rw status", rwStatus)
345 | }
346 | }
347 | dispatcherCfg := DisPatcherCfg(cfg.PrefixRWDispatcherCfg)
348 | return dispatcherCfg.SaveToDB(data, cqlStore)
349 | }
350 |
351 | func (s *PrefixSwitcher) DeletePrefix(cfg *config.CassandraStoreCfg, prefix string, cqlStore *CassandraStore) error {
352 | dispatcherCfg := DisPatcherCfg(cfg.PrefixRWDispatcherCfg)
353 | return dispatcherCfg.DeletePrefixCfg(prefix, cqlStore)
354 | }
355 |
356 | func (s *PrefixSwitcher) GetCurrentMap() map[string]string {
357 | return s.currentTrieMap
358 | }
359 |
--------------------------------------------------------------------------------
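
The two lookup paths above (ReadEnableOnKeys for batched reads, WriteEnabledOn for single-key writes) are presumably what the proxy's store layer calls. Below is a minimal, hedged sketch of such a caller; it assumes it lives in the same package as PrefixSwitcher, and the returned slices/flags merely stand in for real backend calls.

// Illustrative only, not part of the repo: split a multi-get and decide a write
// using the switcher API defined in prefix_switch.go above.
func exampleRoute(s *PrefixSwitcher, keys []string, key string) (bdbReads, cstarReads []string, dualWrite bool) {
	// reads: per the comment above, each key is read from exactly one backend
	bdbReads, cstarReads = s.ReadEnableOnKeys(keys)

	// writes: both flags may be true at once (e.g. double-writing during a migration)
	wBdb, wCstar := s.WriteEnabledOn(key)
	dualWrite = wBdb && wCstar
	return
}
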
/dstore/scheduler.go:
--------------------------------------------------------------------------------
1 | package dstore
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "time"
7 |
8 | dbcfg "github.com/douban/gobeansdb/config"
9 | dbutil "github.com/douban/gobeansdb/utils"
10 | )
11 |
12 | const (
13 | FeedbackNonConnectErrSet = -10
14 | FeedbackNonConnectErrDelete = -10
15 | FeedbackConnectErrDefault = -2
16 | FeedbackNonConnectErrDefault = -5
17 | NoBucketsRounRobinROSchduler = "no_buckets_rro"
18 | BucketsManualSchduler = "buckets_manual"
19 | )
20 |
21 | var (
22 | globalScheduler Scheduler
23 | )
24 |
25 | // Scheduler: route request to nodes
26 | type Scheduler interface {
27 | // feedback for auto routing
28 | FeedbackError(host *Host, key string, startTime time.Time, errorCode float64)
29 | FeedbackLatency(host *Host, key string, startTime time.Time, timeUsed time.Duration)
30 |
31 | // route a key to hosts
32 | GetHostsByKey(key string) (hosts []*Host)
33 |
34 | // route some keys to group of hosts
35 | DivideKeysByBucket(keys []string) [][]string
36 |
37 | // internal status
38 | Stats() map[string]map[string]float64
39 |
40 | // get latencies of hosts in the bucket
41 | LatenciesStats() map[string]map[string][QUEUECAP]Response
42 |
43 | // get percentage of hosts in the bucket
44 | Partition() map[string]map[string]int
45 |
46 | // return average latency and arc(percentage)
47 | GetBucketInfo(bucketID int64) map[string]map[string]map[string][]Response
48 |
49 | Close()
50 | }
51 |
52 | // route request by configure
53 | type ManualScheduler struct {
54 | N int
55 | hosts []*Host
56 |
57 | // bucketsCon[bucket] holds the main hosts of that bucket.
58 | bucketsCon []*Bucket
59 |
60 | // backupsCon[bucket] holds the backup hosts of that bucket.
61 | backupsCon []*Bucket
62 |
63 | hashMethod dbutil.HashMethod
64 |
65 | // bucketWidth: 2^bucketWidth = route.NumBucket
66 | bucketWidth int
67 |
68 | // channel for passing feedback info
69 | feedChan chan *Feedback
70 |
71 | quit bool
72 | }
73 |
74 | func GetScheduler() Scheduler {
75 | return globalScheduler
76 | }
77 |
78 | func InitGlobalManualScheduler(route *dbcfg.RouteTable, n int, schedulerName string) {
79 | switch schedulerName {
80 | case BucketsManualSchduler, "":
81 | globalScheduler = NewManualScheduler(route, n)
82 | case NoBucketsRounRobinROSchduler:
83 | if n != 1 {
84 | logger.Fatalf("rro readonly scheduler can only use one replica, now: %d", n)
85 | }
86 | globalScheduler = NewRRReadScheduler(route)
87 | default:
88 | logger.Fatalf(
89 | "Unsupported scheduler, must be: %s or %s",
90 | BucketsManualSchduler, NoBucketsRounRobinROSchduler,
91 | )
92 | }
93 | }
94 |
95 | func NewManualScheduler(route *dbcfg.RouteTable, n int) *ManualScheduler {
96 | sch := new(ManualScheduler)
97 | sch.N = n
98 | sch.hosts = make([]*Host, len(route.Servers))
99 | sch.bucketsCon = make([]*Bucket, route.NumBucket)
100 | sch.backupsCon = make([]*Bucket, route.NumBucket)
101 |
102 | idx := 0
103 |
104 | bucketHosts := make(map[int][]*Host)
105 | backupHosts := make(map[int][]*Host)
106 | for addr, bucketsFlag := range route.Servers {
107 | host := NewHost(addr)
108 | host.Index = idx
109 | sch.hosts[idx] = host
110 | for bucketNum, mainFlag := range bucketsFlag {
111 | if mainFlag {
112 | if len(bucketHosts[bucketNum]) == 0 {
113 | bucketHosts[bucketNum] = []*Host{host} // append(bucketHosts[bucketNum], host)
114 | } else {
115 | bucketHosts[bucketNum] = append(bucketHosts[bucketNum], host)
116 | }
117 | } else {
118 | if len(backupHosts[bucketNum]) == 0 {
119 | backupHosts[bucketNum] = []*Host{host}
120 | } else {
121 | backupHosts[bucketNum] = append(backupHosts[bucketNum], host)
122 | }
123 | }
124 | }
125 | idx++
126 | }
127 | for bucketNum, hosts := range bucketHosts {
128 | sch.bucketsCon[bucketNum] = newBucket(bucketNum, hosts...)
129 | }
130 |
131 | for bucketNum, hosts := range backupHosts {
132 | sch.backupsCon[bucketNum] = newBucket(bucketNum, hosts...)
133 | }
134 |
135 | sch.hashMethod = dbutil.Fnv1a
136 | sch.bucketWidth = calBitWidth(route.NumBucket)
137 |
138 | // the scheduler's scoring mechanism for each host
139 | go sch.procFeedback()
140 |
141 | go func() {
142 | for {
143 | if sch.quit {
144 | logger.Infof("close balance goroutine")
145 | // wait for all pending feedback to be processed
146 | time.Sleep(10 * time.Second)
147 | close(sch.feedChan)
148 | break
149 | }
150 | sch.checkFails()
151 | sch.tryRebalance()
152 | time.Sleep(5 * time.Second)
153 | }
154 | }()
155 |
156 | return sch
157 | }
158 |
159 | func calBitWidth(number int) int {
160 | width := 0
161 | for number > 1 {
162 | width++
163 | number /= 2
164 | }
165 | return width
166 | }
167 |
168 | func hexToInt(str string) int {
169 | n, _ := strconv.ParseInt(str, 16, 16)
170 | return int(n)
171 | }
172 |
173 | func getBucketByKey(hashFunc dbutil.HashMethod, bucketWidth int, key string) int {
174 | hexPathLen := bucketWidth / 4
175 | if len(key) > hexPathLen && key[0] == '@' { // check length first to avoid indexing an empty key
176 | return hexToInt(key[1 : 1+hexPathLen])
177 | }
178 | if len(key) >= 1 && key[0] == '?' {
179 | key = key[1:]
180 | }
181 | h := hashFunc([]byte(key))
182 | return (int)(h >> (uint)(32-bucketWidth))
183 | }
184 |
185 | func (sch *ManualScheduler) GetHostsByKey(key string) (hosts []*Host) {
186 | bucketNum := getBucketByKey(sch.hashMethod, sch.bucketWidth, key)
187 | bucket := sch.bucketsCon[bucketNum]
188 | hosts = make([]*Host, sch.N+len(sch.backupsCon[bucketNum].hostsList))
189 | hostsCon := bucket.GetHosts(key)
190 | for i, host := range hostsCon {
191 | if i < sch.N {
192 | hosts[i] = host
193 | }
194 | }
195 | // set the backup nodes in pos after main nodes
196 | for index, host := range sch.backupsCon[bucketNum].hostsList {
197 | hosts[sch.N+index] = host.host
198 | }
199 | return
200 | }
201 |
202 | type Feedback struct {
203 | addr string
204 | bucket int
205 | data float64 //latency or errorCode
206 | startTime time.Time
207 | }
208 |
209 | func (sch *ManualScheduler) Feedback(host *Host, key string, startTime time.Time, data float64) {
210 | bucket := getBucketByKey(sch.hashMethod, sch.bucketWidth, key)
211 | sch.feedChan <- &Feedback{addr: host.Addr, bucket: bucket, data: data, startTime: startTime}
212 | }
213 |
214 | func (sch *ManualScheduler) FeedbackError(host *Host, key string, startTime time.Time, errorCode float64) {
215 | sch.Feedback(host, key, startTime, errorCode)
216 | }
217 |
218 | func (sch *ManualScheduler) FeedbackLatency(host *Host, key string, startTime time.Time, timeUsed time.Duration) {
219 | n := timeUsed.Nanoseconds() / 1000 // nanoseconds to microseconds
220 | sch.Feedback(host, key, startTime, float64(n))
221 | }
222 |
223 | func (sch *ManualScheduler) procFeedback() {
224 | sch.feedChan = make(chan *Feedback, 256)
225 | for {
226 | fb, ok := <-sch.feedChan
227 | if !ok {
228 | // channel was closed
229 | break
230 | }
231 | sch.feedback(fb.addr, fb.bucket, fb.startTime, fb.data)
232 | }
233 | }
234 |
235 | func (sch *ManualScheduler) feedback(addr string, bucketNum int, startTime time.Time, data float64) {
236 | bucket := sch.bucketsCon[bucketNum]
237 | index, _ := bucket.getHostByAddr(addr)
238 | if index < 0 {
239 | logger.Errorf("no host found for addr %s", addr)
240 | return
241 | } else {
242 | if data < 0 {
243 | bucket.addConErr(addr, startTime, data)
244 | } else {
245 | bucket.addLatency(addr, startTime, data)
246 | }
247 | }
248 | }
249 |
250 | func (sch *ManualScheduler) checkFails() {
251 | for _, bucket := range sch.bucketsCon {
252 | sch.checkFailsForBucket(bucket)
253 | }
254 | }
255 |
256 | func (sch *ManualScheduler) tryRebalance() {
257 | for _, bucket := range sch.bucketsCon {
258 | bucket.ReBalance()
259 | }
260 |
261 | }
262 |
263 | func (sch *ManualScheduler) checkFailsForBucket(bucket *Bucket) {
264 | hosts := bucket.hostsList
265 | for _, hostBucket := range hosts {
266 | if item, err := hostBucket.host.Get("@"); err == nil {
267 | item.Free()
268 | bucket.riseHost(hostBucket.host.Addr)
269 | } else {
270 | logger.Infof(
271 | "beansdb server %s in bucket %X is down while checking fails, err: %s",
272 | hostBucket.host.Addr, bucket.ID, err)
273 | }
274 | }
275 | }
276 |
277 | func (sch *ManualScheduler) DivideKeysByBucket(keys []string) [][]string {
278 | rs := make([][]string, len(sch.bucketsCon))
279 | for _, key := range keys {
280 | b := getBucketByKey(sch.hashMethod, sch.bucketWidth, key)
281 | rs[b] = append(rs[b], key)
282 | }
283 | return rs
284 | }
285 |
286 | // Stats returns the score of each addr; it's used in the web interface.
287 | // Result structure is { bucket1: {host1: score1, host2: score2, ...}, ... }
288 | func (sch *ManualScheduler) Stats() map[string]map[string]float64 {
289 | r := make(map[string]map[string]float64, len(sch.bucketsCon))
290 | for _, bucket := range sch.bucketsCon {
291 | var bkt string
292 | if sch.bucketWidth > 4 {
293 | bkt = fmt.Sprintf("%02x", bucket.ID)
294 | } else {
295 | bkt = fmt.Sprintf("%x", bucket.ID)
296 | }
297 | r[bkt] = make(map[string]float64, len(bucket.hostsList))
298 | for _, host := range bucket.hostsList {
299 | r[bkt][host.host.Addr] = host.score
300 | }
301 |
302 | }
303 | return r
304 | }
305 |
306 | func (sch *ManualScheduler) LatenciesStats() map[string]map[string][QUEUECAP]Response {
307 | r := make(map[string]map[string][QUEUECAP]Response, len(sch.bucketsCon))
308 |
309 | for _, bucket := range sch.bucketsCon {
310 | var bkt string
311 | if sch.bucketWidth > 4 {
312 | bkt = fmt.Sprintf("%02x", bucket.ID)
313 | } else {
314 | bkt = fmt.Sprintf("%x", bucket.ID)
315 | }
316 | r[bkt] = make(map[string][QUEUECAP]Response, len(bucket.hostsList))
317 | for _, host := range bucket.hostsList {
318 | r[bkt][host.host.Addr] = *host.lantency.resData
319 | }
320 |
321 | }
322 | return r
323 | }
324 |
325 | func (sch *ManualScheduler) Partition() map[string]map[string]int {
326 | r := make(map[string]map[string]int, len(sch.bucketsCon))
327 |
328 | for _, bucket := range sch.bucketsCon {
329 | var bkt string
330 | if sch.bucketWidth > 4 {
331 | bkt = fmt.Sprintf("%02x", bucket.ID)
332 | } else {
333 | bkt = fmt.Sprintf("%x", bucket.ID)
334 | }
335 | r[bkt] = make(map[string]int, len(bucket.hostsList))
336 | for i, host := range bucket.hostsList {
337 | r[bkt][host.host.Addr] = bucket.partition.getArc(i)
338 | }
339 |
340 | }
341 | return r
342 | }
343 |
344 | // return addr:score:offset:response
345 | func (sch *ManualScheduler) GetBucketInfo(bucketID int64) map[string]map[string]map[string][]Response {
346 | bkt := sch.bucketsCon[bucketID]
347 | r := make(map[string]map[string]map[string][]Response, len(bkt.hostsList))
348 | for i, hostInBucket := range bkt.hostsList {
349 | r[hostInBucket.host.Addr] = make(map[string]map[string][]Response)
350 | score := fmt.Sprintf("%f", hostInBucket.score)
351 | offset := fmt.Sprintf("%d", bkt.partition.getArc(i))
352 | r[hostInBucket.host.Addr][score] = map[string][]Response{
353 | offset: hostInBucket.lantency.Get(proxyConf.ResTimeSeconds, latencyDataType),
354 | }
355 | }
356 | return r
357 | }
358 |
359 | func (sch *ManualScheduler) Close() {
360 | sch.quit = true
361 | }
362 |
--------------------------------------------------------------------------------
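
The bucket routing above is compact enough to illustrate with concrete numbers. The following standalone, hedged sketch assumes route.NumBucket = 16 (so calBitWidth gives 4 and '@'-prefixed keys carry one hex digit of bucket id); the standard library's FNV-1a is used purely as a stand-in for gobeansdb's dbutil.Fnv1a.

// Illustrative only, not part of the repo. Mirrors the arithmetic of
// calBitWidth/getBucketByKey with NumBucket = 16 (bucketWidth = 4).
package main

import (
	"fmt"
	"hash/fnv"
	"strconv"
)

func bucketOf(key string, bucketWidth int) int {
	hexPathLen := bucketWidth / 4 // 1 hex digit per 4 bits
	if len(key) > hexPathLen && key[0] == '@' {
		n, _ := strconv.ParseInt(key[1:1+hexPathLen], 16, 16)
		return int(n) // "@" keys carry their bucket id as a hex prefix
	}
	if len(key) >= 1 && key[0] == '?' {
		key = key[1:]
	}
	h := fnv.New32a()
	h.Write([]byte(key))
	return int(h.Sum32() >> uint(32-bucketWidth)) // top bucketWidth bits pick the bucket
}

func main() {
	fmt.Println(bucketOf("@c0123456", 4)) // 12: taken from the 'c' after '@'
	fmt.Println(bucketOf("user:42", 4))   // top 4 bits of the FNV-1a hash of "user:42"
}
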
/templates/js/bootstrap-sortable.js:
--------------------------------------------------------------------------------
1 | /**
2 | * TinySort is a small script that sorts HTML elements. It sorts by text- or attribute value, or by that of one of it's children.
3 | * @summary A nodeElement sorting script.
4 | * @version 2.2.0
5 | * @license MIT/GPL
6 | * @author Ron Valstar
7 | * @copyright Ron Valstar
8 | * @namespace tinysort
9 | */
10 | !function (a, b) { "use strict"; function c() { return b } "function" == typeof define && define.amd ? define("tinysort", c) : a.tinysort = b }(this, function () { "use strict"; function a(a, f) { function j() { 0 === arguments.length ? s({}) : d(arguments, function (a) { s(c(a) ? { selector: a } : a) }), p = D.length } function s(a) { var b = !!a.selector, c = b && ":" === a.selector[0], d = e(a || {}, r); D.push(e({ hasSelector: b, hasAttr: !(d.attr === i || "" === d.attr), hasData: d.data !== i, hasFilter: c, sortReturnNumber: "asc" === d.order ? 1 : -1 }, d)) } function t() { d(a, function (a, b) { y ? y !== a.parentNode && (E = !1) : y = a.parentNode; var c = D[0], d = c.hasFilter, e = c.selector, f = !e || d && a.matchesSelector(e) || e && a.querySelector(e), g = f ? B : C, h = { elm: a, pos: b, posn: g.length }; A.push(h), g.push(h) }), x = B.slice(0) } function u() { B.sort(v) } function v(a, e) { var f = 0; for (0 !== q && (q = 0) ; 0 === f && p > q;) { var i = D[q], j = i.ignoreDashes ? n : m; if (d(o, function (a) { var b = a.prepare; b && b(i) }), i.sortFunction) f = i.sortFunction(a, e); else if ("rand" == i.order) f = Math.random() < .5 ? 1 : -1; else { var k = h, r = b(a, i), s = b(e, i), t = "" === r || r === g, u = "" === s || s === g; if (r === s) f = 0; else if (i.emptyEnd && (t || u)) f = t && u ? 0 : t ? 1 : -1; else { if (!i.forceStrings) { var v = c(r) ? r && r.match(j) : h, w = c(s) ? s && s.match(j) : h; if (v && w) { var x = r.substr(0, r.length - v[0].length), y = s.substr(0, s.length - w[0].length); x == y && (k = !h, r = l(v[0]), s = l(w[0])) } } f = r === g || s === g ? 0 : s > r ? -1 : r > s ? 1 : 0 } } d(o, function (a) { var b = a.sort; b && (f = b(i, k, r, s, f)) }), f *= i.sortReturnNumber, 0 === f && q++ } return 0 === f && (f = a.pos > e.pos ? 1 : -1), f } function w() { var a = B.length === A.length; E && a ? F ? B.forEach(function (a, b) { a.elm.style.order = b }) : (B.forEach(function (a) { z.appendChild(a.elm) }), y.appendChild(z)) : (B.forEach(function (a) { var b = a.elm, c = k.createElement("div"); a.ghost = c, b.parentNode.insertBefore(c, b) }), B.forEach(function (a, b) { var c = x[b].ghost; c.parentNode.insertBefore(a.elm, c), c.parentNode.removeChild(c) })) } c(a) && (a = k.querySelectorAll(a)), 0 === a.length && console.warn("No elements to sort"); var x, y, z = k.createDocumentFragment(), A = [], B = [], C = [], D = [], E = !0, F = a.length && (f === g || f.useFlex !== !1) && -1 !== getComputedStyle(a[0].parentNode, null).display.indexOf("flex"); return j.apply(i, Array.prototype.slice.call(arguments, 1)), t(), u(), w(), B.map(function (a) { return a.elm }) } function b(a, b) { var d, e = a.elm; return b.selector && (b.hasFilter ? e.matchesSelector(b.selector) || (e = i) : e = e.querySelector(b.selector)), b.hasAttr ? d = e.getAttribute(b.attr) : b.useVal ? d = e.value || e.getAttribute("value") : b.hasData ? 
d = e.getAttribute("data-" + b.data) : e && (d = e.textContent), c(d) && (b.cases || (d = d.toLowerCase()), d = d.replace(/\s+/g, " ")), d } function c(a) { return "string" == typeof a } function d(a, b) { for (var c, d = a.length, e = d; e--;) c = d - e - 1, b(a[c], c) } function e(a, b, c) { for (var d in b) (c || a[d] === g) && (a[d] = b[d]); return a } function f(a, b, c) { o.push({ prepare: a, sort: b, sortBy: c }) } var g, h = !1, i = null, j = window, k = j.document, l = parseFloat, m = /(-?\d+\.?\d*)\s*$/g, n = /(\d+\.?\d*)\s*$/g, o = [], p = 0, q = 0, r = { selector: i, order: "asc", attr: i, data: i, useVal: h, place: "start", returns: h, cases: h, forceStrings: h, ignoreDashes: h, sortFunction: i, useFlex: h, emptyEnd: h }; return j.Element && function (a) { a.matchesSelector = a.matchesSelector || a.mozMatchesSelector || a.msMatchesSelector || a.oMatchesSelector || a.webkitMatchesSelector || function (a) { for (var b = this, c = (b.parentNode || b.document).querySelectorAll(a), d = -1; c[++d] && c[d] != b;); return !!c[d] } }(Element.prototype), e(f, { loop: d }), e(a, { plugin: f, defaults: r }) }());
11 |
12 | (function ($) {
13 |
14 | var $document = $(document),
15 | signClass,
16 | sortEngine;
17 |
18 | $.bootstrapSortable = function (applyLast, sign, customSort) {
19 |
20 | // Check if moment.js is available
21 | var momentJsAvailable = (typeof moment !== 'undefined');
22 |
23 | // Set class based on sign parameter
24 | signClass = !sign ? "arrow" : sign;
25 |
26 | // Set sorting algorithm
27 | if (customSort == 'default')
28 | customSort = defaultSortEngine;
29 | sortEngine = customSort || sortEngine || defaultSortEngine;
30 |
31 | // Set attributes needed for sorting
32 | $('table.sortable').each(function () {
33 | var $this = $(this);
34 | applyLast = (applyLast === true);
35 | $this.find('span.sign').remove();
36 |
37 | // Add placeholder cells for colspans
38 | $this.find('thead [colspan]').each(function () {
39 | var colspan = parseFloat($(this).attr('colspan'));
40 | for (var i = 1; i < colspan; i++) {
41 | $(this).after('');
42 | }
43 | });
44 |
45 | // Add placeholder cells for rowspans
46 | $this.find('thead [rowspan]').each(function () {
47 | var $cell = $(this);
48 | var rowspan = parseFloat($cell.attr('rowspan'));
49 | for (var i = 1; i < rowspan; i++) {
50 | var parentRow = $cell.parent('tr');
51 | var nextRow = parentRow.next('tr');
52 | var index = parentRow.children().index($cell);
53 | nextRow.children().eq(index).before(' | ');
54 | }
55 | });
56 |
57 | // Set indexes to header cells
58 | $this.find('thead tr').each(function (rowIndex) {
59 | $(this).find('th').each(function (columnIndex) {
60 | var $this = $(this);
61 | $this.addClass('nosort').removeClass('up down');
62 | $this.attr('data-sortcolumn', columnIndex);
63 | $this.attr('data-sortkey', columnIndex + '-' + rowIndex);
64 | });
65 | });
66 |
67 | // Cleanup placeholder cells
68 | $this.find('thead .rowspan-compensate, .colspan-compensate').remove();
69 |
70 | // Initialize sorting values
71 | $this.find('td').each(function () {
72 | var $this = $(this);
73 | if ($this.attr('data-dateformat') !== undefined && momentJsAvailable) {
74 | $this.attr('data-value', moment($this.text(), $this.attr('data-dateformat')).format('YYYY/MM/DD/HH/mm/ss'));
75 | }
76 | else {
77 | $this.attr('data-value') === undefined && $this.attr('data-value', $this.text());
78 | }
79 | });
80 |
81 | var context = lookupSortContext($this),
82 | bsSort = context.bsSort;
83 |
84 | $this.find('thead th[data-defaultsort!="disabled"]').each(function (index) {
85 | var $this = $(this);
86 | var $sortTable = $this.closest('table.sortable');
87 | $this.data('sortTable', $sortTable);
88 | var sortKey = $this.attr('data-sortkey');
89 | var thisLastSort = applyLast ? context.lastSort : -1;
90 | bsSort[sortKey] = applyLast ? bsSort[sortKey] : $this.attr('data-defaultsort');
91 | if (bsSort[sortKey] !== undefined && (applyLast === (sortKey === thisLastSort))) {
92 | bsSort[sortKey] = bsSort[sortKey] === 'asc' ? 'desc' : 'asc';
93 | doSort($this, $sortTable);
94 | }
95 | });
96 | $this.trigger('sorted');
97 | });
98 | };
99 |
100 | // Add click event to table header
101 | $document.on('click', 'table.sortable thead th[data-defaultsort!="disabled"]', function (e) {
102 | var $this = $(this), $table = $this.data('sortTable') || $this.closest('table.sortable');
103 | $table.trigger('before-sort');
104 | doSort($this, $table);
105 | $table.trigger('sorted');
106 | });
107 |
108 | // Look up sorting data appropriate for the specified table (jQuery element).
109 | // This allows multiple tables on one page without collisions.
110 | function lookupSortContext($table) {
111 | var context = $table.data("bootstrap-sortable-context");
112 | if (context === undefined) {
113 | context = { bsSort: [], lastSort: undefined };
114 | $table.find('thead th[data-defaultsort!="disabled"]').each(function (index) {
115 | var $this = $(this);
116 | var sortKey = $this.attr('data-sortkey');
117 | context.bsSort[sortKey] = $this.attr('data-defaultsort');
118 | if (context.bsSort[sortKey] !== undefined) {
119 | context.lastSort = sortKey;
120 | }
121 | });
122 | $table.data("bootstrap-sortable-context", context);
123 | }
124 | return context;
125 | }
126 |
127 | function defaultSortEngine(rows, sortingParams) {
128 | tinysort(rows, sortingParams);
129 | }
130 |
131 | // Sorting mechanism separated
132 | function doSort($this, $table) {
133 | var sortColumn = parseFloat($this.attr('data-sortcolumn')),
134 | context = lookupSortContext($table),
135 | bsSort = context.bsSort;
136 |
137 | var colspan = $this.attr('colspan');
138 | if (colspan) {
139 | var mainSort = parseFloat($this.data('mainsort')) || 0;
140 | var rowIndex = parseFloat($this.data('sortkey').split('-').pop());
141 |
142 | // If there is one more row in header, delve deeper
143 | if ($table.find('thead tr').length - 1 > rowIndex) {
144 | doSort($table.find('[data-sortkey="' + (sortColumn + mainSort) + '-' + (rowIndex + 1) + '"]'), $table);
145 | return;
146 | }
147 | // Otherwise, just adjust the sortColumn
148 | sortColumn = sortColumn + mainSort;
149 | }
150 |
151 | var localSignClass = $this.attr('data-defaultsign') || signClass;
152 |
153 | // update arrow icon
154 | $table.find('th').each(function () {
155 | $(this).removeClass('up').removeClass('down').addClass('nosort');
156 | });
157 |
158 | if ($.browser.mozilla) {
159 | var moz_arrow = $table.find('div.mozilla');
160 | if (moz_arrow !== undefined) {
161 | moz_arrow.find('.sign').remove();
162 | moz_arrow.parent().html(moz_arrow.html());
163 | }
164 | $this.wrapInner('');
165 | $this.children().eq(0).append('');
166 | }
167 | else {
168 | $table.find('span.sign').remove();
169 | $this.append('');
170 | }
171 |
172 | // sort direction
173 | var sortKey = $this.attr('data-sortkey');
174 | var initialDirection = $this.attr('data-firstsort') !== 'desc' ? 'desc' : 'asc';
175 |
176 | context.lastSort = sortKey;
177 | bsSort[sortKey] = (bsSort[sortKey] || initialDirection) === 'asc' ? 'desc' : 'asc';
178 | if (bsSort[sortKey] === 'desc') {
179 | $this.find('span.sign').addClass('up');
180 | $this.addClass('up').removeClass('down nosort');
181 | } else {
182 | $this.addClass('down').removeClass('up nosort');
183 | }
184 |
185 | // sort rows
186 | var rows = $table.children('tbody').children('tr');
187 | if (rows.length != 0) {
188 | sortEngine(rows, { selector: 'td:nth-child(' + (sortColumn + 1) + ')', order: bsSort[sortKey], data: 'value' });
189 | }
190 |
191 | // add class to sorted column cells
192 | $table.find('td.sorted, th.sorted').removeClass('sorted');
193 | rows.find('td:eq(' + sortColumn + ')').addClass('sorted');
194 | $this.addClass('sorted');
195 | }
196 |
197 | // jQuery 1.9 removed this object
198 | if (!$.browser) {
199 | $.browser = { chrome: false, mozilla: false, opera: false, msie: false, safari: false };
200 | var ua = navigator.userAgent;
201 | $.each($.browser, function (c) {
202 | $.browser[c] = ((new RegExp(c, 'i').test(ua))) ? true : false;
203 | if ($.browser.mozilla && c === 'mozilla') { $.browser.mozilla = ((new RegExp('firefox', 'i').test(ua))) ? true : false; }
204 | if ($.browser.chrome && c === 'safari') { $.browser.safari = false; }
205 | });
206 | }
207 |
208 | // Initialise on DOM ready
209 | $($.bootstrapSortable);
210 |
211 | }(jQuery));
212 |
--------------------------------------------------------------------------------
/gobeansproxy/web.go:
--------------------------------------------------------------------------------
1 | package gobeansproxy
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io/ioutil"
7 | "net/http"
8 | _ "net/http/pprof"
9 | "path"
10 | "path/filepath"
11 | "runtime"
12 | "strconv"
13 | "sync"
14 | "text/template"
15 | "time"
16 |
17 | "github.com/douban/gobeansdb/cmem"
18 | dbcfg "github.com/douban/gobeansdb/config"
19 | mc "github.com/douban/gobeansdb/memcache"
20 | "github.com/douban/gobeansdb/utils"
21 | "github.com/douban/gobeansproxy/cassandra"
22 | "github.com/douban/gobeansproxy/config"
23 | "github.com/douban/gobeansproxy/dstore"
24 |
25 | "github.com/prometheus/client_golang/prometheus/promhttp"
26 |
27 | yaml "gopkg.in/yaml.v2"
28 | )
29 |
30 | func getBucket(r *http.Request) (bucketID int64, err error) {
31 | s := filepath.Base(r.URL.Path)
32 | return strconv.ParseInt(s, 16, 16)
33 | }
34 |
35 | func handleWebPanic(w http.ResponseWriter) {
36 | r := recover()
37 | if r != nil {
38 | stack := utils.GetStack(2000)
39 | logger.Errorf("web req panic:%#v, stack:%s", r, stack)
40 | fmt.Fprintf(w, "\npanic:%#v, stack:%s", r, stack)
41 | }
42 | }
43 |
44 | func handleYaml(w http.ResponseWriter, v interface{}) {
45 | defer handleWebPanic(w)
46 | b, err := yaml.Marshal(v)
47 | if err != nil {
48 | w.Write([]byte(err.Error()))
49 | } else {
50 | w.Write(b)
51 | }
52 | }
53 |
54 | func handleJson(w http.ResponseWriter, v interface{}) {
55 | b, err := json.Marshal(v)
56 | if err != nil {
57 | w.Write([]byte(err.Error()))
58 | } else {
59 | w.Write(b)
60 | }
61 | }
62 |
63 |
64 | type templateHandler struct {
65 | once sync.Once
66 | filename string
67 | templ *template.Template
68 | }
69 |
70 | func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
71 | // add divide func
72 | fm := template.FuncMap{"divide": func(sumTime float64, count int) int {
73 | return int(sumTime) / count
74 | }}
75 | t.once.Do(func() {
76 | t.templ = template.Must(template.New("base.html").Funcs(fm).Option("missingkey=error").ParseFiles(
77 | filepath.Join(proxyConf.StaticDir, t.filename),
78 | filepath.Join(proxyConf.StaticDir, "templates/base.html")))
79 | })
80 | var data map[string]interface{}
81 | if t.filename == "templates/score.html" {
82 | data = map[string]interface{}{
83 | "stats": dstore.GetScheduler().Stats(),
84 | }
85 | }
86 | if t.filename == "templates/bucketinfo.html" {
87 | bucketID, err := getBucket(r)
88 | if err != nil {
89 | } // on parse failure bucketID stays 0, so bucket 0 is rendered
90 | data = map[string]interface{}{
91 | "bucketinfo": dstore.GetScheduler().GetBucketInfo(bucketID),
92 | }
93 | }
94 |
95 | if t.filename == "templates/buckets.html" {
96 | data = map[string]interface{}{
97 | "buckets": dstore.GetScheduler().Partition(),
98 | }
99 |
100 | }
101 | e := t.templ.Execute(w, data)
102 | if e != nil {
103 | logger.Errorf("ServeHTTP filename:%s, error: %s", t.filename, e.Error())
104 | }
105 | }
106 |
107 | func startWeb() {
108 | http.Handle("/templates/", http.FileServer(http.Dir(proxyConf.StaticDir)))
109 |
110 | http.Handle("/", &templateHandler{filename: "templates/stats.html"})
111 | http.Handle("/score/", &templateHandler{filename: "templates/score.html"})
112 | http.Handle("/bucketinfo/", &templateHandler{filename: "templates/bucketinfo.html"})
113 | http.Handle("/buckets", &templateHandler{filename: "templates/buckets.html"})
114 | http.HandleFunc("/score/json", handleScore)
115 | http.HandleFunc("/api/response_stats", handleSche)
116 | http.HandleFunc("/api/partition", handlePartition)
117 | http.HandleFunc("/api/bucket/", handleBucket)
118 |
119 | // same as gobeansdb
120 | http.HandleFunc("/config/", handleConfig)
121 | http.HandleFunc("/request/", handleRequest)
122 | http.HandleFunc("/buffer/", handleBuffer)
123 | http.HandleFunc("/memstat/", handleMemStat)
124 | http.HandleFunc("/rusage/", handleRusage)
125 | http.HandleFunc("/route/", handleRoute)
126 | http.HandleFunc("/route/version", handleRouteVersion)
127 | http.HandleFunc("/route/reload", handleRouteReload)
128 | http.Handle(
129 | "/metrics",
130 | promhttp.HandlerFor(dstore.BdbProxyPromRegistry,
131 | promhttp.HandlerOpts{Registry: dstore.BdbProxyPromRegistry}),
132 | )
133 | http.HandleFunc("/cstar-cfg", handleCstarCfgReload)
134 |
135 | webaddr := fmt.Sprintf("%s:%d", proxyConf.Listen, proxyConf.WebPort)
136 | go func() {
137 | logger.Infof("HTTP listen at %s", webaddr)
138 | if err := http.ListenAndServe(webaddr, nil); err != nil {
139 | logger.Fatalf("ListenAndServer: %s", err.Error())
140 | }
141 | }()
142 | }
143 |
144 | func handleConfig(w http.ResponseWriter, r *http.Request) {
145 | defer handleWebPanic(w)
146 | handleJson(w, proxyConf)
147 | }
148 |
149 | func handleRequest(w http.ResponseWriter, r *http.Request) {
150 | defer handleWebPanic(w)
151 | handleJson(w, mc.RL)
152 | }
153 |
154 | func handleRusage(w http.ResponseWriter, r *http.Request) {
155 | defer handleWebPanic(w)
156 | rusage := utils.Getrusage()
157 | handleJson(w, rusage)
158 | }
159 |
160 | func handleMemStat(w http.ResponseWriter, r *http.Request) {
161 | defer handleWebPanic(w)
162 | var ms runtime.MemStats
163 | runtime.ReadMemStats(&ms)
164 | handleJson(w, ms)
165 | }
166 |
167 | func handleBuffer(w http.ResponseWriter, r *http.Request) {
168 | defer handleWebPanic(w)
169 | handleJson(w, &cmem.DBRL)
170 | }
171 |
172 | func handleScore(w http.ResponseWriter, r *http.Request) {
173 | defer handleWebPanic(w)
174 | scores := dstore.GetScheduler().Stats()
175 | handleJson(w, scores)
176 | }
177 |
178 | func handleRoute(w http.ResponseWriter, r *http.Request) {
179 | defer handleWebPanic(w)
180 | handleYaml(w, config.Route)
181 | }
182 |
183 | func handleSche(w http.ResponseWriter, r *http.Request) {
184 | defer handleWebPanic(w)
185 | responseStats := dstore.GetScheduler().LatenciesStats()
186 | handleJson(w, responseStats)
187 | }
188 |
189 | func handlePartition(w http.ResponseWriter, r *http.Request) {
190 | defer handleWebPanic(w)
191 | partition := dstore.GetScheduler().Partition()
192 | handleJson(w, partition)
193 | }
194 |
195 | func handleBucket(w http.ResponseWriter, r *http.Request) {
196 | defer handleWebPanic(w)
197 | bucketID, err := getBucket(r)
198 | if err != nil {
199 | } // on parse failure bucketID stays 0, so bucket 0 info is returned
200 | bktInfo := dstore.GetScheduler().GetBucketInfo(bucketID)
201 | handleJson(w, bktInfo)
202 | }
203 |
204 | func handleRouteVersion(w http.ResponseWriter, r *http.Request) {
205 | defer handleWebPanic(w)
206 | if len(proxyConf.ZKServers) == 0 {
207 | w.Write([]byte("-1"))
208 | return
209 | } else {
210 | w.Write([]byte(strconv.Itoa(dbcfg.ZKClient.Version)))
211 | }
212 | }
213 |
214 | func getFormValueInt(r *http.Request, name string, ndefault int) (n int, err error) {
215 | n = ndefault
216 | s := r.FormValue(name)
217 | if s != "" {
218 | n, err = strconv.Atoi(s)
219 | }
220 | return
221 | }
222 |
223 | func handleRouteReload(w http.ResponseWriter, r *http.Request) {
224 | if !proxyConf.DStoreConfig.Enable {
225 | w.Write([]byte("err: dstore not enabled"))
226 | return
227 | }
228 |
229 | var err error
230 | if !dbcfg.AllowReload {
231 | w.Write([]byte("err: reloading"))
232 | return
233 | }
234 |
235 | dbcfg.AllowReload = false
236 | defer func() {
237 | dbcfg.AllowReload = true
238 | if err != nil {
239 | logger.Errorf("handleRoute err: %s", err.Error())
240 | w.Write([]byte(err.Error()))
241 | return
242 | }
243 | }()
244 |
245 | if proxyConf.DStoreConfig.Scheduler == dstore.BucketsManualSchduler {
246 | if len(proxyConf.ZKServers) == 0 {
247 | w.Write([]byte("err: not using zookeeper"))
248 | return
249 | }
250 |
251 | defer handleWebPanic(w)
252 |
253 | r.ParseForm()
254 | ver, err := getFormValueInt(r, "ver", -1)
255 | if err != nil {
256 | return
257 | }
258 |
259 | newRouteContent, ver, err := dbcfg.ZKClient.GetRouteRaw(ver)
260 | if ver == dbcfg.ZKClient.Version {
261 | w.Write([]byte(fmt.Sprintf("warn: same version %d", ver)))
262 | return
263 | }
264 | info := fmt.Sprintf("update with route version %d\n", ver)
265 | logger.Infof(info)
266 | newRoute := new(dbcfg.RouteTable)
267 | err = newRoute.LoadFromYaml(newRouteContent)
268 | if err != nil {
269 | return
270 | }
271 |
272 | oldScheduler := dstore.GetScheduler()
273 | dstore.InitGlobalManualScheduler(newRoute, proxyConf.N, proxyConf.Scheduler)
274 | config.Route = newRoute
275 | dbcfg.ZKClient.Version = ver
276 | w.Write([]byte("ok"))
277 |
278 | go func() {
279 | // sleep to let in-flight requests complete.
280 | time.Sleep(time.Duration(proxyConf.ReadTimeoutMs) * time.Millisecond * 5)
281 | logger.Infof("scheduler closing when reroute, request: %v", r)
282 | oldScheduler.Close()
283 | }()
284 | } else {
285 | routePath := path.Join(proxyConf.Confdir, "route.yaml")
286 | route, err := dbcfg.LoadRouteTableLocal(routePath)
287 | if err != nil {
288 | w.Write([]byte(fmt.Sprintf("%s", err)))
289 | return
290 | }
291 | dstore.InitGlobalManualScheduler(route, proxyConf.N, proxyConf.Scheduler)
292 | config.Route = route
293 | w.Write([]byte("ok"))
294 | }
295 | }
296 |
297 | type ReloadableCfg struct {
298 | Cfg map[string]string `json:"cfg"`
299 | Message string `json:"message"`
300 | Error string `json:"error"`
301 | }
302 |
303 | func handleCstarCfgReload(w http.ResponseWriter, r *http.Request) {
304 | defer handleWebPanic(w)
305 |
306 | w.Header().Set("Content-Type", "application/json")
307 | resp := make(map[string]string)
308 | cfgName := r.URL.Query().Get("config")
309 | var dispatcher cassandra.PrefixDisPatcher
310 |
311 | switch cfgName {
312 | case "tablefinder":
313 | if dstore.PrefixTableFinder == nil {
314 | resp["error"] = "cassandra is disabled"
315 | w.WriteHeader(http.StatusBadRequest)
316 | handleJson(w, resp)
317 | return
318 | }
319 | dispatcher = dstore.PrefixTableFinder
320 | case "rwswitcher":
321 | if dstore.PrefixStorageSwitcher == nil {
322 | resp["error"] = "cassandra is disabled"
323 | w.WriteHeader(http.StatusBadRequest)
324 | handleJson(w, resp)
325 | return
326 | }
327 | dispatcher = dstore.PrefixStorageSwitcher
328 | default:
329 | resp["error"] = "unsupported config query arg, must be: tablefinder/rwswitcher"
330 | w.WriteHeader(http.StatusBadRequest)
331 | handleJson(w, resp)
332 | return
333 | }
334 |
335 | switch r.Method {
336 | case "GET":
337 | response := ReloadableCfg{
338 | Cfg: dispatcher.GetCurrentMap(),
339 | }
340 | response.Message = "success"
341 | w.WriteHeader(http.StatusOK)
342 | handleJson(w, response)
343 | return
344 | case "POST":
345 | staticCfg, err := dispatcher.LoadStaticCfg(config.Proxy.Confdir)
346 | if err != nil {
347 | resp["error"] = fmt.Sprintf("load static cfg err: %s", err)
348 | break
349 | }
350 |
351 | err = dispatcher.LoadCfg(staticCfg, dstore.CqlStore)
352 | if err != nil {
353 | resp["error"] = fmt.Sprintf("load cfg from db err: %s", err)
354 | break
355 | }
356 | resp["message"] = "ok"
357 | case "PUT":
358 | // load cfg static
359 | staticCfg, err := dispatcher.LoadStaticCfg(config.Proxy.Confdir)
360 | if err != nil {
361 | resp["error"] = fmt.Sprintf("load static cfg err: %s", err)
362 | break
363 | }
364 |
365 | // upsert new data to db
366 | b, err := ioutil.ReadAll(r.Body)
367 | if err != nil {
368 | resp["error"] = fmt.Sprintf("get body from req err: %s", err)
369 | break
370 | }
371 | defer r.Body.Close()
372 | var data map[string](map[string][]string)
373 | err = json.Unmarshal(b, &data)
374 | if err != nil {
375 | resp["error"] = fmt.Sprintf("parse req err: %s", err)
376 | break
377 | }
378 | pdata, ok := data["prefix"]
379 | if !ok {
380 | resp["error"] = "parse req err: doesn't match {'prefix': {'': ['prefix1', 'prefix2']}}"
381 | break
382 | }
383 | err = dispatcher.Upsert(staticCfg, pdata, dstore.CqlStore)
384 | if err != nil {
385 | resp["error"] = fmt.Sprintf("upsert data %v err: %s", data, err)
386 | break
387 | }
388 |
389 | // reload the cfg so the change takes effect
390 | err = dispatcher.LoadCfg(staticCfg, dstore.CqlStore)
391 | if err != nil {
392 | resp["error"] = fmt.Sprintf("load cfg to server err: %s", err)
393 | break
394 | }
395 | case "DELETE":
396 | // load cfg static
397 | staticCfg, err := dispatcher.LoadStaticCfg(config.Proxy.Confdir)
398 | if err != nil {
399 | resp["error"] = fmt.Sprintf("load static cfg err: %s", err)
400 | break
401 | }
402 |
403 | // read the prefix to delete from the request body
404 | b, err := ioutil.ReadAll(r.Body)
405 | if err != nil {
406 | resp["error"] = fmt.Sprintf("get body from req err: %s", err)
407 | break
408 | }
409 | defer r.Body.Close()
410 | var data map[string]string
411 | err = json.Unmarshal(b, &data)
412 | if err != nil {
413 | resp["error"] = fmt.Sprintf("parse req err: %s", err)
414 | break
415 | }
416 |
417 | prefix, ok := data["prefix"]
418 | if !ok {
419 | resp["error"] = "req data should look like: {'prefix': }"
420 | break
421 | }
422 | err = dispatcher.DeletePrefix(staticCfg, prefix, dstore.CqlStore)
423 | if err != nil {
424 | resp["error"] = fmt.Sprintf("delete prefix %v err: %s", data, err)
425 | break
426 | }
427 |
428 | // reload the cfg so the change takes effect
429 | err = dispatcher.LoadCfg(staticCfg, dstore.CqlStore)
430 | if err != nil {
431 | resp["error"] = fmt.Sprintf("load cfg to server err: %s", err)
432 | break
433 | }
434 | default:
435 | resp["error"] = "unsupported method"
436 | w.WriteHeader(http.StatusBadRequest); handleJson(w, resp); return // respond now; the block below would write the header twice
437 | }
438 |
439 |
440 | if _, ok := resp["error"]; ok {
441 | w.WriteHeader(http.StatusBadGateway)
442 | } else {
443 | w.WriteHeader(http.StatusOK)
444 | resp["message"] = "success"
445 | }
446 | handleJson(w, resp)
447 | }
448 |
--------------------------------------------------------------------------------
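
For completeness, a hedged client-side sketch of the /cstar-cfg PUT branch above. Everything concrete here is a placeholder: the proxy address/port, the rwstatus key (valid values come from allowRWStatus in the cassandra package, not shown here) and the prefixes; only the URL path, the config=rwswitcher query argument and the {"prefix": {...}} body shape are taken from the handler.

// Illustrative only, not part of the repo: drive handleCstarCfgReload with a PUT.
// Address, rwstatus key and prefixes below are placeholders.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// body shape mirrors the handler: {"prefix": {"<rwstatus>": ["<prefix>", ...]}}
	payload := map[string]map[string][]string{
		"prefix": {
			"placeholder-rwstatus": {"placeholder-prefix-1", "placeholder-prefix-2"},
		},
	}
	b, _ := json.Marshal(payload)

	url := "http://127.0.0.1:8080/cstar-cfg?config=rwswitcher" // placeholder web address
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(b))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out)) // {"message":"success"} on reload, or an "error" field
}

As the other branches of the handler show, a GET with the same config argument returns the currently loaded prefix map, and a DELETE with a {"prefix": "..."} body removes a single prefix before reloading.
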
|