├── .gitignore ├── lrucache ├── .gitignore ├── list_extension.go ├── benchmark │ ├── vcache.go │ ├── mcache.go │ └── main.go ├── priorityqueue.go ├── Makefile ├── README.md ├── cache.go ├── multilru.go ├── multilru_test.go ├── list.go ├── lrucache.go └── lrucache_test.go ├── spacesaving ├── tools │ ├── .gitignore │ ├── go.sh │ ├── Makefile │ ├── main.go │ ├── compare.py │ ├── readpcap.go │ ├── perfect.go │ ├── README.md │ └── topdns.go ├── Makefile ├── count.go ├── srate.go ├── rate_test.go └── rate.go ├── pool ├── README.md ├── Makefile ├── pool_test.go └── pool.go ├── bytepool ├── README.md ├── Makefile ├── bytepool_test.go └── bytepool.go ├── ewma ├── Makefile ├── rate.go ├── ewma_test.go ├── ewma.go └── rate_test.go ├── circularbuffer ├── README.md ├── Makefile ├── circularbuffer_test.go └── circularbuffer.go ├── README.md ├── .github └── workflows │ └── semgrep.yml ├── LICENSE-BSD-CloudFlare ├── tokenbucket ├── bucket_test.go ├── bucket.go └── sip.go └── kt ├── bench_test.go ├── kt_metrics.go ├── kt_base_test.go └── kt.go /.gitignore: -------------------------------------------------------------------------------- 1 | */cover.out 2 | 3 | -------------------------------------------------------------------------------- /lrucache/.gitignore: -------------------------------------------------------------------------------- 1 | cover.out~ 2 | benchmark/benchmark 3 | 4 | -------------------------------------------------------------------------------- /spacesaving/tools/.gitignore: -------------------------------------------------------------------------------- 1 | main 2 | perfect 3 | topdns 4 | readpcap 5 | 6 | -------------------------------------------------------------------------------- /pool/README.md: -------------------------------------------------------------------------------- 1 | Pool 2 | ---- 3 | 4 | A backported version of 5 | [`sync.Pool`](http://tip.golang.org/pkg/sync/#Pool) package. 
6 | -------------------------------------------------------------------------------- /bytepool/README.md: -------------------------------------------------------------------------------- 1 | BytePool 2 | -------- 3 | 4 | DEPRECATED. Use [`sync.Pool`](https://golang.org/pkg/sync/#Pool) instead. 5 | 6 | For discussion see this blog post: 7 | 8 | - http://blog.cloudflare.com/recycling-memory-buffers-in-go 9 | -------------------------------------------------------------------------------- /spacesaving/tools/go.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | make 5 | 6 | PCAPFILE=$1 7 | LEN=$2 8 | 9 | TEMP=/tmp/spacesavingtemp 10 | 11 | ./readpcap $PCAPFILE > $TEMP-read 12 | ./perfect < $TEMP-read > $TEMP-perfect 13 | ./main $LEN < $TEMP-read > $TEMP-rate 14 | python compare.py $TEMP-perfect $TEMP-rate 30 15 | -------------------------------------------------------------------------------- /ewma/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2014 CloudFlare, Inc. 2 | 3 | RACE+=--race 4 | 5 | COVEROUT=cover.out 6 | 7 | .PHONY: test clean 8 | 9 | test: 10 | @go test $(RACE) -bench=. -v . 11 | @go test -coverprofile=$(COVEROUT) . > /dev/null 12 | @go tool cover -func=$(COVEROUT)|sed 's|^.*/\([^/]*/[^/]*/[^/]*\)$$|\1|g' 13 | 14 | clean: 15 | rm -rf $(COVEROUT) 16 | -------------------------------------------------------------------------------- /pool/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | RACE+=--race 4 | 5 | COVEROUT=cover.out 6 | 7 | .PHONY: test clean 8 | 9 | test: 10 | @go test $(RACE) -bench=. -v . 11 | @go test -coverprofile=$(COVEROUT) . 
> /dev/null 12 | @go tool cover -func=$(COVEROUT)|sed 's|^.*/\([^/]*/[^/]*/[^/]*\)$$|\1|g' 13 | 14 | clean: 15 | rm -rf $(COVEROUT) 16 | -------------------------------------------------------------------------------- /spacesaving/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2014 CloudFlare, Inc. 2 | 3 | RACE+=--race 4 | 5 | COVEROUT=cover.out 6 | 7 | .PHONY: test clean 8 | 9 | test: 10 | @go test -coverprofile=$(COVEROUT) . 11 | @go tool cover -func=$(COVEROUT)|sed 's|^.*/\([^/]*/[^/]*/[^/]*\)$$|\1|g' 12 | @go test -bench=. -v . -run=nil 13 | 14 | clean: 15 | rm -rf $(COVEROUT) 16 | -------------------------------------------------------------------------------- /bytepool/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | RACE+=--race 4 | 5 | COVEROUT=cover.out 6 | 7 | .PHONY: test clean 8 | 9 | test: 10 | @go test $(RACE) -bench=. -v . 11 | @go test -coverprofile=$(COVEROUT) . > /dev/null 12 | @go tool cover -func=$(COVEROUT)|sed 's|^.*/\([^/]*/[^/]*/[^/]*\)$$|\1|g' 13 | 14 | clean: 15 | rm -rf $(COVEROUT) 16 | -------------------------------------------------------------------------------- /circularbuffer/README.md: -------------------------------------------------------------------------------- 1 | CircularBuffer 2 | -------------- 3 | 4 | A `golang` implementation of circular buffer data structure. It can be 5 | used as a fixed size queue or stack. 
6 | 7 | To install: 8 | 9 | go get github.com/cloudflare/golibs/circularbuffer 10 | 11 | To test: 12 | 13 | cd $GOPATH/src/github.com/cloudflare/golibs/circularbuffer 14 | make test 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | CloudFlare golibs 2 | ----------------- 3 | 4 | Simple `golang` libraries used internally at CloudFlare: 5 | 6 | - `circularbuffer`: circular buffer data structure (fixed size stack) 7 | - `lrucache`: last recently used cache data structure 8 | - `pool`: backported version of `sync.Pool` 9 | - `bytepool`: a pool of byte slices 10 | - `ewma`: exponentially decaying moving average 11 | -------------------------------------------------------------------------------- /circularbuffer/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | RACE+=--race 4 | 5 | PKGNAME=github.com/cloudflare/golibs/circularbuffer 6 | COVEROUT=cover.out 7 | 8 | .PHONY: test clean 9 | 10 | test: 11 | @go test $(RACE) -bench=. 
-v $(PKGNAME) 12 | @go test -coverprofile=$(COVEROUT) $(PKGNAME) > /dev/null 13 | @go tool cover -func=$(COVEROUT)|sed 's|^.*/\([^/]*/[^/]*/[^/]*\)$$|\1|g' 14 | 15 | clean: 16 | rm -rf $(COVEROUT) 17 | -------------------------------------------------------------------------------- /spacesaving/tools/Makefile: -------------------------------------------------------------------------------- 1 | 2 | all: main perfect readpcap topdns 3 | 4 | GOLIBS=$(GOPATH)/src/github.com/cloudflare/golibs 5 | 6 | main: main.go $(GOLIBS)/spacesaving/*go 7 | go build main.go 8 | 9 | perfect: perfect.go $(GOLIBS)/ewma/*go 10 | go build perfect.go 11 | 12 | readpcap: readpcap.go $(GOPATH)/src/github.com/miekg/pcap/*go $(GOPATH)/src/github.com/miekg/dns/*go 13 | go build readpcap.go 14 | 15 | topdns: topdns.go $(GOLIBS)/spacesaving/*go $(GOPATH)/src/github.com/miekg/pcap/*go 16 | go build topdns.go 17 | 18 | clean: 19 | rm -f main perfect readpcap topdns 20 | 21 | -------------------------------------------------------------------------------- /lrucache/list_extension.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | // Extensions to "container/list" that allowing reuse of Elements. 
4 | 5 | package lrucache 6 | 7 | func (l *list) PushElementFront(e *element) *element { 8 | return l.insert(e, &l.root) 9 | } 10 | 11 | func (l *list) PushElementBack(e *element) *element { 12 | return l.insert(e, l.root.prev) 13 | } 14 | 15 | func (l *list) PopElementFront() *element { 16 | el := l.Front() 17 | l.Remove(el) 18 | return el 19 | } 20 | 21 | func (l *list) PopFront() interface{} { 22 | el := l.Front() 23 | l.Remove(el) 24 | return el.Value 25 | } 26 | -------------------------------------------------------------------------------- /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | 2 | on: 3 | pull_request: {} 4 | workflow_dispatch: {} 5 | push: 6 | branches: 7 | - main 8 | - master 9 | name: Semgrep config 10 | jobs: 11 | semgrep: 12 | name: semgrep/ci 13 | runs-on: ubuntu-20.04 14 | env: 15 | SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} 16 | SEMGREP_URL: https://cloudflare.semgrep.dev 17 | SEMGREP_APP_URL: https://cloudflare.semgrep.dev 18 | SEMGREP_VERSION_CHECK_URL: https://cloudflare.semgrep.dev/api/check-version 19 | container: 20 | image: returntocorp/semgrep 21 | steps: 22 | - uses: actions/checkout@v3 23 | - run: semgrep ci 24 | -------------------------------------------------------------------------------- /lrucache/benchmark/vcache.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 
2 | 3 | package main 4 | 5 | import ( 6 | vcache "github.com/youtube/vitess/go/cache" 7 | ) 8 | 9 | type VCache struct { 10 | vcache.LRUCache 11 | } 12 | 13 | func NewVCache(capacity uint64) *VCache { 14 | return &VCache{ 15 | LRUCache: *vcache.NewLRUCache(capacity), 16 | } 17 | } 18 | 19 | type Value struct { 20 | v string 21 | } 22 | 23 | func (*Value) Size() int { 24 | return 1 25 | } 26 | 27 | func (c *VCache) Get(key string) (string, bool) { 28 | v, ok := c.LRUCache.Get(key) 29 | if !ok { 30 | return "", false 31 | } 32 | return v.(*Value).v, ok 33 | } 34 | 35 | func (c *VCache) Set(key, value string) { 36 | c.LRUCache.Set(key, &Value{v: value}) 37 | } 38 | -------------------------------------------------------------------------------- /lrucache/priorityqueue.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | // This code is based on golang example from "container/heap" package. 4 | 5 | package lrucache 6 | 7 | type priorityQueue []*entry 8 | 9 | func (pq priorityQueue) Len() int { 10 | return len(pq) 11 | } 12 | 13 | func (pq priorityQueue) Less(i, j int) bool { 14 | return pq[i].expire.Before(pq[j].expire) 15 | } 16 | 17 | func (pq priorityQueue) Swap(i, j int) { 18 | pq[i], pq[j] = pq[j], pq[i] 19 | pq[i].index = i 20 | pq[j].index = j 21 | } 22 | 23 | func (pq *priorityQueue) Push(e interface{}) { 24 | n := len(*pq) 25 | item := e.(*entry) 26 | item.index = n 27 | *pq = append(*pq, item) 28 | } 29 | 30 | func (pq *priorityQueue) Pop() interface{} { 31 | old := *pq 32 | n := len(old) 33 | item := old[n-1] 34 | item.index = -1 35 | *pq = old[0 : n-1] 36 | return item 37 | } 38 | -------------------------------------------------------------------------------- /lrucache/benchmark/mcache.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 
2 | 3 | package main 4 | 5 | import ( 6 | "github.com/cloudflare/golibs/lrucache" 7 | "time" 8 | ) 9 | 10 | func makeLRUCache(capacity uint64) lrucache.Cache { 11 | return lrucache.NewLRUCache(uint(capacity)) 12 | } 13 | func makeMultiLRU(capacity uint64) lrucache.Cache { 14 | shards := uint(2) 15 | return lrucache.NewMultiLRUCache(shards, uint(capacity)/shards) 16 | } 17 | 18 | type MCache struct { 19 | lrucache.Cache 20 | expiry time.Time 21 | now time.Time 22 | } 23 | 24 | type makeCache func(capacity uint64) lrucache.Cache 25 | 26 | func NewMCache(capacity uint64, newCache makeCache) *MCache { 27 | return &MCache{ 28 | Cache: newCache(capacity), 29 | expiry: time.Now().Add(time.Duration(30 * time.Second)), 30 | now: time.Now(), 31 | } 32 | } 33 | 34 | func (c *MCache) Get(key string) (string, bool) { 35 | v, ok := c.Cache.Get(key) 36 | if !ok { 37 | return "", false 38 | } 39 | return v.(*Value).v, true 40 | } 41 | 42 | func (c *MCache) Set(key, value string) { 43 | c.Cache.Set(key, &Value{v: value}, time.Time{}) 44 | } 45 | -------------------------------------------------------------------------------- /lrucache/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | RACE+=--race 4 | 5 | PKGNAME=github.com/cloudflare/golibs/lrucache 6 | SKIPCOVER=list.go|list_extension.go|priorityqueue.go 7 | 8 | .PHONY: all test bench cover clean 9 | 10 | all: 11 | @echo "Targets:" 12 | @echo " test: run tests with race detector" 13 | @echo " cover: print test coverage" 14 | @echo " bench: run basic benchmarks" 15 | 16 | test: 17 | @go test $(RACE) -bench=. 
-v $(PKGNAME) 18 | 19 | COVEROUT=cover.out 20 | cover: 21 | @go test -coverprofile=$(COVEROUT) -v $(PKGNAME) 22 | @cat $(COVEROUT) | egrep -v '$(SKIPCOVER)' > $(COVEROUT)~ 23 | @go tool cover -func=$(COVEROUT)~|sed 's|^.*/\([^/]*/[^/]*/[^/]*\)$$|\1|g' 24 | 25 | bench: 26 | @echo "[*] Scalability of cache/lrucache" 27 | @echo "[ ] Operations in shared cache using one core" 28 | @GOMAXPROCS=1 go test -run=- -bench='.*LRUCache.*' $(PKGNAME) \ 29 | | egrep -v "^PASS|^ok" 30 | 31 | @echo "[*] Scalability of cache/multilru" 32 | @echo "[ ] Operations in four caches using four cores " 33 | @GOMAXPROCS=4 go test -run=- -bench='.*MultiLRU.*' $(PKGNAME) \ 34 | | egrep -v "^PASS|^ok" 35 | 36 | 37 | @(cd benchmark; go build $(PKGNAME)/benchmark) 38 | @./benchmark/benchmark 39 | 40 | clean: 41 | rm -rf $(COVEROUT) $(COVEROUT)~ benchmark/benchmark 42 | -------------------------------------------------------------------------------- /spacesaving/tools/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "github.com/cloudflare/golibs/spacesaving" 7 | "io" 8 | "os" 9 | "strconv" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | const TimeFormatString = "2006-01-02 15:04:05.999999999 -0700 MST" 15 | const halfLife = 60 * time.Second 16 | 17 | func main() { 18 | ss := spacesaving.Rate{} 19 | 20 | slots, err := strconv.ParseInt(os.Args[1], 10, 64) 21 | if err != nil { 22 | panic(err) 23 | } 24 | 25 | ss.Init(uint32(slots), halfLife) 26 | 27 | var lastTime time.Time 28 | 29 | in := bufio.NewReader(os.Stdin) 30 | for lineno := 1; true; lineno += 1 { 31 | line, err := in.ReadString('\n') 32 | if err == io.EOF { 33 | break 34 | } 35 | if err != nil { 36 | fmt.Fprintf(os.Stderr, "%s\n", err) 37 | os.Exit(1) 38 | } 39 | line = strings.TrimSpace(line) 40 | parts := strings.SplitN(line, ",", 2) 41 | 42 | ts, err := time.Parse(TimeFormatString, parts[0]) 43 | if err != nil { 44 | fmt.Fprintf(os.Stderr, 
"Ignoring line %d: %v\n", 45 | lineno, err) 46 | continue 47 | } 48 | key := strings.TrimSpace(parts[1]) 49 | 50 | ss.Touch(key, ts) 51 | lastTime = ts 52 | } 53 | 54 | elements := ss.GetAll(lastTime) 55 | for _, e := range elements { 56 | fmt.Printf("%s, %f, %f\n", e.Key, e.LoRate, e.HiRate) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /lrucache/README.md: -------------------------------------------------------------------------------- 1 | LRU Cache 2 | --------- 3 | 4 | A `golang` implementation of last recently used cache data structure. 5 | 6 | To install: 7 | 8 | go get github.com/cloudflare/golibs/lrucache 9 | 10 | To test: 11 | 12 | cd $GOPATH/src/github.com/cloudflare/golibs/lrucache 13 | make test 14 | 15 | For coverage: 16 | 17 | make cover 18 | 19 | Basic benchmarks: 20 | 21 | $ make bench # As tested on my two core i5 22 | [*] Scalability of cache/lrucache 23 | [ ] Operations in shared cache using one core 24 | BenchmarkConcurrentGetLRUCache 5000000 450 ns/op 25 | BenchmarkConcurrentSetLRUCache 2000000 821 ns/op 26 | BenchmarkConcurrentSetNXLRUCache 5000000 664 ns/op 27 | 28 | [*] Scalability of cache/multilru 29 | [ ] Operations in four caches using four cores 30 | BenchmarkConcurrentGetMultiLRU-4 5000000 475 ns/op 31 | BenchmarkConcurrentSetMultiLRU-4 2000000 809 ns/op 32 | BenchmarkConcurrentSetNXMultiLRU-4 5000000 643 ns/op 33 | 34 | [*] Capacity=4096 Keys=30000 KeySpace=15625 35 | vitess LRUCache MultiLRUCache-4 36 | create 1.709us 1.626374ms 343.54us 37 | Get (miss) 144.266083ms 132.470397ms 177.277193ms 38 | SetNX #1 338.637977ms 380.733302ms 411.709204ms 39 | Get (hit) 195.896066ms 173.252112ms 234.109494ms 40 | SetNX #2 349.785951ms 367.255624ms 419.129127ms 41 | -------------------------------------------------------------------------------- /LICENSE-BSD-CloudFlare: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013 CloudFlare, Inc. 
All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of the CloudFlare, Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | -------------------------------------------------------------------------------- /spacesaving/tools/compare.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import string 5 | 6 | maxlines = int(sys.argv[3] or '1000', 10) 7 | 8 | perfect = {} 9 | 10 | with open(sys.argv[1], 'r') as f: 11 | for line in map(string.strip, f): 12 | key, v0 = line.split(",", 1) 13 | perfect[key] = float(v0) 14 | 15 | count = 0 16 | low = 0.0 17 | up = 0.0 18 | 19 | perfect_max = max(perfect.itervalues()) 20 | 21 | print "%50s\t%s\t\t%s\t\t%s\t\t%s" % ( 22 | "key", "min", "max", "real", "error" 23 | ) 24 | 25 | with open(sys.argv[2], 'r') as f: 26 | for line in map(string.strip, f): 27 | key, v1, v2 = line.split(",", 2) 28 | v1, v2 = float(v1), float(v2) 29 | 30 | vp = perfect[key] 31 | del perfect[key] 32 | if v1 <= vp <= v2: 33 | err = False 34 | else: 35 | err = True 36 | #print "%s: %f %f %f" % (key, v1, vp, v2) 37 | 38 | count += 1 39 | low += (vp-v1)**2 40 | up += (v2-vp)**2 41 | 42 | if count < maxlines or err: 43 | print "%50s\t%f\t%f\t%f\t%f%s" % ( 44 | key[:50], v1, v2, vp, (vp-v1)**2 + (v2-vp)**2, "\tERROR" if err else "") 45 | 46 | print 47 | k, v = max(perfect.iteritems(), key=lambda (k,v): v) 48 | print "Item with max rate uncaptured:" 49 | print "%50s\t\t\t\t\t%f" % ('> ' + k[:48], v) 50 | print 51 | print "%50s\t%f\t%f\t\t\t%f" % ( 52 | "mean error per item (%i items):" % count, low/count, up/count, (low+up)/count 53 | ) 54 | print "%50s\t\t\t\t\t%f" % ( 55 | "total uncounted rate (%i items):" % len(perfect), sum(perfect.itervalues()) 56 | ) 57 | -------------------------------------------------------------------------------- /tokenbucket/bucket_test.go: -------------------------------------------------------------------------------- 1 | package tokenbucket_test 2 | 3 | import ( 4 | "github.com/cloudflare/golibs/tokenbucket" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func 
TestInvalidBurst(t *testing.T) { 10 | defer func() { 11 | if recover() == nil { 12 | t.Fatal("expected panic, got nothing") 13 | } 14 | }() 15 | _ = tokenbucket.New(1, 300, 0) 16 | } 17 | 18 | func TestBucketDepth(t *testing.T) { 19 | b := tokenbucket.New(1, 300, 300) 20 | n := 0 21 | // With a bucket the size of the rate 22 | // we should expect the rate * 1s to make it through the filter 23 | // Because we can't touch simultaneously, use a fudge factor. 24 | for b.Touch(nil) { 25 | n++ 26 | } 27 | if n != 300 { 28 | t.Fatal("expected 300 touches to be sucessful; got ", n) 29 | } 30 | now := time.Now() 31 | for !b.Touch(nil) { 32 | } 33 | // Filter allowed us through. This should have taken about one second / rate 34 | dur := time.Since(now) 35 | dur *= 300 36 | diff := 1*time.Second - dur 37 | if diff > 30*time.Millisecond { 38 | t.Fatal("expected second +- 30ms to recover; got ", dur) 39 | } 40 | } 41 | 42 | func TestRate(t *testing.T) { 43 | b := tokenbucket.New(1, 5000, 2500) 44 | now := time.Now() 45 | passed := 0 46 | for time.Since(now) < 1*time.Second { 47 | if b.Touch(nil) { 48 | passed += 1 49 | } 50 | } 51 | // we allow the burst at the start of the second, 52 | // then settle down into the actual rate. 53 | // Fudge term of 10 since we can't be sure that it's 54 | // an actual second that we loop for. 55 | if passed < 7490 || passed > 7510 { 56 | t.Fatal("expected 7500 touches through; got ", passed) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /tokenbucket/bucket.go: -------------------------------------------------------------------------------- 1 | // package tokenbucket implements a simple token bucket filter. 2 | package tokenbucket 3 | 4 | import ( 5 | "math/rand" 6 | "time" 7 | ) 8 | 9 | type item struct { 10 | credit uint64 11 | prev uint64 12 | } 13 | 14 | // Filter implements a token bucket filter. 
15 | type Filter struct { 16 | creditMax uint64 17 | touchCost uint64 18 | 19 | key0 uint64 20 | key1 uint64 21 | 22 | items []item 23 | } 24 | 25 | // New creates a new token bucket filter with num buckets, accruing tokens at rate per second. The depth specifies 26 | // the depth of the bucket. 27 | func New(num int, rate float64, depth uint64) *Filter { 28 | b := new(Filter) 29 | if depth <= 0 { 30 | panic("depth of bucket must be greater than 0") 31 | } 32 | b.touchCost = uint64((float64(1*time.Second) / rate)) 33 | b.creditMax = depth * b.touchCost 34 | b.items = make([]item, num) 35 | 36 | // Not the full range of a uint64, but we can 37 | // live with 2 bits of entropy missing 38 | b.key0 = uint64(rand.Int63()) 39 | b.key1 = uint64(rand.Int63()) 40 | 41 | return b 42 | } 43 | 44 | func (b *Filter) touch(it *item) bool { 45 | now := uint64(time.Now().UnixNano()) 46 | delta := now - it.prev 47 | it.credit += delta 48 | it.prev = now 49 | 50 | if it.credit > b.creditMax { 51 | it.credit = b.creditMax 52 | } 53 | 54 | if it.credit > b.touchCost { 55 | it.credit -= b.touchCost 56 | return true 57 | } 58 | return false 59 | } 60 | 61 | // Touch finds the token bucket for d, takes a token out of it and reports if 62 | // there are still tokens left in the bucket. 
63 | func (b *Filter) Touch(d []byte) bool { 64 | n := len(b.items) 65 | h := hash(b.key0, b.key1, d) 66 | i := h % uint64(n) 67 | return b.touch(&b.items[i]) 68 | } 69 | -------------------------------------------------------------------------------- /spacesaving/tools/readpcap.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "encoding/hex" 6 | "flag" 7 | "fmt" 8 | "github.com/miekg/dns" 9 | "github.com/miekg/pcap" 10 | "log" 11 | "os" 12 | "runtime/pprof" 13 | "strings" 14 | ) 15 | 16 | var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") 17 | 18 | func safeParse(msg *dns.Msg, data []byte) (err error) { 19 | defer func() { 20 | if e := recover(); e != nil { 21 | fmt.Fprintf(os.Stderr, "Crashed dns: %v\nError: %v\n", 22 | hex.EncodeToString(data), e) 23 | err = fmt.Errorf("bad packet") 24 | } 25 | }() 26 | return msg.Unpack(data) 27 | } 28 | 29 | func main() { 30 | flag.Parse() 31 | if *cpuprofile != "" { 32 | f, err := os.Create(*cpuprofile) 33 | if err != nil { 34 | log.Fatal(err) 35 | } 36 | pprof.StartCPUProfile(f) 37 | defer pprof.StopCPUProfile() 38 | } 39 | 40 | fileName := flag.Arg(0) 41 | 42 | pcapfile, err := pcap.OpenOffline(fileName) 43 | if err != nil { 44 | fmt.Fprintf(os.Stderr, "Can't open pcap file %#v: %v\n", 45 | fileName, err) 46 | os.Exit(1) 47 | } 48 | 49 | w := bufio.NewWriter(os.Stdout) 50 | 51 | i := uint(0) 52 | for pkt := pcapfile.Next(); pkt != nil; pkt = pcapfile.Next() { 53 | i += 1 54 | pkt.Decode() 55 | var msg dns.Msg 56 | 57 | if err := safeParse(&msg, pkt.Payload); err != nil { 58 | //fmt.Fprintf(os.Stderr, "err %v\n", err) 59 | continue 60 | } 61 | 62 | if len(msg.Question) != 1 { 63 | continue 64 | } 65 | 66 | // if msg.MsgHdr.Response == true { 67 | // continue 68 | // } 69 | 70 | qname := msg.Question[0].Name 71 | qname = qname[:len(qname)-1] 72 | fmt.Fprintf(w, "%s, %s\n", pkt.Time, strings.ToLower(qname)) 73 | } 74 | 
75 | w.Flush() 76 | fmt.Fprintf(os.Stderr, "Parsed %d packets\n", i) 77 | } 78 | -------------------------------------------------------------------------------- /spacesaving/tools/perfect.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "github.com/cloudflare/golibs/ewma" 7 | "io" 8 | "os" 9 | "sort" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | type Element struct { 15 | Key string 16 | Rate float64 17 | } 18 | 19 | type elslice []Element 20 | 21 | func (a elslice) Len() int { return len(a) } 22 | func (a elslice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 23 | func (a elslice) Less(i, j int) bool { return a[i].Rate < a[j].Rate } 24 | 25 | const TimeFormatString = "2006-01-02 15:04:05.999999999 -0700 MST" 26 | const halfLife = 60 * time.Second 27 | 28 | func main() { 29 | m := make(map[string]*ewma.EwmaRate, 4096) 30 | 31 | var lastTime time.Time 32 | 33 | in := bufio.NewReader(os.Stdin) 34 | for lineno := 1; true; lineno += 1 { 35 | line, err := in.ReadString('\n') 36 | if err == io.EOF { 37 | break 38 | } 39 | if err != nil { 40 | fmt.Fprintf(os.Stderr, "%s\n", err) 41 | os.Exit(1) 42 | } 43 | line = strings.TrimSpace(line) 44 | parts := strings.SplitN(line, ",", 2) 45 | 46 | ts, err := time.Parse(TimeFormatString, parts[0]) 47 | if err != nil { 48 | fmt.Fprintf(os.Stderr, "Ignoring line %d: %v\n", 49 | lineno, err) 50 | continue 51 | } 52 | key := strings.TrimSpace(parts[1]) 53 | 54 | if rate, found := m[key]; found { 55 | rate.Update(ts) 56 | } else { 57 | rate = new(ewma.EwmaRate) 58 | rate.Init(halfLife) 59 | rate.Update(ts) 60 | m[key] = rate 61 | } 62 | lastTime = ts 63 | } 64 | 65 | elements := make([]Element, 0, len(m)) 66 | for key, rate := range m { 67 | elements = append(elements, Element{ 68 | key, 69 | rate.Current(lastTime), 70 | }) 71 | } 72 | 73 | sort.Sort(sort.Reverse(elslice(elements))) 74 | 75 | for _, e := range elements { 76 | fmt.Printf("%s, 
%f\n", e.Key, e.Rate) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /spacesaving/tools/README.md: -------------------------------------------------------------------------------- 1 | Tools for validating spacesaving.Rate algorithm 2 | ----------- 3 | 4 | Testing github.com/cloudflare/golibs/spacesaving/rate.go ain't 5 | easy. Here are a few tools that can be used to make sure the thing 6 | actually works. 7 | 8 | First we need some data. Not having a better idea we use a pcap dump 9 | of a DNS traffic as test data. You can create a capture yourself: 10 | 11 | $ tcpdump -n -s0 -w dnstraffic.pcap -iany -c10000 udp and dst port 53 12 | 13 | Having that type `make`, you should compile four binaries: 14 | 15 | - `readpcap`: Prepares data for further steps: reads pcap and prints 16 | valid dns packets on stdout. 17 | 18 | - `main`: Reads data from stdin and counts rates using 19 | spacesaving.Rate implementation. 20 | 21 | - `perfect`: Reads data from stdin and counts rates using ideal 22 | ewma.Rate implementation, memory consumption is unlimited. 23 | 24 | - `topdns`: Uses pcap library to listen on a live network card and 25 | prints rates of captured dns requests. 26 | 27 | There is also a python script `compare.py` that can be used to compare 28 | two sets of results against each other. 29 | 30 | The steps are: 31 | 32 | 1) Use `./readpcap` tool to read the pcap and produce consumalbe data 33 | stream. 34 | 35 | 2) Use `./perfect` to count real packet rates and print them at the 36 | time of a last packet. This uses ideal implementation, and the memory 37 | usage is unconstrained. 38 | 39 | 3) Use `./main` to count approx packet rates using our 40 | spacesaving.Rate implementation. 41 | 42 | 4) Use `compare.py` to compare the results against each other. 43 | 44 | There is a handy script `go.sh` that does all that for you. 
For 45 | example to ask for rates of top 16 things: 46 | 47 | $ ./go.sh dnstraffic.pcap 16 48 | 49 | With a bit of luck packet ranges given by `main` will cover the real 50 | packet rates as returned by `perfect` tool. 51 | -------------------------------------------------------------------------------- /spacesaving/count.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 CloudFlare, Inc. 2 | 3 | package spacesaving 4 | 5 | type countBucket struct { 6 | key string 7 | count uint64 8 | error uint64 9 | } 10 | 11 | type Count struct { 12 | olist []countBucket 13 | hash map[string]uint32 14 | } 15 | 16 | func (ss *Count) Init(size int) *Count { 17 | *ss = Count{ 18 | olist: make([]countBucket, size), 19 | hash: make(map[string]uint32, size), 20 | } 21 | return ss 22 | } 23 | 24 | func (ss *Count) Touch(key string) { 25 | var ( 26 | bucketno uint32 27 | found bool 28 | bucket *countBucket 29 | ) 30 | 31 | if bucketno, found = ss.hash[key]; found { 32 | bucket = &ss.olist[bucketno] 33 | } else { 34 | bucketno = 0 35 | bucket = &ss.olist[bucketno] 36 | delete(ss.hash, bucket.key) 37 | ss.hash[key] = bucketno 38 | bucket.error = bucket.count 39 | bucket.key = key 40 | } 41 | 42 | bucket.count += 1 43 | 44 | for { 45 | if bucketno == uint32(len(ss.olist))-1 { 46 | break 47 | } 48 | 49 | b1 := &ss.olist[bucketno] 50 | b2 := &ss.olist[bucketno+1] 51 | if b1.count < b2.count { 52 | break 53 | } 54 | 55 | ss.hash[b1.key] = bucketno + 1 56 | ss.hash[b2.key] = bucketno 57 | *b1, *b2 = *b2, *b1 58 | bucketno += 1 59 | } 60 | } 61 | 62 | type Element struct { 63 | Key string 64 | LoCount uint64 65 | HiCount uint64 66 | } 67 | 68 | func (ss *Count) GetAll() []Element { 69 | elements := make([]Element, 0, len(ss.hash)) 70 | for i := len(ss.olist) - 1; i >= 0; i -= 1 { 71 | b := &ss.olist[i] 72 | if b.key == "" { 73 | continue 74 | } 75 | elements = append(elements, Element{ 76 | Key: b.key, 77 | LoCount: b.count - b.error, 
78 | HiCount: b.count, 79 | }) 80 | } 81 | return elements 82 | } 83 | 84 | func (ss *Count) Reset() { 85 | empty := countBucket{} 86 | for i, _ := range ss.olist { 87 | delete(ss.hash, ss.olist[i].key) 88 | ss.olist[i] = empty 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /ewma/rate.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 CloudFlare, Inc. 2 | // 3 | // Facilities for tickless measurment of rates 4 | // 5 | // Apply exponentially decaying moving average to count rates of 6 | // things per second. Useful for various metrics. 7 | package ewma 8 | 9 | import ( 10 | "time" 11 | ) 12 | 13 | type EwmaRate struct { 14 | Ewma 15 | } 16 | 17 | // Nanoseconds in second 18 | const nanosec = float64(1000000000) 19 | 20 | // Allocate a new NewEwmaRate structure 21 | // 22 | // halfLife it the time takes for a half charge or half discharge 23 | func NewEwmaRate(halfLife time.Duration) *EwmaRate { 24 | return (&EwmaRate{}).Init(halfLife) 25 | } 26 | 27 | // Initialize already allocated NewEwmaRate structure 28 | // 29 | // halfLife it the time takes for a half charge or half discharge 30 | func (r *EwmaRate) Init(halfLife time.Duration) *EwmaRate { 31 | r.Ewma.Init(halfLife) 32 | return r 33 | } 34 | 35 | // Notify of an event happening. 36 | // 37 | // Uses system clock to determine current time. Returns current rate. 38 | func (r *EwmaRate) UpdateNow() float64 { 39 | return r.Update(time.Now()) 40 | } 41 | 42 | // Notify of an event happening, with specified current time. 43 | // 44 | // Returns current rate. 45 | func (r *EwmaRate) Update(now time.Time) float64 { 46 | timeDelta := now.Sub(r.lastTimestamp) 47 | return r.Ewma.Update(nanosec/float64(timeDelta.Nanoseconds()), now) 48 | } 49 | 50 | // CurrentNow reads the rate of events per second. 51 | // 52 | // Uses system clock to determine current time. 
// CurrentNow reads the rate of events per second.
//
// Uses the system clock to determine current time.
func (r *EwmaRate) CurrentNow() float64 {
	return r.Current(time.Now())
}

// Current reads the rate of events per second, with specified current time.
//
// Unlike Update this records no event: it only decays the stored
// average by the time elapsed since the last update, without saving.
func (r *EwmaRate) Current(now time.Time) float64 {
	// Nothing recorded yet, or the clock has not moved forward:
	// return the stored value untouched.
	if r.lastTimestamp.IsZero() || r.lastTimestamp == now || now.Before(r.lastTimestamp) {
		return r.Ewma.Current
	}

	timeDelta := now.Sub(r.lastTimestamp)

	// Count as if nothing was received since last update and
	// don't save anything.
	return r.count(0, timeDelta)
}
// TestEwma replays each scripted scenario against a fresh Ewma with a
// one-minute half-life, checking the exact float64 average after each
// update (l.delay seconds elapse, then value l.v is fed in).
func TestEwma(t *testing.T) {
	for testNo, test := range testVectorEwma {
		e := NewEwma(time.Duration(1 * time.Minute))

		// Feed the 0th timestamp
		ts := time.Now()
		e.Update(0, ts)

		if e.Current != 0 {
			t.Errorf("Rate after init should be zero")
		}

		for lineNo, l := range test {
			ts = ts.Add(time.Duration(l.delay * float64(time.Second.Nanoseconds())))
			e.Update(l.v, ts)
			if e.Current != l.cur {
				t.Errorf("Test %d, line %d: %v != %v",
					testNo, lineNo, e.Current, l.cur)
			}
		}
	}
}

// TestEwmaCoverErrors exercises the defensive branches: an update
// whose timestamp lies in the past must be ignored, and feeding zero
// must keep the average at zero.
func TestEwmaCoverErrors(t *testing.T) {
	e := NewEwma(time.Duration(1 * time.Minute))

	ts := time.Now()
	e.Update(0, ts)

	e.Update(0, ts.Add(-1*time.Second))
	if e.Current != 0 {
		t.Error("expecting 0")
	}

	e.UpdateNow(0)
	if e.Current != 0 {
		t.Error("expecting 0")
	}
}
// Ewma is a tickless exponentially decaying moving average.  Instead
// of a ticker goroutine, the decay weight is computed from the time
// elapsed between consecutive updates.
type Ewma struct {
	lastTimestamp time.Time // time of the most recent accepted update
	weightHelper  float64   // -ln(2)/halfLife(ns), precomputed in Init

	// Current value of the moving average
	Current float64
}

// NewEwma allocates a new Ewma structure.
//
// halfLife is the time it takes for a half charge or half discharge.
func NewEwma(halfLife time.Duration) *Ewma {
	return (&Ewma{}).Init(halfLife)
}

// Init initializes an already allocated Ewma structure.
//
// halfLife is the time it takes for a half charge or half discharge.
func (e *Ewma) Init(halfLife time.Duration) *Ewma {
	*e = Ewma{
		weightHelper: -math.Ln2 / float64(halfLife.Nanoseconds()),
	}
	return e
}

// count computes the decayed average after timeDelta with new sample
// next, without storing the result.
func (e *Ewma) count(next float64, timeDelta time.Duration) float64 {
	// weight = math.Exp(timedelta * math.Log(0.5) / halfLife)
	weight := math.Exp(float64(timeDelta.Nanoseconds()) * e.weightHelper)
	return e.Current*weight + next*(1-weight)
}

// UpdateNow updates the moving average with the value.
//
// Uses the system clock to determine current time to compute the
// weight. Returns the updated moving average.
func (e *Ewma) UpdateNow(value float64) float64 {
	return e.Update(value, time.Now())
}
61 | func (e *Ewma) Update(next float64, timestamp time.Time) float64 { 62 | if timestamp.Before(e.lastTimestamp) || timestamp == e.lastTimestamp { 63 | return e.Current 64 | } 65 | 66 | if e.lastTimestamp.IsZero() { 67 | // Ignore the first sample 68 | e.lastTimestamp = timestamp 69 | return e.Current 70 | } 71 | 72 | timeDelta := timestamp.Sub(e.lastTimestamp) 73 | e.lastTimestamp = timestamp 74 | 75 | e.Current = e.count(next, timeDelta) 76 | return e.Current 77 | } 78 | -------------------------------------------------------------------------------- /lrucache/benchmark/main.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | // Microbenchmarks for LRUCache, MultiLRUCache versus vitess/cache. 4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "math" 9 | "math/rand" 10 | "time" 11 | ) 12 | 13 | func randomString(l int) string { 14 | bytes := make([]byte, l) 15 | for i := 0; i < l; i++ { 16 | bytes[i] = byte(65 + rand.Intn(90-65)) 17 | } 18 | return string(bytes) 19 | } 20 | 21 | type GenericCache interface { 22 | Set(key string, value string) 23 | Get(key string) (string, bool) 24 | } 25 | 26 | func main() { 27 | list_of_capacities := []uint64{32, 128, 1024, 4096, 1024 * 1024} 28 | number_of_keys := 30000 29 | key_length := 3 30 | 31 | keys := make([]string, number_of_keys) 32 | for i := 0; i < 1000; i++ { 33 | keys[i] = randomString(key_length) 34 | } 35 | 36 | for _, capacity := range list_of_capacities { 37 | m := make([]GenericCache, 3) 38 | 39 | fmt.Printf("[*] Capacity=%v Keys=%v KeySpace=%v\n", capacity, number_of_keys, int(math.Pow(90-65., float64(key_length)))) 40 | fmt.Printf("\t\tvitess\t\tLRUCache\tMultiLRUCache-4\n") 41 | 42 | tc0 := time.Now() 43 | m[0] = (GenericCache)(NewVCache(capacity)) 44 | tc1 := time.Now() 45 | m[1] = (GenericCache)(NewMCache(capacity, makeLRUCache)) 46 | tc2 := time.Now() 47 | m[2] = (GenericCache)(NewMCache(capacity, makeMultiLRU)) 48 | tc3 := 
time.Now() 49 | 50 | fmt.Printf("create\t\t%-10v\t%-10v\t%v\n", tc1.Sub(tc0), tc2.Sub(tc1), tc3.Sub(tc2)) 51 | 52 | fmt.Printf("Get (miss)") 53 | for _, c := range m { 54 | t0 := time.Now() 55 | for i := 0; i < 1000000; i++ { 56 | c.Get(keys[i%len(keys)]) 57 | } 58 | td := time.Since(t0) 59 | fmt.Printf("\t%v", td) 60 | } 61 | fmt.Printf("\n") 62 | 63 | fmt.Printf("SetNX #1") 64 | for _, c := range m { 65 | t0 := time.Now() 66 | for i := 0; i < 1000000; i++ { 67 | c.Set(keys[i%len(keys)], "v") 68 | } 69 | td := time.Since(t0) 70 | fmt.Printf("\t%v", td) 71 | } 72 | fmt.Printf("\n") 73 | 74 | fmt.Printf("Get (hit)") 75 | for _, c := range m { 76 | t0 := time.Now() 77 | for i := 0; i < 1000000; i++ { 78 | c.Get(keys[i%len(keys)]) 79 | } 80 | td := time.Since(t0) 81 | fmt.Printf("\t%v", td) 82 | } 83 | fmt.Printf("\n") 84 | 85 | fmt.Printf("SetNX #2") 86 | for _, c := range m { 87 | t0 := time.Now() 88 | for i := 0; i < 1000000; i++ { 89 | c.Set(keys[i%len(keys)], "v") 90 | } 91 | td := time.Since(t0) 92 | fmt.Printf("\t%v", td) 93 | } 94 | fmt.Printf("\n\n") 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /kt/bench_test.go: -------------------------------------------------------------------------------- 1 | package kt 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | "testing" 8 | ) 9 | 10 | func BenchmarkSet(b *testing.B) { 11 | ctx := context.Background() 12 | cmd := startServer(b) 13 | defer haltServer(cmd, b) 14 | conn, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 15 | if err != nil { 16 | b.Fatal(err.Error()) 17 | } 18 | b.ResetTimer() 19 | for i := 0; i < b.N; i++ { 20 | str := strconv.Itoa(i) 21 | conn.set(ctx, str, []byte(str)) 22 | } 23 | } 24 | 25 | func BenchmarkSetLarge(b *testing.B) { 26 | ctx := context.Background() 27 | cmd := startServer(b) 28 | defer haltServer(cmd, b) 29 | conn, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 30 | if err != nil { 31 | b.Fatal(err.Error()) 32 | } 
// BenchmarkGet measures single-key Get round-trips against a freshly
// started server, after seeding one small value.
func BenchmarkGet(b *testing.B) {
	ctx := context.Background()
	cmd := startServer(b)
	defer haltServer(cmd, b)
	conn, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT)
	if err != nil {
		b.Fatal(err.Error())
	}
	err = conn.set(ctx, "something", []byte("foobar"))
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := conn.Get(ctx, "something")
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkGetLarge is BenchmarkGet with a 4 KiB value, to expose
// payload-size effects on the transport.
func BenchmarkGetLarge(b *testing.B) {
	ctx := context.Background()
	cmd := startServer(b)
	defer haltServer(cmd, b)
	conn, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT)
	if err != nil {
		b.Fatal(err.Error())
	}
	err = conn.set(ctx, "something", make([]byte, 4096))
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := conn.Get(ctx, "something")
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkBulkBytes measures GetBulkBytes over 200 pre-seeded keys
// per iteration.
func BenchmarkBulkBytes(b *testing.B) {
	ctx := context.Background()
	cmd := startServer(b)
	defer haltServer(cmd, b)
	db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT)
	if err != nil {
		b.Fatal(err.Error())
	}

	keys := make(map[string][]byte)
	for i := 0; i < 200; i++ {
		keys[fmt.Sprintf("cache/news/%d", i)] = []byte{'4'}
	}

	for k := range keys {
		db.set(ctx, k, []byte("something"))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := db.GetBulkBytes(ctx, keys)
		if err != nil {
			b.Fatal(err)
		}
	}
}
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "encoding/hex" 6 | "fmt" 7 | "github.com/cloudflare/golibs/spacesaving" 8 | "github.com/miekg/dns" 9 | "github.com/miekg/pcap" 10 | "log" 11 | "os" 12 | "strings" 13 | "sync" 14 | "time" 15 | ) 16 | 17 | func safeParse(msg *dns.Msg, data []byte) (err error) { 18 | defer func() { 19 | if e := recover(); e != nil { 20 | hexs := hex.EncodeToString(data) 21 | fmt.Fprintf(os.Stderr, "Crashed dns: %v\nError: %v\n", 22 | hexs, e) 23 | panic("Unpacking dns crashed: " + hexs) 24 | } 25 | }() 26 | return msg.Unpack(data) 27 | } 28 | 29 | func main() { 30 | var ( 31 | pc *pcap.Pcap 32 | err error 33 | ) 34 | devloop: 35 | for _, device := range []string{"bond0", "eth2", "en0", "any"} { 36 | devs, errx := pcap.FindAllDevs() 37 | if errx != "" { 38 | log.Fatalf("%v", errx) 39 | } 40 | for _, dev := range devs { 41 | if dev.Name == device { 42 | pc, err = pcap.OpenLive(device, 8192, false, 1000) 43 | if err == nil { 44 | break devloop 45 | } 46 | } 47 | } 48 | } 49 | 50 | if err != nil { 51 | log.Fatalf("%v", err) 52 | } 53 | 54 | if err = pc.SetFilter("udp and dst port 53"); err != nil { 55 | log.Fatalf("%v", err) 56 | } 57 | 58 | lock := &sync.Mutex{} 59 | ss := &spacesaving.Rate{} 60 | ss.Init(4096, 60*time.Second) 61 | 62 | go Poller(lock, ss, pc) 63 | 64 | for pkt, r := pc.NextEx(); r >= 0; pkt, r = pc.NextEx() { 65 | if r == 0 { 66 | continue 67 | } 68 | pkt.Decode() 69 | var msg dns.Msg 70 | if err := safeParse(&msg, pkt.Payload); err != nil { 71 | fmt.Printf("err %v\n", err) 72 | continue 73 | } 74 | 75 | qname := strings.ToLower(msg.Question[0].Name) 76 | if len(qname) > 0 { 77 | qname = qname[:len(qname)-1] 78 | } 79 | 80 | lock.Lock() 81 | ss.Touch(qname, pkt.Time) 82 | lock.Unlock() 83 | } 84 | 85 | fmt.Printf("Done\n") 86 | } 87 | 88 | func Poller(lock *sync.Mutex, ss *spacesaving.Rate, pc *pcap.Pcap) { 89 | w := 
bufio.NewWriter(os.Stdout) 90 | 91 | for _ = range time.Tick(3 * time.Second) { 92 | stat, _ := pc.Getstats() 93 | 94 | lock.Lock() 95 | fmt.Fprintf(w, "\033c") 96 | elements := ss.GetAll(time.Now()) 97 | for i, e := range elements { 98 | fmt.Fprintf(w, "%60s\t%f\t%f\n", e.Key, e.LoRate, e.HiRate) 99 | if i > 40 { 100 | break 101 | } 102 | } 103 | fmt.Fprintf(w, "\n") 104 | fmt.Fprintf(w, "received:%v dropped:%v/%v (software/interface)\n", 105 | stat.PacketsReceived, stat.PacketsDropped, stat.PacketsIfDropped) 106 | w.Flush() 107 | lock.Unlock() 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /lrucache/cache.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | // Package lrucache implements a last recently used cache data structure. 4 | // 5 | // This code tries to avoid dynamic memory allocations - all required 6 | // memory is allocated on creation. Access to the data structure is 7 | // O(1). Modification O(log(n)) if expiry is used, O(1) 8 | // otherwise. 9 | // 10 | // This package exports three things: 11 | // LRUCache: is the main implementation. It supports multithreading by 12 | // using guarding mutex lock. 13 | // 14 | // MultiLRUCache: is a sharded implementation. It supports the same 15 | // API as LRUCache and uses it internally, but is not limited to 16 | // a single CPU as every shard is separately locked. Use this 17 | // data structure instead of LRUCache if you have have lock 18 | // contention issues. 19 | // 20 | // Cache interface: Both implementations fulfill it. 21 | package lrucache 22 | 23 | import ( 24 | "time" 25 | ) 26 | 27 | // Cache interface is fulfilled by the LRUCache and MultiLRUCache 28 | // implementations. 29 | type Cache interface { 30 | // Methods not needing to know current time. 31 | // 32 | // Get a key from the cache, possibly stale. Update its LRU 33 | // score. 
34 | Get(key string) (value interface{}, ok bool) 35 | // Get a key from the cache, possibly stale. Don't modify its LRU score. O(1) 36 | GetQuiet(key string) (value interface{}, ok bool) 37 | // Get and remove a key from the cache. 38 | Del(key string) (value interface{}, ok bool) 39 | // Evict all items from the cache. 40 | Clear() int 41 | // Number of entries used in the LRU 42 | Len() int 43 | // Get the total capacity of the LRU 44 | Capacity() int 45 | 46 | // Methods use time.Now() when neccessary to determine expiry. 47 | // 48 | // Add an item to the cache overwriting existing one if it 49 | // exists. 50 | Set(key string, value interface{}, expire time.Time) 51 | // Get a key from the cache, make sure it's not stale. Update 52 | // its LRU score. 53 | GetNotStale(key string) (value interface{}, ok bool) 54 | // Evict all the expired items. 55 | Expire() int 56 | 57 | // Methods allowing to explicitly specify time used to 58 | // determine if items are expired. 59 | // 60 | // Add an item to the cache overwriting existing one if it 61 | // exists. Allows specifing current time required to expire an 62 | // item when no more slots are used. 63 | SetNow(key string, value interface{}, expire time.Time, now time.Time) 64 | // Get a key from the cache, make sure it's not stale. Update 65 | // its LRU score. 66 | GetNotStaleNow(key string, now time.Time) (value interface{}, ok bool) 67 | // Evict items that expire before Now. 68 | ExpireNow(now time.Time) int 69 | } 70 | -------------------------------------------------------------------------------- /ewma/rate_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 CloudFlare, Inc. 
// testTupleRate is one scripted step: advance the clock by delay
// seconds, optionally record a packet, then expect rate cur
// (-1 means "skip the check for this step").
type testTupleRate struct {
	packet bool
	delay  float64
	cur    float64
}

var testVectorRate = [][]testTupleRate{
	// Sanity check (half life is 1 second)
	{
		// Feeding packets every second gets to 1 pps eventually
		{false, 1, 0},
		{true, 1, 0},
		{true, 1, 0.5},
		{true, 1, 0.75},
		{true, 1, 0.875},
		{true, 1, 0.9375},
		{true, 1, 0.96875},
		{true, 1, 0.984375},
		{true, 1, 0.9921875},
		{true, 1, 0.99609375},
		{true, 1, 0.998046875},

		// Stop over 5 seconds
		{false, 1, 0.4990234375},
		{false, 1, 0.24951171875},
		{false, 1, 0.12475585937500003},
		{false, 1, 0.0623779296875},
		{false, 1, 0.03118896484375},

		// A small number after 30 seconds discharge
		{false, 25, 0.000000000929503585211933486330},
	},

	// Burst of 10, 1ms apart, gets us to ~7 pps
	{
		{true, 1, 0},
		{true, 0.001, -1}, {true, 0.001, -1}, {true, 0.001, -1}, {true, 0.001, -1}, {true, 0.001, -1},
		{true, 0.001, -1}, {true, 0.001, -1}, {true, 0.001, -1}, {true, 0.001, -1}, {true, 0.001, -1},
		{false, 0, 6.9075045629642595},
		{false, 1, 3.453752281482129760092902870383},
		{false, 1, 1.726876140741064880046451435192},
	},

	// 10 packets 100ms apart, get 5 pps
	{
		{true, 1, 0},
		{true, 0.1, -1}, {true, 0.1, -1}, {true, 0.1, -1}, {true, 0.1, -1}, {true, 0.1, -1},
		{true, 0.1, -1}, {true, 0.1, -1}, {true, 0.1, -1}, {true, 0.1, -1}, {true, 0.1, -1},
		{false, 0, 5.000000000000002},
		{false, 1, 2.500000000000000888178419700125},
		{false, 1, 1.250000000000000444089209850063},
	},
}

// TestRate replays each scenario against a fresh EwmaRate with a
// one-second half-life, checking exact float64 rates step by step.
func TestRate(t *testing.T) {
	for testNo, test := range testVectorRate {
		ts := time.Now()
		e := NewEwmaRate(time.Duration(1 * time.Second))

		for lineNo, l := range test {
			ts = ts.Add(time.Duration(l.delay * float64(time.Second.Nanoseconds())))
			if l.packet {
				e.Update(ts)
			}
			if l.cur != -1 && e.Current(ts) != l.cur {
				t.Errorf("Test %d, line %d: %.30f != %.30f",
					testNo, lineNo, e.Current(ts), l.cur)
			}
		}
	}
}

// TestRateCoverErrors covers the zero-value path and the wall-clock
// convenience wrappers (CurrentNow / UpdateNow).
func TestRateCoverErrors(t *testing.T) {
	e := NewEwmaRate(time.Duration(1 * time.Second))

	if e.CurrentNow() != 0 {
		t.Error("expecting 0")
	}

	e.UpdateNow()
	rate := e.CurrentNow()
	if !(rate >= 0.0 && rate < 0.2) {
		// depending on the speed of the CPU
		t.Errorf("expecting 0 got %v", rate)
	}
}
// Test that Pool does not hold pointers to previously cached
// resources: after Put/Get cycles and GC, (almost) all finalizers on
// the cached values must have fired.
func TestPoolGC(t *testing.T) {
	var p Pool
	var fin uint32
	const N = 100
	for i := 0; i < N; i++ {
		v := new(int)
		runtime.SetFinalizer(v, func(vv *int) {
			atomic.AddUint32(&fin, 1)
		})
		p.Put(v)
	}
	for i := 0; i < N; i++ {
		p.Get()
	}
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(time.Millisecond)
		// 2 pointers can remain on stack or elsewhere
		if atomic.LoadUint32(&fin) >= N-2 {
			return
		}
	}
	t.Fatalf("only %v out of %v resources are finalized",
		atomic.LoadUint32(&fin), N)
}

// TestPoolStress hammers one Pool from P goroutines with Put/Get
// pairs, checking that only values previously Put come back.
func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v interface{} = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Fatalf("expect 0, got %v", v)
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

// BenchmarkPool measures Put/Get pairs from GOMAXPROCS goroutines.
// dec is a shared b.N countdown built to work on both 32- and 64-bit
// platforms.
func BenchmarkPool(b *testing.B) {
	procs := runtime.GOMAXPROCS(-1)
	var dec func() bool
	if unsafe.Sizeof(b.N) == 8 {
		n := int64(b.N)
		dec = func() bool {
			return atomic.AddInt64(&n, -1) >= 0
		}
	} else {
		n := int32(b.N)
		dec = func() bool {
			return atomic.AddInt32(&n, -1) >= 0
		}
	}
	var p Pool
	var wg sync.WaitGroup
	for i := 0; i < procs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for dec() {
				p.Put(1)
				p.Get()
			}
		}()
	}
	wg.Wait()
}
// mathTestData pins log2Floor / log2Ceil for boundary inputs: zero,
// exact powers of two, and values just around them.
var mathTestData = []struct {
	n    uint32
	f, c uint
}{
	{0, 0, 0},
	{1, 0, 0},
	{2, 1, 1},
	{3, 1, 2},
	{4, 2, 2},
	{5, 2, 3},
	{6, 2, 3},
	{7, 2, 3},
	{8, 3, 3},
	{15, 3, 4},
	{16, 4, 4},
	{17, 4, 5},
	{math.MaxUint32 - 1, 31, 32},
	{math.MaxUint32, 31, 32},
}

// TestMath checks both log2 helpers against the table above.
func TestMath(t *testing.T) {
	t.Parallel()

	for _, l := range mathTestData {
		if log2Floor(l.n) != l.f || log2Ceil(l.n) != l.c {
			t.Errorf("x=log2(%d) Expecting ⌊x⌋=%d ⌈x⌉=%d got ⌊x⌋=%d ⌈x⌉=%d",
				l.n, l.f, l.c, log2Floor(l.n), log2Ceil(l.n))
		}
	}
}

// TestPool verifies size-class behaviour: Get rounds the capacity up
// to the next power of two unless a larger recycled slice is
// available, and length always equals the requested size.
func TestPool(t *testing.T) {
	t.Parallel()

	var p BytePool
	p.Init(0, 128)

	v := p.Get(31)
	if cap(v) != 32 || len(v) != 31 {
		t.Fatal("wrong capacity or length")
	}
	p.Put(v[:1])

	v = p.Get(30)
	if cap(v) != 32 || len(v) != 30 {
		t.Fatal("wrong capacity or length")
	}

	// A recycled 127-cap slice serves a 64-byte request from the
	// same size class.
	e := make([]byte, 0, 127)
	p.Put(e)
	v = p.Get(64)

	if cap(v) != 127 || len(v) != 64 {
		t.Fatalf("wrong capacity or length %d %d", cap(v), len(v))
	}

	v = p.Get(127)
	if cap(v) != 128 || len(v) != 127 {
		t.Fatalf("wrong capacity or length %d %d", cap(v), len(v))
	}

	v = p.Get(128)
	if cap(v) != 128 || len(v) != 128 {
		t.Fatalf("wrong capacity or length %d %d", cap(v), len(v))
	}
}

// TestDrain verifies the background drainer empties the pool after
// the configured period.
func TestDrain(t *testing.T) {
	t.Parallel()

	var p BytePool
	p.Init(1*time.Millisecond, 128)
	p.Put(make([]byte, 127))
	time.Sleep(100 * time.Millisecond)

	if p.entries() != 0 {
		t.Fatal("expected the pool to be drained")
	}
	p.Close()
}
t.Parallel() 92 | 93 | var ti int 94 | var p BytePool 95 | p.Init(0, 127) 96 | 97 | p.Put(make([]byte, 129)) 98 | if p.entries() != 0 { 99 | t.Fatal("expected the pool to be empty") 100 | } 101 | 102 | p.Put(make([]byte, 127)) 103 | if p.entries() != 1 { 104 | t.Fatal("expected the pool to have a single item") 105 | } 106 | 107 | p.Put(make([]byte, 0)) 108 | if p.entries() != 1 { 109 | t.Fatal("expected different pool length") 110 | } 111 | 112 | p.Put(make([]byte, 1)) 113 | if p.entries() != 2 { 114 | t.Fatal("expected different pool length") 115 | } 116 | 117 | p.Close() 118 | 119 | p.Init(0, math.MaxUint32) 120 | p.Put(make([]byte, 129)) 121 | if p.entries() != 1 { 122 | t.Fatal("expected different pool length") 123 | } 124 | 125 | p.Put(make([]byte, math.MaxUint32 + 1)) 126 | if p.entries() != 1 { 127 | t.Fatal("expected the pool to have a single item") 128 | } 129 | 130 | p.Put(make([]byte, math.MaxInt32 + 1)) 131 | ti = (1 << log2Ceil(math.MaxUint32)) - 1 132 | if ti <= 0 { 133 | // 32-bit systems: Put() slice-size math.MaxInt32 + 1 fails 134 | if p.entries() != 1 { 135 | t.Fatal("expected the pool to have a single item") 136 | } 137 | } else { 138 | if p.entries() != 2 { 139 | t.Fatal("expected the pool to have two items") 140 | } 141 | } 142 | 143 | p.Drain() 144 | p.Put(nil) 145 | if p.entries() != 0 { 146 | t.Fatal("expected different pool length") 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /lrucache/multilru.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | package lrucache 4 | 5 | import ( 6 | "hash/crc32" 7 | "time" 8 | ) 9 | 10 | // MultiLRUCache data structure. Never dereference it or copy it by 11 | // value. Always use it through a pointer. 12 | type MultiLRUCache struct { 13 | buckets uint 14 | cache []*LRUCache 15 | } 16 | 17 | // Using this constructor is almost always wrong. 
Use NewMultiLRUCache instead. 18 | func (m *MultiLRUCache) Init(buckets, bucket_capacity uint) { 19 | m.buckets = buckets 20 | m.cache = make([]*LRUCache, buckets) 21 | for i := uint(0); i < buckets; i++ { 22 | m.cache[i] = NewLRUCache(bucket_capacity) 23 | } 24 | } 25 | 26 | // Set the stale expiry grace period for each cache in the multicache instance. 27 | func (m *MultiLRUCache) SetExpireGracePeriod(p time.Duration) { 28 | for _, c := range m.cache { 29 | c.ExpireGracePeriod = p 30 | } 31 | } 32 | 33 | func NewMultiLRUCache(buckets, bucket_capacity uint) *MultiLRUCache { 34 | m := &MultiLRUCache{} 35 | m.Init(buckets, bucket_capacity) 36 | return m 37 | } 38 | 39 | func (m *MultiLRUCache) bucketNo(key string) uint { 40 | // Arbitrary choice. Any fast hash will do. 41 | return uint(crc32.ChecksumIEEE([]byte(key))) % m.buckets 42 | } 43 | 44 | func (m *MultiLRUCache) Set(key string, value interface{}, expire time.Time) { 45 | m.cache[m.bucketNo(key)].Set(key, value, expire) 46 | } 47 | 48 | func (m *MultiLRUCache) SetNow(key string, value interface{}, expire time.Time, now time.Time) { 49 | m.cache[m.bucketNo(key)].SetNow(key, value, expire, now) 50 | } 51 | 52 | func (m *MultiLRUCache) Get(key string) (value interface{}, ok bool) { 53 | return m.cache[m.bucketNo(key)].Get(key) 54 | } 55 | 56 | func (m *MultiLRUCache) GetQuiet(key string) (value interface{}, ok bool) { 57 | return m.cache[m.bucketNo(key)].Get(key) 58 | } 59 | 60 | func (m *MultiLRUCache) GetNotStale(key string) (value interface{}, ok bool) { 61 | return m.cache[m.bucketNo(key)].GetNotStale(key) 62 | } 63 | 64 | func (m *MultiLRUCache) GetNotStaleNow(key string, now time.Time) (value interface{}, ok bool) { 65 | return m.cache[m.bucketNo(key)].GetNotStaleNow(key, now) 66 | } 67 | 68 | func (m *MultiLRUCache) GetStale(key string) (value interface{}, ok, expired bool) { 69 | return m.cache[m.bucketNo(key)].GetStale(key) 70 | } 71 | 72 | func (m *MultiLRUCache) GetStaleNow(key string, now time.Time) 
(value interface{}, ok, expired bool) { 73 | return m.cache[m.bucketNo(key)].GetStaleNow(key, now) 74 | } 75 | 76 | func (m *MultiLRUCache) Del(key string) (value interface{}, ok bool) { 77 | return m.cache[m.bucketNo(key)].Del(key) 78 | } 79 | 80 | func (m *MultiLRUCache) Clear() int { 81 | var s int 82 | for _, c := range m.cache { 83 | s += c.Clear() 84 | } 85 | return s 86 | } 87 | 88 | func (m *MultiLRUCache) Len() int { 89 | var s int 90 | for _, c := range m.cache { 91 | s += c.Len() 92 | } 93 | return s 94 | } 95 | 96 | func (m *MultiLRUCache) Capacity() int { 97 | var s int 98 | for _, c := range m.cache { 99 | s += c.Capacity() 100 | } 101 | return s 102 | } 103 | 104 | func (m *MultiLRUCache) Expire() int { 105 | var s int 106 | for _, c := range m.cache { 107 | s += c.Expire() 108 | } 109 | return s 110 | } 111 | 112 | func (m *MultiLRUCache) ExpireNow(now time.Time) int { 113 | var s int 114 | for _, c := range m.cache { 115 | s += c.ExpireNow(now) 116 | } 117 | return s 118 | } 119 | -------------------------------------------------------------------------------- /pool/pool.go: -------------------------------------------------------------------------------- 1 | // This is a backported implementation of sync.Pool from gotip pre-1.3 2 | // http://tip.golang.org/src/pkg/sync/pool.go?m=text 3 | // 4 | // Extended to drain pool periodically. 5 | 6 | // Copyright 2013 The Go Authors. All rights reserved. 7 | // Use of this source code is governed by a BSD-style 8 | // license that can be found in the LICENSE file. 9 | 10 | package pool 11 | 12 | import ( 13 | "sync" 14 | "time" 15 | ) 16 | 17 | // A Pool is a set of temporary objects that may be individually saved 18 | // and retrieved. 19 | // 20 | // Any item stored in the Pool may be removed automatically by the 21 | // implementation at any time without notification. 22 | // If the Pool holds the only reference when this happens, the item 23 | // might be deallocated. 
// A Pool is a set of temporary objects that may be individually saved
// and retrieved. Items stored in it may be removed at any time
// (notably by the periodic drainer) without notification, so callers
// must never rely on a Put value coming back.
//
// A Pool is safe for use by multiple goroutines simultaneously.
//
// Intended use is for free lists kept in global variables, shared by
// independent clients of a global resource; it is not a replacement
// for a per-object free list that is released with its owner.
//
// This is an experimental type and might not be released.
type Pool struct {
	next *Pool         // for use by runtime. must be first.
	list []interface{} // offset known to runtime
	mu   sync.Mutex    // guards list

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}

	DrainPeriod time.Duration // period between automatic drains
	drainTicker *time.Ticker  // drives the background drainer goroutine
}

// Put adds x to the pool. A nil x is ignored.
func (p *Pool) Put(x interface{}) {
	if x == nil {
		return
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	// On the very first Put (list still nil) start the background
	// drainer, if one was requested via DrainPeriod.
	if p.list == nil && p.drainTicker == nil && p.DrainPeriod != 0 {
		p.drainTicker = time.NewTicker(p.DrainPeriod)
		go func() {
			for _ = range p.drainTicker.C {
				p.Drain()
			}
		}()
	}
	p.list = append(p.list, x)
}

// Get removes an arbitrary item from the Pool and returns it to the
// caller (LIFO in this implementation, but callers must not assume
// any relation between Put and Get values).
//
// If the pool is empty and p.New is non-nil, Get returns the result
// of calling p.New.
func (p *Pool) Get() interface{} {
	p.mu.Lock()
	var item interface{}
	if last := len(p.list) - 1; last >= 0 {
		item = p.list[last]
		p.list[last] = nil // drop the reference so the GC may reclaim it
		p.list = p.list[:last]
	}
	p.mu.Unlock()
	if item == nil && p.New != nil {
		return p.New()
	}
	return item
}

// Drain discards every cached item, keeping half the previous
// capacity as a hint for future growth.
func (p *Pool) Drain() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.list = make([]interface{}, 0, cap(p.list)/2)
}

// Close empties the pool completely and stops the drainer goroutine.
func (p *Pool) Close() {
	p.Drain()
	p.mu.Lock()
	defer p.mu.Unlock()
	p.list = make([]interface{}, 0)
	if t := p.drainTicker; t != nil {
		t.Stop()
		p.drainTicker = nil
	}
}
28 | func (tp *BytePool) Init(drainPeriod time.Duration, maxSize uint32) { 29 | maxSizeLog := log2Ceil(maxSize) 30 | tp.maxSize = (1 << maxSizeLog) - 1 31 | // 32-bit catch 32 | if tp.maxSize <= 0 { 33 | tp.maxSize = math.MaxInt32 34 | maxSizeLog = log2Ceil(math.MaxInt32) 35 | } 36 | tp.list_of_pools = make([]pool, maxSizeLog+1) 37 | if drainPeriod > 0 { 38 | tp.drainTicker = time.NewTicker(drainPeriod) 39 | go func() { 40 | for _ = range tp.drainTicker.C { 41 | tp.Drain() 42 | } 43 | }() 44 | } 45 | } 46 | 47 | // Put the byte slice back in pool. 48 | func (tp *BytePool) Put(el []byte) { 49 | if cap(el) < 1 || cap(el) > tp.maxSize { 50 | return 51 | } 52 | el = el[:cap(el)] 53 | o := log2Floor(uint32(cap(el))) 54 | p := &tp.list_of_pools[o] 55 | p.mu.Lock() 56 | p.list = append(p.list, el) 57 | p.mu.Unlock() 58 | } 59 | 60 | // Get a byte slice from the pool. 61 | func (tp *BytePool) Get(size int) []byte { 62 | if size < 1 || size > tp.maxSize { 63 | return make([]byte, size) 64 | } 65 | var x []byte 66 | 67 | o := log2Ceil(uint32(size)) 68 | p := &tp.list_of_pools[o] 69 | p.mu.Lock() 70 | if n := len(p.list); n > 0 { 71 | x = p.list[n-1] 72 | p.list[n-1] = nil 73 | p.list = p.list[:n-1] 74 | } 75 | p.mu.Unlock() 76 | if x == nil { 77 | x = make([]byte, 1<> 1 120 | v |= v >> 2 121 | v |= v >> 4 122 | v |= v >> 8 123 | v |= v >> 16 124 | return multiplyDeBruijnBitPosition[uint32(v*0x07C4ACDD)>>27] 125 | } 126 | 127 | // Equivalent to: uint(math.Ceil(math.Log2(float64(n)))) 128 | func log2Ceil(v uint32) uint { 129 | var isNotPowerOfTwo uint = 1 130 | // Golang doesn't know how to convert bool to int - branch required 131 | if (v & (v - 1)) == 0 { 132 | isNotPowerOfTwo = 0 133 | } 134 | return log2Floor(v) + isNotPowerOfTwo 135 | } 136 | -------------------------------------------------------------------------------- /spacesaving/srate.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 CloudFlare, Inc. 
2 | 3 | package spacesaving 4 | 5 | import ( 6 | "container/heap" 7 | "math" 8 | "time" 9 | ) 10 | 11 | type srateBucket struct { 12 | key string 13 | count uint64 14 | countTs int64 15 | countRate float64 16 | error uint64 17 | errorTs int64 18 | errorRate float64 19 | index int 20 | } 21 | 22 | type srateHeap []*srateBucket 23 | 24 | func (sh srateHeap) Len() int { return len(sh) } 25 | 26 | func (sh srateHeap) Less(i, j int) bool { 27 | return sh[i].countTs < sh[j].countTs 28 | } 29 | 30 | func (sh srateHeap) Swap(i, j int) { 31 | sh[i], sh[j] = sh[j], sh[i] 32 | sh[i].index = i 33 | sh[j].index = j 34 | } 35 | 36 | func (sh *srateHeap) Push(x interface{}) { 37 | n := len(*sh) 38 | bucket := x.(*srateBucket) 39 | bucket.index = n 40 | *sh = append(*sh, bucket) 41 | } 42 | 43 | func (sh *srateHeap) Pop() interface{} { 44 | old := *sh 45 | n := len(old) 46 | bucket := old[n-1] 47 | bucket.index = -1 // for safety 48 | *sh = old[0 : n-1] 49 | return bucket 50 | } 51 | 52 | type SimpleRate struct { 53 | heap srateHeap 54 | hash map[string]*srateBucket 55 | weightHelper float64 56 | halfLife time.Duration 57 | size int 58 | } 59 | 60 | func (ss *SimpleRate) Init(size int, halfLife time.Duration) *SimpleRate { 61 | *ss = SimpleRate{ 62 | heap: make([]*srateBucket, 0, size), 63 | hash: make(map[string]*srateBucket, size), 64 | weightHelper: -math.Ln2 / float64(halfLife.Nanoseconds()), 65 | halfLife: halfLife, 66 | size: size, 67 | } 68 | return ss 69 | } 70 | 71 | func (ss *SimpleRate) count(rate float64, lastTs, now int64) float64 { 72 | deltaNs := float64(now - lastTs) 73 | weight := math.Exp(deltaNs * ss.weightHelper) 74 | 75 | if deltaNs > 0 && lastTs != 0 { 76 | return rate*weight + (1000000000./deltaNs)*(1-weight) 77 | } 78 | return rate * weight 79 | } 80 | 81 | func (ss *SimpleRate) recount(rate float64, lastTs, now int64) float64 { 82 | return rate * math.Exp(float64(now-lastTs)*ss.weightHelper) 83 | } 84 | 85 | func (ss *SimpleRate) Touch(key string, nowTs 
time.Time) { 86 | var ( 87 | found bool 88 | bucket *srateBucket 89 | now = nowTs.UnixNano() 90 | ) 91 | bucket, found = ss.hash[key]; 92 | if found { 93 | // we already have the correct bucket 94 | } else if len(ss.heap) < ss.size { 95 | // create new bucket 96 | bucket = &srateBucket{} 97 | ss.hash[key] = bucket 98 | bucket.key = key 99 | heap.Push(&ss.heap, bucket) 100 | } else { 101 | // use minimum bucket 102 | bucket = ss.heap[0] 103 | delete(ss.hash, bucket.key) 104 | ss.hash[key] = bucket 105 | bucket.error, bucket.errorTs, bucket.errorRate = 106 | bucket.count, bucket.countTs, bucket.countRate 107 | bucket.key = key 108 | } 109 | 110 | bucket.count += 1 111 | bucket.countRate = ss.count(bucket.countRate, bucket.countTs, now) 112 | bucket.countTs = now 113 | 114 | heap.Fix(&ss.heap, bucket.index) 115 | } 116 | 117 | type srateElement struct { 118 | Key string 119 | LoCount uint64 120 | HiCount uint64 121 | LoRate float64 122 | HiRate float64 123 | } 124 | 125 | func (ss *SimpleRate) GetAll(nowTs time.Time) []srateElement { 126 | now := nowTs.UnixNano() 127 | 128 | elements := make([]srateElement, 0, len(ss.heap)) 129 | for _, b := range ss.heap { 130 | rate := ss.recount(b.countRate, b.countTs, now) 131 | errRate := ss.recount(b.errorRate, b.errorTs, now) 132 | elements = append(elements, srateElement{ 133 | Key: b.key, 134 | LoCount: b.count - b.error, 135 | HiCount: b.count, 136 | LoRate: rate - errRate, 137 | HiRate: rate, 138 | }) 139 | } 140 | return elements 141 | } 142 | -------------------------------------------------------------------------------- /lrucache/multilru_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 
2 | 3 | package lrucache 4 | 5 | import ( 6 | "runtime" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func TestMultiLRUBasic(t *testing.T) { 12 | t.Parallel() 13 | 14 | m := NewMultiLRUCache(2, 3) 15 | 16 | if m.Capacity() != 6 { 17 | t.Error("expecting different capacity") 18 | } 19 | 20 | m.Set("a", "va", time.Time{}) 21 | m.Set("b", "vb", time.Time{}) 22 | m.Set("c", "vc", time.Time{}) 23 | 24 | if m.Len() != 3 { 25 | t.Error("expecting different length") 26 | } 27 | 28 | m.Set("a", "va", time.Time{}) 29 | m.Set("b", "vb", time.Time{}) 30 | m.Set("c", "vc", time.Time{}) 31 | 32 | if m.Len() != 3 { 33 | t.Error("expecting different length") 34 | } 35 | 36 | // chances of all of them going to single bucket are slim 37 | for c := 'a'; c < 'z'; c = rune(int(c) + 1) { 38 | m.Set(string(c), string([]rune{'v', c}), time.Time{}) 39 | } 40 | past := time.Now().Add(time.Duration(-10 * time.Second)) 41 | m.Set("j", "vj", past) 42 | 43 | if m.Len() != 6 { 44 | t.Error("expecting different length") 45 | } 46 | 47 | if m.ExpireNow(past) != 0 { 48 | t.Error("expecting different expire") 49 | } 50 | 51 | if m.Expire() != 1 { 52 | t.Error("expecting different expire") 53 | } 54 | 55 | if m.Clear() != 5 { 56 | t.Error("expecting different length") 57 | } 58 | 59 | if m.Len() != 0 { 60 | t.Error("expecting different length") 61 | } 62 | 63 | m.Set("a", "va", time.Time{}) 64 | if v, _ := m.Del("a"); v != "va" { 65 | t.Error("expected hit") 66 | } 67 | if _, ok := m.Del("a"); ok { 68 | t.Error("expected miss") 69 | } 70 | 71 | // This is stupid, mostly for code coverage. 
72 | m.Clear() 73 | for c := 'a'; c < 'z'; c = rune(int(c) + 1) { 74 | m.Set(string(c), string([]rune{'v', c}), time.Time{}) 75 | } 76 | 77 | m.SetNow("yy", "vyy", past, past) 78 | m.SetNow("zz", "vzz", time.Time{}, time.Now()) 79 | 80 | m.GetQuiet("yy") 81 | m.GetQuiet("yy") 82 | 83 | m.SetNow("yy", "vyy", past, past) 84 | if v, _ := m.Get("yy"); v != "vyy" { 85 | t.Error("expected hit") 86 | } 87 | 88 | if v, _ := m.GetNotStaleNow("yy", past); v != "vyy" { 89 | t.Error("expected hit") 90 | } 91 | 92 | if _, ok := m.GetNotStale("yy"); ok { 93 | t.Error("expected miss") 94 | } 95 | } 96 | 97 | func filledMultiLRU(expire time.Time) *MultiLRUCache { 98 | b := NewMultiLRUCache(4, 250) 99 | for i := 0; i < 1000; i++ { 100 | b.Set(randomString(2), "value", expire) 101 | } 102 | return b 103 | } 104 | 105 | func BenchmarkConcurrentGetMultiLRU(bb *testing.B) { 106 | b := filledMultiLRU(time.Now().Add(time.Duration(4))) 107 | 108 | cpu := runtime.GOMAXPROCS(0) 109 | ch := make(chan bool) 110 | worker := func() { 111 | for i := 0; i < bb.N/cpu; i++ { 112 | b.Get(randomString(2)) 113 | } 114 | ch <- true 115 | } 116 | for i := 0; i < cpu; i++ { 117 | go worker() 118 | } 119 | for i := 0; i < cpu; i++ { 120 | _ = <-ch 121 | } 122 | } 123 | 124 | func BenchmarkConcurrentSetMultiLRU(bb *testing.B) { 125 | b := filledMultiLRU(time.Now().Add(time.Duration(4))) 126 | 127 | cpu := runtime.GOMAXPROCS(0) 128 | ch := make(chan bool) 129 | worker := func() { 130 | for i := 0; i < bb.N/cpu; i++ { 131 | expire := time.Now().Add(time.Duration(4 * time.Second)) 132 | b.Set(randomString(2), "v", expire) 133 | } 134 | ch <- true 135 | } 136 | for i := 0; i < cpu; i++ { 137 | go worker() 138 | } 139 | for i := 0; i < cpu; i++ { 140 | _ = <-ch 141 | } 142 | } 143 | 144 | // No expiry 145 | func BenchmarkConcurrentSetNXMultiLRU(bb *testing.B) { 146 | b := filledMultiLRU(time.Time{}) 147 | 148 | cpu := runtime.GOMAXPROCS(0) 149 | ch := make(chan bool) 150 | worker := func() { 151 | for i := 0; i 
< bb.N/cpu; i++ { 152 | b.Set(randomString(2), "v", time.Time{}) 153 | } 154 | ch <- true 155 | } 156 | for i := 0; i < cpu; i++ { 157 | go worker() 158 | } 159 | for i := 0; i < cpu; i++ { 160 | _ = <-ch 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /tokenbucket/sip.go: -------------------------------------------------------------------------------- 1 | // Written in 2012 by Dmitry Chestnykh. 2 | // 3 | // To the extent possible under law, the author have dedicated all copyright 4 | // and related and neighboring rights to this software to the public domain 5 | // worldwide. This software is distributed without any warranty. 6 | // http://creativecommons.org/publicdomain/zero/1.0/ 7 | 8 | package tokenbucket 9 | 10 | // copied from https://github.com/dchest/siphash/blob/master/hash.go 11 | 12 | const BlockSize = 8 13 | 14 | // hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit 15 | // parts of 128-bit key: k0 and k1. 16 | func hash(k0, k1 uint64, p []byte) uint64 { 17 | // Initialization. 18 | v0 := k0 ^ 0x736f6d6570736575 19 | v1 := k1 ^ 0x646f72616e646f6d 20 | v2 := k0 ^ 0x6c7967656e657261 21 | v3 := k1 ^ 0x7465646279746573 22 | t := uint64(len(p)) << 56 23 | 24 | // Compression. 25 | for len(p) >= BlockSize { 26 | m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | 27 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 28 | v3 ^= m 29 | 30 | // Round 1. 31 | v0 += v1 32 | v1 = v1<<13 | v1>>(64-13) 33 | v1 ^= v0 34 | v0 = v0<<32 | v0>>(64-32) 35 | 36 | v2 += v3 37 | v3 = v3<<16 | v3>>(64-16) 38 | v3 ^= v2 39 | 40 | v0 += v3 41 | v3 = v3<<21 | v3>>(64-21) 42 | v3 ^= v0 43 | 44 | v2 += v1 45 | v1 = v1<<17 | v1>>(64-17) 46 | v1 ^= v2 47 | v2 = v2<<32 | v2>>(64-32) 48 | 49 | // Round 2. 
50 | v0 += v1 51 | v1 = v1<<13 | v1>>(64-13) 52 | v1 ^= v0 53 | v0 = v0<<32 | v0>>(64-32) 54 | 55 | v2 += v3 56 | v3 = v3<<16 | v3>>(64-16) 57 | v3 ^= v2 58 | 59 | v0 += v3 60 | v3 = v3<<21 | v3>>(64-21) 61 | v3 ^= v0 62 | 63 | v2 += v1 64 | v1 = v1<<17 | v1>>(64-17) 65 | v1 ^= v2 66 | v2 = v2<<32 | v2>>(64-32) 67 | 68 | v0 ^= m 69 | p = p[BlockSize:] 70 | } 71 | 72 | // Compress last block. 73 | switch len(p) { 74 | case 7: 75 | t |= uint64(p[6]) << 48 76 | fallthrough 77 | case 6: 78 | t |= uint64(p[5]) << 40 79 | fallthrough 80 | case 5: 81 | t |= uint64(p[4]) << 32 82 | fallthrough 83 | case 4: 84 | t |= uint64(p[3]) << 24 85 | fallthrough 86 | case 3: 87 | t |= uint64(p[2]) << 16 88 | fallthrough 89 | case 2: 90 | t |= uint64(p[1]) << 8 91 | fallthrough 92 | case 1: 93 | t |= uint64(p[0]) 94 | } 95 | 96 | v3 ^= t 97 | 98 | // Round 1. 99 | v0 += v1 100 | v1 = v1<<13 | v1>>(64-13) 101 | v1 ^= v0 102 | v0 = v0<<32 | v0>>(64-32) 103 | 104 | v2 += v3 105 | v3 = v3<<16 | v3>>(64-16) 106 | v3 ^= v2 107 | 108 | v0 += v3 109 | v3 = v3<<21 | v3>>(64-21) 110 | v3 ^= v0 111 | 112 | v2 += v1 113 | v1 = v1<<17 | v1>>(64-17) 114 | v1 ^= v2 115 | v2 = v2<<32 | v2>>(64-32) 116 | 117 | // Round 2. 118 | v0 += v1 119 | v1 = v1<<13 | v1>>(64-13) 120 | v1 ^= v0 121 | v0 = v0<<32 | v0>>(64-32) 122 | 123 | v2 += v3 124 | v3 = v3<<16 | v3>>(64-16) 125 | v3 ^= v2 126 | 127 | v0 += v3 128 | v3 = v3<<21 | v3>>(64-21) 129 | v3 ^= v0 130 | 131 | v2 += v1 132 | v1 = v1<<17 | v1>>(64-17) 133 | v1 ^= v2 134 | v2 = v2<<32 | v2>>(64-32) 135 | 136 | v0 ^= t 137 | 138 | // Finalization. 139 | v2 ^= 0xff 140 | 141 | // Round 1. 142 | v0 += v1 143 | v1 = v1<<13 | v1>>(64-13) 144 | v1 ^= v0 145 | v0 = v0<<32 | v0>>(64-32) 146 | 147 | v2 += v3 148 | v3 = v3<<16 | v3>>(64-16) 149 | v3 ^= v2 150 | 151 | v0 += v3 152 | v3 = v3<<21 | v3>>(64-21) 153 | v3 ^= v0 154 | 155 | v2 += v1 156 | v1 = v1<<17 | v1>>(64-17) 157 | v1 ^= v2 158 | v2 = v2<<32 | v2>>(64-32) 159 | 160 | // Round 2. 
161 | v0 += v1 162 | v1 = v1<<13 | v1>>(64-13) 163 | v1 ^= v0 164 | v0 = v0<<32 | v0>>(64-32) 165 | 166 | v2 += v3 167 | v3 = v3<<16 | v3>>(64-16) 168 | v3 ^= v2 169 | 170 | v0 += v3 171 | v3 = v3<<21 | v3>>(64-21) 172 | v3 ^= v0 173 | 174 | v2 += v1 175 | v1 = v1<<17 | v1>>(64-17) 176 | v1 ^= v2 177 | v2 = v2<<32 | v2>>(64-32) 178 | 179 | // Round 3. 180 | v0 += v1 181 | v1 = v1<<13 | v1>>(64-13) 182 | v1 ^= v0 183 | v0 = v0<<32 | v0>>(64-32) 184 | 185 | v2 += v3 186 | v3 = v3<<16 | v3>>(64-16) 187 | v3 ^= v2 188 | 189 | v0 += v3 190 | v3 = v3<<21 | v3>>(64-21) 191 | v3 ^= v0 192 | 193 | v2 += v1 194 | v1 = v1<<17 | v1>>(64-17) 195 | v1 ^= v2 196 | v2 = v2<<32 | v2>>(64-32) 197 | 198 | // Round 4. 199 | v0 += v1 200 | v1 = v1<<13 | v1>>(64-13) 201 | v1 ^= v0 202 | v0 = v0<<32 | v0>>(64-32) 203 | 204 | v2 += v3 205 | v3 = v3<<16 | v3>>(64-16) 206 | v3 ^= v2 207 | 208 | v0 += v3 209 | v3 = v3<<21 | v3>>(64-21) 210 | v3 ^= v0 211 | 212 | v2 += v1 213 | v1 = v1<<17 | v1>>(64-17) 214 | v1 ^= v2 215 | v2 = v2<<32 | v2>>(64-32) 216 | 217 | return v0 ^ v1 ^ v2 ^ v3 218 | } 219 | -------------------------------------------------------------------------------- /kt/kt_metrics.go: -------------------------------------------------------------------------------- 1 | package kt 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/prometheus/client_golang/prometheus" 8 | ) 9 | 10 | // TrackedConn is a wrapper around kt.Conn that will accept a prometheus counter 11 | // vector, and keep track of number of IO operations made to KT. 
12 | type TrackedConn struct { 13 | kt *Conn 14 | opTimer *prometheus.SummaryVec 15 | } 16 | 17 | const ( 18 | opCount = "COUNT" 19 | opRemove = "REMOVE" 20 | opGetBulk = "GETBULK" 21 | opGet = "GET" 22 | opGetBytes = "GETBYTES" 23 | opSet = "SET" 24 | opGetBulkBytes = "GETBULKBYTES" 25 | opSetBulk = "SETBULK" 26 | opRemoveBulk = "REMOVEBULK" 27 | opMatchPrefix = "MATCHPREFIX" 28 | ) 29 | 30 | // NewTrackedConn creates a new connection to a Kyoto Tycoon endpoint, and tracks 31 | // operations made to it using prometheus metrics. 32 | // All supported operations are tracked, opTimer times the number of seconds 33 | // each type of operation took, generating a summary. 34 | func NewTrackedConn(host string, port int, poolsize int, timeout time.Duration, 35 | opTimer *prometheus.SummaryVec) (*TrackedConn, error) { 36 | conn, err := NewConn(host, port, poolsize, timeout) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | return &TrackedConn{ 42 | kt: conn, 43 | opTimer: opTimer}, nil 44 | } 45 | 46 | // NewTrackedConnFromConn returns a tracked connection that simply wraps the given 47 | // database connection. 
48 | func NewTrackedConnFromConn(conn *Conn, opTimer *prometheus.SummaryVec) (*TrackedConn, error) { 49 | return &TrackedConn{ 50 | kt: conn, 51 | opTimer: opTimer}, nil 52 | } 53 | 54 | func (c *TrackedConn) Count(ctx context.Context) (int, error) { 55 | start := time.Now() 56 | defer func() { 57 | since := time.Since(start) 58 | c.opTimer.WithLabelValues(opGet).Observe(since.Seconds()) 59 | }() 60 | 61 | return c.kt.Count(ctx) 62 | } 63 | 64 | func (c *TrackedConn) Remove(ctx context.Context, key string) error { 65 | start := time.Now() 66 | defer func() { 67 | since := time.Since(start) 68 | c.opTimer.WithLabelValues(opRemove).Observe(since.Seconds()) 69 | }() 70 | 71 | return c.kt.remove(ctx, key) 72 | } 73 | 74 | func (c *TrackedConn) GetBulk(ctx context.Context, keysAndVals map[string]string) error { 75 | start := time.Now() 76 | defer func() { 77 | since := time.Since(start) 78 | c.opTimer.WithLabelValues(opGetBulk).Observe(since.Seconds()) 79 | }() 80 | 81 | return c.kt.GetBulk(ctx, keysAndVals) 82 | } 83 | 84 | func (c *TrackedConn) Get(ctx context.Context, key string) (string, error) { 85 | start := time.Now() 86 | defer func() { 87 | since := time.Since(start) 88 | c.opTimer.WithLabelValues(opGet).Observe(since.Seconds()) 89 | }() 90 | 91 | return c.kt.Get(ctx, key) 92 | } 93 | 94 | func (c *TrackedConn) GetBytes(ctx context.Context, key string) ([]byte, error) { 95 | start := time.Now() 96 | defer func() { 97 | since := time.Since(start) 98 | c.opTimer.WithLabelValues(opGetBytes).Observe(since.Seconds()) 99 | }() 100 | 101 | return c.kt.GetBytes(ctx, key) 102 | } 103 | 104 | func (c *TrackedConn) set(ctx context.Context, key string, value []byte) error { 105 | start := time.Now() 106 | defer func() { 107 | since := time.Since(start) 108 | c.opTimer.WithLabelValues(opSet).Observe(since.Seconds()) 109 | }() 110 | 111 | return c.kt.set(ctx, key, value) 112 | } 113 | 114 | func (c *TrackedConn) GetBulkBytes(ctx context.Context, keys map[string][]byte) error 
{ 115 | start := time.Now() 116 | defer func() { 117 | since := time.Since(start) 118 | c.opTimer.WithLabelValues(opGetBulkBytes).Observe(since.Seconds()) 119 | }() 120 | 121 | return c.kt.GetBulkBytes(ctx, keys) 122 | } 123 | 124 | func (c *TrackedConn) setBulk(ctx context.Context, values map[string]string) (int64, error) { 125 | start := time.Now() 126 | defer func() { 127 | since := time.Since(start) 128 | c.opTimer.WithLabelValues(opSetBulk).Observe(since.Seconds()) 129 | }() 130 | 131 | return c.kt.setBulk(ctx, values) 132 | } 133 | 134 | func (c *TrackedConn) RemoveBulk(ctx context.Context, keys []string) (int64, error) { 135 | start := time.Now() 136 | defer func() { 137 | since := time.Since(start) 138 | c.opTimer.WithLabelValues(opRemoveBulk).Observe(since.Seconds()) 139 | }() 140 | 141 | return c.kt.removeBulk(ctx, keys) 142 | } 143 | 144 | func (c *TrackedConn) MatchPrefix(ctx context.Context, key string, maxrecords int64) ([]string, error) { 145 | start := time.Now() 146 | defer func() { 147 | since := time.Since(start) 148 | c.opTimer.WithLabelValues(opMatchPrefix).Observe(since.Seconds()) 149 | }() 150 | 151 | return c.kt.MatchPrefix(ctx, key, maxrecords) 152 | } 153 | -------------------------------------------------------------------------------- /circularbuffer/circularbuffer_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 
2 | 3 | package circularbuffer 4 | 5 | import ( 6 | "testing" 7 | ) 8 | 9 | func (b *CircularBuffer) verifyIsEmpty() bool { 10 | b.lock.Lock() 11 | defer b.lock.Unlock() 12 | 13 | e := len(b.avail) == 0 14 | if e { 15 | if b.pos != b.start { 16 | panic("desychronized state") 17 | } 18 | } 19 | return e 20 | } 21 | 22 | func TestSyncGet(t *testing.T) { 23 | c := NewCircularBuffer(10) 24 | 25 | for i := 0; i < 4; i++ { 26 | c.NBPush(i) 27 | } 28 | 29 | for i := 0; i < 4; i++ { 30 | v := c.Get().(int) 31 | if i != v { 32 | t.Error(v) 33 | } 34 | } 35 | 36 | if c.verifyIsEmpty() != true { 37 | t.Error("not empty") 38 | } 39 | } 40 | 41 | func TestSyncOverflow(t *testing.T) { 42 | c := NewCircularBuffer(10) // up to 9 items in the buffer 43 | 44 | for i := 0; i < 9; i++ { 45 | v := c.NBPush(i) 46 | if v != nil { 47 | t.Error(v) 48 | } 49 | } 50 | v := c.NBPush(9) 51 | if v != 0 { 52 | t.Error(v) 53 | } 54 | 55 | for i := 1; i < 10; i++ { 56 | v := c.Get().(int) 57 | if i != v { 58 | t.Error(v) 59 | } 60 | } 61 | 62 | if c.verifyIsEmpty() != true { 63 | t.Error("not empty") 64 | } 65 | } 66 | 67 | func TestAsyncGet(t *testing.T) { 68 | c := NewCircularBuffer(10) 69 | 70 | go func() { 71 | for i := 0; i < 4; i++ { 72 | v := c.Get().(int) 73 | if i != v { 74 | t.Error(i) 75 | } 76 | } 77 | 78 | if c.verifyIsEmpty() != true { 79 | t.Error("not empty") 80 | } 81 | }() 82 | 83 | c.NBPush(0) 84 | c.NBPush(1) 85 | c.NBPush(2) 86 | c.NBPush(3) 87 | } 88 | 89 | func TestSyncPop(t *testing.T) { 90 | c := NewCircularBuffer(10) 91 | 92 | c.NBPush(3) 93 | c.NBPush(2) 94 | c.NBPush(1) 95 | c.NBPush(0) 96 | 97 | for i := 0; i < 4; i++ { 98 | v := c.Pop().(int) 99 | if i != v { 100 | t.Error(v) 101 | } 102 | } 103 | 104 | if c.verifyIsEmpty() != true { 105 | t.Error("not empty") 106 | } 107 | } 108 | 109 | func TestASyncPop(t *testing.T) { 110 | c := NewCircularBuffer(10) 111 | 112 | go func() { 113 | for i := 0; i < 4; i++ { 114 | v := c.Pop().(int) 115 | if i != v { 116 | t.Error(v) 
117 | } 118 | } 119 | 120 | if c.verifyIsEmpty() != true { 121 | t.Error("not empty") 122 | } 123 | }() 124 | 125 | c.NBPush(3) 126 | c.NBPush(2) 127 | c.NBPush(1) 128 | c.NBPush(0) 129 | } 130 | 131 | func TestSyncOverflowEvictCallback(t *testing.T) { 132 | c := NewCircularBuffer(10) // up to 9 items in the buffer 133 | 134 | evicted := 0 135 | c.Evict = func(v interface{}) { 136 | if v.(int) != evicted { 137 | t.Error(v) 138 | } 139 | evicted += 1 140 | } 141 | 142 | for i := 0; i < 18; i++ { 143 | v := c.NBPush(i) 144 | if v != nil { 145 | t.Error(v) 146 | } 147 | } 148 | 149 | for i := 9; i < 18; i++ { 150 | v := c.Get().(int) 151 | if i != v { 152 | t.Error(v) 153 | } 154 | } 155 | 156 | if evicted != 9 { 157 | t.Error(evicted) 158 | } 159 | 160 | if c.verifyIsEmpty() != true { 161 | t.Error("not empty") 162 | } 163 | } 164 | 165 | func drain(c *CircularBuffer) []int { 166 | n := make([]int, 0) 167 | for c.Empty() != true { 168 | n = append(n, c.Get().(int)) 169 | } 170 | return n 171 | } 172 | 173 | func cmp(a, b []int) int { 174 | for i := 0; i < len(a) && i < len(b); i++ { 175 | switch { 176 | case a[i] > b[i]: 177 | return 1 178 | case a[i] < b[i]: 179 | return -1 180 | } 181 | } 182 | switch { 183 | case len(a) > len(b): 184 | return 1 185 | case len(a) < len(b): 186 | return -1 187 | } 188 | return 0 189 | } 190 | 191 | func TestEject(t *testing.T) { 192 | c := NewCircularBuffer(5) 193 | 194 | c.NBPush(0) 195 | c.NBPush(1) 196 | c.NBPush(2) 197 | c.NBPush(3) 198 | x := drain(c) 199 | if cmp(x, []int{0, 1, 2, 3}) != 0 { 200 | t.Error("x %v", x) 201 | } 202 | 203 | c.NBPush(0) 204 | c.NBPush(1) 205 | c.NBPush(2) 206 | c.NBPush(3) 207 | c.NBPush(4) 208 | x = drain(c) 209 | if cmp(x, []int{1, 2, 3, 4}) != 0 { 210 | t.Error("x %v", x) 211 | } 212 | } 213 | 214 | func TestOptionalPush(t *testing.T) { 215 | c := NewCircularBuffer(5) 216 | 217 | c.NBPush(0) 218 | c.NBPush(1) 219 | c.NBPush(2) 220 | c.NBPush(3) 221 | x := drain(c) 222 | if cmp(x, []int{0, 1, 2, 
3}) != 0 { 223 | t.Error("x %v", x) 224 | } 225 | 226 | // No evict 227 | c.NBPush(0) 228 | c.NBPush(1) 229 | c.NBPush(2) 230 | c.NBOptionalPush(3) 231 | x = drain(c) 232 | if cmp(x, []int{0, 1, 2, 3}) != 0 { 233 | t.Error("x %v", x) 234 | } 235 | 236 | // Evict us 237 | c.NBPush(0) 238 | c.NBPush(1) 239 | c.NBPush(2) 240 | c.NBPush(3) 241 | c.NBOptionalPush(4) 242 | x = drain(c) 243 | if cmp(x, []int{0, 1, 2, 3}) != 0 { 244 | t.Error("x %v", x) 245 | } 246 | 247 | // Evict us, with callback 248 | c.Evict = func(v interface{}) { 249 | if v.(int) != 4 { 250 | t.Error(v) 251 | } 252 | } 253 | c.NBPush(0) 254 | c.NBPush(1) 255 | c.NBPush(2) 256 | c.NBPush(3) 257 | c.NBOptionalPush(4) 258 | x = drain(c) 259 | if cmp(x, []int{0, 1, 2, 3}) != 0 { 260 | t.Error("x %v", x) 261 | } 262 | } 263 | -------------------------------------------------------------------------------- /circularbuffer/circularbuffer.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | // Circular buffer data structure. 4 | // 5 | // This implementation avoids memory allocations during push/pop 6 | // operations. It supports nonblocking push (ie: old data gets 7 | // evicted). You can pop an item from the top. You can access this 8 | // data structure concurrently, using an internal guarding mutex lock. 9 | // 10 | // An item can get evicted when NBPush is called. During the call the 11 | // buffer will try to call the Evict callback. If the callback is 12 | // present the NBPush fun runs it and returns nil. Otherwise NBPush 13 | // returns the evicted value or nil if there still is free space in 14 | // the stack. 15 | // 16 | // This package exports three things: 17 | // StackPusher interface 18 | // StackGetter interface 19 | // CircularBuffer structure 20 | package circularbuffer 21 | 22 | import ( 23 | "sync" 24 | ) 25 | 26 | // An interface used to add things to the stack. 
type StackPusher interface {
	// Non-blocking push. Will evict items from the cache if there
	// isn't enough space available.
	NBPush(interface{}) interface{}

	// Non-blocking push. Will put the item in the buffer only if
	// there is free space.
	NBOptionalPush(interface{}) interface{}
}

// An interface used to get items from the stack.
type StackGetter interface {
	// Get an item from the beginning of the stack (oldest),
	// blocking.
	Get() interface{}
	// Blocking pop an item from the end of the stack (newest),
	// blocking.
	Pop() interface{}
}

type CircularBuffer struct {
	start  uint // idx of first used cell
	pos    uint // idx of first unused cell
	buffer []interface{}
	size   uint
	avail  chan bool // poor man's semaphore. len(avail) is always equal to (size + pos - start) % size
	lock   sync.Mutex
	// Callback used by NBPush if an item needs to be evicted from
	// the stack.
	Evict func(v interface{})
}

// NewCircularBuffer returns a CircularBuffer backed by a preallocated
// buffer of the given size; it can hold up to size-1 items.
func NewCircularBuffer(size uint) *CircularBuffer {
	return &CircularBuffer{
		buffer: make([]interface{}, size),
		size:   size,
		avail:  make(chan bool, size),
	}
}

// NBPush is a nonblocking push. If the Evict callback is not set it
// returns the evicted item (if any), otherwise nil.
func (b *CircularBuffer) NBPush(v interface{}) interface{} {
	b.lock.Lock()

	if b.buffer[b.pos] != nil {
		panic("not nil")
	}

	var dropped interface{}
	b.buffer[b.pos] = v
	b.pos = (b.pos + 1) % b.size
	if b.pos == b.start {
		// The head caught up with the tail: drop the oldest item
		// to make room for the new one. The stack length does not
		// change, so the avail semaphore stays untouched.
		dropped = b.buffer[b.start]
		b.buffer[b.start] = nil
		b.start = (b.start + 1) % b.size
	} else {
		select {
		case b.avail <- true:
		default:
			panic("Sending to avail channel must never block")
		}
	}
	b.lock.Unlock()

	if dropped == nil || b.Evict == nil {
		return dropped
	}
	// Run the callback outside the lock; it may itself want to push
	// an item onto the stack.
	b.Evict(dropped)
	return nil
}

// NBOptionalPush is a nonblocking push that stores v only when there is
// spare space; otherwise v itself is evicted.
func (b *CircularBuffer) NBOptionalPush(v interface{}) interface{} {
	b.lock.Lock()

	if b.buffer[b.pos] != nil {
		panic("not nil")
	}

	var dropped interface{}
	if (b.start+b.size-1)%b.size == b.pos {
		// Buffer is full: reject (evict) the new value and leave
		// the contents untouched.
		dropped = v
	} else {
		// Plenty of space, just add as usual.
		b.buffer[b.pos] = v
		b.pos = (b.pos + 1) % b.size
		select {
		case b.avail <- true:
		default:
			panic("Sending to avail channel must never block")
		}
	}
	b.lock.Unlock()

	if dropped == nil || b.Evict == nil {
		return dropped
	}
	// Run the callback outside the lock; it may itself want to push
	// an item onto the stack.
	b.Evict(dropped)
	return nil
}

// Get removes and returns the item at the beginning of the queue
// (oldest), blocking while the buffer is empty.
func (b *CircularBuffer) Get() interface{} {
	<-b.avail

	b.lock.Lock()
	defer b.lock.Unlock()

	if b.start == b.pos {
		panic("Trying to get from empty buffer")
	}

	item := b.buffer[b.start]
	b.buffer[b.start] = nil
	b.start = (b.start + 1) % b.size

	return item
}

// Blocking pop an item from the end of the queue (newest), blocking.
155 | func (b *CircularBuffer) Pop() interface{} { 156 | _ = <-b.avail 157 | 158 | b.lock.Lock() 159 | defer b.lock.Unlock() 160 | 161 | if b.start == b.pos { 162 | panic("Can't pop from empty buffer") 163 | } 164 | 165 | b.pos = (b.size + b.pos - 1) % b.size 166 | v := b.buffer[b.pos] 167 | b.buffer[b.pos] = nil 168 | 169 | return v 170 | } 171 | 172 | // Empty Is the buffer empty? 173 | func (b *CircularBuffer) Empty() bool { 174 | // b.avail is a channel, no need for a lock 175 | return len(b.avail) == 0 176 | } 177 | 178 | // Length of the buffer 179 | func (b *CircularBuffer) Length() int { 180 | // b.avail is a channel, no need for a lock 181 | return len(b.avail) 182 | } 183 | -------------------------------------------------------------------------------- /spacesaving/rate_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 CloudFlare, Inc. 2 | 3 | package spacesaving 4 | 5 | import ( 6 | "math" 7 | "math/rand" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | var listOfTestVectors = [][]struct { 13 | update bool 14 | key string 15 | delay float64 16 | rateLo float64 17 | rateHi float64 18 | }{ 19 | // Two slots, half life of 1 second 20 | // Sanity check, feeding one packet per second 21 | { 22 | {false, "a", 1, 0, 0}, 23 | {true, "a", 1, 0, 0}, 24 | {true, "a", 1, 0.5, 0.5}, 25 | {true, "a", 1, 0.75, 0.75}, 26 | {true, "a", 1, 0.875, 0.875}, 27 | {true, "a", 1, 0.9375, 0.9375}, 28 | {true, "a", 1, 0.96875, 0.96875}, 29 | {true, "a", 1, 0.984375, 0.984375}, 30 | {true, "a", 1, 0.9921875, 0.9921875}, 31 | {true, "a", 1, 0.99609375, 0.99609375}, 32 | {true, "a", 1, 0.998046875, 0.998046875}, 33 | 34 | // Discharging over 5 seconds 35 | {false, "a", 1, 0.4990234375, 0.4990234375}, 36 | {false, "a", 1, 0.24951171875, 0.24951171875}, 37 | {false, "a", 1, 0.12475585937500003, 0.12475585937500003}, 38 | {false, "a", 1, 0.0623779296875, 0.0623779296875}, 39 | {false, "a", 1, 0.03118896484375, 
0.03118896484375}, 40 | 41 | // A small number remains after 30 seconds of discharge 42 | {false, "a", 25, 0.000000000929503585211933486330, 43 | 0.000000000929503585211933486330}, 44 | }, 45 | 46 | // Sanity check of yielding 47 | { 48 | {false, "a", 1, 0, 0}, 49 | {false, "b", 0, 0, 0}, 50 | 51 | {true, "a", 1, 0, 0}, 52 | {true, "b", 0, 0, 0}, 53 | 54 | {true, "a", 1, 0.5, 0.5}, 55 | {true, "b", 0, 0.5, 0.5}, 56 | 57 | {true, "c", 1, 0.5, 0.75}, 58 | {false, "a", 0, 0.25, 0.25}, 59 | {false, "b", 0, 0, 0.25}, // b is yielded 60 | 61 | {true, "a", 0, 0.75, 0.75}, 62 | {false, "b", 0, 0, 0.75}, // b is yielded 63 | {false, "c", 0, 0.5, 0.75}, 64 | 65 | {true, "b", 0, 0, 0.75}, 66 | 67 | {false, "a", 0, 0.0, 0.75}, // a is yielded 68 | {false, "b", 0, 0.0, 0.75}, 69 | {false, "c", 0, 0.5, 0.75}, 70 | }, 71 | } 72 | 73 | func TestRate(t *testing.T) { 74 | t.Parallel() 75 | 76 | for testNo, testVector := range listOfTestVectors { 77 | ts := time.Now() 78 | ss := (&Rate{}).Init(2, 1*time.Second) 79 | for i, l := range testVector { 80 | ts = ts.Add(time.Duration(l.delay * 81 | float64(time.Second.Nanoseconds()))) 82 | 83 | if l.update { 84 | ss.Touch(l.key, ts) 85 | } 86 | 87 | if l.rateLo != -1 || l.rateHi != -1 { 88 | rateLo, rateHi := ss.GetSingle(l.key, ts) 89 | if l.rateLo != -1 && rateLo != l.rateLo { 90 | t.Errorf("test %v line %v: rateLo "+ 91 | "expected=%v got=%v", 92 | testNo, i, l.rateLo, rateLo) 93 | } 94 | if l.rateHi != -1 && rateHi != l.rateHi { 95 | t.Errorf("test %v line %v: rateHi "+ 96 | "expected=%v got=%v", 97 | testNo, i, l.rateHi, rateHi) 98 | } 99 | } 100 | } 101 | } 102 | } 103 | 104 | func TestRateGetAll(t *testing.T) { 105 | t.Parallel() 106 | 107 | ss := (&Rate{}).Init(2, 1*time.Second) 108 | 109 | ss.Touch("a",time.Now()) 110 | ss.Touch("a",time.Now()) 111 | ss.Touch("b",time.Now()) 112 | ss.Touch("b",time.Now()) 113 | ss.Touch("c",time.Now()) 114 | ss.Touch("c",time.Now()) 115 | 116 | el := ss.GetAll(time.Now()) 117 | if el[0].Key != "c" 
{ 118 | t.Errorf("%v\n", el[0]) 119 | } 120 | if el[1].Key != "b" { 121 | t.Errorf("%v\n", el[1]) 122 | } 123 | if len(el) != 2 { 124 | t.Error("expecting length = 2") 125 | } 126 | 127 | ss.Touch("b",time.Now()) 128 | ss.Touch("b",time.Now()) 129 | ss.Touch("b",time.Now()) 130 | ss.Touch("b",time.Now()) 131 | 132 | el = ss.GetAll(time.Now()) 133 | if el[0].Key != "b" { 134 | t.Errorf("%v\n", el[0]) 135 | } 136 | if el[1].Key != "c" { 137 | t.Errorf("%v\n", el[1]) 138 | } 139 | } 140 | 141 | func TestRateGetAllCover(t *testing.T) { 142 | t.Parallel() 143 | 144 | ss := (&Rate{}).Init(2, 1*time.Second) 145 | el := ss.GetAll(time.Now()) 146 | 147 | if len(el) != 0 { 148 | t.Error("expecting length = 0") 149 | } 150 | 151 | 152 | } 153 | 154 | 155 | // Benchmark updating times with 10% hit rate. 156 | func BenchmarkTouch16384_ten(bb *testing.B) { 157 | benchmark(bb, 16384, 0.1) 158 | } 159 | 160 | func BenchmarkTouch32768_ten(bb *testing.B) { 161 | benchmark(bb, 32768, 0.1) 162 | } 163 | 164 | // Benchmark updating times with 50% hit rate. 165 | func BenchmarkTouch16384_fifty(bb *testing.B) { 166 | benchmark(bb, 16384, 0.5) 167 | } 168 | 169 | func BenchmarkTouch32768_fifty(bb *testing.B) { 170 | benchmark(bb, 32768, 0.5) 171 | } 172 | 173 | // Benchmark updating times with 90% hit rate. 174 | func BenchmarkTouch16384_ninety(bb *testing.B) { 175 | benchmark(bb, 16384, 0.9) 176 | } 177 | 178 | func BenchmarkTouch32768_ninety(bb *testing.B) { 179 | benchmark(bb, 32768, 0.9) 180 | } 181 | 182 | // Benchmark updating items with 100% hit rate. 
183 | func BenchmarkTouch1024_hundred(bb *testing.B) { 184 | benchmark(bb, 1024, 1) 185 | } 186 | 187 | func BenchmarkTouch2048_hundred(bb *testing.B) { 188 | benchmark(bb, 2048, 1) 189 | } 190 | 191 | func BenchmarkTouch4096_hundred(bb *testing.B) { 192 | benchmark(bb, 4096, 1) 193 | } 194 | 195 | func BenchmarkTouch8192_hundred(bb *testing.B) { 196 | benchmark(bb, 8192, 1) 197 | } 198 | 199 | func BenchmarkTouch16384_hundred(bb *testing.B) { 200 | benchmark(bb, 16384, 1) 201 | } 202 | 203 | func BenchmarkTouch32768_hundred(bb *testing.B) { 204 | benchmark(bb, 32768, 1) 205 | } 206 | 207 | func benchmark(bb *testing.B, n int, hitrate float64) { 208 | ss := (&Rate{}).Init(uint32(n), 1*time.Second) 209 | 210 | for i := 0; i < n; i += 1 { 211 | // string(rune(i)) keeps the historical key set (one rune per i) while 212 | // avoiding the invalid int-to-string conversion flagged by go vet. 213 | ss.Touch(string(rune(i)), time.Now()) 214 | } 215 | 216 | // warmup 217 | for i := 0; i < n; i += 1 { 218 | ss.Touch(string(rune(rand.Intn(n))), time.Now()) 219 | } 220 | 221 | topRange := int(float64(n) * 1 / hitrate) 222 | bb.ResetTimer() 223 | for i := 0; i < bb.N; i += 1 { 224 | ss.Touch(string(rune(rand.Intn(topRange))), time.Now()) 225 | } 226 | } 227 | 228 | // math.Exp is the slowest operation in this implementation. Measure just how 229 | // slow it is. Usually calling math.Exp() is responsible for ~30% of the CPU. 230 | func BenchmarkMathExp(bb *testing.B) { 231 | x := rand.Float64() 232 | for i := 0; i < bb.N; i += 1 { 233 | math.Exp(x) 234 | } 235 | } 236 | -------------------------------------------------------------------------------- /spacesaving/rate.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 CloudFlare, Inc. 2 | // 3 | // Tickless measurement of rates of top-k items in an infinite stream. 4 | // 5 | // Use exponentially decaying moving average to track rates of things per 6 | // second for a top-K items in the stream of events. Top-K is also known as 7 | // heavy hitters problem. 
8 | // 9 | // Here we adapt a space saving algorithm to track rates instead of counters. 10 | // This changes the complexity of the data structure - an update takes 11 | // O(log(k)) time for k tracked items. As we use exponentially decaying moving 12 | // average that means in a worst case we run the math.Exp() function log(k) 13 | // times on every update. 14 | package spacesaving 15 | 16 | import ( 17 | "container/heap" 18 | "math" 19 | "sort" 20 | "time" 21 | ) 22 | 23 | type bucket struct { 24 | key string 25 | lastTs int64 26 | rate float64 27 | errLastTs int64 28 | errRate float64 29 | idx uint32 30 | } 31 | 32 | type idxEl struct { 33 | rate float64 34 | lastTs int64 35 | } 36 | 37 | type ssHeap struct { 38 | ss *Rate 39 | h []uint32 40 | } 41 | 42 | func (sh *ssHeap) Len() int { return len(sh.h) } 43 | func (ss *ssHeap) Push(x interface{}) { panic("not implemented") } 44 | func (ss *ssHeap) Pop() interface{} { panic("not implemented") } 45 | 46 | func (sh *ssHeap) Less(i, j int) bool { 47 | ss := sh.ss 48 | a, b := &ss.buckets[sh.h[i]], &ss.buckets[sh.h[j]] 49 | rateA, rateB := a.rate, b.rate 50 | lastA, lastB := a.lastTs, b.lastTs 51 | 52 | // Formula the same as recount(), inline is faster 53 | if lastA >= lastB { 54 | // optimization. if rateB is already smaller than rateA, there 55 | // is no need to compute real rates. It ain't gonna grow, and 56 | // we can avoid running expensive math.Exp(). 57 | if rateB >= rateA { 58 | rateB *= math.Exp(float64(lastA-lastB) * ss.weightHelper) 59 | } 60 | } else { 61 | if rateA >= rateB { 62 | rateA *= math.Exp(float64(lastB-lastA) * ss.weightHelper) 63 | } 64 | } 65 | 66 | if rateA != rateB { 67 | return rateA < rateB 68 | } else { 69 | // This makes difference for uninitialized buckets. Rate is 70 | // zero, but lastTs is modified. In such case make sure to use 71 | // the uninitialized bucket first. 
72 | return lastA < lastB 73 | } 74 | } 75 | 76 | func (sh *ssHeap) Swap(i, j int) { 77 | a, b := &sh.ss.buckets[sh.h[i]], &sh.ss.buckets[sh.h[j]] 78 | // if a.idx != uint32(i) || b.idx != uint32(j) { 79 | // panic("desynchronized data") 80 | // } 81 | sh.h[i], sh.h[j] = sh.h[j], sh.h[i] 82 | a.idx, b.idx = uint32(j), uint32(i) 83 | } 84 | 85 | type Rate struct { 86 | keytobucketno map[string]uint32 87 | buckets []bucket 88 | weightHelper float64 89 | halfLife time.Duration 90 | sh ssHeap 91 | } 92 | 93 | // Initialize already allocated Rate structure. 94 | // 95 | // Size stands for number of items to track in the stream. HalfLife determines 96 | // the time required half-charge or half-discharge a rate counter. 97 | func (ss *Rate) Init(size uint32, halfLife time.Duration) *Rate { 98 | *ss = Rate{ 99 | keytobucketno: make(map[string]uint32, size), 100 | buckets: make([]bucket, size), 101 | weightHelper: -math.Ln2 / float64(halfLife.Nanoseconds()), 102 | halfLife: halfLife, 103 | } 104 | ss.sh.h = make([]uint32, size) 105 | ss.sh.ss = ss 106 | heap.Init(&ss.sh) 107 | for i := uint32(0); i < uint32(size); i++ { 108 | ss.sh.h[i] = i 109 | ss.buckets[i].idx = i 110 | } 111 | return ss 112 | } 113 | 114 | // Mark an event happening, using given timestamp. 115 | // 116 | // The implementation assumes time is monotonic, the behaviour is undefined in 117 | // the case of time going back. This operation has logarithmic complexity. 
118 | func (ss *Rate) Touch(key string, nowTs time.Time) { 119 | now := nowTs.UnixNano() 120 | 121 | var bucket *bucket 122 | if bucketno, found := ss.keytobucketno[key]; found { 123 | bucket = &ss.buckets[bucketno] 124 | } else { 125 | bucketno = uint32(ss.sh.h[0]) 126 | 127 | bucket = &ss.buckets[bucketno] 128 | delete(ss.keytobucketno, bucket.key) 129 | ss.keytobucketno[key] = bucketno 130 | 131 | bucket.key, bucket.errLastTs, bucket.errRate = 132 | key, bucket.lastTs, bucket.rate 133 | } 134 | 135 | if bucket.lastTs != 0 { 136 | bucket.rate = ss.count(bucket.rate, bucket.lastTs, now) 137 | } 138 | bucket.lastTs = now 139 | 140 | // Even lastTs change may change ordering. 141 | heap.Fix(&ss.sh, int(bucket.idx)) 142 | } 143 | 144 | func (ss *Rate) count(rate float64, lastTs, now int64) float64 { 145 | deltaNs := float64(now - lastTs) 146 | weight := math.Exp(deltaNs * ss.weightHelper) 147 | 148 | if deltaNs != 0 { 149 | return rate*weight + (1000000000./deltaNs)*(1-weight) 150 | } 151 | return rate * weight 152 | } 153 | 154 | func (ss *Rate) recount(rate float64, lastTs, now int64) float64 { 155 | return rate * math.Exp(float64(now-lastTs)*ss.weightHelper) 156 | } 157 | 158 | type RateElement struct { 159 | Key string 160 | LoRate float64 161 | HiRate float64 162 | } 163 | 164 | type sseSlice []RateElement 165 | 166 | func (a sseSlice) Len() int { return len(a) } 167 | func (a sseSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 168 | func (a sseSlice) Less(i, j int) bool { return a[i].HiRate < a[j].HiRate } 169 | 170 | // GetAll gets the lower and upper bounds of a range for all tracked elements 171 | // 172 | // The items are sorted by decreasing upper bound. Complexity is O(k*log(k)) 173 | // due to sorting. 
174 | func (ss *Rate) GetAll(nowTs time.Time) []RateElement { 175 | now := nowTs.UnixNano() 176 | elements := make([]RateElement, 0, len(ss.buckets)) 177 | for _, bucket := range ss.buckets { 178 | if bucket.key == "" { 179 | continue 180 | } 181 | rate := ss.recount(bucket.rate, bucket.lastTs, now) 182 | errRate := ss.recount(bucket.errRate, bucket.errLastTs, now) 183 | elements = append(elements, RateElement{ 184 | Key: bucket.key, 185 | LoRate: rate - errRate, 186 | HiRate: rate, 187 | }) 188 | } 189 | sort.Sort(sort.Reverse(sseSlice(elements))) 190 | return elements 191 | } 192 | 193 | // GetSingle gets the lower and upper bounds of a range for a single element. If the 194 | // element isn't tracked lower bound will be zero and upper bound will be the 195 | // lowest bound of all the tracked items. 196 | func (ss *Rate) GetSingle(key string, nowTs time.Time) (float64, float64) { 197 | now := nowTs.UnixNano() 198 | var bucket *bucket 199 | if bucketno, found := ss.keytobucketno[key]; found { 200 | bucket = &ss.buckets[bucketno] 201 | rate := ss.recount(bucket.rate, bucket.lastTs, now) 202 | errRate := ss.recount(bucket.errRate, bucket.errLastTs, now) 203 | return rate - errRate, rate 204 | } else { 205 | bucketno = uint32(ss.sh.h[0]) 206 | bucket = &ss.buckets[bucketno] 207 | errRate := ss.recount(bucket.rate, bucket.lastTs, now) 208 | return 0, errRate 209 | } 210 | 211 | } 212 | -------------------------------------------------------------------------------- /lrucache/list.go: -------------------------------------------------------------------------------- 1 | // Copyright 2009 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | /* This file is a slightly modified file from the go package sources 6 | and is released on the following license: 7 | 8 | Copyright (c) 2012 The Go Authors. All rights reserved. 
9 | 10 | Redistribution and use in source and binary forms, with or without 11 | modification, are permitted provided that the following conditions are 12 | met: 13 | 14 | * Redistributions of source code must retain the above copyright 15 | notice, this list of conditions and the following disclaimer. 16 | * Redistributions in binary form must reproduce the above 17 | copyright notice, this list of conditions and the following disclaimer 18 | in the documentation and/or other materials provided with the 19 | distribution. 20 | * Neither the name of Google Inc. nor the names of its 21 | contributors may be used to endorse or promote products derived from 22 | this software without specific prior written permission. 23 | 24 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 27 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 28 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 29 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 30 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 31 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 32 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 33 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 34 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 | */ 36 | 37 | // Package list implements a doubly linked list. 38 | // 39 | // To iterate over a list (where l is a *List): 40 | // for e := l.Front(); e != nil; e = e.Next() { 41 | // // do something with e.Value 42 | // } 43 | // 44 | 45 | package lrucache 46 | 47 | // Element is an element of a linked list. 48 | type element struct { 49 | // Next and previous pointers in the doubly-linked list of elements. 
50 | // To simplify the implementation, internally a list l is implemented 51 | // as a ring, such that &l.root is both the next element of the last 52 | // list element (l.Back()) and the previous element of the first list 53 | // element (l.Front()). 54 | next, prev *element 55 | 56 | // The list to which this element belongs. 57 | list *list 58 | 59 | // The value stored with this element. 60 | Value interface{} 61 | } 62 | 63 | // Next returns the next list element or nil. 64 | func (e *element) Next() *element { 65 | if p := e.next; e.list != nil && p != &e.list.root { 66 | return p 67 | } 68 | return nil 69 | } 70 | 71 | // Prev returns the previous list element or nil. 72 | func (e *element) Prev() *element { 73 | if p := e.prev; e.list != nil && p != &e.list.root { 74 | return p 75 | } 76 | return nil 77 | } 78 | 79 | // List represents a doubly linked list. 80 | // The zero value for List is an empty list ready to use. 81 | type list struct { 82 | root element // sentinel list element, only &root, root.prev, and root.next are used 83 | len int // current list length excluding (this) sentinel element 84 | } 85 | 86 | // Init initializes or clears list l. 87 | func (l *list) Init() *list { 88 | l.root.next = &l.root 89 | l.root.prev = &l.root 90 | l.len = 0 91 | return l 92 | } 93 | 94 | // New returns an initialized list. 95 | // func New() *list { return new(list).Init() } 96 | 97 | // Len returns the number of elements of list l. 98 | // The complexity is O(1). 99 | func (l *list) Len() int { return l.len } 100 | 101 | // Front returns the first element of list l or nil 102 | func (l *list) Front() *element { 103 | if l.len == 0 { 104 | return nil 105 | } 106 | return l.root.next 107 | } 108 | 109 | // Back returns the last element of list l or nil. 110 | func (l *list) Back() *element { 111 | if l.len == 0 { 112 | return nil 113 | } 114 | return l.root.prev 115 | } 116 | 117 | // insert inserts e after at, increments l.len, and returns e. 
118 | func (l *list) insert(e, at *element) *element { 119 | n := at.next 120 | at.next = e 121 | e.prev = at 122 | e.next = n 123 | n.prev = e 124 | e.list = l 125 | l.len++ 126 | return e 127 | } 128 | 129 | // insertValue is a convenience wrapper for insert(&Element{Value: v}, at). 130 | func (l *list) insertValue(v interface{}, at *element) *element { 131 | return l.insert(&element{Value: v}, at) 132 | } 133 | 134 | // remove removes e from its list, decrements l.len, and returns e. 135 | func (l *list) remove(e *element) *element { 136 | e.prev.next = e.next 137 | e.next.prev = e.prev 138 | e.next = nil // avoid memory leaks 139 | e.prev = nil // avoid memory leaks 140 | e.list = nil 141 | l.len-- 142 | return e 143 | } 144 | 145 | // Remove removes e from l if e is an element of list l. 146 | // It returns the element value e.Value. 147 | func (l *list) Remove(e *element) interface{} { 148 | if e.list == l { 149 | // if e.list == l, l must have been initialized when e was inserted 150 | // in l or l == nil (e is a zero Element) and l.remove will crash 151 | l.remove(e) 152 | } 153 | return e.Value 154 | } 155 | 156 | // PushFront inserts a new element e with value v at the front of list l and returns e. 157 | func (l *list) PushFront(v interface{}) *element { 158 | return l.insertValue(v, &l.root) 159 | } 160 | 161 | // PushBack inserts a new element e with value v at the back of list l and returns e. 162 | func (l *list) PushBack(v interface{}) *element { 163 | return l.insertValue(v, l.root.prev) 164 | } 165 | 166 | // InsertBefore inserts a new element e with value v immediately before mark and returns e. 167 | // If mark is not an element of l, the list is not modified. 
168 | func (l *list) InsertBefore(v interface{}, mark *element) *element { 169 | if mark.list != l { 170 | return nil 171 | } 172 | // see comment in List.Remove about initialization of l 173 | return l.insertValue(v, mark.prev) 174 | } 175 | 176 | // InsertAfter inserts a new element e with value v immediately after mark and returns e. 177 | // If mark is not an element of l, the list is not modified. 178 | func (l *list) InsertAfter(v interface{}, mark *element) *element { 179 | if mark.list != l { 180 | return nil 181 | } 182 | // see comment in List.Remove about initialization of l 183 | return l.insertValue(v, mark) 184 | } 185 | 186 | // MoveToFront moves element e to the front of list l. 187 | // If e is not an element of l, the list is not modified. 188 | func (l *list) MoveToFront(e *element) { 189 | if e.list != l || l.root.next == e { 190 | return 191 | } 192 | // see comment in List.Remove about initialization of l 193 | l.insert(l.remove(e), &l.root) 194 | } 195 | 196 | // MoveToBack moves element e to the back of list l. 197 | // If e is not an element of l, the list is not modified. 198 | func (l *list) MoveToBack(e *element) { 199 | if e.list != l || l.root.prev == e { 200 | return 201 | } 202 | // see comment in List.Remove about initialization of l 203 | l.insert(l.remove(e), l.root.prev) 204 | } 205 | 206 | // MoveBefore moves element e to its new position before mark. 207 | // If e is not an element of l, or e == mark, the list is not modified. 208 | func (l *list) MoveBefore(e, mark *element) { 209 | if e.list != l || e == mark { 210 | return 211 | } 212 | l.insert(l.remove(e), mark.prev) 213 | } 214 | 215 | // MoveAfter moves element e to its new position after mark. 216 | // If e is not an element of l, or e == mark, the list is not modified. 
217 | func (l *list) MoveAfter(e, mark *element) { 218 | if e.list != l || e == mark { 219 | return 220 | } 221 | l.insert(l.remove(e), mark) 222 | } 223 | 224 | // PushBackList inserts a copy of an other list at the back of list l. 225 | // The lists l and other may be the same. 226 | func (l *list) PushBackList(other *list) { 227 | for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { 228 | l.insertValue(e.Value, l.root.prev) 229 | } 230 | } 231 | 232 | // PushFrontList inserts a copy of an other list at the front of list l. 233 | // The lists l and other may be the same. 234 | func (l *list) PushFrontList(other *list) { 235 | for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { 236 | l.insertValue(e.Value, &l.root) 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /lrucache/lrucache.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 CloudFlare, Inc. 2 | 3 | package lrucache 4 | 5 | import ( 6 | "container/heap" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | // Every element in the cache is linked to three data structures: 12 | // Table map, PriorityQueue heap ordered by expiry and a LruList list 13 | // ordered by decreasing popularity. 14 | type entry struct { 15 | element element // list element. value is a pointer to this entry 16 | key string // key is a key! 17 | value interface{} // 18 | expire time.Time // time when the item is expired. it's okay to be stale. 19 | index int // index for priority queue needs. -1 if entry is free 20 | } 21 | 22 | // LRUCache data structure. Never dereference it or copy it by 23 | // value. Always use it through a pointer. 
24 | type LRUCache struct { 25 | lock sync.Mutex 26 | table map[string]*entry // all entries in table must be in lruList 27 | priorityQueue priorityQueue // some elements from table may be in priorityQueue 28 | lruList list // every entry is either used and resides in lruList 29 | freeList list // or free and is linked to freeList 30 | 31 | ExpireGracePeriod time.Duration // time after an expired entry is purged from cache (unless pushed out of LRU) 32 | } 33 | 34 | // Initialize the LRU cache instance. O(capacity) 35 | func (b *LRUCache) Init(capacity uint) { 36 | b.table = make(map[string]*entry, capacity) 37 | b.priorityQueue = make([]*entry, 0, capacity) 38 | b.lruList.Init() 39 | b.freeList.Init() 40 | heap.Init(&b.priorityQueue) 41 | 42 | // Reserve all the entries in one giant continous block of memory 43 | arrayOfEntries := make([]entry, capacity) 44 | for i := uint(0); i < capacity; i++ { 45 | e := &arrayOfEntries[i] 46 | e.element.Value = e 47 | e.index = -1 48 | b.freeList.PushElementBack(&e.element) 49 | } 50 | } 51 | 52 | // Create new LRU cache instance. Allocate all the needed memory. O(capacity) 53 | func NewLRUCache(capacity uint) *LRUCache { 54 | b := &LRUCache{} 55 | b.Init(capacity) 56 | return b 57 | } 58 | 59 | // Give me the entry with lowest expiry field if it's before now. 60 | func (b *LRUCache) expiredEntry(now time.Time) *entry { 61 | if len(b.priorityQueue) == 0 { 62 | return nil 63 | } 64 | 65 | if now.IsZero() { 66 | // Fill it only when actually used. 67 | now = time.Now() 68 | } 69 | 70 | if e := b.priorityQueue[0]; e.expire.Before(now) { 71 | return e 72 | } 73 | return nil 74 | } 75 | 76 | // Give me the least used entry. 
77 | func (b *LRUCache) leastUsedEntry() *entry { 78 | return b.lruList.Back().Value.(*entry) 79 | } 80 | 81 | func (b *LRUCache) freeSomeEntry(now time.Time) (e *entry, used bool) { 82 | if b.freeList.Len() > 0 { 83 | return b.freeList.Front().Value.(*entry), false 84 | } 85 | 86 | e = b.expiredEntry(now) 87 | if e != nil { 88 | return e, true 89 | } 90 | 91 | if b.lruList.Len() == 0 { 92 | return nil, false 93 | } 94 | 95 | return b.leastUsedEntry(), true 96 | } 97 | 98 | // Move entry from used/lru list to a free list. Clear the entry as well. 99 | func (b *LRUCache) removeEntry(e *entry) { 100 | if e.element.list != &b.lruList { 101 | panic("list lruList") 102 | } 103 | 104 | if e.index != -1 { 105 | heap.Remove(&b.priorityQueue, e.index) 106 | } 107 | b.lruList.Remove(&e.element) 108 | b.freeList.PushElementFront(&e.element) 109 | delete(b.table, e.key) 110 | e.key = "" 111 | e.value = nil 112 | } 113 | 114 | func (b *LRUCache) insertEntry(e *entry) { 115 | if e.element.list != &b.freeList { 116 | panic("list freeList") 117 | } 118 | 119 | if !e.expire.IsZero() { 120 | heap.Push(&b.priorityQueue, e) 121 | } 122 | b.freeList.Remove(&e.element) 123 | b.lruList.PushElementFront(&e.element) 124 | b.table[e.key] = e 125 | } 126 | 127 | func (b *LRUCache) touchEntry(e *entry) { 128 | b.lruList.MoveToFront(&e.element) 129 | } 130 | 131 | // SetNow adds an item to the cache overwriting existing one if it 132 | // exists. Allows specifing current time required to expire an item 133 | // when no more slots are used. O(log(n)) if expiry is set, O(1) when 134 | // clear. 
135 | func (b *LRUCache) SetNow(key string, value interface{}, expire time.Time, now time.Time) { 136 | b.lock.Lock() 137 | defer b.lock.Unlock() 138 | 139 | var used bool 140 | 141 | e := b.table[key] 142 | if e != nil { 143 | used = true 144 | } else { 145 | e, used = b.freeSomeEntry(now) 146 | if e == nil { 147 | return 148 | } 149 | } 150 | if used { 151 | b.removeEntry(e) 152 | } 153 | 154 | e.key = key 155 | e.value = value 156 | e.expire = expire 157 | b.insertEntry(e) 158 | } 159 | 160 | // Set adds an item to the cache overwriting existing one if it 161 | // exists. O(log(n)) if expiry is set, O(1) when clear. 162 | func (b *LRUCache) Set(key string, value interface{}, expire time.Time) { 163 | b.SetNow(key, value, expire, time.Time{}) 164 | } 165 | 166 | // Get a key from the cache, possibly stale. Update its LRU score. O(1) 167 | func (b *LRUCache) Get(key string) (v interface{}, ok bool) { 168 | b.lock.Lock() 169 | defer b.lock.Unlock() 170 | 171 | e := b.table[key] 172 | if e == nil { 173 | return nil, false 174 | } 175 | 176 | b.touchEntry(e) 177 | return e.value, true 178 | } 179 | 180 | // GetQuiet gets a key from the cache, possibly stale. Don't modify its LRU score. O(1) 181 | func (b *LRUCache) GetQuiet(key string) (v interface{}, ok bool) { 182 | b.lock.Lock() 183 | defer b.lock.Unlock() 184 | 185 | e := b.table[key] 186 | if e == nil { 187 | return nil, false 188 | } 189 | 190 | return e.value, true 191 | } 192 | 193 | // GetNotStale gets a key from the cache, make sure it's not stale. Update its 194 | // LRU score. O(log(n)) if the item is expired. 195 | func (b *LRUCache) GetNotStale(key string) (value interface{}, ok bool) { 196 | return b.GetNotStaleNow(key, time.Now()) 197 | } 198 | 199 | // GetNotStaleNow gets a key from the cache, make sure it's not stale. Update its 200 | // LRU score. O(log(n)) if the item is expired. 
201 | func (b *LRUCache) GetNotStaleNow(key string, now time.Time) (value interface{}, ok bool) { 202 | b.lock.Lock() 203 | defer b.lock.Unlock() 204 | 205 | e := b.table[key] 206 | if e == nil { 207 | return nil, false 208 | } 209 | 210 | if e.expire.Before(now) { 211 | // Remove entries expired for more than the grace period. Here 212 | // e.expire is in the past, so the age of the entry is 213 | // now.Sub(e.expire); the previous e.expire.Sub(now) was always 214 | // negative and never exceeded a positive grace period. 215 | if b.ExpireGracePeriod == 0 || now.Sub(e.expire) > b.ExpireGracePeriod { 216 | b.removeEntry(e) 217 | } 218 | return nil, false 219 | } 220 | 221 | b.touchEntry(e) 222 | return e.value, true 223 | } 224 | 225 | // GetStale gets a key from the cache, possibly stale. Update its LRU 226 | // score. O(1) always. 227 | func (b *LRUCache) GetStale(key string) (value interface{}, ok, expired bool) { 228 | return b.GetStaleNow(key, time.Now()) 229 | } 230 | 231 | // GetStaleNow gets a key from the cache, possibly stale. Update its LRU 232 | // score. O(1) always. 233 | func (b *LRUCache) GetStaleNow(key string, now time.Time) (value interface{}, ok, expired bool) { 234 | b.lock.Lock() 235 | defer b.lock.Unlock() 236 | 237 | e := b.table[key] 238 | if e == nil { 239 | return nil, false, false 240 | } 241 | 242 | b.touchEntry(e) 243 | return e.value, true, e.expire.Before(now) 244 | } 245 | 246 | // Del gets and remove a key from the cache. O(log(n)) if the item is using expiry, O(1) otherwise. 247 | func (b *LRUCache) Del(key string) (v interface{}, ok bool) { 248 | b.lock.Lock() 249 | defer b.lock.Unlock() 250 | 251 | e := b.table[key] 252 | if e == nil { 253 | return nil, false 254 | } 255 | 256 | value := e.value 257 | b.removeEntry(e) 258 | return value, true 259 | } 260 | 261 | // Evict all items from the cache. O(n*log(n)) 262 | func (b *LRUCache) Clear() int { 263 | b.lock.Lock() 264 | defer b.lock.Unlock() 265 | 266 | // First, remove entries that have expiry set 267 | l := len(b.priorityQueue) 268 | for i := 0; i < l; i++ { 269 | // This could be reduced to O(n). 
267 | b.removeEntry(b.priorityQueue[0]) 268 | } 269 | 270 | // Second, remove all remaining entries 271 | r := b.lruList.Len() 272 | for i := 0; i < r; i++ { 273 | b.removeEntry(b.leastUsedEntry()) 274 | } 275 | return l + r 276 | } 277 | 278 | // Evict all the expired items. O(n*log(n)) 279 | func (b *LRUCache) Expire() int { 280 | return b.ExpireNow(time.Now()) 281 | } 282 | 283 | // Evict items that expire before `now`. O(n*log(n)) 284 | func (b *LRUCache) ExpireNow(now time.Time) int { 285 | b.lock.Lock() 286 | defer b.lock.Unlock() 287 | 288 | i := 0 289 | for { 290 | e := b.expiredEntry(now) 291 | if e == nil { 292 | break 293 | } 294 | b.removeEntry(e) 295 | i += 1 296 | } 297 | return i 298 | } 299 | 300 | // Number of entries used in the LRU 301 | func (b *LRUCache) Len() int { 302 | // yes. this stupid thing requires locking 303 | b.lock.Lock() 304 | defer b.lock.Unlock() 305 | 306 | return b.lruList.Len() 307 | } 308 | 309 | // Capacity gets the total capacity of the LRU 310 | func (b *LRUCache) Capacity() int { 311 | // yes. 
this stupid thing requires locking 312 | b.lock.Lock() 313 | defer b.lock.Unlock() 314 | 315 | return b.lruList.Len() + b.freeList.Len() 316 | } 317 | -------------------------------------------------------------------------------- /lrucache/lrucache_test.go: -------------------------------------------------------------------------------- 1 | package lrucache 2 | 3 | import ( 4 | "math/rand" 5 | "runtime" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestBasicExpiry(t *testing.T) { 11 | t.Parallel() 12 | b := NewLRUCache(3) 13 | if _, ok := b.Get("a"); ok { 14 | t.Error("") 15 | } 16 | 17 | now := time.Now() 18 | b.Set("b", "vb", now.Add(time.Duration(2*time.Second))) 19 | b.Set("a", "va", now.Add(time.Duration(1*time.Second))) 20 | b.Set("c", "vc", now.Add(time.Duration(3*time.Second))) 21 | 22 | if v, _ := b.Get("a"); v != "va" { 23 | t.Error("") 24 | } 25 | if v, _ := b.Get("b"); v != "vb" { 26 | t.Error("") 27 | } 28 | if v, _ := b.Get("c"); v != "vc" { 29 | t.Error("") 30 | } 31 | 32 | b.Set("d", "vd", now.Add(time.Duration(4*time.Second))) 33 | if _, ok := b.Get("a"); ok { 34 | t.Error("Expecting element A to be evicted") 35 | } 36 | 37 | b.Set("e", "ve", now.Add(time.Duration(-4*time.Second))) 38 | if _, ok := b.Get("b"); ok { 39 | t.Error("Expecting element B to be evicted") 40 | } 41 | 42 | b.Set("f", "vf", now.Add(time.Duration(5*time.Second))) 43 | if _, ok := b.Get("e"); ok { 44 | t.Error("Expecting element E to be evicted") 45 | } 46 | 47 | if v, _ := b.Get("c"); v != "vc" { 48 | t.Error("Expecting element C to not be evicted") 49 | } 50 | n := now.Add(time.Duration(10 * time.Second)) 51 | b.SetNow("g", "vg", now.Add(time.Duration(5*time.Second)), n) 52 | if _, ok := b.Get("c"); ok { 53 | t.Error("Expecting element C to be evicted") 54 | } 55 | 56 | if b.Len() != 3 { 57 | t.Error("Expecting different length") 58 | } 59 | b.Del("miss") 60 | b.Del("g") 61 | if b.Len() != 2 { 62 | t.Error("Expecting different length") 63 | } 64 | 65 | b.Clear() 66 | if 
b.Len() != 0 { 67 | t.Error("Expecting different length") 68 | } 69 | 70 | now = time.Now() 71 | b.Set("b", "vb", now.Add(time.Duration(2*time.Second))) 72 | b.Set("a", "va", now.Add(time.Duration(1*time.Second))) 73 | b.Set("d", "vd", now.Add(time.Duration(4*time.Second))) 74 | b.Set("c", "vc", now.Add(time.Duration(3*time.Second))) 75 | 76 | if _, ok := b.Get("b"); ok { 77 | t.Error("Expecting miss") 78 | } 79 | 80 | b.GetQuiet("miss") 81 | if v, _ := b.GetQuiet("a"); v != "va" { 82 | t.Error("Expecting hit") 83 | } 84 | 85 | b.Set("e", "ve", now.Add(time.Duration(5*time.Second))) 86 | if _, ok := b.Get("a"); ok { 87 | t.Error("Expecting miss") 88 | } 89 | 90 | if b.Capacity() != 3 { 91 | t.Error("Expecting different capacity") 92 | } 93 | } 94 | 95 | func TestBasicNoExpiry(t *testing.T) { 96 | t.Parallel() 97 | b := NewLRUCache(3) 98 | if _, ok := b.Get("a"); ok { 99 | t.Error("") 100 | } 101 | 102 | b.Set("b", "vb", time.Time{}) 103 | b.Set("a", "va", time.Time{}) 104 | b.Set("c", "vc", time.Time{}) 105 | b.Set("d", "vd", time.Time{}) 106 | 107 | if _, ok := b.Get("b"); ok { 108 | t.Error("expecting miss") 109 | } 110 | 111 | if v, _ := b.Get("a"); v != "va" { 112 | t.Error("expecting hit") 113 | } 114 | if v, _ := b.Get("c"); v != "vc" { 115 | t.Error("expecting hit") 116 | } 117 | if v, _ := b.Get("d"); v != "vd" { 118 | t.Error("expecting hit") 119 | } 120 | 121 | past := time.Now().Add(time.Duration(-10 * time.Second)) 122 | 123 | b.Set("e", "ve", past) 124 | 125 | if _, ok := b.Get("a"); ok { 126 | t.Error("expecting miss") 127 | } 128 | if v, _ := b.Get("e"); v != "ve" { 129 | t.Error("expecting hit") 130 | } 131 | 132 | // Make sure expired items get evicted before items without expiry 133 | b.Set("f", "vf", time.Time{}) 134 | if _, ok := b.Get("e"); ok { 135 | t.Error("expecting miss") 136 | } 137 | 138 | r := b.Clear() 139 | if b.Len() != 0 || r != 3 { 140 | t.Error("Expecting different length") 141 | } 142 | 143 | b.Set("c", "vc", time.Time{}) 144 | 
b.Set("d", "vd", time.Time{}) 145 | b.Set("e", "ve", past) 146 | 147 | if b.Len() != 3 { 148 | t.Error("Expecting different length") 149 | } 150 | r = b.Expire() 151 | if b.Len() != 2 || r != 1 { 152 | t.Error("Expecting different length") 153 | } 154 | r = b.Clear() 155 | if b.Len() != 0 || r != 2 { 156 | t.Error("Expecting different length") 157 | } 158 | } 159 | 160 | func TestNil(t *testing.T) { 161 | t.Parallel() 162 | b := NewLRUCache(3) 163 | 164 | // value nil 165 | if v, ok := b.Get("a"); v != nil || ok != false { 166 | t.Error("expecting miss") 167 | } 168 | 169 | b.Set("a", nil, time.Time{}) 170 | 171 | if v, ok := b.Get("a"); v != nil || ok != true { 172 | t.Error("expecting hit") 173 | } 174 | 175 | // value not nil (sanity check) 176 | if v, ok := b.Get("b"); v != nil || ok != false { 177 | t.Error("expecting miss") 178 | } 179 | 180 | b.Set("b", "vb", time.Time{}) 181 | 182 | if v, ok := b.Get("b"); v != "vb" || ok != true { 183 | t.Error("expecting miss") 184 | } 185 | } 186 | 187 | func rec(foo func()) (recovered int) { 188 | recovered = 0 189 | defer func() { 190 | if r := recover(); r != nil { 191 | recovered += 1 192 | } 193 | }() 194 | foo() 195 | return recovered 196 | } 197 | 198 | func TestPanicByValue(t *testing.T) { 199 | t.Parallel() 200 | b := NewLRUCache(3) 201 | 202 | b.Set("a", "a", time.Time{}) 203 | 204 | c := *b 205 | r := rec(func() { 206 | c.Del("a") 207 | }) 208 | if r != 1 { 209 | t.Error("Expecting panic") 210 | } 211 | 212 | b.Del("a") 213 | 214 | r = rec(func() { 215 | c.Set("a", "A", time.Time{}) 216 | }) 217 | if r != 1 { 218 | t.Error("Expecting panic") 219 | } 220 | } 221 | 222 | func TestZeroLength(t *testing.T) { 223 | t.Parallel() 224 | b := NewLRUCache(0) 225 | 226 | if _, ok := b.Get("a"); ok { 227 | t.Error("Expected miss") 228 | } 229 | 230 | b.Set("a", "va", time.Time{}) 231 | if _, ok := b.Get("a"); ok { 232 | t.Error("Expected miss") 233 | } 234 | 235 | b.Clear() 236 | } 237 | 238 | func TestExtra(t *testing.T) 
{ 239 | t.Parallel() 240 | b := NewLRUCache(3) 241 | if _, ok := b.Get("a"); ok { 242 | t.Error("") 243 | } 244 | 245 | now := time.Now() 246 | b.Set("b", "vb", now.Add(time.Duration(-2*time.Second))) 247 | b.Set("a", "va", now.Add(time.Duration(-1*time.Second))) 248 | b.Set("c", "vc", now.Add(time.Duration(3*time.Second))) 249 | 250 | if v, _ := b.Get("a"); v != "va" { 251 | t.Error("expecting value") 252 | } 253 | 254 | if _, ok := b.GetNotStale("a"); ok { 255 | t.Error("not expecting value") 256 | } 257 | if _, ok := b.GetNotStale("miss"); ok { 258 | t.Error("not expecting value") 259 | } 260 | if v, _ := b.GetNotStale("c"); v != "vc" { 261 | t.Error("expecting hit") 262 | } 263 | 264 | if b.Len() != 2 { 265 | t.Error("Expecting different length") 266 | } 267 | if b.Expire() != 1 { 268 | t.Error("Expecting different length") 269 | } 270 | } 271 | 272 | func randomString(l int) string { 273 | bytes := make([]byte, l) 274 | for i := 0; i < l; i++ { 275 | bytes[i] = byte(65 + rand.Intn(90-65)) 276 | } 277 | return string(bytes) 278 | } 279 | 280 | func createFilledBucket(expire time.Time) *LRUCache { 281 | b := NewLRUCache(1000) 282 | for i := 0; i < 1000; i++ { 283 | b.Set(randomString(2), "value", expire) 284 | } 285 | return b 286 | } 287 | 288 | func TestConcurrentGet(t *testing.T) { 289 | t.Parallel() 290 | b := createFilledBucket(time.Now().Add(time.Duration(4))) 291 | 292 | done := make(chan bool) 293 | worker := func() { 294 | for i := 0; i < 10000; i++ { 295 | b.Get(randomString(2)) 296 | } 297 | done <- true 298 | } 299 | workers := 4 300 | for i := 0; i < workers; i++ { 301 | go worker() 302 | } 303 | for i := 0; i < workers; i++ { 304 | _ = <-done 305 | } 306 | } 307 | 308 | func TestConcurrentSet(t *testing.T) { 309 | t.Parallel() 310 | b := createFilledBucket(time.Now().Add(time.Duration(4))) 311 | 312 | done := make(chan bool) 313 | worker := func() { 314 | expire := time.Now().Add(time.Duration(4 * time.Second)) 315 | for i := 0; i < 10000; i++ { 
316 | b.Set(randomString(2), "value", expire) 317 | } 318 | done <- true 319 | } 320 | workers := 4 321 | for i := 0; i < workers; i++ { 322 | go worker() 323 | } 324 | for i := 0; i < workers; i++ { 325 | _ = <-done 326 | } 327 | } 328 | 329 | func BenchmarkConcurrentGetLRUCache(bb *testing.B) { 330 | b := createFilledBucket(time.Now().Add(time.Duration(4))) 331 | 332 | cpu := runtime.GOMAXPROCS(0) 333 | ch := make(chan bool) 334 | worker := func() { 335 | for i := 0; i < bb.N/cpu; i++ { 336 | b.Get(randomString(2)) 337 | } 338 | ch <- true 339 | } 340 | for i := 0; i < cpu; i++ { 341 | go worker() 342 | } 343 | for i := 0; i < cpu; i++ { 344 | _ = <-ch 345 | } 346 | } 347 | 348 | func BenchmarkConcurrentSetLRUCache(bb *testing.B) { 349 | b := createFilledBucket(time.Now().Add(time.Duration(4))) 350 | 351 | cpu := runtime.GOMAXPROCS(0) 352 | ch := make(chan bool) 353 | worker := func() { 354 | for i := 0; i < bb.N/cpu; i++ { 355 | expire := time.Now().Add(time.Duration(4 * time.Second)) 356 | b.Set(randomString(2), "v", expire) 357 | } 358 | ch <- true 359 | } 360 | for i := 0; i < cpu; i++ { 361 | go worker() 362 | } 363 | for i := 0; i < cpu; i++ { 364 | _ = <-ch 365 | } 366 | } 367 | 368 | // No expiry 369 | func BenchmarkConcurrentSetNXLRUCache(bb *testing.B) { 370 | b := createFilledBucket(time.Time{}) 371 | 372 | cpu := runtime.GOMAXPROCS(0) 373 | ch := make(chan bool) 374 | worker := func() { 375 | for i := 0; i < bb.N/cpu; i++ { 376 | b.Set(randomString(2), "v", time.Time{}) 377 | } 378 | ch <- true 379 | } 380 | for i := 0; i < cpu; i++ { 381 | go worker() 382 | } 383 | for i := 0; i < cpu; i++ { 384 | _ = <-ch 385 | } 386 | } 387 | -------------------------------------------------------------------------------- /kt/kt_base_test.go: -------------------------------------------------------------------------------- 1 | package kt 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "os" 7 | "os/exec" 8 | "reflect" 9 | "strconv" 10 | "syscall" 11 | "testing" 12 | 
"time" 13 | ) 14 | 15 | const ( 16 | KTHOST = "127.0.0.1" 17 | KTPORT = 23034 18 | ) 19 | 20 | func startServerUnix(t testing.TB, sockAddr string) *exec.Cmd { 21 | db := "/tmp/test.rocksdb" 22 | 23 | cmd := exec.Command("qsutil", "db", "create", db) 24 | // This is a hack. As long as QS is running the file won't be removed by the kernel. 25 | defer os.RemoveAll(db) 26 | 27 | if err := cmd.Run(); err != nil { 28 | t.Fatal("failed to create QS DB: ", err) 29 | } 30 | 31 | cmd = exec.Command("qsdaemon", "--ktrpc=unix://"+sockAddr, "--cli=tcp4://localhost:4242", db) 32 | cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 33 | 34 | if err := cmd.Start(); err != nil { 35 | t.Fatal("failed to start QS: ", err) 36 | } 37 | 38 | for i := 0; ; i++ { 39 | conn, err := net.Dial("tcp", "localhost:4242") 40 | if err == nil { 41 | conn.Close() // close the probe conn before leaving the loop (was unreachable after break, leaking it) 42 | break 43 | } 44 | time.Sleep(50 * time.Millisecond) 45 | if i > 50 { 46 | t.Fatal("failed to start QS: ", err) 47 | } 48 | } 49 | 50 | cmdW := exec.Command("qsutil", "cli", "set", "--server=tcp4://localhost:4242", "1", "2") 51 | if err := cmdW.Run(); err != nil { 52 | t.Fatal("failed to write to QS: ", err) 53 | } 54 | 55 | return cmd 56 | } 57 | 58 | func startServer(t testing.TB) *exec.Cmd { 59 | port := strconv.Itoa(KTPORT) 60 | 61 | if _, err := net.Dial("tcp", KTHOST+":"+port); err == nil { 62 | t.Fatal("Not expecting ktserver to exist yet. 
Perhaps: killall ktserver?") 63 | } 64 | 65 | cmd := exec.Command("ktserver", "-host", KTHOST, "-port", port, "%") 66 | cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 67 | 68 | if err := cmd.Start(); err != nil { 69 | t.Fatal("failed to start KT: ", err) 70 | } 71 | 72 | for i := 0; ; i++ { 73 | conn, err := net.Dial("tcp", KTHOST+":"+port) 74 | if err == nil { 75 | conn.Close() 76 | return cmd 77 | } 78 | time.Sleep(50 * time.Millisecond) 79 | if i > 50 { 80 | t.Fatal("failed to start KT: ", err) 81 | } 82 | } 83 | } 84 | 85 | func haltServer(cmd *exec.Cmd, t testing.TB) { 86 | defer os.RemoveAll("/tmp/bad.sock") 87 | 88 | // QS forks a child for zero downtime upgrade so we need this hackery 89 | syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) 90 | 91 | if _, err := cmd.Process.Wait(); err != nil { 92 | t.Fatal("failed to halt KT: ", err) 93 | } 94 | } 95 | 96 | func TestCount(t *testing.T) { 97 | ctx := context.Background() 98 | cmd := startServer(t) 99 | defer haltServer(cmd, t) 100 | 101 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 102 | if err != nil { 103 | t.Fatal(err.Error()) 104 | } 105 | 106 | db.set(ctx, "name", []byte("Steve Vai")) 107 | if n, err := db.Count(ctx); err != nil { 108 | t.Error(err) 109 | } else if n != 1 { 110 | t.Errorf("Count failed: want 1, got %d.", n) 111 | } 112 | } 113 | 114 | func TestGetSet(t *testing.T) { 115 | ctx := context.Background() 116 | cmd := startServer(t) 117 | defer haltServer(cmd, t) 118 | 119 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 120 | if err != nil { 121 | t.Fatal(err.Error()) 122 | } 123 | keys := []string{"a", "b", "c"} 124 | for _, k := range keys { 125 | db.set(ctx, k, []byte(k)) 126 | got, _ := db.Get(ctx, k) 127 | if got != k { 128 | t.Errorf("Get failed: want %s, got %s.", k, got) 129 | } 130 | } 131 | } 132 | 133 | func TestMatchPrefix(t *testing.T) { 134 | ctx := context.Background() 135 | cmd := startServer(t) 136 | defer haltServer(cmd, t) 137 | db, err := 
NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 138 | if err != nil { 139 | t.Fatal(err.Error()) 140 | } 141 | 142 | keys := []string{ 143 | "cache/news/1", 144 | "cache/news/2", 145 | "cache/news/3", 146 | "cache/news/4", 147 | } 148 | for _, k := range keys { 149 | db.set(ctx, k, []byte("something")) 150 | } 151 | var tests = []struct { 152 | max int64 153 | prefix string 154 | expected []string 155 | }{ 156 | { 157 | max: 2, 158 | prefix: "cache/news", 159 | expected: keys[:2], 160 | }, 161 | { 162 | max: 10, 163 | prefix: "cache/news", 164 | expected: keys, 165 | }, 166 | { 167 | max: 10, 168 | prefix: "/cache/news", 169 | expected: nil, 170 | }, 171 | } 172 | for _, tt := range tests { 173 | values, err := db.MatchPrefix(ctx, tt.prefix, tt.max) 174 | if err != nil && tt.expected != nil { 175 | t.Fatal(err) 176 | } 177 | if !reflect.DeepEqual(values, tt.expected) { 178 | t.Errorf("db.MatchPrefix(%q, 2). Want %#v. Got %#v.", tt.prefix, tt.expected, values) 179 | } 180 | } 181 | 182 | values, err := db.MatchPrefix(ctx, "//////////DoNotExistAAAAAA", 1028) 183 | if len(values) != 0 || err != ErrSuccess { 184 | t.Errorf("db.MatchPrefix(DoNotExistAAAAAA, 1000). Want %d, got %d", len(values), err) 185 | } 186 | 187 | values, err = db.MatchPrefix(ctx, "//////////DoNotExistBBBBBB", 1028) 188 | if len(values) != 0 || err != ErrSuccess { 189 | t.Errorf("db.MatchPrefix(//////////DoNotExistBBBBBB, 1028). Want %d, got %d", len(values), err) 190 | } 191 | 192 | values, err = db.MatchPrefix(ctx, "c", 1028) 193 | if len(values) != 4 || err != nil { 194 | t.Errorf("db.MatchPrefix(//////////DoNotExistBBBBBB, 1028). 
Want %d, got %d", len(values), err) 195 | } 196 | } 197 | 198 | func TestGetBulk(t *testing.T) { 199 | ctx := context.Background() 200 | cmd := startServer(t) 201 | defer haltServer(cmd, t) 202 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 203 | if err != nil { 204 | t.Fatal(err.Error()) 205 | } 206 | 207 | testKeys := map[string]string{} 208 | baseKeys := map[string]string{ 209 | "cache/news/1": "1", 210 | "cache/news/2": "2", 211 | "cache/news/3": "3", 212 | "cache/news/4": "4", 213 | "cache/news/5": "5", 214 | "cache/news/6": "6", 215 | } 216 | 217 | for k, v := range baseKeys { 218 | db.set(ctx, k, []byte(v)) 219 | testKeys[k] = "" 220 | } 221 | 222 | err = db.GetBulk(ctx, testKeys) 223 | if err != nil { 224 | t.Fatal(err) 225 | } 226 | 227 | for k, v := range baseKeys { 228 | if !reflect.DeepEqual(v, testKeys[k]) { 229 | t.Errorf("db.GetBulk(). Want %v. Got %v. for key %s", v, testKeys[k], k) 230 | } 231 | } 232 | 233 | // Now remove some keys 234 | db.remove(ctx, "cache/news/1") 235 | db.remove(ctx, "cache/news/2") 236 | delete(baseKeys, "cache/news/1") 237 | delete(baseKeys, "cache/news/2") 238 | 239 | err = db.GetBulk(ctx, testKeys) 240 | if err != nil { 241 | t.Fatal(err) 242 | } 243 | 244 | for k, v := range baseKeys { 245 | if !reflect.DeepEqual(v, testKeys[k]) { 246 | t.Errorf("db.GetBulk(). Want %v. Got %v. for key %s", v, testKeys[k], k) 247 | } 248 | } 249 | 250 | if _, ok := testKeys["cache/news/1"]; ok { 251 | t.Errorf("db.GetBulk(). 
Returned deleted key %v.", "cache/news/1") 252 | } 253 | } 254 | 255 | func TestSetGetRemoveBulk(t *testing.T) { 256 | ctx := context.Background() 257 | cmd := startServer(t) 258 | defer haltServer(cmd, t) 259 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 260 | if err != nil { 261 | t.Fatal(err.Error()) 262 | } 263 | 264 | testKeys := map[string]string{} 265 | baseKeys := map[string]string{ 266 | "cache/news/1": "1", 267 | "cache/news/2": "2", 268 | "cache/news/3": "3", 269 | "cache/news/4": "4", 270 | "cache/news/5": "5", 271 | "cache/news/6": "6", 272 | } 273 | removeKeys := make([]string, 0, len(baseKeys)) // length 0, capacity n — the original make([]string, n) pre-filled n empty strings, so append sent 2n entries (half of them "") to removeBulk 274 | 275 | for k := range baseKeys { 276 | testKeys[k] = "" 277 | removeKeys = append(removeKeys, k) 278 | } 279 | 280 | if _, err := db.setBulk(ctx, baseKeys); err != nil { 281 | t.Fatal(err) 282 | } 283 | 284 | if err := db.GetBulk(ctx, testKeys); err != nil { 285 | t.Fatal(err) 286 | } 287 | 288 | for k, v := range baseKeys { 289 | if !reflect.DeepEqual(v, testKeys[k]) { 290 | t.Errorf("db.GetBulk(). Want %v. Got %v. for key %s", v, testKeys[k], k) 291 | } 292 | } 293 | 294 | if _, err := db.removeBulk(ctx, removeKeys); err != nil { 295 | t.Fatal(err) 296 | } 297 | 298 | count, _ := db.Count(ctx) 299 | if count != 0 { 300 | t.Errorf("db.removeBulk(). Want %v. 
Got %v", 0, count) 301 | } 302 | } 303 | 304 | func TestGetBulkBytes(t *testing.T) { 305 | ctx := context.Background() 306 | cmd := startServer(t) 307 | defer haltServer(cmd, t) 308 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 309 | if err != nil { 310 | t.Fatal(err.Error()) 311 | } 312 | 313 | testKeys := map[string][]byte{} 314 | baseKeys := map[string][]byte{ 315 | "cache/news/1": []byte("1"), 316 | "cache/news/2": []byte("2"), 317 | "cache/news/3": []byte("3"), 318 | "cache/news/4": []byte("4"), 319 | "cache/news/5": []byte("5"), 320 | "cache/news/6": []byte("6"), 321 | } 322 | 323 | for k, v := range baseKeys { 324 | db.set(ctx, k, v) 325 | testKeys[k] = []byte("") 326 | } 327 | 328 | err = db.GetBulkBytes(ctx, testKeys) 329 | if err != nil { 330 | t.Fatal(err) 331 | } 332 | 333 | for k, v := range baseKeys { 334 | if !reflect.DeepEqual(v, testKeys[k]) { 335 | t.Errorf("db.GetBulk(). Want %v. Got %v. for key %s", v, testKeys[k], k) 336 | } 337 | } 338 | 339 | // Now remove some keys 340 | db.remove(ctx, "cache/news/4") 341 | delete(baseKeys, "cache/news/4") 342 | 343 | err = db.GetBulkBytes(ctx, testKeys) 344 | if err != nil { 345 | t.Fatal(err) 346 | } 347 | 348 | for k, v := range baseKeys { 349 | if !reflect.DeepEqual(v, testKeys[k]) { 350 | t.Errorf("db.GetBulkBytes(). Want %v. Got %v. for key %s", v, testKeys[k], k) 351 | } 352 | } 353 | 354 | if _, ok := testKeys["cache/news/4"]; ok { 355 | t.Errorf("db.GetBulkBytes(). Returned deleted key %v.", "cache/news/4") 356 | } 357 | 358 | noKeys := map[string][]byte{ 359 | "XXXcache/news/1": []byte(""), 360 | "XXXcache/news/2": []byte(""), 361 | "XXXcache/news/3": []byte(""), 362 | "XXXcache/news/4": []byte(""), 363 | "XXXcache/news/5": []byte(""), 364 | "XXXcache/news/6": []byte(""), 365 | } 366 | err = db.GetBulkBytes(ctx, noKeys) 367 | if err != nil { 368 | t.Fatal(err) 369 | } 370 | 371 | if len(noKeys) != 0 { 372 | t.Errorf("db.GetBulkBinary. 
Want %d, got %d", 0, len(noKeys)) 373 | } 374 | } 375 | 376 | func TestGetBulkBytesLargeValue(t *testing.T) { 377 | ctx := context.Background() 378 | cmd := startServer(t) 379 | defer haltServer(cmd, t) 380 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 381 | if err != nil { 382 | t.Fatal(err.Error()) 383 | } 384 | 385 | testKeys := map[string][]byte{} 386 | baseKeys := map[string][]byte{ 387 | "cache/news/1": []byte("v=spf1 mx a:alligator.org a:mailout11.intuit.com a:mailout12.intuit.com a:mailout13.intuit.com a:mailout14.intuit.com a:mailout21.intuit.com a:mailout22.intuit.com a:mailout23.intuit.com a:mailout24.intuit.com a:lvmailout01.intuit.com a:lvmailout02.intuit\" \".com a:lvmailout03.intuit.com a:lvmailappout10.intuit.com a:lvmailappout11.intuit.com a:lvmailappout12.intuit.com a:lvmailappout13.intuit.com a:lvmailappout20.intuit.com a:lvmailappout21.intuit.com a:lvmailappout22.intuit.com a:lvmailappout23.intuit.com a\" \":mailout1b.intuit.com a:mailout2b.intuit.com a:mailout3b.intuit.com a:mailout4b.intuit.com a:mailout101.intuit.com a:mailout102.intuit.com a:mailout103.intuit.com a:mailout104.intuit.com a:mailout201.intuit.com a:mailout202.intuit.com a:mailout203.intuit.\" \"com a:mailout204.intuit.com a:mailout4a.intuit.com a:mailout1a.intuit.com a:mailout2a.intuit.com a:mailout3a.intuit.com a:mailout5a.intuit.com ip4:209.251.131.160/28 ip4:206.154.105.172 ip4:206.154.105.173 ip4:206.154.105.174 ip4:206.154.105.175 ip4:206.1\" \"54.105.176 ip4:206.154.105.177 ip4:206.154.105.178 ip4:206.154.105.179 ip4:199.16.139.16 ip4:199.16.139.17 ip4:199.16.139.18 ip4:199.16.139.20 ip4:199.16.139.21 ip4:199.16.139.22 ip4:199.16.139.23 ip4:199.16.139.24 ip4:199.16.139.25 ip4:199.16.139.26 ip4:\" \"199.16.139.27 ip4:206.108.40.7 ip4:206.108.40.8 ip4:206.108.40.9 ip4:206.108.40.10 ip4:206.108.40.11 ip4:206.108.40.12 ip4:206.108.40.13 ip4:206.108.40.14 ip4:206.108.40.15 ip4:206.108.40.16 ip4:206.108.40.17 ip4:206.108.40.28 ip4:206.108.40.90 ip4:206.10\" \"8.40.91 
ip4:206.108.40.92 include:_spf.google.com -all"), 388 | "cache/news/2": []byte("2"), 389 | "cache/news/3": []byte("3"), 390 | "cache/news/4": []byte("sdjkfhsdkfjhskdjfhskdhfksdf"), 391 | "cache/news/5": []byte("3826498237rsjdhfkjsdhfkjhsdjkfhsdjkf2893yrjascmzxbncmnzbxvsefuwie"), 392 | "cache/news/6": []byte("6"), 393 | } 394 | 395 | for k, v := range baseKeys { 396 | db.set(ctx, k, v) 397 | testKeys[k] = []byte("") 398 | } 399 | 400 | err = db.GetBulkBytes(ctx, testKeys) 401 | if err != nil { 402 | t.Fatal(err) 403 | } 404 | 405 | for k, v := range baseKeys { 406 | if !reflect.DeepEqual(v, testKeys[k]) { 407 | t.Errorf("db.GetBulk(). Want %v. Got %v. for key %s", v, testKeys[k], k) 408 | } 409 | } 410 | 411 | err = db.GetBulkBytes(ctx, make(map[string][]byte)) 412 | if err != nil { 413 | t.Fatal(err) 414 | } 415 | 416 | wrong := make(map[string][]byte) 417 | wrong["/////doesntexitst"] = []byte("blah") 418 | 419 | err = db.GetBulkBytes(ctx, wrong) 420 | if err != nil { 421 | t.Fatal(err) 422 | } 423 | if len(wrong["/////doesntexitst"]) != 0 { 424 | t.Error(wrong["/////doesntexitst"]) 425 | } 426 | } 427 | 428 | func TestGetBytes(t *testing.T) { 429 | ctx := context.Background() 430 | cmd := startServer(t) 431 | defer haltServer(cmd, t) 432 | db, err := NewConn(KTHOST, KTPORT, 1, DEFAULT_TIMEOUT) 433 | if err != nil { 434 | t.Fatal(err.Error()) 435 | } 436 | 437 | _, err = db.GetBytes(ctx, "//doesntexist") 438 | if err != ErrNotFound { 439 | t.Fatal(err) 440 | } 441 | } 442 | 443 | func TestGetBytesUnix(t *testing.T) { 444 | sock := "/tmp/bad.sock" 445 | ctx := context.Background() 446 | cmd := startServerUnix(t, sock) 447 | 448 | defer haltServer(cmd, t) 449 | 450 | db, err := NewConn("unix://"+sock, 0, 1, DEFAULT_TIMEOUT) 451 | if err != nil { 452 | t.Fatal(err) 453 | } 454 | 455 | _, err = db.GetBytes(ctx, "//doesntexist") 456 | if err != ErrNotFound { 457 | t.Fatal(err) 458 | } 459 | v, err := db.GetBytes(ctx, "1") 460 | if err != nil { 461 | t.Fatal(err) 462 | } 
463 | 464 | if string(v) != "2" { 465 | t.Fatal("KV pair does not match") 466 | } 467 | } 468 | 469 | func TestIsError(t *testing.T) { 470 | err := &Error{Message: "What a hoopy frood"} 471 | if !IsError(err) { 472 | t.Error("IsError returns false") 473 | } 474 | } 475 | -------------------------------------------------------------------------------- /kt/kt.go: -------------------------------------------------------------------------------- 1 | package kt 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/base64" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "io/ioutil" 11 | "net" 12 | "net/http" 13 | "net/url" 14 | "os" 15 | "path" 16 | "strconv" 17 | "strings" 18 | "sync/atomic" 19 | "time" 20 | 21 | "crypto/tls" 22 | "crypto/x509" 23 | "encoding/pem" 24 | 25 | "github.com/opentracing/opentracing-go" 26 | "github.com/prometheus/client_golang/prometheus" 27 | ) 28 | 29 | var certExpiryTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 30 | Name: "ktrpc_client_certificate_expiry_timestamp", 31 | Help: "The certificate expiry timestamp (UNIX epoch UTC) labeled by the certificate serial number", 32 | }, 33 | []string{ 34 | "serial", 35 | }, 36 | ) 37 | 38 | func init() { 39 | prometheus.MustRegister(certExpiryTimestamp) 40 | } 41 | 42 | const DEFAULT_TIMEOUT = 2 * time.Second 43 | 44 | // Error is returned by all functions in this package. 45 | type Error struct { 46 | // Error returned by KT 47 | Message string 48 | // HTTP status code, if any (0 otherwise) 49 | Code int 50 | } 51 | 52 | func (e *Error) Error() string { 53 | return fmt.Sprint("kt: ", e.Message) // Sprint, not Sprintln: error strings must not end with a newline (ST1005); same text minus the trailing "\n" 54 | } 55 | 56 | // IsError returns true if the error was generated by this package. 57 | func IsError(err error) bool { 58 | _, ok := err.(*Error) 59 | return ok 60 | } 61 | 62 | // Conn represents a connection to a kyoto tycoon endpoint. 63 | // It uses a connection pool to efficiently communicate with the server. 64 | // Conn is safe for concurrent use. 
65 | type Conn struct { 66 | // Has to be first for atomic alignment 67 | retryCount uint64 68 | scheme string 69 | timeout time.Duration 70 | host string 71 | transport *http.Transport 72 | } 73 | 74 | func expiryCertMetric(certFile string) error { 75 | leftOverCert, err := ioutil.ReadFile(certFile) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | var cert *pem.Block 81 | 82 | for { 83 | // Some part of this bloc come from the go standard library 84 | 85 | // Several cert can be concatenated in the same file 86 | cert, leftOverCert = pem.Decode(leftOverCert) 87 | 88 | if cert == nil { 89 | // The end of the cert list 90 | return nil 91 | } 92 | 93 | if cert.Type != "CERTIFICATE" || len(cert.Headers) != 0 { 94 | // This is is from src/crypto/x509/cert_pool.go 95 | continue 96 | } 97 | 98 | xc, err := x509.ParseCertificate(cert.Bytes) 99 | if err != nil { 100 | return err 101 | } 102 | 103 | serial := (*xc.SerialNumber).String() 104 | expiry := xc.NotAfter 105 | 106 | b := certExpiryTimestamp.WithLabelValues(serial) 107 | b.Set(float64(expiry.Unix())) 108 | } 109 | } 110 | 111 | func certPaths(dir string) (cert, key, ca string, err error) { 112 | certSets := [][]string{ 113 | []string{"service.pem", "service-key.pem", "ca.pem"}, // certmgr 114 | []string{"tls.crt", "tls.key", "ca.crt"}, // kubernetes pki 115 | } 116 | 117 | for _, set := range certSets { 118 | goodSet := true 119 | cert = path.Join(dir, set[0]) 120 | key = path.Join(dir, set[1]) 121 | ca = path.Join(dir, set[2]) 122 | 123 | for _, file := range []string{cert, key, ca} { 124 | if _, err := os.Stat(file); os.IsNotExist(err) { 125 | goodSet = false 126 | break 127 | } 128 | } 129 | 130 | if goodSet { 131 | return cert, key, ca, nil 132 | } 133 | } 134 | return "", "", "", fmt.Errorf("there are no certificates in path: %s", dir) 135 | } 136 | 137 | func loadCerts(rootPath string, certPath string, keyPath string) (*tls.Certificate, *x509.CertPool, error) { 138 | 139 | err := 
expiryCertMetric(certPath) 140 | if err != nil { 141 | return nil, nil, err 142 | } 143 | 144 | certX509, err := tls.LoadX509KeyPair(certPath, keyPath) 145 | if err != nil { 146 | return nil, nil, err 147 | } 148 | 149 | err = expiryCertMetric(rootPath) 150 | if err != nil { 151 | return nil, nil, err 152 | } 153 | 154 | caFile, err := ioutil.ReadFile(rootPath) 155 | if err != nil { 156 | return nil, nil, err 157 | } 158 | 159 | roots := x509.NewCertPool() 160 | roots.AppendCertsFromPEM(caFile) 161 | 162 | return &certX509, roots, err 163 | } 164 | 165 | func newTLSClientConfig(rootPath string, certPath string, keyPath string) (*tls.Config, error) { 166 | 167 | certX509, roots, err := loadCerts(rootPath, certPath, keyPath) 168 | if err != nil { 169 | return nil, err 170 | } 171 | 172 | return &tls.Config{ 173 | Certificates: []tls.Certificate{*certX509}, 174 | RootCAs: roots, 175 | }, nil 176 | } 177 | 178 | // KT has 2 interfaces, A restful one and an RPC one. 179 | // The RESTful interface is usually much faster than 180 | // the RPC one, but not all methods are implemented. 181 | // Use the RESTFUL interfaces when we can and fallback 182 | // to the RPC one when needed. 183 | // 184 | // The RPC format uses tab separated values with a choice of encoding 185 | // for each of the fields. We use base64 since it is always safe. 186 | // 187 | // REST format is just the body of the HTTP request being the value. 
188 | 189 | func newConn(host string, port int, poolsize int, timeout time.Duration, certDir string) (*Conn, error) { 190 | var tlsConfig *tls.Config 191 | var err error 192 | 193 | scheme := "http" 194 | 195 | if certDir != "" { 196 | certPath, keyPath, rootPath, err := certPaths(certDir) 197 | if err != nil { 198 | return nil, err 199 | } 200 | 201 | tlsConfig, err = newTLSClientConfig(rootPath, certPath, keyPath) 202 | if err != nil { 203 | return nil, err 204 | } 205 | 206 | scheme = "https" 207 | } 208 | 209 | portstr := strconv.Itoa(port) 210 | c := &Conn{ 211 | scheme: scheme, 212 | timeout: timeout, 213 | host: net.JoinHostPort(host, portstr), 214 | transport: &http.Transport{ 215 | TLSClientConfig: tlsConfig, 216 | ResponseHeaderTimeout: timeout, 217 | MaxIdleConnsPerHost: poolsize, 218 | IdleConnTimeout: 30 * time.Second, 219 | }, 220 | } 221 | 222 | // connectivity check so that we can bail out 223 | // early instead of when we do the first operation. 224 | err = c.CheckConn() 225 | if err != nil { 226 | return nil, err 227 | } 228 | 229 | return c, nil 230 | } 231 | 232 | // NewConnTLS creates a TLS enabled connection to a Kyoto Tycoon endpoing 233 | func NewConnTLS(host string, port int, poolsize int, timeout time.Duration, certDir string) (*Conn, error) { 234 | return newConn(host, port, poolsize, timeout, certDir) 235 | } 236 | 237 | // NewConn creates a connection to an Kyoto Tycoon endpoint. 238 | func NewConn(host string, port int, poolsize int, timeout time.Duration) (*Conn, error) { 239 | parts := strings.Split(host, "://") 240 | 241 | if len(parts) == 1 { 242 | return newConn(host, port, poolsize, timeout, "") 243 | } else if len(parts) == 2 { 244 | if parts[0] != "unix" { 245 | return nil, errors.New("NewConn: Unexpected network") 246 | } 247 | return newUnixConn(parts[1], poolsize, timeout) 248 | } else { 249 | return nil, errors.New("NewConn: Wrong parameters") 250 | } 251 | } 252 | 253 | // NewClientWithTLS creates a TLS enabled connection. 
254 | // This method allows a custom path for the Root CA, and the Certificate and Key. 255 | func NewClientWithTLS(host string, port int, poolsize int, timeout time.Duration, rootPath string, certPath string, keyPath string) (*Conn, error) { 256 | 257 | tlsConfig, err := newTLSClientConfig(rootPath, certPath, keyPath) 258 | if err != nil { 259 | return nil, err 260 | } 261 | 262 | portstr := strconv.Itoa(port) 263 | c := &Conn{ 264 | scheme: "https", 265 | timeout: timeout, 266 | host: net.JoinHostPort(host, portstr), 267 | transport: &http.Transport{ 268 | TLSClientConfig: tlsConfig, 269 | ResponseHeaderTimeout: timeout, 270 | MaxIdleConnsPerHost: poolsize, 271 | IdleConnTimeout: 30 * time.Second, 272 | }, 273 | } 274 | 275 | // connectivity check so that we can bail out 276 | // early instead of when we do the first operation. 277 | err = c.CheckConn() 278 | if err != nil { 279 | return nil, err 280 | } 281 | 282 | return c, nil 283 | } 284 | 285 | // CheckConn can be used to check connection to Kyoto Tycoon endpoint is working as expected. 
286 | func (c *Conn) CheckConn() error { 287 | ctx, cancel := context.WithTimeout(context.Background(), c.timeout) 288 | defer cancel() 289 | 290 | _, _, err := c.doRPC(ctx, "/rpc/void", nil) 291 | return err 292 | } 293 | 294 | type unixDialer struct { 295 | net.Dialer 296 | } 297 | 298 | func (d *unixDialer) Dial(network, address string) (net.Conn, error) { 299 | // Hackery to remove port 80 300 | parts := strings.Split(address, ":") 301 | return d.Dialer.Dial("unix", parts[0]) 302 | } 303 | 304 | func newUnixConn(socket string, poolsize int, timeout time.Duration) (*Conn, error) { 305 | c := Conn{ 306 | scheme: "http", 307 | timeout: timeout, 308 | host: socket, 309 | transport: &http.Transport{ 310 | ResponseHeaderTimeout: timeout, 311 | IdleConnTimeout: 30 * time.Second, 312 | 313 | Dial: (&unixDialer{net.Dialer{ 314 | Timeout: 30 * time.Second, 315 | KeepAlive: 30 * time.Second, 316 | }, 317 | }).Dial}, 318 | } 319 | 320 | // connectivity check so that we can bail out 321 | // early instead of when we do the first operation. 322 | err := c.CheckConn() 323 | if err != nil { 324 | return nil, err 325 | } 326 | 327 | return &c, nil 328 | } 329 | 330 | var ( 331 | ErrTimeout error = &Error{Message: "operation timeout"} 332 | // the wording on this error is deliberately weird, 333 | // because users would search for the string logical inconsistency 334 | // in order to find lookup misses. 335 | ErrNotFound = &Error{Message: "entry not found aka logical inconsistency"} 336 | // old gokabinet returned this error on success. Keeping around "for compatibility" until 337 | // I can kill it with fire. 338 | ErrSuccess = &Error{Message: "success"} 339 | ) 340 | 341 | // RetryCount is the number of retries performed due to the remote end 342 | // closing idle connections. 343 | // 344 | // The value increases monotonically, until it wraps to 0. 
345 | func (c *Conn) RetryCount() uint64 { 346 | return atomic.LoadUint64(&c.retryCount) 347 | } 348 | 349 | // Count returns the number of records in the database 350 | func (c *Conn) Count(ctx context.Context) (int, error) { 351 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc Count") 352 | defer span.Finish() 353 | span.SetTag("url", "/rpc/status") 354 | 355 | code, m, err := c.doRPC(ctx, "/rpc/status", nil) 356 | if err != nil { 357 | span.SetTag("status", err) 358 | return 0, err 359 | } 360 | 361 | if code != 200 { 362 | err := makeError(m) 363 | span.SetTag("status", err) 364 | return 0, err 365 | } 366 | return strconv.Atoi(string(findRec(m, "count").Value)) 367 | } 368 | 369 | func (c *Conn) remove(ctx context.Context, key string) error { 370 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc Remove") 371 | defer span.Finish() 372 | 373 | code, body, err := c.doREST(ctx, "DELETE", key, nil) 374 | if err != nil { 375 | span.SetTag("status", err) 376 | return err 377 | } 378 | if code == 404 { 379 | span.SetTag("status", "not_found") 380 | return ErrNotFound 381 | } 382 | if code != 204 { 383 | err := &Error{string(body), code} 384 | span.SetTag("status", err) 385 | return err 386 | } 387 | return nil 388 | } 389 | 390 | // GetBulk retrieves the keys in the map. The results will be filled in on function return. 391 | // If a key was not found in the database, it will be removed from the map. 
392 | func (c *Conn) GetBulk(ctx context.Context, keysAndVals map[string]string) error { 393 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc GetBulk") 394 | defer span.Finish() 395 | 396 | m := make(map[string][]byte) 397 | for k := range keysAndVals { 398 | m[k] = zeroslice 399 | } 400 | err := c.doGetBulkBytes(ctx, m) 401 | if err != nil { 402 | span.SetTag("status", err) 403 | return err 404 | } 405 | for k := range keysAndVals { 406 | b, ok := m[k] 407 | if ok { 408 | keysAndVals[k] = string(b) 409 | } else { 410 | delete(keysAndVals, k) 411 | } 412 | } 413 | return nil 414 | } 415 | 416 | // Get retrieves the data stored at key. ErrNotFound is 417 | // returned if no such data exists 418 | func (c *Conn) Get(ctx context.Context, key string) (string, error) { 419 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc Get") 420 | defer span.Finish() 421 | span.SetTag("key", key) 422 | s, err := c.doGet(ctx, key) 423 | if err != nil { 424 | return "", err 425 | } 426 | return string(s), nil 427 | } 428 | 429 | // doGet perform http request to retrieve the value associated with key 430 | func (c *Conn) doGet(ctx context.Context, key string) ([]byte, error) { 431 | span := opentracing.SpanFromContext(ctx) 432 | 433 | code, body, err := c.doREST(ctx, "GET", key, nil) 434 | if err != nil { 435 | span.SetTag("err", err) 436 | return nil, err 437 | } 438 | 439 | switch code { 440 | case 200: 441 | span.SetTag("status", "ok") 442 | break 443 | case 404: 444 | span.SetTag("status", "not_found") 445 | return nil, ErrNotFound 446 | default: 447 | err := &Error{string(body), code} 448 | span.SetTag("status", err) 449 | return nil, err 450 | } 451 | return body, nil 452 | } 453 | 454 | // GetBytes retrieves the data stored at key in the format of a byte slice 455 | // ErrNotFound is returned if no such data is found. 
456 | func (c *Conn) GetBytes(ctx context.Context, key string) ([]byte, error) { 457 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc GetBytes") 458 | defer span.Finish() 459 | span.SetTag("key", key) 460 | return c.doGet(ctx, key) 461 | } 462 | 463 | // Set stores the data at key 464 | func (c *Conn) set(ctx context.Context, key string, value []byte) error { 465 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc Set") 466 | defer span.Finish() 467 | 468 | code, body, err := c.doREST(ctx, "PUT", key, value) 469 | if err != nil { 470 | return err 471 | } 472 | if code != 201 { 473 | return &Error{string(body), code} 474 | } 475 | 476 | return nil 477 | } 478 | 479 | var zeroslice = []byte("0") 480 | 481 | // GetBulkBytes retrieves the keys in the map. The results will be filled in on function return. 482 | // If a key was not found in the database, it will be removed from the map. 483 | func (c *Conn) GetBulkBytes(ctx context.Context, keys map[string][]byte) error { 484 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc GetBulkBytes") 485 | defer span.Finish() 486 | err := c.doGetBulkBytes(ctx, keys) 487 | if err != nil { 488 | span.SetTag("status", err) 489 | } 490 | return err 491 | } 492 | 493 | // doGetBulkBytes retrieves the keys in the map. The results will be filled in on function return. 494 | // If a key was not found in the database, it will be removed from the map. 495 | func (c *Conn) doGetBulkBytes(ctx context.Context, keys map[string][]byte) error { 496 | 497 | // The format for querying multiple keys in KT is to send a 498 | // TSV value for each key with a _ as a prefix. 499 | // KT then returns the value as a TSV set with _ in front of the keys 500 | keystransmit := make([]KV, 0, len(keys)) 501 | for k, _ := range keys { 502 | // we set the value to nil because we want a sentinel value 503 | // for when no data was found. 
This is important for 504 | // when we remove the not found keys from the map 505 | keys[k] = nil 506 | keystransmit = append(keystransmit, KV{"_" + k, zeroslice}) 507 | } 508 | 509 | code, m, err := c.doRPC(ctx, "/rpc/get_bulk", keystransmit) 510 | if err != nil { 511 | return err 512 | } 513 | if code != 200 { 514 | return makeError(m) 515 | } 516 | for _, kv := range m { 517 | if kv.Key[0] != '_' { 518 | continue 519 | } 520 | keys[kv.Key[1:]] = kv.Value 521 | } 522 | for k, v := range keys { 523 | if v == nil { 524 | delete(keys, k) 525 | } 526 | } 527 | return nil 528 | } 529 | 530 | // SetBulk stores the values in the map. 531 | func (c *Conn) setBulk(ctx context.Context, values map[string]string) (int64, error) { 532 | vals := make([]KV, 0, len(values)) 533 | for k, v := range values { 534 | vals = append(vals, KV{"_" + k, []byte(v)}) 535 | } 536 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc SetBulk") 537 | defer span.Finish() 538 | 539 | code, m, err := c.doRPC(ctx, "/rpc/set_bulk", vals) 540 | if err != nil { 541 | span.SetTag("status", err) 542 | return 0, err 543 | } 544 | if code != 200 { 545 | span.SetTag("status", code) 546 | return 0, makeError(m) 547 | } 548 | return strconv.ParseInt(string(findRec(m, "num").Value), 10, 64) 549 | } 550 | 551 | func (c *Conn) removeBulk(ctx context.Context, keys []string) (int64, error) { 552 | vals := make([]KV, 0, len(keys)) 553 | for _, k := range keys { 554 | vals = append(vals, KV{"_" + k, zeroslice}) 555 | } 556 | 557 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc RemoveBulk") 558 | defer span.Finish() 559 | 560 | code, m, err := c.doRPC(ctx, "/rpc/remove_bulk", vals) 561 | if err != nil { 562 | span.SetTag("status", err) 563 | return 0, err 564 | } 565 | if code != 200 { 566 | span.SetTag("status", code) 567 | return 0, makeError(m) 568 | } 569 | return strconv.ParseInt(string(findRec(m, "num").Value), 10, 64) 570 | } 571 | 572 | // MatchPrefix performs the match_prefix operation 
against the server 573 | // It returns a sorted list of strings. 574 | // The error may be ErrSuccess in the case that no records were found. 575 | // This is for compatibility with the old gokabinet library. 576 | func (c *Conn) MatchPrefix(ctx context.Context, key string, maxrecords int64) ([]string, error) { 577 | keystransmit := []KV{ 578 | {"prefix", []byte(key)}, 579 | {"max", []byte(strconv.FormatInt(maxrecords, 10))}, 580 | } 581 | 582 | span, ctx := opentracing.StartSpanFromContext(ctx, "ktrpc MatchPrefix") 583 | defer span.Finish() 584 | span.SetTag("prefix", key) 585 | span.SetTag("limit", maxrecords) 586 | 587 | code, m, err := c.doRPC(ctx, "/rpc/match_prefix", keystransmit) 588 | if err != nil { 589 | span.SetTag("status", err) 590 | return nil, err 591 | } 592 | if code != 200 { 593 | span.SetTag("status", code) 594 | return nil, makeError(m) 595 | } 596 | res := make([]string, 0, len(m)) 597 | for _, kv := range m { 598 | if kv.Key[0] == '_' { 599 | res = append(res, string(kv.Key[1:])) 600 | } 601 | } 602 | if len(res) == 0 { 603 | span.SetTag("status", ErrSuccess) 604 | // yeah, gokabinet was weird here. 605 | return nil, ErrSuccess 606 | } 607 | return res, nil 608 | } 609 | 610 | var base64headers http.Header 611 | var identityheaders http.Header 612 | 613 | func init() { 614 | identityheaders = make(http.Header) 615 | identityheaders.Set("Content-Type", "text/tab-separated-values") 616 | base64headers = make(http.Header) 617 | base64headers.Set("Content-Type", "text/tab-separated-values; colenc=B") 618 | } 619 | 620 | // KV uses an explicit structure here rather than a map[string][]byte 621 | // because we need ordered data. 622 | type KV struct { 623 | Key string 624 | Value []byte 625 | } 626 | 627 | // Do an RPC call against the KT endpoint. 
628 | func (c *Conn) doRPC(ctx context.Context, path string, values []KV) (code int, vals []KV, err error) { 629 | url := &url.URL{ 630 | Scheme: c.scheme, 631 | Host: c.host, 632 | Path: path, 633 | } 634 | 635 | body, enc := TSVEncode(values) 636 | headers := identityheaders 637 | if enc == Base64Enc { 638 | headers = base64headers 639 | } 640 | resp, t, err := c.roundTrip(ctx, "POST", url, headers, body) 641 | if err != nil { 642 | return 0, nil, err 643 | } 644 | resultBody, err := ioutil.ReadAll(resp.Body) 645 | resp.Body.Close() 646 | if !t.Stop() { 647 | return 0, nil, ErrTimeout 648 | } 649 | if err != nil { 650 | return 0, nil, err 651 | } 652 | m, err := DecodeValues(resultBody, resp.Header.Get("Content-Type")) 653 | if err != nil { 654 | return 0, nil, err 655 | } 656 | return resp.StatusCode, m, nil 657 | } 658 | 659 | func (c *Conn) roundTrip(ctx context.Context, method string, url *url.URL, headers http.Header, body []byte) (*http.Response, *time.Timer, error) { 660 | req, t := c.makeRequest(ctx, method, url, headers, body) 661 | resp, err := c.transport.RoundTrip(req) 662 | if err != nil { 663 | // Ideally we would only retry when we hit a network error. This doesn't work 664 | // since net/http wraps some of these errors. Do the simple thing and retry eagerly. 
665 | t.Stop() 666 | c.transport.CloseIdleConnections() 667 | req, t = c.makeRequest(ctx, method, url, headers, body) 668 | resp, err = c.transport.RoundTrip(req) 669 | atomic.AddUint64(&c.retryCount, 1) 670 | } 671 | if err != nil { 672 | if !t.Stop() { 673 | err = ErrTimeout 674 | } 675 | return nil, nil, err 676 | } 677 | return resp, t, nil 678 | } 679 | 680 | func (c *Conn) makeRequest(ctx context.Context, method string, url *url.URL, headers http.Header, body []byte) (*http.Request, *time.Timer) { 681 | var rc io.ReadCloser 682 | if body != nil { 683 | rc = ioutil.NopCloser(bytes.NewReader(body)) 684 | } 685 | 686 | // inject span context into the HTTP request header to propagate it 687 | // to server-side 688 | if span := opentracing.SpanFromContext(ctx); span != nil { 689 | opentracing.GlobalTracer().Inject( 690 | span.Context(), 691 | opentracing.HTTPHeaders, 692 | opentracing.HTTPHeadersCarrier(headers), 693 | ) 694 | } 695 | 696 | req := &http.Request{ 697 | Method: method, 698 | URL: url, 699 | Header: headers, 700 | Body: rc, 701 | ContentLength: int64(len(body)), 702 | } 703 | 704 | req = req.WithContext(ctx) 705 | 706 | t := time.AfterFunc(c.timeout, func() { 707 | c.transport.CancelRequest(req) 708 | }) 709 | return req, t 710 | } 711 | 712 | type Encoding int 713 | 714 | const ( 715 | IdentityEnc Encoding = iota 716 | Base64Enc 717 | ) 718 | 719 | // Encode the request body in TSV. 
The encoding is chosen based 720 | // on whether there are any binary data in the key/values 721 | func TSVEncode(values []KV) ([]byte, Encoding) { 722 | var bufsize int 723 | var hasbinary bool 724 | for _, kv := range values { 725 | // length of key 726 | hasbinary = hasbinary || hasBinary(kv.Key) 727 | bufsize += base64.StdEncoding.EncodedLen(len(kv.Key)) 728 | // tab 729 | bufsize += 1 730 | // value 731 | hasbinary = hasbinary || hasBinarySlice(kv.Value) 732 | bufsize += base64.StdEncoding.EncodedLen(len(kv.Value)) 733 | // newline 734 | bufsize += 1 735 | } 736 | buf := make([]byte, bufsize) 737 | var n int 738 | for _, kv := range values { 739 | if hasbinary { 740 | base64.StdEncoding.Encode(buf[n:], []byte(kv.Key)) 741 | n += base64.StdEncoding.EncodedLen(len(kv.Key)) 742 | } else { 743 | n += copy(buf[n:], kv.Key) 744 | } 745 | buf[n] = '\t' 746 | n++ 747 | if hasbinary { 748 | base64.StdEncoding.Encode(buf[n:], kv.Value) 749 | n += base64.StdEncoding.EncodedLen(len(kv.Value)) 750 | } else { 751 | n += copy(buf[n:], kv.Value) 752 | } 753 | buf[n] = '\n' 754 | n++ 755 | } 756 | enc := IdentityEnc 757 | if hasbinary { 758 | enc = Base64Enc 759 | } 760 | return buf, enc 761 | } 762 | 763 | func hasBinary(b string) bool { 764 | for i := 0; i < len(b); i++ { 765 | c := b[i] 766 | if c < 0x20 || c > 0x7e { 767 | return true 768 | } 769 | } 770 | return false 771 | } 772 | 773 | func hasBinarySlice(b []byte) bool { 774 | for _, c := range b { 775 | if c < 0x20 || c > 0x7e { 776 | return true 777 | } 778 | } 779 | return false 780 | } 781 | 782 | // DecodeValues takes a response from an KT RPC call decodes it into a list of key 783 | // value pairs. 784 | func DecodeValues(buf []byte, contenttype string) ([]KV, error) { 785 | if len(buf) == 0 { 786 | return nil, nil 787 | } 788 | // Ideally, we should parse the mime media type here, 789 | // but this is an expensive operation because mime is just 790 | // that awful. 
791 | // 792 | // KT can return values in 3 different formats, Tab separated values (TSV) without any field encoding, 793 | // TSV with fields base64 encoded or TSV with URL encoding. 794 | // KT does not give you any option as to the format that it returns, so we have to implement all of them 795 | // 796 | // KT responses are pretty simple and we can rely 797 | // on it putting the parameter of colenc=[BU] at 798 | // the end of the string. Just look for B, U or s 799 | // (last character of tab-separated-values) 800 | // to figure out which field encoding is used. 801 | var decodef decodefunc 802 | switch contenttype[len(contenttype)-1] { 803 | case 'B': 804 | decodef = base64Decode 805 | case 'U': 806 | decodef = urlDecode 807 | case 's': 808 | decodef = identityDecode 809 | default: 810 | return nil, &Error{Message: fmt.Sprintf("responded with unknown Content-Type: %s", contenttype)} 811 | } 812 | 813 | // Because of the encoding, we can tell how many records there 814 | // are by scanning through the input and counting the \n's 815 | var recCount int 816 | for _, v := range buf { 817 | if v == '\n' { 818 | recCount++ 819 | } 820 | } 821 | result := make([]KV, 0, recCount) 822 | b := bytes.NewBuffer(buf) 823 | for { 824 | key, err := b.ReadBytes('\t') 825 | if err != nil { 826 | return result, nil 827 | } 828 | key = decodef(key[:len(key)-1]) 829 | value, err := b.ReadBytes('\n') 830 | if len(value) > 0 { 831 | fieldlen := len(value) - 1 832 | if value[len(value)-1] != '\n' { 833 | fieldlen = len(value) 834 | } 835 | value = decodef(value[:fieldlen]) 836 | result = append(result, KV{string(key), value}) 837 | } 838 | if err != nil { 839 | return result, nil 840 | } 841 | } 842 | } 843 | 844 | // decodefunc takes a byte slice and decodes the 845 | // value in place. It returns a slice pointing into 846 | // the original byte slice. 
It is used for decoding the 847 | // individual fields of the TSV that kt returns 848 | type decodefunc func([]byte) []byte 849 | 850 | // Don't do anything, this is pure TSV 851 | func identityDecode(b []byte) []byte { 852 | return b 853 | } 854 | 855 | // Base64 decode each of the field 856 | func base64Decode(b []byte) []byte { 857 | n, _ := base64.StdEncoding.Decode(b, b) 858 | return b[:n] 859 | } 860 | 861 | // Decode % escaped URL format 862 | func urlDecode(b []byte) []byte { 863 | res := b 864 | resi := 0 865 | for i := 0; i < len(b); i++ { 866 | if b[i] != '%' { 867 | res[resi] = b[i] 868 | resi++ 869 | continue 870 | } 871 | res[resi] = unhex(b[i+1])<<4 | unhex(b[i+2]) 872 | resi++ 873 | i += 2 874 | } 875 | return res[:resi] 876 | } 877 | 878 | // copied from net/url 879 | func unhex(c byte) byte { 880 | switch { 881 | case '0' <= c && c <= '9': 882 | return c - '0' 883 | case 'a' <= c && c <= 'f': 884 | return c - 'a' + 10 885 | case 'A' <= c && c <= 'F': 886 | return c - 'A' + 10 887 | } 888 | return 0 889 | } 890 | 891 | // TODO: make this return errors that can be introspected more easily 892 | // and make it trim components of the error to filter out unused information. 893 | func makeError(m []KV) error { 894 | kv := findRec(m, "ERROR") 895 | if kv.Key == "" { 896 | return &Error{Message: "generic error"} 897 | } 898 | return &Error{Message: string(kv.Value)} 899 | } 900 | 901 | func findRec(kvs []KV, key string) KV { 902 | for _, kv := range kvs { 903 | if kv.Key == key { 904 | return kv 905 | } 906 | } 907 | return KV{} 908 | } 909 | 910 | // empty header for REST calls. 
911 | var emptyHeader = make(http.Header) 912 | 913 | func (c *Conn) doREST(ctx context.Context, op string, key string, val []byte) (code int, body []byte, err error) { 914 | newkey := urlenc(key) 915 | url := &url.URL{ 916 | Scheme: c.scheme, 917 | Host: c.host, 918 | Opaque: newkey, 919 | } 920 | resp, t, err := c.roundTrip(ctx, op, url, emptyHeader, val) 921 | if err != nil { 922 | return 0, nil, err 923 | } 924 | resultBody, err := ioutil.ReadAll(resp.Body) 925 | resp.Body.Close() 926 | if !t.Stop() { 927 | err = ErrTimeout 928 | } 929 | return resp.StatusCode, resultBody, err 930 | } 931 | 932 | // encode the key for use in a RESTFUL url 933 | // KT requires that we use URL escaped values for 934 | // anything not safe in a query component. 935 | // Add a slash for the leading slash in the url. 936 | func urlenc(s string) string { 937 | return "/" + url.QueryEscape(s) 938 | } 939 | --------------------------------------------------------------------------------