├── .gitignore ├── docs ├── microcache-architecture.png ├── README.md └── microcache-architecture.svg ├── go.mod ├── compressor.go ├── monitor.go ├── tools ├── random_url_generator │ └── random_url_generator.go └── compare_compression │ └── compare_compression.go ├── background_request.go ├── compressor_snappy.go ├── compressor_gzip.go ├── driver.go ├── monitor_test.go ├── LICENSE ├── compressor_test.go ├── driver_test.go ├── response.go ├── go.sum ├── driver_lru.go ├── monitor_func.go ├── request_test.go ├── driver_arc.go ├── driver_ristretto.go ├── examples └── basic │ └── example.go ├── request.go ├── microcache_bench_test.go ├── README.md ├── microcache.go └── microcache_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | vendor 2 | .idea 3 | -------------------------------------------------------------------------------- /docs/microcache-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevburnsjr/microcache/HEAD/docs/microcache-architecture.png -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/kevburnsjr/microcache 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/dgraph-io/ristretto v0.0.1 7 | github.com/golang/snappy v0.0.1 8 | github.com/hashicorp/golang-lru v0.5.3 9 | ) 10 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Diagrams 2 | 3 | The diagram microcache-architecture.svg was created using [draw.io](https://www.draw.io/) 4 | 5 | ![microcache-architecture.svg](microcache-architecture.svg) 6 | 7 | This SVG file contains the raw XML diagram encoded within it. 8 | Import this SVG into draw.io in order to change it. 
9 | After making changes, export as SVG with "Include a copy of my diagram" checked. 10 | -------------------------------------------------------------------------------- /compressor.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | // Compressor is the interface for response compressors 4 | type Compressor interface { 5 | 6 | // Compress compresses a response prior to being saved in the cache and returns a clone 7 | // usually by compressing the response body 8 | Compress(Response) Response 9 | 10 | // Expand decompresses a response's body (destructively) 11 | Expand(Response) Response 12 | } 13 | -------------------------------------------------------------------------------- /monitor.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // Monitor is an interface for collecting metrics about the microcache 8 | type Monitor interface { 9 | GetInterval() time.Duration 10 | Log(Stats) 11 | Hit() 12 | Miss() 13 | Stale() 14 | Backend() 15 | Error() 16 | } 17 | 18 | type Stats struct { 19 | Size int 20 | Hits int 21 | Misses int 22 | Stales int 23 | Backend int 24 | Errors int 25 | } 26 | -------------------------------------------------------------------------------- /tools/random_url_generator/random_url_generator.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "math/rand" 7 | ) 8 | 9 | // This script generates a file containing 10,000 random URLs for testing 10 | // siege -f /tmp/urls.txt -c50 -b 11 | func main() { 12 | file := "" 13 | for i := 0; i < 10000; i++ { 14 | file = file + fmt.Sprintf("http://localhost/%d\n", rand.Int()) 15 | } 16 | ioutil.WriteFile("/tmp/urls.txt", []byte(file), 0644) 17 | } 18 | -------------------------------------------------------------------------------- 
/background_request.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | ) 7 | 8 | // newBackgroundRequest clones a request for use in background object revalidation. 9 | // This prevents a closed foreground request context from prematurely cancelling 10 | // the background request context. 11 | func newBackgroundRequest(r *http.Request) *http.Request { 12 | return r.Clone(bgContext{r.Context(), make(chan struct{})}) 13 | } 14 | 15 | type bgContext struct { 16 | context.Context 17 | done chan struct{} 18 | } 19 | 20 | func (c bgContext) Done() <-chan struct{} { 21 | return c.done 22 | } 23 | -------------------------------------------------------------------------------- /compressor_snappy.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "github.com/golang/snappy" 5 | ) 6 | 7 | // CompressorSnappy is a Snappy compressor 8 | // 14x faster compress than gzip 9 | // 8x faster expand than gzip 10 | // ~ 1.5 - 2x larger result (see README) 11 | type CompressorSnappy struct { 12 | } 13 | 14 | func (c CompressorSnappy) Compress(res Response) Response { 15 | newres := res.clone() 16 | newres.body = snappy.Encode(nil, res.body) 17 | return newres 18 | } 19 | 20 | func (c CompressorSnappy) Expand(res Response) Response { 21 | res.body, _ = snappy.Decode(nil, res.body) 22 | return res 23 | } 24 | -------------------------------------------------------------------------------- /compressor_gzip.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "io/ioutil" 7 | ) 8 | 9 | // CompressorGzip is a gzip compressor 10 | type CompressorGzip struct { 11 | } 12 | 13 | func (c CompressorGzip) Compress(res Response) Response { 14 | newres := res.clone() 15 | var buf bytes.Buffer 16 | zw := 
gzip.NewWriter(&buf) 17 | zw.Write(res.body) 18 | zw.Close() 19 | newres.body = buf.Bytes() 20 | return newres 21 | } 22 | 23 | func (c CompressorGzip) Expand(res Response) Response { 24 | buf := bytes.NewBuffer(res.body) 25 | zr, _ := gzip.NewReader(buf) 26 | res.body, _ = ioutil.ReadAll(zr) 27 | zr.Close() 28 | return res 29 | } 30 | -------------------------------------------------------------------------------- /driver.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | // Driver is the interface for cache drivers 4 | type Driver interface { 5 | 6 | // SetRequestOpts stores request options in the request cache. 7 | // Requests contain request-specific cache configuration based on response headers 8 | SetRequestOpts(string, RequestOpts) error 9 | 10 | // GetRequestOpts retrieves request options from the request cache 11 | GetRequestOpts(string) RequestOpts 12 | 13 | // Set stores a response object in the response cache. 14 | // This contains the full response as well as an expiration date. 15 | Set(string, Response) error 16 | 17 | // Get retrieves a response object from the response cache 18 | Get(string) Response 19 | 20 | // Remove removes a response object from the response cache. 21 | // Required by HTTP spec to purge cached responses after successful unsafe request. 
22 | Remove(string) error 23 | 24 | // GetSize returns the number of objects stored in the cache 25 | GetSize() int 26 | } 27 | -------------------------------------------------------------------------------- /monitor_test.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "net/http" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | // Monitor should log stats as expected 10 | func TestMonitor(t *testing.T) { 11 | var hits int 12 | var expected = 4 13 | testMonitor := MonitorFunc(100*time.Second, func(s Stats) { 14 | hits = s.Hits 15 | }) 16 | testMonitor.hits = int64(expected) 17 | testMonitor.Log(Stats{}) 18 | if hits != expected { 19 | t.Fatalf("Monitor not logging correctly (%d != %d)", hits, expected) 20 | } 21 | } 22 | 23 | // Microcache calls monitor 24 | func TestMicrocacheCallsMonitor(t *testing.T) { 25 | var statChan = make(chan int) 26 | testMonitor := &monitorFunc{interval: 10 * time.Millisecond, logFunc: func(s Stats) { 27 | statChan <- s.Size 28 | }} 29 | cache := New(Config{ 30 | TTL: 30 * time.Second, 31 | Monitor: testMonitor, 32 | Driver: NewDriverLRU(10), 33 | }) 34 | defer cache.Stop() 35 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 36 | batchGet(handler, []string{"/"}) 37 | size := <-statChan 38 | if size != 1 { 39 | t.Fatal("Monitor was not called by microcache") 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Kevin Burns 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to 
permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /compressor_test.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | var zipTest = []byte(`{"firstName":"John","lastName":"Smith","isAlive":true,"age":27,"address":{"streetAddress":"21 2nd Street","city":"New York","state":"NY","postalCode":"10021-3100"},"phoneNumbers":[{"type":"home","number":"212 555-1234"},{"type":"office","number":"646 555-4567"},{"type":"mobile","number":"123 456-7890"}],"children":[],"spouse":null}`) 9 | 10 | // CompressorGzip 11 | func TestCompressorGzip(t *testing.T) { 12 | res := Response{body: zipTest} 13 | c := CompressorGzip{} 14 | crRes := c.Compress(res) 15 | if len(res.body) <= len(crRes.body) { 16 | t.Fatal("No Compression in Gzip") 17 | } 18 | exRes := c.Expand(crRes) 19 | if !bytes.Equal(res.body, exRes.body) { 20 | t.Fatal("Expanded compression does not match in Gzip") 21 | } 22 | } 23 | 24 | // CompressorSnappy 25 | func TestCompressorSnappy(t *testing.T) { 26 | res := Response{body: zipTest} 27 | c := CompressorSnappy{} 28 | crRes := c.Compress(res) 29 | if len(res.body) <= len(crRes.body) { 30 | 
t.Fatal("No Compression in Snappy") 31 | } 32 | exRes := c.Expand(crRes) 33 | if !bytes.Equal(res.body, exRes.body) { 34 | t.Fatal("Expanded compression does not match in Snappy") 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /driver_test.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "net/http" 5 | "testing" 6 | ) 7 | 8 | // Remove should work as expected 9 | func TestRemove(t *testing.T) { 10 | var testDriver = func(name string, d Driver) { 11 | cache := New(Config{Driver: d}) 12 | defer cache.Stop() 13 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 14 | batchGet(handler, []string{ 15 | "/", 16 | }) 17 | if d.GetSize() != 1 { 18 | t.Fatalf("%s Driver reports inaccurate length", name) 19 | } 20 | r, _ := http.NewRequest("GET", "/", nil) 21 | reqHash := getRequestHash(cache, r) 22 | reqOpts := buildRequestOpts(cache, Response{}, r) 23 | objHash := reqOpts.getObjectHash(reqHash, r) 24 | d.Remove(objHash) 25 | if d.GetSize() != 0 { 26 | t.Fatalf("%s Driver cannot delete items", name) 27 | } 28 | } 29 | testDriver("ARC", NewDriverARC(10)) 30 | testDriver("LRU", NewDriverLRU(10)) 31 | } 32 | 33 | // Empty init should not fatal 34 | func TestEmptyInit(t *testing.T) { 35 | var testDriver = func(name string, d Driver) { 36 | cache := New(Config{Driver: d}) 37 | defer cache.Stop() 38 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 39 | batchGet(handler, []string{ 40 | "/a", 41 | "/b", 42 | }) 43 | if d.GetSize() != 1 { 44 | t.Fatalf("%s Driver should have length 1", name) 45 | } 46 | } 47 | testDriver("ARC", NewDriverARC(0)) 48 | testDriver("LRU", NewDriverLRU(0)) 49 | } 50 | -------------------------------------------------------------------------------- /response.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | 
"net/http" 5 | "strings" 6 | "time" 7 | ) 8 | 9 | // Response is used both as a cache object for the response 10 | // and to wrap http.ResponseWriter for downstream requests. 11 | type Response struct { 12 | found bool 13 | date time.Time 14 | expires time.Time 15 | status int 16 | headerWritten bool 17 | header http.Header 18 | body []byte 19 | } 20 | 21 | func (res *Response) Write(b []byte) (int, error) { 22 | res.body = append(res.body, b...) 23 | return len(b), nil 24 | } 25 | 26 | func (res *Response) Header() http.Header { 27 | return res.header 28 | } 29 | 30 | func (res *Response) WriteHeader(code int) { 31 | res.status = code 32 | res.headerWritten = true 33 | } 34 | 35 | func (res *Response) sendResponse(w http.ResponseWriter) { 36 | for header, values := range res.header { 37 | // Do not forward microcache headers to client 38 | if strings.HasPrefix(header, "Microcache-") { 39 | continue 40 | } 41 | for _, val := range values { 42 | w.Header().Add(header, val) 43 | } 44 | } 45 | if res.headerWritten { 46 | w.WriteHeader(res.status) 47 | } 48 | w.Write(res.body) 49 | return 50 | } 51 | 52 | func (res *Response) clone() Response { 53 | return Response{ 54 | found: res.found, 55 | date: res.date, 56 | expires: res.expires, 57 | status: res.status, 58 | header: res.header, 59 | body: res.body, 60 | } 61 | } 62 | 63 | type passthroughWriter struct { 64 | http.ResponseWriter 65 | status int 66 | } 67 | 68 | func (w *passthroughWriter) WriteHeader(code int) { 69 | w.status = code 70 | w.ResponseWriter.WriteHeader(code) 71 | } 72 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 2 | github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 3 | github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 4 | github.com/dgraph-io/ristretto v0.0.0-20191010170704-2ba187ef9534 h1:9G6fVccQriMJu4nXwpwLDoy9y31t/KUSLAbPcoBgv+4= 5 | github.com/dgraph-io/ristretto v0.0.0-20191010170704-2ba187ef9534/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= 6 | github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e h1:aeUNgwup7PnDOBAD1BOKAqzb/W/NksOj6r3dwKKuqfg= 7 | github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= 8 | github.com/dgraph-io/ristretto v0.0.0-20191106054232-1dd5a4de16b5 h1:pNuleeGdjsgZ2tVP27B1QT4QekFIA58UpMY5vQkMfIk= 9 | github.com/dgraph-io/ristretto v0.0.0-20191106054232-1dd5a4de16b5/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= 10 | github.com/dgraph-io/ristretto v0.0.1 h1:cJwdnj42uV8Jg4+KLrYovLiCgIfz9wtWm6E6KA+1tLs= 11 | github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= 12 | github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 13 | github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= 14 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 15 | github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= 16 | github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 17 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 18 | -------------------------------------------------------------------------------- /driver_lru.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "github.com/hashicorp/golang-lru" 5 | ) 6 | 7 | // DriverLRU is a driver implementation using github.com/hashicorp/golang-lru 8 | type DriverLRU struct { 9 | RequestCache *lru.Cache 10 | 
ResponseCache *lru.Cache 11 | } 12 | 13 | // NewDriverLRU returns the default LRU driver configuration. 14 | // size determines the number of items in the cache. 15 | // Memory usage should be considered when choosing the appropriate cache size. 16 | // The amount of memory consumed by the driver will depend upon the response size. 17 | // Roughly, memory = cacheSize * averageResponseSize / compression ratio 18 | func NewDriverLRU(size int) DriverLRU { 19 | // golang-lru segfaults when size is zero 20 | if size < 1 { 21 | size = 1 22 | } 23 | reqCache, _ := lru.New(size) 24 | resCache, _ := lru.New(size) 25 | return DriverLRU{ 26 | reqCache, 27 | resCache, 28 | } 29 | } 30 | 31 | func (c DriverLRU) SetRequestOpts(hash string, req RequestOpts) error { 32 | c.RequestCache.Add(hash, req) 33 | return nil 34 | } 35 | 36 | func (c DriverLRU) GetRequestOpts(hash string) (req RequestOpts) { 37 | obj, success := c.RequestCache.Get(hash) 38 | if success { 39 | req = obj.(RequestOpts) 40 | } 41 | return req 42 | } 43 | 44 | func (c DriverLRU) Set(hash string, res Response) error { 45 | c.ResponseCache.Add(hash, res) 46 | return nil 47 | } 48 | 49 | func (c DriverLRU) Get(hash string) (res Response) { 50 | obj, success := c.ResponseCache.Get(hash) 51 | if success { 52 | res = obj.(Response) 53 | } 54 | return res 55 | } 56 | 57 | func (c DriverLRU) Remove(hash string) error { 58 | c.ResponseCache.Remove(hash) 59 | return nil 60 | } 61 | 62 | func (c DriverLRU) GetSize() int { 63 | return c.ResponseCache.Len() 64 | } 65 | -------------------------------------------------------------------------------- /monitor_func.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "sync/atomic" 5 | "time" 6 | ) 7 | 8 | // MonitorFunc turns a function into a Monitor 9 | func MonitorFunc(interval time.Duration, logFunc func(Stats)) *monitorFunc { 10 | return &monitorFunc{ 11 | interval: interval, 12 | logFunc: logFunc, 
13 | } 14 | } 15 | 16 | type monitorFunc struct { 17 | interval time.Duration 18 | logFunc func(Stats) 19 | hits int64 20 | misses int64 21 | stales int64 22 | backend int64 23 | errors int64 24 | stop chan bool 25 | } 26 | 27 | func (m *monitorFunc) GetInterval() time.Duration { 28 | return m.interval 29 | } 30 | 31 | func (m *monitorFunc) Log(stats Stats) { 32 | // hits 33 | stats.Hits = int(atomic.SwapInt64(&m.hits, 0)) 34 | 35 | // misses 36 | stats.Misses = int(atomic.SwapInt64(&m.misses, 0)) 37 | 38 | // stales 39 | stats.Stales = int(atomic.SwapInt64(&m.stales, 0)) 40 | 41 | // backend 42 | stats.Backend = int(atomic.SwapInt64(&m.backend, 0)) 43 | 44 | // errors 45 | stats.Errors = int(atomic.SwapInt64(&m.errors, 0)) 46 | 47 | // log 48 | m.logFunc(stats) 49 | } 50 | 51 | func (m *monitorFunc) Hit() { 52 | atomic.AddInt64(&m.hits, 1) 53 | } 54 | 55 | func (m *monitorFunc) Miss() { 56 | atomic.AddInt64(&m.misses, 1) 57 | } 58 | 59 | func (m *monitorFunc) Stale() { 60 | atomic.AddInt64(&m.stales, 1) 61 | } 62 | 63 | func (m *monitorFunc) Backend() { 64 | atomic.AddInt64(&m.backend, 1) 65 | } 66 | 67 | func (m *monitorFunc) Error() { 68 | atomic.AddInt64(&m.errors, 1) 69 | } 70 | 71 | func (m *monitorFunc) getHits() int { 72 | return int(atomic.LoadInt64(&m.hits)) 73 | } 74 | 75 | func (m *monitorFunc) getMisses() int { 76 | return int(atomic.LoadInt64(&m.misses)) 77 | } 78 | 79 | func (m *monitorFunc) getStales() int { 80 | return int(atomic.LoadInt64(&m.stales)) 81 | } 82 | 83 | func (m *monitorFunc) getBackends() int { 84 | return int(atomic.LoadInt64(&m.backend)) 85 | } 86 | 87 | func (m *monitorFunc) getErrors() int { 88 | return int(atomic.LoadInt64(&m.errors)) 89 | } 90 | -------------------------------------------------------------------------------- /request_test.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "net/http" 5 | "reflect" 6 | "testing" 7 | "time" 8 | ) 9 | 10 
| // buildRequestOpts detects response headers appropriately 11 | func TestBuildRequestOpts(t *testing.T) { 12 | var i = 0 13 | r, _ := http.NewRequest("GET", "/", nil) 14 | type tc struct { 15 | hdr string 16 | val string 17 | exp RequestOpts 18 | } 19 | var runCases = func(m *microcache, cases []tc) { 20 | for _, c := range cases { 21 | res := Response{header: http.Header{}} 22 | res.Header().Set(c.hdr, c.val) 23 | reqOpts := buildRequestOpts(m, res, r) 24 | reqOpts.found = false 25 | if !reflect.DeepEqual(reqOpts, c.exp) { 26 | t.Fatalf("Mismatch in case %d\n%#v\n%#v", i+1, reqOpts, c.exp) 27 | } 28 | i++ 29 | } 30 | } 31 | runCases(New(Config{}), []tc{ 32 | {"microcache-nocache", "1", RequestOpts{nocache: true}}, 33 | {"microcache-ttl", "10", RequestOpts{ttl: time.Duration(10 * time.Second)}}, 34 | {"microcache-stale-if-error", "10", RequestOpts{staleIfError: time.Duration(10 * time.Second)}}, 35 | {"microcache-stale-while-revalidate", "10", RequestOpts{staleWhileRevalidate: time.Duration(10 * time.Second)}}, 36 | {"microcache-collapsed-forwarding", "1", RequestOpts{collapsedForwarding: true}}, 37 | {"microcache-stale-recache", "1", RequestOpts{staleRecache: true}}, 38 | {"Microcache-Vary-Query", "a", RequestOpts{varyQuery: []string{"a"}}}, 39 | }) 40 | runCases(New(Config{Nocache: true}), []tc{ 41 | {"microcache-cache", "1", RequestOpts{nocache: false}}, 42 | }) 43 | runCases(New(Config{CollapsedForwarding: true}), []tc{ 44 | {"microcache-no-collapsed-forwarding", "1", RequestOpts{collapsedForwarding: false}}, 45 | }) 46 | runCases(New(Config{StaleRecache: true}), []tc{ 47 | {"microcache-no-stale-recache", "1", RequestOpts{staleRecache: false}}, 48 | }) 49 | runCases(New(Config{Vary: []string{"a"}}), []tc{ 50 | {"Microcache-Vary", "b", RequestOpts{vary: []string{"a", "b"}}}, 51 | }) 52 | runCases(New(Config{Vary: []string{"a"}}), []tc{ 53 | {"Vary", "b", RequestOpts{vary: []string{"a", "b"}}}, 54 | }) 55 | } 56 | 
-------------------------------------------------------------------------------- /driver_arc.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "github.com/hashicorp/golang-lru" 5 | ) 6 | 7 | // DriverARC is a driver implementation using github.com/hashicorp/golang-lru 8 | // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). 9 | // It requires more ram and cpu than straight LRU but can be more efficient 10 | // https://godoc.org/github.com/hashicorp/golang-lru#ARCCache 11 | type DriverARC struct { 12 | RequestCache *lru.ARCCache 13 | ResponseCache *lru.ARCCache 14 | } 15 | 16 | // NewDriverARC returns an ARC driver. 17 | // size determines the number of items in the cache. 18 | // Memory usage should be considered when choosing the appropriate cache size. 19 | // The amount of memory consumed by the driver will depend upon the response size. 20 | // Roughly, memory = cacheSize * averageResponseSize / compression ratio 21 | // ARC caches have additional CPU and memory overhead when compared with LRU 22 | // ARC does not support eviction monitoring 23 | func NewDriverARC(size int) DriverARC { 24 | // golang-lru segfaults when size is zero 25 | if size < 1 { 26 | size = 1 27 | } 28 | reqCache, _ := lru.NewARC(size) 29 | resCache, _ := lru.NewARC(size) 30 | return DriverARC{ 31 | reqCache, 32 | resCache, 33 | } 34 | } 35 | 36 | func (c DriverARC) SetRequestOpts(hash string, req RequestOpts) error { 37 | c.RequestCache.Add(hash, req) 38 | return nil 39 | } 40 | 41 | func (c DriverARC) GetRequestOpts(hash string) (req RequestOpts) { 42 | obj, success := c.RequestCache.Get(hash) 43 | if success { 44 | req = obj.(RequestOpts) 45 | } 46 | return req 47 | } 48 | 49 | func (c DriverARC) Set(hash string, res Response) error { 50 | c.ResponseCache.Add(hash, res) 51 | return nil 52 | } 53 | 54 | func (c DriverARC) Get(hash string) (res Response) { 55 | obj, success := 
c.ResponseCache.Get(hash) 56 | if success { 57 | res = obj.(Response) 58 | } 59 | return res 60 | } 61 | 62 | func (c DriverARC) Remove(hash string) error { 63 | c.ResponseCache.Remove(hash) 64 | return nil 65 | } 66 | 67 | func (c DriverARC) GetSize() int { 68 | return c.ResponseCache.Len() 69 | } 70 | -------------------------------------------------------------------------------- /driver_ristretto.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "unsafe" 5 | 6 | "github.com/dgraph-io/ristretto" 7 | ) 8 | 9 | var ( 10 | requestOptsSize = int64(unsafe.Sizeof(RequestOpts{})) 11 | responseSize = int64(unsafe.Sizeof(Response{})) 12 | ) 13 | 14 | // DriverRistretto is a driver implementation using github.com/dgraph-io/ristretto 15 | type DriverRistretto struct { 16 | Driver 17 | 18 | Cache *ristretto.Cache 19 | } 20 | 21 | func calculateResponseCost(res Response) int64 { 22 | s := responseSize 23 | 24 | // Estimate size of the map itself. 25 | s += 5*8 + int64(len(res.header)*8) 26 | 27 | for k, vv := range res.header { 28 | s += int64(len(k)) 29 | for _, v := range vv { 30 | s += int64(len(v)) 31 | } 32 | } 33 | 34 | s += int64(cap(res.body)) 35 | 36 | return s 37 | } 38 | 39 | func calculateRequestOptCost(req RequestOpts) int64 { 40 | s := requestOptsSize 41 | 42 | for _, v := range req.vary { 43 | s += int64(len(v)) 44 | } 45 | for _, v := range req.varyQuery { 46 | s += int64(len(v)) 47 | } 48 | 49 | return s 50 | } 51 | 52 | // NewDriverRistretto returns the default Ristretto driver configuration. 53 | // requests should be the number of items you expect to keep in the cache when full. 54 | // Estimating this on the higher side is better. 55 | // size determines the maximum number of bytes in the cache. 
56 | func NewDriverRistretto(requests, size int64) DriverRistretto { 57 | if size == 0 { 58 | size = 1 59 | } 60 | if requests == 0 { 61 | requests = size 62 | } 63 | 64 | cache, err := ristretto.NewCache(&ristretto.Config{ 65 | NumCounters: requests * 10, 66 | MaxCost: size, 67 | BufferItems: 64, 68 | Metrics: true, // Required to implement Driver.GetSize() 69 | }) 70 | if err != nil { 71 | panic(err) 72 | } 73 | 74 | return DriverRistretto{Cache: cache} 75 | } 76 | 77 | func (d DriverRistretto) SetRequestOpts(hash string, req RequestOpts) error { 78 | d.Cache.Set(hash, req, calculateRequestOptCost(req)) 79 | return nil 80 | } 81 | 82 | func (d DriverRistretto) GetRequestOpts(hash string) (req RequestOpts) { 83 | r, ok := d.Cache.Get(hash) 84 | if ok && r != nil { 85 | req = r.(RequestOpts) 86 | } 87 | return req 88 | } 89 | 90 | func (d DriverRistretto) Set(hash string, res Response) error { 91 | d.Cache.Set(hash, res, calculateResponseCost(res)) 92 | return nil 93 | } 94 | 95 | func (d DriverRistretto) Get(hash string) (res Response) { 96 | r, ok := d.Cache.Get(hash) 97 | if ok && r != nil { 98 | res = r.(Response) 99 | } 100 | return res 101 | } 102 | 103 | func (d DriverRistretto) Remove(hash string) error { 104 | d.Cache.Del(hash) 105 | return nil 106 | } 107 | 108 | func (d DriverRistretto) GetSize() int { 109 | return int(d.Cache.Metrics.KeysAdded() - d.Cache.Metrics.KeysEvicted()) 110 | } 111 | -------------------------------------------------------------------------------- /tools/compare_compression/compare_compression.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "compress/zlib" 7 | "flag" 8 | "fmt" 9 | "io/ioutil" 10 | "time" 11 | 12 | "github.com/golang/snappy" 13 | ) 14 | 15 | func main() { 16 | 17 | var filepath *string = flag.String("f", "", "File to compress") 18 | flag.Parse() 19 | 20 | if *filepath == "" { 21 | fmt.Println("Error: Missing 
Flag -f filepath (required)") 22 | return 23 | } 24 | 25 | // Create a file (json or otherwise) using your own data to see how it 26 | // compresses with these different compression algorithms. 27 | dat, _ := ioutil.ReadFile(*filepath) 28 | 29 | orig := len(dat) 30 | 31 | fmt.Printf("Original: %d bytes\n", orig) 32 | 33 | var c []byte 34 | 35 | start := time.Now() 36 | for i := 0; i < 1e2; i++ { 37 | c = compressZlib(dat) 38 | } 39 | fmt.Printf("zlib compress %v %d bytes (%.1fx)\n", time.Since(start), len(c), float64(orig)/float64(len(c))) 40 | 41 | start = time.Now() 42 | for i := 0; i < 1e2; i++ { 43 | expandZlib(c) 44 | } 45 | fmt.Printf("zlib expand %v\n", time.Since(start)) 46 | 47 | start = time.Now() 48 | for i := 0; i < 1e2; i++ { 49 | c = compressGzip(dat) 50 | } 51 | fmt.Printf("gzip compress %v %d bytes (%.1fx)\n", time.Since(start), len(c), float64(orig)/float64(len(c))) 52 | 53 | start = time.Now() 54 | for i := 0; i < 1e2; i++ { 55 | expandGzip(c) 56 | } 57 | fmt.Printf("gzip expand %v\n", time.Since(start)) 58 | 59 | start = time.Now() 60 | for i := 0; i < 1e2; i++ { 61 | c = compressSnappy(dat) 62 | } 63 | fmt.Printf("snappy compress %v %d bytes (%.1fx)\n", time.Since(start), len(c), float64(orig)/float64(len(c))) 64 | 65 | func(res []byte) []byte { 66 | start = time.Now() 67 | for i := 0; i < 1e2; i++ { 68 | res = expandSnappy(c) 69 | } 70 | return res 71 | }([]byte{}) 72 | fmt.Printf("snappy expand %v\n", time.Since(start)) 73 | 74 | } 75 | 76 | func compressZlib(in []byte) []byte { 77 | var b bytes.Buffer 78 | w := zlib.NewWriter(&b) 79 | w.Write(in) 80 | w.Close() 81 | return b.Bytes() 82 | } 83 | 84 | func expandZlib(in []byte) []byte { 85 | buf := bytes.NewBuffer(in) 86 | r, _ := zlib.NewReader(buf) 87 | out, _ := ioutil.ReadAll(r) 88 | r.Close() 89 | return out 90 | } 91 | 92 | func compressGzip(in []byte) []byte { 93 | var b bytes.Buffer 94 | w := gzip.NewWriter(&b) 95 | w.Write(in) 96 | w.Close() 97 | return b.Bytes() 98 | } 99 | 100 | func 
expandGzip(in []byte) []byte { 101 | buf := bytes.NewBuffer(in) 102 | r, _ := gzip.NewReader(buf) 103 | out, _ := ioutil.ReadAll(r) 104 | r.Close() 105 | return out 106 | } 107 | 108 | func compressSnappy(in []byte) []byte { 109 | return snappy.Encode(nil, in) 110 | } 111 | 112 | func expandSnappy(in []byte) []byte { 113 | out, _ := snappy.Decode(nil, in) 114 | return out 115 | } 116 | -------------------------------------------------------------------------------- /examples/basic/example.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/kevburnsjr/microcache" 10 | ) 11 | 12 | func main() { 13 | // - Nocache: true 14 | // Cache is disabled for all requests by default 15 | // Cache can be enabled per request hash with response header 16 | // 17 | // microcache-cache: 1 18 | // 19 | // - Timeout: 3 * time.Second 20 | // Requests will be timed out and treated as 503 if they do not return within 3s 21 | // 22 | // - TTL: 30 * time.Second 23 | // Responses which enable cache explicitly will be cached for 30s by default 24 | // Response cache time can be configured per endpoint with response header 25 | // 26 | // microcache-ttl: 30 27 | // 28 | // - StaleIfError: 3600 * time.Second 29 | // If the request encounters an error (or times out), a stale response will be returned 30 | // provided that the stale cached response expired less than an hour ago. 
31 | // Can be altered per request with response header 32 | // More Info: https://tools.ietf.org/html/rfc5861 33 | // 34 | // microcache-stale-if-error: 86400 35 | // 36 | // - StaleRecache: true 37 | // Upon serving a stale response following an error, that stale response will be 38 | // re-cached for the default ttl (10s) 39 | // Can be disabled per request with response header 40 | // 41 | // microcache-no-stale-recache: 1 42 | // 43 | // - StaleWhileRevalidate: 30 * time.Second 44 | // If the cache encounters a request for a cached object that has expired in the 45 | // last 30s, the cache will reply immediately with a stale response and fetch 46 | // the resource in a background process. 47 | // More Info: https://tools.ietf.org/html/rfc5861 48 | // 49 | // microcache-stale-while-revalidate: 20 50 | // 51 | // - HashQuery: true 52 | // All query parameters are included in the request hash 53 | // 54 | // - QueryIgnore: []string{} 55 | // A list of query parameters to ignore when hashing the request 56 | // Add oauth parameters or other unwanted cache busters to this list 57 | // 58 | // - Exposed: true 59 | // Header will be appended to response indicating HIT / MISS / STALE 60 | // 61 | // microcache: ( HIT | MISS | STALE ) 62 | // 63 | // - SuppressAgeHeader: false 64 | // Age is a standard HTTP header indicating the age of the cached object in seconds 65 | // The Age header is added by default to all HIT and STALE responses 66 | // This parameter prevents the Age header from being set 67 | // 68 | // Age: ( seconds ) 69 | // 70 | // - Monitor: microcache.MonitorFunc(5 * time.Second, logStats) 71 | // LogStats will be called every 5s to log stats about the cache 72 | // 73 | cache := microcache.New(microcache.Config{ 74 | Nocache: true, 75 | Timeout: 3 * time.Second, 76 | TTL: 30 * time.Second, 77 | StaleIfError: 3600 * time.Second, 78 | StaleRecache: true, 79 | StaleWhileRevalidate: 30 * time.Second, 80 | CollapsedForwarding: true, 81 | HashQuery: true, 82 
| QueryIgnore: []string{}, 83 | Exposed: true, 84 | SuppressAgeHeader: false, 85 | Monitor: microcache.MonitorFunc(5*time.Second, logStats), 86 | Driver: microcache.NewDriverLRU(1e4), 87 | Compressor: microcache.CompressorSnappy{}, 88 | }) 89 | defer cache.Stop() 90 | 91 | h := cache.Middleware(handler{}) 92 | 93 | http.ListenAndServe(":80", h) 94 | } 95 | 96 | type handler struct { 97 | } 98 | 99 | var body = bytes.Repeat([]byte("1234567890"), 1e3) 100 | 101 | // This example fills up to 1.2GB of memory, so at least 2.0GB of RAM is recommended 102 | func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 103 | 104 | // Enable cache 105 | w.Header().Set("microcache-cache", "1") 106 | 107 | // Return a 10 kilobyte response body 108 | w.Write(body) 109 | } 110 | 111 | func logStats(stats microcache.Stats) { 112 | total := stats.Hits + stats.Misses + stats.Stales 113 | log.Printf("Size: %d, Total: %d, Hits: %d, Misses: %d, Stales: %d, Backend: %d, Errors: %d\n", 114 | stats.Size, 115 | total, 116 | stats.Hits, 117 | stats.Misses, 118 | stats.Stales, 119 | stats.Backend, 120 | stats.Errors, 121 | ) 122 | } 123 | -------------------------------------------------------------------------------- /request.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "crypto/sha1" 5 | "net/http" 6 | "strconv" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | func getRequestHash(m *microcache, r *http.Request) string { 12 | h := sha1.New() 13 | h.Write([]byte(r.URL.Path)) 14 | for _, header := range m.Vary { 15 | h.Write([]byte("&" + header + ":" + r.Header.Get(header))) 16 | } 17 | if m.HashQuery { 18 | if m.QueryIgnore != nil { 19 | for key, values := range r.URL.Query() { 20 | if _, ok := m.QueryIgnore[key]; ok { 21 | continue 22 | } 23 | for _, value := range values { 24 | h.Write([]byte("&" + key + "=" + value)) 25 | } 26 | } 27 | } else { 28 | h.Write([]byte(r.URL.RawQuery)) 29 | } 30 | } 31 | return 
string(h.Sum(nil)) 32 | } 33 | 34 | // RequestOpts stores per-request cache options. This is necessary to allow 35 | // custom response headers to be evaluated, cached and applied prior to 36 | // response object retrieval (ie. microcache-vary, microcache-nocache, etc) 37 | type RequestOpts struct { 38 | found bool 39 | ttl time.Duration 40 | staleIfError time.Duration 41 | staleRecache bool 42 | staleWhileRevalidate time.Duration 43 | collapsedForwarding bool 44 | vary []string 45 | varyQuery []string 46 | nocache bool 47 | } 48 | 49 | func (req *RequestOpts) getObjectHash(reqHash string, r *http.Request) string { 50 | h := sha1.New() 51 | h.Write([]byte(reqHash)) 52 | for _, header := range req.vary { 53 | h.Write([]byte("&" + header + ":" + r.Header.Get(header))) 54 | } 55 | if len(req.varyQuery) > 0 { 56 | queryParams := r.URL.Query() 57 | for _, param := range req.varyQuery { 58 | if vals, ok := queryParams[param]; ok { 59 | for _, val := range vals { 60 | h.Write([]byte("&" + param + "=" + val)) 61 | } 62 | } 63 | } 64 | } 65 | return string(h.Sum(nil)) 66 | } 67 | 68 | func buildRequestOpts(m *microcache, res Response, r *http.Request) RequestOpts { 69 | headers := res.header 70 | req := RequestOpts{ 71 | found: true, 72 | nocache: m.Nocache, 73 | ttl: m.TTL, 74 | staleIfError: m.StaleIfError, 75 | staleRecache: m.StaleRecache, 76 | staleWhileRevalidate: m.StaleWhileRevalidate, 77 | collapsedForwarding: m.CollapsedForwarding, 78 | vary: m.Vary, 79 | } 80 | 81 | // w.Header().Set("microcache-cache", "1") 82 | if headers.Get("microcache-cache") != "" { 83 | req.nocache = false 84 | } 85 | 86 | // w.Header().Set("microcache-nocache", "1") 87 | if headers.Get("microcache-nocache") != "" { 88 | req.nocache = true 89 | } 90 | 91 | // w.Header().Set("microcache-ttl", "10") // 10 seconds 92 | ttlHdr, _ := strconv.Atoi(headers.Get("microcache-ttl")) 93 | if ttlHdr > 0 { 94 | req.ttl = time.Duration(ttlHdr) * time.Second 95 | } 96 | 97 | // 
w.Header().Set("microcache-stale-if-error", "20") // 20 seconds 98 | staleIfErrorHdr, _ := strconv.Atoi(headers.Get("microcache-stale-if-error")) 99 | if staleIfErrorHdr > 0 { 100 | req.staleIfError = time.Duration(staleIfErrorHdr) * time.Second 101 | } 102 | 103 | // w.Header().Set("microcache-stale-while-revalidate", "20") // 20 seconds 104 | staleWhileRevalidateHdr, _ := strconv.Atoi(headers.Get("microcache-stale-while-revalidate")) 105 | if staleWhileRevalidateHdr > 0 { 106 | req.staleWhileRevalidate = time.Duration(staleWhileRevalidateHdr) * time.Second 107 | } 108 | 109 | // w.Header().Set("microcache-collapsed-forwarding", "1") 110 | if headers.Get("microcache-collapsed-forwarding") != "" { 111 | req.collapsedForwarding = true 112 | } 113 | 114 | // w.Header().Set("microcache-no-collapsed-forwarding", "1") 115 | if headers.Get("microcache-no-collapsed-forwarding") != "" { 116 | req.collapsedForwarding = false 117 | } 118 | 119 | // w.Header().Set("microcache-stale-recache", "1") 120 | if headers.Get("microcache-stale-recache") != "" { 121 | req.staleRecache = true 122 | } 123 | 124 | // w.Header().Set("microcache-no-stale-recache", "1") 125 | if headers.Get("microcache-no-stale-recache") != "" { 126 | req.staleRecache = false 127 | } 128 | 129 | // w.Header().Add("microcache-vary-query", "q, page, limit") 130 | if varyQueries, ok := headers["Microcache-Vary-Query"]; ok { 131 | for _, hdr := range varyQueries { 132 | varyQueryParams := strings.Split(hdr, ",") 133 | for i, v := range varyQueryParams { 134 | varyQueryParams[i] = strings.Trim(v, " ") 135 | } 136 | req.varyQuery = append(req.varyQuery, varyQueryParams...) 
137 | } 138 | } 139 | 140 | // w.Header().Add("microcache-vary", "accept-language, accept-encoding") 141 | if varyHdr, ok := headers["Microcache-Vary"]; ok { 142 | for _, hdr := range varyHdr { 143 | varyHdrs := strings.Split(hdr, ",") 144 | for i, v := range varyHdrs { 145 | varyHdrs[i] = strings.Trim(v, " ") 146 | } 147 | req.vary = append(req.vary, varyHdrs...) 148 | } 149 | } 150 | 151 | // w.Header().Add("Vary", "accept-language, accept-encoding") 152 | if varyHdr, ok := headers["Vary"]; ok { 153 | for _, hdr := range varyHdr { 154 | varyHdrs := strings.Split(hdr, ",") 155 | for i, v := range varyHdrs { 156 | varyHdrs[i] = strings.Trim(v, " ") 157 | } 158 | req.vary = append(req.vary, varyHdrs...) 159 | } 160 | } 161 | 162 | return req 163 | } 164 | -------------------------------------------------------------------------------- /microcache_bench_test.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "net/http" 5 | "strconv" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func BenchmarkHits(b *testing.B) { 11 | cache := New(Config{ 12 | TTL: 30 * time.Second, 13 | Driver: NewDriverLRU(10), 14 | }) 15 | defer cache.Stop() 16 | handler := cache.Middleware(http.HandlerFunc(successHandler)) 17 | r, _ := http.NewRequest("GET", "/", nil) 18 | w := &noopWriter{http.Header{}} 19 | b.ResetTimer() 20 | for i := 0; i < b.N; i++ { 21 | handler.ServeHTTP(w, r) 22 | } 23 | } 24 | 25 | func BenchmarkNocache(b *testing.B) { 26 | cache := New(Config{ 27 | Nocache: true, 28 | Driver: NewDriverLRU(10), 29 | }) 30 | defer cache.Stop() 31 | handler := cache.Middleware(http.HandlerFunc(successHandler)) 32 | r, _ := http.NewRequest("GET", "/", nil) 33 | w := &noopWriter{http.Header{}} 34 | b.ResetTimer() 35 | for i := 0; i < b.N; i++ { 36 | handler.ServeHTTP(w, r) 37 | } 38 | } 39 | 40 | func BenchmarkMisses(b *testing.B) { 41 | cache := New(Config{ 42 | TTL: 30 * time.Second, 43 | Driver: NewDriverLRU(10), 44 | 
}) 45 | defer cache.Stop() 46 | handler := cache.Middleware(http.HandlerFunc(successHandler)) 47 | r, _ := http.NewRequest("GET", "/", nil) 48 | w := &noopWriter{http.Header{}} 49 | b.ResetTimer() 50 | for i := 0; i < b.N; i++ { 51 | r.URL.Path = "/" + strconv.Itoa(i) 52 | handler.ServeHTTP(w, r) 53 | } 54 | } 55 | 56 | func BenchmarkCompression1kHits(b *testing.B) { 57 | cache := New(Config{ 58 | TTL: 30 * time.Second, 59 | Driver: NewDriverLRU(10), 60 | Compressor: CompressorSnappy{}, 61 | }) 62 | defer cache.Stop() 63 | handler := cache.Middleware(http.HandlerFunc(success1kHandler)) 64 | r, _ := http.NewRequest("GET", "/", nil) 65 | w := &noopWriter{http.Header{}} 66 | b.ResetTimer() 67 | for i := 0; i < b.N; i++ { 68 | handler.ServeHTTP(w, r) 69 | } 70 | } 71 | 72 | func BenchmarkCompression1kNocache(b *testing.B) { 73 | cache := New(Config{ 74 | Nocache: true, 75 | Driver: NewDriverLRU(10), 76 | Compressor: CompressorSnappy{}, 77 | }) 78 | defer cache.Stop() 79 | handler := cache.Middleware(http.HandlerFunc(success1kHandler)) 80 | r, _ := http.NewRequest("GET", "/", nil) 81 | w := &noopWriter{http.Header{}} 82 | b.ResetTimer() 83 | for i := 0; i < b.N; i++ { 84 | handler.ServeHTTP(w, r) 85 | } 86 | } 87 | 88 | func BenchmarkCompression1kMisses(b *testing.B) { 89 | cache := New(Config{ 90 | TTL: 30 * time.Second, 91 | Driver: NewDriverLRU(10), 92 | Compressor: CompressorSnappy{}, 93 | }) 94 | defer cache.Stop() 95 | handler := cache.Middleware(http.HandlerFunc(success1kHandler)) 96 | r, _ := http.NewRequest("GET", "/", nil) 97 | w := &noopWriter{http.Header{}} 98 | b.ResetTimer() 99 | for i := 0; i < b.N; i++ { 100 | r.URL.Path = "/" + strconv.Itoa(i) 101 | handler.ServeHTTP(w, r) 102 | } 103 | } 104 | 105 | func BenchmarkParallelCompression1kHits(b *testing.B) { 106 | cache := New(Config{ 107 | TTL: 30 * time.Second, 108 | Driver: NewDriverLRU(10), 109 | Compressor: CompressorSnappy{}, 110 | }) 111 | defer cache.Stop() 112 | handler := 
cache.Middleware(http.HandlerFunc(success1kHandler)) 113 | r, _ := http.NewRequest("GET", "/", nil) 114 | b.ResetTimer() 115 | b.RunParallel(func(pb *testing.PB) { 116 | w := &noopWriter{http.Header{}} 117 | for i := 0; pb.Next(); i++ { 118 | handler.ServeHTTP(w, r) 119 | } 120 | }) 121 | } 122 | 123 | func BenchmarkParallelCompression1kNocache(b *testing.B) { 124 | cache := New(Config{ 125 | Nocache: true, 126 | Driver: NewDriverLRU(10), 127 | Compressor: CompressorSnappy{}, 128 | }) 129 | defer cache.Stop() 130 | handler := cache.Middleware(http.HandlerFunc(success1kHandler)) 131 | r, _ := http.NewRequest("GET", "/", nil) 132 | b.ResetTimer() 133 | b.RunParallel(func(pb *testing.PB) { 134 | w := &noopWriter{http.Header{}} 135 | for i := 0; pb.Next(); i++ { 136 | handler.ServeHTTP(w, r) 137 | } 138 | }) 139 | } 140 | 141 | func BenchmarkParallelCompression1kMisses(b *testing.B) { 142 | cache := New(Config{ 143 | TTL: 30 * time.Second, 144 | Driver: NewDriverLRU(10), 145 | Compressor: CompressorSnappy{}, 146 | }) 147 | defer cache.Stop() 148 | handler := cache.Middleware(http.HandlerFunc(success1kHandler)) 149 | r, _ := http.NewRequest("GET", "/", nil) 150 | b.ResetTimer() 151 | b.RunParallel(func(pb *testing.PB) { 152 | w := &noopWriter{http.Header{}} 153 | for i := 0; pb.Next(); i++ { 154 | r.URL.Path = "/" + strconv.Itoa(i) 155 | handler.ServeHTTP(w, r) 156 | } 157 | }) 158 | } 159 | 160 | type noopWriter struct { 161 | header http.Header 162 | } 163 | 164 | func (w *noopWriter) Write(b []byte) (int, error) { 165 | return len(b), nil 166 | } 167 | 168 | func (w *noopWriter) Header() http.Header { 169 | return w.header 170 | } 171 | 172 | func (w *noopWriter) WriteHeader(code int) {} 173 | 174 | func successHandler(w http.ResponseWriter, r *http.Request) {} 175 | 176 | var json1k = 
[]byte(`{"counts":[{"bucket":1569888000,"total":115,"unique":39},{"bucket":1569801600,"total":150,"unique":38},{"bucket":1569715200,"total":129,"unique":34},{"bucket":1569628800,"total":142,"unique":34},{"bucket":1569542400,"total":151,"unique":39},{"bucket":1569456000,"total":145,"unique":44},{"bucket":1569369600,"total":143,"unique":49},{"bucket":1569283200,"total":174,"unique":46},{"bucket":1569196800,"total":407,"unique":52},{"bucket":1569110400,"total":357,"unique":51},{"bucket":1569024000,"total":227,"unique":44},{"bucket":1568937600,"total":257,"unique":44},{"bucket":1568851200,"total":238,"unique":47},{"bucket":1568764800,"total":246,"unique":62}],"summary":{"total":2881,"unique":108}}`) 177 | 178 | func success1kHandler(w http.ResponseWriter, r *http.Request) { 179 | w.Write(json1k) 180 | } 181 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Microcache 2 | 3 | A non-standard HTTP cache implemented as Go middleware. 4 | 5 | [![GoDoc](https://godoc.org/github.com/kevburnsjr/microcache?status.svg)](https://godoc.org/github.com/kevburnsjr/microcache) 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/kevburnsjr/microcache?1)](https://goreportcard.com/report/github.com/kevburnsjr/microcache) 7 | [![Code Coverage](http://gocover.io/_badge/github.com/kevburnsjr/microcache?1)](http://gocover.io/github.com/kevburnsjr/microcache) 8 | 9 | HTTP [Microcaching](https://www.nginx.com/blog/benefits-of-microcaching-nginx/) 10 | is a common strategy for improving the efficiency, availability and 11 | response time variability of HTTP web services. These benefits are especially relevant 12 | in microservice architectures where a service's synchronous dependencies sometimes 13 | become unavailable and it is not always feasible or economical to add a separate 14 | caching layer between all services. 
15 | 16 | To date, very few software packages exist to solve this specific problem. Most 17 | microcache deployments make use of existing HTTP caching middleware like NGINX. This 18 | presents a challenge. When an HTTP cache exists for the purpose of microcaching between 19 | an origin server and a CDN, the origin must choose whether to use standard HTTP caching 20 | headers with aggressive short TTLs for the microcache or less aggressive longer TTL 21 | headers more suitable to CDNs. The overlap in HTTP header key space prevents these two 22 | cache layers from coexisting without some additional customization. 23 | 24 | All request specific custom response headers supported by this cache are prefixed with 25 | ```microcache-``` and scrubbed from the response. Most of the common HTTP caching 26 | headers one would expect to see in an http cache are ignored (except Vary). This 27 | was intentional and support may change depending on developer feedback. The purpose of 28 | this cache is not to act as a substitute for a robust HTTP caching layer but rather 29 | to serve as an additional caching layer with separate controls for shorter lived, 30 | more aggressive caching measures. 31 | 32 | The manner in which this cache operates (writing response bodies to byte buffers) may 33 | not be suitable for all applications. Caching should certainly be disabled for any 34 | resources serving very large and/or streaming responses. For instance, caching is 35 | automatically disabled for all websocket requests. 
36 | 37 | More info in the docs: https://godoc.org/github.com/kevburnsjr/microcache 38 | 39 | ## Example 40 | 41 | ```go 42 | package main 43 | 44 | import ( 45 | "bytes" 46 | "log" 47 | "net/http" 48 | "time" 49 | 50 | "github.com/kevburnsjr/microcache" 51 | ) 52 | 53 | func main() { 54 | cache := microcache.New(microcache.Config{ 55 | Nocache: true, 56 | Timeout: 3 * time.Second, 57 | TTL: 30 * time.Second, 58 | StaleIfError: 3600 * time.Second, 59 | StaleRecache: true, 60 | StaleWhileRevalidate: 30 * time.Second, 61 | CollapsedForwarding: true, 62 | HashQuery: true, 63 | QueryIgnore: []string{}, 64 | Exposed: true, 65 | SuppressAgeHeader: false, 66 | Monitor: microcache.MonitorFunc(5*time.Second, logStats), 67 | Driver: microcache.NewDriverLRU(1e4), 68 | Compressor: microcache.CompressorSnappy{}, 69 | }) 70 | defer cache.Stop() 71 | 72 | h := cache.Middleware(handler{}) 73 | 74 | http.ListenAndServe(":80", h) 75 | } 76 | 77 | type handler struct { 78 | } 79 | 80 | var body = bytes.Repeat([]byte("1234567890"), 1e3) 81 | 82 | // This example fills up to 1.2GB of memory, so at least 2.0GB of RAM is recommended 83 | func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 84 | 85 | // Enable cache 86 | w.Header().Set("microcache-cache", "1") 87 | 88 | // Return a 10 kilobyte response body 89 | w.Write(body) 90 | } 91 | 92 | func logStats(stats microcache.Stats) { 93 | total := stats.Hits + stats.Misses + stats.Stales 94 | log.Printf("Size: %d, Total: %d, Hits: %d, Misses: %d, Stales: %d, Backend: %d, Errors: %d\n", 95 | stats.Size, 96 | total, 97 | stats.Hits, 98 | stats.Misses, 99 | stats.Stales, 100 | stats.Backend, 101 | stats.Errors, 102 | ) 103 | } 104 | ``` 105 | 106 | ## Features 107 | 108 | May improve service efficiency by reducing origin read traffic 109 | 110 | * **ttl** - response caching with global or request specific ttl 111 | * **collapsed-forwarding** - deduplicate requests for cacheable resources 112 | 113 | May improve client facing 
response time variability 114 | 115 | * **stale-while-revalidate** - serve stale content while fetching cacheable resources in the background 116 | 117 | May improve service availability 118 | 119 | * **request-timeout** - kill long running requests 120 | * **stale-if-error** - serve stale responses on error (or request timeout) 121 | * **stale-recache** - recache stale responses following stale-if-error 122 | 123 | Supports content negotiation with global and request specific cache splintering 124 | 125 | * **vary** - splinter requests by request header value 126 | * **vary-query** - splinter requests by URL query parameter value 127 | 128 | ## Control Flow Diagram 129 | 130 | This diagram illustrates the basic internal operation of the middleware. 131 | 132 | ![microcache-architecture.svg](docs/microcache-architecture.svg) 133 | 134 | ## Compression 135 | 136 | The Snappy compressor is recommended to optimize for CPU over memory efficiency compared with gzip 137 | 138 | [Snappy](https://github.com/golang/snappy) provides: 139 | 140 | - 14x faster compression over gzip 141 | - 8x faster expansion over gzip 142 | - but the result is 1.5 - 2x the size compared to gzip (for specific json examples) 143 | 144 | Your mileage may vary. See [compare_compression.go](tools/compare_compression/compare_compression.go) to test your specific workloads 145 | 146 | ``` 147 | > go run tools/compare_compression/compare_compression.go -f large.json 148 | Original: 616,611 bytes of json 149 | zlib compress 719.853807ms 61,040 bytes (10.1x) 150 | gzip compress 720.731066ms 61,052 bytes (10.1x) 151 | snappy compress 48.836002ms 106,613 bytes (5.8x) 152 | zlib expand 211.538416ms 153 | gzip expand 220.011961ms 154 | snappy expand 26.973263ms 155 | ``` 156 | 157 | ## Benchmarks 158 | 159 | All benchmarks are lies. Running example code above on 5820k i7 @ 3.9Ghz DDR4. 160 | GOMAXPROCS=2, 10KB response.
161 | 162 | ``` 163 | > gobench -u http://localhost/ -c 10 -t 10 164 | Dispatching 10 clients 165 | Waiting for results... 166 | 167 | Requests: 404120 hits 168 | Successful requests: 404120 hits 169 | Network failed: 0 hits 170 | Bad requests failed (!2xx): 0 hits 171 | Successful requests rate: 40412 hits/sec 172 | Read throughput: 410714568 bytes/sec 173 | Write throughput: 3273453 bytes/sec 174 | Test time: 10 sec 175 | ``` 176 | 177 | The intent of this middleware is to serve cached content with minimal overhead. We could 178 | probably do some more gymnastics to reduce allocs but overall performance is quite good. 179 | It achieves the goal of reducing hit response times to the order of microseconds. 180 | 181 | ``` 182 | $ go test -bench=. -benchmem 183 | goos: linux 184 | goarch: amd64 185 | pkg: github.com/kevburnsjr/microcache 186 | BenchmarkHits-12 827476 1449 ns/op 678 B/op 14 allocs/op 187 | BenchmarkNocache-12 1968576 613 ns/op 312 B/op 6 allocs/op 188 | BenchmarkMisses-12 307652 3890 ns/op 1765 B/op 37 allocs/op 189 | BenchmarkCompression1kHits-12 648564 1928 ns/op 1384 B/op 15 allocs/op 190 | BenchmarkCompression1kNocache-12 1965351 611 ns/op 312 B/op 6 allocs/op 191 | BenchmarkCompression1kMisses-12 232980 5119 ns/op 3365 B/op 39 allocs/op 192 | BenchmarkParallelCompression1kHits-12 2525994 511 ns/op 1383 B/op 15 allocs/op 193 | BenchmarkParallelCompression1kNocache-12 3876728 335 ns/op 312 B/op 6 allocs/op 194 | BenchmarkParallelCompression1kMisses-12 666580 2226 ns/op 3326 B/op 38 allocs/op 195 | PASS 196 | ok github.com/kevburnsjr/microcache 7.188s 197 | ``` 198 | 199 | ## Release Status 200 | 201 | API is stable. 100% test coverage. 202 | 203 | At least one large scale deployment of this library has been running in production 204 | on a high volume internet facing API at an Alexa Top 500 global website for over a year. 
205 | 206 | ## Notes 207 | 208 | ``` 209 | Vary query by parameter presence as well as value 210 | 211 | Modify Monitor.Error to accept request, response and error 212 | Add Monitor.Timeout accepting request, response and error 213 | 214 | Separate middleware: 215 | Sanitize lang header? (first language) 216 | Sanitize region? (country code) 217 | 218 | etag support? 219 | if-modified-since support? 220 | HTCP? 221 | TCI? 222 | Custom rule handling? 223 | Passthrough: func(r) bool 224 | ``` 225 | -------------------------------------------------------------------------------- /microcache.go: -------------------------------------------------------------------------------- 1 | // microcache is a non-standard HTTP microcache implemented as Go middleware. 2 | package microcache 3 | 4 | import ( 5 | "fmt" 6 | "net/http" 7 | "strings" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type Microcache interface { 13 | Middleware(http.Handler) http.Handler 14 | Start() 15 | Stop() 16 | offsetIncr(time.Duration) 17 | } 18 | 19 | type microcache struct { 20 | Nocache bool 21 | Timeout time.Duration 22 | TTL time.Duration 23 | StaleIfError time.Duration 24 | StaleRecache bool 25 | StaleWhileRevalidate time.Duration 26 | HashQuery bool 27 | QueryIgnore map[string]bool 28 | CollapsedForwarding bool 29 | Vary []string 30 | Driver Driver 31 | Compressor Compressor 32 | Monitor Monitor 33 | Exposed bool 34 | SuppressAgeHeader bool 35 | 36 | stopMonitor chan bool 37 | revalidating map[string]bool 38 | revalidateMutex *sync.Mutex 39 | collapse map[string]*sync.Mutex 40 | collapseMutex *sync.Mutex 41 | 42 | // Used to advance time for testing 43 | offset time.Duration 44 | offsetMutex *sync.RWMutex 45 | } 46 | 47 | type Config struct { 48 | // Nocache prevents responses from being cached by default 49 | // Can be overridden by the microcache-cache and microcache-nocache response headers 50 | Nocache bool 51 | 52 | // Timeout specifies the maximum execution time for backend responses 53 | // 
Example: If the underlying handler takes more than 10s to respond, 54 | // the request is cancelled and the response is treated as 503 55 | // Recommended: 10s 56 | // Default: 0 57 | Timeout time.Duration 58 | 59 | // TTL specifies a default ttl for cached responses 60 | // Can be overridden by the microcache-ttl response header 61 | // Recommended: 10s 62 | // Default: 0 63 | TTL time.Duration 64 | 65 | // StaleWhileRevalidate specifies a period during which a stale response may be 66 | // served immediately while the resource is fetched in the background. This can be 67 | // useful for ensuring consistent response times at the cost of content freshness. 68 | // More Info: https://tools.ietf.org/html/rfc5861 69 | // Recommended: 20s 70 | // Default: 0 71 | StaleWhileRevalidate time.Duration 72 | 73 | // StaleIfError specifies a default stale grace period 74 | // If a request fails and StaleIfError is set, the object will be served as stale 75 | // and the response will be re-cached for the duration of this grace period 76 | // Can be overridden by the microcache-ttl-stale response header 77 | // More Info: https://tools.ietf.org/html/rfc5861 78 | // Recommended: 20s 79 | // Default: 0 80 | StaleIfError time.Duration 81 | 82 | // StaleRecache specifies whether to re-cache the response object for ttl while serving 83 | // stale response on backend error 84 | // Recommended: true 85 | // Default: false 86 | StaleRecache bool 87 | 88 | // CollapsedForwarding specifies whether to collapse duplicate requests 89 | // This helps prevent servers with a cold cache from hammering the backend 90 | // Default: false 91 | CollapsedForwarding bool 92 | 93 | // HashQuery determines whether all query parameters in the request URI 94 | // should be hashed to differentiate requests 95 | // Default: false 96 | HashQuery bool 97 | 98 | // QueryIgnore is a list of query parameters to ignore when hashing 99 | // Default: nil 100 | QueryIgnore []string 101 | 102 | // Vary specifies a 
list of http request headers by which all requests 103 | // should be differentiated. When making use of this option, it may be a good idea 104 | // to normalize these headers first using a separate piece of middleware. 105 | // 106 | // []string{"accept-language", "accept-encoding", "xml-http-request"} 107 | // 108 | // Default: []string{} 109 | Vary []string 110 | 111 | // Driver specifies a cache storage driver 112 | // Default: lru with 10,000 item capacity 113 | Driver Driver 114 | 115 | // Compressor specifies a compressor to use for reducing the memory required to cache 116 | // response bodies 117 | // Default: nil 118 | Compressor Compressor 119 | 120 | // Monitor is an optional parameter which will periodically report statistics about 121 | // the cache to enable monitoring of cache size, cache efficiency and error rate 122 | // Default: nil 123 | Monitor Monitor 124 | 125 | // Exposed determines whether to add a header to the response indicating the response state 126 | // Microcache: ( HIT | MISS | STALE ) 127 | // Default: false 128 | Exposed bool 129 | 130 | // SuppressAgeHeader determines whether to suppress the age header in responses 131 | // The age header is added by default to all HIT and STALE responses 132 | // Age: ( seconds ) 133 | // Default: false 134 | SuppressAgeHeader bool 135 | } 136 | 137 | // New creates and returns a configured microcache instance 138 | func New(o Config) *microcache { 139 | // Defaults 140 | m := microcache{ 141 | Nocache: o.Nocache, 142 | TTL: o.TTL, 143 | StaleIfError: o.StaleIfError, 144 | StaleRecache: o.StaleRecache, 145 | StaleWhileRevalidate: o.StaleWhileRevalidate, 146 | Timeout: o.Timeout, 147 | HashQuery: o.HashQuery, 148 | CollapsedForwarding: o.CollapsedForwarding, 149 | Vary: o.Vary, 150 | Driver: o.Driver, 151 | Compressor: o.Compressor, 152 | Monitor: o.Monitor, 153 | Exposed: o.Exposed, 154 | SuppressAgeHeader: o.SuppressAgeHeader, 155 | revalidating: map[string]bool{}, 156 | revalidateMutex: 
&sync.Mutex{}, 157 | collapse: map[string]*sync.Mutex{}, 158 | collapseMutex: &sync.Mutex{}, 159 | offsetMutex: &sync.RWMutex{}, 160 | } 161 | if o.Driver == nil { 162 | m.Driver = NewDriverLRU(1e4) // default 10k cache items 163 | } 164 | if o.QueryIgnore != nil { 165 | m.QueryIgnore = make(map[string]bool) 166 | for _, key := range o.QueryIgnore { 167 | m.QueryIgnore[key] = true 168 | } 169 | } 170 | m.Start() 171 | return &m 172 | } 173 | 174 | // Middleware can be used to wrap an HTTP handler with microcache functionality. 175 | // It can also be passed to http middleware providers like alice as a constructor. 176 | // 177 | // mx := microcache.New(microcache.Config{TTL: 10 * time.Second}) 178 | // newHandler := mx.Middleware(yourHandler) 179 | // 180 | // Or with alice 181 | // 182 | // chain.Append(mx.Middleware) 183 | // 184 | func (m *microcache) Middleware(h http.Handler) http.Handler { 185 | if m.Timeout > 0 { 186 | h = http.TimeoutHandler(h, m.Timeout, "Timed out") 187 | } 188 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 189 | // Websocket passthrough 190 | upgrade := strings.ToLower(r.Header.Get("connection")) == "upgrade" 191 | if upgrade || m.Driver == nil { 192 | if m.Monitor != nil { 193 | m.Monitor.Miss() 194 | } 195 | h.ServeHTTP(w, r) 196 | return 197 | } 198 | 199 | // Fetch request options 200 | reqHash := getRequestHash(m, r) 201 | req := m.Driver.GetRequestOpts(reqHash) 202 | 203 | // Hard passthrough on non cacheable requests 204 | if req.nocache { 205 | if m.Monitor != nil { 206 | m.Monitor.Miss() 207 | } 208 | h.ServeHTTP(w, r) 209 | return 210 | } 211 | 212 | // CollapsedForwarding 213 | // This implementation may collapse too many uncacheable requests. 214 | // Refactor may be complicated. 
215 | if m.CollapsedForwarding { 216 | m.collapseMutex.Lock() 217 | mutex, ok := m.collapse[reqHash] 218 | if !ok { 219 | mutex = &sync.Mutex{} 220 | m.collapse[reqHash] = mutex 221 | } 222 | m.collapseMutex.Unlock() 223 | // Mutex serializes collapsible requests 224 | mutex.Lock() 225 | defer func() { 226 | mutex.Unlock() 227 | m.collapseMutex.Lock() 228 | delete(m.collapse, reqHash) 229 | m.collapseMutex.Unlock() 230 | }() 231 | if !req.found { 232 | req = m.Driver.GetRequestOpts(reqHash) 233 | } 234 | } 235 | 236 | // Fetch cached response object 237 | var objHash string 238 | var obj Response 239 | if req.found { 240 | objHash = req.getObjectHash(reqHash, r) 241 | obj = m.Driver.Get(objHash) 242 | if m.Compressor != nil { 243 | obj = m.Compressor.Expand(obj) 244 | } 245 | } 246 | 247 | // Non-cacheable request method passthrough and purge 248 | if r.Method != "GET" && r.Method != "HEAD" && r.Method != "OPTIONS" { 249 | if m.Monitor != nil { 250 | m.Monitor.Miss() 251 | } 252 | if obj.found { 253 | // HTTP spec requires caches to purge cached responses following 254 | // successful unsafe request 255 | ptw := passthroughWriter{w, 0} 256 | h.ServeHTTP(&ptw, r) 257 | if ptw.status >= 200 && ptw.status < 400 { 258 | m.Driver.Remove(objHash) 259 | } 260 | } else { 261 | h.ServeHTTP(w, r) 262 | } 263 | return 264 | } 265 | 266 | // Fresh response object found 267 | if obj.found && obj.expires.After(m.now()) { 268 | if m.Monitor != nil { 269 | m.Monitor.Hit() 270 | } 271 | if m.Exposed { 272 | w.Header().Set("microcache", "HIT") 273 | } 274 | m.setAgeHeader(w, obj) 275 | obj.sendResponse(w) 276 | return 277 | } 278 | 279 | // Stale While Revalidate 280 | if obj.found && req.staleWhileRevalidate > 0 && 281 | obj.expires.Add(req.staleWhileRevalidate).After(m.now()) { 282 | if m.Monitor != nil { 283 | m.Monitor.Stale() 284 | } 285 | if m.Exposed { 286 | w.Header().Set("microcache", "STALE") 287 | } 288 | m.setAgeHeader(w, obj) 289 | obj.sendResponse(w) 290 | 291 | // 
Dedupe revalidation 292 | m.revalidateMutex.Lock() 293 | _, revalidating := m.revalidating[objHash] 294 | if !revalidating { 295 | m.revalidating[objHash] = true 296 | } 297 | m.revalidateMutex.Unlock() 298 | if !revalidating { 299 | br := newBackgroundRequest(r) 300 | go func() { 301 | defer func() { 302 | // Clear revalidation lock 303 | m.revalidateMutex.Lock() 304 | delete(m.revalidating, objHash) 305 | m.revalidateMutex.Unlock() 306 | }() 307 | m.handleBackendResponse(h, w, br, reqHash, req, objHash, obj, true) 308 | }() 309 | } 310 | 311 | return 312 | } else { 313 | m.handleBackendResponse(h, w, r, reqHash, req, objHash, obj, false) 314 | return 315 | } 316 | }) 317 | } 318 | 319 | func (m *microcache) handleBackendResponse( 320 | h http.Handler, 321 | w http.ResponseWriter, 322 | r *http.Request, 323 | reqHash string, 324 | req RequestOpts, 325 | objHash string, 326 | obj Response, 327 | background bool, 328 | ) { 329 | if m.Monitor != nil { 330 | m.Monitor.Backend() 331 | } 332 | 333 | // Backend Response 334 | beres := Response{header: http.Header{}} 335 | 336 | // Execute request 337 | h.ServeHTTP(&beres, r) 338 | 339 | if !beres.headerWritten { 340 | beres.status = http.StatusOK 341 | } 342 | 343 | // Log Error 344 | if beres.status >= 500 && m.Monitor != nil { 345 | m.Monitor.Error() 346 | } 347 | 348 | // Serve Stale 349 | if beres.status >= 500 && obj.found { 350 | serveStale := obj.expires.Add(req.staleIfError).After(m.now()) 351 | // Extend stale response expiration by staleIfError grace period 352 | if req.found && serveStale && req.staleRecache { 353 | obj.expires = obj.date.Add(m.getOffset()).Add(req.ttl) 354 | m.store(objHash, obj) 355 | } 356 | if !background && serveStale { 357 | if m.Monitor != nil { 358 | m.Monitor.Stale() 359 | } 360 | if m.Exposed { 361 | w.Header().Set("microcache", "STALE") 362 | } 363 | m.setAgeHeader(w, obj) 364 | obj.sendResponse(w) 365 | return 366 | } 367 | } 368 | 369 | // Backend Request succeeded 370 | if 
beres.status >= 200 && beres.status < 400 { 371 | if !req.found { 372 | // Store request options 373 | req = buildRequestOpts(m, beres, r) 374 | m.Driver.SetRequestOpts(reqHash, req) 375 | objHash = req.getObjectHash(reqHash, r) 376 | } 377 | // Cache response 378 | if !req.nocache { 379 | beres.expires = m.now().Add(req.ttl) 380 | m.store(objHash, beres) 381 | } 382 | } 383 | 384 | // Don't render response during background revalidate 385 | if background { 386 | return 387 | } 388 | 389 | if m.Monitor != nil { 390 | m.Monitor.Miss() 391 | } 392 | if m.Exposed { 393 | w.Header().Set("microcache", "MISS") 394 | } 395 | beres.sendResponse(w) 396 | } 397 | 398 | // Start starts the monitor and any other required background processes 399 | func (m *microcache) Start() { 400 | if m.stopMonitor != nil || m.Monitor == nil { 401 | return 402 | } 403 | m.stopMonitor = make(chan bool) 404 | go func() { 405 | for { 406 | select { 407 | case <-time.After(m.Monitor.GetInterval()): 408 | m.Monitor.Log(Stats{ 409 | Size: m.Driver.GetSize(), 410 | }) 411 | case <-m.stopMonitor: 412 | return 413 | } 414 | } 415 | }() 416 | } 417 | 418 | // setAgeHeader sets the age header if not suppressed 419 | func (m *microcache) setAgeHeader(w http.ResponseWriter, obj Response) { 420 | if !m.SuppressAgeHeader { 421 | age := (m.now().Unix() - obj.date.Unix()) 422 | w.Header().Set("age", fmt.Sprintf("%d", age)) 423 | } 424 | } 425 | 426 | // store sets the age header if not suppressed 427 | func (m *microcache) store(objHash string, obj Response) { 428 | obj.found = true 429 | obj.date = time.Now() 430 | if m.Compressor != nil { 431 | m.Driver.Set(objHash, m.Compressor.Compress(obj)) 432 | } else { 433 | m.Driver.Set(objHash, obj) 434 | } 435 | } 436 | 437 | // Stop stops the monitor and any other required background processes 438 | func (m *microcache) Stop() { 439 | if m.stopMonitor == nil { 440 | return 441 | } 442 | m.stopMonitor <- true 443 | } 444 | 445 | // Increments the offset for 
testing purposes 446 | func (m *microcache) offsetIncr(o time.Duration) { 447 | m.offsetMutex.Lock() 448 | defer m.offsetMutex.Unlock() 449 | m.offset += o 450 | } 451 | 452 | // Get offset 453 | func (m *microcache) getOffset() time.Duration { 454 | m.offsetMutex.RLock() 455 | defer m.offsetMutex.RUnlock() 456 | return m.offset 457 | } 458 | 459 | // Get current time with offset 460 | func (m *microcache) now() time.Time { 461 | return time.Now().Add(m.getOffset()) 462 | } 463 | -------------------------------------------------------------------------------- /microcache_test.go: -------------------------------------------------------------------------------- 1 | package microcache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "net/http/httptest" 8 | "strings" 9 | "sync" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | // TTL should be respected 15 | func TestTTL(t *testing.T) { 16 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 17 | cache := New(Config{ 18 | TTL: 30 * time.Second, 19 | Monitor: testMonitor, 20 | Driver: NewDriverLRU(10), 21 | }) 22 | defer cache.Stop() 23 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 24 | batchGet(handler, []string{ 25 | "/", 26 | "/", 27 | }) 28 | cache.offsetIncr(30 * time.Second) 29 | batchGet(handler, []string{ 30 | "/", 31 | "/", 32 | }) 33 | if testMonitor.getMisses() != 2 || testMonitor.getHits() != 2 { 34 | t.Fatal("TTL not respected - got", testMonitor.getHits(), "hits") 35 | } 36 | } 37 | 38 | // HashQuery 39 | func TestHashQuery(t *testing.T) { 40 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 41 | cache := New(Config{ 42 | TTL: 30 * time.Second, 43 | HashQuery: true, 44 | Monitor: testMonitor, 45 | Driver: NewDriverLRU(10), 46 | }) 47 | defer cache.Stop() 48 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 49 | batchGet(handler, []string{ 50 | "/", 51 | "/?a=1", 52 | }) 53 | if 
testMonitor.getMisses() != 2 { 54 | t.Fatal("HashQuery not respected - got", testMonitor.getMisses(), "misses") 55 | } 56 | } 57 | 58 | // HashQuery Disabled 59 | func TestHashQueryDisabled(t *testing.T) { 60 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 61 | cache := New(Config{ 62 | TTL: 30 * time.Second, 63 | HashQuery: false, 64 | Monitor: testMonitor, 65 | Driver: NewDriverLRU(10), 66 | }) 67 | defer cache.Stop() 68 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 69 | batchGet(handler, []string{ 70 | "/", 71 | "/?a=1", 72 | }) 73 | if testMonitor.getMisses() != 1 { 74 | t.Fatal("HashQuery not ignored - got", testMonitor.getMisses(), "misses") 75 | } 76 | } 77 | 78 | // Query Ignore operates as expected 79 | func TestQueryIgnore(t *testing.T) { 80 | cache := New(Config{ 81 | TTL: 30 * time.Second, 82 | HashQuery: true, 83 | QueryIgnore: []string{"a"}, 84 | Driver: NewDriverLRU(10), 85 | Exposed: true, 86 | }) 87 | defer cache.Stop() 88 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 89 | cases := []struct { 90 | url string 91 | hit bool 92 | }{ 93 | {"/", false}, 94 | {"/?a=1", true}, 95 | {"/?foo=1", false}, 96 | {"/?foo=1", true}, 97 | {"/?foo=1&a=1", true}, 98 | {"/?foo=1&b=1", false}, 99 | } 100 | for i, c := range cases { 101 | r := getResponse(handler, c.url) 102 | if c.hit != (r.Header().Get("microcache") == "HIT") { 103 | t.Fatalf("Hit should have been %v for case %d", c.hit, i+1) 104 | } 105 | } 106 | } 107 | 108 | // QueryIgnore should be disregarded when HashQuery is false 109 | func TestQueryIgnoreDisabled(t *testing.T) { 110 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 111 | cache := New(Config{ 112 | TTL: 30 * time.Second, 113 | HashQuery: false, 114 | QueryIgnore: []string{"a"}, 115 | Monitor: testMonitor, 116 | Driver: NewDriverLRU(10), 117 | }) 118 | defer cache.Stop() 119 | handler := 
cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 120 | batchGet(handler, []string{ 121 | "/", 122 | "/?a=1", 123 | "/?b=2", 124 | }) 125 | if testMonitor.getMisses() != 1 { 126 | t.Fatal("Query parameters ignored - got", testMonitor.getMisses(), "misses") 127 | } 128 | } 129 | 130 | // StaleWhileRevalidate 131 | func TestStaleWhileRevalidate(t *testing.T) { 132 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 133 | cache := New(Config{ 134 | TTL: 30 * time.Second, 135 | StaleWhileRevalidate: 30 * time.Second, 136 | Monitor: testMonitor, 137 | Driver: NewDriverLRU(10), 138 | Exposed: true, 139 | }) 140 | defer cache.Stop() 141 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 142 | 143 | // prime cache 144 | batchGet(handler, []string{ 145 | "/", 146 | "/", 147 | }) 148 | if testMonitor.getMisses() != 1 || testMonitor.getHits() != 1 { 149 | t.Fatal("StaleWhileRevalidate not respected - got", testMonitor.getMisses(), "misses") 150 | } 151 | 152 | // stale and hit after 30s 153 | cache.offsetIncr(30 * time.Second) 154 | batchGet(handler, []string{ 155 | "/", 156 | }) 157 | time.Sleep(10 * time.Millisecond) 158 | batchGet(handler, []string{ 159 | "/", 160 | }) 161 | if testMonitor.getStales() != 1 || testMonitor.getHits() != 2 { 162 | t.Fatal("StaleWhileRevalidate not respected - got", testMonitor.getStales(), "stales") 163 | } 164 | } 165 | 166 | // CollapsedFowarding and StaleWhileRevalidate 167 | func TestCollapsedFowardingStaleWhileRevalidate(t *testing.T) { 168 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 169 | cache := New(Config{ 170 | TTL: 30 * time.Second, 171 | CollapsedForwarding: true, 172 | StaleWhileRevalidate: 30 * time.Second, 173 | Monitor: testMonitor, 174 | Driver: NewDriverLRU(10), 175 | }) 176 | defer cache.Stop() 177 | handler := cache.Middleware(http.HandlerFunc(timelySuccessHandler)) 178 | batchGet(handler, []string{"/"}) 179 | cache.offsetIncr(31 * 
time.Second) 180 | start := time.Now() 181 | parallelGet(handler, strings.Split(strings.Repeat(",/", 10)[1:], ",")) 182 | end := time.Since(start) 183 | // Sleep for a little bit to give the StaleWhileRevalidate goroutines some time to start. 184 | time.Sleep(time.Millisecond * 10) 185 | if testMonitor.getMisses() != 1 || testMonitor.getStales() != 10 || 186 | testMonitor.getBackends() != 2 || end > 20*time.Millisecond { 187 | t.Fatalf("CollapsedFowarding and StaleWhileRevalidate not respected %s", dumpMonitor(testMonitor)) 188 | } 189 | } 190 | 191 | // StaleIfError 192 | func TestStaleIfError(t *testing.T) { 193 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 194 | cache := New(Config{ 195 | TTL: 30 * time.Second, 196 | StaleIfError: 600 * time.Second, 197 | Monitor: testMonitor, 198 | QueryIgnore: []string{"fail"}, 199 | Driver: NewDriverLRU(10), 200 | Exposed: true, 201 | }) 202 | defer cache.Stop() 203 | handler := cache.Middleware(http.HandlerFunc(failureHandler)) 204 | 205 | // prime cache 206 | batchGet(handler, []string{ 207 | "/", 208 | "/", 209 | }) 210 | if testMonitor.getMisses() != 1 || testMonitor.getHits() != 1 { 211 | t.Fatal("StaleIfError not respected - got", testMonitor.getMisses(), "misses") 212 | } 213 | 214 | // stale after 30s 215 | cache.offsetIncr(30 * time.Second) 216 | batchGet(handler, []string{ 217 | "/?fail=1", 218 | }) 219 | if testMonitor.getStales() != 1 { 220 | t.Fatal("StaleIfError not respected - got", testMonitor.getStales(), "stales") 221 | } 222 | 223 | // error after 600s 224 | cache.offsetIncr(600 * time.Second) 225 | batchGet(handler, []string{ 226 | "/?fail=1", 227 | }) 228 | if testMonitor.getErrors() != 2 || testMonitor.getStales() != 1 { 229 | t.Fatal("StaleIfError not respected - got", testMonitor.getErrors(), "errors") 230 | } 231 | } 232 | 233 | // StaleRecache 234 | func TestStaleRecache(t *testing.T) { 235 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: 
func(Stats) {}} 236 | cache := New(Config{ 237 | TTL: 30 * time.Second, 238 | StaleIfError: 600 * time.Second, 239 | StaleRecache: true, 240 | Monitor: testMonitor, 241 | QueryIgnore: []string{"fail"}, 242 | Driver: NewDriverLRU(10), 243 | }) 244 | defer cache.Stop() 245 | 246 | handler := cache.Middleware(http.HandlerFunc(failureHandler)) 247 | 248 | // prime cache 249 | batchGet(handler, []string{ 250 | "/", 251 | "/", 252 | }) 253 | if testMonitor.getMisses() != 1 || testMonitor.getHits() != 1 { 254 | t.Fatal("StaleRecache not respected - got", testMonitor.getMisses(), "misses") 255 | } 256 | 257 | // stale after 30s 258 | cache.offsetIncr(30 * time.Second) 259 | batchGet(handler, []string{ 260 | "/?fail=1", 261 | }) 262 | if testMonitor.getStales() != 1 { 263 | t.Fatal("StaleIfError not respected - got", testMonitor.getStales(), "stales") 264 | } 265 | 266 | // hit when stale is recached 267 | batchGet(handler, []string{ 268 | "/?fail=1", 269 | }) 270 | if testMonitor.getHits() != 2 { 271 | t.Fatal("StaleRecache not respected - got", testMonitor.getErrors(), "errors") 272 | } 273 | } 274 | 275 | // Timeout 276 | func TestTimeout(t *testing.T) { 277 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 278 | cache := New(Config{ 279 | TTL: 30 * time.Second, 280 | Timeout: 10 * time.Millisecond, 281 | Monitor: testMonitor, 282 | Driver: NewDriverLRU(10), 283 | }) 284 | defer cache.Stop() 285 | handler := cache.Middleware(http.HandlerFunc(slowSuccessHandler)) 286 | start := time.Now() 287 | batchGet(handler, []string{ 288 | "/", 289 | }) 290 | if testMonitor.getErrors() != 1 || time.Since(start) > 20*time.Millisecond { 291 | t.Fatal("Timeout not respected - got", testMonitor.getErrors(), "errors") 292 | } 293 | } 294 | 295 | // Request context cancellation should not cause error from TimeoutHandler 296 | func TestRequestContextCancel(t *testing.T) { 297 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) 
{}} 298 | cache := New(Config{ 299 | TTL: 30 * time.Second, 300 | StaleWhileRevalidate: 30 * time.Second, 301 | Timeout: 10 * time.Second, 302 | CollapsedForwarding: true, 303 | Monitor: testMonitor, 304 | Driver: NewDriverLRU(10), 305 | }) 306 | defer cache.Stop() 307 | handler := cache.Middleware(http.HandlerFunc(timelySuccessHandler)) 308 | batchGet(handler, []string{"/"}) 309 | cache.offsetIncr(31 * time.Second) 310 | r, _ := http.NewRequest("GET", "/", nil) 311 | ctx, cancel := context.WithCancel(r.Context()) 312 | r = r.WithContext(ctx) 313 | w := httptest.NewRecorder() 314 | handler.ServeHTTP(w, r) 315 | cancel() 316 | time.Sleep(1 * time.Millisecond) 317 | if testMonitor.getErrors() > 0 { 318 | t.Fatal("TimeoutHandler returned error") 319 | } 320 | cache.offsetIncr(31 * time.Second) 321 | cache.Timeout = 1 * time.Millisecond 322 | batchGet(cache.Middleware(http.HandlerFunc(slowSuccessHandler)), []string{"/"}) 323 | time.Sleep(2 * time.Millisecond) 324 | if testMonitor.getErrors() != 1 { 325 | t.Fatal("Request did not time out") 326 | } 327 | } 328 | 329 | // CollapsedFowarding 330 | func TestCollapsedFowarding(t *testing.T) { 331 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 332 | cache := New(Config{ 333 | TTL: 30 * time.Second, 334 | CollapsedForwarding: true, 335 | Monitor: testMonitor, 336 | Driver: NewDriverLRU(10), 337 | }) 338 | defer cache.Stop() 339 | handler := cache.Middleware(http.HandlerFunc(timelySuccessHandler)) 340 | start := time.Now() 341 | parallelGet(handler, []string{ 342 | "/", 343 | "/", 344 | "/", 345 | "/", 346 | "/", 347 | "/", 348 | }) 349 | if testMonitor.getMisses() != 1 || testMonitor.getHits() != 5 || time.Since(start) > 20*time.Millisecond { 350 | t.Fatal("CollapsedFowarding not respected - got", testMonitor.getHits(), "hits") 351 | } 352 | } 353 | 354 | // SuppressAgeHeader 355 | func TestAgeHeader(t *testing.T) { 356 | // Age header is added by default 357 | testMonitor := 
&monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 358 | cache := New(Config{ 359 | TTL: 30 * time.Second, 360 | Monitor: testMonitor, 361 | Driver: NewDriverLRU(10), 362 | }) 363 | defer cache.Stop() 364 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 365 | batchGet(handler, []string{ 366 | "/", 367 | }) 368 | cache.offsetIncr(20 * time.Second) 369 | w := getResponse(handler, "/") 370 | if w.Header().Get("age") != "20" { 371 | t.Fatal("Age header was not correct \"", w.Header().Get("age"), "\" != 20") 372 | } 373 | } 374 | 375 | // SuppressAgeHeaderSuppression 376 | func TestAgeHeaderSuppression(t *testing.T) { 377 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 378 | cache := New(Config{ 379 | TTL: 30 * time.Second, 380 | SuppressAgeHeader: true, 381 | Monitor: testMonitor, 382 | Driver: NewDriverLRU(10), 383 | }) 384 | defer cache.Stop() 385 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 386 | batchGet(handler, []string{ 387 | "/", 388 | }) 389 | w := getResponse(handler, "/") 390 | if w.Header().Get("age") != "" { 391 | t.Fatal("Age header was added when it should be empty") 392 | } 393 | } 394 | 395 | // ARCCache should work as expected 396 | func TestARCCache(t *testing.T) { 397 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 398 | cache := New(Config{ 399 | TTL: 30 * time.Second, 400 | Monitor: testMonitor, 401 | Driver: NewDriverARC(10), 402 | }) 403 | defer cache.Stop() 404 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 405 | batchGet(handler, []string{ 406 | "/", 407 | "/", 408 | }) 409 | cache.offsetIncr(30 * time.Second) 410 | batchGet(handler, []string{ 411 | "/", 412 | "/", 413 | }) 414 | if testMonitor.getMisses() != 2 || testMonitor.getHits() != 2 { 415 | t.Fatal("TTL not respected by ARC - got", testMonitor.getHits(), "hits") 416 | } 417 | } 418 | 419 | // Multiple calls to Start should not cause race 
conditions 420 | func TestMultipleStart(t *testing.T) { 421 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 422 | cache := New(Config{ 423 | TTL: 30 * time.Second, 424 | Monitor: testMonitor, 425 | Driver: NewDriverLRU(10), 426 | }) 427 | defer cache.Stop() 428 | cache.Start() 429 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 430 | batchGet(handler, []string{ 431 | "/", 432 | "/", 433 | }) 434 | cache.offsetIncr(30 * time.Second) 435 | batchGet(handler, []string{ 436 | "/", 437 | "/", 438 | }) 439 | } 440 | 441 | // Without WriteHeader 442 | func TestNoWriteHeader(t *testing.T) { 443 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 444 | cache := New(Config{ 445 | TTL: 30 * time.Second, 446 | Monitor: testMonitor, 447 | Driver: NewDriverLRU(10), 448 | }) 449 | defer cache.Stop() 450 | handler := cache.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 451 | w.Write([]byte("ok")) 452 | })) 453 | batchGet(handler, []string{ 454 | "/", 455 | "/", 456 | }) 457 | if testMonitor.getMisses() != 1 || testMonitor.getHits() != 1 { 458 | t.Fatal("WriteHeader not implicitly called", testMonitor.getHits(), "hits") 459 | } 460 | } 461 | 462 | // Websocket should pass through 463 | func TestWebsocketPassthrough(t *testing.T) { 464 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 465 | cache := New(Config{ 466 | Driver: NewDriverLRU(10), 467 | Monitor: testMonitor, 468 | }) 469 | defer cache.Stop() 470 | var resSubstitutionOccurred bool 471 | handler := cache.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 472 | _, resSubstitutionOccurred = w.(*Response) 473 | })) 474 | batchGet(handler, []string{ 475 | "/", 476 | }) 477 | if !resSubstitutionOccurred { 478 | t.Fatal("Response substitution should have occurred") 479 | } 480 | r, _ := http.NewRequest("GET", "/", nil) 481 | r.Header.Set("connection", 
"upgrade") 482 | w := httptest.NewRecorder() 483 | handler.ServeHTTP(w, r) 484 | if resSubstitutionOccurred { 485 | t.Fatal("Response substitution should not have occurred") 486 | } 487 | if testMonitor.getMisses() != 2 { 488 | t.Fatal("Websocket passthrough should count as miss") 489 | } 490 | } 491 | 492 | // Nocache should pass through when triggered by header 493 | func TestNocacheHeader(t *testing.T) { 494 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 495 | cache := New(Config{ 496 | Driver: NewDriverLRU(10), 497 | Monitor: testMonitor, 498 | }) 499 | defer cache.Stop() 500 | var resSubstitutionOccurred bool 501 | handler := cache.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 502 | w.Header().Set("microcache-nocache", "1") 503 | _, resSubstitutionOccurred = w.(*Response) 504 | })) 505 | batchGet(handler, []string{"/"}) 506 | if !resSubstitutionOccurred { 507 | t.Fatal("Response substitution should have occurred") 508 | } 509 | batchGet(handler, []string{"/"}) 510 | if resSubstitutionOccurred { 511 | t.Fatal("Response substitution should not have occurred") 512 | } 513 | if testMonitor.getMisses() != 2 { 514 | t.Fatal("Nocache should count as miss") 515 | } 516 | } 517 | 518 | // TTL should be respected when used with compression 519 | func TestCompressorTTL(t *testing.T) { 520 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 521 | cache := New(Config{ 522 | TTL: 30 * time.Second, 523 | Monitor: testMonitor, 524 | Driver: NewDriverLRU(10), 525 | Compressor: CompressorSnappy{}, 526 | }) 527 | defer cache.Stop() 528 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 529 | batchGet(handler, []string{ 530 | "/", 531 | "/", 532 | }) 533 | cache.offsetIncr(30 * time.Second) 534 | batchGet(handler, []string{ 535 | "/", 536 | "/", 537 | }) 538 | if testMonitor.getMisses() != 2 || testMonitor.getHits() != 2 { 539 | t.Fatal("TTL not respected - got", 
testMonitor.getHits(), "hits") 540 | } 541 | } 542 | 543 | // Vary operates as expected 544 | func TestVary(t *testing.T) { 545 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 546 | cache := New(Config{ 547 | TTL: 30 * time.Second, 548 | Monitor: testMonitor, 549 | Driver: NewDriverLRU(10), 550 | Vary: []string{"foo"}, 551 | Exposed: true, 552 | }) 553 | defer cache.Stop() 554 | handler := cache.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 555 | w.Header().Set("Vary", "bar") 556 | w.Header().Set("Microcache-Vary", "baz") 557 | })) 558 | cases := []struct { 559 | url string 560 | hdr map[string]string 561 | hit bool 562 | }{ 563 | {"/", map[string]string{"foo": "1"}, false}, 564 | {"/", map[string]string{"foo": "1"}, true}, 565 | {"/", map[string]string{"foo": "1", "bar": "1"}, false}, 566 | {"/", map[string]string{"foo": "1", "bar": "1"}, true}, 567 | {"/", map[string]string{"foo": "1", "bar": "2"}, false}, 568 | {"/", map[string]string{"foo": "2", "bar": "2"}, false}, 569 | {"/", map[string]string{"foo": "2", "bar": "2"}, true}, 570 | {"/", map[string]string{"foo": "1", "bar": "2", "baz": "1"}, false}, 571 | {"/", map[string]string{"foo": "2", "bar": "2", "baz": "1"}, false}, 572 | {"/", map[string]string{"foo": "2", "bar": "2", "baz": "1"}, true}, 573 | } 574 | for i, c := range cases { 575 | h := http.Header{} 576 | for k, v := range c.hdr { 577 | h.Set(k, v) 578 | } 579 | r := getResponseWithHeader(handler, c.url, h) 580 | if c.hit != (r.Header().Get("microcache") == "HIT") { 581 | t.Fatalf("Hit should have been %v for case %d", c.hit, i+1) 582 | } 583 | } 584 | } 585 | 586 | // Vary Query operates as expected 587 | func TestVaryQuery(t *testing.T) { 588 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 589 | cache := New(Config{ 590 | TTL: 30 * time.Second, 591 | Monitor: testMonitor, 592 | Driver: NewDriverLRU(10), 593 | Exposed: true, 594 | }) 595 | defer 
cache.Stop() 596 | handler := cache.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 597 | w.Header().Set("Microcache-Vary-Query", "foo") 598 | })) 599 | cases := []struct { 600 | url string 601 | hit bool 602 | }{ 603 | {"/?foo=1", false}, 604 | {"/?foo=1", true}, 605 | {"/?foo=2", false}, 606 | {"/?foo=2", true}, 607 | {"/", false}, 608 | {"/?bar=1", true}, 609 | {"/?baz=2", true}, 610 | } 611 | for i, c := range cases { 612 | r := getResponse(handler, c.url) 613 | if c.hit != (r.Header().Get("microcache") == "HIT") { 614 | t.Fatalf("Hit should have been %v for case %d", c.hit, i+1) 615 | } 616 | } 617 | } 618 | 619 | // Unsafe requests should miss 620 | func TestUnsafe(t *testing.T) { 621 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 622 | cache := New(Config{ 623 | TTL: 30 * time.Second, 624 | Monitor: testMonitor, 625 | Driver: NewDriverLRU(10), 626 | Exposed: true, 627 | }) 628 | defer cache.Stop() 629 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 630 | cases := []struct { 631 | url string 632 | method string 633 | hit bool 634 | }{ 635 | {"/", "POST", false}, 636 | } 637 | for i, c := range cases { 638 | r := getResponseWithMethod(handler, c.url, c.method) 639 | if c.hit != (r.Header().Get("microcache") == "HIT") { 640 | t.Fatalf("Hit should have been %v for case %d", c.hit, i+1) 641 | } 642 | } 643 | if testMonitor.getMisses() != 1 { 644 | t.Fatal("Unsafe methods should cause miss") 645 | } 646 | } 647 | 648 | // Unsafe requests should miss and purge objects 649 | func TestUnsafePurge(t *testing.T) { 650 | testMonitor := &monitorFunc{interval: 100 * time.Second, logFunc: func(Stats) {}} 651 | cache := New(Config{ 652 | TTL: 30 * time.Second, 653 | Monitor: testMonitor, 654 | Driver: NewDriverLRU(10), 655 | Exposed: true, 656 | }) 657 | defer cache.Stop() 658 | handler := cache.Middleware(http.HandlerFunc(noopSuccessHandler)) 659 | cases := []struct { 660 | url string 661 | 
method string 662 | hit bool 663 | }{ 664 | {"/", "GET", false}, 665 | {"/", "GET", true}, 666 | {"/", "POST", false}, 667 | {"/", "GET", false}, 668 | {"/", "GET", true}, 669 | {"/", "PUT", false}, 670 | {"/", "GET", false}, 671 | {"/", "GET", true}, 672 | {"/", "DELETE", false}, 673 | {"/", "GET", false}, 674 | {"/", "GET", true}, 675 | {"/", "PATCH", false}, 676 | {"/", "GET", false}, 677 | {"/", "GET", true}, 678 | } 679 | for i, c := range cases { 680 | r := getResponseWithMethod(handler, c.url, c.method) 681 | if c.hit != (r.Header().Get("microcache") == "HIT") { 682 | t.Fatalf("Hit should have been %v for case %d", c.hit, i+1) 683 | } 684 | } 685 | } 686 | 687 | // Stop 688 | func TestStop(t *testing.T) { 689 | cache := New(Config{}) 690 | done := make(chan bool) 691 | go func() { 692 | cache.Stop() 693 | done <- true 694 | }() 695 | select { 696 | case <-time.After(100 * time.Millisecond): 697 | t.Fatal("Middleware failed to stop") 698 | case <-done: 699 | return 700 | } 701 | } 702 | 703 | // --- helper funcs --- 704 | 705 | func batchGet(handler http.Handler, urls []string) { 706 | for _, url := range urls { 707 | r, _ := http.NewRequest("GET", url, nil) 708 | w := httptest.NewRecorder() 709 | handler.ServeHTTP(w, r) 710 | // Same check as https://github.com/golang/go/blob/d6ffc1d8394d6f6420bb92d79d320da88720fbe0/src/net/http/server.go#L1090 711 | if code := w.Code; code < 100 || code > 999 { 712 | panic(fmt.Sprintf("invalid WriteHeader code %v", code)) 713 | } 714 | } 715 | } 716 | 717 | func parallelGet(handler http.Handler, urls []string) { 718 | var wg sync.WaitGroup 719 | for _, url := range urls { 720 | r1, _ := http.NewRequest("GET", url, nil) 721 | wg.Add(1) 722 | go func() { 723 | handler.ServeHTTP(httptest.NewRecorder(), r1) 724 | wg.Done() 725 | }() 726 | } 727 | wg.Wait() 728 | } 729 | 730 | func getResponse(handler http.Handler, url string) *httptest.ResponseRecorder { 731 | r, _ := http.NewRequest("GET", url, nil) 732 | w := 
httptest.NewRecorder() 733 | handler.ServeHTTP(w, r) 734 | return w 735 | } 736 | 737 | func getResponseWithHeader(handler http.Handler, url string, h http.Header) *httptest.ResponseRecorder { 738 | r, _ := http.NewRequest("GET", url, nil) 739 | r.Header = h 740 | w := httptest.NewRecorder() 741 | handler.ServeHTTP(w, r) 742 | return w 743 | } 744 | 745 | func getResponseWithMethod(handler http.Handler, url string, m string) *httptest.ResponseRecorder { 746 | r, _ := http.NewRequest(m, url, nil) 747 | w := httptest.NewRecorder() 748 | handler.ServeHTTP(w, r) 749 | return w 750 | } 751 | 752 | func noopSuccessHandler(w http.ResponseWriter, r *http.Request) { 753 | http.Error(w, "done", 200) 754 | } 755 | 756 | func failureHandler(w http.ResponseWriter, r *http.Request) { 757 | fail := r.FormValue("fail") 758 | if fail != "" { 759 | http.Error(w, "fail", 500) 760 | } else { 761 | http.Error(w, "done", 200) 762 | } 763 | } 764 | 765 | func slowSuccessHandler(w http.ResponseWriter, r *http.Request) { 766 | time.Sleep(100 * time.Millisecond) 767 | http.Error(w, "done", 200) 768 | } 769 | 770 | func timelySuccessHandler(w http.ResponseWriter, r *http.Request) { 771 | time.Sleep(10 * time.Millisecond) 772 | http.Error(w, "done", 200) 773 | } 774 | 775 | func dumpMonitor(m *monitorFunc) string { 776 | return fmt.Sprintf("Hits: %d, Misses: %d, Backend: %d, Stales: %d, Errors: %d", 777 | m.getHits(), 778 | m.getMisses(), 779 | m.getBackends(), 780 | m.getStales(), 781 | m.getErrors(), 782 | ) 783 | } 784 | -------------------------------------------------------------------------------- /docs/microcache-architecture.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 |
Yes
Yes
No
No
Calculate
Request Hash
SHA1(path + query)
[Not supported by viewer]
Request
Options
LRU
[Not supported by viewer]
 Fetch 
[Not supported by viewer]
Response
Objects
LRU
[Not supported by viewer]
Next HTTP 
Handler
[Not supported by viewer]
Calculate%3CmxGraphModel%3E%3Croot%3E%3CmxCell%20id%3D%220%22%2F%3E%3CmxCell%20id%3D%221%22%20parent%3D%220%22%2F%3E%3CmxCell%20id%3D%222%22%20value%3D%22%26lt%3Bdiv%26gt%3B%26lt%3Bfont%20color%3D%26quot%3B%23000000%26quot%3B%26gt%3B%26lt%3Bspan%20style%3D%26quot%3Bfont-size%3A%2011px%26quot%3B%26gt%3B%26lt%3Bb%26gt%3BRequest%20Hash%26lt%3B%2Fb%26gt%3B%26lt%3B%2Fspan%26gt%3B%26lt%3B%2Ffont%26gt%3B%26lt%3B%2Fdiv%26gt%3B%26lt%3Bdiv%26gt%3B%26lt%3Bfont%20color%3D%26quot%3B%23000000%26quot%3B%26gt%3B%26lt%3Bspan%20style%3D%26quot%3Bfont-size%3A%2011px%26quot%3B%26gt%3BSHA1(path%20%2B%20query)%26lt%3B%2Fspan%26gt%3B%26lt%3B%2Ffont%26gt%3B%26lt%3B%2Fdiv%26gt%3B%22%20style%3D%22rounded%3D1%3BwhiteSpace%3Dwrap%3Bhtml%3D1%3BfillColor%3D%23f5f5f5%3BstrokeColor%3D%23666666%3BfontColor%3D%23333333%3B%22%20vertex%3D%221%22%20parent%3D%221%22%3E%3CmxGeometry%20x%3D%22120%22%20y%3D%22190%22%20width%3D%22120%22%20height%3D%2260%22%20as%3D%22geometry%22%2F%3E%3C%2FmxCell%3E%3C%2Froot%3E%3C%2FmxGraphModel%3E
Object Hash
SHA1(reqHash + Vary)
[Not supported by viewer]
 Fetch 
[Not supported by viewer]
 Found 
[Not supported by viewer]
 Not Found or Not Cacheable 
 Not Found or Not Cacheable 
Fetch Response
Fetch Response
 Not Found 
 Not Found 
 Stale 
[Not supported by viewer]
Write Response
Write Response
Fresh
Fresh
Check Expiration
Check Expiration
 Found 
[Not supported by viewer]
Compress and Store
Compress and Store
Calculate
Object Hash
and expiration
[Not supported by viewer]
Store
Store
If not revalidating
If not revalidating
Modify
Request Options
based on response
[Not supported by viewer]
 No 
[Not supported by viewer]
Yes
Yes
Stale While
Revalidate
[Not supported by viewer]
 Yes 
[Not supported by viewer]
Decompress
Response Body
Decompress<br>Response Body
Microcache
Microcache
ServeHTTP
ServeHTTP
Request
Request
ResponseWriter
ResponseWriter
Synchronous
Synchronous
Asynchronous
Asynchronous
 Legend 
 Legend 
In Memory Storage
In Memory Storage
Process Step
Process Step
Interface
Interface
External
External
Control flow diagram and basic operation of the Microcache middleware. Stale if error, collapsed forwarding and request timeout not represented in this diagram.
[Not supported by viewer]
Cacheable
Cacheable
HIT
HIT
STALE
STALE
MISS
MISS
 Monitor 
 Monitor 
--------------------------------------------------------------------------------