├── .github
│   ├── CODEOWNERS
│   ├── FUNDING.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .golangci.yml
├── LICENSE
├── README.md
├── cache.go
├── cache_test.go
├── eventbus
│   ├── pubsub.go
│   ├── pubsub_test.go
│   ├── redis.go
│   └── redis_test.go
├── expirable_cache.go
├── expirable_cache_test.go
├── go.mod
├── go.sum
├── internal
│   └── cache
│       ├── cache.go
│       ├── cache_test.go
│       └── options.go
├── lru_cache.go
├── lru_cache_test.go
├── options.go
├── redis_cache.go
├── redis_cache_test.go
├── scache.go
├── scache_test.go
├── url.go
├── url_test.go
└── v2
    ├── cache.go
    ├── cache_test.go
    ├── eventbus
    │   ├── pubsub.go
    │   ├── pubsub_test.go
    │   ├── redis.go
    │   └── redis_test.go
    ├── expirable_cache.go
    ├── expirable_cache_test.go
    ├── go.mod
    ├── go.sum
    ├── lru_cache.go
    ├── lru_cache_test.go
    ├── options.go
    ├── redis_cache.go
    ├── redis_cache_test.go
    ├── scache.go
    ├── scache_test.go
    ├── url.go
    └── url_test.go
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # These owners will be the default owners for everything in the repo.
2 | # Unless a later match takes precedence, @umputun will be requested for
3 | # review when someone opens a pull request.
4 |
5 | * @umputun
6 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [umputun]
2 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: build
2 |
3 | on:
4 | push:
5 | branches:
6 | tags:
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - name: set up go
15 | uses: actions/setup-go@v5
16 | with:
17 | go-version: "1.21"
18 | id: go
19 |
20 | - name: checkout
21 | uses: actions/checkout@v4
22 |
23 | - name: start Redis
24 | uses: supercharge/redis-github-action@1.8.0
25 |
26 | - name: build and test
27 | run: |
28 | go get -v
29 | go test -timeout=60s -race -covermode=atomic -coverprofile=$GITHUB_WORKSPACE/profile.cov_tmp
30 | cat $GITHUB_WORKSPACE/profile.cov_tmp | grep -v "_mock.go" > $GITHUB_WORKSPACE/profile.cov
31 | go build -race
32 | env:
33 | TZ: "America/Chicago"
34 | ENABLE_REDIS_TESTS: "true"
35 |
36 | - name: build and test for v2
37 | run: |
38 | go get -v
39 | go test -timeout=60s -race -covermode=atomic -coverprofile=$GITHUB_WORKSPACE/profile.cov_tmp
40 | # combine the coverage files
41 | cat $GITHUB_WORKSPACE/profile.cov_tmp | grep -v "_mock.go" | grep -v "mode:" >> $GITHUB_WORKSPACE/profile.cov
42 | go build -race
43 | env:
44 | TZ: "America/Chicago"
45 | ENABLE_REDIS_TESTS: "true"
46 | working-directory: v2
47 |
48 | - name: golangci-lint
49 | uses: golangci/golangci-lint-action@v4
50 | with:
51 | version: latest
52 |
53 | - name: golangci-lint for v2
54 | uses: golangci/golangci-lint-action@v4
55 | with:
56 | version: latest
57 | working-directory: v2
58 |
59 | - name: submit coverage
60 | run: |
61 | go install github.com/mattn/goveralls@latest
62 | goveralls -service="github" -coverprofile=$GITHUB_WORKSPACE/profile.cov
63 | env:
64 | COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
65 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, build with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | linters-settings:
2 | govet:
3 | enable:
4 | - shadow
5 | gocyclo:
6 | min-complexity: 15
7 | goconst:
8 | min-len: 2
9 | min-occurrences: 2
10 | misspell:
11 | locale: US
12 | lll:
13 | line-length: 140
14 | gocritic:
15 | enabled-tags:
16 | - performance
17 | - style
18 | - experimental
19 | disabled-checks:
20 | - wrapperFunc
21 |
22 | linters:
23 | enable:
24 | - staticcheck
25 | - unused
26 | - revive
27 | - govet
28 | - unconvert
29 | - gosec
30 | - gocyclo
31 | - dupl
32 | - misspell
33 | - unparam
34 | - typecheck
35 | - ineffassign
36 | - stylecheck
37 | - gochecknoinits
38 | - exportloopref
39 | - gocritic
40 | - nakedret
41 | - gosimple
42 | - prealloc
43 | fast: false
44 | disable-all: true
45 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Umputun
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Loading Cache Wrapper [](https://github.com/go-pkgz/lcw/actions) [](https://coveralls.io/github/go-pkgz/lcw?branch=master) [](https://godoc.org/github.com/go-pkgz/lcw/v2)
2 |
3 | The library adds a thin layer on top of the [lru and expirable caches](https://github.com/hashicorp/golang-lru).
4 |
5 | | Cache name | Constructor | Defaults | Description |
6 | |----------------|-----------------------|-------------------|-------------------------|
7 | | LruCache | lcw.NewLruCache | keys=1000 | LRU cache with limits |
8 | | ExpirableCache | lcw.NewExpirableCache | keys=1000, ttl=5m | TTL cache with limits |
9 | | RedisCache | lcw.NewRedisCache | ttl=5m | Redis cache with limits |
10 | | Nop | lcw.NewNopCache | | Do-nothing cache |
11 |
12 | Main features:
13 |
14 | - LoadingCache (guava style)
15 | - Limit maximum cache size (in bytes)
16 | - Limit maximum key size
17 | - Limit maximum size of a value
18 | - Limit number of keys
19 | - TTL support (`ExpirableCache` and `RedisCache`)
20 | - Callback on eviction event (not supported in `RedisCache`)
21 | - Functional style invalidation
22 | - Functional options
23 | - Sane defaults
24 |
25 | ## Install and update
26 |
27 | `go get -u github.com/go-pkgz/lcw/v2`
28 |
29 | ## Usage
30 |
31 | ```go
32 | package main
33 |
34 | import (
35 | "github.com/go-pkgz/lcw/v2"
36 | )
37 |
38 | func main() {
39 | o := lcw.NewOpts[int]()
40 | cache, err := lcw.NewLruCache(o.MaxKeys(500), o.MaxCacheSize(65536), o.MaxValSize(200), o.MaxKeySize(32))
41 | if err != nil {
42 | panic("failed to create cache")
43 | }
44 | defer cache.Close()
45 |
46 | val, err := cache.Get("key123", func() (int, error) {
47 | res, err := getDataFromSomeSource(params) // returns int
48 | return res, err
49 | })
50 |
51 | if err != nil {
52 | panic("failed to get data")
53 | }
54 |
55 | s := val // cached value
56 | }
57 | ```
58 |
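Functional-style invalidation and the eviction callback from the feature list can be combined as below; this is a minimal sketch based on the root (non-generic) package shown in this repository, while the v2 package offers the same options with typed values:

```go
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/go-pkgz/lcw"
)

func main() {
	// expirable cache with an eviction callback
	c, err := lcw.NewExpirableCache(lcw.MaxKeys(100), lcw.TTL(5*time.Minute),
		lcw.OnEvicted(func(key string, _ interface{}) { fmt.Println("evicted", key) }))
	if err != nil {
		panic(err)
	}
	defer c.Close()

	_, _ = c.Get("user-1", func() (interface{}, error) { return "data-1", nil })
	_, _ = c.Get("user-2", func() (interface{}, error) { return "data-2", nil })

	// functional invalidation: drop every key with the "user-" prefix, OnEvicted fires for each
	c.Invalidate(func(key string) bool { return strings.HasPrefix(key, "user-") })
}
```
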
59 | ### Cache with URI
60 |
61 | Cache can be created with URIs:
62 |
63 | - `mem://lru?max_key_size=10&max_val_size=1024&max_keys=50&max_cache_size=64000` - creates LRU cache with the given limits
64 | - `mem://expirable?ttl=30s&max_key_size=10&max_val_size=1024&max_keys=50&max_cache_size=64000` - creates expirable cache
65 | - `redis://10.0.0.1:1234?db=16&password=qwerty&network=tcp4&dial_timeout=1s&read_timeout=5s&write_timeout=3s` - creates
66 |   Redis cache
67 | - `nop://` - creates Nop cache
68 |
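A minimal sketch of the URI-based construction, assuming the constructor defined in url.go (not shown in this excerpt) is `lcw.New(uri string)` returning a `LoadingCache`; check url.go for the exact name and signature:

```go
package main

import (
	"fmt"

	"github.com/go-pkgz/lcw"
)

func main() {
	c, err := lcw.New("mem://expirable?ttl=30s&max_keys=50") // assumed constructor, see url.go
	if err != nil {
		panic(err)
	}
	defer c.Close()

	v, err := c.Get("key", func() (interface{}, error) { return "value", nil })
	if err != nil {
		panic(err)
	}
	fmt.Println(v)
}
```
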
69 | ## Scoped cache
70 |
71 | `Scache` provides a wrapper on top of all implementations of `LoadingCache` with a number of special features:
72 |
73 | 1. The key is not a plain string but a composite type made from a partition, a key-id and a list of scopes (tags).
74 | 1. The value type is limited to `[]byte`.
75 | 1. An added `Flush` method allows scoped/tagged invalidation of multiple records in a given partition.
76 | 1. A simplified interface with Get, Stat, Flush and Close only (see the sketch after this list).
77 |
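Below is a hypothetical usage sketch; scache.go is not shown in this excerpt, so the constructor and key-builder names (`NewScache`, `NewKey`, `Flusher`) are assumptions rather than confirmed API:

```go
package main

import (
	"fmt"

	"github.com/go-pkgz/lcw"
)

func main() {
	backend, err := lcw.NewLruCache(lcw.MaxKeys(100))
	if err != nil {
		panic(err)
	}
	sc := lcw.NewScache(backend) // assumed constructor name

	// assumed key builder: partition "site1", key-id "post-42", scopes (tags) "posts" and "comments"
	key := lcw.NewKey("site1").ID("post-42").Scopes("posts", "comments")
	data, err := sc.Get(key, func() ([]byte, error) { return []byte("rendered page"), nil })
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	// tagged invalidation: drop everything in partition "site1" carrying the "posts" scope
	sc.Flush(lcw.Flusher("site1").Scopes("posts"))
}
```
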
78 | ## Details
79 |
80 | - In all cache types other than Redis (i.e. LRU and Expirable at the moment) values are stored as-is, which means
81 | that mutable values can be changed outside of the cache. `Example_loadingCacheMutability` illustrates that.
82 | - All byte-size limits (MaxCacheSize and MaxValSize) work only for values implementing the `lcw.Sizer` interface (see the sketch after this list).
83 | - Negative limits (max options) are rejected.
84 | - The implementation started as a part of [remark42](https://github.com/umputun/remark)
85 | and later on moved to the [go-pkgz/rest](https://github.com/go-pkgz/rest/tree/master/cache)
86 | library, and finally was generalized to become `lcw`.
87 |
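The byte-size limits mentioned above apply only to values implementing `lcw.Sizer` (defined in cache.go); here is a minimal sketch of a value type opting in, using the root package:

```go
package main

import (
	"fmt"

	"github.com/go-pkgz/lcw"
)

// sizedPayload implements lcw.Sizer, so MaxValSize and MaxCacheSize checks apply to it
type sizedPayload []byte

func (p sizedPayload) Size() int { return len(p) }

func main() {
	c, err := lcw.NewLruCache(lcw.MaxKeys(100), lcw.MaxValSize(1024), lcw.MaxCacheSize(64*1024))
	if err != nil {
		panic(err)
	}
	defer c.Close()

	v, err := c.Get("page", func() (interface{}, error) { return sizedPayload("rendered html"), nil })
	if err != nil {
		panic(err)
	}
	fmt.Println(len(v.(sizedPayload)), "bytes cached with size accounting")
}
```
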
--------------------------------------------------------------------------------
/cache.go:
--------------------------------------------------------------------------------
1 | // Package lcw adds a thin layer on top of lru and expirable cache providing more limits and common interface.
2 | // The primary method to get (and set) data to/from the cache is LoadingCache.Get returning stored data for a given key or
3 | // call provided func to retrieve and store, similar to Guava loading cache.
4 | // Limits allow max values for key size, number of keys, value size and total size of values in the cache.
5 | // CacheStat gives general stats on cache performance.
6 | // Four flavors of cache are provided - Nop (do-nothing cache), ExpirableCache (TTL-based), LruCache, and RedisCache
7 | package lcw
8 |
9 | import (
10 | "fmt"
11 | )
12 |
13 | // Sizer allows performing size-based restrictions, optional.
14 | // If not defined, both maxValueSize and maxCacheSize checks will be ignored
15 | type Sizer interface {
16 | Size() int
17 | }
18 |
19 | // LoadingCache defines guava-like cache with Get method returning cached value or retrieving it if not in cache
20 | type LoadingCache interface {
21 | Get(key string, fn func() (interface{}, error)) (val interface{}, err error) // load or get from cache
22 | Peek(key string) (interface{}, bool) // get from cache by key
23 | Invalidate(fn func(key string) bool) // invalidate items for func(key) == true
24 | Delete(key string) // delete by key
25 | Purge() // clear cache
26 | Stat() CacheStat // cache stats
27 | Keys() []string // list of all keys
28 | Close() error // close open connections
29 | }
30 |
31 | // CacheStat represents stats values
32 | type CacheStat struct {
33 | Hits int64
34 | Misses int64
35 | Keys int
36 | Size int64
37 | Errors int64
38 | }
39 |
40 | // String formats cache stats
41 | func (s CacheStat) String() string {
42 | ratio := 0.0
43 | if s.Hits+s.Misses > 0 {
44 | ratio = float64(s.Hits) / float64(s.Hits+s.Misses)
45 | }
46 | return fmt.Sprintf("{hits:%d, misses:%d, ratio:%.2f, keys:%d, size:%d, errors:%d}",
47 | s.Hits, s.Misses, ratio, s.Keys, s.Size, s.Errors)
48 | }
49 |
50 | // Nop is do-nothing implementation of LoadingCache
51 | type Nop struct{}
52 |
53 | // NewNopCache makes new do-nothing cache
54 | func NewNopCache() *Nop {
55 | return &Nop{}
56 | }
57 |
58 | // Get calls fn without any caching
59 | func (n *Nop) Get(_ string, fn func() (interface{}, error)) (interface{}, error) { return fn() }
60 |
61 | // Peek does nothing and always returns false
62 | func (n *Nop) Peek(string) (interface{}, bool) { return nil, false }
63 |
64 | // Invalidate does nothing for nop cache
65 | func (n *Nop) Invalidate(func(key string) bool) {}
66 |
67 | // Purge does nothing for nop cache
68 | func (n *Nop) Purge() {}
69 |
70 | // Delete does nothing for nop cache
71 | func (n *Nop) Delete(string) {}
72 |
73 | // Keys does nothing for nop cache
74 | func (n *Nop) Keys() []string { return nil }
75 |
76 | // Stat always returns zeroed stats for nop cache
77 | func (n *Nop) Stat() CacheStat {
78 | return CacheStat{}
79 | }
80 |
81 | // Close does nothing for nop cache
82 | func (n *Nop) Close() error {
83 | return nil
84 | }
85 |
--------------------------------------------------------------------------------
/cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "strings"
7 | "sync"
8 | "sync/atomic"
9 | "testing"
10 | "time"
11 |
12 | "github.com/redis/go-redis/v9"
13 | "github.com/stretchr/testify/assert"
14 | "github.com/stretchr/testify/require"
15 | )
16 |
17 | func TestNop_Get(t *testing.T) {
18 | var coldCalls int32
19 | var c LoadingCache = NewNopCache()
20 | res, err := c.Get("key1", func() (interface{}, error) {
21 | atomic.AddInt32(&coldCalls, 1)
22 | return "result", nil
23 | })
24 | assert.NoError(t, err)
25 | assert.Equal(t, "result", res.(string))
26 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
27 |
28 | res, err = c.Get("key1", func() (interface{}, error) {
29 | atomic.AddInt32(&coldCalls, 1)
30 | return "result2", nil
31 | })
32 | assert.NoError(t, err)
33 | assert.Equal(t, "result2", res.(string))
34 | assert.Equal(t, int32(2), atomic.LoadInt32(&coldCalls))
35 |
36 | assert.Equal(t, CacheStat{}, c.Stat())
37 | }
38 |
39 | func TestNop_Peek(t *testing.T) {
40 | var coldCalls int32
41 | c := NewNopCache()
42 | res, err := c.Get("key1", func() (interface{}, error) {
43 | atomic.AddInt32(&coldCalls, 1)
44 | return "result", nil
45 | })
46 | assert.NoError(t, err)
47 | assert.Equal(t, "result", res.(string))
48 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
49 |
50 | _, ok := c.Peek("key1")
51 | assert.False(t, ok)
52 | }
53 |
54 | func TestStat_String(t *testing.T) {
55 | s := CacheStat{Keys: 100, Hits: 60, Misses: 10, Size: 12345, Errors: 5}
56 | assert.Equal(t, "{hits:60, misses:10, ratio:0.86, keys:100, size:12345, errors:5}", s.String())
57 | }
58 |
59 | func TestCache_Get(t *testing.T) {
60 | caches, teardown := cachesTestList(t)
61 | defer teardown()
62 |
63 | for _, c := range caches {
64 | c := c
65 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
66 | var coldCalls int32
67 | res, err := c.Get("key", func() (interface{}, error) {
68 | atomic.AddInt32(&coldCalls, 1)
69 | return "result", nil
70 | })
71 | assert.NoError(t, err)
72 | assert.Equal(t, "result", res.(string))
73 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
74 |
75 | res, err = c.Get("key", func() (interface{}, error) {
76 | atomic.AddInt32(&coldCalls, 1)
77 | return "result2", nil
78 | })
79 |
80 | assert.NoError(t, err)
81 | assert.Equal(t, "result", res.(string))
82 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls), "cache hit")
83 |
84 | _, err = c.Get("key-2", func() (interface{}, error) {
85 | atomic.AddInt32(&coldCalls, 1)
86 | return "result2", fmt.Errorf("some error")
87 | })
88 | assert.Error(t, err)
89 | assert.Equal(t, int32(2), atomic.LoadInt32(&coldCalls), "cache hit")
90 |
91 | _, err = c.Get("key-2", func() (interface{}, error) {
92 | atomic.AddInt32(&coldCalls, 1)
93 | return "result2", fmt.Errorf("some error")
94 | })
95 | assert.Error(t, err)
96 | assert.Equal(t, int32(3), atomic.LoadInt32(&coldCalls), "cache hit")
97 | })
98 | }
99 | }
100 |
101 | func TestCache_MaxValueSize(t *testing.T) {
102 | caches, teardown := cachesTestList(t, MaxKeys(5), MaxValSize(10))
103 | defer teardown()
104 |
105 | for _, c := range caches {
106 | c := c
107 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
108 | // put a good-size value to cache and make sure it is cached
109 | res, err := c.Get("key-Z", func() (interface{}, error) {
110 | return sizedString("result-Z"), nil
111 | })
112 | assert.NoError(t, err)
113 | assert.Equal(t, sizedString("result-Z"), res.(sizedString))
114 |
115 | res, err = c.Get("key-Z", func() (interface{}, error) {
116 | return sizedString("result-Zzzz"), nil
117 | })
118 | if s, ok := res.(string); ok {
119 | res = sizedString(s)
120 | }
121 | assert.NoError(t, err)
122 | assert.Equal(t, sizedString("result-Z"), res.(sizedString), "got cached value")
123 |
124 | // put too big value to cache and make sure it is not cached
125 | res, err = c.Get("key-Big", func() (interface{}, error) {
126 | return sizedString("1234567890"), nil
127 | })
128 | if s, ok := res.(string); ok {
129 | res = sizedString(s)
130 | }
131 | assert.NoError(t, err)
132 | assert.Equal(t, sizedString("1234567890"), res.(sizedString))
133 |
134 | res, err = c.Get("key-Big", func() (interface{}, error) {
135 | return sizedString("result-big"), nil
136 | })
137 | if s, ok := res.(string); ok {
138 | res = sizedString(s)
139 | }
140 | assert.NoError(t, err)
141 | assert.Equal(t, sizedString("result-big"), res.(sizedString), "got not cached value")
142 |
143 | // put too big value to cache but not Sizer
144 | res, err = c.Get("key-Big2", func() (interface{}, error) {
145 | return "1234567890", nil
146 | })
147 | assert.NoError(t, err)
148 | assert.Equal(t, "1234567890", res.(string))
149 |
150 | res, err = c.Get("key-Big2", func() (interface{}, error) {
151 | return "xyz", nil
152 | })
153 | assert.NoError(t, err)
154 | assert.Equal(t, "1234567890", res.(string), "too long, but not Sizer. from cache")
155 | })
156 | }
157 | }
158 |
159 | func TestCache_MaxCacheSize(t *testing.T) {
160 | caches, teardown := cachesTestList(t, MaxKeys(50), MaxCacheSize(20))
161 | defer teardown()
162 |
163 | for _, c := range caches {
164 | c := c
165 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
166 | // put a good-size value to cache and make sure it is cached
167 | res, err := c.Get("key-Z", func() (interface{}, error) {
168 | return sizedString("result-Z"), nil
169 | })
170 | assert.NoError(t, err)
171 | if s, ok := res.(string); ok {
172 | res = sizedString(s)
173 | }
174 | assert.Equal(t, sizedString("result-Z"), res.(sizedString))
175 | res, err = c.Get("key-Z", func() (interface{}, error) {
176 | return sizedString("result-Zzzz"), nil
177 | })
178 | if s, ok := res.(string); ok {
179 | res = sizedString(s)
180 | }
181 | assert.NoError(t, err)
182 | assert.Equal(t, sizedString("result-Z"), res.(sizedString), "got cached value")
183 | if _, ok := c.(*RedisCache); !ok {
184 | assert.Equal(t, int64(8), c.size())
185 | }
186 | _, err = c.Get("key-Z2", func() (interface{}, error) {
187 | return sizedString("result-Y"), nil
188 | })
189 | assert.NoError(t, err)
190 | if _, ok := c.(*RedisCache); !ok {
191 | assert.Equal(t, int64(16), c.size())
192 | }
193 |
194 | // this will cause removal
195 | _, err = c.Get("key-Z3", func() (interface{}, error) {
196 | return sizedString("result-Z"), nil
197 | })
198 | assert.NoError(t, err)
199 | if _, ok := c.(*RedisCache); !ok {
200 | assert.Equal(t, int64(16), c.size())
201 | // RedisCache does not support MaxCacheSize, so this assert is skipped for it
202 | assert.Equal(t, 2, c.keys())
203 | }
204 | })
205 | }
206 | }
207 |
208 | func TestCache_MaxCacheSizeParallel(t *testing.T) {
209 | caches, teardown := cachesTestList(t, MaxCacheSize(123), MaxKeys(10000))
210 | defer teardown()
211 |
212 | for _, c := range caches {
213 | c := c
214 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
215 | wg := sync.WaitGroup{}
216 | for i := 0; i < 1000; i++ {
217 | wg.Add(1)
218 | i := i
219 | go func() {
220 | //nolint:gosec // not used for security purpose
221 | time.Sleep(time.Duration(rand.Intn(100)) * time.Nanosecond)
222 | defer wg.Done()
223 | res, err := c.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
224 | return sizedString(fmt.Sprintf("result-%d", i)), nil
225 | })
226 | require.NoError(t, err)
227 | require.Equal(t, sizedString(fmt.Sprintf("result-%d", i)), res.(sizedString))
228 | }()
229 | }
230 | wg.Wait()
231 | assert.True(t, c.size() < 123 && c.size() >= 0)
232 | t.Log("size", c.size())
233 | })
234 | }
235 |
236 | }
237 |
238 | func TestCache_MaxKeySize(t *testing.T) {
239 | caches, teardown := cachesTestList(t, MaxKeySize(5))
240 | defer teardown()
241 |
242 | for _, c := range caches {
243 | c := c
244 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
245 | res, err := c.Get("key", func() (interface{}, error) {
246 | return "value", nil
247 | })
248 | assert.NoError(t, err)
249 | assert.Equal(t, "value", res.(string))
250 |
251 | res, err = c.Get("key", func() (interface{}, error) {
252 | return "valueXXX", nil
253 | })
254 | assert.NoError(t, err)
255 | assert.Equal(t, "value", res.(string), "cached")
256 |
257 | res, err = c.Get("key1234", func() (interface{}, error) {
258 | return "value", nil
259 | })
260 | assert.NoError(t, err)
261 | assert.Equal(t, "value", res.(string))
262 |
263 | res, err = c.Get("key1234", func() (interface{}, error) {
264 | return "valueXYZ", nil
265 | })
266 | assert.NoError(t, err)
267 | assert.Equal(t, "valueXYZ", res.(string), "not cached")
268 | })
269 | }
270 | }
271 |
272 | func TestCache_Peek(t *testing.T) {
273 | caches, teardown := cachesTestList(t)
274 | defer teardown()
275 |
276 | for _, c := range caches {
277 | c := c
278 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
279 | var coldCalls int32
280 | res, err := c.Get("key", func() (interface{}, error) {
281 | atomic.AddInt32(&coldCalls, 1)
282 | return "result", nil
283 | })
284 | assert.NoError(t, err)
285 | assert.Equal(t, "result", res.(string))
286 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
287 |
288 | r, ok := c.Peek("key")
289 | assert.True(t, ok)
290 | assert.Equal(t, "result", r.(string))
291 | })
292 | }
293 | }
294 |
295 | func TestLruCache_ParallelHits(t *testing.T) {
296 | caches, teardown := cachesTestList(t)
297 | defer teardown()
298 |
299 | for _, c := range caches {
300 | c := c
301 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
302 | var coldCalls int32
303 |
304 | res, err := c.Get("key", func() (interface{}, error) {
305 | return "value", nil
306 | })
307 | assert.NoError(t, err)
308 | assert.Equal(t, "value", res.(string))
309 |
310 | wg := sync.WaitGroup{}
311 | for i := 0; i < 1000; i++ {
312 | wg.Add(1)
313 | i := i
314 | go func() {
315 | defer wg.Done()
316 | res, err := c.Get("key", func() (interface{}, error) {
317 | atomic.AddInt32(&coldCalls, 1)
318 | return fmt.Sprintf("result-%d", i), nil
319 | })
320 | require.NoError(t, err)
321 | require.Equal(t, "value", res.(string))
322 | }()
323 | }
324 | wg.Wait()
325 | assert.Equal(t, int32(0), atomic.LoadInt32(&coldCalls))
326 | })
327 | }
328 | }
329 |
330 | func TestCache_Purge(t *testing.T) {
331 | caches, teardown := cachesTestList(t)
332 | defer teardown()
333 |
334 | for _, c := range caches {
335 | c := c
336 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
337 | var coldCalls int32
338 | // fill cache
339 | for i := 0; i < 1000; i++ {
340 | i := i
341 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
342 | atomic.AddInt32(&coldCalls, 1)
343 | return fmt.Sprintf("result-%d", i), nil
344 | })
345 | require.NoError(t, err)
346 | }
347 | assert.Equal(t, int32(1000), atomic.LoadInt32(&coldCalls))
348 | assert.Equal(t, 1000, c.keys())
349 |
350 | c.Purge()
351 | assert.Equal(t, 0, c.keys(), "all keys removed")
352 | })
353 | }
354 | }
355 |
356 | func TestCache_Invalidate(t *testing.T) {
357 | caches, teardown := cachesTestList(t)
358 | defer teardown()
359 |
360 | for _, c := range caches {
361 | c := c
362 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
363 | var coldCalls int32
364 |
365 | // fill cache
366 | for i := 0; i < 1000; i++ {
367 | i := i
368 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
369 | atomic.AddInt32(&coldCalls, 1)
370 | return fmt.Sprintf("result-%d", i), nil
371 | })
372 | require.NoError(t, err)
373 | }
374 | assert.Equal(t, int32(1000), atomic.LoadInt32(&coldCalls))
375 | assert.Equal(t, 1000, c.keys())
376 |
377 | c.Invalidate(func(key string) bool {
378 | return strings.HasSuffix(key, "0")
379 | })
380 |
381 | assert.Equal(t, 900, c.keys(), "100 keys removed")
382 | res, err := c.Get("key-1", func() (interface{}, error) {
383 | atomic.AddInt32(&coldCalls, 1)
384 | return "result-xxx", nil
385 | })
386 | require.NoError(t, err)
387 | assert.Equal(t, "result-1", res.(string), "from the cache")
388 |
389 | res, err = c.Get("key-10", func() (interface{}, error) {
390 | atomic.AddInt32(&coldCalls, 1)
391 | return "result-xxx", nil
392 | })
393 | require.NoError(t, err)
394 | assert.Equal(t, "result-xxx", res.(string), "not from the cache")
395 | })
396 | }
397 | }
398 |
399 | func TestCache_Delete(t *testing.T) {
400 | caches, teardown := cachesTestList(t)
401 | defer teardown()
402 |
403 | for _, c := range caches {
404 | c := c
405 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
406 | // fill cache
407 | for i := 0; i < 1000; i++ {
408 | i := i
409 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
410 | return sizedString(fmt.Sprintf("result-%d", i)), nil
411 | })
412 | require.NoError(t, err)
413 | }
414 | assert.Equal(t, 1000, c.Stat().Keys)
415 | if _, ok := c.(*RedisCache); !ok {
416 | assert.Equal(t, int64(9890), c.Stat().Size)
417 | }
418 | c.Delete("key-2")
419 | assert.Equal(t, 999, c.Stat().Keys)
420 | if _, ok := c.(*RedisCache); !ok {
421 | assert.Equal(t, int64(9890-8), c.Stat().Size)
422 | }
423 | })
424 | }
425 | }
426 |
427 | func TestCache_DeleteWithEvent(t *testing.T) {
428 | var evKey string
429 | var evVal interface{}
430 | var evCount int
431 | onEvict := func(key string, value interface{}) {
432 | evKey = key
433 | evVal = value
434 | evCount++
435 | }
436 |
437 | caches, teardown := cachesTestList(t, OnEvicted(onEvict))
438 | defer teardown()
439 |
440 | for _, c := range caches {
441 | c := c
442 |
443 | evKey, evVal, evCount = "", "", 0
444 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
445 | if _, ok := c.(*RedisCache); ok {
446 | t.Skip("RedisCache doesn't support delete events")
447 | }
448 | // fill cache
449 | for i := 0; i < 1000; i++ {
450 | i := i
451 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
452 | return sizedString(fmt.Sprintf("result-%d", i)), nil
453 | })
454 | require.NoError(t, err)
455 | }
456 | assert.Equal(t, 1000, c.Stat().Keys)
457 | assert.Equal(t, int64(9890), c.Stat().Size)
458 |
459 | c.Delete("key-2")
460 | assert.Equal(t, 999, c.Stat().Keys)
461 | assert.Equal(t, "key-2", evKey)
462 | assert.Equal(t, sizedString("result-2"), evVal)
463 | assert.Equal(t, 1, evCount)
464 | })
465 | }
466 | }
467 |
468 | func TestCache_Stats(t *testing.T) {
469 | caches, teardown := cachesTestList(t)
470 | defer teardown()
471 |
472 | for _, c := range caches {
473 | c := c
474 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
475 | // fill cache
476 | for i := 0; i < 100; i++ {
477 | i := i
478 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
479 | return sizedString(fmt.Sprintf("result-%d", i)), nil
480 | })
481 | require.NoError(t, err)
482 | }
483 | stats := c.Stat()
484 | switch c.(type) {
485 | case *RedisCache:
486 | assert.Equal(t, CacheStat{Hits: 0, Misses: 100, Keys: 100, Size: 0}, stats)
487 | default:
488 | assert.Equal(t, CacheStat{Hits: 0, Misses: 100, Keys: 100, Size: 890}, stats)
489 | }
490 |
491 | _, err := c.Get("key-1", func() (interface{}, error) {
492 | return "xyz", nil
493 | })
494 | require.NoError(t, err)
495 | switch c.(type) {
496 | case *RedisCache:
497 | assert.Equal(t, CacheStat{Hits: 1, Misses: 100, Keys: 100, Size: 0}, c.Stat())
498 | default:
499 | assert.Equal(t, CacheStat{Hits: 1, Misses: 100, Keys: 100, Size: 890}, c.Stat())
500 | }
501 |
502 | _, err = c.Get("key-1123", func() (interface{}, error) {
503 | return sizedString("xyz"), nil
504 | })
505 | require.NoError(t, err)
506 | switch c.(type) {
507 | case *RedisCache:
508 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 0}, c.Stat())
509 | default:
510 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 893}, c.Stat())
511 | }
512 |
513 | _, err = c.Get("key-9999", func() (interface{}, error) {
514 | return nil, fmt.Errorf("err")
515 | })
516 | require.Error(t, err)
517 | switch c.(type) {
518 | case *RedisCache:
519 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 0, Errors: 1}, c.Stat())
520 | default:
521 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 893, Errors: 1}, c.Stat())
522 | }
523 | })
524 | }
525 | }
526 |
527 | // ExampleLoadingCache_Get illustrates creation of a cache and loading value from it
528 | func ExampleLoadingCache_Get() {
529 | c, err := NewExpirableCache(MaxKeys(10), TTL(time.Minute*30)) // make expirable cache (30m TTL) with up to 10 keys
530 | if err != nil {
531 | panic("can' make cache")
532 | }
533 | defer c.Close()
534 |
535 | // try to get from cache; since mykey is not in it yet, the func will be called and the value cached
536 | _, _ = c.Get("mykey", func() (interface{}, error) {
537 | fmt.Println("cache miss 1")
538 | return "myval-1", nil
539 | })
540 |
541 | // get from cache, func won't run because mykey is in the cache already
542 | v, err := c.Get("mykey", func() (interface{}, error) {
543 | fmt.Println("cache miss 2")
544 | return "myval-2", nil
545 | })
546 |
547 | if err != nil {
548 | panic("can't get from cache")
549 | }
550 | fmt.Printf("got %s from cache, stats: %s", v.(string), c.Stat())
551 | // Output: cache miss 1
552 | // got myval-1 from cache, stats: {hits:1, misses:1, ratio:0.50, keys:1, size:0, errors:0}
553 | }
554 |
555 | // ExampleLoadingCache_Delete illustrates cache value eviction and OnEvicted function usage.
556 | func ExampleLoadingCache_Delete() {
557 | // make expirable cache (30m TTL) with up to 10 keys. Set callback on eviction event
558 | c, err := NewExpirableCache(MaxKeys(10), TTL(time.Minute*30), OnEvicted(func(key string, _ interface{}) {
559 | fmt.Println("key " + key + " evicted")
560 | }))
561 | if err != nil {
562 | panic("can' make cache")
563 | }
564 | defer c.Close()
565 |
566 | // try to get from cache; since mykey is not in it yet, the func will be called and the value cached
567 | _, _ = c.Get("mykey", func() (interface{}, error) {
568 | return "myval-1", nil
569 | })
570 |
571 | c.Delete("mykey")
572 | fmt.Println("stats: " + c.Stat().String())
573 | // Output: key mykey evicted
574 | // stats: {hits:0, misses:1, ratio:0.00, keys:0, size:0, errors:0}
575 | }
576 |
577 | // nolint:govet //false positive due to example name
578 | // Example_loadingCacheMutability illustrates changing a mutable stored item outside of the cache, works only for non-Redis caches.
579 | func Example_loadingCacheMutability() {
580 | c, err := NewExpirableCache(MaxKeys(10), TTL(time.Minute*30)) // make expirable cache (30m TTL) with up to 10 keys
581 | if err != nil {
582 | panic("can' make cache")
583 | }
584 | defer c.Close()
585 |
586 | mutableSlice := []string{"key1", "key2"}
587 |
588 | // put mutableSlice in "mutableSlice" cache key
589 | _, _ = c.Get("mutableSlice", func() (interface{}, error) {
590 | return mutableSlice, nil
591 | })
592 |
593 | // get from cache, func won't run because mutableSlice is cached
594 | // value is original now
595 | v, _ := c.Get("mutableSlice", func() (interface{}, error) {
596 | return nil, nil
597 | })
598 | fmt.Printf("got %v slice from cache\n", v)
599 |
600 | mutableSlice[0] = "another_key_1"
601 | mutableSlice[1] = "another_key_2"
602 |
603 | // get from cache, func won't run because mutableSlice is cached
604 | // value is changed inside the cache now because mutableSlice stored as-is, in mutable state
605 | v, _ = c.Get("mutableSlice", func() (interface{}, error) {
606 | return nil, nil
607 | })
608 | fmt.Printf("got %v slice from cache after it's change outside of cache\n", v)
609 |
610 | // Output:
611 | // got [key1 key2] slice from cache
612 | // got [another_key_1 another_key_2] slice from cache after its change outside of cache
613 | }
614 |
615 | type counts interface {
616 | size() int64 // cache size in bytes
617 | keys() int // number of keys in cache
618 | }
619 |
620 | type countedCache interface {
621 | LoadingCache
622 | counts
623 | }
624 |
625 | func cachesTestList(t *testing.T, opts ...Option) (c []countedCache, teardown func()) {
626 | var caches []countedCache
627 | ec, err := NewExpirableCache(opts...)
628 | require.NoError(t, err, "can't make exp cache")
629 | caches = append(caches, ec)
630 | lc, err := NewLruCache(opts...)
631 | require.NoError(t, err, "can't make lru cache")
632 | caches = append(caches, lc)
633 |
634 | server := newTestRedisServer()
635 | client := redis.NewClient(&redis.Options{
636 | Addr: server.Addr()})
637 | rc, err := NewRedisCache(client, opts...)
638 | require.NoError(t, err, "can't make redis cache")
639 | caches = append(caches, rc)
640 |
641 | return caches, func() {
642 | _ = client.Close()
643 | _ = ec.Close()
644 | _ = lc.Close()
645 | _ = rc.Close()
646 | server.Close()
647 | }
648 | }
649 |
650 | type sizedString string
651 |
652 | func (s sizedString) Size() int { return len(s) }
653 |
654 | func (s sizedString) MarshalBinary() (data []byte, err error) {
655 | return []byte(s), nil
656 | }
657 |
658 | type mockPubSub struct {
659 | calledKeys []string
660 | fns []func(fromID, key string)
661 | sync.Mutex
662 | sync.WaitGroup
663 | }
664 |
665 | func (m *mockPubSub) CalledKeys() []string {
666 | m.Lock()
667 | defer m.Unlock()
668 | return m.calledKeys
669 | }
670 |
671 | func (m *mockPubSub) Subscribe(fn func(fromID, key string)) error {
672 | m.Lock()
673 | defer m.Unlock()
674 | m.fns = append(m.fns, fn)
675 | return nil
676 | }
677 |
678 | func (m *mockPubSub) Publish(fromID, key string) error {
679 | m.Lock()
680 | defer m.Unlock()
681 | m.calledKeys = append(m.calledKeys, key)
682 | for _, fn := range m.fns {
683 | fn := fn
684 | m.Add(1)
685 | // run in goroutine to prevent deadlock
686 | go func() {
687 | fn(fromID, key)
688 | m.Done()
689 | }()
690 | }
691 | return nil
692 | }
693 |
--------------------------------------------------------------------------------
/eventbus/pubsub.go:
--------------------------------------------------------------------------------
1 | // Package eventbus provides PubSub interface used for distributed cache invalidation,
2 | // as well as NopPubSub and RedisPubSub implementations.
3 | package eventbus
4 |
5 | // PubSub interface is used for distributed cache invalidation.
6 | // Publish is called on each entry invalidation,
7 | // Subscribe is used to subscribe to these events.
8 | type PubSub interface {
9 | Publish(fromID, key string) error
10 | Subscribe(fn func(fromID, key string)) error
11 | }
12 |
13 | // NopPubSub implements default do-nothing pub-sub (event bus)
14 | type NopPubSub struct{}
15 |
16 | // Subscribe does nothing for NopPubSub
17 | func (n *NopPubSub) Subscribe(func(fromID string, key string)) error {
18 | return nil
19 | }
20 |
21 | // Publish does nothing for NopPubSub
22 | func (n *NopPubSub) Publish(string, string) error {
23 | return nil
24 | }
25 |
--------------------------------------------------------------------------------
/eventbus/pubsub_test.go:
--------------------------------------------------------------------------------
1 | package eventbus
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestNopPubSub(t *testing.T) {
10 | nopPubSub := NopPubSub{}
11 | assert.NoError(t, nopPubSub.Subscribe(nil))
12 | assert.NoError(t, nopPubSub.Publish("", ""))
13 | }
14 |
--------------------------------------------------------------------------------
/eventbus/redis.go:
--------------------------------------------------------------------------------
1 | package eventbus
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "strings"
7 | "time"
8 |
9 | "github.com/redis/go-redis/v9"
10 |
11 | "github.com/hashicorp/go-multierror"
12 | )
13 |
14 | // NewRedisPubSub creates new RedisPubSub with given parameters.
15 | // Returns an error in case of problems with creating PubSub client for specified channel.
16 | func NewRedisPubSub(addr, channel string) (*RedisPubSub, error) {
17 | client := redis.NewClient(&redis.Options{Addr: addr})
18 | pubSub := client.Subscribe(context.Background(), channel)
19 | // wait for subscription to be created and ignore the message
20 | if _, err := pubSub.Receive(context.Background()); err != nil {
21 | _ = client.Close()
22 | return nil, fmt.Errorf("problem subscribing to channel %s on address %s: %w", channel, addr, err)
23 | }
24 | return &RedisPubSub{client: client, pubSub: pubSub, channel: channel, done: make(chan struct{})}, nil
25 | }
26 |
27 | // RedisPubSub provides Redis implementation for PubSub interface
28 | type RedisPubSub struct {
29 | client *redis.Client
30 | pubSub *redis.PubSub
31 | channel string
32 |
33 | done chan struct{}
34 | }
35 |
36 | // Subscribe calls the provided function for every invalidation message received on the channel set at RedisPubSub creation.
37 | // Should not be called more than once. Spawns a goroutine and does not return an error.
38 | func (m *RedisPubSub) Subscribe(fn func(fromID, key string)) error {
39 | go func(done <-chan struct{}, pubsub *redis.PubSub) {
40 | for {
41 | select {
42 | case <-done:
43 | return
44 | default:
45 | }
46 | msg, err := pubsub.ReceiveTimeout(context.Background(), time.Second*10)
47 | if err != nil {
48 | continue
49 | }
50 |
51 | // Process the message
52 | if msg, ok := msg.(*redis.Message); ok {
53 | payload := strings.Split(msg.Payload, "$")
54 | fn(payload[0], strings.Join(payload[1:], "$"))
55 | }
56 | }
57 | }(m.done, m.pubSub)
58 |
59 | return nil
60 | }
61 |
62 | // Publish publishes provided message to channel provided on new RedisPubSub instance creation
63 | func (m *RedisPubSub) Publish(fromID, key string) error {
64 | return m.client.Publish(context.Background(), m.channel, fromID+"$"+key).Err()
65 | }
66 |
67 | // Close cleans up running goroutines and closes Redis clients
68 | func (m *RedisPubSub) Close() error {
69 | close(m.done)
70 | errs := new(multierror.Error)
71 | if err := m.pubSub.Close(); err != nil {
72 | errs = multierror.Append(errs, fmt.Errorf("problem closing pubSub client: %w", err))
73 | }
74 | if err := m.client.Close(); err != nil {
75 | errs = multierror.Append(errs, fmt.Errorf("problem closing redis client: %w", err))
76 | }
77 | return errs.ErrorOrNil()
78 | }
79 |
--------------------------------------------------------------------------------
/eventbus/redis_test.go:
--------------------------------------------------------------------------------
1 | package eventbus
2 |
3 | import (
4 | "math/rand"
5 | "os"
6 | "strconv"
7 | "testing"
8 | "time"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
14 | func TestNewRedisPubSub_Error(t *testing.T) {
15 | redisPubSub, err := NewRedisPubSub("127.0.0.1:99999", "test")
16 | require.Error(t, err)
17 | require.Nil(t, redisPubSub)
18 | }
19 |
20 | func TestRedisPubSub(t *testing.T) {
21 | if _, ok := os.LookupEnv("ENABLE_REDIS_TESTS"); !ok {
22 | t.Skip("ENABLE_REDIS_TESTS env variable is not set, not expecting Redis to be ready at 127.0.0.1:6379")
23 | }
24 |
25 | //nolint:gosec // not used for security purpose
26 | channel := "lcw-test-" + strconv.Itoa(rand.Intn(1000000))
27 | redisPubSub, err := NewRedisPubSub("127.0.0.1:6379", channel)
28 | require.NoError(t, err)
29 | require.NotNil(t, redisPubSub)
30 | var called []string
31 | assert.Nil(t, redisPubSub.Subscribe(func(fromID, key string) {
32 | called = append(called, fromID, key)
33 | }))
34 | assert.NoError(t, redisPubSub.Publish("test_fromID", "$test$key$"))
35 | // Sleep to let the Subscribe goroutine pick up the published message
36 | time.Sleep(time.Second)
37 | assert.NoError(t, redisPubSub.Close())
38 | assert.Equal(t, []string{"test_fromID", "$test$key$"}, called)
39 | }
40 |
--------------------------------------------------------------------------------
/expirable_cache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "sync/atomic"
6 | "time"
7 |
8 | "github.com/go-pkgz/lcw/eventbus"
9 | "github.com/go-pkgz/lcw/internal/cache"
10 | "github.com/google/uuid"
11 | )
12 |
13 | // ExpirableCache implements LoadingCache with TTL.
14 | type ExpirableCache struct {
15 | options
16 | CacheStat
17 | currentSize int64
18 | id string
19 | backend *cache.LoadingCache
20 | }
21 |
22 | // NewExpirableCache makes an expirable LoadingCache implementation with 1000 max keys and 5m TTL by default
23 | func NewExpirableCache(opts ...Option) (*ExpirableCache, error) {
24 | res := ExpirableCache{
25 | options: options{
26 | maxKeys: 1000,
27 | maxValueSize: 0,
28 | ttl: 5 * time.Minute,
29 | eventBus: &eventbus.NopPubSub{},
30 | },
31 | id: uuid.New().String(),
32 | }
33 |
34 | for _, opt := range opts {
35 | if err := opt(&res.options); err != nil {
36 | return nil, fmt.Errorf("failed to set cache option: %w", err)
37 | }
38 | }
39 |
40 | if err := res.eventBus.Subscribe(res.onBusEvent); err != nil {
41 | return nil, fmt.Errorf("can't subscribe to event bus: %w", err)
42 | }
43 |
44 | backend, err := cache.NewLoadingCache(
45 | cache.MaxKeys(res.maxKeys),
46 | cache.TTL(res.ttl),
47 | cache.PurgeEvery(res.ttl/2),
48 | cache.OnEvicted(func(key string, value interface{}) {
49 | if res.onEvicted != nil {
50 | res.onEvicted(key, value)
51 | }
52 | if s, ok := value.(Sizer); ok {
53 | size := s.Size()
54 | atomic.AddInt64(&res.currentSize, -1*int64(size))
55 | }
56 | // ignore the error on Publish as we don't have log inside the module and
57 | // there is no other way to handle it: we publish the cache invalidation
58 | // and hope for the best
59 | _ = res.eventBus.Publish(res.id, key)
60 | }),
61 | )
62 | if err != nil {
63 | return nil, fmt.Errorf("error creating backend: %w", err)
64 | }
65 | res.backend = backend
66 |
67 | return &res, nil
68 | }
69 |
70 | // Get gets value by key or load with fn if not found in cache
71 | func (c *ExpirableCache) Get(key string, fn func() (interface{}, error)) (data interface{}, err error) {
72 | if v, ok := c.backend.Get(key); ok {
73 | atomic.AddInt64(&c.Hits, 1)
74 | return v, nil
75 | }
76 |
77 | if data, err = fn(); err != nil {
78 | atomic.AddInt64(&c.Errors, 1)
79 | return data, err
80 | }
81 | atomic.AddInt64(&c.Misses, 1)
82 |
83 | if !c.allowed(key, data) {
84 | return data, nil
85 | }
86 |
87 | if s, ok := data.(Sizer); ok {
88 | if c.maxCacheSize > 0 && atomic.LoadInt64(&c.currentSize)+int64(s.Size()) >= c.maxCacheSize {
89 | c.backend.DeleteExpired()
90 | return data, nil
91 | }
92 | atomic.AddInt64(&c.currentSize, int64(s.Size()))
93 | }
94 |
95 | c.backend.Set(key, data)
96 |
97 | return data, nil
98 | }
99 |
100 | // Invalidate removes keys with passed predicate fn, i.e. fn(key) should be true to get evicted
101 | func (c *ExpirableCache) Invalidate(fn func(key string) bool) {
102 | c.backend.InvalidateFn(fn)
103 | }
104 |
105 | // Peek returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
106 | func (c *ExpirableCache) Peek(key string) (interface{}, bool) {
107 | return c.backend.Peek(key)
108 | }
109 |
110 | // Purge clears the cache completely.
111 | func (c *ExpirableCache) Purge() {
112 | c.backend.Purge()
113 | atomic.StoreInt64(&c.currentSize, 0)
114 | }
115 |
116 | // Delete cache item by key
117 | func (c *ExpirableCache) Delete(key string) {
118 | c.backend.Invalidate(key)
119 | }
120 |
121 | // Keys returns cache keys
122 | func (c *ExpirableCache) Keys() (res []string) {
123 | return c.backend.Keys()
124 | }
125 |
126 | // Stat returns cache statistics
127 | func (c *ExpirableCache) Stat() CacheStat {
128 | return CacheStat{
129 | Hits: c.Hits,
130 | Misses: c.Misses,
131 | Size: c.size(),
132 | Keys: c.keys(),
133 | Errors: c.Errors,
134 | }
135 | }
136 |
137 | // Close kills cleanup goroutine
138 | func (c *ExpirableCache) Close() error {
139 | c.backend.Close()
140 | return nil
141 | }
142 |
143 | // onBusEvent reacts to an invalidation message triggered by the event bus from another cache instance
144 | func (c *ExpirableCache) onBusEvent(id, key string) {
145 | if id != c.id {
146 | c.backend.Invalidate(key)
147 | }
148 | }
149 |
150 | func (c *ExpirableCache) size() int64 {
151 | return atomic.LoadInt64(&c.currentSize)
152 | }
153 |
154 | func (c *ExpirableCache) keys() int {
155 | return c.backend.ItemCount()
156 | }
157 |
158 | func (c *ExpirableCache) allowed(key string, data interface{}) bool {
159 | if c.backend.ItemCount() >= c.maxKeys {
160 | return false
161 | }
162 | if c.maxKeySize > 0 && len(key) > c.maxKeySize {
163 | return false
164 | }
165 | if s, ok := data.(Sizer); ok {
166 | if c.maxValueSize > 0 && s.Size() >= c.maxValueSize {
167 | return false
168 | }
169 | }
170 | return true
171 | }
172 |
--------------------------------------------------------------------------------
/expirable_cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "sort"
6 | "sync/atomic"
7 | "testing"
8 | "time"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
14 | func TestExpirableCache(t *testing.T) {
15 | lc, err := NewExpirableCache(MaxKeys(5), TTL(time.Millisecond*100))
16 | require.NoError(t, err)
17 | for i := 0; i < 5; i++ {
18 | i := i
19 | _, e := lc.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
20 | return fmt.Sprintf("result-%d", i), nil
21 | })
22 | assert.NoError(t, e)
23 | time.Sleep(10 * time.Millisecond)
24 | }
25 |
26 | assert.Equal(t, 5, lc.Stat().Keys)
27 | assert.Equal(t, int64(5), lc.Stat().Misses)
28 |
29 | keys := lc.Keys()
30 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
31 | assert.EqualValues(t, []string{"key-0", "key-1", "key-2", "key-3", "key-4"}, keys)
32 |
33 | _, e := lc.Get("key-xx", func() (interface{}, error) {
34 | return "result-xx", nil
35 | })
36 | assert.NoError(t, e)
37 | assert.Equal(t, 5, lc.Stat().Keys)
38 | assert.Equal(t, int64(6), lc.Stat().Misses)
39 |
40 | // let key-0 expire, GitHub Actions friendly way
41 | for lc.Stat().Keys > 4 {
42 | lc.backend.DeleteExpired() // enforce DeleteExpired for GitHub earlier than TTL/2
43 | time.Sleep(time.Millisecond * 10)
44 | }
45 | assert.Equal(t, 4, lc.Stat().Keys)
46 |
47 | time.Sleep(210 * time.Millisecond)
48 | assert.Equal(t, 0, lc.keys())
49 | assert.Equal(t, []string{}, lc.Keys())
50 |
51 | assert.NoError(t, lc.Close())
52 | }
53 |
54 | func TestExpirableCache_MaxKeys(t *testing.T) {
55 | var coldCalls int32
56 | lc, err := NewExpirableCache(MaxKeys(5), MaxValSize(10))
57 | require.NoError(t, err)
58 |
59 | // put 5 keys to cache
60 | for i := 0; i < 5; i++ {
61 | i := i
62 | res, e := lc.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
63 | atomic.AddInt32(&coldCalls, 1)
64 | return fmt.Sprintf("result-%d", i), nil
65 | })
66 | assert.NoError(t, e)
67 | assert.Equal(t, fmt.Sprintf("result-%d", i), res.(string))
68 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
69 | }
70 |
71 | // check if really cached
72 | res, err := lc.Get("key-3", func() (interface{}, error) {
73 | return "result-blah", nil
74 | })
75 | assert.NoError(t, err)
76 | assert.Equal(t, "result-3", res.(string), "should be cached")
77 |
78 | // try to cache after maxKeys reached
79 | res, err = lc.Get("key-X", func() (interface{}, error) {
80 | return "result-X", nil
81 | })
82 | assert.NoError(t, err)
83 | assert.Equal(t, "result-X", res.(string))
84 | assert.Equal(t, 5, lc.keys())
85 |
86 | // put to cache and make sure it is cached
87 | res, err = lc.Get("key-Z", func() (interface{}, error) {
88 | return "result-Z", nil
89 | })
90 | assert.NoError(t, err)
91 | assert.Equal(t, "result-Z", res.(string))
92 |
93 | res, err = lc.Get("key-Z", func() (interface{}, error) {
94 | return "result-Zzzz", nil
95 | })
96 | assert.NoError(t, err)
97 | assert.Equal(t, "result-Zzzz", res.(string), "got non-cached value")
98 | assert.Equal(t, 5, lc.keys())
99 |
100 | assert.NoError(t, lc.Close())
101 | }
102 |
103 | func TestExpirableCache_BadOptions(t *testing.T) {
104 | _, err := NewExpirableCache(MaxCacheSize(-1))
105 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
106 |
107 | _, err = NewExpirableCache(MaxKeySize(-1))
108 | assert.EqualError(t, err, "failed to set cache option: negative max key size")
109 |
110 | _, err = NewExpirableCache(MaxKeys(-1))
111 | assert.EqualError(t, err, "failed to set cache option: negative max keys")
112 |
113 | _, err = NewExpirableCache(MaxValSize(-1))
114 | assert.EqualError(t, err, "failed to set cache option: negative max value size")
115 |
116 | _, err = NewExpirableCache(TTL(-1))
117 | assert.EqualError(t, err, "failed to set cache option: negative ttl")
118 | }
119 |
120 | func TestExpirableCacheWithBus(t *testing.T) {
121 | ps := &mockPubSub{}
122 | lc1, err := NewExpirableCache(MaxKeys(5), TTL(time.Millisecond*100), EventBus(ps))
123 | require.NoError(t, err)
124 | defer lc1.Close()
125 |
126 | lc2, err := NewExpirableCache(MaxKeys(50), TTL(time.Millisecond*5000), EventBus(ps))
127 | require.NoError(t, err)
128 | defer lc2.Close()
129 |
130 | // add 5 keys to the first node cache
131 | for i := 0; i < 5; i++ {
132 | i := i
133 | _, e := lc1.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
134 | return fmt.Sprintf("result-%d", i), nil
135 | })
136 | assert.NoError(t, e)
137 | time.Sleep(10 * time.Millisecond)
138 | }
139 |
140 | assert.Equal(t, 0, len(ps.CalledKeys()), "no events")
141 | assert.Equal(t, 5, lc1.Stat().Keys)
142 | assert.Equal(t, int64(5), lc1.Stat().Misses)
143 |
144 | // add key-1 to the second node
145 | _, e := lc2.Get("key-1", func() (interface{}, error) {
146 | return "result-111", nil
147 | })
148 | assert.NoError(t, e)
149 | assert.Equal(t, 1, lc2.Stat().Keys)
150 | assert.Equal(t, int64(1), lc2.Stat().Misses, lc2.Stat())
151 |
152 | // let key-0 expire, GitHub Actions friendly way
153 | for lc1.Stat().Keys > 4 {
154 | lc1.backend.DeleteExpired() // enforce DeleteExpired for GitHub earlier than TTL/2
155 | ps.Wait() // wait for onBusEvent goroutines to finish
156 | time.Sleep(time.Millisecond * 10)
157 | }
158 | assert.Equal(t, 4, lc1.Stat().Keys)
159 | assert.Equal(t, 1, lc2.Stat().Keys, "key-1 still in cache2")
160 | assert.Equal(t, 1, len(ps.CalledKeys()))
161 |
162 | time.Sleep(210 * time.Millisecond) // let all keys expire
163 | ps.Wait() // wait for onBusEvent goroutines to finish
164 | assert.Equal(t, 6, len(ps.CalledKeys()), "6 events, key-1 expired %+v", ps.calledKeys)
165 | assert.Equal(t, 0, lc1.Stat().Keys)
166 | assert.Equal(t, 0, lc2.Stat().Keys, "key-1 removed from cache2")
167 | }
168 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/go-pkgz/lcw
2 |
3 | go 1.21
4 |
5 | toolchain go1.21.6
6 |
7 | require (
8 | github.com/alicebob/miniredis/v2 v2.31.1
9 | github.com/google/uuid v1.5.0
10 | github.com/hashicorp/go-multierror v1.1.1
11 | github.com/hashicorp/golang-lru v1.0.2
12 | github.com/redis/go-redis/v9 v9.4.0
13 | github.com/stretchr/testify v1.8.4
14 | )
15 |
16 | require (
17 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
18 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
19 | github.com/davecgh/go-spew v1.1.1 // indirect
20 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
21 | github.com/hashicorp/errwrap v1.0.0 // indirect
22 | github.com/pmezard/go-difflib v1.0.0 // indirect
23 | github.com/yuin/gopher-lua v1.1.0 // indirect
24 | gopkg.in/yaml.v3 v3.0.1 // indirect
25 | )
26 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
2 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
3 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
4 | github.com/alicebob/miniredis/v2 v2.31.1 h1:7XAt0uUg3DtwEKW5ZAGa+K7FZV2DdKQo5K/6TTnfX8Y=
5 | github.com/alicebob/miniredis/v2 v2.31.1/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CASoprx0wulRT6HBg=
6 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
7 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
8 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
9 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
10 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
11 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
12 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
13 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
14 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
15 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
16 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
17 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
18 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
19 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
20 | github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
21 | github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
22 | github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
23 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
24 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
25 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
26 | github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
27 | github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
28 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
29 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
30 | github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk=
31 | github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
32 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
33 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
34 | github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE=
35 | github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
36 | golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
37 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
38 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
39 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
40 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
41 |
--------------------------------------------------------------------------------
/internal/cache/cache.go:
--------------------------------------------------------------------------------
1 | // Package cache implements LoadingCache.
2 | //
3 | // Supports LRC (least recently created) and TTL-based eviction.
4 | package cache
5 |
6 | import (
7 | "fmt"
8 | "sort"
9 | "sync"
10 | "time"
11 | )
12 |
13 | // LoadingCache provides expirable loading cache with LRC eviction.
14 | type LoadingCache struct {
15 | purgeEvery time.Duration
16 | ttl time.Duration
17 | maxKeys int64
18 | done chan struct{}
19 | onEvicted func(key string, value interface{})
20 |
21 | mu sync.Mutex
22 | data map[string]*cacheItem
23 | }
24 |
25 | // noEvictionTTL - very long ttl to prevent eviction
26 | const noEvictionTTL = time.Hour * 24 * 365 * 10
27 |
28 | // NewLoadingCache returns a new expirable LRC cache, activates purge with purgeEvery (0 to never purge).
29 | // Default MaxKeys is unlimited (0).
30 | func NewLoadingCache(options ...Option) (*LoadingCache, error) {
31 | res := LoadingCache{
32 | data: map[string]*cacheItem{},
33 | ttl: noEvictionTTL,
34 | purgeEvery: 0,
35 | maxKeys: 0,
36 | done: make(chan struct{}),
37 | }
38 |
39 | for _, opt := range options {
40 | if err := opt(&res); err != nil {
41 | return nil, fmt.Errorf("failed to set cache option: %w", err)
42 | }
43 | }
44 |
45 | if res.maxKeys > 0 || res.purgeEvery > 0 {
46 | if res.purgeEvery == 0 {
47 | res.purgeEvery = time.Minute * 5 // non-zero purge enforced because maxKeys defined
48 | }
49 | go func(done <-chan struct{}) {
50 | ticker := time.NewTicker(res.purgeEvery)
51 | for {
52 | select {
53 | case <-done:
54 | return
55 | case <-ticker.C:
56 | res.mu.Lock()
57 | res.purge(res.maxKeys)
58 | res.mu.Unlock()
59 | }
60 | }
61 | }(res.done)
62 | }
63 | return &res, nil
64 | }
65 |
66 | // Set key
67 | func (c *LoadingCache) Set(key string, value interface{}) {
68 | c.mu.Lock()
69 | defer c.mu.Unlock()
70 |
71 | now := time.Now()
72 | if _, ok := c.data[key]; !ok {
73 | c.data[key] = &cacheItem{}
74 | }
75 | c.data[key].data = value
76 | c.data[key].expiresAt = now.Add(c.ttl)
77 |
78 | // Enforced purge call in addition to the one from the ticker
79 | // to limit the worst-case scenario with a lot of sets in the
80 | // short period of time (between two timed purge calls)
81 | if c.maxKeys > 0 && int64(len(c.data)) >= c.maxKeys*2 {
82 | c.purge(c.maxKeys)
83 | }
84 | }
85 |
86 | // Get returns the key value
87 | func (c *LoadingCache) Get(key string) (interface{}, bool) {
88 | c.mu.Lock()
89 | defer c.mu.Unlock()
90 | value, ok := c.getValue(key)
91 | if !ok {
92 | return nil, false
93 | }
94 | return value, ok
95 | }
96 |
97 | // Peek returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
98 | func (c *LoadingCache) Peek(key string) (interface{}, bool) {
99 | c.mu.Lock()
100 | defer c.mu.Unlock()
101 | value, ok := c.getValue(key)
102 | if !ok {
103 | return nil, false
104 | }
105 | return value, ok
106 | }
107 |
108 | // Invalidate key (item) from the cache
109 | func (c *LoadingCache) Invalidate(key string) {
110 | c.mu.Lock()
111 | if value, ok := c.data[key]; ok {
112 | delete(c.data, key)
113 | if c.onEvicted != nil {
114 | c.onEvicted(key, value.data)
115 | }
116 | }
117 | c.mu.Unlock()
118 | }
119 |
120 | // InvalidateFn deletes multiple keys if predicate is true
121 | func (c *LoadingCache) InvalidateFn(fn func(key string) bool) {
122 | c.mu.Lock()
123 | for key, value := range c.data {
124 | if fn(key) {
125 | delete(c.data, key)
126 | if c.onEvicted != nil {
127 | c.onEvicted(key, value.data)
128 | }
129 | }
130 | }
131 | c.mu.Unlock()
132 | }
133 |
134 | // Keys returns a slice of the current keys in the cache
135 | func (c *LoadingCache) Keys() []string {
136 | c.mu.Lock()
137 | defer c.mu.Unlock()
138 | keys := make([]string, 0, len(c.data))
139 | for k := range c.data {
140 | keys = append(keys, k)
141 | }
142 | return keys
143 | }
144 |
145 | // get value respecting the expiration, should be called with lock
146 | func (c *LoadingCache) getValue(key string) (interface{}, bool) {
147 | value, ok := c.data[key]
148 | if !ok {
149 | return nil, false
150 | }
151 | if time.Now().After(c.data[key].expiresAt) {
152 | return nil, false
153 | }
154 | return value.data, ok
155 | }
156 |
157 | // Purge clears the cache completely.
158 | func (c *LoadingCache) Purge() {
159 | c.mu.Lock()
160 | defer c.mu.Unlock()
161 |
162 | // replace the map to release the memory; otherwise the old map would keep capacity for the same number of entries to avoid reallocations
163 | oldData := c.data
164 | c.data = make(map[string]*cacheItem)
165 |
166 | for k, v := range oldData {
167 | if c.onEvicted != nil {
168 | c.onEvicted(k, v.data)
169 | }
170 | }
171 | }
172 |
173 | // DeleteExpired clears cache of expired items
174 | func (c *LoadingCache) DeleteExpired() {
175 | c.mu.Lock()
176 | defer c.mu.Unlock()
177 | c.purge(0)
178 | }
179 |
180 | // ItemCount returns the count of items in the cache
181 | func (c *LoadingCache) ItemCount() int {
182 | c.mu.Lock()
183 | n := len(c.data)
184 | c.mu.Unlock()
185 | return n
186 | }
187 |
188 | // Close cleans the cache and destroys running goroutines
189 | func (c *LoadingCache) Close() {
190 | c.mu.Lock()
191 | defer c.mu.Unlock()
192 | // don't panic in case service is already closed
193 | select {
194 | case <-c.done:
195 | return
196 | default:
197 | }
198 | close(c.done)
199 | }
200 |
201 | // keysWithTS is a list of keys with their expiration timestamps, used for sorting keys
202 | // to provide least recently added eviction when the size limit is enforced
203 | type keysWithTS []struct {
204 | key string
205 | ts time.Time
206 | }
207 |
208 | // purge evicts records above maxKeys. Has to be called with the lock held!
209 | // a call with maxKeys 0 will only clear expired entries.
210 | func (c *LoadingCache) purge(maxKeys int64) {
211 | kts := keysWithTS{}
212 |
213 | for key, value := range c.data {
214 | // ttl eviction
215 | if time.Now().After(value.expiresAt) {
216 | delete(c.data, key)
217 | if c.onEvicted != nil {
218 | c.onEvicted(key, value.data)
219 | }
220 | }
221 |
222 | // prepare list of keysWithTS for size eviction
223 | if maxKeys > 0 && int64(len(c.data)) > maxKeys {
224 | kts = append(kts, struct {
225 | key string
226 | ts time.Time
227 | }{key, value.expiresAt})
228 | }
229 | }
230 |
231 | // size eviction
232 | size := int64(len(c.data))
233 | if len(kts) > 0 {
234 | sort.Slice(kts, func(i int, j int) bool { return kts[i].ts.Before(kts[j].ts) })
235 | for d := 0; int64(d) < size-maxKeys; d++ {
236 | key := kts[d].key
237 | value := c.data[key].data
238 | delete(c.data, key)
239 | if c.onEvicted != nil {
240 | c.onEvicted(key, value)
241 | }
242 | }
243 | }
244 | }
245 |
246 | type cacheItem struct {
247 | expiresAt time.Time
248 | data interface{}
249 | }
250 |
--------------------------------------------------------------------------------
/internal/cache/cache_test.go:
--------------------------------------------------------------------------------
1 | package cache
2 |
3 | import (
4 | "fmt"
5 | "runtime"
6 | "sync"
7 | "testing"
8 | "time"
9 |
10 | "github.com/stretchr/testify/assert"
11 | )
12 |
13 | func TestLoadingCacheNoPurge(t *testing.T) {
14 | lc, err := NewLoadingCache()
15 | assert.NoError(t, err)
16 | defer lc.Close()
17 |
18 | lc.Set("key1", "val1")
19 | assert.Equal(t, 1, lc.ItemCount())
20 |
21 | v, ok := lc.Peek("key1")
22 | assert.Equal(t, "val1", v)
23 | assert.True(t, ok)
24 |
25 | v, ok = lc.Peek("key2")
26 | assert.Empty(t, v)
27 | assert.False(t, ok)
28 |
29 | assert.Equal(t, []string{"key1"}, lc.Keys())
30 | }
31 |
32 | func TestLoadingCacheWithPurge(t *testing.T) {
33 | var evicted []string
34 | lc, err := NewLoadingCache(
35 | PurgeEvery(time.Millisecond*100),
36 | TTL(150*time.Millisecond),
37 | OnEvicted(func(key string, value interface{}) { evicted = append(evicted, key, value.(string)) }),
38 | )
39 | assert.NoError(t, err)
40 | defer lc.Close()
41 |
42 | lc.Set("key1", "val1")
43 |
44 | time.Sleep(100 * time.Millisecond) // not enough to expire
45 | assert.Equal(t, 1, lc.ItemCount())
46 |
47 | v, ok := lc.Get("key1")
48 | assert.Equal(t, "val1", v)
49 | assert.True(t, ok)
50 |
51 | time.Sleep(200 * time.Millisecond) // expire
52 | v, ok = lc.Get("key1")
53 | assert.False(t, ok)
54 | assert.Nil(t, v)
55 |
56 | assert.Equal(t, 0, lc.ItemCount())
57 | assert.Equal(t, []string{"key1", "val1"}, evicted)
58 |
59 | // add new entry
60 | lc.Set("key2", "val2")
61 | assert.Equal(t, 1, lc.ItemCount())
62 |
63 | time.Sleep(200 * time.Millisecond) // expire key2
64 |
65 | // DeleteExpired, key2 deleted
66 | lc.DeleteExpired()
67 | assert.Equal(t, 0, lc.ItemCount())
68 | assert.Equal(t, []string{"key1", "val1", "key2", "val2"}, evicted)
69 |
70 | // add third entry
71 | lc.Set("key3", "val3")
72 | assert.Equal(t, 1, lc.ItemCount())
73 |
74 | // Purge, cache should be clean
75 | lc.Purge()
76 | assert.Equal(t, 0, lc.ItemCount())
77 | assert.Equal(t, []string{"key1", "val1", "key2", "val2", "key3", "val3"}, evicted)
78 | }
79 |
80 | func TestLoadingCacheWithPurgeEnforcedBySize(t *testing.T) {
81 | lc, err := NewLoadingCache(MaxKeys(10))
82 | assert.NoError(t, err)
83 | defer lc.Close()
84 |
85 | for i := 0; i < 100; i++ {
86 | i := i
87 | lc.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("val%d", i))
88 | v, ok := lc.Get(fmt.Sprintf("key%d", i))
89 | assert.Equal(t, fmt.Sprintf("val%d", i), v)
90 | assert.True(t, ok)
91 | assert.True(t, lc.ItemCount() < 20)
92 | }
93 |
94 | assert.Equal(t, 10, lc.ItemCount())
95 | }
96 |
97 | func TestLoadingCacheWithPurgeMax(t *testing.T) {
98 | lc, err := NewLoadingCache(PurgeEvery(time.Millisecond*50), MaxKeys(2))
99 | assert.NoError(t, err)
100 | defer lc.Close()
101 |
102 | lc.Set("key1", "val1")
103 | lc.Set("key2", "val2")
104 | lc.Set("key3", "val3")
105 | assert.Equal(t, 3, lc.ItemCount())
106 |
107 | time.Sleep(100 * time.Millisecond)
108 | assert.Equal(t, 2, lc.ItemCount())
109 |
110 | _, found := lc.Get("key1")
111 | assert.False(t, found, "key1 should be deleted")
112 | }
113 |
114 | func TestLoadingCacheConcurrency(t *testing.T) {
115 | lc, err := NewLoadingCache()
116 | assert.NoError(t, err)
117 | defer lc.Close()
118 | wg := sync.WaitGroup{}
119 | wg.Add(1000)
120 | for i := 0; i < 1000; i++ {
121 | go func(i int) {
122 | lc.Set(fmt.Sprintf("key-%d", i/10), fmt.Sprintf("val-%d", i/10))
123 | wg.Done()
124 | }(i)
125 | }
126 | wg.Wait()
127 | assert.Equal(t, 100, lc.ItemCount())
128 | }
129 |
130 | func TestLoadingCacheInvalidateAndEvict(t *testing.T) {
131 | var evicted int
132 | lc, err := NewLoadingCache(OnEvicted(func(_ string, _ interface{}) { evicted++ }))
133 | assert.NoError(t, err)
134 | defer lc.Close()
135 |
136 | lc.Set("key1", "val1")
137 | lc.Set("key2", "val2")
138 |
139 | val, ok := lc.Get("key1")
140 | assert.True(t, ok)
141 | assert.Equal(t, "val1", val)
142 | assert.Equal(t, 0, evicted)
143 |
144 | lc.Invalidate("key1")
145 | assert.Equal(t, 1, evicted)
146 | val, ok = lc.Get("key1")
147 | assert.Empty(t, val)
148 | assert.False(t, ok)
149 |
150 | val, ok = lc.Get("key2")
151 | assert.True(t, ok)
152 | assert.Equal(t, "val2", val)
153 |
154 | lc.InvalidateFn(func(key string) bool {
155 | return key == "key2"
156 | })
157 | assert.Equal(t, 2, evicted)
158 | _, ok = lc.Get("key2")
159 | assert.False(t, ok)
160 | assert.Equal(t, 0, lc.ItemCount())
161 | }
162 |
163 | func TestLoadingCacheBadOption(t *testing.T) {
164 | lc, err := NewLoadingCache(func(_ *LoadingCache) error {
165 | return fmt.Errorf("mock err")
166 | })
167 | assert.EqualError(t, err, "failed to set cache option: mock err")
168 | assert.Nil(t, lc)
169 | }
170 |
171 | func TestLoadingExpired(t *testing.T) {
172 | lc, err := NewLoadingCache(TTL(time.Millisecond * 5))
173 | assert.NoError(t, err)
174 | defer lc.Close()
175 |
176 | lc.Set("key1", "val1")
177 | assert.Equal(t, 1, lc.ItemCount())
178 |
179 | v, ok := lc.Peek("key1")
180 | assert.Equal(t, v, "val1")
181 | assert.True(t, ok)
182 |
183 | v, ok = lc.Get("key1")
184 | assert.Equal(t, v, "val1")
185 | assert.True(t, ok)
186 |
187 | time.Sleep(time.Millisecond * 10) // wait for entry to expire
188 | assert.Equal(t, 1, lc.ItemCount()) // but not purged
189 |
190 | v, ok = lc.Peek("key1")
191 | assert.Empty(t, v)
192 | assert.False(t, ok)
193 |
194 | v, ok = lc.Get("key1")
195 | assert.Empty(t, v)
196 | assert.False(t, ok)
197 | }
198 |
199 | func TestDoubleClose(t *testing.T) {
200 | lc, err := NewLoadingCache(TTL(time.Millisecond * 5))
201 | assert.NoError(t, err)
202 | lc.Close()
203 | lc.Close() // don't panic in case service is already closed
204 | }
205 |
206 | func TestBucketsLeak(t *testing.T) {
207 | const n = 1_000_000
208 |
209 | gcAndGetAllocKb := func() int {
210 | stats := runtime.MemStats{}
211 | runtime.GC()
212 | runtime.ReadMemStats(&stats)
213 | return int(stats.Alloc / 1024)
214 | }
215 |
216 | lc, err := NewLoadingCache()
217 | assert.NoError(t, err)
218 | allocKB := gcAndGetAllocKb()
219 | t.Logf("allocated before start: %dKB\n", allocKB)
220 | assert.Less(t, allocKB, 1024, "alloc should be less than 1024KB before we start")
221 |
222 | for i := 0; i < n; i++ {
223 | lc.Set(fmt.Sprintf("key-%d", i), fmt.Sprintf("val-%d", i))
224 | }
225 | allocKB = gcAndGetAllocKb()
226 | t.Logf("alloc after storing %d entries: %dKB\n", n, allocKB)
227 | assert.Greater(t, allocKB, 1024, "alloc should be more than 1024KB when we have a lot of entries")
228 |
229 | lc.Purge()
230 | allocKB = gcAndGetAllocKb()
231 | t.Logf("allocated after the Purge call: %dKB\n", allocKB)
232 | assert.Less(t, allocKB, 1024, "alloc should be less than 1024KB after the Purge call")
233 |
234 | // Prevents optimization
235 | runtime.KeepAlive(lc)
236 | }
237 |
--------------------------------------------------------------------------------
/internal/cache/options.go:
--------------------------------------------------------------------------------
1 | package cache
2 |
3 | import "time"
4 |
5 | // Option func type
6 | type Option func(lc *LoadingCache) error
7 |
8 | // OnEvicted sets a callback which is called for expired and manually deleted entries
9 | func OnEvicted(fn func(key string, value interface{})) Option {
10 | return func(lc *LoadingCache) error {
11 | lc.onEvicted = fn
12 | return nil
13 | }
14 | }
15 |
16 | // PurgeEvery functional option defines the purge interval.
17 | // By default it is 0, i.e. never. If MaxKeys is set to a non-zero value, this default becomes 5 minutes.
18 | func PurgeEvery(interval time.Duration) Option {
19 | return func(lc *LoadingCache) error {
20 | lc.purgeEvery = interval
21 | return nil
22 | }
23 | }
24 |
25 | // MaxKeys functional option defines how many keys to keep.
26 | // By default it is 0, which means unlimited.
27 | // If a non-zero MaxKeys is set, the default PurgeEvery will be set to 5 minutes
28 | func MaxKeys(maximum int) Option {
29 | return func(lc *LoadingCache) error {
30 | lc.maxKeys = int64(maximum)
31 | return nil
32 | }
33 | }
34 |
35 | // TTL functional option defines TTL for all cache entries.
36 | // By default it is set to 10 years; a sane option for an expirable cache might be 5 minutes.
37 | func TTL(ttl time.Duration) Option {
38 | return func(lc *LoadingCache) error {
39 | lc.ttl = ttl
40 | return nil
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
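A minimal usage sketch tying the internal LoadingCache and its options together. The values are illustrative, and the snippet is written as if it lived inside the internal cache package, since an internal package is not importable from outside the module:

package cache

import (
	"fmt"
	"time"
)

// exampleUsage is an illustrative sketch, not part of the package:
// an expirable LRC cache with a 5-minute TTL, at most 100 keys,
// a purge loop every minute, and an eviction callback.
func exampleUsage() error {
	lc, err := NewLoadingCache(
		TTL(5*time.Minute),
		MaxKeys(100),
		PurgeEvery(time.Minute),
		OnEvicted(func(key string, value interface{}) { fmt.Println("evicted:", key) }),
	)
	if err != nil {
		return err
	}
	defer lc.Close()

	lc.Set("user:42", "some value")
	if v, ok := lc.Get("user:42"); ok {
		fmt.Println("cached:", v)
	}
	fmt.Println("items:", lc.ItemCount())
	return nil
}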
/lru_cache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "sync/atomic"
6 |
7 | "github.com/go-pkgz/lcw/eventbus"
8 | "github.com/google/uuid"
9 | lru "github.com/hashicorp/golang-lru"
10 | )
11 |
12 | // LruCache wraps lru.Cache with loading cache Get and size limits
13 | type LruCache struct {
14 | options
15 | CacheStat
16 | backend *lru.Cache
17 | currentSize int64
18 | id string // uuid identifying cache instance
19 | }
20 |
21 | // NewLruCache makes LRU LoadingCache implementation, 1000 max keys by default
22 | func NewLruCache(opts ...Option) (*LruCache, error) {
23 | res := LruCache{
24 | options: options{
25 | maxKeys: 1000,
26 | maxValueSize: 0,
27 | eventBus: &eventbus.NopPubSub{},
28 | },
29 | id: uuid.New().String(),
30 | }
31 | for _, opt := range opts {
32 | if err := opt(&res.options); err != nil {
33 | return nil, fmt.Errorf("failed to set cache option: %w", err)
34 | }
35 | }
36 |
37 | err := res.init()
38 | return &res, err
39 | }
40 |
41 | func (c *LruCache) init() error {
42 | if err := c.eventBus.Subscribe(c.onBusEvent); err != nil {
43 | return fmt.Errorf("can't subscribe to event bus: %w", err)
44 | }
45 |
46 | onEvicted := func(key interface{}, value interface{}) {
47 | if c.onEvicted != nil {
48 | c.onEvicted(key.(string), value)
49 | }
50 | if s, ok := value.(Sizer); ok {
51 | size := s.Size()
52 | atomic.AddInt64(&c.currentSize, -1*int64(size))
53 | }
54 | _ = c.eventBus.Publish(c.id, key.(string)) // signal invalidation to other nodes
55 | }
56 |
57 | var err error
58 | // OnEvicted is called automatically for expired and manually deleted entries
59 | if c.backend, err = lru.NewWithEvict(c.maxKeys, onEvicted); err != nil {
60 | return fmt.Errorf("failed to make lru cache backend: %w", err)
61 | }
62 |
63 | return nil
64 | }
65 |
66 | // Get gets value by key or loads it with fn if not found in the cache
67 | func (c *LruCache) Get(key string, fn func() (interface{}, error)) (data interface{}, err error) {
68 | if v, ok := c.backend.Get(key); ok {
69 | atomic.AddInt64(&c.Hits, 1)
70 | return v, nil
71 | }
72 |
73 | if data, err = fn(); err != nil {
74 | atomic.AddInt64(&c.Errors, 1)
75 | return data, err
76 | }
77 |
78 | atomic.AddInt64(&c.Misses, 1)
79 |
80 | if !c.allowed(key, data) {
81 | return data, nil
82 | }
83 |
84 | c.backend.Add(key, data)
85 |
86 | if s, ok := data.(Sizer); ok {
87 | atomic.AddInt64(&c.currentSize, int64(s.Size()))
88 | if c.maxCacheSize > 0 && atomic.LoadInt64(&c.currentSize) > c.maxCacheSize {
89 | for atomic.LoadInt64(&c.currentSize) > c.maxCacheSize {
90 | c.backend.RemoveOldest()
91 | }
92 | }
93 | }
94 |
95 | return data, nil
96 | }
97 |
98 | // Peek returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
99 | func (c *LruCache) Peek(key string) (interface{}, bool) {
100 | return c.backend.Peek(key)
101 | }
102 |
103 | // Purge clears the cache completely.
104 | func (c *LruCache) Purge() {
105 | c.backend.Purge()
106 | atomic.StoreInt64(&c.currentSize, 0)
107 | }
108 |
109 | // Invalidate removes keys with passed predicate fn, i.e. fn(key) should be true to get evicted
110 | func (c *LruCache) Invalidate(fn func(key string) bool) {
111 | for _, k := range c.backend.Keys() { // Keys() returns a copy of the cache's keys, safe to remove directly
112 | if key, ok := k.(string); ok && fn(key) {
113 | c.backend.Remove(key)
114 | }
115 | }
116 | }
117 |
118 | // Delete cache item by key
119 | func (c *LruCache) Delete(key string) {
120 | c.backend.Remove(key)
121 | }
122 |
123 | // Keys returns cache keys
124 | func (c *LruCache) Keys() (res []string) {
125 | keys := c.backend.Keys()
126 | res = make([]string, 0, len(keys))
127 | for _, key := range keys {
128 | res = append(res, key.(string))
129 | }
130 | return res
131 | }
132 |
133 | // Stat returns cache statistics
134 | func (c *LruCache) Stat() CacheStat {
135 | return CacheStat{
136 | Hits: c.Hits,
137 | Misses: c.Misses,
138 | Size: c.size(),
139 | Keys: c.keys(),
140 | Errors: c.Errors,
141 | }
142 | }
143 |
144 | // Close does nothing for this type of cache
145 | func (c *LruCache) Close() error {
146 | return nil
147 | }
148 |
149 | // onBusEvent reacts to an invalidation message published via the event bus by another cache instance
150 | func (c *LruCache) onBusEvent(id, key string) {
151 | if id != c.id && c.backend.Contains(key) { // prevent reaction on event from this cache
152 | c.backend.Remove(key)
153 | }
154 | }
155 |
156 | func (c *LruCache) size() int64 {
157 | return atomic.LoadInt64(&c.currentSize)
158 | }
159 |
160 | func (c *LruCache) keys() int {
161 | return c.backend.Len()
162 | }
163 |
164 | func (c *LruCache) allowed(key string, data interface{}) bool {
165 | if c.maxKeySize > 0 && len(key) > c.maxKeySize {
166 | return false
167 | }
168 | if s, ok := data.(Sizer); ok {
169 | if c.maxValueSize > 0 && s.Size() >= c.maxValueSize {
170 | return false
171 | }
172 | }
173 | return true
174 | }
175 |
--------------------------------------------------------------------------------
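The size limits above only apply to values implementing the Sizer interface (declared elsewhere in this package with a single Size() int method). Below is a minimal, illustrative sketch of opting into size-based eviction; the sizedVal type and the limits are assumptions, not part of the library:

package main

import (
	"fmt"
	"log"

	"github.com/go-pkgz/lcw"
)

// sizedVal reports its own size so LruCache can track currentSize
// and enforce MaxCacheSize / MaxValSize.
type sizedVal []byte

func (v sizedVal) Size() int { return len(v) }

func main() {
	// up to 100 keys, ~1MB total payload, single values capped at 64KB
	c, err := lcw.NewLruCache(lcw.MaxKeys(100), lcw.MaxCacheSize(1024*1024), lcw.MaxValSize(64*1024))
	if err != nil {
		log.Fatalf("can't make lru cache, %v", err)
	}
	defer c.Close()

	v, err := c.Get("page:1", func() (interface{}, error) {
		return sizedVal("rendered page body"), nil // loaded on miss, cached afterwards
	})
	if err != nil {
		log.Fatalf("can't load value, %v", err)
	}
	fmt.Printf("%s, stats: %+v\n", v.(sizedVal), c.Stat())
}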
/lru_cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "log"
7 | "math/rand"
8 | "net/http"
9 | "net/http/httptest"
10 | "os"
11 | "sort"
12 | "strconv"
13 | "sync/atomic"
14 | "testing"
15 | "time"
16 |
17 | "github.com/stretchr/testify/assert"
18 | "github.com/stretchr/testify/require"
19 |
20 | "github.com/go-pkgz/lcw/eventbus"
21 | )
22 |
23 | func TestLruCache_MaxKeys(t *testing.T) {
24 | var coldCalls int32
25 | lc, err := NewLruCache(MaxKeys(5), MaxValSize(10))
26 | require.NoError(t, err)
27 |
28 | // put 5 keys to cache
29 | for i := 0; i < 5; i++ {
30 | i := i
31 | res, e := lc.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
32 | atomic.AddInt32(&coldCalls, 1)
33 | return fmt.Sprintf("result-%d", i), nil
34 | })
35 | assert.NoError(t, e)
36 | assert.Equal(t, fmt.Sprintf("result-%d", i), res.(string))
37 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
38 | }
39 |
40 | keys := lc.Keys()
41 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
42 | assert.EqualValues(t, []string{"key-0", "key-1", "key-2", "key-3", "key-4"}, keys)
43 |
44 | // check if really cached
45 | res, err := lc.Get("key-3", func() (interface{}, error) {
46 | return "result-blah", nil
47 | })
48 | assert.NoError(t, err)
49 | assert.Equal(t, "result-3", res.(string), "should be cached")
50 |
51 | // try to cache after maxKeys reached
52 | res, err = lc.Get("key-X", func() (interface{}, error) {
53 | return "result-X", nil
54 | })
55 | assert.NoError(t, err)
56 | assert.Equal(t, "result-X", res.(string))
57 | assert.Equal(t, 5, lc.backend.Len())
58 |
59 | // put to cache and make sure it cached
60 | res, err = lc.Get("key-Z", func() (interface{}, error) {
61 | return "result-Z", nil
62 | })
63 | assert.NoError(t, err)
64 | assert.Equal(t, "result-Z", res.(string))
65 |
66 | res, err = lc.Get("key-Z", func() (interface{}, error) {
67 | return "result-Zzzz", nil
68 | })
69 | assert.NoError(t, err)
70 | assert.Equal(t, "result-Z", res.(string), "got cached value")
71 | assert.Equal(t, 5, lc.backend.Len())
72 | }
73 |
74 | func TestLruCache_BadOptions(t *testing.T) {
75 | _, err := NewLruCache(MaxCacheSize(-1))
76 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
77 |
78 | _, err = NewLruCache(MaxKeySize(-1))
79 | assert.EqualError(t, err, "failed to set cache option: negative max key size")
80 |
81 | _, err = NewLruCache(MaxKeys(-1))
82 | assert.EqualError(t, err, "failed to set cache option: negative max keys")
83 |
84 | _, err = NewLruCache(MaxValSize(-1))
85 | assert.EqualError(t, err, "failed to set cache option: negative max value size")
86 |
87 | _, err = NewLruCache(TTL(-1))
88 | assert.EqualError(t, err, "failed to set cache option: negative ttl")
89 | }
90 |
91 | func TestLruCache_MaxKeysWithBus(t *testing.T) {
92 | ps := &mockPubSub{}
93 |
94 | var coldCalls int32
95 | lc1, err := NewLruCache(MaxKeys(5), MaxValSize(10), EventBus(ps))
96 | require.NoError(t, err)
97 | defer lc1.Close()
98 |
99 | lc2, err := NewLruCache(MaxKeys(50), MaxValSize(100), EventBus(ps))
100 | require.NoError(t, err)
101 | defer lc2.Close()
102 |
103 | // put 5 keys to cache1
104 | for i := 0; i < 5; i++ {
105 | i := i
106 | res, e := lc1.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
107 | atomic.AddInt32(&coldCalls, 1)
108 | return fmt.Sprintf("result-%d", i), nil
109 | })
110 | assert.NoError(t, e)
111 | assert.Equal(t, fmt.Sprintf("result-%d", i), res.(string))
112 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
113 | }
114 | // check if really cached
115 | res, err := lc1.Get("key-3", func() (interface{}, error) {
116 | return "result-blah", nil
117 | })
118 | assert.NoError(t, err)
119 | assert.Equal(t, "result-3", res.(string), "should be cached")
120 |
121 | assert.Equal(t, 0, len(ps.CalledKeys()), "no events")
122 |
123 | // put 1 key to cache2
124 | res, e := lc2.Get("key-1", func() (interface{}, error) {
125 | return "result-111", nil
126 | })
127 | assert.NoError(t, e)
128 | assert.Equal(t, "result-111", res.(string))
129 |
130 | // try to cache1 after maxKeys reached, will remove key-0
131 | res, err = lc1.Get("key-X", func() (interface{}, error) {
132 | return "result-X", nil
133 | })
134 | assert.NoError(t, err)
135 | assert.Equal(t, "result-X", res.(string))
136 | assert.Equal(t, 5, lc1.backend.Len())
137 |
138 | assert.Equal(t, 1, len(ps.CalledKeys()), "1 event, key-0 expired")
139 |
140 | assert.Equal(t, 1, lc2.backend.Len(), "cache2 still has key-1")
141 |
142 | // try to cache1 after maxKeys reached, will remove key-1
143 | res, err = lc1.Get("key-X2", func() (interface{}, error) {
144 | return "result-X", nil
145 | })
146 | assert.NoError(t, err)
147 | assert.Equal(t, "result-X", res.(string))
148 |
149 | assert.Equal(t, 2, len(ps.CalledKeys()), "2 events, key-1 expired")
150 |
151 | // wait for onBusEvent goroutines to finish
152 | ps.Wait()
153 |
154 | assert.Equal(t, 0, lc2.backend.Len(), "cache2 removed key-1")
155 | }
156 |
157 | func TestLruCache_MaxKeysWithRedis(t *testing.T) {
158 | if _, ok := os.LookupEnv("ENABLE_REDIS_TESTS"); !ok {
159 | t.Skip("ENABLE_REDIS_TESTS env variable is not set, not expecting Redis to be ready at 127.0.0.1:6379")
160 | }
161 |
162 | var coldCalls int32
163 |
164 | //nolint:gosec // not used for security purpose
165 | channel := "lcw-test-" + strconv.Itoa(rand.Intn(1000000))
166 |
167 | redisPubSub1, err := eventbus.NewRedisPubSub("127.0.0.1:6379", channel)
168 | require.NoError(t, err)
169 | lc1, err := NewLruCache(MaxKeys(5), MaxValSize(10), EventBus(redisPubSub1))
170 | require.NoError(t, err)
171 | defer lc1.Close()
172 |
173 | redisPubSub2, err := eventbus.NewRedisPubSub("127.0.0.1:6379", channel)
174 | require.NoError(t, err)
175 | lc2, err := NewLruCache(MaxKeys(50), MaxValSize(100), EventBus(redisPubSub2))
176 | require.NoError(t, err)
177 | defer lc2.Close()
178 |
179 | // put 5 keys to cache1
180 | for i := 0; i < 5; i++ {
181 | i := i
182 | res, e := lc1.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
183 | atomic.AddInt32(&coldCalls, 1)
184 | return fmt.Sprintf("result-%d", i), nil
185 | })
186 | assert.NoError(t, e)
187 | assert.Equal(t, fmt.Sprintf("result-%d", i), res.(string))
188 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
189 | }
190 | // check if really cached
191 | res, err := lc1.Get("key-3", func() (interface{}, error) {
192 | return "result-blah", nil
193 | })
194 | assert.NoError(t, err)
195 | assert.Equal(t, "result-3", res.(string), "should be cached")
196 |
197 | // put 1 key to cache2
198 | res, e := lc2.Get("key-1", func() (interface{}, error) {
199 | return "result-111", nil
200 | })
201 | assert.NoError(t, e)
202 | assert.Equal(t, "result-111", res.(string))
203 |
204 | // try to cache1 after maxKeys reached, will remove key-0
205 | res, err = lc1.Get("key-X", func() (interface{}, error) {
206 | return "result-X", nil
207 | })
208 | assert.NoError(t, err)
209 | assert.Equal(t, "result-X", res.(string))
210 | assert.Equal(t, 5, lc1.backend.Len())
211 |
212 | assert.Equal(t, 1, lc2.backend.Len(), "cache2 still has key-1")
213 |
214 | // try to cache1 after maxKeys reached, will remove key-1
215 | res, err = lc1.Get("key-X2", func() (interface{}, error) {
216 | return "result-X", nil
217 | })
218 | assert.NoError(t, err)
219 | assert.Equal(t, "result-X", res.(string))
220 |
221 | time.Sleep(time.Second)
222 | assert.Equal(t, 0, lc2.backend.Len(), "cache2 removed key-1")
223 | assert.NoError(t, redisPubSub1.Close())
224 | assert.NoError(t, redisPubSub2.Close())
225 | }
226 |
227 | // LruCache illustrates the use of LRU loading cache
228 | func ExampleLruCache() {
229 | // set up test server for single response
230 | var hitCount int
231 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
232 | if r.URL.String() == "/post/42" && hitCount == 0 {
233 | _, _ = w.Write([]byte("test response"))
234 | return
235 | }
236 | w.WriteHeader(404)
237 | }))
238 |
239 | // load page function
240 | loadURL := func(url string) (string, error) {
241 | resp, err := http.Get(url) // nolint
242 | if err != nil {
243 | return "", err
244 | }
245 | b, err := io.ReadAll(resp.Body)
246 | _ = resp.Body.Close()
247 | if err != nil {
248 | return "", err
249 | }
250 | return string(b), nil
251 | }
252 |
253 | // fixed size LRU cache, 100 items, up to 10k in total size
254 | cache, err := NewLruCache(MaxKeys(100), MaxCacheSize(10*1024))
255 | if err != nil {
256 | log.Printf("can't make lru cache, %v", err)
257 | }
258 |
259 | // url not in cache, load data
260 | url := ts.URL + "/post/42"
261 | val, err := cache.Get(url, func() (val interface{}, err error) {
262 | return loadURL(url)
263 | })
264 | if err != nil {
265 | log.Fatalf("can't load url %s, %v", url, err)
266 | }
267 | fmt.Println(val.(string))
268 |
269 | // url not in cache, load data
270 | val, err = cache.Get(url, func() (val interface{}, err error) {
271 | return loadURL(url)
272 | })
273 | if err != nil {
274 | log.Fatalf("can't load url %s, %v", url, err)
275 | }
276 | fmt.Println(val.(string))
277 |
278 | // url cached, skip load and get from the cache
279 | val, err = cache.Get(url, func() (val interface{}, err error) {
280 | return loadURL(url)
281 | })
282 | if err != nil {
283 | log.Fatalf("can't load url %s, %v", url, err)
284 | }
285 | fmt.Println(val.(string))
286 |
287 | // get cache stats
288 | stats := cache.Stat()
289 | fmt.Printf("%+v\n", stats)
290 |
291 | // close test HTTP server after all log.Fatalf are passed
292 | ts.Close()
293 |
294 | // Output:
295 | // test response
296 | // test response
297 | // test response
298 | // {hits:2, misses:1, ratio:0.67, keys:1, size:0, errors:0}
299 | }
300 |
--------------------------------------------------------------------------------
/options.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/go-pkgz/lcw/eventbus"
8 | )
9 |
10 | type options struct {
11 | maxKeys int
12 | maxValueSize int
13 | maxKeySize int
14 | maxCacheSize int64
15 | ttl time.Duration
16 | onEvicted func(key string, value interface{})
17 | eventBus eventbus.PubSub
18 | }
19 |
20 | // Option func type
21 | type Option func(o *options) error
22 |
23 | // MaxValSize functional option defines the largest value's size allowed to be cached
24 | // By default it is 0, which means unlimited.
25 | func MaxValSize(maximum int) Option {
26 | return func(o *options) error {
27 | if maximum < 0 {
28 | return fmt.Errorf("negative max value size")
29 | }
30 | o.maxValueSize = maximum
31 | return nil
32 | }
33 | }
34 |
35 | // MaxKeySize functional option defines the largest key's size allowed to be used in cache
36 | // By default it is 0, which means unlimited.
37 | func MaxKeySize(maximum int) Option {
38 | return func(o *options) error {
39 | if maximum < 0 {
40 | return fmt.Errorf("negative max key size")
41 | }
42 | o.maxKeySize = maximum
43 | return nil
44 | }
45 | }
46 |
47 | // MaxKeys functional option defines how many keys to keep.
48 | // By default it is 0, which means unlimited.
49 | func MaxKeys(maximum int) Option {
50 | return func(o *options) error {
51 | if maximum < 0 {
52 | return fmt.Errorf("negative max keys")
53 | }
54 | o.maxKeys = maximum
55 | return nil
56 | }
57 | }
58 |
59 | // MaxCacheSize functional option defines the total size of cached data.
60 | // By default it is 0, which means unlimited.
61 | func MaxCacheSize(maximum int64) Option {
62 | return func(o *options) error {
63 | if maximum < 0 {
64 | return fmt.Errorf("negative max cache size")
65 | }
66 | o.maxCacheSize = maximum
67 | return nil
68 | }
69 | }
70 |
71 | // TTL functional option defines the time to live for cached entries.
72 | // Used by ExpirableCache and RedisCache; other backends ignore it.
73 | func TTL(ttl time.Duration) Option {
74 | return func(o *options) error {
75 | if ttl < 0 {
76 | return fmt.Errorf("negative ttl")
77 | }
78 | o.ttl = ttl
79 | return nil
80 | }
81 | }
82 |
83 | // OnEvicted sets callback on invalidation event
84 | func OnEvicted(fn func(key string, value interface{})) Option {
85 | return func(o *options) error {
86 | o.onEvicted = fn
87 | return nil
88 | }
89 | }
90 |
91 | // EventBus sets PubSub for distributed cache invalidation
92 | func EventBus(pubSub eventbus.PubSub) Option {
93 | return func(o *options) error {
94 | o.eventBus = pubSub
95 | return nil
96 | }
97 | }
98 |
--------------------------------------------------------------------------------
/redis_cache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "sync/atomic"
8 | "time"
9 |
10 | "github.com/redis/go-redis/v9"
11 | )
12 |
13 | // RedisValueSizeLimit is maximum allowed value size in Redis
14 | const RedisValueSizeLimit = 512 * 1024 * 1024
15 |
16 | // RedisCache implements LoadingCache for Redis.
17 | type RedisCache struct {
18 | options
19 | CacheStat
20 | backend redis.UniversalClient
21 | }
22 |
23 | // NewRedisCache makes Redis LoadingCache implementation.
24 | func NewRedisCache(backend redis.UniversalClient, opts ...Option) (*RedisCache, error) {
25 | res := RedisCache{
26 | options: options{
27 | ttl: 5 * time.Minute,
28 | },
29 | }
30 | for _, opt := range opts {
31 | if err := opt(&res.options); err != nil {
32 | return nil, fmt.Errorf("failed to set cache option: %w", err)
33 | }
34 | }
35 |
36 | if res.maxValueSize <= 0 || res.maxValueSize > RedisValueSizeLimit {
37 | res.maxValueSize = RedisValueSizeLimit
38 | }
39 |
40 | res.backend = backend
41 |
42 | return &res, nil
43 | }
44 |
45 | // Get gets value by key or loads it with fn if not found in the cache
46 | func (c *RedisCache) Get(key string, fn func() (interface{}, error)) (data interface{}, err error) {
47 | v, getErr := c.backend.Get(context.Background(), key).Result()
48 | switch {
49 | // RedisClient returns nil error when the key is found in DB
50 | case getErr == nil:
51 | atomic.AddInt64(&c.Hits, 1)
52 | return v, nil
53 | // RedisClient returns redis.Nil when the key is not found in DB
54 | case errors.Is(getErr, redis.Nil):
55 | if data, err = fn(); err != nil {
56 | atomic.AddInt64(&c.Errors, 1)
57 | return data, err
58 | }
59 | // RedisClient returns a non-nil error when something goes wrong while getting data
60 | default:
61 | atomic.AddInt64(&c.Errors, 1)
62 | return v, getErr
63 | }
64 | atomic.AddInt64(&c.Misses, 1)
65 |
66 | if !c.allowed(key, data) {
67 | return data, nil
68 | }
69 |
70 | _, setErr := c.backend.Set(context.Background(), key, data, c.ttl).Result()
71 | if setErr != nil {
72 | atomic.AddInt64(&c.Errors, 1)
73 | return data, setErr
74 | }
75 |
76 | return data, nil
77 | }
78 |
79 | // Invalidate removes keys with passed predicate fn, i.e. fn(key) should be true to get evicted
80 | func (c *RedisCache) Invalidate(fn func(key string) bool) {
81 | for _, key := range c.backend.Keys(context.Background(), "*").Val() { // Keys() returns a copy of the cache's keys, safe to remove directly
82 | if fn(key) {
83 | c.backend.Del(context.Background(), key)
84 | }
85 | }
86 | }
87 |
88 | // Peek returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
89 | func (c *RedisCache) Peek(key string) (interface{}, bool) {
90 | ret, err := c.backend.Get(context.Background(), key).Result()
91 | if err != nil {
92 | return nil, false
93 | }
94 | return ret, true
95 | }
96 |
97 | // Purge clears the cache completely.
98 | func (c *RedisCache) Purge() {
99 | c.backend.FlushDB(context.Background())
100 |
101 | }
102 |
103 | // Delete cache item by key
104 | func (c *RedisCache) Delete(key string) {
105 | c.backend.Del(context.Background(), key)
106 | }
107 |
108 | // Keys gets all keys for the cache
109 | func (c *RedisCache) Keys() (res []string) {
110 | return c.backend.Keys(context.Background(), "*").Val()
111 | }
112 |
113 | // Stat returns cache statistics
114 | func (c *RedisCache) Stat() CacheStat {
115 | return CacheStat{
116 | Hits: c.Hits,
117 | Misses: c.Misses,
118 | Size: c.size(),
119 | Keys: c.keys(),
120 | Errors: c.Errors,
121 | }
122 | }
123 |
124 | // Close closes underlying connections
125 | func (c *RedisCache) Close() error {
126 | return c.backend.Close()
127 | }
128 |
129 | func (c *RedisCache) size() int64 {
130 | return 0
131 | }
132 |
133 | func (c *RedisCache) keys() int {
134 | return int(c.backend.DBSize(context.Background()).Val())
135 | }
136 |
137 | func (c *RedisCache) allowed(key string, data interface{}) bool {
138 | if c.maxKeys > 0 && c.backend.DBSize(context.Background()).Val() >= int64(c.maxKeys) {
139 | return false
140 | }
141 | if c.maxKeySize > 0 && len(key) > c.maxKeySize {
142 | return false
143 | }
144 | if s, ok := data.(Sizer); ok {
145 | if c.maxValueSize > 0 && (s.Size() >= c.maxValueSize) {
146 | return false
147 | }
148 | }
149 | return true
150 | }
151 |
--------------------------------------------------------------------------------
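A short sketch of wiring RedisCache to a Redis instance with go-redis; the address, TTL and key limit below are illustrative:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/redis/go-redis/v9"

	"github.com/go-pkgz/lcw"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})

	// entries are stored with a 3-minute TTL on the Redis side, at most 1000 keys accepted
	rc, err := lcw.NewRedisCache(client, lcw.TTL(3*time.Minute), lcw.MaxKeys(1000))
	if err != nil {
		log.Fatalf("can't make redis cache, %v", err)
	}
	defer rc.Close() // closes the underlying redis client too

	// Get returns the cached value or loads it with the function and stores it with the TTL
	v, err := rc.Get("greeting", func() (interface{}, error) { return "hello from the loader", nil })
	if err != nil {
		log.Fatalf("can't load value, %v", err)
	}
	fmt.Println(v, rc.Stat())
}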
/redis_cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sort"
7 | "sync/atomic"
8 | "testing"
9 | "time"
10 |
11 | "github.com/alicebob/miniredis/v2"
12 | "github.com/redis/go-redis/v9"
13 | "github.com/stretchr/testify/assert"
14 | "github.com/stretchr/testify/require"
15 | )
16 |
17 | // newTestRedisServer returns a running miniredis server for tests.
18 | func newTestRedisServer() *miniredis.Miniredis {
19 | mr, err := miniredis.Run()
20 | if err != nil {
21 | panic(err)
22 | }
23 |
24 | return mr
25 | }
26 |
27 | type fakeString string
28 |
29 | func TestExpirableRedisCache(t *testing.T) {
30 | server := newTestRedisServer()
31 | defer server.Close()
32 | client := redis.NewClient(&redis.Options{
33 | Addr: server.Addr()})
34 | defer client.Close()
35 | rc, err := NewRedisCache(client, MaxKeys(5), TTL(time.Second*6))
36 | require.NoError(t, err)
37 | defer rc.Close()
38 | require.NoError(t, err)
39 | for i := 0; i < 5; i++ {
40 | i := i
41 | _, e := rc.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
42 | return fmt.Sprintf("result-%d", i), nil
43 | })
44 | assert.NoError(t, e)
45 | server.FastForward(1000 * time.Millisecond)
46 | }
47 |
48 | assert.Equal(t, 5, rc.Stat().Keys)
49 | assert.Equal(t, int64(5), rc.Stat().Misses)
50 |
51 | keys := rc.Keys()
52 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
53 | assert.EqualValues(t, []string{"key-0", "key-1", "key-2", "key-3", "key-4"}, keys)
54 |
55 | _, e := rc.Get("key-xx", func() (interface{}, error) {
56 | return "result-xx", nil
57 | })
58 | assert.NoError(t, e)
59 | assert.Equal(t, 5, rc.Stat().Keys)
60 | assert.Equal(t, int64(6), rc.Stat().Misses)
61 |
62 | server.FastForward(1000 * time.Millisecond)
63 | assert.Equal(t, 4, rc.Stat().Keys)
64 |
65 | server.FastForward(4000 * time.Millisecond)
66 | assert.Equal(t, 0, rc.keys())
67 |
68 | }
69 |
70 | func TestRedisCache(t *testing.T) {
71 | var coldCalls int32
72 |
73 | server := newTestRedisServer()
74 | defer server.Close()
75 | client := redis.NewClient(&redis.Options{
76 | Addr: server.Addr()})
77 | defer client.Close()
78 | rc, err := NewRedisCache(client, MaxKeys(5), MaxValSize(10), MaxKeySize(10))
79 | require.NoError(t, err)
80 | defer rc.Close()
81 | // put 5 keys to cache
82 | for i := 0; i < 5; i++ {
83 | i := i
84 | res, e := rc.Get(fmt.Sprintf("key-%d", i), func() (interface{}, error) {
85 | atomic.AddInt32(&coldCalls, 1)
86 | return fmt.Sprintf("result-%d", i), nil
87 | })
88 | assert.NoError(t, e)
89 | assert.Equal(t, fmt.Sprintf("result-%d", i), res.(string))
90 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
91 | }
92 |
93 | // check if really cached
94 | res, err := rc.Get("key-3", func() (interface{}, error) {
95 | return "result-blah", nil
96 | })
97 | assert.NoError(t, err)
98 | assert.Equal(t, "result-3", res.(string), "should be cached")
99 |
100 | // try to cache after maxKeys reached
101 | res, err = rc.Get("key-X", func() (interface{}, error) {
102 | return "result-X", nil
103 | })
104 | assert.NoError(t, err)
105 | assert.Equal(t, "result-X", res.(string))
106 | assert.Equal(t, int64(5), rc.backend.DBSize(context.Background()).Val())
107 |
108 | // put to cache and make sure it cached
109 | res, err = rc.Get("key-Z", func() (interface{}, error) {
110 | return "result-Z", nil
111 | })
112 | assert.NoError(t, err)
113 | assert.Equal(t, "result-Z", res.(string))
114 |
115 | res, err = rc.Get("key-Z", func() (interface{}, error) {
116 | return "result-Zzzz", nil
117 | })
118 | assert.NoError(t, err)
119 | assert.Equal(t, "result-Zzzz", res.(string), "got non-cached value")
120 | assert.Equal(t, 5, rc.keys())
121 |
122 | res, err = rc.Get("key-Zzzzzzz", func() (interface{}, error) {
123 | return "result-Zzzz", nil
124 | })
125 | assert.NoError(t, err)
126 | assert.Equal(t, "result-Zzzz", res.(string), "got non-cached value")
127 | assert.Equal(t, 5, rc.keys())
128 |
129 | res, ok := rc.Peek("error-key-Z2")
130 | assert.False(t, ok)
131 | assert.Nil(t, res)
132 | }
133 |
134 | func TestRedisCacheErrors(t *testing.T) {
135 | server := newTestRedisServer()
136 | defer server.Close()
137 | client := redis.NewClient(&redis.Options{
138 | Addr: server.Addr()})
139 | defer client.Close()
140 | rc, err := NewRedisCache(client)
141 | require.NoError(t, err)
142 | defer rc.Close()
143 |
144 | res, err := rc.Get("error-key-Z", func() (interface{}, error) {
145 | return "error-result-Z", fmt.Errorf("some error")
146 | })
147 | assert.Error(t, err)
148 | assert.Equal(t, "error-result-Z", res.(string))
149 | assert.Equal(t, int64(1), rc.Stat().Errors)
150 |
151 | res, err = rc.Get("error-key-Z2", func() (interface{}, error) {
152 | return fakeString("error-result-Z2"), nil
153 | })
154 | assert.Error(t, err)
155 | assert.Equal(t, fakeString("error-result-Z2"), res.(fakeString))
156 | assert.Equal(t, int64(2), rc.Stat().Errors)
157 |
158 | server.Close()
159 | res, err = rc.Get("error-key-Z3", func() (interface{}, error) {
160 | return fakeString("error-result-Z3"), nil
161 | })
162 | assert.Error(t, err)
163 | assert.Equal(t, "", res.(string))
164 | assert.Equal(t, int64(3), rc.Stat().Errors)
165 | }
166 |
167 | func TestRedisCache_BadOptions(t *testing.T) {
168 | server := newTestRedisServer()
169 | defer server.Close()
170 | client := redis.NewClient(&redis.Options{
171 | Addr: server.Addr()})
172 | defer client.Close()
173 |
174 | _, err := NewRedisCache(client, MaxCacheSize(-1))
175 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
176 |
177 | _, err = NewRedisCache(client, MaxCacheSize(-1))
178 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
179 |
180 | _, err = NewRedisCache(client, MaxKeys(-1))
181 | assert.EqualError(t, err, "failed to set cache option: negative max keys")
182 |
183 | _, err = NewRedisCache(client, MaxValSize(-1))
184 | assert.EqualError(t, err, "failed to set cache option: negative max value size")
185 |
186 | _, err = NewRedisCache(client, TTL(-1))
187 | assert.EqualError(t, err, "failed to set cache option: negative ttl")
188 |
189 | _, err = NewRedisCache(client, MaxKeySize(-1))
190 | assert.EqualError(t, err, "failed to set cache option: negative max key size")
191 |
192 | }
193 |
--------------------------------------------------------------------------------
/scache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | )
7 |
8 | // Scache wraps LoadingCache with partitions (subsystems) and scopes.
9 | // Simplified interface with just 4 funcs - Get, Flush, Stat and Close
10 | type Scache struct {
11 | lc LoadingCache
12 | }
13 |
14 | // NewScache creates Scache on top of LoadingCache
15 | func NewScache(lc LoadingCache) *Scache {
16 | return &Scache{lc: lc}
17 | }
18 |
19 | // Get retrieves the value for the key from the underlying backend, loading it with fn on a miss
20 | func (m *Scache) Get(key Key, fn func() ([]byte, error)) (data []byte, err error) {
21 | keyStr := key.String()
22 | val, err := m.lc.Get(keyStr, func() (value interface{}, e error) {
23 | return fn()
24 | })
25 | return val.([]byte), err
26 | }
27 |
28 | // Stat delegates the call to the underlying cache backend
29 | func (m *Scache) Stat() CacheStat {
30 | return m.lc.Stat()
31 | }
32 |
33 | // Close calls Close function of the underlying cache
34 | func (m *Scache) Close() error {
35 | return m.lc.Close()
36 | }
37 |
38 | // Flush clears the cache completely when no scopes are set, otherwise removes only the keys matching the requested scopes
39 | func (m *Scache) Flush(req FlusherRequest) {
40 | if len(req.scopes) == 0 {
41 | m.lc.Purge()
42 | return
43 | }
44 |
45 | // check if fullKey has matching scopes
46 | inScope := func(fullKey string) bool {
47 | key, err := parseKey(fullKey)
48 | if err != nil {
49 | return false
50 | }
51 | for _, s := range req.scopes {
52 | for _, ks := range key.scopes {
53 | if ks == s {
54 | return true
55 | }
56 | }
57 | }
58 | return false
59 | }
60 |
61 | for _, k := range m.lc.Keys() {
62 | if inScope(k) {
63 | m.lc.Delete(k) // Keys() returns a copy of the cache's keys, safe to remove directly
64 | }
65 | }
66 | }
67 |
68 | // Key for scoped cache. Created for a given partition (can be empty) and set with ID and Scopes.
69 | // example: k := NewKey("sys1").ID(postID).Scopes("last_posts", customer_id)
70 | type Key struct {
71 | id string // the primary part of the key, i.e. usual cache's key
72 | partition string // optional id for a subsystem or cache partition
73 | scopes []string // list of scopes to use in invalidation
74 | }
75 |
76 | // NewKey makes base key for given partition. Partition can be omitted.
77 | func NewKey(partition ...string) Key {
78 | if len(partition) == 0 {
79 | return Key{partition: ""}
80 | }
81 | return Key{partition: partition[0]}
82 | }
83 |
84 | // ID sets key id
85 | func (k Key) ID(id string) Key {
86 | k.id = id
87 | return k
88 | }
89 |
90 | // Scopes of the key
91 | func (k Key) Scopes(scopes ...string) Key {
92 | k.scopes = scopes
93 | return k
94 | }
95 |
96 | // String makes full string key from primary key, partition and scopes
97 | // key string made as <partition>@@<id>@@<scope1>$$<scope2>$$...
98 | func (k Key) String() string {
99 | bld := strings.Builder{}
100 | _, _ = bld.WriteString(k.partition)
101 | _, _ = bld.WriteString("@@")
102 | _, _ = bld.WriteString(k.id)
103 | _, _ = bld.WriteString("@@")
104 | _, _ = bld.WriteString(strings.Join(k.scopes, "$$"))
105 | return bld.String()
106 | }
107 |
108 | // parseKey gets the compound key string created by Key.String and splits it into the actual key, partition and scopes
109 | // key string made as <partition>@@<id>@@<scope1>$$<scope2>$$...
110 | func parseKey(keyStr string) (Key, error) {
111 | elems := strings.Split(keyStr, "@@")
112 | if len(elems) != 3 {
113 | return Key{}, fmt.Errorf("can't parse cache key %s, invalid number of segments %d", keyStr, len(elems))
114 | }
115 |
116 | scopes := strings.Split(elems[2], "$$")
117 | if len(scopes) == 1 && scopes[0] == "" {
118 | scopes = []string{}
119 | }
120 | key := Key{
121 | partition: elems[0],
122 | id: elems[1],
123 | scopes: scopes,
124 | }
125 |
126 | return key, nil
127 | }
128 |
129 | // FlusherRequest used as input for cache.Flush
130 | type FlusherRequest struct {
131 | partition string
132 | scopes []string
133 | }
134 |
135 | // Flusher makes new FlusherRequest with empty scopes
136 | func Flusher(partition string) FlusherRequest {
137 | res := FlusherRequest{partition: partition}
138 | return res
139 | }
140 |
141 | // Scopes adds scopes to FlusherRequest
142 | func (f FlusherRequest) Scopes(scopes ...string) FlusherRequest {
143 | f.scopes = scopes
144 | return f
145 | }
146 |
--------------------------------------------------------------------------------
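A brief sketch of scoped invalidation with Scache over an LRU backend; partition and scope names are made up for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/go-pkgz/lcw"
)

func main() {
	backend, err := lcw.NewLruCache(lcw.MaxKeys(1000))
	if err != nil {
		log.Fatalf("can't make lru cache, %v", err)
	}
	sc := lcw.NewScache(backend)
	defer sc.Close()

	// key in the "posts" partition, tagged with scopes used later for invalidation
	key := lcw.NewKey("posts").ID("post-42").Scopes("last_posts", "user-1")
	data, err := sc.Get(key, func() ([]byte, error) {
		return []byte("rendered post"), nil // loaded on miss
	})
	if err != nil {
		log.Fatalf("can't load value, %v", err)
	}
	fmt.Println(string(data))

	// drop every key tagged with the "user-1" scope, others stay cached
	sc.Flush(lcw.Flusher("posts").Scopes("user-1"))
}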
/scache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "log"
7 | "net/http"
8 | "net/http/httptest"
9 | "sync"
10 | "sync/atomic"
11 | "testing"
12 | "time"
13 |
14 | "github.com/stretchr/testify/assert"
15 | "github.com/stretchr/testify/require"
16 | )
17 |
18 | func TestScache_Get(t *testing.T) {
19 | lru, err := NewLruCache()
20 | require.NoError(t, err)
21 | lc := NewScache(lru)
22 | defer lc.Close()
23 |
24 | var coldCalls int32
25 |
26 | res, err := lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
27 | atomic.AddInt32(&coldCalls, 1)
28 | return []byte("result"), nil
29 | })
30 | assert.NoError(t, err)
31 | assert.Equal(t, "result", string(res))
32 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
33 |
34 | res, err = lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
35 | atomic.AddInt32(&coldCalls, 1)
36 | return []byte("result"), nil
37 | })
38 | assert.NoError(t, err)
39 | assert.Equal(t, "result", string(res))
40 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
41 |
42 | lc.Flush(Flusher("site"))
43 | time.Sleep(100 * time.Millisecond) // let postFn to do its thing
44 |
45 | _, err = lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
46 | return nil, fmt.Errorf("err")
47 | })
48 | assert.Error(t, err)
49 | }
50 |
51 | func TestScache_Scopes(t *testing.T) {
52 | lru, err := NewLruCache()
53 | require.NoError(t, err)
54 | lc := NewScache(lru)
55 | defer lc.Close()
56 |
57 | res, err := lc.Get(NewKey("site").ID("key").Scopes("s1", "s2"), func() ([]byte, error) {
58 | return []byte("value"), nil
59 | })
60 | assert.NoError(t, err)
61 | assert.Equal(t, "value", string(res))
62 |
63 | res, err = lc.Get(NewKey("site").ID("key2").Scopes("s2"), func() ([]byte, error) {
64 | return []byte("value2"), nil
65 | })
66 | assert.NoError(t, err)
67 | assert.Equal(t, "value2", string(res))
68 |
69 | assert.Equal(t, 2, len(lc.lc.Keys()))
70 | lc.Flush(Flusher("site").Scopes("s1"))
71 | assert.Equal(t, 1, len(lc.lc.Keys()))
72 |
73 | _, err = lc.Get(NewKey("site").ID("key2").Scopes("s2"), func() ([]byte, error) {
74 | assert.Fail(t, "should stay")
75 | return nil, nil
76 | })
77 | assert.NoError(t, err)
78 | res, err = lc.Get(NewKey("site").ID("key").Scopes("s1", "s2"), func() ([]byte, error) {
79 | return []byte("value-upd"), nil
80 | })
81 | assert.NoError(t, err)
82 | assert.Equal(t, "value-upd", string(res), "was deleted, update")
83 |
84 | assert.Equal(t, CacheStat{Hits: 1, Misses: 3, Keys: 2, Size: 0, Errors: 0}, lc.Stat())
85 | }
86 |
87 | func TestScache_Flush(t *testing.T) {
88 | lru, err := NewLruCache()
89 | require.NoError(t, err)
90 | lc := NewScache(lru)
91 |
92 | addToCache := func(id string, scopes ...string) {
93 | res, err := lc.Get(NewKey("site").ID(id).Scopes(scopes...), func() ([]byte, error) {
94 | return []byte("value" + id), nil
95 | })
96 | require.NoError(t, err)
97 | require.Equal(t, "value"+id, string(res))
98 | }
99 |
100 | init := func() {
101 | lc.Flush(Flusher("site"))
102 | addToCache("key1", "s1", "s2")
103 | addToCache("key2", "s1", "s2", "s3")
104 | addToCache("key3", "s1", "s2", "s3")
105 | addToCache("key4", "s2", "s3")
106 | addToCache("key5", "s2")
107 | addToCache("key6")
108 | addToCache("key7", "s4", "s3")
109 | require.Equal(t, 7, len(lc.lc.Keys()), "cache init")
110 | }
111 |
112 | tbl := []struct {
113 | scopes []string
114 | left int
115 | msg string
116 | }{
117 | {[]string{}, 0, "full flush, no scopes"},
118 | {[]string{"s0"}, 7, "flush wrong scope"},
119 | {[]string{"s1"}, 4, "flush s1 scope"},
120 | {[]string{"s2", "s1"}, 2, "flush s2+s1 scope"},
121 | {[]string{"s1", "s2"}, 2, "flush s1+s2 scope"},
122 | {[]string{"s1", "s2", "s4"}, 1, "flush s1+s2+s4 scope"},
123 | {[]string{"s1", "s2", "s3"}, 1, "flush s1+s2+s3 scope"},
124 | {[]string{"s1", "s2", "ss"}, 2, "flush s1+s2+wrong scope"},
125 | }
126 |
127 | for i, tt := range tbl {
128 | tt := tt
129 | i := i
130 | t.Run(tt.msg, func(t *testing.T) {
131 | init()
132 | lc.Flush(Flusher("site").Scopes(tt.scopes...))
133 | assert.Equal(t, tt.left, len(lc.lc.Keys()), "keys size, %s #%d", tt.msg, i)
134 | })
135 | }
136 | }
137 |
138 | func TestScache_FlushFailed(t *testing.T) {
139 | lru, err := NewLruCache()
140 | require.NoError(t, err)
141 | lc := NewScache(lru)
142 |
143 | val, err := lc.Get(NewKey("site").ID("invalid-composite"), func() ([]byte, error) {
144 | return []byte("value"), nil
145 | })
146 | assert.NoError(t, err)
147 | assert.Equal(t, "value", string(val))
148 | assert.Equal(t, 1, len(lc.lc.Keys()))
149 |
150 | lc.Flush(Flusher("site").Scopes("invalid-composite"))
151 | assert.Equal(t, 1, len(lc.lc.Keys()))
152 | }
153 |
154 | func TestScope_Key(t *testing.T) {
155 | tbl := []struct {
156 | key string
157 | partition string
158 | scopes []string
159 | full string
160 | }{
161 | {"key1", "p1", []string{"s1"}, "p1@@key1@@s1"},
162 | {"key2", "p2", []string{"s11", "s2"}, "p2@@key2@@s11$$s2"},
163 | {"key3", "", []string{}, "@@key3@@"},
164 | {"key3", "", []string{"xx", "yyy"}, "@@key3@@xx$$yyy"},
165 | }
166 |
167 | for _, tt := range tbl {
168 | tt := tt
169 | t.Run(tt.full, func(t *testing.T) {
170 | k := NewKey(tt.partition).ID(tt.key).Scopes(tt.scopes...)
171 | assert.Equal(t, tt.full, k.String())
172 | k, err := parseKey(tt.full)
173 | require.NoError(t, err)
174 | assert.Equal(t, tt.partition, k.partition)
175 | assert.Equal(t, tt.key, k.id)
176 | assert.Equal(t, tt.scopes, k.scopes)
177 | })
178 | }
179 |
180 | // without partition
181 | k := NewKey().ID("id1").Scopes("s1", "s2")
182 | assert.Equal(t, "@@id1@@s1$$s2", k.String())
183 |
184 | // parse invalid key strings
185 | _, err := parseKey("abc")
186 | assert.Error(t, err)
187 | _, err = parseKey("")
188 | assert.Error(t, err)
189 | }
190 |
191 | func TestScache_Parallel(t *testing.T) {
192 | var coldCalls int32
193 | lru, err := NewLruCache()
194 | require.NoError(t, err)
195 | lc := NewScache(lru)
196 |
197 | res, err := lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
198 | return []byte("value"), nil
199 | })
200 | assert.NoError(t, err)
201 | assert.Equal(t, "value", string(res))
202 |
203 | wg := sync.WaitGroup{}
204 | for i := 0; i < 1000; i++ {
205 | wg.Add(1)
206 | i := i
207 | go func() {
208 | defer wg.Done()
209 | res, err := lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
210 | atomic.AddInt32(&coldCalls, 1)
211 | return []byte(fmt.Sprintf("result-%d", i)), nil
212 | })
213 | require.NoError(t, err)
214 | require.Equal(t, "value", string(res))
215 | }()
216 | }
217 | wg.Wait()
218 | assert.Equal(t, int32(0), atomic.LoadInt32(&coldCalls))
219 | }
220 |
221 | // Scache illustrates the use of scoped cache wrapping an LRU loading cache
222 | func ExampleScache() {
223 | // set up test server for single response
224 | var hitCount int
225 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
226 | if r.URL.String() == "/post/42" && hitCount == 0 {
227 | _, _ = w.Write([]byte("test response"))
228 | return
229 | }
230 | w.WriteHeader(404)
231 | }))
232 |
233 | // load page function
234 | loadURL := func(url string) ([]byte, error) {
235 | resp, err := http.Get(url) // nolint
236 | if err != nil {
237 | return nil, err
238 | }
239 | b, err := io.ReadAll(resp.Body)
240 | _ = resp.Body.Close()
241 | if err != nil {
242 | return nil, err
243 | }
244 | return b, nil
245 | }
246 |
247 | // fixed size LRU cache, 100 items, up to 10k in total size
248 | backend, err := NewLruCache(MaxKeys(100), MaxCacheSize(10*1024))
249 | if err != nil {
250 | log.Fatalf("can't make lru cache, %v", err)
251 | }
252 |
253 | cache := NewScache(backend)
254 |
255 | // url not in cache, load data
256 | url := ts.URL + "/post/42"
257 | key := NewKey().ID(url).Scopes("test")
258 | val, err := cache.Get(key, func() (val []byte, err error) {
259 | return loadURL(url)
260 | })
261 | if err != nil {
262 | log.Fatalf("can't load url %s, %v", url, err)
263 | }
264 | fmt.Println(string(val))
265 |
266 | // url not in cache, load data
267 | key = NewKey().ID(url).Scopes("test")
268 | val, err = cache.Get(key, func() (val []byte, err error) {
269 | return loadURL(url)
270 | })
271 | if err != nil {
272 | log.Fatalf("can't load url %s, %v", url, err)
273 | }
274 | fmt.Println(string(val))
275 |
276 | // url cached, skip load and get from the cache
277 | key = NewKey().ID(url).Scopes("test")
278 | val, err = cache.Get(key, func() (val []byte, err error) {
279 | return loadURL(url)
280 | })
281 | if err != nil {
282 | log.Fatalf("can't load url %s, %v", url, err)
283 | }
284 | fmt.Println(string(val))
285 |
286 | // get cache stats
287 | stats := cache.Stat()
288 | fmt.Printf("%+v\n", stats)
289 |
290 | 	// close the cache and the test HTTP server after all log.Fatalf checks have passed
291 | ts.Close()
292 | err = cache.Close()
293 | if err != nil {
294 | log.Fatalf("can't close cache %v", err)
295 | }
296 |
297 | // Output:
298 | // test response
299 | // test response
300 | // test response
301 | // {hits:2, misses:1, ratio:0.67, keys:1, size:0, errors:0}
302 | }
303 |
--------------------------------------------------------------------------------
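The table test above pins down the key layout used by Scache: partition and id separated by "@@", scopes joined with "$$". A minimal standalone sketch (not part of the repo, assuming the v1 module path github.com/go-pkgz/lcw) composing such a key:

package main

import (
	"fmt"

	"github.com/go-pkgz/lcw"
)

func main() {
	// compose a scoped key the same way the table test above does
	k := lcw.NewKey("site").ID("user-42").Scopes("s1", "s2")
	// per the format exercised above this should print "site@@user-42@@s1$$s2"
	fmt.Println(k.String())
}
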
/url.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strconv"
7 | "time"
8 |
9 | "github.com/hashicorp/go-multierror"
10 | "github.com/redis/go-redis/v9"
11 | )
12 |
13 | // New parses the uri and makes any of the supported caches
14 | // supported URIs:
15 | //   - redis://<host>:<port>?db=123&max_keys=10
16 | // - mem://lru?max_keys=10&max_cache_size=1024
17 | // - mem://expirable?ttl=30s&max_val_size=100
18 | // - nop://
19 | func New(uri string) (LoadingCache, error) {
20 | u, err := url.Parse(uri)
21 | if err != nil {
22 | return nil, fmt.Errorf("parse cache uri %s: %w", uri, err)
23 | }
24 |
25 | query := u.Query()
26 | opts, err := optionsFromQuery(query)
27 | if err != nil {
28 | return nil, fmt.Errorf("parse uri options %s: %w", uri, err)
29 | }
30 |
31 | switch u.Scheme {
32 | case "redis":
33 | redisOpts, e := redisOptionsFromURL(u)
34 | if e != nil {
35 | return nil, e
36 | }
37 | res, e := NewRedisCache(redis.NewClient(redisOpts), opts...)
38 | if e != nil {
39 | return nil, fmt.Errorf("make redis for %s: %w", uri, e)
40 | }
41 | return res, nil
42 | case "mem":
43 | switch u.Hostname() {
44 | case "lru":
45 | return NewLruCache(opts...)
46 | case "expirable":
47 | return NewExpirableCache(opts...)
48 | default:
49 | return nil, fmt.Errorf("unsupported mem cache type %s", u.Hostname())
50 | }
51 | case "nop":
52 | return NewNopCache(), nil
53 | }
54 | return nil, fmt.Errorf("unsupported cache type %s", u.Scheme)
55 | }
56 |
57 | func optionsFromQuery(q url.Values) (opts []Option, err error) {
58 | errs := new(multierror.Error)
59 |
60 | if v := q.Get("max_val_size"); v != "" {
61 | vv, e := strconv.Atoi(v)
62 | if e != nil {
63 | errs = multierror.Append(errs, fmt.Errorf("max_val_size query param %s: %w", v, e))
64 | } else {
65 | opts = append(opts, MaxValSize(vv))
66 | }
67 | }
68 |
69 | if v := q.Get("max_key_size"); v != "" {
70 | vv, e := strconv.Atoi(v)
71 | if e != nil {
72 | errs = multierror.Append(errs, fmt.Errorf("max_key_size query param %s: %w", v, e))
73 | } else {
74 | opts = append(opts, MaxKeySize(vv))
75 | }
76 | }
77 |
78 | if v := q.Get("max_keys"); v != "" {
79 | vv, e := strconv.Atoi(v)
80 | if e != nil {
81 | errs = multierror.Append(errs, fmt.Errorf("max_keys query param %s: %w", v, e))
82 | } else {
83 | opts = append(opts, MaxKeys(vv))
84 | }
85 | }
86 |
87 | if v := q.Get("max_cache_size"); v != "" {
88 | vv, e := strconv.ParseInt(v, 10, 64)
89 | if e != nil {
90 | errs = multierror.Append(errs, fmt.Errorf("max_cache_size query param %s: %w", v, e))
91 | } else {
92 | opts = append(opts, MaxCacheSize(vv))
93 | }
94 | }
95 |
96 | if v := q.Get("ttl"); v != "" {
97 | vv, e := time.ParseDuration(v)
98 | if e != nil {
99 | errs = multierror.Append(errs, fmt.Errorf("ttl query param %s: %w", v, e))
100 | } else {
101 | opts = append(opts, TTL(vv))
102 | }
103 | }
104 |
105 | return opts, errs.ErrorOrNil()
106 | }
107 |
108 | func redisOptionsFromURL(u *url.URL) (*redis.Options, error) {
109 | query := u.Query()
110 |
111 | db, err := strconv.Atoi(query.Get("db"))
112 | if err != nil {
113 | return nil, fmt.Errorf("db from %s: %w", u, err)
114 | }
115 |
116 | res := &redis.Options{
117 | Addr: u.Hostname() + ":" + u.Port(),
118 | DB: db,
119 | Password: query.Get("password"),
120 | Network: query.Get("network"),
121 | }
122 |
123 | if dialTimeout, err := time.ParseDuration(query.Get("dial_timeout")); err == nil {
124 | res.DialTimeout = dialTimeout
125 | }
126 |
127 | if readTimeout, err := time.ParseDuration(query.Get("read_timeout")); err == nil {
128 | res.ReadTimeout = readTimeout
129 | }
130 |
131 | if writeTimeout, err := time.ParseDuration(query.Get("write_timeout")); err == nil {
132 | res.WriteTimeout = writeTimeout
133 | }
134 |
135 | return res, nil
136 | }
137 |
--------------------------------------------------------------------------------
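New above picks the backend from the URI scheme and translates query parameters into options. A minimal usage sketch (assuming the v1 module path github.com/go-pkgz/lcw), mirroring what the tests below do:

package main

import (
	"fmt"
	"log"

	"github.com/go-pkgz/lcw"
)

func main() {
	// mem://lru with query options, as listed in the New doc comment above
	c, err := lcw.New("mem://lru?max_keys=50&max_cache_size=1024")
	if err != nil {
		log.Fatalf("can't make cache: %v", err)
	}
	defer c.Close()

	if _, ok := c.(*lcw.LruCache); ok {
		fmt.Println("got an LRU-backed LoadingCache")
	}
}
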
/url_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strconv"
7 | "testing"
8 | "time"
9 |
10 | "github.com/redis/go-redis/v9"
11 | "github.com/stretchr/testify/assert"
12 | "github.com/stretchr/testify/require"
13 | )
14 |
15 | func TestUrl_optionsFromQuery(t *testing.T) {
16 | tbl := []struct {
17 | url string
18 | num int
19 | fail bool
20 | }{
21 | {"mem://lru?ttl=26s&max_keys=100&max_val_size=1024&max_key_size=64&max_cache_size=111", 5, false},
22 | {"mem://lru?ttl=26s&max_keys=100&foo=bar", 2, false},
23 | {"mem://lru?ttl=xx26s&max_keys=100&foo=bar", 0, true},
24 | {"mem://lru?foo=bar", 0, false},
25 | {"mem://lru?foo=bar&max_keys=abcd", 0, true},
26 | {"mem://lru?foo=bar&max_val_size=abcd", 0, true},
27 | {"mem://lru?foo=bar&max_cache_size=abcd", 0, true},
28 | {"mem://lru?foo=bar&max_key_size=abcd", 0, true},
29 | }
30 |
31 | for i, tt := range tbl {
32 | tt := tt
33 | t.Run(strconv.Itoa(i), func(t *testing.T) {
34 | u, err := url.Parse(tt.url)
35 | require.NoError(t, err)
36 | r, err := optionsFromQuery(u.Query())
37 | if tt.fail {
38 | require.Error(t, err)
39 | return
40 | }
41 | assert.Equal(t, tt.num, len(r))
42 | })
43 | }
44 | }
45 |
46 | func TestUrl_redisOptionsFromURL(t *testing.T) {
47 | tbl := []struct {
48 | url string
49 | fail bool
50 | opts redis.Options
51 | }{
52 | {"redis://127.0.0.1:12345?db=xa19", true, redis.Options{}},
53 | {"redis://127.0.0.1:12345?foo=bar&max_keys=abcd&db=19", false, redis.Options{Addr: "127.0.0.1:12345", DB: 19}},
54 | {
55 | "redis://127.0.0.1:12345?db=19&password=xyz&network=tcp4&dial_timeout=1s&read_timeout=2s&write_timeout=3m",
56 | false, redis.Options{Addr: "127.0.0.1:12345", DB: 19, Password: "xyz", Network: "tcp4",
57 | DialTimeout: 1 * time.Second, ReadTimeout: 2 * time.Second, WriteTimeout: 3 * time.Minute},
58 | },
59 | }
60 |
61 | for i, tt := range tbl {
62 | tt := tt
63 | t.Run(strconv.Itoa(i), func(t *testing.T) {
64 | u, err := url.Parse(tt.url)
65 | require.NoError(t, err)
66 | r, err := redisOptionsFromURL(u)
67 | if tt.fail {
68 | require.Error(t, err)
69 | return
70 | }
71 | require.NoError(t, err)
72 | assert.Equal(t, tt.opts, *r)
73 | })
74 | }
75 | }
76 |
77 | func TestUrl_NewLru(t *testing.T) {
78 | u := "mem://lru?max_keys=10"
79 | res, err := New(u)
80 | require.NoError(t, err)
81 | r, ok := res.(*LruCache)
82 | require.True(t, ok)
83 | assert.Equal(t, 10, r.maxKeys)
84 | }
85 |
86 | func TestUrl_NewExpirable(t *testing.T) {
87 | u := "mem://expirable?max_keys=10&ttl=30m"
88 | res, err := New(u)
89 | require.NoError(t, err)
90 | defer res.Close()
91 | r, ok := res.(*ExpirableCache)
92 | require.True(t, ok)
93 | assert.Equal(t, 10, r.maxKeys)
94 | assert.Equal(t, 30*time.Minute, r.ttl)
95 | }
96 |
97 | func TestUrl_NewNop(t *testing.T) {
98 | u := "nop://"
99 | res, err := New(u)
100 | require.NoError(t, err)
101 | _, ok := res.(*Nop)
102 | require.True(t, ok)
103 | }
104 |
105 | func TestUrl_NewRedis(t *testing.T) {
106 | srv := newTestRedisServer()
107 | defer srv.Close()
108 | u := fmt.Sprintf("redis://%s?db=1&ttl=10s", srv.Addr())
109 | res, err := New(u)
110 | require.NoError(t, err)
111 | defer res.Close()
112 | r, ok := res.(*RedisCache)
113 | require.True(t, ok)
114 | assert.Equal(t, 10*time.Second, r.ttl)
115 |
116 | u = fmt.Sprintf("redis://%s?db=1&ttl=zz10s", srv.Addr())
117 | _, err = New(u)
118 | require.Error(t, err)
119 | assert.Contains(t, err.Error(), "ttl query param zz10s: time: invalid duration")
120 |
121 | _, err = New("redis://localhost:xxx?db=1")
122 | require.Error(t, err)
123 | assert.Contains(t, err.Error(), "parse cache uri redis://localhost:xxx?db=1: parse")
124 | assert.Contains(t, err.Error(), "redis://localhost:xxx?db=1")
125 | assert.Contains(t, err.Error(), "invalid port \":xxx\" after host")
126 | }
127 |
128 | func TestUrl_NewFailed(t *testing.T) {
129 | u := "blah://ip?foo=bar"
130 | _, err := New(u)
131 | require.EqualError(t, err, "unsupported cache type blah")
132 |
133 | u = "mem://blah?foo=bar"
134 | _, err = New(u)
135 | require.EqualError(t, err, "unsupported mem cache type blah")
136 |
137 | u = "mem://lru?max_keys=xyz"
138 | _, err = New(u)
139 | require.EqualError(t, err, "parse uri options mem://lru?max_keys=xyz: 1 error occurred:\n\t* max_keys query param xyz: strconv.Atoi: parsing \"xyz\": invalid syntax\n\n")
140 | }
141 |
--------------------------------------------------------------------------------
/v2/cache.go:
--------------------------------------------------------------------------------
1 | // Package lcw adds a thin layer on top of lru and expirable cache providing more limits and a common interface.
2 | // The primary method to get (and set) data to/from the cache is LoadingCache.Get, which returns stored data for a given key or
3 | // calls the provided func to retrieve and store it, similar to Guava loading cache.
4 | // Limits allow max values for key size, number of keys, value size and total size of values in the cache.
5 | // CacheStat gives general stats on cache performance.
6 | // 4 flavors of cache provided - NoP (do-nothing cache), ExpirableCache (TTL based), LruCache and RedisCache
7 | package lcw
8 |
9 | import (
10 | "fmt"
11 | )
12 |
13 | // Sizer is an optional interface allowing size-based restrictions.
14 | // If it is not implemented, both maxValueSize and maxCacheSize checks are ignored
15 | type Sizer interface {
16 | Size() int
17 | }
18 |
19 | // LoadingCache defines a guava-like cache with Get method returning a cached value or retrieving it if not in cache
20 | type LoadingCache[V any] interface {
21 | Get(key string, fn func() (V, error)) (val V, err error) // load or get from cache
22 | Peek(key string) (V, bool) // get from cache by key
23 | Invalidate(fn func(key string) bool) // invalidate items for func(key) == true
24 | Delete(key string) // delete by key
25 | Purge() // clear cache
26 | Stat() CacheStat // cache stats
27 | Keys() []string // list of all keys
28 | Close() error // close open connections
29 | }
30 |
31 | // CacheStat represents cache stats values
32 | type CacheStat struct {
33 | Hits int64
34 | Misses int64
35 | Keys int
36 | Size int64
37 | Errors int64
38 | }
39 |
40 | // String formats cache stats
41 | func (s CacheStat) String() string {
42 | ratio := 0.0
43 | if s.Hits+s.Misses > 0 {
44 | ratio = float64(s.Hits) / float64(s.Hits+s.Misses)
45 | }
46 | return fmt.Sprintf("{hits:%d, misses:%d, ratio:%.2f, keys:%d, size:%d, errors:%d}",
47 | s.Hits, s.Misses, ratio, s.Keys, s.Size, s.Errors)
48 | }
49 |
50 | // Nop is a do-nothing implementation of LoadingCache
51 | type Nop[V any] struct{}
52 |
53 | // NewNopCache makes new do-nothing cache
54 | func NewNopCache[V any]() *Nop[V] {
55 | return &Nop[V]{}
56 | }
57 |
58 | // Get calls fn without any caching
59 | func (n *Nop[V]) Get(_ string, fn func() (V, error)) (V, error) { return fn() }
60 |
61 | // Peek does nothing and always returns false
62 | func (n *Nop[V]) Peek(string) (V, bool) { var emptyValue V; return emptyValue, false }
63 |
64 | // Invalidate does nothing for nop cache
65 | func (n *Nop[V]) Invalidate(func(key string) bool) {}
66 |
67 | // Purge does nothing for nop cache
68 | func (n *Nop[V]) Purge() {}
69 |
70 | // Delete does nothing for nop cache
71 | func (n *Nop[V]) Delete(string) {}
72 |
73 | // Keys does nothing for nop cache
74 | func (n *Nop[V]) Keys() []string { return nil }
75 |
76 | // Stat always returns zeroed stats for nop cache
77 | func (n *Nop[V]) Stat() CacheStat {
78 | return CacheStat{}
79 | }
80 |
81 | // Close does nothing for nop cache
82 | func (n *Nop[V]) Close() error {
83 | return nil
84 | }
85 |
--------------------------------------------------------------------------------
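The generic LoadingCache[V] interface above is what every v2 backend implements; the Nop flavor makes the Get contract easy to see in isolation. A minimal sketch (assuming the v2 module path github.com/go-pkgz/lcw/v2 from go.mod below):

package main

import (
	"fmt"

	"github.com/go-pkgz/lcw/v2"
)

func main() {
	var c lcw.LoadingCache[string] = lcw.NewNopCache[string]()

	// Nop never stores anything, so fn runs on every Get
	v, err := c.Get("key1", func() (string, error) { return "value", nil })
	fmt.Println(v, err)
	fmt.Println(c.Stat()) // all-zero stats for the nop cache
}
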
/v2/cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "strings"
7 | "sync"
8 | "sync/atomic"
9 | "testing"
10 | "time"
11 |
12 | "github.com/redis/go-redis/v9"
13 | "github.com/stretchr/testify/assert"
14 | "github.com/stretchr/testify/require"
15 | )
16 |
17 | func TestNop_Get(t *testing.T) {
18 | var coldCalls int32
19 | var c LoadingCache[string] = NewNopCache[string]()
20 | res, err := c.Get("key1", func() (string, error) {
21 | atomic.AddInt32(&coldCalls, 1)
22 | return "result", nil
23 | })
24 | assert.NoError(t, err)
25 | assert.Equal(t, "result", res)
26 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
27 |
28 | res, err = c.Get("key1", func() (string, error) {
29 | atomic.AddInt32(&coldCalls, 1)
30 | return "result2", nil
31 | })
32 | assert.NoError(t, err)
33 | assert.Equal(t, "result2", res)
34 | assert.Equal(t, int32(2), atomic.LoadInt32(&coldCalls))
35 |
36 | assert.Equal(t, CacheStat{}, c.Stat())
37 | }
38 |
39 | func TestNop_Peek(t *testing.T) {
40 | var coldCalls int32
41 | c := NewNopCache[string]()
42 | res, err := c.Get("key1", func() (string, error) {
43 | atomic.AddInt32(&coldCalls, 1)
44 | return "result", nil
45 | })
46 | assert.NoError(t, err)
47 | assert.Equal(t, "result", res)
48 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
49 |
50 | _, ok := c.Peek("key1")
51 | assert.False(t, ok)
52 | }
53 |
54 | func TestStat_String(t *testing.T) {
55 | s := CacheStat{Keys: 100, Hits: 60, Misses: 10, Size: 12345, Errors: 5}
56 | assert.Equal(t, "{hits:60, misses:10, ratio:0.86, keys:100, size:12345, errors:5}", s.String())
57 | }
58 |
59 | func TestCache_Get(t *testing.T) {
60 | caches, teardown := cachesTestList[string](t)
61 | defer teardown()
62 |
63 | for _, c := range caches {
64 | c := c
65 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
66 | var coldCalls int32
67 | res, err := c.Get("key", func() (string, error) {
68 | atomic.AddInt32(&coldCalls, 1)
69 | return "result", nil
70 | })
71 | assert.NoError(t, err)
72 | assert.Equal(t, "result", res)
73 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
74 |
75 | res, err = c.Get("key", func() (string, error) {
76 | atomic.AddInt32(&coldCalls, 1)
77 | return "result2", nil
78 | })
79 |
80 | assert.NoError(t, err)
81 | assert.Equal(t, "result", res)
82 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls), "cache hit")
83 |
84 | _, err = c.Get("key-2", func() (string, error) {
85 | atomic.AddInt32(&coldCalls, 1)
86 | return "result2", fmt.Errorf("some error")
87 | })
88 | assert.Error(t, err)
89 | assert.Equal(t, int32(2), atomic.LoadInt32(&coldCalls), "cache hit")
90 |
91 | _, err = c.Get("key-2", func() (string, error) {
92 | atomic.AddInt32(&coldCalls, 1)
93 | return "result2", fmt.Errorf("some error")
94 | })
95 | assert.Error(t, err)
96 | assert.Equal(t, int32(3), atomic.LoadInt32(&coldCalls), "cache hit")
97 | })
98 | }
99 | }
100 |
101 | func TestCache_MaxValueSize(t *testing.T) {
102 | o := NewOpts[sizedString]()
103 | caches, teardown := cachesTestList(t, o.MaxKeys(5), o.MaxValSize(10), o.StrToV(func(s string) sizedString { return sizedString(s) }))
104 | defer teardown()
105 |
106 | for _, c := range caches {
107 | c := c
108 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
109 | 			// put a good-sized value to cache and make sure it is cached
110 | res, err := c.Get("key-Z", func() (sizedString, error) {
111 | return "result-Z", nil
112 | })
113 | assert.NoError(t, err)
114 | assert.Equal(t, sizedString("result-Z"), res)
115 |
116 | res, err = c.Get("key-Z", func() (sizedString, error) {
117 | return "result-Zzzz", nil
118 | })
119 | assert.NoError(t, err)
120 | assert.Equal(t, sizedString("result-Z"), res, "got cached value")
121 |
122 | // put too big value to cache and make sure it is not cached
123 | res, err = c.Get("key-Big", func() (sizedString, error) {
124 | return "1234567890", nil
125 | })
126 | assert.NoError(t, err)
127 | assert.Equal(t, sizedString("1234567890"), res)
128 |
129 | res, err = c.Get("key-Big", func() (sizedString, error) {
130 | return "result-big", nil
131 | })
132 | assert.NoError(t, err)
133 | assert.Equal(t, sizedString("result-big"), res, "got not cached value")
134 |
135 | // put too big value to cache
136 | res, err = c.Get("key-Big2", func() (sizedString, error) {
137 | return "1234567890", nil
138 | })
139 | assert.NoError(t, err)
140 | assert.Equal(t, sizedString("1234567890"), res)
141 |
142 | res, err = c.Get("key-Big2", func() (sizedString, error) {
143 | return "xyz", nil
144 | })
145 | assert.NoError(t, err)
146 | assert.Equal(t, sizedString("xyz"), res, "too long, but not Sizer. from cache")
147 | })
148 | }
149 | }
150 |
151 | func TestCache_MaxCacheSize(t *testing.T) {
152 | o := NewOpts[sizedString]()
153 | caches, teardown := cachesTestList(t, o.MaxKeys(50), o.MaxCacheSize(20), o.StrToV(func(s string) sizedString { return sizedString(s) }))
154 | defer teardown()
155 |
156 | for _, c := range caches {
157 | c := c
158 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
159 | 			// put a good-sized value to cache and make sure it is cached
160 | res, err := c.Get("key-Z", func() (sizedString, error) {
161 | return "result-Z", nil
162 | })
163 | assert.NoError(t, err)
164 | assert.Equal(t, sizedString("result-Z"), res)
165 | res, err = c.Get("key-Z", func() (sizedString, error) {
166 | return "result-Zzzz", nil
167 | })
168 | assert.NoError(t, err)
169 | assert.Equal(t, sizedString("result-Z"), res, "got cached value")
170 | if _, ok := c.(*RedisCache[sizedString]); !ok {
171 | assert.Equal(t, int64(8), c.size())
172 | }
173 | _, err = c.Get("key-Z2", func() (sizedString, error) {
174 | return "result-Y", nil
175 | })
176 | assert.NoError(t, err)
177 | if _, ok := c.(*RedisCache[sizedString]); !ok {
178 | assert.Equal(t, int64(16), c.size())
179 | }
180 |
181 | // this will cause removal
182 | _, err = c.Get("key-Z3", func() (sizedString, error) {
183 | return "result-Z", nil
184 | })
185 | assert.NoError(t, err)
186 | if _, ok := c.(*RedisCache[sizedString]); !ok {
187 | assert.Equal(t, int64(16), c.size())
188 | 				// because RedisCache[sizedString] does not support MaxCacheSize, this assert is skipped for it
189 | assert.Equal(t, 2, c.keys())
190 | }
191 | })
192 | }
193 | }
194 |
195 | func TestCache_MaxCacheSizeParallel(t *testing.T) {
196 | o := NewOpts[sizedString]()
197 | caches, teardown := cachesTestList(t, o.MaxCacheSize(123), o.MaxKeys(10000), o.StrToV(func(s string) sizedString { return sizedString(s) }))
198 | defer teardown()
199 |
200 | for _, c := range caches {
201 | c := c
202 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
203 | wg := sync.WaitGroup{}
204 | for i := 0; i < 1000; i++ {
205 | wg.Add(1)
206 | i := i
207 | go func() {
208 | //nolint:gosec // not used for security purpose
209 | time.Sleep(time.Duration(rand.Intn(100)) * time.Nanosecond)
210 | defer wg.Done()
211 | res, err := c.Get(fmt.Sprintf("key-%d", i), func() (sizedString, error) {
212 | return sizedString(fmt.Sprintf("result-%d", i)), nil
213 | })
214 | require.NoError(t, err)
215 | require.Equal(t, sizedString(fmt.Sprintf("result-%d", i)), res)
216 | }()
217 | }
218 | wg.Wait()
219 | assert.True(t, c.size() < 123 && c.size() >= 0)
220 | t.Log("size", c.size())
221 | })
222 | }
223 |
224 | }
225 |
226 | func TestCache_MaxKeySize(t *testing.T) {
227 | o := NewOpts[sizedString]()
228 | caches, teardown := cachesTestList(t, o.MaxKeySize(5), o.StrToV(func(s string) sizedString { return sizedString(s) }))
229 | defer teardown()
230 |
231 | for _, c := range caches {
232 | c := c
233 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
234 | res, err := c.Get("key", func() (sizedString, error) {
235 | return "value", nil
236 | })
237 | assert.NoError(t, err)
238 | assert.Equal(t, sizedString("value"), res)
239 |
240 | res, err = c.Get("key", func() (sizedString, error) {
241 | return "valueXXX", nil
242 | })
243 | assert.NoError(t, err)
244 | assert.Equal(t, sizedString("value"), res, "cached")
245 |
246 | res, err = c.Get("key1234", func() (sizedString, error) {
247 | return "value", nil
248 | })
249 | assert.NoError(t, err)
250 | assert.Equal(t, sizedString("value"), res)
251 |
252 | res, err = c.Get("key1234", func() (sizedString, error) {
253 | return "valueXYZ", nil
254 | })
255 | assert.NoError(t, err)
256 | assert.Equal(t, sizedString("valueXYZ"), res, "not cached")
257 | })
258 | }
259 | }
260 |
261 | func TestCache_Peek(t *testing.T) {
262 | caches, teardown := cachesTestList[string](t)
263 | defer teardown()
264 |
265 | for _, c := range caches {
266 | c := c
267 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
268 | var coldCalls int32
269 | res, err := c.Get("key", func() (string, error) {
270 | atomic.AddInt32(&coldCalls, 1)
271 | return "result", nil
272 | })
273 | assert.NoError(t, err)
274 | assert.Equal(t, "result", res)
275 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
276 |
277 | r, ok := c.Peek("key")
278 | assert.True(t, ok)
279 | assert.Equal(t, "result", r)
280 | })
281 | }
282 | }
283 |
284 | func TestLruCache_ParallelHits(t *testing.T) {
285 | caches, teardown := cachesTestList[string](t)
286 | defer teardown()
287 |
288 | for _, c := range caches {
289 | c := c
290 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
291 | var coldCalls int32
292 |
293 | res, err := c.Get("key", func() (string, error) {
294 | return "value", nil
295 | })
296 | assert.NoError(t, err)
297 | assert.Equal(t, "value", res)
298 |
299 | wg := sync.WaitGroup{}
300 | for i := 0; i < 1000; i++ {
301 | wg.Add(1)
302 | i := i
303 | go func() {
304 | defer wg.Done()
305 | res, err := c.Get("key", func() (string, error) {
306 | atomic.AddInt32(&coldCalls, 1)
307 | return fmt.Sprintf("result-%d", i), nil
308 | })
309 | require.NoError(t, err)
310 | require.Equal(t, "value", res)
311 | }()
312 | }
313 | wg.Wait()
314 | assert.Equal(t, int32(0), atomic.LoadInt32(&coldCalls))
315 | })
316 | }
317 | }
318 |
319 | func TestCache_Purge(t *testing.T) {
320 | caches, teardown := cachesTestList[string](t)
321 | defer teardown()
322 |
323 | for _, c := range caches {
324 | c := c
325 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
326 | var coldCalls int32
327 | // fill cache
328 | for i := 0; i < 1000; i++ {
329 | i := i
330 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
331 | atomic.AddInt32(&coldCalls, 1)
332 | return fmt.Sprintf("result-%d", i), nil
333 | })
334 | require.NoError(t, err)
335 | }
336 | assert.Equal(t, int32(1000), atomic.LoadInt32(&coldCalls))
337 | assert.Equal(t, 1000, c.keys())
338 |
339 | c.Purge()
340 | assert.Equal(t, 0, c.keys(), "all keys removed")
341 | })
342 | }
343 | }
344 |
345 | func TestCache_Invalidate(t *testing.T) {
346 | caches, teardown := cachesTestList[string](t)
347 | defer teardown()
348 |
349 | for _, c := range caches {
350 | c := c
351 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
352 | var coldCalls int32
353 |
354 | // fill cache
355 | for i := 0; i < 1000; i++ {
356 | i := i
357 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
358 | atomic.AddInt32(&coldCalls, 1)
359 | return fmt.Sprintf("result-%d", i), nil
360 | })
361 | require.NoError(t, err)
362 | }
363 | assert.Equal(t, int32(1000), atomic.LoadInt32(&coldCalls))
364 | assert.Equal(t, 1000, c.keys())
365 |
366 | c.Invalidate(func(key string) bool {
367 | return strings.HasSuffix(key, "0")
368 | })
369 |
370 | assert.Equal(t, 900, c.keys(), "100 keys removed")
371 | res, err := c.Get("key-1", func() (string, error) {
372 | atomic.AddInt32(&coldCalls, 1)
373 | return "result-xxx", nil
374 | })
375 | require.NoError(t, err)
376 | assert.Equal(t, "result-1", res, "from the cache")
377 |
378 | res, err = c.Get("key-10", func() (string, error) {
379 | atomic.AddInt32(&coldCalls, 1)
380 | return "result-xxx", nil
381 | })
382 | require.NoError(t, err)
383 | assert.Equal(t, "result-xxx", res, "not from the cache")
384 | })
385 | }
386 | }
387 |
388 | func TestCache_Delete(t *testing.T) {
389 | o := NewOpts[sizedString]()
390 | caches, teardown := cachesTestList[sizedString](t, o.StrToV(func(s string) sizedString { return sizedString(s) }))
391 | defer teardown()
392 |
393 | for _, c := range caches {
394 | c := c
395 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
396 | // fill cache
397 | for i := 0; i < 1000; i++ {
398 | i := i
399 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (sizedString, error) {
400 | return sizedString(fmt.Sprintf("result-%d", i)), nil
401 | })
402 | require.NoError(t, err)
403 | }
404 | assert.Equal(t, 1000, c.Stat().Keys)
405 | if _, ok := c.(*RedisCache[sizedString]); !ok {
406 | assert.Equal(t, int64(9890), c.Stat().Size)
407 | }
408 | c.Delete("key-2")
409 | assert.Equal(t, 999, c.Stat().Keys)
410 | if _, ok := c.(*RedisCache[sizedString]); !ok {
411 | assert.Equal(t, int64(9890-8), c.Stat().Size)
412 | }
413 | })
414 | }
415 | }
416 |
417 | func TestCache_DeleteWithEvent(t *testing.T) {
418 | var evKey string
419 | var evVal sizedString
420 | var evCount int
421 | onEvict := func(key string, value sizedString) {
422 | evKey = key
423 | evVal = value
424 | evCount++
425 | }
426 |
427 | o := NewOpts[sizedString]()
428 | caches, teardown := cachesTestList(t, o.OnEvicted(onEvict), o.StrToV(func(s string) sizedString { return sizedString(s) }))
429 | defer teardown()
430 |
431 | for _, c := range caches {
432 | c := c
433 |
434 | evKey, evVal, evCount = "", "", 0
435 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
436 | if _, ok := c.(*RedisCache[sizedString]); ok {
437 | t.Skip("RedisCache[sizedString] doesn't support delete events")
438 | }
439 | // fill cache
440 | for i := 0; i < 1000; i++ {
441 | i := i
442 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (sizedString, error) {
443 | return sizedString(fmt.Sprintf("result-%d", i)), nil
444 | })
445 | require.NoError(t, err)
446 | }
447 | assert.Equal(t, 1000, c.Stat().Keys)
448 | assert.Equal(t, int64(9890), c.Stat().Size)
449 |
450 | c.Delete("key-2")
451 | assert.Equal(t, 999, c.Stat().Keys)
452 | assert.Equal(t, "key-2", evKey)
453 | assert.Equal(t, sizedString("result-2"), evVal)
454 | assert.Equal(t, 1, evCount)
455 | })
456 | }
457 | }
458 |
459 | func TestCache_Stats(t *testing.T) {
460 | o := NewOpts[sizedString]()
461 | caches, teardown := cachesTestList[sizedString](t, o.StrToV(func(s string) sizedString { return sizedString(s) }))
462 | defer teardown()
463 |
464 | for _, c := range caches {
465 | c := c
466 | t.Run(strings.Replace(fmt.Sprintf("%T", c), "*lcw.", "", 1), func(t *testing.T) {
467 | // fill cache
468 | for i := 0; i < 100; i++ {
469 | i := i
470 | _, err := c.Get(fmt.Sprintf("key-%d", i), func() (sizedString, error) {
471 | return sizedString(fmt.Sprintf("result-%d", i)), nil
472 | })
473 | require.NoError(t, err)
474 | }
475 | stats := c.Stat()
476 | switch c.(type) {
477 | case *RedisCache[sizedString]:
478 | assert.Equal(t, CacheStat{Hits: 0, Misses: 100, Keys: 100, Size: 0}, stats)
479 | default:
480 | assert.Equal(t, CacheStat{Hits: 0, Misses: 100, Keys: 100, Size: 890}, stats)
481 | }
482 |
483 | _, err := c.Get("key-1", func() (sizedString, error) {
484 | return "xyz", nil
485 | })
486 | require.NoError(t, err)
487 | switch c.(type) {
488 | case *RedisCache[sizedString]:
489 | assert.Equal(t, CacheStat{Hits: 1, Misses: 100, Keys: 100, Size: 0}, c.Stat())
490 | default:
491 | assert.Equal(t, CacheStat{Hits: 1, Misses: 100, Keys: 100, Size: 890}, c.Stat())
492 | }
493 |
494 | _, err = c.Get("key-1123", func() (sizedString, error) {
495 | return sizedString("xyz"), nil
496 | })
497 | require.NoError(t, err)
498 | switch c.(type) {
499 | case *RedisCache[sizedString]:
500 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 0}, c.Stat())
501 | default:
502 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 893}, c.Stat())
503 | }
504 |
505 | _, err = c.Get("key-9999", func() (sizedString, error) {
506 | return "", fmt.Errorf("err")
507 | })
508 | require.Error(t, err)
509 | switch c.(type) {
510 | case *RedisCache[sizedString]:
511 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 0, Errors: 1}, c.Stat())
512 | default:
513 | assert.Equal(t, CacheStat{Hits: 1, Misses: 101, Keys: 101, Size: 893, Errors: 1}, c.Stat())
514 | }
515 | })
516 | }
517 | }
518 |
519 | // ExampleLoadingCache_Get illustrates creation of a cache and loading value from it
520 | func ExampleLoadingCache_Get() {
521 | o := NewOpts[string]()
522 | c, err := NewExpirableCache(o.MaxKeys(10), o.TTL(time.Minute*30)) // make expirable cache (30m o.TTL) with up to 10 keys
523 | if err != nil {
524 | 		panic("can't make cache")
525 | }
526 | defer c.Close()
527 |
528 | 	// try to get from cache; mykey is not there yet, so the func runs and the value is stored
529 | _, _ = c.Get("mykey", func() (string, error) {
530 | fmt.Println("cache miss 1")
531 | return "myval-1", nil
532 | })
533 |
534 | 	// get from cache, func won't run because mykey is already in the cache
535 | v, err := c.Get("mykey", func() (string, error) {
536 | fmt.Println("cache miss 2")
537 | return "myval-2", nil
538 | })
539 |
540 | if err != nil {
541 | panic("can't get from cache")
542 | }
543 | fmt.Printf("got %s from cache, stats: %s", v, c.Stat())
544 | // Output: cache miss 1
545 | // got myval-1 from cache, stats: {hits:1, misses:1, ratio:0.50, keys:1, size:0, errors:0}
546 | }
547 |
548 | // ExampleLoadingCache_Delete illustrates cache value eviction and OnEvicted function usage.
549 | func ExampleLoadingCache_Delete() {
550 | // make expirable cache (30m TTL) with up to 10 keys. Set callback on eviction event
551 | o := NewOpts[string]()
552 | c, err := NewExpirableCache(o.MaxKeys(10), o.TTL(time.Minute*30), o.OnEvicted(func(key string, _ string) {
553 | fmt.Println("key " + key + " evicted")
554 | }))
555 | if err != nil {
556 | 		panic("can't make cache")
557 | }
558 | defer c.Close()
559 |
560 | 	// try to get from cache; mykey is not there yet, so the func runs and the value is stored
561 | _, _ = c.Get("mykey", func() (string, error) {
562 | return "myval-1", nil
563 | })
564 |
565 | c.Delete("mykey")
566 | fmt.Println("stats: " + c.Stat().String())
567 | // Output: key mykey evicted
568 | // stats: {hits:0, misses:1, ratio:0.00, keys:0, size:0, errors:0}
569 | }
570 |
571 | // nolint:govet //false positive due to example name
572 | // ExampleLoadingCacheMutability illustrates changing a mutable stored item outside of the cache; works only for non-Redis caches.
573 | func Example_loadingCacheMutability() {
574 | o := NewOpts[[]string]()
575 | c, err := NewExpirableCache(o.MaxKeys(10), o.TTL(time.Minute*30)) // make expirable cache (30m o.TTL) with up to 10 keys
576 | if err != nil {
577 | 		panic("can't make cache")
578 | }
579 | defer c.Close()
580 |
581 | mutableSlice := []string{"key1", "key2"}
582 |
583 | // put mutableSlice in "mutableSlice" cache key
584 | _, _ = c.Get("mutableSlice", func() ([]string, error) {
585 | return mutableSlice, nil
586 | })
587 |
588 | // get from cache, func won't run because mutableSlice is cached
589 | // value is original now
590 | v, _ := c.Get("mutableSlice", func() ([]string, error) {
591 | return nil, nil
592 | })
593 | fmt.Printf("got %v slice from cache\n", v)
594 |
595 | mutableSlice[0] = "another_key_1"
596 | mutableSlice[1] = "another_key_2"
597 |
598 | // get from cache, func won't run because mutableSlice is cached
599 | 	// value is changed inside the cache now because mutableSlice is stored as-is, in a mutable state
600 | v, _ = c.Get("mutableSlice", func() ([]string, error) {
601 | return nil, nil
602 | })
603 | 	fmt.Printf("got %v slice from cache after its change outside of cache\n", v)
604 |
605 | // Output:
606 | // got [key1 key2] slice from cache
607 | 	// got [another_key_1 another_key_2] slice from cache after its change outside of cache
608 | }
609 |
610 | type counts interface {
611 | size() int64 // cache size in bytes
612 | keys() int // number of keys in cache
613 | }
614 |
615 | type countedCache[V any] interface {
616 | LoadingCache[V]
617 | counts
618 | }
619 |
620 | func cachesTestList[V any](t *testing.T, opts ...Option[V]) (c []countedCache[V], teardown func()) {
621 | var caches []countedCache[V]
622 | ec, err := NewExpirableCache(opts...)
623 | require.NoError(t, err, "can't make exp cache")
624 | caches = append(caches, ec)
625 | lc, err := NewLruCache(opts...)
626 | require.NoError(t, err, "can't make lru cache")
627 | caches = append(caches, lc)
628 |
629 | server := newTestRedisServer()
630 | client := redis.NewClient(&redis.Options{
631 | Addr: server.Addr()})
632 | rc, err := NewRedisCache(client, opts...)
633 | require.NoError(t, err, "can't make redis cache")
634 | caches = append(caches, rc)
635 |
636 | return caches, func() {
637 | _ = client.Close()
638 | _ = ec.Close()
639 | _ = lc.Close()
640 | _ = rc.Close()
641 | server.Close()
642 | }
643 | }
644 |
645 | type sizedString string
646 |
647 | func (s sizedString) Size() int { return len(s) }
648 |
649 | func (s sizedString) MarshalBinary() (data []byte, err error) {
650 | return []byte(s), nil
651 | }
652 |
653 | type mockPubSub struct {
654 | calledKeys []string
655 | fns []func(fromID, key string)
656 | sync.Mutex
657 | sync.WaitGroup
658 | }
659 |
660 | func (m *mockPubSub) CalledKeys() []string {
661 | m.Lock()
662 | defer m.Unlock()
663 | return m.calledKeys
664 | }
665 |
666 | func (m *mockPubSub) Subscribe(fn func(fromID, key string)) error {
667 | m.Lock()
668 | defer m.Unlock()
669 | m.fns = append(m.fns, fn)
670 | return nil
671 | }
672 |
673 | func (m *mockPubSub) Publish(fromID, key string) error {
674 | m.Lock()
675 | defer m.Unlock()
676 | m.calledKeys = append(m.calledKeys, key)
677 | for _, fn := range m.fns {
678 | fn := fn
679 | m.Add(1)
680 | // run in goroutine to prevent deadlock
681 | go func() {
682 | fn(fromID, key)
683 | m.Done()
684 | }()
685 | }
686 | return nil
687 | }
688 |
--------------------------------------------------------------------------------
/v2/eventbus/pubsub.go:
--------------------------------------------------------------------------------
1 | // Package eventbus provides PubSub interface used for distributed cache invalidation,
2 | // as well as NopPubSub and RedisPubSub implementations.
3 | package eventbus
4 |
5 | // PubSub interface is used for distributed cache invalidation.
6 | // Publish is called on each entry invalidation,
7 | // Subscribe is used for subscription for these events.
8 | type PubSub interface {
9 | Publish(fromID, key string) error
10 | Subscribe(fn func(fromID, key string)) error
11 | }
12 |
13 | // NopPubSub implements default do-nothing pub-sub (event bus)
14 | type NopPubSub struct{}
15 |
16 | // Subscribe does nothing for NopPubSub
17 | func (n *NopPubSub) Subscribe(func(fromID string, key string)) error {
18 | return nil
19 | }
20 |
21 | // Publish does nothing for NopPubSub
22 | func (n *NopPubSub) Publish(string, string) error {
23 | return nil
24 | }
25 |
--------------------------------------------------------------------------------
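Any type with these two methods can be plugged in as the cache event bus. As a hypothetical illustration (not part of the package), a minimal in-process PubSub satisfying the interface:

package main

import (
	"fmt"

	"github.com/go-pkgz/lcw/v2/eventbus"
)

// localPubSub is a hypothetical in-process eventbus.PubSub implementation;
// it delivers every published key to all subscribers synchronously.
type localPubSub struct {
	fns []func(fromID, key string)
}

func (l *localPubSub) Subscribe(fn func(fromID, key string)) error {
	l.fns = append(l.fns, fn)
	return nil
}

func (l *localPubSub) Publish(fromID, key string) error {
	for _, fn := range l.fns {
		fn(fromID, key)
	}
	return nil
}

var _ eventbus.PubSub = (*localPubSub)(nil) // compile-time interface check

func main() {
	ps := &localPubSub{}
	_ = ps.Subscribe(func(fromID, key string) { fmt.Println("invalidate", key, "from", fromID) })
	_ = ps.Publish("node-1", "some-key")
}
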
/v2/eventbus/pubsub_test.go:
--------------------------------------------------------------------------------
1 | package eventbus
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestNopPubSub(t *testing.T) {
10 | nopPubSub := NopPubSub{}
11 | assert.NoError(t, nopPubSub.Subscribe(nil))
12 | assert.NoError(t, nopPubSub.Publish("", ""))
13 | }
14 |
--------------------------------------------------------------------------------
/v2/eventbus/redis.go:
--------------------------------------------------------------------------------
1 | package eventbus
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "strings"
7 | "time"
8 |
9 | "github.com/hashicorp/go-multierror"
10 | "github.com/redis/go-redis/v9"
11 | )
12 |
13 | // NewRedisPubSub creates new RedisPubSub with given parameters.
14 | // Returns an error in case of problems with creating PubSub client for specified channel.
15 | func NewRedisPubSub(addr, channel string) (*RedisPubSub, error) {
16 | client := redis.NewClient(&redis.Options{Addr: addr})
17 | pubSub := client.Subscribe(context.Background(), channel)
18 | // wait for subscription to be created and ignore the message
19 | if _, err := pubSub.Receive(context.Background()); err != nil {
20 | _ = client.Close()
21 | return nil, fmt.Errorf("problem subscribing to channel %s on address %s: %w", channel, addr, err)
22 | }
23 | return &RedisPubSub{client: client, pubSub: pubSub, channel: channel, done: make(chan struct{})}, nil
24 | }
25 |
26 | // RedisPubSub provides Redis implementation for PubSub interface
27 | type RedisPubSub struct {
28 | client *redis.Client
29 | pubSub *redis.PubSub
30 | channel string
31 |
32 | done chan struct{}
33 | }
34 |
35 | // Subscribe calls the provided function for messages received on the channel specified at RedisPubSub creation.
36 | // Should not be called more than once. Spawns a goroutine and does not return an error.
37 | func (m *RedisPubSub) Subscribe(fn func(fromID, key string)) error {
38 | go func(done <-chan struct{}, pubsub *redis.PubSub) {
39 | for {
40 | select {
41 | case <-done:
42 | return
43 | default:
44 | }
45 | msg, err := pubsub.ReceiveTimeout(context.Background(), time.Second*10)
46 | if err != nil {
47 | continue
48 | }
49 |
50 | // Process the message
51 | if msg, ok := msg.(*redis.Message); ok {
52 | payload := strings.Split(msg.Payload, "$")
53 | fn(payload[0], strings.Join(payload[1:], "$"))
54 | }
55 | }
56 | }(m.done, m.pubSub)
57 |
58 | return nil
59 | }
60 |
61 | // Publish publishes the provided message to the channel specified at RedisPubSub creation
62 | func (m *RedisPubSub) Publish(fromID, key string) error {
63 | return m.client.Publish(context.Background(), m.channel, fromID+"$"+key).Err()
64 | }
65 |
66 | // Close cleans up running goroutines and closes Redis clients
67 | func (m *RedisPubSub) Close() error {
68 | close(m.done)
69 |
70 | errs := new(multierror.Error)
71 | if err := m.pubSub.Close(); err != nil {
72 | errs = multierror.Append(errs, fmt.Errorf("problem closing pubSub client: %w", err))
73 | }
74 | if err := m.client.Close(); err != nil {
75 | errs = multierror.Append(errs, fmt.Errorf("problem closing redis client: %w", err))
76 | }
77 | return errs.ErrorOrNil()
78 | }
79 |
--------------------------------------------------------------------------------
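RedisPubSub above is intended to be wired into a cache via the EventBus option so that invalidations propagate between nodes sharing the same channel. A minimal wiring sketch (assuming a Redis server at 127.0.0.1:6379 and a hypothetical channel name "lcw-invalidation"):

package main

import (
	"log"
	"time"

	"github.com/go-pkgz/lcw/v2"
	"github.com/go-pkgz/lcw/v2/eventbus"
)

func main() {
	ps, err := eventbus.NewRedisPubSub("127.0.0.1:6379", "lcw-invalidation")
	if err != nil {
		log.Fatalf("can't make redis pubsub: %v", err)
	}
	defer ps.Close()

	o := lcw.NewOpts[string]()
	c, err := lcw.NewExpirableCache(o.MaxKeys(100), o.TTL(5*time.Minute), o.EventBus(ps))
	if err != nil {
		log.Fatalf("can't make cache: %v", err)
	}
	defer c.Close()

	// evictions and deletions on this node are published to the channel,
	// so other nodes subscribed to the same channel drop the key as well
	c.Delete("some-key")
}
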
/v2/eventbus/redis_test.go:
--------------------------------------------------------------------------------
1 | package eventbus
2 |
3 | import (
4 | "math/rand"
5 | "os"
6 | "strconv"
7 | "testing"
8 | "time"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
14 | func TestNewRedisPubSub_Error(t *testing.T) {
15 | redisPubSub, err := NewRedisPubSub("127.0.0.1:99999", "test")
16 | require.Error(t, err)
17 | require.Nil(t, redisPubSub)
18 | }
19 |
20 | func TestRedisPubSub(t *testing.T) {
21 | if _, ok := os.LookupEnv("ENABLE_REDIS_TESTS"); !ok {
22 | t.Skip("ENABLE_REDIS_TESTS env variable is not set, not expecting Redis to be ready at 127.0.0.1:6379")
23 | }
24 |
25 | //nolint:gosec // not used for security purpose
26 | channel := "lcw-test-" + strconv.Itoa(rand.Intn(1000000))
27 | redisPubSub, err := NewRedisPubSub("127.0.0.1:6379", channel)
28 | require.NoError(t, err)
29 | require.NotNil(t, redisPubSub)
30 | var called []string
31 | assert.Nil(t, redisPubSub.Subscribe(func(fromID, key string) {
32 | called = append(called, fromID, key)
33 | }))
34 | assert.NoError(t, redisPubSub.Publish("test_fromID", "$test$key$"))
35 | // Sleep which waits for Subscribe goroutine to pick up published changes
36 | time.Sleep(time.Second)
37 | assert.NoError(t, redisPubSub.Close())
38 | assert.Equal(t, []string{"test_fromID", "$test$key$"}, called)
39 | }
40 |
--------------------------------------------------------------------------------
/v2/expirable_cache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "sync/atomic"
6 | "time"
7 |
8 | "github.com/google/uuid"
9 | "github.com/hashicorp/golang-lru/v2/expirable"
10 |
11 | "github.com/go-pkgz/lcw/v2/eventbus"
12 | )
13 |
14 | // ExpirableCache implements LoadingCache with TTL.
15 | type ExpirableCache[V any] struct {
16 | Workers[V]
17 | CacheStat
18 | currentSize int64
19 | id string
20 | backend *expirable.LRU[string, V]
21 | }
22 |
23 | // NewExpirableCache makes expirable LoadingCache implementation, 1000 max keys by default and 5m TTL
24 | func NewExpirableCache[V any](opts ...Option[V]) (*ExpirableCache[V], error) {
25 | res := ExpirableCache[V]{
26 | Workers: Workers[V]{
27 | maxKeys: 1000,
28 | maxValueSize: 0,
29 | ttl: 5 * time.Minute,
30 | eventBus: &eventbus.NopPubSub{},
31 | },
32 | id: uuid.New().String(),
33 | }
34 |
35 | for _, opt := range opts {
36 | if err := opt(&res.Workers); err != nil {
37 | return nil, fmt.Errorf("failed to set cache option: %w", err)
38 | }
39 | }
40 |
41 | if err := res.eventBus.Subscribe(res.onBusEvent); err != nil {
42 | return nil, fmt.Errorf("can't subscribe to event bus: %w", err)
43 | }
44 |
45 | res.backend = expirable.NewLRU[string, V](res.maxKeys, func(key string, value V) {
46 | if res.onEvicted != nil {
47 | res.onEvicted(key, value)
48 | }
49 | if s, ok := any(value).(Sizer); ok {
50 | size := s.Size()
51 | atomic.AddInt64(&res.currentSize, -1*int64(size))
52 | }
53 | // ignore the error on Publish as we don't have log inside the module and
54 | // there is no other way to handle it: we publish the cache invalidation
55 | // and hope for the best
56 | _ = res.eventBus.Publish(res.id, key)
57 | }, res.ttl)
58 |
59 | return &res, nil
60 | }
61 |
62 | // Get gets value by key or load with fn if not found in cache
63 | func (c *ExpirableCache[V]) Get(key string, fn func() (V, error)) (data V, err error) {
64 | if v, ok := c.backend.Get(key); ok {
65 | atomic.AddInt64(&c.Hits, 1)
66 | return v, nil
67 | }
68 |
69 | if data, err = fn(); err != nil {
70 | atomic.AddInt64(&c.Errors, 1)
71 | return data, err
72 | }
73 | atomic.AddInt64(&c.Misses, 1)
74 |
75 | if !c.allowed(key, data) {
76 | return data, nil
77 | }
78 |
79 | if s, ok := any(data).(Sizer); ok {
80 | if c.maxCacheSize > 0 && atomic.LoadInt64(&c.currentSize)+int64(s.Size()) >= c.maxCacheSize {
81 | return data, nil
82 | }
83 | atomic.AddInt64(&c.currentSize, int64(s.Size()))
84 | }
85 |
86 | c.backend.Add(key, data)
87 |
88 | return data, nil
89 | }
90 |
91 | // Invalidate removes keys matched by the passed predicate fn, i.e. fn(key) should return true for a key to be evicted
92 | func (c *ExpirableCache[V]) Invalidate(fn func(key string) bool) {
93 | for _, key := range c.backend.Keys() {
94 | if fn(key) {
95 | c.backend.Remove(key)
96 | }
97 | }
98 | }
99 |
100 | // Peek returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
101 | func (c *ExpirableCache[V]) Peek(key string) (V, bool) {
102 | return c.backend.Peek(key)
103 | }
104 |
105 | // Purge clears the cache completely.
106 | func (c *ExpirableCache[V]) Purge() {
107 | c.backend.Purge()
108 | atomic.StoreInt64(&c.currentSize, 0)
109 | }
110 |
111 | // Delete cache item by key
112 | func (c *ExpirableCache[V]) Delete(key string) {
113 | c.backend.Remove(key)
114 | }
115 |
116 | // Keys returns cache keys
117 | func (c *ExpirableCache[V]) Keys() (res []string) {
118 | return c.backend.Keys()
119 | }
120 |
121 | // Stat returns cache statistics
122 | func (c *ExpirableCache[V]) Stat() CacheStat {
123 | return CacheStat{
124 | Hits: c.Hits,
125 | Misses: c.Misses,
126 | Size: c.size(),
127 | Keys: c.keys(),
128 | Errors: c.Errors,
129 | }
130 | }
131 |
132 | // Close is supposed to kill the cleanup goroutine,
133 | // but that's not possible until https://github.com/hashicorp/golang-lru/issues/159 is solved,
134 | // so for now it just purges the cache.
135 | func (c *ExpirableCache[V]) Close() error {
136 | c.backend.Purge()
137 | atomic.StoreInt64(&c.currentSize, 0)
138 | return nil
139 | }
140 |
141 | // onBusEvent reacts to an invalidation message delivered by the event bus from another cache instance
142 | func (c *ExpirableCache[V]) onBusEvent(id, key string) {
143 | if id != c.id {
144 | c.backend.Remove(key)
145 | }
146 | }
147 |
148 | func (c *ExpirableCache[V]) size() int64 {
149 | return atomic.LoadInt64(&c.currentSize)
150 | }
151 |
152 | func (c *ExpirableCache[V]) keys() int {
153 | return c.backend.Len()
154 | }
155 |
156 | func (c *ExpirableCache[V]) allowed(key string, data V) bool {
157 | if c.backend.Len() >= c.maxKeys {
158 | return false
159 | }
160 | if c.maxKeySize > 0 && len(key) > c.maxKeySize {
161 | return false
162 | }
163 | if s, ok := any(data).(Sizer); ok {
164 | if c.maxValueSize > 0 && s.Size() >= c.maxValueSize {
165 | return false
166 | }
167 | }
168 | return true
169 | }
170 |
--------------------------------------------------------------------------------
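The size accounting above only kicks in for values implementing Sizer. A minimal sketch of such a value type (mirroring the sizedString helper used in the tests) with a size-limited expirable cache:

package main

import (
	"fmt"
	"log"

	"github.com/go-pkgz/lcw/v2"
)

// sized is a hypothetical value type; implementing Sizer lets the cache
// enforce MaxValSize and MaxCacheSize limits on it
type sized string

func (s sized) Size() int { return len(s) }

func main() {
	o := lcw.NewOpts[sized]()
	c, err := lcw.NewExpirableCache(o.MaxKeys(100), o.MaxCacheSize(1024))
	if err != nil {
		log.Fatalf("can't make cache: %v", err)
	}
	defer c.Close()

	v, err := c.Get("key", func() (sized, error) { return "value", nil })
	fmt.Println(v, err)        // value <nil>
	fmt.Println(c.Stat().Size) // 5, i.e. Size() of the cached value
}
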
/v2/expirable_cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "sort"
6 | "sync/atomic"
7 | "testing"
8 | "time"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
14 | func TestExpirableCache(t *testing.T) {
15 | o := NewOpts[string]()
16 | lc, err := NewExpirableCache(o.MaxKeys(5), o.TTL(time.Millisecond*100))
17 | require.NoError(t, err)
18 | for i := 0; i < 5; i++ {
19 | i := i
20 | _, e := lc.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
21 | return fmt.Sprintf("result-%d", i), nil
22 | })
23 | assert.NoError(t, e)
24 | time.Sleep(10 * time.Millisecond)
25 | }
26 |
27 | assert.Equal(t, 5, lc.Stat().Keys)
28 | assert.Equal(t, int64(5), lc.Stat().Misses)
29 |
30 | keys := lc.Keys()
31 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
32 | assert.EqualValues(t, []string{"key-0", "key-1", "key-2", "key-3", "key-4"}, keys)
33 |
34 | _, e := lc.Get("key-xx", func() (string, error) {
35 | return "result-xx", nil
36 | })
37 | assert.NoError(t, e)
38 | assert.Equal(t, 5, lc.Stat().Keys)
39 | assert.Equal(t, int64(6), lc.Stat().Misses)
40 |
41 | // let key-0 expire, GitHub Actions friendly way
42 | for lc.Stat().Keys > 4 {
43 | time.Sleep(time.Millisecond * 10)
44 | }
45 | assert.Equal(t, 4, lc.Stat().Keys)
46 |
47 | time.Sleep(210 * time.Millisecond)
48 | assert.Equal(t, 0, lc.keys())
49 | assert.Equal(t, []string{}, lc.Keys())
50 |
51 | assert.NoError(t, lc.Close())
52 | }
53 |
54 | func TestExpirableCache_MaxKeys(t *testing.T) {
55 | var coldCalls int32
56 | o := NewOpts[string]()
57 | lc, err := NewExpirableCache(o.MaxKeys(5), o.MaxValSize(10))
58 | require.NoError(t, err)
59 |
60 | // put 5 keys to cache
61 | for i := 0; i < 5; i++ {
62 | i := i
63 | res, e := lc.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
64 | atomic.AddInt32(&coldCalls, 1)
65 | return fmt.Sprintf("result-%d", i), nil
66 | })
67 | assert.NoError(t, e)
68 | assert.Equal(t, fmt.Sprintf("result-%d", i), res)
69 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
70 | }
71 |
72 | // check if really cached
73 | res, err := lc.Get("key-3", func() (string, error) {
74 | return "result-blah", nil
75 | })
76 | assert.NoError(t, err)
77 | assert.Equal(t, "result-3", res, "should be cached")
78 |
79 | // try to cache after maxKeys reached
80 | res, err = lc.Get("key-X", func() (string, error) {
81 | return "result-X", nil
82 | })
83 | assert.NoError(t, err)
84 | assert.Equal(t, "result-X", res)
85 | assert.Equal(t, 5, lc.keys())
86 |
87 | 	// put to cache and make sure it is cached
88 | res, err = lc.Get("key-Z", func() (string, error) {
89 | return "result-Z", nil
90 | })
91 | assert.NoError(t, err)
92 | assert.Equal(t, "result-Z", res)
93 |
94 | res, err = lc.Get("key-Z", func() (string, error) {
95 | return "result-Zzzz", nil
96 | })
97 | assert.NoError(t, err)
98 | assert.Equal(t, "result-Zzzz", res, "got non-cached value")
99 | assert.Equal(t, 5, lc.keys())
100 |
101 | assert.NoError(t, lc.Close())
102 | }
103 |
104 | func TestExpirableCache_BadOptions(t *testing.T) {
105 | o := NewOpts[string]()
106 | _, err := NewExpirableCache(o.MaxCacheSize(-1))
107 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
108 |
109 | _, err = NewExpirableCache(o.MaxKeySize(-1))
110 | assert.EqualError(t, err, "failed to set cache option: negative max key size")
111 |
112 | _, err = NewExpirableCache(o.MaxKeys(-1))
113 | assert.EqualError(t, err, "failed to set cache option: negative max keys")
114 |
115 | _, err = NewExpirableCache(o.MaxValSize(-1))
116 | assert.EqualError(t, err, "failed to set cache option: negative max value size")
117 |
118 | _, err = NewExpirableCache(o.TTL(-1))
119 | assert.EqualError(t, err, "failed to set cache option: negative ttl")
120 | }
121 |
122 | func TestExpirableCacheWithBus(t *testing.T) {
123 | ps := &mockPubSub{}
124 | o := NewOpts[string]()
125 | lc1, err := NewExpirableCache(o.MaxKeys(5), o.TTL(time.Millisecond*100), o.EventBus(ps))
126 | require.NoError(t, err)
127 | defer lc1.Close()
128 |
129 | lc2, err := NewExpirableCache(o.MaxKeys(50), o.TTL(time.Millisecond*5000), o.EventBus(ps))
130 | require.NoError(t, err)
131 | defer lc2.Close()
132 |
133 | // add 5 keys to the first node cache
134 | for i := 0; i < 5; i++ {
135 | i := i
136 | _, e := lc1.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
137 | return fmt.Sprintf("result-%d", i), nil
138 | })
139 | assert.NoError(t, e)
140 | time.Sleep(10 * time.Millisecond)
141 | }
142 |
143 | assert.Equal(t, 0, len(ps.CalledKeys()), "no events")
144 | assert.Equal(t, 5, lc1.Stat().Keys)
145 | assert.Equal(t, int64(5), lc1.Stat().Misses)
146 |
147 | // add key-1 key to the second node
148 | _, e := lc2.Get("key-1", func() (string, error) {
149 | return "result-111", nil
150 | })
151 | assert.NoError(t, e)
152 | assert.Equal(t, 1, lc2.Stat().Keys)
153 | assert.Equal(t, int64(1), lc2.Stat().Misses, lc2.Stat())
154 |
155 | // let key-0 expire
156 | for lc1.Stat().Keys > 4 {
157 | ps.Wait() // wait for onBusEvent goroutines to finish
158 | time.Sleep(time.Millisecond * 5)
159 | }
160 | assert.Equal(t, 4, lc1.Stat().Keys)
161 | assert.Equal(t, 1, lc2.Stat().Keys, "key-1 still in cache2")
162 | assert.Equal(t, 1, len(ps.CalledKeys()))
163 |
164 | time.Sleep(210 * time.Millisecond) // let all keys expire
165 | ps.Wait() // wait for onBusEvent goroutines to finish
166 | assert.Equal(t, 6, len(ps.CalledKeys()), "6 events, key-1 expired %+v", ps.calledKeys)
167 | assert.Equal(t, 0, lc1.Stat().Keys)
168 | assert.Equal(t, 0, lc2.Stat().Keys, "key-1 removed from cache2")
169 | }
170 |
--------------------------------------------------------------------------------
/v2/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/go-pkgz/lcw/v2
2 |
3 | go 1.21
4 |
5 | require (
6 | github.com/alicebob/miniredis/v2 v2.31.1
7 | github.com/google/uuid v1.6.0
8 | github.com/hashicorp/go-multierror v1.1.1
9 | github.com/hashicorp/golang-lru/v2 v2.0.7
10 | github.com/redis/go-redis/v9 v9.4.0
11 | github.com/stretchr/testify v1.8.4
12 | )
13 |
14 | require (
15 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect
16 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
17 | github.com/davecgh/go-spew v1.1.1 // indirect
18 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
19 | github.com/hashicorp/errwrap v1.1.0 // indirect
20 | github.com/pmezard/go-difflib v1.0.0 // indirect
21 | github.com/yuin/gopher-lua v1.1.1 // indirect
22 | gopkg.in/yaml.v3 v3.0.1 // indirect
23 | )
24 |
--------------------------------------------------------------------------------
/v2/go.sum:
--------------------------------------------------------------------------------
1 | github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
2 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
3 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
4 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE=
5 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
6 | github.com/alicebob/miniredis/v2 v2.31.1 h1:7XAt0uUg3DtwEKW5ZAGa+K7FZV2DdKQo5K/6TTnfX8Y=
7 | github.com/alicebob/miniredis/v2 v2.31.1/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CASoprx0wulRT6HBg=
8 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
9 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
10 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
11 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
12 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
13 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
14 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
15 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
16 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
17 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
18 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
19 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
20 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
21 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
22 | github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
23 | github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
24 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
25 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
26 | github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
27 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
28 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
29 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
30 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
31 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
32 | github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
33 | github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
34 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
35 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
36 | github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk=
37 | github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
38 | github.com/redis/go-redis/v9 v9.5.0 h1:Xe9TKMmZv939gwTBcvc0n1tzK5l2re0pKw/W/tN3amw=
39 | github.com/redis/go-redis/v9 v9.5.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
40 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
41 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
42 | github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE=
43 | github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
44 | github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
45 | github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
46 | golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
47 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
48 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
49 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
50 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
51 |
--------------------------------------------------------------------------------
/v2/lru_cache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "sync/atomic"
6 |
7 | "github.com/google/uuid"
8 | lru "github.com/hashicorp/golang-lru/v2"
9 |
10 | "github.com/go-pkgz/lcw/v2/eventbus"
11 | )
12 |
13 | // LruCache wraps hashicorp's lru.Cache with loading cache Get and size limits
14 | type LruCache[V any] struct {
15 | Workers[V]
16 | CacheStat
17 | backend *lru.Cache[string, V]
18 | currentSize int64
19 | id string // uuid identifying cache instance
20 | }
21 |
22 | // NewLruCache makes LRU LoadingCache implementation, 1000 max keys by default
23 | func NewLruCache[V any](opts ...Option[V]) (*LruCache[V], error) {
24 | res := LruCache[V]{
25 | Workers: Workers[V]{
26 | maxKeys: 1000,
27 | maxValueSize: 0,
28 | eventBus: &eventbus.NopPubSub{},
29 | },
30 | id: uuid.New().String(),
31 | }
32 | for _, opt := range opts {
33 | if err := opt(&res.Workers); err != nil {
34 | return nil, fmt.Errorf("failed to set cache option: %w", err)
35 | }
36 | }
37 |
38 | err := res.init()
39 | return &res, err
40 | }
41 |
42 | func (c *LruCache[V]) init() error {
43 | if err := c.eventBus.Subscribe(c.onBusEvent); err != nil {
44 | return fmt.Errorf("can't subscribe to event bus: %w", err)
45 | }
46 |
47 | onEvicted := func(key string, value V) {
48 | if c.onEvicted != nil {
49 | c.onEvicted(key, value)
50 | }
51 | if s, ok := any(value).(Sizer); ok {
52 | size := s.Size()
53 | atomic.AddInt64(&c.currentSize, -1*int64(size))
54 | }
55 | _ = c.eventBus.Publish(c.id, key) // signal invalidation to other nodes
56 | }
57 |
58 | var err error
59 | // OnEvicted is called automatically for entries evicted by the backend as well as for manually deleted ones
60 | if c.backend, err = lru.NewWithEvict[string, V](c.maxKeys, onEvicted); err != nil {
61 | return fmt.Errorf("failed to make lru cache backend: %w", err)
62 | }
63 |
64 | return nil
65 | }
66 |
67 | // Get gets value by key or load with fn if not found in cache
68 | func (c *LruCache[V]) Get(key string, fn func() (V, error)) (data V, err error) {
69 | if v, ok := c.backend.Get(key); ok {
70 | atomic.AddInt64(&c.Hits, 1)
71 | return v, nil
72 | }
73 |
74 | if data, err = fn(); err != nil {
75 | atomic.AddInt64(&c.Errors, 1)
76 | return data, err
77 | }
78 |
79 | atomic.AddInt64(&c.Misses, 1)
80 |
81 | if !c.allowed(key, data) {
82 | return data, nil
83 | }
84 |
85 | c.backend.Add(key, data)
86 |
87 | if s, ok := any(data).(Sizer); ok {
88 | atomic.AddInt64(&c.currentSize, int64(s.Size()))
89 | if c.maxCacheSize > 0 && atomic.LoadInt64(&c.currentSize) > c.maxCacheSize {
90 | for atomic.LoadInt64(&c.currentSize) > c.maxCacheSize {
91 | c.backend.RemoveOldest()
92 | }
93 | }
94 | }
95 |
96 | return data, nil
97 | }
98 |
99 | // Peek returns the key value (or the zero value if not found) without updating the "recently used"-ness of the key.
100 | func (c *LruCache[V]) Peek(key string) (V, bool) {
101 | return c.backend.Peek(key)
102 | }
103 |
104 | // Purge clears the cache completely.
105 | func (c *LruCache[V]) Purge() {
106 | c.backend.Purge()
107 | atomic.StoreInt64(&c.currentSize, 0)
108 | }
109 |
110 | // Invalidate removes keys with passed predicate fn, i.e. fn(key) should be true to get evicted
111 | func (c *LruCache[V]) Invalidate(fn func(key string) bool) {
112 | for _, k := range c.backend.Keys() { // Keys() returns a copy of the cache's keys, safe to remove directly
113 | if fn(k) {
114 | c.backend.Remove(k)
115 | }
116 | }
117 | }
118 |
119 | // Delete cache item by key
120 | func (c *LruCache[V]) Delete(key string) {
121 | c.backend.Remove(key)
122 | }
123 |
124 | // Keys returns cache keys
125 | func (c *LruCache[V]) Keys() (res []string) {
126 | return c.backend.Keys()
127 | }
128 |
129 | // Stat returns cache statistics
130 | func (c *LruCache[V]) Stat() CacheStat {
131 | return CacheStat{
132 | Hits: c.Hits,
133 | Misses: c.Misses,
134 | Size: c.size(),
135 | Keys: c.keys(),
136 | Errors: c.Errors,
137 | }
138 | }
139 |
140 | // Close does nothing for this type of cache
141 | func (c *LruCache[V]) Close() error {
142 | return nil
143 | }
144 |
145 | // onBusEvent reacts on invalidation message triggered by event bus from another cache instance
146 | func (c *LruCache[V]) onBusEvent(id, key string) {
147 | if id != c.id && c.backend.Contains(key) { // prevent reaction on event from this cache
148 | c.backend.Remove(key)
149 | }
150 | }
151 |
152 | func (c *LruCache[V]) size() int64 {
153 | return atomic.LoadInt64(&c.currentSize)
154 | }
155 |
156 | func (c *LruCache[V]) keys() int {
157 | return c.backend.Len()
158 | }
159 |
160 | func (c *LruCache[V]) allowed(key string, data V) bool {
161 | if c.maxKeySize > 0 && len(key) > c.maxKeySize {
162 | return false
163 | }
164 | if s, ok := any(data).(Sizer); ok {
165 | if c.maxValueSize > 0 && s.Size() >= c.maxValueSize {
166 | return false
167 | }
168 | }
169 | return true
170 | }
171 |
--------------------------------------------------------------------------------
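Note on size limits: MaxValSize and MaxCacheSize only take effect when cached values report their own size via the package's Sizer interface (assumed here to be just Size() int, as the comparisons in allowed() and the currentSize accounting suggest). A minimal external usage sketch with an illustrative sizedBuf type:

package main

import (
	"fmt"
	"log"

	lcw "github.com/go-pkgz/lcw/v2"
)

// sizedBuf is an illustrative value type; it reports its own size so
// LruCache can enforce MaxValSize and MaxCacheSize.
type sizedBuf []byte

func (b sizedBuf) Size() int { return len(b) }

func main() {
	o := lcw.NewOpts[sizedBuf]()
	// values over 1KB are not cached, total cached size capped at 10KB
	cache, err := lcw.NewLruCache(o.MaxValSize(1024), o.MaxCacheSize(10*1024))
	if err != nil {
		log.Fatal(err)
	}
	v, err := cache.Get("key", func() (sizedBuf, error) { return sizedBuf("payload"), nil })
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(v), cache.Stat())
}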
/v2/lru_cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "log"
7 | "math/rand"
8 | "net/http"
9 | "net/http/httptest"
10 | "os"
11 | "sort"
12 | "strconv"
13 | "sync/atomic"
14 | "testing"
15 | "time"
16 |
17 | "github.com/stretchr/testify/assert"
18 | "github.com/stretchr/testify/require"
19 |
20 | "github.com/go-pkgz/lcw/v2/eventbus"
21 | )
22 |
23 | func TestLruCache_MaxKeys(t *testing.T) {
24 | var coldCalls int32
25 | o := NewOpts[string]()
26 | lc, err := NewLruCache(o.MaxKeys(5), o.MaxValSize(10))
27 | require.NoError(t, err)
28 |
29 | // put 5 keys to cache
30 | for i := 0; i < 5; i++ {
31 | i := i
32 | res, e := lc.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
33 | atomic.AddInt32(&coldCalls, 1)
34 | return fmt.Sprintf("result-%d", i), nil
35 | })
36 | assert.NoError(t, e)
37 | assert.Equal(t, fmt.Sprintf("result-%d", i), res)
38 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
39 | }
40 |
41 | keys := lc.Keys()
42 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
43 | assert.EqualValues(t, []string{"key-0", "key-1", "key-2", "key-3", "key-4"}, keys)
44 |
45 | // check if really cached
46 | res, err := lc.Get("key-3", func() (string, error) {
47 | return "result-blah", nil
48 | })
49 | assert.NoError(t, err)
50 | assert.Equal(t, "result-3", res, "should be cached")
51 |
52 | // try to cache after maxKeys reached
53 | res, err = lc.Get("key-X", func() (string, error) {
54 | return "result-X", nil
55 | })
56 | assert.NoError(t, err)
57 | assert.Equal(t, "result-X", res)
58 | assert.Equal(t, 5, lc.backend.Len())
59 |
60 | // put to cache and make sure it cached
61 | res, err = lc.Get("key-Z", func() (string, error) {
62 | return "result-Z", nil
63 | })
64 | assert.NoError(t, err)
65 | assert.Equal(t, "result-Z", res)
66 |
67 | res, err = lc.Get("key-Z", func() (string, error) {
68 | return "result-Zzzz", nil
69 | })
70 | assert.NoError(t, err)
71 | assert.Equal(t, "result-Z", res, "got cached value")
72 | assert.Equal(t, 5, lc.backend.Len())
73 | }
74 |
75 | func TestLruCache_BadOptions(t *testing.T) {
76 | o := NewOpts[string]()
77 | _, err := NewLruCache(o.MaxCacheSize(-1))
78 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
79 |
80 | _, err = NewLruCache(o.MaxKeySize(-1))
81 | assert.EqualError(t, err, "failed to set cache option: negative max key size")
82 |
83 | _, err = NewLruCache(o.MaxKeys(-1))
84 | assert.EqualError(t, err, "failed to set cache option: negative max keys")
85 |
86 | _, err = NewLruCache(o.MaxValSize(-1))
87 | assert.EqualError(t, err, "failed to set cache option: negative max value size")
88 |
89 | _, err = NewLruCache(o.TTL(-1))
90 | assert.EqualError(t, err, "failed to set cache option: negative ttl")
91 | }
92 |
93 | func TestLruCache_MaxKeysWithBus(t *testing.T) {
94 | ps := &mockPubSub{}
95 | o := NewOpts[string]()
96 |
97 | var coldCalls int32
98 | lc1, err := NewLruCache(o.MaxKeys(5), o.MaxValSize(10), o.EventBus(ps))
99 | require.NoError(t, err)
100 | defer lc1.Close()
101 |
102 | lc2, err := NewLruCache(o.MaxKeys(50), o.MaxValSize(100), o.EventBus(ps))
103 | require.NoError(t, err)
104 | defer lc2.Close()
105 |
106 | // put 5 keys to cache1
107 | for i := 0; i < 5; i++ {
108 | i := i
109 | res, e := lc1.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
110 | atomic.AddInt32(&coldCalls, 1)
111 | return fmt.Sprintf("result-%d", i), nil
112 | })
113 | assert.NoError(t, e)
114 | assert.Equal(t, fmt.Sprintf("result-%d", i), res)
115 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
116 | }
117 | // check if really cached
118 | res, err := lc1.Get("key-3", func() (string, error) {
119 | return "result-blah", nil
120 | })
121 | assert.NoError(t, err)
122 | assert.Equal(t, "result-3", res, "should be cached")
123 |
124 | assert.Equal(t, 0, len(ps.CalledKeys()), "no events")
125 |
126 | // put 1 key to cache2
127 | res, e := lc2.Get("key-1", func() (string, error) {
128 | return "result-111", nil
129 | })
130 | assert.NoError(t, e)
131 | assert.Equal(t, "result-111", res)
132 |
133 | // try to cache1 after maxKeys reached, will remove key-0
134 | res, err = lc1.Get("key-X", func() (string, error) {
135 | return "result-X", nil
136 | })
137 | assert.NoError(t, err)
138 | assert.Equal(t, "result-X", res)
139 | assert.Equal(t, 5, lc1.backend.Len())
140 |
141 | assert.Equal(t, 1, len(ps.CalledKeys()), "1 event, key-0 expired")
142 |
143 | assert.Equal(t, 1, lc2.backend.Len(), "cache2 still has key-1")
144 |
145 | // try to cache1 after maxKeys reached, will remove key-1
146 | res, err = lc1.Get("key-X2", func() (string, error) {
147 | return "result-X", nil
148 | })
149 | assert.NoError(t, err)
150 | assert.Equal(t, "result-X", res)
151 |
152 | assert.Equal(t, 2, len(ps.CalledKeys()), "2 events, key-1 expired")
153 |
154 | // wait for onBusEvent goroutines to finish
155 | ps.Wait()
156 |
157 | assert.Equal(t, 0, lc2.backend.Len(), "cache2 removed key-1")
158 | }
159 |
160 | func TestLruCache_MaxKeysWithRedis(t *testing.T) {
161 | if _, ok := os.LookupEnv("ENABLE_REDIS_TESTS"); !ok {
162 | t.Skip("ENABLE_REDIS_TESTS env variable is not set, not expecting Redis to be ready at 127.0.0.1:6379")
163 | }
164 |
165 | var coldCalls int32
166 |
167 | //nolint:gosec // not used for security purpose
168 | channel := "lcw-test-" + strconv.Itoa(rand.Intn(1000000))
169 |
170 | redisPubSub1, err := eventbus.NewRedisPubSub("127.0.0.1:6379", channel)
171 | require.NoError(t, err)
172 | o := NewOpts[string]()
173 | lc1, err := NewLruCache(o.MaxKeys(5), o.MaxValSize(10), o.EventBus(redisPubSub1))
174 | require.NoError(t, err)
175 | defer lc1.Close()
176 |
177 | redisPubSub2, err := eventbus.NewRedisPubSub("127.0.0.1:6379", channel)
178 | require.NoError(t, err)
179 | lc2, err := NewLruCache(o.MaxKeys(50), o.MaxValSize(100), o.EventBus(redisPubSub2))
180 | require.NoError(t, err)
181 | defer lc2.Close()
182 |
183 | // put 5 keys to cache1
184 | for i := 0; i < 5; i++ {
185 | i := i
186 | res, e := lc1.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
187 | atomic.AddInt32(&coldCalls, 1)
188 | return fmt.Sprintf("result-%d", i), nil
189 | })
190 | assert.NoError(t, e)
191 | assert.Equal(t, fmt.Sprintf("result-%d", i), res)
192 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
193 | }
194 | // check if really cached
195 | res, err := lc1.Get("key-3", func() (string, error) {
196 | return "result-blah", nil
197 | })
198 | assert.NoError(t, err)
199 | assert.Equal(t, "result-3", res, "should be cached")
200 |
201 | // put 1 key to cache2
202 | res, e := lc2.Get("key-1", func() (string, error) {
203 | return "result-111", nil
204 | })
205 | assert.NoError(t, e)
206 | assert.Equal(t, "result-111", res)
207 |
208 | // try to cache1 after maxKeys reached, will remove key-0
209 | res, err = lc1.Get("key-X", func() (string, error) {
210 | return "result-X", nil
211 | })
212 | assert.NoError(t, err)
213 | assert.Equal(t, "result-X", res)
214 | assert.Equal(t, 5, lc1.backend.Len())
215 |
216 | assert.Equal(t, 1, lc2.backend.Len(), "cache2 still has key-1")
217 |
218 | // try to cache1 after maxKeys reached, will remove key-1
219 | res, err = lc1.Get("key-X2", func() (string, error) {
220 | return "result-X", nil
221 | })
222 | assert.NoError(t, err)
223 | assert.Equal(t, "result-X", res)
224 |
225 | time.Sleep(time.Second)
226 | assert.Equal(t, 0, lc2.backend.Len(), "cache2 removed key-1")
227 | assert.NoError(t, redisPubSub1.Close())
228 | assert.NoError(t, redisPubSub2.Close())
229 | }
230 |
231 | // LruCache illustrates the use of LRU loading cache
232 | func ExampleLruCache() {
233 | // set up test server for single response
234 | var hitCount int
235 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
236 | if r.URL.String() == "/post/42" && hitCount == 0 {
237 | _, _ = w.Write([]byte("test response"))
238 | return
239 | }
240 | w.WriteHeader(404)
241 | }))
242 |
243 | // load page function
244 | loadURL := func(url string) (string, error) {
245 | resp, err := http.Get(url) // nolint
246 | if err != nil {
247 | return "", err
248 | }
249 | b, err := io.ReadAll(resp.Body)
250 | _ = resp.Body.Close()
251 | if err != nil {
252 | return "", err
253 | }
254 | return string(b), nil
255 | }
256 |
257 | // fixed size LRU cache, 100 items, up to 10k in total size
258 | o := NewOpts[string]()
259 | cache, err := NewLruCache(o.MaxKeys(100), o.MaxCacheSize(10*1024))
260 | if err != nil {
261 | log.Fatalf("can't make lru cache, %v", err)
262 | }
263 |
264 | // url not in cache, load data
265 | url := ts.URL + "/post/42"
266 | val, err := cache.Get(url, func() (val string, err error) {
267 | return loadURL(url)
268 | })
269 | if err != nil {
270 | log.Fatalf("can't load url %s, %v", url, err)
271 | }
272 | fmt.Println(val)
273 |
274 | // url not in cache, load data
275 | val, err = cache.Get(url, func() (val string, err error) {
276 | return loadURL(url)
277 | })
278 | if err != nil {
279 | log.Fatalf("can't load url %s, %v", url, err)
280 | }
281 | fmt.Println(val)
282 |
283 | // url cached, skip load and get from the cache
284 | val, err = cache.Get(url, func() (val string, err error) {
285 | return loadURL(url)
286 | })
287 | if err != nil {
288 | log.Fatalf("can't load url %s, %v", url, err)
289 | }
290 | fmt.Println(val)
291 |
292 | // get cache stats
293 | stats := cache.Stat()
294 | fmt.Printf("%+v\n", stats)
295 |
296 | // close test HTTP server after all log.Fatalf are passed
297 | ts.Close()
298 |
299 | // Output:
300 | // test response
301 | // test response
302 | // test response
303 | // {hits:2, misses:1, ratio:0.67, keys:1, size:0, errors:0}
304 | }
305 |
--------------------------------------------------------------------------------
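The bus-backed tests above rely on the eventbus.PubSub contract used by LruCache: Subscribe registers a callback func(fromID, key string), and Publish fans an invalidated key out to the other instances. A minimal in-process sketch under that assumed interface shape (localBus is illustrative only, not part of the package):

package main

import (
	"fmt"
	"log"
	"sync"

	lcw "github.com/go-pkgz/lcw/v2"
)

// localBus is a hypothetical in-process pub/sub; every Publish is fanned out
// to all subscribers in separate goroutines, similar to the mockPubSub in the tests.
type localBus struct {
	mu   sync.Mutex
	wg   sync.WaitGroup
	subs []func(fromID, key string)
}

func (b *localBus) Subscribe(fn func(fromID, key string)) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.subs = append(b.subs, fn)
	return nil
}

func (b *localBus) Publish(fromID, key string) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, fn := range b.subs {
		fn := fn
		b.wg.Add(1)
		go func() { defer b.wg.Done(); fn(fromID, key) }()
	}
	return nil
}

func main() {
	bus := &localBus{}
	o := lcw.NewOpts[string]()

	c1, err := lcw.NewLruCache(o.MaxKeys(10), o.EventBus(bus))
	if err != nil {
		log.Fatal(err)
	}
	c2, err := lcw.NewLruCache(o.MaxKeys(10), o.EventBus(bus))
	if err != nil {
		log.Fatal(err)
	}

	_, _ = c1.Get("shared", func() (string, error) { return "v1", nil })
	_, _ = c2.Get("shared", func() (string, error) { return "v1", nil })

	c1.Delete("shared") // eviction on c1 publishes the key; c2 drops it on the bus event
	bus.wg.Wait()       // let the asynchronous bus callbacks finish

	fmt.Println(len(c2.Keys())) // 0
}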
/v2/options.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/go-pkgz/lcw/v2/eventbus"
8 | )
9 |
10 | type Workers[V any] struct {
11 | maxKeys int
12 | maxValueSize int
13 | maxKeySize int
14 | maxCacheSize int64
15 | ttl time.Duration
16 | onEvicted func(key string, value V)
17 | eventBus eventbus.PubSub
18 | strToV func(string) V
19 | }
20 |
21 | // Option func type
22 | type Option[V any] func(o *Workers[V]) error
23 |
24 | // WorkerOptions holds the option setting methods
25 | type WorkerOptions[T any] struct{}
26 |
27 | // NewOpts creates a new WorkerOptions instance
28 | func NewOpts[T any]() *WorkerOptions[T] {
29 | return &WorkerOptions[T]{}
30 | }
31 |
32 | // MaxValSize functional option defines the largest value's size allowed to be cached
33 | // By default it is 0, which means unlimited.
34 | func (o *WorkerOptions[V]) MaxValSize(maximum int) Option[V] {
35 | return func(o *Workers[V]) error {
36 | if maximum < 0 {
37 | return fmt.Errorf("negative max value size")
38 | }
39 | o.maxValueSize = maximum
40 | return nil
41 | }
42 | }
43 |
44 | // MaxKeySize functional option defines the largest key's size allowed to be used in cache
45 | // By default it is 0, which means unlimited.
46 | func (o *WorkerOptions[V]) MaxKeySize(maximum int) Option[V] {
47 | return func(o *Workers[V]) error {
48 | if maximum < 0 {
49 | return fmt.Errorf("negative max key size")
50 | }
51 | o.maxKeySize = maximum
52 | return nil
53 | }
54 | }
55 |
56 | // MaxKeys functional option defines how many keys to keep.
57 | // By default, it is 0, which means unlimited.
58 | func (o *WorkerOptions[V]) MaxKeys(maximum int) Option[V] {
59 | return func(o *Workers[V]) error {
60 | if maximum < 0 {
61 | return fmt.Errorf("negative max keys")
62 | }
63 | o.maxKeys = maximum
64 | return nil
65 | }
66 | }
67 |
68 | // MaxCacheSize functional option defines the total size of cached data.
69 | // By default, it is 0, which means unlimited.
70 | func (o *WorkerOptions[V]) MaxCacheSize(maximum int64) Option[V] {
71 | return func(o *Workers[V]) error {
72 | if maximum < 0 {
73 | return fmt.Errorf("negative max cache size")
74 | }
75 | o.maxCacheSize = maximum
76 | return nil
77 | }
78 | }
79 |
80 | // TTL functional option defines the expiration duration.
81 | // Used by ExpirableCache and RedisCache; ignored by LruCache
82 | func (o *WorkerOptions[V]) TTL(ttl time.Duration) Option[V] {
83 | return func(o *Workers[V]) error {
84 | if ttl < 0 {
85 | return fmt.Errorf("negative ttl")
86 | }
87 | o.ttl = ttl
88 | return nil
89 | }
90 | }
91 |
92 | // OnEvicted sets callback on invalidation event
93 | func (o *WorkerOptions[V]) OnEvicted(fn func(key string, value V)) Option[V] {
94 | return func(o *Workers[V]) error {
95 | o.onEvicted = fn
96 | return nil
97 | }
98 | }
99 |
100 | // EventBus sets PubSub for distributed cache invalidation
101 | func (o *WorkerOptions[V]) EventBus(pubSub eventbus.PubSub) Option[V] {
102 | return func(o *Workers[V]) error {
103 | o.eventBus = pubSub
104 | return nil
105 | }
106 | }
107 |
108 | // StrToV sets strToV function for RedisCache
109 | func (o *WorkerOptions[V]) StrToV(fn func(string) V) Option[V] {
110 | return func(o *Workers[V]) error {
111 | o.strToV = fn
112 | return nil
113 | }
114 | }
115 |
--------------------------------------------------------------------------------
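The options are plain closures over the Workers struct, so they can be built once and reused across caches, and validation errors surface from the constructor. A small sketch:

package main

import (
	"fmt"
	"log"
	"time"

	lcw "github.com/go-pkgz/lcw/v2"
)

func main() {
	o := lcw.NewOpts[string]()

	// options are just funcs returning closures; collect and reuse them
	common := []lcw.Option[string]{o.MaxKeys(500), o.MaxKeySize(64), o.TTL(time.Minute)}

	ec, err := lcw.NewExpirableCache(common...)
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()
	fmt.Println(ec.Stat())

	// an invalid option makes the constructor fail with a wrapped error
	if _, err = lcw.NewExpirableCache(o.MaxKeys(-1)); err != nil {
		fmt.Println("construction failed:", err)
	}
}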
/v2/redis_cache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "reflect"
8 | "sync/atomic"
9 | "time"
10 |
11 | "github.com/redis/go-redis/v9"
12 | )
13 |
14 | // RedisValueSizeLimit is maximum allowed value size in Redis
15 | const RedisValueSizeLimit = 512 * 1024 * 1024
16 |
17 | // RedisCache implements LoadingCache for Redis.
18 | type RedisCache[V any] struct {
19 | Workers[V]
20 | CacheStat
21 | backend redis.UniversalClient
22 | }
23 |
24 | // NewRedisCache makes Redis LoadingCache implementation.
25 | // Supports only string and string-based types and will return error otherwise.
26 | func NewRedisCache[V any](backend redis.UniversalClient, opts ...Option[V]) (*RedisCache[V], error) {
27 | // V must be string or a string-based type; for string-based (non-string) types the StrToV converter has to be set, otherwise the constructor returns an error
28 |
29 | res := RedisCache[V]{
30 | Workers: Workers[V]{
31 | ttl: 5 * time.Minute,
32 | },
33 | }
34 | for _, opt := range opts {
35 | if err := opt(&res.Workers); err != nil {
36 | return nil, fmt.Errorf("failed to set cache option: %w", err)
37 | }
38 | }
39 |
40 | // check if underlying type is string, so we can safely store it in Redis
41 | var v V
42 | if reflect.TypeOf(v).Kind() != reflect.String {
43 | return nil, fmt.Errorf("can't store non-string types in Redis cache")
44 | }
45 | switch any(v).(type) {
46 | case string:
47 | // check strToV option only for string-like but non string types
48 | default:
49 | if res.strToV == nil {
50 | return nil, fmt.Errorf("StrToV option should be set for string-like type")
51 | }
52 | }
53 |
54 | if res.maxValueSize <= 0 || res.maxValueSize > RedisValueSizeLimit {
55 | res.maxValueSize = RedisValueSizeLimit
56 | }
57 |
58 | res.backend = backend
59 |
60 | return &res, nil
61 | }
62 |
63 | // Get gets value by key or load with fn if not found in cache
64 | func (c *RedisCache[V]) Get(key string, fn func() (V, error)) (data V, err error) {
65 | v, getErr := c.backend.Get(context.Background(), key).Result()
66 | switch {
67 | // RedisClient returns a nil error when it finds the key in DB
68 | case getErr == nil:
69 | atomic.AddInt64(&c.Hits, 1)
70 | switch any(data).(type) {
71 | case string:
72 | return any(v).(V), nil
73 | default:
74 | return c.strToV(v), nil
75 | }
76 | // RedisClient returns redis.Nil when it doesn't find the key in DB
77 | case errors.Is(getErr, redis.Nil):
78 | if data, err = fn(); err != nil {
79 | atomic.AddInt64(&c.Errors, 1)
80 | return data, err
81 | }
82 | // RedisClient returns a non-nil error when something goes wrong while getting the data
83 | default:
84 | atomic.AddInt64(&c.Errors, 1)
85 | switch any(data).(type) {
86 | case string:
87 | return any(v).(V), getErr
88 | default:
89 | return c.strToV(v), getErr
90 | }
91 | }
92 | atomic.AddInt64(&c.Misses, 1)
93 |
94 | if !c.allowed(key, data) {
95 | return data, nil
96 | }
97 |
98 | _, setErr := c.backend.Set(context.Background(), key, data, c.ttl).Result()
99 | if setErr != nil {
100 | atomic.AddInt64(&c.Errors, 1)
101 | return data, setErr
102 | }
103 |
104 | return data, nil
105 | }
106 |
107 | // Invalidate removes keys with passed predicate fn, i.e. fn(key) should be true to get evicted
108 | func (c *RedisCache[V]) Invalidate(fn func(key string) bool) {
109 | for _, key := range c.backend.Keys(context.Background(), "*").Val() { // Keys() returns a copy of the stored keys, safe to remove directly
110 | if fn(key) {
111 | c.backend.Del(context.Background(), key)
112 | }
113 | }
114 | }
115 |
116 | // Peek returns the key value (or the zero value if not found) without updating the "recently used"-ness of the key.
117 | func (c *RedisCache[V]) Peek(key string) (data V, found bool) {
118 | ret, err := c.backend.Get(context.Background(), key).Result()
119 | if err != nil {
120 | var emptyValue V
121 | return emptyValue, false
122 | }
123 | switch any(data).(type) {
124 | case string:
125 | return any(ret).(V), true
126 | default:
127 | return c.strToV(ret), true
128 | }
129 | }
130 |
131 | // Purge clears the cache completely.
132 | func (c *RedisCache[V]) Purge() {
133 | c.backend.FlushDB(context.Background())
134 |
135 | }
136 |
137 | // Delete cache item by key
138 | func (c *RedisCache[V]) Delete(key string) {
139 | c.backend.Del(context.Background(), key)
140 | }
141 |
142 | // Keys gets all keys for the cache
143 | func (c *RedisCache[V]) Keys() (res []string) {
144 | return c.backend.Keys(context.Background(), "*").Val()
145 | }
146 |
147 | // Stat returns cache statistics
148 | func (c *RedisCache[V]) Stat() CacheStat {
149 | return CacheStat{
150 | Hits: c.Hits,
151 | Misses: c.Misses,
152 | Size: c.size(),
153 | Keys: c.keys(),
154 | Errors: c.Errors,
155 | }
156 | }
157 |
158 | // Close closes underlying connections
159 | func (c *RedisCache[V]) Close() error {
160 | return c.backend.Close()
161 | }
162 |
163 | func (c *RedisCache[V]) size() int64 {
164 | return 0
165 | }
166 |
167 | func (c *RedisCache[V]) keys() int {
168 | return int(c.backend.DBSize(context.Background()).Val())
169 | }
170 |
171 | func (c *RedisCache[V]) allowed(key string, data V) bool {
172 | if c.maxKeys > 0 && c.backend.DBSize(context.Background()).Val() >= int64(c.maxKeys) {
173 | return false
174 | }
175 | if c.maxKeySize > 0 && len(key) > c.maxKeySize {
176 | return false
177 | }
178 | if s, ok := any(data).(Sizer); ok {
179 | if c.maxValueSize > 0 && (s.Size() >= c.maxValueSize) {
180 | return false
181 | }
182 | }
183 | return true
184 | }
185 |
--------------------------------------------------------------------------------
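A standalone usage sketch for the Redis-backed cache, assuming a Redis server is reachable at 127.0.0.1:6379; plain string values need no StrToV converter, while custom string-based types would also require it, as the constructor check above enforces:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/redis/go-redis/v9"

	lcw "github.com/go-pkgz/lcw/v2"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) // assumes a local Redis

	o := lcw.NewOpts[string]()
	// override the default 5m TTL; values expire on the Redis side
	cache, err := lcw.NewRedisCache(client, o.TTL(10*time.Minute), o.MaxKeys(1000))
	if err != nil {
		log.Fatal(err)
	}
	defer cache.Close() // closes the underlying redis client as well

	v, err := cache.Get("user:42", func() (string, error) { return "loaded-value", nil })
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v, cache.Stat())
}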
/v2/redis_cache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sort"
7 | "sync/atomic"
8 | "testing"
9 | "time"
10 |
11 | "github.com/alicebob/miniredis/v2"
12 | "github.com/redis/go-redis/v9"
13 | "github.com/stretchr/testify/assert"
14 | "github.com/stretchr/testify/require"
15 | )
16 |
17 | // newTestRedisServer starts an in-memory miniredis server for tests.
18 | func newTestRedisServer() *miniredis.Miniredis {
19 | mr, err := miniredis.Run()
20 | if err != nil {
21 | panic(err)
22 | }
23 |
24 | return mr
25 | }
26 |
27 | func TestExpirableRedisCache(t *testing.T) {
28 | server := newTestRedisServer()
29 | defer server.Close()
30 | client := redis.NewClient(&redis.Options{
31 | Addr: server.Addr()})
32 | defer client.Close()
33 | o := NewOpts[string]()
34 | rc, err := NewRedisCache(client, o.MaxKeys(5), o.TTL(time.Second*6))
35 | require.NoError(t, err)
36 | defer rc.Close()
37 | require.NoError(t, err)
38 | for i := 0; i < 5; i++ {
39 | i := i
40 | _, e := rc.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
41 | return fmt.Sprintf("result-%d", i), nil
42 | })
43 | assert.NoError(t, e)
44 | server.FastForward(1000 * time.Millisecond)
45 | }
46 |
47 | assert.Equal(t, 5, rc.Stat().Keys)
48 | assert.Equal(t, int64(5), rc.Stat().Misses)
49 |
50 | keys := rc.Keys()
51 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
52 | assert.EqualValues(t, []string{"key-0", "key-1", "key-2", "key-3", "key-4"}, keys)
53 |
54 | _, e := rc.Get("key-xx", func() (string, error) {
55 | return "result-xx", nil
56 | })
57 | assert.NoError(t, e)
58 | assert.Equal(t, 5, rc.Stat().Keys)
59 | assert.Equal(t, int64(6), rc.Stat().Misses)
60 |
61 | server.FastForward(1000 * time.Millisecond)
62 | assert.Equal(t, 4, rc.Stat().Keys)
63 |
64 | server.FastForward(4000 * time.Millisecond)
65 | assert.Equal(t, 0, rc.keys())
66 |
67 | }
68 |
69 | func TestRedisCache(t *testing.T) {
70 | var coldCalls int32
71 |
72 | server := newTestRedisServer()
73 | defer server.Close()
74 | client := redis.NewClient(&redis.Options{
75 | Addr: server.Addr()})
76 | defer client.Close()
77 | o := NewOpts[string]()
78 | rc, err := NewRedisCache(client, o.MaxKeys(5), o.MaxValSize(10), o.MaxKeySize(10))
79 | require.NoError(t, err)
80 | defer rc.Close()
81 | // put 5 keys to cache
82 | for i := 0; i < 5; i++ {
83 | i := i
84 | res, e := rc.Get(fmt.Sprintf("key-%d", i), func() (string, error) {
85 | atomic.AddInt32(&coldCalls, 1)
86 | return fmt.Sprintf("result-%d", i), nil
87 | })
88 | assert.NoError(t, e)
89 | assert.Equal(t, fmt.Sprintf("result-%d", i), res)
90 | assert.Equal(t, int32(i+1), atomic.LoadInt32(&coldCalls))
91 | }
92 |
93 | // check if really cached
94 | res, err := rc.Get("key-3", func() (string, error) {
95 | return "result-blah", nil
96 | })
97 | assert.NoError(t, err)
98 | assert.Equal(t, "result-3", res, "should be cached")
99 |
100 | // try to cache after maxKeys reached
101 | res, err = rc.Get("key-X", func() (string, error) {
102 | return "result-X", nil
103 | })
104 | assert.NoError(t, err)
105 | assert.Equal(t, "result-X", res)
106 | assert.Equal(t, int64(5), rc.backend.DBSize(context.Background()).Val())
107 |
108 | // put to cache and make sure it cached
109 | res, err = rc.Get("key-Z", func() (string, error) {
110 | return "result-Z", nil
111 | })
112 | assert.NoError(t, err)
113 | assert.Equal(t, "result-Z", res)
114 |
115 | res, err = rc.Get("key-Z", func() (string, error) {
116 | return "result-Zzzz", nil
117 | })
118 | assert.NoError(t, err)
119 | assert.Equal(t, "result-Zzzz", res, "got non-cached value")
120 | assert.Equal(t, 5, rc.keys())
121 |
122 | res, err = rc.Get("key-Zzzzzzz", func() (string, error) {
123 | return "result-Zzzz", nil
124 | })
125 | assert.NoError(t, err)
126 | assert.Equal(t, "result-Zzzz", res, "got non-cached value")
127 | assert.Equal(t, 5, rc.keys())
128 |
129 | res, ok := rc.Peek("error-key-Z2")
130 | assert.False(t, ok)
131 | assert.Empty(t, res)
132 | }
133 |
134 | func TestRedisCacheErrors(t *testing.T) {
135 | server := newTestRedisServer()
136 | defer server.Close()
137 | client := redis.NewClient(&redis.Options{
138 | Addr: server.Addr()})
139 | defer client.Close()
140 | rc, err := NewRedisCache[string](client)
141 | require.NoError(t, err)
142 | defer rc.Close()
143 |
144 | res, err := rc.Get("error-key-Z", func() (string, error) {
145 | return "error-result-Z", fmt.Errorf("some error")
146 | })
147 | assert.Error(t, err)
148 | assert.Equal(t, "error-result-Z", res)
149 | assert.Equal(t, int64(1), rc.Stat().Errors)
150 | }
151 |
152 | // should not work with non-string types
153 | func TestRedisCacheCreationErrors(t *testing.T) {
154 | // string case, no error
155 | // no close is needed as it will call client.Close(), which will cause panic
156 | rcString, err := NewRedisCache[string](nil)
157 | require.NoError(t, err)
158 | assert.NotNil(t, rcString)
159 | // string-based type but no StrToV option, error expected
160 | rcSizedString, err := NewRedisCache[sizedString](nil)
161 | require.EqualError(t, err, "StrToV option should be set for string-like type")
162 | assert.Nil(t, rcSizedString)
163 | // string-based type with StrToV option, no error
164 | // no close is needed as it will call client.Close(), which will cause panic
165 | o := NewOpts[sizedString]()
166 | rcSizedString, err = NewRedisCache[sizedString](nil, o.StrToV(func(s string) sizedString { return sizedString(s) }))
167 | require.NoError(t, err)
168 | assert.NotNil(t, rcSizedString)
169 | // non-string based type, error expected
170 | rcInt, err := NewRedisCache[int](nil)
171 | require.EqualError(t, err, "can't store non-string types in Redis cache")
172 | assert.Nil(t, rcInt)
173 | }
174 |
175 | func TestRedisCache_BadOptions(t *testing.T) {
176 | server := newTestRedisServer()
177 | defer server.Close()
178 | client := redis.NewClient(&redis.Options{
179 | Addr: server.Addr()})
180 | defer client.Close()
181 |
182 | o := NewOpts[string]()
183 | _, err := NewRedisCache(client, o.MaxCacheSize(-1))
184 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
185 |
186 | _, err = NewRedisCache(client, o.MaxCacheSize(-1))
187 | assert.EqualError(t, err, "failed to set cache option: negative max cache size")
188 |
189 | _, err = NewRedisCache(client, o.MaxKeys(-1))
190 | assert.EqualError(t, err, "failed to set cache option: negative max keys")
191 |
192 | _, err = NewRedisCache(client, o.MaxValSize(-1))
193 | assert.EqualError(t, err, "failed to set cache option: negative max value size")
194 |
195 | _, err = NewRedisCache(client, o.TTL(-1))
196 | assert.EqualError(t, err, "failed to set cache option: negative ttl")
197 |
198 | _, err = NewRedisCache(client, o.MaxKeySize(-1))
199 | assert.EqualError(t, err, "failed to set cache option: negative max key size")
200 |
201 | }
202 |
--------------------------------------------------------------------------------
/v2/scache.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | )
7 |
8 | // Scache wraps LoadingCache with partitions (sub-systems) and scopes.
9 | // Simplified interface with just 4 funcs - Get, Flush, Stat and Close
10 | type Scache[V any] struct {
11 | lc LoadingCache[V]
12 | }
13 |
14 | // NewScache creates Scache on top of LoadingCache
15 | func NewScache[V any](lc LoadingCache[V]) *Scache[V] {
16 | return &Scache[V]{lc: lc}
17 | }
18 |
19 | // Get retrieves a key from underlying backend
20 | func (m *Scache[V]) Get(key Key, fn func() (V, error)) (data V, err error) {
21 | keyStr := key.String()
22 | val, err := m.lc.Get(keyStr, func() (value V, e error) {
23 | return fn()
24 | })
25 | return val, err
26 | }
27 |
28 | // Stat delegates the call to the underlying cache backend
29 | func (m *Scache[V]) Stat() CacheStat {
30 | return m.lc.Stat()
31 | }
32 |
33 | // Close calls Close function of the underlying cache
34 | func (m *Scache[V]) Close() error {
35 | return m.lc.Close()
36 | }
37 |
38 | // Flush clears the cache: all keys if no scopes are requested, otherwise only the keys matching the requested scopes
39 | func (m *Scache[V]) Flush(req FlusherRequest) {
40 | if len(req.scopes) == 0 {
41 | m.lc.Purge()
42 | return
43 | }
44 |
45 | // check if fullKey has matching scopes
46 | inScope := func(fullKey string) bool {
47 | key, err := parseKey(fullKey)
48 | if err != nil {
49 | return false
50 | }
51 | for _, s := range req.scopes {
52 | for _, ks := range key.scopes {
53 | if ks == s {
54 | return true
55 | }
56 | }
57 | }
58 | return false
59 | }
60 |
61 | for _, k := range m.lc.Keys() {
62 | if inScope(k) {
63 | m.lc.Delete(k) // Keys() returns a copy of the cache's keys, safe to remove directly
64 | }
65 | }
66 | }
67 |
68 | // Key for scoped cache. Created for a given partition (can be empty) and set with ID and Scopes.
69 | // example: k := NewKey("sys1").ID(postID).Scopes("last_posts", customer_id)
70 | type Key struct {
71 | id string // the primary part of the key, i.e. usual cache's key
72 | partition string // optional id for a subsystem or cache partition
73 | scopes []string // list of scopes to use in invalidation
74 | }
75 |
76 | // NewKey makes base key for given partition. Partition can be omitted.
77 | func NewKey(partition ...string) Key {
78 | if len(partition) == 0 {
79 | return Key{partition: ""}
80 | }
81 | return Key{partition: partition[0]}
82 | }
83 |
84 | // ID sets key id
85 | func (k Key) ID(id string) Key {
86 | k.id = id
87 | return k
88 | }
89 |
90 | // Scopes of the key
91 | func (k Key) Scopes(scopes ...string) Key {
92 | k.scopes = scopes
93 | return k
94 | }
95 |
96 | // String makes full string key from primary key, partition and scopes
97 | // key string made as <partition>@@<id>@@<scope1>$$<scope2>...
98 | func (k Key) String() string {
99 | bld := strings.Builder{}
100 | _, _ = bld.WriteString(k.partition)
101 | _, _ = bld.WriteString("@@")
102 | _, _ = bld.WriteString(k.id)
103 | _, _ = bld.WriteString("@@")
104 | _, _ = bld.WriteString(strings.Join(k.scopes, "$$"))
105 | return bld.String()
106 | }
107 |
108 | // parseKey gets the compound key string created by Key.String and splits it into the actual key, partition and scopes
109 | // key string made as <partition>@@<id>@@<scope1>$$<scope2>...
110 | func parseKey(keyStr string) (Key, error) {
111 | elems := strings.Split(keyStr, "@@")
112 | if len(elems) != 3 {
113 | return Key{}, fmt.Errorf("can't parse cache key %s, invalid number of segments %d", keyStr, len(elems))
114 | }
115 |
116 | scopes := strings.Split(elems[2], "$$")
117 | if len(scopes) == 1 && scopes[0] == "" {
118 | scopes = []string{}
119 | }
120 | key := Key{
121 | partition: elems[0],
122 | id: elems[1],
123 | scopes: scopes,
124 | }
125 |
126 | return key, nil
127 | }
128 |
129 | // FlusherRequest used as input for cache.Flush
130 | type FlusherRequest struct {
131 | partition string
132 | scopes []string
133 | }
134 |
135 | // Flusher makes new FlusherRequest with empty scopes
136 | func Flusher(partition string) FlusherRequest {
137 | res := FlusherRequest{partition: partition}
138 | return res
139 | }
140 |
141 | // Scopes adds scopes to FlusherRequest
142 | func (f FlusherRequest) Scopes(scopes ...string) FlusherRequest {
143 | f.scopes = scopes
144 | return f
145 | }
146 |
--------------------------------------------------------------------------------
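A short sketch of the scoped-key flow implemented above: keys carry a partition, an id and scopes, and Flush invalidates by scope:

package main

import (
	"fmt"
	"log"

	lcw "github.com/go-pkgz/lcw/v2"
)

func main() {
	backend, err := lcw.NewLruCache[[]byte]()
	if err != nil {
		log.Fatal(err)
	}
	cache := lcw.NewScache[[]byte](backend)
	defer cache.Close()

	// composite key: partition "posts", id "42", scopes used later for invalidation
	key := lcw.NewKey("posts").ID("42").Scopes("last_posts", "user-1")
	v, err := cache.Get(key, func() ([]byte, error) { return []byte("rendered post"), nil })
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(v))

	// drop everything tagged with the "user-1" scope in the "posts" partition
	cache.Flush(lcw.Flusher("posts").Scopes("user-1"))
}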
/v2/scache_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "log"
7 | "net/http"
8 | "net/http/httptest"
9 | "sync"
10 | "sync/atomic"
11 | "testing"
12 | "time"
13 |
14 | "github.com/stretchr/testify/assert"
15 | "github.com/stretchr/testify/require"
16 | )
17 |
18 | func TestScache_Get(t *testing.T) {
19 | lru, err := NewLruCache[[]byte]()
20 | require.NoError(t, err)
21 | lc := NewScache[[]byte](lru)
22 | defer lc.Close()
23 |
24 | var coldCalls int32
25 |
26 | res, err := lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
27 | atomic.AddInt32(&coldCalls, 1)
28 | return []byte("result"), nil
29 | })
30 | assert.NoError(t, err)
31 | assert.Equal(t, "result", string(res))
32 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
33 |
34 | res, err = lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
35 | atomic.AddInt32(&coldCalls, 1)
36 | return []byte("result"), nil
37 | })
38 | assert.NoError(t, err)
39 | assert.Equal(t, "result", string(res))
40 | assert.Equal(t, int32(1), atomic.LoadInt32(&coldCalls))
41 |
42 | lc.Flush(Flusher("site"))
43 | time.Sleep(100 * time.Millisecond) // let postFn to do its thing
44 |
45 | _, err = lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
46 | return nil, fmt.Errorf("err")
47 | })
48 | assert.Error(t, err)
49 | }
50 |
51 | func TestScache_Scopes(t *testing.T) {
52 | lru, err := NewLruCache[[]byte]()
53 | require.NoError(t, err)
54 | lc := NewScache[[]byte](lru)
55 | defer lc.Close()
56 |
57 | res, err := lc.Get(NewKey("site").ID("key").Scopes("s1", "s2"), func() ([]byte, error) {
58 | return []byte("value"), nil
59 | })
60 | assert.NoError(t, err)
61 | assert.Equal(t, "value", string(res))
62 |
63 | res, err = lc.Get(NewKey("site").ID("key2").Scopes("s2"), func() ([]byte, error) {
64 | return []byte("value2"), nil
65 | })
66 | assert.NoError(t, err)
67 | assert.Equal(t, "value2", string(res))
68 |
69 | assert.Equal(t, 2, len(lc.lc.Keys()))
70 | lc.Flush(Flusher("site").Scopes("s1"))
71 | assert.Equal(t, 1, len(lc.lc.Keys()))
72 |
73 | _, err = lc.Get(NewKey("site").ID("key2").Scopes("s2"), func() ([]byte, error) {
74 | assert.Fail(t, "should stay")
75 | return nil, nil
76 | })
77 | assert.NoError(t, err)
78 | res, err = lc.Get(NewKey("site").ID("key").Scopes("s1", "s2"), func() ([]byte, error) {
79 | return []byte("value-upd"), nil
80 | })
81 | assert.NoError(t, err)
82 | assert.Equal(t, "value-upd", string(res), "was deleted, update")
83 |
84 | assert.Equal(t, CacheStat{Hits: 1, Misses: 3, Keys: 2, Size: 0, Errors: 0}, lc.Stat())
85 | }
86 |
87 | func TestScache_Flush(t *testing.T) {
88 | lru, err := NewLruCache[[]byte]()
89 | require.NoError(t, err)
90 | lc := NewScache[[]byte](lru)
91 |
92 | addToCache := func(id string, scopes ...string) {
93 | res, err := lc.Get(NewKey("site").ID(id).Scopes(scopes...), func() ([]byte, error) {
94 | return []byte("value" + id), nil
95 | })
96 | require.NoError(t, err)
97 | require.Equal(t, "value"+id, string(res))
98 | }
99 |
100 | init := func() {
101 | lc.Flush(Flusher("site"))
102 | addToCache("key1", "s1", "s2")
103 | addToCache("key2", "s1", "s2", "s3")
104 | addToCache("key3", "s1", "s2", "s3")
105 | addToCache("key4", "s2", "s3")
106 | addToCache("key5", "s2")
107 | addToCache("key6")
108 | addToCache("key7", "s4", "s3")
109 | require.Equal(t, 7, len(lc.lc.Keys()), "cache init")
110 | }
111 |
112 | tbl := []struct {
113 | scopes []string
114 | left int
115 | msg string
116 | }{
117 | {[]string{}, 0, "full flush, no scopes"},
118 | {[]string{"s0"}, 7, "flush wrong scope"},
119 | {[]string{"s1"}, 4, "flush s1 scope"},
120 | {[]string{"s2", "s1"}, 2, "flush s2+s1 scope"},
121 | {[]string{"s1", "s2"}, 2, "flush s1+s2 scope"},
122 | {[]string{"s1", "s2", "s4"}, 1, "flush s1+s2+s4 scope"},
123 | {[]string{"s1", "s2", "s3"}, 1, "flush s1+s2+s3 scope"},
124 | {[]string{"s1", "s2", "ss"}, 2, "flush s1+s2+wrong scope"},
125 | }
126 |
127 | for i, tt := range tbl {
128 | tt := tt
129 | i := i
130 | t.Run(tt.msg, func(t *testing.T) {
131 | init()
132 | lc.Flush(Flusher("site").Scopes(tt.scopes...))
133 | assert.Equal(t, tt.left, len(lc.lc.Keys()), "keys size, %s #%d", tt.msg, i)
134 | })
135 | }
136 | }
137 |
138 | func TestScache_FlushFailed(t *testing.T) {
139 | lru, err := NewLruCache[[]byte]()
140 | require.NoError(t, err)
141 | lc := NewScache[[]byte](lru)
142 |
143 | val, err := lc.Get(NewKey("site").ID("invalid-composite"), func() ([]byte, error) {
144 | return []byte("value"), nil
145 | })
146 | assert.NoError(t, err)
147 | assert.Equal(t, "value", string(val))
148 | assert.Equal(t, 1, len(lc.lc.Keys()))
149 |
150 | lc.Flush(Flusher("site").Scopes("invalid-composite"))
151 | assert.Equal(t, 1, len(lc.lc.Keys()))
152 | }
153 |
154 | func TestScope_Key(t *testing.T) {
155 | tbl := []struct {
156 | key string
157 | partition string
158 | scopes []string
159 | full string
160 | }{
161 | {"key1", "p1", []string{"s1"}, "p1@@key1@@s1"},
162 | {"key2", "p2", []string{"s11", "s2"}, "p2@@key2@@s11$$s2"},
163 | {"key3", "", []string{}, "@@key3@@"},
164 | {"key3", "", []string{"xx", "yyy"}, "@@key3@@xx$$yyy"},
165 | }
166 |
167 | for _, tt := range tbl {
168 | tt := tt
169 | t.Run(tt.full, func(t *testing.T) {
170 | k := NewKey(tt.partition).ID(tt.key).Scopes(tt.scopes...)
171 | assert.Equal(t, tt.full, k.String())
172 | k, err := parseKey(tt.full)
173 | require.NoError(t, err)
174 | assert.Equal(t, tt.partition, k.partition)
175 | assert.Equal(t, tt.key, k.id)
176 | assert.Equal(t, tt.scopes, k.scopes)
177 | })
178 | }
179 |
180 | // without partition
181 | k := NewKey().ID("id1").Scopes("s1", "s2")
182 | assert.Equal(t, "@@id1@@s1$$s2", k.String())
183 |
184 | // parse invalid key strings
185 | _, err := parseKey("abc")
186 | assert.Error(t, err)
187 | _, err = parseKey("")
188 | assert.Error(t, err)
189 | }
190 |
191 | func TestScache_Parallel(t *testing.T) {
192 | var coldCalls int32
193 | lru, err := NewLruCache[[]byte]()
194 | require.NoError(t, err)
195 | lc := NewScache[[]byte](lru)
196 |
197 | res, err := lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
198 | return []byte("value"), nil
199 | })
200 | assert.NoError(t, err)
201 | assert.Equal(t, "value", string(res))
202 |
203 | wg := sync.WaitGroup{}
204 | for i := 0; i < 1000; i++ {
205 | wg.Add(1)
206 | i := i
207 | go func() {
208 | defer wg.Done()
209 | res, err := lc.Get(NewKey("site").ID("key"), func() ([]byte, error) {
210 | atomic.AddInt32(&coldCalls, 1)
211 | return []byte(fmt.Sprintf("result-%d", i)), nil
212 | })
213 | require.NoError(t, err)
214 | require.Equal(t, "value", string(res))
215 | }()
216 | }
217 | wg.Wait()
218 | assert.Equal(t, int32(0), atomic.LoadInt32(&coldCalls))
219 | }
220 |
221 | // ExampleScache illustrates the use of the scoped loading cache
222 | func ExampleScache() {
223 | // set up test server for single response
224 | var hitCount int
225 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
226 | if r.URL.String() == "/post/42" && hitCount == 0 {
227 | _, _ = w.Write([]byte("test response"))
228 | return
229 | }
230 | w.WriteHeader(404)
231 | }))
232 |
233 | // load page function
234 | loadURL := func(url string) ([]byte, error) {
235 | resp, err := http.Get(url) // nolint
236 | if err != nil {
237 | return nil, err
238 | }
239 | b, err := io.ReadAll(resp.Body)
240 | _ = resp.Body.Close()
241 | if err != nil {
242 | return nil, err
243 | }
244 | return b, nil
245 | }
246 |
247 | // fixed size LRU cache, 100 items, up to 10k in total size
248 | o := NewOpts[[]byte]()
249 | backend, err := NewLruCache(o.MaxKeys(100), o.MaxCacheSize(10*1024))
250 | if err != nil {
251 | log.Fatalf("can't make lru cache, %v", err)
252 | }
253 |
254 | cache := NewScache[[]byte](backend)
255 |
256 | // url not in cache, load data
257 | url := ts.URL + "/post/42"
258 | key := NewKey().ID(url).Scopes("test")
259 | val, err := cache.Get(key, func() (val []byte, err error) {
260 | return loadURL(url)
261 | })
262 | if err != nil {
263 | log.Fatalf("can't load url %s, %v", url, err)
264 | }
265 | fmt.Println(string(val))
266 |
267 | // url not in cache, load data
268 | key = NewKey().ID(url).Scopes("test")
269 | val, err = cache.Get(key, func() (val []byte, err error) {
270 | return loadURL(url)
271 | })
272 | if err != nil {
273 | log.Fatalf("can't load url %s, %v", url, err)
274 | }
275 | fmt.Println(string(val))
276 |
277 | // url cached, skip load and get from the cache
278 | key = NewKey().ID(url).Scopes("test")
279 | val, err = cache.Get(key, func() (val []byte, err error) {
280 | return loadURL(url)
281 | })
282 | if err != nil {
283 | log.Fatalf("can't load url %s, %v", url, err)
284 | }
285 | fmt.Println(string(val))
286 |
287 | // get cache stats
288 | stats := cache.Stat()
289 | fmt.Printf("%+v\n", stats)
290 |
291 | // close cache and test HTTP server after all log.Fatalf are passed
292 | ts.Close()
293 | err = cache.Close()
294 | if err != nil {
295 | log.Fatalf("can't close cache %v", err)
296 | }
297 |
298 | // Output:
299 | // test response
300 | // test response
301 | // test response
302 | // {hits:2, misses:1, ratio:0.67, keys:1, size:0, errors:0}
303 | }
304 |
--------------------------------------------------------------------------------
/v2/url.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strconv"
7 | "time"
8 |
9 | "github.com/hashicorp/go-multierror"
10 | "github.com/redis/go-redis/v9"
11 | )
12 |
13 | // New parses uri and makes any of the supported caches
14 | // supported URIs:
15 | //  - redis://<ip>:<port>?db=123&max_keys=10
16 | // - mem://lru?max_keys=10&max_cache_size=1024
17 | // - mem://expirable?ttl=30s&max_val_size=100
18 | // - nop://
19 | func New[V any](uri string) (LoadingCache[V], error) {
20 | u, err := url.Parse(uri)
21 | if err != nil {
22 | return nil, fmt.Errorf("parse cache uri %s: %w", uri, err)
23 | }
24 |
25 | query := u.Query()
26 | opts, err := optionsFromQuery[V](query)
27 | if err != nil {
28 | return nil, fmt.Errorf("parse uri options %s: %w", uri, err)
29 | }
30 |
31 | switch u.Scheme {
32 | case "redis":
33 | redisOpts, e := redisOptionsFromURL(u)
34 | if e != nil {
35 | return nil, e
36 | }
37 | res, e := NewRedisCache(redis.NewClient(redisOpts), opts...)
38 | if e != nil {
39 | return nil, fmt.Errorf("make redis for %s: %w", uri, e)
40 | }
41 | return res, nil
42 | case "mem":
43 | switch u.Hostname() {
44 | case "lru":
45 | return NewLruCache[V](opts...)
46 | case "expirable":
47 | return NewExpirableCache[V](opts...)
48 | default:
49 | return nil, fmt.Errorf("unsupported mem cache type %s", u.Hostname())
50 | }
51 | case "nop":
52 | return NewNopCache[V](), nil
53 | }
54 | return nil, fmt.Errorf("unsupported cache type %s", u.Scheme)
55 | }
56 |
57 | func optionsFromQuery[V any](q url.Values) (opts []Option[V], err error) {
58 | errs := new(multierror.Error)
59 | o := NewOpts[V]()
60 |
61 | if v := q.Get("max_val_size"); v != "" {
62 | vv, e := strconv.Atoi(v)
63 | if e != nil {
64 | errs = multierror.Append(errs, fmt.Errorf("max_val_size query param %s: %w", v, e))
65 | } else {
66 | opts = append(opts, o.MaxValSize(vv))
67 | }
68 | }
69 |
70 | if v := q.Get("max_key_size"); v != "" {
71 | vv, e := strconv.Atoi(v)
72 | if e != nil {
73 | errs = multierror.Append(errs, fmt.Errorf("max_key_size query param %s: %w", v, e))
74 | } else {
75 | opts = append(opts, o.MaxKeySize(vv))
76 | }
77 | }
78 |
79 | if v := q.Get("max_keys"); v != "" {
80 | vv, e := strconv.Atoi(v)
81 | if e != nil {
82 | errs = multierror.Append(errs, fmt.Errorf("max_keys query param %s: %w", v, e))
83 | } else {
84 | opts = append(opts, o.MaxKeys(vv))
85 | }
86 | }
87 |
88 | if v := q.Get("max_cache_size"); v != "" {
89 | vv, e := strconv.ParseInt(v, 10, 64)
90 | if e != nil {
91 | errs = multierror.Append(errs, fmt.Errorf("max_cache_size query param %s: %w", v, e))
92 | } else {
93 | opts = append(opts, o.MaxCacheSize(vv))
94 | }
95 | }
96 |
97 | if v := q.Get("ttl"); v != "" {
98 | vv, e := time.ParseDuration(v)
99 | if e != nil {
100 | errs = multierror.Append(errs, fmt.Errorf("ttl query param %s: %w", v, e))
101 | } else {
102 | opts = append(opts, o.TTL(vv))
103 | }
104 | }
105 |
106 | return opts, errs.ErrorOrNil()
107 | }
108 |
109 | func redisOptionsFromURL(u *url.URL) (*redis.Options, error) {
110 | query := u.Query()
111 |
112 | db, err := strconv.Atoi(query.Get("db"))
113 | if err != nil {
114 | return nil, fmt.Errorf("db from %s: %w", u, err)
115 | }
116 |
117 | res := &redis.Options{
118 | Addr: u.Hostname() + ":" + u.Port(),
119 | DB: db,
120 | Password: query.Get("password"),
121 | Network: query.Get("network"),
122 | }
123 |
124 | if dialTimeout, err := time.ParseDuration(query.Get("dial_timeout")); err == nil {
125 | res.DialTimeout = dialTimeout
126 | }
127 |
128 | if readTimeout, err := time.ParseDuration(query.Get("read_timeout")); err == nil {
129 | res.ReadTimeout = readTimeout
130 | }
131 |
132 | if writeTimeout, err := time.ParseDuration(query.Get("write_timeout")); err == nil {
133 | res.WriteTimeout = writeTimeout
134 | }
135 |
136 | return res, nil
137 | }
138 |
--------------------------------------------------------------------------------
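A small sketch of backend selection via URI, limited to the schemes listed in the New comment above:

package main

import (
	"fmt"
	"log"

	lcw "github.com/go-pkgz/lcw/v2"
)

func main() {
	// in-memory LRU backend selected from a config-style URI
	cache, err := lcw.New[string]("mem://lru?max_keys=50&max_cache_size=10000")
	if err != nil {
		log.Fatal(err)
	}
	defer cache.Close()

	v, err := cache.Get("greeting", func() (string, error) { return "hello", nil })
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v)

	// switching to Redis or a no-op cache is just a different URI:
	//   lcw.New[string]("redis://127.0.0.1:6379?db=1&ttl=10s")
	//   lcw.New[string]("nop://")
}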
/v2/url_test.go:
--------------------------------------------------------------------------------
1 | package lcw
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strconv"
7 | "testing"
8 | "time"
9 |
10 | "github.com/redis/go-redis/v9"
11 | "github.com/stretchr/testify/assert"
12 | "github.com/stretchr/testify/require"
13 | )
14 |
15 | func TestUrl_optionsFromQuery(t *testing.T) {
16 | tbl := []struct {
17 | url string
18 | num int
19 | fail bool
20 | }{
21 | {"mem://lru?ttl=26s&max_keys=100&max_val_size=1024&max_key_size=64&max_cache_size=111", 5, false},
22 | {"mem://lru?ttl=26s&max_keys=100&foo=bar", 2, false},
23 | {"mem://lru?ttl=xx26s&max_keys=100&foo=bar", 0, true},
24 | {"mem://lru?foo=bar", 0, false},
25 | {"mem://lru?foo=bar&max_keys=abcd", 0, true},
26 | {"mem://lru?foo=bar&max_val_size=abcd", 0, true},
27 | {"mem://lru?foo=bar&max_cache_size=abcd", 0, true},
28 | {"mem://lru?foo=bar&max_key_size=abcd", 0, true},
29 | }
30 |
31 | for i, tt := range tbl {
32 | tt := tt
33 | t.Run(strconv.Itoa(i), func(t *testing.T) {
34 | u, err := url.Parse(tt.url)
35 | require.NoError(t, err)
36 | r, err := optionsFromQuery[string](u.Query())
37 | if tt.fail {
38 | require.Error(t, err)
39 | return
40 | }
41 | assert.Equal(t, tt.num, len(r))
42 | })
43 | }
44 | }
45 |
46 | func TestUrl_redisOptionsFromURL(t *testing.T) {
47 | tbl := []struct {
48 | url string
49 | fail bool
50 | opts redis.Options
51 | }{
52 | {"redis://127.0.0.1:12345?db=xa19", true, redis.Options{}},
53 | {"redis://127.0.0.1:12345?foo=bar&max_keys=abcd&db=19", false, redis.Options{Addr: "127.0.0.1:12345", DB: 19}},
54 | {
55 | "redis://127.0.0.1:12345?db=19&password=xyz&network=tcp4&dial_timeout=1s&read_timeout=2s&write_timeout=3m",
56 | false, redis.Options{Addr: "127.0.0.1:12345", DB: 19, Password: "xyz", Network: "tcp4",
57 | DialTimeout: 1 * time.Second, ReadTimeout: 2 * time.Second, WriteTimeout: 3 * time.Minute},
58 | },
59 | }
60 |
61 | for i, tt := range tbl {
62 | tt := tt
63 | t.Run(strconv.Itoa(i), func(t *testing.T) {
64 | u, err := url.Parse(tt.url)
65 | require.NoError(t, err)
66 | r, err := redisOptionsFromURL(u)
67 | if tt.fail {
68 | require.Error(t, err)
69 | return
70 | }
71 | require.NoError(t, err)
72 | assert.Equal(t, tt.opts, *r)
73 | })
74 | }
75 | }
76 |
77 | func TestUrl_NewLru(t *testing.T) {
78 | u := "mem://lru?max_keys=10"
79 | res, err := New[string](u)
80 | require.NoError(t, err)
81 | r, ok := res.(*LruCache[string])
82 | require.True(t, ok)
83 | assert.Equal(t, 10, r.maxKeys)
84 | }
85 |
86 | func TestUrl_NewExpirable(t *testing.T) {
87 | u := "mem://expirable?max_keys=10&ttl=30m"
88 | res, err := New[string](u)
89 | require.NoError(t, err)
90 | defer res.Close()
91 | r, ok := res.(*ExpirableCache[string])
92 | require.True(t, ok)
93 | assert.Equal(t, 10, r.maxKeys)
94 | assert.Equal(t, 30*time.Minute, r.ttl)
95 | }
96 |
97 | func TestUrl_NewNop(t *testing.T) {
98 | u := "nop://"
99 | res, err := New[string](u)
100 | require.NoError(t, err)
101 | _, ok := res.(*Nop[string])
102 | require.True(t, ok)
103 | }
104 |
105 | func TestUrl_NewRedis(t *testing.T) {
106 | srv := newTestRedisServer()
107 | defer srv.Close()
108 | u := fmt.Sprintf("redis://%s?db=1&ttl=10s", srv.Addr())
109 | res, err := New[string](u)
110 | require.NoError(t, err)
111 | defer res.Close()
112 | r, ok := res.(*RedisCache[string])
113 | require.True(t, ok)
114 | assert.Equal(t, 10*time.Second, r.ttl)
115 |
116 | u = fmt.Sprintf("redis://%s?db=1&ttl=zz10s", srv.Addr())
117 | _, err = New[string](u)
118 | require.Error(t, err)
119 | assert.Contains(t, err.Error(), "ttl query param zz10s: time: invalid duration")
120 |
121 | _, err = New[string]("redis://localhost:xxx?db=1")
122 | require.Error(t, err)
123 | assert.Contains(t, err.Error(), "parse cache uri redis://localhost:xxx?db=1: parse")
124 | assert.Contains(t, err.Error(), "redis://localhost:xxx?db=1")
125 | assert.Contains(t, err.Error(), "invalid port \":xxx\" after host")
126 | }
127 |
128 | func TestUrl_NewFailed(t *testing.T) {
129 | u := "blah://ip?foo=bar"
130 | _, err := New[string](u)
131 | require.EqualError(t, err, "unsupported cache type blah")
132 |
133 | u = "mem://blah?foo=bar"
134 | _, err = New[string](u)
135 | require.EqualError(t, err, "unsupported mem cache type blah")
136 |
137 | u = "mem://lru?max_keys=xyz"
138 | _, err = New[string](u)
139 | require.EqualError(t, err, "parse uri options mem://lru?max_keys=xyz: 1 error occurred:\n\t* max_keys query param xyz: strconv.Atoi: parsing \"xyz\": invalid syntax\n\n")
140 | }
141 |
--------------------------------------------------------------------------------