├── .editorconfig
├── .github
│   └── workflows
│       └── go.yml
├── .gitignore
├── LICENSE
├── README.md
├── cache.go
├── cache_test.go
├── go.mod
└── go.sum

/.editorconfig:
--------------------------------------------------------------------------------
1 | # top-most EditorConfig file
2 | root = true
3 |
4 | # Unix-style newlines with a newline ending every file
5 | [*]
6 | end_of_line = lf
7 | insert_final_newline = true
8 |
9 | # Matches multiple files with brace expansion notation
10 | # Set default charset
11 | [*.{js,json,env,sh,py,go,html,ts,md,yml,tml}]
12 | charset = utf-8
13 | indent_style = space
14 | indent_size = 4
15 |
16 | # Tab indentation (no size specified)
17 | [Makefile]
18 | charset = utf-8
19 | indent_style = tab
20 | indent_size = 4
21 |
--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
1 | # This workflow will build a golang project
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
3 |
4 | name: Go
5 |
6 | on:
7 |   push:
8 |     branches: ["main"]
9 |   pull_request:
10 |     branches: ["main"]
11 |
12 | jobs:
13 |   build:
14 |     runs-on: ubuntu-latest
15 |     steps:
16 |       - uses: actions/checkout@v4
17 |
18 |       - name: Set up Go
19 |         uses: actions/setup-go@v4
20 |         with:
21 |           go-version: "1.23"
22 |
23 |       - name: Build
24 |         run: go build -v ./...
25 |
26 |       - name: Tests
27 |         run: |
28 |           go install github.com/mattn/goveralls@latest
29 |           go test -race -covermode atomic -coverprofile=covprofile ./...
30 |
31 |       - name: Send coverage
32 |         uses: shogo82148/actions-goveralls@v1
33 |         with:
34 |           path-to-profile: covprofile
35 |
36 |       - name: golangci-lint
37 |         uses: golangci/golangci-lint-action@v6
38 |         with:
39 |           version: v1.60
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.gitignore.io/api/go,osx,linux,windows
2 |
3 | ### Go ###
4 | # Binaries for programs and plugins
5 | *.exe
6 | *.dll
7 | *.so
8 | *.dylib
9 |
10 | # Test binary, build with `go test -c`
11 | *.test
12 |
13 | # Output of the go coverage tool, specifically when used with LiteIDE
14 | *.out
15 |
16 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
17 | .glide/
18 |
19 | # Golang project vendor packages which should be ignored
20 | vendor/
21 |
22 | ### Linux ###
23 | *~
24 |
25 | # temporary files which can be created if a process still has a handle open of a deleted file
26 | .fuse_hidden*
27 |
28 | # KDE directory preferences
29 | .directory
30 |
31 | # Linux trash folder which might appear on any partition or disk
32 | .Trash-*
33 |
34 | # .nfs files are created when an open file is removed but is still being accessed
35 | .nfs*
36 |
37 | ### OSX ###
38 | *.DS_Store
39 | .AppleDouble
40 | .LSOverride
41 |
42 | # Icon must end with two \r
43 | Icon
44 |
45 | # Thumbnails
46 | ._*
47 |
48 | # Files that might appear in the root of a volume
49 | .DocumentRevisions-V100
50 | .fseventsd
51 | .Spotlight-V100
52 | .TemporaryItems
53 | .Trashes
54 | .VolumeIcon.icns
55 | .com.apple.timemachine.donotpresent
56 |
57 | # Directories potentially created on remote AFP share
58 | .AppleDB
59 | .AppleDesktop
60 | Network Trash Folder
61 | Temporary Items
62 | .apdisk
63 |
64 | ### Windows ###
65 | # Windows thumbnail cache files
66 | Thumbs.db
67 | ehthumbs.db 68 | ehthumbs_vista.db 69 | 70 | # Folder config file 71 | Desktop.ini 72 | 73 | # Recycle Bin used on file shares 74 | $RECYCLE.BIN/ 75 | 76 | # Windows Installer files 77 | *.cab 78 | *.msi 79 | *.msm 80 | *.msp 81 | 82 | # Windows shortcuts 83 | *.lnk 84 | 85 | # End of https://www.gitignore.io/api/go,osx,linux,windows 86 | 87 | .vscode -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Naughty Gopher 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
pocache gopher
2 |
3 | [![](https://github.com/naughtygopher/pocache/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/naughtygopher/pocache/actions)
4 | [![Go Reference](https://pkg.go.dev/badge/github.com/naughtygopher/pocache.svg)](https://pkg.go.dev/github.com/naughtygopher/pocache)
5 | [![Go Report Card](https://goreportcard.com/badge/github.com/naughtygopher/pocache?cache_invalidate=v0.3.0)](https://goreportcard.com/report/github.com/naughtygopher/pocache)
6 | [![Coverage Status](https://coveralls.io/repos/github/naughtygopher/pocache/badge.svg?branch=main&cache_invalidate=v0.3.0)](https://coveralls.io/github/naughtygopher/pocache?branch=main)
7 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/naughtygopher/pocache/blob/main/LICENSE)
8 | [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go?tab=readme-ov-file#caches)
9 |
10 | # Pocache
11 |
12 | Pocache (`poh-cash (/poʊ kæʃ/)`), **P**reemptive **o**ptimistic cache, is a lightweight in-app caching package. It introduces preemptive cache updates, optimizing performance in concurrent environments by reducing redundant database calls while maintaining fresh data. It uses [Hashicorp's Go LRU package](https://github.com/hashicorp/golang-lru) as the default storage.
13 |
14 | Yet another _elegant_ solution to the infamous [Thundering herd problem](https://en.wikipedia.org/wiki/Thundering_herd_problem): save your database(s)!
15 |
16 | ## Key Features
17 |
18 | 1. **Preemptive Cache Updates:** Automatically updates cache entries _nearing_ expiration.
19 | 2. **Threshold Window:** Configurable time window before cache expiration to trigger updates.
20 | 3. **Serve stale**: Opt-in configuration to serve expired cache entries while a background refresh runs.
21 | 4. **Debounced Updates:** Prevents excessive I/O calls by debouncing concurrent update requests for the same key.
22 | 5. **Custom store**: Customizable underlying storage, to extend or replace the in-app cache or to use an external cache database.
23 |
24 | ## Why use Pocache?
25 |
26 | In highly concurrent environments (e.g., web servers), multiple requests try to access the same cache entry simultaneously. Without call suppression (debouncing), the app would query the underlying database multiple times until the cache is refreshed. And while trying to solve the thundering herd problem, most applications end up serving stale/expired cache entries until the update is completed.
27 |
28 | Pocache addresses both problems by combining a debounce mechanism with preemptive, optimistic updates during the threshold window, keeping the cache up to date at all times without ever having to serve stale data.
29 |
30 | ## How does it work?
31 |
32 | Given a cache expiration time and a threshold window, Pocache triggers a preemptive cache update when a value is accessed within the threshold window.
33 |
34 | Example:
35 |
36 | - Cache expiration: 10 minutes
37 | - Threshold window: 1 minute
38 |
39 | ```
40 | |______________________ ____threshold window__________ ______________|
41 | 0 min                   9 mins                         10 mins
42 | Add key here            Get key within window          Key expires
43 | ```
44 |
45 | When a key is fetched within the threshold window (between 9-10 minutes), Pocache initiates a background update for that key (_preemptive_). This ensures fresh data availability, anticipating future usage (_optimistic_).
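For instance, the setup above maps onto a configuration like the one below (a minimal sketch: `fetchFromDB` is a hypothetical stand-in for your own data loader, and error handling is omitted for brevity):

```golang
cache, err := pocache.New(pocache.Config[string, string]{
    // Keys expire 10 minutes after being added, unless evicted earlier by the LRU policy.
    CacheAge: 10 * time.Minute,
    // A Get within the final minute before expiry queues a background refresh.
    Threshold: time.Minute,
    // Updater is called with a timeout-bound context to fetch the fresh value.
    Updater: func(ctx context.Context, key string) (string, error) {
        return fetchFromDB(ctx, key) // hypothetical loader, replace with your own
    },
})
```

A `Get` issued inside that final minute still returns the cached value immediately; the refreshed value replaces it once the updater finishes, so subsequent reads stay warm without hitting the database on the request path.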
46 |
47 | ## Custom store
48 |
49 | Pocache defines the following interface for its underlying storage. You can use any storage of your choice, as long as it implements this interface and is provided via the configuration (`Config.Store`).
50 |
51 | ```golang
52 | type Store[K comparable, T any] interface {
53 |     Add(key K, value *Payload[T]) (evicted bool)
54 |     Get(key K) (value *Payload[T], found bool)
55 |     Remove(key K) (present bool)
56 | }
57 | ```
58 |
59 | Below is an example (not for production use) of setting a custom store backed by `sync.Map`.
60 |
61 | ```golang
62 | type mystore[K comparable, T any] struct {
63 |     data sync.Map
64 | }
65 |
66 | func (ms *mystore[K, T]) Add(key K, value *pocache.Payload[T]) (evicted bool) {
67 |     ms.data.Store(key, value)
68 |     return false
69 | }
70 |
71 | func (ms *mystore[K, T]) Get(key K) (value *pocache.Payload[T], found bool) {
72 |     v, found := ms.data.Load(key)
73 |     if !found {
74 |         return nil, false
75 |     }
76 |     value, _ = v.(*pocache.Payload[T])
77 |     return value, true
78 | }
79 |
80 | func (ms *mystore[K, T]) Remove(key K) (present bool) {
81 |     _, present = ms.data.LoadAndDelete(key)
82 |     return present
83 | }
84 |
85 | func foo() {
86 |     cache, err := pocache.New(pocache.Config[string, string]{
87 |         Store: &mystore[string, string]{},
88 |     })
89 |     fmt.Println(cache, err)
90 | }
91 | ```
92 |
93 | ## Full example
94 |
95 | ```golang
96 | package main
97 |
98 | import (
99 |     "context"
100 |     "fmt"
101 |     "time"
102 |
103 |     "github.com/naughtygopher/pocache"
104 | )
105 |
106 | type Item struct {
107 |     ID          string
108 |     Name        string
109 |     Description string
110 | }
111 |
112 | func newItem(key string) *Item {
113 |     return &Item{
114 |         ID:          fmt.Sprintf("%d", time.Now().Nanosecond()),
115 |         Name:        "name::" + key,
116 |         Description: "description::" + key,
117 |     }
118 | }
119 |
120 | func updater(ctx context.Context, key string) (*Item, error) {
121 |     return newItem(key), nil
122 | }
123 |
124 | func onErr(err error) {
125 |     panic(fmt.Sprintf("this should never have happened!: %+v", err))
126 | }
127 |
128 | func main() {
129 |     cache, err := pocache.New(pocache.Config[string, *Item]{
130 |         // LRUCacheSize is the number of keys to be maintained in the cache (Optional, default 1000)
131 |         LRUCacheSize: 100000,
132 |         // QLength is the length of the update and delete queues (Optional, default 1000)
133 |         QLength: 1000,
134 |
135 |         // CacheAge is how long a key is retained, in addition to the LRU eviction.
136 |         // It prevents stale data from lingering if/when keys are not evicted by the LRU policy.
137 |         // (Optional, default 1 minute)
138 |         CacheAge: time.Hour,
139 |         // Threshold is the duration prior to expiry when the key is considered eligible to be updated
140 |         // (Optional, default CacheAge - 1 second)
141 |         Threshold: time.Minute * 5,
142 |
143 |         // ServeStale will not return an error if the cache has expired. It will return the stale
144 |         // value, and trigger an update as well. This is useful for usecases where it's ok
145 |         // to serve stale values and data consistency is not of paramount importance.
146 | // (Optional, default false) 147 | ServeStale: false, 148 | 149 | // UpdaterTimeout is the context time out for when the updater function is called 150 | // (Optional, default 1 second) 151 | UpdaterTimeout: time.Second * 15, 152 | // Updater is optional, but without it it's a basic LRU cache 153 | Updater: updater, 154 | 155 | // ErrWatcher is called when there's any error when trying to update cache (Optional) 156 | ErrWatcher: onErr, 157 | }) 158 | if err != nil { 159 | panic(err) 160 | } 161 | 162 | const key = "hello" 163 | item := newItem(key) 164 | e := cache.Add(key, item) 165 | fmt.Println("evicted:", e) 166 | 167 | ee := cache.BulkAdd([]pocache.Tuple[string, *Item]{ 168 | {Key: key + "2", Value: newItem(key + "2")}, 169 | }) 170 | fmt.Println("evicted list:", ee) 171 | 172 | ii := cache.Get(key) 173 | if ii.Found { 174 | fmt.Println("value:", ii.V) 175 | } 176 | 177 | ii = cache.Get(key + "2") 178 | if ii.Found { 179 | fmt.Println("value:", ii.V) 180 | } 181 | } 182 | ``` 183 | 184 | ## The gopher 185 | 186 | The gopher used here was created using [Gopherize.me](https://gopherize.me/). Pocache helps you to stop the herd from thundering. 187 | -------------------------------------------------------------------------------- /cache.go: -------------------------------------------------------------------------------- 1 | // Package pocache implements an in-memory, LRU cache, with preemptive update feature. 2 | package pocache 3 | 4 | import ( 5 | "context" 6 | "errors" 7 | "fmt" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | 12 | lru "github.com/hashicorp/golang-lru/v2" 13 | ) 14 | 15 | var ( 16 | ErrValidation = errors.New("invalid") 17 | ErrPanic = errors.New("panicked") 18 | ) 19 | 20 | type ( 21 | // ErrOnUpdate defines the type of the hook function, which is called 22 | // if there's any error when trying to update a key in the background 23 | ErrOnUpdate func(err error) 24 | 25 | // Updater defines the function which is used to get the new value 26 | // of a key. This is required for pocache to do background updates 27 | Updater[K comparable, T any] func(ctx context.Context, key K) (T, error) 28 | 29 | // Store defines the interface required for the underlying storage of pocache. 30 | Store[K comparable, T any] interface { 31 | Add(key K, value *Payload[T]) (evicted bool) 32 | Get(key K) (value *Payload[T], found bool) 33 | Remove(key K) (present bool) 34 | } 35 | ) 36 | 37 | type Config[K comparable, T any] struct { 38 | // LRUCacheSize is the number of keys to be maintained in the cache 39 | LRUCacheSize uint 40 | // QLength is the length of update and delete queue 41 | QLength uint 42 | 43 | // CacheAge is for how long the cache would be maintained, apart from the LRU eviction 44 | // It's maintained to not maintain stale data if/when keys are not evicted based on LRU 45 | CacheAge time.Duration 46 | // Threshold is the duration prior to expiry, when the key is considered eligible to be updated 47 | Threshold time.Duration 48 | DisableCache bool 49 | 50 | // ServeStale will not return error if the cache has expired. It will return the stale 51 | // value, and trigger an update as well. 
This is useful for usecases where it's ok 52 | // to serve stale values and data consistency is not of paramount importance 53 | ServeStale bool 54 | 55 | // UpdaterTimeout is the context time out for when the updater function is called 56 | UpdaterTimeout time.Duration 57 | Updater Updater[K, T] 58 | Store Store[K, T] 59 | 60 | // ErrWatcher is called when there's any error when trying to update cache 61 | ErrWatcher ErrOnUpdate 62 | } 63 | 64 | func (cfg *Config[K, T]) Sanitize() { 65 | if cfg.LRUCacheSize == 0 { 66 | cfg.LRUCacheSize = 1000 67 | } 68 | 69 | if cfg.QLength == 0 { 70 | cfg.QLength = 1000 71 | } 72 | 73 | if cfg.CacheAge <= 0 { 74 | cfg.CacheAge = time.Minute 75 | } 76 | 77 | if cfg.Threshold <= 0 { 78 | cfg.Threshold = cfg.CacheAge - time.Second 79 | } 80 | 81 | if cfg.UpdaterTimeout <= 0 { 82 | cfg.UpdaterTimeout = time.Second 83 | } 84 | } 85 | 86 | func (cfg *Config[K, T]) Validate() error { 87 | if cfg.LRUCacheSize == 0 { 88 | return errors.Join( 89 | ErrValidation, 90 | fmt.Errorf("LRU cache size cannot be 0"), 91 | ) 92 | } 93 | 94 | if cfg.CacheAge <= cfg.Threshold { 95 | return errors.Join( 96 | ErrValidation, 97 | fmt.Errorf( 98 | "cache age %s cannot be shorter than threshold %s", 99 | cfg.CacheAge, 100 | cfg.Threshold, 101 | )) 102 | } 103 | 104 | return nil 105 | } 106 | 107 | func (cfg *Config[K, T]) SanitizeValidate() error { 108 | cfg.Sanitize() 109 | return cfg.Validate() 110 | } 111 | 112 | type Payload[T any] struct { 113 | // ExpireAt is an atomic pointer to avoid race condition 114 | // while concurrently reading the timestamp 115 | ExpireAt *atomic.Pointer[time.Time] 116 | Payload T 117 | } 118 | 119 | func (pyl *Payload[T]) Expiry() time.Time { 120 | if pyl.ExpireAt == nil { 121 | return time.Time{} 122 | } 123 | 124 | return *pyl.ExpireAt.Load() 125 | } 126 | 127 | func (pyl *Payload[T]) Value() T { 128 | return pyl.Payload 129 | } 130 | 131 | type Tuple[K comparable, T any] struct { 132 | Key K 133 | Value T 134 | } 135 | 136 | type Value[T any] struct { 137 | V T 138 | Found bool 139 | } 140 | 141 | type Cache[K comparable, T any] struct { 142 | isDisabled bool 143 | disableServeStale bool 144 | store Store[K, T] 145 | cacheAge time.Duration 146 | 147 | deleteQ chan<- K 148 | 149 | // following configurations are used only when an updater & threshold update are enabled 150 | // threshold is the duration within which if the cache is about to expire, it is eligible to be updated 151 | threshold time.Duration 152 | updateQ chan<- K 153 | updater Updater[K, T] 154 | updaterTimeout time.Duration 155 | // updateInProgress is used to handle update debounce 156 | updateInProgress *sync.Map 157 | errWatcher ErrOnUpdate 158 | } 159 | 160 | // initUpdater initializes all configuration required for threshold based update 161 | func (ch *Cache[K, T]) initUpdater(cfg *Config[K, T]) { 162 | if cfg.Updater == nil { 163 | return 164 | } 165 | 166 | ch.threshold = cfg.Threshold.Abs() 167 | updateQ := make(chan K, cfg.QLength) 168 | ch.updateQ = updateQ 169 | 170 | ch.updater = cfg.Updater 171 | ch.updaterTimeout = cfg.UpdaterTimeout 172 | ch.updateInProgress = new(sync.Map) 173 | ch.errWatcher = cfg.ErrWatcher 174 | 175 | go ch.updateListener(updateQ) 176 | } 177 | 178 | func (ch *Cache[K, T]) errCallback(err error) { 179 | if err == nil || ch.errWatcher == nil { 180 | return 181 | } 182 | 183 | ch.errWatcher(err) 184 | } 185 | 186 | func (ch *Cache[K, T]) enqueueUpdate(key K) { 187 | if ch.updater == nil { 188 | return 189 | } 190 | 191 | _, inprogress := 
ch.updateInProgress.Load(key) 192 | if inprogress { 193 | // key is already queued for update, no need to update again 194 | return 195 | } 196 | 197 | ch.updateInProgress.Store(key, struct{}{}) 198 | ch.updateQ <- key 199 | } 200 | 201 | func (ch *Cache[K, T]) deleteListener(keys <-chan K) { 202 | for key := range keys { 203 | ch.store.Remove(key) 204 | } 205 | } 206 | 207 | func (ch *Cache[K, T]) updateListener(keys <-chan K) { 208 | for key := range keys { 209 | ch.update(key) 210 | } 211 | } 212 | 213 | func (ch *Cache[K, T]) update(key K) { 214 | defer func() { 215 | rec := recover() 216 | if rec == nil { 217 | return 218 | } 219 | ch.updateInProgress.Delete(key) 220 | err, isErr := rec.(error) 221 | if isErr { 222 | ch.errCallback(errors.Join(ErrPanic, err)) 223 | return 224 | } 225 | ch.errCallback(errors.Join(ErrPanic, fmt.Errorf("%+v", rec))) 226 | }() 227 | 228 | ctx, cancel := context.WithTimeout(context.Background(), ch.updaterTimeout) 229 | defer cancel() 230 | 231 | value, err := ch.updater(ctx, key) 232 | ch.updateInProgress.Delete(key) 233 | if err != nil { 234 | ch.errCallback(err) 235 | return 236 | } 237 | 238 | ch.Add(key, value) 239 | } 240 | 241 | func (ch *Cache[K, T]) Get(key K) Value[T] { 242 | var v Value[T] 243 | 244 | if ch.isDisabled { 245 | return v 246 | } 247 | 248 | cp, found := ch.store.Get(key) 249 | if !found { 250 | return v 251 | } 252 | 253 | expireAt := cp.ExpireAt.Load() 254 | delta := time.Since(*expireAt) 255 | if delta >= 0 && ch.disableServeStale { 256 | // cache expired and should be removed 257 | ch.deleteQ <- key 258 | return v 259 | } 260 | 261 | inTreshold := delta < 0 && delta.Abs() <= ch.threshold 262 | expired := delta >= 0 263 | if inTreshold || expired { 264 | // key is eligible for update 265 | ch.enqueueUpdate(key) 266 | } 267 | 268 | v.Found = true 269 | v.V = cp.Payload 270 | 271 | return v 272 | } 273 | 274 | func (ch *Cache[K, T]) Add(key K, value T) (evicted bool) { 275 | if ch.isDisabled { 276 | return false 277 | } 278 | 279 | expireAt := time.Now().Add(ch.cacheAge) 280 | cea := atomic.Pointer[time.Time]{} 281 | cea.Store(&expireAt) 282 | 283 | return ch.store.Add(key, &Payload[T]{ 284 | ExpireAt: &cea, 285 | Payload: value, 286 | }) 287 | } 288 | 289 | func (ch *Cache[K, T]) BulkAdd(tuples []Tuple[K, T]) (evicted []bool) { 290 | evicted = make([]bool, len(tuples)) 291 | for i, tuple := range tuples { 292 | evicted[i] = ch.Add(tuple.Key, tuple.Value) 293 | } 294 | 295 | return evicted 296 | } 297 | 298 | func DefaultStore[K comparable, T any](lrusize int) (Store[K, T], error) { 299 | lCache, err := lru.New[K, *Payload[T]](int(lrusize)) 300 | if err != nil { 301 | return nil, fmt.Errorf("failed initializing LRU cache: %w", err) 302 | } 303 | return lCache, nil 304 | } 305 | 306 | func New[K comparable, T any](cfg Config[K, T]) (*Cache[K, T], error) { 307 | err := cfg.SanitizeValidate() 308 | if err != nil { 309 | return nil, err 310 | } 311 | 312 | cstore := cfg.Store 313 | if cstore == nil { 314 | cstore, err = DefaultStore[K, T](int(cfg.LRUCacheSize)) 315 | if err != nil { 316 | return nil, err 317 | } 318 | } 319 | 320 | deleteQ := make(chan K, cfg.QLength) 321 | ch := &Cache[K, T]{ 322 | isDisabled: cfg.DisableCache, 323 | disableServeStale: !cfg.ServeStale, 324 | store: cstore, 325 | cacheAge: cfg.CacheAge.Abs(), 326 | deleteQ: deleteQ, 327 | } 328 | 329 | ch.initUpdater(&cfg) 330 | 331 | go ch.deleteListener(deleteQ) 332 | 333 | return ch, nil 334 | } 335 | 
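A brief usage sketch for the exported API above (illustrative only, assuming just what cache.go defines; the 512-entry size and the panics are arbitrary choices): `DefaultStore` can be called directly to build the LRU-backed store up front and hand it to `Config.Store`, which is also how a custom `Store` implementation is wired in.

```golang
package main

import (
    "fmt"
    "time"

    "github.com/naughtygopher/pocache"
)

func main() {
    // Build the default LRU-backed store explicitly instead of letting New create it.
    store, err := pocache.DefaultStore[string, int](512)
    if err != nil {
        panic(err)
    }

    cache, err := pocache.New(pocache.Config[string, int]{
        Store:    store,
        CacheAge: time.Minute,
    })
    if err != nil {
        panic(err)
    }

    _ = cache.Add("answer", 42)
    fmt.Println(cache.Get("answer").V, cache.Get("answer").Found)
}
```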
-------------------------------------------------------------------------------- /cache_test.go: -------------------------------------------------------------------------------- 1 | package pocache 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "sync/atomic" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestCache(tt *testing.T) { 16 | var ( 17 | prefix = "prefix" 18 | value = "value" 19 | requirer = require.New(tt) 20 | asserter = require.New(tt) 21 | ) 22 | 23 | tt.Run("found", func(t *testing.T) { 24 | cache, err := New(Config[string, any]{ 25 | LRUCacheSize: 10000, 26 | CacheAge: time.Minute, 27 | DisableCache: false, 28 | }) 29 | requirer.NoError(err) 30 | 31 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 32 | v := cache.Get(prefix) 33 | asserter.True(v.Found) 34 | asserter.Equal(v.V, value) 35 | }) 36 | 37 | tt.Run("not found", func(t *testing.T) { 38 | cache, err := New(Config[string, any]{ 39 | LRUCacheSize: 10000, 40 | CacheAge: time.Minute, 41 | DisableCache: false, 42 | }) 43 | requirer.NoError(err) 44 | 45 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 46 | v := cache.Get(prefix + "_does_not_exist") 47 | asserter.False(v.Found) 48 | asserter.Equal(v.V, nil) 49 | }) 50 | 51 | tt.Run("cache age expired", func(t *testing.T) { 52 | cache, err := New(Config[string, any]{ 53 | LRUCacheSize: 1, 54 | CacheAge: time.Nanosecond, 55 | DisableCache: false, 56 | }) 57 | requirer.NoError(err) 58 | 59 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 60 | time.Sleep(time.Millisecond) 61 | v := cache.Get(prefix) 62 | asserter.False(v.Found) 63 | asserter.Equal(v.V, nil) 64 | }) 65 | 66 | tt.Run("update cache", func(t *testing.T) { 67 | cache, err := New(Config[string, any]{ 68 | LRUCacheSize: 10000, 69 | CacheAge: time.Minute, 70 | DisableCache: false, 71 | }) 72 | requirer.NoError(err) 73 | 74 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 75 | v := cache.Get(prefix) 76 | asserter.True(v.Found) 77 | asserter.Equal(v.V, value) 78 | 79 | value = "new_value" 80 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 81 | v = cache.Get(prefix) 82 | asserter.True(v.Found) 83 | asserter.Equal(v.V, value) 84 | }) 85 | 86 | tt.Run("multiple Add/Get to check if channel blocks", func(t *testing.T) { //nolint:govet 87 | // limit should be greater than the channel buffer for updateQ & deleteQ 88 | limit := 200 89 | cache, err := New(Config[string, any]{ 90 | LRUCacheSize: 10000, 91 | CacheAge: time.Minute, 92 | DisableCache: false, 93 | }) 94 | requirer.NoError(err) 95 | 96 | for i := 0; i < limit; i++ { 97 | prefix := fmt.Sprintf("%s_%d", prefix, i) 98 | value := fmt.Sprintf("%s_%d", value, i) 99 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 100 | } 101 | 102 | for i := 0; i < limit; i++ { 103 | prefix := fmt.Sprintf("%s_%d", prefix, i) 104 | value := fmt.Sprintf("%s_%d", value, i) 105 | v := cache.Get(prefix) 106 | asserter.True(v.Found) 107 | asserter.Equal(v.V, value) 108 | } 109 | }) 110 | 111 | tt.Run("serve stale", func(t *testing.T) { 112 | cache, err := New(Config[string, any]{ 113 | LRUCacheSize: 10000, 114 | CacheAge: time.Second * 2, 115 | DisableCache: false, 116 | ServeStale: true, 117 | }) 118 | requirer.NoError(err) 119 | 120 | cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 121 | // wait for cache to expire 122 | time.Sleep(time.Second * 3) 123 | 124 | v := 
cache.Get(prefix) 125 | asserter.True(v.Found) 126 | asserter.Equal(v.V, value) 127 | }) 128 | 129 | tt.Run("debounce", func(t *testing.T) { 130 | cache, err := New(Config[string, any]{ 131 | LRUCacheSize: 10000, 132 | CacheAge: time.Minute, 133 | Threshold: time.Second * 59, 134 | DisableCache: false, 135 | Updater: func(ctx context.Context, key string) (any, error) { 136 | // intentional delay in updater to retain debounce key 137 | // in the map long enough to be tested 138 | time.Sleep(time.Second * 3) 139 | return key, nil 140 | }, 141 | }) 142 | requirer.NoError(err) 143 | 144 | _ = cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 145 | // wait for threshold window 146 | time.Sleep(time.Second) 147 | // trigger auto update within threshold window 148 | _ = cache.Get(prefix) 149 | 150 | // re-trigger auto update within threshold window 151 | _ = cache.Get(prefix) 152 | // check if key added to debounce checker map 153 | _, found := cache.updateInProgress.Load(prefix) 154 | asserter.True(found) 155 | }) 156 | 157 | tt.Run("disabled", func(t *testing.T) { 158 | cache, err := New(Config[string, any]{ 159 | LRUCacheSize: 10000, 160 | CacheAge: time.Minute, 161 | Threshold: time.Second * 59, 162 | DisableCache: true, 163 | Updater: func(ctx context.Context, key string) (any, error) { 164 | return key, nil 165 | }, 166 | }) 167 | requirer.NoError(err) 168 | 169 | _ = cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 170 | // wait for threshold window 171 | time.Sleep(time.Second * 2) 172 | 173 | // trigger auto update within threshold window 174 | _ = cache.Get(prefix) 175 | 176 | // wait for updater to be executed 177 | time.Sleep(time.Second * 1) 178 | v := cache.Get(prefix) 179 | asserter.False(v.Found) 180 | }) 181 | 182 | tt.Run("no updater", func(t *testing.T) { 183 | cache, err := New(Config[string, any]{ 184 | LRUCacheSize: 10000, 185 | CacheAge: time.Minute, 186 | Threshold: time.Second * 59, 187 | DisableCache: false, 188 | Updater: nil, 189 | }) 190 | requirer.NoError(err) 191 | 192 | _ = cache.Add(prefix, value) 193 | // wait for threshold window 194 | time.Sleep(time.Second * 2) 195 | // trigger auto update within threshold window 196 | _ = cache.Get(prefix) 197 | // wait for updater to run 198 | time.Sleep(time.Second * 2) 199 | 200 | v := cache.Get(prefix) 201 | asserter.EqualValues(value, v.V) 202 | }) 203 | 204 | } 205 | 206 | func TestThresholdUpdater(tt *testing.T) { 207 | var ( 208 | requirer = require.New(tt) 209 | asserter = require.New(tt) 210 | cacheAge = time.Second 211 | threshold = time.Millisecond * 500 212 | ) 213 | 214 | ranUpdater := atomic.Bool{} 215 | 216 | ch, err := New(Config[string, string]{ 217 | CacheAge: cacheAge, 218 | Threshold: threshold, 219 | Updater: func(ctx context.Context, key string) (string, error) { 220 | ranUpdater.Store(true) 221 | return key, nil 222 | }, 223 | }) 224 | requirer.NoError(err) 225 | tt.Run("before threshold", func(t *testing.T) { 226 | ranUpdater.Store(false) 227 | key := "key_1" 228 | ch.Add(key, key) 229 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 230 | 231 | v := ch.Get(key) 232 | asserter.True(v.Found) 233 | asserter.False(ranUpdater.Load()) 234 | asserter.EqualValues(key, v.V) 235 | }) 236 | 237 | tt.Run("during threshold", func(t *testing.T) { 238 | ranUpdater.Store(false) 239 | key := "key_2" 240 | 241 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 242 | time.Sleep((cacheAge - threshold) + time.Millisecond) 243 | v := ch.Get(key) 244 | 
asserter.True(v.Found) 245 | asserter.EqualValues(key, v.V) 246 | // wait for updater to complete execution 247 | time.Sleep(time.Millisecond * 100) 248 | asserter.True(ranUpdater.Load()) 249 | }) 250 | 251 | tt.Run("after threshold (cache expired)", func(t *testing.T) { 252 | ranUpdater.Store(false) 253 | key := "key_3" 254 | 255 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 256 | time.Sleep(time.Millisecond * 1100) 257 | 258 | v := ch.Get(key) 259 | asserter.False(v.Found) 260 | asserter.False(ranUpdater.Load()) 261 | asserter.EqualValues("", v.V) 262 | }) 263 | } 264 | 265 | func TestThresholdUpdaterStale(tt *testing.T) { 266 | var ( 267 | requirer = require.New(tt) 268 | asserter = require.New(tt) 269 | cacheAge = time.Second 270 | threshold = time.Millisecond * 500 271 | ) 272 | 273 | ranUpdater := atomic.Bool{} 274 | 275 | ch, err := New(Config[string, string]{ 276 | ServeStale: true, 277 | CacheAge: cacheAge, 278 | Threshold: threshold, 279 | Updater: func(ctx context.Context, key string) (string, error) { 280 | ranUpdater.Store(true) 281 | return key, nil 282 | }, 283 | }) 284 | requirer.NoError(err) 285 | tt.Run("before threshold", func(t *testing.T) { 286 | ranUpdater.Store(false) 287 | key := "key_1" 288 | ch.Add(key, key) 289 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 290 | 291 | v := ch.Get(key) 292 | asserter.True(v.Found) 293 | asserter.False(ranUpdater.Load()) 294 | asserter.EqualValues(key, v.V) 295 | }) 296 | 297 | tt.Run("during threshold", func(t *testing.T) { 298 | ranUpdater.Store(false) 299 | key := "key_2" 300 | 301 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 302 | time.Sleep((cacheAge - threshold) + time.Millisecond) 303 | v := ch.Get(key) 304 | asserter.True(v.Found) 305 | asserter.EqualValues(key, v.V) 306 | // wait for updater to complete execution 307 | time.Sleep(time.Millisecond * 100) 308 | asserter.True(ranUpdater.Load()) 309 | }) 310 | 311 | tt.Run("after threshold (cache expired)", func(t *testing.T) { 312 | ranUpdater.Store(false) 313 | key := "key_3" 314 | 315 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 316 | time.Sleep(cacheAge + time.Millisecond) 317 | 318 | v := ch.Get(key) 319 | asserter.True(v.Found) 320 | asserter.EqualValues(key, v.V) 321 | 322 | // wait for updater to complete execution 323 | time.Sleep(time.Millisecond * 100) 324 | asserter.True(ranUpdater.Load()) 325 | }) 326 | 327 | tt.Run("long after threshold (cache expired)", func(t *testing.T) { 328 | ranUpdater.Store(false) 329 | key := "key_4" 330 | 331 | ch.BulkAdd([]Tuple[string, string]{{Key: key, Value: key}}) 332 | time.Sleep(cacheAge + 2*threshold) 333 | 334 | v := ch.Get(key) 335 | asserter.True(v.Found) 336 | asserter.EqualValues(key, v.V) 337 | 338 | // wait for updater to complete execution 339 | time.Sleep(time.Millisecond * 100) 340 | asserter.True(ranUpdater.Load()) 341 | }) 342 | } 343 | 344 | func TestValidate(tt *testing.T) { 345 | asserter := assert.New(tt) 346 | requirer := require.New(tt) 347 | 348 | tt.Run("invalid LRU cache size", func(t *testing.T) { 349 | cfg := Config[string, string]{ 350 | LRUCacheSize: 0, 351 | } 352 | err := cfg.Validate() 353 | requirer.NotNil(err) 354 | asserter.ErrorIs(err, ErrValidation) 355 | }) 356 | tt.Run("invalid threshold", func(t *testing.T) { 357 | cfg := Config[string, string]{ 358 | LRUCacheSize: 10, 359 | CacheAge: time.Second, 360 | Threshold: time.Second, 361 | } 362 | err := cfg.Validate() 363 | requirer.NotNil(err) 364 | asserter.ErrorIs(err, 
ErrValidation) 365 | }) 366 | 367 | tt.Run("valid configuration", func(t *testing.T) { 368 | cfg := Config[string, string]{ 369 | LRUCacheSize: 10, 370 | CacheAge: time.Minute, 371 | Threshold: time.Second, 372 | } 373 | err := cfg.Validate() 374 | requirer.Nil(err) 375 | }) 376 | } 377 | 378 | func TestSanitize(tt *testing.T) { 379 | asserter := assert.New(tt) 380 | 381 | cfg := Config[string, string]{} 382 | cfg.Sanitize() 383 | asserter.Equal(cfg.LRUCacheSize, uint(1000)) 384 | asserter.Equal(cfg.QLength, uint(1000)) 385 | asserter.Equal(cfg.CacheAge, time.Minute) 386 | asserter.Equal(cfg.Threshold, time.Second*59) 387 | asserter.Equal(cfg.UpdaterTimeout, time.Second) 388 | } 389 | 390 | func TestPayload(tt *testing.T) { 391 | asserter := assert.New(tt) 392 | 393 | tt.Run("expiry & payload available", func(t *testing.T) { 394 | expireAt := time.Now().Add(time.Minute) 395 | cea := atomic.Pointer[time.Time]{} 396 | cea.Store(&expireAt) 397 | value := "hello world" 398 | pyl := Payload[string]{ 399 | ExpireAt: &cea, 400 | Payload: value, 401 | } 402 | asserter.Equal(value, pyl.Value()) 403 | asserter.EqualValues(expireAt, pyl.Expiry()) 404 | }) 405 | 406 | tt.Run("expiry not available", func(t *testing.T) { 407 | value := "hello world" 408 | pyl := Payload[string]{ 409 | ExpireAt: nil, 410 | Payload: value, 411 | } 412 | asserter.Equal(value, pyl.Value()) 413 | asserter.EqualValues(time.Time{}, pyl.Expiry()) 414 | }) 415 | 416 | tt.Run("value not available", func(t *testing.T) { 417 | expireAt := time.Now().Add(time.Minute) 418 | cea := atomic.Pointer[time.Time]{} 419 | cea.Store(&expireAt) 420 | pyl := Payload[any]{ 421 | ExpireAt: &cea, 422 | Payload: nil, 423 | } 424 | asserter.Equal(nil, pyl.Value()) 425 | asserter.EqualValues(expireAt, pyl.Expiry()) 426 | }) 427 | 428 | tt.Run("expiry & value not available", func(t *testing.T) { 429 | pyl := Payload[any]{ 430 | ExpireAt: nil, 431 | Payload: nil, 432 | } 433 | asserter.Equal(nil, pyl.Value()) 434 | asserter.EqualValues(time.Time{}, pyl.Expiry()) 435 | }) 436 | } 437 | 438 | func TestErrWatcher(tt *testing.T) { 439 | var ( 440 | prefix = "prefix" 441 | value = "value" 442 | requirer = require.New(tt) 443 | asserter = require.New(tt) 444 | ) 445 | 446 | tt.Run("err watcher", func(t *testing.T) { 447 | forcedErr := fmt.Errorf("forced error") 448 | ranUpdater := atomic.Bool{} 449 | ranErrWatcher := atomic.Bool{} 450 | 451 | cache, err := New(Config[string, any]{ 452 | LRUCacheSize: 10000, 453 | CacheAge: time.Minute, 454 | Threshold: time.Second * 59, 455 | DisableCache: false, 456 | Updater: func(ctx context.Context, key string) (any, error) { 457 | ranUpdater.Store(true) 458 | return nil, forcedErr 459 | }, 460 | ErrWatcher: func(watcherErr error) { 461 | ranErrWatcher.Store(true) 462 | asserter.ErrorIs(watcherErr, forcedErr) 463 | }, 464 | }) 465 | requirer.NoError(err) 466 | 467 | _ = cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 468 | // wait for threshold window 469 | time.Sleep(time.Second) 470 | // trigger auto update within threshold window 471 | _ = cache.Get(prefix) 472 | 473 | // wait for the updater callback to be executed 474 | time.Sleep(time.Second * 2) 475 | asserter.True(ranUpdater.Load()) 476 | asserter.True(ranErrWatcher.Load()) 477 | }) 478 | 479 | tt.Run("no err watcher", func(t *testing.T) { 480 | forcedErr := fmt.Errorf("forced error") 481 | ranUpdater := atomic.Bool{} 482 | ranErrWatcher := atomic.Bool{} 483 | 484 | cache, err := New(Config[string, any]{ 485 | LRUCacheSize: 10000, 486 | CacheAge: 
time.Minute, 487 | Threshold: time.Second * 59, 488 | DisableCache: false, 489 | Updater: func(ctx context.Context, key string) (any, error) { 490 | ranUpdater.Store(true) 491 | return nil, forcedErr 492 | }, 493 | }) 494 | requirer.NoError(err) 495 | 496 | _ = cache.BulkAdd([]Tuple[string, any]{{Key: prefix, Value: value}}) 497 | // wait for threshold window 498 | time.Sleep(time.Second) 499 | // trigger auto update within threshold window 500 | _ = cache.Get(prefix) 501 | 502 | // wait for the updater callback to be executed 503 | time.Sleep(time.Second * 2) 504 | asserter.True(ranUpdater.Load()) 505 | asserter.False(ranErrWatcher.Load()) 506 | }) 507 | 508 | tt.Run("err watcher: catch panic text", func(t *testing.T) { 509 | ranUpdater := atomic.Bool{} 510 | ranErrWatcher := atomic.Bool{} 511 | 512 | cache, err := New(Config[string, any]{ 513 | LRUCacheSize: 10000, 514 | CacheAge: time.Minute, 515 | Threshold: time.Second * 59, 516 | DisableCache: false, 517 | Updater: func(ctx context.Context, key string) (any, error) { 518 | ranUpdater.Store(true) 519 | panic("force panicked") 520 | }, 521 | ErrWatcher: func(watcherErr error) { 522 | ranErrWatcher.Store(true) 523 | asserter.ErrorContains(watcherErr, "force panicked") 524 | }, 525 | }) 526 | requirer.NoError(err) 527 | cache.Add(prefix, value) 528 | 529 | // wait for threshold window 530 | time.Sleep(time.Second) 531 | // trigger auto update within threshold window 532 | _ = cache.Get(prefix) 533 | 534 | // wait for the updater callback to be executed 535 | time.Sleep(time.Second * 2) 536 | asserter.True(ranUpdater.Load()) 537 | asserter.True(ranErrWatcher.Load()) 538 | 539 | }) 540 | 541 | tt.Run("err watcher: catch panic err", func(t *testing.T) { 542 | ranUpdater := atomic.Bool{} 543 | ranErrWatcher := atomic.Bool{} 544 | ErrPanic := errors.New("panic err") 545 | 546 | cache, err := New(Config[string, any]{ 547 | LRUCacheSize: 10000, 548 | CacheAge: time.Minute, 549 | Threshold: time.Second * 59, 550 | DisableCache: false, 551 | Updater: func(ctx context.Context, key string) (any, error) { 552 | ranUpdater.Store(true) 553 | panic(ErrPanic) 554 | }, 555 | ErrWatcher: func(watcherErr error) { 556 | ranErrWatcher.Store(true) 557 | asserter.ErrorIs(watcherErr, ErrPanic) 558 | }, 559 | }) 560 | requirer.NoError(err) 561 | cache.Add(prefix, value) 562 | 563 | // wait for threshold window 564 | time.Sleep(time.Second) 565 | // trigger auto update within threshold window 566 | _ = cache.Get(prefix) 567 | 568 | // wait for the updater callback to be executed 569 | time.Sleep(time.Second * 2) 570 | asserter.True(ranUpdater.Load()) 571 | asserter.True(ranErrWatcher.Load()) 572 | 573 | }) 574 | 575 | } 576 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/naughtygopher/pocache 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/hashicorp/golang-lru/v2 v2.0.7 7 | github.com/stretchr/testify v1.9.0 8 | ) 9 | 10 | require ( 11 | github.com/davecgh/go-spew v1.1.1 // indirect 12 | github.com/pmezard/go-difflib v1.0.0 // indirect 13 | gopkg.in/yaml.v3 v3.0.1 // indirect 14 | ) 15 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= 4 | github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= 5 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 6 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 7 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 8 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 9 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 10 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 11 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 12 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 13 | --------------------------------------------------------------------------------