├── .gitattributes
├── .github
│   ├── codecov.yml
│   ├── dependabot.yml
│   └── workflows
│       └── test.yml
├── .gitignore
├── LICENSE
├── README.md
├── entry.go
├── entry_test.go
├── go.mod
├── gocache.go
├── gocache_bench_test.go
├── gocache_test.go
├── janitor.go
├── janitor_test.go
├── pattern.go
├── pattern_test.go
├── policy.go
└── statistics.go
/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=lf -------------------------------------------------------------------------------- /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | patch: off 4 | project: 5 | default: 6 | target: 75% 7 | threshold: null 8 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | labels: ["dependencies"] 6 | schedule: 7 | interval: "daily" 8 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | on: 3 | pull_request: 4 | paths-ignore: 5 | - '*.md' 6 | push: 7 | branches: 8 | - master 9 | paths-ignore: 10 | - '*.md' 11 | jobs: 12 | test: 13 | name: test 14 | runs-on: ubuntu-latest 15 | timeout-minutes: 3 16 | steps: 17 | - uses: actions/setup-go@v5 18 | with: 19 | go-version: 1.21 20 | - uses: actions/checkout@v4 21 | - run: go test ./... -race -coverprofile=coverage.txt -covermode=atomic 22 | - name: Codecov 23 | uses: codecov/codecov-action@v5.4.3 24 | with: 25 | files: ./coverage.txt 26 | token: ${{ secrets.CODECOV_TOKEN }} 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 TwiN 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gocache 2 | ![test](https://github.com/TwiN/gocache/workflows/test/badge.svg?branch=master) 3 | [![Go Report Card](https://goreportcard.com/badge/github.com/TwiN/gocache)](https://goreportcard.com/report/github.com/TwiN/gocache) 4 | [![codecov](https://codecov.io/gh/TwiN/gocache/branch/master/graph/badge.svg)](https://codecov.io/gh/TwiN/gocache) 5 | [![Go version](https://img.shields.io/github/go-mod/go-version/TwiN/gocache.svg)](https://github.com/TwiN/gocache) 6 | [![Go Reference](https://pkg.go.dev/badge/github.com/TwiN/gocache.svg)](https://pkg.go.dev/github.com/TwiN/gocache/v2) 7 | [![Follow TwiN](https://img.shields.io/github/followers/TwiN?label=Follow&style=social)](https://github.com/TwiN) 8 | 9 | gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache 10 | with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even retrieval of keys by pattern. 11 | 12 | 13 | ## Table of Contents 14 | 15 | - [Features](#features) 16 | - [Usage](#usage) 17 | - [Initializing the cache](#initializing-the-cache) 18 | - [Functions](#functions) 19 | - [Examples](#examples) 20 | - [Creating or updating an entry](#creating-or-updating-an-entry) 21 | - [Getting an entry](#getting-an-entry) 22 | - [Deleting an entry](#deleting-an-entry) 23 | - [Complex example](#complex-example) 24 | - [Persistence](#persistence) 25 | - [Eviction](#eviction) 26 | - [MaxSize](#maxsize) 27 | - [MaxMemoryUsage](#maxmemoryusage) 28 | - [Expiration](#expiration) 29 | - [Performance](#performance) 30 | - [Summary](#summary) 31 | - [Results](#results) 32 | - [FAQ](#faq) 33 | - [How can I persist the data on application termination?](#how-can-i-persist-the-data-on-application-termination) 34 | 35 | 36 | ## Features 37 | gocache supports the following cache eviction policies: 38 | - First in first out (FIFO) 39 | - Least recently used (LRU) 40 | 41 | It also supports cache entry TTL, which is both active and passive. Active expiration means that if you attempt 42 | to retrieve a cache key that has already expired, it will delete it on the spot and the behavior will be as if 43 | the cache key didn't exist. As for passive expiration, there's a background task that will take care of deleting 44 | expired keys. 45 | 46 | It also includes what you'd expect from a cache, like GET/SET, bulk operations and get by pattern. 47 | 48 | 49 | ## Usage 50 | ``` 51 | go get -u github.com/TwiN/gocache/v2 52 | ``` 53 | 54 | 55 | ### Initializing the cache 56 | ```go 57 | cache := gocache.NewCache().WithMaxSize(1000).WithEvictionPolicy(gocache.LeastRecentlyUsed) 58 | ``` 59 | 60 | If you're planning on using expiration (`SetWithTTL` or `Expire`) and you want expired entries to be automatically deleted 61 | in the background, make sure to start the janitor when you instantiate the cache: 62 | 63 | ```go 64 | cache.StartJanitor() 65 | ``` 66 | 67 | ### Functions 68 | | Function | Description | 69 | |-----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 70 | | WithMaxSize | Sets the max size of the cache. 
`gocache.NoMaxSize` means there is no limit. If not set, the default max size is `gocache.DefaultMaxSize`. | 71 | | WithMaxMemoryUsage | Sets the max memory usage of the cache. `gocache.NoMaxMemoryUsage` means there is no limit. The default behavior is to not evict based on memory usage. | 72 | | WithEvictionPolicy | Sets the eviction algorithm to be used when the cache reaches the max size. If not set, the default eviction policy is `gocache.FirstInFirstOut` (FIFO). | 73 | | WithDefaultTTL | Sets the default TTL for each entry. | 74 | | WithForceNilInterfaceOnNilPointer | Configures whether values with a nil pointer passed to write functions should be forcefully set to nil. Defaults to true. | 75 | | StartJanitor | Starts the janitor, which is in charge of deleting expired cache entries in the background. | 76 | | StopJanitor | Stops the janitor. | 77 | | Set | Same as `SetWithTTL`, but using the default TTL (which is `gocache.NoExpiration`, unless configured otherwise). | 78 | | SetWithTTL | Creates or updates a cache entry with the given key, value and expiration time. If this operation causes the cache to exceed the configured max size, the tail will be evicted. Depending on the eviction policy, the tail is defined as the oldest entry (FirstInFirstOut) or the least recently used entry (LeastRecentlyUsed). | 79 | | SetAll | Same as `Set`, but in bulk. | 80 | | SetAllWithTTL | Same as `SetWithTTL`, but in bulk. | 81 | | Get | Gets a cache entry by its key. | 82 | | GetByKeys | Gets a map of entries by their keys. The resulting map will contain all keys, even if some of the keys in the slice passed as parameter were not present in the cache. | 83 | | GetAll | Gets all cache entries. | 84 | | GetKeysByPattern | Retrieves a slice of keys that match a given pattern. | 85 | | Delete | Removes a key from the cache. | 86 | | DeleteAll | Removes multiple keys from the cache. | 87 | | DeleteKeysByPattern | Removes all keys that match a given pattern. | 88 | | Count | Gets the size of the cache. This includes cache keys which may have already expired, but have not been removed yet. | 89 | | Clear | Wipes the cache. | 90 | | TTL | Gets the time until a cache key expires. | 91 | | Expire | Sets the expiration time of an existing cache key.
| 92 | 93 | For further documentation, please refer to [Go Reference](https://pkg.go.dev/github.com/TwiN/gocache/v2) 94 | 95 | 96 | ### Examples 97 | 98 | #### Creating or updating an entry 99 | ```go 100 | cache.Set("key", "value") 101 | cache.Set("key", 1) 102 | cache.Set("key", struct{ Text string }{Text: "value"}) 103 | cache.SetWithTTL("key", []byte("value"), 24*time.Hour) 104 | ``` 105 | 106 | #### Getting an entry 107 | ```go 108 | value, exists := cache.Get("key") 109 | ``` 110 | You can also get multiple entries by using `cache.GetByKeys([]string{"key1", "key2"})` 111 | 112 | #### Deleting an entry 113 | ```go 114 | cache.Delete("key") 115 | ``` 116 | You can also delete multiple entries by using `cache.DeleteAll([]string{"key1", "key2"})` 117 | 118 | #### Complex example 119 | ```go 120 | package main 121 | 122 | import ( 123 | "fmt" 124 | "time" 125 | 126 | "github.com/TwiN/gocache/v2" 127 | ) 128 | 129 | func main() { 130 | cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(10000) 131 | cache.StartJanitor() // Passively manages expired entries 132 | defer cache.StopJanitor() 133 | 134 | cache.Set("key", "value") 135 | cache.SetWithTTL("key-with-ttl", "value", 60*time.Minute) 136 | cache.SetAll(map[string]any{"k1": "v1", "k2": "v2", "k3": "v3"}) 137 | 138 | fmt.Println("[Count] Cache size:", cache.Count()) 139 | 140 | value, exists := cache.Get("key") 141 | fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists) 142 | for key, value := range cache.GetByKeys([]string{"k1", "k2", "k3"}) { 143 | fmt.Printf("[GetByKeys] key=%s; value=%s\n", key, value) 144 | } 145 | for _, key := range cache.GetKeysByPattern("key*", 0) { 146 | fmt.Printf("[GetKeysByPattern] pattern=key*; key=%s\n", key) 147 | } 148 | 149 | cache.Expire("key", time.Hour) 150 | time.Sleep(500 * time.Millisecond) 151 | timeUntilExpiration, _ := cache.TTL("key") 152 | fmt.Println("[TTL] Number of seconds before 'key' expires:", int(timeUntilExpiration.Seconds())) 153 | 154 | cache.Delete("key") 155 | cache.DeleteAll([]string{"k1", "k2", "k3"}) 156 | 157 | cache.Clear() 158 | fmt.Println("[Count] Cache size after clearing the cache:", cache.Count()) 159 | } 160 | ``` 161 | 162 | <details>
163 | <summary>Output</summary> 164 | 165 | ``` 166 | [Count] Cache size: 5 167 | [Get] key=key; value=value; exists=true 168 | [GetByKeys] key=k1; value=v1 169 | [GetByKeys] key=k2; value=v2 170 | [GetByKeys] key=k3; value=v3 171 | [GetKeysByPattern] pattern=key*; key=key-with-ttl 172 | [GetKeysByPattern] pattern=key*; key=key 173 | [TTL] Number of seconds before 'key' expires: 3599 174 | [Count] Cache size after clearing the cache: 0 175 | ``` 176 | </details>
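
#### Retrieving or deleting keys by pattern
As listed in the functions table, `GetKeysByPattern(pattern, limit)` returns the keys matching a pattern without counting as an access (no LRU reordering, no active expiration), while `DeleteKeysByPattern(pattern)` removes every matching key and returns how many were deleted. A minimal sketch, assuming `cache` is the `*gocache.Cache` from the examples above and using made-up keys:
```go
cache.Set("user-1", "John")
cache.Set("user-2", "Jane")
cache.Set("session-1", "some-token")

userKeys := cache.GetKeysByPattern("user-*", 0) // a limit of 0 means all matching keys are returned
deleted := cache.DeleteKeysByPattern("session-*") // returns 1 here, since only "session-1" matches
fmt.Println(userKeys, deleted)
```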
177 | 178 | 179 | ## Persistence 180 | Prior to v2, gocache supported persistence out of the box. 181 | 182 | After some thinking, I decided that persistence added too many dependencies, and given that this is a cache library 183 | and most people wouldn't be interested in persistence, I decided to get rid of it. 184 | 185 | That being said, you can use the `GetAll` and `SetAll` methods of `gocache.Cache` to implement persistence yourself, as sketched in the [FAQ](#how-can-i-persist-the-data-on-application-termination) below. 186 | 187 | 188 | ## Eviction 189 | ### MaxSize 190 | Eviction by MaxSize is the default behavior, and is also the most efficient. 191 | 192 | The code below will create a cache that has a maximum size of 1000: 193 | ```go 194 | cache := gocache.NewCache().WithMaxSize(1000) 195 | ``` 196 | This means that whenever an operation causes the total size of the cache to go above 1000, the tail will be evicted. 197 | 198 | ### MaxMemoryUsage 199 | Eviction by MaxMemoryUsage is **disabled by default**, and is in alpha. 200 | 201 | The code below will create a cache that has a maximum memory usage of 50MB: 202 | ```go 203 | cache := gocache.NewCache().WithMaxSize(0).WithMaxMemoryUsage(50*gocache.Megabyte) 204 | ``` 205 | This means that whenever an operation causes the total memory usage of the cache to go above 50MB, one or more tails 206 | will be evicted. 207 | 208 | Unlike evictions caused by reaching the MaxSize, evictions triggered by MaxMemoryUsage may lead to multiple entries 209 | being evicted in a row. The reason for this is that if, for instance, you had 100 entries of 0.1MB each and you suddenly added 210 | a single entry of 10MB, 100 entries would need to be evicted to make enough space for that new big entry. 211 | 212 | It's very important to keep in mind that eviction by MaxMemoryUsage is approximate. 213 | 214 | **The only memory taken into consideration is the size of the cache, not the size of the entire application.** 215 | If you pass along 100MB worth of data in a matter of seconds, even though the cache's memory usage will remain 216 | under 50MB (or whatever you configure the MaxMemoryUsage to), the memory footprint generated by that 100MB will 217 | still exist until the next GC cycle. 218 | 219 | As previously mentioned, this is a work in progress, and here's a list of the things you should keep in mind: 220 | - The memory usage of structs is a gross estimation and may not reflect the actual memory usage. 221 | - Native types (string, int, bool, []byte, etc.) are the most accurate for calculating the memory usage. 222 | - Adding an entry bigger than the configured MaxMemoryUsage will work, but it will evict all other entries. 223 | 224 | 225 | ## Expiration 226 | There are two ways that the deletion of expired keys can take place: 227 | - Active 228 | - Passive 229 | 230 | **Active deletion of expired keys** happens when an attempt is made to access the value of a cache entry that has expired. 231 | `Get`, `GetByKeys` and `GetAll` are the only functions that can trigger active deletion of expired keys. 232 | 233 | **Passive deletion of expired keys** runs in the background and is managed by the janitor. 234 | If you do not start the janitor, there will be no passive deletion of expired keys. 235 | 236 | 237 | ## Performance 238 | ### Summary 239 | - **Set**: Both map and gocache have the same performance. 240 | - **Get**: Map is faster than gocache. 241 | 242 | This is because gocache keeps track of the head and the tail for eviction and expiration/TTL. 243 | 244 | Ultimately, the difference is negligible.
245 | 246 | We could add a way to disable eviction or disable expiration altogether just to match the map's performance, 247 | but if you're looking into using a library like gocache, odds are, you want more than just a map. 248 | 249 | 250 | ### Results 251 | | key | value | 252 | |:-------|:---------| 253 | | goos | windows | 254 | | goarch | amd64 | 255 | | cpu | i7-9700K | 256 | | mem | 32G DDR4 | 257 | 258 | ``` 259 | // Normal map 260 | BenchmarkMap_Get-8 49944228 24.2 ns/op 7 B/op 0 allocs/op 261 | BenchmarkMap_Set/small_value-8 3939964 394.1 ns/op 188 B/op 2 allocs/op 262 | BenchmarkMap_Set/medium_value-8 3868586 395.5 ns/op 191 B/op 2 allocs/op 263 | BenchmarkMap_Set/large_value-8 3992138 385.3 ns/op 186 B/op 2 allocs/op 264 | // Gocache 265 | BenchmarkCache_Get/FirstInFirstOut-8 27907950 44.3 ns/op 7 B/op 0 allocs/op 266 | BenchmarkCache_Get/LeastRecentlyUsed-8 28211396 44.2 ns/op 7 B/op 0 allocs/op 267 | BenchmarkCache_Set/FirstInFirstOut_small_value-8 3139538 373.5 ns/op 185 B/op 3 allocs/op 268 | BenchmarkCache_Set/FirstInFirstOut_medium_value-8 3099516 378.6 ns/op 186 B/op 3 allocs/op 269 | BenchmarkCache_Set/FirstInFirstOut_large_value-8 3086776 386.7 ns/op 186 B/op 3 allocs/op 270 | BenchmarkCache_Set/LeastRecentlyUsed_small_value-8 3070555 379.0 ns/op 187 B/op 3 allocs/op 271 | BenchmarkCache_Set/LeastRecentlyUsed_medium_value-8 3056928 383.8 ns/op 187 B/op 3 allocs/op 272 | BenchmarkCache_Set/LeastRecentlyUsed_large_value-8 3108250 383.8 ns/op 186 B/op 3 allocs/op 273 | BenchmarkCache_SetUsingMaxMemoryUsage/medium_value-8 2773315 449.0 ns/op 210 B/op 4 allocs/op 274 | BenchmarkCache_SetUsingMaxMemoryUsage/large_value-8 2731818 440.0 ns/op 211 B/op 4 allocs/op 275 | BenchmarkCache_SetUsingMaxMemoryUsage/small_value-8 2659296 446.8 ns/op 213 B/op 4 allocs/op 276 | BenchmarkCache_SetWithMaxSize/100_small_value-8 4848658 248.8 ns/op 114 B/op 3 allocs/op 277 | BenchmarkCache_SetWithMaxSize/10000_small_value-8 4117632 293.7 ns/op 106 B/op 3 allocs/op 278 | BenchmarkCache_SetWithMaxSize/100000_small_value-8 3867402 313.0 ns/op 110 B/op 3 allocs/op 279 | BenchmarkCache_SetWithMaxSize/100_medium_value-8 4750057 250.1 ns/op 113 B/op 3 allocs/op 280 | BenchmarkCache_SetWithMaxSize/10000_medium_value-8 4143772 294.5 ns/op 106 B/op 3 allocs/op 281 | BenchmarkCache_SetWithMaxSize/100000_medium_value-8 3768883 313.2 ns/op 111 B/op 3 allocs/op 282 | BenchmarkCache_SetWithMaxSize/100_large_value-8 4822646 251.1 ns/op 114 B/op 3 allocs/op 283 | BenchmarkCache_SetWithMaxSize/10000_large_value-8 4154428 291.6 ns/op 106 B/op 3 allocs/op 284 | BenchmarkCache_SetWithMaxSize/100000_large_value-8 3897358 313.7 ns/op 110 B/op 3 allocs/op 285 | BenchmarkCache_SetWithMaxSizeAndLRU/100_small_value-8 4784180 254.2 ns/op 114 B/op 3 allocs/op 286 | BenchmarkCache_SetWithMaxSizeAndLRU/10000_small_value-8 4067042 292.0 ns/op 106 B/op 3 allocs/op 287 | BenchmarkCache_SetWithMaxSizeAndLRU/100000_small_value-8 3832760 313.8 ns/op 111 B/op 3 allocs/op 288 | BenchmarkCache_SetWithMaxSizeAndLRU/100_medium_value-8 4846706 252.2 ns/op 114 B/op 3 allocs/op 289 | BenchmarkCache_SetWithMaxSizeAndLRU/10000_medium_value-8 4103817 292.5 ns/op 106 B/op 3 allocs/op 290 | BenchmarkCache_SetWithMaxSizeAndLRU/100000_medium_value-8 3845623 315.1 ns/op 111 B/op 3 allocs/op 291 | BenchmarkCache_SetWithMaxSizeAndLRU/100_large_value-8 4744513 257.9 ns/op 114 B/op 3 allocs/op 292 | BenchmarkCache_SetWithMaxSizeAndLRU/10000_large_value-8 3956316 299.5 ns/op 106 B/op 3 allocs/op 293 | 
BenchmarkCache_SetWithMaxSizeAndLRU/100000_large_value-8 3876843 351.3 ns/op 110 B/op 3 allocs/op 294 | BenchmarkCache_GetSetMultipleConcurrent-8 750088 1566.0 ns/op 128 B/op 8 allocs/op 295 | BenchmarkCache_GetSetConcurrentWithFrequentEviction/FirstInFirstOut-8 3836961 316.2 ns/op 80 B/op 1 allocs/op 296 | BenchmarkCache_GetSetConcurrentWithFrequentEviction/LeastRecentlyUsed-8 3846165 315.6 ns/op 80 B/op 1 allocs/op 297 | BenchmarkCache_GetConcurrently/FirstInFirstOut-8 4830342 239.8 ns/op 8 B/op 1 allocs/op 298 | BenchmarkCache_GetConcurrently/LeastRecentlyUsed-8 4895587 243.2 ns/op 8 B/op 1 allocs/op 299 | (Trimmed "BenchmarkCache_" for readability) 300 | WithForceNilInterfaceOnNilPointer/true_with_nil_struct_pointer-8 6901461 178.5 ns/op 7 B/op 1 allocs/op 301 | WithForceNilInterfaceOnNilPointer/true-8 6629566 180.7 ns/op 7 B/op 1 allocs/op 302 | WithForceNilInterfaceOnNilPointer/false_with_nil_struct_pointer-8 6282798 170.1 ns/op 7 B/op 1 allocs/op 303 | WithForceNilInterfaceOnNilPointer/false-8 6741382 172.6 ns/op 7 B/op 1 allocs/op 304 | WithForceNilInterfaceOnNilPointerWithConcurrency/true_with_nil_struct_pointer-8 4432951 258.0 ns/op 8 B/op 1 allocs/op 305 | WithForceNilInterfaceOnNilPointerWithConcurrency/true-8 4676943 244.4 ns/op 8 B/op 1 allocs/op 306 | WithForceNilInterfaceOnNilPointerWithConcurrency/false_with_nil_struct_pointer-8 4818418 239.6 ns/op 8 B/op 1 allocs/op 307 | WithForceNilInterfaceOnNilPointerWithConcurrency/false-8 5025937 238.2 ns/op 8 B/op 1 allocs/op 308 | ``` 309 | 310 | 311 | ## FAQ 312 | 313 | ### How can I persist the data on application termination? 314 | While creating your own auto save feature might come in handy, it may still lead to loss of data if the application 315 | automatically saves every 10 minutes and your application crashes 9 minutes after the previous save. 316 | 317 | To increase your odds of not losing any data, you can use Go's `signal` package, more specifically its `Notify` function 318 | which allows listening for termination signals like SIGTERM and SIGINT. Once a termination signal is caught, you can 319 | add the necessary logic for a graceful shutdown. 320 | 321 | In the following example, the code that would usually be present in the `main` function is moved to a different function 322 | named `Start`, which is launched on a different goroutine so that listening for termination signals is what blocks the 323 | main goroutine instead: 324 | ```go 325 | package main 326 | 327 | import ( 328 | "log" 329 | "os" 330 | "os/signal" 331 | "syscall" 332 | 333 | "github.com/TwiN/gocache/v2" 334 | ) 335 | 336 | var cache = gocache.NewCache() 337 | 338 | func main() { 339 | data := retrieveCacheEntriesUsingWhateverMeanYouUsedToPersistIt() 340 | cache.SetAll(data) 341 | // Start everything else on another goroutine to prevent blocking the main goroutine 342 | go Start() 343 | // Wait for termination signal 344 | sig := make(chan os.Signal, 1) 345 | done := make(chan bool, 1) 346 | signal.Notify(sig, os.Interrupt, syscall.SIGTERM) 347 | go func() { 348 | <-sig 349 | log.Println("Received termination signal, attempting to gracefully shut down") 350 | // Persist the cache entries 351 | cacheEntries := cache.GetAll() 352 | persistCacheEntriesHoweverYouWant(cacheEntries) 353 | // Tell the main goroutine that we're done 354 | done <- true 355 | }() 356 | <-done 357 | log.Println("Shutting down") 358 | } 359 | ``` 360 | 361 | Note that this won't protect you from a SIGKILL, as this signal cannot be caught.
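
The two persistence helpers in the example above are placeholders for you to fill in. As a minimal sketch of one possible implementation, assuming JSON-serializable values and a hypothetical `cache.json` file (keep in mind that `GetAll` returns only keys and values, so TTLs are not preserved, and that `encoding/json` unmarshals numbers as `float64`):
```go
// Add "encoding/json" to the imports of the example above; "log" and "os" are already imported.

// persistCacheEntriesHoweverYouWant writes all cache entries to a JSON file.
func persistCacheEntriesHoweverYouWant(entries map[string]any) {
	data, err := json.Marshal(entries)
	if err != nil {
		log.Println("Failed to marshal cache entries:", err)
		return
	}
	if err := os.WriteFile("cache.json", data, 0644); err != nil {
		log.Println("Failed to persist cache entries:", err)
	}
}

// retrieveCacheEntriesUsingWhateverMeanYouUsedToPersistIt reads the entries back,
// returning an empty map if nothing has been persisted yet (e.g. on the first run).
func retrieveCacheEntriesUsingWhateverMeanYouUsedToPersistIt() map[string]any {
	entries := make(map[string]any)
	data, err := os.ReadFile("cache.json")
	if err != nil {
		return entries
	}
	if err := json.Unmarshal(data, &entries); err != nil {
		log.Println("Failed to unmarshal persisted cache entries:", err)
	}
	return entries
}
```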
362 | -------------------------------------------------------------------------------- /entry.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "unsafe" 7 | ) 8 | 9 | // Entry is a cache entry 10 | type Entry struct { 11 | // Key is the name of the cache entry 12 | Key string 13 | 14 | // Value is the value of the cache entry 15 | Value any 16 | 17 | // RelevantTimestamp is the variable used to store either: 18 | // - creation timestamp, if the Cache's EvictionPolicy is FirstInFirstOut 19 | // - last access timestamp, if the Cache's EvictionPolicy is LeastRecentlyUsed 20 | // 21 | // Note that updating an existing entry will also update this value 22 | RelevantTimestamp time.Time 23 | 24 | // Expiration is the unix time in nanoseconds at which the entry will expire (-1 means no expiration) 25 | Expiration int64 26 | 27 | next *Entry 28 | previous *Entry 29 | } 30 | 31 | // Accessed updates the Entry's RelevantTimestamp to now 32 | func (entry *Entry) Accessed() { 33 | entry.RelevantTimestamp = time.Now() 34 | } 35 | 36 | // Expired returns whether the Entry has expired 37 | func (entry Entry) Expired() bool { 38 | if entry.Expiration > 0 { 39 | if time.Now().UnixNano() > entry.Expiration { 40 | return true 41 | } 42 | } 43 | return false 44 | } 45 | 46 | // SizeInBytes returns the size of an entry in bytes, approximately. 47 | func (entry *Entry) SizeInBytes() int { 48 | return toBytes(entry.Key) + toBytes(entry.Value) + 32 49 | } 50 | 51 | func toBytes(value any) int { 52 | switch value.(type) { 53 | case string: 54 | return int(unsafe.Sizeof(value)) + len(value.(string)) 55 | case int8, uint8, bool: 56 | return int(unsafe.Sizeof(value)) + 1 57 | case int16, uint16: 58 | return int(unsafe.Sizeof(value)) + 2 59 | case int32, uint32, float32, complex64: 60 | return int(unsafe.Sizeof(value)) + 4 61 | case int64, uint64, int, uint, float64, complex128: 62 | return int(unsafe.Sizeof(value)) + 8 63 | case []any: 64 | size := 0 65 | for _, v := range value.([]any) { 66 | size += toBytes(v) 67 | } 68 | return int(unsafe.Sizeof(value)) + size 69 | case []string: 70 | size := 0 71 | for _, v := range value.([]string) { 72 | size += toBytes(v) 73 | } 74 | return int(unsafe.Sizeof(value)) + size 75 | case []int8: 76 | return int(unsafe.Sizeof(value)) + len(value.([]int8)) 77 | case []uint8: 78 | return int(unsafe.Sizeof(value)) + len(value.([]uint8)) 79 | case []bool: 80 | return int(unsafe.Sizeof(value)) + len(value.([]bool)) 81 | case []int16: 82 | return int(unsafe.Sizeof(value)) + (len(value.([]int16)) * 2) 83 | case []uint16: 84 | return int(unsafe.Sizeof(value)) + (len(value.([]uint16)) * 2) 85 | case []int32: 86 | return int(unsafe.Sizeof(value)) + (len(value.([]int32)) * 4) 87 | case []uint32: 88 | return int(unsafe.Sizeof(value)) + (len(value.([]uint32)) * 4) 89 | case []float32: 90 | return int(unsafe.Sizeof(value)) + (len(value.([]float32)) * 4) 91 | case []complex64: 92 | return int(unsafe.Sizeof(value)) + (len(value.([]complex64)) * 4) 93 | case []int64: 94 | return int(unsafe.Sizeof(value)) + (len(value.([]int64)) * 8) 95 | case []uint64: 96 | return int(unsafe.Sizeof(value)) + (len(value.([]uint64)) * 8) 97 | case []int: 98 | return int(unsafe.Sizeof(value)) + (len(value.([]int)) * 8) 99 | case []uint: 100 | return int(unsafe.Sizeof(value)) + (len(value.([]uint)) * 8) 101 | case []float64: 102 | return int(unsafe.Sizeof(value)) + (len(value.([]float64)) * 8) 103 | case []complex128: 104 | 
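// NOTE: complex64 and complex128 elements are actually 8 and 16 bytes each, so the 4- and 8-byte multipliers used for the complex cases undercount them; SizeInBytes is only ever an approximation.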
return int(unsafe.Sizeof(value)) + (len(value.([]complex128)) * 8) 105 | default: 106 | return int(unsafe.Sizeof(value)) + len(fmt.Sprintf("%v", value)) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /entry_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestEntry_SizeInBytes(t *testing.T) { 9 | testSizeInBytes(t, "key", 0, 75) 10 | testSizeInBytes(t, "k", 0, 73) 11 | testSizeInBytes(t, "k", "v", 66) 12 | testSizeInBytes(t, "k", true, 66) 13 | testSizeInBytes(t, "k", int8(1), 66) 14 | testSizeInBytes(t, "k", uint8(1), 66) 15 | testSizeInBytes(t, "k", true, 66) 16 | testSizeInBytes(t, "k", int16(1), 67) 17 | testSizeInBytes(t, "k", uint16(1), 67) 18 | testSizeInBytes(t, "k", int32(1), 69) 19 | testSizeInBytes(t, "k", uint32(1), 69) 20 | testSizeInBytes(t, "k", float32(1), 69) 21 | testSizeInBytes(t, "k", complex64(1), 69) 22 | testSizeInBytes(t, "k", int64(1), 73) 23 | testSizeInBytes(t, "k", uint64(1), 73) 24 | testSizeInBytes(t, "k", 1, 73) 25 | testSizeInBytes(t, "k", uint(1), 73) 26 | testSizeInBytes(t, "k", float64(1), 73) 27 | testSizeInBytes(t, "k", complex128(1), 73) 28 | testSizeInBytes(t, "k", []string{}, 65) 29 | testSizeInBytes(t, "k", []string{"what"}, 85) 30 | testSizeInBytes(t, "k", []string{"what", "the"}, 104) 31 | testSizeInBytes(t, "k", []int8{}, 65) 32 | testSizeInBytes(t, "k", []int8{1}, 66) 33 | testSizeInBytes(t, "k", []int8{1, 2}, 67) 34 | testSizeInBytes(t, "k", []uint8{1}, 66) 35 | testSizeInBytes(t, "k", []uint8{1, 2}, 67) 36 | testSizeInBytes(t, "k", []bool{true}, 66) 37 | testSizeInBytes(t, "k", []bool{true, false}, 67) 38 | testSizeInBytes(t, "k", []int16{1}, 67) 39 | testSizeInBytes(t, "k", []int16{1, 2}, 69) 40 | testSizeInBytes(t, "k", []uint16{1}, 67) 41 | testSizeInBytes(t, "k", []int32{1}, 69) 42 | testSizeInBytes(t, "k", []int32{1, 2}, 73) 43 | testSizeInBytes(t, "k", []uint32{1}, 69) 44 | testSizeInBytes(t, "k", []uint32{1, 2}, 73) 45 | testSizeInBytes(t, "k", []float32{1}, 69) 46 | testSizeInBytes(t, "k", []float32{1, 2}, 73) 47 | testSizeInBytes(t, "k", []complex64{1}, 69) 48 | testSizeInBytes(t, "k", []complex64{1, 2}, 73) 49 | testSizeInBytes(t, "k", []int64{1}, 73) 50 | testSizeInBytes(t, "k", []int64{1, 2}, 81) 51 | testSizeInBytes(t, "k", []uint64{1}, 73) 52 | testSizeInBytes(t, "k", []uint64{1, 2}, 81) 53 | testSizeInBytes(t, "k", []int{1}, 73) 54 | testSizeInBytes(t, "k", []int{1, 2}, 81) 55 | testSizeInBytes(t, "k", []uint{1}, 73) 56 | testSizeInBytes(t, "k", []uint{1, 2}, 81) 57 | testSizeInBytes(t, "k", []float64{1}, 73) 58 | testSizeInBytes(t, "k", []float64{1, 2}, 81) 59 | testSizeInBytes(t, "k", []complex128{1}, 73) 60 | testSizeInBytes(t, "k", []complex128{1, 2}, 81) 61 | testSizeInBytes(t, "k", struct{}{}, 67) 62 | testSizeInBytes(t, "k", struct{ A string }{A: "hello"}, 72) 63 | testSizeInBytes(t, "k", struct{ A, B string }{A: "hello", B: "world"}, 78) 64 | testSizeInBytes(t, "k", nil, 70) 65 | testSizeInBytes(t, "k", make([]any, 5), 170) 66 | } 67 | 68 | func testSizeInBytes(t *testing.T, key string, value any, expectedSize int) { 69 | t.Run(fmt.Sprintf("%T_%d", value, expectedSize), func(t *testing.T) { 70 | if size := (&Entry{Key: key, Value: value}).SizeInBytes(); size != expectedSize { 71 | t.Errorf("expected size of entry with key '%v' and value '%v' (%T) to be %d, got %d", key, value, value, expectedSize, size) 72 | } 73 | }) 74 | } 75 | 
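// Note that the expected sizes above assume a 64-bit platform, where unsafe.Sizeof reports 16 bytes for an interface header: for key "k" and value "v", SizeInBytes returns (16+1) + (16+1) + 32 = 66.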
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/TwiN/gocache/v2 2 | 3 | go 1.21 4 | -------------------------------------------------------------------------------- /gocache.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "errors" 5 | "reflect" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | var ( 11 | Debug = false 12 | ) 13 | 14 | const ( 15 | // NoMaxSize means that the cache has no maximum number of entries in the cache 16 | // Setting Cache.maxSize to this value also means there will be no eviction 17 | NoMaxSize = 0 18 | 19 | // NoMaxMemoryUsage means that the cache has no maximum number of entries in the cache 20 | NoMaxMemoryUsage = 0 21 | 22 | // DefaultMaxSize is the max size set if no max size is specified 23 | DefaultMaxSize = 100000 24 | 25 | // NoExpiration is the value that must be used as TTL to specify that the given key should never expire 26 | NoExpiration = -1 27 | 28 | Kilobyte = 1024 29 | Megabyte = 1024 * Kilobyte 30 | Gigabyte = 1024 * Megabyte 31 | ) 32 | 33 | var ( 34 | ErrKeyDoesNotExist = errors.New("key does not exist") // Returned when a cache key does not exist 35 | ErrKeyHasNoExpiration = errors.New("key has no expiration") // Returned when a cache key has no expiration 36 | ErrJanitorAlreadyRunning = errors.New("janitor is already running") // Returned when the janitor has already been started 37 | ) 38 | 39 | // Cache is the core struct of gocache which contains the data as well as all relevant configuration fields 40 | // 41 | // Do not instantiate this struct directly, use NewCache instead 42 | type Cache struct { 43 | // maxSize is the maximum amount of entries that can be in the cache at any given time 44 | // By default, this is set to DefaultMaxSize 45 | maxSize int 46 | 47 | // maxMemoryUsage is the maximum amount of memory that can be taken up by the cache at any time 48 | // By default, this is set to NoMaxMemoryUsage, meaning that the default behavior is to not evict 49 | // based on maximum memory usage 50 | maxMemoryUsage int 51 | 52 | // evictionPolicy is the eviction policy 53 | evictionPolicy EvictionPolicy 54 | 55 | // defaultTTL is the default TTL for each entry 56 | // Defaults to NoExpiration 57 | defaultTTL time.Duration 58 | 59 | // stats is the object that contains cache statistics/metrics 60 | stats *Statistics 61 | 62 | // entries is the content of the cache 63 | entries map[string]*Entry 64 | 65 | // mutex is the lock for making concurrent operations on the cache 66 | mutex sync.RWMutex 67 | 68 | // head is the cache entry at the head of the cache 69 | head *Entry 70 | 71 | // tail is the last cache node and also the next entry that will be evicted 72 | tail *Entry 73 | 74 | // stopJanitor is the channel used to stop the janitor 75 | stopJanitor chan bool 76 | 77 | // memoryUsage is the approximate memory usage of the cache (dataset only) in bytes 78 | memoryUsage int 79 | 80 | // forceNilInterfaceOnNilPointer determines whether all Set-like functions should set a value as nil if the 81 | // interface passed has a nil value but not a nil type. 82 | // 83 | // By default, interfaces are only nil when both their type and value is nil. 
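// (e.g. var p *Struct = nil; any(p) != nil, because the interface still carries the type *Struct.)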
84 | // This means that when you pass a nil pointer, the interface holding it will not be nil, 85 | // because the interface still carries the pointer's type. As a result, if you don't cast the 86 | // interface after retrieving it, a nil check will report that the value is not nil. 87 | forceNilInterfaceOnNilPointer bool 88 | } 89 | 90 | // MaxSize returns the maximum amount of keys that can be present in the cache before 91 | // new entries trigger the eviction of the tail 92 | func (cache *Cache) MaxSize() int { 93 | return cache.maxSize 94 | } 95 | 96 | // MaxMemoryUsage returns the configured maxMemoryUsage of the cache 97 | func (cache *Cache) MaxMemoryUsage() int { 98 | return cache.maxMemoryUsage 99 | } 100 | 101 | // EvictionPolicy returns the EvictionPolicy of the Cache 102 | func (cache *Cache) EvictionPolicy() EvictionPolicy { 103 | return cache.evictionPolicy 104 | } 105 | 106 | // Stats returns statistics from the cache 107 | func (cache *Cache) Stats() Statistics { 108 | cache.mutex.RLock() 109 | stats := Statistics{ 110 | EvictedKeys: cache.stats.EvictedKeys, 111 | ExpiredKeys: cache.stats.ExpiredKeys, 112 | Hits: cache.stats.Hits, 113 | Misses: cache.stats.Misses, 114 | } 115 | cache.mutex.RUnlock() 116 | return stats 117 | } 118 | 119 | // MemoryUsage returns the current memory usage of the cache's dataset in bytes 120 | // If MaxMemoryUsage is set to NoMaxMemoryUsage, this will return 0 121 | func (cache *Cache) MemoryUsage() int { 122 | return cache.memoryUsage 123 | } 124 | 125 | // WithMaxSize sets the maximum amount of entries that can be in the cache at any given time 126 | // A maxSize of 0 or less means infinite 127 | func (cache *Cache) WithMaxSize(maxSize int) *Cache { 128 | if maxSize < 0 { 129 | maxSize = NoMaxSize 130 | } 131 | if maxSize != NoMaxSize && cache.Count() == 0 { 132 | cache.entries = make(map[string]*Entry, maxSize) 133 | } 134 | cache.maxSize = maxSize 135 | return cache 136 | } 137 | 138 | // WithMaxMemoryUsage sets the maximum amount of memory that can be used by the cache at any given time 139 | // 140 | // NOTE: This is approximate. 141 | // 142 | // Setting this to NoMaxMemoryUsage will disable eviction by memory usage 143 | func (cache *Cache) WithMaxMemoryUsage(maxMemoryUsageInBytes int) *Cache { 144 | if maxMemoryUsageInBytes < 0 { 145 | maxMemoryUsageInBytes = NoMaxMemoryUsage 146 | } 147 | cache.maxMemoryUsage = maxMemoryUsageInBytes 148 | return cache 149 | } 150 | 151 | // WithEvictionPolicy sets the eviction algorithm. 152 | // 153 | // Defaults to FirstInFirstOut (FIFO) 154 | func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache { 155 | cache.evictionPolicy = policy 156 | return cache 157 | } 158 | 159 | // WithDefaultTTL sets the default TTL for each entry (unless a different TTL is specified using SetWithTTL or SetAllWithTTL) 160 | // 161 | // Defaults to NoExpiration (-1) 162 | func (cache *Cache) WithDefaultTTL(ttl time.Duration) *Cache { 163 | if ttl > 1 { 164 | cache.defaultTTL = ttl 165 | } 166 | return cache 167 | } 168 | 169 | // WithForceNilInterfaceOnNilPointer sets whether all Set-like functions should set a value as nil if the 170 | // interface passed has a nil value but not a nil type. 171 | // 172 | // In Go, an interface is only nil if both its type and value are nil, which means that a nil pointer 173 | // (e.g. (*Struct)(nil)) will retain its attribution to the type, and the unmodified value returned from 174 | // Cache.Get, for instance, would return false when compared with nil if this option is set to false.
175 | // 176 | // We can bypass this by detecting if the interface's value is nil and setting it to nil rather than 177 | // a nil pointer, which will make the value returned from Cache.Get return true when compared with nil. 178 | // This is exactly what passing true to WithForceNilInterfaceOnNilPointer does, and it's also the default behavior. 179 | // 180 | // Alternatively, you may pass false to WithForceNilInterfaceOnNilPointer, which will mean that you'll have 181 | // to cast the value returned from Cache.Get to its original type to check for whether the pointer returned 182 | // is nil or not. 183 | // 184 | // If set to true (default): 185 | // 186 | // cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(true) 187 | // cache.Set("key", (*Struct)(nil)) 188 | // value, _ := cache.Get("key") 189 | // // the following returns true, because the interface{} (any) was forcefully set to nil 190 | // if value == nil {} 191 | // // the following will panic, because the value has been casted to its type (which is nil) 192 | // if value.(*Struct) == nil {} 193 | // 194 | // If set to false: 195 | // 196 | // cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(false) 197 | // cache.Set("key", (*Struct)(nil)) 198 | // value, _ := cache.Get("key") 199 | // // the following returns false, because the interface{} (any) returned has a non-nil type (*Struct) 200 | // if value == nil {} 201 | // // the following returns true, because the value has been cast to its type 202 | // if value.(*Struct) == nil {} 203 | // 204 | // In other words, if set to true, you do not need to cast the value returned from the cache to 205 | // check if the value is nil. 206 | // 207 | // Defaults to true 208 | func (cache *Cache) WithForceNilInterfaceOnNilPointer(forceNilInterfaceOnNilPointer bool) *Cache { 209 | cache.forceNilInterfaceOnNilPointer = forceNilInterfaceOnNilPointer 210 | return cache 211 | } 212 | 213 | // NewCache creates a new Cache 214 | // 215 | // Should be used in conjunction with Cache.WithMaxSize, Cache.WithMaxMemoryUsage and/or Cache.WithEvictionPolicy 216 | // 217 | // gocache.NewCache().WithMaxSize(10000).WithEvictionPolicy(gocache.LeastRecentlyUsed) 218 | func NewCache() *Cache { 219 | return &Cache{ 220 | maxSize: DefaultMaxSize, 221 | evictionPolicy: FirstInFirstOut, 222 | defaultTTL: NoExpiration, 223 | stats: &Statistics{}, 224 | entries: make(map[string]*Entry), 225 | mutex: sync.RWMutex{}, 226 | stopJanitor: nil, 227 | forceNilInterfaceOnNilPointer: true, 228 | } 229 | } 230 | 231 | // Set creates or updates a key with a given value 232 | func (cache *Cache) Set(key string, value any) { 233 | cache.SetWithTTL(key, value, cache.defaultTTL) 234 | } 235 | 236 | // SetWithTTL creates or updates a key with a given value and sets an expiration time (-1 is NoExpiration) 237 | // 238 | // The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is 239 | // provided, the entry will not be created if the key doesn't exist 240 | func (cache *Cache) SetWithTTL(key string, value any, ttl time.Duration) { 241 | // An interface is only nil if both its value and its type are nil, however, passing a nil pointer as an interface{} 242 | // means that the interface itself is not nil, because the interface value is nil but not the type. 
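// The reflect check below detects that case (a non-nil interface wrapping a nil pointer) and replaces the value with plain nil when forceNilInterfaceOnNilPointer is enabled.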
243 | if cache.forceNilInterfaceOnNilPointer { 244 | if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) { 245 | value = nil 246 | } 247 | } 248 | cache.mutex.Lock() 249 | entry, ok := cache.get(key) 250 | if !ok { 251 | // A negative TTL that isn't -1 (NoExpiration) or 0 is an entry that will expire instantly, 252 | // so might as well just not create it in the first place 253 | if ttl != NoExpiration && ttl < 1 { 254 | cache.mutex.Unlock() 255 | return 256 | } 257 | // Cache entry doesn't exist, so we have to create a new one 258 | entry = &Entry{ 259 | Key: key, 260 | Value: value, 261 | RelevantTimestamp: time.Now(), 262 | next: cache.head, 263 | } 264 | if cache.head == nil { 265 | cache.tail = entry 266 | } else { 267 | cache.head.previous = entry 268 | } 269 | cache.head = entry 270 | cache.entries[key] = entry 271 | if cache.maxMemoryUsage != NoMaxMemoryUsage { 272 | cache.memoryUsage += entry.SizeInBytes() 273 | } 274 | } else { 275 | // A negative TTL that isn't -1 (NoExpiration) or 0 is an entry that will expire instantly, 276 | // so might as well just delete it immediately instead of updating it 277 | if ttl != NoExpiration && ttl < 1 { 278 | cache.delete(key) 279 | cache.mutex.Unlock() 280 | return 281 | } 282 | if cache.maxMemoryUsage != NoMaxMemoryUsage { 283 | // Subtract the old entry from the cache's memoryUsage 284 | cache.memoryUsage -= entry.SizeInBytes() 285 | } 286 | // Update existing entry's value 287 | entry.Value = value 288 | entry.RelevantTimestamp = time.Now() 289 | if cache.maxMemoryUsage != NoMaxMemoryUsage { 290 | // Add the memory usage of the new entry to the cache's memoryUsage 291 | cache.memoryUsage += entry.SizeInBytes() 292 | } 293 | // Because we just updated the entry, we need to move it back to HEAD 294 | cache.moveExistingEntryToHead(entry) 295 | } 296 | if ttl != NoExpiration { 297 | entry.Expiration = time.Now().Add(ttl).UnixNano() 298 | } else { 299 | entry.Expiration = NoExpiration 300 | } 301 | // If the cache doesn't have a maxSize/maxMemoryUsage, then there's no point 302 | // checking if we need to evict an entry, so we'll just return now 303 | if cache.maxSize == NoMaxSize && cache.maxMemoryUsage == NoMaxMemoryUsage { 304 | cache.mutex.Unlock() 305 | return 306 | } 307 | // If there's a maxSize and the cache has more entries than the maxSize, evict 308 | if cache.maxSize != NoMaxSize && len(cache.entries) > cache.maxSize { 309 | cache.evict() 310 | } 311 | // If there's a maxMemoryUsage and the memoryUsage is above the maxMemoryUsage, evict 312 | if cache.maxMemoryUsage != NoMaxMemoryUsage && cache.memoryUsage > cache.maxMemoryUsage { 313 | for cache.memoryUsage > cache.maxMemoryUsage && len(cache.entries) > 0 { 314 | cache.evict() 315 | } 316 | } 317 | cache.mutex.Unlock() 318 | } 319 | 320 | // SetAll creates or updates multiple values 321 | func (cache *Cache) SetAll(entries map[string]any) { 322 | cache.SetAllWithTTL(entries, cache.defaultTTL) 323 | } 324 | 325 | // SetAllWithTTL creates or updates multiple values 326 | func (cache *Cache) SetAllWithTTL(entries map[string]any, ttl time.Duration) { 327 | for key, value := range entries { 328 | cache.SetWithTTL(key, value, ttl) 329 | } 330 | } 331 | 332 | // Get retrieves an entry using the key passed as parameter 333 | // If there is no such entry, the value returned will be nil and the boolean will be false 334 | // If there is an entry, the value returned will be the value cached and the boolean will be true 335 | func (cache *Cache) 
Get(key string) (any, bool) { 336 | cache.mutex.Lock() 337 | entry, ok := cache.get(key) 338 | if !ok { 339 | cache.stats.Misses++ 340 | cache.mutex.Unlock() 341 | return nil, false 342 | } 343 | if entry.Expired() { 344 | cache.stats.ExpiredKeys++ 345 | cache.delete(key) 346 | cache.mutex.Unlock() 347 | return nil, false 348 | } 349 | cache.stats.Hits++ 350 | if cache.evictionPolicy == LeastRecentlyUsed { 351 | entry.Accessed() 352 | if cache.head == entry { 353 | cache.mutex.Unlock() 354 | return entry.Value, true 355 | } 356 | // Because the eviction policy is LRU, we need to move the entry back to HEAD 357 | cache.moveExistingEntryToHead(entry) 358 | } 359 | cache.mutex.Unlock() 360 | return entry.Value, true 361 | } 362 | 363 | // GetValue retrieves an entry using the key passed as parameter 364 | // Unlike Get, this function only returns the value 365 | func (cache *Cache) GetValue(key string) any { 366 | value, _ := cache.Get(key) 367 | return value 368 | } 369 | 370 | // GetByKeys retrieves multiple entries using the keys passed as parameter 371 | // All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the 372 | // cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or 373 | // whether it doesn't exist in the cache using only this function. 374 | func (cache *Cache) GetByKeys(keys []string) map[string]any { 375 | entries := make(map[string]any) 376 | for _, key := range keys { 377 | entries[key], _ = cache.Get(key) 378 | } 379 | return entries 380 | } 381 | 382 | // GetAll retrieves all cache entries 383 | // 384 | // If the eviction policy is LeastRecentlyUsed, note that unlike Get and GetByKeys, this does not update the last access 385 | // timestamp. The reason for this is that since all cache entries will be accessed, updating the last access timestamp 386 | // would provide very little benefit while harming the ability to accurately determine the next key that will be evicted 387 | // 388 | // You should probably avoid using this if you have a lot of entries. 389 | // 390 | // GetKeysByPattern is a good alternative if you want to retrieve entries that you do not have the key for, as it only 391 | // retrieves the keys and does not trigger active eviction and has a parameter for setting a limit to the number of keys 392 | // you wish to retrieve. 393 | func (cache *Cache) GetAll() map[string]any { 394 | entries := make(map[string]any) 395 | cache.mutex.Lock() 396 | for key, entry := range cache.entries { 397 | if entry.Expired() { 398 | cache.delete(key) 399 | continue 400 | } 401 | entries[key] = entry.Value 402 | } 403 | cache.stats.Hits += uint64(len(entries)) 404 | cache.mutex.Unlock() 405 | return entries 406 | } 407 | 408 | // GetKeysByPattern retrieves a slice of keys that match a given pattern 409 | // If the limit is set to 0, the entire cache will be searched for matching keys. 410 | // If the limit is above 0, the search will stop once the specified number of matching keys have been found. 411 | // 412 | // e.g. 413 | // 414 | // cache.GetKeysByPattern("*some*", 0) will return all keys containing "some" in them 415 | // cache.GetKeysByPattern("*some*", 5) will return 5 keys (or less) containing "some" in them 416 | // 417 | // Note that GetKeysByPattern does not trigger active evictions, nor does it count as accessing the entry (if LRU). 
418 | // The reason for that behavior is that these two (active eviction and access) only applies when you access the value 419 | // of the cache entry, and this function only returns the keys. 420 | func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string { 421 | var matchingKeys []string 422 | cache.mutex.Lock() 423 | for key, value := range cache.entries { 424 | if value.Expired() { 425 | continue 426 | } 427 | if MatchPattern(pattern, key) { 428 | matchingKeys = append(matchingKeys, key) 429 | if limit > 0 && len(matchingKeys) >= limit { 430 | break 431 | } 432 | } 433 | } 434 | cache.mutex.Unlock() 435 | return matchingKeys 436 | } 437 | 438 | // Delete removes a key from the cache 439 | // 440 | // Returns false if the key did not exist. 441 | func (cache *Cache) Delete(key string) bool { 442 | cache.mutex.Lock() 443 | ok := cache.delete(key) 444 | cache.mutex.Unlock() 445 | return ok 446 | } 447 | 448 | // DeleteAll deletes multiple entries based on the keys passed as parameter 449 | // 450 | // Returns the number of keys deleted 451 | func (cache *Cache) DeleteAll(keys []string) int { 452 | numberOfKeysDeleted := 0 453 | cache.mutex.Lock() 454 | for _, key := range keys { 455 | if cache.delete(key) { 456 | numberOfKeysDeleted++ 457 | } 458 | } 459 | cache.mutex.Unlock() 460 | return numberOfKeysDeleted 461 | } 462 | 463 | // DeleteKeysByPattern deletes all entries matching a given key pattern and returns the number of entries deleted. 464 | // 465 | // Note that DeleteKeysByPattern does not trigger active evictions, nor does it count as accessing the entry (if LRU). 466 | func (cache *Cache) DeleteKeysByPattern(pattern string) int { 467 | return cache.DeleteAll(cache.GetKeysByPattern(pattern, 0)) 468 | } 469 | 470 | // Count returns the total amount of entries in the cache, regardless of whether they're expired or not 471 | func (cache *Cache) Count() int { 472 | cache.mutex.RLock() 473 | count := len(cache.entries) 474 | cache.mutex.RUnlock() 475 | return count 476 | } 477 | 478 | // Clear deletes all entries from the cache 479 | func (cache *Cache) Clear() { 480 | cache.mutex.Lock() 481 | cache.entries = make(map[string]*Entry) 482 | cache.memoryUsage = 0 483 | cache.head = nil 484 | cache.tail = nil 485 | cache.mutex.Unlock() 486 | } 487 | 488 | // TTL returns the time until the cache entry specified by the key passed as parameter 489 | // will be deleted. 490 | func (cache *Cache) TTL(key string) (time.Duration, error) { 491 | cache.mutex.RLock() 492 | entry, ok := cache.get(key) 493 | cache.mutex.RUnlock() 494 | if !ok { 495 | return 0, ErrKeyDoesNotExist 496 | } 497 | if entry.Expiration == NoExpiration { 498 | return 0, ErrKeyHasNoExpiration 499 | } 500 | timeUntilExpiration := time.Until(time.Unix(0, entry.Expiration)) 501 | if timeUntilExpiration < 0 { 502 | // The key has already expired but hasn't been deleted yet. 
503 | // From the client's perspective, this means that the cache entry doesn't exist 504 | return 0, ErrKeyDoesNotExist 505 | } 506 | return timeUntilExpiration, nil 507 | } 508 | 509 | // Expire sets a key's expiration time 510 | // 511 | // A TTL of -1 means that the key will never expire 512 | // A TTL of 0 means that the key will expire immediately 513 | // If using LRU, note that this does not reset the position of the key 514 | // 515 | // Returns true if the cache key exists and has had its expiration time altered 516 | func (cache *Cache) Expire(key string, ttl time.Duration) bool { 517 | entry, ok := cache.get(key) 518 | if !ok || entry.Expired() { 519 | return false 520 | } 521 | if ttl != NoExpiration { 522 | entry.Expiration = time.Now().Add(ttl).UnixNano() 523 | } else { 524 | entry.Expiration = NoExpiration 525 | } 526 | return true 527 | } 528 | 529 | // get retrieves an entry using the key passed as parameter, but unlike Get, it doesn't update the access time or 530 | // move the position of the entry to the head 531 | func (cache *Cache) get(key string) (*Entry, bool) { 532 | entry, ok := cache.entries[key] 533 | return entry, ok 534 | } 535 | 536 | func (cache *Cache) delete(key string) bool { 537 | entry, ok := cache.entries[key] 538 | if ok { 539 | if cache.maxMemoryUsage != NoMaxMemoryUsage { 540 | cache.memoryUsage -= entry.SizeInBytes() 541 | } 542 | cache.removeExistingEntryReferences(entry) 543 | delete(cache.entries, key) 544 | } 545 | return ok 546 | } 547 | 548 | // moveExistingEntryToHead replaces the current cache head for an existing entry 549 | func (cache *Cache) moveExistingEntryToHead(entry *Entry) { 550 | if !(entry == cache.head && entry == cache.tail) { 551 | cache.removeExistingEntryReferences(entry) 552 | } 553 | if entry != cache.head { 554 | entry.next = cache.head 555 | entry.previous = nil 556 | if cache.head != nil { 557 | cache.head.previous = entry 558 | } 559 | cache.head = entry 560 | } 561 | } 562 | 563 | // removeExistingEntryReferences modifies the next and previous reference of an existing entry and re-links 564 | // the next and previous entry accordingly, as well as the cache head or/and the cache tail if necessary. 565 | // Note that it does not remove the entry from the cache, only the references. 
566 | func (cache *Cache) removeExistingEntryReferences(entry *Entry) { 567 | if cache.tail == entry && cache.head == entry { 568 | cache.tail = nil 569 | cache.head = nil 570 | } else if cache.tail == entry { 571 | cache.tail = cache.tail.previous 572 | } else if cache.head == entry { 573 | cache.head = cache.head.next 574 | } 575 | if entry.previous != nil { 576 | entry.previous.next = entry.next 577 | } 578 | if entry.next != nil { 579 | entry.next.previous = entry.previous 580 | } 581 | entry.next = nil 582 | entry.previous = nil 583 | } 584 | 585 | // evict removes the tail from the cache 586 | func (cache *Cache) evict() { 587 | if cache.tail == nil || len(cache.entries) == 0 { 588 | return 589 | } 590 | if cache.tail != nil { 591 | oldTail := cache.tail 592 | cache.removeExistingEntryReferences(oldTail) 593 | delete(cache.entries, oldTail.Key) 594 | if cache.maxMemoryUsage != NoMaxMemoryUsage { 595 | cache.memoryUsage -= oldTail.SizeInBytes() 596 | } 597 | cache.stats.EvictedKeys++ 598 | } 599 | } 600 | -------------------------------------------------------------------------------- /gocache_bench_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "strconv" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | func BenchmarkMap_Get(b *testing.B) { 12 | m := make(map[string]any) 13 | for n := 0; n < b.N; n++ { 14 | _, _ = m[strconv.Itoa(n)] 15 | } 16 | b.ReportAllocs() 17 | } 18 | 19 | func BenchmarkMap_Set(b *testing.B) { 20 | values := map[string]string{ 21 | "small": "a", 22 | "medium": strings.Repeat("a", 1024), 23 | "large": strings.Repeat("a", 1024*100), 24 | } 25 | for name, value := range values { 26 | b.Run(fmt.Sprintf("%s value", name), func(b *testing.B) { 27 | m := make(map[string]any) 28 | for n := 0; n < b.N; n++ { 29 | m[strconv.Itoa(n)] = value 30 | } 31 | b.ReportAllocs() 32 | }) 33 | } 34 | } 35 | 36 | func BenchmarkCache_Get(b *testing.B) { 37 | evictionPolicies := []EvictionPolicy{FirstInFirstOut, LeastRecentlyUsed} 38 | for _, evictionPolicy := range evictionPolicies { 39 | cache := NewCache().WithMaxSize(NoMaxSize).WithMaxMemoryUsage(NoMaxMemoryUsage) 40 | b.Run(string(evictionPolicy), func(b *testing.B) { 41 | for n := 0; n < b.N; n++ { 42 | cache.Get(strconv.Itoa(n)) 43 | } 44 | b.ReportAllocs() 45 | }) 46 | } 47 | } 48 | 49 | func BenchmarkCache_Set(b *testing.B) { 50 | values := map[string]string{ 51 | "small": "a", 52 | "medium": strings.Repeat("a", 1024), 53 | "large": strings.Repeat("a", 1024*100), 54 | } 55 | evictionPolicies := []EvictionPolicy{FirstInFirstOut, LeastRecentlyUsed} 56 | for _, evictionPolicy := range evictionPolicies { 57 | for name, value := range values { 58 | b.Run(fmt.Sprintf("%s %s value", evictionPolicy, name), func(b *testing.B) { 59 | cache := NewCache().WithMaxSize(NoMaxSize).WithMaxMemoryUsage(NoMaxMemoryUsage).WithEvictionPolicy(evictionPolicy) 60 | for n := 0; n < b.N; n++ { 61 | cache.Set(strconv.Itoa(n), value) 62 | } 63 | b.ReportAllocs() 64 | }) 65 | } 66 | } 67 | } 68 | 69 | // BenchmarkCache_SetUsingMaxMemoryUsage does NOT test evictions, it tests the overhead of the extra work 70 | // automatically performed when using MaxMemoryUsage 71 | func BenchmarkCache_SetUsingMaxMemoryUsage(b *testing.B) { 72 | values := map[string]string{ 73 | "small": "a", 74 | "medium": strings.Repeat("a", 1024), 75 | "large": strings.Repeat("a", 1024*100), 76 | } 77 | for name, value := range values { 78 | b.Run(fmt.Sprintf("%s value", name), 
func(b *testing.B) { 79 | cache := NewCache().WithMaxSize(NoMaxSize).WithMaxMemoryUsage(999 * Gigabyte) 80 | for n := 0; n < b.N; n++ { 81 | cache.Set(strconv.Itoa(n), value) 82 | } 83 | b.ReportAllocs() 84 | }) 85 | } 86 | } 87 | 88 | func BenchmarkCache_SetWithMaxSize(b *testing.B) { 89 | values := map[string]string{ 90 | "small": "a", 91 | "medium": strings.Repeat("a", 1024), 92 | "large": strings.Repeat("a", 1024*100), 93 | } 94 | maxSizes := []int{100, 10000, 100000} 95 | for name, value := range values { 96 | for _, maxSize := range maxSizes { 97 | b.Run(fmt.Sprintf("%d %s value", maxSize, name), func(b *testing.B) { 98 | cache := NewCache().WithMaxSize(maxSize) 99 | for n := 0; n < b.N; n++ { 100 | cache.Set(strconv.Itoa(n), value) 101 | } 102 | b.ReportAllocs() 103 | }) 104 | } 105 | } 106 | } 107 | 108 | func BenchmarkCache_SetWithMaxSizeAndLRU(b *testing.B) { 109 | values := map[string]string{ 110 | "small": "a", 111 | "medium": strings.Repeat("a", 1024), 112 | "large": strings.Repeat("a", 1024*100), 113 | } 114 | maxSizes := []int{100, 10000, 100000} 115 | for name, value := range values { 116 | for _, maxSize := range maxSizes { 117 | b.Run(fmt.Sprintf("%d %s value", maxSize, name), func(b *testing.B) { 118 | cache := NewCache().WithMaxSize(maxSize).WithEvictionPolicy(LeastRecentlyUsed) 119 | for n := 0; n < b.N; n++ { 120 | cache.Set(strconv.Itoa(n), value) 121 | } 122 | b.ReportAllocs() 123 | }) 124 | } 125 | } 126 | } 127 | 128 | func BenchmarkCache_GetSetMultipleConcurrent(b *testing.B) { 129 | data := map[string]string{ 130 | "k1": "v1", 131 | "k2": "v2", 132 | "k3": "v3", 133 | "k4": "v4", 134 | "k5": "v5", 135 | "k6": "v6", 136 | "k7": "v7", 137 | "k8": "v8", 138 | } 139 | cache := NewCache().WithMaxSize(NoMaxSize) 140 | 141 | b.RunParallel(func(pb *testing.PB) { 142 | for pb.Next() { 143 | for k, v := range data { 144 | cache.Set(k, v) 145 | cache.Get(k) 146 | } 147 | } 148 | }) 149 | b.ReportAllocs() 150 | } 151 | 152 | func BenchmarkCache_GetSetConcurrentWithFrequentEviction(b *testing.B) { 153 | value := strings.Repeat("a", 256) 154 | evictionPolicies := []EvictionPolicy{FirstInFirstOut, LeastRecentlyUsed} 155 | for _, evictionPolicy := range evictionPolicies { 156 | b.Run(string(evictionPolicy), func(b *testing.B) { 157 | cache := NewCache().WithEvictionPolicy(LeastRecentlyUsed).WithMaxSize(3).WithMaxMemoryUsage(NoMaxMemoryUsage) 158 | b.RunParallel(func(pb *testing.PB) { 159 | for pb.Next() { 160 | k := strconv.Itoa(rand.Intn(15)) 161 | cache.Set(k, value) 162 | _, _ = cache.Get(k) 163 | } 164 | }) 165 | b.ReportAllocs() 166 | }) 167 | 168 | } 169 | } 170 | 171 | func BenchmarkCache_GetConcurrently(b *testing.B) { 172 | value := strings.Repeat("a", 256) 173 | for _, evictionPolicy := range []EvictionPolicy{FirstInFirstOut, LeastRecentlyUsed} { 174 | b.Run(string(evictionPolicy), func(b *testing.B) { 175 | cache := NewCache().WithMaxSize(100000) 176 | for i := 0; i < 100000; i++ { 177 | cache.Set(strconv.Itoa(i), value) 178 | } 179 | b.RunParallel(func(pb *testing.PB) { 180 | for pb.Next() { 181 | key := strconv.Itoa(rand.Intn(100000)) 182 | val, ok := cache.Get(key) 183 | if !ok { 184 | b.Errorf("key: %v; value: %v", key, val) 185 | } 186 | if val != value { 187 | b.Errorf("expected: %v; got: %v", val, value) 188 | } 189 | } 190 | }) 191 | b.ReportAllocs() 192 | }) 193 | } 194 | } 195 | 196 | // Note: The default value for Cache.forceNilInterfaceOnNilPointer is true 197 | func BenchmarkCache_WithForceNilInterfaceOnNilPointer(b *testing.B) { 198 | const ( 199 | Min 
= 10000 200 | Max = 99999 201 | ) 202 | type Struct struct { 203 | Value string 204 | } 205 | forceNilInterfaceOnNilPointerValues := []bool{true, false} 206 | values := []*Struct{nil, {Value: "value"}} 207 | for _, forceNilInterfaceOnNilPointer := range forceNilInterfaceOnNilPointerValues { 208 | for _, value := range values { 209 | name := fmt.Sprintf("%v", forceNilInterfaceOnNilPointer) 210 | if value == nil { 211 | name += " with nil struct pointer" 212 | } 213 | b.Run(name, func(b *testing.B) { 214 | cache := NewCache().WithMaxSize(NoMaxSize).WithMaxMemoryUsage(NoMaxMemoryUsage).WithForceNilInterfaceOnNilPointer(forceNilInterfaceOnNilPointer) 215 | for n := 0; n < b.N; n++ { 216 | cache.Set(strconv.Itoa(rand.Intn(Max-Min)+Min), value) 217 | } 218 | b.ReportAllocs() 219 | }) 220 | } 221 | } 222 | } 223 | 224 | func BenchmarkCache_WithForceNilInterfaceOnNilPointerWithConcurrency(b *testing.B) { 225 | const ( 226 | Min = 10000 227 | Max = 99999 228 | ) 229 | type Struct struct { 230 | Value string 231 | } 232 | forceNilInterfaceOnNilPointerValues := []bool{true, false} 233 | values := []*Struct{nil, {Value: "value"}} 234 | for _, forceNilInterfaceOnNilPointer := range forceNilInterfaceOnNilPointerValues { 235 | for _, value := range values { 236 | name := fmt.Sprintf("%v", forceNilInterfaceOnNilPointer) 237 | if value == nil { 238 | name += " with nil struct pointer" 239 | } 240 | b.Run(name, func(b *testing.B) { 241 | cache := NewCache().WithMaxSize(NoMaxSize).WithMaxMemoryUsage(NoMaxMemoryUsage).WithForceNilInterfaceOnNilPointer(forceNilInterfaceOnNilPointer) 242 | b.RunParallel(func(pb *testing.PB) { 243 | for pb.Next() { 244 | cache.Set(strconv.Itoa(rand.Intn(Max-Min)+Min), value) 245 | } 246 | }) 247 | b.ReportAllocs() 248 | }) 249 | } 250 | } 251 | } 252 | -------------------------------------------------------------------------------- /gocache_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math/rand" 7 | "strconv" 8 | "strings" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | func TestNewCache(t *testing.T) { 14 | cache := NewCache().WithMaxSize(1234).WithEvictionPolicy(LeastRecentlyUsed) 15 | if cache.MaxMemoryUsage() != NoMaxMemoryUsage { 16 | t.Error("shouldn't have a max memory usage configured") 17 | } 18 | if cache.EvictionPolicy() != LeastRecentlyUsed { 19 | t.Error("should've had a LeastRecentlyUsed eviction policy") 20 | } 21 | if cache.defaultTTL != NoExpiration { 22 | t.Error("should've had a default TTL of NoExpiration") 23 | } 24 | if cache.MaxSize() != 1234 { 25 | t.Error("should've had a max cache size of 1234") 26 | } 27 | if cache.MemoryUsage() != 0 { 28 | t.Error("should've had a memory usage of 0") 29 | } 30 | } 31 | 32 | func TestCache_Stats(t *testing.T) { 33 | cache := NewCache().WithMaxSize(1234).WithEvictionPolicy(LeastRecentlyUsed) 34 | cache.Set("key", "value") 35 | if cache.Stats().Hits != 0 { 36 | t.Error("should have 0 hits") 37 | } 38 | if cache.Stats().Misses != 0 { 39 | t.Error("should have 0 misses") 40 | } 41 | cache.Get("key") 42 | if cache.Stats().Hits != 1 { 43 | t.Error("should have 1 hit") 44 | } 45 | if cache.Stats().Misses != 0 { 46 | t.Error("should have 0 misses") 47 | } 48 | cache.Get("key-that-does-not-exist") 49 | if cache.Stats().Hits != 1 { 50 | t.Error("should have 1 hit") 51 | } 52 | if cache.Stats().Misses != 1 { 53 | t.Error("should have 1 miss") 54 | } 55 | } 56 | 57 | func TestCache_Get(t *testing.T) { 58 | cache := 
NewCache().WithMaxSize(10) 59 | cache.Set("key", "value") 60 | value, ok := cache.Get("key") 61 | if !ok { 62 | t.Error("expected key to exist") 63 | } 64 | if value != "value" { 65 | t.Errorf("expected: %s, but got: %s", "value", value) 66 | } 67 | } 68 | 69 | func TestCache_GetExpired(t *testing.T) { 70 | cache := NewCache() 71 | cache.SetWithTTL("key", "value", time.Millisecond) 72 | time.Sleep(2 * time.Millisecond) 73 | _, ok := cache.Get("key") 74 | if ok { 75 | t.Error("expected key to be expired") 76 | } 77 | } 78 | 79 | func TestCache_GetEntryThatHasNotExpiredYet(t *testing.T) { 80 | cache := NewCache() 81 | cache.SetWithTTL("key", "value", time.Hour) 82 | _, ok := cache.Get("key") 83 | if !ok { 84 | t.Error("expected key to not have expired") 85 | } 86 | } 87 | 88 | func TestCache_GetValue(t *testing.T) { 89 | cache := NewCache().WithMaxSize(10) 90 | cache.Set("key", "value") 91 | value := cache.GetValue("key") 92 | if value != "value" { 93 | t.Errorf("expected: %s, but got: %s", "value", value) 94 | } 95 | } 96 | 97 | func TestCache_GetByKeys(t *testing.T) { 98 | cache := NewCache().WithMaxSize(10) 99 | cache.Set("key1", "value1") 100 | cache.Set("key2", "value2") 101 | keyValues := cache.GetByKeys([]string{"key1", "key2", "key3"}) 102 | if len(keyValues) != 3 { 103 | t.Error("expected length of map to be 3") 104 | } 105 | if keyValues["key1"] != "value1" { 106 | t.Errorf("expected: %s, but got: %s", "value1", keyValues["key1"]) 107 | } 108 | if keyValues["key2"] != "value2" { 109 | t.Errorf("expected: %s, but got: %s", "value2", keyValues["key2"]) 110 | } 111 | if value, ok := keyValues["key3"]; !ok || value != nil { 112 | t.Errorf("expected key3 to exist and be nil, but got: %s", value) 113 | } 114 | } 115 | 116 | func TestCache_GetAll(t *testing.T) { 117 | cache := NewCache().WithMaxSize(10) 118 | cache.Set("key1", "value1") 119 | cache.Set("key2", "value2") 120 | keyValues := cache.GetAll() 121 | if len(keyValues) != 2 { 122 | t.Error("expected length of map to be 2") 123 | } 124 | if keyValues["key1"] != "value1" { 125 | t.Errorf("expected: %s, but got: %s", "value1", keyValues["key1"]) 126 | } 127 | if keyValues["key2"] != "value2" { 128 | t.Errorf("expected: %s, but got: %s", "value2", keyValues["key2"]) 129 | } 130 | } 131 | 132 | func TestCache_GetAllWhenOneKeyIsExpired(t *testing.T) { 133 | cache := NewCache().WithMaxSize(10) 134 | cache.Set("key1", "value1") 135 | cache.Set("key2", "value2") 136 | cache.SetWithTTL("key3", "value3", time.Nanosecond) 137 | time.Sleep(time.Millisecond) 138 | keyValues := cache.GetAll() 139 | if len(keyValues) != 2 { 140 | t.Error("expected length of map to be 2") 141 | } 142 | if keyValues["key1"] != "value1" { 143 | t.Errorf("expected: %s, but got: %s", "value1", keyValues["key1"]) 144 | } 145 | if keyValues["key2"] != "value2" { 146 | t.Errorf("expected: %s, but got: %s", "value2", keyValues["key2"]) 147 | } 148 | } 149 | 150 | func TestCache_GetKeysByPattern(t *testing.T) { 151 | // All keys match 152 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "key*", 0, 4) 153 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "*y*", 0, 4) 154 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "*key*", 0, 4) 155 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "*", 0, 4) 156 | // All keys match but limit is reached 157 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "*", 2, 2) 158 | // Some keys match 159 | testGetKeysByPattern(t, []string{"key1", "key2", 
"key3", "key4", "key11"}, "key1*", 0, 2) 160 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4", "key11"}, "*key1*", 0, 2) 161 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4", "key11", "key111"}, "key1*", 0, 3) 162 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4", "key11", "key111"}, "key11*", 0, 2) 163 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4", "key11", "key111"}, "*11*", 0, 2) 164 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4", "key11", "key111"}, "k*1*", 0, 3) 165 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4", "key11", "key111"}, "*k*1", 0, 3) 166 | // No keys match 167 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "image*", 0, 0) 168 | testGetKeysByPattern(t, []string{"key1", "key2", "key3", "key4"}, "?", 0, 0) 169 | } 170 | 171 | func testGetKeysByPattern(t *testing.T, keys []string, pattern string, limit, expectedMatchingKeys int) { 172 | cache := NewCache().WithMaxSize(len(keys)) 173 | for _, key := range keys { 174 | cache.Set(key, key) 175 | } 176 | matchingKeys := cache.GetKeysByPattern(pattern, limit) 177 | if len(matchingKeys) != expectedMatchingKeys { 178 | t.Errorf("expected to have %d keys to match pattern '%s', got %d", expectedMatchingKeys, pattern, len(matchingKeys)) 179 | } 180 | } 181 | 182 | func TestCache_GetKeysByPatternWithExpiredKey(t *testing.T) { 183 | cache := NewCache().WithMaxSize(10) 184 | cache.SetWithTTL("key", "value", 10*time.Millisecond) 185 | // The cache entry shouldn't have expired yet, so GetKeysByPattern should return 1 key 186 | if matchingKeys := cache.GetKeysByPattern("*", 0); len(matchingKeys) != 1 { 187 | t.Errorf("expected to have %d keys to match pattern '%s', got %d", 1, "*", len(matchingKeys)) 188 | } 189 | time.Sleep(30 * time.Millisecond) 190 | // Since the key expired, the same call should return 0 keys instead of 1 191 | if matchingKeys := cache.GetKeysByPattern("*", 0); len(matchingKeys) != 0 { 192 | t.Errorf("expected to have %d keys to match pattern '%s', got %d", 0, "*", len(matchingKeys)) 193 | } 194 | } 195 | 196 | func TestCache_Set(t *testing.T) { 197 | cache := NewCache().WithMaxSize(NoMaxSize) 198 | cache.Set("key", "value") 199 | value, ok := cache.Get("key") 200 | if !ok { 201 | t.Error("expected key to exist") 202 | } 203 | if value != "value" { 204 | t.Errorf("expected: %s, but got: %s", "value", value) 205 | } 206 | cache.Set("key", "newvalue") 207 | value, ok = cache.Get("key") 208 | if !ok { 209 | t.Error("expected key to exist") 210 | } 211 | if value != "newvalue" { 212 | t.Errorf("expected: %s, but got: %s", "newvalue", value) 213 | } 214 | } 215 | 216 | func TestCache_SetDifferentTypesOfData(t *testing.T) { 217 | cache := NewCache().WithMaxSize(NoMaxSize) 218 | cache.Set("key", 1) 219 | value, ok := cache.Get("key") 220 | if !ok { 221 | t.Error("expected key to exist") 222 | } 223 | if value != 1 { 224 | t.Errorf("expected: %v, but got: %v", 1, value) 225 | } 226 | cache.Set("key", struct{ Test string }{Test: "test"}) 227 | value, ok = cache.Get("key") 228 | if !ok { 229 | t.Error("expected key to exist") 230 | } 231 | if value.(struct{ Test string }) != struct{ Test string }{Test: "test"} { 232 | t.Errorf("expected: %s, but got: %s", "newvalue", value) 233 | } 234 | } 235 | 236 | func TestCache_SetGetInt(t *testing.T) { 237 | cache := NewCache().WithMaxSize(NoMaxSize) 238 | cache.Set("key", 1) 239 | value, ok := cache.Get("key") 240 | if !ok { 241 | t.Error("expected key to exist") 
242 | } 243 | if value != 1 { 244 | t.Errorf("expected: %v, but got: %v", 1, value) 245 | } 246 | cache.Set("key", 2.1) 247 | value, ok = cache.Get("key") 248 | if !ok { 249 | t.Error("expected key to exist") 250 | } 251 | if value != 2.1 { 252 | t.Errorf("expected: %v, but got: %v", 2.1, value) 253 | } 254 | } 255 | 256 | func TestCache_SetGetBool(t *testing.T) { 257 | cache := NewCache().WithMaxSize(NoMaxSize) 258 | cache.Set("key", true) 259 | value, ok := cache.Get("key") 260 | if !ok { 261 | t.Error("expected key to exist") 262 | } 263 | if value != true { 264 | t.Errorf("expected: %v, but got: %v", true, value) 265 | } 266 | } 267 | 268 | func TestCache_SetGetByteSlice(t *testing.T) { 269 | cache := NewCache().WithMaxSize(NoMaxSize) 270 | cache.Set("key", []byte("hey")) 271 | value, ok := cache.Get("key") 272 | if !ok { 273 | t.Error("expected key to exist") 274 | } 275 | if bytes.Compare(value.([]byte), []byte("hey")) != 0 { 276 | t.Errorf("expected: %v, but got: %v", []byte("hey"), value) 277 | } 278 | } 279 | 280 | func TestCache_SetGetStringSlice(t *testing.T) { 281 | cache := NewCache().WithMaxSize(NoMaxSize) 282 | cache.Set("key", []string{"john", "doe"}) 283 | value, ok := cache.Get("key") 284 | if !ok { 285 | t.Error("expected key to exist") 286 | } 287 | if value.([]string)[0] != "john" { 288 | t.Errorf("expected: %v, but got: %v", "john", value) 289 | } 290 | if value.([]string)[1] != "doe" { 291 | t.Errorf("expected: %v, but got: %v", "doe", value) 292 | } 293 | } 294 | 295 | func TestCache_SetGetStruct(t *testing.T) { 296 | cache := NewCache().WithMaxSize(NoMaxSize) 297 | type Custom struct { 298 | Int int 299 | Uint uint 300 | Float32 float32 301 | String string 302 | Strings []string 303 | Nested struct { 304 | String string 305 | } 306 | } 307 | cache.Set("key", Custom{ 308 | Int: 111, 309 | Uint: 222, 310 | Float32: 123.456, 311 | String: "hello", 312 | Strings: []string{"s1", "s2"}, 313 | Nested: struct{ String string }{String: "nested field"}, 314 | }) 315 | value, ok := cache.Get("key") 316 | if !ok { 317 | t.Error("expected key to exist") 318 | } 319 | if ExpectedValue := 111; value.(Custom).Int != ExpectedValue { 320 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 321 | } 322 | if ExpectedValue := uint(222); value.(Custom).Uint != ExpectedValue { 323 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 324 | } 325 | if ExpectedValue := float32(123.456); value.(Custom).Float32 != ExpectedValue { 326 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 327 | } 328 | if ExpectedValue := "hello"; value.(Custom).String != ExpectedValue { 329 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 330 | } 331 | if ExpectedValue := "s1"; value.(Custom).Strings[0] != ExpectedValue { 332 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 333 | } 334 | if ExpectedValue := "s2"; value.(Custom).Strings[1] != ExpectedValue { 335 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 336 | } 337 | if ExpectedValue := "nested field"; value.(Custom).Nested.String != ExpectedValue { 338 | t.Errorf("expected: %v, but got: %v", ExpectedValue, value) 339 | } 340 | } 341 | 342 | func TestCache_SetAll(t *testing.T) { 343 | cache := NewCache().WithMaxSize(NoMaxSize) 344 | cache.SetAll(map[string]any{"k1": "v1", "k2": "v2"}) 345 | value, ok := cache.Get("k1") 346 | if !ok { 347 | t.Error("expected key to exist") 348 | } 349 | if value != "v1" { 350 | t.Errorf("expected: %s, but got: %s", "v1", value) 351 | } 352 | value, ok = 
cache.Get("k2") 353 | if !ok { 354 | t.Error("expected key to exist") 355 | } 356 | if value != "v2" { 357 | t.Errorf("expected: %s, but got: %s", "v2", value) 358 | } 359 | cache.SetAll(map[string]any{"k1": "updated"}) 360 | value, ok = cache.Get("k1") 361 | if !ok { 362 | t.Error("expected key to exist") 363 | } 364 | if value != "updated" { 365 | t.Errorf("expected: %s, but got: %s", "updated", value) 366 | } 367 | } 368 | 369 | func TestCache_SetWithTTL(t *testing.T) { 370 | cache := NewCache().WithMaxSize(NoMaxSize) 371 | cache.SetWithTTL("key", "value", NoExpiration) 372 | value, ok := cache.Get("key") 373 | if !ok { 374 | t.Error("expected key to exist") 375 | } 376 | if value != "value" { 377 | t.Errorf("expected: %s, but got: %s", "value", value) 378 | } 379 | } 380 | 381 | func TestCache_SetWithTTLWhenTTLIsNegative(t *testing.T) { 382 | cache := NewCache().WithMaxSize(NoMaxSize) 383 | cache.SetWithTTL("key", "value", -12345) 384 | _, ok := cache.Get("key") 385 | if ok { 386 | t.Error("expected key to not exist, because there's no point in creating a cache entry that has a negative TTL") 387 | } 388 | } 389 | 390 | func TestCache_SetWithTTLWhenTTLIsZero(t *testing.T) { 391 | cache := NewCache().WithMaxSize(NoMaxSize) 392 | cache.SetWithTTL("key", "value", 0) 393 | _, ok := cache.Get("key") 394 | if ok { 395 | t.Error("expected key to not exist, because there's no point in creating a cache entry that has a TTL of 0") 396 | } 397 | } 398 | 399 | func TestCache_SetWithTTLWhenTTLIsZeroAndEntryAlreadyExists(t *testing.T) { 400 | cache := NewCache().WithMaxSize(NoMaxSize) 401 | cache.SetWithTTL("key", "value", NoExpiration) 402 | cache.SetWithTTL("key", "value", 0) 403 | _, ok := cache.Get("key") 404 | if ok { 405 | t.Error("expected key to not exist, because there's the entry was created with a TTL of 0, so it should have been deleted immediately") 406 | } 407 | } 408 | 409 | func TestCache_EvictionsRespectMaxSize(t *testing.T) { 410 | cache := NewCache().WithMaxSize(5) 411 | for n := 0; n < 10; n++ { 412 | cache.Set(fmt.Sprintf("test_%d", n), []byte("value")) 413 | } 414 | count := cache.Count() 415 | if count > 5 { 416 | t.Error("Max size was set to 5, but the cache size reached a size of", count) 417 | } 418 | } 419 | 420 | func TestCache_EvictionsWithFIFO(t *testing.T) { 421 | cache := NewCache().WithMaxSize(3).WithEvictionPolicy(FirstInFirstOut) 422 | 423 | cache.Set("1", []byte("value")) 424 | cache.Set("2", []byte("value")) 425 | cache.Set("3", []byte("value")) 426 | _, _ = cache.Get("1") 427 | cache.Set("4", []byte("value")) 428 | _, ok := cache.Get("1") 429 | if ok { 430 | t.Error("expected key 1 to have been removed, because FIFO") 431 | } 432 | } 433 | 434 | func TestCache_EvictionsWithLRU(t *testing.T) { 435 | cache := NewCache().WithMaxSize(3).WithEvictionPolicy(LeastRecentlyUsed) 436 | 437 | cache.Set("1", []byte("value")) 438 | cache.Set("2", []byte("value")) 439 | cache.Set("3", []byte("value")) 440 | _, _ = cache.Get("1") 441 | cache.Set("4", []byte("value")) 442 | 443 | _, ok := cache.Get("1") 444 | if !ok { 445 | t.Error("expected key 1 to still exist, because LRU") 446 | } 447 | } 448 | 449 | func TestCache_HeadToTailSimple(t *testing.T) { 450 | cache := NewCache().WithMaxSize(3) 451 | cache.Set("1", "1") 452 | if cache.tail.Key != "1" && cache.head.Key != "1" { 453 | t.Error("expected tail=1 and head=1") 454 | } 455 | cache.Set("2", "2") 456 | if cache.tail.Key != "1" && cache.head.Key != "2" { 457 | t.Error("expected tail=1 and head=2") 458 | } 459 | 
cache.Set("3", "3") 460 | if cache.tail.Key != "1" && cache.head.Key != "3" { 461 | t.Error("expected tail=1 and head=4") 462 | } 463 | cache.Set("4", "4") 464 | if cache.tail.Key != "2" && cache.head.Key != "4" { 465 | t.Error("expected tail=2 and head=4") 466 | } 467 | cache.Set("5", "5") 468 | if cache.tail.Key != "3" && cache.head.Key != "5" { 469 | t.Error("expected tail=3 and head=5") 470 | } 471 | } 472 | 473 | func TestCache_HeadTailWorksWithFIFO(t *testing.T) { 474 | cache := NewCache().WithMaxSize(3).WithEvictionPolicy(FirstInFirstOut) 475 | 476 | if cache.tail != nil { 477 | t.Error("cache tail should have been nil") 478 | } 479 | if cache.head != nil { 480 | t.Error("cache head should have been nil") 481 | } 482 | 483 | cache.Set("1", []byte("value")) 484 | 485 | // (head) 1 (tail) 486 | if cache.tail == nil || cache.tail.Key != "1" { 487 | t.Error("cache tail should have been entry with key 1") 488 | } 489 | if cache.head == nil || cache.head.Key != "1" { 490 | t.Error("cache head should have been entry with key 1") 491 | } 492 | 493 | cache.Set("2", []byte("value")) 494 | 495 | // (head) 2 - 1 (tail) 496 | if cache.tail == nil || cache.tail.Key != "1" { 497 | t.Error("cache tail should have been the entry with key 1") 498 | } 499 | if cache.head == nil || cache.head.Key != "2" { 500 | t.Error("cache head should have been the entry with key 2") 501 | } 502 | if cache.head.next.Key != "1" { 503 | t.Error("The entry key next to the cache head should have been 1") 504 | } 505 | if cache.head.previous != nil { 506 | t.Error("The cache head should not have a previous node") 507 | } 508 | if cache.tail.previous.Key != "2" { 509 | t.Error("The entry key previous to the cache tail should have been 2") 510 | } 511 | if cache.tail.next != nil { 512 | t.Error("The cache tail should not have a next node") 513 | } 514 | 515 | cache.Set("3", []byte("value")) 516 | 517 | // (head) 3 - 2 - 1 (tail) 518 | if cache.tail == nil || cache.tail.Key != "1" { 519 | t.Error("cache tail should have been the entry with key 1") 520 | } 521 | if cache.tail.previous.Key != "2" { 522 | t.Error("The entry key previous to the cache tail should have been 2") 523 | } 524 | if cache.tail.next != nil { 525 | t.Error("The cache tail should not have a next node") 526 | } 527 | if cache.head == nil || cache.head.Key != "3" { 528 | t.Error("cache head should have been the entry with key 3") 529 | } 530 | if cache.head.next.Key != "2" { 531 | t.Error("The entry key next to the cache head should have been 2") 532 | } 533 | if cache.head.previous != nil { 534 | t.Error("The cache head should not have a previous node") 535 | } 536 | if cache.head.next.previous.Key != "3" { 537 | t.Error("The head's next node should have its previous node pointing to the cache head") 538 | } 539 | if cache.head.next.next.Key != "1" { 540 | t.Error("The head's next node should have its next node pointing to the cache tail") 541 | } 542 | 543 | // Get the first entry. This doesn't change anything for FIFO, but for LRU, it would mean that retrieved entry 544 | // wouldn't be evicted since it was recently accessed. Basically, we just want to make sure that FIFO works 545 | // as intended (i.e. 
not like LRU) 546 | _, _ = cache.Get("1") 547 | 548 | cache.Set("4", []byte("value")) 549 | 550 | // (head) 4 - 3 - 2 (tail) 551 | _, ok := cache.Get("1") 552 | if ok { 553 | t.Error("expected key 1 to have been removed, because FIFO") 554 | } 555 | if cache.tail == nil || cache.tail.Key != "2" { 556 | t.Error("cache tail should have been the entry with key 2") 557 | } 558 | if cache.tail.previous.Key != "3" { 559 | t.Error("The entry key previous to the cache tail should have been 3") 560 | } 561 | if cache.tail.next != nil { 562 | t.Error("The cache tail should not have a next node") 563 | } 564 | if cache.head == nil || cache.head.Key != "4" { 565 | t.Error("cache head should have been the entry with key 4") 566 | } 567 | if cache.head.next.Key != "3" { 568 | t.Error("The entry key next to the cache head should have been 3") 569 | } 570 | if cache.head.previous != nil { 571 | t.Error("The cache head should not have a previous node") 572 | } 573 | if cache.head.next.previous.Key != "4" { 574 | t.Error("The head's next node should have its previous node pointing to the cache head") 575 | } 576 | if cache.head.next.next.Key != "2" { 577 | t.Error("The head's next node should have its next node pointing to the cache tail") 578 | } 579 | } 580 | 581 | func TestCache_HeadTailWorksWithLRU(t *testing.T) { 582 | cache := NewCache().WithMaxSize(3).WithEvictionPolicy(LeastRecentlyUsed) 583 | 584 | if cache.tail != nil { 585 | t.Error("cache tail should have been nil") 586 | } 587 | if cache.head != nil { 588 | t.Error("cache head should have been nil") 589 | } 590 | 591 | cache.Set("1", []byte("value")) 592 | 593 | // (head) 1 (tail) 594 | if cache.tail == nil || cache.tail.Key != "1" { 595 | t.Error("cache tail should have been entry with key 1") 596 | } 597 | if cache.head == nil || cache.head.Key != "1" { 598 | t.Error("cache head should have been entry with key 1") 599 | } 600 | 601 | cache.Set("2", []byte("value")) 602 | 603 | // (head) 2 - 1 (tail) 604 | if cache.tail == nil || cache.tail.Key != "1" { 605 | t.Error("cache tail should have been the entry with key 1") 606 | } 607 | if cache.head == nil || cache.head.Key != "2" { 608 | t.Error("cache head should have been the entry with key 2") 609 | } 610 | if cache.head.next.Key != "1" { 611 | t.Error("The entry key next to the cache head should have been 1") 612 | } 613 | if cache.head.previous != nil { 614 | t.Error("The cache head should not have a previous node") 615 | } 616 | if cache.tail.previous.Key != "2" { 617 | t.Error("The entry key previous to the cache tail should have been 2") 618 | } 619 | if cache.tail.next != nil { 620 | t.Error("The cache tail should not have a next node") 621 | } 622 | 623 | cache.Set("3", []byte("value")) 624 | 625 | // (head) 3 - 2 - 1 (tail) 626 | if cache.tail == nil || cache.tail.Key != "1" { 627 | t.Error("cache tail should have been the entry with key 1") 628 | } 629 | if cache.tail.previous.Key != "2" { 630 | t.Error("The entry key previous to the cache tail should have been 2") 631 | } 632 | if cache.tail.next != nil { 633 | t.Error("The cache tail should not have a next node") 634 | } 635 | if cache.head == nil || cache.head.Key != "3" { 636 | t.Error("cache head should have been the entry with key 3") 637 | } 638 | if cache.head.next.Key != "2" { 639 | t.Error("The entry key next to the cache head should have been 2") 640 | } 641 | if cache.head.previous != nil { 642 | t.Error("The cache head should not have a previous node") 643 | } 644 | if cache.head.next.previous.Key != "3" { 645 | 
t.Error("The head's next node should have its previous node pointing to the cache head") 646 | } 647 | if cache.head.next.next.Key != "1" { 648 | t.Error("The head's next node should have its next node pointing to the cache tail") 649 | } 650 | 651 | // Because we're using a LRU cache, this should cause 1 to get moved back to the head, thus 652 | // moving it from the tail. 653 | // In other words, because we retrieved the key 1 here, this is no longer the least recently used cache entry, 654 | // which means it will not be evicted during the next insertion. 655 | _, _ = cache.Get("1") 656 | 657 | // (head) 1 - 3 - 2 (tail) (This updated because LRU) 658 | cache.Set("4", []byte("value")) 659 | 660 | // (head) 4 - 1 - 3 (tail) 661 | if cache.tail == nil || cache.tail.Key != "3" { 662 | t.Error("cache tail should have been the entry with key 3") 663 | } 664 | if cache.tail.previous.Key != "1" { 665 | t.Error("The entry key previous to the cache tail should have been 1") 666 | } 667 | if cache.tail.next != nil { 668 | t.Error("The cache tail should not have a next node") 669 | } 670 | if cache.head == nil || cache.head.Key != "4" { 671 | t.Error("cache head should have been the entry with key 4") 672 | } 673 | if cache.head.next.Key != "1" { 674 | t.Error("The entry key next to the cache head should have been 1") 675 | } 676 | if cache.head.previous != nil { 677 | t.Error("The cache head should not have a previous node") 678 | } 679 | if cache.head.next.previous.Key != cache.head.Key { 680 | t.Error("The head's next node should have its previous node pointing to the cache head") 681 | } 682 | if cache.head.next.next.Key != cache.tail.Key { 683 | t.Error("Should be able to walk from head to tail") 684 | } 685 | if cache.tail.previous.previous != cache.head { 686 | t.Error("Should be able to walk from tail to head") 687 | } 688 | 689 | _, ok := cache.Get("1") 690 | if !ok { 691 | t.Error("expected key 1 to still exist, because LRU") 692 | } 693 | } 694 | 695 | func TestCache_HeadStaysTheSameIfCallRepeatedly(t *testing.T) { 696 | cache := NewCache().WithEvictionPolicy(LeastRecentlyUsed).WithMaxSize(10) 697 | cache.Set("1", "1") 698 | if cache.tail.Key != "1" && cache.head.Key != "1" { 699 | t.Error("expected tail=1 and head=1") 700 | } 701 | cache.Set("1", "1") 702 | if cache.tail.Key != "1" && cache.head.Key != "1" { 703 | t.Error("expected tail=1 and head=1") 704 | } 705 | cache.Get("1") 706 | if cache.tail.Key != "1" && cache.head.Key != "1" { 707 | t.Error("expected tail=1 and head=1") 708 | } 709 | cache.Get("1") 710 | if cache.tail.Key != "1" && cache.head.Key != "1" { 711 | t.Error("expected tail=1 and head=1") 712 | } 713 | } 714 | 715 | func TestCache_Delete(t *testing.T) { 716 | cache := NewCache() 717 | 718 | if cache.tail != nil { 719 | t.Error("cache tail should have been nil") 720 | } 721 | if cache.head != nil { 722 | t.Error("cache head should have been nil") 723 | } 724 | 725 | cache.Set("1", "hey") 726 | cache.Set("2", []byte("sup")) 727 | cache.Set("3", 123456) 728 | 729 | // (head) 3 - 2 - 1 (tail) 730 | if cache.tail.Key != "1" { 731 | t.Error("cache tail should have been the entry with key 1") 732 | } 733 | if cache.head.Key != "3" { 734 | t.Error("cache head should have been the entry with key 3") 735 | } 736 | 737 | cache.Delete("2") 738 | 739 | // (head) 3 - 1 (tail) 740 | if cache.tail.Key != "1" { 741 | t.Error("cache tail should have been the entry with key 1") 742 | } 743 | if cache.head.Key != "3" { 744 | t.Error("cache head should have been the entry with key 3") 
745 | } 746 | if cache.tail.previous.Key != "3" { 747 | t.Error("The entry key previous to the cache tail should have been 3") 748 | } 749 | if cache.head.next.Key != "1" { 750 | t.Error("The entry key next to the cache head should have been 1") 751 | } 752 | 753 | cache.Delete("1") 754 | 755 | // (head) 3 (tail) 756 | if cache.tail.Key != "3" { 757 | t.Error("cache tail should have been the entry with key 3") 758 | } 759 | if cache.head.Key != "3" { 760 | t.Error("cache head should have been the entry with key 3") 761 | } 762 | 763 | if cache.head != cache.tail { 764 | t.Error("There should only be one entry in the cache") 765 | } 766 | if cache.head.next != nil || cache.tail.previous != nil { 767 | t.Error("Since head == tail, there should be no next/prev") 768 | } 769 | } 770 | 771 | func TestCache_DeleteAll(t *testing.T) { 772 | cache := NewCache() 773 | cache.Set("1", []byte("1")) 774 | cache.Set("2", []byte("2")) 775 | cache.Set("3", []byte("3")) 776 | if len(cache.GetByKeys([]string{"1", "2", "3"})) != 3 { 777 | t.Error("Expected keys 1, 2 and 3 to exist") 778 | } 779 | numberOfDeletedKeys := cache.DeleteAll([]string{"1", "2", "3"}) 780 | if numberOfDeletedKeys != 3 { 781 | t.Errorf("Expected 3 keys to have been deleted, but only %d were deleted", numberOfDeletedKeys) 782 | } 783 | } 784 | 785 | func TestCache_DeleteKeysByPattern(t *testing.T) { 786 | cache := NewCache() 787 | cache.Set("a1", []byte("v")) 788 | cache.Set("a2", []byte("v")) 789 | cache.Set("b1", []byte("v")) 790 | if len(cache.GetByKeys([]string{"a1", "a2", "b1"})) != 3 { 791 | t.Error("Expected keys a1, a2 and b1 to exist") 792 | } 793 | numberOfDeletedKeys := cache.DeleteKeysByPattern("a*") 794 | if numberOfDeletedKeys != 2 { 795 | t.Errorf("Expected 2 keys to have been deleted, but only %d were deleted", numberOfDeletedKeys) 796 | } 797 | if _, exists := cache.Get("b1"); !exists { 798 | t.Error("Expected key b1 to still exist") 799 | } 800 | } 801 | 802 | func TestCache_TTL(t *testing.T) { 803 | cache := NewCache() 804 | ttl, err := cache.TTL("key") 805 | if err != ErrKeyDoesNotExist { 806 | t.Errorf("expected %s, got %s", ErrKeyDoesNotExist, err) 807 | } 808 | cache.Set("key", "value") 809 | _, err = cache.TTL("key") 810 | if err != ErrKeyHasNoExpiration { 811 | t.Error("Expected TTL on new key created using Set to have no expiration") 812 | } 813 | cache.SetWithTTL("key", "value", time.Hour) 814 | ttl, err = cache.TTL("key") 815 | if err != nil { 816 | t.Error("Unexpected error") 817 | } 818 | if ttl.Minutes() < 59 || ttl.Minutes() > 60 { 819 | t.Error("Expected the TTL to be almost an hour") 820 | } 821 | cache.SetWithTTL("key", "value", 5*time.Millisecond) 822 | time.Sleep(6 * time.Millisecond) 823 | ttl, err = cache.TTL("key") 824 | if err != ErrKeyDoesNotExist { 825 | t.Error("key should've expired, thus TTL should've returned ErrKeyDoesNotExist") 826 | } 827 | } 828 | 829 | func TestCache_Expire(t *testing.T) { 830 | cache := NewCache() 831 | if cache.Expire("key-that-does-not-exist", time.Minute) { 832 | t.Error("Expected Expire to return false, because the key used did not exist") 833 | } 834 | cache.Set("key", "value") 835 | _, err := cache.TTL("key") 836 | if err != ErrKeyHasNoExpiration { 837 | t.Error("Expected TTL on new key created using Set to have no expiration") 838 | } 839 | if !cache.Expire("key", time.Hour) { 840 | t.Error("Expected Expire to return true") 841 | } 842 | ttl, err := cache.TTL("key") 843 | if err != nil { 844 | t.Error("Unexpected error") 845 | } 846 | if ttl.Minutes() < 59 || ttl.Minutes() > 60 {
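// Expire("key", time.Hour) was called just above, so the remaining TTL should still be within [59m, 60m]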
847 | t.Error("Expected the TTL to be almost an hour") 848 | } 849 | if !cache.Expire("key", 5*time.Millisecond) { 850 | t.Error("Expected Expire to return true") 851 | } 852 | time.Sleep(6 * time.Millisecond) 853 | _, err = cache.TTL("key") 854 | if err != ErrKeyDoesNotExist { 855 | t.Error("key should've expired, thus TTL should've returned ErrKeyDoesNotExist") 856 | } 857 | if cache.Expire("key", time.Hour) { 858 | t.Error("Expire should've returned false, because the key should've already expired and thus no longer exists") 859 | } 860 | cache.SetWithTTL("key", "value", time.Hour) 861 | if !cache.Expire("key", NoExpiration) { 862 | t.Error("Expire should've returned true") 863 | } 864 | if _, err := cache.TTL("key"); err != ErrKeyHasNoExpiration { 865 | t.Error("TTL should've returned ErrKeyHasNoExpiration") 866 | } 867 | } 868 | 869 | func TestCache_Clear(t *testing.T) { 870 | cache := NewCache().WithMaxSize(10) 871 | cache.Set("k1", "v1") 872 | cache.Set("k2", "v2") 873 | cache.Set("k3", "v3") 874 | if cache.Count() != 3 { 875 | t.Error("expected cache size to be 3, got", cache.Count()) 876 | } 877 | cache.Clear() 878 | if cache.Count() != 0 { 879 | t.Error("expected cache to be empty") 880 | } 881 | if cache.memoryUsage != 0 { 882 | t.Error("expected cache.memoryUsage to be 0") 883 | } 884 | } 885 | 886 | func TestCache_WithMaxSize(t *testing.T) { 887 | cache := NewCache().WithMaxSize(1234) 888 | if cache.MaxSize() != 1234 { 889 | t.Error("expected cache to have a maximum size of 1234") 890 | } 891 | } 892 | 893 | func TestCache_WithMaxSizeAndNegativeValue(t *testing.T) { 894 | cache := NewCache().WithMaxSize(-10) 895 | if cache.MaxSize() != NoMaxSize { 896 | t.Error("expected cache to have no maximum size") 897 | } 898 | } 899 | 900 | func TestCache_WithMaxMemoryUsage(t *testing.T) { 901 | const ValueSize = Kilobyte 902 | cache := NewCache().WithMaxSize(0).WithMaxMemoryUsage(Kilobyte * 64) 903 | for i := 0; i < 100; i++ { 904 | cache.Set(fmt.Sprintf("%d", i), strings.Repeat("0", ValueSize)) 905 | } 906 | if cache.MemoryUsage()/1024 < 63 || cache.MemoryUsage()/1024 > 65 { 907 | t.Error("expected memoryUsage to be between 63KB and 65KB") 908 | } 909 | } 910 | 911 | func TestCache_WithMaxMemoryUsageWhenAddingAnEntryThatCausesMoreThanOneEviction(t *testing.T) { 912 | const ValueSize = Kilobyte 913 | cache := NewCache().WithMaxSize(0).WithMaxMemoryUsage(64 * Kilobyte) 914 | for i := 0; i < 100; i++ { 915 | cache.Set(fmt.Sprintf("%d", i), strings.Repeat("0", ValueSize)) 916 | } 917 | if cache.MemoryUsage()/1024 < 63 || cache.MemoryUsage()/1024 > 65 { 918 | t.Error("expected memoryUsage to be between 63KB and 65KB") 919 | } 920 | } 921 | 922 | func TestCache_WithMaxMemoryUsageAndNegativeValue(t *testing.T) { 923 | cache := NewCache().WithMaxSize(0).WithMaxMemoryUsage(-1234) 924 | if cache.MaxMemoryUsage() != NoMaxMemoryUsage { 925 | t.Error("attempting to set a negative max memory usage should force MaxMemoryUsage to NoMaxMemoryUsage") 926 | } 927 | } 928 | 929 | func TestCache_MemoryUsageAfterSet10000AndDelete5000(t *testing.T) { 930 | const ValueSize = 64 931 | cache := NewCache().WithMaxSize(10000).WithMaxMemoryUsage(Gigabyte) 932 | for i := 0; i < cache.maxSize; i++ { 933 | cache.Set(fmt.Sprintf("%05d", i), strings.Repeat("0", ValueSize)) 934 | } 935 | memoryUsageBeforeDeleting := cache.MemoryUsage() 936 | for i := 0; i < cache.maxSize/2; i++ { 937 | key := fmt.Sprintf("%05d", i) 938 | cache.Delete(key) 939 | } 940 | memoryUsageRatio := float32(cache.MemoryUsage()) /
float32(memoryUsageBeforeDeleting) 941 | if memoryUsageRatio != 0.5 { 942 | t.Error("Since half of the keys were deleted, the memoryUsage should've been half of what the memory usage was before beginning deletion") 943 | } 944 | } 945 | 946 | func TestCache_MemoryUsageIsReliable(t *testing.T) { 947 | cache := NewCache().WithMaxMemoryUsage(Megabyte) 948 | previousCacheMemoryUsage := cache.MemoryUsage() 949 | if previousCacheMemoryUsage != 0 { 950 | t.Error("cache.MemoryUsage() should've been 0") 951 | } 952 | cache.Set("1", 1) 953 | if cache.MemoryUsage() <= previousCacheMemoryUsage { 954 | t.Error("cache.MemoryUsage() should've increased") 955 | } 956 | previousCacheMemoryUsage = cache.MemoryUsage() 957 | cache.SetAll(map[string]any{"2": "2", "3": "3", "4": "4"}) 958 | if cache.MemoryUsage() <= previousCacheMemoryUsage { 959 | t.Error("cache.MemoryUsage() should've increased") 960 | } 961 | previousCacheMemoryUsage = cache.MemoryUsage() 962 | cache.Delete("2") 963 | if cache.MemoryUsage() >= previousCacheMemoryUsage { 964 | t.Error("cache.MemoryUsage() should've decreased") 965 | } 966 | previousCacheMemoryUsage = cache.MemoryUsage() 967 | cache.Set("1", 1) 968 | if cache.MemoryUsage() != previousCacheMemoryUsage { 969 | t.Error("cache.MemoryUsage() shouldn't have changed, because the entry didn't change") 970 | } 971 | previousCacheMemoryUsage = cache.MemoryUsage() 972 | cache.Delete("3") 973 | if cache.MemoryUsage() >= previousCacheMemoryUsage { 974 | t.Error("cache.MemoryUsage() should've decreased") 975 | } 976 | previousCacheMemoryUsage = cache.MemoryUsage() 977 | cache.Delete("4") 978 | if cache.MemoryUsage() >= previousCacheMemoryUsage { 979 | t.Error("cache.MemoryUsage() should've decreased") 980 | } 981 | previousCacheMemoryUsage = cache.MemoryUsage() 982 | cache.Delete("1") 983 | if cache.MemoryUsage() >= previousCacheMemoryUsage || cache.memoryUsage != 0 { 984 | t.Error("cache.MemoryUsage() should've been 0") 985 | } 986 | previousCacheMemoryUsage = cache.MemoryUsage() 987 | cache.Set("1", "v4lu3") 988 | if cache.MemoryUsage() <= previousCacheMemoryUsage { 989 | t.Error("cache.MemoryUsage() should've increased") 990 | } 991 | previousCacheMemoryUsage = cache.MemoryUsage() 992 | cache.Set("1", "value") 993 | if cache.MemoryUsage() != previousCacheMemoryUsage { 994 | t.Error("cache.MemoryUsage() shouldn't have changed") 995 | } 996 | previousCacheMemoryUsage = cache.MemoryUsage() 997 | cache.Set("1", true) 998 | if cache.MemoryUsage() >= previousCacheMemoryUsage { 999 | t.Error("cache.MemoryUsage() should've decreased, because a bool uses less memory than a string") 1000 | } 1001 | } 1002 | 1003 | func TestCache_MemoryUsageAndMaxSizeIsReliable(t *testing.T) { 1004 | cache := NewCache().WithMaxMemoryUsage(100 * Kilobyte).WithMaxSize(1) 1005 | rand.Seed(time.Now().UnixNano()) 1006 | for i := 0; i < 10000; i++ { 1007 | if i%5 == 0 { 1008 | cache.Set(strconv.Itoa(rand.Intn(10)), strings.Repeat("what", rand.Intn(10000))) 1009 | } else { 1010 | cache.Set(strconv.Itoa(rand.Intn(10)), struct { 1011 | m map[string]any 1012 | s string 1013 | }{ 1014 | m: map[string]any{ 1015 | "a": "b", 1016 | strings.Repeat("b", rand.Intn(1000)): map[string]string{ 1017 | strings.Repeat("c", rand.Intn(1000)): "d", 1018 | }, 1019 | }, 1020 | s: strings.Repeat("e", rand.Intn(1000)), 1021 | }) 1022 | } 1023 | cache.Get(strconv.Itoa(rand.Intn(10))) 1024 | if cache.MemoryUsage() < 0 { 1025 | t.Fatal("cache.MemoryUsage() should never be negative") 1026 | } 1027 | } 1028 | } 1029 | 1030 | func 
TestCache_WithDefaultTTL(t *testing.T) { 1031 | cache := NewCache().WithDefaultTTL(5 * time.Millisecond) 1032 | if cache.defaultTTL != 5*time.Millisecond { 1033 | t.Error("expected defaultTTL to be 5ms") 1034 | } 1035 | cache.Set("1", 1) 1036 | cache.SetWithTTL("2", 2, time.Hour) 1037 | if cache.GetValue("1") == nil { 1038 | t.Error("expected cache entry with key 1 to still exist") 1039 | } 1040 | if cache.GetValue("2") == nil { 1041 | t.Error("expected cache entry with key 2 to still exist") 1042 | } 1043 | time.Sleep(10 * time.Millisecond) 1044 | if cache.GetValue("1") != nil { 1045 | t.Error("expected cache entry with key 1 to have expired") 1046 | } 1047 | if cache.GetValue("2") == nil { 1048 | t.Error("expected cache entry with key 2 to still exist") 1049 | } 1050 | } 1051 | 1052 | func TestCache_WithForceNilInterfaceOnNilPointer(t *testing.T) { 1053 | type Struct struct{} 1054 | cache := NewCache().WithForceNilInterfaceOnNilPointer(true) 1055 | cache.Set("key", (*Struct)(nil)) 1056 | if value, exists := cache.Get("key"); !exists { 1057 | t.Error("expected key to exist") 1058 | } else { 1059 | if value != nil { 1060 | // if the value is not nil here, the typed nil pointer was stored as-is: an interface holding a nil pointer of a concrete type is itself not nil 1061 | t.Error("value should be nil") 1062 | } 1063 | } 1064 | 1065 | cache.Clear() 1066 | 1067 | cache = cache.WithForceNilInterfaceOnNilPointer(false) 1068 | cache.Set("key", (*Struct)(nil)) 1069 | if value, exists := cache.Get("key"); !exists { 1070 | t.Error("expected key to exist") 1071 | } else { 1072 | if value == nil { 1073 | t.Error("value should not be nil, because the type of the interface is not nil") 1074 | } 1075 | if value.(*Struct) != nil { 1076 | t.Error("casted value should be nil") 1077 | } 1078 | } 1079 | } 1080 | 1081 | func TestEvictionWhenThereIsNothingToEvict(t *testing.T) { 1082 | cache := NewCache() 1083 | cache.evict() 1084 | cache.evict() 1085 | cache.evict() 1086 | } 1087 | 1088 | func TestCache(t *testing.T) { 1089 | cache := NewCache().WithMaxSize(3).WithEvictionPolicy(LeastRecentlyUsed) 1090 | cache.Set("1", 1) 1091 | cache.Set("2", 2) 1092 | cache.Set("3", 3) 1093 | cache.Set("4", 4) 1094 | if _, ok := cache.Get("4"); !ok { 1095 | t.Error("expected 4 to exist") 1096 | } 1097 | if _, ok := cache.Get("3"); !ok { 1098 | t.Error("expected 3 to exist") 1099 | } 1100 | if _, ok := cache.Get("2"); !ok { 1101 | t.Error("expected 2 to exist") 1102 | } 1103 | if _, ok := cache.Get("1"); ok { 1104 | t.Error("expected 1 to have been evicted") 1105 | } 1106 | cache.Set("5", 5) 1107 | if _, ok := cache.Get("1"); ok { 1108 | t.Error("expected 1 to have been evicted") 1109 | } 1110 | if _, ok := cache.Get("2"); !ok { 1111 | t.Error("expected 2 to exist") 1112 | } 1113 | if _, ok := cache.Get("3"); !ok { 1114 | t.Error("expected 3 to exist") 1115 | } 1116 | if _, ok := cache.Get("4"); ok { 1117 | t.Error("expected 4 to have been evicted") 1118 | } 1119 | if _, ok := cache.Get("5"); !ok { 1120 | t.Error("expected 5 to exist") 1121 | } 1122 | } 1123 | -------------------------------------------------------------------------------- /janitor.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "log" 5 | "time" 6 | ) 7 | 8 | const ( 9 | // JanitorShiftTarget is the target number of expired keys to find during passive cleanup duty 10 | // before pausing the passive expired keys eviction process 11 | JanitorShiftTarget = 25 12 | 13 | // JanitorMaxIterationsPerShift is the
maximum number of nodes to traverse before pausing 14 | // 15 | // This is to prevent the janitor from traversing the entire cache, which could take a long time 16 | // to complete depending on the size of the cache. 17 | // 18 | // By limiting it to a small number, we are effectively reducing the impact of passive eviction. 19 | JanitorMaxIterationsPerShift = 1000 20 | 21 | // JanitorMinShiftBackOff is the minimum interval between each iteration of steps 22 | // defined by JanitorMaxIterationsPerShift 23 | JanitorMinShiftBackOff = 50 * time.Millisecond 24 | 25 | // JanitorMaxShiftBackOff is the maximum interval between each iteration of steps 26 | // defined by JanitorMaxIterationsPerShift 27 | JanitorMaxShiftBackOff = 500 * time.Millisecond 28 | ) 29 | 30 | // StartJanitor starts the janitor on a different goroutine 31 | // The janitor's job is to delete expired keys in the background, in other words, it takes care of passive eviction. 32 | // It can be stopped by calling Cache.StopJanitor. 33 | // If you do not start the janitor, expired keys will only be deleted when they are accessed through Get, GetByKeys, or 34 | // GetAll. 35 | func (cache *Cache) StartJanitor() error { 36 | if cache.stopJanitor != nil { 37 | return ErrJanitorAlreadyRunning 38 | } 39 | cache.stopJanitor = make(chan bool) 40 | go func() { 41 | // rather than starting from the tail on every run, we can try to start from the last traversed entry 42 | var lastTraversedNode *Entry 43 | totalNumberOfExpiredKeysInPreviousRunFromTailToHead := 0 44 | backOff := JanitorMinShiftBackOff 45 | for { 46 | select { 47 | case <-time.After(backOff): 48 | // Passive clean up duty 49 | cache.mutex.Lock() 50 | if cache.tail != nil { 51 | start := time.Now() 52 | steps := 0 53 | expiredEntriesFound := 0 54 | current := cache.tail 55 | if lastTraversedNode != nil { 56 | // Make sure the lastTraversedNode is still in the cache, otherwise we might be traversing nodes that were already deleted. 57 | // Furthermore, we need to make sure that the entry from the cache has the same pointer as the lastTraversedNode 58 | // to verify that there isn't just a new cache entry with the same key (i.e. in case lastTraversedNode got evicted) 59 | if entryFromCache, isInCache := cache.get(lastTraversedNode.Key); isInCache && entryFromCache == lastTraversedNode { 60 | current = lastTraversedNode 61 | } 62 | } 63 | if current == cache.tail { 64 | if Debug { 65 | log.Printf("There are currently %d entries in the cache. 
The last walk resulted in finding %d expired keys", len(cache.entries), totalNumberOfExpiredKeysInPreviousRunFromTailToHead) 66 | } 67 | totalNumberOfExpiredKeysInPreviousRunFromTailToHead = 0 68 | } 69 | for current != nil { 70 | // since we're walking from the tail to the head, we get the previous reference 71 | var previous *Entry 72 | steps++ 73 | if current.Expired() { 74 | expiredEntriesFound++ 75 | // Because delete will remove the previous reference from the entry, we need to store the 76 | // previous reference before we delete it 77 | previous = current.previous 78 | cache.delete(current.Key) 79 | cache.stats.ExpiredKeys++ 80 | } 81 | if current == cache.head { 82 | lastTraversedNode = nil 83 | break 84 | } 85 | // Travel to the current node's previous node only if no specific previous node has been specified 86 | if previous != nil { 87 | current = previous 88 | } else { 89 | current = current.previous 90 | } 91 | lastTraversedNode = current 92 | if steps == JanitorMaxIterationsPerShift || expiredEntriesFound >= JanitorShiftTarget { 93 | if expiredEntriesFound > 0 { 94 | backOff = JanitorMinShiftBackOff 95 | } else { 96 | if backOff*2 <= JanitorMaxShiftBackOff { 97 | backOff *= 2 98 | } else { 99 | backOff = JanitorMaxShiftBackOff 100 | } 101 | } 102 | break 103 | } 104 | } 105 | if Debug { 106 | log.Printf("traversed %d nodes and found %d expired entries in %s before stopping\n", steps, expiredEntriesFound, time.Since(start)) 107 | } 108 | totalNumberOfExpiredKeysInPreviousRunFromTailToHead += expiredEntriesFound 109 | } else { 110 | if backOff*2 < JanitorMaxShiftBackOff { 111 | backOff *= 2 112 | } else { 113 | backOff = JanitorMaxShiftBackOff 114 | } 115 | } 116 | cache.mutex.Unlock() 117 | case <-cache.stopJanitor: 118 | cache.stopJanitor <- true 119 | return 120 | } 121 | } 122 | }() 123 | //if Debug { 124 | // go func() { 125 | // var m runtime.MemStats 126 | // for { 127 | // runtime.ReadMemStats(&m) 128 | // log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count()) 129 | // time.Sleep(3 * time.Second) 130 | // } 131 | // }() 132 | //} 133 | return nil 134 | } 135 | 136 | // StopJanitor stops the janitor 137 | func (cache *Cache) StopJanitor() { 138 | if cache.stopJanitor != nil { 139 | // Tell the janitor to stop, and then wait for the janitor to reply on the same channel that it's stopping 140 | // This may seem a bit odd, but this allows us to avoid a data race condition when trying to set 141 | // cache.stopJanitor to nil 142 | cache.stopJanitor <- true 143 | <-cache.stopJanitor 144 | cache.stopJanitor = nil 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /janitor_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestCache_StartJanitor(t *testing.T) { 10 | cache := NewCache() 11 | cache.SetWithTTL("1", "1", time.Nanosecond) 12 | if cacheSize := cache.Count(); cacheSize != 1 { 13 | t.Errorf("expected cacheSize to be 1, but was %d", cacheSize) 14 | } 15 | err := cache.StartJanitor() 16 | if err != nil { 17 | t.Fatal(err) 18 | } 19 | defer cache.StopJanitor() 20 | time.Sleep(JanitorMinShiftBackOff * 2) 21 | if cacheSize := 
cache.Count(); cacheSize != 0 { 22 | t.Errorf("expected cacheSize to be 0, but was %d", cacheSize) 23 | } 24 | } 25 | 26 | func TestCache_StartJanitorWhenAlreadyStarted(t *testing.T) { 27 | cache := NewCache() 28 | if err := cache.StartJanitor(); err != nil { 29 | t.Fatal(err) 30 | } 31 | if err := cache.StartJanitor(); err == nil { 32 | t.Fatal("expected StartJanitor to return an error, because the janitor is already started") 33 | } 34 | cache.StopJanitor() 35 | } 36 | 37 | func TestCache_StopJanitor(t *testing.T) { 38 | cache := NewCache() 39 | _ = cache.StartJanitor() 40 | if cache.stopJanitor == nil { 41 | t.Error("starting the janitor should've initialized cache.stopJanitor") 42 | } 43 | cache.StopJanitor() 44 | if cache.stopJanitor != nil { 45 | t.Error("stopping the janitor should've set cache.stopJanitor to nil") 46 | } 47 | // Check if stopping the janitor even though it's already stopped causes a panic 48 | cache.StopJanitor() 49 | } 50 | 51 | func TestJanitor(t *testing.T) { 52 | cache := NewCache().WithMaxSize(3 * JanitorMaxIterationsPerShift) 53 | defer cache.Clear() 54 | for i := 0; i < 3*JanitorMaxIterationsPerShift; i++ { 55 | if i < JanitorMaxIterationsPerShift && i%2 == 0 { 56 | cache.SetWithTTL(fmt.Sprintf("%d", i), "value", time.Millisecond) 57 | } else { 58 | cache.SetWithTTL(fmt.Sprintf("%d", i), "value", time.Hour) 59 | } 60 | } 61 | cacheSize := cache.Count() 62 | err := cache.StartJanitor() 63 | if err != nil { 64 | t.Fatal(err) 65 | } 66 | defer cache.StopJanitor() 67 | time.Sleep(JanitorMinShiftBackOff * 4) 68 | if cacheSize <= cache.Count() { 69 | t.Error("The janitor should be deleting expired cache entries") 70 | } 71 | cacheSize = cache.Count() 72 | time.Sleep(JanitorMinShiftBackOff * 4) 73 | if cacheSize <= cache.Count() { 74 | t.Error("The janitor should be deleting expired cache entries") 75 | } 76 | cacheSize = cache.Count() 77 | time.Sleep(JanitorMinShiftBackOff * 4) 78 | if cacheSize <= cache.Count() { 79 | t.Error("The janitor should be deleting expired cache entries") 80 | } 81 | } 82 | 83 | func TestJanitorIsLoopingProperly(t *testing.T) { 84 | cache := NewCache().WithMaxSize(JanitorMaxIterationsPerShift + 3) 85 | defer cache.Clear() 86 | for i := 0; i < JanitorMaxIterationsPerShift; i++ { 87 | cache.SetWithTTL(fmt.Sprintf("%d", i), "value", time.Hour) 88 | } 89 | cache.SetWithTTL("key-to-expire-1", "value", JanitorMinShiftBackOff*2) 90 | cache.SetWithTTL("key-to-expire-2", "value", JanitorMinShiftBackOff*2) 91 | cache.SetWithTTL("key-to-expire-3", "value", JanitorMinShiftBackOff*2) 92 | err := cache.StartJanitor() 93 | if err != nil { 94 | t.Fatal(err) 95 | } 96 | defer cache.StopJanitor() 97 | if cache.Count() != JanitorMaxIterationsPerShift+3 { 98 | t.Error("The janitor shouldn't have had enough time to remove anything from the cache yet", cache.Count()) 99 | } 100 | const timeout = JanitorMinShiftBackOff * 20 101 | threeKeysExpiredWithinOneSecond := false 102 | for start := time.Now(); time.Since(start) < timeout; { 103 | if cache.Stats().ExpiredKeys == 3 { 104 | threeKeysExpiredWithinOneSecond = true 105 | break 106 | } 107 | time.Sleep(JanitorMinShiftBackOff) 108 | } 109 | if !threeKeysExpiredWithinOneSecond { 110 | t.Error("expected 3 keys to expire within 1 second") 111 | } 112 | if cache.Count() != JanitorMaxIterationsPerShift { 113 | t.Error("The janitor should've deleted 3 entries") 114 | } 115 | } 116 | 117 | func TestJanitorDoesNotThrowATantrumWhenThereIsNothingToClean(t *testing.T) { 118 | cache := NewCache() 119 | start := time.Now() 
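// Start the janitor on an empty cache: with nothing to clean up, its back-off
// should grow toward JanitorMaxShiftBackOff instead of busy-looping.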
120 | _ = cache.StartJanitor() 121 | defer cache.StopJanitor() 122 | time.Sleep(JanitorMaxShiftBackOff * 3) 123 | // Technically, if the janitor doesn't back off properly, the sleep above will end up taking much longer than 124 | // expected, because the janitor would be eating up the CPU. 125 | // This is a far-fetched test, but it's a good sanity check. 126 | if time.Since(start) > JanitorMaxShiftBackOff*4 { 127 | t.Error("The janitor should've backed off and prevented CPU usage from throttling the application") 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /pattern.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import "path/filepath" 4 | 5 | // MatchPattern checks whether a string matches a pattern 6 | func MatchPattern(pattern, s string) bool { 7 | if pattern == "*" { 8 | return true 9 | } 10 | matched, _ := filepath.Match(pattern, s) 11 | return matched 12 | } 13 | -------------------------------------------------------------------------------- /pattern_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import "testing" 4 | 5 | func TestMatchPattern(t *testing.T) { 6 | scenarios := []struct { 7 | pattern string 8 | key string 9 | expectedToMatch bool 10 | }{ 11 | { 12 | pattern: "*", 13 | key: "livingroom_123", 14 | expectedToMatch: true, 15 | }, 16 | { 17 | pattern: "*", 18 | key: "livingroom_123", 19 | expectedToMatch: true, 20 | }, 21 | { 22 | pattern: "**", 23 | key: "livingroom_123", 24 | expectedToMatch: true, 25 | }, 26 | { 27 | pattern: "living*", 28 | key: "livingroom_123", 29 | expectedToMatch: true, 30 | }, 31 | { 32 | pattern: "*living*", 33 | key: "livingroom_123", 34 | expectedToMatch: true, 35 | }, 36 | { 37 | pattern: "*123", 38 | key: "livingroom_123", 39 | expectedToMatch: true, 40 | }, 41 | { 42 | pattern: "*_*", 43 | key: "livingroom_123", 44 | expectedToMatch: true, 45 | }, 46 | { 47 | pattern: "living*_*3", 48 | key: "livingroom_123", 49 | expectedToMatch: true, 50 | }, 51 | { 52 | pattern: "living*room_*3", 53 | key: "livingroom_123", 54 | expectedToMatch: true, 55 | }, 56 | { 57 | pattern: "living*room_*3", 58 | key: "livingroom_123", 59 | expectedToMatch: true, 60 | }, 61 | { 62 | pattern: "*vin*om*2*", 63 | key: "livingroom_123", 64 | expectedToMatch: true, 65 | }, 66 | { 67 | pattern: "livingroom_123", 68 | key: "livingroom_123", 69 | expectedToMatch: true, 70 | }, 71 | { 72 | pattern: "*livingroom_123*", 73 | key: "livingroom_123", 74 | expectedToMatch: true, 75 | }, 76 | { 77 | pattern: "livingroom", 78 | key: "livingroom_123", 79 | expectedToMatch: false, 80 | }, 81 | { 82 | pattern: "livingroom123", 83 | key: "livingroom_123", 84 | expectedToMatch: false, 85 | }, 86 | { 87 | pattern: "what", 88 | key: "livingroom_123", 89 | expectedToMatch: false, 90 | }, 91 | { 92 | pattern: "*what*", 93 | key: "livingroom_123", 94 | expectedToMatch: false, 95 | }, 96 | { 97 | pattern: "*.*", 98 | key: "livingroom_123", 99 | expectedToMatch: false, 100 | }, 101 | { 102 | pattern: "room*123", 103 | key: "livingroom_123", 104 | expectedToMatch: false, 105 | }, 106 | } 107 | for _, scenario := range scenarios { 108 | t.Run(scenario.pattern+"---"+scenario.key, func(t *testing.T) { 109 | matched := MatchPattern(scenario.pattern, scenario.key) 110 | if scenario.expectedToMatch { 111 | if !matched { 112 | t.Errorf("%s should've matched pattern '%s'", scenario.key, scenario.pattern) 113 | } 114 |
else { 115 | if matched { 116 | t.Errorf("%s shouldn't have matched pattern '%s'", scenario.key, scenario.pattern) 117 | } 118 | } 119 | }) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /policy.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | // EvictionPolicy is what dictates how evictions are handled 4 | type EvictionPolicy string 5 | 6 | const ( 7 | // LeastRecentlyUsed is an eviction policy that causes the most recently accessed cache entry to be moved to the 8 | // head of the cache. Effectively, this causes the cache entries that have not been accessed for some time to 9 | // gradually move closer and closer to the tail, and since the tail is the entry that gets deleted when an eviction 10 | // is required, it allows less used cache entries to be evicted while keeping recently accessed entries at or close 11 | // to the head. 12 | // 13 | // For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would 14 | // put 3 at the head and 1 at the tail: 15 | // 3 (head) -> 2 -> 1 (tail) 16 | // If the cache entry 1 was then accessed, 1 would become the head and 2 the tail: 17 | // 1 (head) -> 3 -> 2 (tail) 18 | // If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (2) would then be evicted: 19 | // 4 (head) -> 1 -> 3 (tail) 20 | LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed" 21 | 22 | // FirstInFirstOut is an eviction policy that causes cache entries to be evicted in the same order that they are 23 | // created. 24 | // 25 | // For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would 26 | // put 3 at the head and 1 at the tail: 27 | // 3 (head) -> 2 -> 1 (tail) 28 | // If the cache entry 1 was then accessed, unlike with LeastRecentlyUsed, nothing would change: 29 | // 3 (head) -> 2 -> 1 (tail) 30 | // If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (1) would then be evicted: 31 | // 4 (head) -> 3 -> 2 (tail) 32 | FirstInFirstOut EvictionPolicy = "FirstInFirstOut" 33 | ) 34 | -------------------------------------------------------------------------------- /statistics.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | type Statistics struct { 4 | // EvictedKeys is the number of keys that were evicted 5 | EvictedKeys uint64 6 | 7 | // ExpiredKeys is the number of keys that were automatically deleted as a result of expiring 8 | ExpiredKeys uint64 9 | 10 | // Hits is the number of cache hits 11 | Hits uint64 12 | 13 | // Misses is the number of cache misses 14 | Misses uint64 15 | } 16 | --------------------------------------------------------------------------------
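To tie the pieces above together (eviction policy, TTL, the janitor, pattern matching and statistics), here is a minimal usage sketch based solely on the API exercised by the tests in this repository; the key names and durations are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/TwiN/gocache/v2"
)

func main() {
	// An LRU cache capped at 3 entries, mirroring the eviction tests above
	cache := gocache.NewCache().WithMaxSize(3).WithEvictionPolicy(gocache.LeastRecentlyUsed)
	// Start the janitor so that expired entries are also deleted passively
	_ = cache.StartJanitor()
	defer cache.StopJanitor()

	cache.Set("livingroom_123", "value")
	cache.SetWithTTL("short-lived", "value", 50*time.Millisecond)

	// Active expiration: once the TTL has elapsed, Get behaves as if the key never existed
	time.Sleep(100 * time.Millisecond)
	if _, ok := cache.Get("short-lived"); !ok {
		fmt.Println("short-lived has expired")
	}

	// Pattern matching follows filepath.Match semantics, with "*" matching everything
	fmt.Println(gocache.MatchPattern("living*", "livingroom_123")) // true
	fmt.Println(gocache.MatchPattern("*what*", "livingroom_123")) // false

	// Statistics reflect the hits, misses, expirations and evictions observed so far
	stats := cache.Stats()
	fmt.Printf("hits=%d misses=%d expired=%d evicted=%d\n", stats.Hits, stats.Misses, stats.ExpiredKeys, stats.EvictedKeys)
}
```

Note that the With* configuration methods return the cache itself, which is why they can be chained the same way the tests above chain them.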