├── LICENSE ├── README.md ├── cache.go ├── cache_test.go ├── fifo.go ├── fifo_test.go ├── go.mod ├── lifo.go ├── lifo_test.go ├── lru.go ├── lru_test.go ├── random.go ├── random_test.go ├── ttl.go └── ttl_test.go /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Go cache 2 | 3 | This repository contains implementations of various caching algorithms in Go, 4 | with support for generics. 5 | 6 | See the [godoc][godoc] for more information. 7 | 8 | [godoc]: https://pkg.go.dev/github.com/sethvargo/go-cache 9 | -------------------------------------------------------------------------------- /cache.go: -------------------------------------------------------------------------------- 1 | // Package cache implements a collection of caching algorithms in Go which 2 | // support generics for strong typing. The implementations are the purely 3 | // "academic" definitions of the cache algorithms, and more finely-tuned 4 | // libraries might be a better fit for high-throughput or high-storage use 5 | // cases. 
6 | // 7 | // In addition to the standard Get and Set functions, there is also a 8 | // package-level Fetch function which acts as a write-through operation: 9 | // 10 | // lru := cache.NewLRU[string, string](15) 11 | // 12 | // v, err := cache.Fetch(lru, "foo", func() (string, error) { 13 | // return "bar", nil 14 | // }) 15 | // if err != nil { 16 | // // TODO: handle error 17 | // } 18 | // fmt.Println(v) // Output: bar 19 | // 20 | // By default, none of the implementations are safe for concurrent use. To make 21 | // a cache safe for concurrent use, wrap it in the sync cache: 22 | // 23 | // lruSync := cache.NewSync[string, string](cache.NewLRU[string, string](15)) 24 | // 25 | // Unfortunately Go does not currently infer the type constraint from the input, 26 | // so you must declare it twice. 27 | package cache 28 | 29 | // Cache is a generic interface for various cache implementations. 30 | type Cache[K comparable, V any] interface { 31 | // Get retrieves the given key from the cache. If the item exists, it is 32 | // returned. If it does not exist, the second argument will be false. 33 | Get(K) (V, bool) 34 | 35 | // Set inserts the given key into the cache. If the key already exists, it 36 | // will be overwritten. 37 | Set(K, V) 38 | 39 | // Fetch retrieves the cached value. If the value does not exist, the 40 | // FetchFunc is called and the result is stored. If the value does exist, the 41 | // FetchFunc is not invoked. 42 | Fetch(K, FetchFunc[V]) (V, error) 43 | 44 | // Stop terminates the cache, deleting any cached entries. Once invoked, any 45 | // future calls to Get or Set will panic. 46 | Stop() 47 | } 48 | 49 | // FetchFunc is a function that is invoked when a cached value is not found. 50 | type FetchFunc[V any] func() (V, error) 51 | 52 | // ptrTo is a helper for returning the pointer to a type. 
53 | func ptrTo[V any](v V) *V { 54 | return &v 55 | } 56 | -------------------------------------------------------------------------------- /cache_test.go: -------------------------------------------------------------------------------- 1 | package cache_test 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/sethvargo/go-cache" 8 | ) 9 | 10 | func ExampleNewFIFO() { 11 | fifo := cache.NewFIFO[string, string](15) 12 | defer fifo.Stop() 13 | 14 | fifo.Set("foo", "bar") 15 | v, _ := fifo.Get("foo") 16 | fmt.Println(v) // Output: bar 17 | } 18 | 19 | func ExampleNewLIFO() { 20 | lifo := cache.NewLIFO[string, string](15) 21 | defer lifo.Stop() 22 | 23 | lifo.Set("foo", "bar") 24 | v, _ := lifo.Get("foo") 25 | fmt.Println(v) // Output: bar 26 | } 27 | 28 | func ExampleNewLRU() { 29 | lru := cache.NewLRU[string, string](15) 30 | defer lru.Stop() 31 | 32 | lru.Set("foo", "bar") 33 | v, _ := lru.Get("foo") 34 | fmt.Println(v) // Output: bar 35 | } 36 | 37 | func ExampleNewRandom() { 38 | random := cache.NewRandom[string, string](15) 39 | defer random.Stop() 40 | 41 | random.Set("foo", "bar") 42 | v, _ := random.Get("foo") 43 | fmt.Println(v) // Output: bar 44 | } 45 | 46 | func ExampleNewTTL() { 47 | ttl := cache.NewTTL[string, string](5 * time.Minute) 48 | defer ttl.Stop() 49 | 50 | ttl.Set("foo", "bar") 51 | v, _ := ttl.Get("foo") 52 | fmt.Println(v) // Output: bar 53 | } 54 | -------------------------------------------------------------------------------- /fifo.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // Ensure implements. 9 | var _ Cache[string, string] = (*FIFO[string, string])(nil) 10 | 11 | // FIFO implements the first-in-first-out cache algorithm, evicting the cache 12 | // elements in the order in which they were inserted with the cache is at 13 | // capacity. 14 | // 15 | // K is the cache key and must be a comparable. 
V can be any type, but pointers 16 | // are best for performance. 17 | type FIFO[K comparable, V any] struct { 18 | // cache represents the internal cache storage. It has a comparable key and 19 | // points to an entry in the singly-linked list. The node in the linked list 20 | // contains the actual cached data. 21 | cache map[K]*fifoListItem[K, V] 22 | 23 | // head points to the head of the linked list and tail points to the tail. 24 | head, tail *fifoListItem[K, V] 25 | 26 | // capacity is the total capacity for the cache. 27 | capacity int64 28 | 29 | // stopped indicates whether the cache is stopped. 30 | stopped uint32 31 | 32 | // lock is the internal lock for concurrency. 33 | lock sync.RWMutex 34 | } 35 | 36 | // NewFIFO creates a new FIFO cache with the given capacity. 37 | func NewFIFO[K comparable, V any](capacity int64) *FIFO[K, V] { 38 | if capacity <= 0 { 39 | panic("capacity must be greater than 0") 40 | } 41 | 42 | return &FIFO[K, V]{ 43 | cache: make(map[K]*fifoListItem[K, V], capacity), 44 | capacity: capacity, 45 | } 46 | } 47 | 48 | // Get fetches the cache item at the given key. If the value exists, it is 49 | // returned. If the value does not exist, it returns the zero value for the 50 | // object and the second parameter will be false. 51 | func (l *FIFO[K, V]) Get(key K) (V, bool) { 52 | l.lock.RLock() 53 | defer l.lock.RUnlock() 54 | return l.get(key) 55 | } 56 | 57 | // get is the internal implementation of Get. It does not lock. 58 | func (l *FIFO[K, V]) get(key K) (V, bool) { 59 | if l.isStopped() { 60 | panic("cache is stopped") 61 | } 62 | 63 | node, ok := l.cache[key] 64 | if !ok { 65 | var v V 66 | return v, false 67 | } 68 | return node.value, true 69 | } 70 | 71 | // Set inserts the value in the cache. If an entry already exists at the given 72 | // key, it is overwritten. If an entry does not exist, a new entry is created 73 | // (which might trigger eviction of an older entry). 
74 | func (l *FIFO[K, V]) Set(key K, val V) { 75 | l.lock.Lock() 76 | defer l.lock.Unlock() 77 | l.set(key, val) 78 | } 79 | 80 | // set is the internal implementation for set. It does not lock. 81 | func (l *FIFO[K, V]) set(key K, val V) { 82 | if l.isStopped() { 83 | panic("cache is stopped") 84 | } 85 | 86 | if int64(len(l.cache)) >= l.capacity { 87 | head := l.head 88 | next := head.next 89 | 90 | delete(l.cache, *head.key) 91 | 92 | // Zero out the old node to improve gc sweeps. 93 | var zeroK *K 94 | var zeroV V 95 | head.key = zeroK 96 | head.value = zeroV 97 | head.next = nil 98 | 99 | l.head = next 100 | } 101 | 102 | node, ok := l.cache[key] 103 | if !ok { 104 | node = &fifoListItem[K, V]{ 105 | key: &key, 106 | } 107 | l.cache[key] = node 108 | 109 | // This entry is new, so add it to the end of the list. 110 | if l.tail != nil { 111 | l.tail.next = node 112 | } 113 | l.tail = node 114 | 115 | // If this is the first entry in the cache, update the head. 116 | if l.head == nil { 117 | l.head = node 118 | } 119 | } 120 | node.value = val 121 | } 122 | 123 | // Fetch retrieves the cached value. If the value does not exist, the FetchFunc 124 | // is called and the result is stored. If the value does exist, the FetchFunc is 125 | // not invoked. 126 | func (l *FIFO[K, V]) Fetch(key K, fn FetchFunc[V]) (V, error) { 127 | l.lock.Lock() 128 | defer l.lock.Unlock() 129 | 130 | if l.isStopped() { 131 | panic("cache is stopped") 132 | } 133 | 134 | if v, ok := l.get(key); ok { 135 | return v, nil 136 | } 137 | 138 | v, err := fn() 139 | if err != nil { 140 | var zeroV V 141 | return zeroV, err 142 | } 143 | 144 | l.set(key, v) 145 | return v, nil 146 | } 147 | 148 | // Stop clears the cache and prevents new entries from being added and 149 | // retrieved. 
150 | func (l *FIFO[K, V]) Stop() { 151 | l.lock.Lock() 152 | defer l.lock.Unlock() 153 | 154 | if !atomic.CompareAndSwapUint32(&l.stopped, 0, 1) { 155 | return 156 | } 157 | 158 | for k := range l.cache { 159 | delete(l.cache, k) 160 | } 161 | l.cache = nil 162 | 163 | var zeroK *K 164 | var zeroV V 165 | 166 | node := l.head 167 | for node != nil { 168 | node.key = zeroK 169 | node.value = zeroV 170 | node, node.next = node.next, nil 171 | } 172 | 173 | l.head = nil 174 | l.tail = nil 175 | } 176 | 177 | // isStopped is a helper for checking if the queue is stopped. 178 | func (l *FIFO[K, V]) isStopped() bool { 179 | return atomic.LoadUint32(&l.stopped) == 1 180 | } 181 | 182 | // fifoListItem represents an entry in the linked list. 183 | type fifoListItem[K comparable, V any] struct { 184 | next *fifoListItem[K, V] 185 | key *K 186 | value V 187 | } 188 | -------------------------------------------------------------------------------- /fifo_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestNewFIFO(t *testing.T) { 10 | t.Parallel() 11 | 12 | t.Run("defaults", func(t *testing.T) { 13 | t.Parallel() 14 | 15 | cache := NewFIFO[string, string](10) 16 | defer cache.Stop() 17 | 18 | if got, want := cache.capacity, int64(10); got != want { 19 | t.Errorf("expected %d to be %d", got, want) 20 | } 21 | if got, want := cache.cache, make(map[string]*fifoListItem[string, string], 10); !reflect.DeepEqual(got, want) { 22 | t.Errorf("expected %#v to be %#v", got, want) 23 | } 24 | if got, want := cache.head, (*fifoListItem[string, string])(nil); got != want { 25 | t.Errorf("expected %#v to be %#v", got, want) 26 | } 27 | if got, want := cache.tail, (*fifoListItem[string, string])(nil); got != want { 28 | t.Errorf("expected %#v to be %#v", got, want) 29 | } 30 | }) 31 | 32 | t.Run("panic_on_negative", func(t *testing.T) { 33 | t.Parallel() 34 | 
35 | defer func() { 36 | if got, want := fmt.Sprintf("%s", recover()), "capacity must be greater than 0"; got != want { 37 | t.Errorf("expected %q to contain %q", got, want) 38 | } 39 | }() 40 | 41 | cache := NewFIFO[string, string](0) 42 | defer cache.Stop() 43 | 44 | t.Errorf("did not panic") 45 | }) 46 | } 47 | 48 | func TestFIFO_Get(t *testing.T) { 49 | t.Parallel() 50 | 51 | t.Run("not_exist", func(t *testing.T) { 52 | t.Parallel() 53 | 54 | cache := NewFIFO[string, int](1) 55 | defer cache.Stop() 56 | 57 | if v, ok := cache.Get("foo"); ok { 58 | t.Errorf("expected not found, got %#v", v) 59 | } 60 | if v, ok := cache.Get("bar"); ok { 61 | t.Errorf("expected not found, got %#v", v) 62 | } 63 | 64 | if got, want := len(cache.cache), 0; got != want { 65 | t.Errorf("expected %#v to be empty", cache.cache) 66 | } 67 | }) 68 | 69 | t.Run("exists", func(t *testing.T) { 70 | t.Parallel() 71 | 72 | cache := NewFIFO[string, int](1) 73 | defer cache.Stop() 74 | 75 | cache.Set("foo", 5) 76 | 77 | if v, _ := cache.Get("foo"); v != 5 { 78 | t.Errorf("expected %#v, got %#v", 5, v) 79 | } 80 | if v, ok := cache.Get("bar"); ok { 81 | t.Errorf("expected not found, got %#v", v) 82 | } 83 | 84 | if got, want := len(cache.cache), 1; got != want { 85 | t.Errorf("expected %#v to be empty", cache.cache) 86 | } 87 | }) 88 | } 89 | 90 | func TestFIFO_Set(t *testing.T) { 91 | t.Parallel() 92 | 93 | t.Run("sets", func(t *testing.T) { 94 | t.Parallel() 95 | 96 | cache := NewFIFO[string, int](1) 97 | defer cache.Stop() 98 | 99 | cache.Set("foo", 5) 100 | 101 | if v, _ := cache.Get("foo"); v != 5 { 102 | t.Errorf("expected %#v, got %#v", 5, v) 103 | } 104 | }) 105 | 106 | t.Run("evicts", func(t *testing.T) { 107 | t.Parallel() 108 | 109 | cache := NewFIFO[string, int](2) 110 | defer cache.Stop() 111 | 112 | cache.Set("foo", 5) 113 | cache.Set("bar", 4) 114 | 115 | if v, _ := cache.Get("foo"); v != 5 { 116 | t.Errorf("expected %#v, got %#v", 5, v) 117 | } 118 | if v, _ := cache.Get("bar"); 
v != 4 { 119 | t.Errorf("expected %#v, got %#v", 5, v) 120 | } 121 | 122 | cache.Set("baz", 3) 123 | 124 | if v, _ := cache.Get("baz"); v != 3 { 125 | t.Errorf("expected %#v, got %#v", 3, v) 126 | } 127 | if v, _ := cache.Get("bar"); v != 4 { 128 | t.Errorf("expected %#v, got %#v", 5, v) 129 | } 130 | if v, ok := cache.Get("foo"); ok { 131 | t.Errorf("expected %#v to be evicted", v) 132 | } 133 | }) 134 | } 135 | 136 | func TestFIFO_Fetch(t *testing.T) { 137 | t.Parallel() 138 | 139 | t.Run("saves", func(t *testing.T) { 140 | t.Parallel() 141 | 142 | cache := NewFIFO[string, string](3) 143 | defer cache.Stop() 144 | 145 | v, err := cache.Fetch("foo", func() (string, error) { 146 | return "bar", nil 147 | }) 148 | if err != nil { 149 | t.Fatal(err) 150 | } 151 | if got, want := v, "bar"; got != want { 152 | t.Errorf("expected %q to eb %q", got, want) 153 | } 154 | 155 | v, ok := cache.Get("foo") 156 | if !ok { 157 | t.Errorf("expected item to be cached") 158 | } 159 | if got, want := v, "bar"; got != want { 160 | t.Errorf("expected %q to eb %q", got, want) 161 | } 162 | }) 163 | 164 | t.Run("returns_cached", func(t *testing.T) { 165 | t.Parallel() 166 | 167 | cache := NewFIFO[string, string](3) 168 | defer cache.Stop() 169 | 170 | cache.Set("foo", "bar") 171 | 172 | cache.Fetch("foo", func() (string, error) { 173 | t.Errorf("function was called") 174 | return "", nil 175 | }) 176 | }) 177 | 178 | t.Run("returns_error", func(t *testing.T) { 179 | t.Parallel() 180 | 181 | cache := NewFIFO[string, string](3) 182 | defer cache.Stop() 183 | 184 | if _, err := cache.Fetch("foo", func() (string, error) { 185 | return "", fmt.Errorf("error") 186 | }); err == nil { 187 | t.Error("expected error") 188 | } 189 | }) 190 | } 191 | 192 | func TestFIFO_Stop(t *testing.T) { 193 | t.Parallel() 194 | 195 | t.Run("deletes_all_entries", func(t *testing.T) { 196 | t.Parallel() 197 | 198 | cache := NewFIFO[string, int](1) 199 | 200 | cache.Set("foo", 5) 201 | cache.Set("bar", 10) 202 | 
cache.Set("baz", 15) 203 | 204 | cache.Stop() 205 | 206 | if cache.cache != nil { 207 | t.Errorf("expected %#v to be nil", cache.cache) 208 | } 209 | if cache.head != nil { 210 | t.Errorf("expected %#v to be nil", cache.head) 211 | } 212 | if cache.tail != nil { 213 | t.Errorf("expected %#v to be nil", cache.tail) 214 | } 215 | }) 216 | 217 | t.Run("panics_get", func(t *testing.T) { 218 | t.Parallel() 219 | 220 | defer func() { 221 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 222 | t.Errorf("expected %q to contain %q", got, want) 223 | } 224 | }() 225 | 226 | cache := NewFIFO[string, int](10) 227 | cache.Stop() 228 | cache.Get("foo") 229 | t.Errorf("did not panic") 230 | }) 231 | 232 | t.Run("panics_set", func(t *testing.T) { 233 | t.Parallel() 234 | 235 | defer func() { 236 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 237 | t.Errorf("expected %q to contain %q", got, want) 238 | } 239 | }() 240 | 241 | cache := NewFIFO[string, int](10) 242 | cache.Stop() 243 | cache.Set("foo", 5) 244 | t.Errorf("did not panic") 245 | }) 246 | } 247 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/sethvargo/go-cache 2 | 3 | go 1.18 4 | -------------------------------------------------------------------------------- /lifo.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // Ensure implements. 9 | var _ Cache[string, string] = (*LIFO[string, string])(nil) 10 | 11 | // LIFO implements the last-in-first-out cache algorithm, evicting the most 12 | // recent elements in the when the cache is at capacity. 13 | // 14 | // K is the cache key and must be a comparable. V can be any type, but pointers 15 | // are best for performance. 
16 | type LIFO[K comparable, V any] struct { 17 | // cache represents the internal cache storage. It has a comparable key and 18 | // points to an entry in the singly-linked list. The node in the linked list 19 | // contains the actual cached data. 20 | cache map[K]*lifoListItem[K, V] 21 | 22 | // head points to the head of the linked list. 23 | head *lifoListItem[K, V] 24 | 25 | // capacity is the total capacity for the cache. 26 | capacity int64 27 | 28 | // stopped indicates whether the cache is stopped. 29 | stopped uint32 30 | 31 | // lock is the internal lock for concurrency. 32 | lock sync.RWMutex 33 | } 34 | 35 | // NewLIFO creates a new LIFO cache with the given capacity. 36 | func NewLIFO[K comparable, V any](capacity int64) *LIFO[K, V] { 37 | if capacity <= 0 { 38 | panic("capacity must be greater than 0") 39 | } 40 | 41 | return &LIFO[K, V]{ 42 | cache: make(map[K]*lifoListItem[K, V], capacity), 43 | capacity: capacity, 44 | } 45 | } 46 | 47 | // Get fetches the cache item at the given key. If the value exists, it is 48 | // returned. If the value does not exist, it returns the zero value for the 49 | // object and the second parameter will be false. 50 | func (l *LIFO[K, V]) Get(key K) (V, bool) { 51 | l.lock.RLock() 52 | defer l.lock.RUnlock() 53 | return l.get(key) 54 | } 55 | 56 | // get is the internal implementation of Get. It does not lock. 57 | func (l *LIFO[K, V]) get(key K) (V, bool) { 58 | if l.isStopped() { 59 | panic("cache is stopped") 60 | } 61 | 62 | node, ok := l.cache[key] 63 | if !ok { 64 | var v V 65 | return v, false 66 | } 67 | return node.value, true 68 | } 69 | 70 | // Set inserts the value in the cache. If an entry already exists at the given 71 | // key, it is overwritten. If an entry does not exist, a new entry is created 72 | // (which might trigger eviction of another entry). 
73 | func (l *LIFO[K, V]) Set(key K, val V) { 74 | l.lock.Lock() 75 | defer l.lock.Unlock() 76 | l.set(key, val) 77 | } 78 | 79 | // set is the internal implementation for set. It does not lock. 80 | func (l *LIFO[K, V]) set(key K, val V) { 81 | if l.isStopped() { 82 | panic("cache is stopped") 83 | } 84 | 85 | if int64(len(l.cache)) >= l.capacity { 86 | head := l.head 87 | next := head.next 88 | 89 | delete(l.cache, *head.key) 90 | 91 | // Zero out the old node to improve gc sweeps. 92 | var zeroK *K 93 | var zeroV V 94 | head.key = zeroK 95 | head.value = zeroV 96 | head.next = nil 97 | 98 | l.head = next 99 | } 100 | 101 | node, ok := l.cache[key] 102 | if !ok { 103 | node = &lifoListItem[K, V]{ 104 | key: &key, 105 | } 106 | l.cache[key] = node 107 | 108 | node.next = l.head 109 | l.head = node 110 | } 111 | node.value = val 112 | } 113 | 114 | // Fetch retrieves the cached value. If the value does not exist, the FetchFunc 115 | // is called and the result is stored. If the value does exist, the FetchFunc is 116 | // not invoked. 117 | func (l *LIFO[K, V]) Fetch(key K, fn FetchFunc[V]) (V, error) { 118 | l.lock.Lock() 119 | defer l.lock.Unlock() 120 | 121 | if l.isStopped() { 122 | panic("cache is stopped") 123 | } 124 | 125 | if v, ok := l.get(key); ok { 126 | return v, nil 127 | } 128 | 129 | v, err := fn() 130 | if err != nil { 131 | var zeroV V 132 | return zeroV, err 133 | } 134 | 135 | l.set(key, v) 136 | return v, nil 137 | } 138 | 139 | // Stop clears the cache and prevents new entries from being added and 140 | // retrieved. 
141 | func (l *LIFO[K, V]) Stop() { 142 | l.lock.Lock() 143 | defer l.lock.Unlock() 144 | 145 | if !atomic.CompareAndSwapUint32(&l.stopped, 0, 1) { 146 | return 147 | } 148 | 149 | for k := range l.cache { 150 | delete(l.cache, k) 151 | } 152 | l.cache = nil 153 | 154 | var zeroK *K 155 | var zeroV V 156 | 157 | node := l.head 158 | for node != nil { 159 | node.key = zeroK 160 | node.value = zeroV 161 | node, node.next = node.next, nil 162 | } 163 | 164 | l.head = nil 165 | } 166 | 167 | // isStopped is a helper for checking if the queue is stopped. 168 | func (l *LIFO[K, V]) isStopped() bool { 169 | return atomic.LoadUint32(&l.stopped) == 1 170 | } 171 | 172 | // lifoListItem represents an entry in the linked list. 173 | type lifoListItem[K comparable, V any] struct { 174 | next *lifoListItem[K, V] 175 | key *K 176 | value V 177 | } 178 | -------------------------------------------------------------------------------- /lifo_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestNewLIFO(t *testing.T) { 10 | t.Parallel() 11 | 12 | t.Run("defaults", func(t *testing.T) { 13 | t.Parallel() 14 | 15 | cache := NewLIFO[string, string](10) 16 | defer cache.Stop() 17 | 18 | if got, want := cache.capacity, int64(10); got != want { 19 | t.Errorf("expected %d to be %d", got, want) 20 | } 21 | if got, want := cache.cache, make(map[string]*lifoListItem[string, string], 10); !reflect.DeepEqual(got, want) { 22 | t.Errorf("expected %#v to be %#v", got, want) 23 | } 24 | if got, want := cache.head, (*lifoListItem[string, string])(nil); got != want { 25 | t.Errorf("expected %#v to be %#v", got, want) 26 | } 27 | }) 28 | 29 | t.Run("panic_on_negative", func(t *testing.T) { 30 | t.Parallel() 31 | 32 | defer func() { 33 | if got, want := fmt.Sprintf("%s", recover()), "capacity must be greater than 0"; got != want { 34 | t.Errorf("expected %q to contain %q", 
got, want) 35 | } 36 | }() 37 | 38 | cache := NewLIFO[string, string](0) 39 | defer cache.Stop() 40 | 41 | t.Errorf("did not panic") 42 | }) 43 | } 44 | 45 | func TestLIFO_Get(t *testing.T) { 46 | t.Parallel() 47 | 48 | t.Run("not_exist", func(t *testing.T) { 49 | t.Parallel() 50 | 51 | cache := NewLIFO[string, int](1) 52 | defer cache.Stop() 53 | 54 | if v, ok := cache.Get("foo"); ok { 55 | t.Errorf("expected not found, got %#v", v) 56 | } 57 | if v, ok := cache.Get("bar"); ok { 58 | t.Errorf("expected not found, got %#v", v) 59 | } 60 | 61 | if got, want := len(cache.cache), 0; got != want { 62 | t.Errorf("expected %#v to be empty", cache.cache) 63 | } 64 | }) 65 | 66 | t.Run("exists", func(t *testing.T) { 67 | t.Parallel() 68 | 69 | cache := NewLIFO[string, int](1) 70 | defer cache.Stop() 71 | 72 | cache.Set("foo", 5) 73 | 74 | if v, _ := cache.Get("foo"); v != 5 { 75 | t.Errorf("expected %#v, got %#v", 5, v) 76 | } 77 | if v, ok := cache.Get("bar"); ok { 78 | t.Errorf("expected not found, got %#v", v) 79 | } 80 | 81 | if got, want := len(cache.cache), 1; got != want { 82 | t.Errorf("expected %#v to be empty", cache.cache) 83 | } 84 | }) 85 | } 86 | 87 | func TestLIFO_Set(t *testing.T) { 88 | t.Parallel() 89 | 90 | t.Run("sets", func(t *testing.T) { 91 | t.Parallel() 92 | 93 | cache := NewLIFO[string, int](1) 94 | defer cache.Stop() 95 | 96 | cache.Set("foo", 5) 97 | 98 | if v, _ := cache.Get("foo"); v != 5 { 99 | t.Errorf("expected %#v, got %#v", 5, v) 100 | } 101 | }) 102 | 103 | t.Run("evicts", func(t *testing.T) { 104 | t.Parallel() 105 | 106 | cache := NewLIFO[string, int](2) 107 | defer cache.Stop() 108 | 109 | cache.Set("foo", 5) 110 | cache.Set("bar", 4) 111 | 112 | if v, _ := cache.Get("foo"); v != 5 { 113 | t.Errorf("expected %#v, got %#v", 5, v) 114 | } 115 | if v, _ := cache.Get("bar"); v != 4 { 116 | t.Errorf("expected %#v, got %#v", 5, v) 117 | } 118 | 119 | cache.Set("baz", 3) 120 | 121 | if v, _ := cache.Get("baz"); v != 3 { 122 | 
t.Errorf("expected %#v, got %#v", 3, v) 123 | } 124 | if v, ok := cache.Get("bar"); ok { 125 | t.Errorf("expected %#v to be evicted", v) 126 | } 127 | if v, _ := cache.Get("foo"); v != 5 { 128 | t.Errorf("expected %#v, got %#v", 5, v) 129 | } 130 | }) 131 | } 132 | 133 | func TestLIFO_Fetch(t *testing.T) { 134 | t.Parallel() 135 | 136 | t.Run("saves", func(t *testing.T) { 137 | t.Parallel() 138 | 139 | cache := NewLIFO[string, string](3) 140 | defer cache.Stop() 141 | 142 | v, err := cache.Fetch("foo", func() (string, error) { 143 | return "bar", nil 144 | }) 145 | if err != nil { 146 | t.Fatal(err) 147 | } 148 | if got, want := v, "bar"; got != want { 149 | t.Errorf("expected %q to eb %q", got, want) 150 | } 151 | 152 | v, ok := cache.Get("foo") 153 | if !ok { 154 | t.Errorf("expected item to be cached") 155 | } 156 | if got, want := v, "bar"; got != want { 157 | t.Errorf("expected %q to eb %q", got, want) 158 | } 159 | }) 160 | 161 | t.Run("returns_cached", func(t *testing.T) { 162 | t.Parallel() 163 | 164 | cache := NewLIFO[string, string](3) 165 | defer cache.Stop() 166 | 167 | cache.Set("foo", "bar") 168 | 169 | cache.Fetch("foo", func() (string, error) { 170 | t.Errorf("function was called") 171 | return "", nil 172 | }) 173 | }) 174 | 175 | t.Run("returns_error", func(t *testing.T) { 176 | t.Parallel() 177 | 178 | cache := NewLIFO[string, string](3) 179 | defer cache.Stop() 180 | 181 | if _, err := cache.Fetch("foo", func() (string, error) { 182 | return "", fmt.Errorf("error") 183 | }); err == nil { 184 | t.Error("expected error") 185 | } 186 | }) 187 | } 188 | 189 | func TestLIFO_Stop(t *testing.T) { 190 | t.Parallel() 191 | 192 | t.Run("deletes_all_entries", func(t *testing.T) { 193 | t.Parallel() 194 | 195 | cache := NewLIFO[string, int](1) 196 | 197 | cache.Set("foo", 5) 198 | cache.Set("bar", 10) 199 | cache.Set("baz", 15) 200 | 201 | cache.Stop() 202 | 203 | if cache.cache != nil { 204 | t.Errorf("expected %#v to be nil", cache.cache) 205 | } 206 | if 
cache.head != nil { 207 | t.Errorf("expected %#v to be nil", cache.head) 208 | } 209 | }) 210 | 211 | t.Run("panics_get", func(t *testing.T) { 212 | t.Parallel() 213 | 214 | defer func() { 215 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 216 | t.Errorf("expected %q to contain %q", got, want) 217 | } 218 | }() 219 | 220 | cache := NewLIFO[string, int](10) 221 | cache.Stop() 222 | cache.Get("foo") 223 | t.Errorf("did not panic") 224 | }) 225 | 226 | t.Run("panics_set", func(t *testing.T) { 227 | t.Parallel() 228 | 229 | defer func() { 230 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 231 | t.Errorf("expected %q to contain %q", got, want) 232 | } 233 | }() 234 | 235 | cache := NewLIFO[string, int](10) 236 | cache.Stop() 237 | cache.Set("foo", 5) 238 | t.Errorf("did not panic") 239 | }) 240 | } 241 | -------------------------------------------------------------------------------- /lru.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // Ensure implements. 9 | var _ Cache[string, string] = (*LRU[string, string])(nil) 10 | 11 | // LRU implements the least-recently-used cache algorithm, evicting the oldest 12 | // cache elements when the cache is at capacity. This cache is not safe for 13 | // concurrent use. 14 | // 15 | // K is the cache key and must be a comparable. V can be any type, but pointers 16 | // are best for performance. 17 | type LRU[K comparable, V any] struct { 18 | // cache represents the internal cache storage. It has a comparable key and 19 | // points to an entry in the doubly-linked list. The node in the linked list 20 | // contains the actual cached data. 21 | cache map[K]*lruListItem[K, V] 22 | 23 | // head points to the head of the linked list and tail points to the tail. 24 | head, tail *lruListItem[K, V] 25 | 26 | // capacity is the total capacity for the cache. 
27 | capacity int64 28 | 29 | // stopped indicates whether the cache is stopped. 30 | stopped uint32 31 | 32 | // lock is the internal lock for concurrency. 33 | lock sync.Mutex 34 | } 35 | 36 | // NewLRU creates a new LRU cache with the given of the given capacity. 37 | func NewLRU[K comparable, V any](capacity int64) *LRU[K, V] { 38 | if capacity <= 0 { 39 | panic("capacity must be greater than 0") 40 | } 41 | 42 | return &LRU[K, V]{ 43 | cache: make(map[K]*lruListItem[K, V], capacity), 44 | capacity: capacity, 45 | } 46 | } 47 | 48 | // Get fetches the cache item at the given key. If the value exists, it is 49 | // returned. If the value does not exist, it returns the zero value for the 50 | // object and the second parameter will be false. 51 | func (l *LRU[K, V]) Get(key K) (V, bool) { 52 | l.lock.Lock() 53 | defer l.lock.Unlock() 54 | return l.get(key) 55 | } 56 | 57 | // get is the internal implementation of Get. It does not lock. 58 | func (l *LRU[K, V]) get(key K) (V, bool) { 59 | if l.isStopped() { 60 | panic("cache is stopped") 61 | } 62 | 63 | node, ok := l.cache[key] 64 | if !ok { 65 | var v V 66 | return v, false 67 | } 68 | 69 | l.moveToTail(node) 70 | return node.value, true 71 | } 72 | 73 | // Set inserts the value in the cache. If an entry already exists at the given 74 | // key, it is overwritten. If an entry does not exist, a new entry is created 75 | // (which might trigger eviction of an older entry). 76 | func (l *LRU[K, V]) Set(key K, val V) { 77 | l.lock.Lock() 78 | defer l.lock.Unlock() 79 | l.set(key, val) 80 | } 81 | 82 | // set is the internal implementation for set. It does not lock. 83 | func (l *LRU[K, V]) set(key K, val V) { 84 | if l.isStopped() { 85 | panic("cache is stopped") 86 | } 87 | 88 | if int64(len(l.cache)) >= l.capacity { 89 | head := l.head 90 | next := head.next 91 | 92 | delete(l.cache, *head.key) 93 | 94 | // Zero out the old node to improve gc sweeps. 
95 | var zeroK *K 96 | var zeroV V 97 | head.key = zeroK 98 | head.value = zeroV 99 | head.prev = nil 100 | head.next = nil 101 | 102 | if next != nil { 103 | next.prev = nil 104 | } 105 | l.head = next 106 | } 107 | 108 | node, ok := l.cache[key] 109 | if !ok { 110 | node = &lruListItem[K, V]{ 111 | key: &key, 112 | } 113 | l.cache[key] = node 114 | } 115 | node.value = val 116 | l.moveToTail(node) 117 | } 118 | 119 | // Fetch retrieves the cached value. If the value does not exist, the FetchFunc 120 | // is called and the result is stored. If the value does exist, the FetchFunc is 121 | // not invoked. 122 | func (l *LRU[K, V]) Fetch(key K, fn FetchFunc[V]) (V, error) { 123 | l.lock.Lock() 124 | defer l.lock.Unlock() 125 | 126 | if l.isStopped() { 127 | panic("cache is stopped") 128 | } 129 | 130 | if v, ok := l.get(key); ok { 131 | return v, nil 132 | } 133 | 134 | v, err := fn() 135 | if err != nil { 136 | var zeroV V 137 | return zeroV, err 138 | } 139 | 140 | l.set(key, v) 141 | return v, nil 142 | } 143 | 144 | // Stop clears the cache and prevents new entries from being added and 145 | // retrieved. 146 | func (l *LRU[K, V]) Stop() { 147 | l.lock.Lock() 148 | defer l.lock.Unlock() 149 | 150 | if !atomic.CompareAndSwapUint32(&l.stopped, 0, 1) { 151 | return 152 | } 153 | 154 | for k := range l.cache { 155 | delete(l.cache, k) 156 | } 157 | l.cache = nil 158 | 159 | var zeroK *K 160 | var zeroV V 161 | 162 | node := l.head 163 | for node != nil { 164 | node.key = zeroK 165 | node.value = zeroV 166 | node.prev = nil 167 | node, node.next = node.next, nil 168 | } 169 | 170 | l.head = nil 171 | l.tail = nil 172 | } 173 | 174 | // moveToTail moves the given node to the end (tail) of the linked list. 
175 | func (l *LRU[K, V]) moveToTail(node *lruListItem[K, V]) { 176 | if node == l.tail { 177 | return 178 | } 179 | 180 | if node == l.head { 181 | l.head = node.next 182 | } 183 | 184 | if node.prev != nil { 185 | node.prev.next = node.next 186 | } 187 | 188 | if node.next != nil { 189 | node.next.prev = node.prev 190 | } 191 | 192 | if l.tail != nil { 193 | l.tail.next = node 194 | } 195 | node.next = nil 196 | node.prev = l.tail 197 | l.tail = node 198 | 199 | if l.head == nil { 200 | l.head = node 201 | } 202 | } 203 | 204 | // isStopped is a helper for checking if the queue is stopped. 205 | func (l *LRU[K, V]) isStopped() bool { 206 | return atomic.LoadUint32(&l.stopped) == 1 207 | } 208 | 209 | // lruListItem represents an entry in the linked list. 210 | type lruListItem[K comparable, V any] struct { 211 | prev, next *lruListItem[K, V] 212 | key *K 213 | value V 214 | } 215 | -------------------------------------------------------------------------------- /lru_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestNewLRU(t *testing.T) { 10 | t.Parallel() 11 | 12 | t.Run("defaults", func(t *testing.T) { 13 | t.Parallel() 14 | 15 | cache := NewLRU[string, string](10) 16 | defer cache.Stop() 17 | 18 | if got, want := cache.capacity, int64(10); got != want { 19 | t.Errorf("expected %d to be %d", got, want) 20 | } 21 | if got, want := cache.cache, make(map[string]*lruListItem[string, string], 10); !reflect.DeepEqual(got, want) { 22 | t.Errorf("expected %#v to be %#v", got, want) 23 | } 24 | if got, want := cache.head, (*lruListItem[string, string])(nil); got != want { 25 | t.Errorf("expected %#v to be %#v", got, want) 26 | } 27 | if got, want := cache.tail, (*lruListItem[string, string])(nil); got != want { 28 | t.Errorf("expected %#v to be %#v", got, want) 29 | } 30 | }) 31 | 32 | t.Run("panic_on_negative", func(t *testing.T) { 33 | 
t.Parallel() 34 | 35 | defer func() { 36 | if got, want := fmt.Sprintf("%s", recover()), "capacity must be greater than 0"; got != want { 37 | t.Errorf("expected %q to contain %q", got, want) 38 | } 39 | }() 40 | 41 | cache := NewLRU[string, string](0) 42 | defer cache.Stop() 43 | 44 | t.Errorf("did not panic") 45 | }) 46 | } 47 | 48 | func TestLRU_Get(t *testing.T) { 49 | t.Parallel() 50 | 51 | t.Run("not_exist", func(t *testing.T) { 52 | t.Parallel() 53 | 54 | cache := NewLRU[string, int](1) 55 | defer cache.Stop() 56 | 57 | if v, ok := cache.Get("foo"); ok { 58 | t.Errorf("expected not found, got %#v", v) 59 | } 60 | if v, ok := cache.Get("bar"); ok { 61 | t.Errorf("expected not found, got %#v", v) 62 | } 63 | 64 | if got, want := len(cache.cache), 0; got != want { 65 | t.Errorf("expected %#v to be empty", cache.cache) 66 | } 67 | }) 68 | 69 | t.Run("exists", func(t *testing.T) { 70 | t.Parallel() 71 | 72 | cache := NewLRU[string, int](1) 73 | defer cache.Stop() 74 | 75 | cache.Set("foo", 5) 76 | 77 | if v, _ := cache.Get("foo"); v != 5 { 78 | t.Errorf("expected %#v, got %#v", 5, v) 79 | } 80 | if v, ok := cache.Get("bar"); ok { 81 | t.Errorf("expected not found, got %#v", v) 82 | } 83 | }) 84 | 85 | t.Run("moves_to_tail", func(t *testing.T) { 86 | t.Parallel() 87 | 88 | cache := NewLRU[string, int](3) 89 | defer cache.Stop() 90 | 91 | cache.Set("foo", 5) 92 | cache.Set("bar", 3) 93 | cache.Set("baz", 1) 94 | 95 | if got, want := cache.head.key, "foo"; *got != want { 96 | t.Errorf("expected %v to be %v", got, want) 97 | } 98 | if got, want := cache.tail.key, "baz"; *got != want { 99 | t.Errorf("expected %v to be %v", got, want) 100 | } 101 | 102 | cache.Get("baz") 103 | if got, want := cache.tail.key, "baz"; *got != want { 104 | t.Errorf("expected %v to be %v", got, want) 105 | } 106 | 107 | cache.Get("baz") 108 | if got, want := cache.tail.key, "baz"; *got != want { 109 | t.Errorf("expected %v to be %v", got, want) 110 | } 111 | 112 | cache.Get("foo") 113 | if 
got, want := cache.tail.key, "foo"; *got != want { 114 | t.Errorf("expected %v to be %v", got, want) 115 | } 116 | }) 117 | } 118 | 119 | func TestLRU_Set(t *testing.T) { 120 | t.Parallel() 121 | 122 | t.Run("sets", func(t *testing.T) { 123 | t.Parallel() 124 | 125 | cache := NewLRU[string, int](1) 126 | defer cache.Stop() 127 | 128 | cache.Set("foo", 5) 129 | 130 | if v, _ := cache.Get("foo"); v != 5 { 131 | t.Errorf("expected %#v, got %#v", 5, v) 132 | } 133 | }) 134 | 135 | t.Run("evicts", func(t *testing.T) { 136 | t.Parallel() 137 | 138 | cache := NewLRU[string, int](2) 139 | defer cache.Stop() 140 | 141 | cache.Set("foo", 5) 142 | cache.Set("bar", 4) 143 | 144 | if v, _ := cache.Get("foo"); v != 5 { 145 | t.Errorf("expected %#v, got %#v", 5, v) 146 | } 147 | if v, _ := cache.Get("bar"); v != 4 { 148 | t.Errorf("expected %#v, got %#v", 5, v) 149 | } 150 | 151 | cache.Set("baz", 3) 152 | 153 | if v, _ := cache.Get("baz"); v != 3 { 154 | t.Errorf("expected %#v, got %#v", 3, v) 155 | } 156 | if v, _ := cache.Get("bar"); v != 4 { 157 | t.Errorf("expected %#v, got %#v", 5, v) 158 | } 159 | if v, ok := cache.Get("foo"); ok { 160 | t.Errorf("expected %#v to be evicted", v) 161 | } 162 | }) 163 | } 164 | 165 | func TestLRU_Fetch(t *testing.T) { 166 | t.Parallel() 167 | 168 | t.Run("saves", func(t *testing.T) { 169 | t.Parallel() 170 | 171 | cache := NewLRU[string, string](3) 172 | defer cache.Stop() 173 | 174 | v, err := cache.Fetch("foo", func() (string, error) { 175 | return "bar", nil 176 | }) 177 | if err != nil { 178 | t.Fatal(err) 179 | } 180 | if got, want := v, "bar"; got != want { 181 | t.Errorf("expected %q to eb %q", got, want) 182 | } 183 | 184 | v, ok := cache.Get("foo") 185 | if !ok { 186 | t.Errorf("expected item to be cached") 187 | } 188 | if got, want := v, "bar"; got != want { 189 | t.Errorf("expected %q to eb %q", got, want) 190 | } 191 | }) 192 | 193 | t.Run("returns_cached", func(t *testing.T) { 194 | t.Parallel() 195 | 196 | cache := 
NewLRU[string, string](3) 197 | defer cache.Stop() 198 | 199 | cache.Set("foo", "bar") 200 | 201 | cache.Fetch("foo", func() (string, error) { 202 | t.Errorf("function was called") 203 | return "", nil 204 | }) 205 | }) 206 | 207 | t.Run("returns_error", func(t *testing.T) { 208 | t.Parallel() 209 | 210 | cache := NewLRU[string, string](3) 211 | defer cache.Stop() 212 | 213 | if _, err := cache.Fetch("foo", func() (string, error) { 214 | return "", fmt.Errorf("error") 215 | }); err == nil { 216 | t.Error("expected error") 217 | } 218 | }) 219 | } 220 | 221 | func TestLRU_Stop(t *testing.T) { 222 | t.Parallel() 223 | 224 | t.Run("deletes_all_entries", func(t *testing.T) { 225 | t.Parallel() 226 | 227 | cache := NewLRU[string, int](1) 228 | 229 | cache.Set("foo", 5) 230 | cache.Set("bar", 10) 231 | cache.Set("baz", 15) 232 | 233 | cache.Stop() 234 | 235 | if cache.cache != nil { 236 | t.Errorf("expected %#v to be nil", cache.cache) 237 | } 238 | if cache.head != nil { 239 | t.Errorf("expected %#v to be nil", cache.head) 240 | } 241 | if cache.tail != nil { 242 | t.Errorf("expected %#v to be nil", cache.tail) 243 | } 244 | }) 245 | 246 | t.Run("panics_get", func(t *testing.T) { 247 | t.Parallel() 248 | 249 | defer func() { 250 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 251 | t.Errorf("expected %q to contain %q", got, want) 252 | } 253 | }() 254 | 255 | cache := NewLRU[string, int](10) 256 | cache.Stop() 257 | cache.Get("foo") 258 | t.Errorf("did not panic") 259 | }) 260 | 261 | t.Run("panics_set", func(t *testing.T) { 262 | t.Parallel() 263 | 264 | defer func() { 265 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 266 | t.Errorf("expected %q to contain %q", got, want) 267 | } 268 | }() 269 | 270 | cache := NewLRU[string, int](10) 271 | cache.Stop() 272 | cache.Set("foo", 5) 273 | t.Errorf("did not panic") 274 | }) 275 | } 276 | 
-------------------------------------------------------------------------------- /random.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // Ensure implements. 9 | var _ Cache[string, string] = (*Random[string, string])(nil) 10 | 11 | // Random implements a cache in which items are evicted randomly when space is 12 | // needed. 13 | // 14 | // K is the cache key and must be a comparable. V can be any type, but pointers 15 | // are best for performance. 16 | type Random[K comparable, V any] struct { 17 | // cache represents the internal cache storage. 18 | cache map[K]V 19 | 20 | // capacity is the total capacity for the cache. 21 | capacity int64 22 | 23 | // stopped indicates whether the cache is stopped. 24 | stopped uint32 25 | 26 | // lock is the internal lock for concurrency. 27 | lock sync.RWMutex 28 | } 29 | 30 | // NewRandom creates a new random replacement cache with the given of the given 31 | // capacity. 32 | func NewRandom[K comparable, V any](capacity int64) *Random[K, V] { 33 | if capacity <= 0 { 34 | panic("capacity must be greater than 0") 35 | } 36 | 37 | return &Random[K, V]{ 38 | cache: make(map[K]V, capacity), 39 | capacity: capacity, 40 | } 41 | } 42 | 43 | // Get fetches the cache item at the given key. If the value exists, it is 44 | // returned. If the value does not exist, it returns the zero value for the 45 | // object and the second parameter will be false. 46 | func (l *Random[K, V]) Get(key K) (V, bool) { 47 | l.lock.RLock() 48 | defer l.lock.RUnlock() 49 | return l.get(key) 50 | } 51 | 52 | // get is the internal implementation of Get. It does not lock. 53 | func (l *Random[K, V]) get(key K) (V, bool) { 54 | if l.isStopped() { 55 | panic("cache is stopped") 56 | } 57 | 58 | v, ok := l.cache[key] 59 | return v, ok 60 | } 61 | 62 | // Set inserts the value in the cache. 
If an entry already exists at the given 63 | // key, it is overwritten. If an entry does not exist, a new entry is created 64 | // (which might trigger eviction of an random entry). 65 | func (l *Random[K, V]) Set(key K, val V) { 66 | l.lock.Lock() 67 | defer l.lock.Unlock() 68 | l.set(key, val) 69 | } 70 | 71 | // set is the internal implementation for set. It does not lock. 72 | func (l *Random[K, V]) set(key K, val V) { 73 | if l.isStopped() { 74 | panic("cache is stopped") 75 | } 76 | 77 | if int64(len(l.cache)) >= l.capacity { 78 | // Go's map iteration is random on each invocation, so iterate and delete 79 | // the first element. 80 | for k := range l.cache { 81 | delete(l.cache, k) 82 | break 83 | } 84 | } 85 | 86 | l.cache[key] = val 87 | } 88 | 89 | // Fetch retrieves the cached value. If the value does not exist, the FetchFunc 90 | // is called and the result is stored. If the value does exist, the FetchFunc is 91 | // not invoked. 92 | func (l *Random[K, V]) Fetch(key K, fn FetchFunc[V]) (V, error) { 93 | l.lock.Lock() 94 | defer l.lock.Unlock() 95 | 96 | if l.isStopped() { 97 | panic("cache is stopped") 98 | } 99 | 100 | if v, ok := l.get(key); ok { 101 | return v, nil 102 | } 103 | 104 | v, err := fn() 105 | if err != nil { 106 | var zeroV V 107 | return zeroV, err 108 | } 109 | 110 | l.set(key, v) 111 | return v, nil 112 | } 113 | 114 | // Stop clears the cache and prevents new entries from being added and 115 | // retrieved. 116 | func (l *Random[K, V]) Stop() { 117 | l.lock.Lock() 118 | defer l.lock.Unlock() 119 | 120 | if !atomic.CompareAndSwapUint32(&l.stopped, 0, 1) { 121 | return 122 | } 123 | 124 | for k := range l.cache { 125 | delete(l.cache, k) 126 | } 127 | l.cache = nil 128 | } 129 | 130 | // isStopped is a helper for checking if the queue is stopped. 
131 | func (l *Random[K, V]) isStopped() bool { 132 | return atomic.LoadUint32(&l.stopped) == 1 133 | } 134 | -------------------------------------------------------------------------------- /random_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestNewRandom(t *testing.T) { 10 | t.Parallel() 11 | 12 | t.Run("defaults", func(t *testing.T) { 13 | t.Parallel() 14 | 15 | cache := NewRandom[string, string](10) 16 | defer cache.Stop() 17 | 18 | if got, want := cache.capacity, int64(10); got != want { 19 | t.Errorf("expected %d to be %d", got, want) 20 | } 21 | if got, want := cache.cache, make(map[string]string, 10); !reflect.DeepEqual(got, want) { 22 | t.Errorf("expected %#v to be %#v", got, want) 23 | } 24 | }) 25 | 26 | t.Run("panic_on_negative", func(t *testing.T) { 27 | t.Parallel() 28 | 29 | defer func() { 30 | if got, want := fmt.Sprintf("%s", recover()), "capacity must be greater than 0"; got != want { 31 | t.Errorf("expected %q to contain %q", got, want) 32 | } 33 | }() 34 | 35 | cache := NewRandom[string, string](0) 36 | defer cache.Stop() 37 | 38 | t.Errorf("did not panic") 39 | }) 40 | } 41 | 42 | func TestRandom_Get(t *testing.T) { 43 | t.Parallel() 44 | 45 | t.Run("not_exist", func(t *testing.T) { 46 | t.Parallel() 47 | 48 | cache := NewRandom[string, int](1) 49 | defer cache.Stop() 50 | 51 | if v, ok := cache.Get("foo"); ok { 52 | t.Errorf("expected not found, got %#v", v) 53 | } 54 | if v, ok := cache.Get("bar"); ok { 55 | t.Errorf("expected not found, got %#v", v) 56 | } 57 | 58 | if got, want := len(cache.cache), 0; got != want { 59 | t.Errorf("expected %#v to be empty", cache.cache) 60 | } 61 | }) 62 | 63 | t.Run("exists", func(t *testing.T) { 64 | t.Parallel() 65 | 66 | cache := NewRandom[string, int](1) 67 | defer cache.Stop() 68 | 69 | cache.Set("foo", 5) 70 | 71 | if v, _ := cache.Get("foo"); v != 5 { 72 | 
t.Errorf("expected %#v, got %#v", 5, v) 73 | } 74 | if v, ok := cache.Get("bar"); ok { 75 | t.Errorf("expected not found, got %#v", v) 76 | } 77 | }) 78 | } 79 | 80 | func TestRandom_Set(t *testing.T) { 81 | t.Parallel() 82 | 83 | t.Run("sets", func(t *testing.T) { 84 | t.Parallel() 85 | 86 | cache := NewRandom[string, int](1) 87 | defer cache.Stop() 88 | 89 | cache.Set("foo", 5) 90 | 91 | if v, _ := cache.Get("foo"); v != 5 { 92 | t.Errorf("expected %#v, got %#v", 5, v) 93 | } 94 | }) 95 | 96 | t.Run("evicts", func(t *testing.T) { 97 | t.Parallel() 98 | 99 | cache := NewRandom[string, int](2) 100 | defer cache.Stop() 101 | 102 | cache.Set("foo", 5) 103 | cache.Set("bar", 4) 104 | 105 | if v, _ := cache.Get("foo"); v != 5 { 106 | t.Errorf("expected %#v, got %#v", 5, v) 107 | } 108 | if v, _ := cache.Get("bar"); v != 4 { 109 | t.Errorf("expected %#v, got %#v", 5, v) 110 | } 111 | 112 | cache.Set("baz", 3) 113 | if v, _ := cache.Get("baz"); v != 3 { 114 | t.Errorf("expected %#v, got %#v", 3, v) 115 | } 116 | 117 | if got, want := len(cache.cache), 2; got != want { 118 | t.Errorf("expected %d to be %d", got, want) 119 | } 120 | }) 121 | } 122 | 123 | func TestRandom_Fetch(t *testing.T) { 124 | t.Parallel() 125 | 126 | t.Run("saves", func(t *testing.T) { 127 | t.Parallel() 128 | 129 | cache := NewRandom[string, string](3) 130 | defer cache.Stop() 131 | 132 | v, err := cache.Fetch("foo", func() (string, error) { 133 | return "bar", nil 134 | }) 135 | if err != nil { 136 | t.Fatal(err) 137 | } 138 | if got, want := v, "bar"; got != want { 139 | t.Errorf("expected %q to eb %q", got, want) 140 | } 141 | 142 | v, ok := cache.Get("foo") 143 | if !ok { 144 | t.Errorf("expected item to be cached") 145 | } 146 | if got, want := v, "bar"; got != want { 147 | t.Errorf("expected %q to eb %q", got, want) 148 | } 149 | }) 150 | 151 | t.Run("returns_cached", func(t *testing.T) { 152 | t.Parallel() 153 | 154 | cache := NewRandom[string, string](3) 155 | defer cache.Stop() 156 | 157 | 
cache.Set("foo", "bar") 158 | 159 | cache.Fetch("foo", func() (string, error) { 160 | t.Errorf("function was called") 161 | return "", nil 162 | }) 163 | }) 164 | 165 | t.Run("returns_error", func(t *testing.T) { 166 | t.Parallel() 167 | 168 | cache := NewRandom[string, string](3) 169 | defer cache.Stop() 170 | 171 | if _, err := cache.Fetch("foo", func() (string, error) { 172 | return "", fmt.Errorf("error") 173 | }); err == nil { 174 | t.Error("expected error") 175 | } 176 | }) 177 | } 178 | 179 | func TestRandom_Stop(t *testing.T) { 180 | t.Parallel() 181 | 182 | t.Run("deletes_all_entries", func(t *testing.T) { 183 | t.Parallel() 184 | 185 | cache := NewRandom[string, int](1) 186 | cache.Set("foo", 5) 187 | cache.Set("bar", 10) 188 | cache.Set("baz", 15) 189 | 190 | cache.Stop() 191 | 192 | if cache.cache != nil { 193 | t.Errorf("expected %#v to be nil", cache.cache) 194 | } 195 | }) 196 | 197 | t.Run("panics_get", func(t *testing.T) { 198 | t.Parallel() 199 | 200 | defer func() { 201 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 202 | t.Errorf("expected %q to contain %q", got, want) 203 | } 204 | }() 205 | 206 | cache := NewRandom[string, int](10) 207 | cache.Stop() 208 | cache.Get("foo") 209 | t.Errorf("did not panic") 210 | }) 211 | 212 | t.Run("panics_set", func(t *testing.T) { 213 | t.Parallel() 214 | 215 | defer func() { 216 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 217 | t.Errorf("expected %q to contain %q", got, want) 218 | } 219 | }() 220 | 221 | cache := NewRandom[string, int](10) 222 | cache.Stop() 223 | cache.Set("foo", 5) 224 | t.Errorf("did not panic") 225 | }) 226 | } 227 | -------------------------------------------------------------------------------- /ttl.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | "time" 7 | ) 8 | 9 | // Ensure implements. 
10 | var _ Cache[string, string] = (*TTL[string, string])(nil) 11 | 12 | // TTL implements a cache in which items are evicted when they have lived in the 13 | // cached beyond an expiration. 14 | // 15 | // K is the cache key and must be a comparable. V can be any type, but pointers 16 | // are best for performance. 17 | type TTL[K comparable, V any] struct { 18 | // cache represents the internal cache storage. 19 | cache map[K]*ttlListItem[K, V] 20 | 21 | // head points to the head of the linked list and tail points to the tail. 22 | head, tail *ttlListItem[K, V] 23 | 24 | // ttl is the global TTL value. 25 | ttl time.Duration 26 | 27 | // stopped indicates whether the cache is stopped. stopCh is a channel used to 28 | // control cancellation. 29 | stopped uint32 30 | stopCh chan struct{} 31 | 32 | // lock is the internal lock to allow for concurrent operations. 33 | lock sync.RWMutex 34 | } 35 | 36 | // NewTTL creates a new TTL cache with the given of the given TTL. The TTL 37 | // applies for all entries in the cache. Items are not guaranteed to be purged 38 | // from the cache at their exact expiration time, but they are guaranteed to not 39 | // be returned past their expiration time. The sweeping operation runs on 40 | // quarterstep intervals of the provided TTL. 41 | func NewTTL[K comparable, V any](ttl time.Duration) *TTL[K, V] { 42 | if ttl <= 0 { 43 | panic("ttl must be greater than 0") 44 | } 45 | 46 | c := &TTL[K, V]{ 47 | cache: make(map[K]*ttlListItem[K, V], 16), 48 | ttl: ttl, 49 | stopCh: make(chan struct{}), 50 | } 51 | 52 | // Start the sweep! 53 | sweep := ttl / 4 54 | if min := 50 * time.Millisecond; sweep < min { 55 | sweep = min 56 | } 57 | go c.start(sweep) 58 | 59 | return c 60 | } 61 | 62 | // Get fetches the cache item at the given key. If the value exists, it is 63 | // returned. If the value does not exist, it returns the zero value for the 64 | // object and the second parameter will be false. 
65 | func (l *TTL[K, V]) Get(key K) (V, bool) { 66 | now := time.Now().UTC() 67 | l.lock.RLock() 68 | defer l.lock.RUnlock() 69 | return l.get(key, now) 70 | } 71 | 72 | // get is the internal implementation of Get. It does not lock. 73 | func (l *TTL[K, V]) get(key K, now time.Time) (V, bool) { 74 | if l.isStopped() { 75 | panic("cache is stopped") 76 | } 77 | 78 | v, ok := l.cache[key] 79 | if !ok || v.expiresAt.Before(now) { 80 | var zeroV V 81 | return zeroV, false 82 | } 83 | return v.value, true 84 | } 85 | 86 | // Set inserts the value in the cache. If an entry already exists at the given 87 | // key, it is overwritten. If an entry does not exist, a new entry is created. 88 | func (l *TTL[K, V]) Set(key K, val V) { 89 | now := time.Now().UTC() 90 | l.lock.Lock() 91 | defer l.lock.Unlock() 92 | l.set(key, val, now) 93 | } 94 | 95 | // set is the internal implementation for set. It does not lock. 96 | func (l *TTL[K, V]) set(key K, val V, now time.Time) { 97 | if l.isStopped() { 98 | panic("cache is stopped") 99 | } 100 | 101 | node, ok := l.cache[key] 102 | if !ok { 103 | node = &ttlListItem[K, V]{ 104 | key: &key, 105 | } 106 | l.cache[key] = node 107 | } 108 | node.value = val 109 | node.expiresAt = ptrTo(now.Add(l.ttl)) 110 | 111 | // If this is the first entry in the cache, update the head. 112 | if l.head == nil { 113 | l.head = node 114 | } 115 | 116 | // This entry is new, so add it to the end of the list. 117 | if l.tail != nil { 118 | l.tail.next = node 119 | } 120 | l.tail = node 121 | } 122 | 123 | // Fetch retrieves the cached value. If the value does not exist, the FetchFunc 124 | // is called and the result is stored. If the value does exist, the FetchFunc is 125 | // not invoked. 
126 | func (l *TTL[K, V]) Fetch(key K, fn FetchFunc[V]) (V, error) { 127 | now := time.Now().UTC() 128 | 129 | l.lock.Lock() 130 | defer l.lock.Unlock() 131 | 132 | if l.isStopped() { 133 | panic("cache is stopped") 134 | } 135 | 136 | if v, ok := l.get(key, now); ok { 137 | return v, nil 138 | } 139 | 140 | v, err := fn() 141 | if err != nil { 142 | var zeroV V 143 | return zeroV, err 144 | } 145 | 146 | l.set(key, v, now) 147 | return v, nil 148 | } 149 | 150 | // Stop clears the cache and prevents new entries from being added and 151 | // retrieved. 152 | func (l *TTL[K, V]) Stop() { 153 | l.lock.Lock() 154 | defer l.lock.Unlock() 155 | 156 | if !atomic.CompareAndSwapUint32(&l.stopped, 0, 1) { 157 | return 158 | } 159 | close(l.stopCh) 160 | 161 | for k, v := range l.cache { 162 | var zeroV V 163 | v.key = nil 164 | v.value = zeroV 165 | v.expiresAt = nil 166 | delete(l.cache, k) 167 | } 168 | l.cache = nil 169 | 170 | var zeroK *K 171 | var zeroV V 172 | 173 | node := l.head 174 | for node != nil { 175 | node.key = zeroK 176 | node.value = zeroV 177 | node, node.next = node.next, nil 178 | } 179 | 180 | l.head = nil 181 | l.tail = nil 182 | } 183 | 184 | // isStopped is a helper for checking if the queue is stopped. 185 | func (l *TTL[K, V]) isStopped() bool { 186 | return atomic.LoadUint32(&l.stopped) == 1 187 | } 188 | 189 | // start begins the background reaping process for expired entries. It runs 190 | // until stopped via Stop() and is intended to be called as a goroutine. 191 | func (l *TTL[K, V]) start(sweep time.Duration) { 192 | ticker := time.NewTicker(sweep) 193 | defer ticker.Stop() 194 | 195 | for { 196 | // Check if we're stopped first to prevent entering a race between a short 197 | // time ticker and the stop channel. 
198 | if l.isStopped() { 199 | return 200 | } 201 | 202 | select { 203 | case <-l.stopCh: 204 | return 205 | case <-ticker.C: 206 | func() { 207 | now := time.Now().UTC() 208 | 209 | l.lock.Lock() 210 | defer l.lock.Unlock() 211 | 212 | // Walk the LinkedList from the front, since those are the oldest items. 213 | node := l.head 214 | for node != nil { 215 | // If this item isn't a candidate for expiration, then no future items 216 | // will be a candidate either, since they are in increasing order. 217 | if node.expiresAt.After(now) { 218 | break 219 | } 220 | 221 | delete(l.cache, *node.key) 222 | 223 | var zeroV V 224 | node.key = nil 225 | node.value = zeroV 226 | node.expiresAt = nil 227 | node, node.next = node.next, nil 228 | } 229 | 230 | l.head = node 231 | if node == nil { 232 | l.tail = nil 233 | } 234 | }() 235 | } 236 | } 237 | } 238 | 239 | // ttlListItem represents an entry in the linked list. 240 | type ttlListItem[K comparable, V any] struct { 241 | next *ttlListItem[K, V] 242 | key *K 243 | value V 244 | expiresAt *time.Time 245 | } 246 | -------------------------------------------------------------------------------- /ttl_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestNewTTL(t *testing.T) { 11 | t.Parallel() 12 | 13 | t.Run("defaults", func(t *testing.T) { 14 | t.Parallel() 15 | 16 | cache := NewTTL[string, string](5 * time.Minute) 17 | defer cache.Stop() 18 | 19 | if got, want := cache.ttl, 5*time.Minute; got != want { 20 | t.Errorf("expected %d to be %d", got, want) 21 | } 22 | if got, want := cache.cache, make(map[string]*ttlListItem[string, string], 10); !reflect.DeepEqual(got, want) { 23 | t.Errorf("expected %#v to be %#v", got, want) 24 | } 25 | }) 26 | 27 | t.Run("panic_on_negative", func(t *testing.T) { 28 | t.Parallel() 29 | 30 | defer func() { 31 | if got, want := fmt.Sprintf("%s", 
recover()), "ttl must be greater than 0"; got != want { 32 | t.Errorf("expected %q to contain %q", got, want) 33 | } 34 | }() 35 | 36 | cache := NewTTL[string, string](0) 37 | defer cache.Stop() 38 | t.Errorf("did not panic") 39 | }) 40 | } 41 | 42 | func TestTTL_Get(t *testing.T) { 43 | t.Parallel() 44 | 45 | t.Run("not_exist", func(t *testing.T) { 46 | t.Parallel() 47 | 48 | cache := NewTTL[string, int](5 * time.Minute) 49 | defer cache.Stop() 50 | 51 | if v, ok := cache.Get("foo"); ok { 52 | t.Errorf("expected not found, got %#v", v) 53 | } 54 | if v, ok := cache.Get("bar"); ok { 55 | t.Errorf("expected not found, got %#v", v) 56 | } 57 | 58 | if got, want := len(cache.cache), 0; got != want { 59 | t.Errorf("expected %#v to be empty", cache.cache) 60 | } 61 | }) 62 | 63 | t.Run("exists", func(t *testing.T) { 64 | t.Parallel() 65 | 66 | cache := NewTTL[string, int](5 * time.Minute) 67 | defer cache.Stop() 68 | 69 | cache.Set("foo", 5) 70 | 71 | if v, _ := cache.Get("foo"); v != 5 { 72 | t.Errorf("expected %#v, got %#v", 5, v) 73 | } 74 | if v, ok := cache.Get("bar"); ok { 75 | t.Errorf("expected not found, got %#v", v) 76 | } 77 | 78 | if got, want := len(cache.cache), 1; got != want { 79 | t.Errorf("expected %#v to be empty", cache.cache) 80 | } 81 | }) 82 | } 83 | 84 | func TestTTL_Set(t *testing.T) { 85 | t.Parallel() 86 | 87 | t.Run("sets", func(t *testing.T) { 88 | t.Parallel() 89 | 90 | cache := NewTTL[string, int](5 * time.Minute) 91 | defer cache.Stop() 92 | 93 | cache.Set("foo", 5) 94 | 95 | if v, _ := cache.Get("foo"); v != 5 { 96 | t.Errorf("expected %#v, got %#v", 5, v) 97 | } 98 | }) 99 | 100 | t.Run("evicts", func(t *testing.T) { 101 | t.Parallel() 102 | 103 | cache := NewTTL[string, int](50 * time.Millisecond) 104 | defer cache.Stop() 105 | 106 | cache.Set("foo", 5) 107 | cache.Set("bar", 4) 108 | 109 | if v, _ := cache.Get("foo"); v != 5 { 110 | t.Errorf("expected %#v, got %#v", 5, v) 111 | } 112 | if v, _ := cache.Get("bar"); v != 4 { 113 | 
t.Errorf("expected %#v, got %#v", 5, v) 114 | } 115 | 116 | time.Sleep(50 * time.Millisecond) 117 | 118 | if v, ok := cache.Get("foo"); ok { 119 | t.Errorf("expected %#v to be evicted", v) 120 | } 121 | if v, ok := cache.Get("bar"); ok { 122 | t.Errorf("expected %#v to be evicted", v) 123 | } 124 | }) 125 | } 126 | 127 | func TestTTL_Fetch(t *testing.T) { 128 | t.Parallel() 129 | 130 | t.Run("saves", func(t *testing.T) { 131 | t.Parallel() 132 | 133 | cache := NewTTL[string, string](50 * time.Millisecond) 134 | defer cache.Stop() 135 | 136 | v, err := cache.Fetch("foo", func() (string, error) { 137 | return "bar", nil 138 | }) 139 | if err != nil { 140 | t.Fatal(err) 141 | } 142 | if got, want := v, "bar"; got != want { 143 | t.Errorf("expected %q to eb %q", got, want) 144 | } 145 | 146 | v, ok := cache.Get("foo") 147 | if !ok { 148 | t.Errorf("expected item to be cached") 149 | } 150 | if got, want := v, "bar"; got != want { 151 | t.Errorf("expected %q to eb %q", got, want) 152 | } 153 | }) 154 | 155 | t.Run("returns_cached", func(t *testing.T) { 156 | t.Parallel() 157 | 158 | cache := NewTTL[string, string](50 * time.Millisecond) 159 | defer cache.Stop() 160 | 161 | cache.Set("foo", "bar") 162 | 163 | cache.Fetch("foo", func() (string, error) { 164 | t.Errorf("function was called") 165 | return "", nil 166 | }) 167 | }) 168 | 169 | t.Run("returns_error", func(t *testing.T) { 170 | t.Parallel() 171 | 172 | cache := NewTTL[string, string](50 * time.Millisecond) 173 | defer cache.Stop() 174 | 175 | if _, err := cache.Fetch("foo", func() (string, error) { 176 | return "", fmt.Errorf("error") 177 | }); err == nil { 178 | t.Error("expected error") 179 | } 180 | }) 181 | } 182 | 183 | func TestTTL_Stop(t *testing.T) { 184 | t.Parallel() 185 | 186 | t.Run("deletes_all_entries", func(t *testing.T) { 187 | t.Parallel() 188 | 189 | cache := NewTTL[string, int](5 * time.Minute) 190 | cache.Set("foo", 5) 191 | cache.Set("bar", 10) 192 | cache.Set("baz", 15) 193 | 194 | 
cache.Stop() 195 | 196 | if cache.cache != nil { 197 | t.Errorf("expected %#v to be nil", cache.cache) 198 | } 199 | }) 200 | 201 | t.Run("panics_get", func(t *testing.T) { 202 | t.Parallel() 203 | 204 | defer func() { 205 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 206 | t.Errorf("expected %q to contain %q", got, want) 207 | } 208 | }() 209 | 210 | cache := NewTTL[string, int](5 * time.Minute) 211 | cache.Stop() 212 | cache.Get("foo") 213 | t.Errorf("did not panic") 214 | }) 215 | 216 | t.Run("panics_set", func(t *testing.T) { 217 | t.Parallel() 218 | 219 | defer func() { 220 | if got, want := fmt.Sprintf("%s", recover()), "cache is stopped"; got != want { 221 | t.Errorf("expected %q to contain %q", got, want) 222 | } 223 | }() 224 | 225 | cache := NewTTL[string, int](5 * time.Minute) 226 | cache.Stop() 227 | cache.Set("foo", 5) 228 | t.Errorf("did not panic") 229 | }) 230 | } 231 | --------------------------------------------------------------------------------