├── .github └── workflows │ ├── master.yaml │ └── pull_request.yaml ├── .gitignore ├── .golangci.yaml ├── Makefile ├── assert └── assert.go ├── bucket.go ├── bucket_test.go ├── cache.go ├── cache_test.go ├── configuration.go ├── configuration_test.go ├── control.go ├── go.mod ├── go.sum ├── item.go ├── item_test.go ├── layeredbucket.go ├── layeredcache.go ├── layeredcache_test.go ├── license.txt ├── list.go ├── list_test.go ├── readme.md ├── secondarycache.go └── secondarycache_test.go /.github/workflows/master.yaml: -------------------------------------------------------------------------------- 1 | name: Master 2 | on: 3 | push: 4 | branches: 5 | - master 6 | 7 | permissions: 8 | contents: read 9 | 10 | jobs: 11 | bench: 12 | runs-on: ubuntu-latest 13 | timeout-minutes: 15 14 | steps: 15 | - uses: actions/checkout@v3 16 | 17 | - uses: actions/setup-go@v4 18 | with: 19 | go-version-file: './go.mod' 20 | 21 | - name: Run benchmark and store the output to a file 22 | run: | 23 | set -o pipefail 24 | make bench | tee bench_output.txt 25 | 26 | - name: Get benchmark as JSON 27 | uses: benchmark-action/github-action-benchmark@v1 28 | with: 29 | # What benchmark tool the output.txt came from 30 | tool: 'go' 31 | # Where the output from the benchmark tool is stored 32 | output-file-path: bench_output.txt 33 | # Write benchmarks to this file 34 | external-data-json-path: ./cache/benchmark-data.json 35 | # Workflow will fail when an alert happens 36 | fail-on-alert: true 37 | github-token: ${{ secrets.GITHUB_TOKEN }} 38 | comment-on-alert: true 39 | 40 | - name: Get CPU information 41 | uses: kenchan0130/actions-system-info@master 42 | id: system-info 43 | 44 | - name: Save benchmark JSON to cache 45 | uses: actions/cache/save@v3 46 | with: 47 | path: ./cache/benchmark-data.json 48 | # Save with commit hash to avoid "cache already exists" 49 | # Save with OS & CPU info to prevent comparing against results from different CPUs 50 | key: ${{ github.sha }}-${{ runner.os }}-${{ steps.system-info.outputs.cpu-model }}-go-benchmark 51 | -------------------------------------------------------------------------------- /.github/workflows/pull_request.yaml: -------------------------------------------------------------------------------- 1 | name: Pull Request 2 | on: 3 | merge_group: 4 | pull_request: 5 | branches: 6 | - master 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout code 16 | uses: actions/checkout@v3 17 | - name: Set up Go 18 | uses: actions/setup-go@v4 19 | with: 20 | go-version-file: './go.mod' 21 | - name: golangci-lint 22 | uses: golangci/golangci-lint-action@v3 23 | with: 24 | version: latest 25 | 26 | test: 27 | runs-on: ubuntu-latest 28 | timeout-minutes: 15 29 | steps: 30 | - name: Checkout code 31 | uses: actions/checkout@v3 32 | - name: Set up Go 33 | uses: actions/setup-go@v4 34 | with: 35 | go-version-file: './go.mod' 36 | - name: Unit Tests 37 | run: make t 38 | bench: 39 | runs-on: ubuntu-latest 40 | timeout-minutes: 5 41 | steps: 42 | - name: Checkout code 43 | uses: actions/checkout@v3 44 | with: 45 | fetch-depth: 0 # to be able to retrieve the last commit in master branch 46 | 47 | - name: Set up Go 48 | uses: actions/setup-go@v4 49 | with: 50 | go-version-file: './go.mod' 51 | cache-dependency-path: './go.sum' 52 | check-latest: true 53 | 54 | - name: Run benchmark and store the output to a file 55 | run: | 56 | set -o pipefail 57 | make bench | tee ${{ github.sha }}_bench_output.txt 58 | 59 | - name: 
Get CPU information 60 | uses: kenchan0130/actions-system-info@master 61 | id: system-info 62 | 63 | - name: Get Master branch SHA 64 | id: get-master-branch-sha 65 | run: | 66 | SHA=$(git rev-parse origin/master) 67 | echo "sha=$SHA" >> $GITHUB_OUTPUT 68 | 69 | - name: Try to get benchmark JSON from master branch 70 | uses: actions/cache/restore@v3 71 | id: cache 72 | with: 73 | path: ./cache/benchmark-data.json 74 | key: ${{ steps.get-master-branch-sha.outputs.sha }}-${{ runner.os }}-${{ steps.system-info.outputs.cpu-model }}-go-benchmark 75 | 76 | - name: Compare benchmarks with master 77 | uses: benchmark-action/github-action-benchmark@v1 78 | if: steps.cache.outputs.cache-hit == 'true' 79 | with: 80 | # What benchmark tool the output.txt came from 81 | tool: 'go' 82 | # Where the output from the benchmark tool is stored 83 | output-file-path: ${{ github.sha }}_bench_output.txt 84 | # Where the benchmarks in master are (to compare) 85 | external-data-json-path: ./cache/benchmark-data.json 86 | # Do not save the data 87 | save-data-file: false 88 | # Workflow will fail when an alert happens 89 | fail-on-alert: true 90 | github-token: ${{ secrets.GITHUB_TOKEN }} 91 | # Enable Job Summary for PRs 92 | summary-always: true 93 | 94 | - name: Run benchmarks 95 | uses: benchmark-action/github-action-benchmark@v1 96 | if: steps.cache.outputs.cache-hit != 'true' 97 | with: 98 | # What benchmark tool the output.txt came from 99 | tool: 'go' 100 | # Where the output from the benchmark tool is stored 101 | output-file-path: ${{ github.sha }}_bench_output.txt 102 | # Write benchmarks to this file, do not publish to Github Pages 103 | save-data-file: false 104 | external-data-json-path: ./cache/benchmark-data.json 105 | # Workflow will fail when an alert happens 106 | fail-on-alert: true 107 | # Enable alert commit comment 108 | github-token: ${{ secrets.GITHUB_TOKEN }} 109 | comment-on-alert: true 110 | # Enable Job Summary for PRs 111 | summary-always: true 112 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | vendor/ 2 | .idea/ 3 | *.out -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 3m 3 | modules-download-mode: readonly 4 | 5 | linters: 6 | enable: 7 | - errname 8 | - gofmt 9 | - goimports 10 | - stylecheck 11 | - importas 12 | - errcheck 13 | - gosimple 14 | - govet 15 | - ineffassign 16 | - mirror 17 | - staticcheck 18 | - tagalign 19 | - testifylint 20 | - typecheck 21 | - unused 22 | - unconvert 23 | - unparam 24 | - wastedassign 25 | - whitespace 26 | - exhaustive 27 | - noctx 28 | - promlinter 29 | 30 | linters-settings: 31 | govet: 32 | enable-all: true 33 | disable: 34 | - shadow 35 | - fieldalignment -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := help 2 | 3 | .PHONY: help 4 | help: 5 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 6 | 7 | .PHONY: bench 8 | bench: ## Run benchmarks 9 | go test ./... -bench . 
-benchtime 5s -timeout 0 -run=XXX -benchmem 10 | 11 | .PHONY: l 12 | l: ## Lint Go source files 13 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest && golangci-lint run 14 | 15 | .PHONY: t 16 | t: ## Run unit tests 17 | go test -race -count=1 ./... 18 | 19 | .PHONY: f 20 | f: ## Format code 21 | go fmt ./... 22 | 23 | .PHONY: c 24 | c: ## Measure code coverage 25 | go test -race -covermode=atomic ./... -coverprofile=cover.out -------------------------------------------------------------------------------- /assert/assert.go: -------------------------------------------------------------------------------- 1 | // A wrapper around *testing.T. I hate the if a != b { t.Errorf(....) } pattern. 2 | // Packages should prefer using the tests package (which exposes all of 3 | // these functions). The only reason to use this package directly is if 4 | // the tests package depends on your package (and thus you have a cyclical 5 | // dependency). 6 | package assert 7 | 8 | import ( 9 | "math" 10 | "reflect" 11 | "strings" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | // a == b 17 | func Equal[T comparable](t *testing.T, actual T, expected T) { 18 | t.Helper() 19 | if actual != expected { 20 | t.Errorf("expected '%v' to equal '%v'", actual, expected) 21 | t.FailNow() 22 | } 23 | } 24 | 25 | // Two lists are equal (same length & same values in the same order) 26 | func List[T comparable](t *testing.T, actuals []T, expecteds []T) { 27 | t.Helper() 28 | Equal(t, len(actuals), len(expecteds)) 29 | 30 | for i, actual := range actuals { 31 | Equal(t, actual, expecteds[i]) 32 | } 33 | } 34 | 35 | // needle not in []haystack 36 | func DoesNotContain[T comparable](t *testing.T, haystack []T, needle T) { 37 | t.Helper() 38 | for _, v := range haystack { 39 | if v == needle { 40 | t.Errorf("expected '%v' to not be in '%v'", needle, haystack) 41 | t.FailNow() 42 | } 43 | } 44 | } 45 | 46 | // A value is nil 47 | func Nil(t *testing.T, actual interface{}) { 48 | t.Helper() 49 | if actual != nil && !reflect.ValueOf(actual).IsNil() { 50 | t.Errorf("expected %v to be nil", actual) 51 | t.FailNow() 52 | } 53 | } 54 | 55 | // A value is not nil 56 | func NotNil(t *testing.T, actual interface{}) { 57 | t.Helper() 58 | if actual == nil { 59 | t.Errorf("expected %v to be not nil", actual) 60 | t.FailNow() 61 | } 62 | } 63 | 64 | // A value is true 65 | func True(t *testing.T, actual bool) { 66 | t.Helper() 67 | if !actual { 68 | t.Error("expected true, got false") 69 | t.FailNow() 70 | } 71 | } 72 | 73 | // A value is false 74 | func False(t *testing.T, actual bool) { 75 | t.Helper() 76 | if actual { 77 | t.Error("expected false, got true") 78 | t.FailNow() 79 | } 80 | } 81 | 82 | // The string contains the given value 83 | func StringContains(t *testing.T, actual string, expected string) { 84 | t.Helper() 85 | if !strings.Contains(actual, expected) { 86 | t.Errorf("expected %s to contain %s", actual, expected) 87 | t.FailNow() 88 | } 89 | } 90 | 91 | func Error(t *testing.T, actual error, expected error) { 92 | t.Helper() 93 | if actual != expected { 94 | t.Errorf("expected '%s' to be '%s'", actual, expected) 95 | t.FailNow() 96 | } 97 | } 98 | 99 | func Nowish(t *testing.T, actual time.Time) { 100 | t.Helper() 101 | diff := math.Abs(time.Now().UTC().Sub(actual).Seconds()) 102 | if diff > 1 { 103 | t.Errorf("expected '%s' to be nowish", actual) 104 | t.FailNow() 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /bucket.go: 
-------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "strings" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | type bucket[T any] struct { 10 | sync.RWMutex 11 | lookup map[string]*Item[T] 12 | } 13 | 14 | func (b *bucket[T]) itemCount() int { 15 | b.RLock() 16 | defer b.RUnlock() 17 | return len(b.lookup) 18 | } 19 | 20 | func (b *bucket[T]) forEachFunc(matches func(key string, item *Item[T]) bool) bool { 21 | lookup := b.lookup 22 | b.RLock() 23 | defer b.RUnlock() 24 | for key, item := range lookup { 25 | if !matches(key, item) { 26 | return false 27 | } 28 | } 29 | return true 30 | } 31 | 32 | func (b *bucket[T]) get(key string) *Item[T] { 33 | b.RLock() 34 | defer b.RUnlock() 35 | return b.lookup[key] 36 | } 37 | 38 | func (b *bucket[T]) setnx(key string, value T, duration time.Duration, track bool) *Item[T] { 39 | b.RLock() 40 | item := b.lookup[key] 41 | b.RUnlock() 42 | if item != nil { 43 | return item 44 | } 45 | 46 | expires := time.Now().Add(duration).UnixNano() 47 | newItem := newItem(key, value, expires, track) 48 | 49 | b.Lock() 50 | defer b.Unlock() 51 | 52 | // check again under write lock 53 | item = b.lookup[key] 54 | if item != nil { 55 | return item 56 | } 57 | 58 | b.lookup[key] = newItem 59 | return newItem 60 | } 61 | 62 | func (b *bucket[T]) setnx2(key string, f func() T, duration time.Duration, track bool) (*Item[T], bool) { 63 | b.RLock() 64 | item := b.lookup[key] 65 | b.RUnlock() 66 | if item != nil { 67 | return item, true 68 | } 69 | 70 | b.Lock() 71 | defer b.Unlock() 72 | 73 | // check again under write lock 74 | item = b.lookup[key] 75 | if item != nil { 76 | return item, true 77 | } 78 | 79 | expires := time.Now().Add(duration).UnixNano() 80 | newItem := newItem(key, f(), expires, track) 81 | 82 | b.lookup[key] = newItem 83 | return newItem, false 84 | } 85 | 86 | func (b *bucket[T]) set(key string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) { 87 | expires := time.Now().Add(duration).UnixNano() 88 | item := newItem(key, value, expires, track) 89 | b.Lock() 90 | existing := b.lookup[key] 91 | b.lookup[key] = item 92 | b.Unlock() 93 | return item, existing 94 | } 95 | 96 | func (b *bucket[T]) remove(key string) *Item[T] { 97 | b.Lock() 98 | item := b.lookup[key] 99 | delete(b.lookup, key) 100 | b.Unlock() 101 | return item 102 | } 103 | 104 | func (b *bucket[T]) delete(key string) { 105 | b.Lock() 106 | delete(b.lookup, key) 107 | b.Unlock() 108 | } 109 | 110 | // This is an expensive operation, so we do what we can to optimize it and limit 111 | // the impact it has on concurrent operations. Specifically, we: 112 | // 1 - Do an initial iteration to collect matches. This allows us to do the 113 | // "expensive" prefix check (on all values) using only a read-lock 114 | // 2 - Do a second iteration, under write lock, for the matched results to do 115 | // the actual deletion 116 | 117 | // Also, this is the only place where the Bucket is aware of cache detail: the 118 | // deletables channel. Passing it here lets us avoid iterating over matched items 119 | // again in the cache. Further, we pass item to deletables BEFORE actually removing 120 | // the item from the map. I'm pretty sure this is 100% fine, but it is unique. 
121 | // (We do this so that the write to the channel is under the read lock and not the 122 | // write lock) 123 | func (b *bucket[T]) deleteFunc(matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int { 124 | lookup := b.lookup 125 | items := make([]*Item[T], 0) 126 | 127 | b.RLock() 128 | for key, item := range lookup { 129 | if matches(key, item) { 130 | deletables <- item 131 | items = append(items, item) 132 | } 133 | } 134 | b.RUnlock() 135 | 136 | if len(items) == 0 { 137 | // avoid the write lock if we can 138 | return 0 139 | } 140 | 141 | b.Lock() 142 | for _, item := range items { 143 | delete(lookup, item.key) 144 | } 145 | b.Unlock() 146 | return len(items) 147 | } 148 | 149 | func (b *bucket[T]) deletePrefix(prefix string, deletables chan *Item[T]) int { 150 | return b.deleteFunc(func(key string, item *Item[T]) bool { 151 | return strings.HasPrefix(key, prefix) 152 | }, deletables) 153 | } 154 | 155 | // we expect the caller to have acquired a write lock 156 | func (b *bucket[T]) clear() { 157 | for _, item := range b.lookup { 158 | item.promotions = -2 159 | } 160 | b.lookup = make(map[string]*Item[T]) 161 | } 162 | -------------------------------------------------------------------------------- /bucket_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/karlseguin/ccache/v3/assert" 8 | ) 9 | 10 | func Test_Bucket_GetMissFromBucket(t *testing.T) { 11 | bucket := testBucket() 12 | assert.Nil(t, bucket.get("invalid")) 13 | } 14 | 15 | func Test_Bucket_GetHitFromBucket(t *testing.T) { 16 | bucket := testBucket() 17 | item := bucket.get("power") 18 | assertValue(t, item, "9000") 19 | } 20 | 21 | func Test_Bucket_DeleteItemFromBucket(t *testing.T) { 22 | bucket := testBucket() 23 | bucket.delete("power") 24 | assert.Nil(t, bucket.get("power")) 25 | } 26 | 27 | func Test_Bucket_SetsANewBucketItem(t *testing.T) { 28 | bucket := testBucket() 29 | item, existing := bucket.set("spice", "flow", time.Minute, false) 30 | assertValue(t, item, "flow") 31 | item = bucket.get("spice") 32 | assertValue(t, item, "flow") 33 | assert.Equal(t, existing, nil) 34 | } 35 | 36 | func Test_Bucket_SetsAnExistingItem(t *testing.T) { 37 | bucket := testBucket() 38 | item, existing := bucket.set("power", "9001", time.Minute, false) 39 | assertValue(t, item, "9001") 40 | item = bucket.get("power") 41 | assertValue(t, item, "9001") 42 | assertValue(t, existing, "9000") 43 | } 44 | 45 | func testBucket() *bucket[string] { 46 | b := &bucket[string]{lookup: make(map[string]*Item[string])} 47 | b.lookup["power"] = &Item[string]{ 48 | key: "power", 49 | value: "9000", 50 | } 51 | return b 52 | } 53 | 54 | func assertValue(t *testing.T, item *Item[string], expected string) { 55 | assert.Equal(t, item.value, expected) 56 | } 57 | -------------------------------------------------------------------------------- /cache.go: -------------------------------------------------------------------------------- 1 | // An LRU cache aimed at high concurrency 2 | package ccache 3 | 4 | import ( 5 | "hash/fnv" 6 | "sync/atomic" 7 | "time" 8 | ) 9 | 10 | type Cache[T any] struct { 11 | *Configuration[T] 12 | control 13 | list *List[T] 14 | size int64 15 | pruneTargetSize int64 16 | buckets []*bucket[T] 17 | bucketMask uint32 18 | deletables chan *Item[T] 19 | promotables chan *Item[T] 20 | } 21 | 22 | // Create a new cache with the specified configuration 23 | // See ccache.Configure() for 
creating a configuration 24 | func New[T any](config *Configuration[T]) *Cache[T] { 25 | c := &Cache[T]{ 26 | list: NewList[T](), 27 | Configuration: config, 28 | control: newControl(), 29 | bucketMask: uint32(config.buckets) - 1, 30 | buckets: make([]*bucket[T], config.buckets), 31 | deletables: make(chan *Item[T], config.deleteBuffer), 32 | promotables: make(chan *Item[T], config.promoteBuffer), 33 | pruneTargetSize: config.maxSize - config.maxSize*int64(config.percentToPrune)/100, 34 | } 35 | for i := 0; i < config.buckets; i++ { 36 | c.buckets[i] = &bucket[T]{ 37 | lookup: make(map[string]*Item[T]), 38 | } 39 | } 40 | go c.worker() 41 | return c 42 | } 43 | 44 | func (c *Cache[T]) ItemCount() int { 45 | count := 0 46 | for _, b := range c.buckets { 47 | count += b.itemCount() 48 | } 49 | return count 50 | } 51 | 52 | func (c *Cache[T]) DeletePrefix(prefix string) int { 53 | count := 0 54 | for _, b := range c.buckets { 55 | count += b.deletePrefix(prefix, c.deletables) 56 | } 57 | return count 58 | } 59 | 60 | // Deletes all items for which the matches func evaluates to true. 61 | func (c *Cache[T]) DeleteFunc(matches func(key string, item *Item[T]) bool) int { 62 | count := 0 63 | for _, b := range c.buckets { 64 | count += b.deleteFunc(matches, c.deletables) 65 | } 66 | return count 67 | } 68 | 69 | func (c *Cache[T]) ForEachFunc(matches func(key string, item *Item[T]) bool) { 70 | for _, b := range c.buckets { 71 | if !b.forEachFunc(matches) { 72 | break 73 | } 74 | } 75 | } 76 | 77 | // Get an item from the cache. Returns nil if the item wasn't found. 78 | // This can return an expired item. Use item.Expired() to see if the item 79 | // is expired and item.TTL() to see how long until the item expires (which 80 | // will be negative for an already expired item). 81 | func (c *Cache[T]) Get(key string) *Item[T] { 82 | item := c.bucket(key).get(key) 83 | if item == nil { 84 | return nil 85 | } 86 | if !item.Expired() { 87 | select { 88 | case c.promotables <- item: 89 | default: 90 | } 91 | } 92 | return item 93 | } 94 | 95 | // Same as Get but does not promote the value. This essentially circumvents the 96 | // "least recently used" aspect of this cache. To some degree, it's akin to a 97 | // "peek". 98 | func (c *Cache[T]) GetWithoutPromote(key string) *Item[T] { 99 | return c.bucket(key).get(key) 100 | } 101 | 102 | // Used when the cache was created with the Track() configuration option. 103 | // Avoid otherwise 104 | func (c *Cache[T]) TrackingGet(key string) TrackedItem[T] { 105 | item := c.Get(key) 106 | if item == nil { 107 | return nil 108 | } 109 | item.track() 110 | return item 111 | } 112 | 113 | // Used when the cache was created with the Track() configuration option. 114 | // Sets the item, and returns a tracked reference to it. 
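// A minimal usage sketch (mirroring Test_CacheTrackerDoesNotCleanupHeldInstance,
// and assuming a cache built with Configure[string]().Track()): the returned
// reference pins the entry so the GC will not evict it until Release is called.
//
//	item := cache.TrackingSet("power", "9000", time.Minute)
//	defer item.Release() // the entry becomes evictable again once released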
115 | func (c *Cache[T]) TrackingSet(key string, value T, duration time.Duration) TrackedItem[T] { 116 | return c.set(key, value, duration, true) 117 | } 118 | 119 | // Set the value in the cache for the specified duration 120 | func (c *Cache[T]) Set(key string, value T, duration time.Duration) { 121 | c.set(key, value, duration, false) 122 | } 123 | 124 | // Setnx sets the value in the cache for the specified duration if the key does not already exist 125 | func (c *Cache[T]) Setnx(key string, value T, duration time.Duration) { 126 | c.bucket(key).setnx(key, value, duration, false) 127 | } 128 | 129 | // Setnx2 sets the value returned by f in the cache for the specified duration if the key does not already exist 130 | func (c *Cache[T]) Setnx2(key string, f func() T, duration time.Duration) *Item[T] { 131 | item, existing := c.bucket(key).setnx2(key, f, duration, false) 132 | // consistent with Get 133 | if existing && !item.Expired() { 134 | select { 135 | case c.promotables <- item: 136 | default: 137 | } 138 | // consistent with set 139 | } else if !existing { 140 | c.promotables <- item 141 | } 142 | return item 143 | } 144 | 145 | // Replace the value if it exists; does not set it if it doesn't. 146 | // Returns true if the item existed and was replaced, false otherwise. 147 | // Replace does not reset the item's TTL 148 | func (c *Cache[T]) Replace(key string, value T) bool { 149 | item := c.bucket(key).get(key) 150 | if item == nil { 151 | return false 152 | } 153 | c.Set(key, value, item.TTL()) 154 | return true 155 | } 156 | 157 | // Extend the item's expiry if it exists; does not set it if it doesn't exist. 158 | // Returns true if the expiry of the item was extended, false otherwise. 159 | func (c *Cache[T]) Extend(key string, duration time.Duration) bool { 160 | item := c.bucket(key).get(key) 161 | if item == nil { 162 | return false 163 | } 164 | 165 | item.Extend(duration) 166 | return true 167 | } 168 | 169 | // Attempts to get the value from the cache and calls fetch on a miss (missing 170 | // or stale item). If fetch returns an error, no value is cached and the error 171 | // is returned back to the caller. 172 | // Note that Fetch merely calls the public Get and Set functions. If you want 173 | // a different Fetch behavior, such as thundering herd protection or returning 174 | // expired items, implement it in your application. 175 | func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) { 176 | item := c.Get(key) 177 | if item != nil && !item.Expired() { 178 | return item, nil 179 | } 180 | value, err := fetch() 181 | if err != nil { 182 | return nil, err 183 | } 184 | return c.set(key, value, duration, false), nil 185 | } 186 | 187 | // Remove the item from the cache, returning true if the item was present, false otherwise. 
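// The removal from the cache's lookup is immediate, but the bookkeeping (size
// accounting, the OnDelete callback) happens asynchronously on the worker
// goroutine. A sketch, following the pattern the tests use:
//
//	cache.Set("spice", "flow", time.Minute)
//	ok := cache.Delete("spice") // true: the key was present
//	cache.SyncUpdates()         // wait for the worker to process the delete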
188 | func (c *Cache[T]) Delete(key string) bool { 189 | item := c.bucket(key).remove(key) 190 | if item != nil { 191 | c.deletables <- item 192 | return true 193 | } 194 | return false 195 | } 196 | 197 | func (c *Cache[T]) set(key string, value T, duration time.Duration, track bool) *Item[T] { 198 | item, existing := c.bucket(key).set(key, value, duration, track) 199 | if existing != nil { 200 | c.deletables <- existing 201 | } 202 | c.promotables <- item 203 | return item 204 | } 205 | 206 | func (c *Cache[T]) bucket(key string) *bucket[T] { 207 | h := fnv.New32a() 208 | h.Write([]byte(key)) 209 | return c.buckets[h.Sum32()&c.bucketMask] 210 | } 211 | 212 | func (c *Cache[T]) halted(fn func()) { 213 | c.halt() 214 | defer c.unhalt() 215 | fn() 216 | } 217 | 218 | func (c *Cache[T]) halt() { 219 | for _, bucket := range c.buckets { 220 | bucket.Lock() 221 | } 222 | } 223 | 224 | func (c *Cache[T]) unhalt() { 225 | for _, bucket := range c.buckets { 226 | bucket.Unlock() 227 | } 228 | } 229 | 230 | func (c *Cache[T]) worker() { 231 | dropped := 0 232 | cc := c.control 233 | 234 | promoteItem := func(item *Item[T]) { 235 | if c.doPromote(item) && c.size > c.maxSize { 236 | dropped += c.gc() 237 | } 238 | } 239 | 240 | for { 241 | select { 242 | case item := <-c.promotables: 243 | promoteItem(item) 244 | case item := <-c.deletables: 245 | c.doDelete(item) 246 | case control := <-cc: 247 | switch msg := control.(type) { 248 | case controlStop: 249 | goto drain 250 | case controlGetDropped: 251 | msg.res <- dropped 252 | dropped = 0 253 | case controlSetMaxSize: 254 | newMaxSize := msg.size 255 | c.maxSize = newMaxSize 256 | c.pruneTargetSize = newMaxSize - newMaxSize*int64(c.percentToPrune)/100 257 | if c.size > c.maxSize { 258 | dropped += c.gc() 259 | } 260 | msg.done <- struct{}{} 261 | case controlClear: 262 | c.halted(func() { 263 | promotables := c.promotables 264 | for len(promotables) > 0 { 265 | <-promotables 266 | } 267 | deletables := c.deletables 268 | for len(deletables) > 0 { 269 | <-deletables 270 | } 271 | 272 | for _, bucket := range c.buckets { 273 | bucket.clear() 274 | } 275 | c.size = 0 276 | c.list = NewList[T]() 277 | }) 278 | msg.done <- struct{}{} 279 | case controlGetSize: 280 | msg.res <- c.size 281 | case controlGC: 282 | dropped += c.gc() 283 | msg.done <- struct{}{} 284 | case controlSyncUpdates: 285 | doAllPendingPromotesAndDeletes(c.promotables, promoteItem, c.deletables, c.doDelete) 286 | msg.done <- struct{}{} 287 | } 288 | } 289 | } 290 | 291 | drain: 292 | for { 293 | select { 294 | case item := <-c.deletables: 295 | c.doDelete(item) 296 | default: 297 | return 298 | } 299 | } 300 | } 301 | 302 | // This method is used to implement SyncUpdates. It simply receives and processes as many 303 | // items as it can receive from the promotables and deletables channels immediately without 304 | // blocking. If some other goroutine sends an item on either channel after this method has 305 | // finished receiving, that's OK, because SyncUpdates only guarantees processing of values 306 | // that were already sent by the same goroutine. 
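// (The labeled breaks below are needed because, in Go, an unlabeled break
// inside a select statement exits only the select, not the enclosing for loop.)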
307 | func doAllPendingPromotesAndDeletes[T any]( 308 | promotables <-chan *Item[T], 309 | promoteFn func(*Item[T]), 310 | deletables <-chan *Item[T], 311 | deleteFn func(*Item[T]), 312 | ) { 313 | doAllPromotes: 314 | for { 315 | select { 316 | case item := <-promotables: 317 | promoteFn(item) 318 | default: 319 | break doAllPromotes 320 | } 321 | } 322 | doAllDeletes: 323 | for { 324 | select { 325 | case item := <-deletables: 326 | deleteFn(item) 327 | default: 328 | break doAllDeletes 329 | } 330 | } 331 | } 332 | 333 | func (c *Cache[T]) doDelete(item *Item[T]) { 334 | if item.next == nil && item.prev == nil { 335 | item.promotions = -2 336 | } else { 337 | c.size -= item.size 338 | if c.onDelete != nil { 339 | c.onDelete(item) 340 | } 341 | c.list.Remove(item) 342 | item.promotions = -2 343 | } 344 | } 345 | 346 | func (c *Cache[T]) doPromote(item *Item[T]) bool { 347 | //already deleted 348 | if item.promotions == -2 { 349 | return false 350 | } 351 | 352 | if item.next != nil || item.prev != nil { // not a new item 353 | if item.shouldPromote(c.getsPerPromote) { 354 | c.list.MoveToFront(item) 355 | item.promotions = 0 356 | } 357 | return false 358 | } 359 | 360 | c.size += item.size 361 | c.list.Insert(item) 362 | return true 363 | } 364 | 365 | func (c *Cache[T]) gc() int { 366 | dropped := 0 367 | item := c.list.Tail 368 | 369 | prunedSize := int64(0) 370 | sizeToPrune := c.size - c.pruneTargetSize 371 | 372 | for prunedSize < sizeToPrune { 373 | if item == nil { 374 | return dropped 375 | } 376 | // fmt.Println(item.key) 377 | prev := item.prev 378 | if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 { 379 | c.bucket(item.key).delete(item.key) 380 | itemSize := item.size 381 | c.size -= itemSize 382 | prunedSize += itemSize 383 | 384 | c.list.Remove(item) 385 | if c.onDelete != nil { 386 | c.onDelete(item) 387 | } 388 | dropped += 1 389 | item.promotions = -2 390 | } 391 | item = prev 392 | } 393 | return dropped 394 | } 395 | -------------------------------------------------------------------------------- /cache_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "math/rand" 5 | "sort" 6 | "strconv" 7 | "sync" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/karlseguin/ccache/v3/assert" 13 | ) 14 | 15 | func Test_Setnx(t *testing.T) { 16 | cache := New(Configure[string]()) 17 | defer cache.Stop() 18 | assert.Equal(t, cache.ItemCount(), 0) 19 | 20 | cache.Set("spice", "flow", time.Minute) 21 | assert.Equal(t, cache.ItemCount(), 1) 22 | 23 | // set if exists 24 | cache.Setnx("spice", "worm", time.Minute) 25 | assert.Equal(t, cache.ItemCount(), 1) 26 | assert.Equal(t, cache.Get("spice").Value(), "flow") 27 | 28 | // set if not exists 29 | cache.Delete("spice") 30 | cache.Setnx("spice", "worm", time.Minute) 31 | assert.Equal(t, cache.Get("spice").Value(), "worm") 32 | 33 | assert.Equal(t, cache.ItemCount(), 1) 34 | } 35 | 36 | func Test_Extend(t *testing.T) { 37 | cache := New(Configure[string]()) 38 | defer cache.Stop() 39 | assert.Equal(t, cache.ItemCount(), 0) 40 | 41 | // non exist 42 | ok := cache.Extend("spice", time.Minute*10) 43 | assert.Equal(t, false, ok) 44 | 45 | // exist 46 | cache.Set("spice", "flow", time.Minute) 47 | assert.Equal(t, cache.ItemCount(), 1) 48 | 49 | ok = cache.Extend("spice", time.Minute*10) // 10 + 10 50 | assert.Equal(t, true, ok) 51 | 52 | item := cache.Get("spice") 53 | less := time.Minute*22 < time.Duration(item.expires) 54 | assert.Equal(t, true, 
less) 55 | more := time.Minute*18 < time.Duration(item.expires) 56 | assert.Equal(t, true, more) 57 | 58 | assert.Equal(t, cache.ItemCount(), 1) 59 | } 60 | 61 | func Test_CacheDeletesAValue(t *testing.T) { 62 | cache := New(Configure[string]()) 63 | defer cache.Stop() 64 | assert.Equal(t, cache.ItemCount(), 0) 65 | 66 | cache.Set("spice", "flow", time.Minute) 67 | cache.Set("worm", "sand", time.Minute) 68 | assert.Equal(t, cache.ItemCount(), 2) 69 | 70 | cache.Delete("spice") 71 | assert.Equal(t, cache.Get("spice"), nil) 72 | assert.Equal(t, cache.Get("worm").Value(), "sand") 73 | assert.Equal(t, cache.ItemCount(), 1) 74 | } 75 | 76 | func Test_CacheDeletesAPrefix(t *testing.T) { 77 | cache := New(Configure[string]()) 78 | defer cache.Stop() 79 | assert.Equal(t, cache.ItemCount(), 0) 80 | 81 | cache.Set("aaa", "1", time.Minute) 82 | cache.Set("aab", "2", time.Minute) 83 | cache.Set("aac", "3", time.Minute) 84 | cache.Set("ac", "4", time.Minute) 85 | cache.Set("z5", "7", time.Minute) 86 | assert.Equal(t, cache.ItemCount(), 5) 87 | 88 | assert.Equal(t, cache.DeletePrefix("9a"), 0) 89 | assert.Equal(t, cache.ItemCount(), 5) 90 | 91 | assert.Equal(t, cache.DeletePrefix("aa"), 3) 92 | assert.Equal(t, cache.Get("aaa"), nil) 93 | assert.Equal(t, cache.Get("aab"), nil) 94 | assert.Equal(t, cache.Get("aac"), nil) 95 | assert.Equal(t, cache.Get("ac").Value(), "4") 96 | assert.Equal(t, cache.Get("z5").Value(), "7") 97 | assert.Equal(t, cache.ItemCount(), 2) 98 | } 99 | 100 | func Test_CacheDeletesAFunc(t *testing.T) { 101 | cache := New(Configure[int]()) 102 | defer cache.Stop() 103 | assert.Equal(t, cache.ItemCount(), 0) 104 | 105 | cache.Set("a", 1, time.Minute) 106 | cache.Set("b", 2, time.Minute) 107 | cache.Set("c", 3, time.Minute) 108 | cache.Set("d", 4, time.Minute) 109 | cache.Set("e", 5, time.Minute) 110 | cache.Set("f", 6, time.Minute) 111 | assert.Equal(t, cache.ItemCount(), 6) 112 | 113 | assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool { 114 | return false 115 | }), 0) 116 | assert.Equal(t, cache.ItemCount(), 6) 117 | 118 | assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool { 119 | return item.Value() < 4 120 | }), 3) 121 | assert.Equal(t, cache.ItemCount(), 3) 122 | 123 | assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool { 124 | return key == "d" 125 | }), 1) 126 | assert.Equal(t, cache.ItemCount(), 2) 127 | } 128 | 129 | func Test_CacheOnDeleteCallbackCalled(t *testing.T) { 130 | onDeleteFnCalled := int32(0) 131 | onDeleteFn := func(item *Item[string]) { 132 | if item.key == "spice" { 133 | atomic.AddInt32(&onDeleteFnCalled, 1) 134 | } 135 | } 136 | 137 | cache := New(Configure[string]().OnDelete(onDeleteFn)) 138 | defer cache.Stop() 139 | 140 | cache.Set("spice", "flow", time.Minute) 141 | cache.Set("worm", "sand", time.Minute) 142 | 143 | cache.SyncUpdates() // wait for worker to pick up preceding updates 144 | 145 | cache.Delete("spice") 146 | cache.SyncUpdates() 147 | 148 | assert.Equal(t, cache.Get("spice"), nil) 149 | assert.Equal(t, cache.Get("worm").Value(), "sand") 150 | assert.Equal(t, atomic.LoadInt32(&onDeleteFnCalled), 1) 151 | } 152 | 153 | func Test_CacheFetchesExpiredItems(t *testing.T) { 154 | cache := New(Configure[string]()) 155 | defer cache.Stop() 156 | 157 | fn := func() (string, error) { return "moo-moo", nil } 158 | 159 | cache.Set("beef", "moo", time.Second*-1) 160 | assert.Equal(t, cache.Get("beef").Value(), "moo") 161 | 162 | out, _ := cache.Fetch("beef", time.Second, fn) 163 | assert.Equal(t, 
out.Value(), "moo-moo") 164 | } 165 | 166 | func Test_CacheGCsTheOldestItems(t *testing.T) { 167 | cache := New(Configure[int]().MaxSize(100).PercentToPrune(10)) 168 | defer cache.Stop() 169 | 170 | for i := 0; i < 100; i++ { 171 | cache.Set(strconv.Itoa(i), i, time.Minute) 172 | } 173 | cache.SyncUpdates() 174 | cache.GC() 175 | assert.Equal(t, cache.Get("9"), nil) 176 | assert.Equal(t, cache.Get("10").Value(), 10) 177 | assert.Equal(t, cache.ItemCount(), 90) 178 | } 179 | 180 | func Test_CachePromotedItemsDontGetPruned(t *testing.T) { 181 | cache := New(Configure[int]().MaxSize(100).PercentToPrune(10).GetsPerPromote(1)) 182 | defer cache.Stop() 183 | 184 | for i := 0; i < 100; i++ { 185 | cache.Set(strconv.Itoa(i), i, time.Minute) 186 | } 187 | cache.SyncUpdates() 188 | cache.Get("9") 189 | cache.SyncUpdates() 190 | cache.GC() 191 | assert.Equal(t, cache.Get("9").Value(), 9) 192 | assert.Equal(t, cache.Get("10"), nil) 193 | assert.Equal(t, cache.Get("11").Value(), 11) 194 | } 195 | 196 | func Test_GetWithoutPromoteDoesNotPromote(t *testing.T) { 197 | cache := New(Configure[int]().MaxSize(100).PercentToPrune(10).GetsPerPromote(1)) 198 | defer cache.Stop() 199 | 200 | for i := 0; i < 100; i++ { 201 | cache.Set(strconv.Itoa(i), i, time.Minute) 202 | } 203 | cache.SyncUpdates() 204 | cache.GetWithoutPromote("9") 205 | cache.SyncUpdates() 206 | cache.GC() 207 | assert.Equal(t, cache.Get("9"), nil) 208 | assert.Equal(t, cache.Get("10").Value(), 10) 209 | assert.Equal(t, cache.Get("11").Value(), 11) 210 | } 211 | 212 | func Test_CacheTrackerDoesNotCleanupHeldInstance(t *testing.T) { 213 | cache := New(Configure[int]().MaxSize(10).PercentToPrune(10).Track()) 214 | defer cache.Stop() 215 | 216 | item0 := cache.TrackingSet("0", 0, time.Minute) 217 | 218 | cache.Set("1", 1, time.Minute) 219 | item1 := cache.TrackingGet("1") 220 | 221 | for i := 2; i < 11; i++ { 222 | cache.Set(strconv.Itoa(i), i, time.Minute) 223 | } 224 | 225 | cache.SyncUpdates() 226 | cache.GC() 227 | assert.Equal(t, cache.Get("0").Value(), 0) 228 | assert.Equal(t, cache.Get("1").Value(), 1) 229 | item0.Release() 230 | item1.Release() 231 | 232 | for i := 1; i < 5; i++ { 233 | cache.Set(strconv.Itoa(i+20), i, time.Minute) 234 | } 235 | cache.GC() 236 | assert.Equal(t, cache.Get("0"), nil) 237 | assert.Equal(t, cache.Get("1"), nil) 238 | } 239 | 240 | func Test_CacheRemovesOldestItemWhenFull(t *testing.T) { 241 | onDeleteFnCalled := false 242 | onDeleteFn := func(item *Item[int]) { 243 | if item.key == "0" { 244 | onDeleteFnCalled = true 245 | } 246 | } 247 | 248 | cache := New(Configure[int]().MaxSize(5).PercentToPrune(1).OnDelete(onDeleteFn)) 249 | defer cache.Stop() 250 | 251 | for i := 0; i < 7; i++ { 252 | cache.Set(strconv.Itoa(i), i, time.Minute) 253 | } 254 | cache.SyncUpdates() 255 | assert.Equal(t, cache.Get("0"), nil) 256 | assert.Equal(t, cache.Get("1"), nil) 257 | assert.Equal(t, cache.Get("2").Value(), 2) 258 | assert.Equal(t, onDeleteFnCalled, true) 259 | assert.Equal(t, cache.ItemCount(), 5) 260 | } 261 | 262 | func Test_CacheRemovesOldestItemWhenFullBySizer(t *testing.T) { 263 | cache := New(Configure[*SizedItem]().MaxSize(50).PercentToPrune(15)) 264 | defer cache.Stop() 265 | 266 | for i := 0; i < 25; i++ { 267 | cache.Set(strconv.Itoa(i), &SizedItem{i, 2}, time.Minute) 268 | } 269 | cache.SyncUpdates() 270 | cache.GC() 271 | assert.Equal(t, cache.Get("0"), nil) 272 | assert.Equal(t, cache.Get("1"), nil) 273 | assert.Equal(t, cache.Get("2"), nil) 274 | assert.Equal(t, cache.Get("3"), nil) 275 | assert.Equal(t, 
cache.Get("4").Value().id, 4) 276 | assert.Equal(t, cache.GetDropped(), 4) 277 | assert.Equal(t, cache.GetDropped(), 0) 278 | } 279 | 280 | func Test_CacheSetUpdatesSizeOnDelta(t *testing.T) { 281 | cache := New(Configure[*SizedItem]()) 282 | defer cache.Stop() 283 | 284 | cache.Set("a", &SizedItem{0, 2}, time.Minute) 285 | cache.Set("b", &SizedItem{0, 3}, time.Minute) 286 | cache.SyncUpdates() 287 | assert.Equal(t, cache.GetSize(), 5) 288 | cache.Set("b", &SizedItem{0, 3}, time.Minute) 289 | cache.SyncUpdates() 290 | assert.Equal(t, cache.GetSize(), 5) 291 | cache.Set("b", &SizedItem{0, 4}, time.Minute) 292 | cache.SyncUpdates() 293 | assert.Equal(t, cache.GetSize(), 6) 294 | cache.Set("b", &SizedItem{0, 2}, time.Minute) 295 | cache.SyncUpdates() 296 | assert.Equal(t, cache.GetSize(), 4) 297 | cache.Delete("b") 298 | cache.SyncUpdates() 299 | assert.Equal(t, cache.GetSize(), 2) 300 | } 301 | 302 | func Test_CacheReplaceDoesNotchangeSizeIfNotSet(t *testing.T) { 303 | cache := New(Configure[*SizedItem]()) 304 | defer cache.Stop() 305 | 306 | cache.Set("1", &SizedItem{1, 2}, time.Minute) 307 | cache.Set("2", &SizedItem{1, 2}, time.Minute) 308 | cache.Set("3", &SizedItem{1, 2}, time.Minute) 309 | cache.Replace("4", &SizedItem{1, 2}) 310 | cache.SyncUpdates() 311 | assert.Equal(t, cache.GetSize(), 6) 312 | } 313 | 314 | func Test_CacheReplaceChangesSize(t *testing.T) { 315 | cache := New(Configure[*SizedItem]()) 316 | defer cache.Stop() 317 | 318 | cache.Set("1", &SizedItem{1, 2}, time.Minute) 319 | cache.Set("2", &SizedItem{1, 2}, time.Minute) 320 | 321 | cache.Replace("2", &SizedItem{1, 2}) 322 | cache.SyncUpdates() 323 | assert.Equal(t, cache.GetSize(), 4) 324 | 325 | cache.Replace("2", &SizedItem{1, 1}) 326 | cache.SyncUpdates() 327 | assert.Equal(t, cache.GetSize(), 3) 328 | 329 | cache.Replace("2", &SizedItem{1, 3}) 330 | cache.SyncUpdates() 331 | assert.Equal(t, cache.GetSize(), 5) 332 | } 333 | 334 | func Test_CacheResizeOnTheFly(t *testing.T) { 335 | cache := New(Configure[int]().MaxSize(50).PercentToPrune(10)) 336 | defer cache.Stop() 337 | 338 | for i := 0; i < 50; i++ { 339 | cache.Set(strconv.Itoa(i), i, time.Minute) 340 | } 341 | cache.SetMaxSize(3) 342 | cache.SyncUpdates() 343 | assert.Equal(t, cache.GetDropped(), 47) 344 | assert.Equal(t, cache.Get("46"), nil) 345 | assert.Equal(t, cache.Get("47").Value(), 47) 346 | assert.Equal(t, cache.Get("48").Value(), 48) 347 | assert.Equal(t, cache.Get("49").Value(), 49) 348 | 349 | cache.Set("50", 50, time.Minute) 350 | cache.SyncUpdates() 351 | assert.Equal(t, cache.GetDropped(), 1) 352 | assert.Equal(t, cache.Get("47"), nil) 353 | assert.Equal(t, cache.Get("48").Value(), 48) 354 | assert.Equal(t, cache.Get("49").Value(), 49) 355 | assert.Equal(t, cache.Get("50").Value(), 50) 356 | 357 | cache.SetMaxSize(10) 358 | cache.Set("51", 51, time.Minute) 359 | cache.SyncUpdates() 360 | assert.Equal(t, cache.GetDropped(), 0) 361 | assert.Equal(t, cache.Get("48").Value(), 48) 362 | assert.Equal(t, cache.Get("49").Value(), 49) 363 | assert.Equal(t, cache.Get("50").Value(), 50) 364 | assert.Equal(t, cache.Get("51").Value(), 51) 365 | } 366 | 367 | func Test_CacheForEachFunc(t *testing.T) { 368 | cache := New(Configure[int]().MaxSize(3).PercentToPrune(1)) 369 | defer cache.Stop() 370 | 371 | assert.List(t, forEachKeys[int](cache), []string{}) 372 | 373 | cache.Set("1", 1, time.Minute) 374 | assert.List(t, forEachKeys(cache), []string{"1"}) 375 | 376 | cache.Set("2", 2, time.Minute) 377 | cache.SyncUpdates() 378 | assert.List(t, forEachKeys(cache), 
[]string{"1", "2"}) 379 | 380 | cache.Set("3", 3, time.Minute) 381 | cache.SyncUpdates() 382 | assert.List(t, forEachKeys(cache), []string{"1", "2", "3"}) 383 | 384 | cache.Set("4", 4, time.Minute) 385 | cache.SyncUpdates() 386 | assert.List(t, forEachKeys(cache), []string{"2", "3", "4"}) 387 | 388 | cache.Set("stop", 5, time.Minute) 389 | cache.SyncUpdates() 390 | assert.DoesNotContain(t, forEachKeys(cache), "stop") 391 | 392 | cache.Set("6", 6, time.Minute) 393 | cache.SyncUpdates() 394 | assert.DoesNotContain(t, forEachKeys(cache), "stop") 395 | } 396 | 397 | func Test_CachePrune(t *testing.T) { 398 | maxSize := int64(500) 399 | cache := New(Configure[string]().MaxSize(maxSize).PercentToPrune(50)) 400 | defer cache.Stop() 401 | 402 | epoch := 0 403 | for i := 0; i < 10000; i++ { 404 | epoch += 1 405 | expired := make([]string, 0) 406 | for i := 0; i < 50; i += 1 { 407 | key := strconv.FormatInt(rand.Int63n(maxSize*20), 10) 408 | item := cache.Get(key) 409 | if item == nil || item.TTL() > 1*time.Minute { 410 | expired = append(expired, key) 411 | } 412 | } 413 | for _, key := range expired { 414 | cache.Set(key, key, 5*time.Minute) 415 | } 416 | if epoch%500 == 0 { 417 | assert.True(t, cache.GetSize() <= 500) 418 | } 419 | } 420 | } 421 | 422 | func Test_ConcurrentStop(t *testing.T) { 423 | for i := 0; i < 100; i++ { 424 | cache := New(Configure[string]()) 425 | r := func() { 426 | for { 427 | key := strconv.Itoa(int(rand.Int31n(100))) 428 | switch rand.Int31n(3) { 429 | case 0: 430 | cache.Get(key) 431 | case 1: 432 | cache.Set(key, key, time.Minute) 433 | case 2: 434 | cache.Delete(key) 435 | } 436 | } 437 | } 438 | go r() 439 | go r() 440 | go r() 441 | time.Sleep(time.Millisecond * 10) 442 | cache.Stop() 443 | } 444 | } 445 | 446 | func Test_ConcurrentClearAndSet(t *testing.T) { 447 | for i := 0; i < 1000000; i++ { 448 | var stop atomic.Bool 449 | var wg sync.WaitGroup 450 | 451 | cache := New(Configure[string]()) 452 | r := func() { 453 | for !stop.Load() { 454 | cache.Set("a", "a", time.Minute) 455 | } 456 | wg.Done() 457 | } 458 | go r() 459 | wg.Add(1) 460 | cache.Clear() 461 | stop.Store(true) 462 | wg.Wait() 463 | cache.SyncUpdates() 464 | 465 | // The point of this test is to make sure that the cache's lookup and its 466 | // recency list are in sync. But the two aren't written to atomically: 467 | // the lookup is written to directly from the call to Set, whereas the 468 | // list is maintained by the background worker. This can create a period 469 | // where the two are out of sync. Even SyncUpdate is helpless here, since 470 | // it can only sync what's been written to the buffers. 
471 | for i := 0; i < 10; i++ { 472 | expectedCount := 0 473 | if cache.list.Head != nil { 474 | expectedCount = 1 475 | } 476 | actualCount := cache.ItemCount() 477 | if expectedCount == actualCount { 478 | return 479 | } 480 | time.Sleep(time.Millisecond) 481 | } 482 | t.Errorf("cache list and lookup are not consistent") 483 | t.FailNow() 484 | cache.Stop() 485 | } 486 | } 487 | 488 | func BenchmarkFrequentSets(b *testing.B) { 489 | cache := New(Configure[int]()) 490 | defer cache.Stop() 491 | 492 | b.ResetTimer() 493 | for n := 0; n < b.N; n++ { 494 | key := strconv.Itoa(n) 495 | cache.Set(key, n, time.Minute) 496 | } 497 | } 498 | 499 | func BenchmarkFrequentGets(b *testing.B) { 500 | cache := New(Configure[int]()) 501 | defer cache.Stop() 502 | numKeys := 500 503 | for i := 0; i < numKeys; i++ { 504 | key := strconv.Itoa(i) 505 | cache.Set(key, i, time.Minute) 506 | } 507 | 508 | b.ResetTimer() 509 | for n := 0; n < b.N; n++ { 510 | key := strconv.FormatInt(rand.Int63n(int64(numKeys)), 10) 511 | cache.Get(key) 512 | } 513 | } 514 | 515 | func BenchmarkGetWithPromoteSmall(b *testing.B) { 516 | getsPerPromotes := 5 517 | cache := New(Configure[int]().GetsPerPromote(int32(getsPerPromotes))) 518 | defer cache.Stop() 519 | 520 | b.ResetTimer() 521 | for n := 0; n < b.N; n++ { 522 | key := strconv.Itoa(n) 523 | cache.Set(key, n, time.Minute) 524 | for i := 0; i < getsPerPromotes; i++ { 525 | cache.Get(key) 526 | } 527 | } 528 | } 529 | 530 | func BenchmarkGetWithPromoteLarge(b *testing.B) { 531 | getsPerPromotes := 100 532 | cache := New(Configure[int]().GetsPerPromote(int32(getsPerPromotes))) 533 | defer cache.Stop() 534 | 535 | b.ResetTimer() 536 | for n := 0; n < b.N; n++ { 537 | key := strconv.Itoa(n) 538 | cache.Set(key, n, time.Minute) 539 | for i := 0; i < getsPerPromotes; i++ { 540 | cache.Get(key) 541 | } 542 | } 543 | } 544 | 545 | type SizedItem struct { 546 | id int 547 | s int64 548 | } 549 | 550 | func (s *SizedItem) Size() int64 { 551 | return s.s 552 | } 553 | 554 | func forEachKeys[T any](cache *Cache[T]) []string { 555 | keys := make([]string, 0, 10) 556 | cache.ForEachFunc(func(key string, i *Item[T]) bool { 557 | if key == "stop" { 558 | return false 559 | } 560 | keys = append(keys, key) 561 | return true 562 | }) 563 | sort.Strings(keys) 564 | return keys 565 | } 566 | -------------------------------------------------------------------------------- /configuration.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | type Configuration[T any] struct { 4 | maxSize int64 5 | buckets int 6 | itemsToPrune int 7 | percentToPrune int 8 | deleteBuffer int 9 | promoteBuffer int 10 | getsPerPromote int32 11 | tracking bool 12 | onDelete func(item *Item[T]) 13 | } 14 | 15 | // Creates a configuration object with sensible defaults 16 | // Use this as the start of the fluent configuration: 17 | // e.g.: ccache.New(ccache.Configure().MaxSize(10000)) 18 | func Configure[T any]() *Configuration[T] { 19 | return &Configuration[T]{ 20 | buckets: 16, 21 | itemsToPrune: 0, 22 | percentToPrune: 10, 23 | deleteBuffer: 1024, 24 | getsPerPromote: 3, 25 | promoteBuffer: 1024, 26 | maxSize: 5000, 27 | tracking: false, 28 | } 29 | } 30 | 31 | // The max size for the cache 32 | // [5000] 33 | func (c *Configuration[T]) MaxSize(max int64) *Configuration[T] { 34 | c.maxSize = max 35 | return c 36 | } 37 | 38 | // Keys are hashed into % bucket count to provide greater concurrency (every set 39 | // requires a write lock on the bucket). 
Must be a power of 2 (1, 2, 4, 8, 16, ...) 40 | // [16] 41 | func (c *Configuration[T]) Buckets(count uint32) *Configuration[T] { 42 | if count == 0 || !((count & (^count + 1)) == count) { 43 | count = 16 44 | } 45 | c.buckets = int(count) 46 | return c 47 | } 48 | 49 | // The percent of the max size to prune when memory is low. 50 | // [10] 51 | func (c *Configuration[T]) PercentToPrune(percent uint8) *Configuration[T] { 52 | if percent > 100 { 53 | percent = 20 54 | } 55 | c.percentToPrune = int(percent) 56 | return c 57 | } 58 | 59 | // The size of the queue for items which should be promoted. If the queue fills 60 | // up, promotions are skipped 61 | // [1024] 62 | func (c *Configuration[T]) PromoteBuffer(size uint32) *Configuration[T] { 63 | c.promoteBuffer = int(size) 64 | return c 65 | } 66 | 67 | // The size of the queue for items which should be deleted. If the queue fills 68 | // up, calls to Delete() will block 69 | func (c *Configuration[T]) DeleteBuffer(size uint32) *Configuration[T] { 70 | c.deleteBuffer = int(size) 71 | return c 72 | } 73 | 74 | // Given a large cache with a high read / write ratio, it's usually unnecessary 75 | // to promote an item on every Get. GetsPerPromote specifies the number of Gets 76 | // a key must have before being promoted 77 | // [3] 78 | func (c *Configuration[T]) GetsPerPromote(count int32) *Configuration[T] { 79 | c.getsPerPromote = count 80 | return c 81 | } 82 | 83 | // Typically, a cache is agnostic about how cached values are used. This is fine 84 | // for typical cache usage, where you fetch an item from the cache, do something 85 | // (write it out) and nothing else. 86 | 87 | // However, if callers are going to keep a reference to a cached item for a long 88 | // time, things get messy. Specifically, the cache can evict the item, while 89 | // references still exist. Technically, this isn't an issue. However, if you reload 90 | // the item back into the cache, you end up with 2 objects representing the same 91 | // data. This is a waste of space and could lead to weird behavior (the type of 92 | // problem an identity map is meant to solve). 93 | 94 | // By turning tracking on and using the cache's TrackingGet, the cache 95 | // won't evict items which you haven't called Release() on. It's a simple reference 96 | // counter. 97 | func (c *Configuration[T]) Track() *Configuration[T] { 98 | c.tracking = true 99 | return c 100 | } 101 | 102 | // OnDelete allows setting a callback function to react to item deletion. 103 | // This typically allows cleanup of resources, such as calling Close() on 104 | // cached objects that require some kind of tear-down. 
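// A sketch, assuming the cached values hold a resource with a Close() method:
//
//	cache := ccache.New(ccache.Configure[*os.File]().OnDelete(func(item *ccache.Item[*os.File]) {
//		item.Value().Close() // tear down the handle once the entry is removed
//	}))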
105 | func (c *Configuration[T]) OnDelete(callback func(item *Item[T])) *Configuration[T] { 106 | c.onDelete = callback 107 | return c 108 | } 109 | -------------------------------------------------------------------------------- /configuration_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/karlseguin/ccache/v3/assert" 7 | ) 8 | 9 | func Test_Configuration_BucketsPowerOf2(t *testing.T) { 10 | for i := uint32(0); i < 31; i++ { 11 | c := Configure[int]().Buckets(i) 12 | if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 { 13 | assert.Equal(t, c.buckets, int(i)) 14 | } else { 15 | assert.Equal(t, c.buckets, 16) 16 | } 17 | } 18 | } 19 | 20 | func Test_Configuration_Buffers(t *testing.T) { 21 | assert.Equal(t, Configure[int]().DeleteBuffer(24).deleteBuffer, 24) 22 | assert.Equal(t, Configure[int]().PromoteBuffer(95).promoteBuffer, 95) 23 | } 24 | -------------------------------------------------------------------------------- /control.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | type controlGC struct { 4 | done chan struct{} 5 | } 6 | 7 | type controlClear struct { 8 | done chan struct{} 9 | } 10 | 11 | type controlStop struct { 12 | } 13 | 14 | type controlGetSize struct { 15 | res chan int64 16 | } 17 | 18 | type controlGetDropped struct { 19 | res chan int 20 | } 21 | 22 | type controlSetMaxSize struct { 23 | size int64 24 | done chan struct{} 25 | } 26 | 27 | type controlSyncUpdates struct { 28 | done chan struct{} 29 | } 30 | 31 | type control chan interface{} 32 | 33 | func newControl() chan interface{} { 34 | return make(chan interface{}, 5) 35 | } 36 | 37 | // Forces GC. There should be no reason to call this function, except from tests 38 | // which require synchronous GC. 39 | // This is a control command. 40 | func (c control) GC() { 41 | done := make(chan struct{}) 42 | c <- controlGC{done: done} 43 | <-done 44 | } 45 | 46 | // Sends a stop signal to the worker goroutine. The worker drains any 47 | // pending deletes and then shuts down. The cache should not be used 48 | // after Stop is called, but concurrently executing requests should properly finish 49 | // executing. 50 | // This is a control command. 51 | func (c control) Stop() { 52 | c.SyncUpdates() 53 | c <- controlStop{} 54 | } 55 | 56 | // Clears the cache 57 | // This is a control command. 58 | func (c control) Clear() { 59 | done := make(chan struct{}) 60 | c <- controlClear{done: done} 61 | <-done 62 | } 63 | 64 | // Gets the size of the cache. This is an O(1) call to make, but it is handled 65 | // by the worker goroutine. It's meant to be called periodically for metrics, or 66 | // from tests. 67 | // This is a control command. 68 | func (c control) GetSize() int64 { 69 | res := make(chan int64) 70 | c <- controlGetSize{res: res} 71 | return <-res 72 | } 73 | 74 | // Gets the number of items removed from the cache due to memory pressure since 75 | // the last time GetDropped was called 76 | // This is a control command. 77 | func (c control) GetDropped() int { 78 | res := make(chan int) 79 | c <- controlGetDropped{res: res} 80 | return <-res 81 | } 82 | 83 | // Sets a new max size. This can result in a GC being run if the new maximum size 84 | // is smaller than the cached size 85 | // This is a control command. 
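// For example (mirroring Test_CacheResizeOnTheFly), shrinking a full cache
// prunes it down on the worker goroutine before SetMaxSize returns:
//
//	cache.SetMaxSize(3)
//	cache.SyncUpdates()
//	dropped := cache.GetDropped() // entries evicted because of the resize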
86 | func (c control) SetMaxSize(size int64) { 87 | done := make(chan struct{}) 88 | c <- controlSetMaxSize{size: size, done: done} 89 | <-done 90 | } 91 | 92 | // SyncUpdates waits until the cache has finished asynchronous state updates for any operations 93 | // that were done by the current goroutine up to now. 94 | // 95 | // For efficiency, the cache's implementation of LRU behavior is partly managed by a worker 96 | // goroutine that updates its internal data structures asynchronously. This means that the 97 | // cache's state in terms of (for instance) eviction of LRU items is only eventually consistent; 98 | // there is no guarantee that it happens before a Get or Set call has returned. Most of the time 99 | // application code will not care about this, but especially in a test scenario you may want to 100 | // be able to know when the worker has caught up. 101 | // 102 | // This applies only to cache methods that were previously called by the same goroutine that is 103 | // now calling SyncUpdates. If other goroutines are using the cache at the same time, there is 104 | // no way to know whether any of them still have pending state updates when SyncUpdates returns. 105 | // This is a control command. 106 | func (c control) SyncUpdates() { 107 | done := make(chan struct{}) 108 | c <- controlSyncUpdates{done: done} 109 | <-done 110 | } 111 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/karlseguin/ccache/v3 2 | 3 | go 1.19 4 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/karlseguin/ccache/2c9c102ef561ecc96b932b6eb12415f637e14d45/go.sum -------------------------------------------------------------------------------- /item.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "fmt" 5 | "sync/atomic" 6 | "time" 7 | ) 8 | 9 | type Sized interface { 10 | Size() int64 11 | } 12 | 13 | type TrackedItem[T any] interface { 14 | Value() T 15 | Release() 16 | Expired() bool 17 | TTL() time.Duration 18 | Expires() time.Time 19 | Extend(duration time.Duration) 20 | } 21 | 22 | type Item[T any] struct { 23 | key string 24 | group string 25 | promotions int32 26 | refCount int32 27 | expires int64 28 | size int64 29 | value T 30 | next *Item[T] 31 | prev *Item[T] 32 | } 33 | 34 | func newItem[T any](key string, value T, expires int64, track bool) *Item[T] { 35 | size := int64(1) 36 | 37 | // https://github.com/golang/go/issues/49206 38 | if sized, ok := (interface{})(value).(Sized); ok { 39 | size = sized.Size() 40 | } 41 | 42 | item := &Item[T]{ 43 | key: key, 44 | value: value, 45 | promotions: 0, 46 | size: size, 47 | expires: expires, 48 | } 49 | if track { 50 | item.refCount = 1 51 | } 52 | return item 53 | } 54 | 55 | func (i *Item[T]) shouldPromote(getsPerPromote int32) bool { 56 | i.promotions += 1 57 | return i.promotions == getsPerPromote 58 | } 59 | 60 | func (i *Item[T]) Key() string { 61 | return i.key 62 | } 63 | 64 | func (i *Item[T]) Value() T { 65 | return i.value 66 | } 67 | 68 | func (i *Item[T]) track() { 69 | atomic.AddInt32(&i.refCount, 1) 70 | } 71 | 72 | func (i *Item[T]) Release() { 73 | atomic.AddInt32(&i.refCount, -1) 74 | } 75 | 76 | func (i *Item[T]) Expired() bool { 77 | expires := 
atomic.LoadInt64(&i.expires) 78 | return expires < time.Now().UnixNano() 79 | } 80 | 81 | func (i *Item[T]) TTL() time.Duration { 82 | expires := atomic.LoadInt64(&i.expires) 83 | return time.Nanosecond * time.Duration(expires-time.Now().UnixNano()) 84 | } 85 | 86 | func (i *Item[T]) Expires() time.Time { 87 | expires := atomic.LoadInt64(&i.expires) 88 | return time.Unix(0, expires) 89 | } 90 | 91 | func (i *Item[T]) Extend(duration time.Duration) { 92 | atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano()) 93 | } 94 | 95 | // String returns a string representation of the Item. This includes the default string 96 | // representation of its Value(), as implemented by fmt.Sprintf with "%v", but the exact 97 | // format of the string should not be relied on; it is provided only for debugging 98 | // purposes, and because otherwise including an Item in a call to fmt.Printf or 99 | // fmt.Sprintf expression could cause fields of the Item to be read in a non-thread-safe 100 | // way. 101 | func (i *Item[T]) String() string { 102 | group := i.group 103 | if group == "" { 104 | return fmt.Sprintf("Item(%s:%v)", i.key, i.value) 105 | } 106 | return fmt.Sprintf("Item(%s:%s:%v)", group, i.key, i.value) 107 | } 108 | -------------------------------------------------------------------------------- /item_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | "time" 7 | 8 | "github.com/karlseguin/ccache/v3/assert" 9 | ) 10 | 11 | func Test_Item_Key(t *testing.T) { 12 | item := &Item[int]{key: "foo"} 13 | assert.Equal(t, item.Key(), "foo") 14 | } 15 | 16 | func Test_Item_Promotability(t *testing.T) { 17 | item := &Item[int]{promotions: 4} 18 | assert.Equal(t, item.shouldPromote(5), true) 19 | assert.Equal(t, item.shouldPromote(5), false) 20 | } 21 | 22 | func Test_Item_Expired(t *testing.T) { 23 | now := time.Now().UnixNano() 24 | item1 := &Item[int]{expires: now + (10 * int64(time.Millisecond))} 25 | item2 := &Item[int]{expires: now - (10 * int64(time.Millisecond))} 26 | assert.Equal(t, item1.Expired(), false) 27 | assert.Equal(t, item2.Expired(), true) 28 | } 29 | 30 | func Test_Item_TTL(t *testing.T) { 31 | now := time.Now().UnixNano() 32 | item1 := &Item[int]{expires: now + int64(time.Second)} 33 | item2 := &Item[int]{expires: now - int64(time.Second)} 34 | assert.Equal(t, int(math.Ceil(item1.TTL().Seconds())), 1) 35 | assert.Equal(t, int(math.Ceil(item2.TTL().Seconds())), -1) 36 | } 37 | 38 | func Test_Item_Expires(t *testing.T) { 39 | now := time.Now().UnixNano() 40 | item := &Item[int]{expires: now + (10)} 41 | assert.Equal(t, item.Expires().UnixNano(), now+10) 42 | } 43 | 44 | func Test_Item_Extend(t *testing.T) { 45 | item := &Item[int]{expires: time.Now().UnixNano() + 10} 46 | item.Extend(time.Minute * 2) 47 | assert.Equal(t, item.Expires().Unix(), time.Now().Unix()+120) 48 | } 49 | -------------------------------------------------------------------------------- /layeredbucket.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | type layeredBucket[T any] struct { 9 | sync.RWMutex 10 | buckets map[string]*bucket[T] 11 | } 12 | 13 | func (b *layeredBucket[T]) itemCount() int { 14 | count := 0 15 | b.RLock() 16 | defer b.RUnlock() 17 | for _, b := range b.buckets { 18 | count += b.itemCount() 19 | } 20 | return count 21 | } 22 | 23 | func (b *layeredBucket[T]) get(primary, secondary 
string) *Item[T] { 24 | bucket := b.getSecondaryBucket(primary) 25 | if bucket == nil { 26 | return nil 27 | } 28 | return bucket.get(secondary) 29 | } 30 | 31 | func (b *layeredBucket[T]) getSecondaryBucket(primary string) *bucket[T] { 32 | b.RLock() 33 | bucket, exists := b.buckets[primary] 34 | b.RUnlock() 35 | if !exists { 36 | return nil 37 | } 38 | return bucket 39 | } 40 | 41 | func (b *layeredBucket[T]) set(primary, secondary string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) { 42 | b.Lock() 43 | bkt, exists := b.buckets[primary] 44 | if !exists { 45 | bkt = &bucket[T]{lookup: make(map[string]*Item[T])} 46 | b.buckets[primary] = bkt 47 | } 48 | b.Unlock() 49 | item, existing := bkt.set(secondary, value, duration, track) 50 | item.group = primary 51 | return item, existing 52 | } 53 | 54 | func (b *layeredBucket[T]) remove(primary, secondary string) *Item[T] { 55 | b.RLock() 56 | bucket, exists := b.buckets[primary] 57 | b.RUnlock() 58 | if !exists { 59 | return nil 60 | } 61 | return bucket.remove(secondary) 62 | } 63 | 64 | func (b *layeredBucket[T]) delete(primary, secondary string) { 65 | b.RLock() 66 | bucket, exists := b.buckets[primary] 67 | b.RUnlock() 68 | if !exists { 69 | return 70 | } 71 | bucket.delete(secondary) 72 | } 73 | 74 | func (b *layeredBucket[T]) deletePrefix(primary, prefix string, deletables chan *Item[T]) int { 75 | b.RLock() 76 | bucket, exists := b.buckets[primary] 77 | b.RUnlock() 78 | if !exists { 79 | return 0 80 | } 81 | return bucket.deletePrefix(prefix, deletables) 82 | } 83 | 84 | func (b *layeredBucket[T]) deleteFunc(primary string, matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int { 85 | b.RLock() 86 | bucket, exists := b.buckets[primary] 87 | b.RUnlock() 88 | if !exists { 89 | return 0 90 | } 91 | return bucket.deleteFunc(matches, deletables) 92 | } 93 | 94 | func (b *layeredBucket[T]) deleteAll(primary string, deletables chan *Item[T]) bool { 95 | b.RLock() 96 | bucket, exists := b.buckets[primary] 97 | b.RUnlock() 98 | if !exists { 99 | return false 100 | } 101 | 102 | bucket.Lock() 103 | defer bucket.Unlock() 104 | 105 | if l := len(bucket.lookup); l == 0 { 106 | return false 107 | } 108 | for key, item := range bucket.lookup { 109 | delete(bucket.lookup, key) 110 | deletables <- item 111 | } 112 | return true 113 | } 114 | 115 | func (b *layeredBucket[T]) forEachFunc(primary string, matches func(key string, item *Item[T]) bool) { 116 | b.RLock() 117 | bucket, exists := b.buckets[primary] 118 | b.RUnlock() 119 | if exists { 120 | bucket.forEachFunc(matches) 121 | } 122 | } 123 | 124 | // we expect the caller to have acquired a write lock 125 | func (b *layeredBucket[T]) clear() { 126 | for _, bucket := range b.buckets { 127 | bucket.clear() 128 | } 129 | b.buckets = make(map[string]*bucket[T]) 130 | } 131 | -------------------------------------------------------------------------------- /layeredcache.go: -------------------------------------------------------------------------------- 1 | // An LRU cache aimed at high concurrency 2 | package ccache 3 | 4 | import ( 5 | "hash/fnv" 6 | "sync/atomic" 7 | "time" 8 | ) 9 | 10 | type LayeredCache[T any] struct { 11 | *Configuration[T] 12 | control 13 | list *List[T] 14 | buckets []*layeredBucket[T] 15 | bucketMask uint32 16 | size int64 17 | pruneTargetSize int64 18 | deletables chan *Item[T] 19 | promotables chan *Item[T] 20 | } 21 | 22 | // Create a new layered cache with the specified configuration. 
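//
// A minimal sketch (assuming string values; the keys and TTL are illustrative):
//
//	cache := ccache.Layered(ccache.Configure[string]())
//	cache.Set("user/44", ".json", "{...}", time.Minute)
//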
23 | // A layered cache uses two keys to identify a value: a primary key 24 | // and a secondary key. Get, Set and Delete require both a primary and 25 | // secondary key. However, DeleteAll requires only a primary key, deleting 26 | // all values that share the same primary key. 27 | 28 | // A layered cache is useful as an HTTP cache, where an HTTP purge might 29 | // delete multiple variants of the same resource: 30 | // primary key = "user/44" 31 | // secondary key 1 = ".json" 32 | // secondary key 2 = ".xml" 33 | 34 | // See ccache.Configure() for creating a configuration 35 | func Layered[T any](config *Configuration[T]) *LayeredCache[T] { 36 | c := &LayeredCache[T]{ 37 | list: NewList[T](), 38 | Configuration: config, 39 | control: newControl(), 40 | bucketMask: uint32(config.buckets) - 1, 41 | buckets: make([]*layeredBucket[T], config.buckets), 42 | deletables: make(chan *Item[T], config.deleteBuffer), 43 | promotables: make(chan *Item[T], config.promoteBuffer), 44 | pruneTargetSize: config.maxSize - config.maxSize*int64(config.percentToPrune)/100, 45 | } 46 | for i := 0; i < config.buckets; i++ { 47 | c.buckets[i] = &layeredBucket[T]{ 48 | buckets: make(map[string]*bucket[T]), 49 | } 50 | } 51 | go c.worker() 52 | return c 53 | } 54 | 55 | func (c *LayeredCache[T]) ItemCount() int { 56 | count := 0 57 | for _, b := range c.buckets { 58 | count += b.itemCount() 59 | } 60 | return count 61 | } 62 | 63 | // Get an item from the cache. Returns nil if the item wasn't found. 64 | // This can return an expired item. Use item.Expired() to see if the item 65 | // is expired and item.TTL() to see how long until the item expires (which 66 | // will be negative for an already expired item). 67 | func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T] { 68 | item := c.bucket(primary).get(primary, secondary) 69 | if item == nil { 70 | return nil 71 | } 72 | if item.expires > time.Now().UnixNano() { 73 | select { 74 | case c.promotables <- item: 75 | default: 76 | } 77 | } 78 | return item 79 | } 80 | 81 | // Same as Get but does not promote the value. This essentially circumvents the 82 | // "least recently used" aspect of this cache. To some degree, it's akin to a 83 | // "peek" 84 | func (c *LayeredCache[T]) GetWithoutPromote(primary, secondary string) *Item[T] { 85 | return c.bucket(primary).get(primary, secondary) 86 | } 87 | 88 | func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool) { 89 | c.bucket(primary).forEachFunc(primary, matches) 90 | } 91 | 92 | // Get the secondary cache for a given primary key. This operation will 93 | // never return nil. In the case where the primary key does not exist, a 94 | // new, underlying, empty bucket will be created and returned. 95 | func (c *LayeredCache[T]) GetOrCreateSecondaryCache(primary string) *SecondaryCache[T] { 96 | primaryBkt := c.bucket(primary) 97 | bkt := primaryBkt.getSecondaryBucket(primary) 98 | primaryBkt.Lock() 99 | if bkt == nil { 100 | bkt = &bucket[T]{lookup: make(map[string]*Item[T])} 101 | primaryBkt.buckets[primary] = bkt 102 | } 103 | primaryBkt.Unlock() 104 | return &SecondaryCache[T]{ 105 | bucket: bkt, 106 | pCache: c, 107 | } 108 | } 109 | 110 | // Used when the cache was created with the Track() configuration option. 
111 | // Avoid otherwise 112 | func (c *LayeredCache[T]) TrackingGet(primary, secondary string) TrackedItem[T] { 113 | item := c.Get(primary, secondary) 114 | if item == nil { 115 | return nil 116 | } 117 | item.track() 118 | return item 119 | } 120 | 121 | // Set the value in the cache for the specified duration 122 | func (c *LayeredCache[T]) TrackingSet(primary, secondary string, value T, duration time.Duration) TrackedItem[T] { 123 | return c.set(primary, secondary, value, duration, true) 124 | } 125 | 126 | // Set the value in the cache for the specified duration 127 | func (c *LayeredCache[T]) Set(primary, secondary string, value T, duration time.Duration) { 128 | c.set(primary, secondary, value, duration, false) 129 | } 130 | 131 | // Replace the value if it exists, does not set if it doesn't. 132 | // Returns true if the item existed and was replaced, false otherwise. 133 | // Replace does not reset item's TTL nor does it alter its position in the LRU 134 | func (c *LayeredCache[T]) Replace(primary, secondary string, value T) bool { 135 | item := c.bucket(primary).get(primary, secondary) 136 | if item == nil { 137 | return false 138 | } 139 | c.Set(primary, secondary, value, item.TTL()) 140 | return true 141 | } 142 | 143 | // Attempts to get the value from the cache and calls fetch on a miss. 144 | // If fetch returns an error, no value is cached and the error is returned back 145 | // to the caller. 146 | // Note that Fetch merely calls the public Get and Set functions. If you want 147 | // a different Fetch behavior, such as thundering herd protection or returning 148 | // expired items, implement it in your application. 149 | func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) { 150 | item := c.Get(primary, secondary) 151 | if item != nil { 152 | return item, nil 153 | } 154 | value, err := fetch() 155 | if err != nil { 156 | return nil, err 157 | } 158 | return c.set(primary, secondary, value, duration, false), nil 159 | } 160 | 161 | // Remove the item from the cache, return true if the item was present, false otherwise. 162 | func (c *LayeredCache[T]) Delete(primary, secondary string) bool { 163 | item := c.bucket(primary).remove(primary, secondary) 164 | if item != nil { 165 | c.deletables <- item 166 | return true 167 | } 168 | return false 169 | } 170 | 171 | // Deletes all items that share the same primary key 172 | func (c *LayeredCache[T]) DeleteAll(primary string) bool { 173 | return c.bucket(primary).deleteAll(primary, c.deletables) 174 | } 175 | 176 | // Deletes all items that share the same primary key and prefix. 177 | func (c *LayeredCache[T]) DeletePrefix(primary, prefix string) int { 178 | return c.bucket(primary).deletePrefix(primary, prefix, c.deletables) 179 | } 180 | 181 | // Deletes all items that share the same primary key and where the matches func evaluates to true. 
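// A sketch (assuming int values), deleting everything under "user/44" whose value is negative:
//
//	removed := cache.DeleteFunc("user/44", func(key string, item *Item[int]) bool {
//		return item.Value() < 0
//	})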
182 | func (c *LayeredCache[T]) DeleteFunc(primary string, matches func(key string, item *Item[T]) bool) int { 183 | return c.bucket(primary).deleteFunc(primary, matches, c.deletables) 184 | } 185 | 186 | func (c *LayeredCache[T]) set(primary, secondary string, value T, duration time.Duration, track bool) *Item[T] { 187 | item, existing := c.bucket(primary).set(primary, secondary, value, duration, track) 188 | if existing != nil { 189 | c.deletables <- existing 190 | } 191 | c.promote(item) 192 | return item 193 | } 194 | 195 | func (c *LayeredCache[T]) bucket(key string) *layeredBucket[T] { 196 | h := fnv.New32a() 197 | h.Write([]byte(key)) 198 | return c.buckets[h.Sum32()&c.bucketMask] 199 | } 200 | 201 | func (c *LayeredCache[T]) halted(fn func()) { 202 | c.halt() 203 | defer c.unhalt() 204 | fn() 205 | } 206 | 207 | func (c *LayeredCache[T]) halt() { 208 | for _, bucket := range c.buckets { 209 | bucket.Lock() 210 | } 211 | } 212 | 213 | func (c *LayeredCache[T]) unhalt() { 214 | for _, bucket := range c.buckets { 215 | bucket.Unlock() 216 | } 217 | } 218 | 219 | func (c *LayeredCache[T]) promote(item *Item[T]) { 220 | c.promotables <- item 221 | } 222 | 223 | func (c *LayeredCache[T]) worker() { 224 | dropped := 0 225 | cc := c.control 226 | 227 | promoteItem := func(item *Item[T]) { 228 | if c.doPromote(item) && c.size > c.maxSize { 229 | dropped += c.gc() 230 | } 231 | } 232 | 233 | for { 234 | select { 235 | case item := <-c.promotables: 236 | promoteItem(item) 237 | case item := <-c.deletables: 238 | c.doDelete(item) 239 | case control := <-cc: 240 | switch msg := control.(type) { 241 | case controlStop: 242 | goto drain 243 | case controlGetDropped: 244 | msg.res <- dropped 245 | dropped = 0 246 | case controlSetMaxSize: 247 | newMaxSize := msg.size 248 | c.maxSize = newMaxSize 249 | c.pruneTargetSize = newMaxSize - newMaxSize*int64(c.percentToPrune)/100 250 | if c.size > c.maxSize { 251 | dropped += c.gc() 252 | } 253 | msg.done <- struct{}{} 254 | case controlClear: 255 | promotables := c.promotables 256 | for len(promotables) > 0 { 257 | <-promotables 258 | } 259 | deletables := c.deletables 260 | for len(deletables) > 0 { 261 | <-deletables 262 | } 263 | 264 | c.halted(func() { 265 | for _, bucket := range c.buckets { 266 | bucket.clear() 267 | } 268 | c.size = 0 269 | c.list = NewList[T]() 270 | }) 271 | msg.done <- struct{}{} 272 | case controlGetSize: 273 | msg.res <- c.size 274 | case controlGC: 275 | dropped += c.gc() 276 | msg.done <- struct{}{} 277 | case controlSyncUpdates: 278 | doAllPendingPromotesAndDeletes(c.promotables, promoteItem, c.deletables, c.doDelete) 279 | msg.done <- struct{}{} 280 | } 281 | } 282 | } 283 | 284 | drain: 285 | for { 286 | select { 287 | case item := <-c.deletables: 288 | c.doDelete(item) 289 | default: 290 | return 291 | } 292 | } 293 | } 294 | 295 | func (c *LayeredCache[T]) doDelete(item *Item[T]) { 296 | if item.prev == nil && item.next == nil { 297 | item.promotions = -2 298 | } else { 299 | c.size -= item.size 300 | if c.onDelete != nil { 301 | c.onDelete(item) 302 | } 303 | c.list.Remove(item) 304 | item.promotions = -2 305 | } 306 | } 307 | 308 | func (c *LayeredCache[T]) doPromote(item *Item[T]) bool { 309 | // deleted before it ever got promoted 310 | if item.promotions == -2 { 311 | return false 312 | } 313 | 314 | if item.next != nil || item.prev != nil { // not a new item 315 | if item.shouldPromote(c.getsPerPromote) { 316 | c.list.MoveToFront(item) 317 | item.promotions = 0 318 | } 319 | return false 320 | } 321 | 322 | 
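// new item: add its size to the cache's running total and insert it at the front of the LRU list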
c.size += item.size 323 | c.list.Insert(item) 324 | return true 325 | } 326 | 327 | func (c *LayeredCache[T]) gc() int { 328 | dropped := 0 329 | item := c.list.Tail 330 | 331 | prunedSize := int64(0) 332 | sizeToPrune := c.size - c.pruneTargetSize 333 | 334 | for prunedSize < sizeToPrune { 335 | if item == nil { 336 | return dropped 337 | } 338 | prev := item.prev 339 | if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 { 340 | 341 | c.bucket(item.group).delete(item.group, item.key) 342 | itemSize := item.size 343 | c.size -= itemSize 344 | prunedSize += itemSize 345 | 346 | c.list.Remove(item) 347 | if c.onDelete != nil { 348 | c.onDelete(item) 349 | } 350 | dropped += 1 351 | item.promotions = -2 352 | } 353 | item = prev 354 | } 355 | return dropped 356 | } 357 | -------------------------------------------------------------------------------- /layeredcache_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "math/rand" 5 | "sort" 6 | "strconv" 7 | "sync/atomic" 8 | "testing" 9 | "time" 10 | 11 | "github.com/karlseguin/ccache/v3/assert" 12 | ) 13 | 14 | func Test_LayedCache_GetsANonExistantValue(t *testing.T) { 15 | cache := newLayered[string]() 16 | defer cache.Stop() 17 | 18 | assert.Equal(t, cache.Get("spice", "flow"), nil) 19 | assert.Equal(t, cache.ItemCount(), 0) 20 | } 21 | 22 | func Test_LayedCache_SetANewValue(t *testing.T) { 23 | cache := newLayered[string]() 24 | defer cache.Stop() 25 | 26 | cache.Set("spice", "flow", "a value", time.Minute) 27 | assert.Equal(t, cache.Get("spice", "flow").Value(), "a value") 28 | assert.Equal(t, cache.Get("spice", "stop"), nil) 29 | assert.Equal(t, cache.ItemCount(), 1) 30 | } 31 | 32 | func Test_LayedCache_SetsMultipleValueWithinTheSameLayer(t *testing.T) { 33 | cache := newLayered[string]() 34 | defer cache.Stop() 35 | 36 | cache.Set("spice", "flow", "value-a", time.Minute) 37 | cache.Set("spice", "must", "value-b", time.Minute) 38 | cache.Set("leto", "sister", "ghanima", time.Minute) 39 | assert.Equal(t, cache.Get("spice", "flow").Value(), "value-a") 40 | assert.Equal(t, cache.Get("spice", "must").Value(), "value-b") 41 | assert.Equal(t, cache.Get("spice", "worm"), nil) 42 | 43 | assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima") 44 | assert.Equal(t, cache.Get("leto", "brother"), nil) 45 | assert.Equal(t, cache.Get("baron", "friend"), nil) 46 | assert.Equal(t, cache.ItemCount(), 3) 47 | } 48 | 49 | func Test_LayedCache_ReplaceDoesNothingIfKeyDoesNotExist(t *testing.T) { 50 | cache := newLayered[string]() 51 | defer cache.Stop() 52 | 53 | assert.Equal(t, cache.Replace("spice", "flow", "value-a"), false) 54 | assert.Equal(t, cache.Get("spice", "flow"), nil) 55 | } 56 | 57 | func Test_LayedCache_ReplaceUpdatesTheValue(t *testing.T) { 58 | cache := newLayered[string]() 59 | defer cache.Stop() 60 | 61 | cache.Set("spice", "flow", "value-a", time.Minute) 62 | assert.Equal(t, cache.Replace("spice", "flow", "value-b"), true) 63 | assert.Equal(t, cache.Get("spice", "flow").Value(), "value-b") 64 | assert.Equal(t, cache.ItemCount(), 1) 65 | // not sure how to test that the TTL hasn't changed, short of a sleep.. 
66 | } 67 | 68 | func Test_LayedCache_DeletesAValue(t *testing.T) { 69 | cache := newLayered[string]() 70 | defer cache.Stop() 71 | 72 | cache.Set("spice", "flow", "value-a", time.Minute) 73 | cache.Set("spice", "must", "value-b", time.Minute) 74 | cache.Set("leto", "sister", "ghanima", time.Minute) 75 | cache.Delete("spice", "flow") 76 | assert.Equal(t, cache.Get("spice", "flow"), nil) 77 | assert.Equal(t, cache.Get("spice", "must").Value(), "value-b") 78 | assert.Equal(t, cache.Get("spice", "worm"), nil) 79 | assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima") 80 | assert.Equal(t, cache.ItemCount(), 2) 81 | } 82 | 83 | func Test_LayedCache_DeletesAPrefix(t *testing.T) { 84 | cache := newLayered[string]() 85 | defer cache.Stop() 86 | 87 | assert.Equal(t, cache.ItemCount(), 0) 88 | 89 | cache.Set("spice", "aaa", "1", time.Minute) 90 | cache.Set("spice", "aab", "2", time.Minute) 91 | cache.Set("spice", "aac", "3", time.Minute) 92 | cache.Set("leto", "aac", "3", time.Minute) 93 | cache.Set("spice", "ac", "4", time.Minute) 94 | cache.Set("spice", "z5", "7", time.Minute) 95 | assert.Equal(t, cache.ItemCount(), 6) 96 | 97 | assert.Equal(t, cache.DeletePrefix("spice", "9a"), 0) 98 | assert.Equal(t, cache.ItemCount(), 6) 99 | 100 | assert.Equal(t, cache.DeletePrefix("spice", "aa"), 3) 101 | assert.Equal(t, cache.Get("spice", "aaa"), nil) 102 | assert.Equal(t, cache.Get("spice", "aab"), nil) 103 | assert.Equal(t, cache.Get("spice", "aac"), nil) 104 | assert.Equal(t, cache.Get("spice", "ac").Value(), "4") 105 | assert.Equal(t, cache.Get("spice", "z5").Value(), "7") 106 | assert.Equal(t, cache.ItemCount(), 3) 107 | } 108 | 109 | func Test_LayedCache_DeletesAFunc(t *testing.T) { 110 | cache := newLayered[int]() 111 | defer cache.Stop() 112 | 113 | assert.Equal(t, cache.ItemCount(), 0) 114 | 115 | cache.Set("spice", "a", 1, time.Minute) 116 | cache.Set("leto", "b", 2, time.Minute) 117 | cache.Set("spice", "c", 3, time.Minute) 118 | cache.Set("spice", "d", 4, time.Minute) 119 | cache.Set("spice", "e", 5, time.Minute) 120 | cache.Set("spice", "f", 6, time.Minute) 121 | assert.Equal(t, cache.ItemCount(), 6) 122 | 123 | assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool { 124 | return false 125 | }), 0) 126 | assert.Equal(t, cache.ItemCount(), 6) 127 | 128 | assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool { 129 | return item.Value() < 4 130 | }), 2) 131 | assert.Equal(t, cache.ItemCount(), 4) 132 | 133 | assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool { 134 | return key == "d" 135 | }), 1) 136 | assert.Equal(t, cache.ItemCount(), 3) 137 | } 138 | 139 | func Test_LayedCache_OnDeleteCallbackCalled(t *testing.T) { 140 | onDeleteFnCalled := int32(0) 141 | onDeleteFn := func(item *Item[string]) { 142 | if item.group == "spice" && item.key == "flow" { 143 | atomic.AddInt32(&onDeleteFnCalled, 1) 144 | } 145 | } 146 | 147 | cache := Layered[string](Configure[string]().OnDelete(onDeleteFn)) 148 | defer cache.Stop() 149 | 150 | cache.Set("spice", "flow", "value-a", time.Minute) 151 | cache.Set("spice", "must", "value-b", time.Minute) 152 | cache.Set("leto", "sister", "ghanima", time.Minute) 153 | 154 | cache.SyncUpdates() 155 | cache.Delete("spice", "flow") 156 | cache.SyncUpdates() 157 | 158 | assert.Equal(t, cache.Get("spice", "flow"), nil) 159 | assert.Equal(t, cache.Get("spice", "must").Value(), "value-b") 160 | assert.Equal(t, cache.Get("spice", "worm"), nil) 161 | assert.Equal(t, cache.Get("leto", 
"sister").Value(), "ghanima") 162 | 163 | assert.Equal(t, atomic.LoadInt32(&onDeleteFnCalled), 1) 164 | } 165 | 166 | func Test_LayedCache_DeletesALayer(t *testing.T) { 167 | cache := newLayered[string]() 168 | defer cache.Stop() 169 | 170 | cache.Set("spice", "flow", "value-a", time.Minute) 171 | cache.Set("spice", "must", "value-b", time.Minute) 172 | cache.Set("leto", "sister", "ghanima", time.Minute) 173 | cache.DeleteAll("spice") 174 | assert.Equal(t, cache.Get("spice", "flow"), nil) 175 | assert.Equal(t, cache.Get("spice", "must"), nil) 176 | assert.Equal(t, cache.Get("spice", "worm"), nil) 177 | assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima") 178 | } 179 | 180 | func Test_LayeredCache_GCsTheOldestItems(t *testing.T) { 181 | cache := Layered(Configure[int]().MaxSize(100).PercentToPrune(10)) 182 | defer cache.Stop() 183 | 184 | cache.Set("xx", "a", 23, time.Minute) 185 | for i := 0; i < 100; i++ { 186 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 187 | } 188 | cache.Set("xx", "b", 9001, time.Minute) 189 | //let the items get promoted (and added to our list) 190 | cache.SyncUpdates() 191 | cache.GC() 192 | assert.Equal(t, cache.Get("xx", "a"), nil) 193 | assert.Equal(t, cache.Get("xx", "b").Value(), 9001) 194 | assert.Equal(t, cache.Get("9", "a"), nil) 195 | assert.Equal(t, cache.Get("10", "a"), nil) 196 | assert.Equal(t, cache.Get("11", "a").Value(), 11) 197 | } 198 | 199 | func Test_LayeredCache_PromotedItemsDontGetPruned(t *testing.T) { 200 | cache := Layered(Configure[int]().MaxSize(100).PercentToPrune(10).GetsPerPromote(1)) 201 | defer cache.Stop() 202 | 203 | for i := 0; i < 100; i++ { 204 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 205 | } 206 | cache.SyncUpdates() 207 | cache.Get("9", "a") 208 | cache.SyncUpdates() 209 | cache.GC() 210 | assert.Equal(t, cache.Get("9", "a").Value(), 9) 211 | assert.Equal(t, cache.Get("10", "a"), nil) 212 | assert.Equal(t, cache.Get("11", "a").Value(), 11) 213 | } 214 | 215 | func Test_LayeredCache_GetWithoutPromoteDoesNotPromote(t *testing.T) { 216 | cache := Layered(Configure[int]().MaxSize(100).PercentToPrune(10).GetsPerPromote(1)) 217 | defer cache.Stop() 218 | 219 | for i := 0; i < 100; i++ { 220 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 221 | } 222 | cache.SyncUpdates() 223 | cache.GetWithoutPromote("9", "a") 224 | cache.SyncUpdates() 225 | cache.GC() 226 | assert.Equal(t, cache.Get("9", "a"), nil) 227 | assert.Equal(t, cache.Get("10", "a").Value(), 10) 228 | assert.Equal(t, cache.Get("11", "a").Value(), 11) 229 | } 230 | 231 | func Test_LayeredCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) { 232 | cache := Layered(Configure[int]().MaxSize(10).PercentToPrune(10).Track()) 233 | defer cache.Stop() 234 | 235 | item0 := cache.TrackingSet("0", "a", 0, time.Minute) 236 | cache.Set("1", "a", 1, time.Minute) 237 | item1 := cache.TrackingGet("1", "a") 238 | 239 | for i := 2; i < 11; i++ { 240 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 241 | } 242 | 243 | cache.SyncUpdates() 244 | cache.GC() 245 | assert.Equal(t, cache.Get("0", "a").Value(), 0) 246 | assert.Equal(t, cache.Get("1", "a").Value(), 1) 247 | item0.Release() 248 | 249 | item1.Release() 250 | for i := 1; i < 5; i++ { 251 | cache.Set(strconv.Itoa(i+20), "a", i, time.Minute) 252 | } 253 | cache.GC() 254 | assert.Equal(t, cache.Get("0", "a"), nil) 255 | assert.Equal(t, cache.Get("1", "a"), nil) 256 | } 257 | 258 | func Test_LayeredCache_RemovesOldestItemWhenFull(t *testing.T) { 259 | onDeleteFnCalled := false 260 | onDeleteFn := func(item 
*Item[int]) { 261 | if item.key == "a" { 262 | onDeleteFnCalled = true 263 | } 264 | } 265 | cache := Layered(Configure[int]().MaxSize(5).PercentToPrune(1).OnDelete(onDeleteFn)) 266 | defer cache.Stop() 267 | 268 | cache.Set("xx", "a", 23, time.Minute) 269 | for i := 0; i < 7; i++ { 270 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 271 | } 272 | cache.Set("xx", "b", 9001, time.Minute) 273 | cache.SyncUpdates() 274 | 275 | assert.Equal(t, cache.Get("xx", "a"), nil) 276 | assert.Equal(t, cache.Get("0", "a"), nil) 277 | assert.Equal(t, cache.Get("1", "a"), nil) 278 | assert.Equal(t, cache.Get("2", "a"), nil) 279 | assert.Equal(t, cache.Get("3", "a").Value(), 3) 280 | assert.Equal(t, cache.Get("xx", "b").Value(), 9001) 281 | assert.Equal(t, cache.GetDropped(), 4) 282 | assert.Equal(t, cache.GetDropped(), 0) 283 | assert.Equal(t, onDeleteFnCalled, true) 284 | } 285 | 286 | func Test_LayeredCache_ResizeOnTheFly(t *testing.T) { 287 | cache := Layered(Configure[int]().MaxSize(9).PercentToPrune(1)) 288 | defer cache.Stop() 289 | 290 | for i := 0; i < 5; i++ { 291 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 292 | } 293 | cache.SyncUpdates() 294 | 295 | cache.SetMaxSize(3) 296 | cache.SyncUpdates() 297 | assert.Equal(t, cache.GetDropped(), 2) 298 | assert.Equal(t, cache.Get("0", "a"), nil) 299 | assert.Equal(t, cache.Get("1", "a"), nil) 300 | assert.Equal(t, cache.Get("2", "a").Value(), 2) 301 | assert.Equal(t, cache.Get("3", "a").Value(), 3) 302 | assert.Equal(t, cache.Get("4", "a").Value(), 4) 303 | 304 | cache.Set("5", "a", 5, time.Minute) 305 | cache.SyncUpdates() 306 | assert.Equal(t, cache.GetDropped(), 1) 307 | assert.Equal(t, cache.Get("2", "a"), nil) 308 | assert.Equal(t, cache.Get("3", "a").Value(), 3) 309 | assert.Equal(t, cache.Get("4", "a").Value(), 4) 310 | assert.Equal(t, cache.Get("5", "a").Value(), 5) 311 | 312 | cache.SetMaxSize(10) 313 | cache.Set("6", "a", 6, time.Minute) 314 | cache.SyncUpdates() 315 | assert.Equal(t, cache.GetDropped(), 0) 316 | assert.Equal(t, cache.Get("3", "a").Value(), 3) 317 | assert.Equal(t, cache.Get("4", "a").Value(), 4) 318 | assert.Equal(t, cache.Get("5", "a").Value(), 5) 319 | assert.Equal(t, cache.Get("6", "a").Value(), 6) 320 | } 321 | 322 | func Test_LayeredCache_RemovesOldestItemWhenFullBySizer(t *testing.T) { 323 | cache := Layered(Configure[*SizedItem]().MaxSize(50).PercentToPrune(15)) 324 | defer cache.Stop() 325 | 326 | for i := 0; i < 25; i++ { 327 | cache.Set("pri", strconv.Itoa(i), &SizedItem{i, 2}, time.Minute) 328 | } 329 | cache.SyncUpdates() 330 | cache.GC() 331 | assert.Equal(t, cache.Get("pri", "0"), nil) 332 | assert.Equal(t, cache.Get("pri", "1"), nil) 333 | assert.Equal(t, cache.Get("pri", "2"), nil) 334 | assert.Equal(t, cache.Get("pri", "3"), nil) 335 | assert.Equal(t, cache.Get("pri", "4").Value().id, 4) 336 | } 337 | 338 | func Test_LayeredCache_SetUpdatesSizeOnDelta(t *testing.T) { 339 | cache := Layered(Configure[*SizedItem]()) 340 | defer cache.Stop() 341 | 342 | cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute) 343 | cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute) 344 | cache.SyncUpdates() 345 | assert.Equal(t, cache.GetSize(), 5) 346 | cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute) 347 | cache.SyncUpdates() 348 | assert.Equal(t, cache.GetSize(), 5) 349 | cache.Set("pri", "b", &SizedItem{0, 4}, time.Minute) 350 | cache.SyncUpdates() 351 | assert.Equal(t, cache.GetSize(), 6) 352 | cache.Set("pri", "b", &SizedItem{0, 2}, time.Minute) 353 | cache.Set("sec", "b", &SizedItem{0, 3}, time.Minute) 354 | 
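// expected size: pri/a (2) + pri/b (2) + sec/b (3) = 7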
cache.SyncUpdates() 355 | assert.Equal(t, cache.GetSize(), 7) 356 | cache.Delete("pri", "b") 357 | cache.SyncUpdates() 358 | assert.Equal(t, cache.GetSize(), 5) 359 | } 360 | 361 | func Test_LayeredCache_ReplaceDoesNotchangeSizeIfNotSet(t *testing.T) { 362 | cache := Layered(Configure[*SizedItem]()) 363 | defer cache.Stop() 364 | 365 | cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute) 366 | cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute) 367 | cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute) 368 | cache.Replace("sec", "3", &SizedItem{1, 2}) 369 | cache.SyncUpdates() 370 | assert.Equal(t, cache.GetSize(), 6) 371 | } 372 | 373 | func Test_LayeredCache_ReplaceChangesSize(t *testing.T) { 374 | cache := Layered(Configure[*SizedItem]()) 375 | defer cache.Stop() 376 | 377 | cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute) 378 | cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute) 379 | 380 | cache.Replace("pri", "2", &SizedItem{1, 2}) 381 | cache.SyncUpdates() 382 | assert.Equal(t, cache.GetSize(), 4) 383 | 384 | cache.Replace("pri", "2", &SizedItem{1, 1}) 385 | cache.SyncUpdates() 386 | assert.Equal(t, cache.GetSize(), 3) 387 | 388 | cache.Replace("pri", "2", &SizedItem{1, 3}) 389 | cache.SyncUpdates() 390 | assert.Equal(t, cache.GetSize(), 5) 391 | } 392 | 393 | func Test_LayeredCache_EachFunc(t *testing.T) { 394 | cache := Layered(Configure[int]().MaxSize(3).PercentToPrune(1)) 395 | defer cache.Stop() 396 | 397 | assert.List(t, forEachKeysLayered[int](cache, "1"), []string{}) 398 | 399 | cache.Set("1", "a", 1, time.Minute) 400 | assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a"}) 401 | 402 | cache.Set("1", "b", 2, time.Minute) 403 | cache.SyncUpdates() 404 | assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a", "b"}) 405 | 406 | cache.Set("1", "c", 3, time.Minute) 407 | cache.SyncUpdates() 408 | assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a", "b", "c"}) 409 | 410 | cache.Set("1", "d", 4, time.Minute) 411 | cache.SyncUpdates() 412 | assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"b", "c", "d"}) 413 | 414 | // iteration is non-deterministic, all we know for sure is "stop" should not be in there 415 | cache.Set("1", "stop", 5, time.Minute) 416 | cache.SyncUpdates() 417 | assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop") 418 | 419 | cache.Set("1", "e", 6, time.Minute) 420 | cache.SyncUpdates() 421 | assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop") 422 | } 423 | 424 | func Test_LayeredCachePrune(t *testing.T) { 425 | maxSize := int64(500) 426 | cache := Layered(Configure[string]().MaxSize(maxSize).PercentToPrune(50)) 427 | defer cache.Stop() 428 | 429 | epoch := 0 430 | for i := 0; i < 10000; i++ { 431 | epoch += 1 432 | expired := make([]string, 0) 433 | for i := 0; i < 50; i += 1 { 434 | key := strconv.FormatInt(rand.Int63n(maxSize*20), 10) 435 | item := cache.Get(key, key) 436 | if item == nil || item.TTL() > 1*time.Minute { 437 | expired = append(expired, key) 438 | } 439 | } 440 | for _, key := range expired { 441 | cache.Set(key, key, key, 5*time.Minute) 442 | } 443 | if epoch%500 == 0 { 444 | assert.True(t, cache.GetSize() <= 500) 445 | } 446 | } 447 | } 448 | 449 | func Test_LayeredConcurrentStop(t *testing.T) { 450 | for i := 0; i < 100; i++ { 451 | cache := Layered(Configure[string]()) 452 | r := func() { 453 | for { 454 | key := strconv.Itoa(int(rand.Int31n(100))) 455 | switch rand.Int31n(3) { 456 | case 0: 457 | cache.Get(key, key) 458 | case 1: 459 | cache.Set(key, key, 
key, time.Minute) 460 | case 2: 461 | cache.Delete(key, key) 462 | } 463 | } 464 | } 465 | go r() 466 | go r() 467 | go r() 468 | time.Sleep(time.Millisecond * 10) 469 | cache.Stop() 470 | } 471 | } 472 | func newLayered[T any]() *LayeredCache[T] { 473 | c := Layered[T](Configure[T]()) 474 | c.Clear() 475 | return c 476 | } 477 | 478 | func forEachKeysLayered[T any](cache *LayeredCache[T], primary string) []string { 479 | keys := make([]string, 0, 10) 480 | cache.ForEachFunc(primary, func(key string, i *Item[T]) bool { 481 | if key == "stop" { 482 | return false 483 | } 484 | keys = append(keys, key) 485 | return true 486 | }) 487 | sort.Strings(keys) 488 | return keys 489 | } 490 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013 Karl Seguin. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | -------------------------------------------------------------------------------- /list.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | type List[T any] struct { 4 | Head *Item[T] 5 | Tail *Item[T] 6 | } 7 | 8 | func NewList[T any]() *List[T] { 9 | return &List[T]{} 10 | } 11 | 12 | func (l *List[T]) Remove(item *Item[T]) { 13 | next := item.next 14 | prev := item.prev 15 | 16 | if next == nil { 17 | l.Tail = prev 18 | } else { 19 | next.prev = prev 20 | } 21 | 22 | if prev == nil { 23 | l.Head = next 24 | } else { 25 | prev.next = next 26 | } 27 | item.next = nil 28 | item.prev = nil 29 | } 30 | 31 | func (l *List[T]) MoveToFront(item *Item[T]) { 32 | l.Remove(item) 33 | l.Insert(item) 34 | } 35 | 36 | func (l *List[T]) Insert(item *Item[T]) { 37 | head := l.Head 38 | l.Head = item 39 | if head == nil { 40 | l.Tail = item 41 | return 42 | } 43 | item.next = head 44 | head.prev = item 45 | } 46 | -------------------------------------------------------------------------------- /list_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/karlseguin/ccache/v3/assert" 7 | ) 8 | 9 | func Test_List_Insert(t *testing.T) { 10 | l := NewList[int]() 11 | assertList(t, l) 12 | 13 | l.Insert(newItem("a", 1, 0, false)) 14 | assertList(t, l, 1) 15 | 16 | l.Insert(newItem("b", 2, 0, false)) 17 | assertList(t, l, 2, 1) 18 | 19 | l.Insert(newItem("c", 3, 0, false)) 20 | assertList(t, l, 3, 2, 1) 21 | } 22 | 23 | func Test_List_Remove(t *testing.T) { 24 | l := NewList[int]() 25 | assertList(t, l) 26 | 27 | item := newItem("a", 1, 0, false) 28 | l.Insert(item) 29 | l.Remove(item) 30 | assertList(t, l) 31 | 32 | n5 := newItem("e", 5, 0, false) 33 | l.Insert(n5) 34 | n4 := newItem("d", 4, 0, false) 35 | l.Insert(n4) 36 | n3 := newItem("c", 3, 0, false) 37 | l.Insert(n3) 38 | n2 := newItem("b", 2, 0, false) 39 | l.Insert(n2) 40 | n1 := newItem("a", 1, 0, false) 41 | l.Insert(n1) 42 | 43 | l.Remove(n5) 44 | assertList(t, l, 1, 2, 3, 4) 45 | 46 | l.Remove(n1) 47 | assertList(t, l, 2, 3, 4) 48 | 49 | l.Remove(n3) 50 | assertList(t, l, 2, 4) 51 | 52 | l.Remove(n2) 53 | assertList(t, l, 4) 54 | 55 | l.Remove(n4) 56 | assertList(t, l) 57 | } 58 | 59 | func assertList(t *testing.T, list *List[int], expected ...int) { 60 | t.Helper() 61 | 62 | if len(expected) == 0 { 63 | assert.Nil(t, list.Head) 64 | assert.Nil(t, list.Tail) 65 | return 66 | } 67 | 68 | node := list.Head 69 | for _, expected := range expected { 70 | assert.Equal(t, node.value, expected) 71 | node = node.next 72 | } 73 | 74 | node = list.Tail 75 | for i := len(expected) - 1; i >= 0; i-- { 76 | assert.Equal(t, node.value, expected[i]) 77 | node = node.prev 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # CCache 2 | 3 | CCache is an LRU Cache, written in Go, focused on supporting high concurrency. 4 | 5 | Lock contention on the list is reduced by: 6 | 7 | * Introducing a window which limits the frequency that an item can get promoted 8 | * Using a buffered channel to queue promotions for a single worker 9 | * Garbage collecting within the same thread as the worker 10 | 11 | Unless otherwise stated, all methods are thread-safe. 12 | 13 | The non-generic version of this cache can be imported via `github.com/karlseguin/ccache/`. 
14 | 15 | ## Configuration 16 | Import and create a `Cache` instance: 17 | 18 | ```go 19 | import ( 20 | "github.com/karlseguin/ccache/v3" 21 | ) 22 | 23 | // create a cache with string values 24 | var cache = ccache.New(ccache.Configure[string]()) 25 | ``` 26 | 27 | `Configure` exposes a chainable API: 28 | 29 | ```go 30 | // creates a cache with int values 31 | var cache = ccache.New(ccache.Configure[int]().MaxSize(1000).PercentToPrune(10)) 32 | ``` 33 | 34 | The most likely configuration options to tweak are: 35 | 36 | * `MaxSize(int)` - the maximum size to store in the cache (default: 5000) 37 | * `GetsPerPromote(int)` - the number of times an item is fetched before we promote it. For large caches with long TTLs, it normally isn't necessary to promote an item after every fetch (default: 3) 38 | * `PercentToPrune(int)` - the percentage, relative to `MaxSize`, to prune when the cache is full (default: 10) 39 | 40 | Configurations that change the internals of the cache, which aren't as likely to need tweaking: 41 | 42 | * `Buckets` - ccache shards its internal map to provide a greater amount of concurrency. Must be a power of 2 (default: 16). 43 | * `PromoteBuffer(int)` - the size of the buffer to use to queue promotions (default: 1024) 44 | * `DeleteBuffer(int)` - the size of the buffer to use to queue deletions (default: 1024) 45 | 46 | ## Usage 47 | 48 | Once the cache is set up, you can `Get`, `Set` and `Delete` items from it. A `Get` returns an `*Item`: 49 | 50 | ### Get 51 | ```go 52 | item := cache.Get("user:4") 53 | if item == nil { 54 | //handle 55 | } else { 56 | user := item.Value() 57 | } 58 | ``` 59 | The returned `*Item` exposes a number of methods: 60 | 61 | * `Value() T` - the value cached 62 | * `Expired() bool` - whether the item is expired or not 63 | * `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items) 64 | * `Expires() time.Time` - the time the item will expire 65 | 66 | By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source. 67 | 68 | ### GetWithoutPromote 69 | Same as `Get` but does not "promote" the value, which is to say it circumvents the "lru" aspect of this cache. Should only be used in limited cases, such as peeking at the value. 70 | 71 | ### Set 72 | `Set` expects the key, value and ttl: 73 | 74 | ```go 75 | cache.Set("user:4", user, time.Minute * 10) 76 | ``` 77 | 78 | ### Fetch 79 | There's also a `Fetch` which mixes a `Get` and a `Set`: 80 | 81 | ```go 82 | item, err := cache.Fetch("user:4", time.Minute * 10, func() (*User, error) { 83 | // code to fetch the data in case of a miss 84 | // should return the data to cache and the error, if any 85 | }) 86 | ``` 87 | 88 | `Fetch` doesn't do anything fancy: it merely uses the public `Get` and `Set` functions. If you want more advanced behavior, such as using a singleflight to protect against thundering herd, supporting a callback that accepts the key, or returning expired items, you should implement it in your application. 89 | 90 | ### Delete 91 | `Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key: 92 | 93 | ```go 94 | cache.Delete("user:4") 95 | ``` 96 | 97 | ### DeletePrefix 98 | `DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
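A minimal sketch (the keys are hypothetical; `user` stands in for whatever you cache):

```go
cache.Set("user:4:json", user, time.Minute * 10)
cache.Set("user:4:xml", user, time.Minute * 10)

// removes both entries above, so removed == 2
removed := cache.DeletePrefix("user:4:")
```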
99 | 100 | ### DeleteFunc 101 | `DeleteFunc` deletes all items for which the provided matches func evaluates to true. Returns the number of keys removed. 102 | 103 | ### ForEachFunc 104 | `ForEachFunc` iterates through all keys and values in the map and passes them to the provided function. Iteration stops if the function returns false. Iteration order is random. 105 | 106 | ### Clear 107 | `Clear` clears the cache. If the cache's gc is running, `Clear` waits for it to finish. 108 | 109 | ### Extend 110 | 111 | The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time. 112 | 113 | ```go 114 | cache.Extend("user:4", time.Minute * 10) 115 | 116 | // or 117 | item := cache.Get("user:4") 118 | if item != nil { 119 | item.Extend(time.Minute * 10) 120 | } 121 | ``` 122 | 123 | ### Replace 124 | The value of an item can be updated to a new value without renewing the item's TTL or its position in the LRU: 125 | 126 | ```go 127 | cache.Replace("user:4", user) 128 | ``` 129 | 130 | `Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned. 131 | 132 | ### Setnx 133 | 134 | Sets the value if the key does not already exist. `Setnx` first checks whether the key exists; if it does not, the value is set in the cache. This operation is atomic. 135 | 136 | ```go 137 | cache.Setnx("user:4", user, time.Minute * 10) 138 | ``` 139 | 140 | ### GetDropped 141 | You can get the number of keys evicted due to memory pressure by calling `GetDropped`: 142 | 143 | ```go 144 | dropped := cache.GetDropped() 145 | ``` 146 | The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics/monitoring purposes. 147 | 148 | ### Stop 149 | The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called 150 | the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache. 151 | 152 | ## Tracking 153 | CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintain a long-lived reference to data. 154 | 155 | When you configure your cache with `Track()`: 156 | 157 | ```go 158 | cache = ccache.New(ccache.Configure[int]().Track()) 159 | ``` 160 | 161 | The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them: 162 | 163 | ```go 164 | item := cache.TrackingGet("user:4") 165 | user := item.Value() //will be nil if "user:4" didn't exist in the cache 166 | item.Release() //can be called even if item.Value() returned nil 167 | ``` 168 | 169 | In practice, `Release` wouldn't be called until later, at some other place in your code. `TrackingSet` can be used to set a value to be tracked, as shown in the sketch at the end of this section. 170 | 171 | There are a couple of reasons to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyway. 172 | 173 | More importantly, it helps ensure that your code returns consistent data. Without tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system. 
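A sketch of `TrackingSet` (the key and value are hypothetical; `process` is a stand-in for your own code):

```go
item := cache.TrackingSet("user:4", user, time.Minute * 10)

// "user:4" is not eligible for purge until Release is called
process(item.Value())
item.Release()
```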
174 | 175 | ## LayeredCache 176 | 177 | CCache's `LayeredCache` stores and retrieves values by both a primary and secondary key. Deletion can happen against both the primary and secondary key, or against the primary key only (removing all values that share the same primary key). 178 | 179 | `LayeredCache` is useful for HTTP caching, when you want to purge all variations of a request. 180 | 181 | `LayeredCache` takes the same configuration object as the main cache and exposes the same optional tracking capabilities, but has a slightly different API: 182 | 183 | ```go 184 | cache := ccache.Layered(ccache.Configure[string]()) 185 | 186 | cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5) 187 | cache.Set("/users/goku", "type:xml", "", time.Minute * 5) 188 | 189 | json := cache.Get("/users/goku", "type:json") 190 | xml := cache.Get("/users/goku", "type:xml") 191 | 192 | cache.Delete("/users/goku", "type:json") 193 | cache.Delete("/users/goku", "type:xml") 194 | // OR 195 | cache.DeleteAll("/users/goku") 196 | ``` 197 | 198 | ## SecondaryCache 199 | 200 | In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with: 201 | 202 | ```go 203 | cache := ccache.Layered(ccache.Configure[string]()) 204 | sCache := cache.GetOrCreateSecondaryCache("/users/goku") 205 | sCache.Set("type:json", "{value_to_cache}", time.Minute * 5) 206 | ``` 207 | 208 | The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. However, one difference is that `Get` will not return nil, but will return an empty 'cache' for a non-existent primary key. 209 | 210 | ## Size 211 | By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache. 212 | 213 | However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a full cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes. 214 | 215 | ## Want Something Simpler? 216 | For a simpler cache, check out [rcache](https://github.com/karlseguin/rcache). 217 | -------------------------------------------------------------------------------- /secondarycache.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import "time" 4 | 5 | type SecondaryCache[T any] struct { 6 | bucket *bucket[T] 7 | pCache *LayeredCache[T] 8 | } 9 | 10 | // Get the value stored at the secondary key. 11 | // The semantics are the same as for LayeredCache.Get 12 | func (s *SecondaryCache[T]) Get(secondary string) *Item[T] { 13 | return s.bucket.get(secondary) 14 | } 15 | 16 | // Set the secondary key to a value. 17 | // The semantics are the same as for LayeredCache.Set 18 | func (s *SecondaryCache[T]) Set(secondary string, value T, duration time.Duration) *Item[T] { 19 | item, existing := s.bucket.set(secondary, value, duration, false) 20 | if existing != nil { 21 | s.pCache.deletables <- existing 22 | } 23 | s.pCache.promote(item) 24 | return item 25 | } 26 | 27 | // Fetch or set a secondary key. 
28 | // The semantics are the same as for LayeredCache.Fetch 29 | func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) { 30 | item := s.Get(secondary) 31 | if item != nil { 32 | return item, nil 33 | } 34 | value, err := fetch() 35 | if err != nil { 36 | return nil, err 37 | } 38 | return s.Set(secondary, value, duration), nil 39 | } 40 | 41 | // Delete a secondary key. 42 | // The semantics are the same as for LayeredCache.Delete 43 | func (s *SecondaryCache[T]) Delete(secondary string) bool { 44 | item := s.bucket.remove(secondary) 45 | if item != nil { 46 | s.pCache.deletables <- item 47 | return true 48 | } 49 | return false 50 | } 51 | 52 | // Replace a secondary key. 53 | // The semantics are the same as for LayeredCache.Replace 54 | func (s *SecondaryCache[T]) Replace(secondary string, value T) bool { 55 | item := s.Get(secondary) 56 | if item == nil { 57 | return false 58 | } 59 | s.Set(secondary, value, item.TTL()) 60 | return true 61 | } 62 | 63 | // Track a secondary key. 64 | // The semantics are the same as for LayeredCache.TrackingGet 65 | func (c *SecondaryCache[T]) TrackingGet(secondary string) TrackedItem[T] { 66 | item := c.Get(secondary) 67 | if item == nil { 68 | return nil 69 | } 70 | item.track() 71 | return item 72 | } 73 | -------------------------------------------------------------------------------- /secondarycache_test.go: -------------------------------------------------------------------------------- 1 | package ccache 2 | 3 | import ( 4 | "strconv" 5 | "testing" 6 | "time" 7 | 8 | "github.com/karlseguin/ccache/v3/assert" 9 | ) 10 | 11 | func Test_SecondaryCache_GetsANonExistantValue(t *testing.T) { 12 | cache := newLayered[string]().GetOrCreateSecondaryCache("foo") 13 | 14 | assert.Equal(t, cache == nil, false) 15 | } 16 | 17 | func Test_SecondaryCache_SetANewValue(t *testing.T) { 18 | cache := newLayered[string]() 19 | 20 | cache.Set("spice", "flow", "a value", time.Minute) 21 | sCache := cache.GetOrCreateSecondaryCache("spice") 22 | assert.Equal(t, sCache.Get("flow").Value(), "a value") 23 | assert.Equal(t, sCache.Get("stop"), nil) 24 | } 25 | 26 | func Test_SecondaryCache_ValueCanBeSeenInBothCaches1(t *testing.T) { 27 | cache := newLayered[string]() 28 | 29 | cache.Set("spice", "flow", "a value", time.Minute) 30 | sCache := cache.GetOrCreateSecondaryCache("spice") 31 | sCache.Set("orinoco", "another value", time.Minute) 32 | assert.Equal(t, sCache.Get("orinoco").Value(), "another value") 33 | assert.Equal(t, cache.Get("spice", "orinoco").Value(), "another value") 34 | } 35 | 36 | func Test_SecondaryCache_ValueCanBeSeenInBothCaches2(t *testing.T) { 37 | cache := newLayered[string]() 38 | 39 | sCache := cache.GetOrCreateSecondaryCache("spice") 40 | sCache.Set("flow", "a value", time.Minute) 41 | assert.Equal(t, sCache.Get("flow").Value(), "a value") 42 | assert.Equal(t, cache.Get("spice", "flow").Value(), "a value") 43 | } 44 | 45 | func Test_SecondaryCache_DeletesAreReflectedInBothCaches(t *testing.T) { 46 | cache := newLayered[string]() 47 | 48 | cache.Set("spice", "flow", "a value", time.Minute) 49 | cache.Set("spice", "sister", "ghanima", time.Minute) 50 | sCache := cache.GetOrCreateSecondaryCache("spice") 51 | 52 | cache.Delete("spice", "flow") 53 | assert.Equal(t, cache.Get("spice", "flow"), nil) 54 | assert.Equal(t, sCache.Get("flow"), nil) 55 | 56 | sCache.Delete("sister") 57 | assert.Equal(t, cache.Get("spice", "sister"), nil) 58 | assert.Equal(t, sCache.Get("sister"), nil) 59 | } 60 
| 61 | func Test_SecondaryCache_ReplaceDoesNothingIfKeyDoesNotExist(t *testing.T) { 62 | cache := newLayered[string]() 63 | 64 | sCache := cache.GetOrCreateSecondaryCache("spice") 65 | assert.Equal(t, sCache.Replace("flow", "value-a"), false) 66 | assert.Equal(t, cache.Get("spice", "flow"), nil) 67 | } 68 | 69 | func Test_SecondaryCache_ReplaceUpdatesTheValue(t *testing.T) { 70 | cache := newLayered[string]() 71 | 72 | cache.Set("spice", "flow", "value-a", time.Minute) 73 | sCache := cache.GetOrCreateSecondaryCache("spice") 74 | assert.Equal(t, sCache.Replace("flow", "value-b"), true) 75 | assert.Equal(t, cache.Get("spice", "flow").Value(), "value-b") 76 | } 77 | 78 | func Test_SecondaryCache_FetchReturnsAnExistingValue(t *testing.T) { 79 | cache := newLayered[string]() 80 | 81 | cache.Set("spice", "flow", "value-a", time.Minute) 82 | sCache := cache.GetOrCreateSecondaryCache("spice") 83 | val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil }) 84 | assert.Equal(t, val.Value(), "value-a") 85 | } 86 | 87 | func Test_SecondaryCache_FetchReturnsANewValue(t *testing.T) { 88 | cache := newLayered[string]() 89 | 90 | sCache := cache.GetOrCreateSecondaryCache("spice") 91 | val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil }) 92 | assert.Equal(t, val.Value(), "a fetched value") 93 | } 94 | 95 | func Test_SecondaryCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) { 96 | cache := Layered(Configure[int]().MaxSize(10).PercentToPrune(10).Track()) 97 | 98 | for i := 0; i < 10; i++ { 99 | cache.Set(strconv.Itoa(i), "a", i, time.Minute) 100 | } 101 | sCache := cache.GetOrCreateSecondaryCache("0") 102 | 103 | item := sCache.TrackingGet("a") 104 | time.Sleep(time.Millisecond * 10) 105 | 106 | cache.GC() 107 | assert.Equal(t, cache.Get("0", "a").Value(), 0) 108 | assert.Equal(t, cache.Get("1", "a"), nil) 109 | 110 | item.Release() 111 | for i := 1; i < 5; i++ { 112 | cache.Set(strconv.Itoa(i+20), "a", i, time.Minute) 113 | } 114 | cache.GC() 115 | assert.Equal(t, cache.Get("0", "a"), nil) 116 | } 117 | --------------------------------------------------------------------------------