├── .codecov.yml ├── .github └── workflows │ └── go.yaml ├── .gitignore ├── .golangci.yaml ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.md ├── benchmarks └── results │ ├── ds1.png │ ├── mix.png │ ├── oltp.png │ ├── s3.png │ ├── twitter-c52s10.png │ ├── wikicdn.png │ └── zipf.png ├── builder.go ├── builder_strkey.go ├── builder_strkey_go124.go ├── builder_test.go ├── cache.go ├── cache_correctness_test.go ├── cache_test.go ├── generic_test.go ├── go.mod ├── go.sum ├── internal ├── bf │ ├── bf.go │ └── bf_test.go ├── buffer.go ├── clock │ ├── clock.go │ └── clock_test.go ├── counter.go ├── entry.go ├── hasher │ ├── hasher.go │ ├── hasher_test.go │ ├── maphash_hasher.go │ └── maphash_hasher_test.go ├── list.go ├── list_test.go ├── persistence.go ├── persistence_test.go ├── policy_bench_test.go ├── policy_flag.go ├── policy_flag_test.go ├── rbmutex.go ├── secondary_cache.go ├── singleflight.go ├── singleflight_test.go ├── sketch.go ├── sketch_test.go ├── slru.go ├── stats.go ├── store.go ├── store_test.go ├── timerwheel.go ├── timerwheel_test.go ├── tlfu.go ├── tlfu_test.go ├── utils.go ├── utils_test.go ├── utils_test_go124.go └── xruntime │ ├── rand.go │ ├── rand_1.22.go │ └── xruntime.go ├── loading_cache_test.go ├── otest ├── persistence_test.go ├── run └── main.go ├── secondary_cache_test.go └── stats_test.go /.codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | target: 85% # the required coverage value 6 | threshold: 5% # the leniency in hitting the target 7 | 8 | ignore: 9 | - "internal/nvm/preallocate" -------------------------------------------------------------------------------- /.github/workflows/go.yaml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: ['*', '*/*'] 6 | tags: ['v*'] 7 | pull_request: 8 | branches: ['*'] 9 | 10 | permissions: 11 | contents: read 12 | 13 | 
jobs: 14 | lint: 15 | name: lint 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version: stable 22 | - name: golangci-lint 23 | uses: golangci/golangci-lint-action@v6 24 | with: 25 | version: v1.64 26 | test: 27 | name: test 28 | strategy: 29 | matrix: 30 | go: ["1.23.x", "1.24.x"] 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Setup Go 34 | with: 35 | go-version: ${{ matrix.go }} 36 | uses: actions/setup-go@v2 37 | 38 | - uses: actions/checkout@v2 39 | 40 | - name: Test 41 | run: make cover 42 | 43 | - name: Upload coverage to codecov.io 44 | uses: codecov/codecov-action@v3 45 | 46 | test-correctness-nopool: 47 | name: test-correctness-nopool 48 | strategy: 49 | matrix: 50 | go: ["1.23.x", "1.24.x"] 51 | runs-on: ubuntu-latest 52 | steps: 53 | - name: Setup Go 54 | with: 55 | go-version: ${{ matrix.go }} 56 | uses: actions/setup-go@v2 57 | 58 | - uses: actions/checkout@v2 59 | 60 | - name: Test 61 | run: go test ./... -run=TestCacheCorrectness_NoPool -count=1 -race 62 | 63 | test-correctness-pool: 64 | name: test-correctness-pool 65 | strategy: 66 | matrix: 67 | go: ["1.23.x", "1.24.x"] 68 | runs-on: ubuntu-latest 69 | steps: 70 | - name: Setup Go 71 | with: 72 | go-version: ${{ matrix.go }} 73 | uses: actions/setup-go@v2 74 | 75 | - uses: actions/checkout@v2 76 | 77 | - name: Test 78 | run: go test ./... -run=TestCacheCorrectness_EntryPool -count=1 79 | 80 | test-os: 81 | name: test-os 82 | strategy: 83 | matrix: 84 | go: ["1.23.x", "1.24.x"] 85 | os: [macos-latest, windows-latest, ubuntu-latest] 86 | runs-on: ${{ matrix.os }} 87 | steps: 88 | - name: Setup Go 89 | with: 90 | go-version: ${{ matrix.go }} 91 | uses: actions/setup-go@v2 92 | 93 | - uses: actions/checkout@v2 94 | 95 | - name: Test 96 | run: | 97 | go test ./... 
-run=TestPersist_OS 98 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.toptal.com/developers/gitignore/api/go,goland 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=go,goland 4 | 5 | ### Go ### 6 | # If you prefer the allow list template instead of the deny list, see community template: 7 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 8 | # 9 | # Binaries for programs and plugins 10 | *.exe 11 | *.exe~ 12 | *.dll 13 | *.so 14 | *.dylib 15 | 16 | # Test binary, built with `go test -c` 17 | *.test 18 | 19 | # Output of the go coverage tool, specifically when used with LiteIDE 20 | *.out 21 | 22 | # Dependency directories (remove the comment below to include it) 23 | # vendor/ 24 | 25 | # Go workspace file 26 | go.work 27 | 28 | ### Go Patch ### 29 | /vendor/ 30 | /Godeps/ 31 | 32 | ### trace ### 33 | /benchmarks/trace/ 34 | 35 | ### cover report ### 36 | cover.html 37 | 38 | ### Goland ### 39 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm, Rider and Goland 40 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 41 | 42 | # User-specific stuff 43 | .idea/**/workspace.xml 44 | .idea/**/tasks.xml 45 | .idea/**/usage.statistics.xml 46 | .idea/**/dictionaries 47 | .idea/**/shelf 48 | 49 | # AWS User-specific 50 | .idea/**/aws.xml 51 | 52 | # Generated files 53 | .idea/**/contentModel.xml 54 | 55 | # Sensitive or high-churn files 56 | .idea/**/dataSources/ 57 | .idea/**/dataSources.ids 58 | .idea/**/dataSources.local.xml 59 | .idea/**/sqlDataSources.xml 60 | .idea/**/dynamic.xml 61 | .idea/**/uiDesigner.xml 62 | .idea/**/dbnavigator.xml 63 | 64 | # Gradle 65 | .idea/**/gradle.xml 66 | .idea/**/libraries 67 | 68 | # Gradle and Maven with auto-import 69 | # When 
using Gradle or Maven with auto-import, you should exclude module files, 70 | # since they will be recreated, and may cause churn. Uncomment if using 71 | # auto-import. 72 | # .idea/artifacts 73 | # .idea/compiler.xml 74 | # .idea/jarRepositories.xml 75 | # .idea/modules.xml 76 | # .idea/*.iml 77 | # .idea/modules 78 | # *.iml 79 | # *.ipr 80 | 81 | # CMake 82 | cmake-build-*/ 83 | 84 | # Mongo Explorer plugin 85 | .idea/**/mongoSettings.xml 86 | 87 | # File-based project format 88 | *.iws 89 | 90 | # IntelliJ 91 | out/ 92 | 93 | # mpeltonen/sbt-idea plugin 94 | .idea_modules/ 95 | 96 | # JIRA plugin 97 | atlassian-ide-plugin.xml 98 | 99 | # Cursive Clojure plugin 100 | .idea/replstate.xml 101 | 102 | # Crashlytics plugin (for Android Studio and IntelliJ) 103 | com_crashlytics_export_strings.xml 104 | crashlytics.properties 105 | crashlytics-build.properties 106 | fabric.properties 107 | 108 | # Editor-based Rest Client 109 | .idea/httpRequests 110 | 111 | # Ignores the whole .idea folder and all .iml files 112 | .idea/ 113 | 114 | # Android studio 3.1+ serialized cache file 115 | .idea/caches/build_file_checksums.ser 116 | 117 | # End of https://www.toptal.com/developers/gitignore/api/go,goland -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 10m 3 | concurrency: 10 4 | allow-parallel-runners: true 5 | linters-settings: 6 | exhaustive: 7 | default-signifies-exhaustive: true 8 | revive: 9 | ignore-generated-header: false 10 | severity: warning 11 | confidence: 3 12 | gocritic: 13 | enabled-checks: 14 | - ruleguard 15 | settings: 16 | ruleguard: 17 | rules: pkg/ruleguard.rules.go 18 | nakedret: 19 | max-func-lines: 60 20 | linters: 21 | disable-all: true 22 | enable: 23 | - asciicheck 24 | - bodyclose 25 | - dogsled 26 | - errcheck 27 | - errorlint 28 | - exhaustive 29 | - goimports 30 | - goprintffuncname 31 | - 
gosec 32 | - gosimple 33 | - govet 34 | - ineffassign 35 | - misspell 36 | - nakedret 37 | - prealloc 38 | - revive 39 | - staticcheck 40 | - unconvert 41 | - unused 42 | - whitespace 43 | issues: 44 | exclude-rules: 45 | - linters: 46 | - gosec 47 | text: '404' 48 | - linters: 49 | - gosec 50 | text: G115 51 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 0.6.1 (2025-03-02) 2 | 3 | ### API Changes: 4 | - the `StringKey` method is marked as deprecated starting from Go 1.24. See https://github.com/Yiling-J/theine-go/pull/61 for more details. 5 | 6 | ### Enhancements: 7 | - Fix data race after `Close`: https://github.com/Yiling-J/theine-go/pull/60 8 | - Use `maphash.Comparable` as the hash function for Go 1.24 onward https://github.com/Yiling-J/theine-go/pull/61 9 | 10 | ## 0.6.0 (2024-10-29) 11 | 12 | ### API Changes: 13 | - Added a new option, `UseEntryPool`, to the builder, which defaults to false. Enabling this option will reuse evicted entries through a sync pool. The sync pool was used by default before v0.6.0 and could not be turned off; but it only benefits systems optimized for allocation and those with heavy concurrent writes. See the README for more details. 14 | 15 | ### Enhancements: 16 | - Theine now uses a single LRU window as the "W" part of W-TinyLFU, adaptively changing its size based on hit ratio. This approach is consistent with Caffeine and should improve hit ratios across various workloads. 17 | 18 | ## 0.5.0 (2024-10-10) 19 | 20 | ### API Changes: 21 | - The NVM secondary cache has been moved to a separate package: https://github.com/Yiling-J/theine-nvm. 22 | 23 | ### Enhancements: 24 | - Reduced `Set` allocations, making Theine zero allocation (amortized). 25 | - Improved read performance slightly by utilizing a cached `now` value. 
26 | - Fixed race conditions in cost (weight) updates that could cause inaccurate policy cost. 27 | - Added benchmarks for different `GOMAXPROC` values in the README. 28 | 29 | ## 0.4.1 (2024-08-22) 30 | 31 | ### Enhancements: 32 | * Use x/sys/cpu cacheline size by @Yiling-J in https://github.com/Yiling-J/theine-go/pull/43 33 | * Add Size method on cache by @nlachfr in https://github.com/Yiling-J/theine-go/pull/41 34 | * Accurate hits/misses counter by @Yiling-J in https://github.com/Yiling-J/theine-go/pull/44 35 | * Add stats API by @Yiling-J in https://github.com/Yiling-J/theine-go/pull/45 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Yiling-J 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: test test-correct-pool test-correct-nopool testx lint cover 2 | 3 | test: 4 | go test -race -skip=TestCacheCorrectness_ ./... 5 | 6 | test-correct-pool: 7 | go test ./... -run=TestCacheCorrectness_EntryPool -count=1 8 | 9 | test-correct-nopool: 10 | go test ./... -run=TestCacheCorrectness_NoPool -count=1 -race 11 | 12 | testx: 13 | go test ./... -v -failfast 14 | 15 | lint: 16 | golangci-lint run 17 | 18 | cover: 19 | go test -timeout 2000s -race -coverprofile=cover.out -coverpkg=./... -skip=TestCacheCorrectness_ ./... 20 | go tool cover -html=cover.out -o cover.html 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Theine 2 | [![Go Reference](https://pkg.go.dev/badge/github.com/Yiling-J/theine-go.svg)](https://pkg.go.dev/github.com/Yiling-J/theine-go) 3 | [![codecov](https://codecov.io/gh/Yiling-J/theine-go/branch/main/graph/badge.svg?token=E1HJLJH07V)](https://codecov.io/gh/Yiling-J/theine-go) 4 | 5 | High performance in-memory & hybrid cache inspired by [Caffeine](https://github.com/ben-manes/caffeine). 
6 | 7 | 8 | - Good performance 9 | - Support for Generics 10 | - High hit ratio with adaptive [W-TinyLFU](https://arxiv.org/pdf/1512.00727.pdf) eviction policy 11 | - Expired data are removed automatically using [hierarchical timer wheel](http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf) 12 | - Simple API 13 | 14 | ## Table of Contents 15 | 16 | - [Requirements](#requirements) 17 | - [Installation](#installation) 18 | - [API](#api) 19 | - [Cache Persistence](#cache-persistence) 20 | - [Benchmarks](#benchmarks) 21 | * [throughput](#throughput) 22 | * [hit ratios](#hit-ratios) 23 | - [Secondary Cache(Experimental)](#secondary-cacheexperimental) 24 | - [Support](#support) 25 | 26 | ## Requirements 27 | Go 1.19+ 28 | 29 | ## Installation 30 | ``` 31 | go get github.com/Yiling-J/theine-go 32 | ``` 33 | 34 | ## API 35 | 36 | **Builder API** 37 | 38 | Theine provides two types of client, simple cache and loading cache. Both of them are initialized from a builder. The difference between simple cache and loading cache is: loading cache's Get method will compute the value using loader function when there is a miss, while simple cache client only return false and do nothing. 39 | 40 | Loading cache uses [singleflight](https://pkg.go.dev/golang.org/x/sync/singleflight) to prevent concurrent loading to same key(thundering herd). 41 | 42 | **Entry Pool** 43 | 44 | Theine stores `*Entry` as the value in the hashmap, where each entry contains key, value, and metadata related to the policy. Before v0.6.0, Theine used a `sync pool` to automatically reuse evicted entries. This approach was beneficial for scenarios with heavy concurrent writes. If your system is already optimized for allocation, it should significantly reduce memory allocations. However, if cache writes are rare and GC is triggered often by other parts of your system, the sync pool becomes nearly useless. 45 | 46 | And sync pool had a potential drawback: **race conditions within the policy**. 
Theine sends events to the policy asynchronously via channels/buffers, so there was a small chance that an event could be applied to the wrong entry if the entry was evicted and then reused by the pool. 47 | 48 | To mitigate this, Theine rechecks the key first when updating the policy, but this behavior might be flagged by the race detector. Starting from v0.6.0, Theine introduced a new option called `UseEntryPool`, which defaults to `false`. If you are dealing with heavy concurrent writes and minimizing allocations is crucial, you can enable this option. 49 | 50 | **API Details** 51 | 52 | simple cache: 53 | 54 | ```GO 55 | import "github.com/Yiling-J/theine-go" 56 | 57 | // key type string, value type string, max size 1000 58 | // max size is the only required configuration to build a client 59 | client, err := theine.NewBuilder[string, string](1000).Build() 60 | if err != nil { 61 | panic(err) 62 | } 63 | 64 | // builder also provide several optional configurations 65 | // you can chain them together and call build once 66 | // client, err := theine.NewBuilder[string, string](1000).Cost(...).Doorkeeper(...).Build() 67 | 68 | // or create builder first 69 | builder := theine.NewBuilder[string, string](1000) 70 | 71 | // dynamic cost function based on value 72 | // use 0 in Set will call this function to evaluate cost at runtime 73 | builder.Cost(func(v string) int64 { 74 | return int64(len(v)) 75 | }) 76 | 77 | // enable entryPool (default false) 78 | builder.UseEntryPool(true) 79 | 80 | // doorkeeper (default false) 81 | // doorkeeper will drop Set if they are not in bloomfilter yet 82 | // this can improve write performance, but may lower hit ratio 83 | builder.Doorkeeper(true) 84 | 85 | // removal listener, this function will be called when entry is removed 86 | // RemoveReason could be REMOVED/EVICTED/EXPIRED 87 | // REMOVED: remove by API 88 | // EVICTED: evicted by Window-TinyLFU policy 89 | // EXPIRED: expired by timing wheel 90 | 
builder.RemovalListener(func(key K, value V, reason theine.RemoveReason) {}) 91 | 92 | ``` 93 | loading cache: 94 | 95 | ```go 96 | import "github.com/Yiling-J/theine-go" 97 | 98 | // loader function: func(ctx context.Context, key K) (theine.Loaded[V], error) 99 | // Loaded struct should include cache value, cost and ttl, which required by Set method 100 | client, err := theine.NewBuilder[string, string](1000).Loading( 101 | func(ctx context.Context, key string) (theine.Loaded[string], error) { 102 | return theine.Loaded[string]{Value: key, Cost: 1, TTL: 0}, nil 103 | }, 104 | ).Build() 105 | if err != nil { 106 | panic(err) 107 | } 108 | 109 | ``` 110 | Other builder options are same as simple cache(cost, doorkeeper, removal listener). 111 | 112 | 113 | **Client API** 114 | 115 | ```Go 116 | // set, key foo, value bar, cost 1 117 | // success will be false if cost > max size 118 | success := client.Set("foo", "bar", 1) 119 | // cost 0 means using dynamic cost function 120 | // success := client.Set("foo", "bar", 0) 121 | 122 | // set with ttl 123 | success = client.SetWithTTL("foo", "bar", 1, 1*time.Second) 124 | 125 | // get(simple cache version) 126 | value, ok := client.Get("foo") 127 | 128 | // get(loading cache version) 129 | value, err := client.Get(ctx, "foo") 130 | 131 | // remove 132 | client.Delete("foo") 133 | 134 | // iterate key/value in cache and apply custom function 135 | // if function returns false, range stops the iteration 136 | client.Range(func(key, value int) bool { 137 | return true 138 | }) 139 | 140 | // returns an estimation of the cache size usage 141 | client.EstimatedSize() 142 | 143 | // get cache stats(in-memory cache only), include hits, misses and hit ratio 144 | client.Stats() 145 | 146 | // close client, set hashmaps in shard to nil and close all goroutines 147 | client.Close() 148 | 149 | ``` 150 | 151 | ## Cache Persistence 152 | Theine supports persisting the cache into `io.Writer` and restoring from `io.Reader`. 
[Gob](https://pkg.go.dev/encoding/gob) is used to encode/decode data, so **make sure your key/value can be encoded by gob correctly first** before using this feature. 153 | 154 | #### API 155 | ```go 156 | func (c *Cache[K, V]) SaveCache(version uint64, writer io.Writer) error 157 | func (c *Cache[K, V]) LoadCache(version uint64, reader io.Reader) error 158 | ``` 159 | **- Important:** please `LoadCache` immediately after client created, or existing entries' TTL might be affected. 160 | 161 | #### Example: 162 | ```go 163 | // save 164 | f, err := os.Create("test") 165 | err := client.SaveCache(0, f) 166 | f.Close() 167 | 168 | // load 169 | f, err = os.Open("test") 170 | require.Nil(t, err) 171 | newClient, err := theine.NewBuilder[int, int](100).Build() 172 | // load immediately after client created 173 | err = newClient.LoadCache(0, f) 174 | f.Close() 175 | ``` 176 | Version number must be same when saving and loading, or `LoadCache` will return `theine.VersionMismatch` error. You can change the version number when you want to ignore persisted cache. 177 | ```go 178 | err := newClient.LoadCache(1, f) 179 | // VersionMismatch is a global variable 180 | if err == theine.VersionMismatch { 181 | // ignore and skip loading 182 | } else if err != nil { 183 | // panic error 184 | } 185 | ``` 186 | 187 | Theine does not guarantee that caches from previous versions will be compatible after an upgrade, so it is highly recommended not to use the old persistence file when upgrading Theine. 188 | 189 | #### Details 190 | When persisting cache, Theine roughly do: 191 | - Store version number. 192 | - Store clock(used in TTL). 193 | - Store frequency sketch. 194 | - Store entries one by one in protected LRU in most-recently:least-recently order. 195 | - Store entries one by one in probation LRU in most-recently:least-recently order. 196 | - Loop shards and store entries one by one in each shard deque. 
197 | 198 | When loading cache, Theine roughly do: 199 | - Load version number, compare to current version number. 200 | - Load clock. 201 | - Load frequency sketch. 202 | - Load protected LRU and insert entries back to new protected LRU and shards/timingwheel, expired entries will be ignored. Because cache capacity may change, this step will stop if max protected LRU size reached. 203 | - Load probation LRU and insert entries back to new probation LRU and shards/timingwheel, expired entries will be ignored, Because cache capacity may change, this step will stop if max probation LRU size reached. 204 | - Load deque entries and insert back to shards, expired entries will be ignored. 205 | 206 | Theine will save checksum when persisting cache and verify checksum first when loading. 207 | 208 | ## Benchmarks 209 | 210 | Source: https://github.com/maypok86/benchmarks 211 | 212 | ### throughput 213 | 214 | 100% read (cpu 8/16/32) 215 | 216 | ``` 217 | goos: linux 218 | goarch: amd64 219 | pkg: github.com/maypok86/benchmarks/throughput 220 | cpu: Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz 221 | 222 | BenchmarkCache/zipf_otter_reads=100%,writes=0%-8 88954334 14.78 ns/op 67648151 ops/s 223 | BenchmarkCache/zipf_theine_reads=100%,writes=0%-8 51908306 21.87 ns/op 45729075 ops/s 224 | BenchmarkCache/zipf_ristretto_reads=100%,writes=0%-8 27217994 42.36 ns/op 23606992 ops/s 225 | 226 | BenchmarkCache/zipf_otter_reads=100%,writes=0%-16 132372591 8.397 ns/op 119086508 ops/s 227 | BenchmarkCache/zipf_theine_reads=100%,writes=0%-16 85420364 13.78 ns/op 72549558 ops/s 228 | BenchmarkCache/zipf_ristretto_reads=100%,writes=0%-16 47790158 25.17 ns/op 39734070 ops/s 229 | 230 | BenchmarkCache/zipf_otter_reads=100%,writes=0%-32 174121321 7.078 ns/op 141273879 ops/s 231 | BenchmarkCache/zipf_theine_reads=100%,writes=0%-32 118185849 10.45 ns/op 95703790 ops/s 232 | BenchmarkCache/zipf_ristretto_reads=100%,writes=0%-32 66458452 18.85 ns/op 53055079 ops/s 233 | 234 | ``` 235 | 236 | 75% 
read (cpu 8/16/32) 237 | ``` 238 | goos: linux 239 | goarch: amd64 240 | pkg: github.com/maypok86/benchmarks/throughput 241 | cpu: Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz 242 | 243 | BenchmarkCache/zipf_otter_reads=75%,writes=25%-8 49907841 32.67 ns/op 30609572 ops/s 244 | BenchmarkCache/zipf_theine_reads=75%,writes=25%-8 21484245 48.89 ns/op 20453469 ops/s 245 | BenchmarkCache/zipf_ristretto_reads=75%,writes=25%-8 8651056 130.5 ns/op 7664450 ops/s 246 | 247 | BenchmarkCache/zipf_otter_reads=75%,writes=25%-16 50226466 21.85 ns/op 45764160 ops/s 248 | BenchmarkCache/zipf_theine_reads=75%,writes=25%-16 46674459 24.68 ns/op 40523215 ops/s 249 | BenchmarkCache/zipf_ristretto_reads=75%,writes=25%-16 10233784 108.0 ns/op 9262524 ops/s 250 | 251 | BenchmarkCache/zipf_otter_reads=75%,writes=25%-32 89651678 11.96 ns/op 83606257 ops/s 252 | BenchmarkCache/zipf_theine_reads=75%,writes=25%-32 75969892 15.53 ns/op 64394679 ops/s 253 | BenchmarkCache/zipf_ristretto_reads=75%,writes=25%-32 15766912 76.37 ns/op 13093551 ops/s 254 | 255 | ``` 256 | 257 | 258 | 100% write (cpu 8/16/32) 259 | 260 | ``` 261 | goos: linux 262 | goarch: amd64 263 | pkg: github.com/maypok86/benchmarks/throughput 264 | cpu: Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz 265 | 266 | BenchmarkCache/zipf_otter_reads=0%,writes=100%-8 1567917 723.0 ns/op 1383080 ops/s 267 | BenchmarkCache/zipf_theine_reads=0%,writes=100%-8 2194747 542.4 ns/op 1843615 ops/s 268 | BenchmarkCache/zipf_ristretto_reads=0%,writes=100%-8 1839237 642.5 ns/op 1556503 ops/s 269 | 270 | BenchmarkCache/zipf_otter_reads=0%,writes=100%-16 1384345 846.0 ns/op 1181980 ops/s 271 | BenchmarkCache/zipf_theine_reads=0%,writes=100%-16 1915946 528.8 ns/op 1891008 ops/s 272 | BenchmarkCache/zipf_ristretto_reads=0%,writes=100%-16 1765465 697.3 ns/op 1434089 ops/s 273 | 274 | BenchmarkCache/zipf_otter_reads=0%,writes=100%-32 1265883 979.8 ns/op 1020607 ops/s 275 | BenchmarkCache/zipf_theine_reads=0%,writes=100%-32 1953358 526.1 ns/op 1900935 ops/s 
276 | BenchmarkCache/zipf_ristretto_reads=0%,writes=100%-32 1618098 696.1 ns/op 1436625 ops/s 277 | ``` 278 | 279 | benchmem 100% write (cpu 32) 280 | ``` 281 | goos: linux 282 | goarch: amd64 283 | pkg: github.com/maypok86/benchmarks/throughput 284 | cpu: Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz 285 | 286 | BenchmarkCache/zipf_otter_reads=0%,writes=100%-32 80 B/op 1 allocs/op 287 | BenchmarkCache/zipf_theine_reads=0%,writes=100%-32 0 B/op 0 allocs/op 288 | BenchmarkCache/zipf_ristretto_reads=0%,writes=100%-32 112 B/op 3 allocs/op 289 | 290 | ``` 291 | 292 | ### hit ratios 293 | 294 | **zipf** 295 | 296 | ![hit ratios](benchmarks/results/zipf.png) 297 | **s3** 298 | 299 | ![hit ratios](benchmarks/results/s3.png) 300 | **ds1** 301 | 302 | ![hit ratios](benchmarks/results/ds1.png) 303 | **oltp** 304 | 305 | ![hit ratios](benchmarks/results/oltp.png) 306 | **wiki CDN** 307 | 308 | ![hit ratios](benchmarks/results/wikicdn.png) 309 | **Twitter Cache** 310 | 311 | ![hit ratios](benchmarks/results/twitter-c52s10.png) 312 | 313 | 314 | ## Secondary Cache(Experimental) 315 | 316 | SecondaryCache is the interface for caching data on a secondary tier, which can be a non-volatile media or alternate forms of caching such as sqlite. The purpose of the secondary cache is to support other ways of caching the object. It can be viewed as an extension of Theine’s current in-memory cache. 317 | 318 | Currently, the SecondaryCache interface has one implementation inspired by CacheLib's Hybrid Cache. 319 | 320 | ```go 321 | type SecondaryCache[K comparable, V any] interface { 322 | Get(key K) (value V, cost int64, expire int64, ok bool, err error) 323 | Set(key K, value V, cost int64, expire int64) error 324 | Delete(key K) error 325 | HandleAsyncError(err error) 326 | } 327 | ``` 328 | 329 | If you plan to use a remote cache or database, such as Redis, as a secondary cache, keep in mind that the in-memory cache remains the primary source of truth. 
Evicted entries from memory are sent to the secondary cache. This approach differs from most tiered cache systems, where the remote cache is treated as the primary source of truth and is written to first. 330 | 331 | #### Secondary Cache Implementations 332 | NVM: https://github.com/Yiling-J/theine-nvm 333 | 334 | #### Limitations 335 | - Cache Persistence is not currently supported, but it may be added in the future. You can still use the Persistence API in a hybrid-enabled cache, but only the DRAM part of the cache will be saved or loaded. 336 | - The removal listener will only receive REMOVED events, which are generated when an entry is explicitly removed by calling the Delete API. 337 | - No Range/Len API. 338 | 339 | 340 | ## Support 341 | Feel free to open an issue or ask question in discussions. 342 | -------------------------------------------------------------------------------- /benchmarks/results/ds1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/ds1.png -------------------------------------------------------------------------------- /benchmarks/results/mix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/mix.png -------------------------------------------------------------------------------- /benchmarks/results/oltp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/oltp.png -------------------------------------------------------------------------------- /benchmarks/results/s3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/s3.png -------------------------------------------------------------------------------- /benchmarks/results/twitter-c52s10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/twitter-c52s10.png -------------------------------------------------------------------------------- /benchmarks/results/wikicdn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/wikicdn.png -------------------------------------------------------------------------------- /benchmarks/results/zipf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/benchmarks/results/zipf.png -------------------------------------------------------------------------------- /builder.go: -------------------------------------------------------------------------------- 1 | package theine 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/Yiling-J/theine-go/internal" 8 | ) 9 | 10 | func validateBuilder[K comparable, V any](options internal.StoreOptions[K, V]) error { 11 | if options.MaxSize <= 0 { 12 | return errors.New("size must be positive") 13 | } 14 | return nil 15 | } 16 | 17 | func validateLoadingBuilder[K comparable, V any](options internal.StoreOptions[K, V]) error { 18 | if err := validateBuilder(options); err != nil { 19 | return err 20 | } 21 | if options.Loader == nil { 22 | return errors.New("loader function required") 23 | } 24 | return nil 25 | } 26 | 27 | func validateHybridBuilder[K comparable, V any](options internal.StoreOptions[K, V]) error { 28 | if err := 
validateBuilder(options); err != nil { 29 | return err 30 | } 31 | if options.SecondaryCache == nil { 32 | return errors.New("secondary cache required") 33 | } 34 | if options.Workers <= 0 { 35 | return errors.New("workers must be positive") 36 | } 37 | return nil 38 | } 39 | 40 | func validateLoadingHybridBuilder[K comparable, V any](options internal.StoreOptions[K, V]) error { 41 | if err := validateLoadingBuilder(options); err != nil { 42 | return err 43 | } 44 | if err := validateHybridBuilder(options); err != nil { 45 | return err 46 | } 47 | return nil 48 | } 49 | 50 | type Builder[K comparable, V any] struct { 51 | options internal.StoreOptions[K, V] 52 | } 53 | 54 | func NewBuilder[K comparable, V any](maxsize int64) *Builder[K, V] { 55 | b := &Builder[K, V]{} 56 | b.options.MaxSize = maxsize 57 | return b 58 | } 59 | 60 | // Cost adds dynamic cost function to builder. 61 | // There is a default cost function which always return 1. 62 | func (b *Builder[K, V]) Cost(cost func(v V) int64) *Builder[K, V] { 63 | b.options.Cost = cost 64 | return b 65 | } 66 | 67 | // Doorkeeper enables/disables doorkeeper. 68 | // Doorkeeper will drop Set if they are not in bloomfilter yet. 69 | func (b *Builder[K, V]) Doorkeeper(enabled bool) *Builder[K, V] { 70 | b.options.Doorkeeper = enabled 71 | return b 72 | } 73 | 74 | // UseEntryPool enables/disables reusing evicted entries through a sync pool. 75 | // This can significantly reduce memory allocation under heavy concurrent writes, 76 | // but it may lead to occasional race conditions. Theine updates its policy asynchronously, 77 | // so when an Update event is processed, the corresponding entry might have already been reused. 78 | // Theine will compare the key again, but this does not completely eliminate the risk of a race. 
79 | func (b *Builder[K, V]) UseEntryPool(enabled bool) *Builder[K, V] { 80 | b.options.EntryPool = enabled 81 | return b 82 | } 83 | 84 | // RemovalListener adds remove callback function to builder. 85 | // This function is called when entry in cache is evicted/expired/deleted. 86 | func (b *Builder[K, V]) RemovalListener(listener func(key K, value V, reason RemoveReason)) *Builder[K, V] { 87 | b.options.Listener = listener 88 | return b 89 | } 90 | 91 | // Build builds a cache client from builder. 92 | func (b *Builder[K, V]) Build() (*Cache[K, V], error) { 93 | if err := validateBuilder(b.options); err != nil { 94 | return nil, err 95 | } 96 | store := internal.NewStore(&b.options) 97 | return &Cache[K, V]{store: store}, nil 98 | } 99 | 100 | // Add loading function and switch to LoadingBuilder. 101 | func (b *Builder[K, V]) Loading( 102 | loader func(ctx context.Context, key K) (Loaded[V], error), 103 | ) *LoadingBuilder[K, V] { 104 | if loader != nil { 105 | b.options.Loader = func(ctx context.Context, key K) (internal.Loaded[V], error) { 106 | v, err := loader(ctx, key) 107 | return internal.Loaded[V]{Value: v.Value, Cost: v.Cost, TTL: v.TTL}, err 108 | } 109 | } 110 | return &LoadingBuilder[K, V]{ 111 | options: b.options, 112 | } 113 | } 114 | 115 | // Add secondary cache and switch to HybridBuilder. 116 | func (b *Builder[K, V]) Hybrid(cache internal.SecondaryCache[K, V]) *HybridBuilder[K, V] { 117 | b.options.SecondaryCache = cache 118 | b.options.Workers = 2 119 | b.options.Probability = 1 120 | return &HybridBuilder[K, V]{ 121 | options: b.options, 122 | } 123 | } 124 | 125 | // BuildWithLoader builds a loading cache client from builder with custom loader function. 
126 | func (b *Builder[K, V]) BuildWithLoader(loader func(ctx context.Context, key K) (Loaded[V], error)) (*LoadingCache[K, V], error) { 127 | if b.options.MaxSize <= 0 { 128 | return nil, errors.New("size must be positive") 129 | } 130 | if loader == nil { 131 | return nil, errors.New("loader function required") 132 | } 133 | store := internal.NewStore(&b.options) 134 | loadingStore := internal.NewLoadingStore(store) 135 | loadingStore.Loader(func(ctx context.Context, key K) (internal.Loaded[V], error) { 136 | v, err := loader(ctx, key) 137 | return internal.Loaded[V]{Value: v.Value, Cost: v.Cost, TTL: v.TTL}, err 138 | }) 139 | return &LoadingCache[K, V]{store: loadingStore}, nil 140 | } 141 | 142 | type LoadingBuilder[K comparable, V any] struct { 143 | options internal.StoreOptions[K, V] 144 | } 145 | 146 | // Add secondary cache and switch to HybridLoadingBuilder. 147 | func (b *LoadingBuilder[K, V]) Hybrid(cache internal.SecondaryCache[K, V]) *HybridLoadingBuilder[K, V] { 148 | b.options.SecondaryCache = cache 149 | b.options.Workers = 2 150 | b.options.Probability = 1 151 | return &HybridLoadingBuilder[K, V]{ 152 | options: b.options, 153 | } 154 | } 155 | 156 | // Build builds a cache client from builder. 
157 | func (b *LoadingBuilder[K, V]) Build() (*LoadingCache[K, V], error) { 158 | if err := validateLoadingBuilder(b.options); err != nil { 159 | return nil, err 160 | } 161 | store := internal.NewStore(&b.options) 162 | loadingStore := internal.NewLoadingStore(store) 163 | loadingStore.Loader(func(ctx context.Context, key K) (internal.Loaded[V], error) { 164 | v, err := b.options.Loader(ctx, key) 165 | return internal.Loaded[V]{Value: v.Value, Cost: v.Cost, TTL: v.TTL}, err 166 | }) 167 | return &LoadingCache[K, V]{store: loadingStore}, nil 168 | } 169 | 170 | type HybridBuilder[K comparable, V any] struct { 171 | options internal.StoreOptions[K, V] 172 | } 173 | 174 | func (b *HybridBuilder[K, V]) validate() error { 175 | if b.options.SecondaryCache == nil { 176 | return errors.New("secondary cache required") 177 | } 178 | if b.options.Workers <= 0 { 179 | return errors.New("workers must be positive") 180 | } 181 | return nil 182 | } 183 | 184 | // Set secondary cache workers. 185 | // Worker will send evicted entries to secondary cache. 186 | func (b *HybridBuilder[K, V]) Workers(w int) *HybridBuilder[K, V] { 187 | b.options.Workers = w 188 | return b 189 | } 190 | 191 | // Set acceptance probability. The value has to be in the range of [0, 1]. 192 | func (b *HybridBuilder[K, V]) AdmProbability(p float32) *HybridBuilder[K, V] { 193 | b.options.Probability = p 194 | return b 195 | } 196 | 197 | // Add loading function and switch to HybridLoadingBuilder. 
198 | func (b *HybridBuilder[K, V]) Loading( 199 | loader func(ctx context.Context, key K) (Loaded[V], error), 200 | ) *HybridLoadingBuilder[K, V] { 201 | if loader != nil { 202 | b.options.Loader = func(ctx context.Context, key K) (internal.Loaded[V], error) { 203 | v, err := loader(ctx, key) 204 | return internal.Loaded[V]{Value: v.Value, Cost: v.Cost, TTL: v.TTL}, err 205 | } 206 | } 207 | return &HybridLoadingBuilder[K, V]{ 208 | options: b.options, 209 | } 210 | } 211 | 212 | // Build builds a cache client from builder. 213 | func (b *HybridBuilder[K, V]) Build() (*HybridCache[K, V], error) { 214 | if err := b.validate(); err != nil { 215 | return nil, err 216 | } 217 | store := internal.NewStore(&b.options) 218 | return &HybridCache[K, V]{store: store}, nil 219 | } 220 | 221 | type HybridLoadingBuilder[K comparable, V any] struct { 222 | options internal.StoreOptions[K, V] 223 | } 224 | 225 | // Build builds a cache client from builder. 226 | func (b *HybridLoadingBuilder[K, V]) Build() (*HybridLoadingCache[K, V], error) { 227 | if err := validateLoadingHybridBuilder(b.options); err != nil { 228 | return nil, err 229 | } 230 | store := internal.NewStore(&b.options) 231 | loadingStore := internal.NewLoadingStore(store) 232 | loadingStore.Loader(func(ctx context.Context, key K) (internal.Loaded[V], error) { 233 | v, err := b.options.Loader(ctx, key) 234 | return internal.Loaded[V]{Value: v.Value, Cost: v.Cost, TTL: v.TTL}, err 235 | }) 236 | return &HybridLoadingCache[K, V]{store: loadingStore}, nil 237 | } 238 | -------------------------------------------------------------------------------- /builder_strkey.go: -------------------------------------------------------------------------------- 1 | //go:build !go1.24 2 | // +build !go1.24 3 | 4 | package theine 5 | 6 | // StringKey add a custom key -> string method, the string will be used in shard hashing. 
func (b *Builder[K, V]) StringKey(fn func(k K) string) *Builder[K, V] {
	b.options.StringKeyFunc = fn
	return b
}

//go:build go1.24
// +build go1.24

package theine

// Deprecated: StringKey was used prior to Go 1.24 when Comparable was unavailable.
// With the introduction of Comparable, special handling for string keys is no longer necessary.
func (b *Builder[K, V]) StringKey(fn func(k K) string) *Builder[K, V] {
	b.options.StringKeyFunc = fn
	return b
}

package theine_test

import (
	"context"
	"reflect"
	"testing"

	"github.com/Yiling-J/theine-go"
	"github.com/Yiling-J/theine-go/internal"
	"github.com/stretchr/testify/require"
)

// TestBuilder walks every builder variant (simple, loading, hybrid, and both
// combinations of loading+hybrid), checking that invalid options error out and
// that each Build returns the expected cache type.
func TestBuilder(t *testing.T) {
	// simple cache: negative size must fail, valid options must succeed
	_, err := theine.NewBuilder[int, int](-500).Build()
	require.Error(t, err)
	builder := theine.NewBuilder[int, int](100)
	builder = builder.Cost(func(v int) int64 { return 1 })
	builder = builder.Doorkeeper(false).UseEntryPool(true)
	builder = builder.RemovalListener(func(key, value int, reason theine.RemoveReason) {})

	cache, err := builder.Build()
	require.Nil(t, err)
	require.Equal(t, reflect.TypeOf(&theine.Cache[int, int]{}), reflect.TypeOf(cache))

	// loading cache: nil loader must fail
	_, err = builder.Loading(nil).Build()
	require.Error(t, err)
	builderL := builder.Loading(func(ctx context.Context, key int) (theine.Loaded[int], error) {
		return theine.Loaded[int]{}, nil
	})
	cacheL, err := builderL.Build()
	require.Nil(t, err)
	require.Equal(t, reflect.TypeOf(&theine.LoadingCache[int, int]{}), reflect.TypeOf(cacheL))

	// hybrid cache: nil secondary and zero workers must fail
	_, err = builder.Hybrid(nil).Build()
	require.Error(t, err)
	secondary := internal.NewSimpleMapSecondary[int, int]()
	_, err = builder.Hybrid(secondary).Workers(0).Build()
	require.Error(t, err)
	builderH := builder.Hybrid(secondary).Workers(1).AdmProbability(0.8)
	cacheH, err := builderH.Build()
	require.Nil(t, err)
	require.Equal(t, reflect.TypeOf(&theine.HybridCache[int, int]{}), reflect.TypeOf(cacheH))

	// loading + hybrid
	builderLH := builderL.Hybrid(secondary)
	cacheLH, err := builderLH.Build()
	require.Nil(t, err)
	require.Equal(t, reflect.TypeOf(&theine.HybridLoadingCache[int, int]{}), reflect.TypeOf(cacheLH))

	// hybrid + loading (same target type, reached from the other direction)
	builderLH = builderH.Workers(8).Loading(
		func(ctx context.Context, key int) (theine.Loaded[int], error) {
			return theine.Loaded[int]{}, nil
		})
	cacheLH, err = builderLH.Build()
	require.Nil(t, err)
	require.Equal(t, reflect.TypeOf(&theine.HybridLoadingCache[int, int]{}), reflect.TypeOf(cacheLH))
}

package theine

import (
	"context"
	"io"
	"time"

	"github.com/Yiling-J/theine-go/internal"
)

const (
	ZERO_TTL = 0 * time.Second
)

var VersionMismatch = internal.VersionMismatch

// Re-exported internal types so callers only import the top-level package.
type RemoveReason = internal.RemoveReason
type DataBlock = internal.DataBlock[any]
type Stats = internal.Stats
type Loaded[V any] internal.Loaded[V]

const (
	REMOVED = internal.REMOVED
	EVICTED = internal.EVICTED
	EXPIRED = internal.EXPIRED
)

// Cache is the basic in-memory cache client; all operations delegate to the
// internal store.
type Cache[K comparable, V any] struct {
	store *internal.Store[K, V]
}

// Get gets
value by key.
func (c *Cache[K, V]) Get(key K) (V, bool) {
	return c.store.Get(key)
}

// SetWithTTL inserts or updates entry in cache with given ttl.
// Return false when cost > max size.
func (c *Cache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
	return c.store.Set(key, value, cost, ttl)
}

// Set inserts or updates entry in cache without expiry.
// Return false when cost > max size.
func (c *Cache[K, V]) Set(key K, value V, cost int64) bool {
	return c.SetWithTTL(key, value, cost, ZERO_TTL)
}

// Delete deletes key from cache.
func (c *Cache[K, V]) Delete(key K) {
	c.store.Delete(key)
}

// Range calls f sequentially for each key and value present in the cache.
// If f returns false, range stops the iteration.
func (c *Cache[K, V]) Range(f func(key K, value V) bool) {
	c.store.Range(f)
}

// Len returns number of entries in cache.
func (c *Cache[K, V]) Len() int {
	return c.store.Len()
}

// EstimatedSize returns an approximate used size of the cache.
func (c *Cache[K, V]) EstimatedSize() int {
	return c.store.EstimatedSize()
}

// Close closes all goroutines created by cache.
func (c *Cache[K, V]) Close() {
	c.store.Close()
}

// SaveCache save cache data to writer.
func (c *Cache[K, V]) SaveCache(version uint64, writer io.Writer) error {
	return c.store.Persist(version, writer)
}

// LoadCache load cache data from reader.
func (c *Cache[K, V]) LoadCache(version uint64, reader io.Reader) error {
	return c.store.Recover(version, reader)
}

// Stats returns cache stats.
func (c *Cache[K, V]) Stats() Stats {
	return c.store.Stats()
}

// Wait blocks until the write buffer is synced to the policy.
func (c *Cache[K, V]) Wait() {
	c.store.Wait()
}

// LoadingCache is a cache that loads missing values through a loader function
// on Get.
type LoadingCache[K comparable, V any] struct {
	store *internal.LoadingStore[K, V]
}

// Get gets value by key, loading it via the configured loader on a miss.
func (c *LoadingCache[K, V]) Get(ctx context.Context, key K) (V, error) {
	return c.store.Get(ctx, key)
}

// SetWithTTL inserts or updates entry in cache with given ttl.
// Return false when cost > max size.
func (c *LoadingCache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
	return c.store.Set(key, value, cost, ttl)
}

// Set inserts or updates entry in cache without expiry.
// Return false when cost > max size.
func (c *LoadingCache[K, V]) Set(key K, value V, cost int64) bool {
	return c.SetWithTTL(key, value, cost, ZERO_TTL)
}

// Delete deletes key from cache.
func (c *LoadingCache[K, V]) Delete(key K) {
	c.store.Delete(key)
}

// Range calls f sequentially for each key and value present in the cache.
// If f returns false, range stops the iteration.
func (c *LoadingCache[K, V]) Range(f func(key K, value V) bool) {
	c.store.Range(f)
}

// Len returns number of entries in cache.
func (c *LoadingCache[K, V]) Len() int {
	return c.store.Len()
}

// EstimatedSize returns an approximate used size of the cache.
func (c *LoadingCache[K, V]) EstimatedSize() int {
	return c.store.EstimatedSize()
}

// SaveCache save cache data to writer.
func (c *LoadingCache[K, V]) SaveCache(version uint64, writer io.Writer) error {
	return c.store.Persist(version, writer)
}

// LoadCache load cache data from reader.
func (c *LoadingCache[K, V]) LoadCache(version uint64, reader io.Reader) error {
	return c.store.Recover(version, reader)
}

// Stats returns cache stats.
func (c *LoadingCache[K, V]) Stats() Stats {
	return c.store.Stats()
}

// Wait blocks until the write buffer is synced to the policy.
func (c *LoadingCache[K, V]) Wait() {
	c.store.Wait()
}

// Close closes all goroutines created by cache.
func (c *LoadingCache[K, V]) Close() {
	c.store.Close()
}

// Serializer re-exports the internal serializer interface for secondary caches.
type Serializer[T any] interface {
	internal.Serializer[T]
}

// HybridCache backs the in-memory cache with a secondary cache.
type HybridCache[K comparable, V any] struct {
	store *internal.Store[K, V]
}

// Get gets value by key, falling back to the secondary cache.
// NOTE(review): "GetWithSecodary" is the internal API's spelling; fixing the
// typo would require a change in the internal package.
func (c *HybridCache[K, V]) Get(key K) (V, bool, error) {
	return c.store.GetWithSecodary(key)
}

// SetWithTTL inserts or updates entry in cache with given ttl.
// Return false when cost > max size.
func (c *HybridCache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
	return c.store.Set(key, value, cost, ttl)
}

// Set inserts or updates entry in cache without expiry.
// Return false when cost > max size.
func (c *HybridCache[K, V]) Set(key K, value V, cost int64) bool {
	return c.SetWithTTL(key, value, cost, ZERO_TTL)
}

// Delete deletes key from both the memory cache and the secondary cache.
func (c *HybridCache[K, V]) Delete(key K) error {
	return c.store.DeleteWithSecondary(key)
}

// SaveCache save cache data to writer.
func (c *HybridCache[K, V]) SaveCache(version uint64, writer io.Writer) error {
	return c.store.Persist(version, writer)
}

// LoadCache load cache data from reader.
func (c *HybridCache[K, V]) LoadCache(version uint64, reader io.Reader) error {
	return c.store.Recover(version, reader)
}

// Close is currently a no-op for HybridCache (the body is intentionally empty;
// the store is not closed here, unlike the other cache variants).
func (c *HybridCache[K, V]) Close() {
}

// HybridLoadingCache combines a loader function with a secondary cache.
type HybridLoadingCache[K comparable, V any] struct {
	store *internal.LoadingStore[K, V]
}

// Get gets value by key, loading it via the configured loader on a miss.
func (c *HybridLoadingCache[K, V]) Get(ctx context.Context, key K) (V, error) {
	return c.store.Get(ctx, key)
}

// SetWithTTL inserts or updates entry in cache with given ttl.
// Return false when cost > max size.
func (c *HybridLoadingCache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
	return c.store.Set(key, value, cost, ttl)
}

// Set inserts or updates entry in cache without expiry.
// Return false when cost > max size.
func (c *HybridLoadingCache[K, V]) Set(key K, value V, cost int64) bool {
	return c.SetWithTTL(key, value, cost, ZERO_TTL)
}

// Delete deletes key from both the memory cache and the secondary cache.
func (c *HybridLoadingCache[K, V]) Delete(key K) error {
	return c.store.DeleteWithSecondary(key)
}

// SaveCache save cache data to writer.
func (c *HybridLoadingCache[K, V]) SaveCache(version uint64, writer io.Writer) error {
	return c.store.Persist(version, writer)
}

// LoadCache load cache data from reader.
func (c *HybridLoadingCache[K, V]) LoadCache(version uint64, reader io.Reader) error {
	return c.store.Recover(version, reader)
}

// Close closes all goroutines created by cache.
func (c *HybridLoadingCache[K, V]) Close() {
	c.store.Close()
}

package theine

import (
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/Yiling-J/theine-go/internal"
	"github.com/stretchr/testify/require"
)

// keyGen produces a fixed-seed Zipf-distributed key stream (2<<16 keys) so the
// correctness runs are reproducible.
func keyGen() []uint64 {
	keys := []uint64{}
	r := rand.New(rand.NewSource(0))
	z := rand.NewZipf(r, 1.01, 9.0, 20000000)
	for i := 0; i < 2<<16; i++ {
		keys = append(keys, z.Uint64())
	}
	return keys
}

// getSet hammers the cache with 20 goroutines doing concurrent Get/Set at
// several sizes, then checks the policy's internal weight accounting agrees
// with the observable cache state.
func getSet(t *testing.T, entrypool bool) {
	for _, size := range []int{500, 2000, 10000, 50000} {
		builder := NewBuilder[uint64, uint64](int64(size))
		if entrypool {
			builder.UseEntryPool(true)
		}

		builder.RemovalListener(func(key, value uint64, reason RemoveReason) {})
		client, err := builder.Build()
		require.Nil(t, err)
		var wg sync.WaitGroup
		keys := keyGen()

		for i := 1; i <= 20; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				rd := rand.Intn(2 << 16)
				for i := 0; i < 500000; i++ {
					keyGet := keys[(i+rd)&(2<<16-1)]
					keyUpdate := keys[(i+3*rd)&(2<<16-1)]

					v, ok := client.Get(keyGet)
					if ok && v != keyGet {
						// value must always equal its key; anything else is corruption
						panic(keyGet)
					}
					if !ok {
						client.SetWithTTL(keyGet, keyGet, 1, 0)
					}

					client.SetWithTTL(keyUpdate, keyUpdate, int64(i%10+1), 0)
				}
			}()
		}
		wg.Wait()
		client.store.Wait()

		require.True(
			t, client.Len() < size+internal.WriteBufferSize,
		)

		di := client.store.DebugInfo()

		// policy bookkeeping must match the visible entry count and weights
		require.Equal(t, client.Len(), int(di.TotalCount()))
		require.True(t, di.TotalWeight() <= int64(size+size/10))
		require.True(t, di.TotalWeight() >= int64(size-15))
		require.Equal(t, di.TotalWeight(), di.WeightedSize)
		require.Equal(t, di.WindowWeight, di.WindowWeightField)
		require.Equal(t, di.ProbationWeight, di.ProbationWeightField)
		require.Equal(t, di.ProtectedWeight, di.ProtectedWeightField)

		client.store.RangeEntry(func(entry *internal.Entry[uint64, uint64]) {
			require.Equal(t, entry.Weight(), entry.PolicyWeight(), entry.Position())
		})

		client.Close()
	}
}

func TestCacheCorrectness_EntryPool_GetSet(t *testing.T) {
	getSet(t, true)
}
func TestCacheCorrectness_NoPool_GetSet(t *testing.T) {
	getSet(t, false)
}

// getSetDeleteExpire is like getSet but additionally exercises Delete, TTL
// expiry and Range under concurrency.
func getSetDeleteExpire(t *testing.T, entrypool bool) {
	for _, size := range []int{500, 2000, 10000, 50000} {
		builder := NewBuilder[uint64, uint64](int64(size))
		if entrypool {
			builder.UseEntryPool(true)
		}
		builder.RemovalListener(func(key, value uint64, reason RemoveReason) {})
		client, err := builder.Build()
		require.Nil(t, err)
		var wg sync.WaitGroup
		keys := keyGen()

		for i := 1; i <= 20; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				rd := rand.Intn(2 << 16)
				for i := 0; i < 100000; i++ {
					key := keys[(i+rd)&(2<<16-1)]
					v, ok := client.Get(key)
					if ok && v != key {
						panic(key)
					}
					if !ok {
						client.SetWithTTL(key, key, int64(i%10+1), time.Second*time.Duration(i%25+5))
					}
					if i%5 == 0 {
						client.Delete(key)
					}
					if i%5000 == 0 {
						client.Range(func(key, value uint64) bool {
							return true
						})
					}
				}
			}()
		}
		wg.Wait()

		client.store.Wait()

		require.True(
			t, client.Len() < size+internal.WriteBufferSize,
		)

		di := client.store.DebugInfo()

		require.Equal(t, client.Len(), int(di.TotalCount()))
		require.True(t, di.TotalWeight() <= int64(size+size/10))
		require.Equal(t, di.TotalWeight(), di.WeightedSize)
		require.Equal(t, di.WindowWeight, di.WindowWeightField)
		require.Equal(t, di.ProbationWeight, di.ProbationWeightField)
		require.Equal(t, di.ProtectedWeight, di.ProtectedWeightField)

		client.store.RangeEntry(func(entry *internal.Entry[uint64, uint64]) {
			require.Equal(t, entry.Weight(), entry.PolicyWeight(), entry.Position())
		})

		client.Close()
	}
}

func TestCacheCorrectness_EntryPool_GetSetDeleteExpire(t *testing.T) {
	getSetDeleteExpire(t, true)
}

func TestCacheCorrectness_NoPool_GetSetDeleteExpire(t *testing.T) {
	getSetDeleteExpire(t, false)
}

package theine_test

import (
	"context"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/Yiling-J/theine-go"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
)

func TestCache_MaxsizeZero(t *testing.T) {
	_, err := theine.NewBuilder[string, string](0).Build()
	require.NotNil(t, err)
}

// TestCache_Set fills the cache well past capacity and checks Len settles at
// the max size once the write buffer drains.
func TestCache_Set(t *testing.T) {
	client, err := theine.NewBuilder[string, string](1000).Build()
	require.Nil(t, err)
	defer client.Close()
	for i := 0; i < 20000; i++ {
		key := fmt.Sprintf("key:%d", rand.Intn(100000))
		client.Set(key, key, 1)
	}
	client.Wait()
	require.Equal(t, 1000, client.Len())
}

func TestCache_Update(t *testing.T) {
	client, err := theine.NewBuilder[string, string](1000).Build()
	require.Nil(t, err)
	defer client.Close()
	key := "foo"
	for _, v := range []string{"a", "b", "c", "d", "e", "e"} {
		client.Set(key, v, 1)
		vn, ok := client.Get(key)
		require.True(t, ok)
		require.Equal(t, v, vn)
	}
}

func TestCache_SetParallel(t *testing.T) {
	client, err := theine.NewBuilder[string, string](1000).Build()
	require.Nil(t, err)
	defer client.Close()
	var wg sync.WaitGroup
	for i := 1; i <= 12; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 10000; i++ {
				key := fmt.Sprintf("key:%d", rand.Intn(100000))
				client.Set(key, key, 1)
			}
		}()
	}
	wg.Wait()
	client.Wait()
	require.Equal(t, 1000, client.Len())
}

func TestCache_GetSetGetDeleteGet(t *testing.T) {
	for _, entryPool := range []bool{false, true} {
		t.Run(fmt.Sprintf("entrypool enable %v", entryPool), func(t *testing.T) {
			client, err := theine.NewBuilder[string, string](50000).Build()
			require.Nil(t, err)
			defer client.Close()
			for i := 0; i < 20000; i++ {
				key := fmt.Sprintf("key:%d", rand.Intn(3000))
				_, ok := client.Get(key)
				require.False(t, ok)
				client.Set(key, key, 1)
				v, ok := client.Get(key)
				require.True(t, ok)
				require.Equal(t, key, v)
				client.Delete(key)
				_, ok = client.Get(key)
				require.False(t, ok)
			}
		})
	}
}

func TestCache_Delete(t *testing.T) {
	client, err := theine.NewBuilder[string, string](100).Build()
	require.Nil(t, err)
	defer client.Close()
	client.Set("foo", "foo", 1)
	v, ok := client.Get("foo")
	require.True(t, ok)
	require.Equal(t, "foo", v)
	client.Delete("foo")
	_, ok = client.Get("foo")
	require.False(t, ok)

	// same sequence, but for an entry that carries a TTL
	client.SetWithTTL("foo", "foo", 1, 10*time.Second)
	v, ok = client.Get("foo")
	require.True(t, ok)
	require.Equal(t, "foo", v)
	client.Delete("foo")
	_, ok = client.Get("foo")
	require.False(t, ok)
}

func TestCache_GetSetParallel(t *testing.T) {
	client, err := theine.NewBuilder[string, string](1000).Build()
	require.Nil(t, err)
	defer client.Close()
	var wg sync.WaitGroup
	for i := 1; i <= 12; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 10000; i++ {
				key := fmt.Sprintf("key:%d", rand.Intn(3000))
				v, ok := client.Get(key)
				if !ok {
					client.Set(key, key, 1)
				} else {
					require.Equal(t, key, v)
				}
			}
		}()
	}
	wg.Wait()
	client.Wait()
	require.Equal(t, 1000, client.Len())
}

func TestCache_SetWithTTL(t *testing.T) {
	client, err := theine.NewBuilder[string, string](500).Build()
	require.Nil(t, err)
	defer client.Close()
	client.SetWithTTL("foo", "foo", 1, 3600*time.Second)
	require.Equal(t, 1, client.Len())
	time.Sleep(1 * time.Second)
	// shrinking the TTL on update must take effect
	client.SetWithTTL("foo", "foo", 1, 1*time.Second)
	require.Equal(t, 1, client.Len())
	time.Sleep(3 * time.Second)
	_, ok := client.Get("foo")
	require.False(t, ok)
}

func TestCache_SetWithTTLAutoExpire(t *testing.T) {
	client, err := theine.NewBuilder[string, string](500).Build()
	require.Nil(t, err)
	defer client.Close()
	for i := 0; i < 500; i++ {
		key1 := fmt.Sprintf("key:%d", i)
		client.SetWithTTL(key1, key1, 1, time.Second)
		key2 := fmt.Sprintf("key:%d:2", i)
		client.SetWithTTL(key2, key2, 1, 100*time.Second)
	}
	time.Sleep(3 * time.Second)
	require.True(t, client.Len() < 500)
	for i := 0; i < 500; i++ {
		key := fmt.Sprintf("key:%d", i)
		_, ok := client.Get(key)
		require.False(t, ok)
	}
}

func TestCache_Cost(t *testing.T) {
	client, err := theine.NewBuilder[string, string](500).Build()
	require.Nil(t, err)
	defer client.Close()

	// a single entry heavier than the whole cache is rejected
	success := client.Set("z", "z", 501)
	require.False(t, success)
	for i := 0; i < 30; i++ {
		key := fmt.Sprintf("key:%d", i)
		success = client.Set(key, key, 20)
		require.True(t, success)
	}
	client.Wait()
	require.True(t, client.Len() <= 25 && client.Len() >= 24)
	require.True(t, client.EstimatedSize() <= 500 && client.EstimatedSize() >= 480)

	// test cost func
	builder := theine.NewBuilder[string, string](500)
	builder.Cost(func(v string) int64 {
		return int64(len(v))
	})
	client2, err := builder.Build()
	require.Nil(t, err)
	defer client2.Close()
	success = client2.Set("z", strings.Repeat("z", 501), 0)
	require.False(t, success)
	for i := 0; i < 30; i++ {
		key := fmt.Sprintf("key:%d", i)
		success = client2.Set(key, strings.Repeat("z", 20), 0)
		require.True(t, success)
	}
	client2.Wait()
	require.True(t, client2.Len() <= 25 && client2.Len() >= 24)
	require.True(t, client2.EstimatedSize() <= 500 && client2.EstimatedSize() >= 480)
}

func TestCache_CostUpdate(t *testing.T) {
	client, err := theine.NewBuilder[string, string](500).Build()
	require.Nil(t, err)
	defer client.Close()
	for i := 0; i < 30; i++ {
		key := fmt.Sprintf("key:%d", i)
		success := client.Set(key, key, 20)
		require.True(t, success)
	}
	client.Wait()
	require.True(t, client.Len() <= 25 && client.Len() >= 24)
	require.True(t, client.EstimatedSize() <= 500 && client.EstimatedSize() >= 480)

	// update cost
	success := client.Set("key:15", "", 200)
	require.True(t, success)
	client.Wait()

	require.True(
		t, client.Len() <= 16 && client.Len() >= 15,
		fmt.Sprintf("length too large %d", client.Len()),
	)
	require.True(t, client.EstimatedSize() <= 500 && client.EstimatedSize() >= 480)
}

// TestCache_EstimatedSize polls EstimatedSize concurrently with writes to make
// sure the read path is safe under mutation.
func TestCache_EstimatedSize(t *testing.T) {
	client, err := theine.NewBuilder[int, int](500).Build()
	require.Nil(t, err)
	defer client.Close()
	ctx, cfn := context.WithCancel(context.Background())
	defer cfn()
	wg, ctx := errgroup.WithContext(ctx)
	wg.Go(func() error {
		tkr := time.NewTicker(time.Nanosecond)
		defer tkr.Stop()
		for {
			select {
			case <-tkr.C:
				client.EstimatedSize()
			case <-ctx.Done():
				return nil
			}
		}
	})
	wg.Go(func() error {
		defer cfn()
		for i := 0; i < 1200; i++ {
			if i%2 == 0 {
				client.Set(i, 1, 1)
			} else {
				client.Get(i)
			}
		}
		return nil
	})
	require.Nil(t, wg.Wait())
	client.Wait()
	require.Equal(t, 500, client.EstimatedSize())
}

// TestCache_Doorkeeper verifies first-time Sets are dropped by the bloom
// filter and admitted on the second attempt.
func TestCache_Doorkeeper(t *testing.T) {
	builder := theine.NewBuilder[string, string](500)
	builder.Doorkeeper(true)
	client, err := builder.Build()
	require.Nil(t, err)
	defer client.Close()
	for i := 0; i < 200; i++ {
		key := fmt.Sprintf("key:%d", i)
		success := client.Set(key, key, 1)
		require.False(t, success)
	}
	require.True(t, client.Len() == 0)
	client.Wait()
	for i := 0; i < 200; i++ {
		key := fmt.Sprintf("key:%d", i)
		success := client.Set(key, key, 1)
		require.True(t, success)
	}
	require.True(t, client.Len() > 0)
	for i := 0; i < 500000; i++ {
		key := fmt.Sprintf("key:%d:2", i)
		client.Set(key, key, 1)
	}
}

func TestCache_RemovalListener(t *testing.T) {
	builder := theine.NewBuilder[int, int](100)
	var lock sync.Mutex
	removed := map[int]int{}
	evicted := map[int]int{}
	expired := map[int]int{}
	builder.RemovalListener(func(key, value int, reason theine.RemoveReason) {
		lock.Lock()
		defer lock.Unlock()
		switch reason {
		case theine.REMOVED:
			removed[key] = value
		case theine.EVICTED:
			evicted[key] = value
		case theine.EXPIRED:
			expired[key] = value
		}
	})
	client, err := builder.Build()
	require.Nil(t, err)
	defer client.Close()
	for i := 0; i < 100; i++ {
		success := client.Set(i, i, 1)
		require.True(t, success)
	}
	// this will evict one entry: 0
	success := client.Set(100, 100, 1)
	require.True(t, success)
	client.Wait()
	lock.Lock()
	require.Equal(t, 1, len(evicted))
	require.True(t, evicted[0] == 0)
	lock.Unlock()
	// manually remove one
	client.Delete(5)
	client.Wait()
	lock.Lock()
	require.Equal(t, 1, len(removed))
	require.True(t, removed[5] == 5)
	lock.Unlock()
	// expire one
	for i := 0; i < 100; i++ {
		success := client.SetWithTTL(i+100, i+100, 1, 1*time.Second)
		require.True(t, success)
	}
	client.Wait()
	time.Sleep(5 * time.Second)
	lock.Lock()
	require.True(t, len(expired) > 0)
	lock.Unlock()
}

func TestCache_Range(t *testing.T) {
	for _, cap := range []int{100, 200000} {
		client, err := theine.NewBuilder[int, int](int64(cap)).Build()
		require.Nil(t, err)
		defer client.Close()
		for i := 0; i < 100; i++ {
			success := client.Set(i, i, 1)
			require.True(t, success)
		}
		data := map[int]int{}
		client.Range(func(key, value int) bool {
			data[key] = value
			return true
		})
		require.Equal(t, 100, len(data))
		for i := 0; i < 100; i++ {
			require.Equal(t, i, data[i])
		}
		// return false
		data = map[int]int{}
		client.Range(func(key, value int) bool {
			data[key] = value
			return len(data) < 20
		})
		require.Equal(t, 20, len(data))
		// expired
		client, err = theine.NewBuilder[int, int](int64(cap)).Build()
		require.Nil(t, err)
		for i := 0; i < 50; i++ {
			success := client.Set(i, i, 1)
			require.True(t, success)
		}
		for i := 50; i < 100; i++ {
			success := client.SetWithTTL(i, i, 1, time.Second)
			require.True(t, success)
		}
		client.Wait()
		time.Sleep(2 * time.Second)
		data = map[int]int{}
		client.Range(func(key, value int) bool {
			data[key] = value
			return true
		})
		require.Equal(t, 50, len(data))
		for i := 0; i < 50; i++ {
			require.Equal(t, i, data[i])
		}
	}
}

type Foo struct {
	Bar string
}

func TestCache_StringKey(t *testing.T) {
	builder := theine.NewBuilder[Foo, int](10000)
	builder.StringKey(func(k Foo) string { return k.Bar })
	client, err := builder.Build()
	require.Nil(t, err)
	defer client.Close()
	for i := 0; i < 50; i++ {
		foo := Foo{Bar: strconv.Itoa(i + 100)}
		client.Set(foo, i, 1)
		if v, ok := client.Get(Foo{Bar: strconv.Itoa(i + 100)}); !ok || v != i {
			require.FailNow(t, "")
		}
	}
}

// TestCache_Zipf checks the hit ratio on a fixed-seed Zipf workload lands in
// the expected band.
func TestCache_Zipf(t *testing.T) {
	var miss atomic.Uint64
	client, err := theine.NewBuilder[uint64, uint64](50000).Build()
	require.NoError(t, err)
	defer client.Close()
	r := rand.New(rand.NewSource(0))
	z := rand.NewZipf(r, 1.01, 9.0, 50000*1000)

	total := 10000000
	for i := 0; i < total; i++ {
		key := z.Uint64()
		v, ok := client.Get(key)
		if ok {
			require.Equal(t, v, key)
		} else {
			miss.Add(1)
			success := client.Set(key, key, 1)
			require.True(t, success)
		}
	}
	stats := client.Stats()
	require.True(t, stats.HitRatio() > 0.5)
	require.True(t, stats.HitRatio() < 0.6)
	require.True(t, 1-float64(miss.Load())/float64(total) > 0.5)
	require.True(t, 1-float64(miss.Load())/float64(total) < 0.6)
}

package theine_test

import (
	"testing"

	"github.com/Yiling-J/theine-go"
	"github.com/stretchr/testify/require"
)

// assertGeneric checks basic Set/Get behaviour for an arbitrary comparable key type.
func assertGeneric[T
comparable](t *testing.T, k1 T, k2 T) { 11 | client, err := theine.NewBuilder[T, int](10).Build() 12 | require.Nil(t, err) 13 | client.Set(k1, 1, 1) 14 | v, ok := client.Get(k1) 15 | require.True(t, ok) 16 | require.Equal(t, 1, v) 17 | _, ok = client.Get(k2) 18 | require.False(t, ok) 19 | } 20 | 21 | type t1 struct { 22 | id int 23 | } 24 | 25 | type t2 struct { 26 | t1 t1 27 | } 28 | 29 | type t3 struct { 30 | t1 *t1 31 | } 32 | 33 | func TestGenericKey(t *testing.T) { 34 | assertGeneric(t, 1, 2) 35 | assertGeneric(t, "a", "b") 36 | assertGeneric(t, 1.1, 1.2) 37 | assertGeneric(t, true, false) 38 | assertGeneric(t, t1{id: 1}, t1{id: 2}) 39 | assertGeneric(t, &t1{id: 1}, &t1{id: 1}) 40 | assertGeneric(t, t2{t1: t1{id: 1}}, t2{t1: t1{id: 2}}) 41 | assertGeneric(t, t3{t1: &t1{id: 1}}, t3{t1: &t1{id: 1}}) 42 | } 43 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/Yiling-J/theine-go 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/stretchr/testify v1.8.2 7 | github.com/zeebo/xxh3 v1.0.2 8 | golang.org/x/sync v0.8.0 9 | golang.org/x/sys v0.8.0 10 | ) 11 | 12 | require ( 13 | github.com/davecgh/go-spew v1.1.1 // indirect 14 | github.com/klauspost/cpuid/v2 v2.0.9 // indirect 15 | github.com/kr/pretty v0.1.0 // indirect 16 | github.com/pmezard/go-difflib v1.0.0 // indirect 17 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect 18 | gopkg.in/yaml.v3 v3.0.1 // indirect 19 | ) 20 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 
| github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= 5 | github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 6 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 7 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 8 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 9 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 10 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 11 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 12 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 13 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 14 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 15 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 16 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 17 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 18 | github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 19 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 20 | github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= 21 | github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= 22 | github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= 23 | golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= 24 | golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 25 | golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= 26 | golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 27 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 28 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 29 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 30 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 31 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 32 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 33 | -------------------------------------------------------------------------------- /internal/bf/bf.go: -------------------------------------------------------------------------------- 1 | package bf 2 | 3 | import ( 4 | "math" 5 | ) 6 | 7 | // doorkeeper is a small bloom-filter-based cache admission policy 8 | type Bloomfilter struct { 9 | Filter bitvector // our filter bit vector 10 | M uint32 // size of bit vector in bits 11 | K uint32 // distinct hash functions needed 12 | FalsePositiveRate float64 13 | Capacity int 14 | } 15 | 16 | func New(falsePositiveRate float64) *Bloomfilter { 17 | d := &Bloomfilter{FalsePositiveRate: falsePositiveRate} 18 | d.EnsureCapacity(320) 19 | return d 20 | } 21 | 22 | // create new bloomfilter with given size in bytes 23 | func NewWithSize(size uint32) *Bloomfilter { 24 | d := &Bloomfilter{} 25 | bits := size * 8 26 | m := nextPowerOfTwo(bits) 27 | d.M = m 28 | d.Filter = newbv(m) 29 | return d 30 | } 31 | 32 | func (d *Bloomfilter) EnsureCapacity(capacity int) { 33 | if capacity <= d.Capacity { 34 | return 35 | } 36 | capacity = int(nextPowerOfTwo(uint32(capacity))) 37 | bits := float64(capacity) * -math.Log(d.FalsePositiveRate) / (math.Log(2.0) * math.Log(2.0)) // in bits 38 | m := nextPowerOfTwo(uint32(bits)) 39 | 40 | if m < 1024 { 41 | m = 1024 42 | } 43 | 44 | k := uint32(0.7 * 
float64(m) / float64(capacity)) 45 | if k < 2 { 46 | k = 2 47 | } 48 | d.Capacity = capacity 49 | d.M = m 50 | d.Filter = newbv(m) 51 | d.K = k 52 | } 53 | 54 | func (d *Bloomfilter) Exist(h uint64) bool { 55 | h1, h2 := uint32(h), uint32(h>>32) 56 | var o uint = 1 57 | for i := uint32(0); i < d.K; i++ { 58 | o &= d.Filter.get((h1 + (i * h2)) & (d.M - 1)) 59 | } 60 | return o == 1 61 | } 62 | 63 | // insert inserts the byte array b into the bloom filter. Returns true if the value 64 | // was already considered to be in the bloom filter. 65 | func (d *Bloomfilter) Insert(h uint64) bool { 66 | h1, h2 := uint32(h), uint32(h>>32) 67 | var o uint = 1 68 | for i := uint32(0); i < d.K; i++ { 69 | o &= d.Filter.getset((h1 + (i * h2)) & (d.M - 1)) 70 | } 71 | return o == 1 72 | } 73 | 74 | // Reset clears the bloom filter 75 | func (d *Bloomfilter) Reset() { 76 | for i := range d.Filter { 77 | d.Filter[i] = 0 78 | } 79 | } 80 | 81 | // Internal routines for the bit vector 82 | type bitvector []uint64 83 | 84 | func newbv(size uint32) bitvector { 85 | return make([]uint64, uint(size+63)/64) 86 | } 87 | 88 | func (b bitvector) get(bit uint32) uint { 89 | shift := bit % 64 90 | idx := bit / 64 91 | bb := b[idx] 92 | m := uint64(1) << shift 93 | return uint((bb & m) >> shift) 94 | } 95 | 96 | // set bit 'bit' in the bitvector d and return previous value 97 | func (b bitvector) getset(bit uint32) uint { 98 | shift := bit % 64 99 | idx := bit / 64 100 | bb := b[idx] 101 | m := uint64(1) << shift 102 | b[idx] |= m 103 | return uint((bb & m) >> shift) 104 | } 105 | 106 | // return the integer >= i which is a power of two 107 | func nextPowerOfTwo(i uint32) uint32 { 108 | n := i - 1 109 | n |= n >> 1 110 | n |= n >> 2 111 | n |= n >> 4 112 | n |= n >> 8 113 | n |= n >> 16 114 | n++ 115 | return n 116 | } 117 | -------------------------------------------------------------------------------- /internal/bf/bf_test.go: 
-------------------------------------------------------------------------------- 1 | package bf 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestBloom(t *testing.T) { 10 | bf := NewWithSize(5) 11 | bf.FalsePositiveRate = 0.1 12 | bf.EnsureCapacity(5) 13 | bf.EnsureCapacity(500) 14 | bf.EnsureCapacity(200) 15 | 16 | exist := bf.Insert(123) 17 | require.False(t, exist) 18 | 19 | exist = bf.Exist(123) 20 | require.True(t, exist) 21 | 22 | exist = bf.Exist(456) 23 | require.False(t, exist) 24 | } 25 | -------------------------------------------------------------------------------- /internal/buffer.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Yiling-J. All rights reserved. 2 | // Copyright (c) 2023 Alexey Mayshev. All rights reserved. 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | package internal 17 | 18 | import ( 19 | "runtime" 20 | "sync/atomic" 21 | "unsafe" 22 | 23 | "github.com/Yiling-J/theine-go/internal/xruntime" 24 | ) 25 | 26 | const ( 27 | // The maximum number of elements per buffer. 28 | capacity = 16 29 | mask = uint64(capacity - 1) 30 | ) 31 | 32 | func castToPointer[K comparable, V any](ptr unsafe.Pointer) *ReadBufItem[K, V] { 33 | return (*ReadBufItem[K, V])(ptr) 34 | } 35 | 36 | // PolicyBuffers is the set of buffers returned by the lossy buffer. 
37 | type PolicyBuffers[K comparable, V any] struct { 38 | Returned []ReadBufItem[K, V] 39 | } 40 | 41 | // Buffer is a circular ring buffer stores the elements being transferred by the producers to the consumer. 42 | // The monotonically increasing count of reads and writes allow indexing sequentially to the next 43 | // element location based upon a power-of-two sizing. 44 | // 45 | // The producers race to read the counts, check if there is available capacity, and if so then try 46 | // once to CAS to the next write count. If the increment is successful then the producer lazily 47 | // publishes the element. The producer does not retry or block when unsuccessful due to a failed 48 | // CAS or the buffer being full. 49 | // 50 | // The consumer reads the counts and takes the available elements. The clearing of the elements 51 | // and the next read count are lazily set. 52 | // 53 | // This implementation is striped to further increase concurrency. 54 | type Buffer[K comparable, V any] struct { 55 | head atomic.Uint64 56 | // headPadding 57 | _ [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte 58 | tail atomic.Uint64 59 | // tailPadding 60 | _ [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte 61 | returned unsafe.Pointer 62 | // returnedPadding 63 | _ [xruntime.CacheLineSize - 8]byte 64 | policyBuffers unsafe.Pointer 65 | // returnedSlicePadding 66 | _ [xruntime.CacheLineSize - 8]byte 67 | buffer [capacity]unsafe.Pointer 68 | } 69 | 70 | // New creates a new lossy Buffer. 71 | func NewBuffer[K comparable, V any]() *Buffer[K, V] { 72 | pb := &PolicyBuffers[K, V]{ 73 | Returned: make([]ReadBufItem[K, V], 0, capacity), 74 | } 75 | b := &Buffer[K, V]{ 76 | policyBuffers: unsafe.Pointer(pb), 77 | } 78 | b.returned = b.policyBuffers 79 | return b 80 | } 81 | 82 | // Add lazily publishes the item to the consumer. 83 | // 84 | // item may be lost due to contention. 
85 | func (b *Buffer[K, V]) Add(n ReadBufItem[K, V]) *PolicyBuffers[K, V] { 86 | head := b.head.Load() 87 | tail := b.tail.Load() 88 | size := tail - head 89 | if size >= capacity { 90 | // full buffer 91 | return nil 92 | } 93 | if b.tail.CompareAndSwap(tail, tail+1) { 94 | // success 95 | index := int(tail & mask) 96 | atomic.StorePointer(&b.buffer[index], unsafe.Pointer(&ReadBufItem[K, V]{ 97 | entry: n.entry, 98 | hash: n.hash, 99 | })) 100 | if size == capacity-1 { 101 | // try return new buffer 102 | if !atomic.CompareAndSwapPointer(&b.returned, b.policyBuffers, nil) { 103 | // somebody already get buffer 104 | return nil 105 | } 106 | 107 | pb := (*PolicyBuffers[K, V])(b.policyBuffers) 108 | for i := 0; i < capacity; i++ { 109 | index := int(head & mask) 110 | v := atomic.LoadPointer(&b.buffer[index]) 111 | if v != nil { 112 | // published 113 | pb.Returned = append(pb.Returned, *castToPointer[K, V](v)) 114 | // release 115 | atomic.StorePointer(&b.buffer[index], nil) 116 | } 117 | head++ 118 | } 119 | 120 | b.head.Store(head) 121 | return pb 122 | } 123 | } 124 | 125 | // failed 126 | return nil 127 | } 128 | 129 | // Load all items in buffer, used in test only to update policy proactive proactively 130 | func (b *Buffer[K, V]) items() []ReadBufItem[K, V] { 131 | head := b.head.Load() 132 | returned := []ReadBufItem[K, V]{} 133 | // try return new buffer 134 | for _, pt := range b.buffer { 135 | // #nosec G601 136 | v := atomic.LoadPointer(&pt) 137 | if v != nil { 138 | returned = append(returned, *castToPointer[K, V](v)) 139 | } 140 | head++ 141 | } 142 | 143 | return returned 144 | } 145 | 146 | // Free returns the processed buffer back and also clears it. 
147 | func (b *Buffer[K, V]) Free() { 148 | pb := (*PolicyBuffers[K, V])(b.policyBuffers) 149 | for i := 0; i < len(pb.Returned); i++ { 150 | pb.Returned[i].entry = nil 151 | pb.Returned[i].hash = 0 152 | } 153 | pb.Returned = pb.Returned[:0] 154 | atomic.StorePointer(&b.returned, b.policyBuffers) 155 | } 156 | 157 | // Clear clears the lossy Buffer and returns it to the default state. 158 | func (b *Buffer[K, V]) Clear() { 159 | for !atomic.CompareAndSwapPointer(&b.returned, b.policyBuffers, nil) { 160 | runtime.Gosched() 161 | } 162 | for i := 0; i < capacity; i++ { 163 | atomic.StorePointer(&b.buffer[i], nil) 164 | } 165 | b.Free() 166 | b.tail.Store(0) 167 | b.head.Store(0) 168 | } 169 | -------------------------------------------------------------------------------- /internal/clock/clock.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "math" 5 | "sync/atomic" 6 | "time" 7 | ) 8 | 9 | type Clock struct { 10 | Start time.Time 11 | now atomic.Int64 12 | } 13 | 14 | func (c *Clock) NowNano() int64 { 15 | return time.Since(c.Start).Nanoseconds() 16 | } 17 | 18 | func (c *Clock) NowNanoCached() int64 { 19 | return c.now.Load() 20 | } 21 | 22 | func (c *Clock) RefreshNowCache() { 23 | c.now.Store(c.NowNano()) 24 | } 25 | 26 | // used in test only 27 | func (c *Clock) SetNowCache(n int64) { 28 | c.now.Store(n) 29 | } 30 | 31 | func (c *Clock) ExpireNano(ttl time.Duration) int64 { 32 | // Both `ttl` and `nano + ttl` can overflow, but we only handle the overflow of `nano + ttl` here. 33 | // An overflowed `ttl` can be either positive or negative. If it's positive, we won't detect it since it behaves 34 | // like a regular `ttl`. Users of Theine should ensure that `ttl` does not overflow (this should be the case in most scenarios 35 | // unless the value is directly manipulated via math operations). 36 | // When `nano + ttl` overflows, we cap the returned expiration time at `math.MaxInt64`. 
37 | return saturatingAdd(c.NowNano(), ttl.Nanoseconds()) 38 | } 39 | 40 | func (c *Clock) SetStart(ts int64) { 41 | c.Start = time.Unix(0, ts) 42 | } 43 | 44 | func saturatingAdd(a, b int64) int64 { 45 | var max int64 = math.MaxInt64 46 | var min int64 = math.MinInt64 47 | if b > 0 && a > max-b { 48 | return max 49 | } 50 | 51 | if b < 0 && a < min-b { 52 | return min 53 | } 54 | 55 | return a + b 56 | } 57 | -------------------------------------------------------------------------------- /internal/clock/clock_test.go: -------------------------------------------------------------------------------- 1 | package clock_test 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | "time" 7 | 8 | "github.com/Yiling-J/theine-go/internal/clock" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestClock_NowNano(t *testing.T) { 13 | c := &clock.Clock{Start: time.Now()} 14 | start := c.NowNano() 15 | time.Sleep(5 * time.Millisecond) 16 | end := c.NowNano() 17 | 18 | require.Greater(t, end, start) 19 | } 20 | 21 | func TestClock_ExpireNano(t *testing.T) { 22 | c := &clock.Clock{Start: time.Now()} 23 | nano := c.NowNano() 24 | 25 | ttl := 1 * time.Second 26 | expireNano := c.ExpireNano(ttl) 27 | lower := nano + ttl.Nanoseconds() 28 | upper := c.NowNano() + ttl.Nanoseconds() 29 | require.Greater(t, expireNano, lower) 30 | require.Less(t, expireNano, upper) 31 | 32 | overflowTTL := time.Duration(math.MaxInt64) 33 | expireNano = c.ExpireNano(overflowTTL) 34 | require.Equal(t, int64(math.MaxInt64), expireNano) 35 | } 36 | 37 | func TestClock_RefreshNowCache(t *testing.T) { 38 | c := &clock.Clock{Start: time.Now()} 39 | now := c.NowNanoCached() 40 | time.Sleep(5 * time.Millisecond) 41 | require.Equal(t, now, c.NowNanoCached()) 42 | 43 | c.RefreshNowCache() 44 | require.NotEqual(t, c.NowNanoCached(), now) 45 | } 46 | 47 | func TestClock_SetNowCache(t *testing.T) { 48 | c := &clock.Clock{} 49 | c.SetNowCache(123456789) 50 | require.Equal(t, int64(123456789), c.NowNanoCached()) 
51 | } 52 | 53 | func TestClock_SetStart(t *testing.T) { 54 | c := &clock.Clock{} 55 | ts := time.Now().UnixNano() 56 | c.SetStart(ts) 57 | require.Equal(t, ts, c.Start.UnixNano()) 58 | } 59 | -------------------------------------------------------------------------------- /internal/counter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Yiling-J 2 | // Copyright 2024 Andrei Pechkurov 3 | 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | package internal 17 | 18 | import ( 19 | "sync" 20 | "sync/atomic" 21 | 22 | "github.com/Yiling-J/theine-go/internal/xruntime" 23 | ) 24 | 25 | // pool for P tokens 26 | var ptokenPool sync.Pool 27 | 28 | // a P token is used to point at the current OS thread (P) 29 | // on which the goroutine is run; exact identity of the thread, 30 | // as well as P migration tolerance, is not important since 31 | // it's used to as a best effort mechanism for assigning 32 | // concurrent operations (goroutines) to different stripes of 33 | // the counter 34 | type ptoken struct { 35 | idx uint32 36 | //lint:ignore U1000 prevents false sharing 37 | pad [xruntime.CacheLineSize - 4]byte 38 | } 39 | 40 | // A UnsignedCounter is a unsigned striped int64 counter. 41 | // 42 | // Should be preferred over a single atomically updated int64 43 | // counter in high contention scenarios. 44 | // 45 | // A Counter must not be copied after first use. 
46 | type UnsignedCounter struct { 47 | stripes []cstripe 48 | mask uint32 49 | } 50 | 51 | type cstripe struct { 52 | c uint64 53 | //lint:ignore U1000 prevents false sharing 54 | pad [xruntime.CacheLineSize - 8]byte 55 | } 56 | 57 | // UnsignedCounter creates a new UnsignedCounter instance. 58 | func NewUnsignedCounter() *UnsignedCounter { 59 | nstripes := RoundUpPowerOf2(xruntime.Parallelism()) 60 | c := UnsignedCounter{ 61 | stripes: make([]cstripe, nstripes), 62 | mask: nstripes - 1, 63 | } 64 | return &c 65 | } 66 | 67 | // Inc increments the counter by 1. 68 | func (c *UnsignedCounter) Inc() { 69 | c.Add(1) 70 | } 71 | 72 | // Add adds the delta to the counter. 73 | func (c *UnsignedCounter) Add(delta uint64) { 74 | t, ok := ptokenPool.Get().(*ptoken) 75 | if !ok { 76 | t = new(ptoken) 77 | t.idx = xruntime.Fastrand() 78 | } 79 | for { 80 | stripe := &c.stripes[t.idx&c.mask] 81 | cnt := atomic.LoadUint64(&stripe.c) 82 | if atomic.CompareAndSwapUint64(&stripe.c, cnt, cnt+delta) { 83 | break 84 | } 85 | // Give a try with another randomly selected stripe. 86 | t.idx = xruntime.Fastrand() 87 | } 88 | ptokenPool.Put(t) 89 | } 90 | 91 | // Value returns the current counter value. 92 | // The returned value may not include all of the latest operations in 93 | // presence of concurrent modifications of the counter. 94 | func (c *UnsignedCounter) Value() uint64 { 95 | v := uint64(0) 96 | for i := 0; i < len(c.stripes); i++ { 97 | stripe := &c.stripes[i] 98 | v += atomic.LoadUint64(&stripe.c) 99 | } 100 | return v 101 | } 102 | 103 | // Reset resets the counter to zero. 104 | // This method should only be used when it is known that there are 105 | // no concurrent modifications of the counter. 
106 | func (c *UnsignedCounter) Reset() { 107 | for i := 0; i < len(c.stripes); i++ { 108 | stripe := &c.stripes[i] 109 | atomic.StoreUint64(&stripe.c, 0) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /internal/entry.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "sync/atomic" 5 | ) 6 | 7 | const ( 8 | NEW int8 = iota 9 | REMOVE 10 | UPDATE 11 | EVICTE 12 | WAIT 13 | ) 14 | 15 | type ReadBufItem[K comparable, V any] struct { 16 | entry *Entry[K, V] 17 | hash uint64 18 | } 19 | type WriteBufItem[K comparable, V any] struct { 20 | entry *Entry[K, V] 21 | costChange int64 22 | code int8 23 | rechedule bool 24 | fromNVM bool 25 | hash uint64 26 | } 27 | 28 | type MetaData[K comparable, V any] struct { 29 | prev *Entry[K, V] 30 | next *Entry[K, V] 31 | wheelPrev *Entry[K, V] 32 | wheelNext *Entry[K, V] 33 | } 34 | 35 | type Entry[K comparable, V any] struct { 36 | key K // Protected by the shard mutex. 37 | value V // Protected by the shard mutex. 38 | meta MetaData[K, V] // Used in the timing wheel and policy LRU, protected by the policy mutex. 39 | weight atomic.Int64 // Protected by the shard mutex. 40 | policyWeight int64 // Protected by the policy mutex. 41 | expire atomic.Int64 // Protected by the shard mutex. 42 | flag Flag // Protected by the policy mutex. 
43 | } 44 | 45 | // used in test only 46 | func NewEntry[K comparable, V any](key K, value V, cost int64, expire int64) *Entry[K, V] { 47 | entry := &Entry[K, V]{ 48 | key: key, 49 | value: value, 50 | } 51 | entry.weight.Store(cost) 52 | entry.policyWeight = cost 53 | if expire > 0 { 54 | entry.expire.Store(expire) 55 | } 56 | return entry 57 | } 58 | 59 | func (e *Entry[K, V]) Next(listType uint8) *Entry[K, V] { 60 | switch listType { 61 | case LIST_PROBATION, LIST_PROTECTED, LIST_WINDOW: 62 | if p := e.meta.next; !p.flag.IsRoot() { 63 | return e.meta.next 64 | } 65 | return nil 66 | 67 | case WHEEL_LIST: 68 | if p := e.meta.wheelNext; !p.flag.IsRoot() { 69 | return e.meta.wheelNext 70 | } 71 | return nil 72 | } 73 | return nil 74 | } 75 | 76 | func (e *Entry[K, V]) Prev(listType uint8) *Entry[K, V] { 77 | switch listType { 78 | case LIST_PROBATION, LIST_PROTECTED, LIST_WINDOW: 79 | if p := e.meta.prev; !p.flag.IsRoot() { 80 | return e.meta.prev 81 | } 82 | return nil 83 | 84 | case WHEEL_LIST: 85 | if p := e.meta.wheelPrev; !p.flag.IsRoot() { 86 | return e.meta.wheelPrev 87 | } 88 | return nil 89 | } 90 | return nil 91 | } 92 | 93 | func (e *Entry[K, V]) PrevPolicy() *Entry[K, V] { 94 | if p := e.meta.prev; !p.flag.IsRoot() { 95 | return e.meta.prev 96 | } 97 | return nil 98 | } 99 | 100 | func (e *Entry[K, V]) PrevExpire() *Entry[K, V] { 101 | if p := e.meta.wheelPrev; !p.flag.IsRoot() { 102 | return e.meta.wheelPrev 103 | } 104 | return nil 105 | } 106 | 107 | func (e *Entry[K, V]) NextPolicy() *Entry[K, V] { 108 | if p := e.meta.next; !p.flag.IsRoot() { 109 | return e.meta.next 110 | } 111 | return nil 112 | } 113 | 114 | func (e *Entry[K, V]) NextExpire() *Entry[K, V] { 115 | if p := e.meta.wheelNext; !p.flag.IsRoot() { 116 | return e.meta.wheelNext 117 | } 118 | return nil 119 | } 120 | 121 | func (e *Entry[K, V]) prev(listType uint8) *Entry[K, V] { 122 | switch listType { 123 | case LIST_PROBATION, LIST_PROTECTED, LIST_WINDOW: 124 | return e.meta.prev 125 
| case WHEEL_LIST: 126 | return e.meta.wheelPrev 127 | } 128 | return nil 129 | } 130 | 131 | func (e *Entry[K, V]) next(listType uint8) *Entry[K, V] { 132 | switch listType { 133 | case LIST_PROBATION, LIST_PROTECTED, LIST_WINDOW: 134 | return e.meta.next 135 | case WHEEL_LIST: 136 | return e.meta.wheelNext 137 | } 138 | return nil 139 | } 140 | 141 | func (e *Entry[K, V]) setPrev(entry *Entry[K, V], listType uint8) { 142 | switch listType { 143 | case LIST_PROBATION, LIST_PROTECTED, LIST_WINDOW: 144 | e.meta.prev = entry 145 | case WHEEL_LIST: 146 | e.meta.wheelPrev = entry 147 | } 148 | } 149 | 150 | func (e *Entry[K, V]) setNext(entry *Entry[K, V], listType uint8) { 151 | switch listType { 152 | case LIST_PROBATION, LIST_PROTECTED, LIST_WINDOW: 153 | e.meta.next = entry 154 | case WHEEL_LIST: 155 | e.meta.wheelNext = entry 156 | } 157 | } 158 | 159 | func (e *Entry[K, V]) pentry() *Pentry[K, V] { 160 | return &Pentry[K, V]{ 161 | Key: e.key, 162 | Value: e.value, 163 | Weight: e.weight.Load(), 164 | PolicyWeight: e.policyWeight, 165 | Expire: e.expire.Load(), 166 | Flag: e.flag, 167 | } 168 | } 169 | 170 | // entry for persistence 171 | type Pentry[K comparable, V any] struct { 172 | Key K 173 | Value V 174 | Weight int64 175 | PolicyWeight int64 176 | Expire int64 177 | Frequency int 178 | Flag Flag 179 | } 180 | 181 | func (e *Pentry[K, V]) entry() *Entry[K, V] { 182 | en := &Entry[K, V]{ 183 | key: e.Key, 184 | value: e.Value, 185 | } 186 | en.weight.Store(e.Weight) 187 | en.expire.Store(e.Expire) 188 | en.flag = e.Flag 189 | en.policyWeight = e.PolicyWeight 190 | return en 191 | } 192 | 193 | func (e *Entry[K, V]) Weight() int64 { 194 | return e.weight.Load() 195 | } 196 | 197 | func (e *Entry[K, V]) PolicyWeight() int64 { 198 | return e.policyWeight 199 | } 200 | 201 | func (e *Entry[K, V]) Position() string { 202 | switch { 203 | case e.flag.IsWindow(): 204 | return "WINDOW" 205 | case e.flag.IsProbation(): 206 | return "PROBATION" 207 | case 
e.flag.IsProtected(): 208 | return "PROTECTED" 209 | case e.flag.IsRemoved(): 210 | return "REMOVED" 211 | } 212 | return "UNKNOWN" 213 | } 214 | -------------------------------------------------------------------------------- /internal/hasher/hasher.go: -------------------------------------------------------------------------------- 1 | //go:build !go1.24 2 | // +build !go1.24 3 | 4 | package hasher 5 | 6 | import ( 7 | "unsafe" 8 | 9 | "github.com/zeebo/xxh3" 10 | ) 11 | 12 | type Hasher[K comparable] struct { 13 | ksize int 14 | kstr bool 15 | kfunc func(K) string 16 | } 17 | 18 | func NewHasher[K comparable](stringKeyFunc func(K) string) *Hasher[K] { 19 | h := &Hasher[K]{kfunc: stringKeyFunc} 20 | var k K 21 | switch ((interface{})(k)).(type) { 22 | case string: 23 | h.kstr = true 24 | default: 25 | h.ksize = int(unsafe.Sizeof(k)) 26 | } 27 | return h 28 | } 29 | 30 | func (h *Hasher[K]) Hash(key K) uint64 { 31 | var strKey string 32 | if h.kfunc != nil { 33 | strKey = h.kfunc(key) 34 | } else if h.kstr { 35 | strKey = *(*string)(unsafe.Pointer(&key)) 36 | } else { 37 | strKey = *(*string)(unsafe.Pointer(&struct { 38 | data unsafe.Pointer 39 | len int 40 | }{unsafe.Pointer(&key), h.ksize})) 41 | } 42 | return xxh3.HashString(strKey) 43 | } 44 | -------------------------------------------------------------------------------- /internal/hasher/hasher_test.go: -------------------------------------------------------------------------------- 1 | //go:build !go1.24 2 | // +build !go1.24 3 | 4 | package hasher 5 | 6 | import ( 7 | "strconv" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | type Foo struct { 14 | Bar string 15 | } 16 | 17 | func TestHasher_StringKey(t *testing.T) { 18 | hasher := NewHasher[string](nil) 19 | h := hasher.Hash(strconv.Itoa(123456)) 20 | for i := 0; i < 10; i++ { 21 | require.Equal(t, h, hasher.Hash(strconv.Itoa(123456))) 22 | } 23 | } 24 | 25 | func TestHasher_StructStringKey(t *testing.T) { 26 | hasher1 := 
NewHasher[Foo](nil) 27 | hasher2 := NewHasher[Foo](func(k Foo) string { 28 | return k.Bar 29 | }) 30 | h1 := uint64(0) 31 | h2 := uint64(0) 32 | for i := 0; i < 10; i++ { 33 | foo := Foo{Bar: strconv.Itoa(123456)} 34 | if h1 == 0 { 35 | h1 = hasher1.Hash(foo) 36 | } else { 37 | require.NotEqual(t, h1, hasher1.Hash(foo)) 38 | } 39 | } 40 | for i := 0; i < 10; i++ { 41 | foo := Foo{Bar: strconv.Itoa(123456)} 42 | if h2 == 0 { 43 | h2 = hasher2.Hash(foo) 44 | } else { 45 | require.Equal(t, h2, hasher2.Hash(foo)) 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /internal/hasher/maphash_hasher.go: -------------------------------------------------------------------------------- 1 | //go:build go1.24 2 | // +build go1.24 3 | 4 | package hasher 5 | 6 | import ( 7 | "hash/maphash" 8 | ) 9 | 10 | type Hasher[K comparable] struct { 11 | seed maphash.Seed 12 | kfunc func(K) string 13 | } 14 | 15 | func NewHasher[K comparable](stringKeyFunc func(K) string) *Hasher[K] { 16 | // TODO: stringKeyFunc was used prior to Go 1.24 when Comparable was unavailable. 17 | // With the introduction of Comparable, special handling for struct with string field key is no longer necessary. 
18 | return &Hasher[K]{kfunc: stringKeyFunc, seed: maphash.MakeSeed()} 19 | } 20 | 21 | func (h *Hasher[K]) Hash(key K) uint64 { 22 | if h.kfunc != nil { 23 | return maphash.Comparable(h.seed, h.kfunc(key)) 24 | } 25 | return maphash.Comparable(h.seed, key) 26 | } 27 | -------------------------------------------------------------------------------- /internal/hasher/maphash_hasher_test.go: -------------------------------------------------------------------------------- 1 | //go:build go1.24 2 | // +build go1.24 3 | 4 | package hasher 5 | 6 | import ( 7 | "strconv" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | type Foo struct { 14 | Bar string 15 | } 16 | 17 | func TestHasher_StringKey(t *testing.T) { 18 | hasher := NewHasher[string](nil) 19 | h := hasher.Hash(strconv.Itoa(123456)) 20 | for i := 0; i < 10; i++ { 21 | require.Equal(t, h, hasher.Hash(strconv.Itoa(123456))) 22 | } 23 | } 24 | 25 | func TestHasher_StructStringKey(t *testing.T) { 26 | hasher1 := NewHasher[Foo](nil) 27 | hasher2 := NewHasher[Foo](func(k Foo) string { 28 | return k.Bar 29 | }) 30 | h1 := uint64(0) 31 | h2 := uint64(0) 32 | for i := 0; i < 10; i++ { 33 | foo := Foo{Bar: strconv.Itoa(123456)} 34 | if h1 == 0 { 35 | h1 = hasher1.Hash(foo) 36 | } else { 37 | require.Equal(t, h1, hasher1.Hash(foo)) 38 | } 39 | } 40 | for i := 0; i < 10; i++ { 41 | foo := Foo{Bar: strconv.Itoa(123456)} 42 | if h2 == 0 { 43 | h2 = hasher2.Hash(foo) 44 | } else { 45 | require.Equal(t, h2, hasher2.Hash(foo)) 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /internal/list.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | "fmt" 7 | "io" 8 | "strings" 9 | 10 | "github.com/Yiling-J/theine-go/internal/hasher" 11 | ) 12 | 13 | const ( 14 | LIST_PROBATION uint8 = 1 15 | LIST_PROTECTED uint8 = 2 16 | WHEEL_LIST uint8 = 3 17 | 
LIST_WINDOW uint8 = 4 18 | ) 19 | 20 | // List represents a doubly linked list. 21 | // The zero value for List is an empty list ready to use. 22 | type List[K comparable, V any] struct { 23 | root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used 24 | len int64 // current list length(sum of costs) excluding (this) sentinel element 25 | count int // count of entries in list 26 | capacity uint 27 | listType uint8 // 1 tinylfu list, 2 timerwheel list 28 | } 29 | 30 | // New returns an initialized list. 31 | func NewList[K comparable, V any](size uint, listType uint8) *List[K, V] { 32 | l := &List[K, V]{listType: listType, capacity: size, root: Entry[K, V]{}} 33 | l.root.flag.SetRoot(true) 34 | l.root.setNext(&l.root, l.listType) 35 | l.root.setPrev(&l.root, l.listType) 36 | l.capacity = size 37 | return l 38 | } 39 | 40 | func (l *List[K, V]) Reset() { 41 | l.root.setNext(&l.root, l.listType) 42 | l.root.setPrev(&l.root, l.listType) 43 | l.len = 0 44 | } 45 | 46 | // Len returns the number of elements of list l. 47 | // The complexity is O(1). 
48 | func (l *List[K, V]) Len() int { return int(l.len) } 49 | 50 | func (l *List[K, V]) display() string { 51 | var s []string 52 | for e := l.Front(); e != nil; e = e.Next(l.listType) { 53 | s = append(s, fmt.Sprintf("%v", e.key)) 54 | } 55 | return strings.Join(s, "/") 56 | } 57 | 58 | func (l *List[K, V]) entries() []*Entry[K, V] { 59 | var s []*Entry[K, V] 60 | for e := l.Front(); e != nil; e = e.Next(l.listType) { 61 | s = append(s, e) 62 | } 63 | return s 64 | } 65 | 66 | func (l *List[K, V]) rangef(fn func(*Entry[K, V])) { 67 | for e := l.Front(); e != nil; e = e.Next(l.listType) { 68 | fn(e) 69 | } 70 | } 71 | 72 | func (l *List[K, V]) displayReverse() string { 73 | var s []string 74 | for e := l.Back(); e != nil; e = e.Prev(l.listType) { 75 | s = append(s, fmt.Sprintf("%v", e.key)) 76 | } 77 | return strings.Join(s, "/") 78 | } 79 | 80 | // Front returns the first element of list l or nil if the list is empty. 81 | func (l *List[K, V]) Front() *Entry[K, V] { 82 | e := l.root.next(l.listType) 83 | if e != &l.root { 84 | return e 85 | } 86 | return nil 87 | } 88 | 89 | // Back returns the last element of list l or nil if the list is empty. 
90 | func (l *List[K, V]) Back() *Entry[K, V] { 91 | e := l.root.prev(l.listType) 92 | if e != &l.root { 93 | return e 94 | } 95 | return nil 96 | } 97 | 98 | // insert inserts e after at, increments l.len 99 | func (l *List[K, V]) insert(e, at *Entry[K, V]) { 100 | if l.listType != WHEEL_LIST { 101 | if l.listType == LIST_PROTECTED { 102 | e.flag.SetProtected(true) 103 | } else if l.listType == LIST_PROBATION { 104 | e.flag.SetProbation(true) 105 | } else if l.listType == LIST_WINDOW { 106 | e.flag.SetWindow(true) 107 | } 108 | } 109 | 110 | e.setPrev(at, l.listType) 111 | e.setNext(at.next(l.listType), l.listType) 112 | e.prev(l.listType).setNext(e, l.listType) 113 | e.next(l.listType).setPrev(e, l.listType) 114 | l.len += e.policyWeight 115 | l.count += 1 116 | } 117 | 118 | // PushFront push entry to list head 119 | func (l *List[K, V]) PushFront(e *Entry[K, V]) { 120 | l.insert(e, &l.root) 121 | } 122 | 123 | // Push push entry to the back of list 124 | func (l *List[K, V]) PushBack(e *Entry[K, V]) { 125 | l.insert(e, l.root.prev(l.listType)) 126 | } 127 | 128 | // remove removes e from its list, decrements l.len 129 | func (l *List[K, V]) remove(e *Entry[K, V]) { 130 | e.prev(l.listType).setNext(e.next(l.listType), l.listType) 131 | e.next(l.listType).setPrev(e.prev(l.listType), l.listType) 132 | e.setNext(nil, l.listType) 133 | e.setPrev(nil, l.listType) 134 | if l.listType != WHEEL_LIST { 135 | e.flag.SetProbation(false) 136 | e.flag.SetProtected(false) 137 | e.flag.SetWindow(false) 138 | } 139 | l.len += -e.policyWeight 140 | l.count -= 1 141 | } 142 | 143 | // move moves e to next to at. 
144 | func (l *List[K, V]) move(e, at *Entry[K, V]) { 145 | if e == at { 146 | return 147 | } 148 | e.prev(l.listType).setNext(e.next(l.listType), l.listType) 149 | e.next(l.listType).setPrev(e.prev(l.listType), l.listType) 150 | 151 | e.setPrev(at, l.listType) 152 | e.setNext(at.next(l.listType), l.listType) 153 | e.prev(l.listType).setNext(e, l.listType) 154 | e.next(l.listType).setPrev(e, l.listType) 155 | } 156 | 157 | // Remove removes e from l if e is an element of list l. 158 | // It returns the element value e.Value. 159 | // The element must not be nil. 160 | func (l *List[K, V]) Remove(e *Entry[K, V]) { 161 | l.remove(e) 162 | } 163 | 164 | // MoveToFront moves element e to the front of list l. 165 | // If e is not an element of l, the list is not modified. 166 | // The element must not be nil. 167 | func (l *List[K, V]) MoveToFront(e *Entry[K, V]) { 168 | l.move(e, &l.root) 169 | } 170 | 171 | // MoveToBack moves element e to the back of list l. 172 | // If e is not an element of l, the list is not modified. 173 | // The element must not be nil. 174 | func (l *List[K, V]) MoveToBack(e *Entry[K, V]) { 175 | l.move(e, l.root.prev(l.listType)) 176 | } 177 | 178 | // MoveBefore moves element e to its new position before mark. 179 | // If e or mark is not an element of l, or e == mark, the list is not modified. 180 | // The element and mark must not be nil. 181 | func (l *List[K, V]) MoveBefore(e, mark *Entry[K, V]) { 182 | l.move(e, mark.prev(l.listType)) 183 | } 184 | 185 | // MoveAfter moves element e to its new position after mark. 186 | // If e or mark is not an element of l, or e == mark, the list is not modified. 187 | // The element and mark must not be nil. 
188 | func (l *List[K, V]) MoveAfter(e, mark *Entry[K, V]) { 189 | l.move(e, mark) 190 | } 191 | 192 | func (l *List[K, V]) PopTail() *Entry[K, V] { 193 | entry := l.root.prev(l.listType) 194 | if entry != nil && entry != &l.root { 195 | l.remove(entry) 196 | return entry 197 | } 198 | return nil 199 | } 200 | 201 | func (l *List[K, V]) Contains(entry *Entry[K, V]) bool { 202 | for e := l.Front(); e != nil; e = e.Next(l.listType) { 203 | if e == entry { 204 | return true 205 | } 206 | } 207 | return false 208 | } 209 | 210 | func (l *List[K, V]) Persist(writer io.Writer, blockEncoder *gob.Encoder, sketch *CountMinSketch, hasher *hasher.Hasher[K], tp uint8) error { 211 | buffer := bytes.NewBuffer(make([]byte, 0, BlockBufferSize)) 212 | block := NewBlock[*Pentry[K, V]](tp, buffer, blockEncoder) 213 | for er := l.Front(); er != nil; er = er.Next(l.listType) { 214 | e := er.pentry() 215 | e.Frequency = int(sketch.Estimate(hasher.Hash(e.Key))) 216 | full, err := block.Write(e) 217 | if err != nil { 218 | return err 219 | } 220 | if full { 221 | buffer.Reset() 222 | block = NewBlock[*Pentry[K, V]](tp, buffer, blockEncoder) 223 | } 224 | } 225 | err := block.Save() 226 | if err != nil { 227 | return err 228 | } 229 | buffer.Reset() 230 | return nil 231 | } 232 | -------------------------------------------------------------------------------- /internal/list_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestList_PushPop(t *testing.T) { 11 | l := NewList[string, string](5, LIST_PROBATION) 12 | require.Equal(t, uint(5), l.capacity) 13 | require.Equal(t, LIST_PROBATION, l.listType) 14 | for i := 0; i < 5; i++ { 15 | l.PushFront(NewEntry(fmt.Sprintf("%d", i), "", 1, 0)) 16 | } 17 | require.Equal(t, 5, int(l.len)) 18 | require.Equal(t, "4/3/2/1/0", l.display()) 19 | require.Equal(t, "0/1/2/3/4", 
l.displayReverse()) 20 | 21 | for i := 0; i < 5; i++ { 22 | entry := l.PopTail() 23 | require.Equal(t, fmt.Sprintf("%d", i), entry.key) 24 | } 25 | entry := l.PopTail() 26 | require.Nil(t, entry) 27 | 28 | entries := []*Entry[string, string]{} 29 | for i := 0; i < 5; i++ { 30 | new := NewEntry(fmt.Sprintf("%d", i), "", 1, 0) 31 | l.PushFront(new) 32 | entries = append(entries, new) 33 | } 34 | require.Equal(t, "4/3/2/1/0", l.display()) 35 | l.MoveToBack(entries[2]) 36 | require.Equal(t, "4/3/1/0/2", l.display()) 37 | require.Equal(t, "2/0/1/3/4", l.displayReverse()) 38 | l.MoveBefore(entries[1], entries[3]) 39 | require.Equal(t, "4/1/3/0/2", l.display()) 40 | require.Equal(t, "2/0/3/1/4", l.displayReverse()) 41 | l.MoveAfter(entries[2], entries[4]) 42 | require.Equal(t, "4/2/1/3/0", l.display()) 43 | require.Equal(t, "0/3/1/2/4", l.displayReverse()) 44 | l.Remove(entries[1]) 45 | require.Equal(t, "4/2/3/0", l.display()) 46 | require.Equal(t, "0/3/2/4", l.displayReverse()) 47 | } 48 | 49 | func TestList_CountCost(t *testing.T) { 50 | l := NewList[string, string](100, LIST_PROBATION) 51 | require.Equal(t, uint(100), l.capacity) 52 | require.Equal(t, LIST_PROBATION, l.listType) 53 | for i := 0; i < 5; i++ { 54 | l.PushFront(NewEntry(fmt.Sprintf("%d", i), "", 20, 0)) 55 | } 56 | require.Equal(t, 100, int(l.len)) 57 | require.Equal(t, 5, l.count) 58 | for i := 0; i < 3; i++ { 59 | entry := l.PopTail() 60 | require.NotNil(t, entry) 61 | } 62 | require.Equal(t, 40, int(l.len)) 63 | require.Equal(t, 2, l.count) 64 | } 65 | 66 | func TestList_WheelList(t *testing.T) { 67 | l := NewList[string, string](5, WHEEL_LIST) 68 | require.Equal(t, uint(5), l.capacity) 69 | require.Equal(t, WHEEL_LIST, l.listType) 70 | for i := 0; i < 5; i++ { 71 | l.PushFront(NewEntry(fmt.Sprintf("%d", i), "", 1, 0)) 72 | } 73 | require.Equal(t, 5, int(l.len)) 74 | require.Equal(t, "4/3/2/1/0", l.display()) 75 | require.Equal(t, "0/1/2/3/4", l.displayReverse()) 76 | 77 | for i := 0; i < 5; i++ { 78 | 
entry := l.PopTail() 79 | require.Equal(t, fmt.Sprintf("%d", i), entry.key) 80 | } 81 | entry := l.PopTail() 82 | require.Nil(t, entry) 83 | } 84 | -------------------------------------------------------------------------------- /internal/persistence.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | 7 | "github.com/zeebo/xxh3" 8 | ) 9 | 10 | const BlockBufferSize = 4 * 1024 * 1024 11 | 12 | type DataBlock[V any] struct { 13 | Type uint8 // 1: meta&timerwheel, 2: window, 3: probation, 4: protected 14 | SecondaryType uint8 15 | CheckSum uint64 16 | Index uint64 // helper filed, usage depends on Type/SecondaryType 17 | Data []byte 18 | clean bool 19 | buffer *bytes.Buffer // used in entryDecoder 20 | // datablock should share single blockEncoder 21 | // but use separate entryEncoder 22 | blockEncoder *gob.Encoder 23 | entryEncoder *gob.Encoder 24 | } 25 | 26 | func NewBlock[V any](tp uint8, buffer *bytes.Buffer, blockEncoder *gob.Encoder) *DataBlock[V] { 27 | return &DataBlock[V]{ 28 | Type: tp, 29 | buffer: buffer, 30 | blockEncoder: blockEncoder, 31 | entryEncoder: gob.NewEncoder(buffer), 32 | clean: true, 33 | } 34 | } 35 | 36 | func (b *DataBlock[V]) Save() error { 37 | if b.clean { 38 | return nil 39 | } 40 | b.clean = true 41 | data := b.buffer.Bytes() 42 | b.CheckSum = xxh3.Hash(data) 43 | b.Data = data 44 | return b.blockEncoder.Encode(b) 45 | } 46 | 47 | func (b *DataBlock[V]) Write(item V) (full bool, err error) { 48 | err = b.entryEncoder.Encode(item) 49 | if err != nil { 50 | return false, err 51 | } 52 | b.clean = false 53 | if b.buffer.Len() >= BlockBufferSize { 54 | b.clean = true 55 | data := b.buffer.Bytes() 56 | b.CheckSum = xxh3.Hash(data) 57 | b.Data = data 58 | err = b.blockEncoder.Encode(b) 59 | return true, err 60 | } 61 | return false, nil 62 | } 63 | 64 | func (b *DataBlock[V]) MarkDirty() { 65 | b.clean = false 66 | } 67 | 
-------------------------------------------------------------------------------- /internal/persistence_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "math/rand" 5 | "os" 6 | "strconv" 7 | "strings" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestStorePersistence_Simple(t *testing.T) { 16 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 17 | for i := 0; i < 20; i++ { 18 | _ = store.Set(i, i, 1, 0) 19 | } 20 | // fill window and move 10-19 to main probation 21 | for i := 20; i < 30; i++ { 22 | _ = store.Set(i, i, 1, 0) 23 | } 24 | 25 | store.Wait() 26 | for i := 0; i < 10; i++ { 27 | _, _ = store.Get(i) 28 | } 29 | 30 | for _, buf := range store.stripedBuffer { 31 | store.drainRead(buf.items()) 32 | } 33 | // now 20-29 in window, 0-9 in protected and 10-19 in probation 34 | require.Equal(t, 10, store.policy.window.Len()) 35 | require.Equal(t, 10, store.policy.slru.protected.Len()) 36 | require.Equal(t, 10, store.policy.slru.probation.Len()) 37 | require.Equal(t, 30, int(store.policy.weightedSize)) 38 | require.ElementsMatch(t, 39 | strings.Split("9/8/7/6/5/4/3/2/1/0", "/"), 40 | strings.Split(store.policy.slru.protected.display(), "/"), 41 | ) 42 | require.ElementsMatch(t, 43 | strings.Split("19/18/17/16/15/14/13/12/11/10", "/"), 44 | strings.Split(store.policy.slru.probation.display(), "/"), 45 | ) 46 | 47 | // update sketch 48 | for i := 0; i < 10; i++ { 49 | _, _ = store.Get(5) 50 | } 51 | for _, buf := range store.stripedBuffer { 52 | store.drainRead(buf.items()) 53 | } 54 | count := store.policy.sketch.Estimate(store.hasher.Hash(5)) 55 | require.True(t, count > 5) 56 | 57 | f, err := os.Create("stest") 58 | defer os.Remove("stest") 59 | require.Nil(t, err) 60 | err = store.Persist(0, f) 61 | require.Nil(t, err) 62 | f.Close() 63 | 64 | new := NewStore[int, int](&StoreOptions[int, 
int]{MaxSize: 1000}) 65 | f, err = os.Open("stest") 66 | require.Nil(t, err) 67 | err = new.Recover(0, f) 68 | require.Nil(t, err) 69 | f.Close() 70 | m := map[int]int{} 71 | new.Range(func(key, value int) bool { 72 | m[key] = value 73 | return true 74 | }) 75 | require.Equal(t, 30, len(m)) 76 | for k, v := range m { 77 | require.Equal(t, k, v) 78 | } 79 | require.Equal(t, 10, new.policy.window.Len()) 80 | require.Equal(t, 10, new.policy.slru.protected.Len()) 81 | require.Equal(t, 10, new.policy.slru.probation.Len()) 82 | require.Equal(t, 30, int(new.policy.weightedSize)) 83 | 84 | require.ElementsMatch(t, 85 | strings.Split("9/8/7/6/5/4/3/2/1/0", "/"), 86 | strings.Split(store.policy.slru.protected.display(), "/"), 87 | ) 88 | require.Equal(t, "19/18/17/16/15/14/13/12/11/10", new.policy.slru.probation.display()) 89 | 90 | count = new.policy.sketch.Estimate(new.hasher.Hash(5)) 91 | require.True(t, count > 5) 92 | } 93 | 94 | func TestStorePersistence_TTL(t *testing.T) { 95 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 96 | for i := 0; i < 10; i++ { 97 | _ = store.Set(i, i, 1, 2*time.Second) 98 | } 99 | for i := 10; i < 20; i++ { 100 | _ = store.Set(i, i, 1, 5*time.Second) 101 | } 102 | for i := 20; i < 30; i++ { 103 | _ = store.Set(i, i, 1, 1*time.Second) 104 | } 105 | time.Sleep(200 * time.Millisecond) 106 | 107 | f, err := os.Create("stest") 108 | defer os.Remove("stest") 109 | require.Nil(t, err) 110 | err = store.Persist(0, f) 111 | require.Nil(t, err) 112 | f.Close() 113 | // expire 20-29 114 | time.Sleep(time.Second) 115 | new := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 116 | f, err = os.Open("stest") 117 | require.Nil(t, err) 118 | err = new.Recover(0, f) 119 | require.Nil(t, err) 120 | f.Close() 121 | m := map[int]int{} 122 | new.Range(func(key, value int) bool { 123 | m[key] = value 124 | return true 125 | }) 126 | require.Equal(t, 20, len(m)) 127 | time.Sleep(2 * time.Second) 128 | for i := 0; i < 10; i++ { 129 | 
_, ok := new.Get(i) 130 | require.False(t, ok) 131 | } 132 | for i := 10; i < 20; i++ { 133 | _, ok := new.Get(i) 134 | require.True(t, ok) 135 | } 136 | time.Sleep(3 * time.Second) 137 | for i := 10; i < 20; i++ { 138 | _, ok := new.Get(i) 139 | require.False(t, ok) 140 | } 141 | } 142 | 143 | func TestStorePersistence_Resize(t *testing.T) { 144 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 145 | for i := 0; i < 1000; i++ { 146 | _ = store.Set(i, i, 1, 0) 147 | } 148 | time.Sleep(200 * time.Millisecond) 149 | for i := 0; i < 500; i++ { 150 | _, _ = store.Get(i) 151 | } 152 | for _, buf := range store.stripedBuffer { 153 | store.drainRead(buf.items()) 154 | } 155 | // now 0-499 in protected and 500-989 in probation, 990-999 in window 156 | require.Equal(t, 500, store.policy.slru.protected.Len()) 157 | require.Equal(t, 490, store.policy.slru.probation.Len()) 158 | 159 | f, err := os.Create("stest") 160 | defer os.Remove("stest") 161 | require.Nil(t, err) 162 | err = store.Persist(0, f) 163 | require.Nil(t, err) 164 | f.Close() 165 | 166 | new := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 100}) 167 | f, err = os.Open("stest") 168 | require.Nil(t, err) 169 | err = new.Recover(0, f) 170 | require.Nil(t, err) 171 | f.Close() 172 | // new cache protected size is 79, should contains latest 80 entries of original protected 173 | require.Equal(t, 79, new.policy.slru.protected.Len()) 174 | // new cache probation size is 20, should contains latest 20 entries of original probation 175 | require.Equal(t, 20, new.policy.slru.probation.Len()) 176 | // The original map size is 1000, so the sketch table size is 1024. 177 | // The new store size is smaller (100), but since the sketch is a memory-efficient data structure, 178 | // increasing its size should be safe. 
179 | require.Equal(t, 1024, len(new.policy.sketch.Table)) 180 | 181 | for _, i := range strings.Split(new.policy.slru.protected.display(), "/") { 182 | in, err := strconv.Atoi(i) 183 | require.Nil(t, err) 184 | require.True(t, in < 500) 185 | } 186 | 187 | for _, i := range strings.Split(new.policy.slru.probation.display(), "/") { 188 | in, err := strconv.Atoi(i) 189 | require.Nil(t, err) 190 | require.True(t, in >= 500 && in < 1000) 191 | } 192 | } 193 | 194 | type DelayWriter struct { 195 | f *os.File 196 | beforeWrite func() 197 | } 198 | 199 | func (dw *DelayWriter) Write(p []byte) (n int, err error) { 200 | dw.beforeWrite() 201 | time.Sleep(1 * time.Second) 202 | return dw.f.Write(p) 203 | } 204 | 205 | func TestStorePersistence_Readonly(t *testing.T) { 206 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 207 | for i := 0; i < 1000; i++ { 208 | _ = store.Set(i, i, 1, 0) 209 | } 210 | for i := 0; i < 500; i++ { 211 | _, _ = store.Get(i) 212 | } 213 | store.Wait() 214 | var counter atomic.Uint64 215 | persistDone := make(chan bool) 216 | 217 | v, ok := store.Get(100) 218 | require.True(t, ok) 219 | require.Equal(t, 100, v) 220 | 221 | startCounter := make(chan bool) 222 | started := false 223 | go func() { 224 | done := false 225 | <-startCounter 226 | for !done { 227 | select { 228 | case <-persistDone: 229 | done = true 230 | default: 231 | shard := store.shards[rand.Intn(len(store.shards))] 232 | tk := shard.mu.RLock() 233 | shard.mu.RUnlock(tk) 234 | counter.Add(1) 235 | } 236 | } 237 | }() 238 | 239 | f, err := os.Create("stest") 240 | defer os.Remove("stest") 241 | require.Nil(t, err) 242 | err = store.Persist(0, &DelayWriter{f: f, beforeWrite: func() { 243 | if !started { 244 | close(startCounter) 245 | started = true 246 | } 247 | }}) 248 | require.Nil(t, err) 249 | f.Close() 250 | persistDone <- true 251 | 252 | new := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 253 | f, err = os.Open("stest") 254 | require.Nil(t, 
err) 255 | err = new.Recover(0, f) 256 | require.Nil(t, err) 257 | f.Close() 258 | 259 | // read should not be blocked during persistence 260 | require.Greater(t, counter.Load(), uint64(100)) 261 | 262 | for i := 0; i < 1000; i++ { 263 | new.Get(i) 264 | new.Set(i, 123, 1, 0) 265 | } 266 | new.Wait() 267 | } 268 | -------------------------------------------------------------------------------- /internal/policy_bench_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | ) 7 | 8 | func BenchmarkPolicy_Read(b *testing.B) { 9 | store := NewStore[uint64, bool](&StoreOptions[uint64, bool]{MaxSize: 100000}) 10 | r := rand.New(rand.NewSource(0)) 11 | z := rand.NewZipf(r, 1.4, 9.0, 100000) 12 | 13 | witems := []WriteBufItem[uint64, bool]{} 14 | ritems := []ReadBufItem[uint64, bool]{} 15 | for i := 0; i < 100000; i++ { 16 | k := z.Uint64() 17 | e := &Entry[uint64, bool]{ 18 | key: k, 19 | value: true, 20 | } 21 | e.weight.Store(1) 22 | witems = append(witems, WriteBufItem[uint64, bool]{ 23 | entry: e, 24 | costChange: 0, 25 | code: NEW, 26 | }) 27 | } 28 | for _, wi := range witems { 29 | store.policy.Set(wi.entry) 30 | ritems = append(ritems, ReadBufItem[uint64, bool]{ 31 | entry: wi.entry, 32 | hash: store.hasher.Hash(wi.entry.key), 33 | }) 34 | } 35 | 36 | b.ResetTimer() 37 | for i := 0; i < b.N; i++ { 38 | store.policy.Access(ritems[i&65535]) 39 | } 40 | } 41 | 42 | func BenchmarkPolicy_Write(b *testing.B) { 43 | store := NewStore[uint64, bool](&StoreOptions[uint64, bool]{MaxSize: 100000}) 44 | 45 | b.ResetTimer() 46 | for i := 0; i < b.N; i++ { 47 | e := &Entry[uint64, bool]{ 48 | key: uint64(i), 49 | value: true, 50 | } 51 | e.weight.Store(1) 52 | e.policyWeight = 1 53 | store.sinkWrite(WriteBufItem[uint64, bool]{ 54 | entry: e, 55 | costChange: 0, 56 | code: NEW, 57 | }) 58 | } 59 | } 60 | 
-------------------------------------------------------------------------------- /internal/policy_flag.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | // Flag struct uses 8 bits, with each bit representing a boolean value. 4 | // Currently, 5 bits are used. 5 | // All bits are read/write in policy only(with policy mutex), so safe to put them together. 6 | // Bit 1: Indicates if this entry is a root of linked list. 7 | // Bit 2: Indicates if this entry is on probation. 8 | // Bit 3: Indicates if this entry is protected. 9 | // Bit 4: Indicates if this entry is removed from main(SLRU). 10 | // Bit 5: Indicates if this entry is from NVM. 11 | // Bit 6: Indicates if this entry is deleted by API. 12 | // Bit 7: Indicates if this entry is window. 13 | type Flag struct { 14 | Flags int8 15 | } 16 | 17 | func (f *Flag) SetRoot(isRoot bool) { 18 | if isRoot { 19 | f.Flags |= (1 << 0) // Set bit 1 (root) 20 | } else { 21 | f.Flags &^= (1 << 0) // Clear bit 1 (root) 22 | } 23 | } 24 | 25 | func (f *Flag) SetProbation(isProbation bool) { 26 | if isProbation { 27 | f.Flags |= (1 << 1) // Set bit 2 (probation) 28 | } else { 29 | f.Flags &^= (1 << 1) // Clear bit 2 (probation) 30 | } 31 | } 32 | 33 | func (f *Flag) SetProtected(isProtected bool) { 34 | if isProtected { 35 | f.Flags |= (1 << 2) // Set bit 3 (protected) 36 | } else { 37 | f.Flags &^= (1 << 2) // Clear bit 3 (protected) 38 | } 39 | } 40 | 41 | func (f *Flag) SetWindow(isWindow bool) { 42 | if isWindow { 43 | f.Flags |= (1 << 6) // Set bit 7 (window) 44 | } else { 45 | f.Flags &^= (1 << 6) // Clear bit 7 (window) 46 | } 47 | } 48 | 49 | func (f *Flag) SetRemoved(isRemoved bool) { 50 | if isRemoved { 51 | f.Flags |= (1 << 3) // Set bit 4 (removed) 52 | } else { 53 | f.Flags &^= (1 << 3) // Clear bit 4 (removed) 54 | } 55 | } 56 | 57 | func (f *Flag) SetFromNVM(isFromNVM bool) { 58 | if isFromNVM { 59 | f.Flags |= (1 << 4) // Set bit 5 (from NVM) 60 | } 
else { 61 | f.Flags &^= (1 << 4) // Clear bit 5 (from NVM) 62 | } 63 | } 64 | 65 | func (f *Flag) SetDeleted(isDeleted bool) { 66 | if isDeleted { 67 | f.Flags |= (1 << 5) // Set bit 6 (deleted) 68 | } else { 69 | f.Flags &^= (1 << 5) // Clear bit 6 (deleted) 70 | } 71 | } 72 | 73 | func (f *Flag) IsRoot() bool { 74 | return (f.Flags & (1 << 0)) != 0 75 | } 76 | 77 | func (f *Flag) IsProbation() bool { 78 | return (f.Flags & (1 << 1)) != 0 79 | } 80 | 81 | func (f *Flag) IsProtected() bool { 82 | return (f.Flags & (1 << 2)) != 0 83 | } 84 | 85 | func (f *Flag) IsRemoved() bool { 86 | return (f.Flags & (1 << 3)) != 0 87 | } 88 | 89 | func (f *Flag) IsFromNVM() bool { 90 | return (f.Flags & (1 << 4)) != 0 91 | } 92 | 93 | func (f *Flag) IsDeleted() bool { 94 | return (f.Flags & (1 << 5)) != 0 95 | } 96 | 97 | func (f *Flag) IsWindow() bool { 98 | return (f.Flags & (1 << 6)) != 0 99 | } 100 | -------------------------------------------------------------------------------- /internal/policy_flag_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import "testing" 4 | 5 | func TestFlag_SetRoot(t *testing.T) { 6 | f := Flag{} 7 | f.SetRoot(true) 8 | if !f.IsRoot() { 9 | t.Error("Expected root flag to be true, got false") 10 | } 11 | f.SetRoot(false) 12 | if f.IsRoot() { 13 | t.Error("Expected root flag to be false, got true") 14 | } 15 | } 16 | 17 | func TestFlag_SetProbation(t *testing.T) { 18 | f := Flag{} 19 | f.SetProbation(true) 20 | if !f.IsProbation() { 21 | t.Error("Expected probation flag to be true, got false") 22 | } 23 | f.SetProbation(false) 24 | if f.IsProbation() { 25 | t.Error("Expected probation flag to be false, got true") 26 | } 27 | } 28 | 29 | func TestFlag_SetProtected(t *testing.T) { 30 | f := Flag{} 31 | f.SetProtected(true) 32 | if !f.IsProtected() { 33 | t.Error("Expected protected flag to be true, got false") 34 | } 35 | f.SetProtected(false) 36 | if f.IsProtected() { 37 | 
t.Error("Expected protected flag to be false, got true") 38 | } 39 | } 40 | 41 | func TestFlag_SetRemoved(t *testing.T) { 42 | f := Flag{} 43 | f.SetRemoved(true) 44 | if !f.IsRemoved() { 45 | t.Error("Expected removed flag to be true, got false") 46 | } 47 | f.SetRemoved(false) 48 | if f.IsRemoved() { 49 | t.Error("Expected removed flag to be false, got true") 50 | } 51 | } 52 | 53 | func TestFlag_SetFromNVM(t *testing.T) { 54 | f := Flag{} 55 | f.SetFromNVM(true) 56 | if !f.IsFromNVM() { 57 | t.Error("Expected from NVM flag to be true, got false") 58 | } 59 | f.SetFromNVM(false) 60 | if f.IsFromNVM() { 61 | t.Error("Expected from NVM flag to be false, got true") 62 | } 63 | } 64 | 65 | func TestFlag_CombinedFlags(t *testing.T) { 66 | f := Flag{} 67 | f.SetRoot(true) 68 | f.SetProbation(true) 69 | f.SetProtected(false) 70 | f.SetRemoved(true) 71 | f.SetFromNVM(true) 72 | 73 | if !f.IsRoot() { 74 | t.Error("Expected root flag to be true, got false") 75 | } 76 | if !f.IsProbation() { 77 | t.Error("Expected probation flag to be true, got false") 78 | } 79 | if f.IsProtected() { 80 | t.Error("Expected protected flag to be false, got true") 81 | } 82 | if !f.IsRemoved() { 83 | t.Error("Expected removed flag to be true, got false") 84 | } 85 | if !f.IsFromNVM() { 86 | t.Error("Expected from NVM flag to be true, got false") 87 | } 88 | 89 | // reset 90 | f.Flags = 0 91 | 92 | if f.IsRoot() { 93 | t.Error("Expected root flag to be false, got true") 94 | } 95 | if f.IsProbation() { 96 | t.Error("Expected probation flag to be false, got true") 97 | } 98 | if f.IsProtected() { 99 | t.Error("Expected protected flag to be false, got true") 100 | } 101 | if f.IsRemoved() { 102 | t.Error("Expected removed flag to be false, got true") 103 | } 104 | if f.IsFromNVM() { 105 | t.Error("Expected from NVM flag to be false, got true") 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /internal/rbmutex.go: 
-------------------------------------------------------------------------------- 1 | // Copyright 2024 Yiling-J 2 | // Copyright 2024 Andrei Pechkurov 3 | 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | package internal 17 | 18 | import ( 19 | "runtime" 20 | "sync" 21 | "sync/atomic" 22 | "time" 23 | 24 | "github.com/Yiling-J/theine-go/internal/xruntime" 25 | ) 26 | 27 | // slow-down guard 28 | const nslowdown = 7 29 | 30 | // pool for reader tokens 31 | var rtokenPool sync.Pool 32 | 33 | // RToken is a reader lock token. 34 | type RToken struct { 35 | slot uint32 36 | //lint:ignore U1000 prevents false sharing 37 | pad [xruntime.CacheLineSize - 4]byte 38 | } 39 | 40 | // A RBMutex is a reader biased reader/writer mutual exclusion lock. 41 | // The lock can be held by an many readers or a single writer. 42 | // The zero value for a RBMutex is an unlocked mutex. 43 | // 44 | // A RBMutex must not be copied after first use. 45 | // 46 | // RBMutex is based on a modified version of BRAVO 47 | // (Biased Locking for Reader-Writer Locks) algorithm: 48 | // https://arxiv.org/pdf/1810.01553.pdf 49 | // 50 | // RBMutex is a specialized mutex for scenarios, such as caches, 51 | // where the vast majority of locks are acquired by readers and write 52 | // lock acquire attempts are infrequent. In such scenarios, RBMutex 53 | // performs better than sync.RWMutex on large multicore machines. 
54 | // 55 | // RBMutex extends sync.RWMutex internally and uses it as the "reader 56 | // bias disabled" fallback, so the same semantics apply. The only 57 | // noticeable difference is in reader tokens returned from the 58 | // RLock/RUnlock methods. 59 | type RBMutex struct { 60 | rslots []rslot 61 | rmask uint32 62 | rbias int32 63 | inhibitUntil time.Time 64 | rw sync.RWMutex 65 | } 66 | 67 | type rslot struct { 68 | mu int32 69 | //lint:ignore U1000 prevents false sharing 70 | pad [xruntime.CacheLineSize - 4]byte 71 | } 72 | 73 | // NewRBMutex creates a new RBMutex instance. 74 | func NewRBMutex() *RBMutex { 75 | nslots := RoundUpPowerOf2(xruntime.Parallelism()) 76 | mu := RBMutex{ 77 | rslots: make([]rslot, nslots), 78 | rmask: nslots - 1, 79 | rbias: 1, 80 | } 81 | return &mu 82 | } 83 | 84 | // TryRLock tries to lock m for reading without blocking. 85 | // When TryRLock succeeds, it returns true and a reader token. 86 | // In case of a failure, a false is returned. 87 | func (mu *RBMutex) TryRLock() (bool, *RToken) { 88 | if t := mu.fastRlock(); t != nil { 89 | return true, t 90 | } 91 | // Optimistic slow path. 92 | if mu.rw.TryRLock() { 93 | if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) { 94 | atomic.StoreInt32(&mu.rbias, 1) 95 | } 96 | return true, nil 97 | } 98 | return false, nil 99 | } 100 | 101 | // RLock locks m for reading and returns a reader token. The 102 | // token must be used in the later RUnlock call. 103 | // 104 | // Should not be used for recursive read locking; a blocked Lock 105 | // call excludes new readers from acquiring the lock. 106 | func (mu *RBMutex) RLock() *RToken { 107 | if t := mu.fastRlock(); t != nil { 108 | return t 109 | } 110 | // Slow path. 
	// Slow path: fall back to the embedded RWMutex's reader lock.
	mu.rw.RLock()
	// A writer disabled reader bias earlier; re-enable it once the
	// writer-set inhibit window has passed so future readers can use
	// the fast path again.
	if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
		atomic.StoreInt32(&mu.rbias, 1)
	}
	// A nil token tells RUnlock to release mu.rw rather than a slot.
	return nil
}

// fastRlock attempts the reader-biased fast path: instead of touching
// the shared RWMutex it increments a per-slot counter, starting from
// the slot remembered in a pooled token (or a random slot for a fresh
// token). It returns a token on success, or nil when reader bias is
// off or every probed slot CAS fails.
func (mu *RBMutex) fastRlock() *RToken {
	if atomic.LoadInt32(&mu.rbias) == 1 {
		t, ok := rtokenPool.Get().(*RToken)
		if !ok {
			t = new(RToken)
			// Randomize the starting slot to spread readers out.
			t.slot = xruntime.Fastrand()
		}
		// Try all available slots to distribute reader threads to slots.
		for i := 0; i < len(mu.rslots); i++ {
			slot := t.slot + uint32(i)
			rslot := &mu.rslots[slot&mu.rmask]
			rslotmu := atomic.LoadInt32(&rslot.mu)
			if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
				// Re-check bias: a writer may have cleared it between
				// our first load and the CAS above.
				if atomic.LoadInt32(&mu.rbias) == 1 {
					// Hot path succeeded.
					t.slot = slot
					return t
				}
				// The mutex is no longer reader biased. Roll back.
				atomic.AddInt32(&rslot.mu, -1)
				rtokenPool.Put(t)
				return nil
			}
			// Contention detected. Give a try with the next slot.
		}
	}
	return nil
}

// RUnlock undoes a single RLock call. A reader token obtained from
// the RLock call must be provided. RUnlock does not affect other
// simultaneous readers. A panic is raised if m is not locked for
// reading on entry to RUnlock.
func (mu *RBMutex) RUnlock(t *RToken) {
	// A nil token means this reader took the slow path over mu.rw.
	if t == nil {
		mu.rw.RUnlock()
		return
	}
	if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
		panic("invalid reader state detected")
	}
	// Recycle the token so its slot affinity is reused by later RLocks.
	rtokenPool.Put(t)
}

// TryLock tries to lock m for writing without blocking.
func (mu *RBMutex) TryLock() bool {
	if mu.rw.TryLock() {
		if atomic.LoadInt32(&mu.rbias) == 1 {
			// Disable reader bias, then verify no fast-path reader is
			// still inside a slot; unlike Lock, TryLock must not wait.
			atomic.StoreInt32(&mu.rbias, 0)
			for i := 0; i < len(mu.rslots); i++ {
				if atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
					// There is a reader. Roll back.
					atomic.StoreInt32(&mu.rbias, 1)
					mu.rw.Unlock()
					return false
				}
			}
		}
		return true
	}
	return false
}

// Lock locks m for writing. If the lock is already locked for
// reading or writing, Lock blocks until the lock is available.
func (mu *RBMutex) Lock() {
	mu.rw.Lock()
	if atomic.LoadInt32(&mu.rbias) == 1 {
		atomic.StoreInt32(&mu.rbias, 0)
		// Spin until every fast-path reader drains its slot. The time
		// spent waiting scales with reader activity, so reader bias is
		// inhibited for nslowdown times that long (see inhibitUntil) to
		// keep writer acquisition amortized-cheap under heavy reads.
		start := time.Now()
		for i := 0; i < len(mu.rslots); i++ {
			for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
				runtime.Gosched()
			}
		}
		mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
	}
}

// Unlock unlocks m for writing. A panic is raised if m is not locked
// for writing on entry to Unlock.
//
// As with RWMutex, a locked RBMutex is not associated with a
// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
// then arrange for another goroutine to RUnlock (Unlock) it.
203 | func (mu *RBMutex) Unlock() { 204 | mu.rw.Unlock() 205 | } 206 | -------------------------------------------------------------------------------- /internal/secondary_cache.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | "sync/atomic" 7 | 8 | "github.com/Yiling-J/theine-go/internal/clock" 9 | ) 10 | 11 | type Serializer[T any] interface { 12 | Marshal(v T) ([]byte, error) 13 | Unmarshal(raw []byte, v *T) error 14 | } 15 | 16 | type SecondaryCacheItem[K comparable, V any] struct { 17 | entry *Entry[K, V] 18 | reason RemoveReason 19 | shard *Shard[K, V] 20 | } 21 | 22 | type SecondaryCache[K comparable, V any] interface { 23 | Get(key K) (value V, cost int64, expire int64, ok bool, err error) 24 | Set(key K, value V, cost int64, expire int64) error 25 | Delete(key K) error 26 | HandleAsyncError(err error) 27 | } 28 | 29 | // used in test only 30 | type SimpleMapSecondary[K comparable, V any] struct { 31 | m map[K]*Entry[K, V] 32 | ErrCounter atomic.Uint64 33 | mu sync.Mutex 34 | ErrMode bool 35 | } 36 | 37 | func NewSimpleMapSecondary[K comparable, V any]() *SimpleMapSecondary[K, V] { 38 | return &SimpleMapSecondary[K, V]{ 39 | m: make(map[K]*Entry[K, V]), 40 | } 41 | } 42 | 43 | func (s *SimpleMapSecondary[K, V]) Get(key K) (value V, cost int64, expire int64, ok bool, err error) { 44 | s.mu.Lock() 45 | defer s.mu.Unlock() 46 | 47 | e, ok := s.m[key] 48 | if !ok { 49 | return 50 | } 51 | return e.value, e.weight.Load(), e.expire.Load(), true, nil 52 | } 53 | 54 | func (s *SimpleMapSecondary[K, V]) Set(key K, value V, cost int64, expire int64) error { 55 | s.mu.Lock() 56 | defer s.mu.Unlock() 57 | 58 | if s.ErrMode { 59 | return errors.New("err") 60 | } 61 | 62 | e := &Entry[K, V]{ 63 | value: value, 64 | } 65 | e.weight.Store(cost) 66 | 67 | s.m[key] = e 68 | s.m[key].expire.Store(expire) 69 | return nil 70 | } 71 | 72 | func (s *SimpleMapSecondary[K, V]) 
Delete(key K) error { 73 | s.mu.Lock() 74 | defer s.mu.Unlock() 75 | 76 | if _, ok := s.m[key]; !ok { 77 | return nil 78 | } 79 | delete(s.m, key) 80 | return nil 81 | } 82 | 83 | func (s *SimpleMapSecondary[K, V]) SetClock(clock *clock.Clock) { 84 | } 85 | 86 | func (s *SimpleMapSecondary[K, V]) HandleAsyncError(err error) { 87 | if err != nil { 88 | s.ErrCounter.Add(1) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /internal/singleflight.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 Yiling-J 2 | // Copyright 2013 The Go Authors. All rights reserved. 3 | // Use of this source code is governed by a BSD-style 4 | // license that can be found in the LICENSE file. 5 | 6 | // Package singleflight provides a duplicate function call suppression 7 | // mechanism. 8 | package internal 9 | 10 | import ( 11 | "bytes" 12 | "errors" 13 | "fmt" 14 | "runtime" 15 | "runtime/debug" 16 | "sync" 17 | "sync/atomic" 18 | ) 19 | 20 | // errGoexit indicates the runtime.Goexit was called in 21 | // the user given function. 22 | var errGoexit = errors.New("runtime.Goexit was called") 23 | 24 | // A panicError is an arbitrary value recovered from a panic 25 | // with the stack trace during the execution of given function. 26 | type panicError struct { 27 | value interface{} 28 | stack []byte 29 | } 30 | 31 | // Error implements error interface. 32 | func (p *panicError) Error() string { 33 | return fmt.Sprintf("%v\n\n%s", p.value, p.stack) 34 | } 35 | 36 | func newPanicError(v interface{}) error { 37 | stack := debug.Stack() 38 | 39 | // The first line of the stack trace is of the form "goroutine N [status]:" 40 | // but by the time the panic reaches Do the goroutine may no longer exist 41 | // and its status will have changed. Trim out the misleading line. 
	// Trim the first line ("goroutine N [status]:"): by the time the
	// panic reaches Do the goroutine may no longer exist and its status
	// will have changed, so that line would be misleading.
	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
		stack = stack[line+1:]
	}
	return &panicError{value: v, stack: stack}
}

// call is an in-flight or completed singleflight.Do call
type call[V any] struct {

	// These fields are written once before the WaitGroup is done
	// and are only read after the WaitGroup is done.
	val V
	err error

	wg sync.WaitGroup

	// dups counts callers currently sharing this call (owner included);
	// whichever caller decrements it to zero returns the call to the
	// pool. Read/written with the Group mutex held before wg is done.
	dups atomic.Int32
}

// Group represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type Group[K comparable, V any] struct {
	m        map[K]*call[V] // lazily initialized
	mu       sync.Mutex     // protects m
	callPool sync.Pool      // recycles call[V] values across Do invocations
}

// NewGroup creates a Group whose call objects are pooled, avoiding an
// allocation per distinct in-flight key.
func NewGroup[K comparable, V any]() *Group[K, V] {
	return &Group[K, V]{
		callPool: sync.Pool{New: func() any {
			return new(call[V])
		}},
	}
}

// Result holds the results of Do, so they can be passed
// on a channel.
type Result struct {
	Val    interface{}
	Err    error
	Shared bool
}

// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *Group[K, V]) Do(key K, fn func() (V, error)) (v V, err error, shared bool) {
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[K]*call[V])
	}
	if c, ok := g.m[key]; ok {
		// Duplicate caller: register interest, then wait for the owner.
		_ = c.dups.Add(1)
		g.mu.Unlock()
		c.wg.Wait()
		var perr *panicError
		if errors.As(c.err, &perr) {
			// fn panicked in the owner; propagate to every waiter.
			panic(c.err)
		} else if errors.Is(c.err, errGoexit) {
			// fn called runtime.Goexit in the owner; exit here too.
			runtime.Goexit()
		}
		// assign value/err before put back to pool to avoid race
		v = c.val
		err = c.err
		n := c.dups.Add(-1)
		if n == 0 {
			g.callPool.Put(c)
		}
		return v, err, true
	}
	// Owner path: take a pooled call and publish it under key.
	c := g.callPool.Get().(*call[V])
	defer func() {
		// Drop the owner's reference; last one out recycles the call.
		n := c.dups.Add(-1)
		if n == 0 {
			g.callPool.Put(c)
		}
	}()
	_ = c.dups.Add(1)
	c.wg.Add(1)
	g.m[key] = c
	g.mu.Unlock()

	g.doCall(c, key, fn)
	return c.val, c.err, true
}

// doCall handles the single call for a key.
func (g *Group[K, V]) doCall(c *call[V], key K, fn func() (V, error)) {
	normalReturn := false
	recovered := false

	// use double-defer to distinguish panic from runtime.Goexit,
	// more details see https://golang.org/cl/134395
	defer func() {
		// the given function invoked runtime.Goexit
		if !normalReturn && !recovered {
			c.err = errGoexit
		}

		g.mu.Lock()
		defer g.mu.Unlock()
		// Release waiters, then unpublish the key (unless a newer call
		// has already replaced this one).
		c.wg.Done()
		if g.m[key] == c {
			delete(g.m, key)
		}
		var perr *panicError
		if errors.As(c.err, &perr) {
			// Re-panic in the owner so the panic is not swallowed.
			panic(c.err)
		}
	}()

	func() {
		defer func() {
			if !normalReturn {
				// Ideally, we would wait to take a stack trace until we've determined
				// whether this is a panic or a runtime.Goexit.
163 | // 164 | // Unfortunately, the only way we can distinguish the two is to see 165 | // whether the recover stopped the goroutine from terminating, and by 166 | // the time we know that, the part of the stack trace relevant to the 167 | // panic has been discarded. 168 | if r := recover(); r != nil { 169 | c.err = newPanicError(r) 170 | } 171 | } 172 | }() 173 | 174 | c.val, c.err = fn() 175 | normalReturn = true 176 | }() 177 | 178 | if !normalReturn { 179 | recovered = true 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /internal/singleflight_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package internal 6 | 7 | import ( 8 | "crypto/rand" 9 | "errors" 10 | "fmt" 11 | "io" 12 | "runtime" 13 | "runtime/debug" 14 | "sync" 15 | "sync/atomic" 16 | "testing" 17 | "time" 18 | ) 19 | 20 | func TestDo(t *testing.T) { 21 | g := NewGroup[string, string]() 22 | v, err, _ := g.Do("key", func() (string, error) { 23 | return "bar", nil 24 | }) 25 | if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { 26 | t.Errorf("Do = %v; want %v", got, want) 27 | } 28 | if err != nil { 29 | t.Errorf("Do error = %v", err) 30 | } 31 | } 32 | 33 | func TestDoErr(t *testing.T) { 34 | g := NewGroup[string, string]() 35 | someErr := errors.New("Some error") 36 | v, err, _ := g.Do("key", func() (string, error) { 37 | return "", someErr 38 | }) 39 | if !errors.Is(err, someErr) { 40 | t.Errorf("Do error = %v; want someErr %v", err, someErr) 41 | } 42 | if v != "" { 43 | t.Errorf("unexpected non-nil value %#v", v) 44 | } 45 | } 46 | 47 | func TestDoDupSuppress(t *testing.T) { 48 | g := NewGroup[string, string]() 49 | var wg1, wg2 sync.WaitGroup 50 | c := make(chan string, 1) 51 | var calls int32 52 | 
fn := func() (string, error) { 53 | if atomic.AddInt32(&calls, 1) == 1 { 54 | // First invocation. 55 | wg1.Done() 56 | } 57 | v := <-c 58 | c <- v // pump; make available for any future calls 59 | 60 | time.Sleep(10 * time.Millisecond) // let more goroutines enter Do 61 | 62 | return v, nil 63 | } 64 | 65 | const n = 10 66 | wg1.Add(1) 67 | for i := 0; i < n; i++ { 68 | wg1.Add(1) 69 | wg2.Add(1) 70 | go func() { 71 | defer wg2.Done() 72 | wg1.Done() 73 | v, err, _ := g.Do("key", fn) 74 | if err != nil { 75 | t.Errorf("Do error: %v", err) 76 | return 77 | } 78 | if s := v; s != "bar" { 79 | t.Errorf("Do = %T %v; want %q", v, v, "bar") 80 | } 81 | }() 82 | } 83 | wg1.Wait() 84 | // At least one goroutine is in fn now and all of them have at 85 | // least reached the line before the Do. 86 | c <- "bar" 87 | wg2.Wait() 88 | if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { 89 | t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) 90 | } 91 | } 92 | 93 | // Test singleflight behaves correctly after Do panic. 
94 | // See https://github.com/golang/go/issues/41133 95 | func TestPanicDo(t *testing.T) { 96 | g := NewGroup[string, string]() 97 | fn := func() (string, error) { 98 | panic("invalid memory address or nil pointer dereference") 99 | } 100 | 101 | const n = 5 102 | waited := int32(n) 103 | panicCount := int32(0) 104 | done := make(chan struct{}) 105 | for i := 0; i < n; i++ { 106 | go func() { 107 | defer func() { 108 | if err := recover(); err != nil { 109 | t.Logf("Got panic: %v\n%s", err, debug.Stack()) 110 | atomic.AddInt32(&panicCount, 1) 111 | } 112 | 113 | if atomic.AddInt32(&waited, -1) == 0 { 114 | close(done) 115 | } 116 | }() 117 | 118 | _, _, _ = g.Do("key", fn) 119 | }() 120 | } 121 | 122 | select { 123 | case <-done: 124 | if panicCount != n { 125 | t.Errorf("Expect %d panic, but got %d", n, panicCount) 126 | } 127 | case <-time.After(time.Second): 128 | t.Fatalf("Do hangs") 129 | } 130 | } 131 | 132 | func TestGoexitDo(t *testing.T) { 133 | g := NewGroup[string, int]() 134 | fn := func() (int, error) { 135 | runtime.Goexit() 136 | return 0, nil 137 | } 138 | 139 | const n = 5 140 | waited := int32(n) 141 | done := make(chan struct{}) 142 | for i := 0; i < n; i++ { 143 | go func() { 144 | var err error 145 | defer func() { 146 | if err != nil { 147 | t.Errorf("Error should be nil, but got: %v", err) 148 | } 149 | if atomic.AddInt32(&waited, -1) == 0 { 150 | close(done) 151 | } 152 | }() 153 | _, err, _ = g.Do("key", fn) 154 | }() 155 | } 156 | 157 | select { 158 | case <-done: 159 | case <-time.After(time.Second): 160 | t.Fatalf("Do hangs") 161 | } 162 | } 163 | 164 | func BenchmarkDo(b *testing.B) { 165 | keys := randKeys(b, 10240, 10) 166 | benchDo(b, NewGroup[string, int](), keys) 167 | } 168 | 169 | func benchDo(b *testing.B, g *Group[string, int], keys []string) { 170 | keyc := len(keys) 171 | b.ReportAllocs() 172 | b.ResetTimer() 173 | 174 | b.RunParallel(func(pb *testing.PB) { 175 | for i := 0; pb.Next(); i++ { 176 | _, _, _ = 
g.Do(keys[i%keyc], func() (int, error) { 177 | return 0, nil 178 | }) 179 | } 180 | }) 181 | } 182 | 183 | func randKeys(b *testing.B, count, length uint) []string { 184 | keys := make([]string, 0, count) 185 | key := make([]byte, length) 186 | 187 | for i := uint(0); i < count; i++ { 188 | if _, err := io.ReadFull(rand.Reader, key); err != nil { 189 | b.Fatalf("Failed to generate random key %d of %d of length %d: %s", i+1, count, length, err) 190 | } 191 | keys = append(keys, string(key)) 192 | } 193 | return keys 194 | } 195 | -------------------------------------------------------------------------------- /internal/sketch.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "math/bits" 5 | ) 6 | 7 | const ( 8 | resetMask = 0x7777777777777777 9 | oneMask = 0x1111111111111111 10 | ) 11 | 12 | type CountMinSketch struct { 13 | Table []uint64 14 | Additions uint 15 | SampleSize uint 16 | BlockMask uint 17 | } 18 | 19 | func NewCountMinSketch() *CountMinSketch { 20 | new := &CountMinSketch{} 21 | new.EnsureCapacity(64) 22 | return new 23 | } 24 | 25 | // indexOf return table index and counter index together 26 | func (s *CountMinSketch) indexOf(counterHash uint64, block uint64, offset uint8) (uint, uint) { 27 | h := counterHash >> (offset << 3) 28 | // max block + 7(8 * 8 bytes), fit 64 bytes cache line 29 | index := block + h&1 + uint64(offset<<1) 30 | return uint(index), uint((h >> 1) & 0xf) 31 | } 32 | 33 | func (s *CountMinSketch) inc(index uint, offset uint) bool { 34 | offset = offset << 2 35 | mask := uint64(0xF << offset) 36 | v := s.Table[index] 37 | if v&mask != mask { 38 | s.Table[index] = v + 1<> 1) & resetMask 93 | } 94 | s.Additions = (s.Additions - uint(count>>2)) >> 1 95 | } 96 | 97 | func (s *CountMinSketch) count(h uint64, block uint64, offset uint8) uint { 98 | index, off := s.indexOf(h, block, offset) 99 | off = off << 2 100 | count := (s.Table[index] >> off) & 0xF 101 | 
	return uint(count)
}

// used in test: expand a 64-bit table word into its sixteen 4-bit
// counters, most significant nibble first.
func uint64ToBase10Slice(n uint64) []int {
	result := make([]int, 16)
	for i := 0; i < 16; i++ {
		result[15-i] = int((n >> (i * 4)) & 0xF)
	}
	return result
}

// used in test
func (s *CountMinSketch) counters() [][]int {
	all := [][]int{}
	for i := 0; i < len(s.Table); i++ {
		all = append(all, uint64ToBase10Slice(s.Table[i]))
	}
	return all
}

func min(a, b uint) uint {
	if a < b {
		return a
	}
	return b
}

// Estimate returns the estimated frequency of hash h: the minimum of
// the four counters selected inside h's 8-word (64-byte) block.
func (s *CountMinSketch) Estimate(h uint64) uint {
	// block selects a cache-line-sized region; <<3 converts the block
	// index to a word index (8 words per block).
	block := (h & uint64(s.BlockMask)) << 3
	// Decorrelate counter selection from block selection.
	hc := rehash(h)
	// NOTE(review): the 100 cap looks inert — count masks with 0xF so
	// each counter is at most 15 — presumably kept as a safe upper bound.
	m := min(s.count(hc, block, 0), 100)
	m = min(s.count(hc, block, 1), m)
	m = min(s.count(hc, block, 2), m)
	m = min(s.count(hc, block, 3), m)
	return m
}

// EnsureCapacity grows the table to next2Power(size) words (minimum 16)
// and resets the sampling bookkeeping. The table never shrinks, and
// existing counters are discarded when growth happens.
func (s *CountMinSketch) EnsureCapacity(size uint) {
	if len(s.Table) >= int(size) {
		return
	}
	if size < 16 {
		size = 16
	}
	newSize := next2Power(size)
	s.Table = make([]uint64, newSize)
	s.SampleSize = 10 * newSize
	// BlockMask indexes 8-word blocks, hence the >>3.
	s.BlockMask = uint((len(s.Table) >> 3) - 1)
	s.Additions = 0
}

// rehash scrambles h with a multiply/xorshift finalizer so the bits
// used for counter selection differ from those used for block selection.
func rehash(h uint64) uint64 {
	h *= 0x94d049bb133111eb
	h ^= h >> 31
	return h
}
--------------------------------------------------------------------------------
/internal/sketch_test.go:
--------------------------------------------------------------------------------
package internal

import (
	"fmt"
	"math/rand"
	"strconv"
	"testing"

	"github.com/Yiling-J/theine-go/internal/hasher"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/xxh3"
)

func TestSketch_EnsureCapacity(t *testing.T) {
	sketch := NewCountMinSketch()
	sketch.EnsureCapacity(1)
	require.Equal(t, 64, len(sketch.Table))
}

func
TestSketch_Basic(t *testing.T) { 21 | sketch := NewCountMinSketch() 22 | sketch.EnsureCapacity(10000) 23 | require.Equal(t, 16384, len(sketch.Table)) 24 | require.Equal(t, uint(163840), sketch.SampleSize) 25 | 26 | failed := 0 27 | for i := 0; i < 10000; i++ { 28 | key := fmt.Sprintf("key:%d", i) 29 | keyh := xxh3.HashStringSeed(key, 1234) 30 | sketch.Add(keyh) 31 | sketch.Add(keyh) 32 | sketch.Add(keyh) 33 | sketch.Add(keyh) 34 | sketch.Add(keyh) 35 | key = fmt.Sprintf("key:%d:b", i) 36 | keyh2 := xxh3.HashStringSeed(key, 1234) 37 | sketch.Add(keyh2) 38 | sketch.Add(keyh2) 39 | sketch.Add(keyh2) 40 | 41 | es1 := sketch.Estimate(keyh) 42 | es2 := sketch.Estimate(keyh2) 43 | if es1 != 5 { 44 | failed++ 45 | } 46 | if es2 != 3 { 47 | failed++ 48 | } 49 | require.True(t, es1 >= 5) 50 | require.True(t, es2 >= 3) 51 | } 52 | require.True(t, failed < 40, failed) 53 | } 54 | 55 | func TestSketch_ResetFreq(t *testing.T) { 56 | sketch := NewCountMinSketch() 57 | sketch.EnsureCapacity(1000) 58 | for i := 0; i < len(sketch.Table); i++ { 59 | sketch.Table[i] = ^uint64(0) 60 | } 61 | keyh := xxh3.HashString("key1") 62 | require.Equal(t, 15, int(sketch.Estimate(keyh))) 63 | sketch.reset() 64 | require.Equal(t, 7, int(sketch.Estimate(keyh))) 65 | for _, cs := range sketch.counters() { 66 | for _, c := range cs { 67 | require.Equal(t, c, 7) 68 | } 69 | } 70 | } 71 | 72 | func TestSketch_Small(t *testing.T) { 73 | sketch := NewCountMinSketch() 74 | sketch.EnsureCapacity(512) 75 | hasher := hasher.NewHasher[uint64](nil) 76 | for i := 0; i < 512; i++ { 77 | h := hasher.Hash(uint64(i)) 78 | sketch.Add(h) 79 | require.Less(t, int(sketch.Estimate(h)), 3) 80 | } 81 | } 82 | 83 | func TestSketch_ResetAddition(t *testing.T) { 84 | sketch := NewCountMinSketch() 85 | sketch.EnsureCapacity(100) 86 | require.Equal(t, 128, len(sketch.Table)) 87 | require.Equal(t, uint(1280), sketch.SampleSize) 88 | // override sampleSize so test won't reset 89 | sketch.SampleSize = 5120 90 | 91 | keyh := 
xxh3.HashString("k1") 92 | sketch.Add(keyh) 93 | sketch.Add(keyh) 94 | sketch.Add(keyh) 95 | sketch.Add(keyh) 96 | sketch.Add(keyh) 97 | keyh2 := xxh3.HashString("k1b") 98 | sketch.Add(keyh2) 99 | sketch.Add(keyh2) 100 | sketch.Add(keyh2) 101 | 102 | es1 := sketch.Estimate(keyh) 103 | es2 := sketch.Estimate(keyh2) 104 | additions := sketch.Additions 105 | sketch.reset() 106 | additionsNew := sketch.Additions 107 | es1h := sketch.Estimate(keyh) 108 | es2h := sketch.Estimate(keyh2) 109 | require.Equal(t, es1/2, es1h) 110 | require.Equal(t, es2/2, es2h) 111 | require.Equal(t, additions-(es1-es1h)-(es2-es2h), additionsNew) 112 | } 113 | 114 | func BenchmarkSketch(b *testing.B) { 115 | sketch := NewCountMinSketch() 116 | sketch.EnsureCapacity(50000000) 117 | nums := []uint64{} 118 | for i := 0; i < 100000; i++ { 119 | h := xxh3.HashString(strconv.Itoa(rand.Intn(100000))) 120 | nums = append(nums, h) 121 | } 122 | b.ResetTimer() 123 | for i := 0; i < b.N; i++ { 124 | sketch.Estimate(nums[i%100000]) 125 | } 126 | } 127 | 128 | func TestSketch_HeavyHitters(t *testing.T) { 129 | sketch := NewCountMinSketch() 130 | hasher := hasher.NewHasher[uint64](nil) 131 | sketch.EnsureCapacity(2000) 132 | for i := 100; i < 5000; i++ { 133 | h := hasher.Hash(uint64(i)) 134 | sketch.Add(h) 135 | } 136 | for i := 0; i < 10; i += 2 { 137 | for j := 0; j < i; j++ { 138 | h := hasher.Hash(uint64(i)) 139 | sketch.Add(h) 140 | } 141 | } 142 | 143 | // A perfect popularity count yields an array [0, 0, 2, 0, 4, 0, 6, 0, 8, 0] 144 | popularity := make([]int, 10) 145 | for i := 0; i < 10; i++ { 146 | h := hasher.Hash(uint64(i)) 147 | popularity[i] = int(sketch.Estimate(h)) 148 | } 149 | for i := 0; i < len(popularity); i++ { 150 | if i == 0 || i == 1 || i == 3 || i == 5 || i == 7 || i == 9 { 151 | require.LessOrEqual(t, popularity[i], popularity[2]) 152 | } else if i == 2 { 153 | require.LessOrEqual(t, popularity[2], popularity[4]) 154 | } else if i == 4 { 155 | require.LessOrEqual(t, 
popularity[4], popularity[6])
		} else if i == 6 {
			require.LessOrEqual(t, popularity[6], popularity[8])
		}
	}
}
--------------------------------------------------------------------------------
/internal/slru.go:
--------------------------------------------------------------------------------
package internal

// Slru is a segmented LRU: new entries land in the probation list and
// are promoted to the protected list when accessed again.
type Slru[K comparable, V any] struct {
	probation *List[K, V]
	protected *List[K, V]
	maxsize   uint
}

// NewSlru creates a segmented LRU of the given total size. The
// protected segment is capped at 80% of size; the probation segment is
// created with capacity 0 because its size is managed dynamically.
func NewSlru[K comparable, V any](size uint) *Slru[K, V] {
	return &Slru[K, V]{
		maxsize: size,
		// probation list size is dynamic
		probation: NewList[K, V](0, LIST_PROBATION),
		protected: NewList[K, V](uint(float32(size)*0.8), LIST_PROTECTED),
	}
}

// insert places a new entry at the front of the probation segment.
func (s *Slru[K, V]) insert(entry *Entry[K, V]) {
	s.probation.PushFront(entry)
}

// access records a hit: probation entries are promoted to protected,
// protected entries are refreshed to the front of their list.
func (s *Slru[K, V]) access(entry *Entry[K, V]) {
	if entry.flag.IsProbation() {
		s.probation.remove(entry)
		s.protected.PushFront(entry)
	} else if entry.flag.IsProtected() {
		s.protected.MoveToFront(entry)
	}
}

// remove detaches the entry from whichever segment currently holds it.
func (s *Slru[K, V]) remove(entry *Entry[K, V]) {
	if entry.flag.IsProbation() {
		s.probation.remove(entry)
	} else if entry.flag.IsProtected() {
		s.protected.remove(entry)
	}
}

// updateCost adjusts the entry's segment length after the entry's
// weight changed by delta.
func (s *Slru[K, V]) updateCost(entry *Entry[K, V], delta int64) {
	if entry.flag.IsProbation() {
		s.probation.len += delta
	} else if entry.flag.IsProtected() {
		s.protected.len += delta
	}
}

// len reports the combined Len of both segments.
func (s *Slru[K, V]) len() int {
	return s.probation.Len() + s.protected.Len()
}
--------------------------------------------------------------------------------
/internal/stats.go:
--------------------------------------------------------------------------------
package internal

// Stats is an immutable snapshot of cache hit/miss counters.
type Stats struct {
	hits   uint64
	misses uint64
}

// newStats builds a Stats snapshot from raw counter values.
func newStats(hits uint64, misses uint64) Stats {
	return
Stats{ 10 | hits: hits, 11 | misses: misses, 12 | } 13 | } 14 | 15 | func (s Stats) Hits() uint64 { 16 | return s.hits 17 | } 18 | 19 | func (s Stats) Misses() uint64 { 20 | return s.misses 21 | } 22 | 23 | func (s Stats) HitRatio() float64 { 24 | total := s.hits + s.misses 25 | if total == 0 { 26 | return 0.0 27 | } 28 | return float64(s.hits) / float64(total) 29 | } 30 | -------------------------------------------------------------------------------- /internal/store_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "sync/atomic" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestStore_WindowExpire(t *testing.T) { 14 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 5000}) 15 | defer store.Close() 16 | 17 | expired := map[int]int{} 18 | var mu sync.Mutex 19 | store.removalListener = func(key, value int, reason RemoveReason) { 20 | if reason == EXPIRED { 21 | mu.Lock() 22 | expired[key] = value 23 | mu.Unlock() 24 | } 25 | } 26 | for i := 0; i < 50; i++ { 27 | store.Set(i, i, 1, 200*time.Millisecond) 28 | } 29 | store.Wait() 30 | mu.Lock() 31 | require.True(t, len(expired) == 0) 32 | mu.Unlock() 33 | time.Sleep(3 * time.Second) 34 | mu.Lock() 35 | require.True(t, len(expired) > 0) 36 | mu.Unlock() 37 | } 38 | 39 | func TestStore_Window(t *testing.T) { 40 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 41 | defer store.Close() 42 | 43 | evicted := map[int]int{} 44 | var mu sync.Mutex 45 | store.removalListener = func(key, value int, reason RemoveReason) { 46 | if reason == EVICTED { 47 | mu.Lock() 48 | evicted[key] = value 49 | mu.Unlock() 50 | } 51 | } 52 | 53 | for i := 0; i < 5; i++ { 54 | store.Set(i, i, 1, 0) 55 | } 56 | // move 0,1,2 entries to slru 57 | store.Set(123, 123, 8, 0) 58 | store.Wait() 59 | require.Equal(t, store.policy.window.Len(), 10) 60 | keys := []int{} 61 | 
for e := store.policy.window.PopTail(); e != nil; e = store.policy.window.PopTail() { 62 | keys = append(keys, e.key) 63 | } 64 | require.Equal(t, []int{3, 4, 123}, keys) 65 | require.Equal(t, 0, len(evicted)) 66 | store.Wait() 67 | } 68 | func TestStore_WindowEvict(t *testing.T) { 69 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 70 | store.policy.sketch.EnsureCapacity(1000) 71 | defer store.Close() 72 | 73 | evicted := map[int]int{} 74 | var mu sync.Mutex 75 | store.removalListener = func(key, value int, reason RemoveReason) { 76 | if reason == EVICTED { 77 | mu.Lock() 78 | evicted[key] = value 79 | mu.Unlock() 80 | } 81 | } 82 | require.Equal(t, int(store.policy.window.capacity), 10) 83 | 84 | // test evicted callback 85 | // fill window with weight 2 items first 86 | for i := 0; i < 500; i++ { 87 | store.Set(i, i, 2, 0) 88 | } 89 | store.Wait() 90 | require.Equal(t, 0, len(evicted)) 91 | 92 | // add 15 weight 1 items, window currently has 5 weight2 items. 93 | // This will send 5 weight2 items and 5 weight1 items to probation, 94 | // all items has freq 1 in cache, which means these 15 entries don't 95 | // have enough freq to be admitted. 
96 | for i := 700; i < 715; i++ { 97 | store.Set(i, i, 1, 0) 98 | } 99 | store.Wait() 100 | mu.Lock() 101 | defer mu.Unlock() 102 | require.Equal(t, 10, len(evicted)) 103 | } 104 | 105 | func TestStore_DoorKeeperDynamicSize(t *testing.T) { 106 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 200000, Doorkeeper: true}) 107 | defer store.Close() 108 | shard := store.shards[0] 109 | require.True(t, shard.dookeeper.Capacity == 512) 110 | for i := 0; i < 5000; i++ { 111 | shard.set(i, &Entry[int, int]{}) 112 | } 113 | require.True(t, shard.dookeeper.Capacity > 100000) 114 | } 115 | 116 | func TestStore_PolicyCounter(t *testing.T) { 117 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 118 | defer store.Close() 119 | for i := 0; i < 1000; i++ { 120 | store.Set(i, i, 1, 0) 121 | } 122 | // hit 123 | for i := 0; i < 1600; i++ { 124 | store.Get(100) 125 | } 126 | // miss 127 | for i := 0; i < 1600; i++ { 128 | store.Get(10000) 129 | } 130 | 131 | require.Equal(t, uint64(1600), store.policy.hits.Value(), int(store.policy.hits.Value())) 132 | require.Equal(t, uint64(1600), store.policy.misses.Value(), int(store.policy.misses.Value())) 133 | } 134 | 135 | func TestStore_GetExpire(t *testing.T) { 136 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 137 | defer store.Close() 138 | 139 | _, i := store.index(123) 140 | fakeNow := store.timerwheel.clock.NowNano() - 100*10e9 141 | testNow := store.timerwheel.clock.NowNano() 142 | entry := &Entry[int, int]{ 143 | key: 123, 144 | value: 123, 145 | } 146 | entry.expire.Store(fakeNow) 147 | 148 | store.shards[i].hashmap[123] = entry 149 | store.policyMu.Lock() 150 | 151 | // already exprired 152 | store.timerwheel.clock.SetNowCache(fakeNow + 1) 153 | _, ok := store.Get(123) 154 | require.False(t, ok) 155 | 156 | // use cached now, not expire 157 | store.timerwheel.clock.SetNowCache(fakeNow - 31*10e9) 158 | v, ok := store.Get(123) 159 | require.True(t, ok) 160 | require.Equal(t, 123, 
v) 161 | 162 | // less than 30 seconds and not expired, use real now 163 | store.timerwheel.clock.SetNowCache(fakeNow - 1) 164 | _, ok = store.Get(123) 165 | require.False(t, ok) 166 | store.policyMu.Unlock() 167 | 168 | // ticker refresh cached now 169 | time.Sleep(1200 * time.Millisecond) 170 | cachedNow := store.timerwheel.clock.NowNanoCached() 171 | require.True(t, cachedNow > testNow) 172 | } 173 | 174 | func TestStore_SinkWritePolicyWeight(t *testing.T) { 175 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 10000}) 176 | defer store.Close() 177 | 178 | entry := &Entry[int, int]{key: 1, value: 1} 179 | h := store.hasher.Hash(1) 180 | 181 | // wright change 5 -> 1 -> 8 182 | store.sinkWrite(WriteBufItem[int, int]{ 183 | entry: entry, 184 | costChange: -4, 185 | code: UPDATE, 186 | hash: h, 187 | }) 188 | 189 | store.sinkWrite(WriteBufItem[int, int]{ 190 | entry: entry, 191 | costChange: 5, 192 | code: NEW, 193 | hash: h, 194 | }) 195 | 196 | store.sinkWrite(WriteBufItem[int, int]{ 197 | entry: entry, 198 | costChange: 7, 199 | code: UPDATE, 200 | hash: h, 201 | }) 202 | 203 | require.Equal(t, 8, int(store.policy.weightedSize)) 204 | } 205 | 206 | func TestStore_CloseRace(t *testing.T) { 207 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 208 | 209 | var wg sync.WaitGroup 210 | var closed atomic.Bool 211 | for i := 0; i < 10; i++ { 212 | wg.Add(1) 213 | go func(i int) { 214 | counter := i * 5 215 | countdown := -1 216 | defer wg.Done() 217 | for { 218 | // continue get/set 20 times after cache closed 219 | if countdown == 0 { 220 | return 221 | } 222 | if closed.Load() && countdown == -1 { 223 | countdown = 20 224 | } 225 | store.Get(counter) 226 | store.Set(100, 100, 1, 0) 227 | counter += i 228 | if countdown > 0 { 229 | countdown -= 1 230 | } 231 | } 232 | }(i) 233 | } 234 | wg.Add(1) 235 | go func() { 236 | defer wg.Done() 237 | store.Close() 238 | closed.Store(true) 239 | }() 240 | wg.Wait() 241 | 242 | _ = store.Set(100, 
100, 1, 0) 243 | v, ok := store.Get(100) 244 | require.False(t, ok) 245 | require.Equal(t, 0, v) 246 | require.NotNil(t, store.ctx.Err()) 247 | } 248 | 249 | func TestStore_CloseRaceLoadingCache(t *testing.T) { 250 | store := NewStore[int, int](&StoreOptions[int, int]{MaxSize: 1000}) 251 | loadingStore := NewLoadingStore(store) 252 | loadingStore.loader = func(ctx context.Context, key int) (Loaded[int], error) { 253 | return Loaded[int]{Value: 100, Cost: 1}, nil 254 | } 255 | ctx := context.TODO() 256 | 257 | var wg sync.WaitGroup 258 | var closed atomic.Bool 259 | for i := 0; i < 10; i++ { 260 | wg.Add(1) 261 | go func(i int) { 262 | counter := i * 5 263 | countdown := -1 264 | defer wg.Done() 265 | for { 266 | // continue get/set 20 times after cache closed 267 | if countdown == 0 { 268 | return 269 | } 270 | if closed.Load() && countdown == -1 { 271 | countdown = 20 272 | } 273 | _, err := loadingStore.Get(ctx, counter) 274 | if countdown > 0 { 275 | require.Equal(t, ErrCacheClosed, err) 276 | } 277 | counter += i 278 | if countdown > 0 { 279 | countdown -= 1 280 | } 281 | } 282 | }(i) 283 | } 284 | wg.Add(1) 285 | go func() { 286 | defer wg.Done() 287 | loadingStore.Close() 288 | closed.Store(true) 289 | }() 290 | wg.Wait() 291 | 292 | _, err := loadingStore.Get(ctx, 100) 293 | require.Equal(t, ErrCacheClosed, err) 294 | require.NotNil(t, store.ctx.Err()) 295 | } 296 | -------------------------------------------------------------------------------- /internal/timerwheel.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "math/bits" 5 | "time" 6 | 7 | "github.com/Yiling-J/theine-go/internal/clock" 8 | ) 9 | 10 | func next2Power(x uint) uint { 11 | x-- 12 | x |= x >> 1 13 | x |= x >> 2 14 | x |= x >> 4 15 | x |= x >> 8 16 | x |= x >> 16 17 | x |= x >> 32 18 | x++ 19 | return x 20 | } 21 | 22 | type TimerWheel[K comparable, V any] struct { 23 | clock *clock.Clock 24 | buckets []uint 25 | 
spans []uint 26 | shift []uint 27 | wheel [][]*List[K, V] 28 | nanos int64 29 | } 30 | 31 | func NewTimerWheel[K comparable, V any](size uint) *TimerWheel[K, V] { 32 | clock := &clock.Clock{Start: time.Now()} 33 | buckets := []uint{64, 64, 32, 4, 1} 34 | spans := []uint{ 35 | next2Power(uint((1 * time.Second).Nanoseconds())), 36 | next2Power(uint((1 * time.Minute).Nanoseconds())), 37 | next2Power(uint((1 * time.Hour).Nanoseconds())), 38 | next2Power(uint((24 * time.Hour).Nanoseconds())), 39 | next2Power(uint((24 * time.Hour).Nanoseconds())) * 4, 40 | next2Power(uint((24 * time.Hour).Nanoseconds())) * 4, 41 | } 42 | 43 | shift := []uint{ 44 | uint(bits.TrailingZeros(spans[0])), 45 | uint(bits.TrailingZeros(spans[1])), 46 | uint(bits.TrailingZeros(spans[2])), 47 | uint(bits.TrailingZeros(spans[3])), 48 | uint(bits.TrailingZeros(spans[4])), 49 | } 50 | 51 | wheel := [][]*List[K, V]{} 52 | for i := 0; i < 5; i++ { 53 | tmp := []*List[K, V]{} 54 | for j := 0; j < int(buckets[i]); j++ { 55 | tmp = append(tmp, NewList[K, V](0, WHEEL_LIST)) 56 | } 57 | wheel = append(wheel, tmp) 58 | } 59 | 60 | return &TimerWheel[K, V]{ 61 | buckets: buckets, 62 | spans: spans, 63 | shift: shift, 64 | wheel: wheel, 65 | nanos: clock.NowNano(), 66 | clock: clock, 67 | } 68 | } 69 | 70 | func (tw *TimerWheel[K, V]) findIndex(expire int64) (int, int) { 71 | duration := expire - tw.nanos 72 | for i := 0; i < 5; i++ { 73 | if duration < int64(tw.spans[i+1]) { 74 | ticks := expire >> int(tw.shift[i]) 75 | slot := int(ticks) & (int(tw.buckets[i]) - 1) 76 | return i, slot 77 | } 78 | } 79 | return 4, 0 80 | } 81 | 82 | func (tw *TimerWheel[K, V]) deschedule(entry *Entry[K, V]) { 83 | entry.prev(WHEEL_LIST).setNext(entry.next(WHEEL_LIST), WHEEL_LIST) 84 | entry.next(WHEEL_LIST).setPrev(entry.prev(WHEEL_LIST), WHEEL_LIST) 85 | entry.setNext(nil, WHEEL_LIST) 86 | entry.setPrev(nil, WHEEL_LIST) 87 | } 88 | 89 | func (tw *TimerWheel[K, V]) schedule(entry *Entry[K, V]) { 90 | if entry.meta.wheelPrev != 
nil { 91 | tw.deschedule(entry) 92 | } 93 | x, y := tw.findIndex(entry.expire.Load()) 94 | tw.wheel[x][y].PushFront(entry) 95 | } 96 | 97 | func (tw *TimerWheel[K, V]) advance(now int64, remove func(entry *Entry[K, V], reason RemoveReason)) { 98 | if now == 0 { 99 | now = tw.clock.NowNano() 100 | } 101 | previous := tw.nanos 102 | tw.nanos = now 103 | 104 | for i := 0; i < 5; i++ { 105 | prevTicks := previous >> int64(tw.shift[i]) 106 | currentTicks := tw.nanos >> int64(tw.shift[i]) 107 | if currentTicks <= prevTicks { 108 | break 109 | } 110 | tw.expire(i, prevTicks, currentTicks-prevTicks, remove) 111 | } 112 | } 113 | 114 | func (tw *TimerWheel[K, V]) expire(index int, prevTicks int64, delta int64, remove func(entry *Entry[K, V], reason RemoveReason)) { 115 | mask := tw.buckets[index] - 1 116 | steps := tw.buckets[index] 117 | if delta < int64(steps) { 118 | steps = uint(delta) 119 | } 120 | start := prevTicks & int64(mask) 121 | end := start + int64(steps) 122 | for i := start; i < end; i++ { 123 | list := tw.wheel[index][i&int64(mask)] 124 | entry := list.Front() 125 | for entry != nil { 126 | next := entry.Next(WHEEL_LIST) 127 | if entry.expire.Load() <= tw.nanos { 128 | tw.deschedule(entry) 129 | remove(entry, EXPIRED) 130 | } else { 131 | tw.schedule(entry) 132 | } 133 | entry = next 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /internal/timerwheel_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func expire(now int64, expire int64) int64 { 12 | return now + (time.Second * time.Duration(expire)).Nanoseconds() 13 | } 14 | 15 | func TestFindBucket(t *testing.T) { 16 | tw := NewTimerWheel[string, string](1000) 17 | // max 1.14m 18 | for _, i := range []int{0, 10, 30, 68} { 19 | x, _ := tw.findIndex(tw.clock.NowNano() 
+ (time.Second * time.Duration(i)).Nanoseconds()) 20 | require.Equal(t, 0, x) 21 | } 22 | // max 1.22h 23 | for _, i := range []int{69, 120, 200, 1000, 2500, 4398} { 24 | x, _ := tw.findIndex(tw.clock.NowNano() + (time.Second * time.Duration(i)).Nanoseconds()) 25 | require.Equal(t, 1, x) 26 | } 27 | // max 1.63d 28 | for _, i := range []int{4399, 8000, 20000, 50000, 140737} { 29 | x, _ := tw.findIndex(tw.clock.NowNano() + (time.Second * time.Duration(i)).Nanoseconds()) 30 | require.Equal(t, 2, x) 31 | } 32 | // max 6.5d 33 | for _, i := range []int{140738, 200000, 400000, 562949} { 34 | x, _ := tw.findIndex(tw.clock.NowNano() + (time.Second * time.Duration(i)).Nanoseconds()) 35 | require.Equal(t, 3, x) 36 | } 37 | // > 6.5d 38 | for _, i := range []int{562950, 1562950, 2562950, 3562950} { 39 | x, _ := tw.findIndex(tw.clock.NowNano() + (time.Second * time.Duration(i)).Nanoseconds()) 40 | require.Equal(t, 4, x) 41 | } 42 | } 43 | 44 | func TestSchedule(t *testing.T) { 45 | tw := NewTimerWheel[string, string](1000) 46 | entries := []*Entry[string, string]{ 47 | NewEntry("k1", "", 1, expire(tw.clock.NowNano(), 1)), 48 | NewEntry("k2", "", 1, expire(tw.clock.NowNano(), 69)), 49 | NewEntry("k3", "", 1, expire(tw.clock.NowNano(), 4399)), 50 | } 51 | 52 | for _, entry := range entries { 53 | tw.schedule(entry) 54 | } 55 | var found bool 56 | for _, l := range tw.wheel[0] { 57 | if l.Contains(entries[0]) { 58 | found = true 59 | } 60 | } 61 | require.True(t, found) 62 | 63 | found = false 64 | for _, l := range tw.wheel[1] { 65 | if l.Contains(entries[1]) { 66 | found = true 67 | } 68 | } 69 | require.True(t, found) 70 | 71 | found = false 72 | for _, l := range tw.wheel[2] { 73 | if l.Contains(entries[2]) { 74 | found = true 75 | } 76 | } 77 | require.True(t, found) 78 | } 79 | 80 | func TestAdvance(t *testing.T) { 81 | tw := NewTimerWheel[string, string](1000) 82 | entries := []*Entry[string, string]{ 83 | NewEntry("k1", "", 1, expire(tw.clock.NowNano(), 1)), 84 | 
NewEntry("k2", "", 1, expire(tw.clock.NowNano(), 10)), 85 | NewEntry("k3", "", 1, expire(tw.clock.NowNano(), 30)), 86 | NewEntry("k4", "", 1, expire(tw.clock.NowNano(), 120)), 87 | NewEntry("k5", "", 1, expire(tw.clock.NowNano(), 6500)), 88 | NewEntry("k6", "", 1, expire(tw.clock.NowNano(), 142000)), 89 | NewEntry("k7", "", 1, expire(tw.clock.NowNano(), 1420000)), 90 | } 91 | 92 | for _, entry := range entries { 93 | tw.schedule(entry) 94 | } 95 | evicted := []string{} 96 | tw.advance(tw.clock.NowNano()+(time.Second*time.Duration(64)).Nanoseconds(), func(entry *Entry[string, string], reason RemoveReason) { 97 | evicted = append(evicted, entry.key) 98 | }) 99 | require.ElementsMatch(t, []string{"k1", "k2", "k3"}, evicted) 100 | 101 | tw.advance(tw.clock.NowNano()+(time.Second*time.Duration(200)).Nanoseconds(), func(entry *Entry[string, string], reason RemoveReason) { 102 | evicted = append(evicted, entry.key) 103 | }) 104 | 105 | require.ElementsMatch(t, []string{"k1", "k2", "k3", "k4"}, evicted) 106 | 107 | tw.advance(tw.clock.NowNano()+(time.Second*time.Duration(12000)).Nanoseconds(), func(entry *Entry[string, string], reason RemoveReason) { 108 | evicted = append(evicted, entry.key) 109 | }) 110 | 111 | require.ElementsMatch(t, []string{"k1", "k2", "k3", "k4", "k5"}, evicted) 112 | 113 | tw.advance(tw.clock.NowNano()+(time.Second*time.Duration(350000)).Nanoseconds(), func(entry *Entry[string, string], reason RemoveReason) { 114 | evicted = append(evicted, entry.key) 115 | }) 116 | 117 | require.ElementsMatch(t, []string{"k1", "k2", "k3", "k4", "k5", "k6"}, evicted) 118 | 119 | tw.advance(tw.clock.NowNano()+(time.Second*time.Duration(1520000)).Nanoseconds(), func(entry *Entry[string, string], reason RemoveReason) { 120 | evicted = append(evicted, entry.key) 121 | }) 122 | 123 | require.ElementsMatch(t, []string{"k1", "k2", "k3", "k4", "k5", "k6", "k7"}, evicted) 124 | } 125 | 126 | func TestAdvanceClear(t *testing.T) { 127 | tw := NewTimerWheel[string, 
string](100000) 128 | em := []*Entry[string, string]{} 129 | for i := 0; i < 50000; i++ { 130 | ttl := 1 + rand.Intn(12) 131 | entry := NewEntry("k1", "", 1, expire(tw.clock.NowNano(), int64(ttl))) 132 | tw.schedule(entry) 133 | em = append(em, entry) 134 | } 135 | for _, entry := range em { 136 | require.NotNil(t, entry.meta.wheelPrev) 137 | require.NotNil(t, entry.meta.wheelNext) 138 | } 139 | 140 | for i := 0; i < 15; i++ { 141 | tw.advance(tw.clock.NowNano()+time.Duration(float64(i)*1.0*float64(time.Second)).Nanoseconds(), func(entry *Entry[string, string], reason RemoveReason) { 142 | }) 143 | } 144 | 145 | for _, entry := range em { 146 | require.Nil(t, entry.meta.wheelPrev) 147 | require.Nil(t, entry.meta.wheelNext) 148 | } 149 | 150 | for _, w := range tw.wheel { 151 | for _, l := range w { 152 | require.Equal(t, &l.root, l.root.meta.wheelPrev) 153 | require.Equal(t, &l.root, l.root.meta.wheelNext) 154 | } 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /internal/tlfu.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "math" 5 | 6 | "github.com/Yiling-J/theine-go/internal/hasher" 7 | "github.com/Yiling-J/theine-go/internal/xruntime" 8 | ) 9 | 10 | const ( 11 | ADMIT_HASHDOS_THRESHOLD = 6 12 | HILL_CLIMBER_STEP_DECAY_RATE = 0.98 13 | HILL_CLIMBER_STEP_PERCENT = 0.0625 14 | ) 15 | 16 | type TinyLfu[K comparable, V any] struct { 17 | window *List[K, V] 18 | slru *Slru[K, V] 19 | sketch *CountMinSketch 20 | hasher *hasher.Hasher[K] 21 | capacity uint 22 | weightedSize uint 23 | misses *UnsignedCounter 24 | hits *UnsignedCounter 25 | hitsInSample uint64 26 | missesInSample uint64 27 | hr float32 28 | step float32 29 | amount int 30 | removeCallback func(entry *Entry[K, V]) 31 | } 32 | 33 | func NewTinyLfu[K comparable, V any](size uint, hasher *hasher.Hasher[K]) *TinyLfu[K, V] { 34 | windowSize := uint(float32(size) * 0.01) 35 | if 
windowSize < 1 { 36 | windowSize = 1 37 | } 38 | mainSize := size - windowSize 39 | tlfu := &TinyLfu[K, V]{ 40 | capacity: size, 41 | slru: NewSlru[K, V](mainSize), 42 | sketch: NewCountMinSketch(), 43 | step: -float32(size) * 0.0625, 44 | hasher: hasher, 45 | misses: NewUnsignedCounter(), 46 | hits: NewUnsignedCounter(), 47 | window: NewList[K, V](windowSize, LIST_WINDOW), 48 | } 49 | 50 | return tlfu 51 | } 52 | 53 | func (t *TinyLfu[K, V]) increaseWindow(amount int) int { 54 | // try move from protected/probation to window 55 | for { 56 | probation := true 57 | entry := t.slru.probation.Back() 58 | if entry == nil || entry.policyWeight > int64(amount) { 59 | probation = false 60 | entry = t.slru.protected.Back() 61 | } 62 | if entry == nil { 63 | break 64 | } 65 | 66 | weight := entry.policyWeight 67 | if weight > int64(amount) { 68 | break 69 | } 70 | amount -= int(weight) 71 | if probation { 72 | t.slru.probation.Remove(entry) 73 | } else { 74 | t.slru.protected.Remove(entry) 75 | } 76 | t.window.PushFront(entry) 77 | } 78 | return amount 79 | } 80 | 81 | func (t *TinyLfu[K, V]) decreaseWindow(amount int) int { 82 | // try move from window to probation 83 | for { 84 | entry := t.window.Back() 85 | if entry == nil { 86 | break 87 | } 88 | weight := entry.policyWeight 89 | if weight > int64(amount) { 90 | break 91 | } 92 | amount -= int(weight) 93 | t.window.Remove(entry) 94 | t.slru.probation.PushFront(entry) 95 | } 96 | return amount 97 | } 98 | 99 | func (t *TinyLfu[K, V]) resizeWindow() { 100 | t.window.capacity += uint(t.amount) 101 | t.slru.protected.capacity -= uint(t.amount) 102 | // demote first to make sure policy size is right 103 | t.demoteFromProtected() 104 | 105 | var remain int 106 | if t.amount > 0 { 107 | remain = t.increaseWindow(t.amount) 108 | t.amount = remain 109 | } else if t.amount < 0 { 110 | remain = t.decreaseWindow(-t.amount) 111 | t.amount = -remain 112 | } 113 | 114 | t.window.capacity -= uint(t.amount) 115 | 
t.slru.protected.capacity += uint(t.amount) 116 | } 117 | 118 | func (t *TinyLfu[K, V]) climb() { 119 | var delta float32 120 | if t.hitsInSample+t.missesInSample == 0 { 121 | delta = 0 122 | } else { 123 | current := float32(t.hitsInSample) / float32(t.hitsInSample+t.missesInSample) 124 | delta = current - t.hr 125 | t.hr = current 126 | } 127 | t.hitsInSample = 0 128 | t.missesInSample = 0 129 | 130 | var amount float32 131 | if delta >= 0 { 132 | amount = t.step 133 | } else { 134 | amount = -t.step 135 | } 136 | 137 | nextStepSize := amount * HILL_CLIMBER_STEP_DECAY_RATE 138 | if math.Abs(float64(delta)) >= 0.05 { 139 | nextStepSizeAbs := float32(t.capacity) * HILL_CLIMBER_STEP_PERCENT 140 | if amount >= 0 { 141 | nextStepSize = nextStepSizeAbs 142 | } else { 143 | nextStepSize = -nextStepSizeAbs 144 | } 145 | } 146 | 147 | t.step = nextStepSize 148 | t.amount = int(amount) 149 | // decrease protected, min protected is 0 150 | if t.amount > 0 && t.amount > int(t.slru.protected.capacity) { 151 | t.amount = int(t.slru.protected.capacity) 152 | } 153 | 154 | // decrease window, min window size is 1 155 | if t.amount < 0 && -t.amount > int(t.window.capacity-1) { 156 | t.amount = -int(t.window.capacity - 1) 157 | } 158 | } 159 | 160 | func (t *TinyLfu[K, V]) Set(entry *Entry[K, V]) { 161 | if uint(t.hitsInSample)+uint(t.missesInSample) > t.sketch.SampleSize { 162 | t.climb() 163 | t.resizeWindow() 164 | } 165 | 166 | t.weightedSize += uint(entry.policyWeight) 167 | 168 | if entry.meta.prev == nil { 169 | t.missesInSample++ 170 | t.window.PushFront(entry) 171 | } 172 | 173 | t.demoteFromProtected() 174 | t.EvictEntries() 175 | 176 | if t.weightedSize <= t.capacity { 177 | count := t.slru.probation.count + t.slru.protected.count + t.window.count 178 | t.sketch.EnsureCapacity(uint(count)) 179 | } 180 | } 181 | 182 | func (t *TinyLfu[K, V]) Access(item ReadBufItem[K, V]) { 183 | if uint(t.hitsInSample)+uint(t.missesInSample) > t.sketch.SampleSize { 184 | t.climb() 185 | 
t.resizeWindow() 186 | } 187 | 188 | if entry := item.entry; entry != nil { 189 | t.hitsInSample++ 190 | t.sketch.Add(item.hash) 191 | if entry.meta.prev != nil { 192 | if entry.flag.IsWindow() { 193 | t.window.MoveToFront(entry) 194 | } else { 195 | t.slru.access(entry) 196 | } 197 | } 198 | } 199 | // Access may promote entry from probation to protected, 200 | // cause protected size larger then its capacity, 201 | // but we can delay demote until next set 202 | // because on Access the total size of cache won't change. 203 | } 204 | 205 | func (t *TinyLfu[K, V]) Remove(entry *Entry[K, V], callback bool) { 206 | if entry.flag.IsWindow() { 207 | t.window.Remove(entry) 208 | } else { 209 | t.slru.remove(entry) 210 | } 211 | t.weightedSize -= uint(entry.policyWeight) 212 | if callback { 213 | t.removeCallback(entry) 214 | } 215 | } 216 | 217 | func (t *TinyLfu[K, V]) UpdateCost(entry *Entry[K, V], weightChange int64) { 218 | // entry's policy weigh already updated 219 | // so update weightedSize to keep sync 220 | t.weightedSize += uint(weightChange) 221 | 222 | // update window/slru 223 | // if entry new weight > max size 224 | // evict immediately 225 | if entry.flag.IsWindow() { 226 | t.window.len += weightChange 227 | if entry.policyWeight > int64(t.capacity) { 228 | t.Remove(entry, true) 229 | } else { 230 | t.window.MoveToFront(entry) 231 | } 232 | } else { 233 | t.slru.updateCost(entry, weightChange) 234 | if entry.policyWeight > int64(t.capacity) { 235 | t.Remove(entry, true) 236 | } else { 237 | t.slru.access(entry) 238 | } 239 | } 240 | 241 | if t.weightedSize > t.capacity { 242 | t.EvictEntries() 243 | } 244 | } 245 | 246 | // move entry from protected to probation 247 | func (t *TinyLfu[K, V]) demoteFromProtected() { 248 | for t.slru.protected.Len() > int(t.slru.protected.capacity) { 249 | entry := t.slru.protected.PopTail() 250 | t.slru.probation.PushFront(entry) 251 | } 252 | } 253 | 254 | func (t *TinyLfu[K, V]) evictFromWindow() *Entry[K, V] { 255 | 
var first *Entry[K, V] 256 | for t.window.Len() > int(t.window.capacity) { 257 | if victim := t.window.PopTail(); victim != nil { 258 | if first == nil { 259 | first = victim 260 | } 261 | t.slru.insert(victim) 262 | } 263 | } 264 | return first 265 | } 266 | 267 | func (t *TinyLfu[K, V]) admit(candidateKey, victimKey K) bool { 268 | victimFreq := t.sketch.Estimate(t.hasher.Hash(victimKey)) 269 | candidateFreq := t.sketch.Estimate(t.hasher.Hash(candidateKey)) 270 | if candidateFreq > victimFreq { 271 | return true 272 | } else if candidateFreq >= ADMIT_HASHDOS_THRESHOLD { 273 | // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack 274 | // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm 275 | // candidate reduces the number of random acceptances to minimize the impact on the hit rate. 276 | rand := xruntime.Fastrand() 277 | return (rand & 127) == 0 278 | } 279 | return false 280 | } 281 | 282 | // compare and evict entries until cache size fit. 283 | // candidate is the first entry evicted from window, 284 | // if head is null, start from last entry from window. 
// compare and evict entries until cache size fit.
// candidate is the first entry evicted from window,
// if head is null, start from last entry from window.
//
// Two cursors walk backwards simultaneously: "candidate" over entries
// trying to get in (probation tail, then window tail) and "victim" over
// entries defending their spot (probation, then protected, then window).
// Each round the sketch-based admit() picks which of the pair to evict.
func (t *TinyLfu[K, V]) evictFromMain(candidate *Entry[K, V]) {
	victimQueue := LIST_PROBATION
	candidateQueue := LIST_PROBATION
	victim := t.slru.probation.Back()

	for t.weightedSize > t.capacity {
		// refill the candidate cursor from the window once probation is exhausted
		if candidate == nil && candidateQueue == LIST_PROBATION {
			candidate = t.window.Back()
			candidateQueue = LIST_WINDOW
		}

		// no pair to compare: advance the victim cursor to the next queue,
		// or stop when every queue has been drained
		if candidate == nil && victim == nil {
			if victimQueue == LIST_PROBATION {
				victim = t.slru.protected.Back()
				victimQueue = LIST_PROTECTED
				continue
			} else if victimQueue == LIST_PROTECTED {
				victim = t.window.Back()
				victimQueue = LIST_WINDOW
				continue
			}
			break
		}

		// only one cursor left: evict along it unconditionally
		if victim == nil {
			previous := candidate.PrevPolicy()
			evict := candidate
			candidate = previous
			t.Remove(evict, true)
			continue
		} else if candidate == nil {
			evict := victim
			victim = victim.PrevPolicy()
			t.Remove(evict, true)
			continue
		}

		// cursors met on the same entry: evict it and retire the candidate
		// cursor (victim keeps walking)
		if victim == candidate {
			victim = victim.PrevPolicy()
			t.Remove(candidate, true)
			candidate = nil
			continue
		}

		// candidate alone is heavier than everything currently cached:
		// it can never be admitted, drop it outright
		if candidate.policyWeight > int64(t.weightedSize) {
			evict := candidate
			candidate = candidate.PrevPolicy()
			t.Remove(evict, true)
			continue
		}

		// frequency duel: winner stays, loser is evicted
		if t.admit(candidate.key, victim.key) {
			evict := victim
			victim = victim.PrevPolicy()
			t.Remove(evict, true)
			candidate = candidate.PrevPolicy()
		} else {
			evict := candidate
			candidate = candidate.PrevPolicy()
			t.Remove(evict, true)
		}
	}
}
3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/Yiling-J/theine-go/internal/hasher" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | type testEventType uint8 14 | 15 | const ( 16 | TestEventGet testEventType = iota 17 | TestEventSet 18 | TestEventUpdate 19 | TestEventFreq 20 | TestEventRemove 21 | TestEventResizeWindow 22 | ) 23 | 24 | type testEvent struct { 25 | event testEventType 26 | key int 27 | value int 28 | } 29 | 30 | type testCase struct { 31 | name string 32 | events []testEvent 33 | expected string 34 | } 35 | 36 | var weightTests = []testCase{ 37 | { 38 | "window promote", 39 | []testEvent{ 40 | {TestEventGet, 13, 1}, 41 | }, 42 | "13/14/12/11/10:9/8/7/6/5/4/3/2/1/0:", 43 | }, 44 | { 45 | "probation promote", 46 | []testEvent{ 47 | {TestEventGet, 7, 1}, 48 | }, 49 | "14/13/12/11/10:9/8/6/5/4/3/2/1/0:7", 50 | }, 51 | { 52 | "protect promote", 53 | []testEvent{ 54 | {TestEventGet, 5, 1}, 55 | {TestEventGet, 6, 1}, 56 | {TestEventGet, 7, 1}, 57 | {TestEventGet, 8, 1}, 58 | {TestEventGet, 5, 1}, 59 | }, 60 | "14/13/12/11/10:9/4/3/2/1/0:5/8/7/6", 61 | }, 62 | { 63 | "simple insert", 64 | []testEvent{ 65 | {TestEventSet, 15, 1}, 66 | }, 67 | // 10 is evicted because of frequency 68 | "15/14/13/12/11:9/8/7/6/5/4/3/2/1/0:", 69 | }, 70 | { 71 | "simple insert, low freq", 72 | []testEvent{ 73 | {TestEventFreq, 0, 5}, 74 | {TestEventSet, 15, 1}, 75 | }, 76 | // 10 is evicted because of frequency 77 | "15/14/13/12/11:9/8/7/6/5/4/3/2/1/0:", 78 | }, 79 | { 80 | "simple insert, high freq", 81 | []testEvent{ 82 | {TestEventFreq, 10, 5}, 83 | {TestEventSet, 15, 1}, 84 | }, 85 | // 0 is evicted because of frequency 86 | "15/14/13/12/11:10/9/8/7/6/5/4/3/2/1:", 87 | }, 88 | { 89 | "simple insert, high weight", 90 | []testEvent{ 91 | {TestEventSet, 15, 3}, 92 | }, 93 | // after window evict: 94 | // 15/14/13:12/11/10/9/8/7/6/5/4/3/2/1/0: 95 | // compare 10-0, evict 10, 96 | // compare 11-1, evict 11, 97 | // compare 12-2, 
evict 12, 98 | // now 15/14/13/9/8/7/6/5/4/3/2/1/0: 99 | "15/14/13:9/8/7/6/5/4/3/2/1/0:", 100 | }, 101 | { 102 | "simple insert, high weight, high freq", 103 | []testEvent{ 104 | {TestEventFreq, 10, 5}, 105 | {TestEventFreq, 11, 5}, 106 | {TestEventSet, 15, 3}, 107 | }, 108 | // after window evict: 109 | // 15/14/13:12/11/10/9/8/7/6/5/4/3/2/1/0: 110 | // compare 10-0, evict 0, 111 | // compare 11-1, evict 1, 112 | // compare 12-2, evict 12, 113 | // now 15/14/13/9/8/7/6/5/4/3/2/1/0: 114 | "15/14/13:11/10/9/8/7/6/5/4/3/2:", 115 | }, 116 | { 117 | "simple insert, weight lt window", 118 | []testEvent{ 119 | {TestEventSet, 15, 8}, 120 | }, 121 | // after window evict: 122 | // :15/14/13/12/11/10/9/8/7/6/5/4/3/2/1/0: 123 | // evict all winodw entries 124 | ":9/8/7/6/5/4/3/2/1/0:", 125 | }, 126 | { 127 | "simple insert, weight lt window, high freq", 128 | []testEvent{ 129 | {TestEventFreq, 15, 5}, 130 | {TestEventSet, 15, 8}, 131 | }, 132 | // 10-14 evicted, 133 | // 0 evicted, 15 remain 134 | // candidate is nil, keep evict victim 1 135 | // candidate is nil, keep evict victim 2 136 | // size fit 137 | ":15/9/8/7/6/5/4/3:", 138 | }, 139 | { 140 | "update weight", 141 | []testEvent{ 142 | {TestEventUpdate, 13, 7}, 143 | }, 144 | ":9/8/7/6/5/4/3/2/1/0:", 145 | }, 146 | { 147 | "update protected, delay", 148 | []testEvent{ 149 | {TestEventRemove, 14, 1}, 150 | {TestEventRemove, 13, 1}, 151 | {TestEventRemove, 12, 1}, 152 | {TestEventRemove, 9, 1}, 153 | {TestEventRemove, 8, 1}, 154 | {TestEventRemove, 7, 1}, 155 | {TestEventRemove, 6, 1}, 156 | {TestEventGet, 4, 1}, 157 | {TestEventUpdate, 4, 5}, 158 | }, 159 | // protected cap exceed, next resize will demote it 160 | "11/10:5/3/2/1/0:4", 161 | }, 162 | { 163 | "update protected, demote ", 164 | []testEvent{ 165 | {TestEventRemove, 14, 1}, 166 | {TestEventRemove, 13, 1}, 167 | {TestEventRemove, 12, 1}, 168 | {TestEventRemove, 9, 1}, 169 | {TestEventRemove, 8, 1}, 170 | {TestEventRemove, 7, 1}, 171 | {TestEventRemove, 6, 
1}, 172 | {TestEventGet, 4, 1}, 173 | {TestEventUpdate, 4, 5}, 174 | {TestEventResizeWindow, 0, 0}, 175 | }, 176 | // protected cap exceed, demote 177 | "11/10:4/5/3/2/1/0:", 178 | }, 179 | { 180 | "update protected, demote not run", 181 | []testEvent{ 182 | {TestEventRemove, 14, 1}, 183 | {TestEventRemove, 13, 1}, 184 | {TestEventRemove, 12, 1}, 185 | {TestEventRemove, 9, 1}, 186 | {TestEventRemove, 8, 1}, 187 | {TestEventRemove, 7, 1}, 188 | {TestEventRemove, 6, 1}, 189 | {TestEventGet, 4, 1}, 190 | {TestEventUpdate, 4, 5}, 191 | {TestEventGet, 4, 1}, 192 | }, 193 | "11/10:5/3/2/1/0:4", 194 | }, 195 | { 196 | "update protected, demote auto run", 197 | []testEvent{ 198 | {TestEventRemove, 14, 1}, 199 | {TestEventRemove, 13, 1}, 200 | {TestEventRemove, 12, 1}, 201 | {TestEventRemove, 9, 1}, 202 | {TestEventRemove, 8, 1}, 203 | {TestEventRemove, 7, 1}, 204 | {TestEventRemove, 6, 1}, 205 | {TestEventGet, 4, 1}, 206 | {TestEventUpdate, 4, 5}, 207 | {TestEventSet, 12, 1}, 208 | }, 209 | "12/11/10:4/5/3/2/1/0:", 210 | }, 211 | { 212 | "window too large", 213 | []testEvent{ 214 | {TestEventGet, 6, 7}, 215 | {TestEventFreq, 14, 5}, 216 | {TestEventUpdate, 14, 16}, 217 | }, 218 | // larger than cap will be evicted immediately 219 | "13/12/11/10:9/8/7/5/4/3/2/1/0:6", 220 | }, 221 | { 222 | "probation too large", 223 | []testEvent{ 224 | {TestEventGet, 6, 7}, 225 | {TestEventFreq, 7, 5}, 226 | {TestEventUpdate, 7, 16}, 227 | }, 228 | // larger than cap will be evicted immediately 229 | "14/13/12/11/10:9/8/5/4/3/2/1/0:6", 230 | }, 231 | { 232 | "protected too large", 233 | []testEvent{ 234 | {TestEventGet, 6, 7}, 235 | {TestEventUpdate, 6, 16}, 236 | }, 237 | // larger than cap will be evicted immediately 238 | "14/13/12/11/10:9/8/7/5/4/3/2/1/0:", 239 | }, 240 | { 241 | "window very large", 242 | []testEvent{ 243 | {TestEventGet, 6, 7}, 244 | {TestEventFreq, 14, 5}, 245 | {TestEventUpdate, 14, 13}, 246 | }, 247 | ":14:6", 248 | }, 249 | { 250 | "probation very large", 251 | 
[]testEvent{ 252 | {TestEventGet, 6, 7}, 253 | {TestEventFreq, 7, 5}, 254 | {TestEventUpdate, 7, 13}, 255 | }, 256 | // larger than cap will be evicted immediately 257 | "::7/6", 258 | }, 259 | { 260 | "protected very large", 261 | []testEvent{ 262 | {TestEventGet, 6, 7}, 263 | {TestEventUpdate, 6, 14}, 264 | }, 265 | // key 6 is in protected and has eight larger than capacity, 266 | // so all probation will be evicted 267 | "::6", 268 | }, 269 | } 270 | 271 | func newTinyLfuSized[K comparable, V any](wsize, msize, psize uint, hasher *hasher.Hasher[K]) *TinyLfu[K, V] { 272 | tlfu := &TinyLfu[K, V]{ 273 | capacity: wsize + msize, 274 | slru: &Slru[K, V]{ 275 | maxsize: msize, 276 | // probation list size is dynamic 277 | probation: NewList[K, V](0, LIST_PROBATION), 278 | protected: NewList[K, V](psize, LIST_PROTECTED), 279 | }, 280 | sketch: NewCountMinSketch(), 281 | step: -float32(wsize+msize) * 0.0625, 282 | hasher: hasher, 283 | misses: NewUnsignedCounter(), 284 | hits: NewUnsignedCounter(), 285 | window: NewList[K, V](wsize, LIST_WINDOW), 286 | } 287 | 288 | return tlfu 289 | } 290 | 291 | func assertLen(t *testing.T, list *List[int, int]) { 292 | sum := 0 293 | for _, e := range list.entries() { 294 | sum += int(e.PolicyWeight()) 295 | } 296 | require.Equal(t, list.Len(), sum) 297 | } 298 | 299 | func TestTlfu_Weight(t *testing.T) { 300 | hasher := hasher.NewHasher[int](nil) 301 | for _, cs := range weightTests { 302 | t.Run(cs.name, func(t *testing.T) { 303 | // window size 5, main size 10, protected size 5 304 | tlfu := newTinyLfuSized[int, int](5, 10, 5, hasher) 305 | tlfu.removeCallback = func(entry *Entry[int, int]) {} 306 | em := map[int]*Entry[int, int]{} 307 | 308 | // fill tlfu with 15 entries 309 | for i := 0; i < 15; i++ { 310 | entry := &Entry[int, int]{key: i, value: i, policyWeight: 1} 311 | em[i] = entry 312 | tlfu.Set(entry) 313 | } 314 | tlfu.EvictEntries() 315 | 316 | for _, event := range cs.events { 317 | switch event.event { 318 | case 
// groupNumbers compresses runs of adjacent numbers (each step +1 or -1
// from the previous) into "first-last" ranges joined by ">".
// e.g. ["3","2","1","5"] -> "3-1>5-5". Non-numeric input degrades to
// 0 per strconv.Atoi's ignored error.
func groupNumbers(input []string) string {
	if len(input) == 0 {
		return ""
	}

	var groups []string
	start, end := 0, 0
	flush := func() {
		groups = append(groups, fmt.Sprintf("%s-%s", input[start], input[end]))
	}

	prev, _ := strconv.Atoi(input[0])
	for i := 1; i < len(input); i++ {
		cur, _ := strconv.Atoi(input[i])
		if cur == prev+1 || cur == prev-1 {
			// still adjacent: extend the current run
			end = i
		} else {
			flush()
			start, end = i, i
		}
		prev = cur
	}
	// close the final run
	flush()

	return strings.Join(groups, ">")
}
tlfu.hasher.Hash(i), 443 | }) 444 | } 445 | 446 | for _, hrc := range cs.hrChanges { 447 | newHits := int(hrc * 100) 448 | newMisses := 100 - newHits 449 | tlfu.hitsInSample = uint64(newHits) 450 | tlfu.missesInSample = uint64(newMisses) 451 | tlfu.climb() 452 | tlfu.resizeWindow() 453 | } 454 | 455 | assertLen(t, tlfu.window) 456 | assertLen(t, tlfu.slru.probation) 457 | assertLen(t, tlfu.slru.protected) 458 | require.Equal(t, int(tlfu.weightedSize), tlfu.window.Len()+tlfu.slru.len()) 459 | 460 | result, total := grouped(tlfu) 461 | require.Equal(t, 150, total) 462 | require.Equal(t, cs.expected, result) 463 | }) 464 | } 465 | } 466 | 467 | func grouped(tlfu *TinyLfu[int, int]) (string, int) { 468 | total := 0 469 | l := strings.Split(tlfu.window.display(), "/") 470 | total += len(l) 471 | windowSeq := groupNumbers(l) 472 | 473 | l = strings.Split(tlfu.slru.probation.display(), "/") 474 | total += len(l) 475 | probationSeq := groupNumbers(l) 476 | l = strings.Split(tlfu.slru.protected.display(), "/") 477 | total += len(l) 478 | protectedSeq := groupNumbers(l) 479 | 480 | result := strings.Join( 481 | []string{ 482 | windowSeq, probationSeq, 483 | protectedSeq}, ":") 484 | return result, total 485 | } 486 | 487 | func TestTlfu_AdaptiveAmountRemain(t *testing.T) { 488 | hasher := hasher.NewHasher[int](nil) 489 | // window size 50, main size 100, protected size 80 490 | tlfu := newTinyLfuSized[int, int](50, 100, 80, hasher) 491 | tlfu.hr = 0.2 492 | tlfu.removeCallback = func(entry *Entry[int, int]) {} 493 | em := map[int]*Entry[int, int]{} 494 | 495 | for i := 0; i < 150; i++ { 496 | entry := &Entry[int, int]{key: i, value: i, policyWeight: 1} 497 | em[i] = entry 498 | tlfu.Set(entry) 499 | } 500 | tlfu.EvictEntries() 501 | 502 | for i := 0; i < 80; i++ { 503 | entry := em[i] 504 | tlfu.Access(ReadBufItem[int, int]{ 505 | entry: entry, 506 | hash: tlfu.hasher.Hash(i), 507 | }) 508 | } 509 | 510 | require.Equal(t, -9.375, float64(tlfu.step)) 511 | 512 | // increase 
entry 100 weight to 4, 513 | entry := em[100] 514 | entry.policyWeight += int64(3) 515 | tlfu.window.len += 3 516 | // increase entry 101 weight to 4, 517 | entry = em[101] 518 | entry.policyWeight += int64(3) 519 | tlfu.window.len += 3 520 | // increase entry 102 weight to 3, 521 | entry = em[102] 522 | entry.policyWeight += int64(2) 523 | tlfu.window.len += 2 524 | 525 | // the step is 9, so 100 and 101 will move but 102 can't 526 | newHits := int(0.2 * 100) 527 | newMisses := 100 - newHits 528 | tlfu.hitsInSample = uint64(newHits) 529 | tlfu.missesInSample = uint64(newMisses) 530 | tlfu.climb() 531 | tlfu.resizeWindow() 532 | 533 | require.Equal(t, -1, tlfu.amount) 534 | require.Equal(t, 42, int(tlfu.window.capacity)) 535 | require.Equal(t, 88, int(tlfu.slru.protected.capacity)) 536 | 537 | result, total := grouped(tlfu) 538 | require.Equal(t, 150, total) 539 | require.Equal(t, "149-102:101-80:79-0", result) 540 | 541 | // manually add one entry, so window tail(101) changed 542 | entry = &Entry[int, int]{key: 998, value: 998, policyWeight: 1} 543 | em[998] = entry 544 | tlfu.Set(entry) 545 | 546 | result, total = grouped(tlfu) 547 | require.Equal(t, 150, total) 548 | require.Equal(t, "998-998>149-109:108-103>101-80:79-0", result) 549 | 550 | // apply remaining amount 551 | tlfu.resizeWindow() 552 | require.Equal(t, 0, tlfu.amount) 553 | require.Equal(t, 41, int(tlfu.window.capacity)) 554 | require.Equal(t, 89, int(tlfu.slru.protected.capacity)) 555 | result, total = grouped(tlfu) 556 | require.Equal(t, 150, total) 557 | require.Equal(t, "998-998>149-110:109-103>101-80:79-0", result) 558 | } 559 | 560 | func TestTlfu_SketchResize(t *testing.T) { 561 | hasher := hasher.NewHasher[int](nil) 562 | tlfu := NewTinyLfu[int, int](10000, hasher) 563 | 564 | for i := 0; i < 10000; i++ { 565 | tlfu.Set(&Entry[int, int]{key: i, value: i, policyWeight: 1}) 566 | require.True(t, len(tlfu.sketch.Table) >= i, fmt.Sprintf("sketch size %d < %d", len(tlfu.sketch.Table), i)) 567 | } 
568 | 569 | size := len(tlfu.sketch.Table) 570 | require.Equal(t, 16384, size) 571 | 572 | for i := 10000; i < 20000; i++ { 573 | require.Equal(t, size, len(tlfu.sketch.Table)) 574 | } 575 | } 576 | 577 | func TestTlfu_UpdateCost(t *testing.T) { 578 | hasher := hasher.NewHasher[int](nil) 579 | tlfu := NewTinyLfu[int, int](100, hasher) 580 | e1 := &Entry[int, int]{key: 1, value: 1, policyWeight: 1} 581 | e2 := &Entry[int, int]{key: 2, value: 1, policyWeight: 2} 582 | e3 := &Entry[int, int]{key: 3, value: 1, policyWeight: 3} 583 | 584 | tlfu.Set(e1) 585 | tlfu.Set(e2) 586 | tlfu.Set(e3) 587 | require.Equal(t, 6, int(tlfu.weightedSize)) 588 | 589 | e1.policyWeight = 3 590 | e2.policyWeight = 2 591 | e3.policyWeight = 1 592 | tlfu.UpdateCost(e1, 2) 593 | tlfu.UpdateCost(e2, 0) 594 | tlfu.UpdateCost(e3, -2) 595 | require.Equal(t, 6, int(tlfu.weightedSize)) 596 | } 597 | -------------------------------------------------------------------------------- /internal/utils.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | // RoundUpPowerOf2 is based on https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2. 
// RoundUpPowerOf2 returns the smallest power of two greater than or equal
// to v; RoundUpPowerOf2(0) returns 1. For v > 1<<31 the result wraps to 0,
// matching the classic bit-smearing construction.
func RoundUpPowerOf2(v uint32) uint32 {
	if v == 0 {
		return 1
	}
	// Smear the highest set bit of v-1 into every lower position, doubling
	// the shift each pass (1, 2, 4, 8, 16 covers all 32 bits), then add one.
	n := v - 1
	for shift := uint(1); shift < 32; shift <<= 1 {
		n |= n >> shift
	}
	return n + 1
}
// Fastrand returns a pseudorandom uint32.
//
// On toolchains before Go 1.22 (see the !go1.22 build constraint at the top
// of this file) it links directly against the runtime's internal generator
// via go:linkname, avoiding any import of math/rand.
//
//go:noescape
//go:linkname Fastrand runtime.fastrand
func Fastrand() uint32
// Fastrand returns a pseudorandom uint32.
//
// On Go 1.22+ (see the go1.22 build constraint at the top of this file) the
// runtime.fastrand linkname is no longer needed: math/rand/v2's top-level
// generator is automatically seeded by the runtime and cheap to call.
func Fastrand() uint32 {
	return rand.Uint32()
}
// Parallelism returns the maximum possible number of concurrently running
// goroutines: the smaller of GOMAXPROCS and the logical CPU count.
func Parallelism() uint32 {
	procs := uint32(runtime.GOMAXPROCS(0))
	if cpus := uint32(runtime.NumCPU()); cpus < procs {
		return cpus
	}
	return procs
}
context.Context, key string) (theine.Loaded[string], error) { 60 | counter++ 61 | return theine.Loaded[string]{Value: key, TTL: 1 * time.Second}, nil 62 | }, 63 | ) 64 | require.Nil(t, err) 65 | v, err := client.Get(context.TODO(), "foo") 66 | require.Nil(t, err) 67 | require.Equal(t, "foo", v) 68 | require.Equal(t, 1, client.Len()) 69 | require.Equal(t, 1, counter) 70 | 71 | time.Sleep(3 * time.Second) 72 | 73 | v, err = client.Get(context.TODO(), "foo") 74 | require.Nil(t, err) 75 | require.Equal(t, "foo", v) 76 | require.Equal(t, 1, client.Len()) 77 | require.Equal(t, 2, counter) 78 | client.Close() 79 | 80 | counter = 0 81 | client, err = theine.NewBuilder[string, string](500).BuildWithLoader( 82 | func(ctx context.Context, key string) (theine.Loaded[string], error) { 83 | counter++ 84 | return theine.Loaded[string]{Value: key, TTL: 10 * time.Second}, nil 85 | }, 86 | ) 87 | require.Nil(t, err) 88 | v, err = client.Get(context.TODO(), "foo") 89 | require.Nil(t, err) 90 | require.Equal(t, "foo", v) 91 | require.Equal(t, 1, client.Len()) 92 | require.Equal(t, 1, counter) 93 | 94 | time.Sleep(2 * time.Second) 95 | 96 | v, err = client.Get(context.TODO(), "foo") 97 | require.Nil(t, err) 98 | require.Equal(t, "foo", v) 99 | require.Equal(t, 1, client.Len()) 100 | require.Equal(t, 1, counter) 101 | client.Close() 102 | } 103 | 104 | func TestLoadingCache_SetWithTTLAutoExpire(t *testing.T) { 105 | client, err := theine.NewBuilder[string, string](100).BuildWithLoader( 106 | func(ctx context.Context, key string) (theine.Loaded[string], error) { 107 | return theine.Loaded[string]{Value: key, TTL: 5 * time.Second}, nil 108 | }, 109 | ) 110 | require.Nil(t, err) 111 | for i := 0; i < 30; i++ { 112 | key := fmt.Sprintf("key:%d", i) 113 | v, err := client.Get(context.TODO(), key) 114 | require.Nil(t, err) 115 | require.Equal(t, key, v) 116 | } 117 | for { 118 | time.Sleep(5 * time.Second) 119 | if client.Len() == 0 { 120 | break 121 | } 122 | } 123 | client.Close() 124 | } 
125 | 126 | func TestLoadingCache_Simple(t *testing.T) { 127 | builder := theine.NewBuilder[int, int](100) 128 | counter := atomic.Uint32{} 129 | client, err := builder.BuildWithLoader(func(ctx context.Context, key int) (theine.Loaded[int], error) { 130 | time.Sleep(50 * time.Millisecond) 131 | counter.Add(1) 132 | return theine.Loaded[int]{Value: key, Cost: 1, TTL: theine.ZERO_TTL}, nil 133 | }) 134 | require.Nil(t, err) 135 | var wg sync.WaitGroup 136 | for i := 1; i <= 2000; i++ { 137 | wg.Add(1) 138 | go func() { 139 | ctx := context.TODO() 140 | defer wg.Done() 141 | v, err := client.Get(ctx, 1) 142 | if err != nil || v != 1 { 143 | panic("") 144 | } 145 | }() 146 | } 147 | wg.Wait() 148 | require.True(t, counter.Load() < 50) 149 | 150 | success := client.Set(9999, 9999, 1) 151 | require.True(t, success) 152 | value, err := client.Get(context.TODO(), 9999) 153 | require.Nil(t, err) 154 | require.Equal(t, 9999, value) 155 | client.Delete(9999) 156 | require.Nil(t, err) 157 | value, err = client.Get(context.TODO(), 9999) 158 | require.Nil(t, err) 159 | require.Equal(t, 9999, value) 160 | success = client.SetWithTTL(9999, 9999, 1, 5*time.Second) 161 | require.True(t, success) 162 | } 163 | 164 | func TestLoadingCache_LoadError(t *testing.T) { 165 | builder := theine.NewBuilder[int, int](100) 166 | client, err := builder.Loading(func(ctx context.Context, key int) (theine.Loaded[int], error) { 167 | if key != 1 { 168 | return theine.Loaded[int]{}, errors.New("error") 169 | } 170 | return theine.Loaded[int]{Value: key, Cost: 1, TTL: theine.ZERO_TTL}, nil 171 | }).Build() 172 | require.Nil(t, err) 173 | _, err = client.Get(context.TODO(), 2) 174 | require.NotNil(t, err) 175 | } 176 | 177 | func TestLoadingCache_Cost(t *testing.T) { 178 | // test cost func 179 | builder := theine.NewBuilder[string, string](500) 180 | builder.Cost(func(v string) int64 { 181 | return int64(len(v)) 182 | }) 183 | client, err := builder.BuildWithLoader(func(ctx context.Context, key 
// TestLoadingCache_Doorkeeper exercises the Doorkeeper option: with it
// enabled, the first Set of a never-seen key is rejected, while a repeat Set
// of the same key later succeeds.
func TestLoadingCache_Doorkeeper(t *testing.T) {
	builder := theine.NewBuilder[string, string](500)
	builder.Doorkeeper(true)
	client, err := builder.BuildWithLoader(func(ctx context.Context, key string) (theine.Loaded[string], error) {
		return theine.Loaded[string]{Value: key, Cost: 1, TTL: 0}, nil
	})
	require.Nil(t, err)
	// First-time Sets are all rejected, so the cache stays empty.
	for i := 0; i < 200; i++ {
		key := fmt.Sprintf("key:%d", i)
		success := client.Set(key, key, 1)
		require.False(t, success)
	}
	require.True(t, client.Len() == 0)
	// NOTE(review): the sleep presumably lets async processing record the
	// first-round keys before retrying — confirm against Set internals.
	time.Sleep(time.Second)
	// Repeat Sets of the same keys are now admitted.
	for i := 0; i < 200; i++ {
		key := fmt.Sprintf("key:%d", i)
		success := client.Set(key, key, 1)
		require.True(t, success)
	}
	require.True(t, client.Len() > 0)
	// High-volume Sets of fresh keys; success is deliberately not asserted,
	// this only exercises the doorkeeper under churn.
	for i := 0; i < 500000; i++ {
		key := fmt.Sprintf("key:%d:2", i)
		client.Set(key, key, 1)
	}
}
// TestLoadingCache_GetSetDeleteNoRace hammers a loading cache with 20
// goroutines mixing Get, SetWithTTL, Delete and Range over a shared key set,
// at both a small and a large capacity, then checks the cache has not grown
// materially past its configured size. Intended to run under -race.
func TestLoadingCache_GetSetDeleteNoRace(t *testing.T) {
	for _, size := range []int{500, 100000} {
		client, err := theine.NewBuilder[string, string](int64(size)).BuildWithLoader(func(ctx context.Context, key string) (theine.Loaded[string], error) {
			return theine.Loaded[string]{Value: key, Cost: 1, TTL: 0}, nil
		})
		ctx := context.TODO()
		require.Nil(t, err)
		var wg sync.WaitGroup
		// Shared, pre-generated key set so all goroutines collide on keys.
		keys := []string{}
		for i := 0; i < 100000; i++ {
			keys = append(keys, fmt.Sprintf("%d", rand.Intn(1000000)))
		}
		for i := 1; i <= 20; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i := 0; i < 100000; i++ {
					key := keys[i]
					v, err := client.Get(ctx, key)
					// The loader echoes the key, so any other value is a bug.
					if err != nil || v != key {
						panic(key)
					}
					// Interleave writes, deletes and full scans at different
					// strides to maximize operation mixing.
					if i%3 == 0 {
						client.SetWithTTL(key, key, 1, time.Second*time.Duration(i%25+5))
					}
					if i%5 == 0 {
						client.Delete(key)
					}
					if i%5000 == 0 {
						client.Range(func(key, value string) bool {
							return true
						})
					}
				}
			}()
		}
		wg.Wait()
		// Let async maintenance settle before checking the size bound.
		time.Sleep(300 * time.Millisecond)
		require.True(t, client.Len() < size+50)
		client.Close()
	}
}
stats.HitRatio()) 369 | require.True(t, stats.HitRatio() < 0.6) 370 | require.True(t, 1-float64(miss.Load())/float64(total) > 0.5) 371 | require.True(t, 1-float64(miss.Load())/float64(total) < 0.6) 372 | } 373 | -------------------------------------------------------------------------------- /otest: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yiling-J/theine-go/ecceec36c9963b4853244ac59c71b0da56ae5dc7/otest -------------------------------------------------------------------------------- /persistence_test.go: -------------------------------------------------------------------------------- 1 | package theine_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/Yiling-J/theine-go" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestPersist_Basic(t *testing.T) { 15 | client, err := theine.NewBuilder[int, int](100).Build() 16 | require.Nil(t, err) 17 | for i := 0; i < 1000; i++ { 18 | client.Set(i, i, 1) 19 | } 20 | f, err := os.Create("ptest") 21 | defer os.Remove("ptest") 22 | require.Nil(t, err) 23 | err = client.SaveCache(0, f) 24 | require.Nil(t, err) 25 | f.Close() 26 | 27 | f, err = os.Open("ptest") 28 | require.Nil(t, err) 29 | new, err := theine.NewBuilder[int, int](100).Build() 30 | require.Nil(t, err) 31 | err = new.LoadCache(0, f) 32 | require.Nil(t, err) 33 | f.Close() 34 | m := map[int]int{} 35 | new.Range(func(key, value int) bool { 36 | m[key] = value 37 | return true 38 | }) 39 | require.Equal(t, 100, len(m)) 40 | for k, v := range m { 41 | require.Equal(t, k, v) 42 | } 43 | } 44 | 45 | func TestPersist_LoadingBasic(t *testing.T) { 46 | client, err := theine.NewBuilder[int, int](100).BuildWithLoader(func(ctx context.Context, key int) (theine.Loaded[int], error) { 47 | return theine.Loaded[int]{Value: key, Cost: 1, TTL: 0}, nil 48 | }) 49 | require.Nil(t, err) 50 | for i := 0; i < 1000; i++ { 51 | client.Set(i, i, 1) 52 
| } 53 | f, err := os.Create("ptest") 54 | defer os.Remove("ptest") 55 | require.Nil(t, err) 56 | err = client.SaveCache(0, f) 57 | require.Nil(t, err) 58 | f.Close() 59 | 60 | f, err = os.Open("ptest") 61 | require.Nil(t, err) 62 | new, err := theine.NewBuilder[int, int](100).BuildWithLoader(func(ctx context.Context, key int) (theine.Loaded[int], error) { 63 | return theine.Loaded[int]{Value: key, Cost: 1, TTL: 0}, nil 64 | }) 65 | require.Nil(t, err) 66 | err = new.LoadCache(0, f) 67 | require.Nil(t, err) 68 | f.Close() 69 | m := map[int]int{} 70 | new.Range(func(key, value int) bool { 71 | m[key] = value 72 | return true 73 | }) 74 | require.Equal(t, 100, len(m)) 75 | for k, v := range m { 76 | require.Equal(t, k, v) 77 | } 78 | } 79 | 80 | func TestPersist_TestVersionMismatch(t *testing.T) { 81 | client, err := theine.NewBuilder[int, int](100).Build() 82 | require.Nil(t, err) 83 | f, err := os.Create("ptest") 84 | defer os.Remove("ptest") 85 | require.Nil(t, err) 86 | err = client.SaveCache(0, f) 87 | require.Nil(t, err) 88 | f.Close() 89 | 90 | f, err = os.Open("ptest") 91 | require.Nil(t, err) 92 | new, err := theine.NewBuilder[int, int](100).Build() 93 | require.Nil(t, err) 94 | err = new.LoadCache(1, f) 95 | require.Equal(t, theine.VersionMismatch, err) 96 | } 97 | 98 | func TestPersist_TestChecksumMismatch(t *testing.T) { 99 | client, err := theine.NewBuilder[int, int](100).Build() 100 | require.Nil(t, err) 101 | f, err := os.Create("ptest") 102 | defer os.Remove("ptest") 103 | require.Nil(t, err) 104 | err = client.SaveCache(1, f) 105 | require.Nil(t, err) 106 | // change file content 107 | for _, i := range []int64{222} { 108 | _, err = f.WriteAt([]byte{1, 0, 1, 1}, i) 109 | require.Nil(t, err) 110 | } 111 | f.Close() 112 | 113 | f, err = os.Open("ptest") 114 | require.Nil(t, err) 115 | new, err := theine.NewBuilder[int, int](100).Build() 116 | require.Nil(t, err) 117 | err = new.LoadCache(1, f) 118 | require.Equal(t, "checksum mismatch", err.Error()) 
119 | } 120 | 121 | type PStruct struct { 122 | Id int 123 | Name string 124 | Data []byte 125 | } 126 | 127 | func TestPersist_Large(t *testing.T) { 128 | client, err := theine.NewBuilder[int, PStruct](100000).Build() 129 | require.Nil(t, err) 130 | for i := 0; i < 100000; i++ { 131 | client.Set(i, PStruct{ 132 | Id: i, 133 | Name: fmt.Sprintf("struct-%d", i), 134 | Data: make([]byte, i%1000), 135 | }, 1) 136 | } 137 | require.Equal(t, 100000, client.Len()) 138 | f, err := os.Create("ptest") 139 | defer os.Remove("ptest") 140 | require.Nil(t, err) 141 | time.Sleep(time.Second) 142 | err = client.SaveCache(0, f) 143 | require.Nil(t, err) 144 | f.Close() 145 | 146 | f, err = os.Open("ptest") 147 | require.Nil(t, err) 148 | new, err := theine.NewBuilder[int, PStruct](100000).Build() 149 | require.Nil(t, err) 150 | err = new.LoadCache(0, f) 151 | require.Nil(t, err) 152 | f.Close() 153 | m := map[int]PStruct{} 154 | new.Range(func(key int, value PStruct) bool { 155 | m[key] = value 156 | return true 157 | }) 158 | require.Equal(t, 100000, len(m)) 159 | for k, v := range m { 160 | require.Equal(t, k, v.Id) 161 | require.Equal(t, fmt.Sprintf("struct-%d", k), v.Name) 162 | require.Equal(t, k%1000, len(v.Data)) 163 | } 164 | } 165 | 166 | func TestPersist_OS(t *testing.T) { 167 | f, err := os.Open("otest") 168 | require.Nil(t, err) 169 | client, err := theine.NewBuilder[int, int](100).Build() 170 | require.Nil(t, err) 171 | err = client.LoadCache(0, f) 172 | require.Nil(t, err) 173 | f.Close() 174 | m := map[int]int{} 175 | client.Range(func(key, value int) bool { 176 | m[key] = value 177 | return true 178 | }) 179 | require.Equal(t, 100, len(m)) 180 | for k, v := range m { 181 | require.Equal(t, k, v) 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /run/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "log" 
// v128 is a 128-byte payload used to give cached values a realistic size.
// Only the leading 8 bytes carry data (a little-endian uint64 tag); the
// remaining 120 bytes are padding.
type v128 struct {
	_v [128]byte
}

// GetU64 reads the uint64 tag stored in the leading 8 bytes.
func (p *v128) GetU64() uint64 {
	return binary.LittleEndian.Uint64(p._v[:8])
}

// SetU64 stores val as the uint64 tag in the leading 8 bytes.
func (p *v128) SetU64(val uint64) {
	binary.LittleEndian.PutUint64(p._v[:8], val)
}

// NewV128 returns a v128 whose tag is val.
func NewV128(val uint64) v128 {
	out := v128{}
	out.SetU64(val)
	return out
}
// TestSecondaryCache_GetSetGetDeleteGet runs the basic lifecycle
// (miss -> set -> hit -> delete -> miss) against a hybrid cache backed by the
// in-memory SimpleMapSecondary, with admission probability 1 so writes are
// never probabilistically rejected.
func TestSecondaryCache_GetSetGetDeleteGet(t *testing.T) {
	secondary := internal.NewSimpleMapSecondary[int, int]()
	client, err := theine.NewBuilder[int, int](50000).Hybrid(secondary).Workers(8).AdmProbability(1).Build()
	require.Nil(t, err)
	for i := 0; i < 1000; i++ {
		// Unknown key must miss (the third return, an error, is ignored here).
		_, ok, _ := client.Get(i)
		require.False(t, ok)
		ok = client.Set(i, i, 1)
		require.True(t, ok)
		v, ok, _ := client.Get(i)
		require.True(t, ok)
		require.Equal(t, i, v)
		err = client.Delete(i)
		require.Nil(t, err)
		// Deleted key must miss again.
		_, ok, _ = client.Get(i)
		require.False(t, ok)
	}
}
require.Nil(t, err) 38 | for i := 0; i < 1000; i++ { 39 | success := client.Set(i, i, 1) 40 | require.Nil(t, err) 41 | require.True(t, success) 42 | } 43 | time.Sleep(50 * time.Millisecond) 44 | 45 | counter := 0 46 | for i := 0; i < 1000; i++ { 47 | _, success, err := client.Get(i) 48 | require.Nil(t, err) 49 | if success { 50 | counter += 1 51 | } 52 | } 53 | require.True(t, counter < 600) 54 | } 55 | 56 | func TestSecondaryCache_ErrorHandler(t *testing.T) { 57 | secondary := internal.NewSimpleMapSecondary[int, int]() 58 | secondary.ErrMode = true 59 | client, err := theine.NewBuilder[int, int](100).Hybrid(secondary).Workers(8).AdmProbability(1).Build() 60 | require.Nil(t, err) 61 | 62 | for i := 0; i < 1000; i++ { 63 | success := client.Set(i, i, 1) 64 | require.Nil(t, err) 65 | require.True(t, success) 66 | } 67 | 68 | require.True(t, secondary.ErrCounter.Load() > 0) 69 | } 70 | 71 | func TestSecondaryCache_GetSetNoRace(t *testing.T) { 72 | secondary := internal.NewSimpleMapSecondary[int, int]() 73 | client, err := theine.NewBuilder[int, int](100).Hybrid(secondary).Workers(8).AdmProbability(1).Build() 74 | require.Nil(t, err) 75 | var wg sync.WaitGroup 76 | for i := 1; i <= runtime.GOMAXPROCS(0)*2; i++ { 77 | wg.Add(1) 78 | go func() { 79 | defer wg.Done() 80 | for i := 0; i < 20000; i++ { 81 | key := i 82 | v, ok, err := client.Get(key) 83 | if err != nil { 84 | panic(err) 85 | } 86 | if !ok { 87 | if i%2 == 0 { 88 | _ = client.Set(key, i, 1) 89 | } 90 | if i%5 == 0 { 91 | err := client.Delete(key) 92 | if err != nil { 93 | panic(err) 94 | } 95 | } 96 | } else { 97 | if i != v { 98 | panic("value mismatch") 99 | } 100 | } 101 | } 102 | }() 103 | } 104 | wg.Wait() 105 | time.Sleep(500 * time.Millisecond) 106 | client.Close() 107 | } 108 | 109 | func TestSecondaryCache_LoadingCache(t *testing.T) { 110 | secondary := internal.NewSimpleMapSecondary[int, int]() 111 | client, err := theine.NewBuilder[int, int](100).Hybrid(secondary).Workers(8).AdmProbability(1). 
112 | Loading(func(ctx context.Context, key int) (theine.Loaded[int], error) { 113 | return theine.Loaded[int]{Value: key, Cost: 1, TTL: 0}, nil 114 | }).Build() 115 | require.Nil(t, err) 116 | 117 | for i := 0; i < 1000; i++ { 118 | value, err := client.Get(context.TODO(), i) 119 | require.Nil(t, err) 120 | require.Equal(t, i, value) 121 | } 122 | 123 | for i := 0; i < 1000; i++ { 124 | value, err := client.Get(context.TODO(), i) 125 | require.Nil(t, err) 126 | require.Equal(t, i, value) 127 | } 128 | 129 | success := client.Set(999, 999, 1) 130 | require.True(t, success) 131 | _, err = client.Get(context.TODO(), 999) 132 | require.Nil(t, err) 133 | err = client.Delete(999) 134 | require.Nil(t, err) 135 | _, err = client.Get(context.TODO(), 999) 136 | require.Nil(t, err) 137 | success = client.SetWithTTL(999, 999, 1, 5*time.Second) 138 | require.True(t, success) 139 | } 140 | -------------------------------------------------------------------------------- /stats_test.go: -------------------------------------------------------------------------------- 1 | package theine_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Yiling-J/theine-go" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestStats(t *testing.T) { 11 | client, err := theine.NewBuilder[int, int](1000).Build() 12 | require.Nil(t, err) 13 | st := client.Stats() 14 | require.Equal(t, uint64(0), st.Hits()) 15 | require.Equal(t, uint64(0), st.Misses()) 16 | require.Equal(t, float64(0), st.HitRatio()) 17 | 18 | client.Set(1, 1, 1) 19 | for i := 0; i < 2000; i++ { 20 | _, ok := client.Get(1) 21 | require.True(t, ok) 22 | } 23 | 24 | st = client.Stats() 25 | require.Equal(t, uint64(2000), st.Hits()) 26 | require.Equal(t, uint64(0), st.Misses()) 27 | require.Equal(t, float64(1), st.HitRatio()) 28 | 29 | for i := 0; i < 10000; i++ { 30 | _, ok := client.Get(1) 31 | require.True(t, ok) 32 | } 33 | 34 | for i := 0; i < 10000; i++ { 35 | _, ok := client.Get(2) 36 | require.False(t, ok) 37 | } 38 
| 39 | st = client.Stats() 40 | require.Equal(t, uint64(12000), st.Hits()) 41 | require.Equal(t, uint64(10000), st.Misses()) 42 | require.Equal(t, float64(12000)/float64(12000+10000), st.HitRatio()) 43 | } 44 | --------------------------------------------------------------------------------