├── .github └── workflows │ ├── lint.yml │ └── test.yml ├── LICENSE ├── README.md ├── cache.go ├── cache_test.go ├── go.mod ├── go.sum ├── iterator.go ├── ringbuf.go ├── ringbuf_test.go ├── segment.go ├── server └── main.go └── timer.go /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | push: 4 | branches: 5 | - "**" 6 | pull_request: 7 | 8 | permissions: 9 | contents: read 10 | pull-requests: read 11 | checks: write 12 | 13 | jobs: 14 | lint: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v4 20 | 21 | - name: Setup golang 22 | uses: actions/setup-go@v5 23 | with: 24 | go-version: stable 25 | 26 | - name: Lint 27 | uses: golangci/golangci-lint-action@v4 28 | with: 29 | only-new-issues: true 30 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - "**" 7 | pull_request: 8 | 9 | permissions: 10 | contents: write 11 | 12 | jobs: 13 | test: 14 | strategy: 15 | matrix: 16 | os: 17 | - ubuntu-latest 18 | - macos-latest 19 | - windows-latest 20 | go-version: 21 | - "1.13" 22 | - oldstable 23 | - stable 24 | 25 | runs-on: ${{ matrix.os }} 26 | 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v4 30 | 31 | - name: Setup golang 32 | uses: actions/setup-go@v5 33 | with: 34 | go-version: ${{ matrix.go-version }} 35 | 36 | - name: Test 37 | run: go test -race -v 38 | 39 | - name: Coverage report 40 | if: github.event_name == 'push' && matrix.os == 'ubuntu-latest' && matrix.go-version == 'stable' 41 | continue-on-error: true 42 | uses: ncruces/go-coverage-report@v0 43 | with: 44 | report: true 45 | chart: true 46 | amend: true 47 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2015 Ewan Chou. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FreeCache - A cache library for Go with zero GC overhead and high concurrent performance. 2 | 3 | Long lived objects in memory introduce expensive GC overhead, With FreeCache, you can cache unlimited number of objects in memory 4 | without increased latency and degraded throughput. 
5 | 6 | [![Build Status](https://github.com/coocood/freecache/workflows/Test/badge.svg)](https://github.com/coocood/freecache/actions/workflows/test.yml) 7 | [![GoCover](http://github.com/coocood/freecache/wiki/coverage.svg)](https://raw.githack.com/wiki/coocood/freecache/coverage.html) 8 | [![GoDoc](https://godoc.org/github.com/coocood/freecache?status.svg)](https://godoc.org/github.com/coocood/freecache) 9 | 10 | ## Features 11 | 12 | * Store hundreds of millions of entries 13 | * Zero GC overhead 14 | * High concurrent thread-safe access 15 | * Pure Go implementation 16 | * Expiration support 17 | * Nearly LRU algorithm 18 | * Strictly limited memory usage 19 | * Come with a toy server that supports a few basic Redis commands with pipeline 20 | * Iterator support 21 | 22 | ## Performance 23 | 24 | Here is the benchmark result compares to built-in map, `Set` performance is about 2x faster than built-in map, `Get` performance is about 1/2x slower than built-in map. Since it is single threaded benchmark, in multi-threaded environment, 25 | FreeCache should be many times faster than single lock protected built-in map. 26 | 27 | BenchmarkCacheSet 3000000 446 ns/op 28 | BenchmarkMapSet 2000000 861 ns/op 29 | BenchmarkCacheGet 3000000 517 ns/op 30 | BenchmarkMapGet 10000000 212 ns/op 31 | 32 | ## Example Usage 33 | 34 | ```go 35 | // In bytes, where 1024 * 1024 represents a single Megabyte, and 100 * 1024*1024 represents 100 Megabytes. 
36 | cacheSize := 100 * 1024 * 1024 37 | cache := freecache.NewCache(cacheSize) 38 | debug.SetGCPercent(20) 39 | key := []byte("abc") 40 | val := []byte("def") 41 | expire := 60 // expire in 60 seconds 42 | cache.Set(key, val, expire) 43 | got, err := cache.Get(key) 44 | if err != nil { 45 | fmt.Println(err) 46 | } else { 47 | fmt.Printf("%s\n", got) 48 | } 49 | affected := cache.Del(key) 50 | fmt.Println("deleted key ", affected) 51 | fmt.Println("entry count ", cache.EntryCount()) 52 | ``` 53 | 54 | ## Notice 55 | 56 | * Memory is preallocated. 57 | * If you allocate large amount of memory, you may need to set `debug.SetGCPercent()` 58 | to a much lower percentage to get a normal GC frequency. 59 | * If you set a key to be expired in X seconds, e.g. using `cache.Set(key, val, X)`, 60 | the effective cache duration will be within this range: `(X-1, X] seconds`. 61 | This is because the sub-second time at the moment will be ignored when calculating 62 | the expiration: for example, if the current time is 8:15::01.800 (800 milliseconds passed 63 | since 8:15::01), the actual duration will be `X-800ms`. 64 | 65 | ## How it is done 66 | 67 | FreeCache avoids GC overhead by reducing the number of pointers. 68 | No matter how many entries stored in it, there are only 512 pointers. 69 | The data set is sharded into 256 segments by the hash value of the key. 70 | Each segment has only two pointers, one is the ring buffer that stores keys and values, 71 | the other one is the index slice which used to lookup for an entry. 72 | Each segment has its own lock, so it supports high concurrent access. 73 | 74 | ## TODO 75 | 76 | * Support dump to file and load from file. 77 | * Support resize cache size at runtime. 
78 | 79 | ## License 80 | 81 | The MIT License 82 | -------------------------------------------------------------------------------- /cache.go: -------------------------------------------------------------------------------- 1 | package freecache 2 | 3 | import ( 4 | "encoding/binary" 5 | "sync" 6 | "sync/atomic" 7 | 8 | "github.com/cespare/xxhash/v2" 9 | ) 10 | 11 | const ( 12 | // segmentCount represents the number of segments within a freecache instance. 13 | segmentCount = 256 14 | // segmentAndOpVal is bitwise AND applied to the hashVal to find the segment id. 15 | segmentAndOpVal = 255 16 | minBufSize = 512 * 1024 17 | ) 18 | 19 | // Cache is a freecache instance. 20 | type Cache struct { 21 | locks [segmentCount]sync.Mutex 22 | segments [segmentCount]segment 23 | } 24 | 25 | type Updater func(value []byte, found bool) (newValue []byte, replace bool, expireSeconds int) 26 | 27 | func hashFunc(data []byte) uint64 { 28 | return xxhash.Sum64(data) 29 | } 30 | 31 | // NewCache returns a newly initialized cache by size. 32 | // The cache size will be set to 512KB at minimum. 33 | // If the size is set relatively large, you should call 34 | // `debug.SetGCPercent()`, set it to a much smaller value 35 | // to limit the memory consumption and GC pause time. 36 | func NewCache(size int) (cache *Cache) { 37 | return NewCacheCustomTimer(size, defaultTimer{}) 38 | } 39 | 40 | // NewCacheCustomTimer returns a new cache with a custom timer. 41 | func NewCacheCustomTimer(size int, timer Timer) (cache *Cache) { 42 | if size < minBufSize { 43 | size = minBufSize 44 | } 45 | if timer == nil { 46 | timer = defaultTimer{} 47 | } 48 | cache = new(Cache) 49 | for i := 0; i < segmentCount; i++ { 50 | cache.segments[i] = newSegment(size/segmentCount, i, timer) 51 | } 52 | return 53 | } 54 | 55 | // Set sets a key, value and expiration for a cache entry and stores it in the cache. 
56 | // If the key is larger than 65535 or value is larger than 1/1024 of the cache size, 57 | // the entry will not be written to the cache. expireSeconds <= 0 means no expire, 58 | // but it can be evicted when cache is full. 59 | func (cache *Cache) Set(key, value []byte, expireSeconds int) (err error) { 60 | hashVal := hashFunc(key) 61 | segID := hashVal & segmentAndOpVal 62 | cache.locks[segID].Lock() 63 | err = cache.segments[segID].set(key, value, hashVal, expireSeconds) 64 | cache.locks[segID].Unlock() 65 | return 66 | } 67 | 68 | // Touch updates the expiration time of an existing key. expireSeconds <= 0 means no expire, 69 | // but it can be evicted when cache is full. 70 | func (cache *Cache) Touch(key []byte, expireSeconds int) (err error) { 71 | hashVal := hashFunc(key) 72 | segID := hashVal & segmentAndOpVal 73 | cache.locks[segID].Lock() 74 | err = cache.segments[segID].touch(key, hashVal, expireSeconds) 75 | cache.locks[segID].Unlock() 76 | return 77 | } 78 | 79 | // Get returns the value or not found error. 80 | func (cache *Cache) Get(key []byte) (value []byte, err error) { 81 | hashVal := hashFunc(key) 82 | segID := hashVal & segmentAndOpVal 83 | cache.locks[segID].Lock() 84 | value, _, err = cache.segments[segID].get(key, nil, hashVal, false) 85 | cache.locks[segID].Unlock() 86 | return 87 | } 88 | 89 | // GetFn is equivalent to Get or GetWithBuf, but it attempts to be zero-copy, 90 | // calling the provided function with slice view over the current underlying 91 | // value of the key in memory. The slice is constrained in length and capacity. 92 | // 93 | // In most cases, this method will not alloc a byte buffer. The only exception 94 | // is when the value wraps around the underlying segment ring buffer. 95 | // 96 | // The method will return ErrNotFound if there's a miss, and the function will 97 | // not be called. Errors returned by the function will be propagated. 
98 | func (cache *Cache) GetFn(key []byte, fn func([]byte) error) (err error) { 99 | hashVal := hashFunc(key) 100 | segID := hashVal & segmentAndOpVal 101 | cache.locks[segID].Lock() 102 | err = cache.segments[segID].view(key, fn, hashVal, false) 103 | cache.locks[segID].Unlock() 104 | return 105 | } 106 | 107 | // GetOrSet returns existing value or if record doesn't exist 108 | // it sets a new key, value and expiration for a cache entry and stores it in the cache, returns nil in that case 109 | func (cache *Cache) GetOrSet(key, value []byte, expireSeconds int) (retValue []byte, err error) { 110 | hashVal := hashFunc(key) 111 | segID := hashVal & segmentAndOpVal 112 | cache.locks[segID].Lock() 113 | defer cache.locks[segID].Unlock() 114 | 115 | retValue, _, err = cache.segments[segID].get(key, nil, hashVal, false) 116 | if err != nil { 117 | err = cache.segments[segID].set(key, value, hashVal, expireSeconds) 118 | } 119 | return 120 | } 121 | 122 | // SetAndGet sets a key, value and expiration for a cache entry and stores it in the cache. 123 | // If the key is larger than 65535 or value is larger than 1/1024 of the cache size, 124 | // the entry will not be written to the cache. expireSeconds <= 0 means no expire, 125 | // but it can be evicted when cache is full. 
Returns existing value if record exists 126 | // with a bool value to indicate whether an existing record was found 127 | func (cache *Cache) SetAndGet(key, value []byte, expireSeconds int) (retValue []byte, found bool, err error) { 128 | hashVal := hashFunc(key) 129 | segID := hashVal & segmentAndOpVal 130 | cache.locks[segID].Lock() 131 | defer cache.locks[segID].Unlock() 132 | 133 | retValue, _, err = cache.segments[segID].get(key, nil, hashVal, false) 134 | if err == nil { 135 | found = true 136 | } 137 | err = cache.segments[segID].set(key, value, hashVal, expireSeconds) 138 | return 139 | } 140 | 141 | // Update gets value for a key, passes it to updater function that decides if set should be called as well 142 | // This allows for an atomic Get plus Set call using the existing value to decide on whether to call Set. 143 | // If the key is larger than 65535 or value is larger than 1/1024 of the cache size, 144 | // the entry will not be written to the cache. expireSeconds <= 0 means no expire, 145 | // but it can be evicted when cache is full. Returns bool value to indicate if existing record was found along with bool 146 | // value indicating the value was replaced and error if any 147 | func (cache *Cache) Update(key []byte, updater Updater) (found bool, replaced bool, err error) { 148 | hashVal := hashFunc(key) 149 | segID := hashVal & segmentAndOpVal 150 | cache.locks[segID].Lock() 151 | defer cache.locks[segID].Unlock() 152 | 153 | retValue, _, err := cache.segments[segID].get(key, nil, hashVal, false) 154 | if err == nil { 155 | found = true 156 | } else { 157 | err = nil // Clear ErrNotFound error since we're returning found flag 158 | } 159 | value, replaced, expireSeconds := updater(retValue, found) 160 | if !replaced { 161 | return 162 | } 163 | err = cache.segments[segID].set(key, value, hashVal, expireSeconds) 164 | return 165 | } 166 | 167 | // Peek returns the value or not found error, without updating access time or counters. 
168 | // Warning: No expiry check is performed so if an expired value is found, it will be 169 | // returned without error 170 | func (cache *Cache) Peek(key []byte) (value []byte, err error) { 171 | hashVal := hashFunc(key) 172 | segID := hashVal & segmentAndOpVal 173 | cache.locks[segID].Lock() 174 | value, _, err = cache.segments[segID].get(key, nil, hashVal, true) 175 | cache.locks[segID].Unlock() 176 | return 177 | } 178 | 179 | // PeekFn is equivalent to Peek, but it attempts to be zero-copy, calling the 180 | // provided function with slice view over the current underlying value of the 181 | // key in memory. The slice is constrained in length and capacity. 182 | // 183 | // In most cases, this method will not alloc a byte buffer. The only exception 184 | // is when the value wraps around the underlying segment ring buffer. 185 | // 186 | // The method will return ErrNotFound if there's a miss, and the function will 187 | // not be called. Errors returned by the function will be propagated. 188 | // Warning: No expiry check is performed so if an expired value is found, it will be 189 | // returned without error 190 | func (cache *Cache) PeekFn(key []byte, fn func([]byte) error) (err error) { 191 | hashVal := hashFunc(key) 192 | segID := hashVal & segmentAndOpVal 193 | cache.locks[segID].Lock() 194 | err = cache.segments[segID].view(key, fn, hashVal, true) 195 | cache.locks[segID].Unlock() 196 | return 197 | } 198 | 199 | // GetWithBuf copies the value to the buf or returns not found error. 200 | // This method doesn't allocate memory when the capacity of buf is greater or equal to value. 
201 | func (cache *Cache) GetWithBuf(key, buf []byte) (value []byte, err error) { 202 | hashVal := hashFunc(key) 203 | segID := hashVal & segmentAndOpVal 204 | cache.locks[segID].Lock() 205 | value, _, err = cache.segments[segID].get(key, buf, hashVal, false) 206 | cache.locks[segID].Unlock() 207 | return 208 | } 209 | 210 | // GetWithExpiration returns the value with expiration or not found error. 211 | func (cache *Cache) GetWithExpiration(key []byte) (value []byte, expireAt uint32, err error) { 212 | hashVal := hashFunc(key) 213 | segID := hashVal & segmentAndOpVal 214 | cache.locks[segID].Lock() 215 | value, expireAt, err = cache.segments[segID].get(key, nil, hashVal, false) 216 | cache.locks[segID].Unlock() 217 | return 218 | } 219 | 220 | // TTL returns the TTL time left for a given key or a not found error. 221 | func (cache *Cache) TTL(key []byte) (timeLeft uint32, err error) { 222 | hashVal := hashFunc(key) 223 | segID := hashVal & segmentAndOpVal 224 | cache.locks[segID].Lock() 225 | timeLeft, err = cache.segments[segID].ttl(key, hashVal) 226 | cache.locks[segID].Unlock() 227 | return 228 | } 229 | 230 | // Del deletes an item in the cache by key and returns true or false if a delete occurred. 231 | func (cache *Cache) Del(key []byte) (affected bool) { 232 | hashVal := hashFunc(key) 233 | segID := hashVal & segmentAndOpVal 234 | cache.locks[segID].Lock() 235 | affected = cache.segments[segID].del(key, hashVal) 236 | cache.locks[segID].Unlock() 237 | return 238 | } 239 | 240 | // SetInt stores in integer value in the cache. 241 | func (cache *Cache) SetInt(key int64, value []byte, expireSeconds int) (err error) { 242 | var bKey [8]byte 243 | binary.LittleEndian.PutUint64(bKey[:], uint64(key)) 244 | return cache.Set(bKey[:], value, expireSeconds) 245 | } 246 | 247 | // GetInt returns the value for an integer within the cache or a not found error. 
248 | func (cache *Cache) GetInt(key int64) (value []byte, err error) { 249 | var bKey [8]byte 250 | binary.LittleEndian.PutUint64(bKey[:], uint64(key)) 251 | return cache.Get(bKey[:]) 252 | } 253 | 254 | // GetIntWithExpiration returns the value and expiration or a not found error. 255 | func (cache *Cache) GetIntWithExpiration(key int64) (value []byte, expireAt uint32, err error) { 256 | var bKey [8]byte 257 | binary.LittleEndian.PutUint64(bKey[:], uint64(key)) 258 | return cache.GetWithExpiration(bKey[:]) 259 | } 260 | 261 | // DelInt deletes an item in the cache by int key and returns true or false if a delete occurred. 262 | func (cache *Cache) DelInt(key int64) (affected bool) { 263 | var bKey [8]byte 264 | binary.LittleEndian.PutUint64(bKey[:], uint64(key)) 265 | return cache.Del(bKey[:]) 266 | } 267 | 268 | // EvacuateCount is a metric indicating the number of times an eviction occurred. 269 | func (cache *Cache) EvacuateCount() (count int64) { 270 | for i := range cache.segments { 271 | count += atomic.LoadInt64(&cache.segments[i].totalEvacuate) 272 | } 273 | return 274 | } 275 | 276 | // ExpiredCount is a metric indicating the number of times an expire occurred. 277 | func (cache *Cache) ExpiredCount() (count int64) { 278 | for i := range cache.segments { 279 | count += atomic.LoadInt64(&cache.segments[i].totalExpired) 280 | } 281 | return 282 | } 283 | 284 | // EntryCount returns the number of items currently in the cache. 285 | func (cache *Cache) EntryCount() (entryCount int64) { 286 | for i := range cache.segments { 287 | entryCount += atomic.LoadInt64(&cache.segments[i].entryCount) 288 | } 289 | return 290 | } 291 | 292 | // AverageAccessTime returns the average unix timestamp when an entry is accessed. 293 | // Entries with a greater access time will be evacuated when one 294 | // is about to be overwritten by a new value. 
295 | func (cache *Cache) AverageAccessTime() int64 { 296 | var entryCount, totalTime int64 297 | for i := range cache.segments { 298 | totalTime += atomic.LoadInt64(&cache.segments[i].totalTime) 299 | entryCount += atomic.LoadInt64(&cache.segments[i].totalCount) 300 | } 301 | if entryCount == 0 { 302 | return 0 303 | } else { 304 | return totalTime / entryCount 305 | } 306 | } 307 | 308 | // HitCount is a metric that returns number of times a key was found in the cache. 309 | func (cache *Cache) HitCount() (count int64) { 310 | for i := range cache.segments { 311 | count += atomic.LoadInt64(&cache.segments[i].hitCount) 312 | } 313 | return 314 | } 315 | 316 | // MissCount is a metric that returns the number of times a miss occurred in the cache. 317 | func (cache *Cache) MissCount() (count int64) { 318 | for i := range cache.segments { 319 | count += atomic.LoadInt64(&cache.segments[i].missCount) 320 | } 321 | return 322 | } 323 | 324 | // LookupCount is a metric that returns the number of times a lookup for a given key occurred. 325 | func (cache *Cache) LookupCount() int64 { 326 | return cache.HitCount() + cache.MissCount() 327 | } 328 | 329 | // HitRate is the ratio of hits over lookups. 330 | func (cache *Cache) HitRate() float64 { 331 | hitCount, missCount := cache.HitCount(), cache.MissCount() 332 | lookupCount := hitCount + missCount 333 | if lookupCount == 0 { 334 | return 0 335 | } else { 336 | return float64(hitCount) / float64(lookupCount) 337 | } 338 | } 339 | 340 | // OverwriteCount indicates the number of times entries have been overridden. 341 | func (cache *Cache) OverwriteCount() (overwriteCount int64) { 342 | for i := range cache.segments { 343 | overwriteCount += atomic.LoadInt64(&cache.segments[i].overwrites) 344 | } 345 | return 346 | } 347 | 348 | // TouchedCount indicates the number of times entries have had their expiration time extended. 
349 | func (cache *Cache) TouchedCount() (touchedCount int64) { 350 | for i := range cache.segments { 351 | touchedCount += atomic.LoadInt64(&cache.segments[i].touched) 352 | } 353 | return 354 | } 355 | 356 | // Clear clears the cache. 357 | func (cache *Cache) Clear() { 358 | for i := range cache.segments { 359 | cache.locks[i].Lock() 360 | cache.segments[i].clear() 361 | cache.locks[i].Unlock() 362 | } 363 | } 364 | 365 | // ResetStatistics refreshes the current state of the statistics. 366 | func (cache *Cache) ResetStatistics() { 367 | for i := range cache.segments { 368 | cache.locks[i].Lock() 369 | cache.segments[i].resetStatistics() 370 | cache.locks[i].Unlock() 371 | } 372 | } 373 | -------------------------------------------------------------------------------- /cache_test.go: -------------------------------------------------------------------------------- 1 | package freecache 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "encoding/binary" 7 | "errors" 8 | "fmt" 9 | "log" 10 | mrand "math/rand" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | "sync/atomic" 15 | "testing" 16 | "time" 17 | ) 18 | 19 | // mockTimer is a mock for Timer contract. 20 | type mockTimer struct { 21 | nowCallsCnt uint32 // stores the number of times Now() was called 22 | nowCallback func() uint32 // callback to be executed inside Now() 23 | } 24 | 25 | // Now mock logic. 26 | func (mock *mockTimer) Now() uint32 { 27 | atomic.AddUint32(&mock.nowCallsCnt, 1) 28 | if mock.nowCallback != nil { 29 | return mock.nowCallback() 30 | } 31 | 32 | return uint32(time.Now().Unix()) 33 | } 34 | 35 | // SetNowCallback sets the callback to be executed inside Now(). 36 | // You can control the return value this way. 37 | func (mock *mockTimer) SetNowCallback(callback func() uint32) { 38 | mock.nowCallback = callback 39 | } 40 | 41 | // nowCallsCount returns the number of times Now() was called. 
42 | func (mock *mockTimer) NowCallsCount() int { 43 | return int(atomic.LoadUint32(&mock.nowCallsCnt)) 44 | } 45 | 46 | func TestFreeCache(t *testing.T) { 47 | cache := NewCache(1024) 48 | if cache.HitRate() != 0 { 49 | t.Error("initial hit rate should be zero") 50 | } 51 | if cache.AverageAccessTime() != 0 { 52 | t.Error("initial average access time should be zero") 53 | } 54 | key := []byte("abcd") 55 | val := []byte("efghijkl") 56 | err := cache.Set(key, val, 0) 57 | if err != nil { 58 | t.Error("err should be nil") 59 | } 60 | value, err := cache.Get(key) 61 | if err != nil || !bytes.Equal(value, val) { 62 | t.Error("value not equal") 63 | } 64 | affected := cache.Del(key) 65 | if !affected { 66 | t.Error("del should return affected true") 67 | } 68 | value, err = cache.Get(key) 69 | if err != ErrNotFound { 70 | t.Error("error should be ErrNotFound after being deleted") 71 | } 72 | affected = cache.Del(key) 73 | if affected { 74 | t.Error("del should not return affected true") 75 | } 76 | 77 | cache.Clear() 78 | n := 5000 79 | for i := 0; i < n; i++ { 80 | keyStr := fmt.Sprintf("key%v", i) 81 | valStr := strings.Repeat(keyStr, 10) 82 | err = cache.Set([]byte(keyStr), []byte(valStr), 0) 83 | if err != nil { 84 | t.Error(err) 85 | } 86 | } 87 | time.Sleep(time.Second) 88 | for i := 1; i < n; i += 2 { 89 | keyStr := fmt.Sprintf("key%v", i) 90 | cache.Get([]byte(keyStr)) 91 | } 92 | 93 | for i := 1; i < n; i += 8 { 94 | keyStr := fmt.Sprintf("key%v", i) 95 | cache.Del([]byte(keyStr)) 96 | } 97 | 98 | for i := 0; i < n; i += 2 { 99 | keyStr := fmt.Sprintf("key%v", i) 100 | valStr := strings.Repeat(keyStr, 10) 101 | err = cache.Set([]byte(keyStr), []byte(valStr), 0) 102 | if err != nil { 103 | t.Error(err) 104 | } 105 | } 106 | for i := 1; i < n; i += 2 { 107 | keyStr := fmt.Sprintf("key%v", i) 108 | expectedValStr := strings.Repeat(keyStr, 10) 109 | value, err = cache.Get([]byte(keyStr)) 110 | if err == nil { 111 | if string(value) != expectedValStr { 112 | 
t.Errorf("value is %v, expected %v", string(value), expectedValStr) 113 | } 114 | } 115 | err = cache.GetFn([]byte(keyStr), func(val []byte) error { 116 | if string(val) != expectedValStr { 117 | t.Errorf("getfn: value is %v, expected %v", string(val), expectedValStr) 118 | } 119 | return nil 120 | }) 121 | } 122 | 123 | t.Logf("hit rate is %v, evacuates %v, entries %v, average time %v, expire count %v\n", 124 | cache.HitRate(), cache.EvacuateCount(), cache.EntryCount(), cache.AverageAccessTime(), cache.ExpiredCount()) 125 | 126 | cache.ResetStatistics() 127 | t.Logf("hit rate is %v, evacuates %v, entries %v, average time %v, expire count %v\n", 128 | cache.HitRate(), cache.EvacuateCount(), cache.EntryCount(), cache.AverageAccessTime(), cache.ExpiredCount()) 129 | } 130 | 131 | func TestOverwrite(t *testing.T) { 132 | cache := NewCache(1024) 133 | key := []byte("abcd") 134 | var val []byte 135 | cache.Set(key, val, 0) 136 | val = []byte("efgh") 137 | cache.Set(key, val, 0) 138 | val = append(val, 'i') 139 | cache.Set(key, val, 0) 140 | if count := cache.OverwriteCount(); count != 0 { 141 | t.Error("overwrite count is", count, "expected ", 0) 142 | } 143 | res, _ := cache.Get(key) 144 | if string(res) != string(val) { 145 | t.Error(string(res)) 146 | } 147 | val = append(val, 'j') 148 | cache.Set(key, val, 0) 149 | res, _ = cache.Get(key) 150 | if string(res) != string(val) { 151 | t.Error(string(res), "aaa") 152 | } 153 | val = append(val, 'k') 154 | cache.Set(key, val, 0) 155 | res, _ = cache.Get(key) 156 | if string(res) != "efghijk" { 157 | t.Error(string(res)) 158 | } 159 | val = append(val, 'l') 160 | cache.Set(key, val, 0) 161 | res, _ = cache.Get(key) 162 | if string(res) != "efghijkl" { 163 | t.Error(string(res)) 164 | } 165 | val = append(val, 'm') 166 | cache.Set(key, val, 0) 167 | if count := cache.OverwriteCount(); count != 3 { 168 | t.Error("overwrite count is", count, "expected ", 3) 169 | } 170 | } 171 | 172 | func TestGetOrSet(t *testing.T) { 173 | 
cache := NewCache(1024) 174 | key := []byte("abcd") 175 | val := []byte("efgh") 176 | 177 | r, err := cache.GetOrSet(key, val, 10) 178 | if err != nil || r != nil { 179 | t.Errorf("Expected to have nils: value=%v, err=%v", string(r), err) 180 | } 181 | 182 | // check entry 183 | r, err = cache.Get(key) 184 | if err != nil || string(r) != "efgh" { 185 | t.Errorf("Expected to have val=%v and err != nil, got: value=%v, err=%v", string(val), string(r), err) 186 | } 187 | 188 | // call twice for the same key 189 | val = []byte("xxxx") 190 | r, err = cache.GetOrSet(key, val, 10) 191 | if err != nil || string(r) != "efgh" { 192 | t.Errorf("Expected to get old record, got: value=%v, err=%v", string(r), err) 193 | } 194 | err = cache.GetFn(key, func(val []byte) error { 195 | if string(val) != "efgh" { 196 | t.Errorf("getfn: Expected to get old record, got: value=%v, err=%v", string(r), err) 197 | } 198 | return nil 199 | }) 200 | if err != nil { 201 | t.Errorf("did not expect error from GetFn, got: %s", err) 202 | } 203 | } 204 | 205 | func TestGetWithExpiration(t *testing.T) { 206 | cache := NewCache(1024) 207 | key := []byte("abcd") 208 | val := []byte("efgh") 209 | err := cache.Set(key, val, 2) 210 | if err != nil { 211 | t.Error("err should be nil", err.Error()) 212 | } 213 | 214 | res, expiry, err := cache.GetWithExpiration(key) 215 | var expireTime time.Time 216 | var startTime = time.Now() 217 | for { 218 | _, _, err := cache.GetWithExpiration(key) 219 | expireTime = time.Now() 220 | if err != nil { 221 | break 222 | } 223 | if time.Now().Unix() > int64(expiry+1) { 224 | break 225 | } 226 | time.Sleep(1 * time.Millisecond) 227 | } 228 | if time.Second > expireTime.Sub(startTime) || 3*time.Second < expireTime.Sub(startTime) { 229 | t.Error("Cache should expire within a second of the expire time") 230 | } 231 | 232 | if err != nil { 233 | t.Error("err should be nil", err.Error()) 234 | } 235 | if !bytes.Equal(val, res) { 236 | t.Fatalf("%s should be the same as %s but 
isn't", res, val) 237 | } 238 | } 239 | 240 | func TestExpire(t *testing.T) { 241 | cache := NewCache(1024) 242 | key := []byte("abcd") 243 | val := []byte("efgh") 244 | err := cache.Set(key, val, 1) 245 | if err != nil { 246 | t.Error("err should be nil") 247 | } 248 | time.Sleep(time.Second) 249 | val, err = cache.Get(key) 250 | if err == nil { 251 | t.Fatal("key should be expired", string(val)) 252 | } 253 | 254 | cache.ResetStatistics() 255 | if cache.ExpiredCount() != 0 { 256 | t.Error("expired count should be zero.") 257 | } 258 | } 259 | 260 | func TestTTL(t *testing.T) { 261 | t.Run("with no expire key", testTTLWithNoExpireKey) 262 | t.Run("with expire key, not yet expired", testTTLWithNotYetExpiredKey) 263 | t.Run("with expire key, expired", testTTLWithExpiredKey) 264 | t.Run("with not found key", testTTLWithNotFoundKey) 265 | } 266 | 267 | func testTTLWithNoExpireKey(t *testing.T) { 268 | t.Parallel() 269 | 270 | // arrange 271 | var now uint32 = 1659954367 272 | timer := new(mockTimer) 273 | timer.SetNowCallback(func() uint32 { 274 | return now 275 | }) 276 | cache := NewCacheCustomTimer(512*1024, timer) 277 | key := []byte("test-key") 278 | value := []byte("this key does not expire") 279 | expireSeconds := 0 280 | if err := cache.Set(key, value, expireSeconds); err != nil { 281 | t.Fatalf("prerequisite failed: could not set the key to query ttl for: %v", err) 282 | } 283 | 284 | // act 285 | ttl, err := cache.TTL(key) 286 | 287 | // assert 288 | if err != nil { 289 | t.Errorf("expected nil, but got %v", err) 290 | } 291 | if ttl != uint32(expireSeconds) { 292 | t.Errorf("expected %d, but got %d ", expireSeconds, ttl) 293 | } 294 | if timer.NowCallsCount() != 1 { 295 | t.Errorf("expected %d, but got %d ", 1, timer.NowCallsCount()) 296 | } 297 | } 298 | 299 | func testTTLWithNotYetExpiredKey(t *testing.T) { 300 | t.Parallel() 301 | 302 | // arrange 303 | var now uint32 = 1659954368 304 | timer := new(mockTimer) 305 | timer.SetNowCallback(func() uint32 { 
306 | return now 307 | }) 308 | cache := NewCacheCustomTimer(512*1024, timer) 309 | key := []byte("test-key") 310 | value := []byte("this key expires, but is not expired") 311 | expireSeconds := 300 312 | if err := cache.Set(key, value, expireSeconds); err != nil { 313 | t.Fatalf("prerequisite failed: could not set the key to query ttl for: %v", err) 314 | } 315 | 316 | // act 317 | ttl, err := cache.TTL(key) 318 | 319 | // assert 320 | if err != nil { 321 | t.Errorf("expected nil, but got %v", err) 322 | } 323 | if ttl != uint32(expireSeconds) { 324 | t.Errorf("expected %d, but got %d ", expireSeconds, ttl) 325 | } 326 | if timer.NowCallsCount() != 2 { // one call from set, one from ttl 327 | t.Errorf("expected %d, but got %d ", 2, timer.NowCallsCount()) 328 | } 329 | } 330 | 331 | func testTTLWithExpiredKey(t *testing.T) { 332 | t.Parallel() 333 | 334 | // arrange 335 | var now uint32 = 1659954369 336 | expireSeconds := 600 337 | timer := new(mockTimer) 338 | timer.SetNowCallback(func() uint32 { 339 | switch timer.NowCallsCount() { 340 | case 1: 341 | return now 342 | case 2: 343 | return now + uint32(expireSeconds) 344 | } 345 | 346 | return now 347 | }) 348 | cache := NewCacheCustomTimer(512*1024, timer) 349 | key := []byte("test-key") 350 | value := []byte("this key is expired") 351 | if err := cache.Set(key, value, expireSeconds); err != nil { 352 | t.Fatalf("prerequisite failed: could not set the key to query ttl for: %v", err) 353 | } 354 | 355 | // act 356 | ttl, err := cache.TTL(key) 357 | 358 | // assert 359 | if !errors.Is(err, ErrNotFound) { 360 | t.Errorf("expected %v, but got %v", ErrNotFound, err) 361 | } 362 | if ttl != 0 { 363 | t.Errorf("expected %d, but got %d ", 0, ttl) 364 | } 365 | if timer.NowCallsCount() != 2 { // one call from set, one from ttl 366 | t.Errorf("expected %d, but got %d ", 2, timer.NowCallsCount()) 367 | } 368 | } 369 | 370 | func testTTLWithNotFoundKey(t *testing.T) { 371 | t.Parallel() 372 | 373 | // arrange 374 | timer := 
new(mockTimer) 375 | cache := NewCacheCustomTimer(512*1024, timer) 376 | key := []byte("test-not-found-key") 377 | 378 | // act 379 | ttl, err := cache.TTL(key) 380 | 381 | // assert 382 | if !errors.Is(err, ErrNotFound) { 383 | t.Errorf("expected %v, but got %v", ErrNotFound, err) 384 | } 385 | if ttl != 0 { 386 | t.Errorf("expected %d, but got %d ", 0, ttl) 387 | } 388 | if timer.NowCallsCount() != 0 { 389 | t.Errorf("expected %d, but got %d ", 0, timer.NowCallsCount()) 390 | } 391 | } 392 | 393 | func TestTouch(t *testing.T) { 394 | cache := NewCache(1024) 395 | key1 := []byte("abcd") 396 | val1 := []byte("efgh") 397 | key2 := []byte("ijkl") 398 | val2 := []byte("mnop") 399 | err := cache.Set(key1, val1, 1) 400 | if err != nil { 401 | t.Error("err should be nil", err.Error()) 402 | } 403 | err = cache.Set(key2, val2, 1) 404 | if err != nil { 405 | t.Error("err should be nil", err.Error()) 406 | } 407 | if touched := cache.TouchedCount(); touched != 0 { 408 | t.Fatalf("touched count should be 0, but %d returned", touched) 409 | } 410 | err = cache.Touch(key1, 2) 411 | if err != nil { 412 | t.Error("err should be nil", err.Error()) 413 | } 414 | time.Sleep(time.Second) 415 | ttl, err := cache.TTL(key1) 416 | if err != nil { 417 | t.Error("err should be nil", err.Error()) 418 | } 419 | if ttl != 1 { 420 | t.Fatalf("ttl should be 1, but %d returned", ttl) 421 | } 422 | if touched := cache.TouchedCount(); touched != 1 { 423 | t.Fatalf("touched count should be 1, but %d returned", touched) 424 | } 425 | err = cache.Touch(key2, 2) 426 | if err != ErrNotFound { 427 | t.Error("error should be ErrNotFound after expiring") 428 | } 429 | if touched := cache.TouchedCount(); touched != 1 { 430 | t.Fatalf("touched count should be 1, but %d returned", touched) 431 | } 432 | } 433 | 434 | func TestAverageAccessTimeWhenUpdateInplace(t *testing.T) { 435 | cache := NewCache(1024) 436 | 437 | key := []byte("test-key") 438 | valueLong := []byte("very-long-de-value") 439 | valueShort 
:= []byte("short") 440 | 441 | err := cache.Set(key, valueLong, 0) 442 | if err != nil { 443 | t.Fatal("err should be nil") 444 | } 445 | now := time.Now().Unix() 446 | aat := cache.AverageAccessTime() 447 | if (now - aat) > 1 { 448 | t.Fatalf("track average access time error, now:%d, aat:%d", now, aat) 449 | } 450 | 451 | time.Sleep(time.Second * 4) 452 | err = cache.Set(key, valueShort, 0) 453 | if err != nil { 454 | t.Fatal("err should be nil") 455 | } 456 | now = time.Now().Unix() 457 | aat = cache.AverageAccessTime() 458 | if (now - aat) > 1 { 459 | t.Fatalf("track average access time error, now:%d, aat:%d", now, aat) 460 | } 461 | } 462 | 463 | func TestAverageAccessTimeWhenUpdateWithNewSpace(t *testing.T) { 464 | cache := NewCache(1024) 465 | 466 | key := []byte("test-key") 467 | valueLong := []byte("very-long-de-value") 468 | valueShort := []byte("short") 469 | 470 | err := cache.Set(key, valueShort, 0) 471 | if err != nil { 472 | t.Fatal("err should be nil") 473 | } 474 | now := time.Now().Unix() 475 | aat := cache.AverageAccessTime() 476 | if (now - aat) > 1 { 477 | t.Fatalf("track average access time error, now:%d, aat:%d", now, aat) 478 | } 479 | 480 | time.Sleep(time.Second * 4) 481 | err = cache.Set(key, valueLong, 0) 482 | if err != nil { 483 | t.Fatal("err should be nil") 484 | } 485 | now = time.Now().Unix() 486 | aat = cache.AverageAccessTime() 487 | if (now - aat) > 2 { 488 | t.Fatalf("track average access time error, now:%d, aat:%d", now, aat) 489 | } 490 | } 491 | 492 | func TestLargeEntry(t *testing.T) { 493 | cacheSize := 512 * 1024 494 | cache := NewCache(cacheSize) 495 | key := make([]byte, 65536) 496 | val := []byte("efgh") 497 | err := cache.Set(key, val, 0) 498 | if err != ErrLargeKey { 499 | t.Error("large key should return ErrLargeKey") 500 | } 501 | val, err = cache.Get(key) 502 | if val != nil { 503 | t.Error("value should be nil when get a big key") 504 | } 505 | key = []byte("abcd") 506 | maxValLen := cacheSize/1024 - 
ENTRY_HDR_SIZE - len(key) 507 | val = make([]byte, maxValLen+1) 508 | err = cache.Set(key, val, 0) 509 | if err != ErrLargeEntry { 510 | t.Error("err should be ErrLargeEntry", err) 511 | } 512 | val = make([]byte, maxValLen-2) 513 | err = cache.Set(key, val, 0) 514 | if err != nil { 515 | t.Error(err) 516 | } 517 | val = append(val, 0) 518 | err = cache.Set(key, val, 0) 519 | if err != nil { 520 | t.Error(err) 521 | } 522 | val = append(val, 0) 523 | err = cache.Set(key, val, 0) 524 | if err != nil { 525 | t.Error(err) 526 | } 527 | if cache.OverwriteCount() != 1 { 528 | t.Errorf("over write count should be one, actual: %d.", cache.OverwriteCount()) 529 | } 530 | val = append(val, 0) 531 | err = cache.Set(key, val, 0) 532 | if err != ErrLargeEntry { 533 | t.Error("err should be ErrLargeEntry", err) 534 | } 535 | 536 | cache.ResetStatistics() 537 | if cache.OverwriteCount() != 0 { 538 | t.Error("over write count should be zero.") 539 | } 540 | } 541 | 542 | func TestInt64Key(t *testing.T) { 543 | cache := NewCache(1024) 544 | err := cache.SetInt(1, []byte("abc"), 3) 545 | if err != nil { 546 | t.Error("err should be nil") 547 | } 548 | err = cache.SetInt(2, []byte("cde"), 3) 549 | if err != nil { 550 | t.Error("err should be nil") 551 | } 552 | val, err := cache.GetInt(1) 553 | if err != nil { 554 | t.Error("err should be nil") 555 | } 556 | if !bytes.Equal(val, []byte("abc")) { 557 | t.Error("value not equal") 558 | } 559 | time.Sleep(2 * time.Second) 560 | val, expiry, err := cache.GetIntWithExpiration(1) 561 | if err != nil { 562 | t.Error("err should be nil") 563 | } 564 | if !bytes.Equal(val, []byte("abc")) { 565 | t.Error("value not equal") 566 | } 567 | now := time.Now() 568 | if expiry != uint32(now.Unix()+1) { 569 | t.Errorf("Expiry should one second in the future but was %v", now) 570 | } 571 | 572 | affected := cache.DelInt(1) 573 | if !affected { 574 | t.Error("del should return affected true") 575 | } 576 | _, err = cache.GetInt(1) 577 | if err != 
ErrNotFound { 578 | t.Error("error should be ErrNotFound after being deleted") 579 | } 580 | } 581 | 582 | func TestIterator(t *testing.T) { 583 | cache := NewCache(1024) 584 | count := 10000 585 | for i := 0; i < count; i++ { 586 | err := cache.Set([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("val%d", i)), 0) 587 | if err != nil { 588 | t.Error(err) 589 | } 590 | } 591 | // Set some value that expires to make sure expired entry is not returned. 592 | cache.Set([]byte("abc"), []byte("def"), 1) 593 | time.Sleep(2 * time.Second) 594 | it := cache.NewIterator() 595 | for i := 0; i < count; i++ { 596 | entry := it.Next() 597 | if entry == nil { 598 | t.Fatalf("entry is nil for %d", i) 599 | } 600 | if string(entry.Value) != "val"+string(entry.Key) { 601 | t.Fatalf("entry key value not match %s %s", entry.Key, entry.Value) 602 | } 603 | } 604 | e := it.Next() 605 | if e != nil { 606 | t.Fail() 607 | } 608 | } 609 | 610 | func TestIteratorExpireAt(t *testing.T) { 611 | cache := NewCache(1024) 612 | expireSecond := uint32(5) 613 | // Set some value that expires to make sure expired entry is not returned. 
614 | cache.Set([]byte("no_expire"), []byte("def"), 0) 615 | cache.Set([]byte("has_expire"), []byte("exp"), int(expireSecond)) 616 | 617 | it := cache.NewIterator() 618 | for { 619 | next := it.Next() 620 | if next == nil { 621 | break 622 | } 623 | if string(next.Key) == "no_expire" && next.ExpireAt != 0 { 624 | t.Fatalf("no_expire's ExpireAt should be 0") 625 | } 626 | expectExpireAt := uint32(time.Now().Unix()) + expireSecond 627 | if string(next.Key) == "has_expire" && next.ExpireAt != expectExpireAt { 628 | t.Fatalf("has_expire's ExpireAt should be 10,actually is %d", next.ExpireAt) 629 | } 630 | } 631 | time.Sleep(time.Duration(expireSecond) * time.Second) 632 | it2 := cache.NewIterator() 633 | for { 634 | next := it2.Next() 635 | if next == nil { 636 | return 637 | } 638 | if string(next.Key) == "no_expire" && next.ExpireAt != 0 { 639 | t.Fatalf("no_expire's ExpireAt should be 0") 640 | } 641 | if string(next.Key) == "has_expire" { 642 | t.Fatalf("has_expire should expired") 643 | } 644 | } 645 | } 646 | 647 | func TestSetLargerEntryDeletesWrongEntry(t *testing.T) { 648 | cachesize := 512 * 1024 649 | cache := NewCache(cachesize) 650 | 651 | value1 := "aaa" 652 | key1 := []byte("key1") 653 | value := value1 654 | cache.Set(key1, []byte(value), 0) 655 | 656 | it := cache.NewIterator() 657 | entry := it.Next() 658 | if !bytes.Equal(entry.Key, key1) { 659 | t.Fatalf("key %s not equal to %s", entry.Key, key1) 660 | } 661 | if !bytes.Equal(entry.Value, []byte(value)) { 662 | t.Fatalf("value %s not equal to %s", entry.Value, value) 663 | } 664 | entry = it.Next() 665 | if entry != nil { 666 | t.Fatalf("expected nil entry but got %s %s", entry.Key, entry.Value) 667 | } 668 | 669 | value = value1 + "XXXXXX" 670 | cache.Set(key1, []byte(value), 0) 671 | 672 | value = value1 + "XXXXYYYYYYY" 673 | cache.Set(key1, []byte(value), 0) 674 | it = cache.NewIterator() 675 | entry = it.Next() 676 | if !bytes.Equal(entry.Key, key1) { 677 | t.Fatalf("key %s not equal to %s", 
entry.Key, key1) 678 | } 679 | if !bytes.Equal(entry.Value, []byte(value)) { 680 | t.Fatalf("value %s not equal to %s", entry.Value, value) 681 | } 682 | entry = it.Next() 683 | if entry != nil { 684 | t.Fatalf("expected nil entry but got %s %s", entry.Key, entry.Value) 685 | } 686 | } 687 | 688 | func TestRace(t *testing.T) { 689 | cache := NewCache(minBufSize) 690 | inUse := 8 691 | wg := sync.WaitGroup{} 692 | var iters int64 = 1000 693 | 694 | wg.Add(6) 695 | addFunc := func() { 696 | var i int64 697 | for i = 0; i < iters; i++ { 698 | err := cache.SetInt(int64(mrand.Intn(inUse)), []byte("abc"), 1) 699 | if err != nil { 700 | t.Errorf("err: %s", err) 701 | } 702 | } 703 | wg.Done() 704 | } 705 | getFunc := func() { 706 | var i int64 707 | for i = 0; i < iters; i++ { 708 | _, _ = cache.GetInt(int64(mrand.Intn(inUse))) // it will likely error w/ delFunc running too 709 | } 710 | wg.Done() 711 | } 712 | delFunc := func() { 713 | var i int64 714 | for i = 0; i < iters; i++ { 715 | cache.DelInt(int64(mrand.Intn(inUse))) 716 | } 717 | wg.Done() 718 | } 719 | evacFunc := func() { 720 | var i int64 721 | for i = 0; i < iters; i++ { 722 | _ = cache.EvacuateCount() 723 | _ = cache.ExpiredCount() 724 | _ = cache.EntryCount() 725 | _ = cache.AverageAccessTime() 726 | _ = cache.HitCount() 727 | _ = cache.LookupCount() 728 | _ = cache.HitRate() 729 | _ = cache.OverwriteCount() 730 | } 731 | wg.Done() 732 | } 733 | resetFunc := func() { 734 | var i int64 735 | for i = 0; i < iters; i++ { 736 | cache.ResetStatistics() 737 | } 738 | wg.Done() 739 | } 740 | clearFunc := func() { 741 | var i int64 742 | for i = 0; i < iters; i++ { 743 | cache.Clear() 744 | } 745 | wg.Done() 746 | } 747 | 748 | go addFunc() 749 | go getFunc() 750 | go delFunc() 751 | go evacFunc() 752 | go resetFunc() 753 | go clearFunc() 754 | wg.Wait() 755 | } 756 | 757 | func TestConcurrentSet(t *testing.T) { 758 | var wg sync.WaitGroup 759 | cache := NewCache(256 * 1024 * 1024) 760 | N := 4000 761 | routines 
:= 50 762 | wg.Add(routines) 763 | for k := 0; k < routines; k++ { 764 | go func(fact int) { 765 | defer wg.Done() 766 | for i := N * fact; i < (fact+1)*N; i++ { 767 | var key, value [8]byte 768 | 769 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 770 | binary.LittleEndian.PutUint64(value[:], uint64(i*2)) 771 | cache.Set(key[:], value[:], 0) 772 | } 773 | }(k) 774 | } 775 | wg.Wait() 776 | for i := 0; i < routines*N; i++ { 777 | var key, value [8]byte 778 | 779 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 780 | cache.GetWithBuf(key[:], value[:]) 781 | var num uint64 782 | binary.Read(bytes.NewBuffer(value[:]), binary.LittleEndian, &num) 783 | if num != uint64(i*2) { 784 | t.Fatalf("key %d not equal to %d", int(num), (i * 2)) 785 | } 786 | } 787 | } 788 | 789 | func TestEvacuateCount(t *testing.T) { 790 | cache := NewCache(1024 * 1024) 791 | n := 100000 792 | for i := 0; i < n; i++ { 793 | err := cache.Set([]byte(strconv.Itoa(i)), []byte("A"), 0) 794 | if err != nil { 795 | log.Fatal(err) 796 | } 797 | } 798 | missingItems := 0 799 | for i := 0; i < n; i++ { 800 | res, err := cache.Get([]byte(strconv.Itoa(i))) 801 | if err == ErrNotFound || (err == nil && string(res) != "A") { 802 | missingItems++ 803 | } else if err != nil { 804 | log.Fatal(err) 805 | } 806 | } 807 | if cache.EntryCount()+cache.EvacuateCount() != int64(n) { 808 | t.Fatal(cache.EvacuateCount(), cache.EvacuateCount()) 809 | } 810 | } 811 | 812 | func BenchmarkCacheSet(b *testing.B) { 813 | cache := NewCache(256 * 1024 * 1024) 814 | var key [8]byte 815 | for i := 0; i < b.N; i++ { 816 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 817 | cache.Set(key[:], make([]byte, 8), 0) 818 | } 819 | } 820 | func BenchmarkParallelCacheSet(b *testing.B) { 821 | cache := NewCache(256 * 1024 * 1024) 822 | var key [8]byte 823 | 824 | b.RunParallel(func(pb *testing.PB) { 825 | counter := 0 826 | b.ReportAllocs() 827 | 828 | for pb.Next() { 829 | binary.LittleEndian.PutUint64(key[:], uint64(counter)) 830 | 
cache.Set(key[:], make([]byte, 8), 0) 831 | counter = counter + 1 832 | } 833 | }) 834 | } 835 | 836 | func BenchmarkMapSet(b *testing.B) { 837 | m := make(map[string][]byte) 838 | var key [8]byte 839 | for i := 0; i < b.N; i++ { 840 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 841 | m[string(key[:])] = make([]byte, 8) 842 | } 843 | } 844 | 845 | func BenchmarkCacheGet(b *testing.B) { 846 | b.ReportAllocs() 847 | b.StopTimer() 848 | cache := NewCache(256 * 1024 * 1024) 849 | var key [8]byte 850 | buf := make([]byte, 64) 851 | for i := 0; i < b.N; i++ { 852 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 853 | cache.Set(key[:], buf, 0) 854 | } 855 | b.StartTimer() 856 | for i := 0; i < b.N; i++ { 857 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 858 | cache.Get(key[:]) 859 | } 860 | } 861 | 862 | func BenchmarkCacheGetFn(b *testing.B) { 863 | b.ReportAllocs() 864 | b.StopTimer() 865 | cache := NewCache(256 * 1024 * 1024) 866 | var key [8]byte 867 | buf := make([]byte, 64) 868 | for i := 0; i < b.N; i++ { 869 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 870 | cache.Set(key[:], buf, 0) 871 | } 872 | b.StartTimer() 873 | for i := 0; i < b.N; i++ { 874 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 875 | _ = cache.GetFn(key[:], func(val []byte) error { 876 | _ = val 877 | return nil 878 | }) 879 | } 880 | b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate()) 881 | } 882 | 883 | func BenchmarkParallelCacheGet(b *testing.B) { 884 | b.ReportAllocs() 885 | b.StopTimer() 886 | cache := NewCache(256 * 1024 * 1024) 887 | buf := make([]byte, 64) 888 | var key [8]byte 889 | for i := 0; i < b.N; i++ { 890 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 891 | cache.Set(key[:], buf, 0) 892 | } 893 | b.StartTimer() 894 | b.RunParallel(func(pb *testing.PB) { 895 | counter := 0 896 | b.ReportAllocs() 897 | for pb.Next() { 898 | binary.LittleEndian.PutUint64(key[:], uint64(counter)) 899 | cache.Get(key[:]) 900 | counter = counter + 1 901 | } 902 | }) 903 | } 
904 | 905 | func BenchmarkCacheGetWithBuf(b *testing.B) { 906 | b.ReportAllocs() 907 | b.StopTimer() 908 | cache := NewCache(256 * 1024 * 1024) 909 | var key [8]byte 910 | buf := make([]byte, 64) 911 | for i := 0; i < b.N; i++ { 912 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 913 | cache.Set(key[:], buf, 0) 914 | } 915 | b.StartTimer() 916 | for i := 0; i < b.N; i++ { 917 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 918 | cache.GetWithBuf(key[:], buf) 919 | } 920 | } 921 | 922 | func BenchmarkParallelCacheGetWithBuf(b *testing.B) { 923 | b.ReportAllocs() 924 | b.StopTimer() 925 | cache := NewCache(256 * 1024 * 1024) 926 | var key [8]byte 927 | buf := make([]byte, 64) 928 | for i := 0; i < b.N; i++ { 929 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 930 | cache.Set(key[:], buf, 0) 931 | } 932 | b.StartTimer() 933 | 934 | b.RunParallel(func(pb *testing.PB) { 935 | counter := 0 936 | b.ReportAllocs() 937 | for pb.Next() { 938 | binary.LittleEndian.PutUint64(key[:], uint64(counter)) 939 | cache.GetWithBuf(key[:], buf) 940 | counter = counter + 1 941 | } 942 | }) 943 | } 944 | 945 | func BenchmarkCacheGetWithExpiration(b *testing.B) { 946 | b.StopTimer() 947 | cache := NewCache(256 * 1024 * 1024) 948 | var key [8]byte 949 | for i := 0; i < b.N; i++ { 950 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 951 | cache.Set(key[:], make([]byte, 8), 0) 952 | } 953 | b.StartTimer() 954 | for i := 0; i < b.N; i++ { 955 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 956 | cache.GetWithExpiration(key[:]) 957 | } 958 | } 959 | 960 | func BenchmarkMapGet(b *testing.B) { 961 | b.StopTimer() 962 | m := make(map[string][]byte) 963 | var key [8]byte 964 | for i := 0; i < b.N; i++ { 965 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 966 | m[string(key[:])] = make([]byte, 8) 967 | } 968 | b.StartTimer() 969 | var hitCount int64 970 | for i := 0; i < b.N; i++ { 971 | binary.LittleEndian.PutUint64(key[:], uint64(i)) 972 | if m[string(key[:])] != nil { 973 | 
hitCount++ 974 | } 975 | } 976 | } 977 | 978 | func BenchmarkHashFunc(b *testing.B) { 979 | key := make([]byte, 8) 980 | rand.Read(key) 981 | 982 | b.ResetTimer() 983 | for i := 0; i < b.N; i++ { 984 | hashFunc(key) 985 | } 986 | } 987 | 988 | func benchmarkTTL(expireSeconds int) func(b *testing.B) { 989 | return func(b *testing.B) { 990 | cache := NewCache(512 * 1024) 991 | key := []byte("bench-ttl-key") 992 | value := []byte("bench-ttl-value") 993 | if err := cache.Set(key, value, expireSeconds); err != nil { 994 | b.Fatalf("prerequisite failed: could not set the key to query TTL for: %v", err) 995 | } 996 | 997 | b.ReportAllocs() 998 | b.ResetTimer() 999 | 1000 | for i := 0; i < b.N; i++ { 1001 | _, err := cache.TTL(key) 1002 | if err != nil { 1003 | b.Error(err) 1004 | } 1005 | } 1006 | } 1007 | } 1008 | 1009 | func BenchmarkTTL_withKeyThatDoesNotExpire(b *testing.B) { 1010 | benchmarkTTL(0)(b) 1011 | } 1012 | 1013 | func BenchmarkTTL_withKeyThatDoesExpire(b *testing.B) { 1014 | benchmarkTTL(30)(b) 1015 | } 1016 | 1017 | func TestConcurrentGetTTL(t *testing.T) { 1018 | cache := NewCache(256 * 1024 * 1024) 1019 | primaryKey := []byte("hello") 1020 | primaryVal := []byte("world") 1021 | cache.Set(primaryKey, primaryVal, 2) 1022 | 1023 | // Do concurrent mutation by adding various keys. 1024 | for i := 0; i < 1000; i++ { 1025 | go func(idx int) { 1026 | keyValue := []byte(fmt.Sprintf("counter_%d", idx)) 1027 | cache.Set(keyValue, keyValue, 0) 1028 | }(i) 1029 | } 1030 | 1031 | // While trying to read the TTL. 
1032 | _, err := cache.TTL(primaryKey) 1033 | if err != nil { 1034 | t.Fatalf("Failed to get the TTL with an error: %+v", err) 1035 | } 1036 | } 1037 | 1038 | func TestSetAndGet(t *testing.T) { 1039 | cache := NewCache(1024) 1040 | key := []byte("abcd") 1041 | val1 := []byte("efgh") 1042 | 1043 | _, found, _ := cache.SetAndGet(key, val1, 0) 1044 | if found == true { 1045 | t.Fatalf("SetAndGet unexpected found data") 1046 | } 1047 | 1048 | val2 := []byte("ijkl") 1049 | rval, found, _ := cache.SetAndGet(key, val2, 0) 1050 | if found == false { 1051 | t.Fatalf("SetAndGet expected found data") 1052 | } 1053 | if string(val1) != string(rval) { 1054 | t.Fatalf("SetAndGet expected SetAndGet %s: got %s", string(val1), string(rval)) 1055 | } 1056 | } 1057 | 1058 | func TestUpdate(t *testing.T) { 1059 | testName := "Update" 1060 | cache := NewCache(1024) 1061 | key := []byte("abcd") 1062 | val1 := []byte("efgh") 1063 | val2 := []byte("ijkl") 1064 | 1065 | var found, replaced bool 1066 | var err error 1067 | var prevVal, updaterVal []byte 1068 | updaterReplace := false 1069 | expireSeconds := 123 1070 | 1071 | updater := func(value []byte, found bool) ([]byte, bool, int) { 1072 | prevVal = value 1073 | return updaterVal, updaterReplace, expireSeconds 1074 | } 1075 | 1076 | setUpdaterResponse := func(value []byte, replace bool) { 1077 | updaterVal = value 1078 | updaterReplace = replace 1079 | } 1080 | 1081 | assertExpectations := func(testCase int, expectedFound, expectedReplaced bool, expectedPrevVal []byte, 1082 | expectedVal []byte) { 1083 | failPrefix := fmt.Sprintf("%s(%d)", testName, testCase) 1084 | 1085 | if expectedFound != found { 1086 | t.Fatalf("%s found should be %v", failPrefix, expectedFound) 1087 | } 1088 | if expectedReplaced != replaced { 1089 | t.Fatalf("%s found should be %v", failPrefix, expectedReplaced) 1090 | } 1091 | if err != nil { 1092 | t.Fatalf("%s unexpected err %v", failPrefix, err) 1093 | } 1094 | if string(prevVal) != string(expectedPrevVal) { 
1095 | t.Fatalf("%s previous value expected %s instead of %s", failPrefix, string(expectedPrevVal), 1096 | string(prevVal)) 1097 | } 1098 | 1099 | // Check value 1100 | value, err := cache.Get(key) 1101 | if err == ErrNotFound && expectedVal != nil { 1102 | t.Fatalf("%s previous value expected %s instead of nil", failPrefix, string(expectedVal)) 1103 | } 1104 | if string(value) != string(expectedVal) { 1105 | t.Fatalf("%s previous value expected %s instead of %s", failPrefix, string(expectedVal), string(value)) 1106 | } 1107 | } 1108 | 1109 | // Doesn't exist yet, decide not to update, set should not be called 1110 | found, replaced, err = cache.Update(key, updater) 1111 | assertExpectations(1, false, false, nil, nil) 1112 | 1113 | // Doesn't exist yet, decide to update, set should be called with new value 1114 | setUpdaterResponse(val1, true) 1115 | found, replaced, err = cache.Update(key, updater) 1116 | assertExpectations(2, false, true, nil, val1) 1117 | 1118 | // Key exists, decide to update, updater is given old value and set should be called with new value 1119 | setUpdaterResponse(val2, true) 1120 | found, replaced, err = cache.Update(key, updater) 1121 | assertExpectations(3, true, true, val1, val2) 1122 | 1123 | // Key exists, decide not to update, updater is given old value and set should not be called 1124 | setUpdaterResponse(val1, false) 1125 | found, replaced, err = cache.Update(key, updater) 1126 | assertExpectations(4, true, false, val2, val2) 1127 | } 1128 | 1129 | func TestBenchmarkCacheGetWithBuf(t *testing.T) { 1130 | alloc := testing.Benchmark(BenchmarkCacheGetWithBuf).AllocsPerOp() 1131 | if alloc > 0 { 1132 | t.Errorf("current alloc count '%d' is higher than 0", alloc) 1133 | } 1134 | } 1135 | 1136 | func TestBenchmarkCacheSet(t *testing.T) { 1137 | alloc := testing.Benchmark(BenchmarkCacheSet).AllocsPerOp() 1138 | if alloc > 0 { 1139 | t.Errorf("current alloc count '%d' is higher than 0", alloc) 1140 | } 1141 | } 1142 | 
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
module github.com/coocood/freecache

go 1.13

require github.com/cespare/xxhash/v2 v2.1.2
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
--------------------------------------------------------------------------------
/iterator.go:
--------------------------------------------------------------------------------
package freecache

import (
	"unsafe"
)

// Iterator iterates the entries for the cache.
type Iterator struct {
	cache      *Cache // cache being iterated over
	segmentIdx int    // index of the segment currently being scanned (0..255)
	slotIdx    int    // index of the slot within the current segment (0..255)
	entryIdx   int    // index of the next entry within the current slot
}

// Entry represents a key/value pair.
type Entry struct {
	Key      []byte
	Value    []byte
	ExpireAt uint32 // expiry time in unix seconds; 0 means the entry never expires
}

// Next returns the next entry for the iterator.
// The order of the entries is not guaranteed.
// If there are no more entries to return, nil will be returned.
25 | func (it *Iterator) Next() *Entry { 26 | for it.segmentIdx < 256 { 27 | entry := it.nextForSegment(it.segmentIdx) 28 | if entry != nil { 29 | return entry 30 | } 31 | it.segmentIdx++ 32 | it.slotIdx = 0 33 | it.entryIdx = 0 34 | } 35 | return nil 36 | } 37 | 38 | func (it *Iterator) nextForSegment(segIdx int) *Entry { 39 | it.cache.locks[segIdx].Lock() 40 | defer it.cache.locks[segIdx].Unlock() 41 | seg := &it.cache.segments[segIdx] 42 | for it.slotIdx < 256 { 43 | entry := it.nextForSlot(seg, it.slotIdx) 44 | if entry != nil { 45 | return entry 46 | } 47 | it.slotIdx++ 48 | it.entryIdx = 0 49 | } 50 | return nil 51 | } 52 | 53 | func (it *Iterator) nextForSlot(seg *segment, slotId int) *Entry { 54 | slotOff := int32(it.slotIdx) * seg.slotCap 55 | slot := seg.slotsData[slotOff : slotOff+seg.slotLens[it.slotIdx] : slotOff+seg.slotCap] 56 | for it.entryIdx < len(slot) { 57 | ptr := slot[it.entryIdx] 58 | it.entryIdx++ 59 | now := seg.timer.Now() 60 | var hdrBuf [ENTRY_HDR_SIZE]byte 61 | seg.rb.ReadAt(hdrBuf[:], ptr.offset) 62 | hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0])) 63 | if hdr.expireAt == 0 || hdr.expireAt > now { 64 | entry := new(Entry) 65 | entry.Key = make([]byte, hdr.keyLen) 66 | entry.Value = make([]byte, hdr.valLen) 67 | entry.ExpireAt = hdr.expireAt 68 | seg.rb.ReadAt(entry.Key, ptr.offset+ENTRY_HDR_SIZE) 69 | seg.rb.ReadAt(entry.Value, ptr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen)) 70 | return entry 71 | } 72 | } 73 | return nil 74 | } 75 | 76 | // NewIterator creates a new iterator for the cache. 
func (cache *Cache) NewIterator() *Iterator {
	// Zero-valued cursors start the scan at segment 0, slot 0, entry 0.
	return &Iterator{
		cache: cache,
	}
}
--------------------------------------------------------------------------------
/ringbuf.go:
--------------------------------------------------------------------------------
package freecache

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// ErrOutOfRange is returned when a requested offset or length falls outside
// the range of data currently held by the ring buffer.
var ErrOutOfRange = errors.New("out of range")

// Ring buffer has a fixed size, when data exceeds the
// size, old data will be overwritten by new data.
// It only contains the data in the stream from begin to end
type RingBuf struct {
	begin int64  // beginning offset of the data stream.
	end   int64  // ending offset of the data stream.
	data  []byte // fixed-size backing storage; logical offsets wrap around it.
	index int    // write cursor into data; range from '0' to 'len(rb.data)-1'
}

// NewRingBuf allocates a ring buffer of the given size whose data stream
// starts at offset begin.
func NewRingBuf(size int, begin int64) (rb RingBuf) {
	rb.data = make([]byte, size)
	rb.Reset(begin)
	return
}

// Reset the ring buffer, emptying it and restarting the data stream.
//
// Parameters:
//     begin: beginning offset of the data stream
func (rb *RingBuf) Reset(begin int64) {
	rb.begin = begin
	rb.end = begin
	rb.index = 0
}

// Dump creates a copy of the buffer's entire backing storage.
func (rb *RingBuf) Dump() []byte {
	dump := make([]byte, len(rb.data))
	copy(dump, rb.data)
	return dump
}

// String returns a human-readable summary of the buffer state for debugging.
func (rb *RingBuf) String() string {
	return fmt.Sprintf("[size:%v, start:%v, end:%v, index:%v]", len(rb.data), rb.begin, rb.end, rb.index)
}

// Size returns the fixed capacity of the ring buffer in bytes.
func (rb *RingBuf) Size() int64 {
	return int64(len(rb.data))
}

// Begin returns the beginning offset of the data stream.
func (rb *RingBuf) Begin() int64 {
	return rb.begin
}

// End returns the ending offset of the data stream.
func (rb *RingBuf) End() int64 {
	return rb.end
}

// ReadAt reads up to len(p) bytes, at off of the data stream.
func (rb *RingBuf) ReadAt(p []byte, off int64) (n int, err error) {
	if off > rb.end || off < rb.begin {
		err = ErrOutOfRange
		return
	}
	readOff := rb.getDataOff(off)
	readEnd := readOff + int(rb.end-off)
	if readEnd <= len(rb.data) {
		// Requested range is contiguous in the backing array.
		n = copy(p, rb.data[readOff:readEnd])
	} else {
		// Range wraps past the end of the backing array: copy the tail,
		// then the remainder from the front.
		n = copy(p, rb.data[readOff:])
		if n < len(p) {
			n += copy(p[n:], rb.data[:readEnd-len(rb.data)])
		}
	}
	if n < len(p) {
		err = io.EOF
	}
	return
}

// getDataOff translates a data-stream offset into an index into rb.data,
// accounting for wrap-around once the buffer has filled up.
func (rb *RingBuf) getDataOff(off int64) int {
	var dataOff int
	if rb.end-rb.begin < int64(len(rb.data)) {
		// Buffer not yet full: stream offsets map directly from begin.
		dataOff = int(off - rb.begin)
	} else {
		// Buffer full: rb.index marks where offset rb.begin lives in rb.data.
		dataOff = rb.index + int(off-rb.begin)
	}
	if dataOff >= len(rb.data) {
		dataOff -= len(rb.data)
	}
	return dataOff
}

// Slice returns a slice of the supplied range of the ring buffer. It will
// not alloc unless the requested range wraps the ring buffer.
func (rb *RingBuf) Slice(off, length int64) ([]byte, error) {
	if off > rb.end || off < rb.begin {
		return nil, ErrOutOfRange
	}
	readOff := rb.getDataOff(off)
	readEnd := readOff + int(length)
	if readEnd <= len(rb.data) {
		// Contiguous: return a view into the backing array; the capacity is
		// capped with a full slice expression so callers cannot append past
		// the requested range.
		return rb.data[readOff:readEnd:readEnd], nil
	}
	// Wrapped: allocate and stitch the two pieces together.
	buf := make([]byte, length)
	n := copy(buf, rb.data[readOff:])
	if n < int(length) {
		n += copy(buf[n:], rb.data[:readEnd-len(rb.data)])
	}
	if n < int(length) {
		return nil, io.EOF
	}
	return buf, nil
}

// Write appends p to the data stream, overwriting the oldest data once the
// buffer is full. Writing more than the whole buffer size at once fails with
// ErrOutOfRange.
func (rb *RingBuf) Write(p []byte) (n int, err error) {
	if len(p) > len(rb.data) {
		err = ErrOutOfRange
		return
	}
	for n < len(p) {
		written := copy(rb.data[rb.index:], p[n:])
		rb.end += int64(written)
		n += written
		rb.index += written
		if rb.index >= len(rb.data) {
			rb.index -= len(rb.data)
		}
	}
	if int(rb.end-rb.begin) > len(rb.data) {
		// Old data was overwritten; advance begin past it.
		rb.begin = rb.end - int64(len(rb.data))
	}
	return
}

// WriteAt overwrites len(p) bytes at off within the existing data stream.
// The target range must already lie between rb.begin and rb.end.
func (rb *RingBuf) WriteAt(p []byte, off int64) (n int, err error) {
	if off+int64(len(p)) > rb.end || off < rb.begin {
		err = ErrOutOfRange
		return
	}
	writeOff := rb.getDataOff(off)
	writeEnd := writeOff + int(rb.end-off)
	if writeEnd <= len(rb.data) {
		n = copy(rb.data[writeOff:writeEnd], p)
	} else {
		// Target range wraps the end of the backing array.
		n = copy(rb.data[writeOff:], p)
		if n < len(p) {
			n += copy(rb.data[:writeEnd-len(rb.data)], p[n:])
		}
	}
	return
}

// EqualAt reports whether the bytes at off in the data stream equal p.
func (rb *RingBuf) EqualAt(p []byte, off int64) bool {
	if off+int64(len(p)) > rb.end || off < rb.begin {
		return false
	}
	readOff := rb.getDataOff(off)
	readEnd := readOff + len(p)
	if readEnd <= len(rb.data) {
		return bytes.Equal(p, rb.data[readOff:readEnd])
	} else {
		// Range wraps: compare the two pieces separately, short-circuiting
		// if the first piece already differs.
		firstLen := len(rb.data) - readOff
		equal := bytes.Equal(p[:firstLen], rb.data[readOff:])
		if equal {
			secondLen := len(p) - firstLen
			equal = bytes.Equal(p[firstLen:], rb.data[:secondLen])
		}
		return equal
	}
}

// Evacuate reads the data at off, then rewrites it at the head of the data
// stream, keeping it from being overwritten by new data.
func (rb *RingBuf) Evacuate(off int64, length int) (newOff int64) {
	if off+int64(length) > rb.end || off < rb.begin {
		return -1
	}
	readOff := rb.getDataOff(off)
	if readOff == rb.index {
		// no copy evacuate: the entry already sits exactly at the write
		// cursor, so just advance the cursor past it (with wrap-around).
		rb.index += length
		if rb.index >= len(rb.data) {
			rb.index -= len(rb.data)
		}
	} else if readOff < rb.index {
		// Source lies before the write cursor and is contiguous in the
		// array; copy it to the cursor, wrapping the remainder if the
		// cursor reaches the end of the array.
		var n = copy(rb.data[rb.index:], rb.data[readOff:readOff+length])
		rb.index += n
		if rb.index == len(rb.data) {
			rb.index = copy(rb.data, rb.data[readOff+n:readOff+length])
		}
	} else {
		// Source lies after the write cursor and may itself wrap around
		// the end of the array.
		var readEnd = readOff + length
		var n int
		if readEnd <= len(rb.data) {
			n = copy(rb.data[rb.index:], rb.data[readOff:readEnd])
			rb.index += n
		} else {
			// Copy the portion up to the array end first...
			n = copy(rb.data[rb.index:], rb.data[readOff:])
			rb.index += n
			// ...then the portion that wrapped to the array start.
			var tail = length - n
			n = copy(rb.data[rb.index:], rb.data[:tail])
			rb.index += n
			if rb.index == len(rb.data) {
				rb.index = copy(rb.data, rb.data[n:tail])
			}
		}
	}
	// The evacuated copy now lives at the former end of the stream.
	newOff = rb.end
	rb.end += int64(length)
	if rb.begin < rb.end-int64(len(rb.data)) {
		// Oldest data was logically overwritten; advance begin accordingly.
		rb.begin = rb.end - int64(len(rb.data))
	}
	return
}

// Resize replaces the backing array with one of newSize bytes, preserving as
// much of the newest data as fits and normalizing the layout so the retained
// data starts at array position 0 (rb.index is reset to 0).
func (rb *RingBuf) Resize(newSize int) {
	if len(rb.data) == newSize {
		return
	}
	newData := make([]byte, newSize)
	var offset int
	if rb.end-rb.begin == int64(len(rb.data)) {
		// Buffer is full, so the oldest byte lives at the write cursor.
		offset = rb.index
	}
	if int(rb.end-rb.begin) > newSize {
		// Shrinking below the amount stored: drop the oldest bytes.
		discard := int(rb.end-rb.begin) - newSize
		offset = (offset + discard) % len(rb.data)
		rb.begin = rb.end - int64(newSize)
	}
	n := copy(newData, rb.data[offset:])
	if n < newSize {
		copy(newData[n:], rb.data[:offset])
	}
	rb.data = newData
	rb.index = 0
}

// Skip advances the logical end of the stream by length bytes without
// writing anything, moving the write cursor (with wrap-around) and pushing
// begin forward when old data is logically overwritten.
func (rb *RingBuf) Skip(length int64) {
	rb.end += length
	rb.index += int(length)
	for rb.index >= len(rb.data) {
		rb.index -= len(rb.data)
	}
	if int(rb.end-rb.begin) > len(rb.data) {
		rb.begin = rb.end - int64(len(rb.data))
	}
}
-------------------------------------------------------------------------------- /ringbuf_test.go: --------------------------------------------------------------------------------
package freecache

import (
	"testing"
)

// TestRingBuf exercises wrap-around writes, Evacuate, Resize and ReadAt, then
// repeats the whole sequence after Reset to verify a reset buffer behaves
// like a fresh one.
func TestRingBuf(t *testing.T) {
	rb := NewRingBuf(16, 0)
	for i := 0; i < 2; i++ {
		rb.Write([]byte("fghibbbbccccddde"))
		rb.Write([]byte("fghibbbbc"))
		rb.Resize(16)
		off := rb.Evacuate(9, 3)
		t.Log(string(rb.Dump()))
		if off != rb.End()-3 {
			t.Log(string(rb.Dump()), rb.End())
			t.Fatalf("off got %v", off)
		}
		off = rb.Evacuate(15, 5)
		t.Log(string(rb.Dump()))
		if off != rb.End()-5 {
			t.Fatalf("off got %v", off)
		}
		rb.Resize(64)
		rb.Resize(32)
		data := make([]byte, 5)
		rb.ReadAt(data, off)
		if string(data) != "efghi" {
			t.Fatalf("read at should be efghi, got %v", string(data))
		}

		off = rb.Evacuate(0, 10)
		if off != -1 {
			t.Fatal("evacutate out of range offset should return error")
		}

		/* -- After reset the buffer should behave exactly the same as a new one.
		 * Hence, run the test once more again with reset buffer.
	*/
		rb.Reset(0)
	}
}
-------------------------------------------------------------------------------- /segment.go: --------------------------------------------------------------------------------
package freecache

import (
	"errors"
	"sync/atomic"
	"unsafe"
)

// HASH_ENTRY_SIZE is the in-memory size of an entryPtr (8+2+2+4 bytes).
const HASH_ENTRY_SIZE = 16

// ENTRY_HDR_SIZE is the size of an entryHdr; every ring-buffer entry starts
// with a header of this many bytes, followed by the key and then the value.
const ENTRY_HDR_SIZE = 24

var ErrLargeKey = errors.New("The key is larger than 65535")
var ErrLargeEntry = errors.New("The entry size is larger than 1/1024 of cache size")
var ErrNotFound = errors.New("Entry not found")

// entry pointer struct points to an entry in ring buffer
type entryPtr struct {
	offset   int64  // entry offset in ring buffer
	hash16   uint16 // entries are ordered by hash16 in a slot.
	keyLen   uint16 // used to compare a key
	reserved uint32 // padding; keeps the struct at HASH_ENTRY_SIZE bytes
}

// entry header struct in ring buffer, followed by key and value.
type entryHdr struct {
	accessTime uint32 // last access, seconds (Timer.Now)
	expireAt   uint32 // absolute expiry in seconds; 0 means never expires
	keyLen     uint16
	hash16     uint16
	valLen     uint32 // bytes of value actually stored
	valCap     uint32 // bytes reserved for the value (>= valLen)
	deleted    bool   // tombstone flag; space reclaimed during evacuation
	slotId     uint8
	reserved   uint16 // padding; keeps the struct at ENTRY_HDR_SIZE bytes
}

// a segment contains 256 slots, a slot is an array of entry pointers ordered by hash16 value
// the entry can be looked up by hash value of the key.
type segment struct {
	rb            RingBuf // ring buffer that stores data
	segId         int
	_             uint32
	missCount     int64
	hitCount      int64
	entryCount    int64
	totalCount    int64 // number of entries in ring buffer, including deleted entries.
	totalTime     int64 // used to calculate least recent used entry.
	timer         Timer // Timer giving current time
	totalEvacuate int64 // used for debug
	totalExpired  int64 // used for debug
	overwrites    int64 // used for debug
	touched       int64 // used for debug
	vacuumLen     int64 // up to vacuumLen, new data can be written without overwriting old data.
	slotLens      [256]int32 // The actual length for every slot.
	slotCap       int32      // max number of entry pointers a slot can hold.
	slotsData     []entryPtr // shared by all 256 slots
}

// newSegment creates a segment backed by a ring buffer of bufSize bytes,
// with an initial capacity of one entry pointer per each of the 256 slots.
func newSegment(bufSize int, segId int, timer Timer) (seg segment) {
	seg.rb = NewRingBuf(bufSize, 0)
	seg.segId = segId
	seg.timer = timer
	seg.vacuumLen = int64(bufSize)
	seg.slotCap = 1
	seg.slotsData = make([]entryPtr, 256*seg.slotCap)
	return
}

// set inserts or overwrites the entry for key. Keys longer than 65535 bytes
// and key+value pairs larger than a quarter of the ring buffer (minus the
// header) are rejected. expireSeconds <= 0 means the entry never expires.
func (seg *segment) set(key, value []byte, hashVal uint64, expireSeconds int) (err error) {
	if len(key) > 65535 {
		return ErrLargeKey
	}
	maxKeyValLen := len(seg.rb.data)/4 - ENTRY_HDR_SIZE
	if len(key)+len(value) > maxKeyValLen {
		// Do not accept large entry.
		return ErrLargeEntry
	}
	now := seg.timer.Now()
	expireAt := uint32(0)
	if expireSeconds > 0 {
		expireAt = now + uint32(expireSeconds)
	}

	// The hash is split: bits 8..15 select the slot, bits 16..31 order and
	// pre-filter entries inside the slot.
	slotId := uint8(hashVal >> 8)
	hash16 := uint16(hashVal >> 16)
	slot := seg.getSlot(slotId)
	idx, match := seg.lookup(slot, hash16, key)

	// hdr overlays the raw header bytes so fields can be edited in place.
	var hdrBuf [ENTRY_HDR_SIZE]byte
	hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))
	if match {
		matchedPtr := &slot[idx]
		seg.rb.ReadAt(hdrBuf[:], matchedPtr.offset)
		hdr.slotId = slotId
		hdr.hash16 = hash16
		hdr.keyLen = uint16(len(key))
		originAccessTime := hdr.accessTime
		hdr.accessTime = now
		hdr.expireAt = expireAt
		hdr.valLen = uint32(len(value))
		if hdr.valCap >= hdr.valLen {
			// in place overwrite: the reserved value capacity is enough.
			atomic.AddInt64(&seg.totalTime, int64(hdr.accessTime)-int64(originAccessTime))
			seg.rb.WriteAt(hdrBuf[:], matchedPtr.offset)
			seg.rb.WriteAt(value, matchedPtr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen))
			atomic.AddInt64(&seg.overwrites, 1)
			return
		}
		// avoid unnecessary memory copy: rather than relocating the old
		// entry, drop its pointer and append a brand-new entry below.
		seg.delEntryPtr(slotId, slot, idx)
		match = false
		// increase capacity and limit entry len.
		for hdr.valCap < hdr.valLen {
			hdr.valCap *= 2
		}
		if hdr.valCap > uint32(maxKeyValLen-len(key)) {
			hdr.valCap = uint32(maxKeyValLen - len(key))
		}
	} else {
		hdr.slotId = slotId
		hdr.hash16 = hash16
		hdr.keyLen = uint16(len(key))
		hdr.accessTime = now
		hdr.expireAt = expireAt
		hdr.valLen = uint32(len(value))
		hdr.valCap = uint32(len(value))
		if hdr.valCap == 0 { // avoid infinite loop when increasing capacity.
			hdr.valCap = 1
		}
	}

	entryLen := ENTRY_HDR_SIZE + int64(len(key)) + int64(hdr.valCap)
	slotModified := seg.evacuate(entryLen, slotId, now)
	if slotModified {
		// the slot has been modified during evacuation, so 'idx' must be
		// looked up again; otherwise there would be an index out of bound error.
		slot = seg.getSlot(slotId)
		idx, match = seg.lookup(slot, hash16, key)
		// assert(match == false)
	}
	// Append header, key and value at the stream end, then skip over the
	// unused reserved capacity (valCap - valLen).
	newOff := seg.rb.End()
	seg.insertEntryPtr(slotId, hash16, newOff, idx, hdr.keyLen)
	seg.rb.Write(hdrBuf[:])
	seg.rb.Write(key)
	seg.rb.Write(value)
	seg.rb.Skip(int64(hdr.valCap - hdr.valLen))
	atomic.AddInt64(&seg.totalTime, int64(now))
	atomic.AddInt64(&seg.totalCount, 1)
	seg.vacuumLen -= entryLen
	return
}

// touch refreshes the access time and TTL of an existing entry without
// reading or writing its value. Missing or expired keys return ErrNotFound
// (expired entries are dropped on the spot).
func (seg *segment) touch(key []byte, hashVal uint64, expireSeconds int) (err error) {
	if len(key) > 65535 {
		return ErrLargeKey
	}

	slotId := uint8(hashVal >> 8)
	hash16 := uint16(hashVal >> 16)
	slot := seg.getSlot(slotId)
	idx, match := seg.lookup(slot, hash16, key)
	if !match {
		err = ErrNotFound
		return
	}
	matchedPtr := &slot[idx]

	var hdrBuf [ENTRY_HDR_SIZE]byte
	seg.rb.ReadAt(hdrBuf[:], matchedPtr.offset)
	hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))

	now := seg.timer.Now()
	if isExpired(hdr.expireAt, now) {
		// Lazy expiry: drop the stale entry and report a miss.
		seg.delEntryPtr(slotId, slot, idx)
		atomic.AddInt64(&seg.totalExpired, 1)
		err = ErrNotFound
		atomic.AddInt64(&seg.missCount, 1)
		return
	}

	expireAt := uint32(0)
	if expireSeconds > 0 {
		expireAt = now + uint32(expireSeconds)
	}

	originAccessTime := hdr.accessTime
	hdr.accessTime = now
	hdr.expireAt = expireAt
	// in place overwrite
	atomic.AddInt64(&seg.totalTime, int64(hdr.accessTime)-int64(originAccessTime))
	seg.rb.WriteAt(hdrBuf[:], matchedPtr.offset)
	atomic.AddInt64(&seg.touched, 1)
	return
}

// evacuate frees ring-buffer space until at least entryLen bytes can be
// written. Deleted, expired and least-recently-used victims are dropped;
// recently used entries are relocated to the stream end (at most 5 in a row
// before one is forcibly dropped). Returns true when the caller's slot was
// modified, in which case the caller must re-run its lookup.
func (seg *segment) evacuate(entryLen int64, slotId uint8, now uint32) (slotModified bool) {
	var oldHdrBuf [ENTRY_HDR_SIZE]byte
	consecutiveEvacuate := 0
	for seg.vacuumLen < entryLen {
		// The oldest surviving entry sits right past the vacuum region.
		oldOff := seg.rb.End() + seg.vacuumLen - seg.rb.Size()
		seg.rb.ReadAt(oldHdrBuf[:], oldOff)
		oldHdr := (*entryHdr)(unsafe.Pointer(&oldHdrBuf[0]))
		oldEntryLen := ENTRY_HDR_SIZE + int64(oldHdr.keyLen) + int64(oldHdr.valCap)
		if oldHdr.deleted {
			// Tombstone: reclaim its space outright.
			consecutiveEvacuate = 0
			atomic.AddInt64(&seg.totalTime, -int64(oldHdr.accessTime))
			atomic.AddInt64(&seg.totalCount, -1)
			seg.vacuumLen += oldEntryLen
			continue
		}
		expired := isExpired(oldHdr.expireAt, now)
		// An entry is considered LRU when its access time is at or below
		// the mean access time (totalTime / totalCount).
		leastRecentUsed := int64(oldHdr.accessTime)*atomic.LoadInt64(&seg.totalCount) <= atomic.LoadInt64(&seg.totalTime)
		if expired || leastRecentUsed || consecutiveEvacuate > 5 {
			seg.delEntryPtrByOffset(oldHdr.slotId, oldHdr.hash16, oldOff)
			if oldHdr.slotId == slotId {
				slotModified = true
			}
			consecutiveEvacuate = 0
			atomic.AddInt64(&seg.totalTime, -int64(oldHdr.accessTime))
			atomic.AddInt64(&seg.totalCount, -1)
			seg.vacuumLen += oldEntryLen
			if expired {
				atomic.AddInt64(&seg.totalExpired, 1)
			} else {
				atomic.AddInt64(&seg.totalEvacuate, 1)
			}
		} else {
			// evacuate an old entry that has been accessed recently for better cache hit rate.
			newOff := seg.rb.Evacuate(oldOff, int(oldEntryLen))
			seg.updateEntryPtr(oldHdr.slotId, oldHdr.hash16, oldOff, newOff)
			consecutiveEvacuate++
			atomic.AddInt64(&seg.totalEvacuate, 1)
		}
	}
	return
}

// get copies the value for key into buf when buf is large enough, otherwise
// into a freshly allocated slice, and returns it with the entry's expiry.
// Unless peek is true, a hit bumps the hit counter (locate handles misses,
// lazy expiry and access-time refresh).
func (seg *segment) get(key, buf []byte, hashVal uint64, peek bool) (value []byte, expireAt uint32, err error) {
	hdr, ptrOffset, err := seg.locate(key, hashVal, peek)
	if err != nil {
		return
	}
	expireAt = hdr.expireAt
	if cap(buf) >= int(hdr.valLen) {
		value = buf[:hdr.valLen]
	} else {
		value = make([]byte, hdr.valLen)
	}

	// Value bytes start after the header and the key.
	seg.rb.ReadAt(value, ptrOffset+ENTRY_HDR_SIZE+int64(hdr.keyLen))
	if !peek {
		atomic.AddInt64(&seg.hitCount, 1)
	}
	return
}

// view provides zero-copy access to the element's value, without copying to
// an intermediate buffer.
func (seg *segment) view(key []byte, fn func([]byte) error, hashVal uint64, peek bool) (err error) {
	hdr, ptrOffset, err := seg.locate(key, hashVal, peek)
	if err != nil {
		return
	}
	// Hand fn a slice over the ring buffer's own storage (via Slice) rather
	// than a copy; valid only for the duration of the callback.
	start := ptrOffset + ENTRY_HDR_SIZE + int64(hdr.keyLen)
	val, err := seg.rb.Slice(start, int64(hdr.valLen))
	if err != nil {
		return err
	}
	err = fn(val)
	if !peek {
		atomic.AddInt64(&seg.hitCount, 1)
	}
	return
}

// locate finds key and returns a copy of its header plus its ring-buffer
// offset. Unless peek is true it also performs lazy expiry (dropping expired
// entries), bumps the miss counter and refreshes the entry's access time.
func (seg *segment) locate(key []byte, hashVal uint64, peek bool) (hdrEntry entryHdr, ptrOffset int64, err error) {
	slotId := uint8(hashVal >> 8)
	hash16 := uint16(hashVal >> 16)
	slot := seg.getSlot(slotId)
	idx, match := seg.lookup(slot, hash16, key)
	if !match {
		err = ErrNotFound
		if !peek {
			atomic.AddInt64(&seg.missCount, 1)
		}
		return
	}
	ptr := &slot[idx]

	var hdrBuf [ENTRY_HDR_SIZE]byte
	seg.rb.ReadAt(hdrBuf[:], ptr.offset)
	hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))
	if !peek {
		now := seg.timer.Now()
		if isExpired(hdr.expireAt, now) {
			seg.delEntryPtr(slotId, slot, idx)
			atomic.AddInt64(&seg.totalExpired, 1)
			err = ErrNotFound
			atomic.AddInt64(&seg.missCount, 1)
			return
		}
		// Refresh recency accounting and persist the new access time.
		atomic.AddInt64(&seg.totalTime, int64(now-hdr.accessTime))
		hdr.accessTime = now
		seg.rb.WriteAt(hdrBuf[:], ptr.offset)
	}
	return *hdr, ptr.offset, nil
}

// del removes the entry pointer for key and reports whether a match existed.
// The entry's bytes are only tombstoned (see delEntryPtr) and are reclaimed
// later during evacuation.
func (seg *segment) del(key []byte, hashVal uint64) (affected bool) {
	slotId := uint8(hashVal >> 8)
	hash16 := uint16(hashVal >> 16)
	slot := seg.getSlot(slotId)
	idx, match := seg.lookup(slot, hash16, key)
	if !match {
		return false
	}
	seg.delEntryPtr(slotId, slot, idx)
	return true
}

// ttl returns the remaining time-to-live in seconds for key. A zero result
// with a nil error means the entry never expires; missing or already
// expired keys return ErrNotFound.
func (seg *segment) ttl(key []byte, hashVal uint64) (timeLeft uint32, err error) {
	slotId := uint8(hashVal >> 8)
	hash16 := uint16(hashVal >> 16)
	slot := seg.getSlot(slotId)
	idx, match := seg.lookup(slot, hash16, key)
	if !match {
		err = ErrNotFound
		return
	}
	ptr := &slot[idx]

	var hdrBuf [ENTRY_HDR_SIZE]byte
	seg.rb.ReadAt(hdrBuf[:], ptr.offset)
	hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))

	if hdr.expireAt == 0 {
		// No expiry configured: timeLeft stays 0, err stays nil.
		return
	} else {
		now := seg.timer.Now()
		if !isExpired(hdr.expireAt, now) {
			timeLeft = hdr.expireAt - now
			return
		}
	}
	err = ErrNotFound
	return
}

// expand doubles the per-slot capacity, re-packing each of the 256 slots
// into a freshly allocated backing array.
func (seg *segment) expand() {
	newSlotData := make([]entryPtr, seg.slotCap*2*256)
	for i := 0; i < 256; i++ {
		off := int32(i) * seg.slotCap
		copy(newSlotData[off*2:], seg.slotsData[off:off+seg.slotLens[i]])
	}
	seg.slotCap *= 2
	seg.slotsData = newSlotData
}

// updateEntryPtr repoints the entry identified by (hash16, oldOff) to
// newOff; used after an entry is relocated within the ring buffer.
func (seg *segment) updateEntryPtr(slotId uint8, hash16 uint16, oldOff, newOff int64) {
	slot := seg.getSlot(slotId)
	idx, match := seg.lookupByOff(slot, hash16, oldOff)
	if !match {
		return
	}
	ptr := &slot[idx]
	ptr.offset = newOff
}

// insertEntryPtr inserts a pointer at position idx of the slot (callers
// compute idx so hash16 ordering is preserved), growing all slots when the
// slot is at capacity.
func (seg *segment) insertEntryPtr(slotId uint8, hash16 uint16, offset int64, idx int, keyLen uint16) {
	if seg.slotLens[slotId] == seg.slotCap {
		seg.expand()
	}
	seg.slotLens[slotId]++
	atomic.AddInt64(&seg.entryCount, 1)
	slot := seg.getSlot(slotId)
	// Shift the tail right by one, then fill the freed position.
	copy(slot[idx+1:], slot[idx:])
	slot[idx].offset = offset
	slot[idx].hash16 = hash16
	slot[idx].keyLen = keyLen
}

// delEntryPtrByOffset deletes the entry pointer identified by its exact
// ring-buffer offset, if it is still present in the slot.
func (seg *segment) delEntryPtrByOffset(slotId uint8, hash16 uint16, offset int64) {
	slot := seg.getSlot(slotId)
	idx, match := seg.lookupByOff(slot, hash16, offset)
	if !match {
		return
	}
	seg.delEntryPtr(slotId, slot, idx)
}

// delEntryPtr removes slot[idx] and tombstones the corresponding ring-buffer
// entry so evacuation can reclaim its space later.
func (seg *segment) delEntryPtr(slotId uint8, slot []entryPtr, idx int) {
	offset := slot[idx].offset
	var entryHdrBuf [ENTRY_HDR_SIZE]byte
	seg.rb.ReadAt(entryHdrBuf[:], offset)
	entryHdr := (*entryHdr)(unsafe.Pointer(&entryHdrBuf[0]))
	// Mark the buffered entry deleted; its bytes stay in the ring buffer
	// until evacuation reclaims them.
	entryHdr.deleted = true
	seg.rb.WriteAt(entryHdrBuf[:], offset)
	copy(slot[idx:], slot[idx+1:])
	seg.slotLens[slotId]--
	atomic.AddInt64(&seg.entryCount, -1)
}

// entryPtrIdx binary-searches slot for the first index whose hash16 is not
// less than the given hash16 (lower bound).
func entryPtrIdx(slot []entryPtr, hash16 uint16) (idx int) {
	high := len(slot)
	for idx < high {
		mid := (idx + high) >> 1
		oldEntry := &slot[mid]
		if oldEntry.hash16 < hash16 {
			idx = mid + 1
		} else {
			high = mid
		}
	}
	return
}

// lookup scans the candidates sharing hash16 and returns the index of the
// one whose stored key bytes equal key; when there is no match, idx is the
// ordered insertion point for hash16.
func (seg *segment) lookup(slot []entryPtr, hash16 uint16, key []byte) (idx int, match bool) {
	idx = entryPtrIdx(slot, hash16)
	for idx < len(slot) {
		ptr := &slot[idx]
		if ptr.hash16 != hash16 {
			break
		}
		// Cheap length check first, then byte comparison against the key
		// stored right after the entry header in the ring buffer.
		match = int(ptr.keyLen) == len(key) && seg.rb.EqualAt(key, ptr.offset+ENTRY_HDR_SIZE)
		if match {
			return
		}
		idx++
	}
	return
}

// lookupByOff is like lookup but identifies the candidate by its exact
// ring-buffer offset instead of by key bytes.
func (seg *segment) lookupByOff(slot []entryPtr, hash16 uint16, offset int64) (idx int, match bool) {
	idx = entryPtrIdx(slot, hash16)
	for idx < len(slot) {
		ptr := &slot[idx]
		if ptr.hash16 != hash16 {
			break
		}
		match = ptr.offset == offset
		if match {
			return
		}
		idx++
	}
	return
}

// resetStatistics clears the debug/statistics counters only; cache contents
// are untouched.
func (seg *segment) resetStatistics() {
	atomic.StoreInt64(&seg.totalEvacuate, 0)
	atomic.StoreInt64(&seg.totalExpired, 0)
	atomic.StoreInt64(&seg.overwrites, 0)
	atomic.StoreInt64(&seg.hitCount, 0)
	atomic.StoreInt64(&seg.missCount, 0)
}

// clear empties the segment: the ring buffer is reset, the slot index is
// reallocated at minimum capacity, and all counters are zeroed.
func (seg *segment) clear() {
	bufSize := len(seg.rb.data)
	seg.rb.Reset(0)
	seg.vacuumLen = int64(bufSize)
	seg.slotCap = 1
	seg.slotsData = make([]entryPtr, 256*seg.slotCap)
	for i := 0; i < len(seg.slotLens); i++ {
		seg.slotLens[i] = 0
	}

	atomic.StoreInt64(&seg.hitCount, 0)
	atomic.StoreInt64(&seg.missCount, 0)
	atomic.StoreInt64(&seg.entryCount, 0)
	atomic.StoreInt64(&seg.totalCount, 0)
	atomic.StoreInt64(&seg.totalTime, 0)
	atomic.StoreInt64(&seg.totalEvacuate, 0)
	atomic.StoreInt64(&seg.totalExpired, 0)
	atomic.StoreInt64(&seg.overwrites, 0)
}

// getSlot returns slotId's live entries as a slice whose capacity is capped
// at slotCap (three-index slice) so appends cannot spill into the next
// slot's region.
func (seg *segment) getSlot(slotId uint8) []entryPtr {
	slotOff := int32(slotId) * seg.slotCap
	return seg.slotsData[slotOff : slotOff+seg.slotLens[slotId] : slotOff+seg.slotCap]
}

// isExpired checks if a key is expired; expireAt == 0 means "never expires".
func isExpired(keyExpireAt, now uint32) bool {
	return keyExpireAt != 0 && keyExpireAt <= now
}
-------------------------------------------------------------------------------- /server/main.go: --------------------------------------------------------------------------------
// A basic freecache server that supports a subset of the redis protocol
// (ping, dbsize, get, set, setex, del).
package main

import (
	"bufio"
	"bytes"
	"errors"
	"github.com/coocood/freecache"
	"io"
	"log"
	"net"
	"net/http"
	_ "net/http/pprof"
	"runtime"
	"runtime/debug"
	"strconv"
	"time"
)

var (
	protocolErr       = errors.New("protocol error")
	CRLF              = []byte("\r\n")
	PING              = []byte("ping")
	DBSIZE            = []byte("dbsize")
	ERROR_UNSUPPORTED = []byte("-ERR unsupported command\r\n")
	OK                = []byte("+OK\r\n")
	PONG              = []byte("+PONG\r\n")
	GET               = []byte("get")
	SET               = []byte("set")
	SETEX             = []byte("setex")
	DEL               = []byte("del")
	NIL               = []byte("$-1\r\n")
	CZERO             = []byte(":0\r\n")
	CONE              = []byte(":1\r\n")
	BulkSign          = []byte("$")
)

// Request is one parsed client command; args are views into buf's bytes.
type Request struct {
	args [][]byte
	buf  *bytes.Buffer
}

// Reset empties the request for reuse, keeping the buffer's backing storage.
func (req *Request) Reset() {
	req.args = req.args[:0]
	req.buf.Reset()
}

// operation pairs a request with the channel its reply should be sent to.
type operation struct {
	req Request
50 | replyChan chan *bytes.Buffer 51 | } 52 | 53 | type Session struct { 54 | server *Server 55 | conn net.Conn 56 | addr string 57 | reader *bufio.Reader 58 | replyChan chan *bytes.Buffer 59 | } 60 | 61 | type Server struct { 62 | cache *freecache.Cache 63 | } 64 | 65 | func NewServer(cacheSize int) (server *Server) { 66 | server = new(Server) 67 | server.cache = freecache.NewCache(cacheSize) 68 | return 69 | } 70 | 71 | func (server *Server) Start(addr string) error { 72 | l, err := net.Listen("tcp", addr) 73 | if err != nil { 74 | log.Println(err) 75 | return err 76 | } 77 | defer l.Close() 78 | log.Println("Listening on port", addr) 79 | for { 80 | tcpListener := l.(*net.TCPListener) 81 | tcpListener.SetDeadline(time.Now().Add(time.Second)) 82 | conn, err := l.Accept() 83 | if err != nil { 84 | if ne, ok := err.(net.Error); ok && ne.Temporary() { 85 | continue 86 | } 87 | return err 88 | } 89 | 90 | session := new(Session) 91 | session.conn = conn 92 | session.replyChan = make(chan *bytes.Buffer, 100) 93 | session.addr = conn.RemoteAddr().String() 94 | session.server = server 95 | session.reader = bufio.NewReader(conn) 96 | go session.readLoop() 97 | go session.writeLoop() 98 | } 99 | } 100 | 101 | func copyN(buffer *bytes.Buffer, r *bufio.Reader, n int64) (err error) { 102 | if n <= 512 { 103 | var buf [512]byte 104 | _, err = r.Read(buf[:n]) 105 | if err != nil { 106 | return 107 | } 108 | buffer.Write(buf[:n]) 109 | } else { 110 | _, err = io.CopyN(buffer, r, n) 111 | } 112 | return 113 | } 114 | 115 | func (server *Server) ReadClient(r *bufio.Reader, req *Request) (err error) { 116 | line, err := readLine(r) 117 | if err != nil { 118 | return 119 | } 120 | if len(line) == 0 || line[0] != '*' { 121 | err = protocolErr 122 | return 123 | } 124 | argc, err := btoi(line[1:]) 125 | if err != nil { 126 | return 127 | } 128 | if argc <= 0 || argc > 4 { 129 | err = protocolErr 130 | return 131 | } 132 | var argStarts [4]int 133 | var argEnds [4]int 134 | 
req.buf.Write(line) 135 | req.buf.Write(CRLF) 136 | cursor := len(line) + 2 137 | for i := 0; i < argc; i++ { 138 | line, err = readLine(r) 139 | if err != nil { 140 | return 141 | } 142 | if len(line) == 0 || line[0] != '$' { 143 | err = protocolErr 144 | return 145 | } 146 | var argLen int 147 | argLen, err = btoi(line[1:]) 148 | if err != nil { 149 | return 150 | } 151 | if argLen < 0 || argLen > 512*1024*1024 { 152 | err = protocolErr 153 | return 154 | } 155 | req.buf.Write(line) 156 | req.buf.Write(CRLF) 157 | cursor += len(line) + 2 158 | err = copyN(req.buf, r, int64(argLen)+2) 159 | if err != nil { 160 | return 161 | } 162 | argStarts[i] = cursor 163 | argEnds[i] = cursor + argLen 164 | cursor += argLen + 2 165 | } 166 | data := req.buf.Bytes() 167 | for i := 0; i < argc; i++ { 168 | req.args = append(req.args, data[argStarts[i]:argEnds[i]]) 169 | } 170 | lower(req.args[0]) 171 | return 172 | } 173 | 174 | func (down *Session) readLoop() { 175 | var req = new(Request) 176 | req.buf = new(bytes.Buffer) 177 | for { 178 | req.Reset() 179 | err := down.server.ReadClient(down.reader, req) 180 | if err != nil { 181 | close(down.replyChan) 182 | return 183 | } 184 | reply := new(bytes.Buffer) 185 | if len(req.args) == 4 && bytes.Equal(req.args[0], SETEX) { 186 | expire, err := btoi(req.args[2]) 187 | if err != nil { 188 | reply.Write(ERROR_UNSUPPORTED) 189 | } else { 190 | down.server.cache.Set(req.args[1], req.args[3], expire) 191 | reply.Write(OK) 192 | } 193 | } else if len(req.args) == 3 && bytes.Equal(req.args[0], SET) { 194 | down.server.cache.Set(req.args[1], req.args[2], 0) 195 | reply.Write(OK) 196 | } else if len(req.args) == 2 { 197 | if bytes.Equal(req.args[0], GET) { 198 | value, err := down.server.cache.Get(req.args[1]) 199 | if err != nil { 200 | reply.Write(NIL) 201 | } else { 202 | bukLen := strconv.Itoa(len(value)) 203 | reply.Write(BulkSign) 204 | reply.WriteString(bukLen) 205 | reply.Write(CRLF) 206 | reply.Write(value) 207 | reply.Write(CRLF) 
208 | } 209 | } else if bytes.Equal(req.args[0], DEL) { 210 | if down.server.cache.Del(req.args[1]) { 211 | reply.Write(CONE) 212 | } else { 213 | reply.Write(CZERO) 214 | } 215 | } 216 | } else if len(req.args) == 1 { 217 | if bytes.Equal(req.args[0], PING) { 218 | reply.Write(PONG) 219 | } else if bytes.Equal(req.args[0], DBSIZE) { 220 | entryCount := down.server.cache.EntryCount() 221 | reply.WriteString(":") 222 | reply.WriteString(strconv.Itoa(int(entryCount))) 223 | reply.Write(CRLF) 224 | } else { 225 | reply.Write(ERROR_UNSUPPORTED) 226 | } 227 | } 228 | down.replyChan <- reply 229 | } 230 | } 231 | 232 | func (down *Session) writeLoop() { 233 | var buffer = bytes.NewBuffer(nil) 234 | var replies = make([]*bytes.Buffer, 1) 235 | for { 236 | buffer.Reset() 237 | select { 238 | case reply, ok := <-down.replyChan: 239 | if !ok { 240 | down.conn.Close() 241 | return 242 | } 243 | replies = replies[:1] 244 | replies[0] = reply 245 | queueLen := len(down.replyChan) 246 | for i := 0; i < queueLen; i++ { 247 | reply = <-down.replyChan 248 | replies = append(replies, reply) 249 | } 250 | for _, reply := range replies { 251 | if reply == nil { 252 | buffer.Write(NIL) 253 | continue 254 | } 255 | buffer.Write(reply.Bytes()) 256 | } 257 | _, err := down.conn.Write(buffer.Bytes()) 258 | if err != nil { 259 | down.conn.Close() 260 | return 261 | } 262 | } 263 | } 264 | } 265 | 266 | func readLine(r *bufio.Reader) ([]byte, error) { 267 | p, err := r.ReadSlice('\n') 268 | if err != nil { 269 | return nil, err 270 | } 271 | i := len(p) - 2 272 | if i < 0 || p[i] != '\r' { 273 | return nil, protocolErr 274 | } 275 | return p[:i], nil 276 | } 277 | 278 | func btoi(data []byte) (int, error) { 279 | if len(data) == 0 { 280 | return 0, nil 281 | } 282 | i := 0 283 | sign := 1 284 | if data[0] == '-' { 285 | i++ 286 | sign *= -1 287 | } 288 | if i >= len(data) { 289 | return 0, protocolErr 290 | } 291 | var l int 292 | for ; i < len(data); i++ { 293 | c := data[i] 294 | if c < 
'0' || c > '9' { 295 | return 0, protocolErr 296 | } 297 | l = l*10 + int(c-'0') 298 | } 299 | return sign * l, nil 300 | } 301 | 302 | func lower(data []byte) { 303 | for i := 0; i < len(data); i++ { 304 | if data[i] >= 'A' && data[i] <= 'Z' { 305 | data[i] += 'a' - 'A' 306 | } 307 | } 308 | } 309 | 310 | func main() { 311 | runtime.GOMAXPROCS(runtime.NumCPU() - 1) 312 | server := NewServer(256 * 1024 * 1024) 313 | debug.SetGCPercent(10) 314 | go func() { 315 | log.Println(http.ListenAndServe("localhost:6060", nil)) 316 | }() 317 | server.Start(":7788") 318 | } 319 | -------------------------------------------------------------------------------- /timer.go: -------------------------------------------------------------------------------- 1 | package freecache 2 | 3 | import ( 4 | "sync/atomic" 5 | "time" 6 | ) 7 | 8 | // Timer holds representation of current time. 9 | type Timer interface { 10 | // Give current time (in seconds) 11 | Now() uint32 12 | } 13 | 14 | // Timer that must be stopped. 
15 | type StoppableTimer interface { 16 | Timer 17 | 18 | // Release resources of the timer, functionality may or may not be affected 19 | // It is not called automatically, so user must call it just once 20 | Stop() 21 | } 22 | 23 | // Helper function that returns Unix time in seconds 24 | func getUnixTime() uint32 { 25 | return uint32(time.Now().Unix()) 26 | } 27 | 28 | // Default timer reads Unix time always when requested 29 | type defaultTimer struct{} 30 | 31 | func (timer defaultTimer) Now() uint32 { 32 | return getUnixTime() 33 | } 34 | 35 | // Cached timer stores Unix time every second and returns the cached value 36 | type cachedTimer struct { 37 | now uint32 38 | ticker *time.Ticker 39 | done chan bool 40 | } 41 | 42 | // Create cached timer and start runtime timer that updates time every second 43 | func NewCachedTimer() StoppableTimer { 44 | timer := &cachedTimer{ 45 | now: getUnixTime(), 46 | ticker: time.NewTicker(time.Second), 47 | done: make(chan bool), 48 | } 49 | 50 | go timer.update() 51 | 52 | return timer 53 | } 54 | 55 | func (timer *cachedTimer) Now() uint32 { 56 | return atomic.LoadUint32(&timer.now) 57 | } 58 | 59 | // Stop runtime timer and finish routine that updates time 60 | func (timer *cachedTimer) Stop() { 61 | timer.ticker.Stop() 62 | timer.done <- true 63 | close(timer.done) 64 | 65 | timer.done = nil 66 | timer.ticker = nil 67 | } 68 | 69 | // Periodically check and update of time 70 | func (timer *cachedTimer) update() { 71 | for { 72 | select { 73 | case <-timer.done: 74 | return 75 | case <-timer.ticker.C: 76 | atomic.StoreUint32(&timer.now, getUnixTime()) 77 | } 78 | } 79 | } 80 | --------------------------------------------------------------------------------