├── .codecov.yml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ └── issue_template.md ├── dependabot.yml ├── release-drafter.yml └── workflows │ ├── build.yml │ └── release-management.yml ├── .gitignore ├── .golangci.yaml ├── LICENSE ├── README.md ├── assert_test.go ├── bigcache.go ├── bigcache_bench_test.go ├── bigcache_test.go ├── bytes.go ├── bytes_appengine.go ├── clock.go ├── config.go ├── encoding.go ├── encoding_test.go ├── entry_not_found_error.go ├── examples_test.go ├── fnv.go ├── fnv_bench_test.go ├── fnv_test.go ├── go.mod ├── go.sum ├── hash.go ├── hash_test.go ├── iterator.go ├── iterator_test.go ├── logger.go ├── queue ├── bytes_queue.go └── bytes_queue_test.go ├── server ├── README.md ├── cache_handlers.go ├── middleware.go ├── middleware_test.go ├── server.go ├── server_test.go └── stats_handler.go ├── shard.go ├── stats.go └── utils.go /.codecov.yml: -------------------------------------------------------------------------------- 1 | --- 2 | codecov: 3 | require_ci_to_pass: true 4 | comment: 5 | behavior: default 6 | layout: reach, diff, flags, files, footer 7 | require_base: false 8 | require_changes: false 9 | require_head: true 10 | coverage: 11 | precision: 2 12 | range: 13 | - 70 14 | - 100 15 | round: down 16 | status: 17 | changes: false 18 | patch: true 19 | project: true 20 | parsers: 21 | gcov: 22 | branch_detection: 23 | conditional: true 24 | loop: true 25 | macro: false 26 | method: false 27 | javascript: 28 | enable_partials: false 29 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @janisz @cristaloleg 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue_template.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Report a bug about BigCache 4 | labels: bug 5 | --- 6 | 7 | **What is the issue you are having?** 8 | 9 | 10 | 11 | **What is BigCache doing that it shouldn't?** 12 | 13 | 14 | 15 | **Minimal, Complete, and Verifiable Example** 16 | 17 | When asking a question about a problem caused by your code, you will get much better answers if you provide code we can use to reproduce the problem. That code should be... 18 | 19 | * ...Minimal – Use as little code as possible that still produces the same problem 20 | * ...Complete – Provide all parts needed to reproduce the problem 21 | * ...Verifiable – Test the code you're about to provide to make sure it reproduces the problem 22 | 23 | For more information on how to provide an MCVE, please see the [Stack Overflow documentation](https://stackoverflow.com/help/mcve). 24 | 25 | **Environment:** 26 | - Version (git sha or release): 27 | - OS (e.g. 
from `/etc/os-release` or winver.exe): 28 | - go version: 29 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | updates: 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | labels: 9 | - "enhancement" 10 | - package-ecosystem: "gomod" 11 | directory: "/" 12 | schedule: 13 | interval: "weekly" 14 | labels: 15 | - "enhancement" 16 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: "v$NEXT_PATCH_VERSION 🌈" 2 | tag-template: "v$NEXT_PATCH_VERSION" 3 | categories: 4 | - title: "🚀 Features" 5 | labels: 6 | - "feature" 7 | - "enhancement" 8 | - title: "🐛 Bug Fixes" 9 | labels: 10 | - "fix" 11 | - "bugfix" 12 | - "bug" 13 | - title: "🧰 Maintenance" 14 | label: "chore" 15 | change-template: "- $TITLE @$AUTHOR (#$NUMBER)" 16 | template: | 17 | ## Changes 18 | 19 | $CHANGES 20 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: [push, pull_request] 3 | env: 4 | GO111MODULE: on 5 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 6 | jobs: 7 | build: 8 | name: Build 9 | runs-on: ubuntu-latest 10 | strategy: 11 | fail-fast: true 12 | max-parallel: 2 13 | matrix: 14 | go: ["stable", "oldstable"] 15 | 16 | steps: 17 | - name: Set up Go 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version: ${{matrix.go}} 21 | id: go 22 | 23 | - name: Check out code into the Go module directory 24 | uses: actions/checkout@v4 25 | 26 | - name: Lint code 27 | uses: golangci/golangci-lint-action@v7 28 | with: 29 | only-new-issues: true 30 | 31 | - name: Test 32 | run: | 33 | go test -race -count=1 -coverprofile=queue.coverprofile ./queue 34 | go test -race -count=1 -coverprofile=server.coverprofile ./server 35 | go test -race -count=1 -coverprofile=main.coverprofile 36 | 37 | - name: Upload coverage to codecov 38 | run: | 39 | go install github.com/modocache/gover@v0.0.0-20171022184752-b58185e213c5 40 | echo "" > coverage.txt 41 | gover 42 | cat gover.coverprofile >> coverage.txt 43 | bash <(curl -s https://codecov.io/bash) 44 | 45 | - name: Build 46 | run: go build -v . 
47 | -------------------------------------------------------------------------------- /.github/workflows/release-management.yml: -------------------------------------------------------------------------------- 1 | name: Release Management 2 | 3 | on: 4 | push: 5 | # branches to consider in the event; optional, defaults to all 6 | branches: 7 | - main 8 | 9 | jobs: 10 | update_draft_release: 11 | runs-on: ubuntu-latest 12 | steps: 13 | # Drafts your next Release notes as Pull Requests are merged into "main" 14 | - uses: toolmantim/release-drafter@v6.0.0 15 | env: 16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .DS_Store 3 | /server/server.exe 4 | /server/server 5 | /server/server_dar* 6 | /server/server_fre* 7 | /server/server_win* 8 | /server/server_net* 9 | /server/server_ope* 10 | /server/server_lin* 11 | CHANGELOG.md 12 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | disable: 4 | - errcheck 5 | settings: 6 | staticcheck: 7 | checks: 8 | - -SA1019 9 | - all 10 | exclusions: 11 | generated: lax 12 | presets: 13 | - comments 14 | - common-false-positives 15 | - legacy 16 | - std-error-handling 17 | paths: 18 | - third_party$ 19 | - builtin$ 20 | - examples$ 21 | formatters: 22 | exclusions: 23 | generated: lax 24 | paths: 25 | - third_party$ 26 | - builtin$ 27 | - examples$ 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BigCache [![Build Status](https://github.com/allegro/bigcache/workflows/build/badge.svg)](https://github.com/allegro/bigcache/actions?query=workflow%3Abuild) [![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=main)](https://coveralls.io/github/allegro/bigcache?branch=main) [![GoDoc](https://godoc.org/github.com/allegro/bigcache/v3?status.svg)](https://godoc.org/github.com/allegro/bigcache/v3) [![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache/v3)](https://goreportcard.com/report/github.com/allegro/bigcache/v3) 2 | 3 | Fast, concurrent, evicting in-memory cache written to keep big number of entries without impact on performance. 4 | BigCache keeps entries on heap but omits GC for them. To achieve that, operations on byte slices take place, 5 | therefore entries (de)serialization in front of the cache will be needed in most use cases. 6 | 7 | Requires Go 1.12 or newer. 8 | 9 | ## Usage 10 | 11 | ### Simple initialization 12 | 13 | ```go 14 | import ( 15 | "fmt" 16 | "context" 17 | "github.com/allegro/bigcache/v3" 18 | ) 19 | 20 | cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10 * time.Minute)) 21 | 22 | cache.Set("my-unique-key", []byte("value")) 23 | 24 | entry, _ := cache.Get("my-unique-key") 25 | fmt.Println(string(entry)) 26 | ``` 27 | 28 | ### Custom initialization 29 | 30 | When cache load can be predicted in advance then it is better to use custom initialization because additional memory 31 | allocation can be avoided in that way. 32 | 33 | ```go 34 | import ( 35 | "log" 36 | 37 | "github.com/allegro/bigcache/v3" 38 | ) 39 | 40 | config := bigcache.Config { 41 | // number of shards (must be a power of 2) 42 | Shards: 1024, 43 | 44 | // time after which entry can be evicted 45 | LifeWindow: 10 * time.Minute, 46 | 47 | // Interval between removing expired entries (clean up). 48 | // If set to <= 0 then no action is performed. 49 | // Setting to < 1 second is counterproductive — bigcache has a one second resolution. 50 | CleanWindow: 5 * time.Minute, 51 | 52 | // rps * lifeWindow, used only in initial memory allocation 53 | MaxEntriesInWindow: 1000 * 10 * 60, 54 | 55 | // max entry size in bytes, used only in initial memory allocation 56 | MaxEntrySize: 500, 57 | 58 | // prints information about additional memory allocation 59 | Verbose: true, 60 | 61 | // cache will not allocate more memory than this limit, value in MB 62 | // if value is reached then the oldest entries can be overridden for the new ones 63 | // 0 value means no size limit 64 | HardMaxCacheSize: 8192, 65 | 66 | // callback fired when the oldest entry is removed because of its expiration time or no space left 67 | // for the new entry, or because delete was called. A bitmask representing the reason will be returned. 68 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 69 | OnRemove: nil, 70 | 71 | // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left 72 | // for the new entry, or because delete was called. A constant representing the reason will be passed through. 73 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 74 | // Ignored if OnRemove is specified. 
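		// An illustrative sketch (added here, not part of the original comment) of such a
		// callback; reason is one of the package's RemoveReason constants
		// (Expired, NoSpace, Deleted):
		//
		//   OnRemoveWithReason: func(key string, entry []byte, reason bigcache.RemoveReason) {
		//       log.Printf("removed %q (%d bytes), reason=%d", key, len(entry), reason)
		//   },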
75 | OnRemoveWithReason: nil, 76 | } 77 | 78 | cache, initErr := bigcache.New(context.Background(), config) 79 | if initErr != nil { 80 | log.Fatal(initErr) 81 | } 82 | 83 | cache.Set("my-unique-key", []byte("value")) 84 | 85 | if entry, err := cache.Get("my-unique-key"); err == nil { 86 | fmt.Println(string(entry)) 87 | } 88 | ``` 89 | 90 | ### `LifeWindow` & `CleanWindow` 91 | 92 | 1. `LifeWindow` is a time. After that time, an entry can be called dead but not deleted. 93 | 94 | 2. `CleanWindow` is a time. After that time, all the dead entries will be deleted, but not the entries that still have life. 95 | 96 | ## [Benchmarks](https://github.com/allegro/bigcache-bench) 97 | 98 | Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map. 99 | Benchmark tests were made using an 100 | i7-6700K CPU @ 4.00GHz with 32GB of RAM on Ubuntu 18.04 LTS (5.2.12-050212-generic). 101 | 102 | Benchmarks source code can be found [here](https://github.com/allegro/bigcache-bench) 103 | 104 | ### Writes and reads 105 | 106 | ```bash 107 | go version 108 | go version go1.13 linux/amd64 109 | 110 | go test -bench=. -benchmem -benchtime=4s ./... -timeout 30m 111 | goos: linux 112 | goarch: amd64 113 | pkg: github.com/allegro/bigcache/v3/caches_bench 114 | BenchmarkMapSet-8 12999889 376 ns/op 199 B/op 3 allocs/op 115 | BenchmarkConcurrentMapSet-8 4355726 1275 ns/op 337 B/op 8 allocs/op 116 | BenchmarkFreeCacheSet-8 11068976 703 ns/op 328 B/op 2 allocs/op 117 | BenchmarkBigCacheSet-8 10183717 478 ns/op 304 B/op 2 allocs/op 118 | BenchmarkMapGet-8 16536015 324 ns/op 23 B/op 1 allocs/op 119 | BenchmarkConcurrentMapGet-8 13165708 401 ns/op 24 B/op 2 allocs/op 120 | BenchmarkFreeCacheGet-8 10137682 690 ns/op 136 B/op 2 allocs/op 121 | BenchmarkBigCacheGet-8 11423854 450 ns/op 152 B/op 4 allocs/op 122 | BenchmarkBigCacheSetParallel-8 34233472 148 ns/op 317 B/op 3 allocs/op 123 | BenchmarkFreeCacheSetParallel-8 34222654 268 ns/op 350 B/op 3 allocs/op 124 | BenchmarkConcurrentMapSetParallel-8 19635688 240 ns/op 200 B/op 6 allocs/op 125 | BenchmarkBigCacheGetParallel-8 60547064 86.1 ns/op 152 B/op 4 allocs/op 126 | BenchmarkFreeCacheGetParallel-8 50701280 147 ns/op 136 B/op 3 allocs/op 127 | BenchmarkConcurrentMapGetParallel-8 27353288 175 ns/op 24 B/op 2 allocs/op 128 | PASS 129 | ok github.com/allegro/bigcache/v3/caches_bench 256.257s 130 | ``` 131 | 132 | Writes and reads in bigcache are faster than in freecache. 133 | Writes to map are the slowest. 134 | 135 | ### GC pause time 136 | 137 | ```bash 138 | go version 139 | go version go1.13 linux/amd64 140 | 141 | go run caches_gc_overhead_comparison.go 142 | 143 | Number of entries: 20000000 144 | GC pause for bigcache: 1.506077ms 145 | GC pause for freecache: 5.594416ms 146 | GC pause for map: 9.347015ms 147 | ``` 148 | 149 | ``` 150 | go version 151 | go version go1.13 linux/arm64 152 | 153 | go run caches_gc_overhead_comparison.go 154 | Number of entries: 20000000 155 | GC pause for bigcache: 22.382827ms 156 | GC pause for freecache: 41.264651ms 157 | GC pause for map: 72.236853ms 158 | ``` 159 | 160 | Test shows how long are the GC pauses for caches filled with 20mln of entries. 161 | Bigcache and freecache have very similar GC pause time. 162 | 163 | ### Memory usage 164 | 165 | You may encounter system memory reporting what appears to be an exponential increase, however this is expected behaviour. 
The Go runtime allocates memory in chunks or 'spans' and informs the OS when they are no longer required by changing their state to 'idle'. The 'spans' remain part of the process's resource usage until the OS needs to repurpose the address. Further reading is available [here](https://utcc.utoronto.ca/~cks/space/blog/programming/GoNoMemoryFreeing). 166 | 167 | ## How it works 168 | 169 | BigCache relies on an optimization introduced in Go 1.5 ([issue-9477](https://github.com/golang/go/issues/9477)): 170 | if a map contains no pointers in its keys and values, the GC omits scanning its contents. 171 | Therefore BigCache uses a `map[uint64]uint32`, where keys are hashed and values are offsets of entries. 172 | 173 | Entries are kept in byte slices, again to avoid GC work. 174 | A byte slice can grow to gigabytes without impacting performance 175 | because the GC sees only a single pointer to it. 176 | 177 | ### Collisions 178 | 179 | BigCache does not handle collisions. When a new item is inserted and its hash collides with a previously stored item, the new item overwrites the previously stored value. 180 | 181 | ## Bigcache vs Freecache 182 | 183 | Both caches provide the same core features, but they reduce GC overhead in different ways: 184 | bigcache relies on `map[uint64]uint32`, while freecache implements its own mapping built on 185 | slices to reduce the number of pointers. 186 | 187 | Results from benchmark tests are presented above. 188 | One advantage of bigcache over freecache is that you don’t need to know 189 | the size of the cache in advance: when bigcache is full, 190 | it can allocate additional memory for new entries instead of 191 | overwriting existing ones, as freecache currently does. 192 | However, a hard maximum size can also be set in bigcache; see [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config). 193 | 194 | ## HTTP Server 195 | 196 | This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package. 197 | 198 | ## More 199 | 200 | BigCache's genesis is described in the allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html) 201 | 202 | ## License 203 | 204 | BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE)) 205 | -------------------------------------------------------------------------------- /assert_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "path" 7 | "reflect" 8 | "runtime" 9 | "testing" 10 | ) 11 | 12 | func assertEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { 13 | if !objectsAreEqual(expected, actual) { 14 | _, file, line, _ := runtime.Caller(1) 15 | file = path.Base(file) 16 | t.Errorf(fmt.Sprintf("\n%s:%d: Not equal: \n"+ 17 | "expected: %T(%#v)\n"+ 18 | "actual : %T(%#v)\n", 19 | file, line, expected, expected, actual, actual), msgAndArgs...)
20 | } 21 | } 22 | 23 | func noError(t *testing.T, e error) { 24 | if e != nil { 25 | _, file, line, _ := runtime.Caller(1) 26 | file = path.Base(file) 27 | t.Errorf(fmt.Sprintf("\n%s:%d: Error is not nil: \n"+ 28 | "actual : %T(%#v)\n", file, line, e, e)) 29 | } 30 | } 31 | 32 | func objectsAreEqual(expected, actual interface{}) bool { 33 | if expected == nil || actual == nil { 34 | return expected == actual 35 | } 36 | 37 | exp, ok := expected.([]byte) 38 | if !ok { 39 | return reflect.DeepEqual(expected, actual) 40 | } 41 | 42 | act, ok := actual.([]byte) 43 | if !ok { 44 | return false 45 | } 46 | if exp == nil || act == nil { 47 | return exp == nil && act == nil 48 | } 49 | return bytes.Equal(exp, act) 50 | } 51 | -------------------------------------------------------------------------------- /bigcache.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "time" 7 | ) 8 | 9 | const ( 10 | minimumEntriesInShard = 10 // Minimum number of entries in single shard 11 | ) 12 | 13 | // BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance. 14 | // It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays, 15 | // therefore entries (de)serialization in front of the cache will be needed in most use cases. 16 | type BigCache struct { 17 | shards []*cacheShard 18 | lifeWindow uint64 19 | clock clock 20 | hash Hasher 21 | config Config 22 | shardMask uint64 23 | close chan struct{} 24 | } 25 | 26 | // Response will contain metadata about the entry for which GetWithInfo(key) was called 27 | type Response struct { 28 | EntryStatus RemoveReason 29 | } 30 | 31 | // RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback. 32 | type RemoveReason uint32 33 | 34 | const ( 35 | // Expired means the key is past its LifeWindow. 36 | Expired = RemoveReason(1) 37 | // NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the 38 | // entry exceeded the maximum shard size. 39 | NoSpace = RemoveReason(2) 40 | // Deleted means Delete was called and this key was removed as a result. 
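	//
	// Hedged usage sketch (not from the original source): a caller-supplied
	// OnRemoveWithReason callback can branch on these constants, e.g.
	//
	//	switch reason {
	//	case Expired:
	//		// entry outlived its LifeWindow
	//	case NoSpace:
	//		// entry was evicted to make room for a new one
	//	case Deleted:
	//		// entry was removed by an explicit Delete call
	//	}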
41 | Deleted = RemoveReason(3) 42 | ) 43 | 44 | // New initialize new instance of BigCache 45 | func New(ctx context.Context, config Config) (*BigCache, error) { 46 | return newBigCache(ctx, config, &systemClock{}) 47 | } 48 | 49 | // NewBigCache initialize new instance of BigCache 50 | // 51 | // Deprecated: NewBigCache is deprecated, please use New(ctx, config) instead, 52 | // New takes in context and can gracefully 53 | // shutdown with context cancellations 54 | func NewBigCache(config Config) (*BigCache, error) { 55 | return newBigCache(context.Background(), config, &systemClock{}) 56 | } 57 | 58 | func newBigCache(ctx context.Context, config Config, clock clock) (*BigCache, error) { 59 | if !isPowerOfTwo(config.Shards) { 60 | return nil, errors.New("Shards number must be power of two") 61 | } 62 | if config.MaxEntrySize < 0 { 63 | return nil, errors.New("MaxEntrySize must be >= 0") 64 | } 65 | if config.MaxEntriesInWindow < 0 { 66 | return nil, errors.New("MaxEntriesInWindow must be >= 0") 67 | } 68 | if config.HardMaxCacheSize < 0 { 69 | return nil, errors.New("HardMaxCacheSize must be >= 0") 70 | } 71 | 72 | lifeWindowSeconds := uint64(config.LifeWindow.Seconds()) 73 | if config.CleanWindow > 0 && lifeWindowSeconds == 0 { 74 | return nil, errors.New("LifeWindow must be >= 1s when CleanWindow is set") 75 | } 76 | 77 | if config.Hasher == nil { 78 | config.Hasher = newDefaultHasher() 79 | } 80 | 81 | cache := &BigCache{ 82 | shards: make([]*cacheShard, config.Shards), 83 | lifeWindow: lifeWindowSeconds, 84 | clock: clock, 85 | hash: config.Hasher, 86 | config: config, 87 | shardMask: uint64(config.Shards - 1), 88 | close: make(chan struct{}), 89 | } 90 | 91 | var onRemove func(wrappedEntry []byte, reason RemoveReason) 92 | if config.OnRemoveWithMetadata != nil { 93 | onRemove = cache.providedOnRemoveWithMetadata 94 | } else if config.OnRemove != nil { 95 | onRemove = cache.providedOnRemove 96 | } else if config.OnRemoveWithReason != nil { 97 | onRemove = cache.providedOnRemoveWithReason 98 | } else { 99 | onRemove = cache.notProvidedOnRemove 100 | } 101 | 102 | for i := 0; i < config.Shards; i++ { 103 | cache.shards[i] = initNewShard(config, onRemove, clock) 104 | } 105 | 106 | if config.CleanWindow > 0 { 107 | go func() { 108 | ticker := time.NewTicker(config.CleanWindow) 109 | defer ticker.Stop() 110 | for { 111 | select { 112 | case <-ctx.Done(): 113 | return 114 | case t := <-ticker.C: 115 | cache.cleanUp(uint64(t.Unix())) 116 | case <-cache.close: 117 | return 118 | } 119 | } 120 | }() 121 | } 122 | 123 | return cache, nil 124 | } 125 | 126 | // Close is used to signal a shutdown of the cache when you are done with it. 127 | // This allows the cleaning goroutines to exit and ensures references are not 128 | // kept to the cache preventing GC of the entire cache. 129 | func (c *BigCache) Close() error { 130 | close(c.close) 131 | return nil 132 | } 133 | 134 | // Get reads entry for the key. 135 | // It returns an ErrEntryNotFound when 136 | // no entry exists for the given key. 137 | func (c *BigCache) Get(key string) ([]byte, error) { 138 | hashedKey := c.hash.Sum64(key) 139 | shard := c.getShard(hashedKey) 140 | return shard.get(key, hashedKey) 141 | } 142 | 143 | // GetWithInfo reads entry for the key with Response info. 144 | // It returns an ErrEntryNotFound when 145 | // no entry exists for the given key. 
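//
// Illustrative usage sketch (the key name is hypothetical; the Expired status
// assumes the entry is past its LifeWindow but has not been cleaned up yet):
//
//	entry, resp, err := cache.GetWithInfo("my-key")
//	if err != nil {
//		// e.g. ErrEntryNotFound
//	} else if resp.EntryStatus == Expired {
//		// value was returned but is already older than LifeWindow
//	} else {
//		_ = entry // use the value
//	}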
146 | func (c *BigCache) GetWithInfo(key string) ([]byte, Response, error) { 147 | hashedKey := c.hash.Sum64(key) 148 | shard := c.getShard(hashedKey) 149 | return shard.getWithInfo(key, hashedKey) 150 | } 151 | 152 | // Set saves entry under the key 153 | func (c *BigCache) Set(key string, entry []byte) error { 154 | hashedKey := c.hash.Sum64(key) 155 | shard := c.getShard(hashedKey) 156 | return shard.set(key, hashedKey, entry) 157 | } 158 | 159 | // Append appends entry under the key if key exists, otherwise 160 | // it will set the key (same behaviour as Set()). With Append() you can 161 | // concatenate multiple entries under the same key in a lock-optimized way. 162 | func (c *BigCache) Append(key string, entry []byte) error { 163 | hashedKey := c.hash.Sum64(key) 164 | shard := c.getShard(hashedKey) 165 | return shard.append(key, hashedKey, entry) 166 | } 167 | 168 | // Delete removes the key 169 | func (c *BigCache) Delete(key string) error { 170 | hashedKey := c.hash.Sum64(key) 171 | shard := c.getShard(hashedKey) 172 | return shard.del(hashedKey) 173 | } 174 | 175 | // Reset empties all cache shards 176 | func (c *BigCache) Reset() error { 177 | for _, shard := range c.shards { 178 | shard.reset(c.config) 179 | } 180 | return nil 181 | } 182 | 183 | // ResetStats resets cache stats 184 | func (c *BigCache) ResetStats() error { 185 | for _, shard := range c.shards { 186 | shard.resetStats() 187 | } 188 | return nil 189 | } 190 | 191 | // Len computes the number of entries in the cache. 192 | func (c *BigCache) Len() int { 193 | var len int 194 | for _, shard := range c.shards { 195 | len += shard.len() 196 | } 197 | return len 198 | } 199 | 200 | // Capacity returns the amount of bytes stored in the cache. 201 | func (c *BigCache) Capacity() int { 202 | var len int 203 | for _, shard := range c.shards { 204 | len += shard.capacity() 205 | } 206 | return len 207 | } 208 | 209 | // Stats returns cache's statistics 210 | func (c *BigCache) Stats() Stats { 211 | var s Stats 212 | for _, shard := range c.shards { 213 | tmp := shard.getStats() 214 | s.Hits += tmp.Hits 215 | s.Misses += tmp.Misses 216 | s.DelHits += tmp.DelHits 217 | s.DelMisses += tmp.DelMisses 218 | s.Collisions += tmp.Collisions 219 | } 220 | return s 221 | } 222 | 223 | // KeyMetadata returns number of times a cached resource was requested. 224 | func (c *BigCache) KeyMetadata(key string) Metadata { 225 | hashedKey := c.hash.Sum64(key) 226 | shard := c.getShard(hashedKey) 227 | return shard.getKeyMetadataWithLock(hashedKey) 228 | } 229 | 230 | // Iterator returns iterator function to iterate over EntryInfo's from whole cache. 
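//
// Illustrative usage sketch (assumes EntryInfo exposes Key/Value accessors as
// implemented in iterator.go):
//
//	it := cache.Iterator()
//	for it.SetNext() {
//		info, err := it.Value()
//		if err != nil {
//			break
//		}
//		_, _ = info.Key(), info.Value()
//	}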
231 | func (c *BigCache) Iterator() *EntryInfoIterator { 232 | return newIterator(c) 233 | } 234 | 235 | func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool { 236 | oldestTimestamp := readTimestampFromEntry(oldestEntry) 237 | if currentTimestamp < oldestTimestamp { 238 | return false 239 | } 240 | if currentTimestamp-oldestTimestamp > c.lifeWindow { 241 | evict(Expired) 242 | return true 243 | } 244 | return false 245 | } 246 | 247 | func (c *BigCache) cleanUp(currentTimestamp uint64) { 248 | for _, shard := range c.shards { 249 | shard.cleanUp(currentTimestamp) 250 | } 251 | } 252 | 253 | func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) { 254 | return c.shards[hashedKey&c.shardMask] 255 | } 256 | 257 | func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) { 258 | c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry)) 259 | } 260 | 261 | func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) { 262 | if c.config.onRemoveFilter == 0 || (1< 0 { 263 | c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason) 264 | } 265 | } 266 | 267 | func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) { 268 | } 269 | 270 | func (c *BigCache) providedOnRemoveWithMetadata(wrappedEntry []byte, reason RemoveReason) { 271 | key := readKeyFromEntry(wrappedEntry) 272 | 273 | hashedKey := c.hash.Sum64(key) 274 | shard := c.getShard(hashedKey) 275 | c.config.OnRemoveWithMetadata(key, readEntry(wrappedEntry), shard.getKeyMetadata(hashedKey)) 276 | } 277 | -------------------------------------------------------------------------------- /bigcache_bench_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "strconv" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | var message = blob('a', 256) 13 | 14 | func BenchmarkWriteToCacheWith1Shard(b *testing.B) { 15 | writeToCache(b, 1, 100*time.Second, b.N) 16 | } 17 | 18 | func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) { 19 | m := blob('a', 1024) 20 | cache, _ := New(context.Background(), Config{ 21 | Shards: 1, 22 | LifeWindow: 100 * time.Second, 23 | MaxEntriesInWindow: 100, 24 | MaxEntrySize: 256, 25 | HardMaxCacheSize: 1, 26 | }) 27 | 28 | b.ReportAllocs() 29 | for i := 0; i < b.N; i++ { 30 | cache.Set(fmt.Sprintf("key-%d", i), m) 31 | } 32 | } 33 | 34 | func BenchmarkWriteToUnlimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) { 35 | m := blob('a', 1024) 36 | cache, _ := New(context.Background(), Config{ 37 | Shards: 1, 38 | LifeWindow: 100 * time.Second, 39 | MaxEntriesInWindow: 100, 40 | MaxEntrySize: 256, 41 | }) 42 | 43 | b.ReportAllocs() 44 | for i := 0; i < b.N; i++ { 45 | cache.Set(fmt.Sprintf("key-%d", i), m) 46 | } 47 | } 48 | 49 | func BenchmarkWriteToCache(b *testing.B) { 50 | for _, shards := range []int{1, 512, 1024, 8192} { 51 | b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) { 52 | writeToCache(b, shards, 100*time.Second, b.N) 53 | }) 54 | } 55 | } 56 | func BenchmarkAppendToCache(b *testing.B) { 57 | for _, shards := range []int{1, 512, 1024, 8192} { 58 | b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) { 59 | appendToCache(b, shards, 100*time.Second, b.N) 60 | }) 61 | } 62 | } 63 | 64 | func BenchmarkReadFromCache(b *testing.B) { 65 | for _, shards := range []int{1, 512, 1024, 
8192} { 66 | b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) { 67 | readFromCache(b, shards, false) 68 | }) 69 | } 70 | } 71 | 72 | func BenchmarkReadFromCacheWithInfo(b *testing.B) { 73 | for _, shards := range []int{1, 512, 1024, 8192} { 74 | b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) { 75 | readFromCache(b, shards, true) 76 | }) 77 | } 78 | } 79 | func BenchmarkIterateOverCache(b *testing.B) { 80 | 81 | m := blob('a', 1) 82 | 83 | for _, shards := range []int{512, 1024, 8192} { 84 | b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) { 85 | cache, _ := New(context.Background(), Config{ 86 | Shards: shards, 87 | LifeWindow: 1000 * time.Second, 88 | MaxEntriesInWindow: max(b.N, 100), 89 | MaxEntrySize: 500, 90 | }) 91 | 92 | for i := 0; i < b.N; i++ { 93 | cache.Set(fmt.Sprintf("key-%d", i), m) 94 | } 95 | 96 | b.ResetTimer() 97 | it := cache.Iterator() 98 | 99 | b.RunParallel(func(pb *testing.PB) { 100 | b.ReportAllocs() 101 | 102 | for pb.Next() { 103 | if it.SetNext() { 104 | it.Value() 105 | } 106 | } 107 | }) 108 | }) 109 | } 110 | } 111 | 112 | func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) { 113 | writeToCache(b, 1024, 100*time.Second, 100) 114 | } 115 | 116 | func BenchmarkReadFromCacheNonExistentKeys(b *testing.B) { 117 | for _, shards := range []int{1, 512, 1024, 8192} { 118 | b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) { 119 | readFromCacheNonExistentKeys(b, 1024) 120 | }) 121 | } 122 | } 123 | 124 | func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) { 125 | cache, _ := New(context.Background(), Config{ 126 | Shards: shards, 127 | LifeWindow: lifeWindow, 128 | MaxEntriesInWindow: max(requestsInLifeWindow, 100), 129 | MaxEntrySize: 500, 130 | }) 131 | rand.Seed(time.Now().Unix()) 132 | 133 | b.RunParallel(func(pb *testing.PB) { 134 | id := rand.Int() 135 | counter := 0 136 | 137 | b.ReportAllocs() 138 | for pb.Next() { 139 | cache.Set(fmt.Sprintf("key-%d-%d", id, counter), message) 140 | counter = counter + 1 141 | } 142 | }) 143 | } 144 | 145 | func appendToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) { 146 | cache, _ := New(context.Background(), Config{ 147 | Shards: shards, 148 | LifeWindow: lifeWindow, 149 | MaxEntriesInWindow: max(requestsInLifeWindow, 100), 150 | MaxEntrySize: 2000, 151 | }) 152 | rand.Seed(time.Now().Unix()) 153 | 154 | b.RunParallel(func(pb *testing.PB) { 155 | id := rand.Int() 156 | counter := 0 157 | 158 | b.ReportAllocs() 159 | for pb.Next() { 160 | key := fmt.Sprintf("key-%d-%d", id, counter) 161 | for j := 0; j < 7; j++ { 162 | cache.Append(key, message) 163 | } 164 | counter = counter + 1 165 | } 166 | }) 167 | } 168 | 169 | func readFromCache(b *testing.B, shards int, info bool) { 170 | cache, _ := New(context.Background(), Config{ 171 | Shards: shards, 172 | LifeWindow: 1000 * time.Second, 173 | MaxEntriesInWindow: max(b.N, 100), 174 | MaxEntrySize: 500, 175 | }) 176 | for i := 0; i < b.N; i++ { 177 | cache.Set(strconv.Itoa(i), message) 178 | } 179 | b.ResetTimer() 180 | 181 | b.RunParallel(func(pb *testing.PB) { 182 | b.ReportAllocs() 183 | 184 | for pb.Next() { 185 | if info { 186 | cache.GetWithInfo(strconv.Itoa(rand.Intn(b.N))) 187 | } else { 188 | cache.Get(strconv.Itoa(rand.Intn(b.N))) 189 | } 190 | } 191 | }) 192 | } 193 | 194 | func readFromCacheNonExistentKeys(b *testing.B, shards int) { 195 | cache, _ := New(context.Background(), Config{ 196 | Shards: shards, 197 | LifeWindow: 
1000 * time.Second, 198 | MaxEntriesInWindow: max(b.N, 100), 199 | MaxEntrySize: 500, 200 | }) 201 | b.ResetTimer() 202 | 203 | b.RunParallel(func(pb *testing.PB) { 204 | b.ReportAllocs() 205 | 206 | for pb.Next() { 207 | cache.Get(strconv.Itoa(rand.Intn(b.N))) 208 | } 209 | }) 210 | } 211 | -------------------------------------------------------------------------------- /bigcache_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "math" 8 | "math/rand" 9 | "runtime" 10 | "strings" 11 | "sync" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | func TestWriteAndGetOnCache(t *testing.T) { 17 | t.Parallel() 18 | 19 | // given 20 | cache, _ := New(context.Background(), DefaultConfig(5*time.Second)) 21 | value := []byte("value") 22 | 23 | // when 24 | cache.Set("key", value) 25 | cachedValue, err := cache.Get("key") 26 | 27 | // then 28 | noError(t, err) 29 | assertEqual(t, value, cachedValue) 30 | } 31 | 32 | func TestAppendAndGetOnCache(t *testing.T) { 33 | t.Parallel() 34 | 35 | // given 36 | cache, _ := New(context.Background(), DefaultConfig(5*time.Second)) 37 | key := "key" 38 | value1 := make([]byte, 50) 39 | rand.Read(value1) 40 | value2 := make([]byte, 50) 41 | rand.Read(value2) 42 | value3 := make([]byte, 50) 43 | rand.Read(value3) 44 | 45 | // when 46 | _, err := cache.Get(key) 47 | 48 | // then 49 | assertEqual(t, ErrEntryNotFound, err) 50 | 51 | // when 52 | cache.Append(key, value1) 53 | cachedValue, err := cache.Get(key) 54 | 55 | // then 56 | noError(t, err) 57 | assertEqual(t, value1, cachedValue) 58 | 59 | // when 60 | cache.Append(key, value2) 61 | cachedValue, err = cache.Get(key) 62 | 63 | // then 64 | noError(t, err) 65 | expectedValue := value1 66 | expectedValue = append(expectedValue, value2...) 67 | assertEqual(t, expectedValue, cachedValue) 68 | 69 | // when 70 | cache.Append(key, value3) 71 | cachedValue, err = cache.Get(key) 72 | 73 | // then 74 | noError(t, err) 75 | expectedValue = value1 76 | expectedValue = append(expectedValue, value2...) 77 | expectedValue = append(expectedValue, value3...) 78 | assertEqual(t, expectedValue, cachedValue) 79 | } 80 | 81 | // TestAppendRandomly does simultaneous appends to check for corruption errors. 
82 | func TestAppendRandomly(t *testing.T) { 83 | t.Parallel() 84 | 85 | c := Config{ 86 | Shards: 1, 87 | LifeWindow: 5 * time.Second, 88 | CleanWindow: 1 * time.Second, 89 | MaxEntriesInWindow: 1000 * 10 * 60, 90 | MaxEntrySize: 500, 91 | StatsEnabled: true, 92 | Verbose: true, 93 | Hasher: newDefaultHasher(), 94 | HardMaxCacheSize: 1, 95 | Logger: DefaultLogger(), 96 | } 97 | cache, err := New(context.Background(), c) 98 | noError(t, err) 99 | 100 | nKeys := 5 101 | nAppendsPerKey := 2000 102 | nWorker := 10 103 | var keys []string 104 | for i := 0; i < nKeys; i++ { 105 | for j := 0; j < nAppendsPerKey; j++ { 106 | keys = append(keys, fmt.Sprintf("key%d", i)) 107 | } 108 | } 109 | rand.Shuffle(len(keys), func(i, j int) { 110 | keys[i], keys[j] = keys[j], keys[i] 111 | }) 112 | 113 | jobs := make(chan string, len(keys)) 114 | for _, key := range keys { 115 | jobs <- key 116 | } 117 | close(jobs) 118 | 119 | var wg sync.WaitGroup 120 | for i := 0; i < nWorker; i++ { 121 | wg.Add(1) 122 | go func() { 123 | for { 124 | key, ok := <-jobs 125 | if !ok { 126 | break 127 | } 128 | cache.Append(key, []byte(key)) 129 | } 130 | wg.Done() 131 | }() 132 | } 133 | wg.Wait() 134 | 135 | assertEqual(t, nKeys, cache.Len()) 136 | for i := 0; i < nKeys; i++ { 137 | key := fmt.Sprintf("key%d", i) 138 | expectedValue := []byte(strings.Repeat(key, nAppendsPerKey)) 139 | cachedValue, err := cache.Get(key) 140 | noError(t, err) 141 | assertEqual(t, expectedValue, cachedValue) 142 | } 143 | } 144 | 145 | func TestAppendCollision(t *testing.T) { 146 | t.Parallel() 147 | 148 | // given 149 | cache, _ := New(context.Background(), Config{ 150 | Shards: 1, 151 | LifeWindow: 5 * time.Second, 152 | MaxEntriesInWindow: 10, 153 | MaxEntrySize: 256, 154 | Verbose: true, 155 | Hasher: hashStub(5), 156 | }) 157 | 158 | //when 159 | cache.Append("a", []byte("1")) 160 | cachedValue, err := cache.Get("a") 161 | 162 | //then 163 | noError(t, err) 164 | assertEqual(t, []byte("1"), cachedValue) 165 | 166 | // when 167 | err = cache.Append("b", []byte("2")) 168 | 169 | // then 170 | noError(t, err) 171 | assertEqual(t, cache.Stats().Collisions, int64(1)) 172 | cachedValue, err = cache.Get("b") 173 | noError(t, err) 174 | assertEqual(t, []byte("2"), cachedValue) 175 | 176 | } 177 | 178 | func TestConstructCacheWithDefaultHasher(t *testing.T) { 179 | t.Parallel() 180 | 181 | // given 182 | cache, _ := New(context.Background(), Config{ 183 | Shards: 16, 184 | LifeWindow: 5 * time.Second, 185 | MaxEntriesInWindow: 10, 186 | MaxEntrySize: 256, 187 | }) 188 | 189 | _, ok := cache.hash.(fnv64a) 190 | assertEqual(t, true, ok) 191 | } 192 | 193 | func TestNewBigcacheValidation(t *testing.T) { 194 | t.Parallel() 195 | 196 | for _, tc := range []struct { 197 | cfg Config 198 | want string 199 | }{ 200 | { 201 | cfg: Config{Shards: 18}, 202 | want: "Shards number must be power of two", 203 | }, 204 | { 205 | cfg: Config{Shards: 16, MaxEntriesInWindow: -1}, 206 | want: "MaxEntriesInWindow must be >= 0", 207 | }, 208 | { 209 | cfg: Config{Shards: 16, MaxEntrySize: -1}, 210 | want: "MaxEntrySize must be >= 0", 211 | }, 212 | { 213 | cfg: Config{Shards: 16, HardMaxCacheSize: -1}, 214 | want: "HardMaxCacheSize must be >= 0", 215 | }, 216 | } { 217 | t.Run(tc.want, func(t *testing.T) { 218 | cache, error := New(context.Background(), tc.cfg) 219 | 220 | assertEqual(t, (*BigCache)(nil), cache) 221 | assertEqual(t, tc.want, error.Error()) 222 | }) 223 | } 224 | } 225 | 226 | func TestEntryNotFound(t *testing.T) { 227 | t.Parallel() 228 | 229 | // 
given 230 | cache, _ := New(context.Background(), Config{ 231 | Shards: 16, 232 | LifeWindow: 5 * time.Second, 233 | MaxEntriesInWindow: 10, 234 | MaxEntrySize: 256, 235 | }) 236 | 237 | // when 238 | _, err := cache.Get("nonExistingKey") 239 | 240 | // then 241 | assertEqual(t, ErrEntryNotFound, err) 242 | } 243 | 244 | func TestTimingEviction(t *testing.T) { 245 | t.Parallel() 246 | 247 | // given 248 | clock := mockedClock{value: 0} 249 | cache, _ := newBigCache(context.Background(), Config{ 250 | Shards: 1, 251 | LifeWindow: time.Second, 252 | MaxEntriesInWindow: 1, 253 | MaxEntrySize: 256, 254 | }, &clock) 255 | 256 | cache.Set("key", []byte("value")) 257 | 258 | // when 259 | clock.set(1) 260 | cache.Set("key2", []byte("value2")) 261 | _, err := cache.Get("key") 262 | 263 | // then 264 | noError(t, err) 265 | 266 | // when 267 | clock.set(5) 268 | cache.Set("key2", []byte("value2")) 269 | _, err = cache.Get("key") 270 | 271 | // then 272 | assertEqual(t, ErrEntryNotFound, err) 273 | } 274 | 275 | func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) { 276 | t.Parallel() 277 | 278 | // given 279 | clock := mockedClock{value: 0} 280 | cache, _ := newBigCache(context.Background(), Config{ 281 | Shards: 4, 282 | LifeWindow: time.Second, 283 | MaxEntriesInWindow: 1, 284 | MaxEntrySize: 256, 285 | }, &clock) 286 | 287 | // when 288 | cache.Set("key", []byte("value")) 289 | clock.set(5) 290 | cache.Set("key2", []byte("value 2")) 291 | value, err := cache.Get("key") 292 | 293 | // then 294 | noError(t, err) 295 | assertEqual(t, []byte("value"), value) 296 | } 297 | 298 | func TestCleanShouldEvictAll(t *testing.T) { 299 | t.Parallel() 300 | 301 | // given 302 | cache, _ := New(context.Background(), Config{ 303 | Shards: 4, 304 | LifeWindow: time.Second, 305 | CleanWindow: time.Second, 306 | MaxEntriesInWindow: 1, 307 | MaxEntrySize: 256, 308 | }) 309 | 310 | // when 311 | cache.Set("key", []byte("value")) 312 | <-time.After(3 * time.Second) 313 | value, err := cache.Get("key") 314 | 315 | // then 316 | assertEqual(t, ErrEntryNotFound, err) 317 | assertEqual(t, value, []byte(nil)) 318 | } 319 | 320 | func TestOnRemoveCallback(t *testing.T) { 321 | t.Parallel() 322 | 323 | // given 324 | clock := mockedClock{value: 0} 325 | onRemoveInvoked := false 326 | onRemoveExtInvoked := false 327 | onRemove := func(key string, entry []byte) { 328 | onRemoveInvoked = true 329 | assertEqual(t, "key", key) 330 | assertEqual(t, []byte("value"), entry) 331 | } 332 | onRemoveExt := func(key string, entry []byte, reason RemoveReason) { 333 | onRemoveExtInvoked = true 334 | } 335 | cache, _ := newBigCache(context.Background(), Config{ 336 | Shards: 1, 337 | LifeWindow: time.Second, 338 | MaxEntriesInWindow: 1, 339 | MaxEntrySize: 256, 340 | OnRemove: onRemove, 341 | OnRemoveWithReason: onRemoveExt, 342 | }, &clock) 343 | 344 | // when 345 | cache.Set("key", []byte("value")) 346 | clock.set(5) 347 | cache.Set("key2", []byte("value2")) 348 | 349 | // then 350 | assertEqual(t, true, onRemoveInvoked) 351 | assertEqual(t, false, onRemoveExtInvoked) 352 | } 353 | 354 | func TestOnRemoveWithReasonCallback(t *testing.T) { 355 | t.Parallel() 356 | 357 | // given 358 | clock := mockedClock{value: 0} 359 | onRemoveInvoked := false 360 | onRemove := func(key string, entry []byte, reason RemoveReason) { 361 | onRemoveInvoked = true 362 | assertEqual(t, "key", key) 363 | assertEqual(t, []byte("value"), entry) 364 | assertEqual(t, reason, RemoveReason(Expired)) 365 | } 366 | cache, _ := 
newBigCache(context.Background(), Config{ 367 | Shards: 1, 368 | LifeWindow: time.Second, 369 | MaxEntriesInWindow: 1, 370 | MaxEntrySize: 256, 371 | OnRemoveWithReason: onRemove, 372 | }, &clock) 373 | 374 | // when 375 | cache.Set("key", []byte("value")) 376 | clock.set(5) 377 | cache.Set("key2", []byte("value2")) 378 | 379 | // then 380 | assertEqual(t, true, onRemoveInvoked) 381 | } 382 | 383 | func TestOnRemoveFilter(t *testing.T) { 384 | t.Parallel() 385 | 386 | // given 387 | clock := mockedClock{value: 0} 388 | onRemoveInvoked := false 389 | onRemove := func(key string, entry []byte, reason RemoveReason) { 390 | onRemoveInvoked = true 391 | } 392 | c := Config{ 393 | Shards: 1, 394 | LifeWindow: time.Second, 395 | MaxEntriesInWindow: 1, 396 | MaxEntrySize: 256, 397 | OnRemoveWithReason: onRemove, 398 | }.OnRemoveFilterSet(Deleted, NoSpace) 399 | 400 | cache, _ := newBigCache(context.Background(), c, &clock) 401 | 402 | // when 403 | cache.Set("key", []byte("value")) 404 | clock.set(5) 405 | cache.Set("key2", []byte("value2")) 406 | 407 | // then 408 | assertEqual(t, false, onRemoveInvoked) 409 | 410 | // and when 411 | cache.Delete("key2") 412 | 413 | // then 414 | assertEqual(t, true, onRemoveInvoked) 415 | } 416 | 417 | func TestOnRemoveFilterExpired(t *testing.T) { 418 | // t.Parallel() 419 | 420 | // given 421 | clock := mockedClock{value: 0} 422 | onRemoveDeleted, onRemoveExpired := false, false 423 | var err error 424 | onRemove := func(key string, entry []byte, reason RemoveReason) { 425 | switch reason { 426 | 427 | case Deleted: 428 | onRemoveDeleted = true 429 | case Expired: 430 | onRemoveExpired = true 431 | 432 | } 433 | } 434 | c := Config{ 435 | Shards: 1, 436 | LifeWindow: 3 * time.Second, 437 | CleanWindow: 0, 438 | MaxEntriesInWindow: 10, 439 | MaxEntrySize: 256, 440 | OnRemoveWithReason: onRemove, 441 | } 442 | 443 | cache, err := newBigCache(context.Background(), c, &clock) 444 | assertEqual(t, err, nil) 445 | 446 | // case 1: key is deleted AFTER expire 447 | // when 448 | onRemoveDeleted, onRemoveExpired = false, false 449 | clock.set(0) 450 | 451 | cache.Set("key", []byte("value")) 452 | clock.set(5) 453 | cache.cleanUp(uint64(clock.Epoch())) 454 | 455 | err = cache.Delete("key") 456 | 457 | // then 458 | assertEqual(t, err, ErrEntryNotFound) 459 | assertEqual(t, false, onRemoveDeleted) 460 | assertEqual(t, true, onRemoveExpired) 461 | 462 | // case 1: key is deleted BEFORE expire 463 | // when 464 | onRemoveDeleted, onRemoveExpired = false, false 465 | clock.set(0) 466 | 467 | cache.Set("key2", []byte("value2")) 468 | err = cache.Delete("key2") 469 | clock.set(5) 470 | cache.cleanUp(uint64(clock.Epoch())) 471 | // then 472 | 473 | assertEqual(t, err, nil) 474 | assertEqual(t, true, onRemoveDeleted) 475 | assertEqual(t, false, onRemoveExpired) 476 | } 477 | 478 | func TestOnRemoveGetEntryStats(t *testing.T) { 479 | t.Parallel() 480 | 481 | // given 482 | clock := mockedClock{value: 0} 483 | count := uint32(0) 484 | onRemove := func(key string, entry []byte, keyMetadata Metadata) { 485 | count = keyMetadata.RequestCount 486 | } 487 | c := Config{ 488 | Shards: 1, 489 | LifeWindow: time.Second, 490 | MaxEntriesInWindow: 1, 491 | MaxEntrySize: 256, 492 | OnRemoveWithMetadata: onRemove, 493 | StatsEnabled: true, 494 | }.OnRemoveFilterSet(Deleted, NoSpace) 495 | 496 | cache, _ := newBigCache(context.Background(), c, &clock) 497 | 498 | // when 499 | cache.Set("key", []byte("value")) 500 | 501 | for i := 0; i < 100; i++ { 502 | cache.Get("key") 503 | } 504 | 505 | 
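	// Delete fires the OnRemoveWithMetadata callback configured above, which
	// records the entry's request count (the 100 Gets) into count for the assertion below.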
cache.Delete("key") 506 | 507 | // then 508 | assertEqual(t, uint32(100), count) 509 | } 510 | 511 | func TestCacheLen(t *testing.T) { 512 | t.Parallel() 513 | 514 | // given 515 | cache, _ := New(context.Background(), Config{ 516 | Shards: 8, 517 | LifeWindow: time.Second, 518 | MaxEntriesInWindow: 1, 519 | MaxEntrySize: 256, 520 | }) 521 | keys := 1337 522 | 523 | // when 524 | for i := 0; i < keys; i++ { 525 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 526 | } 527 | 528 | // then 529 | assertEqual(t, keys, cache.Len()) 530 | } 531 | 532 | func TestCacheCapacity(t *testing.T) { 533 | t.Parallel() 534 | 535 | // given 536 | cache, _ := New(context.Background(), Config{ 537 | Shards: 8, 538 | LifeWindow: time.Second, 539 | MaxEntriesInWindow: 1, 540 | MaxEntrySize: 256, 541 | }) 542 | keys := 1337 543 | 544 | // when 545 | for i := 0; i < keys; i++ { 546 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 547 | } 548 | 549 | // then 550 | assertEqual(t, keys, cache.Len()) 551 | assertEqual(t, 40960, cache.Capacity()) 552 | } 553 | 554 | func TestCacheInitialCapacity(t *testing.T) { 555 | t.Parallel() 556 | 557 | // given 558 | cache, _ := New(context.Background(), Config{ 559 | Shards: 1, 560 | LifeWindow: time.Second, 561 | MaxEntriesInWindow: 2 * 1024, 562 | HardMaxCacheSize: 1, 563 | MaxEntrySize: 1024, 564 | }) 565 | 566 | assertEqual(t, 0, cache.Len()) 567 | assertEqual(t, 1024*1024, cache.Capacity()) 568 | 569 | keys := 1024 * 1024 570 | 571 | // when 572 | for i := 0; i < keys; i++ { 573 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 574 | } 575 | 576 | // then 577 | assertEqual(t, true, cache.Len() < keys) 578 | assertEqual(t, 1024*1024, cache.Capacity()) 579 | } 580 | 581 | func TestRemoveEntriesWhenShardIsFull(t *testing.T) { 582 | t.Parallel() 583 | 584 | // given 585 | cache, _ := New(context.Background(), Config{ 586 | Shards: 1, 587 | LifeWindow: 100 * time.Second, 588 | MaxEntriesInWindow: 100, 589 | MaxEntrySize: 256, 590 | HardMaxCacheSize: 1, 591 | }) 592 | 593 | value := blob('a', 1024*300) 594 | 595 | // when 596 | cache.Set("key", value) 597 | cache.Set("key", value) 598 | cache.Set("key", value) 599 | cache.Set("key", value) 600 | cache.Set("key", value) 601 | cachedValue, err := cache.Get("key") 602 | 603 | // then 604 | noError(t, err) 605 | assertEqual(t, value, cachedValue) 606 | } 607 | 608 | func TestCacheStats(t *testing.T) { 609 | t.Parallel() 610 | 611 | // given 612 | cache, _ := New(context.Background(), Config{ 613 | Shards: 8, 614 | LifeWindow: time.Second, 615 | MaxEntriesInWindow: 1, 616 | MaxEntrySize: 256, 617 | }) 618 | 619 | // when 620 | for i := 0; i < 100; i++ { 621 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 622 | } 623 | 624 | for i := 0; i < 10; i++ { 625 | value, err := cache.Get(fmt.Sprintf("key%d", i)) 626 | noError(t, err) 627 | assertEqual(t, string(value), "value") 628 | } 629 | for i := 100; i < 110; i++ { 630 | _, err := cache.Get(fmt.Sprintf("key%d", i)) 631 | assertEqual(t, ErrEntryNotFound, err) 632 | } 633 | for i := 10; i < 20; i++ { 634 | err := cache.Delete(fmt.Sprintf("key%d", i)) 635 | noError(t, err) 636 | } 637 | for i := 110; i < 120; i++ { 638 | err := cache.Delete(fmt.Sprintf("key%d", i)) 639 | assertEqual(t, ErrEntryNotFound, err) 640 | } 641 | 642 | // then 643 | stats := cache.Stats() 644 | assertEqual(t, stats.Hits, int64(10)) 645 | assertEqual(t, stats.Misses, int64(10)) 646 | assertEqual(t, stats.DelHits, int64(10)) 647 | assertEqual(t, stats.DelMisses, int64(10)) 648 | } 649 | func 
TestCacheEntryStats(t *testing.T) { 650 | t.Parallel() 651 | 652 | // given 653 | cache, _ := New(context.Background(), Config{ 654 | Shards: 8, 655 | LifeWindow: time.Second, 656 | MaxEntriesInWindow: 1, 657 | MaxEntrySize: 256, 658 | StatsEnabled: true, 659 | }) 660 | 661 | cache.Set("key0", []byte("value")) 662 | 663 | for i := 0; i < 10; i++ { 664 | _, err := cache.Get("key0") 665 | noError(t, err) 666 | } 667 | 668 | // then 669 | keyMetadata := cache.KeyMetadata("key0") 670 | assertEqual(t, uint32(10), keyMetadata.RequestCount) 671 | } 672 | 673 | func TestCacheRestStats(t *testing.T) { 674 | t.Parallel() 675 | 676 | // given 677 | cache, _ := New(context.Background(), Config{ 678 | Shards: 8, 679 | LifeWindow: time.Second, 680 | MaxEntriesInWindow: 1, 681 | MaxEntrySize: 256, 682 | }) 683 | 684 | // when 685 | for i := 0; i < 100; i++ { 686 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 687 | } 688 | 689 | for i := 0; i < 10; i++ { 690 | value, err := cache.Get(fmt.Sprintf("key%d", i)) 691 | noError(t, err) 692 | assertEqual(t, string(value), "value") 693 | } 694 | for i := 100; i < 110; i++ { 695 | _, err := cache.Get(fmt.Sprintf("key%d", i)) 696 | assertEqual(t, ErrEntryNotFound, err) 697 | } 698 | for i := 10; i < 20; i++ { 699 | err := cache.Delete(fmt.Sprintf("key%d", i)) 700 | noError(t, err) 701 | } 702 | for i := 110; i < 120; i++ { 703 | err := cache.Delete(fmt.Sprintf("key%d", i)) 704 | assertEqual(t, ErrEntryNotFound, err) 705 | } 706 | 707 | stats := cache.Stats() 708 | assertEqual(t, stats.Hits, int64(10)) 709 | assertEqual(t, stats.Misses, int64(10)) 710 | assertEqual(t, stats.DelHits, int64(10)) 711 | assertEqual(t, stats.DelMisses, int64(10)) 712 | 713 | //then 714 | cache.ResetStats() 715 | stats = cache.Stats() 716 | assertEqual(t, stats.Hits, int64(0)) 717 | assertEqual(t, stats.Misses, int64(0)) 718 | assertEqual(t, stats.DelHits, int64(0)) 719 | assertEqual(t, stats.DelMisses, int64(0)) 720 | } 721 | 722 | func TestCacheDel(t *testing.T) { 723 | t.Parallel() 724 | 725 | // given 726 | cache, _ := New(context.Background(), DefaultConfig(time.Second)) 727 | 728 | // when 729 | err := cache.Delete("nonExistingKey") 730 | 731 | // then 732 | assertEqual(t, err, ErrEntryNotFound) 733 | 734 | // and when 735 | cache.Set("existingKey", nil) 736 | err = cache.Delete("existingKey") 737 | cachedValue, _ := cache.Get("existingKey") 738 | 739 | // then 740 | noError(t, err) 741 | assertEqual(t, 0, len(cachedValue)) 742 | } 743 | 744 | // TestCacheDelRandomly does simultaneous deletes, puts and gets, to check for corruption errors. 
745 | func TestCacheDelRandomly(t *testing.T) { 746 | t.Parallel() 747 | 748 | c := Config{ 749 | Shards: 1, 750 | LifeWindow: time.Second, 751 | CleanWindow: 0, 752 | MaxEntriesInWindow: 10, 753 | MaxEntrySize: 10, 754 | Verbose: false, 755 | Hasher: newDefaultHasher(), 756 | HardMaxCacheSize: 1, 757 | StatsEnabled: true, 758 | Logger: DefaultLogger(), 759 | } 760 | 761 | cache, _ := New(context.Background(), c) 762 | var wg sync.WaitGroup 763 | var ntest = 800000 764 | wg.Add(3) 765 | go func() { 766 | for i := 0; i < ntest; i++ { 767 | r := uint8(rand.Int()) 768 | key := fmt.Sprintf("thekey%d", r) 769 | 770 | cache.Delete(key) 771 | } 772 | wg.Done() 773 | }() 774 | valueLen := 1024 775 | go func() { 776 | val := make([]byte, valueLen) 777 | for i := 0; i < ntest; i++ { 778 | r := byte(rand.Int()) 779 | key := fmt.Sprintf("thekey%d", r) 780 | 781 | for j := 0; j < len(val); j++ { 782 | val[j] = r 783 | } 784 | cache.Set(key, val) 785 | } 786 | wg.Done() 787 | }() 788 | go func() { 789 | val := make([]byte, valueLen) 790 | for i := 0; i < ntest; i++ { 791 | r := byte(rand.Int()) 792 | key := fmt.Sprintf("thekey%d", r) 793 | 794 | for j := 0; j < len(val); j++ { 795 | val[j] = r 796 | } 797 | if got, err := cache.Get(key); err == nil && !bytes.Equal(got, val) { 798 | t.Errorf("got %s ->\n %x\n expected:\n %x\n ", key, got, val) 799 | } 800 | } 801 | wg.Done() 802 | }() 803 | wg.Wait() 804 | } 805 | 806 | func TestWriteAndReadParallelSameKeyWithStats(t *testing.T) { 807 | t.Parallel() 808 | 809 | c := DefaultConfig(10 * time.Second) 810 | c.StatsEnabled = true 811 | 812 | cache, _ := New(context.Background(), c) 813 | var wg sync.WaitGroup 814 | ntest := 1000 815 | n := 10 816 | wg.Add(n) 817 | key := "key" 818 | value := blob('a', 1024) 819 | for i := 0; i < ntest; i++ { 820 | assertEqual(t, nil, cache.Set(key, value)) 821 | } 822 | for j := 0; j < n; j++ { 823 | go func() { 824 | for i := 0; i < ntest; i++ { 825 | v, err := cache.Get(key) 826 | assertEqual(t, nil, err) 827 | assertEqual(t, value, v) 828 | } 829 | wg.Done() 830 | }() 831 | } 832 | 833 | wg.Wait() 834 | 835 | assertEqual(t, Stats{Hits: int64(n * ntest)}, cache.Stats()) 836 | assertEqual(t, ntest*n, int(cache.KeyMetadata(key).RequestCount)) 837 | } 838 | 839 | func TestCacheReset(t *testing.T) { 840 | t.Parallel() 841 | 842 | // given 843 | cache, _ := New(context.Background(), Config{ 844 | Shards: 8, 845 | LifeWindow: time.Second, 846 | MaxEntriesInWindow: 1, 847 | MaxEntrySize: 256, 848 | }) 849 | keys := 1337 850 | 851 | // when 852 | for i := 0; i < keys; i++ { 853 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 854 | } 855 | 856 | // then 857 | assertEqual(t, keys, cache.Len()) 858 | 859 | // and when 860 | cache.Reset() 861 | 862 | // then 863 | assertEqual(t, 0, cache.Len()) 864 | 865 | // and when 866 | for i := 0; i < keys; i++ { 867 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 868 | } 869 | 870 | // then 871 | assertEqual(t, keys, cache.Len()) 872 | } 873 | 874 | func TestIterateOnResetCache(t *testing.T) { 875 | t.Parallel() 876 | 877 | // given 878 | cache, _ := New(context.Background(), Config{ 879 | Shards: 8, 880 | LifeWindow: time.Second, 881 | MaxEntriesInWindow: 1, 882 | MaxEntrySize: 256, 883 | }) 884 | keys := 1337 885 | 886 | // when 887 | for i := 0; i < keys; i++ { 888 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 889 | } 890 | cache.Reset() 891 | 892 | // then 893 | iterator := cache.Iterator() 894 | 895 | assertEqual(t, false, iterator.SetNext()) 896 | } 897 | 898 | func 
TestGetOnResetCache(t *testing.T) { 899 | t.Parallel() 900 | 901 | // given 902 | cache, _ := New(context.Background(), Config{ 903 | Shards: 8, 904 | LifeWindow: time.Second, 905 | MaxEntriesInWindow: 1, 906 | MaxEntrySize: 256, 907 | }) 908 | keys := 1337 909 | 910 | // when 911 | for i := 0; i < keys; i++ { 912 | cache.Set(fmt.Sprintf("key%d", i), []byte("value")) 913 | } 914 | 915 | cache.Reset() 916 | 917 | // then 918 | value, err := cache.Get("key1") 919 | 920 | assertEqual(t, err, ErrEntryNotFound) 921 | assertEqual(t, value, []byte(nil)) 922 | } 923 | 924 | func TestEntryUpdate(t *testing.T) { 925 | t.Parallel() 926 | 927 | // given 928 | clock := mockedClock{value: 0} 929 | cache, _ := newBigCache(context.Background(), Config{ 930 | Shards: 1, 931 | LifeWindow: 6 * time.Second, 932 | MaxEntriesInWindow: 1, 933 | MaxEntrySize: 256, 934 | }, &clock) 935 | 936 | // when 937 | cache.Set("key", []byte("value")) 938 | clock.set(5) 939 | cache.Set("key", []byte("value2")) 940 | clock.set(7) 941 | cache.Set("key2", []byte("value3")) 942 | cachedValue, _ := cache.Get("key") 943 | 944 | // then 945 | assertEqual(t, []byte("value2"), cachedValue) 946 | } 947 | 948 | func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) { 949 | t.Parallel() 950 | 951 | // given 952 | cache, _ := New(context.Background(), Config{ 953 | Shards: 1, 954 | LifeWindow: 5 * time.Second, 955 | MaxEntriesInWindow: 1, 956 | MaxEntrySize: 1, 957 | HardMaxCacheSize: 1, 958 | }) 959 | 960 | // when 961 | cache.Set("key1", blob('a', 1024*400)) 962 | cache.Set("key2", blob('b', 1024*400)) 963 | cache.Set("key3", blob('c', 1024*800)) 964 | 965 | _, key1Err := cache.Get("key1") 966 | _, key2Err := cache.Get("key2") 967 | entry3, _ := cache.Get("key3") 968 | 969 | // then 970 | assertEqual(t, key1Err, ErrEntryNotFound) 971 | assertEqual(t, key2Err, ErrEntryNotFound) 972 | assertEqual(t, blob('c', 1024*800), entry3) 973 | } 974 | 975 | func TestRetrievingEntryShouldCopy(t *testing.T) { 976 | t.Parallel() 977 | 978 | // given 979 | cache, _ := New(context.Background(), Config{ 980 | Shards: 1, 981 | LifeWindow: 5 * time.Second, 982 | MaxEntriesInWindow: 1, 983 | MaxEntrySize: 1, 984 | HardMaxCacheSize: 1, 985 | }) 986 | cache.Set("key1", blob('a', 1024*400)) 987 | value, key1Err := cache.Get("key1") 988 | 989 | // when 990 | // override queue 991 | cache.Set("key2", blob('b', 1024*400)) 992 | cache.Set("key3", blob('c', 1024*400)) 993 | cache.Set("key4", blob('d', 1024*400)) 994 | cache.Set("key5", blob('d', 1024*400)) 995 | 996 | // then 997 | noError(t, key1Err) 998 | assertEqual(t, blob('a', 1024*400), value) 999 | } 1000 | 1001 | func TestEntryBiggerThanMaxShardSizeError(t *testing.T) { 1002 | t.Parallel() 1003 | 1004 | // given 1005 | cache, _ := New(context.Background(), Config{ 1006 | Shards: 1, 1007 | LifeWindow: 5 * time.Second, 1008 | MaxEntriesInWindow: 1, 1009 | MaxEntrySize: 1, 1010 | HardMaxCacheSize: 1, 1011 | }) 1012 | 1013 | // when 1014 | err := cache.Set("key1", blob('a', 1024*1025)) 1015 | 1016 | // then 1017 | assertEqual(t, "entry is bigger than max shard size", err.Error()) 1018 | } 1019 | 1020 | func TestHashCollision(t *testing.T) { 1021 | t.Parallel() 1022 | 1023 | ml := &mockedLogger{} 1024 | // given 1025 | cache, _ := New(context.Background(), Config{ 1026 | Shards: 16, 1027 | LifeWindow: 5 * time.Second, 1028 | MaxEntriesInWindow: 10, 1029 | MaxEntrySize: 256, 1030 | Verbose: true, 1031 | Hasher: hashStub(5), 1032 | Logger: ml, 1033 | }) 1034 | 1035 | // when 1036 | 
cache.Set("liquid", []byte("value")) 1037 | cachedValue, err := cache.Get("liquid") 1038 | 1039 | // then 1040 | noError(t, err) 1041 | assertEqual(t, []byte("value"), cachedValue) 1042 | 1043 | // when 1044 | cache.Set("costarring", []byte("value 2")) 1045 | cachedValue, err = cache.Get("costarring") 1046 | 1047 | // then 1048 | noError(t, err) 1049 | assertEqual(t, []byte("value 2"), cachedValue) 1050 | 1051 | // when 1052 | cachedValue, err = cache.Get("liquid") 1053 | 1054 | // then 1055 | assertEqual(t, ErrEntryNotFound, err) 1056 | assertEqual(t, []byte(nil), cachedValue) 1057 | 1058 | assertEqual(t, "Collision detected. Both %q and %q have the same hash %x", ml.lastFormat) 1059 | assertEqual(t, cache.Stats().Collisions, int64(1)) 1060 | } 1061 | 1062 | func TestNilValueCaching(t *testing.T) { 1063 | t.Parallel() 1064 | 1065 | // given 1066 | cache, _ := New(context.Background(), Config{ 1067 | Shards: 1, 1068 | LifeWindow: 5 * time.Second, 1069 | MaxEntriesInWindow: 1, 1070 | MaxEntrySize: 1, 1071 | HardMaxCacheSize: 1, 1072 | }) 1073 | 1074 | // when 1075 | cache.Set("Kierkegaard", []byte{}) 1076 | cachedValue, err := cache.Get("Kierkegaard") 1077 | 1078 | // then 1079 | noError(t, err) 1080 | assertEqual(t, []byte{}, cachedValue) 1081 | 1082 | // when 1083 | cache.Set("Sartre", nil) 1084 | cachedValue, err = cache.Get("Sartre") 1085 | 1086 | // then 1087 | noError(t, err) 1088 | assertEqual(t, []byte{}, cachedValue) 1089 | 1090 | // when 1091 | cache.Set("Nietzsche", []byte(nil)) 1092 | cachedValue, err = cache.Get("Nietzsche") 1093 | 1094 | // then 1095 | noError(t, err) 1096 | assertEqual(t, []byte{}, cachedValue) 1097 | } 1098 | 1099 | func TestClosing(t *testing.T) { 1100 | // given 1101 | config := Config{ 1102 | CleanWindow: time.Minute, 1103 | Shards: 1, 1104 | LifeWindow: 1 * time.Second, 1105 | } 1106 | startGR := runtime.NumGoroutine() 1107 | 1108 | // when 1109 | for i := 0; i < 100; i++ { 1110 | cache, _ := New(context.Background(), config) 1111 | cache.Close() 1112 | } 1113 | 1114 | // wait till all goroutines are stopped. 
1115 | time.Sleep(200 * time.Millisecond) 1116 | 1117 | // then 1118 | endGR := runtime.NumGoroutine() 1119 | assertEqual(t, true, endGR >= startGR) 1120 | assertEqual(t, true, math.Abs(float64(endGR-startGR)) < 25) 1121 | } 1122 | 1123 | func TestEntryNotPresent(t *testing.T) { 1124 | t.Parallel() 1125 | 1126 | // given 1127 | clock := mockedClock{value: 0} 1128 | cache, _ := newBigCache(context.Background(), Config{ 1129 | Shards: 1, 1130 | LifeWindow: 5 * time.Second, 1131 | MaxEntriesInWindow: 1, 1132 | MaxEntrySize: 1, 1133 | HardMaxCacheSize: 1, 1134 | }, &clock) 1135 | 1136 | // when 1137 | value, resp, err := cache.GetWithInfo("blah") 1138 | assertEqual(t, ErrEntryNotFound, err) 1139 | assertEqual(t, resp.EntryStatus, RemoveReason(0)) 1140 | assertEqual(t, cache.Stats().Misses, int64(1)) 1141 | assertEqual(t, []byte(nil), value) 1142 | } 1143 | 1144 | func TestBigCache_GetWithInfo(t *testing.T) { 1145 | t.Parallel() 1146 | 1147 | // given 1148 | clock := mockedClock{value: 0} 1149 | cache, _ := newBigCache(context.Background(), Config{ 1150 | Shards: 1, 1151 | LifeWindow: 5 * time.Second, 1152 | CleanWindow: 5 * time.Minute, 1153 | MaxEntriesInWindow: 1, 1154 | MaxEntrySize: 1, 1155 | HardMaxCacheSize: 1, 1156 | Verbose: true, 1157 | }, &clock) 1158 | key := "deadEntryKey" 1159 | value := "100" 1160 | cache.Set(key, []byte(value)) 1161 | 1162 | for _, tc := range []struct { 1163 | name string 1164 | clock int64 1165 | wantData string 1166 | wantResp Response 1167 | }{ 1168 | { 1169 | name: "zero", 1170 | clock: 0, 1171 | wantData: value, 1172 | wantResp: Response{}, 1173 | }, 1174 | { 1175 | name: "Before Expired", 1176 | clock: 4, 1177 | wantData: value, 1178 | wantResp: Response{}, 1179 | }, 1180 | { 1181 | name: "Expired", 1182 | clock: 5, 1183 | wantData: value, 1184 | wantResp: Response{}, 1185 | }, 1186 | { 1187 | name: "After Expired", 1188 | clock: 6, 1189 | wantData: value, 1190 | wantResp: Response{EntryStatus: Expired}, 1191 | }, 1192 | } { 1193 | t.Run(tc.name, func(t *testing.T) { 1194 | clock.set(tc.clock) 1195 | data, resp, err := cache.GetWithInfo(key) 1196 | 1197 | assertEqual(t, []byte(tc.wantData), data) 1198 | noError(t, err) 1199 | assertEqual(t, tc.wantResp, resp) 1200 | }) 1201 | } 1202 | } 1203 | 1204 | func TestBigCache_GetWithInfoCollision(t *testing.T) { 1205 | t.Parallel() 1206 | 1207 | // given 1208 | cache, _ := New(context.Background(), Config{ 1209 | Shards: 1, 1210 | LifeWindow: 5 * time.Second, 1211 | MaxEntriesInWindow: 10, 1212 | MaxEntrySize: 256, 1213 | Verbose: true, 1214 | Hasher: hashStub(5), 1215 | }) 1216 | 1217 | //when 1218 | cache.Set("a", []byte("1")) 1219 | cachedValue, resp, err := cache.GetWithInfo("a") 1220 | 1221 | // then 1222 | noError(t, err) 1223 | assertEqual(t, []byte("1"), cachedValue) 1224 | assertEqual(t, Response{}, resp) 1225 | 1226 | // when 1227 | cachedValue, resp, err = cache.GetWithInfo("b") 1228 | 1229 | // then 1230 | assertEqual(t, []byte(nil), cachedValue) 1231 | assertEqual(t, Response{}, resp) 1232 | assertEqual(t, ErrEntryNotFound, err) 1233 | assertEqual(t, cache.Stats().Collisions, int64(1)) 1234 | 1235 | } 1236 | 1237 | type mockedLogger struct { 1238 | lastFormat string 1239 | lastArgs []interface{} 1240 | } 1241 | 1242 | func (ml *mockedLogger) Printf(format string, v ...interface{}) { 1243 | ml.lastFormat = format 1244 | ml.lastArgs = v 1245 | } 1246 | 1247 | type mockedClock struct { 1248 | value int64 1249 | } 1250 | 1251 | func (mc *mockedClock) Epoch() int64 { 1252 | return mc.value 1253 | } 1254 | 
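// The mockedClock above is what makes the expiration tests in this file
// deterministic: newBigCache accepts it in place of the system clock, so a
// test can jump time forward explicitly instead of sleeping. A minimal
// sketch of the pattern used by the tests above (illustrative only; the
// config values are arbitrary):
//
//	clock := mockedClock{value: 0}
//	cache, _ := newBigCache(context.Background(), Config{
//		Shards:             1,
//		LifeWindow:         time.Second,
//		MaxEntriesInWindow: 1,
//		MaxEntrySize:       256,
//	}, &clock)
//	cache.Set("key", []byte("value"))
//	clock.set(5)                         // pretend 5 seconds have passed
//	cache.cleanUp(uint64(clock.Epoch())) // evicts entries older than LifeWindow
//	_, err := cache.Get("key")           // err == ErrEntryNotFound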
1255 | func (mc *mockedClock) set(value int64) { 1256 | mc.value = value 1257 | } 1258 | 1259 | func blob(char byte, len int) []byte { 1260 | return bytes.Repeat([]byte{char}, len) 1261 | } 1262 | 1263 | func TestCache_SetWithoutCleanWindow(t *testing.T) { 1264 | 1265 | opt := DefaultConfig(time.Second) 1266 | opt.CleanWindow = 0 1267 | opt.HardMaxCacheSize = 1 1268 | bc, _ := New(context.Background(), opt) 1269 | 1270 | err := bc.Set("2225", make([]byte, 200)) 1271 | if nil != err { 1272 | t.Error(err) 1273 | t.FailNow() 1274 | } 1275 | } 1276 | 1277 | func TestCache_RepeatedSetWithBiggerEntry(t *testing.T) { 1278 | 1279 | opt := DefaultConfig(time.Second) 1280 | opt.Shards = 2 << 10 1281 | opt.MaxEntriesInWindow = 1024 1282 | opt.MaxEntrySize = 1 1283 | opt.HardMaxCacheSize = 1 1284 | bc, _ := New(context.Background(), opt) 1285 | 1286 | err := bc.Set("2225", make([]byte, 200)) 1287 | if nil != err { 1288 | t.Error(err) 1289 | t.FailNow() 1290 | } 1291 | err = bc.Set("8573", make([]byte, 100)) 1292 | if nil != err { 1293 | t.Error(err) 1294 | t.FailNow() 1295 | } 1296 | 1297 | err = bc.Set("8573", make([]byte, 450)) 1298 | if nil != err { 1299 | // occur error but go next 1300 | t.Logf("%v", err) 1301 | } 1302 | 1303 | err = bc.Set("7327", make([]byte, 300)) 1304 | if nil != err { 1305 | t.Error(err) 1306 | t.FailNow() 1307 | } 1308 | 1309 | err = bc.Set("8573", make([]byte, 200)) 1310 | if nil != err { 1311 | t.Error(err) 1312 | t.FailNow() 1313 | } 1314 | 1315 | } 1316 | 1317 | // TestBigCache_allocateAdditionalMemoryLeadPanic 1318 | // The new commit 16df11e change the encoding method,it can fix issue #300 1319 | func TestBigCache_allocateAdditionalMemoryLeadPanic(t *testing.T) { 1320 | t.Parallel() 1321 | clock := mockedClock{value: 0} 1322 | cache, _ := newBigCache(context.Background(), Config{ 1323 | Shards: 1, 1324 | LifeWindow: 3 * time.Second, 1325 | MaxEntrySize: 52, 1326 | }, &clock) 1327 | ts := time.Now().Unix() 1328 | clock.set(ts) 1329 | cache.Set("a", blob(0xff, 235)) 1330 | ts += 2 1331 | clock.set(ts) 1332 | cache.Set("b", blob(0xff, 235)) 1333 | // expire the key "a" 1334 | ts += 2 1335 | clock.set(ts) 1336 | // move tail to leftMargin,insert before head 1337 | cache.Set("c", blob(0xff, 108)) 1338 | // reallocate memory,fill the tail to head with zero byte,move head to leftMargin 1339 | cache.Set("d", blob(0xff, 1024)) 1340 | ts += 4 1341 | clock.set(ts) 1342 | // expire the key "c" 1343 | cache.Set("e", blob(0xff, 3)) 1344 | // expire the zero bytes 1345 | cache.Set("f", blob(0xff, 3)) 1346 | // expire the key "b" 1347 | cache.Set("g", blob(0xff, 3)) 1348 | _, err := cache.Get("b") 1349 | assertEqual(t, err, ErrEntryNotFound) 1350 | data, _ := cache.Get("g") 1351 | assertEqual(t, []byte{0xff, 0xff, 0xff}, data) 1352 | } 1353 | 1354 | func TestRemoveNonExpiredData(t *testing.T) { 1355 | onRemove := func(key string, entry []byte, reason RemoveReason) { 1356 | if reason != Deleted { 1357 | if reason == Expired { 1358 | t.Errorf("[%d]Expired OnRemove [%s]\n", reason, key) 1359 | t.FailNow() 1360 | } else { 1361 | time.Sleep(time.Second) 1362 | } 1363 | } 1364 | } 1365 | 1366 | config := DefaultConfig(10 * time.Minute) 1367 | config.HardMaxCacheSize = 1 1368 | config.MaxEntrySize = 1024 1369 | config.MaxEntriesInWindow = 1024 1370 | config.OnRemoveWithReason = onRemove 1371 | cache, err := New(context.Background(), config) 1372 | noError(t, err) 1373 | defer func() { 1374 | err := cache.Close() 1375 | noError(t, err) 1376 | }() 1377 | 1378 | data := func(l int) []byte { 
1379 | m := make([]byte, l) 1380 | _, err := rand.Read(m) 1381 | noError(t, err) 1382 | return m 1383 | } 1384 | 1385 | for i := 0; i < 50; i++ { 1386 | key := fmt.Sprintf("key_%d", i) 1387 | //key := "key1" 1388 | err := cache.Set(key, data(800)) 1389 | noError(t, err) 1390 | } 1391 | } 1392 | -------------------------------------------------------------------------------- /bytes.go: -------------------------------------------------------------------------------- 1 | //go:build !appengine 2 | // +build !appengine 3 | 4 | package bigcache 5 | 6 | import ( 7 | "unsafe" 8 | ) 9 | 10 | func bytesToString(b []byte) string { 11 | return *(*string)(unsafe.Pointer(&b)) 12 | } 13 | -------------------------------------------------------------------------------- /bytes_appengine.go: -------------------------------------------------------------------------------- 1 | //go:build appengine 2 | // +build appengine 3 | 4 | package bigcache 5 | 6 | func bytesToString(b []byte) string { 7 | return string(b) 8 | } 9 | -------------------------------------------------------------------------------- /clock.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "time" 4 | 5 | type clock interface { 6 | Epoch() int64 7 | } 8 | 9 | type systemClock struct { 10 | } 11 | 12 | func (c systemClock) Epoch() int64 { 13 | return time.Now().Unix() 14 | } 15 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "time" 4 | 5 | // Config for BigCache 6 | type Config struct { 7 | // Number of cache shards, value must be a power of two 8 | Shards int 9 | // Time after which entry can be evicted 10 | LifeWindow time.Duration 11 | // Interval between removing expired entries (clean up). 12 | // If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution. 13 | CleanWindow time.Duration 14 | // Max number of entries in life window. Used only to calculate initial size for cache shards. 15 | // When proper value is set then additional memory allocation does not occur. 16 | MaxEntriesInWindow int 17 | // Max size of entry in bytes. Used only to calculate initial size for cache shards. 18 | MaxEntrySize int 19 | // StatsEnabled if true calculate the number of times a cached resource was requested. 20 | StatsEnabled bool 21 | // Verbose mode prints information about new memory allocation 22 | Verbose bool 23 | // Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used. 24 | Hasher Hasher 25 | // HardMaxCacheSize is a limit for BytesQueue size in MB. 26 | // It can protect application from consuming all available memory on machine, therefore from running OOM Killer. 27 | // Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then 28 | // the oldest entries are overridden for the new ones. The max memory consumption will be bigger than 29 | // HardMaxCacheSize due to Shards' s additional memory. Every Shard consumes additional memory for map of keys 30 | // and statistics (map[uint64]uint32) the size of this map is equal to number of entries in 31 | // cache ~ 2×(64+32)×n bits + overhead or map itself. 
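// For example (rough, illustrative estimate): with one million entries in the
// cache that works out to about 2×(64+32)×10^6 bits ≈ 24 MB for the keys and
// statistics, before the overhead of the map itself.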
32 | HardMaxCacheSize int 33 | // OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left 34 | // for the new entry, or because delete was called. 35 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 36 | // ignored if OnRemoveWithMetadata is specified. 37 | OnRemove func(key string, entry []byte) 38 | // OnRemoveWithMetadata is a callback fired when the oldest entry is removed because of its expiration time or no space left 39 | // for the new entry, or because delete was called. A structure representing details about that specific entry. 40 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 41 | OnRemoveWithMetadata func(key string, entry []byte, keyMetadata Metadata) 42 | // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left 43 | // for the new entry, or because delete was called. A constant representing the reason will be passed through. 44 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 45 | // Ignored if OnRemove is specified. 46 | OnRemoveWithReason func(key string, entry []byte, reason RemoveReason) 47 | 48 | onRemoveFilter int 49 | 50 | // Logger is a logging interface and used in combination with `Verbose` 51 | // Defaults to `DefaultLogger()` 52 | Logger Logger 53 | } 54 | 55 | // DefaultConfig initializes config with default values. 56 | // When load for BigCache can be predicted in advance then it is better to use custom config. 57 | func DefaultConfig(eviction time.Duration) Config { 58 | return Config{ 59 | Shards: 1024, 60 | LifeWindow: eviction, 61 | CleanWindow: 1 * time.Second, 62 | MaxEntriesInWindow: 1000 * 10 * 60, 63 | MaxEntrySize: 500, 64 | StatsEnabled: false, 65 | Verbose: true, 66 | Hasher: newDefaultHasher(), 67 | HardMaxCacheSize: 0, 68 | Logger: DefaultLogger(), 69 | } 70 | } 71 | 72 | // initialShardSize computes initial shard size 73 | func (c Config) initialShardSize() int { 74 | return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard) 75 | } 76 | 77 | // maximumShardSizeInBytes computes maximum shard size in bytes 78 | func (c Config) maximumShardSizeInBytes() int { 79 | maxShardSize := 0 80 | 81 | if c.HardMaxCacheSize > 0 { 82 | maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards 83 | } 84 | 85 | return maxShardSize 86 | } 87 | 88 | // OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason. 89 | // Filtering out reasons prevents bigcache from unwrapping them, which saves cpu. 
90 | func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config { 91 | c.onRemoveFilter = 0 92 | for i := range reasons { 93 | c.onRemoveFilter |= 1 << uint(reasons[i]) 94 | } 95 | 96 | return c 97 | } 98 | -------------------------------------------------------------------------------- /encoding.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "encoding/binary" 5 | ) 6 | 7 | const ( 8 | timestampSizeInBytes = 8 // Number of bytes used for timestamp 9 | hashSizeInBytes = 8 // Number of bytes used for hash 10 | keySizeInBytes = 2 // Number of bytes used for size of entry key 11 | headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers 12 | ) 13 | 14 | func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte { 15 | keyLength := len(key) 16 | blobLength := len(entry) + headersSizeInBytes + keyLength 17 | 18 | if blobLength > len(*buffer) { 19 | *buffer = make([]byte, blobLength) 20 | } 21 | blob := *buffer 22 | 23 | binary.LittleEndian.PutUint64(blob, timestamp) 24 | binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash) 25 | binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength)) 26 | copy(blob[headersSizeInBytes:], key) 27 | copy(blob[headersSizeInBytes+keyLength:], entry) 28 | 29 | return blob[:blobLength] 30 | } 31 | 32 | func appendToWrappedEntry(timestamp uint64, wrappedEntry []byte, entry []byte, buffer *[]byte) []byte { 33 | blobLength := len(wrappedEntry) + len(entry) 34 | if blobLength > len(*buffer) { 35 | *buffer = make([]byte, blobLength) 36 | } 37 | 38 | blob := *buffer 39 | 40 | binary.LittleEndian.PutUint64(blob, timestamp) 41 | copy(blob[timestampSizeInBytes:], wrappedEntry[timestampSizeInBytes:]) 42 | copy(blob[len(wrappedEntry):], entry) 43 | 44 | return blob[:blobLength] 45 | } 46 | 47 | func readEntry(data []byte) []byte { 48 | length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) 49 | 50 | // copy on read 51 | dst := make([]byte, len(data)-int(headersSizeInBytes+length)) 52 | copy(dst, data[headersSizeInBytes+length:]) 53 | 54 | return dst 55 | } 56 | 57 | func readTimestampFromEntry(data []byte) uint64 { 58 | return binary.LittleEndian.Uint64(data) 59 | } 60 | 61 | func readKeyFromEntry(data []byte) string { 62 | length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) 63 | 64 | // copy on read 65 | dst := make([]byte, length) 66 | copy(dst, data[headersSizeInBytes:headersSizeInBytes+length]) 67 | 68 | return bytesToString(dst) 69 | } 70 | 71 | func compareKeyFromEntry(data []byte, key string) bool { 72 | length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) 73 | 74 | return bytesToString(data[headersSizeInBytes:headersSizeInBytes+length]) == key 75 | } 76 | 77 | func readHashFromEntry(data []byte) uint64 { 78 | return binary.LittleEndian.Uint64(data[timestampSizeInBytes:]) 79 | } 80 | 81 | func resetHashFromEntry(data []byte) { 82 | binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0) 83 | } 84 | -------------------------------------------------------------------------------- /encoding_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestEncodeDecode(t *testing.T) { 9 | // given 10 | now := uint64(time.Now().Unix()) 11 | hash := 
uint64(42) 12 | key := "key" 13 | data := []byte("data") 14 | buffer := make([]byte, 100) 15 | 16 | // when 17 | wrapped := wrapEntry(now, hash, key, data, &buffer) 18 | 19 | // then 20 | assertEqual(t, key, readKeyFromEntry(wrapped)) 21 | assertEqual(t, hash, readHashFromEntry(wrapped)) 22 | assertEqual(t, now, readTimestampFromEntry(wrapped)) 23 | assertEqual(t, data, readEntry(wrapped)) 24 | assertEqual(t, 100, len(buffer)) 25 | } 26 | 27 | func TestAllocateBiggerBuffer(t *testing.T) { 28 | //given 29 | now := uint64(time.Now().Unix()) 30 | hash := uint64(42) 31 | key := "1" 32 | data := []byte("2") 33 | buffer := make([]byte, 1) 34 | 35 | // when 36 | wrapped := wrapEntry(now, hash, key, data, &buffer) 37 | 38 | // then 39 | assertEqual(t, key, readKeyFromEntry(wrapped)) 40 | assertEqual(t, hash, readHashFromEntry(wrapped)) 41 | assertEqual(t, now, readTimestampFromEntry(wrapped)) 42 | assertEqual(t, data, readEntry(wrapped)) 43 | assertEqual(t, 2+headersSizeInBytes, len(buffer)) 44 | } 45 | -------------------------------------------------------------------------------- /entry_not_found_error.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "errors" 4 | 5 | var ( 6 | // ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key 7 | ErrEntryNotFound = errors.New("Entry not found") 8 | ) 9 | -------------------------------------------------------------------------------- /examples_test.go: -------------------------------------------------------------------------------- 1 | package bigcache_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "time" 8 | 9 | "github.com/allegro/bigcache/v3" 10 | ) 11 | 12 | func Example() { 13 | cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10*time.Minute)) 14 | 15 | cache.Set("my-unique-key", []byte("value")) 16 | 17 | entry, _ := cache.Get("my-unique-key") 18 | fmt.Println(string(entry)) 19 | // Output: value 20 | } 21 | 22 | func Example_custom() { 23 | // When cache load can be predicted in advance then it is better to use custom initialization 24 | // because additional memory allocation can be avoided in that way. 25 | config := bigcache.Config{ 26 | // number of shards (must be a power of 2) 27 | Shards: 1024, 28 | 29 | // time after which entry can be evicted 30 | LifeWindow: 10 * time.Minute, 31 | 32 | // Interval between removing expired entries (clean up). 33 | // If set to <= 0 then no action is performed. 34 | // Setting to < 1 second is counterproductive — bigcache has a one second resolution. 35 | CleanWindow: 5 * time.Minute, 36 | 37 | // rps * lifeWindow, used only in initial memory allocation 38 | MaxEntriesInWindow: 1000 * 10 * 60, 39 | 40 | // max entry size in bytes, used only in initial memory allocation 41 | MaxEntrySize: 500, 42 | 43 | // prints information about additional memory allocation 44 | Verbose: true, 45 | 46 | // cache will not allocate more memory than this limit, value in MB 47 | // if value is reached then the oldest entries can be overridden for the new ones 48 | // 0 value means no size limit 49 | HardMaxCacheSize: 8192, 50 | 51 | // callback fired when the oldest entry is removed because of its expiration time or no space left 52 | // for the new entry, or because delete was called. A bitmask representing the reason will be returned. 53 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 
54 | OnRemove: nil, 55 | 56 | // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left 57 | // for the new entry, or because delete was called. A constant representing the reason will be passed through. 58 | // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 59 | // Ignored if OnRemove is specified. 60 | OnRemoveWithReason: nil, 61 | } 62 | 63 | cache, initErr := bigcache.New(context.Background(), config) 64 | if initErr != nil { 65 | log.Fatal(initErr) 66 | } 67 | 68 | err := cache.Set("my-unique-key", []byte("value")) 69 | if err != nil { 70 | log.Fatal(err) 71 | } 72 | 73 | entry, err := cache.Get("my-unique-key") 74 | if err != nil { 75 | log.Fatal(err) 76 | } 77 | fmt.Println(string(entry)) 78 | // Output: value 79 | } 80 | -------------------------------------------------------------------------------- /fnv.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | // newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations. 4 | // Its Sum64 method will lay the value out in big-endian byte order. 5 | // See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function 6 | func newDefaultHasher() Hasher { 7 | return fnv64a{} 8 | } 9 | 10 | type fnv64a struct{} 11 | 12 | const ( 13 | // offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash 14 | offset64 = 14695981039346656037 15 | // prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash 16 | prime64 = 1099511628211 17 | ) 18 | 19 | // Sum64 gets the string and returns its uint64 hash value. 20 | func (f fnv64a) Sum64(key string) uint64 { 21 | var hash uint64 = offset64 22 | for i := 0; i < len(key); i++ { 23 | hash ^= uint64(key[i]) 24 | hash *= prime64 25 | } 26 | 27 | return hash 28 | } 29 | -------------------------------------------------------------------------------- /fnv_bench_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "testing" 4 | 5 | var text = "abcdefg" 6 | 7 | func BenchmarkFnvHashSum64(b *testing.B) { 8 | h := newDefaultHasher() 9 | for i := 0; i < b.N; i++ { 10 | h.Sum64(text) 11 | } 12 | } 13 | 14 | func BenchmarkFnvHashStdLibSum64(b *testing.B) { 15 | for i := 0; i < b.N; i++ { 16 | stdLibFnvSum64(text) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /fnv_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "hash/fnv" 5 | "testing" 6 | ) 7 | 8 | type testCase struct { 9 | text string 10 | expectedHash uint64 11 | } 12 | 13 | var testCases = []testCase{ 14 | {"", stdLibFnvSum64("")}, 15 | {"a", stdLibFnvSum64("a")}, 16 | {"ab", stdLibFnvSum64("ab")}, 17 | {"abc", stdLibFnvSum64("abc")}, 18 | {"some longer and more complicated text", stdLibFnvSum64("some longer and more complicated text")}, 19 | } 20 | 21 | func TestFnvHashSum64(t *testing.T) { 22 | h := newDefaultHasher() 23 | for _, testCase := range testCases { 24 | hashed := h.Sum64(testCase.text) 25 | if hashed != testCase.expectedHash { 26 | t.Errorf("hash(%q) = %d want %d", testCase.text, hashed, testCase.expectedHash) 27 | } 28 | } 29 | } 30 | 31 | func stdLibFnvSum64(key string) uint64 { 32 | h := fnv.New64a() 33 | h.Write([]byte(key)) 34 | return h.Sum64() 35 | } 36 | 
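// The zero-allocation fnv64a hasher defined in fnv.go and the standard
// library implementation wrapped by stdLibFnvSum64 compute the same FNV-1a
// function; TestFnvHashSum64 above checks that they agree on every entry in
// testCases. A minimal sketch of the equivalence (illustrative, not part of
// the test suite):
//
//	h := newDefaultHasher()                 // fnv64a{}
//	h.Sum64("abc") == stdLibFnvSum64("abc") // expected to hold
//
// For a one-byte key such as "a" the whole computation is a single xor and
// multiply: (offset64 ^ 'a') * prime64, reduced modulo 2^64.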
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/allegro/bigcache/v3 2 | 3 | go 1.16 4 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/allegro/bigcache/5aa251c4cc3d607bbb48b825ef583ad1fafa1845/go.sum -------------------------------------------------------------------------------- /hash.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | // Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions 4 | // (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e. 5 | // you can use FarmHash family). 6 | type Hasher interface { 7 | Sum64(string) uint64 8 | } 9 | -------------------------------------------------------------------------------- /hash_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | type hashStub uint64 4 | 5 | func (stub hashStub) Sum64(_ string) uint64 { 6 | return uint64(stub) 7 | } 8 | -------------------------------------------------------------------------------- /iterator.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type iteratorError string 8 | 9 | func (e iteratorError) Error() string { 10 | return string(e) 11 | } 12 | 13 | // ErrInvalidIteratorState is reported when iterator is in invalid state 14 | const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position") 15 | 16 | // ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying 17 | const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache") 18 | 19 | var emptyEntryInfo = EntryInfo{} 20 | 21 | // EntryInfo holds informations about entry in the cache 22 | type EntryInfo struct { 23 | timestamp uint64 24 | hash uint64 25 | key string 26 | value []byte 27 | err error 28 | } 29 | 30 | // Key returns entry's underlying key 31 | func (e EntryInfo) Key() string { 32 | return e.key 33 | } 34 | 35 | // Hash returns entry's hash value 36 | func (e EntryInfo) Hash() uint64 { 37 | return e.hash 38 | } 39 | 40 | // Timestamp returns entry's timestamp (time of insertion) 41 | func (e EntryInfo) Timestamp() uint64 { 42 | return e.timestamp 43 | } 44 | 45 | // Value returns entry's underlying value 46 | func (e EntryInfo) Value() []byte { 47 | return e.value 48 | } 49 | 50 | // EntryInfoIterator allows to iterate over entries in the cache 51 | type EntryInfoIterator struct { 52 | mutex sync.Mutex 53 | cache *BigCache 54 | currentShard int 55 | currentIndex int 56 | currentEntryInfo EntryInfo 57 | elements []uint64 58 | elementsCount int 59 | valid bool 60 | } 61 | 62 | // SetNext moves to next element and returns true if it exists. 
63 | func (it *EntryInfoIterator) SetNext() bool { 64 | it.mutex.Lock() 65 | 66 | it.valid = false 67 | it.currentIndex++ 68 | 69 | if it.elementsCount > it.currentIndex { 70 | it.valid = true 71 | 72 | empty := it.setCurrentEntry() 73 | it.mutex.Unlock() 74 | 75 | if empty { 76 | return it.SetNext() 77 | } 78 | return true 79 | } 80 | 81 | for i := it.currentShard + 1; i < it.cache.config.Shards; i++ { 82 | it.elements, it.elementsCount = it.cache.shards[i].copyHashedKeys() 83 | 84 | // Non empty shard - stick with it 85 | if it.elementsCount > 0 { 86 | it.currentIndex = 0 87 | it.currentShard = i 88 | it.valid = true 89 | 90 | empty := it.setCurrentEntry() 91 | it.mutex.Unlock() 92 | 93 | if empty { 94 | return it.SetNext() 95 | } 96 | return true 97 | } 98 | } 99 | it.mutex.Unlock() 100 | return false 101 | } 102 | 103 | func (it *EntryInfoIterator) setCurrentEntry() bool { 104 | var entryNotFound = false 105 | entry, err := it.cache.shards[it.currentShard].getEntry(it.elements[it.currentIndex]) 106 | 107 | if err == ErrEntryNotFound { 108 | it.currentEntryInfo = emptyEntryInfo 109 | entryNotFound = true 110 | } else if err != nil { 111 | it.currentEntryInfo = EntryInfo{ 112 | err: err, 113 | } 114 | } else { 115 | it.currentEntryInfo = EntryInfo{ 116 | timestamp: readTimestampFromEntry(entry), 117 | hash: readHashFromEntry(entry), 118 | key: readKeyFromEntry(entry), 119 | value: readEntry(entry), 120 | err: err, 121 | } 122 | } 123 | 124 | return entryNotFound 125 | } 126 | 127 | func newIterator(cache *BigCache) *EntryInfoIterator { 128 | elements, count := cache.shards[0].copyHashedKeys() 129 | 130 | return &EntryInfoIterator{ 131 | cache: cache, 132 | currentShard: 0, 133 | currentIndex: -1, 134 | elements: elements, 135 | elementsCount: count, 136 | } 137 | } 138 | 139 | // Value returns current value from the iterator 140 | func (it *EntryInfoIterator) Value() (EntryInfo, error) { 141 | if !it.valid { 142 | return emptyEntryInfo, ErrInvalidIteratorState 143 | } 144 | 145 | return it.currentEntryInfo, it.currentEntryInfo.err 146 | } 147 | -------------------------------------------------------------------------------- /iterator_test.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "runtime" 8 | "strconv" 9 | "sync" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func TestEntriesIterator(t *testing.T) { 15 | t.Parallel() 16 | 17 | // given 18 | keysCount := 1000 19 | cache, _ := New(context.Background(), Config{ 20 | Shards: 8, 21 | LifeWindow: 6 * time.Second, 22 | MaxEntriesInWindow: 1, 23 | MaxEntrySize: 256, 24 | }) 25 | value := []byte("value") 26 | 27 | for i := 0; i < keysCount; i++ { 28 | cache.Set(fmt.Sprintf("key%d", i), value) 29 | } 30 | 31 | // when 32 | keys := make(map[string]struct{}) 33 | iterator := cache.Iterator() 34 | 35 | for iterator.SetNext() { 36 | current, err := iterator.Value() 37 | 38 | if err == nil { 39 | keys[current.Key()] = struct{}{} 40 | } 41 | } 42 | 43 | // then 44 | assertEqual(t, keysCount, len(keys)) 45 | } 46 | 47 | func TestEntriesIteratorWithMostShardsEmpty(t *testing.T) { 48 | t.Parallel() 49 | 50 | // given 51 | clock := mockedClock{value: 0} 52 | cache, _ := newBigCache(context.Background(), Config{ 53 | Shards: 8, 54 | LifeWindow: 6 * time.Second, 55 | MaxEntriesInWindow: 1, 56 | MaxEntrySize: 256, 57 | }, &clock) 58 | 59 | cache.Set("key", []byte("value")) 60 | 61 | // when 62 | iterator := cache.Iterator() 63 | 64 | // 
then 65 | if !iterator.SetNext() { 66 | t.Errorf("Iterator should contain at least single element") 67 | } 68 | 69 | current, err := iterator.Value() 70 | 71 | // then 72 | noError(t, err) 73 | assertEqual(t, "key", current.Key()) 74 | assertEqual(t, uint64(0x3dc94a19365b10ec), current.Hash()) 75 | assertEqual(t, []byte("value"), current.Value()) 76 | assertEqual(t, uint64(0), current.Timestamp()) 77 | } 78 | 79 | func TestEntriesIteratorWithConcurrentUpdate(t *testing.T) { 80 | t.Parallel() 81 | 82 | // given 83 | cache, _ := New(context.Background(), Config{ 84 | Shards: 1, 85 | LifeWindow: time.Second, 86 | MaxEntriesInWindow: 1, 87 | MaxEntrySize: 256, 88 | }) 89 | 90 | cache.Set("key", []byte("value")) 91 | 92 | // when 93 | iterator := cache.Iterator() 94 | 95 | // then 96 | if !iterator.SetNext() { 97 | t.Errorf("Iterator should contain at least single element") 98 | } 99 | 100 | getOldestEntry := func(s *cacheShard) ([]byte, error) { 101 | s.lock.RLock() 102 | defer s.lock.RUnlock() 103 | return s.entries.Peek() 104 | } 105 | 106 | // Quite ugly but works 107 | for i := 0; i < cache.config.Shards; i++ { 108 | if oldestEntry, err := getOldestEntry(cache.shards[i]); err == nil { 109 | cache.onEvict(oldestEntry, 10, cache.shards[i].removeOldestEntry) 110 | } 111 | } 112 | 113 | current, err := iterator.Value() 114 | assertEqual(t, nil, err) 115 | assertEqual(t, []byte("value"), current.Value()) 116 | 117 | next := iterator.SetNext() 118 | assertEqual(t, false, next) 119 | } 120 | 121 | func TestEntriesIteratorWithAllShardsEmpty(t *testing.T) { 122 | t.Parallel() 123 | 124 | // given 125 | cache, _ := New(context.Background(), Config{ 126 | Shards: 1, 127 | LifeWindow: time.Second, 128 | MaxEntriesInWindow: 1, 129 | MaxEntrySize: 256, 130 | }) 131 | 132 | // when 133 | iterator := cache.Iterator() 134 | 135 | // then 136 | if iterator.SetNext() { 137 | t.Errorf("Iterator should not contain any elements") 138 | } 139 | } 140 | 141 | func TestEntriesIteratorInInvalidState(t *testing.T) { 142 | t.Parallel() 143 | 144 | // given 145 | cache, _ := New(context.Background(), Config{ 146 | Shards: 1, 147 | LifeWindow: time.Second, 148 | MaxEntriesInWindow: 1, 149 | MaxEntrySize: 256, 150 | }) 151 | 152 | // when 153 | iterator := cache.Iterator() 154 | 155 | // then 156 | _, err := iterator.Value() 157 | assertEqual(t, ErrInvalidIteratorState, err) 158 | assertEqual(t, "Iterator is in invalid state. 
Use SetNext() to move to next position", err.Error()) 159 | } 160 | 161 | func TestEntriesIteratorParallelAdd(t *testing.T) { 162 | bc, err := New(context.Background(), DefaultConfig(1*time.Minute)) 163 | if err != nil { 164 | panic(err) 165 | } 166 | 167 | wg := sync.WaitGroup{} 168 | wg.Add(1) 169 | go func() { 170 | for i := 0; i < 10000; i++ { 171 | err := bc.Set(strconv.Itoa(i), []byte("aaaaaaa")) 172 | if err != nil { 173 | panic(err) 174 | } 175 | 176 | runtime.Gosched() 177 | } 178 | wg.Done() 179 | }() 180 | 181 | for i := 0; i < 100; i++ { 182 | iter := bc.Iterator() 183 | for iter.SetNext() { 184 | _, _ = iter.Value() 185 | } 186 | } 187 | wg.Wait() 188 | } 189 | 190 | func TestParallelSetAndIteration(t *testing.T) { 191 | t.Parallel() 192 | 193 | rand.Seed(0) 194 | 195 | cache, _ := New(context.Background(), Config{ 196 | Shards: 1, 197 | LifeWindow: time.Second, 198 | MaxEntriesInWindow: 100, 199 | MaxEntrySize: 256, 200 | HardMaxCacheSize: 1, 201 | Verbose: true, 202 | }) 203 | 204 | entrySize := 1024 * 100 205 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 206 | defer cancel() 207 | 208 | wg := sync.WaitGroup{} 209 | wg.Add(2) 210 | 211 | go func() { 212 | defer func() { 213 | err := recover() 214 | // no panic 215 | assertEqual(t, err, nil) 216 | }() 217 | 218 | defer wg.Done() 219 | 220 | isTimeout := false 221 | 222 | for { 223 | if isTimeout { 224 | break 225 | } 226 | select { 227 | case <-ctx.Done(): 228 | isTimeout = true 229 | default: 230 | err := cache.Set(strconv.Itoa(rand.Intn(100)), blob('a', entrySize)) 231 | noError(t, err) 232 | } 233 | } 234 | }() 235 | 236 | go func() { 237 | defer func() { 238 | err := recover() 239 | // no panic 240 | assertEqual(t, nil, err) 241 | }() 242 | 243 | defer wg.Done() 244 | 245 | isTimeout := false 246 | 247 | for { 248 | if isTimeout { 249 | break 250 | } 251 | select { 252 | case <-ctx.Done(): 253 | isTimeout = true 254 | default: 255 | iter := cache.Iterator() 256 | for iter.SetNext() { 257 | entry, err := iter.Value() 258 | 259 | // then 260 | noError(t, err) 261 | assertEqual(t, entrySize, len(entry.Value())) 262 | } 263 | } 264 | } 265 | }() 266 | 267 | wg.Wait() 268 | } 269 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "log" 5 | "os" 6 | ) 7 | 8 | // Logger is invoked when `Config.Verbose=true` 9 | type Logger interface { 10 | Printf(format string, v ...interface{}) 11 | } 12 | 13 | // this is a safeguard, breaking on compile time in case 14 | // `log.Logger` does not adhere to our `Logger` interface. 
15 | // see https://golang.org/doc/faq#guarantee_satisfies_interface 16 | var _ Logger = &log.Logger{} 17 | 18 | // DefaultLogger returns a `Logger` implementation 19 | // backed by stdlib's log 20 | func DefaultLogger() *log.Logger { 21 | return log.New(os.Stdout, "", log.LstdFlags) 22 | } 23 | 24 | func newLogger(custom Logger) Logger { 25 | if custom != nil { 26 | return custom 27 | } 28 | 29 | return DefaultLogger() 30 | } 31 | -------------------------------------------------------------------------------- /queue/bytes_queue.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "encoding/binary" 5 | "log" 6 | "time" 7 | ) 8 | 9 | const ( 10 | // Number of bytes to encode 0 in uvarint format 11 | minimumHeaderSize = 17 // 1 byte blobsize + timestampSizeInBytes + hashSizeInBytes 12 | // Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index 13 | leftMarginIndex = 1 14 | ) 15 | 16 | var ( 17 | errEmptyQueue = &queueError{"Empty queue"} 18 | errInvalidIndex = &queueError{"Index must be greater than zero. Invalid index."} 19 | errIndexOutOfBounds = &queueError{"Index out of range"} 20 | ) 21 | 22 | // BytesQueue is a non-thread safe queue type of fifo based on bytes array. 23 | // For every push operation index of entry is returned. It can be used to read the entry later 24 | type BytesQueue struct { 25 | full bool 26 | array []byte 27 | capacity int 28 | maxCapacity int 29 | head int 30 | tail int 31 | count int 32 | rightMargin int 33 | headerBuffer []byte 34 | verbose bool 35 | } 36 | 37 | type queueError struct { 38 | message string 39 | } 40 | 41 | // getNeededSize returns the number of bytes an entry of length need in the queue 42 | func getNeededSize(length int) int { 43 | var header int 44 | switch { 45 | case length < 127: // 1<<7-1 46 | header = 1 47 | case length < 16382: // 1<<14-2 48 | header = 2 49 | case length < 2097149: // 1<<21 -3 50 | header = 3 51 | case length < 268435452: // 1<<28 -4 52 | header = 4 53 | default: 54 | header = 5 55 | } 56 | 57 | return length + header 58 | } 59 | 60 | // NewBytesQueue initialize new bytes queue. 61 | // capacity is used in bytes array allocation 62 | // When verbose flag is set then information about memory allocation are printed 63 | func NewBytesQueue(capacity int, maxCapacity int, verbose bool) *BytesQueue { 64 | return &BytesQueue{ 65 | array: make([]byte, capacity), 66 | capacity: capacity, 67 | maxCapacity: maxCapacity, 68 | headerBuffer: make([]byte, binary.MaxVarintLen32), 69 | tail: leftMarginIndex, 70 | head: leftMarginIndex, 71 | rightMargin: leftMarginIndex, 72 | verbose: verbose, 73 | } 74 | } 75 | 76 | // Reset removes all entries from queue 77 | func (q *BytesQueue) Reset() { 78 | // Just reset indexes 79 | q.tail = leftMarginIndex 80 | q.head = leftMarginIndex 81 | q.rightMargin = leftMarginIndex 82 | q.count = 0 83 | q.full = false 84 | } 85 | 86 | // Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed. 87 | // Returns index for pushed data or error if maximum size queue limit is reached. 88 | func (q *BytesQueue) Push(data []byte) (int, error) { 89 | neededSize := getNeededSize(len(data)) 90 | 91 | if !q.canInsertAfterTail(neededSize) { 92 | if q.canInsertBeforeHead(neededSize) { 93 | q.tail = leftMarginIndex 94 | } else if q.capacity+neededSize >= q.maxCapacity && q.maxCapacity > 0 { 95 | return -1, &queueError{"Full queue. 
Maximum size limit reached."} 96 | } else { 97 | q.allocateAdditionalMemory(neededSize) 98 | } 99 | } 100 | 101 | index := q.tail 102 | 103 | q.push(data, neededSize) 104 | 105 | return index, nil 106 | } 107 | 108 | func (q *BytesQueue) allocateAdditionalMemory(minimum int) { 109 | start := time.Now() 110 | if q.capacity < minimum { 111 | q.capacity += minimum 112 | } 113 | q.capacity = q.capacity * 2 114 | if q.capacity > q.maxCapacity && q.maxCapacity > 0 { 115 | q.capacity = q.maxCapacity 116 | } 117 | 118 | oldArray := q.array 119 | q.array = make([]byte, q.capacity) 120 | 121 | if leftMarginIndex != q.rightMargin { 122 | copy(q.array, oldArray[:q.rightMargin]) 123 | 124 | if q.tail <= q.head { 125 | if q.tail != q.head { 126 | // created slice is slightly larger than need but this is fine after only the needed bytes are copied 127 | q.push(make([]byte, q.head-q.tail), q.head-q.tail) 128 | } 129 | 130 | q.head = leftMarginIndex 131 | q.tail = q.rightMargin 132 | } 133 | } 134 | 135 | q.full = false 136 | 137 | if q.verbose { 138 | log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity) 139 | } 140 | } 141 | 142 | func (q *BytesQueue) push(data []byte, len int) { 143 | headerEntrySize := binary.PutUvarint(q.headerBuffer, uint64(len)) 144 | q.copy(q.headerBuffer, headerEntrySize) 145 | 146 | q.copy(data, len-headerEntrySize) 147 | 148 | if q.tail > q.head { 149 | q.rightMargin = q.tail 150 | } 151 | if q.tail == q.head { 152 | q.full = true 153 | } 154 | 155 | q.count++ 156 | } 157 | 158 | func (q *BytesQueue) copy(data []byte, len int) { 159 | q.tail += copy(q.array[q.tail:], data[:len]) 160 | } 161 | 162 | // Pop reads the oldest entry from queue and moves head pointer to the next one 163 | func (q *BytesQueue) Pop() ([]byte, error) { 164 | data, blockSize, err := q.peek(q.head) 165 | if err != nil { 166 | return nil, err 167 | } 168 | 169 | q.head += blockSize 170 | q.count-- 171 | 172 | if q.head == q.rightMargin { 173 | q.head = leftMarginIndex 174 | if q.tail == q.rightMargin { 175 | q.tail = leftMarginIndex 176 | } 177 | q.rightMargin = q.tail 178 | } 179 | 180 | q.full = false 181 | 182 | return data, nil 183 | } 184 | 185 | // Peek reads the oldest entry from list without moving head pointer 186 | func (q *BytesQueue) Peek() ([]byte, error) { 187 | data, _, err := q.peek(q.head) 188 | return data, err 189 | } 190 | 191 | // Get reads entry from index 192 | func (q *BytesQueue) Get(index int) ([]byte, error) { 193 | data, _, err := q.peek(index) 194 | return data, err 195 | } 196 | 197 | // CheckGet checks if an entry can be read from index 198 | func (q *BytesQueue) CheckGet(index int) error { 199 | return q.peekCheckErr(index) 200 | } 201 | 202 | // Capacity returns number of allocated bytes for queue 203 | func (q *BytesQueue) Capacity() int { 204 | return q.capacity 205 | } 206 | 207 | // Len returns number of entries kept in queue 208 | func (q *BytesQueue) Len() int { 209 | return q.count 210 | } 211 | 212 | // Error returns error message 213 | func (e *queueError) Error() string { 214 | return e.message 215 | } 216 | 217 | // peekCheckErr is identical to peek, but does not actually return any data 218 | func (q *BytesQueue) peekCheckErr(index int) error { 219 | 220 | if q.count == 0 { 221 | return errEmptyQueue 222 | } 223 | 224 | if index <= 0 { 225 | return errInvalidIndex 226 | } 227 | 228 | if index >= len(q.array) { 229 | return errIndexOutOfBounds 230 | } 231 | return nil 232 | } 233 | 234 | // peek returns the data from index and the 
number of bytes to encode the length of the data in uvarint format 235 | func (q *BytesQueue) peek(index int) ([]byte, int, error) { 236 | err := q.peekCheckErr(index) 237 | if err != nil { 238 | return nil, 0, err 239 | } 240 | 241 | blockSize, n := binary.Uvarint(q.array[index:]) 242 | return q.array[index+n : index+int(blockSize)], int(blockSize), nil 243 | } 244 | 245 | // canInsertAfterTail returns true if it's possible to insert an entry of size of need after the tail of the queue 246 | func (q *BytesQueue) canInsertAfterTail(need int) bool { 247 | if q.full { 248 | return false 249 | } 250 | if q.tail >= q.head { 251 | return q.capacity-q.tail >= need 252 | } 253 | // 1. there is exactly need bytes between head and tail, so we do not need 254 | // to reserve extra space for a potential empty entry when realloc this queue 255 | // 2. still have unused space between tail and head, then we must reserve 256 | // at least headerEntrySize bytes so we can put an empty entry 257 | return q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize 258 | } 259 | 260 | // canInsertBeforeHead returns true if it's possible to insert an entry of size of need before the head of the queue 261 | func (q *BytesQueue) canInsertBeforeHead(need int) bool { 262 | if q.full { 263 | return false 264 | } 265 | if q.tail >= q.head { 266 | return q.head-leftMarginIndex == need || q.head-leftMarginIndex >= need+minimumHeaderSize 267 | } 268 | return q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize 269 | } 270 | -------------------------------------------------------------------------------- /queue/bytes_queue_test.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "path" 7 | "reflect" 8 | "runtime" 9 | "testing" 10 | ) 11 | 12 | func TestPushAndPop(t *testing.T) { 13 | t.Parallel() 14 | 15 | // given 16 | queue := NewBytesQueue(10, 0, true) 17 | entry := []byte("hello") 18 | 19 | // when 20 | _, err := queue.Pop() 21 | 22 | // then 23 | assertEqual(t, "Empty queue", err.Error()) 24 | 25 | // when 26 | queue.Push(entry) 27 | 28 | // then 29 | assertEqual(t, entry, pop(queue)) 30 | } 31 | 32 | func TestLen(t *testing.T) { 33 | t.Parallel() 34 | 35 | // given 36 | queue := NewBytesQueue(100, 0, false) 37 | entry := []byte("hello") 38 | assertEqual(t, 0, queue.Len()) 39 | 40 | // when 41 | queue.Push(entry) 42 | 43 | // then 44 | assertEqual(t, queue.Len(), 1) 45 | } 46 | 47 | func TestPeek(t *testing.T) { 48 | t.Parallel() 49 | 50 | // given 51 | queue := NewBytesQueue(100, 0, false) 52 | entry := []byte("hello") 53 | 54 | // when 55 | read, err := queue.Peek() 56 | err2 := queue.peekCheckErr(queue.head) 57 | // then 58 | assertEqual(t, err, err2) 59 | assertEqual(t, "Empty queue", err.Error()) 60 | assertEqual(t, 0, len(read)) 61 | 62 | // when 63 | queue.Push(entry) 64 | read, err = queue.Peek() 65 | err2 = queue.peekCheckErr(queue.head) 66 | 67 | // then 68 | assertEqual(t, err, err2) 69 | noError(t, err) 70 | assertEqual(t, pop(queue), read) 71 | assertEqual(t, entry, read) 72 | } 73 | 74 | func TestResetFullQueue(t *testing.T) { 75 | t.Parallel() 76 | 77 | // given 78 | queue := NewBytesQueue(10, 20, false) 79 | 80 | // when 81 | queue.Push(blob('a', 3)) 82 | queue.Push(blob('b', 4)) 83 | 84 | // when 85 | assertEqual(t, blob('a', 3), pop(queue)) // space freed at the beginning 86 | _, err := queue.Push(blob('a', 3)) // will set q.full to true 87 | 88 | // then 89 | assertEqual(t, err, nil) 90 
| 91 | // when 92 | queue.Reset() 93 | queue.Push(blob('c', 8)) // should not trigger a re-allocation 94 | 95 | // then 96 | assertEqual(t, blob('c', 8), pop(queue)) 97 | assertEqual(t, queue.Capacity(), 10) 98 | } 99 | 100 | func TestReset(t *testing.T) { 101 | t.Parallel() 102 | 103 | // given 104 | queue := NewBytesQueue(100, 0, false) 105 | entry := []byte("hello") 106 | 107 | // when 108 | queue.Push(entry) 109 | queue.Push(entry) 110 | queue.Push(entry) 111 | 112 | queue.Reset() 113 | read, err := queue.Peek() 114 | 115 | // then 116 | assertEqual(t, "Empty queue", err.Error()) 117 | assertEqual(t, 0, len(read)) 118 | 119 | // when 120 | queue.Push(entry) 121 | read, err = queue.Peek() 122 | 123 | // then 124 | noError(t, err) 125 | assertEqual(t, pop(queue), read) 126 | assertEqual(t, entry, read) 127 | 128 | // when 129 | read, err = queue.Peek() 130 | 131 | // then 132 | assertEqual(t, "Empty queue", err.Error()) 133 | assertEqual(t, 0, len(read)) 134 | } 135 | 136 | func TestReuseAvailableSpace(t *testing.T) { 137 | t.Parallel() 138 | 139 | // given 140 | queue := NewBytesQueue(100, 0, false) 141 | 142 | // when 143 | queue.Push(blob('a', 70)) 144 | queue.Push(blob('b', 20)) 145 | queue.Pop() 146 | queue.Push(blob('c', 20)) 147 | 148 | // then 149 | assertEqual(t, 100, queue.Capacity()) 150 | assertEqual(t, blob('b', 20), pop(queue)) 151 | } 152 | 153 | func TestAllocateAdditionalSpace(t *testing.T) { 154 | t.Parallel() 155 | 156 | // given 157 | queue := NewBytesQueue(11, 0, false) 158 | 159 | // when 160 | queue.Push([]byte("hello1")) 161 | queue.Push([]byte("hello2")) 162 | 163 | // then 164 | assertEqual(t, 22, queue.Capacity()) 165 | } 166 | 167 | func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereHeadIsBeforeTail(t *testing.T) { 168 | t.Parallel() 169 | 170 | // given 171 | queue := NewBytesQueue(25, 0, false) 172 | 173 | // when 174 | queue.Push(blob('a', 3)) // header + entry + left margin = 5 bytes 175 | queue.Push(blob('b', 6)) // additional 7 bytes 176 | queue.Pop() // space freed, 4 bytes available at the beginning 177 | queue.Push(blob('c', 6)) // 7 bytes needed, 13 bytes available at the tail 178 | 179 | // then 180 | assertEqual(t, 25, queue.Capacity()) 181 | assertEqual(t, blob('b', 6), pop(queue)) 182 | assertEqual(t, blob('c', 6), pop(queue)) 183 | } 184 | 185 | func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereHeadIsBeforeTail(t *testing.T) { 186 | t.Parallel() 187 | 188 | // given 189 | queue := NewBytesQueue(25, 0, false) 190 | 191 | // when 192 | queue.Push(blob('a', 3)) // header + entry + left margin = 5 bytes 193 | index, _ := queue.Push(blob('b', 6)) // additional 7 bytes 194 | queue.Pop() // space freed, 4 bytes available at the beginning 195 | newestIndex, _ := queue.Push(blob('c', 6)) // 7 bytes needed, 13 available at the tail 196 | 197 | // then 198 | assertEqual(t, 25, queue.Capacity()) 199 | assertEqual(t, blob('b', 6), get(queue, index)) 200 | assertEqual(t, blob('c', 6), get(queue, newestIndex)) 201 | } 202 | 203 | func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead(t *testing.T) { 204 | t.Parallel() 205 | 206 | // given 207 | queue := NewBytesQueue(100, 0, false) 208 | 209 | // when 210 | queue.Push(blob('a', 70)) // header + entry + left margin = 72 bytes 211 | queue.Push(blob('b', 10)) // 72 + 10 + 1 = 83 bytes 212 | queue.Pop() // space freed at the beginning 213 | queue.Push(blob('c', 30)) // 31 bytes used at the beginning, tail pointer is before head pointer 214 | 
queue.Push(blob('d', 40)) // 41 bytes needed but no available in one segment, allocate new memory 215 | 216 | // then 217 | assertEqual(t, 200, queue.Capacity()) 218 | assertEqual(t, blob('c', 30), pop(queue)) 219 | // empty blob fills space between tail and head, 220 | // created when additional memory was allocated, 221 | // it keeps current entries indexes unchanged 222 | assertEqual(t, blob(0, 39), pop(queue)) 223 | assertEqual(t, blob('b', 10), pop(queue)) 224 | assertEqual(t, blob('d', 40), pop(queue)) 225 | } 226 | 227 | func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead128(t *testing.T) { 228 | t.Parallel() 229 | 230 | // given 231 | queue := NewBytesQueue(200, 0, false) 232 | 233 | // when 234 | queue.Push(blob('a', 30)) // header + entry + left margin = 32 bytes 235 | queue.Push(blob('b', 1)) // 32 + 128 + 1 = 161 bytes 236 | queue.Push(blob('b', 125)) // 32 + 128 + 1 = 161 bytes 237 | queue.Push(blob('c', 20)) // 160 + 20 + 1 = 182 238 | queue.Pop() // space freed at the beginning 239 | queue.Pop() // free 2 bytes 240 | queue.Pop() // free 126 241 | queue.Push(blob('d', 30)) // 31 bytes used at the beginning, tail pointer is before head pointer, now free space is 128 bytes 242 | queue.Push(blob('e', 160)) // invoke allocateAdditionalMemory but fill 127 bytes free space (It should be 128 bytes, but 127 are filled, leaving one byte unfilled) 243 | 244 | // then 245 | assertEqual(t, 400, queue.Capacity()) 246 | assertEqual(t, blob('d', 30), pop(queue)) 247 | assertEqual(t, blob(0, 126), pop(queue)) //126 bytes data with 2bytes header only possible as empty entry 248 | assertEqual(t, blob('c', 20), pop(queue)) //The data is not expected 249 | assertEqual(t, blob('e', 160), pop(queue)) 250 | } 251 | 252 | func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereTailIsBeforeHead(t *testing.T) { 253 | t.Parallel() 254 | 255 | // given 256 | queue := NewBytesQueue(100, 0, false) 257 | 258 | // when 259 | queue.Push(blob('a', 70)) // header + entry + left margin = 72 bytes 260 | index, _ := queue.Push(blob('b', 10)) // 72 + 10 + 1 = 83 bytes 261 | queue.Pop() // space freed at the beginning 262 | queue.Push(blob('c', 30)) // 31 bytes used at the beginning, tail pointer is before head pointer 263 | newestIndex, _ := queue.Push(blob('d', 40)) // 41 bytes needed but no available in one segment, allocate new memory 264 | 265 | // then 266 | assertEqual(t, 200, queue.Capacity()) 267 | assertEqual(t, blob('b', 10), get(queue, index)) 268 | assertEqual(t, blob('d', 40), get(queue, newestIndex)) 269 | } 270 | 271 | func TestAllocateAdditionalSpaceForValueBiggerThanInitQueue(t *testing.T) { 272 | t.Parallel() 273 | 274 | // given 275 | queue := NewBytesQueue(11, 0, false) 276 | 277 | // when 278 | queue.Push(blob('a', 100)) 279 | // then 280 | assertEqual(t, blob('a', 100), pop(queue)) 281 | // 224 = (101 + 11) * 2 282 | assertEqual(t, 224, queue.Capacity()) 283 | } 284 | 285 | func TestAllocateAdditionalSpaceForValueBiggerThanQueue(t *testing.T) { 286 | t.Parallel() 287 | 288 | // given 289 | queue := NewBytesQueue(21, 0, false) 290 | 291 | // when 292 | queue.Push(make([]byte, 2)) 293 | queue.Push(make([]byte, 2)) 294 | queue.Push(make([]byte, 100)) 295 | 296 | // then 297 | queue.Pop() 298 | queue.Pop() 299 | assertEqual(t, make([]byte, 100), pop(queue)) 300 | // 244 = (101 + 21) * 2 301 | assertEqual(t, 244, queue.Capacity()) 302 | } 303 | 304 | func TestPopWholeQueue(t *testing.T) { 305 | t.Parallel() 306 | 307 | // given 308 | queue := 
NewBytesQueue(13, 0, false) 309 | 310 | // when 311 | queue.Push([]byte("a")) 312 | queue.Push([]byte("b")) 313 | queue.Pop() 314 | queue.Pop() 315 | queue.Push([]byte("c")) 316 | 317 | // then 318 | assertEqual(t, 13, queue.Capacity()) 319 | assertEqual(t, []byte("c"), pop(queue)) 320 | } 321 | 322 | func TestGetEntryFromIndex(t *testing.T) { 323 | t.Parallel() 324 | 325 | // given 326 | queue := NewBytesQueue(20, 0, false) 327 | 328 | // when 329 | queue.Push([]byte("a")) 330 | index, _ := queue.Push([]byte("b")) 331 | queue.Push([]byte("c")) 332 | result, _ := queue.Get(index) 333 | 334 | // then 335 | assertEqual(t, []byte("b"), result) 336 | } 337 | 338 | func TestGetEntryFromInvalidIndex(t *testing.T) { 339 | t.Parallel() 340 | 341 | // given 342 | queue := NewBytesQueue(1, 0, false) 343 | queue.Push([]byte("a")) 344 | 345 | // when 346 | result, err := queue.Get(0) 347 | err2 := queue.CheckGet(0) 348 | 349 | // then 350 | assertEqual(t, err, err2) 351 | assertEqual(t, []byte(nil), result) 352 | assertEqual(t, "Index must be greater than zero. Invalid index.", err.Error()) 353 | } 354 | 355 | func TestGetEntryFromIndexOutOfRange(t *testing.T) { 356 | t.Parallel() 357 | 358 | // given 359 | queue := NewBytesQueue(1, 0, false) 360 | queue.Push([]byte("a")) 361 | 362 | // when 363 | result, err := queue.Get(42) 364 | err2 := queue.CheckGet(42) 365 | 366 | // then 367 | assertEqual(t, err, err2) 368 | assertEqual(t, []byte(nil), result) 369 | assertEqual(t, "Index out of range", err.Error()) 370 | } 371 | 372 | func TestGetEntryFromEmptyQueue(t *testing.T) { 373 | t.Parallel() 374 | 375 | // given 376 | queue := NewBytesQueue(13, 0, false) 377 | 378 | // when 379 | result, err := queue.Get(1) 380 | err2 := queue.CheckGet(1) 381 | 382 | // then 383 | assertEqual(t, err, err2) 384 | assertEqual(t, []byte(nil), result) 385 | assertEqual(t, "Empty queue", err.Error()) 386 | } 387 | 388 | func TestMaxSizeLimit(t *testing.T) { 389 | t.Parallel() 390 | 391 | // given 392 | queue := NewBytesQueue(30, 50, false) 393 | 394 | // when 395 | queue.Push(blob('a', 25)) 396 | queue.Push(blob('b', 5)) 397 | capacity := queue.Capacity() 398 | _, err := queue.Push(blob('c', 20)) 399 | 400 | // then 401 | assertEqual(t, 50, capacity) 402 | assertEqual(t, "Full queue. 
Maximum size limit reached.", err.Error()) 403 | assertEqual(t, blob('a', 25), pop(queue)) 404 | assertEqual(t, blob('b', 5), pop(queue)) 405 | } 406 | 407 | func TestPushEntryAfterAllocateAdditionMemory(t *testing.T) { 408 | t.Parallel() 409 | 410 | // given 411 | queue := NewBytesQueue(9, 20, true) 412 | 413 | // when 414 | queue.Push([]byte("aaa")) 415 | queue.Push([]byte("bb")) 416 | queue.Pop() 417 | 418 | // allocate more memory 419 | assertEqual(t, 9, queue.Capacity()) 420 | queue.Push([]byte("c")) 421 | assertEqual(t, 18, queue.Capacity()) 422 | 423 | // push after allocate 424 | _, err := queue.Push([]byte("d")) 425 | noError(t, err) 426 | } 427 | 428 | func TestPushEntryAfterAllocateAdditionMemoryInFull(t *testing.T) { 429 | t.Parallel() 430 | 431 | // given 432 | queue := NewBytesQueue(9, 40, true) 433 | 434 | // when 435 | queue.Push([]byte("aaa")) 436 | queue.Push([]byte("bb")) 437 | _, err := queue.Pop() 438 | noError(t, err) 439 | 440 | queue.Push([]byte("c")) 441 | queue.Push([]byte("d")) 442 | queue.Push([]byte("e")) 443 | _, err = queue.Pop() 444 | noError(t, err) 445 | _, err = queue.Pop() 446 | noError(t, err) 447 | queue.Push([]byte("fff")) 448 | _, err = queue.Pop() 449 | noError(t, err) 450 | } 451 | 452 | func pop(queue *BytesQueue) []byte { 453 | entry, err := queue.Pop() 454 | if err != nil { 455 | panic(err) 456 | } 457 | return entry 458 | } 459 | 460 | func get(queue *BytesQueue, index int) []byte { 461 | entry, err := queue.Get(index) 462 | if err != nil { 463 | panic(err) 464 | } 465 | return entry 466 | } 467 | 468 | func blob(char byte, len int) []byte { 469 | return bytes.Repeat([]byte{char}, len) 470 | } 471 | 472 | func assertEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { 473 | if !objectsAreEqual(expected, actual) { 474 | _, file, line, _ := runtime.Caller(1) 475 | file = path.Base(file) 476 | t.Errorf(fmt.Sprintf("\n%s:%d: Not equal: \n"+ 477 | "expected: %T(%#v)\n"+ 478 | "actual : %T(%#v)\n", 479 | file, line, expected, expected, actual, actual), msgAndArgs...) 480 | } 481 | } 482 | 483 | func noError(t *testing.T, e error) { 484 | if e != nil { 485 | _, file, line, _ := runtime.Caller(1) 486 | file = path.Base(file) 487 | t.Errorf(fmt.Sprintf("\n%s:%d: Error is not nil: \n"+ 488 | "actual : %T(%#v)\n", file, line, e, e)) 489 | } 490 | } 491 | 492 | func objectsAreEqual(expected, actual interface{}) bool { 493 | if expected == nil || actual == nil { 494 | return expected == actual 495 | } 496 | 497 | exp, ok := expected.([]byte) 498 | if !ok { 499 | return reflect.DeepEqual(expected, actual) 500 | } 501 | 502 | act, ok := actual.([]byte) 503 | if !ok { 504 | return false 505 | } 506 | if exp == nil || act == nil { 507 | return exp == nil && act == nil 508 | } 509 | return bytes.Equal(exp, act) 510 | } 511 | -------------------------------------------------------------------------------- /server/README.md: -------------------------------------------------------------------------------- 1 | # BigCache HTTP Server 2 | 3 | This is a basic HTTP server implementation for BigCache. It has a basic RESTful API and is designed for easy operational deployments. This server is intended to be consumed as a standalone executable, for things like Cloud Foundry, Heroku, etc. A design goal is versatility, so if you want to cache pictures, software artifacts, text, or any type of bit, the BigCache HTTP Server should fit your needs. 4 | 5 | ```bash 6 | # cache API. 
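# Response codes, summarised from server/cache_handlers.go further down in this repository:
#   PUT    stores the raw request body under {key} and answers 201 Created.
#   GET    returns the stored bytes with 200 OK, or 404 Not Found for a missing key.
#   DELETE answers 200 OK on success and 404 Not Found for a missing key.
#   GET and PUT with an empty {key} answer 400 Bad Request.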
7 | GET /api/v1/cache/{key} 8 | PUT /api/v1/cache/{key} 9 | DELETE /api/v1/cache/{key} 10 | 11 | # stats API. 12 | GET /api/v1/stats 13 | ``` 14 | 15 | The cache API is designed for ease-of-use caching and accepts any content type. The stats API will return hit and miss statistics about the cache since the last time the server was started - they will reset whenever the server is restarted. 16 | 17 | ### Notes for Operators 18 | 19 | 1. No SSL support, currently. 20 | 1. No authentication, currently. 21 | 1. Stats from the stats API are not persistent. 22 | 1. The easiest way to clean the cache is to restart the process; it takes less than a second to initialise. 23 | 1. There is no replication or clustering. 24 | 25 | ### Command-line Interface 26 | 27 | ```powershell 28 | PS C:\go\src\github.com\mxplusb\bigcache\server> .\server.exe -h 29 | Usage of C:\go\src\github.com\mxplusb\bigcache\server\server.exe: 30 | -lifetime duration 31 | Lifetime of each cache object. (default 10m0s) 32 | -logfile string 33 | Location of the logfile. 34 | -max int 35 | Maximum amount of data in the cache in MB. (default 8192) 36 | -maxInWindow int 37 | Used only in initial memory allocation. (default 600000) 38 | -maxShardEntrySize int 39 | The maximum size of each object stored in a shard. Used only in initial memory allocation. (default 500) 40 | -port int 41 | The port to listen on. (default 9090) 42 | -shards int 43 | Number of shards for the cache. (default 1024) 44 | -v Verbose logging. 45 | -version 46 | Print server version. 47 | ``` 48 | 49 | Example: 50 | 51 | ```bash 52 | $ curl -v -XPUT localhost:9090/api/v1/cache/example -d "yay!" 53 | * Trying 127.0.0.1... 54 | * Connected to localhost (127.0.0.1) port 9090 (#0) 55 | > PUT /api/v1/cache/example HTTP/1.1 56 | > Host: localhost:9090 57 | > User-Agent: curl/7.47.0 58 | > Accept: */* 59 | > Content-Length: 4 60 | > Content-Type: application/x-www-form-urlencoded 61 | > 62 | * upload completely sent off: 4 out of 4 bytes 63 | < HTTP/1.1 201 Created 64 | < Date: Fri, 17 Nov 2017 03:50:07 GMT 65 | < Content-Length: 0 66 | < Content-Type: text/plain; charset=utf-8 67 | < 68 | * Connection #0 to host localhost left intact 69 | $ 70 | $ curl -v -XGET localhost:9090/api/v1/cache/example 71 | Note: Unnecessary use of -X or --request, GET is already inferred. 72 | * Trying 127.0.0.1... 73 | * Connected to localhost (127.0.0.1) port 9090 (#0) 74 | > GET /api/v1/cache/example HTTP/1.1 75 | > Host: localhost:9090 76 | > User-Agent: curl/7.47.0 77 | > Accept: */* 78 | > 79 | < HTTP/1.1 200 OK 80 | < Date: Fri, 17 Nov 2017 03:50:23 GMT 81 | < Content-Length: 4 82 | < Content-Type: text/plain; charset=utf-8 83 | < 84 | * Connection #0 to host localhost left intact 85 | yay! 86 | ``` 87 | 88 | The server does log basic metrics: 89 | 90 | ```bash 91 | $ ./server 92 | 2017/11/16 22:49:22 cache initialised. 93 | 2017/11/16 22:49:22 starting server on :9090 94 | 2017/11/16 22:50:07 stored "example" in cache. 95 | 2017/11/16 22:50:07 request took 277000ns. 96 | 2017/11/16 22:50:23 request took 9000ns. 97 | ``` 98 | 99 | ### Acquiring Natively 100 | 101 | This is native Go with no external dependencies, so it will compile for all supported Golang platforms. 
To build: 102 | 103 | ```bash 104 | go build server.go 105 | ``` 106 | -------------------------------------------------------------------------------- /server/cache_handlers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "log" 6 | "net/http" 7 | "strings" 8 | ) 9 | 10 | func cacheIndexHandler() http.Handler { 11 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 12 | switch r.Method { 13 | case http.MethodGet: 14 | getCacheHandler(w, r) 15 | case http.MethodPut: 16 | putCacheHandler(w, r) 17 | case http.MethodDelete: 18 | deleteCacheHandler(w, r) 19 | } 20 | }) 21 | } 22 | 23 | func cacheClearHandler() http.Handler { 24 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 25 | clearCache(w, r) 26 | }) 27 | } 28 | 29 | func clearCache(w http.ResponseWriter, r *http.Request) { 30 | if err := cache.Reset(); err != nil { 31 | w.WriteHeader(http.StatusInternalServerError) 32 | log.Printf("internal cache error: %s", err) 33 | } 34 | log.Println("cache is successfully cleared") 35 | w.WriteHeader(http.StatusOK) 36 | } 37 | 38 | // handles get requests. 39 | func getCacheHandler(w http.ResponseWriter, r *http.Request) { 40 | target := r.URL.Path[len(cachePath):] 41 | if target == "" { 42 | w.WriteHeader(http.StatusBadRequest) 43 | w.Write([]byte("can't get a key if there is no key.")) 44 | log.Print("empty request.") 45 | return 46 | } 47 | entry, err := cache.Get(target) 48 | if err != nil { 49 | errMsg := (err).Error() 50 | if strings.Contains(errMsg, "not found") { 51 | log.Print(err) 52 | w.WriteHeader(http.StatusNotFound) 53 | return 54 | } 55 | log.Print(err) 56 | w.WriteHeader(http.StatusInternalServerError) 57 | return 58 | } 59 | w.Write(entry) 60 | } 61 | 62 | func putCacheHandler(w http.ResponseWriter, r *http.Request) { 63 | target := r.URL.Path[len(cachePath):] 64 | if target == "" { 65 | w.WriteHeader(http.StatusBadRequest) 66 | w.Write([]byte("can't put a key if there is no key.")) 67 | log.Print("empty request.") 68 | return 69 | } 70 | 71 | entry, err := io.ReadAll(r.Body) 72 | if err != nil { 73 | log.Print(err) 74 | w.WriteHeader(http.StatusInternalServerError) 75 | return 76 | } 77 | 78 | if err := cache.Set(target, []byte(entry)); err != nil { 79 | log.Print(err) 80 | w.WriteHeader(http.StatusInternalServerError) 81 | return 82 | } 83 | log.Printf("stored \"%s\" in cache.", target) 84 | w.WriteHeader(http.StatusCreated) 85 | } 86 | 87 | // delete cache objects. 88 | func deleteCacheHandler(w http.ResponseWriter, r *http.Request) { 89 | target := r.URL.Path[len(cachePath):] 90 | if err := cache.Delete(target); err != nil { 91 | if strings.Contains((err).Error(), "not found") { 92 | w.WriteHeader(http.StatusNotFound) 93 | log.Printf("%s not found.", target) 94 | return 95 | } 96 | w.WriteHeader(http.StatusInternalServerError) 97 | log.Printf("internal cache error: %s", err) 98 | } 99 | // this is what the RFC says to use when calling DELETE. 100 | w.WriteHeader(http.StatusOK) 101 | } 102 | -------------------------------------------------------------------------------- /server/middleware.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | "time" 7 | ) 8 | 9 | // our base middleware implementation. 10 | type service func(http.Handler) http.Handler 11 | 12 | // chain load middleware services. 
13 | func serviceLoader(h http.Handler, svcs ...service) http.Handler { 14 | for _, svc := range svcs { 15 | h = svc(h) 16 | } 17 | return h 18 | } 19 | 20 | // middleware for request length metrics. 21 | func requestMetrics(l *log.Logger) service { 22 | return func(h http.Handler) http.Handler { 23 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 24 | start := time.Now() 25 | h.ServeHTTP(w, r) 26 | l.Printf("%s request to %s took %vns.", r.Method, r.URL.Path, time.Since(start).Nanoseconds()) 27 | }) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /server/middleware_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | ) 10 | 11 | func emptyTestHandler() service { 12 | return func(h http.Handler) http.Handler { 13 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 14 | w.WriteHeader(http.StatusAccepted) 15 | }) 16 | } 17 | } 18 | 19 | func TestServiceLoader(t *testing.T) { 20 | req, err := http.NewRequest("GET", "/api/v1/stats", nil) 21 | if err != nil { 22 | t.Error(err) 23 | } 24 | rr := httptest.NewRecorder() 25 | testHandlers := serviceLoader(cacheIndexHandler(), emptyTestHandler()) 26 | testHandlers.ServeHTTP(rr, req) 27 | if status := rr.Code; status != http.StatusAccepted { 28 | t.Errorf("handlers not loading properly. want: 202, got: %d", rr.Code) 29 | } 30 | } 31 | 32 | func TestRequestMetrics(t *testing.T) { 33 | var b bytes.Buffer 34 | logger := log.New(&b, "", log.LstdFlags) 35 | req, err := http.NewRequest("GET", "/api/v1/cache/empty", nil) 36 | if err != nil { 37 | t.Error(err) 38 | } 39 | rr := httptest.NewRecorder() 40 | testHandlers := serviceLoader(cacheIndexHandler(), requestMetrics(logger)) 41 | testHandlers.ServeHTTP(rr, req) 42 | targetTestString := b.String() 43 | if len(targetTestString) == 0 { 44 | t.Errorf("we are not logging request length strings.") 45 | } 46 | t.Log(targetTestString) 47 | } 48 | -------------------------------------------------------------------------------- /server/server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "net/http" 9 | "os" 10 | "strconv" 11 | 12 | "github.com/allegro/bigcache/v3" 13 | ) 14 | 15 | const ( 16 | // base HTTP paths. 17 | apiVersion = "v1" 18 | apiBasePath = "/api/" + apiVersion + "/" 19 | 20 | // path to cache. 21 | cachePath = apiBasePath + "cache/" 22 | statsPath = apiBasePath + "stats" 23 | cacheClearPath = apiBasePath + "cache/clear" 24 | // server version. 25 | version = "1.0.0" 26 | ) 27 | 28 | var ( 29 | port int 30 | logfile string 31 | ver bool 32 | 33 | // cache-specific settings. 
34 | cache *bigcache.BigCache 35 | config = bigcache.Config{} 36 | ) 37 | 38 | func init() { 39 | flag.BoolVar(&config.Verbose, "v", false, "Verbose logging.") 40 | flag.IntVar(&config.Shards, "shards", 1024, "Number of shards for the cache.") 41 | flag.IntVar(&config.MaxEntriesInWindow, "maxInWindow", 1000*10*60, "Used only in initial memory allocation.") 42 | flag.DurationVar(&config.LifeWindow, "lifetime", 100000*100000*60, "Lifetime of each cache object.") 43 | flag.IntVar(&config.HardMaxCacheSize, "max", 8192, "Maximum amount of data in the cache in MB.") 44 | flag.IntVar(&config.MaxEntrySize, "maxShardEntrySize", 500, "The maximum size of each object stored in a shard. Used only in initial memory allocation.") 45 | flag.IntVar(&port, "port", 9090, "The port to listen on.") 46 | flag.StringVar(&logfile, "logfile", "", "Location of the logfile.") 47 | flag.BoolVar(&ver, "version", false, "Print server version.") 48 | } 49 | 50 | func main() { 51 | flag.Parse() 52 | 53 | if ver { 54 | fmt.Printf("BigCache HTTP Server v%s", version) 55 | os.Exit(0) 56 | } 57 | 58 | var logger *log.Logger 59 | 60 | if logfile == "" { 61 | logger = log.New(os.Stdout, "", log.LstdFlags) 62 | } else { 63 | f, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, 0600) 64 | if err != nil { 65 | panic(err) 66 | } 67 | logger = log.New(f, "", log.LstdFlags) 68 | } 69 | 70 | var err error 71 | cache, err = bigcache.New(context.Background(), config) 72 | if err != nil { 73 | logger.Fatal(err) 74 | } 75 | 76 | logger.Print("cache initialised.") 77 | 78 | // let the middleware log. 79 | http.Handle(cacheClearPath, serviceLoader(cacheClearHandler(), requestMetrics(logger))) 80 | http.Handle(cachePath, serviceLoader(cacheIndexHandler(), requestMetrics(logger))) 81 | http.Handle(statsPath, serviceLoader(statsIndexHandler(), requestMetrics(logger))) 82 | 83 | logger.Printf("starting server on :%d", port) 84 | 85 | strPort := ":" + strconv.Itoa(port) 86 | log.Fatal("ListenAndServe: ", http.ListenAndServe(strPort, nil)) 87 | } 88 | -------------------------------------------------------------------------------- /server/server_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "io" 9 | "net/http/httptest" 10 | "testing" 11 | "time" 12 | 13 | "github.com/allegro/bigcache/v3" 14 | ) 15 | 16 | const ( 17 | testBaseString = "http://bigcache.org" 18 | ) 19 | 20 | func testCacheSetup() { 21 | cache, _ = bigcache.New(context.Background(), bigcache.Config{ 22 | Shards: 1024, 23 | LifeWindow: 10 * time.Minute, 24 | MaxEntriesInWindow: 1000 * 10 * 60, 25 | MaxEntrySize: 500, 26 | Verbose: true, 27 | HardMaxCacheSize: 8192, 28 | OnRemove: nil, 29 | }) 30 | } 31 | 32 | func TestMain(m *testing.M) { 33 | testCacheSetup() 34 | m.Run() 35 | } 36 | 37 | func TestGetWithNoKey(t *testing.T) { 38 | t.Parallel() 39 | req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/", nil) 40 | rr := httptest.NewRecorder() 41 | 42 | getCacheHandler(rr, req) 43 | resp := rr.Result() 44 | 45 | if resp.StatusCode != 400 { 46 | t.Errorf("want: 400; got: %d", resp.StatusCode) 47 | } 48 | } 49 | 50 | func TestGetWithMissingKey(t *testing.T) { 51 | t.Parallel() 52 | req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/doesNotExist", nil) 53 | rr := httptest.NewRecorder() 54 | 55 | getCacheHandler(rr, req) 56 | resp := rr.Result() 57 | 58 | if resp.StatusCode != 404 { 59 | t.Errorf("want: 404; got: %d", 
resp.StatusCode) 60 | } 61 | } 62 | 63 | func TestGetKey(t *testing.T) { 64 | t.Parallel() 65 | req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/getKey", nil) 66 | rr := httptest.NewRecorder() 67 | 68 | // set something. 69 | cache.Set("getKey", []byte("123")) 70 | 71 | getCacheHandler(rr, req) 72 | resp := rr.Result() 73 | 74 | body, err := io.ReadAll(resp.Body) 75 | if err != nil { 76 | t.Errorf("cannot deserialise test response: %s", err) 77 | } 78 | 79 | if string(body) != "123" { 80 | t.Errorf("want: 123; got: %s.\n\tcan't get existing key getKey.", string(body)) 81 | } 82 | } 83 | 84 | func TestPutKey(t *testing.T) { 85 | t.Parallel() 86 | req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer([]byte("123"))) 87 | rr := httptest.NewRecorder() 88 | 89 | putCacheHandler(rr, req) 90 | 91 | testPutKeyResult, err := cache.Get("putKey") 92 | if err != nil { 93 | t.Errorf("error returning cache entry: %s", err) 94 | } 95 | 96 | if string(testPutKeyResult) != "123" { 97 | t.Errorf("want: 123; got: %s.\n\tcan't get PUT key putKey.", string(testPutKeyResult)) 98 | } 99 | } 100 | 101 | func TestPutEmptyKey(t *testing.T) { 102 | t.Parallel() 103 | 104 | req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123"))) 105 | rr := httptest.NewRecorder() 106 | 107 | putCacheHandler(rr, req) 108 | resp := rr.Result() 109 | 110 | if resp.StatusCode != 400 { 111 | t.Errorf("want: 400; got: %d.\n\tempty key insertion should return with 400", resp.StatusCode) 112 | } 113 | } 114 | 115 | func TestDeleteEmptyKey(t *testing.T) { 116 | t.Parallel() 117 | 118 | req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123"))) 119 | rr := httptest.NewRecorder() 120 | 121 | deleteCacheHandler(rr, req) 122 | resp := rr.Result() 123 | 124 | if resp.StatusCode != 404 { 125 | t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete empty keys.", resp.StatusCode) 126 | } 127 | } 128 | 129 | func TestDeleteInvalidKey(t *testing.T) { 130 | t.Parallel() 131 | 132 | req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/invalidDeleteKey", bytes.NewBuffer([]byte("123"))) 133 | rr := httptest.NewRecorder() 134 | 135 | deleteCacheHandler(rr, req) 136 | resp := rr.Result() 137 | 138 | if resp.StatusCode != 404 { 139 | t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete invalid keys.", resp.StatusCode) 140 | } 141 | } 142 | 143 | func TestDeleteKey(t *testing.T) { 144 | t.Parallel() 145 | 146 | req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testDeleteKey", bytes.NewBuffer([]byte("123"))) 147 | rr := httptest.NewRecorder() 148 | 149 | if err := cache.Set("testDeleteKey", []byte("123")); err != nil { 150 | t.Errorf("can't set key for testing. 
%s", err) 151 | } 152 | 153 | deleteCacheHandler(rr, req) 154 | resp := rr.Result() 155 | 156 | if resp.StatusCode != 200 { 157 | t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode) 158 | } 159 | } 160 | 161 | func TestClearCache(t *testing.T) { 162 | t.Parallel() 163 | 164 | putRequest := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer([]byte("123"))) 165 | putResponseRecorder := httptest.NewRecorder() 166 | 167 | putCacheHandler(putResponseRecorder, putRequest) 168 | 169 | requestClear := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/clear", nil) 170 | rr := httptest.NewRecorder() 171 | 172 | if err := cache.Set("testDeleteKey", []byte("123")); err != nil { 173 | t.Errorf("can't set key for testing. %s", err) 174 | } 175 | 176 | clearCache(rr, requestClear) 177 | resp := rr.Result() 178 | 179 | if resp.StatusCode != 200 { 180 | t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode) 181 | } 182 | } 183 | func TestGetStats(t *testing.T) { 184 | t.Parallel() 185 | var testStats bigcache.Stats 186 | 187 | req := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil) 188 | rr := httptest.NewRecorder() 189 | 190 | // manually enter a key so there are some stats. get it so there's at least 1 hit. 191 | if err := cache.Set("incrementStats", []byte("123")); err != nil { 192 | t.Errorf("error setting cache value. error %s", err) 193 | } 194 | // it's okay if this fails, since we'll catch it downstream. 195 | if _, err := cache.Get("incrementStats"); err != nil { 196 | t.Errorf("can't find incrementStats. error: %s", err) 197 | } 198 | 199 | getCacheStatsHandler(rr, req) 200 | resp := rr.Result() 201 | 202 | if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil { 203 | t.Errorf("error decoding cache stats. error: %s", err) 204 | } 205 | 206 | if testStats.Hits == 0 { 207 | t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.") 208 | } 209 | } 210 | 211 | func TestGetStatsIndex(t *testing.T) { 212 | t.Parallel() 213 | var testStats bigcache.Stats 214 | 215 | getreq := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil) 216 | putreq := httptest.NewRequest("PUT", testBaseString+"/api/v1/stats", nil) 217 | rr := httptest.NewRecorder() 218 | 219 | // manually enter a key so there are some stats. get it so there's at least 1 hit. 220 | if err := cache.Set("incrementStats", []byte("123")); err != nil { 221 | t.Errorf("error setting cache value. error %s", err) 222 | } 223 | // it's okay if this fails, since we'll catch it downstream. 224 | if _, err := cache.Get("incrementStats"); err != nil { 225 | t.Errorf("can't find incrementStats. error: %s", err) 226 | } 227 | 228 | testHandlers := statsIndexHandler() 229 | testHandlers.ServeHTTP(rr, getreq) 230 | resp := rr.Result() 231 | 232 | if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil { 233 | t.Errorf("error decoding cache stats. 
error: %s", err) 234 | } 235 | 236 | if testStats.Hits == 0 { 237 | t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.") 238 | } 239 | 240 | testHandlers = statsIndexHandler() 241 | testHandlers.ServeHTTP(rr, putreq) 242 | resp = rr.Result() 243 | _, err := io.ReadAll(resp.Body) 244 | if err != nil { 245 | t.Errorf("cannot deserialise test response: %s", err) 246 | } 247 | } 248 | 249 | func TestCacheIndexHandler(t *testing.T) { 250 | getreq := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/testkey", nil) 251 | putreq := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/testkey", bytes.NewBuffer([]byte("123"))) 252 | delreq := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testkey", bytes.NewBuffer([]byte("123"))) 253 | 254 | getrr := httptest.NewRecorder() 255 | putrr := httptest.NewRecorder() 256 | delrr := httptest.NewRecorder() 257 | testHandlers := cacheIndexHandler() 258 | 259 | testHandlers.ServeHTTP(putrr, putreq) 260 | resp := putrr.Result() 261 | if resp.StatusCode != 201 { 262 | t.Errorf("want: 201; got: %d.\n\tcan't put keys.", resp.StatusCode) 263 | } 264 | testHandlers.ServeHTTP(getrr, getreq) 265 | resp = getrr.Result() 266 | if resp.StatusCode != 200 { 267 | t.Errorf("want: 200; got: %d.\n\tcan't get keys.", resp.StatusCode) 268 | } 269 | testHandlers.ServeHTTP(delrr, delreq) 270 | resp = delrr.Result() 271 | if resp.StatusCode != 200 { 272 | t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode) 273 | } 274 | } 275 | 276 | func TestInvalidPutWhenExceedShardCap(t *testing.T) { 277 | t.Parallel() 278 | req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer(bytes.Repeat([]byte("a"), 8*1024*1024))) 279 | rr := httptest.NewRecorder() 280 | 281 | putCacheHandler(rr, req) 282 | resp := rr.Result() 283 | 284 | if resp.StatusCode != 500 { 285 | t.Errorf("want: 500; got: %d", resp.StatusCode) 286 | } 287 | } 288 | 289 | func TestInvalidPutWhenReading(t *testing.T) { 290 | t.Parallel() 291 | req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", errReader(0)) 292 | rr := httptest.NewRecorder() 293 | 294 | putCacheHandler(rr, req) 295 | resp := rr.Result() 296 | 297 | if resp.StatusCode != 500 { 298 | t.Errorf("want: 500; got: %d", resp.StatusCode) 299 | } 300 | } 301 | 302 | type errReader int 303 | 304 | func (errReader) Read([]byte) (int, error) { 305 | return 0, errors.New("test read error") 306 | } 307 | -------------------------------------------------------------------------------- /server/stats_handler.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | ) 8 | 9 | // index for stats handle 10 | func statsIndexHandler() http.Handler { 11 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 12 | switch r.Method { 13 | case http.MethodGet: 14 | getCacheStatsHandler(w, r) 15 | default: 16 | w.WriteHeader(http.StatusMethodNotAllowed) 17 | } 18 | }) 19 | } 20 | 21 | // returns the cache's statistics. 22 | func getCacheStatsHandler(w http.ResponseWriter, r *http.Request) { 23 | target, err := json.Marshal(cache.Stats()) 24 | if err != nil { 25 | w.WriteHeader(http.StatusInternalServerError) 26 | log.Printf("cannot marshal cache stats. error: %s", err) 27 | return 28 | } 29 | // since we're sending a struct, make it easy for consumers to interface. 
30 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 31 | w.Write(target) 32 | } 33 | -------------------------------------------------------------------------------- /shard.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | "sync/atomic" 7 | 8 | "github.com/allegro/bigcache/v3/queue" 9 | ) 10 | 11 | type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason) 12 | 13 | // Metadata contains information of a specific entry 14 | type Metadata struct { 15 | RequestCount uint32 16 | } 17 | 18 | type cacheShard struct { 19 | hashmap map[uint64]uint64 20 | entries queue.BytesQueue 21 | lock sync.RWMutex 22 | entryBuffer []byte 23 | onRemove onRemoveCallback 24 | 25 | isVerbose bool 26 | statsEnabled bool 27 | logger Logger 28 | clock clock 29 | lifeWindow uint64 30 | 31 | hashmapStats map[uint64]uint32 32 | stats Stats 33 | cleanEnabled bool 34 | } 35 | 36 | func (s *cacheShard) getWithInfo(key string, hashedKey uint64) (entry []byte, resp Response, err error) { 37 | currentTime := uint64(s.clock.Epoch()) 38 | s.lock.RLock() 39 | wrappedEntry, err := s.getWrappedEntry(hashedKey) 40 | if err != nil { 41 | s.lock.RUnlock() 42 | return nil, resp, err 43 | } 44 | if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey { 45 | s.lock.RUnlock() 46 | s.collision() 47 | if s.isVerbose { 48 | s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey) 49 | } 50 | return nil, resp, ErrEntryNotFound 51 | } 52 | 53 | entry = readEntry(wrappedEntry) 54 | if s.isExpired(wrappedEntry, currentTime) { 55 | resp.EntryStatus = Expired 56 | } 57 | s.lock.RUnlock() 58 | s.hit(hashedKey) 59 | return entry, resp, nil 60 | } 61 | 62 | func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) { 63 | s.lock.RLock() 64 | wrappedEntry, err := s.getWrappedEntry(hashedKey) 65 | if err != nil { 66 | s.lock.RUnlock() 67 | return nil, err 68 | } 69 | if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey { 70 | s.lock.RUnlock() 71 | s.collision() 72 | if s.isVerbose { 73 | s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey) 74 | } 75 | return nil, ErrEntryNotFound 76 | } 77 | entry := readEntry(wrappedEntry) 78 | s.lock.RUnlock() 79 | s.hit(hashedKey) 80 | 81 | return entry, nil 82 | } 83 | 84 | func (s *cacheShard) getWrappedEntry(hashedKey uint64) ([]byte, error) { 85 | itemIndex := s.hashmap[hashedKey] 86 | 87 | if itemIndex == 0 { 88 | s.miss() 89 | return nil, ErrEntryNotFound 90 | } 91 | 92 | wrappedEntry, err := s.entries.Get(int(itemIndex)) 93 | if err != nil { 94 | s.miss() 95 | return nil, err 96 | } 97 | 98 | return wrappedEntry, err 99 | } 100 | 101 | func (s *cacheShard) getValidWrapEntry(key string, hashedKey uint64) ([]byte, error) { 102 | wrappedEntry, err := s.getWrappedEntry(hashedKey) 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | if !compareKeyFromEntry(wrappedEntry, key) { 108 | s.collision() 109 | if s.isVerbose { 110 | s.logger.Printf("Collision detected. 
Both %q and %q have the same hash %x", key, readKeyFromEntry(wrappedEntry), hashedKey) 111 | } 112 | 113 | return nil, ErrEntryNotFound 114 | } 115 | s.hitWithoutLock(hashedKey) 116 | 117 | return wrappedEntry, nil 118 | } 119 | 120 | func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error { 121 | currentTimestamp := uint64(s.clock.Epoch()) 122 | 123 | s.lock.Lock() 124 | 125 | if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 { 126 | if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil { 127 | resetHashFromEntry(previousEntry) 128 | //remove hashkey 129 | delete(s.hashmap, hashedKey) 130 | } 131 | } 132 | 133 | if !s.cleanEnabled { 134 | if oldestEntry, err := s.entries.Peek(); err == nil { 135 | s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) 136 | } 137 | } 138 | 139 | w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer) 140 | 141 | for { 142 | if index, err := s.entries.Push(w); err == nil { 143 | s.hashmap[hashedKey] = uint64(index) 144 | s.lock.Unlock() 145 | return nil 146 | } 147 | if s.removeOldestEntry(NoSpace) != nil { 148 | s.lock.Unlock() 149 | return errors.New("entry is bigger than max shard size") 150 | } 151 | } 152 | } 153 | 154 | func (s *cacheShard) addNewWithoutLock(key string, hashedKey uint64, entry []byte) error { 155 | currentTimestamp := uint64(s.clock.Epoch()) 156 | 157 | if !s.cleanEnabled { 158 | if oldestEntry, err := s.entries.Peek(); err == nil { 159 | s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) 160 | } 161 | } 162 | 163 | w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer) 164 | 165 | for { 166 | if index, err := s.entries.Push(w); err == nil { 167 | s.hashmap[hashedKey] = uint64(index) 168 | return nil 169 | } 170 | if s.removeOldestEntry(NoSpace) != nil { 171 | return errors.New("entry is bigger than max shard size") 172 | } 173 | } 174 | } 175 | 176 | func (s *cacheShard) setWrappedEntryWithoutLock(currentTimestamp uint64, w []byte, hashedKey uint64) error { 177 | if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 { 178 | if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil { 179 | resetHashFromEntry(previousEntry) 180 | } 181 | } 182 | 183 | if !s.cleanEnabled { 184 | if oldestEntry, err := s.entries.Peek(); err == nil { 185 | s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) 186 | } 187 | } 188 | 189 | for { 190 | if index, err := s.entries.Push(w); err == nil { 191 | s.hashmap[hashedKey] = uint64(index) 192 | return nil 193 | } 194 | if s.removeOldestEntry(NoSpace) != nil { 195 | return errors.New("entry is bigger than max shard size") 196 | } 197 | } 198 | } 199 | 200 | func (s *cacheShard) append(key string, hashedKey uint64, entry []byte) error { 201 | s.lock.Lock() 202 | wrappedEntry, err := s.getValidWrapEntry(key, hashedKey) 203 | 204 | if err == ErrEntryNotFound { 205 | err = s.addNewWithoutLock(key, hashedKey, entry) 206 | s.lock.Unlock() 207 | return err 208 | } 209 | if err != nil { 210 | s.lock.Unlock() 211 | return err 212 | } 213 | 214 | currentTimestamp := uint64(s.clock.Epoch()) 215 | 216 | w := appendToWrappedEntry(currentTimestamp, wrappedEntry, entry, &s.entryBuffer) 217 | 218 | err = s.setWrappedEntryWithoutLock(currentTimestamp, w, hashedKey) 219 | s.lock.Unlock() 220 | 221 | return err 222 | } 223 | 224 | func (s *cacheShard) del(hashedKey uint64) error { 225 | // Optimistic pre-check using only readlock 226 | s.lock.RLock() 227 | { 228 | itemIndex := s.hashmap[hashedKey] 
229 | 230 | if itemIndex == 0 { 231 | s.lock.RUnlock() 232 | s.delmiss() 233 | return ErrEntryNotFound 234 | } 235 | 236 | if err := s.entries.CheckGet(int(itemIndex)); err != nil { 237 | s.lock.RUnlock() 238 | s.delmiss() 239 | return err 240 | } 241 | } 242 | s.lock.RUnlock() 243 | 244 | s.lock.Lock() 245 | { 246 | // After obtaining the writelock, we need to read the same again, 247 | // since the data delivered earlier may be stale now 248 | itemIndex := s.hashmap[hashedKey] 249 | 250 | if itemIndex == 0 { 251 | s.lock.Unlock() 252 | s.delmiss() 253 | return ErrEntryNotFound 254 | } 255 | 256 | wrappedEntry, err := s.entries.Get(int(itemIndex)) 257 | if err != nil { 258 | s.lock.Unlock() 259 | s.delmiss() 260 | return err 261 | } 262 | 263 | delete(s.hashmap, hashedKey) 264 | s.onRemove(wrappedEntry, Deleted) 265 | if s.statsEnabled { 266 | delete(s.hashmapStats, hashedKey) 267 | } 268 | resetHashFromEntry(wrappedEntry) 269 | } 270 | s.lock.Unlock() 271 | 272 | s.delhit() 273 | return nil 274 | } 275 | 276 | func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool { 277 | if s.isExpired(oldestEntry, currentTimestamp) { 278 | evict(Expired) 279 | return true 280 | } 281 | return false 282 | } 283 | 284 | func (s *cacheShard) isExpired(oldestEntry []byte, currentTimestamp uint64) bool { 285 | oldestTimestamp := readTimestampFromEntry(oldestEntry) 286 | if currentTimestamp <= oldestTimestamp { // if currentTimestamp < oldestTimestamp, the result will out of uint64 limits; 287 | return false 288 | } 289 | return currentTimestamp-oldestTimestamp > s.lifeWindow 290 | } 291 | 292 | func (s *cacheShard) cleanUp(currentTimestamp uint64) { 293 | s.lock.Lock() 294 | for { 295 | if oldestEntry, err := s.entries.Peek(); err != nil { 296 | break 297 | } else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted { 298 | break 299 | } 300 | } 301 | s.lock.Unlock() 302 | } 303 | 304 | func (s *cacheShard) getEntry(hashedKey uint64) ([]byte, error) { 305 | s.lock.RLock() 306 | 307 | entry, err := s.getWrappedEntry(hashedKey) 308 | // copy entry 309 | newEntry := make([]byte, len(entry)) 310 | copy(newEntry, entry) 311 | 312 | s.lock.RUnlock() 313 | 314 | return newEntry, err 315 | } 316 | 317 | func (s *cacheShard) copyHashedKeys() (keys []uint64, next int) { 318 | s.lock.RLock() 319 | keys = make([]uint64, len(s.hashmap)) 320 | 321 | for key := range s.hashmap { 322 | keys[next] = key 323 | next++ 324 | } 325 | 326 | s.lock.RUnlock() 327 | return keys, next 328 | } 329 | 330 | func (s *cacheShard) removeOldestEntry(reason RemoveReason) error { 331 | oldest, err := s.entries.Pop() 332 | if err == nil { 333 | hash := readHashFromEntry(oldest) 334 | if hash == 0 { 335 | // entry has been explicitly deleted with resetHashFromEntry, ignore 336 | return nil 337 | } 338 | delete(s.hashmap, hash) 339 | s.onRemove(oldest, reason) 340 | if s.statsEnabled { 341 | delete(s.hashmapStats, hash) 342 | } 343 | return nil 344 | } 345 | return err 346 | } 347 | 348 | func (s *cacheShard) reset(config Config) { 349 | s.lock.Lock() 350 | s.hashmap = make(map[uint64]uint64, config.initialShardSize()) 351 | s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes) 352 | s.entries.Reset() 353 | s.lock.Unlock() 354 | } 355 | 356 | func (s *cacheShard) resetStats() { 357 | s.lock.Lock() 358 | s.stats = Stats{} 359 | s.lock.Unlock() 360 | } 361 | 362 | func (s *cacheShard) len() int { 363 | s.lock.RLock() 364 | res := 
len(s.hashmap) 365 | s.lock.RUnlock() 366 | return res 367 | } 368 | 369 | func (s *cacheShard) capacity() int { 370 | s.lock.RLock() 371 | res := s.entries.Capacity() 372 | s.lock.RUnlock() 373 | return res 374 | } 375 | 376 | func (s *cacheShard) getStats() Stats { 377 | var stats = Stats{ 378 | Hits: atomic.LoadInt64(&s.stats.Hits), 379 | Misses: atomic.LoadInt64(&s.stats.Misses), 380 | DelHits: atomic.LoadInt64(&s.stats.DelHits), 381 | DelMisses: atomic.LoadInt64(&s.stats.DelMisses), 382 | Collisions: atomic.LoadInt64(&s.stats.Collisions), 383 | } 384 | return stats 385 | } 386 | 387 | func (s *cacheShard) getKeyMetadataWithLock(key uint64) Metadata { 388 | s.lock.RLock() 389 | c := s.hashmapStats[key] 390 | s.lock.RUnlock() 391 | return Metadata{ 392 | RequestCount: c, 393 | } 394 | } 395 | 396 | func (s *cacheShard) getKeyMetadata(key uint64) Metadata { 397 | return Metadata{ 398 | RequestCount: s.hashmapStats[key], 399 | } 400 | } 401 | 402 | func (s *cacheShard) hit(key uint64) { 403 | atomic.AddInt64(&s.stats.Hits, 1) 404 | if s.statsEnabled { 405 | s.lock.Lock() 406 | s.hashmapStats[key]++ 407 | s.lock.Unlock() 408 | } 409 | } 410 | 411 | func (s *cacheShard) hitWithoutLock(key uint64) { 412 | atomic.AddInt64(&s.stats.Hits, 1) 413 | if s.statsEnabled { 414 | s.hashmapStats[key]++ 415 | } 416 | } 417 | 418 | func (s *cacheShard) miss() { 419 | atomic.AddInt64(&s.stats.Misses, 1) 420 | } 421 | 422 | func (s *cacheShard) delhit() { 423 | atomic.AddInt64(&s.stats.DelHits, 1) 424 | } 425 | 426 | func (s *cacheShard) delmiss() { 427 | atomic.AddInt64(&s.stats.DelMisses, 1) 428 | } 429 | 430 | func (s *cacheShard) collision() { 431 | atomic.AddInt64(&s.stats.Collisions, 1) 432 | } 433 | 434 | func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard { 435 | bytesQueueInitialCapacity := config.initialShardSize() * config.MaxEntrySize 436 | maximumShardSizeInBytes := config.maximumShardSizeInBytes() 437 | if maximumShardSizeInBytes > 0 && bytesQueueInitialCapacity > maximumShardSizeInBytes { 438 | bytesQueueInitialCapacity = maximumShardSizeInBytes 439 | } 440 | return &cacheShard{ 441 | hashmap: make(map[uint64]uint64, config.initialShardSize()), 442 | hashmapStats: make(map[uint64]uint32, config.initialShardSize()), 443 | entries: *queue.NewBytesQueue(bytesQueueInitialCapacity, maximumShardSizeInBytes, config.Verbose), 444 | entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes), 445 | onRemove: callback, 446 | 447 | isVerbose: config.Verbose, 448 | logger: newLogger(config.Logger), 449 | clock: clock, 450 | lifeWindow: uint64(config.LifeWindow.Seconds()), 451 | statsEnabled: config.StatsEnabled, 452 | cleanEnabled: config.CleanWindow > 0, 453 | } 454 | } 455 | -------------------------------------------------------------------------------- /stats.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | // Stats stores cache statistics 4 | type Stats struct { 5 | // Hits is a number of successfully found keys 6 | Hits int64 `json:"hits"` 7 | // Misses is a number of not found keys 8 | Misses int64 `json:"misses"` 9 | // DelHits is a number of successfully deleted keys 10 | DelHits int64 `json:"delete_hits"` 11 | // DelMisses is a number of not deleted keys 12 | DelMisses int64 `json:"delete_misses"` 13 | // Collisions is a number of happened key-collisions 14 | Collisions int64 `json:"collisions"` 15 | } 16 | -------------------------------------------------------------------------------- 
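The `Stats` struct above is exactly the payload that the HTTP server's `/api/v1/stats` endpoint marshals to JSON (see `stats_handler.go`). Below is a minimal sketch of reading the same counters straight from the library, echoing the configuration used in the server's tests; the keys and the printed output are illustrative only:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/allegro/bigcache/v3"
)

func main() {
	cache, err := bigcache.New(context.Background(), bigcache.Config{
		Shards:             1024,
		LifeWindow:         10 * time.Minute,
		MaxEntriesInWindow: 1000 * 10 * 60,
		MaxEntrySize:       500,
		HardMaxCacheSize:   8192,
	})
	if err != nil {
		panic(err)
	}

	_ = cache.Set("example", []byte("yay!"))
	_, _ = cache.Get("example")     // recorded as a hit
	_, _ = cache.Get("missing-key") // recorded as a miss

	// Stats() returns a snapshot of the per-shard counters, so it can be
	// marshalled and handed out without holding any cache locks.
	snapshot, _ := json.Marshal(cache.Stats())
	fmt.Println(string(snapshot))
	// e.g. {"hits":1,"misses":1,"delete_hits":0,"delete_misses":0,"collisions":0}
}
```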
/utils.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | func max(a, b int) int { // max returns the larger of a and b 4 | if a > b { 5 | return a 6 | } 7 | return b 8 | } 9 | 10 | func convertMBToBytes(value int) int { // convertMBToBytes converts a size given in MB to bytes 11 | return value * 1024 * 1024 12 | } 13 | 14 | func isPowerOfTwo(number int) bool { // isPowerOfTwo reports whether number is a non-zero power of two 15 | return (number != 0) && (number&(number-1)) == 0 16 | } 17 | --------------------------------------------------------------------------------
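For context, the two non-trivial helpers above back the cache configuration: `isPowerOfTwo` guards the shard count (which must be a power of two), and `convertMBToBytes` turns `HardMaxCacheSize`, given in MB (the server's `-max` flag), into a byte figure. The exact wiring lives in `config.go` and `bigcache.go`, which are outside this excerpt, so the sketch below re-implements the helpers and uses hypothetical names (`validateShards`, `perShardByteBudget`) purely for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// Local copies of the utils.go helpers so the sketch compiles on its own
// (the originals are unexported).
func convertMBToBytes(value int) int { return value * 1024 * 1024 }
func isPowerOfTwo(number int) bool   { return (number != 0) && (number&(number-1)) == 0 }

// validateShards is a hypothetical check: shard counts that are powers of two
// let a key's shard be selected with a cheap bitmask instead of a modulo.
func validateShards(shards int) error {
	if !isPowerOfTwo(shards) {
		return errors.New("shards number must be a power of two")
	}
	return nil
}

// perShardByteBudget sketches how a hard cache limit given in MB could be
// split evenly across shards; zero means "no hard limit".
func perShardByteBudget(hardMaxCacheSizeMB, shards int) int {
	if hardMaxCacheSizeMB <= 0 {
		return 0
	}
	return convertMBToBytes(hardMaxCacheSizeMB) / shards
}

func main() {
	if err := validateShards(1024); err != nil {
		panic(err)
	}
	fmt.Println(perShardByteBudget(8192, 1024)) // 8 MiB per shard for the server defaults
}
```

In `initNewShard` (in `shard.go` above), a per-shard byte budget of this kind feeds the second argument of `NewBytesQueue`, which is why an over-sized `Push` eventually fails with "Full queue. Maximum size limit reached."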