├── .github
│   └── workflows
│       └── go.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── benchmark_old.txt
├── docs
│   ├── consistency.md
│   ├── efficient-batching.md
│   ├── images
│   │   ├── cache-aside.png
│   │   ├── invalidate-flow.png
│   │   ├── race-condition.png
│   │   ├── replication.png
│   │   └── thundering-herd.png
│   ├── replication.md
│   └── thundering-herd.md
├── error.go
├── examples
│   ├── failover
│   │   └── main.go
│   └── simple
│       ├── go.mod
│       ├── go.sum
│       └── main.go
├── fake
│   ├── fake.go
│   └── fake_test.go
├── go.mod
├── go.sum
├── go.work
├── go.work.sum
├── heap.go
├── heap_test.go
├── item
│   ├── item.go
│   ├── item_bench_test.go
│   ├── item_property_test.go
│   ├── item_test.go
│   ├── pool.go
│   └── pool_test.go
├── memproxy.go
├── mmap
│   ├── README.md
│   ├── bucket.go
│   ├── bucket_test.go
│   ├── filler.go
│   ├── filler_test.go
│   ├── mmap.go
│   ├── mmap_bench_test.go
│   ├── mmap_property_test.go
│   ├── mmap_test.go
│   └── option.go
├── mocks
│   ├── generate.go
│   └── memproxy_mocks.go
├── plain_memcache.go
├── plain_memcache_test.go
├── proxy
│   ├── config.go
│   ├── pool.go
│   ├── pool_test.go
│   ├── proxy.go
│   ├── proxy_integration_test.go
│   ├── proxy_mocks_test.go
│   ├── proxy_test.go
│   ├── replicated.go
│   ├── replicated_test.go
│   ├── stats.go
│   ├── stats_mocks_test.go
│   ├── stats_test.go
│   └── tests
│       ├── generate.go
│       ├── item_test.go
│       └── proxy_mocks_test.go
├── revive.toml
├── session.go
├── session_test.go
└── tools.go

/.github/workflows/go.yml:
--------------------------------------------------------------------------------
 1 | name: memproxy
 2 | on:
 3 |   push:
 4 |     branches: [ master ]
 5 |   pull_request:
 6 |     branches: [ master ]
 7 | jobs:
 8 |   build:
 9 |     runs-on: ubuntu-20.04
10 |     services:
11 |       memcached:
12 |         image: memcached:1.6.37
13 |         ports:
14 |           - 11211:11211
15 |       memcached2:
16 |         image: memcached:1.6.37
17 |         ports:
18 |           - 11212:11211
19 |     steps:
20 |       - uses: actions/checkout@v2
21 |       - uses: actions/setup-go@v2
22 |         with:
23 |           go-version: 1.19
24 |       - name: Install Tools
25 |         run: make install-tools
26 |       - name: Lint
27 |         run: make lint
28 |       - name: Test
29 |         run: make test
30 |       - name: Test Race
31 |         run: make test-race
32 |       - name: Benchmark
33 |         run: make benchmark
34 |       - name: Benchmark Stat
35 |         run: benchstat benchmark_old.txt benchmark_new.txt
36 |       - name: Benchmark Memory Allocation
37 |         run: make membench
38 |       - name: Convert coverage.out to coverage.lcov
39 |         uses: jandelgado/gcov2lcov-action@v1.0.6
40 |       - name: Coveralls
41 |         uses: coverallsapp/github-action@v1.1.2
42 |         with:
43 |           github-token: ${{ secrets.github_token }}
44 |           path-to-lcov: coverage.lcov
45 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
2 | /coverage.out
3 | /benchmark_new.txt
4 | /**/*profile.out
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | Copyright (c) 2023 Ta Quang Tung
 2 | 
 3 | Permission is hereby granted, free of charge, to any person obtaining a copy
 4 | of this software and associated documentation files (the "Software"), to deal
 5 | in the Software without restriction, including without limitation the rights
 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 7 | copies of the Software, and to permit persons to whom the Software is
 8 | furnished to do so, subject to the following conditions:
 9 | 
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial
portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
 1 | .PHONY: lint install-tools test test-race coverage benchmark compare new_to_old membench profile
 2 | 
 3 | lint:
 4 | 	go fmt ./...
 5 | 	go vet ./...
 6 | 	revive -config revive.toml -formatter friendly ./...
 7 | 
 8 | test:
 9 | 	go test -v -p 1 -count=1 -covermode=count -coverprofile=coverage.out ./...
10 | 
11 | test-race:
12 | 	go test -v -p 1 -race -count=1 ./...
13 | 
14 | install-tools:
15 | 	go install github.com/matryer/moq
16 | 	go install github.com/mgechev/revive
17 | 	go install golang.org/x/perf/cmd/benchstat
18 | 
19 | coverage:
20 | 	go tool cover -func coverage.out | grep ^total
21 | 
22 | benchmark:
23 | 	go test -run="^Benchmark" -bench=. -count=10 ./... > benchmark_new.txt
24 | 
25 | compare:
26 | 	benchstat benchmark_old.txt benchmark_new.txt
27 | 
28 | new_to_old:
29 | 	mv benchmark_new.txt benchmark_old.txt
30 | 
31 | membench:
32 | 	go test -run="^Benchmark" -bench=. -benchmem ./...
33 | 
34 | profile:
35 | 	go tool pprof -http=:8080 ./item/bench_profile.out
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | [![memproxy](https://github.com/QuangTung97/memproxy/actions/workflows/go.yml/badge.svg)](https://github.com/QuangTung97/memproxy/actions/workflows/go.yml)
 2 | [![Coverage Status](https://coveralls.io/repos/github/QuangTung97/memproxy/badge.svg?branch=master)](https://coveralls.io/github/QuangTung97/memproxy?branch=master)
 3 | 
 4 | # Golang Memcache Proxy Library
 5 | 
 6 | ## Why this library?
 7 | 
 8 | This library helps to utilize memcached in a consistent and efficient way.
 9 | 
10 | **Supported features**:
11 | 
12 | * Dealing with consistency between memcached and the database using the lease mechanism.
13 | * Preventing thundering herd (a.k.a. Cache Stampede).
14 | * Efficient batched gets to the underlying database, with batching between lease gets
15 |   and between the retries used for preventing thundering herd.
16 | * Memcached replication similar to MCRouter, without the need for an external proxy.
17 | * Memory-weighted load balancing for replication.
18 | 
19 | ## Table of Contents
20 | 
21 | 1. [Usage](#usage)
22 | 2. [Consistency between Memcached and Database](docs/consistency.md)
23 | 3. [Preventing Thundering Herd](docs/thundering-herd.md)
24 | 4. [Efficient Batching](docs/efficient-batching.md)
25 | 5.
[Memcache Replication & Memory-Weighted Load Balancing](docs/replication.md) 26 | 27 | ## Usage 28 | 29 | * [Using a single memcached server and source from the database](examples/simple/main.go) 30 | * [Using multiple memcached servers](examples/failover/main.go) 31 | -------------------------------------------------------------------------------- /benchmark_old.txt: -------------------------------------------------------------------------------- 1 | PASS 2 | ok github.com/QuangTung97/memproxy 0.003s 3 | ? github.com/QuangTung97/memproxy/examples/failover [no test files] 4 | goos: linux 5 | goarch: amd64 6 | pkg: github.com/QuangTung97/memproxy/item 7 | cpu: Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz 8 | BenchmarkItemGetSingle-8 35326 31967 ns/op 9 | BenchmarkItemGetSingle-8 36493 31969 ns/op 10 | BenchmarkItemGetSingle-8 37573 31455 ns/op 11 | BenchmarkItemGetSingle-8 38120 32097 ns/op 12 | BenchmarkItemGetSingle-8 37936 31581 ns/op 13 | BenchmarkItemGetSingle-8 36225 31765 ns/op 14 | BenchmarkItemGetSingle-8 37867 31955 ns/op 15 | BenchmarkItemGetSingle-8 37804 31910 ns/op 16 | BenchmarkItemGetSingle-8 37418 32489 ns/op 17 | BenchmarkItemGetSingle-8 36849 32434 ns/op 18 | BenchmarkItemGetByBatch1000-8 627 2025879 ns/op 19 | BenchmarkItemGetByBatch1000-8 646 1883982 ns/op 20 | BenchmarkItemGetByBatch1000-8 553 1907529 ns/op 21 | BenchmarkItemGetByBatch1000-8 580 1874875 ns/op 22 | BenchmarkItemGetByBatch1000-8 643 1884685 ns/op 23 | BenchmarkItemGetByBatch1000-8 580 1971675 ns/op 24 | BenchmarkItemGetByBatch1000-8 595 1923921 ns/op 25 | BenchmarkItemGetByBatch1000-8 600 1971834 ns/op 26 | BenchmarkItemGetByBatch1000-8 598 1981879 ns/op 27 | BenchmarkItemGetByBatch1000-8 594 1919507 ns/op 28 | BenchmarkItemGetByBatch100-8 4671 236381 ns/op 29 | BenchmarkItemGetByBatch100-8 5317 223988 ns/op 30 | BenchmarkItemGetByBatch100-8 5156 218834 ns/op 31 | BenchmarkItemGetByBatch100-8 5510 227907 ns/op 32 | BenchmarkItemGetByBatch100-8 5040 233091 ns/op 33 | BenchmarkItemGetByBatch100-8 5270 228814 ns/op 34 | BenchmarkItemGetByBatch100-8 4850 229665 ns/op 35 | BenchmarkItemGetByBatch100-8 5224 228918 ns/op 36 | BenchmarkItemGetByBatch100-8 5043 220884 ns/op 37 | BenchmarkItemGetByBatch100-8 5169 227841 ns/op 38 | BenchmarkItemWithProxyGetByBatch1000-8 520 2402913 ns/op 39 | BenchmarkItemWithProxyGetByBatch1000-8 522 2284023 ns/op 40 | BenchmarkItemWithProxyGetByBatch1000-8 522 2309371 ns/op 41 | BenchmarkItemWithProxyGetByBatch1000-8 499 2219671 ns/op 42 | BenchmarkItemWithProxyGetByBatch1000-8 567 2213934 ns/op 43 | BenchmarkItemWithProxyGetByBatch1000-8 518 2214115 ns/op 44 | BenchmarkItemWithProxyGetByBatch1000-8 537 2203416 ns/op 45 | BenchmarkItemWithProxyGetByBatch1000-8 488 2416846 ns/op 46 | BenchmarkItemWithProxyGetByBatch1000-8 496 2373470 ns/op 47 | BenchmarkItemWithProxyGetByBatch1000-8 500 2394059 ns/op 48 | BenchmarkItemWithProxyGetByBatch100-8 3754 296214 ns/op 49 | BenchmarkItemWithProxyGetByBatch100-8 3699 290346 ns/op 50 | BenchmarkItemWithProxyGetByBatch100-8 3710 286893 ns/op 51 | BenchmarkItemWithProxyGetByBatch100-8 3602 287893 ns/op 52 | BenchmarkItemWithProxyGetByBatch100-8 3602 287129 ns/op 53 | BenchmarkItemWithProxyGetByBatch100-8 3604 287107 ns/op 54 | BenchmarkItemWithProxyGetByBatch100-8 3795 285217 ns/op 55 | BenchmarkItemWithProxyGetByBatch100-8 3628 290297 ns/op 56 | BenchmarkItemWithProxyGetByBatch100-8 3745 434007 ns/op 57 | BenchmarkItemWithProxyGetByBatch100-8 4048 271639 ns/op 58 | BenchmarkHeapAlloc-8 15361941 76.03 ns/op 59 | BenchmarkHeapAlloc-8 15850196 88.00 
ns/op
60 | BenchmarkHeapAlloc-8                    13285598                90.40 ns/op
61 | BenchmarkHeapAlloc-8                    13346054                84.37 ns/op
62 | BenchmarkHeapAlloc-8                    14340439                80.81 ns/op
63 | BenchmarkHeapAlloc-8                    15103576                80.10 ns/op
64 | BenchmarkHeapAlloc-8                    14817286                80.38 ns/op
65 | BenchmarkHeapAlloc-8                    14440092                73.32 ns/op
66 | BenchmarkHeapAlloc-8                    16515190                71.90 ns/op
67 | BenchmarkHeapAlloc-8                    16187142                71.89 ns/op
68 | PASS
69 | ok      github.com/QuangTung97/memproxy/item    79.630s
70 | PASS
71 | ok      github.com/QuangTung97/memproxy/mhash   0.003s
72 | ?       github.com/QuangTung97/memproxy/mocks   [no test files]
73 | PASS
74 | ok      github.com/QuangTung97/memproxy/proxy   0.003s
75 | 
--------------------------------------------------------------------------------
/docs/consistency.md:
--------------------------------------------------------------------------------
 1 | # Consistency between Memcached and Database
 2 | 
 3 | Memcached, as the name suggests, is most commonly used as a cache server
 4 | protecting other data sources, most often databases.
 5 | 
 6 | The recommended way to use memcached is the **Cache-Aside Pattern**,
 7 | in which user requests hit the memcached server first.
 8 | Only if memcached does not contain the data does the application server
 9 | fetch it from the backing source (e.g. a database)
10 | and then set the fetched data back to the memcached server.
11 | 
12 | ![Cache Aside Pattern](images/cache-aside.png)
13 | 
14 | ### But how to keep the data consistent between memcached and the database?
15 | 
16 | The solution used by this library is **invalidating (deleting) keys on database updates**.
17 | 
18 | The general flow looks like this:
19 | ![Invalidate Flow](images/invalidate-flow.png)
20 | 
21 | 1. First, the user request opens a transaction and performs the update as normal.
22 |    The application then specifies and inserts the list of **invalidated keys** into some table before
23 |    committing the transaction (*Steps 1 to 4*).
24 |    Readers familiar with patterns in distributed systems & microservices
25 |    will recognize this
26 |    as the [Transactional Outbox Pattern](https://microservices.io/patterns/data/transactional-outbox.html).
27 |    This pattern guarantees that the keys in the memcached server will be deleted accordingly.
28 | 2. After the transaction has been committed, the application deletes the keys in the memcached server
29 |    on the same thread the user request is running on, before returning to the user (*Step 5*).
30 |    This step serves two purposes:
31 |    * It keeps **read-your-own-writes consistency** under normal conditions (no failure occurs).
32 |    * It helps mitigate the case where the background job is not working,
33 |      or cannot proceed because some error has not been handled gracefully.
34 | 3. The background job reads the invalidated keys and performs the deletion again (*Steps 6 and 7*).
35 | 
36 | ### Race condition when deleting keys
37 | 
38 | Even with the Transactional Outbox Pattern, there is still a **race condition**
39 | that can leave data in the cache **stale indefinitely**.
40 | 
41 | Consider this execution:
42 | 
43 | ![Race Condition](images/race-condition.png)
44 | 
45 | 1. At the start, the database contains the variable **x = v1** and the memcached server is empty.
46 | 2. **User Request 1** reads from memcached => does not find the cached key for **x** => then reads from the database.
47 | 3. At the same time, **User Request 2** starts and updates the variable **x** from **x = v1** to **x = v2**.
48 |    It then deletes the key on the memcached server; this deletion does nothing because no key exists in the cache yet.
49 | 4. **User Request 1** experiences a network problem that makes the **Set back to cache with x = v1**
50 |    operation take quite a long time,
51 |    long enough that the step-4 **Delete key** has already completed.
52 | 
53 | As a result, the memcached server will store the cache key containing **x = v1**
54 | and serve that stale value **indefinitely**, unless another update happens for the key **x**.
55 | 
56 | ### Lease Mechanism for solving the Stale Set Problem
57 | 
58 | The idea of using leases comes from the paper
59 | [Scaling Memcache at Facebook](https://www.usenix.org/system/files/conference/nsdi13/nsdi13-final170_update.pdf).
60 | 
61 | In **step 1**, when the key is not found, instead of returning no result, the memcached server puts in
62 | an item with an empty value, a TTL and a **Lease Token** (the **cas** number in memcached terminology).
63 | That **lease token** is returned to the client.
64 | 
65 | And instead of simply **setting back to cache** in **step 5**, the client needs to pass the **lease token**
66 | to the memcached server in the set command. The memcached server then updates **x = v1** in its hash table
67 | **ONLY IF** the key still exists and the **lease token** matches.
68 | 
69 | In the new memcached server, the get command is:
70 | 
71 | ```
72 | mg userkey v c N3\r\n
73 | ```
74 | 
75 | The flag ``v`` indicates that the value should be returned, the flag ``c`` asks for the ``cas`` number,
76 | and the flag ``N3`` means that when the key ``userkey`` does not exist,
77 | the memcached server will create a new item with ``TTL=3s``.
78 | 
79 | For the set command:
80 | 
81 | ```
82 | ms userkey 4 C1234\r\n
83 | ABCD\r\n
84 | ```
85 | 
86 | Here the value size is 4 bytes and the value is ``ABCD``. This command is ignored if the key ``userkey`` does not exist
87 | or the ``cas`` number
88 | stored in the item does not currently equal ``1234``.
89 | 
90 | More details in:
91 | 
92 | * https://github.com/memcached/memcached/wiki/MetaCommands
93 | * https://github.com/memcached/memcached/blob/master/doc/protocol.txt
94 | 
95 | #### Next: [Preventing Thundering Herd](thundering-herd.md)
--------------------------------------------------------------------------------
/docs/efficient-batching.md:
--------------------------------------------------------------------------------
 1 | # Efficient Batching
 2 | 
 3 | One of the most important things that make caching efficient is batching & pipelining.
 4 | Similar to [the N+1 problem](https://planetscale.com/blog/what-is-n+1-query-problem-and-how-to-solve-it),
 5 | doing batched gets is much more efficient than doing gets one by one.
 6 | 
 7 | There are several points in a caching solution where batching can improve effectiveness dramatically:
 8 | 
 9 | * **Batching Get** for keys of the same type of item: for example, a multi-get for multiple product infos.
10 | * **Batching Get** for keys of different types of items, when there is no data dependency between items: for example,
11 |   getting product information and getting its prices are often independent, and can be fetched at the same time.
12 | * **Batching Get** to the database (it becomes an N+1 problem otherwise) and **Batching Set**
13 |   to set data back to the cache server when many of the keys are missed at the same time,
14 |   especially at the start when the cache is empty.
15 | * **Batching Get** and "batching" sleep for the algorithm [preventing thundering herd](thundering-herd.md).
16 |   This means that instead of sleeping one by one, all keys sleep for the same duration
17 |   and are then retried together with a **Multi-Get**.
18 | 
19 | This library helps solve all of these problems.
20 | 
21 | ## Efficient Batching through Defer Function Calls
22 | 
23 | ### Functional Batching in Pipeline
24 | 
25 | The idea starts with the ``Pipeline`` interface:
26 | 
27 | ```go
28 | package memproxy
29 | 
30 | type Pipeline interface {
31 | 	LeaseGet(key string, options LeaseGetOptions) func() (LeaseGetResponse, error)
32 | 	LeaseSet(key string, data []byte, cas uint64, options LeaseSetOptions) func() (LeaseSetResponse, error)
33 | 	Delete(key string, options DeleteOptions) func() (DeleteResponse, error)
34 | 
35 | 	// Execute flushes commands to the network
36 | 	Execute()
37 | 
38 | 	// Finish must be called after creating a Pipeline, often by defer
39 | 	Finish()
40 | 
41 | 	// LowerSession returns a lower-priority session
42 | 	LowerSession() Session
43 | }
44 | 
45 | ```
46 | 
47 | Let's focus on these 3 functions:
48 | 
49 | ```go
50 | LeaseGet(key string, options LeaseGetOptions) func() (LeaseGetResponse, error)
51 | LeaseSet(key string, data []byte, cas uint64, options LeaseSetOptions) func() (LeaseSetResponse, error)
52 | Delete(key string, options DeleteOptions) func() (DeleteResponse, error)
53 | ```
54 | 
55 | When called, these 3 functions do not return results immediately; they only ``collect the operations``.
56 | Only when the returned anonymous functions are called are the ``collected operations`` actually executed.
57 | 
58 | For example, in the following code:
59 | 
60 | ```go
61 | fn1 := pipeline.LeaseGet("key01", LeaseGetOptions{})
62 | fn2 := pipeline.LeaseGet("key02", LeaseGetOptions{})
63 | fn3 := pipeline.LeaseGet("key03", LeaseGetOptions{})
64 | 
65 | resp1, err := fn1()
66 | resp2, err := fn2()
67 | resp3, err := fn3()
68 | ```
69 | 
70 | the first 3 calls do nothing except collect the operations in an internal buffer.
71 | **ONLY** at the fourth call, ``resp1, err := fn1()``, are the operations actually flushed to the network,
72 | waiting for the results to come back from the memcached servers.
73 | 
74 | The fifth and sixth lines:
75 | 
76 | ```go
77 | resp2, err := fn2()
78 | resp3, err := fn3()
79 | ```
80 | 
81 | do NOT do much else except map the results from the
82 | wait in
83 | 
84 | ```go
85 | resp1, err := fn1()
86 | ```
87 | 
88 | to the corresponding operations.
89 | 
90 | Readers familiar with ``Promise`` in ``JavaScript`` will notice that this function signature looks like a ``Promise``.
91 | Instead of returning the result right away, the function returns a ``Promise``
92 | from which the result can be obtained in the future.
93 | 
94 | In the case of ``Pipeline``, the ``Promise`` is a simple and efficient ``anonymous`` function that facilitates batching.
95 | 
96 | ### Batching between Different Types of Data
97 | 
98 | Using the idea of returning anonymous functions, we can
99 | make batching between different data types easier to implement.
100 | 
101 | For example, consider the following code:
102 | 
103 | ```go
104 | package example
105 | 
106 | import "context"
107 | 
108 | type ProductInfo struct {
109 | 	// detail fields
110 | }
111 | 
112 | type ProductPrice struct {
113 | 	// detail fields
114 | }
115 | 
116 | type Repository interface {
117 | 	GetProductInfo(ctx context.Context, sku string) func() (ProductInfo, error)
118 | 	GetProductPrice(ctx context.Context, sku string) func() (ProductPrice, error)
119 | }
120 | 
121 | func main() {
122 | 	var repo Repository // assumed to be initialized with a correct implementation
123 | 	ctx := context.Background()
124 | 
125 | 	infoFunc1 := repo.GetProductInfo(ctx, "SKU01")
126 | 	infoFunc2 := repo.GetProductInfo(ctx, "SKU02")
127 | 
128 | 	priceFunc1 := repo.GetProductPrice(ctx, "SKU01")
129 | 	priceFunc2 := repo.GetProductPrice(ctx, "SKU02")
130 | 
131 | 	info1, err := infoFunc1()
132 | 	info2, err := infoFunc2()
133 | 
134 | 	price1, err := priceFunc1()
135 | 	price2, err := priceFunc2()
136 | }
137 | ```
138 | 
139 | We can see that the 4 operations on 2 kinds of data can be batched efficiently using this approach.
140 | 
141 | ### Chaining Anonymous Functions for Complex Batching
142 | 
143 | Simply returning anonymous functions helps with batching between unrelated kinds of data.
144 | But how do we batch when the actions are dependent?
145 | 
146 | For example, how do we batch this chain of actions:
147 | 
148 | ```
149 | get from cache => if not found, get from DB => then set back to the cache
150 | ```
151 | 
152 | The idea is to use the ``Session`` interface:
153 | 
154 | ```go
155 | package memproxy
156 | 
157 | type Session interface {
158 | 	AddNextCall(fn func())
159 | 	Execute()
160 | 
161 | 	// other functions
162 | }
163 | ```
164 | 
165 | The function ``AddNextCall`` simply appends to a list of deferred calls.
166 | The function ``Execute`` loops through this list and executes each deferred function one by one.
167 | 
168 | Assuming the 3 operations above belong to the same interface, we can implement it as below:
169 | 
170 | ```go
171 | package example
172 | 
173 | import (
174 | 	"context"
175 | 	"errors"
176 | )
177 | 
178 | var ErrNotFound = errors.New("not found")
179 | 
180 | type UserData struct {
181 | 	// other fields
182 | }
183 | 
184 | type Session interface {
185 | 	AddNextCall(fn func())
186 | 	Execute()
187 | }
188 | 
189 | type Repository interface {
190 | 	GetFromCache(ctx context.Context) func() (UserData, error)
191 | 	GetFromDB(ctx context.Context) func() (UserData, error)
192 | 	SetToCache(ctx context.Context, data UserData) func() error
193 | }
194 | 
195 | func GetCache(
196 | 	ctx context.Context,
197 | 	repo Repository,
198 | 	sess Session,
199 | ) func() (UserData, error) {
200 | 	cacheFunc := repo.GetFromCache(ctx)
201 | 
202 | 	var result UserData
203 | 	var err error
204 | 
205 | 	sess.AddNextCall(func() {
206 | 		result, err = cacheFunc()
207 | 		if err == ErrNotFound {
208 | 			getDBFunc := repo.GetFromDB(ctx)
209 | 			sess.AddNextCall(func() {
210 | 				result, err = getDBFunc()
211 | 				if err != nil {
212 | 					return
213 | 				}
214 | 				setCacheFunc := repo.SetToCache(ctx, result)
215 | 				sess.AddNextCall(func() {
216 | 					_ = setCacheFunc()
217 | 				})
218 | 			})
219 | 		}
220 | 	})
221 | 
222 | 	return func() (UserData, error) {
223 | 		sess.Execute()
224 | 		return result, err
225 | 	}
226 | }
227 | ```
228 | 
229 | The way the ``GetCache`` function is implemented makes the 3 operations:
230 | 
231 | * GetFromCache
232 | * GetFromDB
233 | * SetToCache
234 | 
235 | behave in a batching manner.
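
For this chaining to work, ``Execute`` cannot make just a single pass over the list: callbacks scheduled from inside other callbacks (as ``GetCache`` does on a cache miss) must also run before it returns. A minimal sketch of a ``Session`` with this draining behaviour, assuming only the two methods shown above (the library's real implementation has more features):

```go
package example

// simpleSession is a minimal illustration of the Session contract,
// not the library's actual implementation.
type simpleSession struct {
	nextCalls []func()
}

func (s *simpleSession) AddNextCall(fn func()) {
	s.nextCalls = append(s.nextCalls, fn)
}

// Execute drains the deferred calls in rounds. Calls appended while a
// round is running (cache miss => DB get => set back to cache) execute
// in later rounds, so calls of the same depth from many keys run together.
func (s *simpleSession) Execute() {
	for len(s.nextCalls) > 0 {
		calls := s.nextCalls
		s.nextCalls = nil
		for _, fn := range calls {
			fn()
		}
	}
}
```

With one shared session, running ``GetCache`` for many keys executes all the cache gets in one round, all the DB gets in the next, and all the cache set-backs in the last, which gives the batching behaviour described above.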
236 | 
237 | The actual implementation is more complicated because of the many options and
238 | the need to handle sleeping for thundering-herd protection. But the main idea remains the same.
239 | 
240 | #### Previous: [Preventing Thundering Herd](thundering-herd.md)
241 | #### Next: [Memcache Replication & Memory-Weighted Load Balancing](replication.md)
242 | 
--------------------------------------------------------------------------------
/docs/images/cache-aside.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuangTung97/memproxy/f1a6602b46798060fa94ac1c4b37cc0cb8e25721/docs/images/cache-aside.png
--------------------------------------------------------------------------------
/docs/images/invalidate-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuangTung97/memproxy/f1a6602b46798060fa94ac1c4b37cc0cb8e25721/docs/images/invalidate-flow.png
--------------------------------------------------------------------------------
/docs/images/race-condition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuangTung97/memproxy/f1a6602b46798060fa94ac1c4b37cc0cb8e25721/docs/images/race-condition.png
--------------------------------------------------------------------------------
/docs/images/replication.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuangTung97/memproxy/f1a6602b46798060fa94ac1c4b37cc0cb8e25721/docs/images/replication.png
--------------------------------------------------------------------------------
/docs/images/thundering-herd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuangTung97/memproxy/f1a6602b46798060fa94ac1c4b37cc0cb8e25721/docs/images/thundering-herd.png
--------------------------------------------------------------------------------
/docs/replication.md:
--------------------------------------------------------------------------------
 1 | # Memcache Replication & Memory-Weighted Load Balancing
 2 | 
 3 | In typical database systems, the replication mechanism often uses some form of operation log.
 4 | 
 5 | For example:
 6 | 
 7 | * Binlog in MySQL.
 8 | * A stream of commands in the case of Redis.
 9 | * Write-Ahead Log in Postgres.
10 | 
11 | One problem with this kind of replication is that it is often
12 | asynchronous for performance reasons, which makes it impossible to guarantee **strong consistency**.
13 | 
14 | For example, in the case of Redis Sentinel, when a new election happens,
15 | the new master can lose the latest acknowledged writes.
16 | 
17 | Moreover, memcached does NOT expose a stream of operation logs that would allow replication in this way.
18 | Instead, the replication in this library uses a mechanism similar to
19 | [MCRouter](https://github.com/facebook/mcrouter/wiki/Replicated-pools-setup).
20 | 
21 | ## Replication for Cache-Aside Pattern
22 | 
23 | ![Replication](images/replication.png)
24 | 
25 | Instead of the memcached servers themselves doing replication, when the ``application server``
26 | fetches from the cache, it chooses a memcached server, with a configured load-balancing algorithm,
27 | and gets the desired keys from that memcached server.
28 | 
29 | If the keys do NOT exist, it fetches from the database and sets the values back to that same memcached server, as sketched below.
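
This per-request flow can be sketched roughly as follows (a hedged illustration only: ``cacheServer``, ``pick`` and ``fetchFromDB`` are made-up names, not this library's actual API):

```go
package example

// cacheServer is an illustrative stand-in for one memcached replica.
type cacheServer interface {
	Get(key string) (data []byte, found bool, err error)
	Set(key string, data []byte) error
}

// getWithReplication fetches a key from one replica chosen by the
// load-balancing algorithm, filling that SAME replica on a miss.
func getWithReplication(
	pick func() cacheServer, // the configured load-balancing algorithm
	fetchFromDB func(key string) ([]byte, error),
	key string,
) ([]byte, error) {
	server := pick()

	data, found, err := server.Get(key)
	if err != nil {
		// The fail-over described in the next section would retry
		// ONCE against one of the remaining servers here.
		return nil, err
	}
	if found {
		return data, nil
	}

	// Cache miss: fetch from the database and set back to the same
	// server, so each replica gradually fills with the working set.
	data, err = fetchFromDB(key)
	if err != nil {
		return nil, err
	}
	_ = server.Set(key, data)
	return data, nil
}
```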
30 | After some time, both memcached servers will contain mostly the same data from the backing database.
31 | 
32 | ### Fail-over & High Availability
33 | 
34 | With that setup, fail-over can be implemented simply as:
35 | 
36 | 1. First, choose a memcached server and get from that server.
37 | 2. If the response is ok, the memcached server is still alive; respond back to the client.
38 | 3. If the response is NOT ok, choose a memcached server from the remaining servers, and
39 |    do the same thing as in step 1.
40 | 
41 | This library is in fact implemented like this, and to prevent retrying too many times,
42 | it performs step 3 only **ONCE**. If the second chosen memcached server is also NOT alive,
43 | the library returns that error to the client.
44 | 
45 | All of that retry logic is also implemented using the principles in [Efficient Batching](efficient-batching.md).
46 | 
47 | ### Memory-Weighted Load Balancing
48 | 
49 | To achieve better cache utilization, instead of doing round-robin or a simple random selection for replication,
50 | this library relies on the actual memory usage of the memcached servers to do load balancing.
51 | 
52 | For example, if we have 2 memcached servers ``memcached 01`` and ``memcached 02``:
53 | 
54 | * If the ``memcached 01`` memory usage is ``256MB`` and the ``memcached 02`` memory usage is also ``256MB``
55 |   => then both randomly serve ``50%`` of the traffic.
56 | * If ``memcached 01 = 80MB`` and ``memcached 02 = 0MB``, then ``memcached 01`` serves ``99%``
57 |   and ``memcached 02`` serves ``1%``; this ``1%`` value can be changed using the ``proxy.WithMinPercentage()``
58 |   option.
59 | * If ``memcached 01 = 80MB`` and ``memcached 02 = 40MB`` =>
60 |   then ``memcached 01`` serves ``80 / (80 + 40) = 66.67%`` of the traffic.
61 | 
62 | If the option ``proxy.WithMemoryScoringFunc()`` is used with a function ``f(x)``, the formula
63 | becomes ``f(80) / (f(80) + f(40))``.
64 | 
65 | Note that this ``memory usage`` is the RAM used for keys inside memcached,
66 | not the RAM the memcached server has allocated from the underlying OS.
67 | That ``memory usage`` becomes ``zero`` after the command ``flush_all`` is executed.
68 | 
69 | #### Previous: [Efficient Batching](efficient-batching.md)
--------------------------------------------------------------------------------
/docs/thundering-herd.md:
--------------------------------------------------------------------------------
 1 | # Preventing Thundering Herd
 2 | 
 3 | A well-known problem when using the Cache-Aside Pattern, especially when doing **invalidate keys on update**,
 4 | is the thundering-herd problem (also known as **Cache Stampede**).
 5 | 
 6 | It happens when a frequently accessed key is deleted from the cache, and many
 7 | clients concurrently get the same key from the cache => not found =>
 8 | all of them access the backing database to get the same key.
 9 | 
10 | ![Thundering Herd](images/thundering-herd.png)
11 | 
12 | This makes the operation **delete key** expensive, and the database
13 | might not keep up with the sudden increase in traffic.
14 | 
15 | ### Using Lease Mechanism to Prevent Thundering Herd
16 | 
17 | The lease mechanism ([Lease for solving stale set](consistency.md#lease-mechanism-for-solving-the-stale-set-problem))
18 | can be extended to help solve the thundering-herd problem.
19 | 
20 | * The first **lease get** after the key is deleted creates a new item and returns the ``cas`` number as normal.
21 | * The second **lease get** receives a flag indicating that it is the second **lease get**.
22 |   For the memcached meta commands, when the ``N`` flag is used, this set of flags might be returned:
23 | 
24 | ```
25 | These extra flags can be added to the response:
26 | - W: client has "won" the recache flag
27 | - X: item is stale
28 | - Z: item has already sent a winning flag
29 | ```
30 | 
31 | Reference: [Memcache Protocol](https://github.com/memcached/memcached/blob/master/doc/protocol.txt).
32 | 
33 | In this library, after receiving the ``Z`` flag, the client sleeps for the specified durations
34 | (configured by the option ``item.WithSleepDurations``).
35 | 
36 | The behaviour after all the retries in ``item.WithSleepDurations`` are exhausted can be configured by
37 | ``item.WithEnableErrorOnExceedRetryLimit``: ``enable = true`` returns an error, while ``enable = false``
38 | continues getting from the backing store and sets the value back to the memcached server.
39 | 
40 | #### Previous: [Consistency between Memcached and Database](consistency.md)
41 | #### Next: [Efficient Batching](efficient-batching.md)
--------------------------------------------------------------------------------
/error.go:
--------------------------------------------------------------------------------
1 | package memproxy
2 | 
3 | import "errors"
4 | 
5 | // ErrInvalidLeaseGetResponse ...
6 | var ErrInvalidLeaseGetResponse = errors.New("invalid lease get response")
--------------------------------------------------------------------------------
/examples/failover/main.go:
--------------------------------------------------------------------------------
 1 | package main
 2 | 
 3 | import (
 4 | 	"context"
 5 | 	"encoding/json"
 6 | 	"fmt"
 7 | 	"github.com/QuangTung97/memproxy"
 8 | 	mcitem "github.com/QuangTung97/memproxy/item"
 9 | 	"github.com/QuangTung97/memproxy/proxy"
10 | 	"time"
11 | )
12 | 
13 | // User ...
14 | type User struct {
15 | 	ID       int64  `json:"id"`
16 | 	Username string `json:"username"`
17 | }
18 | 
19 | // Marshal ...
20 | func (u User) Marshal() ([]byte, error) {
21 | 	return json.Marshal(u)
22 | }
23 | 
24 | func unmarshalUser(data []byte) (User, error) {
25 | 	var u User
26 | 	err := json.Unmarshal(data, &u)
27 | 	return u, err
28 | }
29 | 
30 | // UserKey ...
31 | type UserKey struct {
32 | 	ID int64
33 | }
34 | 
35 | // String ...
36 | func (u UserKey) String() string { 37 | return fmt.Sprintf("users:%d", u.ID) 38 | } 39 | 40 | func main() { 41 | servers := []proxy.SimpleServerConfig{ 42 | { 43 | ID: 1, 44 | Host: "localhost", 45 | Port: 11211, 46 | }, 47 | { 48 | ID: 2, 49 | Host: "localhost", 50 | Port: 11212, 51 | }, 52 | } 53 | 54 | stats := proxy.NewSimpleStats(servers, 55 | proxy.WithSimpleStatsMemLogger(func(server proxy.ServerID, mem uint64, err error) { 56 | fmt.Println("SERVER MEM:", server, mem, err) 57 | }), 58 | proxy.WithSimpleStatsCheckDuration(10*time.Second), 59 | ) 60 | defer stats.Shutdown() 61 | 62 | mc, closeFun, err := proxy.NewSimpleReplicatedMemcache( 63 | servers, 3, stats, 64 | proxy.WithMinPercentage(10), 65 | ) 66 | if err != nil { 67 | panic(err) 68 | } 69 | defer closeFun() 70 | 71 | userSeq := 0 72 | for { 73 | doGetFromCache(mc, &userSeq) 74 | time.Sleep(1 * time.Second) 75 | } 76 | } 77 | 78 | func doGetFromCache( 79 | mc memproxy.Memcache, 80 | userSeq *int, 81 | ) { 82 | pipe := mc.Pipeline(context.Background()) 83 | defer pipe.Finish() 84 | 85 | *userSeq++ 86 | id := *userSeq % 11 87 | 88 | userItem := mcitem.New[User, UserKey]( 89 | pipe, unmarshalUser, 90 | func(ctx context.Context, key UserKey) func() (User, error) { 91 | fmt.Println("DO Fill with Key:", key) 92 | return func() (User, error) { 93 | return User{ 94 | ID: int64(id), 95 | Username: fmt.Sprintf("username:%d", *userSeq), 96 | }, nil 97 | } 98 | }, 99 | ) 100 | 101 | fn := userItem.Get(context.Background(), UserKey{ 102 | ID: int64(id), 103 | }) 104 | user, err := fn() 105 | fmt.Println(user, err) 106 | } 107 | -------------------------------------------------------------------------------- /examples/simple/go.mod: -------------------------------------------------------------------------------- 1 | module simple 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/QuangTung97/go-memcache v0.5.6 7 | github.com/QuangTung97/memproxy v0.4.4 8 | github.com/go-sql-driver/mysql v1.6.0 9 | github.com/jmoiron/sqlx v1.3.5 10 | ) 11 | 12 | require ( 13 | github.com/BurntSushi/toml v1.2.1 // indirect 14 | github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 // indirect 15 | github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect 16 | github.com/fatih/color v1.15.0 // indirect 17 | github.com/fatih/structtag v1.2.0 // indirect 18 | github.com/matryer/moq v0.3.0 // indirect 19 | github.com/mattn/go-colorable v0.1.13 // indirect 20 | github.com/mattn/go-isatty v0.0.17 // indirect 21 | github.com/mattn/go-runewidth v0.0.9 // indirect 22 | github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect 23 | github.com/mgechev/revive v1.3.1 // indirect 24 | github.com/mitchellh/go-homedir v1.1.0 // indirect 25 | github.com/olekukonko/tablewriter v0.0.5 // indirect 26 | github.com/pkg/errors v0.9.1 // indirect 27 | golang.org/x/mod v0.9.0 // indirect 28 | golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5 // indirect 29 | golang.org/x/sys v0.6.0 // indirect 30 | golang.org/x/tools v0.7.0 // indirect 31 | ) 32 | 33 | replace github.com/QuangTung97/memproxy v0.4.4 => ../../ 34 | -------------------------------------------------------------------------------- /examples/simple/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "github.com/QuangTung97/go-memcache/memcache" 8 | "github.com/QuangTung97/memproxy" 9 | "github.com/QuangTung97/memproxy/item" 10 | "github.com/jmoiron/sqlx" 
11 | 12 | _ "github.com/go-sql-driver/mysql" 13 | ) 14 | 15 | type service struct { 16 | db *sqlx.DB 17 | memcache memproxy.Memcache 18 | } 19 | 20 | var dropTableSQL = ` 21 | DROP TABLE IF EXISTS customer 22 | ` 23 | 24 | var createTableSQL = ` 25 | CREATE TABLE customer ( 26 | id INT PRIMARY KEY, 27 | username VARCHAR(100) NOT NULL 28 | ) 29 | ` 30 | 31 | // Customer ... 32 | type Customer struct { 33 | ID int64 `db:"id" json:"id"` 34 | Username string `db:"username" json:"username"` 35 | } 36 | 37 | // Marshal ... 38 | func (c Customer) Marshal() ([]byte, error) { 39 | return json.Marshal(c) 40 | } 41 | 42 | // GetKey ... 43 | func (c Customer) GetKey() CustomerKey { 44 | return CustomerKey{ID: c.ID} 45 | } 46 | 47 | func unmarshalCustomer(data []byte) (Customer, error) { 48 | var c Customer 49 | err := json.Unmarshal(data, &c) 50 | return c, err 51 | } 52 | 53 | // CustomerKey ... 54 | type CustomerKey struct { 55 | ID int64 56 | } 57 | 58 | func (k CustomerKey) String() string { 59 | return fmt.Sprintf("customers:%d", k.ID) 60 | } 61 | 62 | func (s *service) getCustomersFromDB(ctx context.Context, keys []CustomerKey) ([]Customer, error) { 63 | ids := make([]int64, 0, len(keys)) 64 | for _, k := range keys { 65 | ids = append(ids, k.ID) 66 | } 67 | 68 | fmt.Println("Multi Get from Database with IDs =", ids) 69 | 70 | query, args, err := sqlx.In(`SELECT id, username FROM customer WHERE id IN (?)`, ids) 71 | if err != nil { 72 | return nil, err 73 | } 74 | 75 | var customers []Customer 76 | err = s.db.SelectContext(ctx, &customers, query, args...) 77 | return customers, err 78 | } 79 | 80 | func (s *service) newCustomerItem(pipe memproxy.Pipeline) *item.Item[Customer, CustomerKey] { 81 | return item.New[Customer, CustomerKey]( 82 | pipe, 83 | unmarshalCustomer, 84 | item.NewMultiGetFiller[Customer, CustomerKey]( 85 | s.getCustomersFromDB, 86 | Customer.GetKey, 87 | ), 88 | ) 89 | } 90 | 91 | func main() { 92 | client, err := memcache.New("localhost:11211", 3) 93 | if err != nil { 94 | panic(err) 95 | } 96 | mc := memproxy.NewPlainMemcache(client) 97 | 98 | db := sqlx.MustConnect("mysql", "root:1@tcp(localhost:3306)/memtest?") 99 | 100 | db.MustExec(dropTableSQL) 101 | db.MustExec(createTableSQL) 102 | 103 | db.MustExec(` 104 | INSERT INTO customer (id, username) 105 | VALUES (11, "user01"), (12, "user02") 106 | `) 107 | 108 | svc := &service{ 109 | db: db, 110 | memcache: mc, 111 | } 112 | 113 | pipe := mc.Pipeline(context.Background()) 114 | customerItem := svc.newCustomerItem(pipe) 115 | 116 | fn1 := customerItem.Get(context.Background(), CustomerKey{ID: 11}) 117 | fn2 := customerItem.Get(context.Background(), CustomerKey{ID: 12}) 118 | 119 | // not found 120 | fn3 := customerItem.Get(context.Background(), CustomerKey{ID: 13}) 121 | 122 | c1, err := fn1() 123 | fmt.Println("CUSTOMER 01:", c1, err) 124 | 125 | c2, err := fn2() 126 | fmt.Println("CUSTOMER 02:", c2, err) 127 | 128 | c3, err := fn3() 129 | fmt.Println("CUSTOMER 03:", c3, err) 130 | 131 | // should use defer pipe.Finish() 132 | pipe.Finish() 133 | 134 | // The 3 keys: customers:11, customers:12, customers:13 will exist in the memcached server 135 | // Can check using: telnet localhost 11211 136 | // get customers:11 137 | 138 | // ============================================= 139 | // Do Get Again 140 | // ============================================= 141 | pipe = mc.Pipeline(context.Background()) 142 | customerItem = svc.newCustomerItem(pipe) 143 | 144 | fn1 = customerItem.Get(context.Background(), CustomerKey{ID: 11}) 145 
| fn2 = customerItem.Get(context.Background(), CustomerKey{ID: 12}) 146 | 147 | c1, err = fn1() 148 | fmt.Println("CUSTOMER 01 AGAIN:", c1, err) 149 | c2, err = fn2() 150 | fmt.Println("CUSTOMER 02 AGAIN:", c2, err) 151 | 152 | pipe.Finish() 153 | } 154 | -------------------------------------------------------------------------------- /fake/fake.go: -------------------------------------------------------------------------------- 1 | package fake 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/QuangTung97/memproxy" 8 | "github.com/QuangTung97/memproxy/mocks" 9 | ) 10 | 11 | // Entry ... 12 | type Entry struct { 13 | Valid bool 14 | Data []byte 15 | CAS uint64 16 | } 17 | 18 | // Memcache fake memcached for testing purpose 19 | type Memcache struct { 20 | sessProvider memproxy.SessionProvider 21 | 22 | mut sync.Mutex 23 | cas uint64 24 | entries map[string]Entry 25 | } 26 | 27 | var _ memproxy.Memcache = &Memcache{} 28 | 29 | // New ... 30 | func New() *Memcache { 31 | return &Memcache{ 32 | sessProvider: memproxy.NewSessionProvider(), 33 | 34 | entries: map[string]Entry{}, 35 | } 36 | } 37 | 38 | func (m *Memcache) nextCAS() uint64 { 39 | m.cas++ 40 | return m.cas 41 | } 42 | 43 | // Pipeline returns a Fake Pipeline 44 | // 45 | //revive:disable-next-line:cognitive-complexity 46 | func (m *Memcache) Pipeline(_ context.Context, _ ...memproxy.PipelineOption) memproxy.Pipeline { 47 | sess := m.sessProvider.New() 48 | var calls []func() 49 | doCalls := func() { 50 | for _, fn := range calls { 51 | fn() 52 | } 53 | calls = nil 54 | } 55 | 56 | pipe := &mocks.PipelineMock{} 57 | 58 | pipe.LeaseGetFunc = func(key string, options memproxy.LeaseGetOptions) memproxy.LeaseGetResult { 59 | var resp memproxy.LeaseGetResponse 60 | 61 | callFn := func() { 62 | m.mut.Lock() 63 | defer m.mut.Unlock() 64 | 65 | entry, ok := m.entries[key] 66 | 67 | if !ok { 68 | cas := m.nextCAS() 69 | m.entries[key] = Entry{ 70 | CAS: cas, 71 | } 72 | resp = memproxy.LeaseGetResponse{ 73 | Status: memproxy.LeaseGetStatusLeaseGranted, 74 | CAS: cas, 75 | } 76 | return 77 | } 78 | 79 | if !entry.Valid { 80 | resp = memproxy.LeaseGetResponse{ 81 | Status: memproxy.LeaseGetStatusLeaseRejected, 82 | CAS: entry.CAS, 83 | } 84 | return 85 | } 86 | 87 | resp = memproxy.LeaseGetResponse{ 88 | Status: memproxy.LeaseGetStatusFound, 89 | CAS: entry.CAS, 90 | Data: entry.Data, 91 | } 92 | } 93 | 94 | calls = append(calls, callFn) 95 | 96 | return memproxy.LeaseGetResultFunc(func() (memproxy.LeaseGetResponse, error) { 97 | doCalls() 98 | return resp, nil 99 | }) 100 | } 101 | 102 | pipe.LeaseSetFunc = func( 103 | key string, data []byte, cas uint64, options memproxy.LeaseSetOptions, 104 | ) func() (memproxy.LeaseSetResponse, error) { 105 | status := memproxy.LeaseSetStatusNotStored 106 | 107 | callFn := func() { 108 | m.mut.Lock() 109 | defer m.mut.Unlock() 110 | 111 | entry, ok := m.entries[key] 112 | if !ok { 113 | return 114 | } 115 | 116 | if entry.CAS != cas { 117 | return 118 | } 119 | 120 | m.entries[key] = Entry{ 121 | Valid: true, 122 | Data: data, 123 | CAS: cas, 124 | } 125 | status = memproxy.LeaseSetStatusStored 126 | } 127 | 128 | calls = append(calls, callFn) 129 | 130 | return func() (memproxy.LeaseSetResponse, error) { 131 | doCalls() 132 | return memproxy.LeaseSetResponse{ 133 | Status: status, 134 | }, nil 135 | } 136 | } 137 | 138 | pipe.DeleteFunc = func(key string, options memproxy.DeleteOptions) func() (memproxy.DeleteResponse, error) { 139 | callFn := func() { 140 | m.mut.Lock() 141 | defer 
m.mut.Unlock() 142 | 143 | delete(m.entries, key) 144 | } 145 | 146 | calls = append(calls, callFn) 147 | 148 | return func() (memproxy.DeleteResponse, error) { 149 | doCalls() 150 | return memproxy.DeleteResponse{}, nil 151 | } 152 | } 153 | 154 | pipe.FinishFunc = func() { 155 | doCalls() 156 | } 157 | 158 | pipe.ExecuteFunc = func() { 159 | doCalls() 160 | } 161 | 162 | pipe.LowerSessionFunc = func() memproxy.Session { 163 | return sess 164 | } 165 | 166 | return pipe 167 | } 168 | 169 | // Close ... 170 | func (*Memcache) Close() error { 171 | return nil 172 | } 173 | -------------------------------------------------------------------------------- /fake/fake_test.go: -------------------------------------------------------------------------------- 1 | package fake 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | 9 | "github.com/QuangTung97/memproxy" 10 | ) 11 | 12 | func newPipelineTest() memproxy.Pipeline { 13 | mc := New() 14 | return mc.Pipeline(context.Background()) 15 | } 16 | 17 | func TestPipeline(t *testing.T) { 18 | t.Run("lease-get-then-set", func(t *testing.T) { 19 | pipe := newPipelineTest() 20 | defer pipe.Finish() 21 | 22 | fn1 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 23 | fn2 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 24 | 25 | resp1, err := fn1.Result() 26 | assert.Equal(t, nil, err) 27 | assert.Equal(t, memproxy.LeaseGetResponse{ 28 | Status: memproxy.LeaseGetStatusLeaseGranted, 29 | CAS: 1, 30 | }, resp1) 31 | 32 | resp2, err := fn2.Result() 33 | assert.Equal(t, nil, err) 34 | assert.Equal(t, memproxy.LeaseGetResponse{ 35 | Status: memproxy.LeaseGetStatusLeaseRejected, 36 | CAS: 1, 37 | }, resp2) 38 | 39 | // Do Set 40 | setFn := pipe.LeaseSet("KEY01", []byte("data 01"), 1, memproxy.LeaseSetOptions{}) 41 | setResp, err := setFn() 42 | assert.Equal(t, nil, err) 43 | assert.Equal(t, memproxy.LeaseSetResponse{ 44 | Status: memproxy.LeaseSetStatusStored, 45 | }, setResp) 46 | 47 | fn3 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 48 | resp3, err := fn3.Result() 49 | assert.Equal(t, nil, err) 50 | assert.Equal(t, memproxy.LeaseGetResponse{ 51 | Status: memproxy.LeaseGetStatusFound, 52 | CAS: 1, 53 | Data: []byte("data 01"), 54 | }, resp3) 55 | }) 56 | 57 | t.Run("lease-get-2-different-keys", func(t *testing.T) { 58 | pipe := newPipelineTest() 59 | defer pipe.Finish() 60 | 61 | fn1 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 62 | fn2 := pipe.LeaseGet("KEY02", memproxy.LeaseGetOptions{}) 63 | 64 | resp1, err := fn1.Result() 65 | assert.Equal(t, nil, err) 66 | assert.Equal(t, memproxy.LeaseGetResponse{ 67 | Status: memproxy.LeaseGetStatusLeaseGranted, 68 | CAS: 1, 69 | }, resp1) 70 | 71 | resp2, err := fn2.Result() 72 | assert.Equal(t, nil, err) 73 | assert.Equal(t, memproxy.LeaseGetResponse{ 74 | Status: memproxy.LeaseGetStatusLeaseGranted, 75 | CAS: 2, 76 | }, resp2) 77 | }) 78 | 79 | t.Run("set-not-stored", func(t *testing.T) { 80 | pipe := newPipelineTest() 81 | defer pipe.Finish() 82 | 83 | fn1 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 84 | 85 | resp1, err := fn1.Result() 86 | assert.Equal(t, nil, err) 87 | assert.Equal(t, memproxy.LeaseGetResponse{ 88 | Status: memproxy.LeaseGetStatusLeaseGranted, 89 | CAS: 1, 90 | }, resp1) 91 | 92 | setFn := pipe.LeaseSet("KEY01", []byte("data 01"), 3, memproxy.LeaseSetOptions{}) 93 | setResp, err := setFn() 94 | assert.Equal(t, nil, err) 95 | assert.Equal(t, memproxy.LeaseSetResponse{ 96 | Status: memproxy.LeaseSetStatusNotStored, 97 | 
}, setResp) 98 | 99 | fn2 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 100 | resp2, err := fn2.Result() 101 | assert.Equal(t, nil, err) 102 | assert.Equal(t, memproxy.LeaseGetResponse{ 103 | Status: memproxy.LeaseGetStatusLeaseRejected, 104 | CAS: 1, 105 | }, resp2) 106 | }) 107 | 108 | t.Run("set-not-exist-not-stored", func(t *testing.T) { 109 | pipe := newPipelineTest() 110 | defer pipe.Finish() 111 | 112 | setFn := pipe.LeaseSet("KEY01", []byte("data 01"), 3, memproxy.LeaseSetOptions{}) 113 | setResp, err := setFn() 114 | assert.Equal(t, nil, err) 115 | assert.Equal(t, memproxy.LeaseSetResponse{ 116 | Status: memproxy.LeaseSetStatusNotStored, 117 | }, setResp) 118 | 119 | fn2 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 120 | resp2, err := fn2.Result() 121 | assert.Equal(t, nil, err) 122 | assert.Equal(t, memproxy.LeaseGetResponse{ 123 | Status: memproxy.LeaseGetStatusLeaseGranted, 124 | CAS: 1, 125 | }, resp2) 126 | }) 127 | 128 | t.Run("lease-get-and-delete", func(t *testing.T) { 129 | pipe := newPipelineTest() 130 | defer pipe.Finish() 131 | 132 | fn1 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 133 | resp1, err := fn1.Result() 134 | assert.Equal(t, nil, err) 135 | assert.Equal(t, memproxy.LeaseGetResponse{ 136 | Status: memproxy.LeaseGetStatusLeaseGranted, 137 | CAS: 1, 138 | }, resp1) 139 | 140 | delResp, err := pipe.Delete("KEY01", memproxy.DeleteOptions{})() 141 | assert.Equal(t, nil, err) 142 | assert.Equal(t, memproxy.DeleteResponse{}, delResp) 143 | 144 | fn2 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 145 | resp2, err := fn2.Result() 146 | assert.Equal(t, nil, err) 147 | assert.Equal(t, memproxy.LeaseGetResponse{ 148 | Status: memproxy.LeaseGetStatusLeaseGranted, 149 | CAS: 2, 150 | }, resp2) 151 | }) 152 | 153 | t.Run("lease-get-and-delete-and-lease-get-on-another-pipeline", func(t *testing.T) { 154 | mc := New() 155 | pipe1 := mc.Pipeline(context.Background()) 156 | 157 | fn1 := pipe1.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 158 | resp1, err := fn1.Result() 159 | assert.Equal(t, nil, err) 160 | assert.Equal(t, memproxy.LeaseGetResponse{ 161 | Status: memproxy.LeaseGetStatusLeaseGranted, 162 | CAS: 1, 163 | }, resp1) 164 | 165 | _, err = pipe1.Delete("KEY01", memproxy.DeleteOptions{})() 166 | assert.Equal(t, nil, err) 167 | 168 | pipe2 := mc.Pipeline(context.Background()) 169 | fn2 := pipe2.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 170 | resp2, err := fn2.Result() 171 | assert.Equal(t, nil, err) 172 | assert.Equal(t, memproxy.LeaseGetResponse{ 173 | Status: memproxy.LeaseGetStatusLeaseGranted, 174 | CAS: 2, 175 | }, resp2) 176 | }) 177 | } 178 | 179 | func TestPipeline__Do_Finish(t *testing.T) { 180 | t.Run("call-finish", func(t *testing.T) { 181 | mc := New() 182 | pipe1 := mc.Pipeline(context.Background()) 183 | 184 | resp1, err := pipe1.LeaseGet("KEY01", memproxy.LeaseGetOptions{}).Result() 185 | assert.Equal(t, nil, err) 186 | 187 | pipe1.LeaseSet("KEY01", []byte("data 01"), resp1.CAS, memproxy.LeaseSetOptions{}) 188 | 189 | pipe1.Finish() 190 | 191 | pipe2 := mc.Pipeline(context.Background()) 192 | 193 | resp2, err := pipe2.LeaseGet("KEY01", memproxy.LeaseGetOptions{}).Result() 194 | assert.Equal(t, nil, err) 195 | assert.Equal(t, memproxy.LeaseGetResponse{ 196 | Status: memproxy.LeaseGetStatusFound, 197 | CAS: 1, 198 | Data: []byte("data 01"), 199 | }, resp2) 200 | }) 201 | 202 | t.Run("call-execute", func(t *testing.T) { 203 | mc := New() 204 | pipe1 := mc.Pipeline(context.Background()) 205 | 206 | resp1, err := 
pipe1.LeaseGet("KEY01", memproxy.LeaseGetOptions{}).Result() 207 | assert.Equal(t, nil, err) 208 | 209 | pipe1.LeaseSet("KEY01", []byte("data 01"), resp1.CAS, memproxy.LeaseSetOptions{}) 210 | 211 | pipe1.Execute() 212 | 213 | pipe2 := mc.Pipeline(context.Background()) 214 | 215 | resp2, err := pipe2.LeaseGet("KEY01", memproxy.LeaseGetOptions{}).Result() 216 | assert.Equal(t, nil, err) 217 | assert.Equal(t, memproxy.LeaseGetResponse{ 218 | Status: memproxy.LeaseGetStatusFound, 219 | CAS: 1, 220 | Data: []byte("data 01"), 221 | }, resp2) 222 | }) 223 | 224 | t.Run("lower-session", func(t *testing.T) { 225 | mc := New() 226 | pipe := mc.Pipeline(context.Background()) 227 | 228 | sess := pipe.LowerSession() 229 | 230 | calls := 0 231 | sess.AddNextCall(memproxy.NewEmptyCallback(func() { 232 | calls++ 233 | })) 234 | sess.Execute() 235 | 236 | assert.Equal(t, 1, calls) 237 | 238 | assert.Nil(t, mc.Close()) 239 | }) 240 | } 241 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/QuangTung97/memproxy 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/QuangTung97/go-memcache v1.2.0 7 | github.com/google/btree v1.1.2 8 | github.com/matryer/moq v0.3.0 9 | github.com/mgechev/revive v1.3.1 10 | github.com/spaolacci/murmur3 v1.1.0 11 | github.com/stretchr/testify v1.8.2 12 | golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5 13 | ) 14 | 15 | require ( 16 | github.com/BurntSushi/toml v1.2.1 // indirect 17 | github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 // indirect 18 | github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect 19 | github.com/davecgh/go-spew v1.1.1 // indirect 20 | github.com/fatih/color v1.15.0 // indirect 21 | github.com/fatih/structtag v1.2.0 // indirect 22 | github.com/mattn/go-colorable v0.1.13 // indirect 23 | github.com/mattn/go-isatty v0.0.17 // indirect 24 | github.com/mattn/go-runewidth v0.0.9 // indirect 25 | github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect 26 | github.com/mitchellh/go-homedir v1.1.0 // indirect 27 | github.com/olekukonko/tablewriter v0.0.5 // indirect 28 | github.com/pkg/errors v0.9.1 // indirect 29 | github.com/pmezard/go-difflib v1.0.0 // indirect 30 | golang.org/x/mod v0.9.0 // indirect 31 | golang.org/x/sys v0.6.0 // indirect 32 | golang.org/x/tools v0.7.0 // indirect 33 | gopkg.in/yaml.v3 v3.0.1 // indirect 34 | ) 35 | -------------------------------------------------------------------------------- /go.work: -------------------------------------------------------------------------------- 1 | go 1.19 2 | 3 | use ( 4 | examples/simple 5 | . 
6 | ) -------------------------------------------------------------------------------- /go.work.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.0.0-20170206221025-ce650573d812 h1:OlBOgdliYbNVZliwxIKggGXluOjC+4jNtl62Gt7KWl8= 2 | dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= 3 | gioui.org v0.0.0-20210308172011-57750fc8a0a6 h1:K72hopUosKG3ntOPNG4OzzbuhxGuVf06fa2la1/H/Ho= 4 | github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= 5 | github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf h1:8F6fjL5iQP6sArGtPuXh0l6hggdcIpAm4ChjVJE4oTs= 6 | github.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0 h1:E5Dzlk3akC+T2Zj1LBHgfPK1y8YWgLDnNDRmG+tpSKw= 7 | github.com/ajstarks/svgo v0.0.0-20210923152817-c3b6e2f0c527 h1:NImof/JkF93OVWZY+PINgl6fPtQyF6f+hNUtZ0QZA1c= 8 | github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs= 9 | github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= 10 | github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ= 11 | github.com/go-fonts/latin-modern v0.2.0 h1:5/Tv1Ek/QCr20C6ZOz15vw3g7GELYL98KWr8Hgo+3vk= 12 | github.com/go-fonts/liberation v0.2.0 h1:jAkAWJP4S+OsrPLZM4/eC9iW7CtHy+HBXrEwZXWo5VM= 13 | github.com/go-fonts/stix v0.1.0 h1:UlZlgrvvmT/58o573ot7NFw0vZasZ5I6bcIft/oMdgg= 14 | github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= 15 | github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81 h1:6zl3BbBhdnMkpSj2YY30qV3gDcVBGtFgVsV3+/i+mKQ= 16 | github.com/go-pdf/fpdf v0.5.0 h1:GHpcYsiDV2hdo77VTOuTF9k1sN8F8IY7NjnCo9x+NPY= 17 | github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= 18 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 19 | github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50= 20 | github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18= 21 | github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M= 22 | github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI= 23 | github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4= 24 | github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM= 25 | github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e h1:CYRpN206UTHUinz3VJoLaBdy1gEGeJNsqT0mvswDcMw= 26 | github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0= 27 | github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ= 28 | github.com/phpdave11/gofpdi v1.0.13 h1:o61duiW8M9sMlkVXWlvP92sZJtGKENvW3VExs6dZukQ= 29 | github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 h1:K1Xf3bKttbF+koVGaX5xngRIZ5bVjbmPnaxE/dR08uY= 30 | github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= 31 | github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= 32 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 33 | golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 
h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk= 34 | golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3 h1:n9HxLrNxWWtEb1cA950nuEEj3QnKbtsCJ6KjcgisNUs= 35 | golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs= 36 | golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= 37 | golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= 38 | golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= 39 | golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382 h1:0OCuBzvchCPSrbAeMiELJtL7n1+h/OF/XAfzLNHtt80= 40 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 41 | golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= 42 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= 43 | gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= 44 | gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= 45 | gonum.org/v1/plot v0.10.0 h1:ymLukg4XJlQnYUJCp+coQq5M7BsUJFk6XQE4HPflwdw= 46 | google.golang.org/api v0.0.0-20170206182103-3d017632ea10 h1:aBEil1MW4dayuySSqEpRAgntGVdmqMVGpaSMN82Na78= 47 | google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= 48 | google.golang.org/grpc v0.0.0-20170208002647-2a6bf6142e96 h1:gBo1yWpiY9b2tWsiqph3ajtZ9DOup3hdyd93jJDmvRg= 49 | rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= 50 | -------------------------------------------------------------------------------- /heap.go: -------------------------------------------------------------------------------- 1 | package memproxy 2 | 3 | type delayedCallHeap struct { 4 | data []delayedCall 5 | } 6 | 7 | func heapParent(index int) int { 8 | return (index+1)/2 - 1 9 | } 10 | 11 | func heapLeftChild(index int) int { 12 | return index*2 + 1 13 | } 14 | 15 | func (h *delayedCallHeap) swap(i, j int) { 16 | h.data[i], h.data[j] = h.data[j], h.data[i] 17 | } 18 | 19 | func (h *delayedCallHeap) smaller(i, j int) bool { 20 | return h.data[i].startedAt.Before(h.data[j].startedAt) 21 | } 22 | 23 | func (h *delayedCallHeap) push(e delayedCall) { 24 | index := len(h.data) 25 | h.data = append(h.data, e) 26 | 27 | for index > 0 { 28 | parent := heapParent(index) 29 | if h.smaller(index, parent) { 30 | h.swap(index, parent) 31 | } 32 | index = parent 33 | } 34 | } 35 | 36 | func (h *delayedCallHeap) size() int { 37 | return len(h.data) 38 | } 39 | 40 | func (h *delayedCallHeap) top() delayedCall { 41 | return h.data[0] 42 | } 43 | 44 | func (h *delayedCallHeap) pop() delayedCall { 45 | result := h.data[0] 46 | last := len(h.data) - 1 47 | h.data[0] = h.data[last] 48 | h.data[last] = delayedCall{} // clear last 49 | h.data = h.data[:last] 50 | 51 | index := 0 52 | for { 53 | left := heapLeftChild(index) 54 | right := left + 1 55 | 56 | smallest := index 57 | if left < len(h.data) && h.smaller(left, smallest) { 58 | smallest = left 59 | } 60 | if right < len(h.data) && h.smaller(right, smallest) { 61 | smallest = right 62 | } 63 | 64 | if smallest == index { 65 | break 66 | } 67 | h.swap(index, smallest) 68 | index = smallest 69 | } 70 | 71 | return result 72 | } 73 | -------------------------------------------------------------------------------- /heap_test.go: -------------------------------------------------------------------------------- 1 | package memproxy 2 | 3 | import ( 4 | 
"github.com/stretchr/testify/assert" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func newHeapTest() *delayedCallHeap { 11 | return &delayedCallHeap{} 12 | } 13 | 14 | func newTime(s string) time.Time { 15 | t, err := time.Parse(time.RFC3339, s) 16 | if err != nil { 17 | panic(err) 18 | } 19 | return t.UTC() 20 | } 21 | 22 | func TestHeap_Simple(t *testing.T) { 23 | h := newHeapTest() 24 | 25 | h.push(delayedCall{startedAt: newTime("2022-05-09T10:00:00+07:00")}) 26 | assert.Equal(t, 1, h.size()) 27 | 28 | e := h.pop() 29 | assert.Equal(t, delayedCall{startedAt: newTime("2022-05-09T10:00:00+07:00")}, e) 30 | assert.Equal(t, 0, h.size()) 31 | } 32 | 33 | func TestHeap_Push_Smaller(t *testing.T) { 34 | h := newHeapTest() 35 | 36 | h.push(delayedCall{startedAt: newTime("2022-05-09T10:00:00+07:00")}) 37 | assert.Equal(t, 1, h.size()) 38 | 39 | h.push(delayedCall{startedAt: newTime("2022-05-08T10:00:00+07:00")}) 40 | assert.Equal(t, 2, h.size()) 41 | 42 | e := h.pop() 43 | assert.Equal(t, delayedCall{startedAt: newTime("2022-05-08T10:00:00+07:00")}, e) 44 | assert.Equal(t, 1, h.size()) 45 | 46 | e = h.pop() 47 | assert.Equal(t, delayedCall{startedAt: newTime("2022-05-09T10:00:00+07:00")}, e) 48 | assert.Equal(t, 0, h.size()) 49 | } 50 | 51 | func TestHeap_Properties_Based(t *testing.T) { 52 | start := newTime("2022-05-09T10:00:00+07:00") 53 | const num = 1000 54 | calls := make([]delayedCall, 0, num) 55 | for i := 0; i < num; i++ { 56 | calls = append(calls, delayedCall{ 57 | startedAt: start.Add(time.Duration(i) * time.Hour), 58 | }) 59 | } 60 | 61 | rand.Seed(1234) 62 | 63 | rand.Shuffle(len(calls), func(i, j int) { 64 | calls[i], calls[j] = calls[j], calls[i] 65 | }) 66 | 67 | h := newHeapTest() 68 | 69 | for _, call := range calls { 70 | h.push(call) 71 | } 72 | 73 | assert.Equal(t, delayedCall{startedAt: start}, h.top()) 74 | 75 | assert.Equal(t, num, h.size()) 76 | 77 | for i := 0; i < num; i++ { 78 | e := h.pop() 79 | assert.Equal(t, start.Add(time.Duration(i)*time.Hour), e.startedAt) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /item/item.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "log" 7 | "time" 8 | "unsafe" 9 | 10 | "github.com/QuangTung97/go-memcache/memcache" 11 | 12 | "github.com/QuangTung97/memproxy" 13 | ) 14 | 15 | // Value is the value constraint 16 | type Value interface { 17 | Marshal() ([]byte, error) 18 | } 19 | 20 | // Key is the key constraint 21 | type Key interface { 22 | comparable 23 | String() string 24 | } 25 | 26 | // Unmarshaler transforms raw bytes from memcached servers to the correct type 27 | type Unmarshaler[T any] func(data []byte) (T, error) 28 | 29 | // Filler is for getting data from the backing store and set back to memcached servers 30 | type Filler[T any, K any] func(ctx context.Context, key K) func() (T, error) 31 | 32 | type itemOptions struct { 33 | sleepDurations []time.Duration 34 | errorOnRetryLimit bool 35 | fillingOnCacheError bool 36 | errorLogger func(err error) 37 | } 38 | 39 | // Option ... 40 | type Option func(opts *itemOptions) 41 | 42 | // DefaultSleepDurations ... 
43 | func DefaultSleepDurations() []time.Duration { 44 | return []time.Duration{ 45 | 2 * time.Millisecond, 46 | 4 * time.Millisecond, 47 | 10 * time.Millisecond, 48 | 20 * time.Millisecond, 49 | } 50 | } 51 | 52 | func defaultErrorLogger(err error) { 53 | log.Println("[ERROR] item: get error:", err) 54 | } 55 | 56 | func computeOptions(options []Option) *itemOptions { 57 | opts := &itemOptions{ 58 | sleepDurations: DefaultSleepDurations(), 59 | errorOnRetryLimit: false, 60 | fillingOnCacheError: false, 61 | errorLogger: defaultErrorLogger, 62 | } 63 | 64 | for _, fn := range options { 65 | fn(opts) 66 | } 67 | return opts 68 | } 69 | 70 | // WithSleepDurations configures the sleep durations and number of retries after Lease Get returns Rejected status 71 | // default is the value of DefaultSleepDurations() 72 | func WithSleepDurations(durations ...time.Duration) Option { 73 | return func(opts *itemOptions) { 74 | opts.sleepDurations = durations 75 | } 76 | } 77 | 78 | // WithEnableErrorOnExceedRetryLimit enables returning an error when the retry limit is exceeded: 79 | // when enable = true, after retrying all the durations configured by WithSleepDurations, 80 | // Item.Get will return the error ErrExceededRejectRetryLimit 81 | // default enable = false 82 | func WithEnableErrorOnExceedRetryLimit(enable bool) Option { 83 | return func(opts *itemOptions) { 84 | opts.errorOnRetryLimit = enable 85 | } 86 | } 87 | 88 | // WithEnableFillingOnCacheError when enable = true, continues reading from the DB when getting from memcached returns an error 89 | // default enable = false 90 | func WithEnableFillingOnCacheError(enable bool) Option { 91 | return func(opts *itemOptions) { 92 | opts.fillingOnCacheError = enable 93 | } 94 | } 95 | 96 | // WithErrorLogger configures the error logger when there are problems with the memcache client or unmarshalling 97 | func WithErrorLogger(logger func(err error)) Option { 98 | return func(opts *itemOptions) { 99 | opts.errorLogger = logger 100 | } 101 | } 102 | 103 | // ErrNotFound must ONLY be returned from the filler function; it causes the lease get key to be deleted from the memcached server 104 | var ErrNotFound = errors.New("item: not found") 105 | 106 | // ErrExceededRejectRetryLimit returned when the number of rejected lease gets exceeds the number of sleep durations 107 | var ErrExceededRejectRetryLimit = errors.New("item: exceeded lease rejected retry limit") 108 | 109 | // ErrInvalidLeaseGetStatus ... 110 | var ErrInvalidLeaseGetStatus = errors.New("item: invalid lease get response status") 111 | 112 | type multiGetState[T any, K comparable] struct { 113 | completed bool 114 | keys []K 115 | result map[K]T 116 | err error 117 | } 118 | 119 | type multiGetFillerConfig struct { 120 | deleteOnNotFound bool 121 | } 122 | 123 | // MultiGetFillerOption ... 124 | type MultiGetFillerOption func(conf *multiGetFillerConfig) 125 | 126 | // WithMultiGetEnableDeleteOnNotFound when enable = true will delete the empty 127 | // key-value (used for lease get) from memcached server, 128 | // when the multiGetFunc does NOT return corresponding values for the keys. 129 | // Otherwise, the empty value (zero value) will be set to the memcached server. 130 | // By default, enable = false. 131 | func WithMultiGetEnableDeleteOnNotFound(enable bool) MultiGetFillerOption { 132 | return func(conf *multiGetFillerConfig) { 133 | conf.deleteOnNotFound = enable 134 | } 135 | } 136 | 137 | // NewMultiGetFiller ...
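// NewMultiGetFiller accumulates every key requested in a round into one
// slice and issues a single call to multiGetFunc for all of them.
// A hedged usage sketch (User, UserKey and loadUsersFromDB are
// illustrative assumptions, not part of this package):
//
//	filler := NewMultiGetFiller(
//		func(ctx context.Context, keys []UserKey) ([]User, error) {
//			// typically a single SELECT ... WHERE key IN (...)
//			return loadUsersFromDB(ctx, keys)
//		},
//		User.GetKey, // maps a fetched value back to its key
//	)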
138 | 139 | //revive:disable-next-line:cognitive-complexity 140 | func NewMultiGetFiller[T any, K comparable]( 141 | multiGetFunc func(ctx context.Context, keys []K) ([]T, error), 142 | getKey func(v T) K, 143 | options ...MultiGetFillerOption, 144 | ) Filler[T, K] { 145 | conf := &multiGetFillerConfig{ 146 | deleteOnNotFound: false, 147 | } 148 | for _, opt := range options { 149 | opt(conf) 150 | } 151 | 152 | var state *multiGetState[T, K] 153 | 154 | return func(ctx context.Context, key K) func() (T, error) { 155 | if state == nil { 156 | state = &multiGetState[T, K]{ 157 | result: map[K]T{}, 158 | } 159 | } 160 | s := state 161 | s.keys = append(s.keys, key) 162 | 163 | return func() (T, error) { 164 | if !s.completed { 165 | s.completed = true 166 | state = nil 167 | 168 | values, err := multiGetFunc(ctx, s.keys) 169 | if err != nil { 170 | s.err = err 171 | } else { 172 | for _, v := range values { 173 | s.result[getKey(v)] = v 174 | } 175 | } 176 | } 177 | 178 | if s.err != nil { 179 | var empty T 180 | return empty, s.err 181 | } 182 | 183 | result, ok := s.result[key] 184 | if !ok && conf.deleteOnNotFound { 185 | var empty T 186 | return empty, ErrNotFound 187 | } 188 | return result, nil 189 | } 190 | } 191 | } 192 | 193 | // New creates an item.Item. 194 | // Param: unmarshaler unmarshals the Value type from raw bytes. 195 | // Param: filler fetches data from the backing source (e.g. a database); 196 | // the function NewMultiGetFiller() can be used for simple multi-gets from a database 197 | func New[T Value, K Key]( 198 | pipeline memproxy.Pipeline, 199 | unmarshaler Unmarshaler[T], 200 | filler Filler[T, K], 201 | options ...Option, 202 | ) *Item[T, K] { 203 | return &Item[T, K]{ 204 | common: itemCommon{ 205 | options: computeOptions(options), 206 | sess: pipeline.LowerSession(), 207 | pipeline: pipeline, 208 | }, 209 | 210 | unmarshaler: unmarshaler, 211 | filler: filler, 212 | 213 | getKeys: map[K]*getResultType[T]{}, 214 | } 215 | } 216 | 217 | // Item is NOT thread safe, and it caches keys in memory: 218 | // once a key is cached, it will return the same value until **Reset** is called 219 | type Item[T Value, K Key] struct { 220 | unmarshaler Unmarshaler[T] 221 | filler Filler[T, K] 222 | 223 | getKeys map[K]*getResultType[T] 224 | 225 | common itemCommon 226 | } 227 | 228 | type itemCommon struct { 229 | options *itemOptions 230 | sess memproxy.Session 231 | pipeline memproxy.Pipeline 232 | stats Stats 233 | } 234 | 235 | func (i *itemCommon) addNextCall(fn func(obj unsafe.Pointer)) { 236 | i.sess.AddNextCall(memproxy.CallbackFunc{ 237 | Object: nil, 238 | Func: fn, 239 | }) 240 | } 241 | 242 | func (i *itemCommon) addDelayedCall(d time.Duration, fn func(obj unsafe.Pointer)) { 243 | i.sess.AddDelayedCall(d, memproxy.CallbackFunc{ 244 | Object: nil, 245 | Func: fn, 246 | }) 247 | } 248 | 249 | type getResultType[T any] struct { 250 | resp T 251 | err error 252 | } 253 | 254 | func (s *GetState[T, K]) handleLeaseGranted(cas uint64) { 255 | it := s.getItem() 256 | 257 | fillFn := it.filler(s.common.ctx, s.key) 258 | 259 | it.common.addNextCall(func(_ unsafe.Pointer) { 260 | it := s.common.item 261 | 262 | fillResp, err := fillFn() 263 | 264 | if err == ErrNotFound { 265 | s.setResponse(fillResp) 266 | it.pipeline.Delete(s.common.keyStr, memproxy.DeleteOptions{}) 267 | return 268 | } 269 | 270 | if err != nil { 271 | s.setResponseError(err) 272 | return 273 | } 274 | 275 | data, err := fillResp.Marshal() 276 | if err != nil { 277 | s.setResponseError(err) 278 |
return 279 | } 280 | s.setResponse(fillResp) 281 | 282 | if cas > 0 { 283 | _ = it.pipeline.LeaseSet(s.common.keyStr, data, cas, memproxy.LeaseSetOptions{}) 284 | it.addNextCall(func(obj unsafe.Pointer) { 285 | s.common.item.pipeline.Execute() 286 | }) 287 | } 288 | }) 289 | } 290 | 291 | type getStateMethods interface { 292 | setResponseError(err error) 293 | doFillFunc(cas uint64) 294 | unmarshalAndSet(data []byte) 295 | } 296 | 297 | type getStateCommon struct { 298 | ctx context.Context 299 | 300 | itemRoot unsafe.Pointer 301 | item *itemCommon 302 | 303 | retryCount int 304 | keyStr string 305 | 306 | leaseGetResult memproxy.LeaseGetResult 307 | 308 | methods getStateMethods 309 | } 310 | 311 | // GetState stores intermediate state when getting an item 312 | type GetState[T Value, K Key] struct { 313 | common *getStateCommon 314 | key K 315 | result getResultType[T] 316 | } 317 | 318 | func (s *GetState[T, K]) getItem() *Item[T, K] { 319 | return (*Item[T, K])(s.common.itemRoot) 320 | } 321 | 322 | func (s *GetState[T, K]) unmarshalAndSet(data []byte) { 323 | it := s.getItem() 324 | resp, err := it.unmarshaler(data) 325 | 326 | memcache.ReleaseGetResponseData(data) 327 | 328 | if err != nil { 329 | s.setResponseError(err) 330 | return 331 | } 332 | s.setResponse(resp) 333 | } 334 | 335 | func (s *GetState[T, K]) setResponseError(err error) { 336 | it := s.getItem() 337 | 338 | s.common.item.options.errorLogger(err) 339 | it.getKeys[s.key].err = err 340 | } 341 | 342 | func (s *GetState[T, K]) setResponse(resp T) { 343 | it := s.getItem() 344 | it.getKeys[s.key].resp = resp 345 | } 346 | 347 | func (s *GetState[T, K]) doFillFunc(cas uint64) { 348 | s.common.item.stats.FillCount++ 349 | s.handleLeaseGranted(cas) 350 | } 351 | 352 | func (s *getStateCommon) handleCacheError(err error) { 353 | s.item.stats.LeaseGetError++ 354 | if s.item.options.fillingOnCacheError { 355 | s.item.options.errorLogger(err) 356 | s.methods.doFillFunc(0) 357 | } else { 358 | s.methods.setResponseError(err) 359 | } 360 | } 361 | 362 | func (s *getStateCommon) newNextCallback() memproxy.CallbackFunc { 363 | return memproxy.CallbackFunc{ 364 | Object: unsafe.Pointer(s), 365 | Func: stateCommonNextCallback, 366 | } 367 | } 368 | 369 | func stateCommonNextCallback(obj unsafe.Pointer) { 370 | s := (*getStateCommon)(obj) 371 | s.nextFunc() 372 | } 373 | 374 | func (s *getStateCommon) nextFunc() { 375 | leaseGetResp, err := s.leaseGetResult.Result() 376 | 377 | s.leaseGetResult = nil 378 | 379 | if err != nil { 380 | s.handleCacheError(err) 381 | return 382 | } 383 | 384 | it := s.item 385 | 386 | if leaseGetResp.Status == memproxy.LeaseGetStatusFound { 387 | it.stats.HitCount++ 388 | it.stats.TotalBytesRecv += uint64(len(leaseGetResp.Data)) 389 | 390 | s.methods.unmarshalAndSet(leaseGetResp.Data) 391 | return 392 | } 393 | 394 | if leaseGetResp.Status == memproxy.LeaseGetStatusLeaseGranted { 395 | s.methods.doFillFunc(leaseGetResp.CAS) 396 | return 397 | } 398 | 399 | if leaseGetResp.Status == memproxy.LeaseGetStatusLeaseRejected { 400 | it.increaseRejectedCount(s.retryCount) 401 | 402 | if s.retryCount < len(it.options.sleepDurations) { 403 | it.addDelayedCall(it.options.sleepDurations[s.retryCount], func(_ unsafe.Pointer) { 404 | s.retryCount++ 405 | 406 | s.leaseGetResult = it.pipeline.LeaseGet(s.keyStr, memproxy.LeaseGetOptions{}) 407 | it.sess.AddNextCall(s.newNextCallback()) 408 | }) 409 | return 410 | } 411 | 412 | if !it.options.errorOnRetryLimit { 413 | s.methods.doFillFunc(leaseGetResp.CAS) 414 | return 415 |
} 416 | 417 | s.methods.setResponseError(ErrExceededRejectRetryLimit) 418 | return 419 | } 420 | 421 | s.handleCacheError(ErrInvalidLeaseGetStatus) 422 | } 423 | 424 | // Result executes the pending calls and returns the result 425 | func (s *GetState[T, K]) Result() (T, error) { 426 | it := s.getItem() 427 | it.common.sess.Execute() 428 | 429 | putGetStateCommon(s.common) 430 | s.common = nil 431 | 432 | result := it.getKeys[s.key] 433 | return result.resp, result.err 434 | } 435 | 436 | // Get gets a single item with the given key 437 | func (i *Item[T, K]) Get(ctx context.Context, key K) func() (T, error) { 438 | return i.GetFast(ctx, key).Result 439 | } 440 | 441 | // GetFast is similar to Get but saves one allocation 442 | func (i *Item[T, K]) GetFast(ctx context.Context, key K) *GetState[T, K] { 443 | keyStr := key.String() 444 | 445 | // init get state common 446 | sc := newGetStateCommon() 447 | 448 | sc.ctx = ctx 449 | 450 | sc.itemRoot = unsafe.Pointer(i) 451 | sc.item = &i.common 452 | 453 | sc.keyStr = keyStr 454 | // end init get state common 455 | 456 | state := &GetState[T, K]{ 457 | common: sc, 458 | key: key, 459 | } 460 | 461 | sc.methods = state 462 | 463 | _, existed := i.getKeys[key] 464 | if existed { 465 | return state 466 | } 467 | i.getKeys[key] = &state.result 468 | 469 | sc.leaseGetResult = i.common.pipeline.LeaseGet(keyStr, memproxy.LeaseGetOptions{}) 470 | 471 | i.common.sess.AddNextCall(sc.newNextCallback()) 472 | 473 | return state 474 | } 475 | 476 | // GetMulti gets multiple keys at once 477 | func (i *Item[T, K]) GetMulti(ctx context.Context, keys []K) func() ([]T, error) { 478 | states := make([]*GetState[T, K], 0, len(keys)) 479 | for _, k := range keys { 480 | state := i.GetFast(ctx, k) 481 | states = append(states, state) 482 | } 483 | 484 | return func() ([]T, error) { 485 | result := make([]T, 0, len(states)) 486 | for _, state := range states { 487 | val, err := state.Result() 488 | if err != nil { 489 | return nil, err 490 | } 491 | result = append(result, val) 492 | } 493 | return result, nil 494 | } 495 | } 496 | 497 | func (i *itemCommon) increaseRejectedCount(retryCount int) { 498 | i.stats.TotalRejectedCount++ 499 | 500 | switch retryCount { 501 | case 0: 502 | i.stats.FirstRejectedCount++ 503 | case 1: 504 | i.stats.SecondRejectedCount++ 505 | case 2: 506 | i.stats.ThirdRejectedCount++ 507 | } 508 | } 509 | 510 | // LowerSession ... 511 | func (i *Item[T, K]) LowerSession() memproxy.Session { 512 | return i.common.sess.GetLower() 513 | } 514 | 515 | // Reset clears the in-memory cached values 516 | func (i *Item[T, K]) Reset() { 517 | i.getKeys = map[K]*getResultType[T]{} 518 | } 519 | 520 | // Stats ... 521 | type Stats struct { 522 | HitCount uint64 523 | FillCount uint64 // can also be interpreted as the miss count 524 | 525 | LeaseGetError uint64 // lease get error count 526 | 527 | FirstRejectedCount uint64 528 | SecondRejectedCount uint64 529 | ThirdRejectedCount uint64 530 | TotalRejectedCount uint64 531 | 532 | TotalBytesRecv uint64 533 | } 534 | 535 | // GetStats ...
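// GetStats returns a snapshot of the counters above. Because FillCount can
// be read as the miss count, the cache hit ratio can be derived as
// HitCount / (HitCount + FillCount).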
536 | func (i *Item[T, K]) GetStats() Stats { 537 | return i.common.stats 538 | } 539 | -------------------------------------------------------------------------------- /item/item_bench_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "os" 7 | "runtime" 8 | "runtime/pprof" 9 | "strconv" 10 | "testing" 11 | 12 | "github.com/QuangTung97/go-memcache/memcache" 13 | "github.com/stretchr/testify/assert" 14 | 15 | "github.com/QuangTung97/memproxy" 16 | "github.com/QuangTung97/memproxy/proxy" 17 | ) 18 | 19 | type benchValue struct { 20 | key uint64 21 | value int64 22 | } 23 | 24 | type benchKey struct { 25 | key uint64 26 | } 27 | 28 | func (k benchKey) String() string { 29 | return strconv.FormatUint(k.key, 10) 30 | } 31 | 32 | func (v benchValue) Marshal() ([]byte, error) { 33 | var result [16]byte 34 | binary.LittleEndian.PutUint64(result[:], v.key) 35 | binary.LittleEndian.PutUint64(result[8:], uint64(v.value)) 36 | return result[:], nil 37 | } 38 | 39 | func unmarshalBench(data []byte) (benchValue, error) { 40 | key := binary.LittleEndian.Uint64(data[:]) 41 | val := binary.LittleEndian.Uint64(data[8:]) 42 | return benchValue{ 43 | key: key, 44 | value: int64(val), 45 | }, nil 46 | } 47 | 48 | func TestMarshalBench(t *testing.T) { 49 | v := benchValue{ 50 | key: 124, 51 | value: 889, 52 | } 53 | 54 | data, err := v.Marshal() 55 | assert.Equal(t, nil, err) 56 | 57 | result, err := unmarshalBench(data) 58 | assert.Equal(t, nil, err) 59 | assert.Equal(t, v, result) 60 | } 61 | 62 | func clearMemcache(c *memcache.Client) { 63 | pipe := c.Pipeline() 64 | defer pipe.Finish() 65 | err := pipe.FlushAll()() 66 | if err != nil { 67 | panic(err) 68 | } 69 | } 70 | 71 | func newMemcache(b *testing.B) memproxy.Memcache { 72 | client, err := memcache.New("localhost:11211", 1) 73 | if err != nil { 74 | panic(err) 75 | } 76 | clearMemcache(client) 77 | 78 | mc := memproxy.NewPlainMemcache(client) 79 | b.Cleanup(func() { _ = mc.Close() }) 80 | 81 | return mc 82 | } 83 | 84 | func newMemcacheWithProxy(b *testing.B) memproxy.Memcache { 85 | clearClient, err := memcache.New("localhost:11211", 1) 86 | if err != nil { 87 | panic(err) 88 | } 89 | clearMemcache(clearClient) 90 | defer func() { _ = clearClient.Close() }() 91 | 92 | server1 := proxy.SimpleServerConfig{ 93 | ID: 1, 94 | Host: "localhost", 95 | Port: 11211, 96 | } 97 | 98 | servers := []proxy.SimpleServerConfig{server1} 99 | mc, closeFunc, err := proxy.NewSimpleReplicatedMemcache(servers, 1, proxy.NewSimpleStats(servers)) 100 | if err != nil { 101 | panic(err) 102 | } 103 | b.Cleanup(closeFunc) 104 | 105 | if err != nil { 106 | panic(err) 107 | } 108 | 109 | return mc 110 | } 111 | 112 | func BenchmarkItemGetSingle(b *testing.B) { 113 | mc := newMemcache(b) 114 | 115 | b.ResetTimer() 116 | 117 | value := int64(112) 118 | 119 | for n := 0; n < b.N; n++ { 120 | pipe := mc.Pipeline(context.Background()) 121 | 122 | var filler Filler[benchValue, benchKey] = func(ctx context.Context, key benchKey) func() (benchValue, error) { 123 | return func() (benchValue, error) { 124 | value++ 125 | return benchValue{ 126 | key: key.key, 127 | value: value, 128 | }, nil 129 | } 130 | } 131 | autoFill := New[benchValue, benchKey](pipe, unmarshalBench, filler) 132 | 133 | fn := autoFill.Get(context.Background(), benchKey{ 134 | key: 3344, 135 | }) 136 | 137 | val, err := fn() 138 | if err != nil { 139 | panic(err) 140 | } 141 | 142 | if val.value != value { 
143 | panic(value) 144 | } 145 | 146 | pipe.Finish() 147 | } 148 | } 149 | 150 | func writeMemProfile() { 151 | if os.Getenv("ENABLE_BENCH_PROFILE") == "" { 152 | return 153 | } 154 | 155 | file, err := os.Create("./bench_profile.out") 156 | if err != nil { 157 | panic(err) 158 | } 159 | defer func() { 160 | err := file.Close() 161 | if err != nil { 162 | panic(err) 163 | } 164 | }() 165 | 166 | err = pprof.WriteHeapProfile(file) 167 | if err != nil { 168 | panic(err) 169 | } 170 | } 171 | 172 | func benchmarkWithBatch( 173 | b *testing.B, 174 | newFunc func(b *testing.B) memproxy.Memcache, 175 | batchSize int, 176 | ) { 177 | mc := newFunc(b) 178 | 179 | b.ResetTimer() 180 | 181 | value := int64(112) 182 | 183 | for n := 0; n < b.N; n++ { 184 | pipe := mc.Pipeline(context.Background()) 185 | 186 | var filler Filler[benchValue, benchKey] = func(ctx context.Context, key benchKey) func() (benchValue, error) { 187 | return func() (benchValue, error) { 188 | value++ 189 | return benchValue{ 190 | key: key.key, 191 | value: value, 192 | }, nil 193 | } 194 | } 195 | autoFill := New[benchValue, benchKey](pipe, unmarshalBench, filler) 196 | 197 | fnList := make([]func() (benchValue, error), 0, batchSize) 198 | for i := 0; i < batchSize; i++ { 199 | fn := autoFill.Get(context.Background(), benchKey{ 200 | key: 33000 + uint64(i), 201 | }) 202 | fnList = append(fnList, fn) 203 | } 204 | 205 | for _, fn := range fnList { 206 | _, err := fn() 207 | if err != nil { 208 | panic(err) 209 | } 210 | } 211 | pipe.Finish() 212 | } 213 | 214 | b.StopTimer() 215 | writeMemProfile() 216 | } 217 | 218 | func BenchmarkItemGetByBatch1000(b *testing.B) { 219 | benchmarkWithBatch(b, newMemcache, 1000) // => 400K / seconds 220 | } 221 | 222 | func BenchmarkItemGetByBatch100(b *testing.B) { 223 | benchmarkWithBatch(b, newMemcache, 100) // => 348K / seconds 224 | } 225 | 226 | func BenchmarkItemWithProxyGetByBatch1000(b *testing.B) { 227 | benchmarkWithBatch(b, newMemcacheWithProxy, 1000) // => 400K / seconds 228 | } 229 | 230 | func BenchmarkItemWithProxyGetByBatch100(b *testing.B) { 231 | benchmarkWithBatch(b, newMemcacheWithProxy, 100) // => 348K / seconds 232 | } 233 | 234 | func BenchmarkHeapAlloc(b *testing.B) { 235 | count := uint64(0) 236 | var last any 237 | for n := 0; n < b.N; n++ { 238 | x := make([]byte, 128) 239 | var v any = x 240 | v.([]byte)[0] = uint8(count) 241 | count += uint64(x[0]) 242 | last = x 243 | } 244 | runtime.KeepAlive(last) 245 | } 246 | -------------------------------------------------------------------------------- /item/item_property_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 | "github.com/QuangTung97/go-memcache/memcache" 12 | "github.com/stretchr/testify/assert" 13 | 14 | "github.com/QuangTung97/memproxy" 15 | "github.com/QuangTung97/memproxy/proxy" 16 | ) 17 | 18 | type itemPropertyTest struct { 19 | client *memcache.Client 20 | client2 *memcache.Client 21 | 22 | mc memproxy.Memcache 23 | 24 | mut sync.Mutex 25 | currentAge int64 26 | } 27 | 28 | func (p *itemPropertyTest) newItem() (*Item[userValue, userKey], func()) { 29 | pipe := p.mc.Pipeline(newContext()) 30 | return New[userValue, userKey]( 31 | pipe, unmarshalUser, 32 | NewMultiGetFiller[userValue, userKey](func(ctx context.Context, keys []userKey) ([]userValue, error) { 33 | values := make([]userValue, 0, len(keys)) 34 | 35 | p.mut.Lock() 36 | for _, k := range 
keys { 37 | values = append(values, userValue{ 38 | Tenant: k.Tenant, 39 | Name: k.Name, 40 | Age: p.currentAge, 41 | }) 42 | } 43 | p.mut.Unlock() 44 | 45 | time.Sleep(time.Millisecond * time.Duration(rand.Intn(6))) 46 | 47 | return values, nil 48 | }, userValue.GetKey), 49 | WithEnableErrorOnExceedRetryLimit(true), 50 | ), pipe.Finish 51 | } 52 | 53 | func (p *itemPropertyTest) updateAge(key userKey) { 54 | p.mut.Lock() 55 | p.currentAge++ 56 | p.mut.Unlock() 57 | 58 | pipe := p.mc.Pipeline(newContext()) 59 | pipe.Delete(key.String(), memproxy.DeleteOptions{}) 60 | pipe.Finish() 61 | } 62 | 63 | func (p *itemPropertyTest) flushAll() { 64 | pipe := p.client.Pipeline() 65 | err := pipe.FlushAll()() 66 | if err != nil { 67 | panic(err) 68 | } 69 | 70 | if p.client2 != nil { 71 | pipe := p.client2.Pipeline() 72 | err := pipe.FlushAll()() 73 | if err != nil { 74 | panic(err) 75 | } 76 | } 77 | } 78 | 79 | func newItemPropertyTest(t *testing.T) *itemPropertyTest { 80 | p := &itemPropertyTest{} 81 | 82 | client, err := memcache.New("localhost:11211", 3) 83 | if err != nil { 84 | panic(err) 85 | } 86 | t.Cleanup(func() { _ = client.Close() }) 87 | p.client = client 88 | 89 | p.mc = memproxy.NewPlainMemcache(client) 90 | return p 91 | } 92 | 93 | func newItemPropertyTestWithProxy(t *testing.T) *itemPropertyTest { 94 | p := &itemPropertyTest{} 95 | 96 | client, err := memcache.New("localhost:11211", 3) 97 | if err != nil { 98 | panic(err) 99 | } 100 | t.Cleanup(func() { _ = client.Close() }) 101 | p.client = client 102 | 103 | servers := []proxy.SimpleServerConfig{ 104 | { 105 | Host: "localhost", 106 | Port: 11211, 107 | }, 108 | } 109 | mc, closeFunc, err := proxy.NewSimpleReplicatedMemcache( 110 | servers, 3, 111 | proxy.NewSimpleStats(servers), 112 | ) 113 | if err != nil { 114 | panic(err) 115 | } 116 | t.Cleanup(closeFunc) 117 | p.mc = mc 118 | 119 | return p 120 | } 121 | 122 | func newItemPropertyTestWithProxyTwoNodes(t *testing.T) *itemPropertyTest { 123 | p := &itemPropertyTest{} 124 | 125 | client1, err := memcache.New("localhost:11211", 3) 126 | if err != nil { 127 | panic(err) 128 | } 129 | 130 | client2, err := memcache.New("localhost:11212", 3) 131 | if err != nil { 132 | panic(err) 133 | } 134 | 135 | t.Cleanup(func() { 136 | _ = client1.Close() 137 | _ = client2.Close() 138 | }) 139 | 140 | p.client = client1 141 | p.client2 = client2 142 | 143 | servers := []proxy.SimpleServerConfig{ 144 | { 145 | Host: "localhost", 146 | Port: 11211, 147 | }, 148 | { 149 | Host: "localhost", 150 | Port: 11212, 151 | }, 152 | } 153 | mc, closeFunc, err := proxy.NewSimpleReplicatedMemcache( 154 | servers, 3, 155 | proxy.NewSimpleStats(servers), 156 | ) 157 | if err != nil { 158 | panic(err) 159 | } 160 | t.Cleanup(closeFunc) 161 | p.mc = mc 162 | 163 | return p 164 | } 165 | 166 | func (p *itemPropertyTest) testConsistency(t *testing.T) { 167 | var wg sync.WaitGroup 168 | 169 | const numThreads = 5 170 | 171 | wg.Add(numThreads * 4) 172 | 173 | for th := 0; th < numThreads*3; th++ { 174 | go func() { 175 | defer wg.Done() 176 | 177 | time.Sleep(time.Millisecond * time.Duration(rand.Intn(5))) 178 | 179 | it, finish := p.newItem() 180 | defer finish() 181 | 182 | fn := it.Get(newContext(), userKey{ 183 | Tenant: "TENANT01", 184 | Name: "user01", 185 | }) 186 | _, err := fn() 187 | if err != nil { 188 | panic(err) 189 | } 190 | }() 191 | } 192 | 193 | for th := 0; th < numThreads; th++ { 194 | go func() { 195 | defer wg.Done() 196 | 197 | time.Sleep(time.Millisecond * time.Duration(rand.Intn(5))) 198 
| 199 | p.updateAge(userKey{ 200 | Tenant: "TENANT01", 201 | Name: "user01", 202 | }) 203 | }() 204 | } 205 | 206 | wg.Wait() 207 | 208 | it, finish := p.newItem() 209 | defer finish() 210 | 211 | fn := it.Get(newContext(), userKey{ 212 | Tenant: "TENANT01", 213 | Name: "user01", 214 | }) 215 | 216 | val, err := fn() 217 | assert.Equal(t, nil, err) 218 | assert.Equal(t, userValue{ 219 | Tenant: "TENANT01", 220 | Name: "user01", 221 | Age: p.currentAge, 222 | }, val) 223 | fmt.Println(p.currentAge) 224 | } 225 | 226 | func TestProperty_SingleKey(t *testing.T) { 227 | t.Run("normal", func(t *testing.T) { 228 | seed := time.Now().UnixNano() 229 | rand.Seed(seed) 230 | fmt.Println("SEED:", seed) 231 | 232 | p := newItemPropertyTest(t) 233 | 234 | for i := 0; i < 100; i++ { 235 | p.flushAll() 236 | p.testConsistency(t) 237 | } 238 | }) 239 | 240 | t.Run("with-proxy", func(t *testing.T) { 241 | seed := time.Now().UnixNano() 242 | rand.Seed(seed) 243 | fmt.Println("SEED:", seed) 244 | 245 | p := newItemPropertyTestWithProxy(t) 246 | 247 | for i := 0; i < 100; i++ { 248 | p.flushAll() 249 | p.testConsistency(t) 250 | } 251 | }) 252 | 253 | t.Run("with-proxy-two-nodes", func(t *testing.T) { 254 | seed := time.Now().UnixNano() 255 | rand.Seed(seed) 256 | fmt.Println("SEED:", seed) 257 | 258 | p := newItemPropertyTestWithProxyTwoNodes(t) 259 | 260 | for i := 0; i < 100; i++ { 261 | p.flushAll() 262 | p.testConsistency(t) 263 | } 264 | }) 265 | } 266 | -------------------------------------------------------------------------------- /item/pool.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | var getStateCommonPool = sync.Pool{ 8 | New: func() any { 9 | return &getStateCommon{} 10 | }, 11 | } 12 | 13 | func newGetStateCommon() *getStateCommon { 14 | return getStateCommonPool.Get().(*getStateCommon) 15 | } 16 | 17 | func putGetStateCommon(s *getStateCommon) { 18 | *s = getStateCommon{} 19 | getStateCommonPool.Put(s) 20 | } 21 | -------------------------------------------------------------------------------- /item/pool_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | -------------------------------------------------------------------------------- /memproxy.go: -------------------------------------------------------------------------------- 1 | package memproxy 2 | 3 | import ( 4 | "context" 5 | "time" 6 | "unsafe" 7 | ) 8 | 9 | // Memcache represents a generic Memcache interface 10 | // implementations of this interface must be thread safe 11 | type Memcache interface { 12 | // Pipeline creates a Pipeline, a NON thread safe object 13 | Pipeline(ctx context.Context, options ...PipelineOption) Pipeline 14 | 15 | // Close shuts down the memcache client 16 | Close() error 17 | } 18 | 19 | // LeaseGetResult is the response of LeaseGet; the Result method MUST only be called once. 20 | // Calling Result more than once is undefined behaviour 21 | type LeaseGetResult interface { 22 | Result() (LeaseGetResponse, error) 23 | } 24 | 25 | // LeaseGetErrorResult for an error-only result 26 | type LeaseGetErrorResult struct { 27 | Error error 28 | } 29 | 30 | // Result ... 31 | func (r LeaseGetErrorResult) Result() (LeaseGetResponse, error) { 32 | return LeaseGetResponse{}, r.Error 33 | } 34 | 35 | // LeaseGetResultFunc for function implementation of LeaseGetResult 36 | type LeaseGetResultFunc func() (LeaseGetResponse, error) 37 | 38 | // Result ...
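// Result simply invokes the function itself, so a plain closure can satisfy
// the LeaseGetResult interface without defining a new struct type.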
39 | func (f LeaseGetResultFunc) Result() (LeaseGetResponse, error) { 40 | return f() 41 | } 42 | 43 | // Pipeline represents a generic Pipeline 44 | type Pipeline interface { 45 | // LeaseGet should not be used directly, use the item or mmap package instead 46 | LeaseGet(key string, options LeaseGetOptions) LeaseGetResult 47 | 48 | LeaseSet(key string, data []byte, cas uint64, options LeaseSetOptions) func() (LeaseSetResponse, error) 49 | Delete(key string, options DeleteOptions) func() (DeleteResponse, error) 50 | 51 | // Execute flushes commands to the network 52 | Execute() 53 | 54 | // Finish must be called after creating a Pipeline, often via defer 55 | Finish() 56 | 57 | // LowerSession returns a lower priority session 58 | LowerSession() Session 59 | } 60 | 61 | // SessionProvider is for controlling delayed tasks; this object is Thread Safe 62 | type SessionProvider interface { 63 | New() Session 64 | } 65 | 66 | // CallbackFunc for session 67 | type CallbackFunc struct { 68 | Object unsafe.Pointer 69 | Func func(obj unsafe.Pointer) 70 | } 71 | 72 | // Call ... 73 | func (f CallbackFunc) Call() { 74 | f.Func(f.Object) 75 | } 76 | 77 | // NewEmptyCallback creates a CallbackFunc from a function with no arguments 78 | func NewEmptyCallback(fn func()) CallbackFunc { 79 | return CallbackFunc{ 80 | Object: nil, 81 | Func: func(_ unsafe.Pointer) { 82 | fn() 83 | }, 84 | } 85 | } 86 | 87 | // Session controls session values & delayed tasks; this object is NOT Thread Safe 88 | type Session interface { 89 | AddNextCall(fn CallbackFunc) 90 | AddDelayedCall(d time.Duration, fn CallbackFunc) 91 | Execute() 92 | 93 | GetLower() Session 94 | } 95 | 96 | // LeaseGetOptions lease get options 97 | type LeaseGetOptions struct { 98 | } 99 | 100 | // LeaseGetStatus status of lease get 101 | type LeaseGetStatus uint32 102 | 103 | const ( 104 | // LeaseGetStatusFound returns Data 105 | LeaseGetStatusFound LeaseGetStatus = iota + 1 106 | 107 | // LeaseGetStatusLeaseGranted lease granted 108 | LeaseGetStatusLeaseGranted 109 | 110 | // LeaseGetStatusLeaseRejected lease rejected 111 | LeaseGetStatusLeaseRejected 112 | ) 113 | 114 | // LeaseGetResponse lease get response 115 | type LeaseGetResponse struct { 116 | Status LeaseGetStatus 117 | CAS uint64 118 | Data []byte 119 | } 120 | 121 | // LeaseSetOptions lease set options 122 | type LeaseSetOptions struct { 123 | TTL uint32 124 | } 125 | 126 | // LeaseSetStatus ... 127 | type LeaseSetStatus uint32 128 | 129 | const ( 130 | // LeaseSetStatusStored ... 131 | LeaseSetStatusStored LeaseSetStatus = iota + 1 132 | 133 | // LeaseSetStatusNotStored NOT stored because the key has already been deleted or the CAS has changed 134 | LeaseSetStatusNotStored 135 | ) 136 | 137 | // LeaseSetResponse lease set response 138 | type LeaseSetResponse struct { 139 | Status LeaseSetStatus 140 | } 141 | 142 | // DeleteOptions delete options 143 | type DeleteOptions struct { 144 | } 145 | 146 | // DeleteResponse delete response 147 | type DeleteResponse struct { 148 | } 149 | 150 | // ============================================== 151 | // Pipeline Options 152 | // ============================================== 153 | 154 | // PipelineConfig ... 155 | type PipelineConfig struct { 156 | existingSess Session 157 | } 158 | 159 | // GetSession ... 160 | func (c *PipelineConfig) GetSession(provider SessionProvider) Session { 161 | if c.existingSess != nil { 162 | return c.existingSess 163 | } 164 | return provider.New() 165 | } 166 | 167 | // ComputePipelineConfig ...
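// ComputePipelineConfig folds the given options over a zero-value config;
// with no options the embedded session stays nil and GetSession falls back
// to provider.New(). A hedged sketch of reusing one session across pipelines
// (mc, ctx and provider are assumed to already exist):
//
//	sess := provider.New()
//	pipe := mc.Pipeline(ctx, WithPipelineExistingSession(sess))
//	defer pipe.Finish()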
168 | func ComputePipelineConfig(options []PipelineOption) *PipelineConfig { 169 | conf := &PipelineConfig{ 170 | existingSess: nil, 171 | } 172 | for _, fn := range options { 173 | fn(conf) 174 | } 175 | return conf 176 | } 177 | 178 | // PipelineOption ... 179 | type PipelineOption func(conf *PipelineConfig) 180 | 181 | // WithPipelineExistingSession ... 182 | func WithPipelineExistingSession(sess Session) PipelineOption { 183 | return func(conf *PipelineConfig) { 184 | conf.existingSess = sess 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /mmap/README.md: -------------------------------------------------------------------------------- 1 | ## MMap - An Efficient & High Cardinality Hash Map that Lives inside Memcached Servers 2 | ### Warning: This is an experimental package, and should NOT be used in production environments -------------------------------------------------------------------------------- /mmap/bucket.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "errors" 8 | "math" 9 | "strconv" 10 | 11 | "github.com/QuangTung97/memproxy/item" 12 | ) 13 | 14 | // BucketKey ... 15 | type BucketKey[R RootKey] struct { 16 | RootKey R 17 | SizeLog uint8 18 | Hash uint64 19 | Sep string // separator 20 | } 21 | 22 | // String ... 23 | func (k BucketKey[R]) String() string { 24 | rootKey := k.RootKey.String() 25 | 26 | // 2 bytes for size log 27 | // 8 bytes for hash value => can contain 4 byte uint32 28 | // 2 bytes for separator 29 | // 116 bytes for key length 30 | var tmpBuf [128]byte 31 | result := tmpBuf[:0] 32 | 33 | result = append(result, rootKey...) 34 | result = append(result, k.Sep...) 35 | 36 | sizeLogNum := strconv.FormatInt(int64(k.SizeLog), 10) 37 | result = append(result, sizeLogNum...) 38 | 39 | result = append(result, k.Sep...) 40 | 41 | hash := k.Hash & (math.MaxUint64 << (64 - k.SizeLog)) 42 | 43 | var data [8]byte 44 | binary.BigEndian.PutUint64(data[:], hash) 45 | 46 | numBytes := (k.SizeLog + 7) >> 3 47 | hexStr := hex.EncodeToString(data[:numBytes]) 48 | 49 | numDigits := (k.SizeLog + 3) >> 2 50 | if numDigits&0b1 != 0 { 51 | hexStr = hexStr[:len(hexStr)-1] 52 | } 53 | 54 | result = append(result, hexStr...) 55 | return string(result) 56 | } 57 | 58 | // GetHashRange ... 59 | func (k BucketKey[R]) GetHashRange() HashRange { 60 | mask := uint64(math.MaxUint64) << (64 - k.SizeLog) 61 | 62 | begin := k.Hash & mask 63 | return HashRange{ 64 | Begin: begin, 65 | End: begin | ^mask, 66 | } 67 | } 68 | 69 | // Bucket ... 70 | type Bucket[T Value] struct { 71 | Values []T 72 | } 73 | 74 | func putLength(buf *bytes.Buffer, length int) { 75 | var lenBytes [binary.MaxVarintLen64]byte 76 | 77 | n := binary.PutUvarint(lenBytes[:], uint64(length)) 78 | _, _ = buf.Write(lenBytes[:n]) 79 | } 80 | 81 | // Marshal ... 82 | func (b Bucket[T]) Marshal() ([]byte, error) { 83 | var buf bytes.Buffer 84 | 85 | putLength(&buf, len(b.Values)) 86 | 87 | for _, v := range b.Values { 88 | data, err := v.Marshal() 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | putLength(&buf, len(data)) 94 | _, _ = buf.Write(data) 95 | } 96 | 97 | return buf.Bytes(), nil 98 | } 99 | 100 | // NewBucketUnmarshaler ... 
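// NewBucketUnmarshaler decodes the format produced by Bucket.Marshal above:
// a uvarint count of values, then for each value a uvarint byte length
// followed by the bytes passed to the element unmarshaler. For example, the
// two strings "ABC" and "X" encode as {2, 3, 'A', 'B', 'C', 1, 'X'}.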
101 | func NewBucketUnmarshaler[T Value]( 102 | unmarshaler item.Unmarshaler[T], 103 | ) func(data []byte) (Bucket[T], error) { 104 | return func(data []byte) (Bucket[T], error) { 105 | numValues, n := binary.Uvarint(data) 106 | if n <= 0 { 107 | return Bucket[T]{}, errors.New("mmap bucket: invalid number of values") 108 | } 109 | data = data[n:] 110 | 111 | values := make([]T, 0, numValues) 112 | 113 | for i := uint64(0); i < numValues; i++ { 114 | numBytes, n := binary.Uvarint(data) 115 | if n <= 0 { 116 | return Bucket[T]{}, errors.New("mmap bucket: invalid length number of data") 117 | } 118 | data = data[n:] 119 | 120 | if len(data) < int(numBytes) { 121 | return Bucket[T]{}, errors.New("mmap bucket: invalid data") 122 | } 123 | 124 | value, err := unmarshaler(data[:numBytes]) 125 | if err != nil { 126 | return Bucket[T]{}, err 127 | } 128 | values = append(values, value) 129 | 130 | data = data[numBytes:] 131 | } 132 | 133 | return Bucket[T]{ 134 | Values: values, 135 | }, nil 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /mmap/bucket_test.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | type testRootKey struct { 12 | prefix string 13 | } 14 | 15 | func (k testRootKey) String() string { 16 | return k.prefix 17 | } 18 | 19 | func (testRootKey) AvgBucketSizeLog() uint8 { 20 | return 2 21 | } 22 | 23 | // length in bytes 24 | func newHash(prefix uint64, length int) uint64 { 25 | size := length * 8 26 | return prefix << (64 - size) 27 | } 28 | 29 | func TestNewHash(t *testing.T) { 30 | assert.Equal(t, uint64(0x1234_5600_0000_0000), newHash(0x1234_56, 3)) 31 | } 32 | 33 | func TestBucketKey_String(t *testing.T) { 34 | t.Run("normal", func(t *testing.T) { 35 | k := BucketKey[testRootKey]{ 36 | RootKey: testRootKey{ 37 | prefix: "hello", 38 | }, 39 | SizeLog: 3 * 8, 40 | Hash: newHash(0x1234_5678, 4), 41 | Sep: ":", 42 | } 43 | assert.Equal(t, "hello:24:123456", k.String()) 44 | }) 45 | 46 | t.Run("near align with bytes", func(t *testing.T) { 47 | k := BucketKey[testRootKey]{ 48 | RootKey: testRootKey{ 49 | prefix: "hello", 50 | }, 51 | SizeLog: 23, 52 | Hash: newHash(0x1234_ff78, 4), 53 | Sep: "/", 54 | } 55 | assert.Equal(t, "hello/23/1234fe", k.String()) 56 | }) 57 | 58 | t.Run("middle of byte", func(t *testing.T) { 59 | k := BucketKey[testRootKey]{ 60 | RootKey: testRootKey{ 61 | prefix: "hello", 62 | }, 63 | SizeLog: 12, 64 | Hash: newHash(0x1234_ff78, 4), 65 | Sep: "/", 66 | } 67 | assert.Equal(t, "hello/12/123", k.String()) 68 | }) 69 | 70 | t.Run("single digit", func(t *testing.T) { 71 | k := BucketKey[testRootKey]{ 72 | RootKey: testRootKey{ 73 | prefix: "hello", 74 | }, 75 | SizeLog: 4, 76 | Hash: newHash(0x5234_ff78, 4), 77 | Sep: "/", 78 | } 79 | assert.Equal(t, "hello/4/5", k.String()) 80 | }) 81 | 82 | t.Run("single bit", func(t *testing.T) { 83 | k := BucketKey[testRootKey]{ 84 | RootKey: testRootKey{ 85 | prefix: "hello", 86 | }, 87 | SizeLog: 1, 88 | Hash: newHash(0xf4, 1), 89 | Sep: "/", 90 | } 91 | assert.Equal(t, "hello/1/8", k.String()) 92 | }) 93 | 94 | t.Run("size log zero", func(t *testing.T) { 95 | k := BucketKey[testRootKey]{ 96 | RootKey: testRootKey{ 97 | prefix: "hello", 98 | }, 99 | SizeLog: 0, 100 | Hash: newHash(0xf4, 1), 101 | Sep: "/", 102 | } 103 | assert.Equal(t, "hello/0/", k.String()) 104 | }) 105 | } 106 | 107 | type 
testUser struct { 108 | Name string `json:"name"` 109 | Age int `json:"age"` 110 | } 111 | 112 | func (u testUser) Marshal() ([]byte, error) { 113 | return json.Marshal(u) 114 | } 115 | 116 | func unmarshalTestUser(data []byte) (testUser, error) { 117 | var u testUser 118 | err := json.Unmarshal(data, &u) 119 | return u, err 120 | } 121 | 122 | type simpleString string 123 | 124 | func (s simpleString) Marshal() ([]byte, error) { 125 | return []byte(s), nil 126 | } 127 | 128 | func unmarshalSimpleString(data []byte) (simpleString, error) { 129 | return simpleString(data), nil 130 | } 131 | 132 | func TestNewBucketUnmarshaler(t *testing.T) { 133 | t.Run("normal", func(t *testing.T) { 134 | u1 := testUser{ 135 | Name: "user01", 136 | Age: 81, 137 | } 138 | u2 := testUser{ 139 | Name: "user02", 140 | Age: 82, 141 | } 142 | 143 | bucket := Bucket[testUser]{ 144 | Values: []testUser{u1, u2}, 145 | } 146 | 147 | data, err := bucket.Marshal() 148 | assert.Equal(t, nil, err) 149 | 150 | unmarshaler := NewBucketUnmarshaler[testUser](unmarshalTestUser) 151 | 152 | newBucket, err := unmarshaler(data) 153 | assert.Equal(t, nil, err) 154 | assert.Equal(t, bucket, newBucket) 155 | }) 156 | 157 | t.Run("empty", func(t *testing.T) { 158 | bucket := Bucket[testUser]{} 159 | 160 | data, err := bucket.Marshal() 161 | assert.Equal(t, nil, err) 162 | 163 | unmarshaler := NewBucketUnmarshaler[testUser](unmarshalTestUser) 164 | 165 | newBucket, err := unmarshaler(data) 166 | assert.Equal(t, nil, err) 167 | 168 | bucket.Values = []testUser{} 169 | assert.Equal(t, bucket, newBucket) 170 | }) 171 | 172 | t.Run("byte format", func(t *testing.T) { 173 | bucket := Bucket[simpleString]{ 174 | Values: []simpleString{ 175 | "ABC", 176 | "X", 177 | }, 178 | } 179 | 180 | data, err := bucket.Marshal() 181 | assert.Equal(t, nil, err) 182 | assert.Equal(t, []byte{2, 3, 'A', 'B', 'C', 1, 'X'}, data) 183 | }) 184 | 185 | t.Run("missing length", func(t *testing.T) { 186 | fn := NewBucketUnmarshaler[simpleString](unmarshalSimpleString) 187 | 188 | _, err := fn(nil) 189 | assert.Equal(t, errors.New("mmap bucket: invalid number of values"), err) 190 | }) 191 | 192 | t.Run("missing data len", func(t *testing.T) { 193 | fn := NewBucketUnmarshaler[simpleString](unmarshalSimpleString) 194 | 195 | _, err := fn([]byte{2}) 196 | assert.Equal(t, errors.New("mmap bucket: invalid length number of data"), err) 197 | }) 198 | 199 | t.Run("missing data", func(t *testing.T) { 200 | fn := NewBucketUnmarshaler[simpleString](unmarshalSimpleString) 201 | 202 | _, err := fn([]byte{2, 3, 'A', 'B'}) 203 | assert.Equal(t, errors.New("mmap bucket: invalid data"), err) 204 | }) 205 | 206 | t.Run("success single record", func(t *testing.T) { 207 | fn := NewBucketUnmarshaler[simpleString](unmarshalSimpleString) 208 | 209 | bucket, err := fn([]byte{1, 3, 'A', 'B', 'C'}) 210 | assert.Equal(t, nil, err) 211 | assert.Equal(t, Bucket[simpleString]{ 212 | Values: []simpleString{ 213 | "ABC", 214 | }, 215 | }, bucket) 216 | }) 217 | } 218 | 219 | func TestNewBucketUnmarshaler_WithInnerError(t *testing.T) { 220 | fn := NewBucketUnmarshaler[simpleString](func(data []byte) (simpleString, error) { 221 | return "", errors.New("inner error") 222 | }) 223 | 224 | _, err := fn([]byte{1, 2, 'A', 'B'}) 225 | assert.Equal(t, errors.New("inner error"), err) 226 | } 227 | 228 | type errorString string 229 | 230 | func (errorString) Marshal() ([]byte, error) { 231 | return nil, errors.New("marshal error") 232 | } 233 | 234 | func TestBucket_Marshal_With_Error(t *testing.T) { 
235 | b := Bucket[errorString]{ 236 | Values: []errorString{ 237 | "ABCD", 238 | }, 239 | } 240 | _, err := b.Marshal() 241 | assert.Equal(t, errors.New("marshal error"), err) 242 | } 243 | 244 | func TestBucketKey_GetHashRange(t *testing.T) { 245 | t.Run("normal", func(t *testing.T) { 246 | k := BucketKey[testRootKey]{ 247 | RootKey: testRootKey{ 248 | prefix: "hello", 249 | }, 250 | SizeLog: 16, 251 | Hash: newHash(0x1234_56, 3), 252 | } 253 | 254 | assert.Equal(t, HashRange{ 255 | Begin: newHash(0x1234, 2), 256 | End: 0x1234_ffff_ffff_ffff, 257 | }, k.GetHashRange()) 258 | }) 259 | 260 | t.Run("zero size log", func(t *testing.T) { 261 | k := BucketKey[testRootKey]{ 262 | RootKey: testRootKey{ 263 | prefix: "hello", 264 | }, 265 | SizeLog: 0, 266 | Hash: newHash(0x1234_56, 3), 267 | } 268 | 269 | assert.Equal(t, HashRange{ 270 | Begin: 0, 271 | End: 0xffff_ffff_ffff_ffff, 272 | }, k.GetHashRange()) 273 | }) 274 | 275 | t.Run("size log middle", func(t *testing.T) { 276 | k := BucketKey[testRootKey]{ 277 | RootKey: testRootKey{ 278 | prefix: "hello", 279 | }, 280 | SizeLog: 7, 281 | Hash: newHash(0x1734_56, 3), 282 | } 283 | 284 | assert.Equal(t, HashRange{ 285 | Begin: 0x1600_0000_0000_0000, 286 | End: 0x17ff_ffff_ffff_ffff, 287 | }, k.GetHashRange()) 288 | }) 289 | } 290 | -------------------------------------------------------------------------------- /mmap/filler.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | ) 7 | 8 | // FillKey ... 9 | type FillKey[R comparable] struct { 10 | RootKey R 11 | Range HashRange 12 | } 13 | 14 | // NewMultiGetFiller converts a function (often one using SELECT ... WHERE ... IN) 15 | // into a Filler[T, R] that can be passed to New 16 | func NewMultiGetFiller[T any, R comparable, K Key]( 17 | multiGetFunc func(ctx context.Context, keys []FillKey[R]) ([]T, error), 18 | getRootKey func(v T) R, 19 | getKey func(v T) K, 20 | ) Filler[T, R] { 21 | var state *multiGetState[T, R] 22 | 23 | return func(ctx context.Context, rootKey R, hashRange HashRange) func() ([]T, error) { 24 | if state == nil { 25 | state = &multiGetState[T, R]{ 26 | result: map[R][]T{}, 27 | } 28 | } 29 | s := state 30 | s.keys = append(s.keys, FillKey[R]{ 31 | RootKey: rootKey, 32 | Range: hashRange, 33 | }) 34 | 35 | return func() ([]T, error) { 36 | if !s.completed { 37 | s.completed = true 38 | state = nil 39 | values, err := multiGetFunc(ctx, s.keys) 40 | if err != nil { 41 | s.err = err 42 | } else { 43 | collectStateValues(s, values, getRootKey, getKey) 44 | } 45 | } 46 | 47 | if s.err != nil { 48 | return nil, s.err 49 | } 50 | 51 | valuesByRootKey := s.result[rootKey] 52 | lowerBound := findLowerBound(valuesByRootKey, getKey, hashRange.Begin) 53 | 54 | return computeValuesInHashRange(valuesByRootKey, lowerBound, hashRange, getKey), nil 55 | } 56 | } 57 | } 58 | type multiGetState[T any, R comparable] struct { 59 | completed bool 60 | keys []FillKey[R] 61 | result map[R][]T 62 | err error 63 | } 64 | 65 | func findLowerBound[T any, K Key]( 66 | values []T, 67 | getKey func(v T) K, 68 | lowerBound uint64, 69 | ) int { 70 | // similar to std::lower_bound of C++ 71 | first := 0 72 | last := len(values) 73 | for first != last { 74 | mid := (first + last) / 2 75 | 76 | val := values[mid] 77 | if getKey(val).Hash() >= lowerBound { 78 | last = mid 79 | } else { 80 | first = mid + 1 81 | } 82 | } 83 | return first 84 | } 85 | 86 | func computeValuesInHashRange[T any, K Key]( 87 | values []T, 88 | lowerBound int, 89 | hashRange
HashRange, 90 | getKey func(T) K, 91 | ) []T { 92 | var result []T 93 | for i := lowerBound; i < len(values); i++ { 94 | v := values[i] 95 | if getKey(v).Hash() > hashRange.End { 96 | break 97 | } 98 | result = append(result, v) 99 | } 100 | return result 101 | } 102 | 103 | func collectStateValues[T any, R comparable, K Key]( 104 | s *multiGetState[T, R], 105 | values []T, 106 | getRootKey func(T) R, 107 | getKey func(T) K, 108 | ) { 109 | for _, v := range values { 110 | rootKey := getRootKey(v) 111 | prev := s.result[rootKey] 112 | s.result[rootKey] = append(prev, v) 113 | } 114 | 115 | // sort by hash 116 | for _, v := range s.result { 117 | sort.Slice(v, func(i, j int) bool { 118 | return getKey(v[i]).Hash() < getKey(v[j]).Hash() 119 | }) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /mmap/filler_test.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | type multiGetFillerTest struct { 12 | filler Filler[stockLocation, stockLocationRootKey] 13 | 14 | fillKeys [][]FillKey[stockLocationRootKey] 15 | 16 | fillFunc func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) 17 | } 18 | 19 | func newMultiGetFillerTest() *multiGetFillerTest { 20 | f := &multiGetFillerTest{} 21 | 22 | f.filler = NewMultiGetFiller[stockLocation, stockLocationRootKey]( 23 | func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) { 24 | f.fillKeys = append(f.fillKeys, keys) 25 | return f.fillFunc(ctx, keys) 26 | }, 27 | stockLocation.getRootKey, 28 | stockLocation.getKey, 29 | ) 30 | 31 | return f 32 | } 33 | func TestNewMultiGetFiller(t *testing.T) { 34 | hash1 := HashRange{ 35 | Begin: newHash(0x1000, 2), 36 | End: newHash(0x1fff, 2), 37 | } 38 | hash2 := HashRange{ 39 | Begin: newHash(0x2000, 2), 40 | End: newHash(0x2fff, 2), 41 | } 42 | 43 | t.Run("single", func(t *testing.T) { 44 | f := newMultiGetFillerTest() 45 | 46 | stock1 := stockLocation{ 47 | Sku: sku1, 48 | Location: loc1, 49 | Hash: hash1.Begin, 50 | Quantity: 41, 51 | } 52 | 53 | f.fillFunc = func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) { 54 | return []stockLocation{stock1}, nil 55 | } 56 | 57 | fn := f.filler(context.Background(), stock1.getRootKey(), hash1) 58 | 59 | // check resp 60 | resp, err := fn() 61 | assert.Equal(t, nil, err) 62 | assert.Equal(t, []stockLocation{ 63 | stock1, 64 | }, resp) 65 | 66 | assert.Equal(t, [][]FillKey[stockLocationRootKey]{ 67 | { 68 | {RootKey: stock1.getRootKey(), Range: hash1}, 69 | }, 70 | }, f.fillKeys) 71 | 72 | // Get Again 73 | stock2 := stockLocation{ 74 | Sku: sku2, 75 | Location: loc2, 76 | Hash: hash2.Begin, 77 | Quantity: 42, 78 | } 79 | 80 | f.fillFunc = func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) { 81 | return []stockLocation{stock2}, nil 82 | } 83 | 84 | fn1 := f.filler(context.Background(), stock1.getRootKey(), hash1) 85 | fn2 := f.filler(context.Background(), stock2.getRootKey(), hash2) 86 | 87 | // check resp 88 | resp, err = fn1() 89 | assert.Equal(t, nil, err) 90 | assert.Equal(t, 0, len(resp)) 91 | 92 | resp, err = fn2() 93 | assert.Equal(t, nil, err) 94 | assert.Equal(t, []stockLocation{ 95 | stock2, 96 | }, resp) 97 | 98 | assert.Equal(t, [][]FillKey[stockLocationRootKey]{ 99 | { 100 | {RootKey: stock1.getRootKey(), 
Range: hash1}, 101 | }, 102 | { 103 | {RootKey: stock1.getRootKey(), Range: hash1}, 104 | {RootKey: stock2.getRootKey(), Range: hash2}, 105 | }, 106 | }, f.fillKeys) 107 | }) 108 | 109 | t.Run("multiple keys", func(t *testing.T) { 110 | f := newMultiGetFillerTest() 111 | 112 | stock1 := stockLocation{ 113 | Sku: sku1, 114 | Location: loc1, 115 | Hash: hash1.Begin, 116 | Quantity: 41, 117 | } 118 | stock2 := stockLocation{ 119 | Sku: sku2, 120 | Location: loc2, 121 | Hash: hash2.Begin, 122 | Quantity: 42, 123 | } 124 | 125 | f.fillFunc = func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) { 126 | return []stockLocation{stock1, stock2}, nil 127 | } 128 | 129 | fn1 := f.filler(context.Background(), stock1.getRootKey(), hash1) 130 | fn2 := f.filler(context.Background(), stock2.getRootKey(), hash2) 131 | fn3 := f.filler(context.Background(), stockLocationRootKey{sku: sku3}, hash1) 132 | 133 | resp, err := fn1() 134 | assert.Equal(t, nil, err) 135 | assert.Equal(t, []stockLocation{ 136 | stock1, 137 | }, resp) 138 | 139 | resp, err = fn2() 140 | assert.Equal(t, nil, err) 141 | assert.Equal(t, []stockLocation{ 142 | stock2, 143 | }, resp) 144 | 145 | resp, err = fn3() 146 | assert.Equal(t, nil, err) 147 | assert.Equal(t, 0, len(resp)) 148 | 149 | assert.Equal(t, [][]FillKey[stockLocationRootKey]{ 150 | { 151 | {RootKey: stock1.getRootKey(), Range: hash1}, 152 | {RootKey: stock2.getRootKey(), Range: hash2}, 153 | {RootKey: stockLocationRootKey{sku: sku3}, Range: hash1}, 154 | }, 155 | }, f.fillKeys) 156 | }) 157 | 158 | t.Run("with error", func(t *testing.T) { 159 | f := newMultiGetFillerTest() 160 | 161 | f.fillFunc = func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) { 162 | return nil, errors.New("fill error") 163 | } 164 | 165 | fn := f.filler(context.Background(), stockLocationRootKey{sku: sku1}, hash1) 166 | 167 | resp, err := fn() 168 | assert.Equal(t, errors.New("fill error"), err) 169 | assert.Equal(t, 0, len(resp)) 170 | 171 | assert.Equal(t, [][]FillKey[stockLocationRootKey]{ 172 | { 173 | {RootKey: stockLocationRootKey{sku: sku1}, Range: hash1}, 174 | }, 175 | }, f.fillKeys) 176 | }) 177 | 178 | t.Run("multiple same root key", func(t *testing.T) { 179 | f := newMultiGetFillerTest() 180 | 181 | stock1 := stockLocation{ 182 | Sku: sku1, 183 | Location: loc1, 184 | Hash: hash1.Begin + 100, 185 | Quantity: 41, 186 | } 187 | stock2 := stockLocation{ 188 | Sku: sku1, 189 | Location: loc2, 190 | Hash: hash2.Begin + 100, 191 | Quantity: 42, 192 | } 193 | 194 | f.fillFunc = func(ctx context.Context, keys []FillKey[stockLocationRootKey]) ([]stockLocation, error) { 195 | return []stockLocation{stock2, stock1}, nil 196 | } 197 | 198 | fn1 := f.filler(context.Background(), stock1.getRootKey(), hash1) 199 | fn2 := f.filler(context.Background(), stock2.getRootKey(), hash2) 200 | 201 | resp, err := fn1() 202 | assert.Equal(t, nil, err) 203 | assert.Equal(t, []stockLocation{ 204 | stock1, 205 | }, resp) 206 | 207 | resp, err = fn2() 208 | assert.Equal(t, nil, err) 209 | assert.Equal(t, []stockLocation{ 210 | stock2, 211 | }, resp) 212 | 213 | assert.Equal(t, [][]FillKey[stockLocationRootKey]{ 214 | { 215 | {RootKey: stock1.getRootKey(), Range: hash1}, 216 | {RootKey: stock2.getRootKey(), Range: hash2}, 217 | }, 218 | }, f.fillKeys) 219 | }) 220 | } 221 | 222 | func TestLowerBound(t *testing.T) { 223 | newLoc := func(hash uint64) stockLocation { 224 | return stockLocation{ 225 | Sku: "SKU01", 226 | Location: "LOC01", 227 | Hash: 
hash, 228 | Quantity: 12, 229 | } 230 | } 231 | 232 | t.Run("normal", func(t *testing.T) { 233 | index := findLowerBound[stockLocation, stockLocationKey]( 234 | []stockLocation{ 235 | newLoc(11), 236 | newLoc(12), 237 | newLoc(13), 238 | newLoc(13), 239 | newLoc(14), 240 | newLoc(14), 241 | newLoc(15), 242 | newLoc(16), 243 | newLoc(17), 244 | }, 245 | stockLocation.getKey, 246 | 14, 247 | ) 248 | assert.Equal(t, 4, index) 249 | }) 250 | 251 | t.Run("empty", func(t *testing.T) { 252 | index := findLowerBound[stockLocation, stockLocationKey]( 253 | []stockLocation{}, 254 | stockLocation.getKey, 255 | 14, 256 | ) 257 | assert.Equal(t, 0, index) 258 | }) 259 | 260 | t.Run("single smaller than bound", func(t *testing.T) { 261 | index := findLowerBound[stockLocation, stockLocationKey]( 262 | []stockLocation{newLoc(11)}, 263 | stockLocation.getKey, 264 | 14, 265 | ) 266 | assert.Equal(t, 1, index) 267 | }) 268 | 269 | t.Run("values with no value = bound", func(t *testing.T) { 270 | index := findLowerBound[stockLocation, stockLocationKey]( 271 | []stockLocation{ 272 | newLoc(11), 273 | newLoc(12), 274 | newLoc(13), 275 | newLoc(15), 276 | newLoc(16), 277 | }, 278 | stockLocation.getKey, 279 | 14, 280 | ) 281 | assert.Equal(t, 3, index) 282 | }) 283 | 284 | t.Run("all bigger than bound", func(t *testing.T) { 285 | index := findLowerBound[stockLocation, stockLocationKey]( 286 | []stockLocation{newLoc(15), newLoc(16)}, 287 | stockLocation.getKey, 288 | 14, 289 | ) 290 | assert.Equal(t, 0, index) 291 | }) 292 | } 293 | -------------------------------------------------------------------------------- /mmap/mmap.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "context" 5 | "math" 6 | "math/bits" 7 | 8 | "github.com/QuangTung97/memproxy" 9 | "github.com/QuangTung97/memproxy/item" 10 | ) 11 | 12 | // RootKey constraints 13 | type RootKey interface { 14 | item.Key 15 | 16 | // AvgBucketSizeLog returns the logarithm base 2 of the expected average size per bucket 17 | // values should be in the range [0, 8] 18 | // value = 0 => average 1 element per bucket 19 | // value = 3 => average 8 elements per bucket 20 | AvgBucketSizeLog() uint8 21 | } 22 | 23 | // Key is the constraint for child keys 24 | type Key interface { 25 | comparable 26 | Hash() uint64 27 | } 28 | 29 | // Value constraints 30 | type Value interface { 31 | item.Value 32 | } 33 | 34 | // HashRange is a range of hash values [Begin, End] that includes both ends 35 | // More specifically: a hash value h belongs to the range 36 | // whenever Begin <= h <= End 37 | type HashRange struct { 38 | Begin uint64 // inclusive 39 | End uint64 // inclusive 40 | } 41 | 42 | // Filler ... 43 | type Filler[T any, R any] func(ctx context.Context, rootKey R, hashRange HashRange) func() ([]T, error) 44 | 45 | // Map ... 46 | type Map[T Value, R RootKey, K Key] struct { 47 | item *item.Item[Bucket[T], BucketKey[R]] 48 | 49 | getKeyFunc func(v T) K 50 | separator string 51 | } 52 | 53 | // New ... 
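//
// A minimal usage sketch (the types are assumed to implement Value, RootKey
// and Key, in the spirit of the benchmark types in mmap_bench_test.go;
// fillerFunc is hypothetical and stands for a Filler that loads one whole
// bucket from the backing store):
//
//	m := New[benchValue, benchRootKey, benchKey](
//		pipe,                // a memproxy.Pipeline
//		unmarshalBenchValue, // an item.Unmarshaler[benchValue]
//		fillerFunc,          // a Filler[benchValue, benchRootKey]
//		benchValue.getKey,   // extracts the child key from a value
//	)
//	fn := m.Get(ctx, elemCount, rootKey, key)
//	opt, err := fn()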
54 | func New[T Value, R RootKey, K Key]( 55 | pipeline memproxy.Pipeline, 56 | unmarshaler item.Unmarshaler[T], 57 | filler Filler[T, R], 58 | getKeyFunc func(v T) K, 59 | options ...MapOption, 60 | ) *Map[T, R, K] { 61 | conf := computeMapConfig(options) 62 | 63 | bucketFiller := func(ctx context.Context, key BucketKey[R]) func() (Bucket[T], error) { 64 | fn := filler(ctx, key.RootKey, key.GetHashRange()) 65 | return func() (Bucket[T], error) { 66 | values, err := fn() 67 | if err != nil { 68 | return Bucket[T]{}, err 69 | } 70 | return Bucket[T]{ 71 | Values: values, 72 | }, nil 73 | } 74 | } 75 | 76 | return &Map[T, R, K]{ 77 | item: item.New[Bucket[T], BucketKey[R]]( 78 | pipeline, 79 | NewBucketUnmarshaler(unmarshaler), 80 | bucketFiller, 81 | conf.itemOptions..., 82 | ), 83 | getKeyFunc: getKeyFunc, 84 | separator: conf.separator, 85 | } 86 | } 87 | 88 | // Option an optional value 89 | type Option[T any] struct { 90 | Valid bool 91 | Data T 92 | } 93 | 94 | func computeSizeLog( 95 | avgBucketSizeLog uint8, 96 | elemCount uint64, 97 | hash uint64, 98 | ) uint8 { 99 | avgBucketSize := uint64(1) << avgBucketSizeLog 100 | if elemCount <= avgBucketSize { 101 | return 0 102 | } 103 | 104 | sizeLog := uint8(bits.Len64(elemCount-1)) - avgBucketSizeLog 105 | 106 | prevSizeLog := uint64(1) << (avgBucketSizeLog + sizeLog - 1) 107 | 108 | var boundEnd uint64 109 | if avgBucketSizeLog >= 1 { 110 | boundValue := (elemCount - 1 - prevSizeLog) >> (avgBucketSizeLog - 1) 111 | boundEnd = boundValue<<(64-sizeLog) | (math.MaxUint64 >> sizeLog) 112 | } else { 113 | boundValue := elemCount - 1 - prevSizeLog 114 | shift := sizeLog - 1 115 | boundEnd = boundValue<<(64-shift) | (math.MaxUint64 >> shift) 116 | } 117 | 118 | if hash <= boundEnd { 119 | return sizeLog 120 | } 121 | return sizeLog - 1 122 | } 123 | 124 | // ComputeBucketKey ... 125 | func ComputeBucketKey[R RootKey, K Key]( 126 | elemCount uint64, 127 | rootKey R, key K, 128 | separator string, 129 | ) BucketKey[R] { 130 | hash := key.Hash() 131 | 132 | sizeLog := computeSizeLog(rootKey.AvgBucketSizeLog(), elemCount, hash) 133 | mask := uint64(math.MaxUint64) << (64 - sizeLog) 134 | 135 | return BucketKey[R]{ 136 | RootKey: rootKey, 137 | SizeLog: sizeLog, 138 | Hash: hash & mask, 139 | Sep: separator, 140 | } 141 | } 142 | 143 | // ComputeBucketKeyString ... 144 | func ComputeBucketKeyString[R RootKey, K Key]( 145 | elemCount uint64, 146 | rootKey R, key K, 147 | ) string { 148 | return ComputeBucketKeyStringWithSeparator(elemCount, rootKey, key, ":") 149 | } 150 | 151 | // ComputeBucketKeyStringWithSeparator ... 
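//
// It is shorthand for ComputeBucketKey(...).String(); the exact key layout is
// defined by BucketKey.String in bucket.go. A usage sketch:
//
//	cacheKey := ComputeBucketKeyStringWithSeparator(elemCount, rootKey, key, "/")
//	// equivalent to:
//	cacheKey = ComputeBucketKey(elemCount, rootKey, key, "/").String()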
152 | func ComputeBucketKeyStringWithSeparator[R RootKey, K Key]( 153 | elemCount uint64, 154 | rootKey R, key K, 155 | separator string, 156 | ) string { 157 | return ComputeBucketKey(elemCount, rootKey, key, separator).String() 158 | } 159 | 160 | // Get from Map 161 | // The elemCount does *NOT* need to be exact, but it *MUST* be monotonically increasing 162 | // Otherwise the Map can return incorrect values 163 | func (m *Map[T, R, K]) Get( 164 | ctx context.Context, 165 | elemCount uint64, 166 | rootKey R, key K, 167 | ) func() (Option[T], error) { 168 | bucketKey := ComputeBucketKey(elemCount, rootKey, key, m.separator) 169 | 170 | getState := m.item.GetFast(ctx, bucketKey) 171 | 172 | return func() (Option[T], error) { 173 | bucket, err := getState.Result() 174 | if err != nil { 175 | return Option[T]{}, err 176 | } 177 | 178 | for _, v := range bucket.Values { 179 | if m.getKeyFunc(v) == key { 180 | return Option[T]{ 181 | Valid: true, 182 | Data: v, 183 | }, nil 184 | } 185 | } 186 | 187 | return Option[T]{}, nil 188 | } 189 | } 190 | 191 | // GetItemStats returns the underlying item stats 192 | func (m *Map[T, R, K]) GetItemStats() item.Stats { 193 | return m.item.GetStats() 194 | } 195 | -------------------------------------------------------------------------------- /mmap/mmap_bench_test.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "os" 7 | "runtime/pprof" 8 | "strconv" 9 | "testing" 10 | 11 | "github.com/spaolacci/murmur3" 12 | "github.com/stretchr/testify/assert" 13 | 14 | "github.com/QuangTung97/memproxy" 15 | ) 16 | 17 | type benchValue struct { 18 | rootKey benchRootKey 19 | key benchKey 20 | value int64 21 | } 22 | 23 | type benchRootKey struct { 24 | value uint64 25 | } 26 | 27 | type benchKey struct { 28 | value uint64 29 | } 30 | 31 | const benchKeyNum = 229 32 | const benchValueNum = 331 33 | 34 | func newBenchMapCache(pipe memproxy.Pipeline) *Map[benchValue, benchRootKey, benchKey] { 35 | return New[benchValue, benchRootKey, benchKey]( 36 | pipe, 37 | unmarshalBenchValue, 38 | func(ctx context.Context, rootKey benchRootKey, hashRange HashRange) func() ([]benchValue, error) { 39 | return func() ([]benchValue, error) { 40 | return []benchValue{ 41 | { 42 | rootKey: rootKey, 43 | key: benchKey{ 44 | value: benchKeyNum, 45 | }, 46 | value: benchValueNum, 47 | }, 48 | }, nil 49 | } 50 | }, 51 | benchValue.getKey, 52 | ) 53 | } 54 | 55 | func doGetMapElemFromMemcache(mc memproxy.Memcache, numKeys int) { 56 | pipe := mc.Pipeline(context.Background()) 57 | defer pipe.Finish() 58 | 59 | mapCache := newBenchMapCache(pipe) 60 | 61 | fnList := make([]func() (Option[benchValue], error), 0, numKeys) 62 | for i := 0; i < numKeys; i++ { 63 | fn := mapCache.Get(context.Background(), uint64(numKeys), benchRootKey{ 64 | value: uint64(1000 + i), 65 | }, benchKey{ 66 | value: benchKeyNum, 67 | }) 68 | fnList = append(fnList, fn) 69 | } 70 | 71 | for _, fn := range fnList { 72 | result, err := fn() 73 | if err != nil { 74 | panic(err) 75 | } 76 | if !result.Valid { 77 | panic("not valid") 78 | } 79 | if result.Data.value != benchValueNum { 80 | panic("wrong value") 81 | } 82 | } 83 | } 84 | 85 | func BenchmarkWithProxy__Map_Get_Batch_100(b *testing.B) { 86 | mc := newMemcacheWithProxy(b) 87 | 88 | const numKeys = 100 89 | 90 | doGetMapElemFromMemcache(mc, numKeys) 91 | 92 | b.ResetTimer() 93 | 94 | for n := 0; n < b.N; n++ { 95 | doGetMapElemFromMemcache(mc, numKeys) 96 | } 97 | 98 | 
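// stop the timer so that writing the heap profile below is excluded from the measurement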
b.StopTimer() 99 | writeMemProfile() 100 | } 101 | 102 | func BenchmarkWithProxy__Map_Get_Batch_1000(b *testing.B) { 103 | mc := newMemcacheWithProxy(b) 104 | 105 | const numKeys = 1000 106 | 107 | doGetMapElemFromMemcache(mc, numKeys) 108 | 109 | b.ResetTimer() 110 | 111 | for n := 0; n < b.N; n++ { 112 | doGetMapElemFromMemcache(mc, numKeys) 113 | } 114 | 115 | b.StopTimer() 116 | writeMemProfile() 117 | } 118 | 119 | func BenchmarkComputeBucketKeyString(b *testing.B) { 120 | var sum int 121 | for n := 0; n < b.N; n++ { 122 | k := BucketKey[benchRootKey]{ 123 | RootKey: benchRootKey{ 124 | value: 23, 125 | }, 126 | SizeLog: 7, 127 | Hash: newHash(0x1234, 2), 128 | Sep: ":", 129 | }.String() 130 | sum += len(k) 131 | } 132 | b.StopTimer() 133 | writeMemProfile() 134 | } 135 | 136 | func (v benchValue) getKey() benchKey { 137 | return v.key 138 | } 139 | 140 | func (k benchKey) Hash() uint64 { 141 | var data [8]byte 142 | binary.LittleEndian.PutUint64(data[:], k.value) 143 | return murmur3.Sum64(data[:]) 144 | } 145 | 146 | func (k benchRootKey) String() string { 147 | return strconv.FormatUint(k.value, 10) 148 | } 149 | 150 | func (benchRootKey) AvgBucketSizeLog() uint8 { 151 | return 1 152 | } 153 | 154 | func (v benchValue) Marshal() ([]byte, error) { 155 | var result [24]byte 156 | binary.LittleEndian.PutUint64(result[:], v.rootKey.value) 157 | binary.LittleEndian.PutUint64(result[8:], v.key.value) 158 | binary.LittleEndian.PutUint64(result[16:], uint64(v.value)) 159 | return result[:], nil 160 | } 161 | 162 | func unmarshalBenchValue(data []byte) (benchValue, error) { 163 | rootKey := binary.LittleEndian.Uint64(data[:]) 164 | key := binary.LittleEndian.Uint64(data[8:]) 165 | val := binary.LittleEndian.Uint64(data[16:]) 166 | 167 | return benchValue{ 168 | rootKey: benchRootKey{ 169 | value: rootKey, 170 | }, 171 | key: benchKey{ 172 | value: key, 173 | }, 174 | value: int64(val), 175 | }, nil 176 | } 177 | 178 | func TestMarshalBenchValue(t *testing.T) { 179 | b := benchValue{ 180 | rootKey: benchRootKey{ 181 | value: 41, 182 | }, 183 | key: benchKey{ 184 | value: 31, 185 | }, 186 | value: 55, 187 | } 188 | data, err := b.Marshal() 189 | assert.Equal(t, nil, err) 190 | 191 | newVal, err := unmarshalBenchValue(data) 192 | assert.Equal(t, nil, err) 193 | assert.Equal(t, b, newVal) 194 | } 195 | 196 | func writeMemProfile() { 197 | if os.Getenv("ENABLE_BENCH_PROFILE") == "" { 198 | return 199 | } 200 | 201 | file, err := os.Create("./bench_profile.out") 202 | if err != nil { 203 | panic(err) 204 | } 205 | defer func() { 206 | err := file.Close() 207 | if err != nil { 208 | panic(err) 209 | } 210 | }() 211 | 212 | err = pprof.WriteHeapProfile(file) 213 | if err != nil { 214 | panic(err) 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /mmap/option.go: -------------------------------------------------------------------------------- 1 | package mmap 2 | 3 | import ( 4 | "github.com/QuangTung97/memproxy/item" 5 | ) 6 | 7 | type mapConfig struct { 8 | itemOptions []item.Option 9 | separator string 10 | } 11 | 12 | func computeMapConfig(options []MapOption) mapConfig { 13 | conf := mapConfig{ 14 | itemOptions: nil, 15 | separator: ":", 16 | } 17 | for _, fn := range options { 18 | fn(&conf) 19 | } 20 | return conf 21 | } 22 | 23 | // MapOption ... 24 | type MapOption func(conf *mapConfig) 25 | 26 | // WithItemOptions ... 
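//
// The given options are forwarded to the underlying item.Item. A minimal
// sketch (V, R, K and itemOpts are hypothetical placeholders):
//
//	m := New[V, R, K](pipe, unmarshal, filler, getKey,
//		WithItemOptions(itemOpts...),
//		WithSeparator("/"),
//	)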
27 | func WithItemOptions(options ...item.Option) MapOption { 28 | return func(conf *mapConfig) { 29 | conf.itemOptions = options 30 | } 31 | } 32 | 33 | // WithSeparator ... 34 | func WithSeparator(sep string) MapOption { 35 | return func(conf *mapConfig) { 36 | conf.separator = sep 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /mocks/generate.go: -------------------------------------------------------------------------------- 1 | package mocks 2 | 3 | import "github.com/QuangTung97/memproxy" 4 | 5 | // Memcache ... 6 | type Memcache = memproxy.Memcache 7 | 8 | // Pipeline ... 9 | type Pipeline = memproxy.Pipeline 10 | 11 | // SessionProvider ... 12 | type SessionProvider = memproxy.SessionProvider 13 | 14 | // Session ... 15 | type Session = memproxy.Session 16 | 17 | //go:generate moq -rm -out memproxy_mocks.go . Memcache Pipeline SessionProvider Session 18 | -------------------------------------------------------------------------------- /plain_memcache.go: -------------------------------------------------------------------------------- 1 | package memproxy 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/QuangTung97/go-memcache/memcache" 8 | ) 9 | 10 | type plainMemcacheImpl struct { 11 | client *memcache.Client 12 | sessProvider SessionProvider 13 | leaseDuration uint32 14 | } 15 | 16 | var _ Memcache = &plainMemcacheImpl{} 17 | 18 | type plainPipelineImpl struct { 19 | sess Session 20 | pipeline *memcache.Pipeline 21 | leaseDuration uint32 22 | } 23 | 24 | type plainMemcacheConfig struct { 25 | leaseDurationSeconds uint32 26 | sessProvider SessionProvider 27 | } 28 | 29 | func computePlainMemcacheConfig(options ...PlainMemcacheOption) *plainMemcacheConfig { 30 | conf := &plainMemcacheConfig{ 31 | leaseDurationSeconds: 3, 32 | sessProvider: NewSessionProvider(), 33 | } 34 | for _, fn := range options { 35 | fn(conf) 36 | } 37 | return conf 38 | } 39 | 40 | // PlainMemcacheOption ... 41 | type PlainMemcacheOption func(opts *plainMemcacheConfig) 42 | 43 | // WithPlainMemcacheLeaseDuration ... 44 | func WithPlainMemcacheLeaseDuration(leaseDurationSeconds uint32) PlainMemcacheOption { 45 | return func(opts *plainMemcacheConfig) { 46 | opts.leaseDurationSeconds = leaseDurationSeconds 47 | } 48 | } 49 | 50 | // WithPlainMemcacheSessionProvider ... 51 | func WithPlainMemcacheSessionProvider(sessProvider SessionProvider) PlainMemcacheOption { 52 | return func(opts *plainMemcacheConfig) { 53 | opts.sessProvider = sessProvider 54 | } 55 | } 56 | 57 | var _ Pipeline = &plainPipelineImpl{} 58 | 59 | // NewPlainMemcache creates a light wrapper around the memcached client 60 | func NewPlainMemcache( 61 | client *memcache.Client, 62 | options ...PlainMemcacheOption, 63 | ) Memcache { 64 | conf := computePlainMemcacheConfig(options...) 65 | return &plainMemcacheImpl{ 66 | client: client, 67 | sessProvider: conf.sessProvider, 68 | leaseDuration: conf.leaseDurationSeconds, 69 | } 70 | } 71 | 72 | // Pipeline ... 73 | func (m *plainMemcacheImpl) Pipeline(_ context.Context, options ...PipelineOption) Pipeline { 74 | conf := ComputePipelineConfig(options) 75 | sess := conf.GetSession(m.sessProvider) 76 | 77 | return &plainPipelineImpl{ 78 | sess: sess, 79 | pipeline: m.client.Pipeline(), 80 | leaseDuration: m.leaseDuration, 81 | } 82 | } 83 | 84 | // Close ... 
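//
// Close closes the underlying memcached client. A typical lifecycle sketch
// (the address and lease duration are assumptions; see plain_memcache_test.go
// for full examples):
//
//	client, err := memcache.New("localhost:11211", 1)
//	if err != nil {
//		panic(err)
//	}
//	cache := NewPlainMemcache(client, WithPlainMemcacheLeaseDuration(7))
//	pipe := cache.Pipeline(context.Background())
//	// ... LeaseGet / LeaseSet / Delete through pipe ...
//	pipe.Finish()
//	_ = cache.Close()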
85 | func (m *plainMemcacheImpl) Close() error { 86 | return m.client.Close() 87 | } 88 | 89 | func (p *plainPipelineImpl) LowerSession() Session { 90 | return p.sess.GetLower() 91 | } 92 | 93 | // LeaseGet ... 94 | func (p *plainPipelineImpl) LeaseGet(key string, _ LeaseGetOptions) LeaseGetResult { 95 | result, getErr := p.pipeline.MGetFast(key, memcache.MGetOptions{ 96 | N: p.leaseDuration, 97 | CAS: true, 98 | }) 99 | if getErr != nil { 100 | return LeaseGetErrorResult{Error: getErr} 101 | } 102 | 103 | r := getPlainLeaseGetResult() 104 | r.mgetResult = result 105 | return r 106 | } 107 | 108 | type plainLeaseGetResult struct { 109 | mgetResult memcache.MGetResult 110 | } 111 | 112 | func (r *plainLeaseGetResult) Result() (LeaseGetResponse, error) { 113 | defer putPlainLeaseGetResult(r) 114 | 115 | mgetResp, err := r.mgetResult.Result() 116 | 117 | memcache.ReleaseMGetResult(r.mgetResult) 118 | 119 | if err != nil { 120 | return LeaseGetResponse{}, err 121 | } 122 | 123 | if mgetResp.Type != memcache.MGetResponseTypeVA { 124 | return LeaseGetResponse{}, ErrInvalidLeaseGetResponse 125 | } 126 | 127 | if mgetResp.Flags == 0 { 128 | return LeaseGetResponse{ 129 | Status: LeaseGetStatusFound, 130 | CAS: mgetResp.CAS, 131 | Data: mgetResp.Data, 132 | }, nil 133 | } 134 | 135 | if (mgetResp.Flags & memcache.MGetFlagW) > 0 { 136 | return LeaseGetResponse{ 137 | Status: LeaseGetStatusLeaseGranted, 138 | CAS: mgetResp.CAS, 139 | }, nil 140 | } 141 | 142 | return LeaseGetResponse{ 143 | Status: LeaseGetStatusLeaseRejected, 144 | CAS: mgetResp.CAS, 145 | }, nil 146 | } 147 | 148 | // LeaseSet ... 149 | func (p *plainPipelineImpl) LeaseSet( 150 | key string, data []byte, cas uint64, options LeaseSetOptions, 151 | ) func() (LeaseSetResponse, error) { 152 | fn := p.pipeline.MSet(key, data, memcache.MSetOptions{ 153 | CAS: cas, 154 | TTL: options.TTL, 155 | }) 156 | return func() (LeaseSetResponse, error) { 157 | resp, err := fn() 158 | if err != nil { 159 | return LeaseSetResponse{}, err 160 | } 161 | status := LeaseSetStatusNotStored 162 | if resp.Type == memcache.MSetResponseTypeHD { 163 | status = LeaseSetStatusStored 164 | } 165 | return LeaseSetResponse{ 166 | status, 167 | }, nil 168 | } 169 | } 170 | 171 | // Delete ... 172 | func (p *plainPipelineImpl) Delete(key string, _ DeleteOptions) func() (DeleteResponse, error) { 173 | fn := p.pipeline.MDel(key, memcache.MDelOptions{}) 174 | return func() (DeleteResponse, error) { 175 | _, err := fn() 176 | return DeleteResponse{}, err 177 | } 178 | } 179 | 180 | // Execute ... 181 | func (p *plainPipelineImpl) Execute() { 182 | p.pipeline.Execute() 183 | } 184 | 185 | // Finish ... 
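//
// Finish flushes any remaining buffered commands and waits for their
// responses (TestPlainMemcache__Finish_Do_Flush in plain_memcache_test.go
// relies on Finish flushing a pending LeaseSet).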
186 | func (p *plainPipelineImpl) Finish() { 187 | p.pipeline.Finish() 188 | } 189 | 190 | // ======================================== 191 | // plain memcache lease get result pool 192 | // ======================================== 193 | 194 | var plainLeaseResultPool = sync.Pool{ 195 | New: func() any { 196 | return &plainLeaseGetResult{} 197 | }, 198 | } 199 | 200 | func getPlainLeaseGetResult() *plainLeaseGetResult { 201 | return plainLeaseResultPool.Get().(*plainLeaseGetResult) 202 | } 203 | 204 | func putPlainLeaseGetResult(r *plainLeaseGetResult) { 205 | *r = plainLeaseGetResult{} 206 | plainLeaseResultPool.Put(r) 207 | } 208 | -------------------------------------------------------------------------------- /plain_memcache_test.go: -------------------------------------------------------------------------------- 1 | package memproxy 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net" 7 | "testing" 8 | "time" 9 | 10 | "github.com/QuangTung97/go-memcache/memcache" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | type plainMemcacheTest struct { 15 | pipe Pipeline 16 | } 17 | 18 | func newPlainMemcacheTest(t *testing.T) *plainMemcacheTest { 19 | client, err := memcache.New("localhost:11211", 1) 20 | if err != nil { 21 | panic(err) 22 | } 23 | t.Cleanup(func() { 24 | _ = client.Close() 25 | }) 26 | 27 | err = client.Pipeline().FlushAll()() 28 | if err != nil { 29 | panic(err) 30 | } 31 | 32 | cache := NewPlainMemcache(client, WithPlainMemcacheLeaseDuration(7)) 33 | 34 | return &plainMemcacheTest{ 35 | pipe: cache.Pipeline(context.Background()), 36 | } 37 | } 38 | 39 | func TestPlainMemcache_LeaseGet_Granted_And_LeaseSet__Then_LeaseGet_Found(t *testing.T) { 40 | m := newPlainMemcacheTest(t) 41 | 42 | const key = "key01" 43 | 44 | // Lease Get 45 | leaseGetResp, err := m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 46 | assert.Equal(t, nil, err) 47 | 48 | cas := leaseGetResp.CAS 49 | leaseGetResp.CAS = 0 50 | 51 | assert.Equal(t, LeaseGetResponse{ 52 | Status: LeaseGetStatusLeaseGranted, 53 | }, leaseGetResp) 54 | 55 | assert.Greater(t, cas, uint64(0)) 56 | 57 | // Do Set 58 | value := []byte("some value 01") 59 | 60 | setResp, err := m.pipe.LeaseSet(key, value, cas, LeaseSetOptions{})() 61 | assert.Equal(t, nil, err) 62 | assert.Equal(t, LeaseSetResponse{ 63 | Status: LeaseSetStatusStored, 64 | }, setResp) 65 | 66 | // Lease Get Again 67 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 68 | assert.Equal(t, nil, err) 69 | assert.Equal(t, LeaseGetResponse{ 70 | Status: LeaseGetStatusFound, 71 | CAS: cas + 1, 72 | Data: value, 73 | }, leaseGetResp) 74 | 75 | // Get Again 76 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 77 | assert.Equal(t, nil, err) 78 | 79 | leaseGetResp.CAS = 1 80 | assert.Equal(t, LeaseGetResponse{ 81 | Status: LeaseGetStatusFound, 82 | CAS: 1, 83 | Data: value, 84 | }, leaseGetResp) 85 | } 86 | 87 | func TestPlainMemcache_LeaseGet_Rejected(t *testing.T) { 88 | m := newPlainMemcacheTest(t) 89 | 90 | const key = "key01" 91 | 92 | // Lease Get 93 | leaseGetResp, err := m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 94 | assert.Equal(t, nil, err) 95 | 96 | cas := leaseGetResp.CAS 97 | leaseGetResp.CAS = 0 98 | 99 | assert.Equal(t, LeaseGetResponse{ 100 | Status: LeaseGetStatusLeaseGranted, 101 | }, leaseGetResp) 102 | 103 | assert.Greater(t, cas, uint64(0)) 104 | 105 | // Lease Get Rejected 106 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 107 | assert.Equal(t, nil, err) 108 | 
assert.Equal(t, LeaseGetResponse{ 109 | Status: LeaseGetStatusLeaseRejected, 110 | CAS: cas, 111 | }, leaseGetResp) 112 | 113 | // Do Set 114 | value := []byte("some value 01") 115 | setResp, err := m.pipe.LeaseSet(key, value, cas, LeaseSetOptions{})() 116 | assert.Equal(t, nil, err) 117 | assert.Equal(t, LeaseSetResponse{ 118 | Status: LeaseSetStatusStored, 119 | }, setResp) 120 | 121 | // Lease Get Again 122 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 123 | assert.Equal(t, nil, err) 124 | assert.Equal(t, LeaseGetResponse{ 125 | Status: LeaseGetStatusFound, 126 | CAS: cas + 1, 127 | Data: value, 128 | }, leaseGetResp) 129 | } 130 | 131 | func TestPlainMemcache_LeaseSet_After_Delete__Rejected(t *testing.T) { 132 | m := newPlainMemcacheTest(t) 133 | 134 | const key = "key01" 135 | 136 | // Lease Get 137 | leaseGetResp, err := m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 138 | assert.Equal(t, nil, err) 139 | 140 | cas := leaseGetResp.CAS 141 | leaseGetResp.CAS = 0 142 | 143 | assert.Equal(t, LeaseGetResponse{ 144 | Status: LeaseGetStatusLeaseGranted, 145 | }, leaseGetResp) 146 | assert.Greater(t, cas, uint64(0)) 147 | 148 | // Delete 149 | delResp, err := m.pipe.Delete(key, DeleteOptions{})() 150 | assert.Equal(t, nil, err) 151 | assert.Equal(t, DeleteResponse{}, delResp) 152 | 153 | // Do Set 154 | value := []byte("some value 01") 155 | setResp, err := m.pipe.LeaseSet(key, value, cas, LeaseSetOptions{})() 156 | assert.Equal(t, nil, err) 157 | assert.Equal(t, LeaseSetResponse{ 158 | Status: LeaseSetStatusNotStored, 159 | }, setResp) 160 | 161 | // Lease Get Again 162 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 163 | assert.Equal(t, nil, err) 164 | assert.Equal(t, LeaseGetResponse{ 165 | Status: LeaseGetStatusLeaseGranted, 166 | CAS: cas + 2, 167 | }, leaseGetResp) 168 | } 169 | 170 | func TestPlainMemcache_LeaseGet_After_Delete(t *testing.T) { 171 | m := newPlainMemcacheTest(t) 172 | 173 | const key = "key01" 174 | 175 | // Lease Get 176 | leaseGetResp, err := m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 177 | assert.Equal(t, nil, err) 178 | 179 | cas := leaseGetResp.CAS 180 | leaseGetResp.CAS = 0 181 | 182 | assert.Equal(t, LeaseGetResponse{ 183 | Status: LeaseGetStatusLeaseGranted, 184 | }, leaseGetResp) 185 | 186 | assert.Greater(t, cas, uint64(0)) 187 | 188 | // Do Set 189 | value := []byte("some value 01") 190 | 191 | setResp, err := m.pipe.LeaseSet(key, value, cas, LeaseSetOptions{})() 192 | assert.Equal(t, nil, err) 193 | assert.Equal(t, LeaseSetResponse{ 194 | Status: LeaseSetStatusStored, 195 | }, setResp) 196 | 197 | // Lease Get Again 198 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 199 | assert.Equal(t, nil, err) 200 | assert.Equal(t, LeaseGetResponse{ 201 | Status: LeaseGetStatusFound, 202 | CAS: cas + 1, 203 | Data: value, 204 | }, leaseGetResp) 205 | 206 | // Do Delete 207 | deleteResp, err := m.pipe.Delete(key, DeleteOptions{})() 208 | assert.Equal(t, nil, err) 209 | assert.Equal(t, DeleteResponse{}, deleteResp) 210 | 211 | leaseGetResp, err = m.pipe.LeaseGet(key, LeaseGetOptions{}).Result() 212 | assert.Equal(t, nil, err) 213 | assert.Equal(t, LeaseGetResponse{ 214 | Status: LeaseGetStatusLeaseGranted, 215 | CAS: cas + 2, 216 | }, leaseGetResp) 217 | } 218 | 219 | func TestPlainMemcache__Lease_Get__Pipeline(t *testing.T) { 220 | m1 := newPlainMemcacheTest(t) 221 | m2 := newPlainMemcacheTest(t) 222 | 223 | const key1 = "key01" 224 | const key2 = "key02" 225 | 226 | fn1 := 
m1.pipe.LeaseGet(key1, LeaseGetOptions{}) 227 | fn2 := m1.pipe.LeaseGet(key2, LeaseGetOptions{}) 228 | 229 | getResp, err := m2.pipe.LeaseGet(key1, LeaseGetOptions{}).Result() 230 | assert.Equal(t, nil, err) 231 | 232 | cas := getResp.CAS 233 | getResp.CAS = 0 234 | 235 | assert.Greater(t, cas, uint64(0)) 236 | 237 | assert.Equal(t, LeaseGetResponse{ 238 | Status: LeaseGetStatusLeaseGranted, 239 | }, getResp) 240 | 241 | // After Do Flush Pipeline 242 | getResp, err = fn1.Result() 243 | assert.Equal(t, LeaseGetResponse{ 244 | Status: LeaseGetStatusLeaseRejected, 245 | CAS: cas, 246 | }, getResp) 247 | 248 | getResp, err = fn2.Result() 249 | assert.Equal(t, LeaseGetResponse{ 250 | Status: LeaseGetStatusLeaseGranted, 251 | CAS: cas + 1, 252 | }, getResp) 253 | } 254 | 255 | func TestPlainMemcache__Lease_Get_Then_Execute(t *testing.T) { 256 | m1 := newPlainMemcacheTest(t) 257 | m2 := newPlainMemcacheTest(t) 258 | 259 | const key1 = "key01" 260 | const key2 = "key02" 261 | 262 | m1.pipe.LeaseGet(key1, LeaseGetOptions{}) 263 | m1.pipe.LeaseGet(key2, LeaseGetOptions{}) 264 | 265 | m1.pipe.Execute() 266 | time.Sleep(10 * time.Millisecond) 267 | 268 | getResp, err := m2.pipe.LeaseGet(key1, LeaseGetOptions{}).Result() 269 | assert.Equal(t, nil, err) 270 | 271 | cas := getResp.CAS 272 | getResp.CAS = 0 273 | 274 | assert.Greater(t, cas, uint64(0)) 275 | 276 | assert.Equal(t, LeaseGetResponse{ 277 | Status: LeaseGetStatusLeaseRejected, 278 | }, getResp) 279 | } 280 | 281 | func TestPlainMemcache__Finish_Do_Flush(t *testing.T) { 282 | m1 := newPlainMemcacheTest(t) 283 | m2 := newPlainMemcacheTest(t) 284 | 285 | const key1 = "key01" 286 | 287 | getResp, err := m1.pipe.LeaseGet(key1, LeaseGetOptions{}).Result() 288 | assert.Equal(t, nil, err) 289 | assert.Equal(t, LeaseGetStatusLeaseGranted, getResp.Status) 290 | 291 | cas := getResp.CAS 292 | 293 | data := []byte("some value 01") 294 | m1.pipe.LeaseSet(key1, data, cas, LeaseSetOptions{}) 295 | 296 | m1.pipe.Finish() 297 | 298 | getResp, err = m2.pipe.LeaseGet(key1, LeaseGetOptions{}).Result() 299 | assert.Equal(t, nil, err) 300 | assert.Equal(t, LeaseGetResponse{ 301 | Status: LeaseGetStatusFound, 302 | CAS: cas + 1, 303 | Data: data, 304 | }, getResp) 305 | } 306 | 307 | func TestPlainMemcache__With_Existing_Session(t *testing.T) { 308 | client, err := memcache.New("localhost:11211", 1) 309 | if err != nil { 310 | panic(err) 311 | } 312 | t.Cleanup(func() { 313 | _ = client.Close() 314 | }) 315 | 316 | err = client.Pipeline().FlushAll()() 317 | if err != nil { 318 | panic(err) 319 | } 320 | 321 | cache := NewPlainMemcache(client, 322 | WithPlainMemcacheLeaseDuration(7), 323 | WithPlainMemcacheSessionProvider(NewSessionProvider()), 324 | ) 325 | 326 | provider := NewSessionProvider() 327 | sess := provider.New() 328 | 329 | pipe := cache.Pipeline(context.Background(), WithPipelineExistingSession(sess)) 330 | 331 | assert.Same(t, sess.GetLower(), pipe.LowerSession()) 332 | } 333 | 334 | func TestPlainMemcache__Invalid_Key(t *testing.T) { 335 | client, err := memcache.New("localhost:11211", 1) 336 | if err != nil { 337 | panic(err) 338 | } 339 | t.Cleanup(func() { 340 | _ = client.Close() 341 | }) 342 | 343 | err = client.Pipeline().FlushAll()() 344 | if err != nil { 345 | panic(err) 346 | } 347 | 348 | cache := NewPlainMemcache(client, 349 | WithPlainMemcacheLeaseDuration(7), 350 | WithPlainMemcacheSessionProvider(NewSessionProvider()), 351 | ) 352 | 353 | pipe := cache.Pipeline(context.Background()) 354 | fn := pipe.LeaseGet(" abcd ", 
LeaseGetOptions{}) 355 | 356 | resp, err := fn.Result() 357 | assert.Equal(t, memcache.ErrInvalidKeyFormat, err) 358 | assert.Equal(t, LeaseGetResponse{}, resp) 359 | } 360 | 361 | func TestPlainMemcache__Connection_Error(t *testing.T) { 362 | client, err := memcache.New("localhost:11200", 1) 363 | if err != nil { 364 | panic(err) 365 | } 366 | t.Cleanup(func() { 367 | _ = client.Close() 368 | }) 369 | 370 | cache := NewPlainMemcache(client, 371 | WithPlainMemcacheLeaseDuration(7), 372 | WithPlainMemcacheSessionProvider(NewSessionProvider()), 373 | ) 374 | 375 | pipe := cache.Pipeline(context.Background()) 376 | fn := pipe.LeaseGet("KEY01", LeaseGetOptions{}) 377 | 378 | resp, err := fn.Result() 379 | 380 | var netErr *net.OpError 381 | isNetErr := errors.As(err, &netErr) 382 | assert.Equal(t, true, isNetErr) 383 | assert.Equal(t, "dial", netErr.Op) 384 | assert.Equal(t, "tcp", netErr.Net) 385 | assert.Equal(t, "connect: connection refused", netErr.Err.Error()) 386 | assert.Equal(t, LeaseGetResponse{}, resp) 387 | } 388 | -------------------------------------------------------------------------------- /proxy/config.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import "fmt" 4 | 5 | // ServerID ... 6 | type ServerID int 7 | 8 | // ServerConfig is a constraint for server config type 9 | type ServerConfig interface { 10 | // GetID returns the server id, must be unique 11 | GetID() ServerID 12 | } 13 | 14 | //go:generate moq -rm -out proxy_mocks_test.go . Route Selector ServerStats 15 | 16 | // Route must be thread safe 17 | type Route interface { 18 | // NewSelector ... 19 | NewSelector() Selector 20 | 21 | // AllServerIDs returns the list of all possible server ids 22 | AllServerIDs() []ServerID 23 | } 24 | 25 | // Selector is NOT thread safe 26 | type Selector interface { 27 | // SetFailedServer ... 28 | SetFailedServer(server ServerID) 29 | 30 | // HasNextAvailableServer checks whether there is a next available server to fall back to 31 | HasNextAvailableServer() bool 32 | 33 | // SelectServer chooses a server id and keeps returning it until Reset is called or a failed server is added 34 | SelectServer(key string) ServerID 35 | 36 | // SelectForDelete chooses the servers used for deleting 37 | SelectForDelete(key string) []ServerID 38 | 39 | // Reset the selection 40 | Reset() 41 | } 42 | 43 | // Config ... 44 | type Config[S ServerConfig] struct { 45 | Servers []S 46 | Route Route 47 | } 48 | 49 | // SimpleServerConfig ... 50 | type SimpleServerConfig struct { 51 | ID ServerID 52 | Host string 53 | Port uint16 54 | } 55 | 56 | // GetID ... 57 | func (c SimpleServerConfig) GetID() ServerID { 58 | return c.ID 59 | } 60 | 61 | // Address ... 62 | func (c SimpleServerConfig) Address() string { 63 | return fmt.Sprintf("%s:%d", c.Host, c.Port) 64 | } 65 | 66 | // ServerStats is thread safe 67 | type ServerStats interface { 68 | // IsServerFailed checks whether the server is currently not connected 69 | IsServerFailed(server ServerID) bool 70 | 71 | // NotifyServerFailed ... 
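//
// NotifyServerFailed marks the server as failed in the shared stats; selectors
// call it when SetFailedServer first observes a failure (see
// replicatedRouteSelector.SetFailedServer in replicated.go).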
72 | NotifyServerFailed(server ServerID) 73 | 74 | // GetMemUsage returns memory usage in bytes 75 | GetMemUsage(server ServerID) float64 76 | } 77 | -------------------------------------------------------------------------------- /proxy/pool.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // =============================== 8 | // pool of lease get state 9 | // =============================== 10 | 11 | var leaseGetStatePool = sync.Pool{ 12 | New: func() any { 13 | return &leaseGetState{} 14 | }, 15 | } 16 | 17 | func putLeaseGetState(s *leaseGetState) { 18 | *s = leaseGetState{} 19 | leaseGetStatePool.Put(s) 20 | } 21 | 22 | func getLeaseGetState() *leaseGetState { 23 | return leaseGetStatePool.Get().(*leaseGetState) 24 | } 25 | -------------------------------------------------------------------------------- /proxy/pool_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestLeaseGetStatePool(t *testing.T) { 10 | t.Run("normal", func(t *testing.T) { 11 | s := getLeaseGetState() 12 | assert.Equal(t, &leaseGetState{}, s) 13 | 14 | s.serverID = 123 15 | 16 | putLeaseGetState(s) 17 | s = getLeaseGetState() 18 | assert.Equal(t, &leaseGetState{}, s) 19 | }) 20 | } 21 | -------------------------------------------------------------------------------- /proxy/proxy.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "unsafe" 8 | 9 | "github.com/QuangTung97/go-memcache/memcache" 10 | 11 | "github.com/QuangTung97/memproxy" 12 | ) 13 | 14 | // Memcache is thread safe 15 | type Memcache struct { 16 | sessProvider memproxy.SessionProvider 17 | clients map[ServerID]memproxy.Memcache 18 | route Route 19 | } 20 | 21 | type memcacheConfig struct { 22 | sessProvider memproxy.SessionProvider 23 | } 24 | 25 | func computeMemcacheConfig(options ...MemcacheOption) *memcacheConfig { 26 | conf := &memcacheConfig{ 27 | sessProvider: memproxy.NewSessionProvider(), 28 | } 29 | for _, fn := range options { 30 | fn(conf) 31 | } 32 | return conf 33 | } 34 | 35 | // MemcacheOption ... 36 | type MemcacheOption func(conf *memcacheConfig) 37 | 38 | // WithMemcacheSessionProvider ... 39 | func WithMemcacheSessionProvider(provider memproxy.SessionProvider) MemcacheOption { 40 | return func(conf *memcacheConfig) { 41 | conf.sessProvider = provider 42 | } 43 | } 44 | 45 | // New ... 46 | func New[S ServerConfig]( 47 | conf Config[S], 48 | newFunc func(conf S) memproxy.Memcache, 49 | options ...MemcacheOption, 50 | ) (*Memcache, error) { 51 | if len(conf.Servers) == 0 { 52 | return nil, errors.New("proxy: empty server list") 53 | } 54 | 55 | if conf.Route == nil { 56 | return nil, errors.New("proxy: route is nil") 57 | } 58 | 59 | memcacheConf := computeMemcacheConfig(options...) 
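// build one concrete memcache client per configured server; every id returned
// by Route.AllServerIDs must have a matching client (validated just below)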
60 | 61 | clients := map[ServerID]memproxy.Memcache{} 62 | 63 | for _, server := range conf.Servers { 64 | client := newFunc(server) 65 | clients[server.GetID()] = client 66 | } 67 | 68 | allServerIDs := conf.Route.AllServerIDs() 69 | for _, serverID := range allServerIDs { 70 | _, ok := clients[serverID] 71 | if !ok { 72 | return nil, fmt.Errorf("proxy: server id '%d' not in server list", serverID) 73 | } 74 | } 75 | 76 | return &Memcache{ 77 | sessProvider: memcacheConf.sessProvider, 78 | clients: clients, 79 | route: conf.Route, 80 | }, nil 81 | } 82 | 83 | // Pipeline is NOT thread safe 84 | type Pipeline struct { 85 | ctx context.Context 86 | 87 | client *Memcache 88 | selector Selector 89 | 90 | sess memproxy.Session 91 | pipeSession memproxy.Session 92 | 93 | pipelines map[ServerID]memproxy.Pipeline 94 | 95 | needExecServers []ServerID 96 | //revive:disable-next-line:nested-structs 97 | needExecServerSet map[ServerID]struct{} 98 | 99 | leaseSetServers map[string]leaseSetState 100 | } 101 | 102 | type leaseSetState struct { 103 | valid bool // for preventing a special race condition 104 | serverID ServerID 105 | } 106 | 107 | // Pipeline ... 108 | func (m *Memcache) Pipeline( 109 | ctx context.Context, options ...memproxy.PipelineOption, 110 | ) memproxy.Pipeline { 111 | conf := memproxy.ComputePipelineConfig(options) 112 | sess := conf.GetSession(m.sessProvider) 113 | 114 | return &Pipeline{ 115 | ctx: ctx, 116 | 117 | client: m, 118 | selector: m.route.NewSelector(), 119 | 120 | pipeSession: sess, 121 | sess: sess.GetLower(), 122 | 123 | pipelines: map[ServerID]memproxy.Pipeline{}, 124 | 125 | leaseSetServers: map[string]leaseSetState{}, 126 | } 127 | } 128 | 129 | // Close ... 130 | func (m *Memcache) Close() error { 131 | var lastErr error 132 | for _, client := range m.clients { 133 | err := client.Close() 134 | if err != nil { 135 | lastErr = err 136 | } 137 | } 138 | return lastErr 139 | } 140 | 141 | func (p *Pipeline) getRoutePipeline(serverID ServerID) memproxy.Pipeline { 142 | pipe, existed := p.pipelines[serverID] 143 | if !existed { 144 | pipe = p.client.clients[serverID].Pipeline(p.ctx, memproxy.WithPipelineExistingSession(p.pipeSession)) 145 | p.pipelines[serverID] = pipe 146 | } 147 | 148 | if p.needExecServerSet == nil { 149 | p.needExecServerSet = map[ServerID]struct{}{ 150 | serverID: {}, 151 | } 152 | p.needExecServers = append(p.needExecServers, serverID) 153 | } else if _, existed := p.needExecServerSet[serverID]; !existed { 154 | p.needExecServerSet[serverID] = struct{}{} 155 | p.needExecServers = append(p.needExecServers, serverID) 156 | } 157 | 158 | return pipe 159 | } 160 | 161 | func (p *Pipeline) doExecuteForAllServers() { 162 | for _, server := range p.needExecServers { 163 | pipe := p.pipelines[server] 164 | pipe.Execute() 165 | } 166 | p.needExecServers = nil 167 | p.needExecServerSet = nil 168 | } 169 | 170 | func (p *Pipeline) setKeyForLeaseSet( 171 | key string, 172 | resp memproxy.LeaseGetResponse, 173 | serverID ServerID, 174 | ) { 175 | if resp.Status == memproxy.LeaseGetStatusLeaseGranted || resp.Status == memproxy.LeaseGetStatusLeaseRejected { 176 | prev, ok := p.leaseSetServers[key] 177 | if ok { 178 | if prev.serverID != serverID { 179 | prev.valid = false 180 | p.leaseSetServers[key] = prev 181 | return 182 | } 183 | return 184 | } 185 | 186 | p.leaseSetServers[key] = leaseSetState{ 187 | valid: true, 188 | serverID: serverID, 189 | } 190 | } 191 | } 192 | 193 | type leaseGetState struct { 194 | pipe *Pipeline 195 | serverID ServerID 196 | 
key string 197 | options memproxy.LeaseGetOptions 198 | 199 | fn memproxy.LeaseGetResult 200 | 201 | resp memproxy.LeaseGetResponse 202 | err error 203 | } 204 | 205 | func retryOnOtherNodeCallback(obj unsafe.Pointer) { 206 | s := (*leaseGetState)(obj) 207 | s.retryOnOtherNode() 208 | } 209 | 210 | func (s *leaseGetState) retryOnOtherNode() { 211 | s.pipe.doExecuteForAllServers() 212 | 213 | s.resp, s.err = s.fn.Result() 214 | s.fn = nil 215 | 216 | if s.err == nil { 217 | s.pipe.setKeyForLeaseSet(s.key, s.resp, s.serverID) 218 | } 219 | } 220 | 221 | func leaseGetStateNextFuncCallback(obj unsafe.Pointer) { 222 | s := (*leaseGetState)(obj) 223 | s.nextFunc() 224 | } 225 | 226 | func (s *leaseGetState) nextFunc() { 227 | s.pipe.doExecuteForAllServers() 228 | 229 | s.resp, s.err = s.fn.Result() 230 | s.fn = nil 231 | 232 | if s.err != nil { 233 | s.pipe.selector.SetFailedServer(s.serverID) 234 | if !s.pipe.selector.HasNextAvailableServer() { 235 | return 236 | } 237 | 238 | s.serverID = s.pipe.selector.SelectServer(s.key) 239 | 240 | pipe := s.pipe.getRoutePipeline(s.serverID) 241 | s.fn = pipe.LeaseGet(s.key, s.options) 242 | 243 | s.pipe.sess.AddNextCall(memproxy.CallbackFunc{ 244 | Object: unsafe.Pointer(s), 245 | Func: retryOnOtherNodeCallback, 246 | }) 247 | 248 | return 249 | } 250 | 251 | s.pipe.setKeyForLeaseSet(s.key, s.resp, s.serverID) 252 | } 253 | 254 | func (s *leaseGetState) Result() (memproxy.LeaseGetResponse, error) { 255 | s.pipe.sess.Execute() 256 | s.pipe.selector.Reset() 257 | 258 | resp, err := s.resp, s.err 259 | 260 | putLeaseGetState(s) 261 | 262 | return resp, err 263 | } 264 | 265 | // LeaseGet ... 266 | func (p *Pipeline) LeaseGet( 267 | key string, options memproxy.LeaseGetOptions, 268 | ) memproxy.LeaseGetResult { 269 | serverID := p.selector.SelectServer(key) 270 | 271 | pipe := p.getRoutePipeline(serverID) 272 | fn := pipe.LeaseGet(key, options) 273 | 274 | state := getLeaseGetState() 275 | *state = leaseGetState{ 276 | pipe: p, 277 | serverID: serverID, 278 | key: key, 279 | options: options, 280 | 281 | fn: fn, 282 | } 283 | 284 | p.sess.AddNextCall(memproxy.CallbackFunc{ 285 | Object: unsafe.Pointer(state), 286 | Func: leaseGetStateNextFuncCallback, 287 | }) 288 | return state 289 | } 290 | 291 | // LeaseSet ... 292 | func (p *Pipeline) LeaseSet( 293 | key string, data []byte, cas uint64, 294 | options memproxy.LeaseSetOptions, 295 | ) func() (memproxy.LeaseSetResponse, error) { 296 | setState, ok := p.leaseSetServers[key] 297 | if !ok || !setState.valid { 298 | return func() (memproxy.LeaseSetResponse, error) { 299 | return memproxy.LeaseSetResponse{}, nil 300 | } 301 | } 302 | pipe := p.getRoutePipeline(setState.serverID) 303 | return pipe.LeaseSet(key, data, cas, options) 304 | } 305 | 306 | // Delete ... 307 | func (p *Pipeline) Delete( 308 | key string, options memproxy.DeleteOptions, 309 | ) func() (memproxy.DeleteResponse, error) { 310 | serverIDs := p.selector.SelectForDelete(key) 311 | fnList := make([]func() (memproxy.DeleteResponse, error), 0, len(serverIDs)) 312 | for _, id := range serverIDs { 313 | fnList = append(fnList, p.getRoutePipeline(id).Delete(key, options)) 314 | } 315 | 316 | return func() (memproxy.DeleteResponse, error) { 317 | var lastErr error 318 | for _, fn := range fnList { 319 | _, err := fn() 320 | if err != nil { 321 | lastErr = err 322 | } 323 | } 324 | return memproxy.DeleteResponse{}, lastErr 325 | } 326 | } 327 | 328 | // Execute ... 
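//
// Execute flushes every underlying per-server pipeline touched so far. A
// minimal end-to-end sketch (mc is assumed to come from New or
// NewSimpleReplicatedMemcache, and data is a hypothetical []byte; see
// proxy_integration_test.go for a complete version):
//
//	pipe := mc.Pipeline(ctx)
//	defer pipe.Finish()
//
//	fn := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{})
//	resp, err := fn.Result()
//	if err == nil && resp.Status == memproxy.LeaseGetStatusLeaseGranted {
//		_, _ = pipe.LeaseSet("KEY01", data, resp.CAS, memproxy.LeaseSetOptions{})()
//	}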
329 | func (p *Pipeline) Execute() { 330 | p.doExecuteForAllServers() 331 | } 332 | 333 | // Finish ... 334 | func (p *Pipeline) Finish() { 335 | for _, server := range p.needExecServers { 336 | pipe := p.pipelines[server] 337 | pipe.Finish() 338 | } 339 | p.needExecServers = nil 340 | p.needExecServerSet = nil 341 | } 342 | 343 | // LowerSession returns a lower priority session 344 | func (p *Pipeline) LowerSession() memproxy.Session { 345 | return p.sess.GetLower() 346 | } 347 | 348 | // NewSimpleStats ... 349 | func NewSimpleStats(servers []SimpleServerConfig, options ...SimpleStatsOption) *SimpleServerStats { 350 | return NewSimpleServerStats[SimpleServerConfig](servers, NewSimpleStatsClient, options...) 351 | } 352 | 353 | // NewSimpleReplicatedMemcache ... 354 | func NewSimpleReplicatedMemcache( 355 | servers []SimpleServerConfig, 356 | numConnsPerServer int, 357 | stats ServerStats, 358 | options ...ReplicatedRouteOption, 359 | ) (*Memcache, func(), error) { 360 | serverIDs := make([]ServerID, 0, len(servers)) 361 | for _, s := range servers { 362 | serverIDs = append(serverIDs, s.GetID()) 363 | } 364 | 365 | conf := Config[SimpleServerConfig]{ 366 | Servers: servers, 367 | Route: NewReplicatedRoute(serverIDs, stats, options...), 368 | } 369 | 370 | mc, err := New[SimpleServerConfig]( 371 | conf, 372 | func(conf SimpleServerConfig) memproxy.Memcache { 373 | client, err := memcache.New(conf.Address(), numConnsPerServer) 374 | if err != nil { 375 | panic(err) 376 | } 377 | return memproxy.NewPlainMemcache(client) 378 | }, 379 | ) 380 | if err != nil { 381 | return nil, nil, err 382 | } 383 | 384 | return mc, func() { _ = mc.Close() }, nil 385 | } 386 | -------------------------------------------------------------------------------- /proxy/proxy_integration_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/QuangTung97/go-memcache/memcache" 7 | "github.com/stretchr/testify/assert" 8 | 9 | "github.com/QuangTung97/memproxy" 10 | ) 11 | 12 | func clearMemcache(c *memcache.Client) { 13 | pipe := c.Pipeline() 14 | defer pipe.Finish() 15 | err := pipe.FlushAll()() 16 | if err != nil { 17 | panic(err) 18 | } 19 | } 20 | 21 | func newMemcacheWithProxy(t *testing.T) memproxy.Memcache { 22 | clearClient, err := memcache.New("localhost:11211", 1) 23 | if err != nil { 24 | panic(err) 25 | } 26 | clearMemcache(clearClient) 27 | err = clearClient.Close() 28 | if err != nil { 29 | panic(err) 30 | } 31 | 32 | server1 := SimpleServerConfig{ 33 | ID: 1, 34 | Host: "localhost", 35 | Port: 11211, 36 | } 37 | 38 | servers := []SimpleServerConfig{server1} 39 | mc, closeFunc, err := NewSimpleReplicatedMemcache(servers, 1, NewSimpleStats(servers)) 40 | if err != nil { 41 | panic(err) 42 | } 43 | t.Cleanup(closeFunc) 44 | 45 | return mc 46 | } 47 | 48 | func TestProxyIntegration(t *testing.T) { 49 | t.Run("simple-lease-get-set", func(t *testing.T) { 50 | mc := newMemcacheWithProxy(t) 51 | pipe := mc.Pipeline(newContext()) 52 | defer pipe.Finish() 53 | 54 | fn1 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 55 | resp, err := fn1.Result() 56 | assert.Equal(t, nil, err) 57 | assert.Equal(t, memproxy.LeaseGetStatusLeaseGranted, resp.Status) 58 | 59 | fn2 := pipe.LeaseSet("KEY01", []byte("some data 01"), resp.CAS, memproxy.LeaseSetOptions{}) 60 | setResp, err := fn2() 61 | assert.Equal(t, nil, err) 62 | assert.Equal(t, memproxy.LeaseSetResponse{ 63 | Status: memproxy.LeaseSetStatusStored, 64 | }, 
setResp) 65 | 66 | fn3 := pipe.LeaseGet("KEY01", memproxy.LeaseGetOptions{}) 67 | resp, err = fn3.Result() 68 | assert.Equal(t, nil, err) 69 | assert.Equal(t, memproxy.LeaseGetStatusFound, resp.Status) 70 | assert.Equal(t, []byte("some data 01"), resp.Data) 71 | }) 72 | 73 | t.Run("simple-lease-get-set-multi", func(t *testing.T) { 74 | mc := newMemcacheWithProxy(t) 75 | pipe := mc.Pipeline(newContext()) 76 | defer pipe.Finish() 77 | 78 | const key1 = "KEY01" 79 | const key2 = "KEY02" 80 | 81 | value1 := []byte("some data 01") 82 | value2 := []byte("some data 02") 83 | 84 | fn1 := pipe.LeaseGet(key1, memproxy.LeaseGetOptions{}) 85 | fn2 := pipe.LeaseGet(key2, memproxy.LeaseGetOptions{}) 86 | 87 | resp1, err := fn1.Result() 88 | assert.Equal(t, nil, err) 89 | assert.Equal(t, memproxy.LeaseGetStatusLeaseGranted, resp1.Status) 90 | 91 | resp2, err := fn2.Result() 92 | assert.Equal(t, nil, err) 93 | assert.Equal(t, memproxy.LeaseGetStatusLeaseGranted, resp2.Status) 94 | 95 | // DO Set 96 | fn3 := pipe.LeaseSet(key1, value1, resp1.CAS, memproxy.LeaseSetOptions{}) 97 | setResp, err := fn3() 98 | assert.Equal(t, nil, err) 99 | assert.Equal(t, memproxy.LeaseSetResponse{ 100 | Status: memproxy.LeaseSetStatusStored, 101 | }, setResp) 102 | 103 | fn4 := pipe.LeaseSet(key2, value2, resp2.CAS, memproxy.LeaseSetOptions{}) 104 | setResp, err = fn4() 105 | assert.Equal(t, nil, err) 106 | assert.Equal(t, memproxy.LeaseSetResponse{ 107 | Status: memproxy.LeaseSetStatusStored, 108 | }, setResp) 109 | 110 | // Get Again 111 | fn5 := pipe.LeaseGet(key1, memproxy.LeaseGetOptions{}) 112 | fn6 := pipe.LeaseGet(key2, memproxy.LeaseGetOptions{}) 113 | 114 | resp1, err = fn5.Result() 115 | assert.Equal(t, nil, err) 116 | assert.Equal(t, memproxy.LeaseGetStatusFound, resp1.Status) 117 | assert.Equal(t, value1, resp1.Data) 118 | 119 | resp2, err = fn6.Result() 120 | assert.Equal(t, nil, err) 121 | assert.Equal(t, memproxy.LeaseGetStatusFound, resp2.Status) 122 | assert.Equal(t, value2, resp2.Data) 123 | }) 124 | 125 | t.Run("lease-finish-and-then-new-pipeline", func(t *testing.T) { 126 | mc := newMemcacheWithProxy(t) 127 | pipe1 := mc.Pipeline(newContext()) 128 | 129 | const key1 = "KEY01" 130 | value1 := []byte("some data 01") 131 | 132 | fn1 := pipe1.LeaseGet(key1, memproxy.LeaseGetOptions{}) 133 | resp, err := fn1.Result() 134 | assert.Equal(t, nil, err) 135 | assert.Equal(t, memproxy.LeaseGetStatusLeaseGranted, resp.Status) 136 | 137 | // DO Set 138 | pipe1.LeaseSet(key1, value1, resp.CAS, memproxy.LeaseSetOptions{}) 139 | pipe1.Finish() 140 | 141 | // Get Again 142 | pipe2 := mc.Pipeline(newContext()) 143 | fn3 := pipe2.LeaseGet(key1, memproxy.LeaseGetOptions{}) 144 | resp, err = fn3.Result() 145 | assert.Equal(t, nil, err) 146 | assert.Equal(t, memproxy.LeaseGetStatusFound, resp.Status) 147 | assert.Equal(t, value1, resp.Data) 148 | }) 149 | } 150 | -------------------------------------------------------------------------------- /proxy/replicated.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "math/rand" 5 | ) 6 | 7 | type replicatedRoute struct { 8 | configServers []ServerID 9 | 10 | conf *replicatedRouteConfig 11 | stats ServerStats 12 | } 13 | 14 | // RandomMaxValues random from 0 => 999,999 15 | const RandomMaxValues uint64 = 1000000 16 | 17 | type replicatedRouteConfig struct { 18 | // compute score from memory 19 | memScore func(mem float64) float64 20 | 21 | // random from 0 => n - 1 22 | randFunc func(n uint64) uint64 23 | 24 | 
// default 1% 25 | minPercent float64 26 | } 27 | 28 | type replicatedRouteSelector struct { 29 | //revive:disable-next-line:nested-structs 30 | failedServers map[ServerID]struct{} 31 | 32 | remainingServers []ServerID 33 | 34 | route *replicatedRoute 35 | weightAccum []float64 36 | 37 | alreadyChosen bool 38 | chosenServer ServerID 39 | } 40 | 41 | var _ Route = &replicatedRoute{} 42 | 43 | // ReplicatedRouteOption ... 44 | type ReplicatedRouteOption func(conf *replicatedRouteConfig) 45 | 46 | // WithRandFunc ... 47 | func WithRandFunc(randFunc func(n uint64) uint64) ReplicatedRouteOption { 48 | return func(conf *replicatedRouteConfig) { 49 | conf.randFunc = randFunc 50 | } 51 | } 52 | 53 | // WithMinPercentage minimum request percentage to memcached servers 54 | func WithMinPercentage(percentage float64) ReplicatedRouteOption { 55 | return func(conf *replicatedRouteConfig) { 56 | conf.minPercent = percentage 57 | } 58 | } 59 | 60 | // WithMemoryScoringFunc changes the weight function for replication load-balancing 61 | func WithMemoryScoringFunc(memScoreFunc func(mem float64) float64) ReplicatedRouteOption { 62 | return func(conf *replicatedRouteConfig) { 63 | conf.memScore = memScoreFunc 64 | } 65 | } 66 | 67 | // NewReplicatedRoute ... 68 | func NewReplicatedRoute( 69 | servers []ServerID, 70 | stats ServerStats, 71 | options ...ReplicatedRouteOption, 72 | ) Route { 73 | if len(servers) == 0 { 74 | panic("replicated route: servers can not be empty") 75 | } 76 | 77 | conf := &replicatedRouteConfig{ 78 | memScore: func(mem float64) float64 { 79 | return mem 80 | }, 81 | randFunc: func(n uint64) uint64 { 82 | return uint64(rand.Intn(int(n))) 83 | }, 84 | minPercent: 1.0, // 1% 85 | } 86 | 87 | for _, opt := range options { 88 | opt(conf) 89 | } 90 | 91 | return &replicatedRoute{ 92 | configServers: servers, 93 | 94 | conf: conf, 95 | stats: stats, 96 | } 97 | } 98 | 99 | // NewSelector ... 100 | func (r *replicatedRoute) NewSelector() Selector { 101 | s := &replicatedRouteSelector{ 102 | route: r, 103 | } 104 | s.remainingServers = s.computeRemainingServers() 105 | return s 106 | } 107 | 108 | // AllServerIDs returns the list of all possible servers 109 | func (r *replicatedRoute) AllServerIDs() []ServerID { 110 | return r.configServers 111 | } 112 | 113 | func (s *replicatedRouteSelector) getFailedServers() map[ServerID]struct{} { 114 | if s.failedServers == nil { 115 | s.failedServers = map[ServerID]struct{}{} 116 | } 117 | return s.failedServers 118 | } 119 | 120 | // SetFailedServer ... 
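//
// SetFailedServer marks the server as failed for this selector; the first time
// a given server is reported it resets the current choice, recomputes the
// remaining servers, and notifies the shared ServerStats.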
121 | func (s *replicatedRouteSelector) SetFailedServer(server ServerID) { 122 | failed := s.getFailedServers() 123 | 124 | _, existed := failed[server] 125 | failed[server] = struct{}{} 126 | 127 | if !existed { 128 | s.Reset() 129 | s.remainingServers = s.computeRemainingServers() 130 | s.route.stats.NotifyServerFailed(server) 131 | } 132 | } 133 | 134 | // HasNextAvailableServer checks whether there is a next available server to fall back to 135 | func (s *replicatedRouteSelector) HasNextAvailableServer() bool { 136 | return len(s.failedServers) < len(s.route.configServers) 137 | } 138 | 139 | func (s *replicatedRouteSelector) computeRemainingServers() []ServerID { 140 | remainingServers := make([]ServerID, 0, len(s.route.configServers)) 141 | for _, server := range s.route.configServers { 142 | if s.route.stats.IsServerFailed(server) { 143 | s.getFailedServers()[server] = struct{}{} 144 | continue 145 | } 146 | 147 | if s.failedServers != nil { 148 | _, existed := s.getFailedServers()[server] 149 | if existed { 150 | continue 151 | } 152 | } 153 | 154 | remainingServers = append(remainingServers, server) 155 | } 156 | 157 | if len(remainingServers) == 0 { 158 | return s.route.configServers 159 | } 160 | return remainingServers 161 | } 162 | 163 | // SelectServer chooses a server id and keeps returning it until Reset is called or a failed server is added 164 | func (s *replicatedRouteSelector) SelectServer(string) ServerID { 165 | if s.alreadyChosen { 166 | return s.chosenServer 167 | } 168 | 169 | for _, server := range s.remainingServers { 170 | w := s.route.conf.memScore(s.route.stats.GetMemUsage(server)) 171 | // raw weights for now; turned into prefix sums inside computeChosenServer 172 | s.weightAccum = append(s.weightAccum, w) 173 | } 174 | 175 | randVal := s.route.conf.randFunc(RandomMaxValues) 176 | 177 | index, weights := computeChosenServer(s.weightAccum, s.route.conf.minPercent, randVal) 178 | s.weightAccum = weights 179 | 180 | s.alreadyChosen = true 181 | s.chosenServer = s.remainingServers[index] 182 | return s.chosenServer 183 | } 184 | 185 | // SelectForDelete chooses the servers used for deleting 186 | func (s *replicatedRouteSelector) SelectForDelete(string) []ServerID { 187 | return s.remainingServers 188 | } 189 | 190 | // Reset the selection 191 | func (s *replicatedRouteSelector) Reset() { 192 | s.alreadyChosen = false 193 | s.weightAccum = s.weightAccum[:0] 194 | } 195 | 196 | func computeWeightAccumWithMinPercent( 197 | weights []float64, minPercent float64, 198 | ) []float64 { 199 | sum := 0.0 200 | for i, w := range weights { 201 | if w < 1.0 { 202 | weights[i] = 1.0 203 | w = 1.0 204 | } 205 | sum += w 206 | } 207 | 208 | belowMinCount := 0 209 | belowMinWeightSum := float64(0) 210 | minWeight := minPercent * sum / 100.0 211 | 212 | for _, w := range weights { 213 | if w < minWeight { 214 | belowMinWeightSum += w 215 | belowMinCount++ 216 | } 217 | } 218 | 219 | ratio := 100.0 / minPercent / float64(belowMinCount) 220 | newMinWeight := (sum - belowMinWeightSum) / (ratio - 1.0) 221 | for index, w := range weights { 222 | if w < newMinWeight { 223 | weights[index] = newMinWeight 224 | } 225 | } 226 | 227 | for i := 1; i < len(weights); i++ { 228 | weights[i] = weights[i] + weights[i-1] 229 | } 230 | return weights 231 | } 232 | 233 | func computeChosenServer( 234 | weights []float64, 235 | minPercent float64, 236 | randVal uint64, 237 | ) (int, []float64) { 238 | weights = computeWeightAccumWithMinPercent(weights, minPercent) 239 | sum := weights[len(weights)-1] 240 | 241 | chosenWeight := float64(randVal) / 
float64(RandomMaxValues) * sum 242 | 243 | for i, w := range weights { 244 | if chosenWeight < w { 245 | return i, weights 246 | } 247 | } 248 | return 0, weights 249 | } 250 | -------------------------------------------------------------------------------- /proxy/replicated_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "fmt" 5 | "github.com/stretchr/testify/assert" 6 | "math" 7 | "testing" 8 | ) 9 | 10 | type replicatedRouteTest struct { 11 | stats *ServerStatsMock 12 | randFunc func(n uint64) uint64 13 | randArgs []uint64 14 | 15 | route Route 16 | selector Selector 17 | } 18 | 19 | func newReplicatedRouteTest(options ...ReplicatedRouteOption) *replicatedRouteTest { 20 | r := &replicatedRouteTest{} 21 | 22 | r.stats = &ServerStatsMock{ 23 | NotifyServerFailedFunc: func(server ServerID) { 24 | }, 25 | } 26 | 27 | randFunc := func(n uint64) uint64 { 28 | r.randArgs = append(r.randArgs, n) 29 | return r.randFunc(n) 30 | } 31 | 32 | r.stubServerFailed(false) 33 | 34 | opts := []ReplicatedRouteOption{ 35 | WithRandFunc(randFunc), 36 | } 37 | opts = append(opts, options...) 38 | 39 | r.route = NewReplicatedRoute( 40 | []ServerID{ 41 | serverID1, 42 | serverID2, 43 | }, 44 | r.stats, 45 | opts..., 46 | ) 47 | r.selector = r.route.NewSelector() 48 | 49 | return r 50 | } 51 | 52 | func (r *replicatedRouteTest) stubGetMem(values ...float64) { 53 | r.stats.GetMemUsageFunc = func(server ServerID) float64 { 54 | index := len(r.stats.GetMemUsageCalls()) - 1 55 | return values[index] 56 | } 57 | } 58 | 59 | func (r *replicatedRouteTest) stubRand(val uint64) { 60 | r.randFunc = func(n uint64) uint64 { 61 | return val 62 | } 63 | } 64 | 65 | func (r *replicatedRouteTest) stubServerFailed(failed bool) { 66 | r.stats.IsServerFailedFunc = func(server ServerID) bool { 67 | return failed 68 | } 69 | } 70 | 71 | func TestReplicatedRoute(t *testing.T) { 72 | t.Run("simple", func(t *testing.T) { 73 | r := newReplicatedRouteTest() 74 | 75 | r.stubGetMem( 76 | 50, 50, 77 | 50, 50, 78 | ) 79 | 80 | r.stubRand(499000) 81 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 82 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 83 | 84 | r.stubRand(500000) 85 | 86 | // Get Again 87 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 88 | 89 | // Get Again after Reset 90 | r.selector.Reset() 91 | assert.Equal(t, serverID2, r.selector.SelectServer("")) 92 | 93 | assert.Equal(t, []uint64{RandomMaxValues, RandomMaxValues}, r.randArgs) 94 | 95 | getMemCalls := r.stats.GetMemUsageCalls() 96 | assert.Equal(t, 4, len(getMemCalls)) 97 | 98 | assert.Equal(t, serverID1, getMemCalls[0].Server) 99 | assert.Equal(t, serverID2, getMemCalls[1].Server) 100 | 101 | assert.Equal(t, serverID1, getMemCalls[2].Server) 102 | assert.Equal(t, serverID2, getMemCalls[3].Server) 103 | 104 | getFailedCalls := r.stats.IsServerFailedCalls() 105 | assert.Equal(t, 2, len(getFailedCalls)) 106 | assert.Equal(t, serverID1, getFailedCalls[0].Server) 107 | assert.Equal(t, serverID2, getFailedCalls[1].Server) 108 | }) 109 | 110 | t.Run("weight-is-changed-in-between", func(t *testing.T) { 111 | r := newReplicatedRouteTest() 112 | 113 | r.stubGetMem( 114 | 50, 50, 115 | 60, 40, 116 | ) 117 | 118 | r.stubRand(499000) 119 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 120 | 121 | r.stubRand(500000) 122 | 123 | // Get Again 124 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 125 | 126 | // Get Again after Reset 127 | r.selector.Reset() 
128 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 129 | 130 | assert.Equal(t, []ServerID{serverID1, serverID2}, r.selector.SelectForDelete("")) 131 | }) 132 | 133 | t.Run("set-failed-server--fallback-to-another", func(t *testing.T) { 134 | r := newReplicatedRouteTest() 135 | 136 | r.stubGetMem( 137 | 50, 50, 138 | 50, 50, 139 | ) 140 | 141 | r.stubRand(499000) 142 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 143 | 144 | r.selector.SetFailedServer(serverID1) 145 | 146 | r.stubRand(499000) 147 | assert.Equal(t, serverID2, r.selector.SelectServer("")) 148 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 149 | 150 | assert.Equal(t, []ServerID{serverID2}, r.selector.SelectForDelete("")) 151 | 152 | assert.Equal(t, 1, len(r.stats.NotifyServerFailedCalls())) 153 | assert.Equal(t, serverID1, r.stats.NotifyServerFailedCalls()[0].Server) 154 | }) 155 | 156 | t.Run("all-servers-failed--use-normal-random", func(t *testing.T) { 157 | r := newReplicatedRouteTest() 158 | 159 | r.stubGetMem( 160 | 50, 50, 161 | 50, 50, 162 | ) 163 | 164 | r.stubRand(499000) 165 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 166 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 167 | 168 | r.selector.SetFailedServer(serverID1) 169 | r.selector.SetFailedServer(serverID2) 170 | 171 | r.stubRand(499000) 172 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 173 | assert.Equal(t, false, r.selector.HasNextAvailableServer()) 174 | 175 | assert.Equal(t, []ServerID{serverID1, serverID2}, r.selector.SelectForDelete("")) 176 | }) 177 | 178 | t.Run("set-failed-server--but-status-all-server-already-failed", func(t *testing.T) { 179 | r := newReplicatedRouteTest() 180 | 181 | r.stubGetMem( 182 | 50, 50, 183 | 50, 50, 184 | ) 185 | 186 | r.stubServerFailed(true) 187 | r.selector.SetFailedServer(serverID1) 188 | 189 | r.stubRand(499000) 190 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 191 | assert.Equal(t, false, r.selector.HasNextAvailableServer()) 192 | 193 | assert.Equal(t, []ServerID{serverID1, serverID2}, r.selector.SelectForDelete("")) 194 | 195 | assert.Equal(t, 1, len(r.stats.NotifyServerFailedCalls())) 196 | assert.Equal(t, serverID1, r.stats.NotifyServerFailedCalls()[0].Server) 197 | }) 198 | 199 | t.Run("with-mem-zero-use-default-1-percent-min", func(t *testing.T) { 200 | r := newReplicatedRouteTest() 201 | 202 | r.stubGetMem( 203 | 0, 50, 204 | ) 205 | 206 | r.stubRand(1000) // 1 / 1000 207 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 208 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 209 | 210 | assert.Equal(t, []ServerID{serverID1, serverID2}, r.selector.SelectForDelete("")) 211 | }) 212 | 213 | t.Run("with-mem-zero-use-default-3-percent-min", func(t *testing.T) { 214 | r := newReplicatedRouteTest(WithMinPercentage(3.0)) 215 | 216 | r.stubGetMem( 217 | 0, 50, 218 | 0, 50, 219 | ) 220 | 221 | r.stubRand(30000) 222 | assert.Equal(t, serverID2, r.selector.SelectServer("")) 223 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 224 | 225 | r.selector.Reset() 226 | 227 | r.stubRand(29000) 228 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 229 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 230 | 231 | assert.Equal(t, []ServerID{serverID1, serverID2}, r.selector.SelectForDelete("")) 232 | }) 233 | 234 | t.Run("with-mem-non-zero--using-another-scoring-function", func(t *testing.T) { 235 | r := newReplicatedRouteTest( 236 | WithMemoryScoringFunc(func(mem float64) float64 { 237 | return 
math.Sqrt(mem) 238 | }), 239 | ) 240 | 241 | r.stubGetMem( 242 | 9, 16, 243 | 9, 16, 244 | ) 245 | // 3, 4 => 3 / 7 = 0.42857142857 246 | 247 | r.stubRand(uint64(float64(RandomMaxValues) * 0.42)) 248 | assert.Equal(t, serverID1, r.selector.SelectServer("")) 249 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 250 | 251 | r.selector.Reset() 252 | 253 | r.stubRand(uint64(float64(RandomMaxValues) * 0.43)) 254 | assert.Equal(t, serverID2, r.selector.SelectServer("")) 255 | assert.Equal(t, true, r.selector.HasNextAvailableServer()) 256 | }) 257 | } 258 | 259 | func TestReplicatedRoute_With_Real_Rand(*testing.T) { 260 | stats := &ServerStatsMock{ 261 | GetMemUsageFunc: func(server ServerID) float64 { 262 | return 50 263 | }, 264 | IsServerFailedFunc: func(server ServerID) bool { 265 | return false 266 | }, 267 | } 268 | route := NewReplicatedRoute( 269 | []ServerID{ 270 | serverID1, 271 | serverID2, 272 | }, 273 | stats, 274 | ) 275 | selector := route.NewSelector() 276 | 277 | counters := map[ServerID]int{} 278 | for i := 0; i < 1000; i++ { 279 | server := selector.SelectServer("") 280 | 281 | counters[server]++ 282 | 283 | selector.Reset() 284 | } 285 | 286 | fmt.Println(counters) 287 | } 288 | 289 | func TestReplicatedRoute_With_Empty_Server_List(t *testing.T) { 290 | stats := &ServerStatsMock{} 291 | 292 | assert.PanicsWithValue(t, "replicated route: servers can not be empty", func() { 293 | NewReplicatedRoute( 294 | []ServerID{}, 295 | stats, 296 | ) 297 | }) 298 | } 299 | 300 | func TestComputeWeightAccumWithMinPercent(t *testing.T) { 301 | table := []struct { 302 | name string 303 | weights []float64 304 | minPercent float64 305 | 306 | newWeights []float64 307 | }{ 308 | { 309 | name: "empty", 310 | weights: nil, 311 | minPercent: 1.0, 312 | newWeights: nil, 313 | }, 314 | { 315 | name: "no-min", 316 | weights: []float64{1000, 2000, 3000}, 317 | minPercent: 1.0, 318 | newWeights: []float64{1000, 3000, 6000}, 319 | }, 320 | { 321 | name: "with-one-zero", 322 | weights: []float64{1000, 2000, 0}, 323 | minPercent: 1.0, 324 | newWeights: []float64{1000, 3000, 3000 + 3000.0/99.0}, 325 | }, 326 | { 327 | name: "with-one-zero-in-middle", 328 | weights: []float64{100, 200, 0, 300}, 329 | minPercent: 1.0, 330 | newWeights: []float64{100, 300, 300 + 600.0/99.0, 600 + 600.0/99.0}, 331 | }, 332 | { 333 | name: "with-one-zero-in-the-beginning", 334 | weights: []float64{0, 100, 200, 300}, 335 | minPercent: 1.0, 336 | newWeights: []float64{ 337 | 600.0 / 99.0, 338 | 100 + 600.0/99.0, 339 | 300 + 600.0/99.0, 340 | 600 + 600.0/99.0, 341 | }, 342 | }, 343 | { 344 | name: "with-two-zeros", 345 | weights: []float64{0, 10, 0, 30}, 346 | minPercent: 4.0, 347 | newWeights: []float64{ 348 | 40.0 / 11.5, 349 | 10 + 40.0/11.5, 350 | 10 + 80.0/11.5, 351 | 40 + 80.0/11.5, 352 | }, 353 | }, 354 | { 355 | name: "all-zeros", 356 | weights: []float64{0, 0, 0}, 357 | minPercent: 4.0, 358 | newWeights: []float64{ 359 | 1.0, 360 | 2.0, 361 | 3.0, 362 | }, 363 | }, 364 | } 365 | 366 | for _, e := range table { 367 | t.Run(e.name, func(t *testing.T) { 368 | weights := computeWeightAccumWithMinPercent(e.weights, e.minPercent) 369 | assert.Equal(t, e.newWeights, weights) 370 | }) 371 | } 372 | } 373 | -------------------------------------------------------------------------------- /proxy/stats.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "fmt" 5 | mcstats "github.com/QuangTung97/go-memcache/memcache/stats" 6 | "log" 7 | "sync" 8 | 
"sync/atomic" 9 | "time" 10 | ) 11 | 12 | //go:generate moq -rm -out stats_mocks_test.go . StatsClient 13 | 14 | type serverStatus struct { 15 | memory atomic.Uint64 16 | failed atomic.Bool 17 | } 18 | 19 | type simpleStatsConfig struct { 20 | errorLogger func(err error) 21 | memLogger func(server ServerID, mem uint64, err error) 22 | checkDuration time.Duration 23 | } 24 | 25 | // SimpleStatsOption ... 26 | type SimpleStatsOption func(conf *simpleStatsConfig) 27 | 28 | // WithSimpleStatsErrorLogger ... 29 | func WithSimpleStatsErrorLogger(logger func(err error)) SimpleStatsOption { 30 | return func(conf *simpleStatsConfig) { 31 | conf.errorLogger = logger 32 | } 33 | } 34 | 35 | // WithSimpleStatsMemLogger ... 36 | func WithSimpleStatsMemLogger(memLogger func(server ServerID, mem uint64, err error)) SimpleStatsOption { 37 | return func(conf *simpleStatsConfig) { 38 | conf.memLogger = memLogger 39 | } 40 | } 41 | 42 | // WithSimpleStatsCheckDuration ... 43 | func WithSimpleStatsCheckDuration(d time.Duration) SimpleStatsOption { 44 | return func(conf *simpleStatsConfig) { 45 | conf.checkDuration = d 46 | } 47 | } 48 | 49 | func computeSimpleStatsConfig(options ...SimpleStatsOption) *simpleStatsConfig { 50 | conf := &simpleStatsConfig{ 51 | errorLogger: func(err error) { 52 | log.Println("[ERROR] SimpleServerStats:", err) 53 | }, 54 | memLogger: func(server ServerID, mem uint64, err error) { 55 | }, 56 | checkDuration: 30 * time.Second, 57 | } 58 | for _, option := range options { 59 | option(conf) 60 | } 61 | return conf 62 | } 63 | 64 | // SimpleServerStats ... 65 | type SimpleServerStats struct { 66 | conf *simpleStatsConfig 67 | 68 | wg sync.WaitGroup 69 | 70 | //revive:disable-next-line:nested-structs 71 | clientSignals map[ServerID]chan struct{} 72 | 73 | statuses map[ServerID]*serverStatus 74 | 75 | newClientFunc func(server ServerID) StatsClient 76 | } 77 | 78 | // StatsClient ... 79 | type StatsClient interface { 80 | // GetMemUsage get memory usage in bytes 81 | GetMemUsage() (uint64, error) 82 | 83 | // Close client 84 | Close() error 85 | } 86 | 87 | const signalChanSize = 128 88 | 89 | // NewSimpleServerStats ... 90 | func NewSimpleServerStats[S ServerConfig]( 91 | servers []S, 92 | factory func(conf S) StatsClient, 93 | options ...SimpleStatsOption, 94 | ) *SimpleServerStats { 95 | conf := computeSimpleStatsConfig(options...) 
96 |     // Each server gets: a StatsClient owned by a single background goroutine, a buffered signal channel that NotifyServerFailed uses to request an immediate refresh, and a serverStatus record holding the last observed memory usage plus a failure flag
97 |     clients := map[ServerID]StatsClient{}
98 |     clientSignals := map[ServerID]chan struct{}{}
99 |     statuses := map[ServerID]*serverStatus{}
100 |     confMap := map[ServerID]S{}
101 | 
102 |     for _, server := range servers {
103 |         confMap[server.GetID()] = server
104 | 
105 |         client := factory(server)
106 | 
107 |         clients[server.GetID()] = client
108 |         clientSignals[server.GetID()] = make(chan struct{}, signalChanSize)
109 |         statuses[server.GetID()] = &serverStatus{}
110 |     }
111 | 
112 |     s := &SimpleServerStats{
113 |         conf: conf,
114 | 
115 |         clientSignals: clientSignals,
116 |         statuses: statuses,
117 |         newClientFunc: func(server ServerID) StatsClient {
118 |             return factory(confMap[server])
119 |         },
120 |     }
121 |     // fetch the initial memory usage synchronously, so readings are available as soon as the constructor returns
122 |     for _, server := range servers {
123 |         client := clients[server.GetID()]
124 |         clients[server.GetID()] = s.clientGetMemory(server.GetID(), client)
125 |     }
126 | 
127 |     s.wg.Add(len(servers))
128 | 
129 |     for _, server := range servers {
130 |         serverID := server.GetID()
131 | 
132 |         client := clients[serverID]
133 |         ch := clientSignals[serverID]
134 | 
135 |         go func() {
136 |             defer s.wg.Done()
137 | 
138 |             s.handleClient(serverID, client, ch)
139 |         }()
140 |     }
141 | 
142 |     return s
143 | }
144 | // clientGetMemory refreshes the cached memory usage; when the server was marked failed, it first replaces the client with a fresh connection
145 | func (s *SimpleServerStats) clientGetMemory(server ServerID, client StatsClient) StatsClient {
146 |     status := s.statuses[server]
147 | 
148 |     if status.failed.Load() {
149 |         _ = client.Close()
150 |         client = s.newClientFunc(server)
151 |     }
152 | 
153 |     mem, err := client.GetMemUsage()
154 |     s.conf.memLogger(server, mem, err)
155 |     if err != nil {
156 |         s.conf.errorLogger(err)
157 |         status.failed.Store(true)
158 |         return client
159 |     }
160 |     status.failed.Store(false)
161 |     status.memory.Store(mem)
162 |     return client
163 | }
164 | // drainSignal empties up to signalChanSize-1 pending notifications, so a burst of NotifyServerFailed calls triggers only a single refresh
165 | func drainSignal(signal <-chan struct{}) {
166 |     for i := 0; i < signalChanSize-1; i++ {
167 |         select {
168 |         case <-signal:
169 |         default:
170 |         }
171 |     }
172 | }
173 | // handleClient is the per-server background loop: it refreshes on NotifyServerFailed signals (debounced via alreadySignaled) and on every checkDuration tick, and exits once the signal channel is closed
174 | func (s *SimpleServerStats) handleClient(server ServerID, client StatsClient, signal <-chan struct{}) {
175 |     alreadySignaled := false
176 |     timer := time.NewTimer(s.conf.checkDuration)
177 | 
178 |     for {
179 |         select {
180 |         case _, ok := <-signal:
181 |             if !ok {
182 |                 _ = client.Close()
183 |                 return
184 |             }
185 |             drainSignal(signal)
186 | 
187 |             if alreadySignaled {
188 |                 continue
189 |             }
190 |             alreadySignaled = true
191 | 
192 |             if !timer.Stop() {
193 |                 <-timer.C
194 |             }
195 |             timer.Reset(s.conf.checkDuration)
196 | 
197 |             client = s.clientGetMemory(server, client)
198 | 
199 |         case <-timer.C:
200 |             client = s.clientGetMemory(server, client)
201 |             alreadySignaled = false
202 | 
203 |             timer.Reset(s.conf.checkDuration)
204 |         }
205 |     }
206 | }
207 | 
208 | // IsServerFailed checks whether the server is currently not connected
209 | func (s *SimpleServerStats) IsServerFailed(server ServerID) bool {
210 |     return s.statuses[server].failed.Load()
211 | }
212 | 
213 | // NotifyServerFailed ...
214 | func (s *SimpleServerStats) NotifyServerFailed(server ServerID) {
215 |     ch := s.clientSignals[server]
216 |     select {
217 |     case ch <- struct{}{}:
218 |     default:
219 |     }
220 | }
221 | 
222 | // GetMemUsage returns memory usage in bytes
223 | func (s *SimpleServerStats) GetMemUsage(server ServerID) float64 {
224 |     status := s.statuses[server]
225 |     return float64(status.memory.Load())
226 | }
227 | 
228 | // Shutdown ...
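// It closes every signal channel; each per-server goroutine then closes its
// StatsClient and returns, and wg.Wait blocks until all of them have exited.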
229 | func (s *SimpleServerStats) Shutdown() { 230 | for _, ch := range s.clientSignals { 231 | close(ch) 232 | } 233 | s.wg.Wait() 234 | } 235 | 236 | type simpleStatsClient struct { 237 | client *mcstats.Client 238 | } 239 | 240 | // NewSimpleStatsClient ... 241 | func NewSimpleStatsClient(conf SimpleServerConfig) StatsClient { 242 | client := mcstats.New(fmt.Sprintf("%s:%d", conf.Host, conf.Port)) 243 | return &simpleStatsClient{ 244 | client: client, 245 | } 246 | } 247 | 248 | // GetMemUsage ... 249 | func (s *simpleStatsClient) GetMemUsage() (uint64, error) { 250 | slabs, err := s.client.GetSlabsStats() 251 | if err != nil { 252 | return 0, err 253 | } 254 | 255 | mem := uint64(0) 256 | for _, slabID := range slabs.SlabIDs { 257 | slab := slabs.Slabs[slabID] 258 | mem += uint64(slab.ChunkSize) * slab.UsedChunks 259 | } 260 | 261 | return mem, nil 262 | } 263 | 264 | // Close ... 265 | func (s *simpleStatsClient) Close() error { 266 | return s.client.Close() 267 | } 268 | -------------------------------------------------------------------------------- /proxy/stats_mocks_test.go: -------------------------------------------------------------------------------- 1 | // Code generated by moq; DO NOT EDIT. 2 | // github.com/matryer/moq 3 | 4 | package proxy 5 | 6 | import ( 7 | "sync" 8 | ) 9 | 10 | // Ensure, that StatsClientMock does implement StatsClient. 11 | // If this is not the case, regenerate this file with moq. 12 | var _ StatsClient = &StatsClientMock{} 13 | 14 | // StatsClientMock is a mock implementation of StatsClient. 15 | // 16 | // func TestSomethingThatUsesStatsClient(t *testing.T) { 17 | // 18 | // // make and configure a mocked StatsClient 19 | // mockedStatsClient := &StatsClientMock{ 20 | // CloseFunc: func() error { 21 | // panic("mock out the Close method") 22 | // }, 23 | // GetMemUsageFunc: func() (uint64, error) { 24 | // panic("mock out the GetMemUsage method") 25 | // }, 26 | // } 27 | // 28 | // // use mockedStatsClient in code that requires StatsClient 29 | // // and then make assertions. 30 | // 31 | // } 32 | type StatsClientMock struct { 33 | // CloseFunc mocks the Close method. 34 | CloseFunc func() error 35 | 36 | // GetMemUsageFunc mocks the GetMemUsage method. 37 | GetMemUsageFunc func() (uint64, error) 38 | 39 | // calls tracks calls to the methods. 40 | calls struct { 41 | // Close holds details about calls to the Close method. 42 | Close []struct { 43 | } 44 | // GetMemUsage holds details about calls to the GetMemUsage method. 45 | GetMemUsage []struct { 46 | } 47 | } 48 | lockClose sync.RWMutex 49 | lockGetMemUsage sync.RWMutex 50 | } 51 | 52 | // Close calls CloseFunc. 53 | func (mock *StatsClientMock) Close() error { 54 | if mock.CloseFunc == nil { 55 | panic("StatsClientMock.CloseFunc: method is nil but StatsClient.Close was just called") 56 | } 57 | callInfo := struct { 58 | }{} 59 | mock.lockClose.Lock() 60 | mock.calls.Close = append(mock.calls.Close, callInfo) 61 | mock.lockClose.Unlock() 62 | return mock.CloseFunc() 63 | } 64 | 65 | // CloseCalls gets all the calls that were made to Close. 66 | // Check the length with: 67 | // 68 | // len(mockedStatsClient.CloseCalls()) 69 | func (mock *StatsClientMock) CloseCalls() []struct { 70 | } { 71 | var calls []struct { 72 | } 73 | mock.lockClose.RLock() 74 | calls = mock.calls.Close 75 | mock.lockClose.RUnlock() 76 | return calls 77 | } 78 | 79 | // GetMemUsage calls GetMemUsageFunc. 
80 | func (mock *StatsClientMock) GetMemUsage() (uint64, error) { 81 | if mock.GetMemUsageFunc == nil { 82 | panic("StatsClientMock.GetMemUsageFunc: method is nil but StatsClient.GetMemUsage was just called") 83 | } 84 | callInfo := struct { 85 | }{} 86 | mock.lockGetMemUsage.Lock() 87 | mock.calls.GetMemUsage = append(mock.calls.GetMemUsage, callInfo) 88 | mock.lockGetMemUsage.Unlock() 89 | return mock.GetMemUsageFunc() 90 | } 91 | 92 | // GetMemUsageCalls gets all the calls that were made to GetMemUsage. 93 | // Check the length with: 94 | // 95 | // len(mockedStatsClient.GetMemUsageCalls()) 96 | func (mock *StatsClientMock) GetMemUsageCalls() []struct { 97 | } { 98 | var calls []struct { 99 | } 100 | mock.lockGetMemUsage.RLock() 101 | calls = mock.calls.GetMemUsage 102 | mock.lockGetMemUsage.RUnlock() 103 | return calls 104 | } 105 | -------------------------------------------------------------------------------- /proxy/stats_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/stretchr/testify/assert" 7 | "sync" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestSimpleStatsClient(t *testing.T) { 13 | client := NewSimpleStatsClient(SimpleServerConfig{ 14 | Host: "localhost", 15 | Port: 11211, 16 | }) 17 | fmt.Println(client.GetMemUsage()) 18 | 19 | assert.Equal(t, nil, client.Close()) 20 | } 21 | 22 | type serverStatsTest struct { 23 | clients map[ServerID]*StatsClientMock 24 | stats *SimpleServerStats 25 | 26 | mut sync.Mutex 27 | newArgs []SimpleServerConfig 28 | 29 | newFunc func(conf SimpleServerConfig) StatsClient 30 | } 31 | 32 | func (s *serverStatsTest) getNewArgs() []SimpleServerConfig { 33 | s.mut.Lock() 34 | defer s.mut.Unlock() 35 | 36 | result := make([]SimpleServerConfig, len(s.newArgs)) 37 | copy(result, s.newArgs) 38 | 39 | return result 40 | } 41 | 42 | func newServerStatsTest(t *testing.T, options ...SimpleStatsOption) *serverStatsTest { 43 | s := &serverStatsTest{} 44 | 45 | s.clients = map[ServerID]*StatsClientMock{ 46 | serverID1: { 47 | CloseFunc: func() error { return nil }, 48 | }, 49 | serverID2: { 50 | CloseFunc: func() error { return nil }, 51 | }, 52 | } 53 | 54 | s.stubGetMem(serverID1, 8000, nil) 55 | s.stubGetMem(serverID2, 9000, nil) 56 | 57 | s.newFunc = func(conf SimpleServerConfig) StatsClient { 58 | return s.clients[conf.ID] 59 | } 60 | 61 | s.stats = NewSimpleServerStats[SimpleServerConfig]([]SimpleServerConfig{ 62 | { 63 | ID: serverID1, 64 | Host: "localhost", 65 | Port: 11201, 66 | }, 67 | { 68 | ID: serverID2, 69 | Host: "localhost", 70 | Port: 11202, 71 | }, 72 | }, func(conf SimpleServerConfig) StatsClient { 73 | s.mut.Lock() 74 | s.newArgs = append(s.newArgs, conf) 75 | s.mut.Unlock() 76 | 77 | return s.newFunc(conf) 78 | }, options...) 
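// NewSimpleServerStats invokes the factory once per configured server during
// construction, so newArgs already contains both server configs by the time
// the constructor returns; the assertion below verifies exactly that.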
79 | 80 | assert.Equal(t, []SimpleServerConfig{ 81 | { 82 | ID: serverID1, 83 | Host: "localhost", 84 | Port: 11201, 85 | }, 86 | { 87 | ID: serverID2, 88 | Host: "localhost", 89 | Port: 11202, 90 | }, 91 | }, s.getNewArgs()) 92 | 93 | return s 94 | } 95 | 96 | func (s *serverStatsTest) stubGetMem(serverID ServerID, mem uint64, err error) { 97 | s.clients[serverID].GetMemUsageFunc = func() (uint64, error) { 98 | return mem, err 99 | } 100 | } 101 | 102 | func TestSimpleServerStats(t *testing.T) { 103 | t.Run("get-mem", func(t *testing.T) { 104 | s := newServerStatsTest(t) 105 | defer s.stats.Shutdown() 106 | 107 | getCalls := s.clients[serverID1].GetMemUsageCalls() 108 | assert.Equal(t, 1, len(getCalls)) 109 | 110 | assert.Equal(t, float64(8000), s.stats.GetMemUsage(serverID1)) 111 | assert.Equal(t, float64(9000), s.stats.GetMemUsage(serverID2)) 112 | 113 | assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 114 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 115 | 116 | s.stubGetMem(serverID1, 18000, nil) 117 | 118 | s.stats.NotifyServerFailed(serverID1) 119 | time.Sleep(40 * time.Millisecond) 120 | 121 | assert.Equal(t, float64(18000), s.stats.GetMemUsage(serverID1)) 122 | 123 | getCalls = s.clients[serverID1].GetMemUsageCalls() 124 | assert.Equal(t, 2, len(getCalls)) 125 | 126 | // Check Failed Again 127 | assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 128 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 129 | }) 130 | 131 | t.Run("server-get-mem-error--is-server-failed", func(t *testing.T) { 132 | s := newServerStatsTest(t, WithSimpleStatsCheckDuration(200*time.Millisecond)) 133 | 134 | assert.Equal(t, 2, len(s.getNewArgs())) 135 | 136 | getCalls := s.clients[serverID1].GetMemUsageCalls() 137 | assert.Equal(t, 1, len(getCalls)) 138 | 139 | assert.Equal(t, float64(8000), s.stats.GetMemUsage(serverID1)) 140 | assert.Equal(t, float64(9000), s.stats.GetMemUsage(serverID2)) 141 | 142 | assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 143 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 144 | 145 | s.stubGetMem(serverID1, 0, errors.New("some error")) 146 | newClient := &StatsClientMock{ 147 | CloseFunc: func() error { return nil }, 148 | GetMemUsageFunc: func() (uint64, error) { 149 | return 888, nil 150 | }, 151 | } 152 | s.newFunc = func(conf SimpleServerConfig) StatsClient { 153 | return newClient 154 | } 155 | 156 | s.stats.NotifyServerFailed(serverID1) 157 | time.Sleep(40 * time.Millisecond) 158 | 159 | assert.Equal(t, float64(8000), s.stats.GetMemUsage(serverID1)) 160 | 161 | assert.Equal(t, true, s.stats.IsServerFailed(serverID1)) 162 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 163 | assert.Equal(t, 2, len(s.getNewArgs())) 164 | 165 | // Notify Again But Nothing Is Called 166 | s.stats.NotifyServerFailed(serverID1) 167 | time.Sleep(40 * time.Millisecond) 168 | 169 | assert.Equal(t, 2, len(s.getNewArgs())) 170 | 171 | // Wait for Stats Check Duration 172 | time.Sleep(140 * time.Millisecond) 173 | 174 | assert.Equal(t, float64(888), s.stats.GetMemUsage(serverID1)) 175 | 176 | // Check client calls 177 | getCalls = s.clients[serverID1].GetMemUsageCalls() 178 | assert.Equal(t, 2, len(getCalls)) 179 | 180 | assert.Equal(t, 1, len(newClient.GetMemUsageCalls())) 181 | 182 | assert.Equal(t, 3, len(s.getNewArgs())) 183 | assert.Equal(t, SimpleServerConfig{ 184 | ID: serverID1, 185 | Host: "localhost", 186 | Port: 11201, 187 | }, s.getNewArgs()[2]) 188 | 189 | // Check Failed Again 190 | assert.Equal(t, false, 
s.stats.IsServerFailed(serverID1)) 191 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 192 | 193 | // Check Call After Shutdown 194 | s.stats.Shutdown() 195 | 196 | assert.Equal(t, 1, len(s.clients[serverID1].CloseCalls())) 197 | assert.Equal(t, 1, len(s.clients[serverID2].CloseCalls())) 198 | assert.Equal(t, 1, len(newClient.CloseCalls())) 199 | }) 200 | 201 | t.Run("server-get-mem-error--is-server-failed--with-options", func(t *testing.T) { 202 | s := newServerStatsTest(t, 203 | WithSimpleStatsCheckDuration(200*time.Millisecond), 204 | WithSimpleStatsErrorLogger(func(err error) { 205 | fmt.Println("Option Logger:", err) 206 | }), 207 | WithSimpleStatsMemLogger(func(server ServerID, mem uint64, err error) { 208 | fmt.Println("MEM USED:", server, mem, err) 209 | }), 210 | ) 211 | 212 | getCalls := s.clients[serverID1].GetMemUsageCalls() 213 | assert.Equal(t, 1, len(getCalls)) 214 | 215 | assert.Equal(t, float64(8000), s.stats.GetMemUsage(serverID1)) 216 | assert.Equal(t, float64(9000), s.stats.GetMemUsage(serverID2)) 217 | 218 | assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 219 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 220 | 221 | s.stubGetMem(serverID1, 0, errors.New("some error")) 222 | newClient := &StatsClientMock{ 223 | CloseFunc: func() error { return nil }, 224 | GetMemUsageFunc: func() (uint64, error) { 225 | return 888, nil 226 | }, 227 | } 228 | s.newFunc = func(conf SimpleServerConfig) StatsClient { 229 | return newClient 230 | } 231 | 232 | s.stats.NotifyServerFailed(serverID1) 233 | time.Sleep(40 * time.Millisecond) 234 | 235 | assert.Equal(t, float64(8000), s.stats.GetMemUsage(serverID1)) 236 | 237 | assert.Equal(t, true, s.stats.IsServerFailed(serverID1)) 238 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 239 | 240 | // Wait for Stats Duration Timeout 241 | time.Sleep(180 * time.Millisecond) 242 | 243 | assert.Equal(t, float64(888), s.stats.GetMemUsage(serverID1)) 244 | assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 245 | 246 | // Check client calls 247 | getCalls = s.clients[serverID1].GetMemUsageCalls() 248 | assert.Equal(t, 2, len(getCalls)) 249 | 250 | assert.Equal(t, 1, len(newClient.GetMemUsageCalls())) 251 | 252 | assert.Equal(t, 3, len(s.getNewArgs())) 253 | assert.Equal(t, SimpleServerConfig{ 254 | ID: serverID1, 255 | Host: "localhost", 256 | Port: 11201, 257 | }, s.getNewArgs()[2]) 258 | 259 | assert.Equal(t, 1, len(newClient.GetMemUsageCalls())) 260 | 261 | // Notify Again 262 | s.stats.NotifyServerFailed(serverID1) 263 | time.Sleep(40 * time.Millisecond) 264 | 265 | assert.Equal(t, 2, len(newClient.GetMemUsageCalls())) 266 | 267 | // Check Failed Again 268 | assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 269 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 270 | 271 | // Check Call After Shutdown 272 | s.stats.Shutdown() 273 | 274 | assert.Equal(t, 1, len(s.clients[serverID1].CloseCalls())) 275 | assert.Equal(t, 1, len(s.clients[serverID2].CloseCalls())) 276 | assert.Equal(t, 1, len(newClient.CloseCalls())) 277 | }) 278 | 279 | t.Run("multiple-time-out-happened", func(t *testing.T) { 280 | s := newServerStatsTest(t, WithSimpleStatsCheckDuration(100*time.Millisecond)) 281 | 282 | assert.Equal(t, 2, len(s.getNewArgs())) 283 | 284 | getCalls := s.clients[serverID1].GetMemUsageCalls() 285 | assert.Equal(t, 1, len(getCalls)) 286 | 287 | assert.Equal(t, float64(8000), s.stats.GetMemUsage(serverID1)) 288 | assert.Equal(t, float64(9000), s.stats.GetMemUsage(serverID2)) 289 | 290 | 
assert.Equal(t, false, s.stats.IsServerFailed(serverID1)) 291 | assert.Equal(t, false, s.stats.IsServerFailed(serverID2)) 292 | 293 | // Wait for Timeout 294 | time.Sleep(110 * time.Millisecond) 295 | 296 | getCalls = s.clients[serverID1].GetMemUsageCalls() 297 | assert.Equal(t, 2, len(getCalls)) 298 | 299 | // Wait for Timeout 300 | time.Sleep(110 * time.Millisecond) 301 | 302 | getCalls = s.clients[serverID1].GetMemUsageCalls() 303 | assert.Equal(t, 3, len(getCalls)) 304 | 305 | // Check Call After Shutdown 306 | s.stats.Shutdown() 307 | 308 | assert.Equal(t, 1, len(s.clients[serverID1].CloseCalls())) 309 | assert.Equal(t, 1, len(s.clients[serverID2].CloseCalls())) 310 | }) 311 | } 312 | -------------------------------------------------------------------------------- /proxy/tests/generate.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import "github.com/QuangTung97/memproxy/proxy" 4 | 5 | // ServerStats ... 6 | type ServerStats = proxy.ServerStats 7 | 8 | //go:generate moq -rm -out proxy_mocks_test.go . ServerStats 9 | -------------------------------------------------------------------------------- /proxy/tests/item_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | 12 | "github.com/QuangTung97/memproxy" 13 | "github.com/QuangTung97/memproxy/item" 14 | "github.com/QuangTung97/memproxy/mocks" 15 | "github.com/QuangTung97/memproxy/proxy" 16 | ) 17 | 18 | type itemTest struct { 19 | pipe1 *mocks.PipelineMock 20 | pipe2 *mocks.PipelineMock 21 | 22 | randFunc func(n uint64) uint64 23 | 24 | fillCalls []userKey 25 | stats *ServerStatsMock 26 | 27 | it *item.Item[userValue, userKey] 28 | 29 | actions []string 30 | } 31 | 32 | type userValue struct { 33 | Tenant string `json:"tenant"` 34 | Name string `json:"name"` 35 | Age int64 `json:"age"` 36 | } 37 | 38 | type userKey struct { 39 | Tenant string 40 | Name string 41 | } 42 | 43 | func (u userValue) GetKey() userKey { 44 | return userKey{ 45 | Tenant: u.Tenant, 46 | Name: u.Name, 47 | } 48 | } 49 | 50 | func (u userValue) Marshal() ([]byte, error) { 51 | return json.Marshal(u) 52 | } 53 | 54 | func unmarshalUser(data []byte) (userValue, error) { 55 | var user userValue 56 | err := json.Unmarshal(data, &user) 57 | return user, err 58 | } 59 | 60 | func (k userKey) String() string { 61 | return k.Tenant + ":" + k.Name 62 | } 63 | 64 | const server1 = proxy.ServerID(11) 65 | const server2 = proxy.ServerID(12) 66 | 67 | func newItemTest() *itemTest { 68 | result := &itemTest{ 69 | stats: &ServerStatsMock{}, 70 | } 71 | 72 | result.stats.NotifyServerFailedFunc = func(server proxy.ServerID) { 73 | result.addAction("notify-server-failed: ", server) 74 | } 75 | return result 76 | } 77 | 78 | func (i *itemTest) initItem() { 79 | mc1 := &mocks.MemcacheMock{} 80 | mc2 := &mocks.MemcacheMock{} 81 | 82 | i.pipe1 = &mocks.PipelineMock{} 83 | i.pipe2 = &mocks.PipelineMock{} 84 | 85 | mc1.PipelineFunc = func(ctx context.Context, options ...memproxy.PipelineOption) memproxy.Pipeline { 86 | i.addAction("pipeline 1") 87 | return i.pipe1 88 | } 89 | mc2.PipelineFunc = func(ctx context.Context, options ...memproxy.PipelineOption) memproxy.Pipeline { 90 | i.addAction("pipeline 2") 91 | return i.pipe2 92 | } 93 | 94 | i.pipe1.ExecuteFunc = func() { 95 | i.addAction("execute 1") 96 | } 97 | i.pipe2.ExecuteFunc = 
func() { 98 | i.addAction("execute 2") 99 | } 100 | 101 | mcMap := map[proxy.ServerID]memproxy.Memcache{ 102 | server1: mc1, 103 | server2: mc2, 104 | } 105 | 106 | servers := []proxy.SimpleServerConfig{ 107 | {ID: server1, Host: "localhost1"}, 108 | {ID: server2, Host: "localhost2"}, 109 | } 110 | 111 | mc, err := proxy.New[proxy.SimpleServerConfig]( 112 | proxy.Config[proxy.SimpleServerConfig]{ 113 | Servers: servers, 114 | Route: proxy.NewReplicatedRoute( 115 | []proxy.ServerID{server1, server2}, 116 | i.stats, 117 | proxy.WithRandFunc(func(n uint64) uint64 { 118 | return i.randFunc(n) 119 | }), 120 | ), 121 | }, 122 | func(conf proxy.SimpleServerConfig) memproxy.Memcache { 123 | return mcMap[conf.ID] 124 | }, 125 | ) 126 | if err != nil { 127 | panic(err) 128 | } 129 | 130 | age := 100 131 | 132 | i.it = item.New[userValue, userKey]( 133 | mc.Pipeline(context.Background()), 134 | unmarshalUser, 135 | func(ctx context.Context, key userKey) func() (userValue, error) { 136 | i.addAction("fill: ", key.String()) 137 | return func() (userValue, error) { 138 | i.addAction("fill-func: ", key.String()) 139 | 140 | i.fillCalls = append(i.fillCalls, key) 141 | age++ 142 | return userValue{ 143 | Tenant: key.Tenant, 144 | Name: key.Name, 145 | Age: int64(age), 146 | }, nil 147 | } 148 | }, 149 | ) 150 | } 151 | 152 | func (i *itemTest) stubServersFailStatus( 153 | healthyServers []proxy.ServerID, 154 | failedServers []proxy.ServerID, 155 | ) { 156 | i.stats.IsServerFailedFunc = func(server proxy.ServerID) bool { 157 | for _, s := range healthyServers { 158 | if s == server { 159 | return false 160 | } 161 | } 162 | for _, s := range failedServers { 163 | if s == server { 164 | return true 165 | } 166 | } 167 | panic(fmt.Sprint("not found server:", server)) 168 | } 169 | } 170 | 171 | func (i *itemTest) stubServerMem(servers map[proxy.ServerID]float64) { 172 | i.stats.GetMemUsageFunc = func(server proxy.ServerID) float64 { 173 | mem, ok := servers[server] 174 | if !ok { 175 | panic(fmt.Sprint("not found server:", server)) 176 | } 177 | return mem 178 | } 179 | } 180 | 181 | func (i *itemTest) stubRand(r uint64) { 182 | i.randFunc = func(n uint64) uint64 { 183 | i.addAction("rand-func") 184 | return r 185 | } 186 | } 187 | 188 | func (i *itemTest) stubLeaseGet( 189 | pipe *mocks.PipelineMock, 190 | resp memproxy.LeaseGetResponse, 191 | err error, 192 | ) { 193 | pipe.LeaseGetFunc = func( 194 | key string, options memproxy.LeaseGetOptions, 195 | ) memproxy.LeaseGetResult { 196 | i.addAction("lease-get: ", key) 197 | return memproxy.LeaseGetResultFunc(func() (memproxy.LeaseGetResponse, error) { 198 | i.addAction("lease-get-func: ", key) 199 | return resp, err 200 | }) 201 | } 202 | } 203 | 204 | func (i *itemTest) stubLeaseGetMulti( 205 | pipe *mocks.PipelineMock, 206 | respList ...memproxy.LeaseGetResponse, 207 | ) { 208 | pipe.LeaseGetFunc = func( 209 | key string, options memproxy.LeaseGetOptions, 210 | ) memproxy.LeaseGetResult { 211 | index := len(pipe.LeaseGetCalls()) - 1 212 | i.addAction("lease-get: ", key) 213 | return memproxy.LeaseGetResultFunc(func() (memproxy.LeaseGetResponse, error) { 214 | i.addAction("lease-get-func: ", key) 215 | return respList[index], nil 216 | }) 217 | } 218 | } 219 | 220 | func (i *itemTest) addAction(s string, args ...any) { 221 | var vals []any 222 | vals = append(vals, s) 223 | vals = append(vals, args...) 
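// fmt.Sprint only inserts a space between operands when neither is a string,
// so addAction("notify-server-failed: ", server) records exactly
// "notify-server-failed: 11" with no extra separator.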
224 | 225 | i.actions = append(i.actions, fmt.Sprint(vals...)) 226 | } 227 | 228 | func mustMarshalUser(u userValue) []byte { 229 | data, err := json.Marshal(u) 230 | if err != nil { 231 | panic(err) 232 | } 233 | return data 234 | } 235 | 236 | func TestItemProxy__SimpleGet(t *testing.T) { 237 | i := newItemTest() 238 | 239 | i.stubRand(proxy.RandomMaxValues / 3) 240 | i.stubServersFailStatus( 241 | []proxy.ServerID{server1, server2}, 242 | nil, 243 | ) 244 | i.stubServerMem(map[proxy.ServerID]float64{ 245 | server1: 200, 246 | server2: 200, 247 | }) 248 | 249 | // Do Init 250 | i.initItem() 251 | 252 | i.stubLeaseGet(i.pipe1, memproxy.LeaseGetResponse{ 253 | Status: memproxy.LeaseGetStatusFound, 254 | Data: mustMarshalUser(userValue{ 255 | Tenant: "TENANT01", 256 | Name: "USER01", 257 | Age: 88, 258 | }), 259 | }, nil) 260 | 261 | fn := i.it.Get(context.Background(), userKey{ 262 | Tenant: "TENANT01", 263 | Name: "USER01", 264 | }) 265 | resp, err := fn() 266 | assert.Equal(t, nil, err) 267 | assert.Equal(t, userValue{ 268 | Tenant: "TENANT01", 269 | Name: "USER01", 270 | Age: 88, 271 | }, resp) 272 | } 273 | 274 | func TestItemProxy__FailOver__LeaseGetRejected(t *testing.T) { 275 | i := newItemTest() 276 | 277 | i.stubRand(proxy.RandomMaxValues / 3) 278 | i.stubServersFailStatus( 279 | []proxy.ServerID{server1, server2}, 280 | nil, 281 | ) 282 | i.stubServerMem(map[proxy.ServerID]float64{ 283 | server1: 200, 284 | server2: 200, 285 | }) 286 | 287 | // Do Init 288 | i.initItem() 289 | 290 | i.stubLeaseGet(i.pipe1, memproxy.LeaseGetResponse{}, errors.New("server down")) 291 | i.stubLeaseGetMulti(i.pipe2, 292 | memproxy.LeaseGetResponse{ 293 | Status: memproxy.LeaseGetStatusLeaseRejected, 294 | CAS: 2311, 295 | }, 296 | memproxy.LeaseGetResponse{ 297 | Status: memproxy.LeaseGetStatusFound, 298 | Data: mustMarshalUser(userValue{ 299 | Tenant: "TENANT01", 300 | Name: "USER01", 301 | Age: 81, 302 | }), 303 | }, 304 | ) 305 | 306 | fn := i.it.Get(context.Background(), userKey{ 307 | Tenant: "TENANT01", 308 | Name: "USER01", 309 | }) 310 | resp, err := fn() 311 | assert.Equal(t, nil, err) 312 | assert.Equal(t, userValue{ 313 | Tenant: "TENANT01", 314 | Name: "USER01", 315 | Age: 81, 316 | }, resp) 317 | 318 | assert.Equal(t, []string{ 319 | "rand-func", 320 | "pipeline 1", 321 | 322 | "lease-get: TENANT01:USER01", 323 | "execute 1", 324 | "lease-get-func: TENANT01:USER01", 325 | 326 | "notify-server-failed: 11", 327 | 328 | "rand-func", 329 | "pipeline 2", 330 | "lease-get: TENANT01:USER01", 331 | "execute 2", 332 | "lease-get-func: TENANT01:USER01", 333 | 334 | "rand-func", 335 | "lease-get: TENANT01:USER01", 336 | "execute 2", 337 | "lease-get-func: TENANT01:USER01", 338 | }, i.actions) 339 | } 340 | 341 | func TestItemProxy__FailOver__Filler__On_Multi_Keys(t *testing.T) { 342 | i := newItemTest() 343 | 344 | i.stubRand(proxy.RandomMaxValues / 3) 345 | i.stubServersFailStatus( 346 | []proxy.ServerID{server1, server2}, 347 | nil, 348 | ) 349 | i.stubServerMem(map[proxy.ServerID]float64{ 350 | server1: 200, 351 | server2: 200, 352 | }) 353 | 354 | // Do Init 355 | i.initItem() 356 | 357 | firstResp := []memproxy.LeaseGetResponse{ 358 | { 359 | Status: memproxy.LeaseGetStatusLeaseGranted, 360 | CAS: 544, 361 | }, 362 | {}, 363 | } 364 | firstErr := []error{ 365 | nil, 366 | errors.New("server failed"), 367 | } 368 | 369 | i.pipe1.LeaseGetFunc = func( 370 | key string, options memproxy.LeaseGetOptions, 371 | ) memproxy.LeaseGetResult { 372 | i.addAction("lease-get: ", key) 373 | index := 
len(i.pipe1.LeaseGetCalls()) - 1 374 | return memproxy.LeaseGetResultFunc(func() (memproxy.LeaseGetResponse, error) { 375 | i.addAction("lease-get-func: ", key) 376 | return firstResp[index], firstErr[index] 377 | }) 378 | } 379 | 380 | i.pipe1.LeaseSetFunc = func( 381 | key string, data []byte, cas uint64, options memproxy.LeaseSetOptions, 382 | ) func() (memproxy.LeaseSetResponse, error) { 383 | i.addAction("lease-set: ", key) 384 | return func() (memproxy.LeaseSetResponse, error) { 385 | i.addAction("lease-set-func: ", key) 386 | return memproxy.LeaseSetResponse{}, nil 387 | } 388 | } 389 | 390 | i.stubLeaseGetMulti(i.pipe2, 391 | memproxy.LeaseGetResponse{ 392 | Status: memproxy.LeaseGetStatusLeaseRejected, 393 | CAS: 2311, 394 | }, 395 | memproxy.LeaseGetResponse{ 396 | Status: memproxy.LeaseGetStatusFound, 397 | Data: mustMarshalUser(userValue{ 398 | Tenant: "TENANT02", 399 | Name: "USER02", 400 | Age: 82, 401 | }), 402 | }, 403 | ) 404 | 405 | fn1 := i.it.Get(context.Background(), userKey{ 406 | Tenant: "TENANT01", 407 | Name: "USER01", 408 | }) 409 | fn2 := i.it.Get(context.Background(), userKey{ 410 | Tenant: "TENANT02", 411 | Name: "USER02", 412 | }) 413 | 414 | resp, err := fn1() 415 | assert.Equal(t, nil, err) 416 | assert.Equal(t, userValue{ 417 | Tenant: "TENANT01", 418 | Name: "USER01", 419 | Age: 101, 420 | }, resp) 421 | 422 | resp, err = fn2() 423 | assert.Equal(t, nil, err) 424 | assert.Equal(t, userValue{ 425 | Tenant: "TENANT02", 426 | Name: "USER02", 427 | Age: 82, 428 | }, resp) 429 | 430 | assert.Equal(t, []string{ 431 | "rand-func", 432 | "pipeline 1", 433 | 434 | "lease-get: TENANT01:USER01", 435 | "lease-get: TENANT02:USER02", 436 | "execute 1", 437 | "lease-get-func: TENANT01:USER01", 438 | "lease-get-func: TENANT02:USER02", 439 | 440 | "notify-server-failed: 11", 441 | 442 | "rand-func", 443 | "pipeline 2", 444 | 445 | "lease-get: TENANT02:USER02", 446 | "execute 2", 447 | "lease-get-func: TENANT02:USER02", 448 | 449 | "fill: TENANT01:USER01", 450 | "fill-func: TENANT01:USER01", 451 | 452 | "lease-set: TENANT01:USER01", 453 | "execute 1", 454 | 455 | "rand-func", 456 | "lease-get: TENANT02:USER02", 457 | "execute 2", 458 | "lease-get-func: TENANT02:USER02", 459 | }, i.actions) 460 | } 461 | -------------------------------------------------------------------------------- /proxy/tests/proxy_mocks_test.go: -------------------------------------------------------------------------------- 1 | // Code generated by moq; DO NOT EDIT. 2 | // github.com/matryer/moq 3 | 4 | package tests 5 | 6 | import ( 7 | "github.com/QuangTung97/memproxy/proxy" 8 | "sync" 9 | ) 10 | 11 | // Ensure, that ServerStatsMock does implement ServerStats. 12 | // If this is not the case, regenerate this file with moq. 13 | var _ ServerStats = &ServerStatsMock{} 14 | 15 | // ServerStatsMock is a mock implementation of ServerStats. 16 | // 17 | // func TestSomethingThatUsesServerStats(t *testing.T) { 18 | // 19 | // // make and configure a mocked ServerStats 20 | // mockedServerStats := &ServerStatsMock{ 21 | // GetMemUsageFunc: func(server proxy.ServerID) float64 { 22 | // panic("mock out the GetMemUsage method") 23 | // }, 24 | // IsServerFailedFunc: func(server proxy.ServerID) bool { 25 | // panic("mock out the IsServerFailed method") 26 | // }, 27 | // NotifyServerFailedFunc: func(server proxy.ServerID) { 28 | // panic("mock out the NotifyServerFailed method") 29 | // }, 30 | // } 31 | // 32 | // // use mockedServerStats in code that requires ServerStats 33 | // // and then make assertions. 
34 | // 35 | // } 36 | type ServerStatsMock struct { 37 | // GetMemUsageFunc mocks the GetMemUsage method. 38 | GetMemUsageFunc func(server proxy.ServerID) float64 39 | 40 | // IsServerFailedFunc mocks the IsServerFailed method. 41 | IsServerFailedFunc func(server proxy.ServerID) bool 42 | 43 | // NotifyServerFailedFunc mocks the NotifyServerFailed method. 44 | NotifyServerFailedFunc func(server proxy.ServerID) 45 | 46 | // calls tracks calls to the methods. 47 | calls struct { 48 | // GetMemUsage holds details about calls to the GetMemUsage method. 49 | GetMemUsage []struct { 50 | // Server is the server argument value. 51 | Server proxy.ServerID 52 | } 53 | // IsServerFailed holds details about calls to the IsServerFailed method. 54 | IsServerFailed []struct { 55 | // Server is the server argument value. 56 | Server proxy.ServerID 57 | } 58 | // NotifyServerFailed holds details about calls to the NotifyServerFailed method. 59 | NotifyServerFailed []struct { 60 | // Server is the server argument value. 61 | Server proxy.ServerID 62 | } 63 | } 64 | lockGetMemUsage sync.RWMutex 65 | lockIsServerFailed sync.RWMutex 66 | lockNotifyServerFailed sync.RWMutex 67 | } 68 | 69 | // GetMemUsage calls GetMemUsageFunc. 70 | func (mock *ServerStatsMock) GetMemUsage(server proxy.ServerID) float64 { 71 | if mock.GetMemUsageFunc == nil { 72 | panic("ServerStatsMock.GetMemUsageFunc: method is nil but ServerStats.GetMemUsage was just called") 73 | } 74 | callInfo := struct { 75 | Server proxy.ServerID 76 | }{ 77 | Server: server, 78 | } 79 | mock.lockGetMemUsage.Lock() 80 | mock.calls.GetMemUsage = append(mock.calls.GetMemUsage, callInfo) 81 | mock.lockGetMemUsage.Unlock() 82 | return mock.GetMemUsageFunc(server) 83 | } 84 | 85 | // GetMemUsageCalls gets all the calls that were made to GetMemUsage. 86 | // Check the length with: 87 | // 88 | // len(mockedServerStats.GetMemUsageCalls()) 89 | func (mock *ServerStatsMock) GetMemUsageCalls() []struct { 90 | Server proxy.ServerID 91 | } { 92 | var calls []struct { 93 | Server proxy.ServerID 94 | } 95 | mock.lockGetMemUsage.RLock() 96 | calls = mock.calls.GetMemUsage 97 | mock.lockGetMemUsage.RUnlock() 98 | return calls 99 | } 100 | 101 | // IsServerFailed calls IsServerFailedFunc. 102 | func (mock *ServerStatsMock) IsServerFailed(server proxy.ServerID) bool { 103 | if mock.IsServerFailedFunc == nil { 104 | panic("ServerStatsMock.IsServerFailedFunc: method is nil but ServerStats.IsServerFailed was just called") 105 | } 106 | callInfo := struct { 107 | Server proxy.ServerID 108 | }{ 109 | Server: server, 110 | } 111 | mock.lockIsServerFailed.Lock() 112 | mock.calls.IsServerFailed = append(mock.calls.IsServerFailed, callInfo) 113 | mock.lockIsServerFailed.Unlock() 114 | return mock.IsServerFailedFunc(server) 115 | } 116 | 117 | // IsServerFailedCalls gets all the calls that were made to IsServerFailed. 118 | // Check the length with: 119 | // 120 | // len(mockedServerStats.IsServerFailedCalls()) 121 | func (mock *ServerStatsMock) IsServerFailedCalls() []struct { 122 | Server proxy.ServerID 123 | } { 124 | var calls []struct { 125 | Server proxy.ServerID 126 | } 127 | mock.lockIsServerFailed.RLock() 128 | calls = mock.calls.IsServerFailed 129 | mock.lockIsServerFailed.RUnlock() 130 | return calls 131 | } 132 | 133 | // NotifyServerFailed calls NotifyServerFailedFunc. 
134 | func (mock *ServerStatsMock) NotifyServerFailed(server proxy.ServerID) { 135 | if mock.NotifyServerFailedFunc == nil { 136 | panic("ServerStatsMock.NotifyServerFailedFunc: method is nil but ServerStats.NotifyServerFailed was just called") 137 | } 138 | callInfo := struct { 139 | Server proxy.ServerID 140 | }{ 141 | Server: server, 142 | } 143 | mock.lockNotifyServerFailed.Lock() 144 | mock.calls.NotifyServerFailed = append(mock.calls.NotifyServerFailed, callInfo) 145 | mock.lockNotifyServerFailed.Unlock() 146 | mock.NotifyServerFailedFunc(server) 147 | } 148 | 149 | // NotifyServerFailedCalls gets all the calls that were made to NotifyServerFailed. 150 | // Check the length with: 151 | // 152 | // len(mockedServerStats.NotifyServerFailedCalls()) 153 | func (mock *ServerStatsMock) NotifyServerFailedCalls() []struct { 154 | Server proxy.ServerID 155 | } { 156 | var calls []struct { 157 | Server proxy.ServerID 158 | } 159 | mock.lockNotifyServerFailed.RLock() 160 | calls = mock.calls.NotifyServerFailed 161 | mock.lockNotifyServerFailed.RUnlock() 162 | return calls 163 | } 164 | -------------------------------------------------------------------------------- /revive.toml: -------------------------------------------------------------------------------- 1 | severity = "error" 2 | confidence = 0.8 3 | 4 | # Sets the error code for failures with severity "error" 5 | errorCode = 2 6 | # Sets the error code for failures with severity "warning" 7 | warningCode = 1 8 | 9 | # Enable all available rules 10 | enableAllRules = true 11 | 12 | # Disabled rules 13 | [rule.file-header] 14 | Disabled = true 15 | [rule.max-public-structs] 16 | Disabled = true 17 | [rule.function-length] 18 | Disabled = true 19 | [rule.add-constant] 20 | Disabled = true 21 | [rule.banned-characters] 22 | Disabled = true 23 | [rule.package-comments] 24 | Disabled = true 25 | 26 | # Rule tuning 27 | [rule.argument-limit] 28 | Arguments = [6] 29 | [rule.cyclomatic] 30 | Arguments = [14] 31 | [rule.cognitive-complexity] 32 | Arguments = [18] 33 | [rule.function-result-limit] 34 | Arguments = [4] 35 | [rule.unhandled-error] 36 | Arguments = ["fmt.Printf", "fmt.Println", "buf.WriteString", "buf.Write", "buf.WriteByte"] 37 | [rule.line-length-limit] 38 | Arguments = [120] 39 | [rule.defer] 40 | Arguments = [["loop", "method-call", "recover", "return"]] 41 | -------------------------------------------------------------------------------- /session.go: -------------------------------------------------------------------------------- 1 | package memproxy 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | type sessionProviderImpl struct { 9 | nowFn func() time.Time 10 | sleepFn func(d time.Duration) 11 | } 12 | 13 | var _ SessionProvider = &sessionProviderImpl{} 14 | 15 | type sessionProviderConf struct { 16 | nowFn func() time.Time 17 | sleepFn func(d time.Duration) 18 | } 19 | 20 | // SessionProviderOption ... 21 | type SessionProviderOption func(conf *sessionProviderConf) 22 | 23 | // WithSessionNowFunc ... 24 | func WithSessionNowFunc(nowFn func() time.Time) SessionProviderOption { 25 | return func(conf *sessionProviderConf) { 26 | conf.nowFn = nowFn 27 | } 28 | } 29 | 30 | // WithSessionSleepFunc ... 
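// It overrides the sleep function used while waiting for delayed calls;
// tests typically inject a fake here so Execute does not actually block.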
31 | func WithSessionSleepFunc(sleepFn func(d time.Duration)) SessionProviderOption {
32 |     return func(conf *sessionProviderConf) {
33 |         conf.sleepFn = sleepFn
34 |     }
35 | }
36 | 
37 | // NewSessionProvider is THREAD SAFE
38 | func NewSessionProvider(options ...SessionProviderOption) SessionProvider {
39 |     conf := &sessionProviderConf{
40 |         nowFn: time.Now,
41 |         sleepFn: time.Sleep,
42 |     }
43 | 
44 |     for _, opt := range options {
45 |         opt(conf)
46 |     }
47 | 
48 |     return &sessionProviderImpl{
49 |         nowFn: conf.nowFn,
50 |         sleepFn: conf.sleepFn,
51 |     }
52 | }
53 | 
54 | // New creates a Session; the returned Session is NOT a thread safe object
55 | func (p *sessionProviderImpl) New() Session {
56 |     return newSession(p, nil)
57 | }
58 | 
59 | func newSession(
60 |     provider *sessionProviderImpl, higher *sessionImpl,
61 | ) *sessionImpl {
62 |     s := &sessionImpl{
63 |         provider: provider,
64 |         lower: nil,
65 |         higher: higher,
66 |     }
67 | 
68 |     if higher != nil {
69 |         higher.lower = s
70 |         s.isDirty = higher.isDirty
71 |     }
72 |     return s
73 | }
74 | 
75 | type sessionImpl struct {
76 |     provider *sessionProviderImpl
77 |     nextCalls callbackList
78 |     heap delayedCallHeap
79 | 
80 |     isDirty bool // an optimization: lets Execute return immediately when nothing is pending
81 | 
82 |     lower *sessionImpl
83 |     higher *sessionImpl
84 | }
85 | 
86 | type delayedCall struct {
87 |     startedAt time.Time
88 |     call CallbackFunc
89 | }
90 | 
91 | var _ Session = &sessionImpl{}
92 | // setDirtyRecursive marks this session and every lower-priority session beneath it as dirty, stopping early at the first already-dirty one
93 | func setDirtyRecursive(s *sessionImpl) {
94 |     for !s.isDirty {
95 |         s.isDirty = true
96 |         if s.lower == nil {
97 |             return
98 |         }
99 |         s = s.lower
100 |     }
101 | }
102 | 
103 | // AddNextCall ...
104 | func (s *sessionImpl) AddNextCall(fn CallbackFunc) {
105 |     setDirtyRecursive(s)
106 |     s.nextCalls.append(fn)
107 | }
108 | 
109 | // AddDelayedCall ...
110 | func (s *sessionImpl) AddDelayedCall(d time.Duration, fn CallbackFunc) {
111 |     setDirtyRecursive(s)
112 |     s.heap.push(delayedCall{
113 |         startedAt: s.provider.nowFn().Add(d),
114 |         call: fn,
115 |     })
116 | }
117 | 
118 | // Execute ...
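// It runs the higher-priority session first (if any), then drains all next
// calls in FIFO order, then fires delayed calls as their deadlines pass;
// when the session is not dirty it returns immediately.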
119 | func (s *sessionImpl) Execute() {
120 |     if !s.isDirty {
121 |         return
122 |     }
123 | 
124 |     if s.higher != nil {
125 |         s.higher.Execute()
126 |     }
127 | 
128 |     for {
129 |         s.executeNextCalls()
130 | 
131 |         if s.heap.size() == 0 {
132 |             s.isDirty = false
133 |             return
134 |         }
135 | 
136 |         s.executeDelayedCalls()
137 |     }
138 | }
139 | 
140 | // GetLower gets the lower priority session, creating it on first use
141 | func (s *sessionImpl) GetLower() Session {
142 |     if s.lower != nil {
143 |         return s.lower
144 |     }
145 |     return newSession(s.provider, s)
146 | }
147 | 
148 | func (s *sessionImpl) executeNextCalls() {
149 |     for !s.nextCalls.isEmpty() {
150 |         it := s.nextCalls.getIterator()
151 | 
152 |         for {
153 |             fn, ok := it.getNext()
154 |             if !ok {
155 |                 break
156 |             }
157 |             fn.Call()
158 |         }
159 |     }
160 | }
161 | 
162 | const deviationDuration = 100 * time.Microsecond // tolerance for clock jitter: calls due within this window run without sleeping
163 | 
164 | func (s *sessionImpl) executeDelayedCalls() {
165 | MainLoop:
166 |     for s.heap.size() > 0 {
167 |         now := s.provider.nowFn()
168 | 
169 |         for s.heap.size() > 0 {
170 |             top := s.heap.top()
171 |             topStart := top.startedAt
172 |             if topStart.Add(-deviationDuration).After(now) {
173 |                 duration := topStart.Sub(now)
174 |                 s.provider.sleepFn(duration)
175 |                 continue MainLoop
176 |             }
177 |             s.heap.pop()
178 |             top.call.Call()
179 |         }
180 |     }
181 | }
182 | 
183 | // ===============================
184 | // callback list
185 | // ===============================
186 | 
187 | type callbackList struct {
188 |     head *callbackSegment
189 |     tail *callbackSegment
190 | }
191 | 
192 | type callbackSegment struct {
193 |     next *callbackSegment // linked list of callback segments
194 |     size int
195 |     funcs [16]CallbackFunc
196 | }
197 | 
198 | func (s *callbackList) append(fn CallbackFunc) {
199 |     if s.tail == nil {
200 |         s.head = getCallbackSegment()
201 |         s.tail = s.head
202 |     } else if s.tail.size >= len(s.tail.funcs) {
203 |         newTail := getCallbackSegment()
204 |         s.tail.next = newTail
205 |         s.tail = newTail
206 |     }
207 | 
208 |     n := s.tail
209 |     n.funcs[n.size] = fn
210 |     n.size++
211 | }
212 | 
213 | func (s *callbackList) isEmpty() bool {
214 |     return s.head == nil
215 | }
216 | 
217 | type callbackListIterator struct {
218 |     current *callbackSegment
219 |     index int
220 | }
221 | 
222 | // getIterator also clears the list
223 | func (s *callbackList) getIterator() callbackListIterator {
224 |     it := callbackListIterator{
225 |         current: s.head,
226 |         index: 0,
227 |     }
228 | 
229 |     s.head = nil
230 |     s.tail = nil
231 | 
232 |     return it
233 | }
234 | 
235 | func (it *callbackListIterator) getNext() (CallbackFunc, bool) {
236 |     if it.current == nil {
237 |         return CallbackFunc{}, false
238 |     }
239 | 
240 |     if it.index >= it.current.size {
241 |         prev := it.current
242 |         it.current = it.current.next
243 | 
244 |         putCallbackSegment(prev)
245 | 
246 |         it.index = 0
247 | 
248 |         if it.current == nil {
249 |             return CallbackFunc{}, false
250 |         }
251 |     }
252 | 
253 |     fn := it.current.funcs[it.index]
254 |     it.index++
255 |     return fn, true
256 | }
257 | 
258 | // ===============================
259 | // Pool of Callback Segments
260 | // ===============================
261 | 
262 | var callbackSegmentPool = sync.Pool{
263 |     New: func() any {
264 |         return &callbackSegment{}
265 |     },
266 | }
267 | 
268 | func getCallbackSegment() *callbackSegment {
269 |     return callbackSegmentPool.Get().(*callbackSegment)
270 | }
271 | 
272 | func putCallbackSegment(s *callbackSegment) {
273 |     s.next = nil
274 |     for i := 0; i < s.size; i++ {
275 |         s.funcs[i] = CallbackFunc{}
276 |     }
277 |     s.size = 0
278 |     callbackSegmentPool.Put(s)
279 | }
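// Design note: segments hold 16 callbacks apiece and are recycled through
// callbackSegmentPool, so steady-state AddNextCall traffic avoids per-callback
// allocation; putCallbackSegment zeroes the stored CallbackFunc values so the
// pool does not keep old callbacks reachable.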
280 | -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | // +build tools 3 | 4 | package tools 5 | 6 | import ( 7 | _ "github.com/matryer/moq" 8 | _ "github.com/mgechev/revive" 9 | _ "golang.org/x/perf/cmd/benchstat" 10 | ) 11 | --------------------------------------------------------------------------------