├── LICENSE
├── README.md
├── mutex.go
└── mutex_test.go

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018-present Eagle Chen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# mapmutex

mapmutex is a simple implementation that acts as a group of mutexes.

## What's it for?
Synchronization is needed in many cases. But sometimes you don't want one giant lock to block totally unrelated actions. Instead, you want many fine-grained locks that only block access to the same resource.

Take an example. A website has many users, and each user has a counter. When one user increments the counter from two devices at the same time (say, a tablet and a phone), those increments need to happen one by one. But user A's increment has nothing to do with user B's increment; they don't have to affect each other.
This is where this package comes in: you can lock per user (using the user id as the key) without blocking other users.
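For instance, the counter scenario above can be sketched as follows (the `counters` slice and the integer user ids are invented for illustration; only `NewMapMutex`, `TryLock` and `Unlock` come from this package):
```
package main

import (
	"fmt"
	"sync"

	"github.com/EagleChen/mapmutex"
)

func main() {
	mutex := mapmutex.NewMapMutex()
	counters := make([]int, 2) // one counter per user: index 0 is user A, index 1 is user B
	var wg sync.WaitGroup

	// Two devices of user 0 and one device of user 1 try to increment at the same time.
	for _, userID := range []int{0, 0, 1} {
		wg.Add(1)
		go func(userID int) {
			defer wg.Done()
			if mutex.TryLock(userID) { // only increments for the same user contend
				counters[userID]++ // the "real job"
				mutex.Unlock(userID)
			}
			// if TryLock still fails after its internal retries, the increment is skipped
		}(userID)
	}

	wg.Wait()
	fmt.Println(counters) // typically [2 1]
}
```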
## Performance
As shown by the benchmark results below (from `mutex_test.go`), it's several times faster than one giant mutex.
```
(11 times faster)
BenchmarkMutex1000_100_20_20-4        1    20164937908 ns/op
BenchmarkMapMutex1000_100_20_20-4     1     1821899222 ns/op

(7 times faster)
BenchmarkMutex1000_20_20_20-4         1    19726327623 ns/op
BenchmarkMapMutex1000_20_20_20-4      1     2759654813 ns/op

(11 times faster)
BenchmarkMutex1000_20_40_20-4         1    20380128848 ns/op
BenchmarkMapMutex1000_20_40_20-4      1     1828899343 ns/op

(only 2 keys in map, 2 times faster)
(in case of only one key in map, it's the same as one giant lock)
BenchmarkMutex1000_2_40_20-4          1    20721092007 ns/op
BenchmarkMapMutex1000_2_40_20-4       1    10818512020 ns/op (989 of 1000 success)

(9 times faster)
BenchmarkMutex1000_20_40_60-4         1    60341833247 ns/op
BenchmarkMapMutex1000_20_40_60-4      1     6240238975 ns/op

(11 times faster)
BenchmarkMutex10000_20_40_20-4        1   205493472245 ns/op
BenchmarkMapMutex10000_20_40_20-4     1    18677416055 ns/op
```
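The four numbers in each benchmark name are, in order, the total action count, the number of distinct keys, the number of goroutines, and the average job time in milliseconds (matching the parameters of the helper functions in `mutex_test.go`). To check the numbers on your own machine, you can run the standard Go benchmarks from the package directory:
```
go test -bench .
```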
## How to get
```
go get github.com/EagleChen/mapmutex
```

## How to use
```
mutex := mapmutex.NewMapMutex()
if mutex.TryLock(key) { // for example, key can be a user id
	// do the real job here

	mutex.Unlock(key)
}
```

TryLock itself retries several times to acquire the lock. But at the application level, you can also retry several times when the lock cannot be acquired.
```
got := false
for i := 0; i < retryTimes; i++ {
	if got = mutex.TryLock(key); got {
		break
	}
}
if got {
	// do the real job here

	mutex.Unlock(key)
}
```

## How to tune
1. Use `NewCustomizedMapMutex` to customize how hard `TryLock` tries to get the lock. The parameters control how many times to try, how long to wait before the next attempt after a failed try, and so on. They may be very different for various use cases.

2. Change some source code for your use case. For general use, `map[interface{}]interface{}` is used for storing the locks. But it can be changed to `map[int]bool` if your `key` is `int`, or to `map[string]bool` if your `key` is `string`. As far as I know, this trick improves performance a little. A sketch of such a specialization is shown below.
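For illustration, here is a minimal sketch (not part of the package) of what such a string-keyed variant could look like. It keeps the same locking idea but, for brevity, makes only a single attempt instead of the retry-with-backoff loop used by the real `TryLock`:
```
package mapmutex

import "sync"

// StringMutex is a hypothetical string-keyed variant of Mutex.
// Using a concrete key type avoids interface{} boxing on every lookup.
type StringMutex struct {
	locks map[string]bool
	m     sync.Mutex
}

func NewStringMutex() *StringMutex {
	return &StringMutex{locks: make(map[string]bool)}
}

// TryLock makes a single attempt; the retry/backoff of the original TryLock is omitted here.
func (s *StringMutex) TryLock(key string) bool {
	s.m.Lock()
	defer s.m.Unlock()
	if s.locks[key] { // already locked by someone else
		return false
	}
	s.locks[key] = true
	return true
}

func (s *StringMutex) Unlock(key string) {
	s.m.Lock()
	delete(s.locks, key)
	s.m.Unlock()
}
```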
--------------------------------------------------------------------------------

/mutex.go:
--------------------------------------------------------------------------------
package mapmutex

import (
	"math/rand"
	"sync"
	"time"
)

// Mutex is the mutex with a synchronized map.
// It reduces unnecessary blocking among different keys.
type Mutex struct {
	locks     map[interface{}]interface{}
	m         *sync.Mutex
	maxRetry  int
	maxDelay  float64 // in nanoseconds
	baseDelay float64 // in nanoseconds
	factor    float64
	jitter    float64
}

// TryLock tries to acquire the lock for the given key.
func (m *Mutex) TryLock(key interface{}) (gotLock bool) {
	for i := 0; i < m.maxRetry; i++ {
		m.m.Lock()
		if _, ok := m.locks[key]; ok { // already locked, back off and retry
			m.m.Unlock()
			time.Sleep(m.backoff(i))
		} else { // not locked, lock it
			m.locks[key] = struct{}{}
			m.m.Unlock()
			return true
		}
	}

	return false
}

// Unlock unlocks for the key.
// Please call Unlock only after having acquired the lock.
func (m *Mutex) Unlock(key interface{}) {
	m.m.Lock()
	delete(m.locks, key)
	m.m.Unlock()
}

// backoff computes the delay before the next retry (borrowed from grpc).
func (m *Mutex) backoff(retries int) time.Duration {
	if retries == 0 {
		return time.Duration(m.baseDelay) * time.Nanosecond
	}
	backoff, max := m.baseDelay, m.maxDelay
	for backoff < max && retries > 0 {
		backoff *= m.factor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	backoff *= 1 + m.jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff) * time.Nanosecond
}

// NewMapMutex returns a mapmutex with default configs.
func NewMapMutex() *Mutex {
	return &Mutex{
		locks:     make(map[interface{}]interface{}),
		m:         &sync.Mutex{},
		maxRetry:  200,
		maxDelay:  100000000, // 0.1 second
		baseDelay: 10,        // 10 nanoseconds
		factor:    1.1,
		jitter:    0.2,
	}
}

// NewCustomizedMapMutex returns a customized mapmutex.
func NewCustomizedMapMutex(mRetry int, mDelay, bDelay, factor, jitter float64) *Mutex {
	return &Mutex{
		locks:     make(map[interface{}]interface{}),
		m:         &sync.Mutex{},
		maxRetry:  mRetry,
		maxDelay:  mDelay,
		baseDelay: bDelay,
		factor:    factor,
		jitter:    jitter,
	}
}
--------------------------------------------------------------------------------
/mutex_test.go:
--------------------------------------------------------------------------------
package mapmutex

import (
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"
)

const MaxRetry = 100000

func TestLockSuccess(t *testing.T) {
	m := NewMapMutex()

	if !m.TryLock("123") {
		t.Error("fail to get lock")
	}
	m.Unlock("123")
}

func TestLockFail(t *testing.T) {
	// fail fast
	m := NewCustomizedMapMutex(1, 1, 1, 2, 0.1)

	c := make(chan bool)
	finish := make(chan bool)

	num := 5
	success := make([]int, num)

	for i := 0; i < num; i++ {
		go func(i int) {
			if m.TryLock("123") {
				<-c // block here
				success[i] = 1
				m.Unlock("123")
			}
			finish <- true
		}(i)
	}

	// most goroutines fail to get the lock
	for i := 0; i < num-1; i++ {
		<-finish
	}

	sum := 0
	for _, s := range success {
		sum += s
	}

	if sum != 0 {
		t.Error("some other goroutine got the lock")
	}

	// finish the successful one
	c <- true
	// wait
	<-finish
	for _, s := range success {
		sum += s
	}
	if sum != 1 {
		t.Error("no goroutine got the lock")
	}
}

func TestLockIndividually(t *testing.T) {
	m := NewMapMutex()

	if !m.TryLock(123) || !m.TryLock(456) {
		t.Error("different locks affect each other")
	}
}

func BenchmarkMutex1000_100_20_20(b *testing.B)        { lockByOneMutex(1000, 100, 20, 20) }
func BenchmarkMapWithMutex1000_100_20_20(b *testing.B) { lockByMapWithMutex(1000, 100, 20, 20) }
func BenchmarkMapMutex1000_100_20_20(b *testing.B)     { lockByMapMutex(1000, 100, 20, 20) }

// fewer keys, more conflicts per key
func BenchmarkMutex1000_20_20_20(b *testing.B)        { lockByOneMutex(1000, 20, 20, 20) }
func BenchmarkMapWithMutex1000_20_20_20(b *testing.B) { lockByMapWithMutex(1000, 20, 20, 20) }
func BenchmarkMapMutex1000_20_20_20(b *testing.B)     { lockByMapMutex(1000, 20, 20, 20) }

// fewer keys, more goroutines, more conflicts per key
func BenchmarkMutex1000_20_40_20(b *testing.B)        { lockByOneMutex(1000, 20, 40, 20) }
func BenchmarkMapWithMutex1000_20_40_20(b *testing.B) { lockByMapWithMutex(1000, 20, 40, 20) }
func BenchmarkMapMutex1000_20_40_20(b *testing.B)     { lockByMapMutex(1000, 20, 40, 20) }

// even when using a map to avoid unnecessary locking,
// with only 2 keys a lot of contention still occurs
func BenchmarkMutex1000_2_40_20(b *testing.B)        { lockByOneMutex(1000, 2, 40, 20) }
func BenchmarkMapWithMutex1000_2_40_20(b *testing.B) { lockByMapWithMutex(1000, 2, 40, 20) }
func BenchmarkMapMutex1000_2_40_20(b *testing.B)     { lockByMapMutex(1000, 2, 40, 20) }

// longer time per job, more conflicts per key
func BenchmarkMutex1000_20_40_60(b *testing.B)        { lockByOneMutex(1000, 20, 40, 60) }
func BenchmarkMapWithMutex1000_20_40_60(b *testing.B) { lockByMapWithMutex(1000, 20, 40, 60) }
func BenchmarkMapMutex1000_20_40_60(b *testing.B)     { lockByMapMutex(1000, 20, 40, 60) }

// many more actions
func BenchmarkMutex10000_20_40_20(b *testing.B)        { lockByOneMutex(10000, 20, 40, 20) }
func BenchmarkMapWithMutex10000_20_40_20(b *testing.B) { lockByMapWithMutex(10000, 20, 40, 20) }
func BenchmarkMapMutex10000_20_40_20(b *testing.B)     { lockByMapMutex(10000, 20, 40, 20) }

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// load should be at least as large as buckets, so every bucket gets a positive share
func splitLoad(load, buckets int) []int {
	result := make([]int, buckets)
	avg := load / buckets
	remain := load % buckets

	// split
	for i := range result {
		result[i] = avg
		if remain > 0 {
			result[i]++
			remain--
		}
	}

	// randomize
	for i := 0; i < buckets; i += 2 {
		if i+1 < buckets {
			r := rand.Intn(min(result[i], result[i+1]))
			if rand.Intn(r+1)%2 == 0 {
				result[i] -= r
				result[i+1] += r
			} else {
				result[i] += r
				result[i+1] -= r
			}
		}
	}

	return result
}

func lockByOneMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	var m sync.Mutex

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithOneMutex(load, keyCount, averageTime,
				sharedSlice, &m)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("one mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

func lockByMapWithMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	locks := make(map[int]bool)
	var m sync.Mutex

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithMapWithMutex(load, keyCount, averageTime,
				sharedSlice, &m, locks)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("map with mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

func lockByMapMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	m := NewMapMutex()

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithMapMutex(load, keyCount, averageTime,
				sharedSlice, m)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("map mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

func runWithOneMutex(iterateNum, keyCount, averageTime int, sharedSlice []int,
	m *sync.Mutex) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		m.Lock()

		idx := rand.Intn(keyCount)
		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Unlock()
	}

	return success
}

func runWithMapWithMutex(iterateNum, keyCount, averageTime int,
	sharedSlice []int, m *sync.Mutex, locks map[int]bool) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		idx := rand.Intn(keyCount)
		goon := false
		for i := 0; i < MaxRetry; i++ {
			m.Lock()
			if locks[idx] { // already locked
				m.Unlock()
				time.Sleep(time.Duration(rand.Intn(100)*(i/100+1)) * time.Nanosecond)
			} else { // not locked, lock it
				locks[idx] = true
				m.Unlock()
				goon = true
				break
			}
		}

		if !goon {
			continue // failed to get the lock, go on to the next iteration
		}
		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Lock()
		delete(locks, idx)
		m.Unlock()
	}
	return success
}

func runWithMapMutex(iterateNum, keyCount, averageTime int,
	sharedSlice []int, m *Mutex) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		idx := rand.Intn(keyCount)
		// fail to get the lock
		if !m.TryLock(idx) {
			continue
		}

		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Unlock(idx)
	}
	return success
}

func doTheJob(averageTime, idx int, sharedSlice []int) {
	// do the real job: just sleep some time and set a value
	milliSec := rand.Intn(averageTime * 2)
	time.Sleep(time.Duration(milliSec) * time.Millisecond)
	sharedSlice[idx] = milliSec
}
--------------------------------------------------------------------------------