├── cmap_test.go ├── cmap_bench_test.go ├── README.md └── cmap.go /cmap_test.go: -------------------------------------------------------------------------------- 1 | package cmap 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | type Animal struct { 11 | name string 12 | } 13 | 14 | func TestLength(t *testing.T) { 15 | myMap := &Map{} 16 | for i := 0; i < 1000000; i++ { 17 | go myMap.LoadOrStore(rand.Intn(1000000), rand.Intn(1000000)) 18 | go myMap.Store(i, i) 19 | go myMap.Load(i) 20 | go myMap.Delete(rand.Intn(1000000)) 21 | } 22 | time.Sleep(time.Second * 30) 23 | fmt.Println("O(1) cmp length:", myMap.Length()) 24 | length := 0 25 | myMap.Range(func(_, _ interface{}) bool { 26 | length++ 27 | return true 28 | }) 29 | fmt.Println("O(n) real length:", length) 30 | } 31 | -------------------------------------------------------------------------------- /cmap_bench_test.go: -------------------------------------------------------------------------------- 1 | package cmap 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | ) 7 | 8 | func BenchmarkCMapStore(b *testing.B) { 9 | b.StopTimer() 10 | myMap := &Map{} 11 | b.N = 1000000 12 | b.StartTimer() 13 | for i := 0; i < b.N; i++ { 14 | for ii := 0; ii < 100; ii++ { 15 | myMap.Store(i, i) 16 | myMap.Delete(i) 17 | } 18 | } 19 | } 20 | func BenchmarkSyncMapStore(b *testing.B) { 21 | b.StopTimer() 22 | myMap := &sync.Map{} 23 | b.N = 1000000 24 | b.StartTimer() 25 | for i := 0; i < b.N; i++ { 26 | for ii := 0; ii < 100; ii++ { 27 | myMap.Store(i, i) 28 | myMap.Delete(i) 29 | } 30 | } 31 | } 32 | 33 | func BenchmarkSyncMapRange(b *testing.B) { 34 | b.StopTimer() 35 | myMap := &sync.Map{} 36 | for i := 0; i < 1; i++ { 37 | myMap.Store(i, i) 38 | } 39 | b.StartTimer() 40 | for ii := 0; ii < b.N; ii++ { 41 | myMap.Range(func(_, _ interface{}) bool { 42 | return true 43 | }) 44 | } 45 | } 46 | 47 | // 1500000 48 | //len 100000 map range 49 | //BenchmarkSyncMapRange-4 200 8010973 ns/op 0 B/op 0 
allocs/op 50 | 51 | //120000 52 | //len 10000 map range 53 | //BenchmarkSyncMapRange-4 5000 325012 ns/op 0 B/op 0 allocs/op 54 | 55 | //15000 56 | //len 1000 map range 57 | //BenchmarkSyncMapRange-4 100000 22647 ns/op 0 B/op 0 allocs/op 58 | 59 | //1500 60 | //len 100 map range 61 | //BenchmarkSyncMapRange-4 1000000 1527 ns/op 0 B/op 0 allocs/op 62 | 63 | //150 64 | //len 10 map range 65 | //BenchmarkSyncMapRange-4 10000000 168 ns/op 0 B/op 0 allocs/op 66 | 67 | //150 68 | //len 1 map range 69 | //BenchmarkSyncMapRange-4 30000000 44.4 ns/op 0 B/op 0 allocs/op 70 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cmap 2 | 3 | ## supported features 4 | 5 | 1. supports an O(1) length method, like the issue described here https://github.com/golang/go/issues/20680 6 | 7 | 新增特性,O(1)复杂度的长度方法 8 | 9 | 2. reduces contention between Map operations with multiple hash locks (in progress), like the issue described here https://github.com/golang/go/issues/21035 10 | 11 | 目前sync map 对不同的key操作,对应的都是同一个锁,可以通过多锁的方式,减少竞争。(实现中) 12 | 13 | In Go 1.9, `sync.Map` was introduced, but `sync.Map` doesn't support a length method, 14 | 15 | so when you need to get the length of your map: 16 | 17 | 获取map长度示例对比 18 | 19 | ## sync.Map usage 20 | ```go 21 | 22 | import ( 23 | "sync" 24 | ) 25 | 26 | length := 0 27 | 28 | myMap.Range(func(_, _ interface{}) bool { 29 | length++ 30 | return true 31 | }) 32 | ``` 33 | it will lock your map, and takes O(n) time 34 | 35 | 此操作会触发锁,复杂度O(n) 36 | 37 | ## cmap usage 38 | 39 | ```go 40 | import ( 41 | "github.com/mojinfu/cmap" 42 | ) 43 | 44 | length := myCMap.Length() 45 | ``` 46 | cmap will **not** lock your cmap, and takes O(1) time 47 | 48 | 此操作**不**会触发锁,复杂度O(1) 49 | 50 | ## benchmark 51 | 52 | 100 times Store(i, i) and Delete(i) in env goos: darwin ; goarch: amd64 53 | 54 | | package | ns/op| B/op|allocs/op| 55 | | :------:| :------: | :------: | :------: |
56 | | sync.Map| 21230 ns/op| 5600 B/op| 499 allocs/op| 57 | | cmap.Map| 24243 ns/op| 5600 B/op| 499 allocs/op| 58 | ---------- 59 | 60 | - it means each Store or Delete action takes an extra 15 ns 61 | - cmap.Map中使用的原子计数器虽然线程安全,是通过底层硬件的支持作为保障的,这使得每次新增Key 删除Key,相对于sync.Map 都将有15ns的额外耗时 62 | 63 | | map length | get length in sync.Map time-consuming |store in cmap.Map extra time-consuming | 64 | | :------:| :------: | :------: | 65 | | map长度 | 使用sync.Map通过range获取长度耗时 |使用CMap存删造成的理论额外耗时 | 66 | | 1 | 44.4 ns |15 ns | 67 | | 10 | 168 ns |150 ns | 68 | | 100 | 1527 ns |1500 ns | 69 | | 1000 | 22647 ns |15000 ns | 70 | | 10000 | 0.32 ms |0.15 ms | 71 | | 100000 | 8.01 ms |1.5 ms | 72 | ---------- 73 | 74 | - it means that when a program needs the length of the map (even just once), using the cmap package in place of sync.Map is better. 75 | 76 | - 如果程序中需要获取Map长度,务必使用cmap来减少性能损耗。对长度的获取越频繁,使用cmap的必要性就越大。 77 | -------------------------------------------------------------------------------- /cmap.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package cmap 6 | 7 | import ( 8 | "sync" 9 | "sync/atomic" 10 | "unsafe" 11 | ) 12 | 13 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 14 | // by multiple goroutines without additional locking or coordination. 15 | // Loads, stores, and deletes run in amortized constant time. 16 | // 17 | // The Map type is specialized. Most code should use a plain Go map instead, 18 | // with separate locking or coordination, for better type safety and to make it 19 | // easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map struct {
	// mu guards dirty and misses, and serializes stores to read.
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
56 | // 57 | // Once enough misses have occurred to cover the cost of copying the dirty 58 | // map, the dirty map will be promoted to the read map (in the unamended 59 | // state) and the next store to the map will make a new dirty copy. 60 | misses int 61 | //lock free length 62 | length int64 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnly struct { 67 | m map[interface{}]*entry 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expunged = unsafe.Pointer(new(interface{})) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entry struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func (m *Map) Length() int64 { 99 | return m.length 100 | } 101 | func newEntry(i interface{}) *entry { 102 | return &entry{p: unsafe.Pointer(&i)} 103 | } 104 | 105 | // Load returns the value stored in the map for a key, or nil if no 106 | // value is present. 
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

// load atomically reads the entry's value, reporting ok==false when the entry
// has been deleted (nil) or expunged.
func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// lengthReduce atomically decrements the lock-free O(1) length counter.
func (m *Map) lengthReduce() {
	atomic.AddInt64(&m.length, -1)
}

// lengthIncrease atomically increments the lock-free O(1) length counter.
func (m *Map) lengthIncrease() {
	atomic.AddInt64(&m.length, 1)
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	// Fast path: if the entry exists in the read map and its pointer is nil
	// or a normal value, we can store without taking the lock. If the entry
	// is expunged, the key is no longer in the dirty map, so we must not
	// store directly — fall through to the locked slow path instead.
	if e, ok := read.m[key]; ok {
		created, stored := e.tryStore(&value)
		if stored {
			if created {
				// The CAS resurrected a deleted (nil) entry, so the map grew.
				m.lengthIncrease()
			}
			return
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		// The key exists in the read map. If the entry was expunged (meaning
		// the key is absent from the dirty map), reset it to nil and re-insert
		// it into the dirty map before storing; if it was not expunged (the
		// key is still in the dirty map), we can store into it directly.
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e

		}
		if e.createOrMustStoreLocked(&value) {
			m.lengthIncrease()
		}
	} else if e, ok := m.dirty[key]; ok {
		if e.createOrMustStoreLocked(&value) {
			m.lengthIncrease()
		}
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			// dirtyLocked copies every normal key from the read map into the
			// dirty map; during that copy, nil entries in read are turned into
			// expunged because they are no longer reachable via dirty.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		m.lengthIncrease()
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) (created, stored bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false, false
	}

	// The race this loop handles: p may be observed as nil here while another
	// goroutine concurrently mutates e.p, so a single CAS against nil is not
	// enough — keep retrying, re-reading e.p, until we either store the value
	// or observe that the entry has been expunged.

	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(i)) {
			// Swapped in over nil: a previously deleted slot came back to
			// life, so the caller must bump the map length.
			return true, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false, false
		}
		if p == nil {
			// Another goroutine deleted the value between our load and the
			// CAS above; go around and retry the nil CAS.
			continue
		}
		// Replace an existing value in place: the length does not change.
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false, false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// createOrMustStoreLocked stores i into the entry, reporting whether the
// store filled a previously empty (nil) slot — i.e. whether the map grew.
//
// It must be called with m.mu held on an entry that is present in the dirty
// map, so the entry can never be observed in the expunged state here; seeing
// expunged would indicate a logic error, hence the panics.
func (e *entry) createOrMustStoreLocked(i *interface{}) (created bool) {
	// expunged cannot appear at this point: the caller holds mu and the
	// entry is reachable via the dirty map.
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		panic("found expunge here, logic err")
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(i)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			panic("found expunge here, logic err")
		}
		if p == nil {
			continue
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return false
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			panic("found expunge here, logic err")
		}
	}
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	created := false
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, created, ok := e.tryLoadOrStore(value)
		if ok {
			if created {
				m.lengthIncrease()
			}
			return actual, loaded
		}
	}
	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was expunged: e.p is now nil; put the entry back in
			// the dirty map, then store and return.
			m.dirty[key] = e
			// This call must create the value: e.p is nil here and cannot
			// change while mu is held and the key is only in the dirty map.
			actual, loaded, created, _ = e.tryLoadOrStore(value)
			if created {
				m.lengthIncrease()
			} else {
				panic("found not created , logic err")
			}
			m.mu.Unlock()
			return actual, loaded
		}
		actual, loaded, created, _ = e.tryLoadOrStore(value)
		if created {
			m.lengthIncrease()
		}
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, created, _ = e.tryLoadOrStore(value)
		if created {
			m.lengthIncrease()
		}
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		m.lengthIncrease()
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, created, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, false, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			// Stored into a previously nil slot: the value was created, so the
			// caller must bump the length counter.
			return i, false, true, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, false, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	// If the key exists in the read map, a normal entry is atomically turned
	// into nil. Otherwise the key is removed from the dirty map (under mu)
	// with the built-in delete.
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			de, dok := m.dirty[key]
			if dok {
				// The key lives only in the dirty map; remove it from there.
				dep := atomic.LoadPointer(&de.p)
				if dep == nil || dep == expunged {
					//
					// The entry held no live value: no length change.
					//
				} else {
					// The entry held a value, so the map shrinks.
					// de.p cannot change concurrently here: the key is absent
					// from the read map and the dirty map is locked.
					m.lengthReduce()
				}
				delete(m.dirty, key)
			}
		}
		m.mu.Unlock()
	}
	if ok {
		if e.delete() {
			m.lengthReduce()
		}
	}
}

// delete atomically clears the entry's value, reporting whether a live value
// was actually removed (nil/expunged entries were already dead).
func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls
// f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// missLocked records a read-map miss and, once misses reach the size of the
// dirty map, promotes the dirty map to the read map.
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked (re)creates the dirty map from the read map when it is nil.
// Must be called with m.mu held.
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	// Turn every nil entry in the read map into expunged, and copy every
	// normal entry into the dirty map.
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked CASes a nil entry to expunged, reporting whether the entry
// ends up in the expunged state.
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
--------------------------------------------------------------------------------