├── stubs.s ├── .gitignore ├── go.mod ├── .travis.yml ├── LICENSE ├── cmap_reference_test.go ├── map_test.go ├── README.md ├── cmap_bench_test.go ├── cmap_test.go ├── cmap.go └── map.go /stubs.s: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/lrita/cmap 2 | 3 | go 1.18 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.10.x 5 | - 1.11.x 6 | - 1.12.x 7 | - 1.13.x 8 | - 1.14.x 9 | - 1.15.x 10 | - 1.16.x 11 | - 1.17.x 12 | - 1.18.x 13 | - 1.19.x 14 | - 1.20.x 15 | 16 | # let us have speedy Docker-based Travis workers 17 | sudo: true 18 | 19 | script: 20 | - go test -v -race -coverprofile=coverage.txt -covermode=atomic 21 | 22 | after_success: 23 | - bash <(curl -s https://codecov.io/bash) 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2019 Neal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /cmap_reference_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package cmap_test 6 | 7 | import ( 8 | "sync" 9 | "sync/atomic" 10 | ) 11 | 12 | // This file contains reference map implementations for unit-tests. 13 | 14 | // mapInterface is the interface Map implements. 
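// The reference implementations below, the stdlib sync.Map, and cmap.Cmap all
// satisfy it, so the quick-check tests and the benchmarks can exercise every
// implementation through a single harness.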
15 | type mapInterface interface { 16 | Load(interface{}) (interface{}, bool) 17 | Store(key, value interface{}) 18 | LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) 19 | Delete(interface{}) 20 | Range(func(key, value interface{}) (shouldContinue bool)) 21 | } 22 | 23 | // RWMutexMap is an implementation of mapInterface using a sync.RWMutex. 24 | type RWMutexMap struct { 25 | mu sync.RWMutex 26 | dirty map[interface{}]interface{} 27 | } 28 | 29 | func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) { 30 | m.mu.RLock() 31 | value, ok = m.dirty[key] 32 | m.mu.RUnlock() 33 | return 34 | } 35 | 36 | func (m *RWMutexMap) Store(key, value interface{}) { 37 | m.mu.Lock() 38 | if m.dirty == nil { 39 | m.dirty = make(map[interface{}]interface{}) 40 | } 41 | m.dirty[key] = value 42 | m.mu.Unlock() 43 | } 44 | 45 | func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { 46 | m.mu.Lock() 47 | actual, loaded = m.dirty[key] 48 | if !loaded { 49 | actual = value 50 | if m.dirty == nil { 51 | m.dirty = make(map[interface{}]interface{}) 52 | } 53 | m.dirty[key] = value 54 | } 55 | m.mu.Unlock() 56 | return actual, loaded 57 | } 58 | 59 | func (m *RWMutexMap) Delete(key interface{}) { 60 | m.mu.Lock() 61 | delete(m.dirty, key) 62 | m.mu.Unlock() 63 | } 64 | 65 | func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) { 66 | m.mu.RLock() 67 | keys := make([]interface{}, 0, len(m.dirty)) 68 | for k := range m.dirty { 69 | keys = append(keys, k) 70 | } 71 | m.mu.RUnlock() 72 | 73 | for _, k := range keys { 74 | v, ok := m.Load(k) 75 | if !ok { 76 | continue 77 | } 78 | if !f(k, v) { 79 | break 80 | } 81 | } 82 | } 83 | 84 | // DeepCopyMap is an implementation of mapInterface using a Mutex and 85 | // atomic.Value. It makes deep copies of the map on every write to avoid 86 | // acquiring the Mutex in Load. 87 | type DeepCopyMap struct { 88 | mu sync.Mutex 89 | clean atomic.Value 90 | } 91 | 92 | func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) { 93 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 94 | value, ok = clean[key] 95 | return value, ok 96 | } 97 | 98 | func (m *DeepCopyMap) Store(key, value interface{}) { 99 | m.mu.Lock() 100 | dirty := m.dirty() 101 | dirty[key] = value 102 | m.clean.Store(dirty) 103 | m.mu.Unlock() 104 | } 105 | 106 | func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { 107 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 108 | actual, loaded = clean[key] 109 | if loaded { 110 | return actual, loaded 111 | } 112 | 113 | m.mu.Lock() 114 | // Reload clean in case it changed while we were waiting on m.mu. 
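// This is the usual double-checked pattern: the lock-free lookup above may
// have raced with a concurrent Store, so the result is re-verified under the
// mutex before a new copy is published.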
115 | clean, _ = m.clean.Load().(map[interface{}]interface{}) 116 | actual, loaded = clean[key] 117 | if !loaded { 118 | dirty := m.dirty() 119 | dirty[key] = value 120 | actual = value 121 | m.clean.Store(dirty) 122 | } 123 | m.mu.Unlock() 124 | return actual, loaded 125 | } 126 | 127 | func (m *DeepCopyMap) Delete(key interface{}) { 128 | m.mu.Lock() 129 | dirty := m.dirty() 130 | delete(dirty, key) 131 | m.clean.Store(dirty) 132 | m.mu.Unlock() 133 | } 134 | 135 | func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) { 136 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 137 | for k, v := range clean { 138 | if !f(k, v) { 139 | break 140 | } 141 | } 142 | } 143 | 144 | func (m *DeepCopyMap) dirty() map[interface{}]interface{} { 145 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 146 | dirty := make(map[interface{}]interface{}, len(clean)+1) 147 | for k, v := range clean { 148 | dirty[k] = v 149 | } 150 | return dirty 151 | } 152 | -------------------------------------------------------------------------------- /map_test.go: -------------------------------------------------------------------------------- 1 | //go:build go1.18 2 | // +build go1.18 3 | 4 | package cmap_test 5 | 6 | import ( 7 | "math/rand" 8 | "runtime" 9 | "sync" 10 | "testing" 11 | "testing/quick" 12 | 13 | "github.com/lrita/cmap" 14 | ) 15 | 16 | type StringMap struct { 17 | m cmap.Map[string, interface{}] 18 | } 19 | 20 | func (m *StringMap) Load(k interface{}) (interface{}, bool) { 21 | return m.m.Load(k.(string)) 22 | } 23 | 24 | func (m *StringMap) Store(key, value interface{}) { 25 | m.m.Store(key.(string), value) 26 | } 27 | 28 | func (m *StringMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { 29 | return m.m.LoadOrStore(key.(string), value) 30 | } 31 | 32 | func (m *StringMap) Delete(key interface{}) { 33 | m.m.Delete(key.(string)) 34 | } 35 | 36 | func (m *StringMap) Range(fn func(key, value interface{}) bool) { 37 | m.m.Range(func(k string, v interface{}) bool { 38 | return fn(k, v) 39 | }) 40 | } 41 | 42 | func applyGMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { 43 | return applyCalls(new(StringMap), calls) 44 | } 45 | 46 | func TestGMapMatchesRWMutex(t *testing.T) { 47 | if err := quick.CheckEqual(applyGMap, applyRWMutexMap, nil); err != nil { 48 | t.Error(err) 49 | } 50 | } 51 | 52 | func TestGMapMatchesDeepCopy(t *testing.T) { 53 | if err := quick.CheckEqual(applyGMap, applyDeepCopyMap, nil); err != nil { 54 | t.Error(err) 55 | } 56 | } 57 | 58 | func TestGMapConcurrentRange(t *testing.T) { 59 | const mapSize = 1 << 10 60 | 61 | m := new(cmap.Map[int64, any]) 62 | for n := int64(1); n <= mapSize; n++ { 63 | m.Store(n, int64(n)) 64 | } 65 | 66 | done := make(chan struct{}) 67 | var wg sync.WaitGroup 68 | defer func() { 69 | close(done) 70 | wg.Wait() 71 | }() 72 | for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { 73 | r := rand.New(rand.NewSource(g)) 74 | wg.Add(1) 75 | go func(g int64) { 76 | defer wg.Done() 77 | for i := int64(0); ; i++ { 78 | select { 79 | case <-done: 80 | return 81 | default: 82 | } 83 | for n := int64(1); n < mapSize; n++ { 84 | if r.Int63n(mapSize) == 0 { 85 | m.Store(n, n*i*g) 86 | } else { 87 | m.Load(n) 88 | } 89 | } 90 | } 91 | }(g) 92 | } 93 | 94 | iters := 1 << 10 95 | if testing.Short() { 96 | iters = 16 97 | } 98 | for n := iters; n > 0; n-- { 99 | seen := make(map[int64]bool, mapSize) 100 | 101 | m.Range(func(ki int64, vi interface{}) bool { 102 | k, v := ki, vi.(int64) 103 | if 
v%k != 0 { 104 | t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) 105 | } 106 | if seen[k] { 107 | t.Fatalf("Range visited key %v twice", k) 108 | } 109 | seen[k] = true 110 | return true 111 | }) 112 | 113 | if len(seen) != mapSize { 114 | t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) 115 | } 116 | } 117 | } 118 | 119 | func TestGMapCreation(t *testing.T) { 120 | m := cmap.Map[int, int]{} 121 | 122 | if m.Count() != 0 { 123 | t.Error("new map should be empty.") 124 | } 125 | if !m.IsEmpty() { 126 | t.Error("new map should be empty.") 127 | } 128 | } 129 | 130 | func TestGMapStoreOperationDuplicatedKey(t *testing.T) { 131 | m := cmap.Map[string, interface{}]{} 132 | m.Store("t", "") 133 | m.Store("t", "") 134 | if v := m.Count(); v != 1 { 135 | t.Errorf("map Count() should be %d, got %d", 1, v) 136 | } 137 | m.LoadOrStore("m", "") 138 | if v := m.Count(); v != 2 { 139 | t.Errorf("map Count() should be %d, got %d", 2, v) 140 | } 141 | m.Delete("t") 142 | if v := m.Count(); v != 1 { 143 | t.Errorf("map Count() should be %d, got %d", 1, v) 144 | } 145 | m.Delete("t") 146 | if v := m.Count(); v != 1 { 147 | t.Errorf("map Count() should be %d, got %d", 1, v) 148 | } 149 | } 150 | 151 | func TestGMapStoreAndLoad(t *testing.T) { 152 | const mapSize = 1 << 14 153 | 154 | var ( 155 | m cmap.Map[int64, interface{}] 156 | wg sync.WaitGroup 157 | seen = make(map[int64]bool, mapSize) 158 | ) 159 | 160 | for n := int64(1); n <= mapSize; n++ { 161 | nn := n 162 | wg.Add(1) 163 | go func() { 164 | defer wg.Done() 165 | m.Store(nn, nn) 166 | }() 167 | } 168 | 169 | wg.Wait() 170 | 171 | m.Range(func(ki int64, vi interface{}) bool { 172 | k, v := ki, vi.(int64) 173 | if v%k != 0 { 174 | t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) 175 | } 176 | if seen[k] { 177 | t.Fatalf("Range visited key %v twice", k) 178 | } 179 | seen[k] = true 180 | return true 181 | }) 182 | 183 | if len(seen) != mapSize { 184 | t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) 185 | } 186 | 187 | for n := int64(1); n <= mapSize; n++ { 188 | nn := n 189 | wg.Add(1) 190 | go func() { 191 | defer wg.Done() 192 | m.Delete(nn) 193 | }() 194 | } 195 | 196 | wg.Wait() 197 | 198 | if !m.IsEmpty() { 199 | t.Fatalf("Map should be empty, remained %v", m.Count()) 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cmap [![Build Status](https://travis-ci.org/lrita/cmap.svg?branch=master)](https://travis-ci.org/lrita/cmap) [![GoDoc](https://godoc.org/github.com/lrita/cmap?status.png)](https://godoc.org/github.com/lrita/cmap) [![codecov](https://codecov.io/gh/lrita/cmap/branch/master/graph/badge.svg)](https://codecov.io/gh/lrita/cmap) [![Go Report Card](https://goreportcard.com/badge/github.com/lrita/cmap)](https://goreportcard.com/report/github.com/lrita/cmap) 2 | 3 | The `map` type in Go doesn't support concurrent reads and writes. `cmap(concurrent-map)` provides a high-performance solution to this by sharding the map with minimal time spent waiting for locks. 4 | 5 | The `sync.Map` has a few key differences from this map. The stdlib `sync.Map` is designed for append-only scenarios. So if you want to use the map for something more like in-memory db, you might benefit from using our version. 
You can read more about it in the golang repo, for example [here](https://github.com/golang/go/issues/21035) and [here](https://stackoverflow.com/questions/11063473/map-with-concurrent-access) 6 | 7 | _Here we fork some README document from [concurrent-map](https://github.com/orcaman/concurrent-map)_ 8 | 9 | ## usage 10 | 11 | Import the package: 12 | 13 | ```go 14 | import ( 15 | "github.com/lrita/cmap" 16 | ) 17 | 18 | ``` 19 | 20 | ```bash 21 | go get "github.com/lrita/cmap" 22 | ``` 23 | 24 | The package is now imported under the "cmap" namespace. 25 | 26 | ## example 27 | 28 | ```go 29 | 30 | // Create a new map. 31 | var m cmap.Cmap 32 | 33 | // Stores item within map, sets "bar" under key "foo" 34 | m.Store("foo", "bar") 35 | 36 | // Retrieve item from map. 37 | if tmp, ok := m.Load("foo"); ok { 38 | bar := tmp.(string) 39 | } 40 | 41 | // Deletes item under key "foo" 42 | m.Delete("foo") 43 | 44 | // If you are using g1.18+, you can use the generics implementation 45 | 46 | var n cmap.Map[string, string] 47 | 48 | // Stores item within map, sets "bar" under key "foo" 49 | n.Store("foo", "bar") 50 | 51 | // Retrieve item from map. 52 | if tmp, ok := n.Load("foo"); ok { 53 | bar := tmp 54 | } 55 | 56 | // Deletes item under key "foo" 57 | n.Delete("foo") 58 | ``` 59 | 60 | ## benchmark 61 | 62 | ```bash 63 | goos: darwin 64 | goarch: amd64 65 | pkg: github.com/lrita/cmap 66 | BenchmarkLoadMostlyHits/*cmap_test.DeepCopyMap-4 50000000 34.5 ns/op 67 | BenchmarkLoadMostlyHits/*cmap_test.RWMutexMap-4 20000000 65.2 ns/op 68 | BenchmarkLoadMostlyHits/*sync.Map-4 50000000 34.8 ns/op 69 | BenchmarkLoadMostlyHits/*cmap.Cmap-4 30000000 53.5 ns/op 70 | BenchmarkLoadMostlyMisses/*cmap_test.DeepCopyMap-4 50000000 26.7 ns/op 71 | BenchmarkLoadMostlyMisses/*cmap_test.RWMutexMap-4 20000000 62.5 ns/op 72 | BenchmarkLoadMostlyMisses/*sync.Map-4 50000000 22.7 ns/op 73 | BenchmarkLoadMostlyMisses/*cmap.Cmap-4 30000000 40.3 ns/op 74 | --- SKIP: BenchmarkLoadOrStoreBalanced/*cmap_test.DeepCopyMap 75 | cmap_bench_test.go:91: DeepCopyMap has quadratic running time. 76 | BenchmarkLoadOrStoreBalanced/*cmap_test.RWMutexMap-4 3000000 437 ns/op 77 | BenchmarkLoadOrStoreBalanced/*sync.Map-4 3000000 546 ns/op 78 | BenchmarkLoadOrStoreBalanced/*cmap.Cmap-4 3000000 497 ns/op 79 | --- SKIP: BenchmarkLoadOrStoreUnique/*cmap_test.DeepCopyMap 80 | cmap_bench_test.go:123: DeepCopyMap has quadratic running time. 
81 | BenchmarkLoadOrStoreUnique/*cmap_test.RWMutexMap-4 2000000 990 ns/op 82 | BenchmarkLoadOrStoreUnique/*sync.Map-4 1000000 1032 ns/op 83 | BenchmarkLoadOrStoreUnique/*cmap.Cmap-4 2000000 892 ns/op 84 | BenchmarkLoadOrStoreCollision/*cmap_test.DeepCopyMap-4 100000000 18.2 ns/op 85 | BenchmarkLoadOrStoreCollision/*cmap_test.RWMutexMap-4 10000000 165 ns/op 86 | BenchmarkLoadOrStoreCollision/*sync.Map-4 100000000 19.6 ns/op 87 | BenchmarkLoadOrStoreCollision/*cmap.Cmap-4 20000000 65.7 ns/op 88 | BenchmarkRange/*cmap_test.DeepCopyMap-4 200000 8646 ns/op 89 | BenchmarkRange/*cmap_test.RWMutexMap-4 20000 62046 ns/op 90 | BenchmarkRange/*sync.Map-4 200000 9317 ns/op 91 | BenchmarkRange/*cmap.Cmap-4 50000 31107 ns/op 92 | BenchmarkAdversarialAlloc/*cmap_test.DeepCopyMap-4 2000000 531 ns/op 93 | BenchmarkAdversarialAlloc/*cmap_test.RWMutexMap-4 20000000 74.3 ns/op 94 | BenchmarkAdversarialAlloc/*sync.Map-4 5000000 390 ns/op 95 | BenchmarkAdversarialAlloc/*cmap.Cmap-4 30000000 53.6 ns/op 96 | BenchmarkAdversarialDelete/*cmap_test.DeepCopyMap-4 5000000 273 ns/op 97 | BenchmarkAdversarialDelete/*cmap_test.RWMutexMap-4 20000000 94.4 ns/op 98 | BenchmarkAdversarialDelete/*sync.Map-4 10000000 137 ns/op 99 | BenchmarkAdversarialDelete/*cmap.Cmap-4 30000000 43.3 ns/op 100 | ``` 101 | -------------------------------------------------------------------------------- /cmap_bench_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package cmap_test 6 | 7 | import ( 8 | "fmt" 9 | "reflect" 10 | "sync" 11 | "sync/atomic" 12 | "testing" 13 | 14 | "github.com/lrita/cmap" 15 | ) 16 | 17 | type bench struct { 18 | setup func(*testing.B, mapInterface) 19 | perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) 20 | } 21 | 22 | func benchMap(b *testing.B, bench bench) { 23 | for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}, &cmap.Cmap{}} { 24 | b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { 25 | m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) 26 | if bench.setup != nil { 27 | bench.setup(b, m) 28 | } 29 | 30 | b.ResetTimer() 31 | 32 | var i int64 33 | b.RunParallel(func(pb *testing.PB) { 34 | id := int(atomic.AddInt64(&i, 1) - 1) 35 | bench.perG(b, pb, id*b.N, m) 36 | }) 37 | }) 38 | } 39 | } 40 | 41 | func BenchmarkLoadMostlyHits(b *testing.B) { 42 | const hits, misses = 1023, 1 43 | 44 | benchMap(b, bench{ 45 | setup: func(_ *testing.B, m mapInterface) { 46 | for i := 0; i < hits; i++ { 47 | m.LoadOrStore(i, i) 48 | } 49 | // Prime the map to get it into a steady state. 50 | for i := 0; i < hits*2; i++ { 51 | m.Load(i % hits) 52 | } 53 | }, 54 | 55 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 56 | for ; pb.Next(); i++ { 57 | m.Load(i % (hits + misses)) 58 | } 59 | }, 60 | }) 61 | } 62 | 63 | func BenchmarkLoadMostlyMisses(b *testing.B) { 64 | const hits, misses = 1, 1023 65 | 66 | benchMap(b, bench{ 67 | setup: func(_ *testing.B, m mapInterface) { 68 | for i := 0; i < hits; i++ { 69 | m.LoadOrStore(i, i) 70 | } 71 | // Prime the map to get it into a steady state. 
72 | for i := 0; i < hits*2; i++ { 73 | m.Load(i % hits) 74 | } 75 | }, 76 | 77 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 78 | for ; pb.Next(); i++ { 79 | m.Load(i % (hits + misses)) 80 | } 81 | }, 82 | }) 83 | } 84 | 85 | func BenchmarkLoadOrStoreBalanced(b *testing.B) { 86 | const hits, misses = 128, 128 87 | 88 | benchMap(b, bench{ 89 | setup: func(b *testing.B, m mapInterface) { 90 | if _, ok := m.(*DeepCopyMap); ok { 91 | b.Skip("DeepCopyMap has quadratic running time.") 92 | } 93 | for i := 0; i < hits; i++ { 94 | m.LoadOrStore(i, i) 95 | } 96 | // Prime the map to get it into a steady state. 97 | for i := 0; i < hits*2; i++ { 98 | m.Load(i % hits) 99 | } 100 | }, 101 | 102 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 103 | for ; pb.Next(); i++ { 104 | j := i % (hits + misses) 105 | if j < hits { 106 | if _, ok := m.LoadOrStore(j, i); !ok { 107 | b.Fatalf("unexpected miss for %v", j) 108 | } 109 | } else { 110 | if v, loaded := m.LoadOrStore(i, i); loaded { 111 | b.Fatalf("failed to store %v: existing value %v", i, v) 112 | } 113 | } 114 | } 115 | }, 116 | }) 117 | } 118 | 119 | func BenchmarkLoadOrStoreUnique(b *testing.B) { 120 | benchMap(b, bench{ 121 | setup: func(b *testing.B, m mapInterface) { 122 | if _, ok := m.(*DeepCopyMap); ok { 123 | b.Skip("DeepCopyMap has quadratic running time.") 124 | } 125 | }, 126 | 127 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 128 | for ; pb.Next(); i++ { 129 | m.LoadOrStore(i, i) 130 | } 131 | }, 132 | }) 133 | } 134 | 135 | func BenchmarkLoadOrStoreCollision(b *testing.B) { 136 | benchMap(b, bench{ 137 | setup: func(_ *testing.B, m mapInterface) { 138 | m.LoadOrStore(0, 0) 139 | }, 140 | 141 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 142 | for ; pb.Next(); i++ { 143 | m.LoadOrStore(0, 0) 144 | } 145 | }, 146 | }) 147 | } 148 | 149 | func BenchmarkRange(b *testing.B) { 150 | const mapSize = 1 << 10 151 | 152 | benchMap(b, bench{ 153 | setup: func(_ *testing.B, m mapInterface) { 154 | for i := 0; i < mapSize; i++ { 155 | m.Store(i, i) 156 | } 157 | }, 158 | 159 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 160 | for ; pb.Next(); i++ { 161 | m.Range(func(_, _ interface{}) bool { return true }) 162 | } 163 | }, 164 | }) 165 | } 166 | 167 | // BenchmarkAdversarialAlloc tests performance when we store a new value 168 | // immediately whenever the map is promoted to clean and otherwise load a 169 | // unique, missing key. 170 | // 171 | // This forces the Load calls to always acquire the map's mutex. 172 | func BenchmarkAdversarialAlloc(b *testing.B) { 173 | benchMap(b, bench{ 174 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 175 | var stores, loadsSinceStore int64 176 | for ; pb.Next(); i++ { 177 | m.Load(i) 178 | if loadsSinceStore++; loadsSinceStore > stores { 179 | m.LoadOrStore(i, stores) 180 | loadsSinceStore = 0 181 | stores++ 182 | } 183 | } 184 | }, 185 | }) 186 | } 187 | 188 | // BenchmarkAdversarialDelete tests performance when we periodically delete 189 | // one key and add a different one in a large map. 190 | // 191 | // This forces the Load calls to always acquire the map's mutex and periodically 192 | // makes a full copy of the map despite changing only one entry. 
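// (The full-copy cost applies to copy-on-write implementations such as
// DeepCopyMap; a sharded map like Cmap only locks the bucket that owns the
// affected key.)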
193 | func BenchmarkAdversarialDelete(b *testing.B) { 194 | const mapSize = 1 << 10 195 | 196 | benchMap(b, bench{ 197 | setup: func(_ *testing.B, m mapInterface) { 198 | for i := 0; i < mapSize; i++ { 199 | m.Store(i, i) 200 | } 201 | }, 202 | 203 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 204 | for ; pb.Next(); i++ { 205 | m.Load(i) 206 | 207 | if i%mapSize == 0 { 208 | m.Range(func(k, _ interface{}) bool { 209 | m.Delete(k) 210 | return false 211 | }) 212 | m.Store(i, i) 213 | } 214 | } 215 | }, 216 | }) 217 | } 218 | -------------------------------------------------------------------------------- /cmap_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package cmap_test 6 | 7 | import ( 8 | "math/rand" 9 | "reflect" 10 | "runtime" 11 | "sync" 12 | "testing" 13 | "testing/quick" 14 | 15 | "github.com/lrita/cmap" 16 | ) 17 | 18 | type mapOp string 19 | 20 | const ( 21 | opLoad = mapOp("Load") 22 | opStore = mapOp("Store") 23 | opLoadOrStore = mapOp("LoadOrStore") 24 | opDelete = mapOp("Delete") 25 | ) 26 | 27 | var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete} 28 | 29 | // mapCall is a quick.Generator for calls on mapInterface. 30 | type mapCall struct { 31 | op mapOp 32 | k, v interface{} 33 | } 34 | 35 | func (c mapCall) apply(m mapInterface) (interface{}, bool) { 36 | switch c.op { 37 | case opLoad: 38 | return m.Load(c.k) 39 | case opStore: 40 | m.Store(c.k, c.v) 41 | return nil, false 42 | case opLoadOrStore: 43 | return m.LoadOrStore(c.k, c.v) 44 | case opDelete: 45 | m.Delete(c.k) 46 | return nil, false 47 | default: 48 | panic("invalid mapOp") 49 | } 50 | } 51 | 52 | type mapResult struct { 53 | value interface{} 54 | ok bool 55 | } 56 | 57 | func randValue(r *rand.Rand) interface{} { 58 | b := make([]byte, r.Intn(4)) 59 | for i := range b { 60 | b[i] = 'a' + byte(rand.Intn(26)) 61 | } 62 | return string(b) 63 | } 64 | 65 | func (mapCall) Generate(r *rand.Rand, size int) reflect.Value { 66 | c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)} 67 | switch c.op { 68 | case opStore, opLoadOrStore: 69 | c.v = randValue(r) 70 | } 71 | return reflect.ValueOf(c) 72 | } 73 | 74 | func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) { 75 | for _, c := range calls { 76 | v, ok := c.apply(m) 77 | results = append(results, mapResult{v, ok}) 78 | } 79 | 80 | final = make(map[interface{}]interface{}) 81 | m.Range(func(k, v interface{}) bool { 82 | final[k] = v 83 | return true 84 | }) 85 | 86 | return results, final 87 | } 88 | 89 | func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { 90 | return applyCalls(new(cmap.Cmap), calls) 91 | } 92 | 93 | func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { 94 | return applyCalls(new(RWMutexMap), calls) 95 | } 96 | 97 | func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { 98 | return applyCalls(new(DeepCopyMap), calls) 99 | } 100 | 101 | func TestMapMatchesRWMutex(t *testing.T) { 102 | if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil { 103 | t.Error(err) 104 | } 105 | } 106 | 107 | func TestMapMatchesDeepCopy(t *testing.T) { 108 | if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil { 109 | t.Error(err) 110 | 
} 111 | } 112 | 113 | func TestConcurrentRange(t *testing.T) { 114 | const mapSize = 1 << 10 115 | 116 | m := new(cmap.Cmap) 117 | for n := int64(1); n <= mapSize; n++ { 118 | m.Store(n, int64(n)) 119 | } 120 | 121 | done := make(chan struct{}) 122 | var wg sync.WaitGroup 123 | defer func() { 124 | close(done) 125 | wg.Wait() 126 | }() 127 | for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { 128 | r := rand.New(rand.NewSource(g)) 129 | wg.Add(1) 130 | go func(g int64) { 131 | defer wg.Done() 132 | for i := int64(0); ; i++ { 133 | select { 134 | case <-done: 135 | return 136 | default: 137 | } 138 | for n := int64(1); n < mapSize; n++ { 139 | if r.Int63n(mapSize) == 0 { 140 | m.Store(n, n*i*g) 141 | } else { 142 | m.Load(n) 143 | } 144 | } 145 | } 146 | }(g) 147 | } 148 | 149 | iters := 1 << 10 150 | if testing.Short() { 151 | iters = 16 152 | } 153 | for n := iters; n > 0; n-- { 154 | seen := make(map[int64]bool, mapSize) 155 | 156 | m.Range(func(ki, vi interface{}) bool { 157 | k, v := ki.(int64), vi.(int64) 158 | if v%k != 0 { 159 | t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) 160 | } 161 | if seen[k] { 162 | t.Fatalf("Range visited key %v twice", k) 163 | } 164 | seen[k] = true 165 | return true 166 | }) 167 | 168 | if len(seen) != mapSize { 169 | t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) 170 | } 171 | } 172 | } 173 | 174 | func TestMapCreation(t *testing.T) { 175 | m := cmap.Cmap{} 176 | 177 | if m.Count() != 0 { 178 | t.Error("new map should be empty.") 179 | } 180 | if !m.IsEmpty() { 181 | t.Error("new map should be empty.") 182 | } 183 | } 184 | 185 | func TestStoreOperationDuplicatedKey(t *testing.T) { 186 | m := cmap.Cmap{} 187 | m.Store(t, "") 188 | m.Store(t, "") 189 | if v := m.Count(); v != 1 { 190 | t.Errorf("map Count() should be %d, got %d", 1, v) 191 | } 192 | m.LoadOrStore("m", "") 193 | if v := m.Count(); v != 2 { 194 | t.Errorf("map Count() should be %d, got %d", 2, v) 195 | } 196 | m.Delete(t) 197 | if v := m.Count(); v != 1 { 198 | t.Errorf("map Count() should be %d, got %d", 1, v) 199 | } 200 | m.Delete(t) 201 | if v := m.Count(); v != 1 { 202 | t.Errorf("map Count() should be %d, got %d", 1, v) 203 | } 204 | } 205 | 206 | func TestMapStoreAndLoad(t *testing.T) { 207 | const mapSize = 1 << 14 208 | 209 | var ( 210 | m cmap.Cmap 211 | wg sync.WaitGroup 212 | seen = make(map[int64]bool, mapSize) 213 | ) 214 | 215 | for n := int64(1); n <= mapSize; n++ { 216 | nn := n 217 | wg.Add(1) 218 | go func() { 219 | defer wg.Done() 220 | m.Store(nn, nn) 221 | }() 222 | } 223 | 224 | wg.Wait() 225 | 226 | m.Range(func(ki, vi interface{}) bool { 227 | k, v := ki.(int64), vi.(int64) 228 | if v%k != 0 { 229 | t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) 230 | } 231 | if seen[k] { 232 | t.Fatalf("Range visited key %v twice", k) 233 | } 234 | seen[k] = true 235 | return true 236 | }) 237 | 238 | if len(seen) != mapSize { 239 | t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) 240 | } 241 | 242 | for n := int64(1); n <= mapSize; n++ { 243 | nn := n 244 | wg.Add(1) 245 | go func() { 246 | defer wg.Done() 247 | m.Delete(nn) 248 | }() 249 | } 250 | 251 | wg.Wait() 252 | 253 | if !m.IsEmpty() { 254 | t.Fatalf("Map should be empty, remained %v", m.Count()) 255 | } 256 | } 257 | -------------------------------------------------------------------------------- /cmap.go: -------------------------------------------------------------------------------- 1 | package cmap 2 | 3 | import ( 4 | 
"sync" 5 | "sync/atomic" 6 | "unsafe" 7 | ) 8 | 9 | const ( 10 | mInitialSize = 1 << 4 11 | mOverflowThreshold = 1 << 6 12 | mOverflowGrowThreshold = 1 << 7 13 | ) 14 | 15 | // Cmap is a "thread" safe map of type AnyComparableType:Any. 16 | // To avoid lock bottlenecks this map is dived to several map shards. 17 | // We can store different type key and value into the same map. 18 | type Cmap struct { 19 | lock sync.Mutex 20 | inode unsafe.Pointer // *inode 21 | count int64 22 | } 23 | 24 | type inode struct { 25 | mask uintptr 26 | overflow int64 27 | growThreshold int64 28 | shrinkThreshold int64 29 | resizeInProgress int64 30 | pred unsafe.Pointer // *inode 31 | buckets []bucket 32 | } 33 | 34 | type entry struct { 35 | key, value interface{} 36 | } 37 | 38 | type bucket struct { 39 | lock sync.RWMutex 40 | init int64 41 | m map[interface{}]interface{} 42 | frozen bool 43 | } 44 | 45 | // Store sets the value for a key. 46 | func (m *Cmap) Store(key, value interface{}) { 47 | hash := ehash(key) 48 | for { 49 | inode, b := m.getInodeAndBucket(hash) 50 | if b.tryStore(m, inode, false, key, value) { 51 | return 52 | } 53 | } 54 | } 55 | 56 | // Load returns the value stored in the map for a key, or nil if no 57 | // value is present. 58 | // The ok result indicates whether value was found in the map. 59 | func (m *Cmap) Load(key interface{}) (value interface{}, ok bool) { 60 | hash := ehash(key) 61 | _, b := m.getInodeAndBucket(hash) 62 | return b.tryLoad(key) 63 | } 64 | 65 | // LoadOrStore returns the existing value for the key if present. 66 | // Otherwise, it stores and returns the given value. 67 | // The loaded result is true if the value was loaded, false if stored. 68 | func (m *Cmap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { 69 | hash := ehash(key) 70 | for { 71 | inode, b := m.getInodeAndBucket(hash) 72 | actual, loaded = b.tryLoad(key) 73 | if loaded { 74 | return 75 | } 76 | if b.tryStore(m, inode, true, key, value) { 77 | return value, false 78 | } 79 | } 80 | } 81 | 82 | // Delete deletes the value for a key. 83 | func (m *Cmap) Delete(key interface{}) { 84 | hash := ehash(key) 85 | for { 86 | inode, b := m.getInodeAndBucket(hash) 87 | if b.tryDelete(m, inode, key) { 88 | return 89 | } 90 | } 91 | } 92 | 93 | // Range calls f sequentially for each key and value present in the map. 94 | // If f returns false, range stops the iteration. 95 | // 96 | // Range does not necessarily correspond to any consistent snapshot of the Map's 97 | // contents: no key will be visited more than once, but if the value for any key 98 | // is stored or deleted concurrently, Range may reflect any mapping for that key 99 | // from any point during the Range call. 100 | // 101 | // Range may be O(N) with the number of elements in the map even if f returns 102 | // false after a constant number of calls. 103 | func (m *Cmap) Range(f func(key, value interface{}) bool) { 104 | n := m.getInode() 105 | for i := 0; i < len(n.buckets); i++ { 106 | b := &(n.buckets[i]) 107 | if !b.inited() { 108 | n.initBucket(uintptr(i)) 109 | } 110 | for _, e := range b.clone() { 111 | if !f(e.key, e.value) { 112 | return 113 | } 114 | } 115 | } 116 | } 117 | 118 | // Count returns the number of elements within the map. 119 | func (m *Cmap) Count() int { 120 | return int(atomic.LoadInt64(&m.count)) 121 | } 122 | 123 | // IsEmpty checks if map is empty. 
124 | func (m *Cmap) IsEmpty() bool { 125 | return m.Count() == 0 126 | } 127 | 128 | func (m *Cmap) getInode() *inode { 129 | n := (*inode)(atomic.LoadPointer(&m.inode)) 130 | if n == nil { 131 | m.lock.Lock() 132 | n = (*inode)(atomic.LoadPointer(&m.inode)) 133 | if n == nil { 134 | n = &inode{ 135 | mask: uintptr(mInitialSize - 1), 136 | growThreshold: int64(mInitialSize * mOverflowThreshold), 137 | shrinkThreshold: 0, 138 | buckets: make([]bucket, mInitialSize), 139 | } 140 | atomic.StorePointer(&m.inode, unsafe.Pointer(n)) 141 | } 142 | m.lock.Unlock() 143 | } 144 | return n 145 | } 146 | 147 | func (m *Cmap) getInodeAndBucket(hash uintptr) (*inode, *bucket) { 148 | n := m.getInode() 149 | i := hash & n.mask 150 | b := &(n.buckets[i]) 151 | if !b.inited() { 152 | n.initBucket(i) 153 | } 154 | return n, b 155 | } 156 | 157 | func (n *inode) initBuckets() { 158 | for i := range n.buckets { 159 | n.initBucket(uintptr(i)) 160 | } 161 | atomic.StorePointer(&n.pred, nil) 162 | } 163 | 164 | func (n *inode) initBucket(i uintptr) { 165 | b := &(n.buckets[i]) 166 | b.lock.Lock() 167 | if b.inited() { 168 | b.lock.Unlock() 169 | return 170 | } 171 | 172 | b.m = make(map[interface{}]interface{}) 173 | p := (*inode)(atomic.LoadPointer(&n.pred)) // predecessor 174 | if p != nil { 175 | if n.mask > p.mask { 176 | // Grow 177 | pb := &(p.buckets[i&p.mask]) 178 | if !pb.inited() { 179 | p.initBucket(i & p.mask) 180 | } 181 | for k, v := range pb.freeze() { 182 | hash := ehash(k) 183 | if hash&n.mask == i { 184 | b.m[k] = v 185 | } 186 | } 187 | } else { 188 | // Shrink 189 | pb0 := &(p.buckets[i]) 190 | if !pb0.inited() { 191 | p.initBucket(i) 192 | } 193 | pb1 := &(p.buckets[i+uintptr(len(n.buckets))]) 194 | if !pb1.inited() { 195 | p.initBucket(i + uintptr(len(n.buckets))) 196 | } 197 | for k, v := range pb0.freeze() { 198 | b.m[k] = v 199 | } 200 | for k, v := range pb1.freeze() { 201 | b.m[k] = v 202 | } 203 | } 204 | if len(b.m) > mOverflowThreshold { 205 | atomic.AddInt64(&n.overflow, int64(len(b.m)-mOverflowThreshold)) 206 | } 207 | } 208 | 209 | atomic.StoreInt64(&b.init, 1) 210 | b.lock.Unlock() 211 | } 212 | 213 | func (b *bucket) inited() bool { 214 | return atomic.LoadInt64(&b.init) == 1 215 | } 216 | 217 | func (b *bucket) freeze() map[interface{}]interface{} { 218 | b.lock.Lock() 219 | b.frozen = true 220 | m := b.m 221 | b.lock.Unlock() 222 | return m 223 | } 224 | 225 | func (b *bucket) clone() []entry { 226 | b.lock.RLock() 227 | entries := make([]entry, 0, len(b.m)) 228 | for k, v := range b.m { 229 | entries = append(entries, entry{key: k, value: v}) 230 | } 231 | b.lock.RUnlock() 232 | return entries 233 | } 234 | 235 | func (b *bucket) tryLoad(key interface{}) (value interface{}, ok bool) { 236 | b.lock.RLock() 237 | value, ok = b.m[key] 238 | b.lock.RUnlock() 239 | return 240 | } 241 | 242 | func (b *bucket) tryStore(m *Cmap, n *inode, check bool, key, value interface{}) (done bool) { 243 | b.lock.Lock() 244 | if b.frozen { 245 | b.lock.Unlock() 246 | return 247 | } 248 | 249 | if check { 250 | if _, ok := b.m[key]; ok { 251 | b.lock.Unlock() 252 | return 253 | } 254 | } 255 | 256 | l0 := len(b.m) // Using length check existence is faster than accessing. 
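// Insert while holding the bucket lock, then compare the bucket's length
// before and after: an unchanged length means the key already existed and
// only its value was replaced, so no counters move. A longer map bumps the
// global count (and the inode's overflow counter once a bucket exceeds
// mOverflowThreshold); if a threshold is crossed, the single winner of the
// resizeInProgress CAS publishes a doubled bucket array and migrates the old
// buckets asynchronously via initBuckets.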
257 | b.m[key] = value 258 | length := len(b.m) 259 | b.lock.Unlock() 260 | 261 | if l0 == length { 262 | return true 263 | } 264 | 265 | // Update counter 266 | grow := atomic.AddInt64(&m.count, 1) >= n.growThreshold 267 | if length > mOverflowThreshold { 268 | grow = grow || atomic.AddInt64(&n.overflow, 1) >= mOverflowGrowThreshold 269 | } 270 | 271 | // Grow 272 | if grow && atomic.CompareAndSwapInt64(&n.resizeInProgress, 0, 1) { 273 | nlen := len(n.buckets) << 1 274 | node := &inode{ 275 | mask: uintptr(nlen) - 1, 276 | pred: unsafe.Pointer(n), 277 | growThreshold: int64(nlen) * mOverflowThreshold, 278 | shrinkThreshold: int64(nlen) >> 1, 279 | buckets: make([]bucket, nlen), 280 | } 281 | ok := atomic.CompareAndSwapPointer(&m.inode, unsafe.Pointer(n), unsafe.Pointer(node)) 282 | if !ok { 283 | panic("BUG: failed swapping head") 284 | } 285 | go node.initBuckets() 286 | } 287 | 288 | return true 289 | } 290 | 291 | func (b *bucket) tryDelete(m *Cmap, n *inode, key interface{}) (done bool) { 292 | b.lock.Lock() 293 | if b.frozen { 294 | b.lock.Unlock() 295 | return 296 | } 297 | 298 | l0 := len(b.m) 299 | delete(b.m, key) 300 | length := len(b.m) 301 | b.lock.Unlock() 302 | 303 | if l0 == length { 304 | return true 305 | } 306 | 307 | // Update counter 308 | shrink := atomic.AddInt64(&m.count, -1) < n.shrinkThreshold 309 | if length >= mOverflowThreshold { 310 | atomic.AddInt64(&n.overflow, -1) 311 | } 312 | // Shrink 313 | if shrink && len(n.buckets) > mInitialSize && atomic.CompareAndSwapInt64(&n.resizeInProgress, 0, 1) { 314 | nlen := len(n.buckets) >> 1 315 | node := &inode{ 316 | mask: uintptr(nlen) - 1, 317 | pred: unsafe.Pointer(n), 318 | growThreshold: int64(nlen) * mOverflowThreshold, 319 | shrinkThreshold: int64(nlen) >> 1, 320 | buckets: make([]bucket, nlen), 321 | } 322 | ok := atomic.CompareAndSwapPointer(&m.inode, unsafe.Pointer(n), unsafe.Pointer(node)) 323 | if !ok { 324 | panic("BUG: failed swapping head") 325 | } 326 | go node.initBuckets() 327 | } 328 | return true 329 | } 330 | 331 | func ehash(i interface{}) uintptr { 332 | return nilinterhash(noescape(unsafe.Pointer(&i)), 0xdeadbeef) 333 | } 334 | 335 | //go:linkname nilinterhash runtime.nilinterhash 336 | func nilinterhash(p unsafe.Pointer, h uintptr) uintptr 337 | 338 | //go:nocheckptr 339 | //go:nosplit 340 | func noescape(p unsafe.Pointer) unsafe.Pointer { 341 | x := uintptr(p) 342 | return unsafe.Pointer(x ^ 0) 343 | } 344 | -------------------------------------------------------------------------------- /map.go: -------------------------------------------------------------------------------- 1 | //go:build go1.18 2 | // +build go1.18 3 | 4 | package cmap 5 | 6 | import ( 7 | "reflect" 8 | "sync" 9 | "sync/atomic" 10 | "unsafe" 11 | ) 12 | 13 | // Map is a "thread" generics safe map of type AnyComparableType:Any 14 | // (AnyComparableType exclude interface type). 15 | // To avoid lock bottlenecks this map is dived to several map shards. 
16 | type Map[K comparable, V any] struct { 17 | lock sync.Mutex 18 | inode unsafe.Pointer // *inode2 19 | typ *rtype 20 | count int64 21 | } 22 | 23 | type bucket2[K comparable, V any] struct { 24 | lock sync.RWMutex 25 | init int64 26 | m map[K]V 27 | frozen bool 28 | } 29 | 30 | type entry2[K any, V any] struct { 31 | key K 32 | value V 33 | } 34 | 35 | type inode2[K comparable, V any] struct { 36 | mask uintptr 37 | overflow int64 38 | growThreshold int64 39 | shrinkThreshold int64 40 | resizeInProgress int64 41 | pred unsafe.Pointer // *inode 42 | buckets []bucket2[K, V] 43 | } 44 | 45 | // Store sets the value for a key. 46 | func (m *Map[K, V]) Store(key K, value V) { 47 | hash := m.ehash(key) 48 | for { 49 | inode, b := m.getInodeAndBucket(hash) 50 | if b.tryStore(m, inode, false, key, value) { 51 | return 52 | } 53 | } 54 | } 55 | 56 | // Load returns the value stored in the map for a key, or nil if no 57 | // value is present. 58 | // The ok result indicates whether value was found in the map. 59 | func (m *Map[K, V]) Load(key K) (value V, ok bool) { 60 | hash := m.ehash(key) 61 | _, b := m.getInodeAndBucket(hash) 62 | return b.tryLoad(key) 63 | } 64 | 65 | // LoadOrStore returns the existing value for the key if present. 66 | // Otherwise, it stores and returns the given value. 67 | // The loaded result is true if the value was loaded, false if stored. 68 | func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { 69 | hash := m.ehash(key) 70 | for { 71 | inode, b := m.getInodeAndBucket(hash) 72 | actual, loaded = b.tryLoad(key) 73 | if loaded { 74 | return 75 | } 76 | if b.tryStore(m, inode, true, key, value) { 77 | return value, false 78 | } 79 | } 80 | } 81 | 82 | // Delete deletes the value for a key. 83 | func (m *Map[K, V]) Delete(key K) { 84 | hash := m.ehash(key) 85 | for { 86 | inode, b := m.getInodeAndBucket(hash) 87 | if b.tryDelete(m, inode, key) { 88 | return 89 | } 90 | } 91 | } 92 | 93 | // Range calls f sequentially for each key and value present in the map. 94 | // If f returns false, range stops the iteration. 95 | // 96 | // Range does not necessarily correspond to any consistent snapshot of the Map's 97 | // contents: no key will be visited more than once, but if the value for any key 98 | // is stored or deleted concurrently, Range may reflect any mapping for that key 99 | // from any point during the Range call. 100 | // 101 | // Range may be O(N) with the number of elements in the map even if f returns 102 | // false after a constant number of calls. 103 | func (m *Map[K, V]) Range(f func(key K, value V) bool) { 104 | n := m.getInode() 105 | for i := 0; i < len(n.buckets); i++ { 106 | b := &(n.buckets[i]) 107 | if !b.inited() { 108 | n.initBucket(m, uintptr(i)) 109 | } 110 | for _, e := range b.clone() { 111 | if !f(e.key, e.value) { 112 | return 113 | } 114 | } 115 | } 116 | } 117 | 118 | // Count returns the number of elements within the map. 119 | func (m *Map[K, V]) Count() int { 120 | return int(atomic.LoadInt64(&m.count)) 121 | } 122 | 123 | // IsEmpty checks if map is empty. 
124 | func (m *Map[K, V]) IsEmpty() bool { 125 | return m.Count() == 0 126 | } 127 | 128 | func (m *Map[K, V]) getInode() *inode2[K, V] { 129 | n := (*inode2[K, V])(atomic.LoadPointer(&m.inode)) 130 | if n == nil { 131 | m.lock.Lock() 132 | n = (*inode2[K, V])(atomic.LoadPointer(&m.inode)) 133 | if n == nil { 134 | n = &inode2[K, V]{ 135 | mask: uintptr(mInitialSize - 1), 136 | growThreshold: int64(mInitialSize * mOverflowThreshold), 137 | shrinkThreshold: 0, 138 | buckets: make([]bucket2[K, V], mInitialSize), 139 | } 140 | atomic.StorePointer(&m.inode, unsafe.Pointer(n)) 141 | } 142 | m.lock.Unlock() 143 | } 144 | return n 145 | } 146 | 147 | func (m *Map[K, V]) getInodeAndBucket(hash uintptr) (*inode2[K, V], *bucket2[K, V]) { 148 | n := m.getInode() 149 | i := hash & n.mask 150 | b := &(n.buckets[i]) 151 | if !b.inited() { 152 | n.initBucket(m, i) 153 | } 154 | return n, b 155 | } 156 | 157 | func (n *inode2[K, V]) initBuckets(m *Map[K, V]) { 158 | for i := range n.buckets { 159 | n.initBucket(m, uintptr(i)) 160 | } 161 | atomic.StorePointer(&n.pred, nil) 162 | } 163 | 164 | func (n *inode2[K, V]) initBucket(m *Map[K, V], i uintptr) { 165 | b := &(n.buckets[i]) 166 | b.lock.Lock() 167 | if b.inited() { 168 | b.lock.Unlock() 169 | return 170 | } 171 | 172 | b.m = make(map[K]V) 173 | p := (*inode2[K, V])(atomic.LoadPointer(&n.pred)) // predecessor 174 | if p != nil { 175 | if n.mask > p.mask { 176 | // Grow 177 | pb := &(p.buckets[i&p.mask]) 178 | if !pb.inited() { 179 | p.initBucket(m, i&p.mask) 180 | } 181 | for k, v := range pb.freeze() { 182 | hash := m.ehash(k) 183 | if hash&n.mask == i { 184 | b.m[k] = v 185 | } 186 | } 187 | } else { 188 | // Shrink 189 | pb0 := &(p.buckets[i]) 190 | if !pb0.inited() { 191 | p.initBucket(m, i) 192 | } 193 | pb1 := &(p.buckets[i+uintptr(len(n.buckets))]) 194 | if !pb1.inited() { 195 | p.initBucket(m, i+uintptr(len(n.buckets))) 196 | } 197 | for k, v := range pb0.freeze() { 198 | b.m[k] = v 199 | } 200 | for k, v := range pb1.freeze() { 201 | b.m[k] = v 202 | } 203 | } 204 | if len(b.m) > mOverflowThreshold { 205 | atomic.AddInt64(&n.overflow, int64(len(b.m)-mOverflowThreshold)) 206 | } 207 | } 208 | 209 | atomic.StoreInt64(&b.init, 1) 210 | b.lock.Unlock() 211 | } 212 | 213 | func (b *bucket2[K, V]) inited() bool { 214 | return atomic.LoadInt64(&b.init) == 1 215 | } 216 | 217 | func (b *bucket2[K, V]) freeze() map[K]V { 218 | b.lock.Lock() 219 | b.frozen = true 220 | m := b.m 221 | b.lock.Unlock() 222 | return m 223 | } 224 | 225 | func (b *bucket2[K, V]) clone() []entry2[K, V] { 226 | b.lock.RLock() 227 | entries := make([]entry2[K, V], 0, len(b.m)) 228 | for k, v := range b.m { 229 | entries = append(entries, entry2[K, V]{key: k, value: v}) 230 | } 231 | b.lock.RUnlock() 232 | return entries 233 | } 234 | 235 | func (b *bucket2[K, V]) tryLoad(key K) (value V, ok bool) { 236 | b.lock.RLock() 237 | value, ok = b.m[key] 238 | b.lock.RUnlock() 239 | return 240 | } 241 | 242 | func (b *bucket2[K, V]) tryStore(m *Map[K, V], n *inode2[K, V], check bool, key K, value V) (done bool) { 243 | b.lock.Lock() 244 | if b.frozen { 245 | b.lock.Unlock() 246 | return 247 | } 248 | 249 | if check { 250 | if _, ok := b.m[key]; ok { 251 | b.lock.Unlock() 252 | return 253 | } 254 | } 255 | 256 | l0 := len(b.m) // Using length check existence is faster than accessing. 
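// Same length-delta bookkeeping as Cmap's tryStore above: an unchanged length
// means the key was already present; growth updates count/overflow and may
// hand off an asynchronous doubling of the bucket array.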
257 | b.m[key] = value 258 | length := len(b.m) 259 | b.lock.Unlock() 260 | 261 | if l0 == length { 262 | return true 263 | } 264 | 265 | // Update counter 266 | grow := atomic.AddInt64(&m.count, 1) >= n.growThreshold 267 | if length > mOverflowThreshold { 268 | grow = grow || atomic.AddInt64(&n.overflow, 1) >= mOverflowGrowThreshold 269 | } 270 | 271 | // Grow 272 | if grow && atomic.CompareAndSwapInt64(&n.resizeInProgress, 0, 1) { 273 | nlen := len(n.buckets) << 1 274 | node := &inode2[K, V]{ 275 | mask: uintptr(nlen) - 1, 276 | pred: unsafe.Pointer(n), 277 | growThreshold: int64(nlen) * mOverflowThreshold, 278 | shrinkThreshold: int64(nlen) >> 1, 279 | buckets: make([]bucket2[K, V], nlen), 280 | } 281 | ok := atomic.CompareAndSwapPointer(&m.inode, unsafe.Pointer(n), unsafe.Pointer(node)) 282 | if !ok { 283 | panic("BUG: failed swapping head") 284 | } 285 | go node.initBuckets(m) 286 | } 287 | 288 | return true 289 | } 290 | 291 | func (b *bucket2[K, V]) tryDelete(m *Map[K, V], n *inode2[K, V], key K) (done bool) { 292 | b.lock.Lock() 293 | if b.frozen { 294 | b.lock.Unlock() 295 | return 296 | } 297 | 298 | l0 := len(b.m) 299 | delete(b.m, key) 300 | length := len(b.m) 301 | b.lock.Unlock() 302 | 303 | if l0 == length { 304 | return true 305 | } 306 | 307 | // Update counter 308 | shrink := atomic.AddInt64(&m.count, -1) < n.shrinkThreshold 309 | if length >= mOverflowThreshold { 310 | atomic.AddInt64(&n.overflow, -1) 311 | } 312 | // Shrink 313 | if shrink && len(n.buckets) > mInitialSize && atomic.CompareAndSwapInt64(&n.resizeInProgress, 0, 1) { 314 | nlen := len(n.buckets) >> 1 315 | node := &inode2[K, V]{ 316 | mask: uintptr(nlen) - 1, 317 | pred: unsafe.Pointer(n), 318 | growThreshold: int64(nlen) * mOverflowThreshold, 319 | shrinkThreshold: int64(nlen) >> 1, 320 | buckets: make([]bucket2[K, V], nlen), 321 | } 322 | ok := atomic.CompareAndSwapPointer(&m.inode, unsafe.Pointer(n), unsafe.Pointer(node)) 323 | if !ok { 324 | panic("BUG: failed swapping head") 325 | } 326 | go node.initBuckets(m) 327 | } 328 | return true 329 | } 330 | 331 | // tflag is used by an rtype to signal what extra type information is 332 | // available in the memory directly following the rtype value. 333 | // 334 | // tflag values must be kept in sync with copies in: 335 | // cmd/compile/internal/reflectdata/reflect.go 336 | // cmd/link/internal/ld/decodesym.go 337 | // runtime/type.go 338 | type tflag uint8 339 | 340 | const ( 341 | // tflagUncommon means that there is a pointer, *uncommonType, 342 | // just beyond the outer type structure. 343 | // 344 | // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 345 | // then t has uncommonType data and it can be accessed as: 346 | // 347 | // type tUncommon struct { 348 | // structType 349 | // u uncommonType 350 | // } 351 | // u := &(*tUncommon)(unsafe.Pointer(t)).u 352 | tflagUncommon tflag = 1 << 0 353 | 354 | // tflagExtraStar means the name in the str field has an 355 | // extraneous '*' prefix. This is because for most types T in 356 | // a program, the type *T also exists and reusing the str data 357 | // saves binary size. 358 | tflagExtraStar tflag = 1 << 1 359 | 360 | // tflagNamed means the type has a name. 361 | tflagNamed tflag = 1 << 2 362 | 363 | // tflagRegularMemory means that equal and hash functions can treat 364 | // this type as a single region of t.size bytes. 365 | tflagRegularMemory tflag = 1 << 3 366 | ) 367 | 368 | // rtype is the common implementation of most values. 369 | // It is embedded in other struct types. 
370 | // 371 | // rtype must be kept in sync with ../runtime/type.go:/^type._type. 372 | type rtype struct { 373 | size uintptr 374 | ptrdata uintptr // number of bytes in the type that can contain pointers 375 | hash uint32 // hash of type; avoids computation in hash tables 376 | tflag tflag // extra type information flags 377 | align uint8 // alignment of variable with this type 378 | fieldAlign uint8 // alignment of struct field with this type 379 | kind uint8 // enumeration for C 380 | } 381 | 382 | //func (t *rtype) IsRegularMemory() bool { 383 | // return t.tflag&tflagRegularMemory != 0 384 | //} 385 | 386 | func (t *rtype) IsDirectIface() bool { 387 | const kindDirectIface = 1 << 5 388 | return t.kind&kindDirectIface != 0 389 | } 390 | 391 | // eface must be kept in sync with ../src/runtime/runtime2.go:/^eface. 392 | type eface struct { 393 | typ *rtype 394 | data unsafe.Pointer 395 | } 396 | 397 | func efaceOf(ep *any) *eface { 398 | return (*eface)(unsafe.Pointer(ep)) 399 | } 400 | 401 | func (m *Map[K, V]) ehash(i K) uintptr { 402 | if m.typ == nil { 403 | func() { 404 | m.lock.Lock() 405 | defer m.lock.Unlock() 406 | if m.typ == nil { 407 | // if K is interface type, then the direct reflect.TypeOf(K).Kind return reflect.Ptr 408 | if typ := reflect.TypeOf(&i); typ.Elem().Kind() == reflect.Interface { 409 | panic("not support interface type") 410 | } 411 | var e any = i 412 | m.typ = efaceOf(&e).typ 413 | } 414 | }() 415 | } 416 | 417 | var f eface 418 | f.typ = m.typ 419 | if f.typ.IsDirectIface() { 420 | f.data = *(*unsafe.Pointer)(unsafe.Pointer(&i)) 421 | } else { 422 | f.data = noescape(unsafe.Pointer(&i)) 423 | } 424 | 425 | return nilinterhash(noescape(unsafe.Pointer(&f)), 0xdeadbeef) 426 | } 427 | --------------------------------------------------------------------------------
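A minimal usage sketch of the generic `Map[K, V]` defined in `map.go` above (requires go1.18+). The method set shown — `Store`, `LoadOrStore`, `Range`, `Count` — is taken from this repository; the concrete key/value types and printed values are illustrative only. Note that, per `ehash`, an interface key type panics at first use, so `K` must be a concrete comparable type.

```go
package main

import (
	"fmt"

	"github.com/lrita/cmap"
)

func main() {
	// The zero value is ready to use; no constructor is needed.
	var m cmap.Map[string, int]

	m.Store("a", 1)

	// LoadOrStore keeps the existing value and reports loaded == true.
	if v, loaded := m.LoadOrStore("a", 100); loaded {
		fmt.Println("kept existing value:", v) // kept existing value: 1
	}
	m.LoadOrStore("b", 2)

	// Range visits each key at most once; iteration order is unspecified.
	m.Range(func(k string, v int) bool {
		fmt.Println(k, v)
		return true
	})

	fmt.Println("count:", m.Count()) // count: 2
}
```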