├── .travis.yml ├── go.mod ├── testdata ├── gen.go ├── gen_test.go ├── map_reference_test.go ├── map_bench_test.go ├── intmap.go ├── intptrs.go ├── writermap.go ├── stringmap.go ├── requests.go ├── stringintchan.go ├── structmap.go ├── stringbytechan.go └── stringermap.go ├── go.sum ├── README.md └── syncmap.go /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: go 3 | 4 | go: 5 | - "1.15.x" 6 | - "1.16.x" 7 | - master 8 | 9 | script: 10 | - cd testdata; go generate; go test; go test -bench=.; 11 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/a8m/syncmap 2 | 3 | go 1.16 4 | 5 | require ( 6 | golang.org/x/mod v0.4.1 // indirect 7 | golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 // indirect 8 | golang.org/x/tools v0.1.0 9 | ) 10 | -------------------------------------------------------------------------------- /testdata/gen.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | //go:generate go run github.com/a8m/syncmap -name Requests map[string]*http.Request 4 | 5 | //go:generate go run github.com/a8m/syncmap -name StringMap map[string]interface{} 6 | 7 | //go:generate go run github.com/a8m/syncmap -name WriterMap map[string]io.Writer 8 | 9 | //go:generate go run github.com/a8m/syncmap -name stringerMap "map[string]interface{ String() string }" 10 | 11 | //go:generate go run github.com/a8m/syncmap -name IntMap map[int]int 12 | 13 | //go:generate go run github.com/a8m/syncmap -name StructMap "map[struct{ Name string }]struct{ Age int }" 14 | 15 | //go:generate go run github.com/a8m/syncmap -name IntPtrs map[*int]*int 16 | 17 | //go:generate go run github.com/a8m/syncmap -name StringByteChan "map[string](chan []byte)" 18 | 19 | //go:generate go run github.com/a8m/syncmap -name StringIntChan "map[string](chan int)" 20 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 2 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 3 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 4 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 5 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 6 | golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= 7 | golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 8 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 9 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 10 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 11 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 12 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 13 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 14 | 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 15 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 16 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 17 | golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 18 | golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY= 19 | golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 20 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 21 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 22 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 23 | golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= 24 | golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 25 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 26 | golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= 27 | golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 28 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 29 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 30 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 31 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 32 | -------------------------------------------------------------------------------- /testdata/gen_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net/http" 5 | "testing" 6 | ) 7 | 8 | func TestIntMap(t *testing.T) { 9 | var m IntMap 10 | m.Store(1, 2) 11 | _, ok := m.Load(1) 12 | if !ok { 13 | t.Fatal("value should be existed") 14 | } 15 | m.Delete(1) 16 | _, ok = m.Load(1) 17 | if ok { 18 | t.Fatal("value should not be existed") 19 | } 20 | r, loaded := m.LoadOrStore(1, 2) 21 | if loaded { 22 | t.Fatal("value should not be loaded") 23 | } 24 | lr, loaded := m.LoadOrStore(1, r) 25 | if !loaded { 26 | t.Fatal("value should not be loaded") 27 | } 28 | if lr != r { 29 | t.Fatal("loaded value should be the same") 30 | } 31 | s, _ := m.LoadOrStore(2, 3) 32 | kv := map[int]int{1: r, 2: s} 33 | m.Range(func(key, value int) bool { 34 | v, ok := kv[key] 35 | if !ok { 36 | t.Fatal("keys do not match") 37 | } 38 | if value != v { 39 | t.Fatal("values do not match") 40 | } 41 | delete(kv, key) 42 | return true 43 | }) 44 | } 45 | 46 | func TestRequests(t *testing.T) { 47 | var m Requests 48 | m.Store("r", &http.Request{}) 49 | _, ok := m.Load("r") 50 | if !ok { 51 | t.Fatal("value should be existed") 52 | } 53 | v, ok := m.LoadAndDelete("r") 54 | if !ok || v == nil { 55 | t.Fatal("value should be existed") 56 | } 57 | _, ok = m.Load("r") 58 | if ok { 59 | t.Fatal("value should not be existed") 60 | } 61 | r, loaded := m.LoadOrStore("r", &http.Request{}) 62 | if loaded { 63 | t.Fatal("value should not be loaded") 64 | } 65 | lr, loaded := m.LoadOrStore("r", 
r) 66 | if !loaded { 67 | t.Fatal("value should not be loaded") 68 | } 69 | if lr != r { 70 | t.Fatal("loaded value should be the same") 71 | } 72 | s, _ := m.LoadOrStore("s", &http.Request{}) 73 | kv := map[string]*http.Request{"r": r, "s": s} 74 | m.Range(func(key string, value *http.Request) bool { 75 | v, ok := kv[key] 76 | if !ok { 77 | t.Fatal("keys do not match") 78 | } 79 | if value != v { 80 | t.Fatal("values do not match") 81 | } 82 | delete(kv, key) 83 | return true 84 | }) 85 | } 86 | 87 | func TestStringByteChan(t *testing.T) { 88 | var m StringByteChan 89 | m.Store("r", make(chan []byte)) 90 | _, ok := m.Load("r") 91 | if !ok { 92 | t.Fatal("value should be existed") 93 | } 94 | m.Delete("r") 95 | _, ok = m.Load("r") 96 | if ok { 97 | t.Fatal("value should not be existed") 98 | } 99 | r, loaded := m.LoadOrStore("r", make(chan []byte)) 100 | if loaded { 101 | t.Fatal("value should not be loaded") 102 | } 103 | lr, loaded := m.LoadOrStore("r", r) 104 | if !loaded { 105 | t.Fatal("value should not be loaded") 106 | } 107 | if lr != r { 108 | t.Fatal("loaded value should be the same") 109 | } 110 | s, _ := m.LoadOrStore("s", make(chan []byte)) 111 | kv := map[string](chan []byte){"r": r, "s": s} 112 | m.Range(func(key string, value chan []byte) bool { 113 | v, ok := kv[key] 114 | if !ok { 115 | t.Fatal("keys do not match") 116 | } 117 | if value != v { 118 | t.Fatal("values do not match") 119 | } 120 | delete(kv, key) 121 | return true 122 | }) 123 | } 124 | 125 | func TestStringIntChan(t *testing.T) { 126 | var m StringIntChan 127 | m.Store("r", make(chan int)) 128 | _, ok := m.Load("r") 129 | if !ok { 130 | t.Fatal("value should be existed") 131 | } 132 | m.Delete("r") 133 | _, ok = m.Load("r") 134 | if ok { 135 | t.Fatal("value should not be existed") 136 | } 137 | r, loaded := m.LoadOrStore("r", make(chan int)) 138 | if loaded { 139 | t.Fatal("value should not be loaded") 140 | } 141 | lr, loaded := m.LoadOrStore("r", r) 142 | if !loaded { 143 | t.Fatal("value should not be loaded") 144 | } 145 | if lr != r { 146 | t.Fatal("loaded value should be the same") 147 | } 148 | s, _ := m.LoadOrStore("s", make(chan int)) 149 | kv := map[string](chan int){"r": r, "s": s} 150 | m.Range(func(key string, value chan int) bool { 151 | v, ok := kv[key] 152 | if !ok { 153 | t.Fatal("keys do not match") 154 | } 155 | if value != v { 156 | t.Fatal("values do not match") 157 | } 158 | delete(kv, key) 159 | return true 160 | }) 161 | } 162 | -------------------------------------------------------------------------------- /testdata/map_reference_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package main 6 | 7 | import ( 8 | "sync" 9 | "sync/atomic" 10 | ) 11 | 12 | // This file contains reference map implementations for unit-tests. 13 | 14 | // mapInterface is the interface Map implements. 15 | type mapInterface interface { 16 | Load(interface{}) (interface{}, bool) 17 | Store(key, value interface{}) 18 | LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) 19 | Delete(interface{}) 20 | Range(func(key, value interface{}) (shouldContinue bool)) 21 | } 22 | 23 | // RWMutexMap is an implementation of mapInterface using a sync.RWMutex. 
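// It is used by the tests and benchmarks in this package as a simple, lock-based baseline.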
24 | type RWMutexMap struct { 25 | mu sync.RWMutex 26 | dirty map[interface{}]interface{} 27 | } 28 | 29 | func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) { 30 | m.mu.RLock() 31 | value, ok = m.dirty[key] 32 | m.mu.RUnlock() 33 | return 34 | } 35 | 36 | func (m *RWMutexMap) Store(key, value interface{}) { 37 | m.mu.Lock() 38 | if m.dirty == nil { 39 | m.dirty = make(map[interface{}]interface{}) 40 | } 41 | m.dirty[key] = value 42 | m.mu.Unlock() 43 | } 44 | 45 | func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { 46 | m.mu.Lock() 47 | actual, loaded = m.dirty[key] 48 | if !loaded { 49 | actual = value 50 | if m.dirty == nil { 51 | m.dirty = make(map[interface{}]interface{}) 52 | } 53 | m.dirty[key] = value 54 | } 55 | m.mu.Unlock() 56 | return actual, loaded 57 | } 58 | 59 | func (m *RWMutexMap) Delete(key interface{}) { 60 | m.mu.Lock() 61 | delete(m.dirty, key) 62 | m.mu.Unlock() 63 | } 64 | 65 | func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) { 66 | m.mu.RLock() 67 | keys := make([]interface{}, 0, len(m.dirty)) 68 | for k := range m.dirty { 69 | keys = append(keys, k) 70 | } 71 | m.mu.RUnlock() 72 | 73 | for _, k := range keys { 74 | v, ok := m.Load(k) 75 | if !ok { 76 | continue 77 | } 78 | if !f(k, v) { 79 | break 80 | } 81 | } 82 | } 83 | 84 | // DeepCopyMap is an implementation of mapInterface using a Mutex and 85 | // atomic.Value. It makes deep copies of the map on every write to avoid 86 | // acquiring the Mutex in Load. 87 | type DeepCopyMap struct { 88 | mu sync.Mutex 89 | clean atomic.Value 90 | } 91 | 92 | func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) { 93 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 94 | value, ok = clean[key] 95 | return value, ok 96 | } 97 | 98 | func (m *DeepCopyMap) Store(key, value interface{}) { 99 | m.mu.Lock() 100 | dirty := m.dirty() 101 | dirty[key] = value 102 | m.clean.Store(dirty) 103 | m.mu.Unlock() 104 | } 105 | 106 | func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { 107 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 108 | actual, loaded = clean[key] 109 | if loaded { 110 | return actual, loaded 111 | } 112 | 113 | m.mu.Lock() 114 | // Reload clean in case it changed while we were waiting on m.mu. 
115 | clean, _ = m.clean.Load().(map[interface{}]interface{}) 116 | actual, loaded = clean[key] 117 | if !loaded { 118 | dirty := m.dirty() 119 | dirty[key] = value 120 | actual = value 121 | m.clean.Store(dirty) 122 | } 123 | m.mu.Unlock() 124 | return actual, loaded 125 | } 126 | 127 | func (m *DeepCopyMap) Delete(key interface{}) { 128 | m.mu.Lock() 129 | dirty := m.dirty() 130 | delete(dirty, key) 131 | m.clean.Store(dirty) 132 | m.mu.Unlock() 133 | } 134 | 135 | func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) { 136 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 137 | for k, v := range clean { 138 | if !f(k, v) { 139 | break 140 | } 141 | } 142 | } 143 | 144 | func (m *DeepCopyMap) dirty() map[interface{}]interface{} { 145 | clean, _ := m.clean.Load().(map[interface{}]interface{}) 146 | dirty := make(map[interface{}]interface{}, len(clean)+1) 147 | for k, v := range clean { 148 | dirty[k] = v 149 | } 150 | return dirty 151 | } 152 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # syncmap 2 | ![https://godoc.org/github.com/a8m/syncmap](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square) 3 | ![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square) 4 | [![Build Status](https://travis-ci.com/a8m/syncmap.svg?token=ckAPcX3LvhP9wJPS6sgW&branch=master)](https://travis-ci.com/a8m/syncmap) 5 | [![Go Report Card](https://goreportcard.com/badge/github.com/a8m/syncmap)](https://goreportcard.com/report/github.com/a8m/syncmap) 6 | 7 | A __typed__ implementation of the Go `sync.Map` using code generation. 8 | 9 | ### Install 10 | 11 | ``` 12 | go get -u github.com/a8m/syncmap@master 13 | ``` 14 | 15 | ### Examples: 16 | 17 | 1. Using the CLI 18 | ```bash 19 | $ syncmap -name IntMap "map[int]int" 20 | $ syncmap -name RequestMap -pkg mypkg "map[string]*http.Request" 21 | ``` 22 | Or: 23 | ```bash 24 | $ go run github.com/a8m/syncmap -name IntMap "map[int]int" 25 | ``` 26 | 27 | 2. Using `go generate`. 28 | 29 | - Add a directive with a map definition: 30 | ```go 31 | //go:generate go run github.com/a8m/syncmap -name WriterMap map[string]io.Writer 32 | 33 | //go:generate go run github.com/a8m/syncmap -name Requests map[string]*http.Request 34 | ``` 35 | - Then, run `go generate` on this package. 36 | 37 | See [testdata/gen.go](https://github.com/a8m/syncmap/blob/master/testdata/gen.go) for more examples. 38 | 39 | ### How does it work? 40 | 41 | `syncmap` doesn't copy the code of `sync/map.go` and replace its identifiers. Instead, it reads `sync/map.go` from 42 | your `GOROOT`, parses it into an `*ast.File`, and runs a few mutators that bring it to the desired state. 43 | Check the [code](https://github.com/a8m/syncmap/blob/master/syncmap.go#L91) for more information. 44 | 45 | __How can we make sure it will continue to work?__ I run a daily CI test on _TravisCI_. 46 | 47 | ### Benchmark 48 | The benchmark tests were taken from the `sync` package.
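They can be reproduced locally by mirroring the CI script in `.travis.yml` (a rough sketch, assuming the repository is checked out and a Go toolchain is installed):

```bash
cd testdata
go generate                # regenerate the typed maps (intmap.go, requests.go, ...)
go test                    # correctness tests
go test -bench=. -benchmem # benchmarks, optionally with allocation stats
```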
49 | ``` 50 | BenchmarkLoadMostlyHits/*main.DeepCopyMap-8 100000000 15.1 ns/op 51 | BenchmarkLoadMostlyHits/*main.RWMutexMap-8 30000000 54.4 ns/op 52 | BenchmarkLoadMostlyHits/*sync.Map-8 100000000 14.0 ns/op 53 | BenchmarkLoadMostlyHits/*main.IntMap-8 300000000 5.65 ns/op <-- 54 | 55 | BenchmarkLoadMostlyMisses/*main.DeepCopyMap-8 200000000 10.2 ns/op 56 | BenchmarkLoadMostlyMisses/*main.RWMutexMap-8 30000000 59.2 ns/op 57 | BenchmarkLoadMostlyMisses/*sync.Map-8 100000000 11.3 ns/op 58 | BenchmarkLoadMostlyMisses/*main.IntMap-8 300000000 4.05 ns/op <-- 59 | 60 | BenchmarkLoadOrStoreBalanced/*main.RWMutexMap-8 3000000 400 ns/op 61 | BenchmarkLoadOrStoreBalanced/*sync.Map-8 3000000 400 ns/op 62 | BenchmarkLoadOrStoreBalanced/*main.IntMap-8 5000000 233 ns/op <-- 63 | 64 | BenchmarkLoadOrStoreUnique/*main.RWMutexMap-8 2000000 744 ns/op 65 | BenchmarkLoadOrStoreUnique/*sync.Map-8 2000000 903 ns/op 66 | BenchmarkLoadOrStoreUnique/*main.IntMap-8 3000000 388 ns/op <-- 67 | 68 | BenchmarkLoadOrStoreCollision/*main.DeepCopyMap-8 200000000 7.29 ns/op 69 | BenchmarkLoadOrStoreCollision/*main.RWMutexMap-8 20000000 97.5 ns/op 70 | BenchmarkLoadOrStoreCollision/*sync.Map-8 200000000 9.11 ns/op 71 | BenchmarkLoadOrStoreCollision/*main.IntMap-8 500000000 3.14 ns/op <-- 72 | 73 | BenchmarkRange/*main.DeepCopyMap-8 500000 4479 ns/op 74 | BenchmarkRange/*main.RWMutexMap-8 30000 56834 ns/op 75 | BenchmarkRange/*sync.Map-8 300000 4464 ns/op 76 | BenchmarkRange/*main.IntMap-8 1000000000 2.38 ns/op <-- 77 | 78 | BenchmarkAdversarialAlloc/*main.DeepCopyMap-8 2000000 826 ns/op 79 | BenchmarkAdversarialAlloc/*main.RWMutexMap-8 20000000 73.6 ns/op 80 | BenchmarkAdversarialAlloc/*sync.Map-8 5000000 303 ns/op 81 | BenchmarkAdversarialAlloc/*main.IntMap-8 10000000 182 ns/op <-- 82 | 83 | BenchmarkAdversarialDelete/*main.DeepCopyMap-8 10000000 204 ns/op 84 | BenchmarkAdversarialDelete/*main.RWMutexMap-8 20000000 78.3 ns/op 85 | BenchmarkAdversarialDelete/*sync.Map-8 20000000 72.2 ns/op 86 | BenchmarkAdversarialDelete/*main.IntMap-8 100000000 14.2 ns/op <-- 87 | ``` 88 | 89 | Running benchmark with `-benchmem` 90 | ``` 91 | BenchmarkLoadMostlyHits/*main.DeepCopyMap-8 100000000 12.7 ns/op 7 B/op 0 allocs/op 92 | BenchmarkLoadMostlyHits/*main.RWMutexMap-8 30000000 53.6 ns/op 7 B/op 0 allocs/op 93 | BenchmarkLoadMostlyHits/*sync.Map-8 100000000 16.3 ns/op 7 B/op 0 allocs/op 94 | BenchmarkLoadMostlyHits/*main.IntMap-8 200000000 6.02 ns/op 0 B/op 0 allocs/op <-- 95 | 96 | BenchmarkLoadMostlyMisses/*main.DeepCopyMap-8 200000000 7.99 ns/op 7 B/op 0 allocs/op 97 | BenchmarkLoadMostlyMisses/*main.RWMutexMap-8 30000000 52.6 ns/op 7 B/op 0 allocs/op 98 | BenchmarkLoadMostlyMisses/*sync.Map-8 200000000 8.87 ns/op 7 B/op 0 allocs/op 99 | BenchmarkLoadMostlyMisses/*main.IntMap-8 1000000000 2.88 ns/op 0 B/op 0 allocs/op <-- 100 | 101 | BenchmarkLoadOrStoreBalanced/*main.RWMutexMap-8 3000000 357 ns/op 71 B/op 2 allocs/op 102 | BenchmarkLoadOrStoreBalanced/*sync.Map-8 3000000 417 ns/op 70 B/op 3 allocs/op 103 | BenchmarkLoadOrStoreBalanced/*main.IntMap-8 5000000 202 ns/op 42 B/op 1 allocs/op <-- 104 | 105 | BenchmarkLoadOrStoreUnique/*main.RWMutexMap-8 2000000 648 ns/op 178 B/op 2 allocs/op 106 | BenchmarkLoadOrStoreUnique/*sync.Map-8 2000000 745 ns/op 163 B/op 4 allocs/op 107 | BenchmarkLoadOrStoreUnique/*main.IntMap-8 3000000 368 ns/op 74 B/op 2 allocs/op <-- 108 | 109 | BenchmarkLoadOrStoreCollision/*main.DeepCopyMap-8 300000000 5.90 ns/op 0 B/op 0 allocs/op 110 | BenchmarkLoadOrStoreCollision/*main.RWMutexMap-8 20000000 94.5 ns/op 0 
B/op 0 allocs/op 111 | BenchmarkLoadOrStoreCollision/*sync.Map-8 200000000 7.55 ns/op 0 B/op 0 allocs/op 112 | BenchmarkLoadOrStoreCollision/*main.IntMap-8 1000000000 2.68 ns/op 0 B/op 0 allocs/op <-- 113 | 114 | BenchmarkRange/*main.DeepCopyMap-8 500000 3376 ns/op 0 B/op 0 allocs/op 115 | BenchmarkRange/*main.RWMutexMap-8 30000 56675 ns/op 16384 B/op 1 allocs/op 116 | BenchmarkRange/*sync.Map-8 500000 3587 ns/op 0 B/op 0 allocs/op 117 | BenchmarkRange/*main.IntMap-8 2000000000 1.75 ns/op 0 B/op 0 allocs/op <-- 118 | 119 | BenchmarkAdversarialAlloc/*main.DeepCopyMap-8 2000000 761 ns/op 535 B/op 1 allocs/op 120 | BenchmarkAdversarialAlloc/*main.RWMutexMap-8 20000000 67.9 ns/op 8 B/op 1 allocs/op 121 | BenchmarkAdversarialAlloc/*sync.Map-8 5000000 264 ns/op 51 B/op 1 allocs/op 122 | BenchmarkAdversarialAlloc/*main.IntMap-8 10000000 176 ns/op 28 B/op 0 allocs/op <-- 123 | 124 | BenchmarkAdversarialDelete/*main.DeepCopyMap-8 10000000 194 ns/op 168 B/op 1 allocs/op 125 | BenchmarkAdversarialDelete/*main.RWMutexMap-8 20000000 76.9 ns/op 25 B/op 1 allocs/op 126 | BenchmarkAdversarialDelete/*sync.Map-8 20000000 60.8 ns/op 18 B/op 1 allocs/op 127 | BenchmarkAdversarialDelete/*main.IntMap-8 100000000 13.1 ns/op 0 B/op 0 allocs/op <-- 128 | ``` 129 | 130 | 131 | ## LICENSE 132 | I am providing code in the repository to you under MIT license. Because this is my personal repository, the license you receive to my code is from me and not my employer (Facebook) 133 | 134 | -------------------------------------------------------------------------------- /testdata/map_bench_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package main 6 | 7 | import ( 8 | "fmt" 9 | "reflect" 10 | "sync" 11 | "sync/atomic" 12 | "testing" 13 | ) 14 | 15 | type bench struct { 16 | setup func(*testing.B, mapInterface) 17 | perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) 18 | } 19 | 20 | func benchMap(b *testing.B, bench bench) { 21 | for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} { 22 | b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { 23 | m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) 24 | if bench.setup != nil { 25 | bench.setup(b, m) 26 | } 27 | 28 | b.ResetTimer() 29 | 30 | var i int64 31 | b.RunParallel(func(pb *testing.PB) { 32 | id := int(atomic.AddInt64(&i, 1) - 1) 33 | bench.perG(b, pb, id*b.N, m) 34 | }) 35 | }) 36 | } 37 | } 38 | 39 | func BenchmarkLoadMostlyHits(b *testing.B) { 40 | const hits, misses = 1023, 1 41 | 42 | benchMap(b, bench{ 43 | setup: func(_ *testing.B, m mapInterface) { 44 | for i := 0; i < hits; i++ { 45 | m.LoadOrStore(i, i) 46 | } 47 | // Prime the map to get it into a steady state. 
48 | for i := 0; i < hits*2; i++ { 49 | m.Load(i % hits) 50 | } 51 | }, 52 | 53 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 54 | for ; pb.Next(); i++ { 55 | m.Load(i % (hits + misses)) 56 | } 57 | }, 58 | }) 59 | 60 | // syncmap code: 61 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 62 | m := &IntMap{} 63 | // setup: 64 | for i := 0; i < hits; i++ { 65 | m.LoadOrStore(i, i) 66 | } 67 | for i := 0; i < hits*2; i++ { 68 | m.Load(i % hits) 69 | } 70 | 71 | // reset: 72 | b.ResetTimer() 73 | 74 | // perG: 75 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 76 | for ; pb.Next(); i++ { 77 | m.Load(i % (hits + misses)) 78 | } 79 | } 80 | var i int64 81 | b.RunParallel(func(pb *testing.PB) { 82 | id := int(atomic.AddInt64(&i, 1) - 1) 83 | perG(b, pb, id*b.N, m) 84 | }) 85 | }) 86 | } 87 | 88 | func BenchmarkLoadMostlyMisses(b *testing.B) { 89 | const hits, misses = 1, 1023 90 | 91 | benchMap(b, bench{ 92 | setup: func(_ *testing.B, m mapInterface) { 93 | for i := 0; i < hits; i++ { 94 | m.LoadOrStore(i, i) 95 | } 96 | // Prime the map to get it into a steady state. 97 | for i := 0; i < hits*2; i++ { 98 | m.Load(i % hits) 99 | } 100 | }, 101 | 102 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 103 | for ; pb.Next(); i++ { 104 | m.Load(i % (hits + misses)) 105 | } 106 | }, 107 | }) 108 | 109 | // syncmap code: 110 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 111 | m := &IntMap{} 112 | // setup: 113 | for i := 0; i < hits; i++ { 114 | m.LoadOrStore(i, i) 115 | } 116 | for i := 0; i < hits*2; i++ { 117 | m.Load(i % hits) 118 | } 119 | 120 | // reset: 121 | b.ResetTimer() 122 | 123 | // perG: 124 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 125 | for ; pb.Next(); i++ { 126 | m.Load(i % (hits + misses)) 127 | } 128 | } 129 | var i int64 130 | b.RunParallel(func(pb *testing.PB) { 131 | id := int(atomic.AddInt64(&i, 1) - 1) 132 | perG(b, pb, id*b.N, m) 133 | }) 134 | }) 135 | } 136 | 137 | func BenchmarkLoadOrStoreBalanced(b *testing.B) { 138 | const hits, misses = 128, 128 139 | 140 | benchMap(b, bench{ 141 | setup: func(b *testing.B, m mapInterface) { 142 | if _, ok := m.(*DeepCopyMap); ok { 143 | b.Skip("DeepCopyMap has quadratic running time.") 144 | } 145 | for i := 0; i < hits; i++ { 146 | m.LoadOrStore(i, i) 147 | } 148 | // Prime the map to get it into a steady state. 
149 | for i := 0; i < hits*2; i++ { 150 | m.Load(i % hits) 151 | } 152 | }, 153 | 154 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 155 | for ; pb.Next(); i++ { 156 | j := i % (hits + misses) 157 | if j < hits { 158 | if _, ok := m.LoadOrStore(j, i); !ok { 159 | b.Fatalf("unexpected miss for %v", j) 160 | } 161 | } else { 162 | if v, loaded := m.LoadOrStore(i, i); loaded { 163 | b.Fatalf("failed to store %v: existing value %v", i, v) 164 | } 165 | } 166 | } 167 | }, 168 | }) 169 | 170 | // syncmap code: 171 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 172 | m := &IntMap{} 173 | // setup: 174 | for i := 0; i < hits; i++ { 175 | m.LoadOrStore(i, i) 176 | } 177 | for i := 0; i < hits*2; i++ { 178 | m.Load(i % hits) 179 | } 180 | 181 | // reset: 182 | b.ResetTimer() 183 | 184 | // perG: 185 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 186 | for ; pb.Next(); i++ { 187 | j := i % (hits + misses) 188 | if j < hits { 189 | if _, ok := m.LoadOrStore(j, i); !ok { 190 | b.Fatalf("unexpected miss for %v", j) 191 | } 192 | } else { 193 | if v, loaded := m.LoadOrStore(i, i); loaded { 194 | b.Fatalf("failed to store %v: existing value %v", i, v) 195 | } 196 | } 197 | } 198 | } 199 | 200 | var i int64 201 | b.RunParallel(func(pb *testing.PB) { 202 | id := int(atomic.AddInt64(&i, 1) - 1) 203 | perG(b, pb, id*b.N, m) 204 | }) 205 | }) 206 | } 207 | 208 | func BenchmarkLoadOrStoreUnique(b *testing.B) { 209 | benchMap(b, bench{ 210 | setup: func(b *testing.B, m mapInterface) { 211 | if _, ok := m.(*DeepCopyMap); ok { 212 | b.Skip("DeepCopyMap has quadratic running time.") 213 | } 214 | }, 215 | 216 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 217 | for ; pb.Next(); i++ { 218 | m.LoadOrStore(i, i) 219 | } 220 | }, 221 | }) 222 | 223 | // syncmap code: 224 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 225 | m := &IntMap{} 226 | // setup: 227 | 228 | // reset: 229 | b.ResetTimer() 230 | 231 | // perG: 232 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 233 | for ; pb.Next(); i++ { 234 | m.LoadOrStore(i, i) 235 | } 236 | } 237 | var i int64 238 | b.RunParallel(func(pb *testing.PB) { 239 | id := int(atomic.AddInt64(&i, 1) - 1) 240 | perG(b, pb, id*b.N, m) 241 | }) 242 | }) 243 | } 244 | 245 | func BenchmarkLoadOrStoreCollision(b *testing.B) { 246 | benchMap(b, bench{ 247 | setup: func(_ *testing.B, m mapInterface) { 248 | m.LoadOrStore(0, 0) 249 | }, 250 | 251 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 252 | for ; pb.Next(); i++ { 253 | m.LoadOrStore(0, 0) 254 | } 255 | }, 256 | }) 257 | 258 | // syncmap code: 259 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 260 | m := &IntMap{} 261 | // setup: 262 | 263 | // reset: 264 | b.ResetTimer() 265 | 266 | // perG: 267 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 268 | for ; pb.Next(); i++ { 269 | m.LoadOrStore(0, 0) 270 | } 271 | } 272 | var i int64 273 | b.RunParallel(func(pb *testing.PB) { 274 | id := int(atomic.AddInt64(&i, 1) - 1) 275 | perG(b, pb, id*b.N, m) 276 | }) 277 | }) 278 | } 279 | 280 | func BenchmarkRange(b *testing.B) { 281 | const mapSize = 1 << 10 282 | 283 | benchMap(b, bench{ 284 | setup: func(_ *testing.B, m mapInterface) { 285 | for i := 0; i < mapSize; i++ { 286 | m.Store(i, i) 287 | } 288 | }, 289 | 290 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 291 | for ; pb.Next(); i++ { 292 | m.Range(func(_, _ interface{}) bool { return true }) 293 | } 294 | }, 295 | }) 
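// Note: the generated IntMap cannot be exercised through benchMap and mapInterface, since its methods take concrete int keys and values rather than interface{}; each benchmark therefore repeats the equivalent setup and perG body below with the typed map.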
296 | 297 | // syncmap code: 298 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 299 | m := &IntMap{} 300 | // setup: 301 | 302 | // reset: 303 | b.ResetTimer() 304 | 305 | // perG: 306 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 307 | for ; pb.Next(); i++ { 308 | m.Range(func(_, _ int) bool { return true }) 309 | } 310 | } 311 | var i int64 312 | b.RunParallel(func(pb *testing.PB) { 313 | id := int(atomic.AddInt64(&i, 1) - 1) 314 | perG(b, pb, id*b.N, m) 315 | }) 316 | }) 317 | } 318 | 319 | // BenchmarkAdversarialAlloc tests performance when we store a new value 320 | // immediately whenever the map is promoted to clean and otherwise load a 321 | // unique, missing key. 322 | // 323 | // This forces the Load calls to always acquire the map's mutex. 324 | func BenchmarkAdversarialAlloc(b *testing.B) { 325 | benchMap(b, bench{ 326 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 327 | var stores, loadsSinceStore int64 328 | for ; pb.Next(); i++ { 329 | m.Load(i) 330 | if loadsSinceStore++; loadsSinceStore > stores { 331 | m.LoadOrStore(i, stores) 332 | loadsSinceStore = 0 333 | stores++ 334 | } 335 | } 336 | }, 337 | }) 338 | 339 | // syncmap code: 340 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 341 | m := &IntMap{} 342 | // setup: 343 | 344 | // reset: 345 | b.ResetTimer() 346 | 347 | // perG: 348 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 349 | var stores, loadsSinceStore int 350 | for ; pb.Next(); i++ { 351 | m.Load(i) 352 | if loadsSinceStore++; loadsSinceStore > stores { 353 | m.LoadOrStore(i, stores) 354 | loadsSinceStore = 0 355 | stores++ 356 | } 357 | } 358 | } 359 | var i int64 360 | b.RunParallel(func(pb *testing.PB) { 361 | id := int(atomic.AddInt64(&i, 1) - 1) 362 | perG(b, pb, id*b.N, m) 363 | }) 364 | }) 365 | } 366 | 367 | // BenchmarkAdversarialDelete tests performance when we periodically delete 368 | // one key and add a different one in a large map. 369 | // 370 | // This forces the Load calls to always acquire the map's mutex and periodically 371 | // makes a full copy of the map despite changing only one entry. 
372 | func BenchmarkAdversarialDelete(b *testing.B) { 373 | const mapSize = 1 << 10 374 | 375 | benchMap(b, bench{ 376 | setup: func(_ *testing.B, m mapInterface) { 377 | for i := 0; i < mapSize; i++ { 378 | m.Store(i, i) 379 | } 380 | }, 381 | 382 | perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { 383 | for ; pb.Next(); i++ { 384 | m.Load(i) 385 | 386 | if i%mapSize == 0 { 387 | m.Range(func(k, _ interface{}) bool { 388 | m.Delete(k) 389 | return false 390 | }) 391 | m.Store(i, i) 392 | } 393 | } 394 | }, 395 | }) 396 | 397 | // syncmap code: 398 | b.Run(fmt.Sprintf("%T", &IntMap{}), func(b *testing.B) { 399 | m := &IntMap{} 400 | // setup: 401 | 402 | // reset: 403 | b.ResetTimer() 404 | 405 | // perG: 406 | perG := func(b *testing.B, pb *testing.PB, i int, m *IntMap) { 407 | for ; pb.Next(); i++ { 408 | m.Load(i) 409 | 410 | if i%mapSize == 0 { 411 | m.Range(func(k, _ int) bool { 412 | m.Delete(k) 413 | return false 414 | }) 415 | m.Store(i, i) 416 | } 417 | } 418 | } 419 | var i int64 420 | b.RunParallel(func(pb *testing.PB) { 421 | id := int(atomic.AddInt64(&i, 1) - 1) 422 | perG(b, pb, id*b.N, m) 423 | }) 424 | }) 425 | } 426 | -------------------------------------------------------------------------------- /syncmap.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "flag" 6 | "fmt" 7 | "go/ast" 8 | "go/format" 9 | "go/parser" 10 | "go/token" 11 | "go/types" 12 | "io/ioutil" 13 | "os" 14 | "reflect" 15 | "runtime" 16 | "strings" 17 | 18 | "golang.org/x/tools/go/ast/astutil" 19 | "golang.org/x/tools/imports" 20 | ) 21 | 22 | var ( 23 | out = flag.String("o", "", "") 24 | pkg = flag.String("pkg", "main", "") 25 | name = flag.String("name", "Map", "") 26 | usage = `Usage: syncmap [options...] map[T1]T2 27 | 28 | Options: 29 | -o Specify file output. If none is specified, the name 30 | will be derived from the map type. 31 | -pkg Package name to use in the generated code. If none is 32 | specified, the name will main. 33 | -name Struct name to use in the generated code. If none is 34 | specified, the name will be Map. 35 | ` 36 | ) 37 | 38 | func main() { 39 | flag.Usage = func() { 40 | fmt.Fprint(os.Stderr, fmt.Sprintf(usage)) 41 | } 42 | flag.Parse() 43 | g, err := NewGenerator() 44 | failOnErr(err) 45 | err = g.Mutate() 46 | failOnErr(err) 47 | err = g.Gen() 48 | failOnErr(err) 49 | } 50 | 51 | // Generator generates the typed syncmap object. 52 | type Generator struct { 53 | // flag options. 54 | pkg string // package name. 55 | out string // file name. 56 | name string // struct name. 57 | key string // map key type. 58 | value string // map value type. 59 | // mutation state and traversal handlers. 60 | file *ast.File 61 | fset *token.FileSet 62 | funcs map[string]func(*ast.FuncDecl) 63 | types map[string]func(*ast.TypeSpec) 64 | values map[string]func(*ast.ValueSpec) 65 | } 66 | 67 | // NewGenerator returns a new generator for syncmap. 68 | func NewGenerator() (g *Generator, err error) { 69 | defer catch(&err) 70 | g = &Generator{fset: token.NewFileSet(), pkg: *pkg, out: *out, name: *name} 71 | g.funcs = g.Funcs() 72 | g.types = g.Types() 73 | g.values = g.Values() 74 | exp, err := parser.ParseExpr(os.Args[len(os.Args)-1]) 75 | check(err, "parse expr: %s", os.Args[len(os.Args)-1]) 76 | m, ok := exp.(*ast.MapType) 77 | expect(ok, "invalid argument. 
expected map[T1]T2") 78 | b := bytes.NewBuffer(nil) 79 | err = format.Node(b, g.fset, m.Key) 80 | check(err, "format map key") 81 | g.key = b.String() 82 | b.Reset() 83 | err = format.Node(b, g.fset, m.Value) 84 | check(err, "format map value") 85 | g.value = b.String() 86 | if g.out == "" { 87 | g.out = strings.ToLower(g.name) + ".go" 88 | } 89 | return 90 | } 91 | 92 | // Mutate mutates the original `sync/map` AST and brings it to the desired state. 93 | // It fails if it encounters an unrecognized node in the AST. 94 | func (g *Generator) Mutate() (err error) { 95 | defer catch(&err) 96 | path := fmt.Sprintf("%s/src/sync/map.go", runtime.GOROOT()) 97 | b, err := ioutil.ReadFile(path) 98 | check(err, "read %q file", path) 99 | f, err := parser.ParseFile(g.fset, "", b, parser.ParseComments) 100 | check(err, "parse %q file", path) 101 | f.Name.Name = g.pkg 102 | astutil.AddImport(g.fset, f, "sync") 103 | for _, d := range f.Decls { 104 | switch d := d.(type) { 105 | case *ast.FuncDecl: 106 | handler, ok := g.funcs[d.Name.Name] 107 | expect(ok, "unrecognized function: %s", d.Name.Name) 108 | handler(d) 109 | delete(g.funcs, d.Name.Name) 110 | case *ast.GenDecl: 111 | switch d := d.Specs[0].(type) { 112 | case *ast.TypeSpec: 113 | handler, ok := g.types[d.Name.Name] 114 | expect(ok, "unrecognized type: %s", d.Name.Name) 115 | handler(d) 116 | delete(g.types, d.Name.Name) 117 | case *ast.ValueSpec: 118 | handler, ok := g.values[d.Names[0].Name] 119 | expect(ok, "unrecognized value: %s", d.Names[0].Name) 120 | handler(d) 121 | expect(len(d.Names) == 1, "mismatch values length: %d", len(d.Names)) 122 | delete(g.values, d.Names[0].Name) 123 | } 124 | default: 125 | expect(false, "unrecognized type: %s", d) 126 | } 127 | } 128 | expect(len(g.funcs) == 0, "function was deleted") 129 | expect(len(g.types) == 0, "type was deleted") 130 | expect(len(g.values) == 0, "value was deleted") 131 | rename(f, map[string]string{ 132 | "Map": g.name, 133 | "entry": "entry" + strings.Title(g.name), 134 | "readOnly": "readOnly" + strings.Title(g.name), 135 | "expunged": "expunged" + strings.Title(g.name), 136 | "newEntry": "newEntry" + strings.Title(g.name), 137 | }) 138 | g.file = f 139 | return 140 | } 141 | 142 | // Gen dumps the mutated AST to a file in the configured destination. 143 | func (g *Generator) Gen() (err error) { 144 | defer catch(&err) 145 | b := bytes.NewBuffer([]byte("// Code generated by syncmap; DO NOT EDIT.\n\n")) 146 | err = format.Node(b, g.fset, g.file) 147 | check(err, "format mutated code") 148 | src, err := imports.Process(g.out, b.Bytes(), nil) 149 | check(err, "running goimports on: %s", g.out) 150 | err = ioutil.WriteFile(g.out, src, 0644) 151 | check(err, "writing file: %s", g.out) 152 | return 153 | } 154 | 155 | // Values returns all ValueSpec handlers for AST mutation. 156 | func (g *Generator) Values() map[string]func(*ast.ValueSpec) { 157 | return map[string]func(*ast.ValueSpec){ 158 | "expunged": func(v *ast.ValueSpec) { g.replaceValue(v) }, 159 | } 160 | } 161 | 162 | // Types returns all TypesSpec handlers for AST mutation. 
163 | func (g *Generator) Types() map[string]func(*ast.TypeSpec) { 164 | return map[string]func(*ast.TypeSpec){ 165 | "Map": func(t *ast.TypeSpec) { 166 | l := t.Type.(*ast.StructType).Fields.List[0] 167 | l.Type = expr("sync.Mutex", l.Type.Pos()) 168 | g.replaceKey(t.Type) 169 | }, 170 | "readOnly": func(t *ast.TypeSpec) { g.replaceKey(t) }, 171 | "entry": func(*ast.TypeSpec) {}, 172 | } 173 | } 174 | 175 | // Funcs returns all FuncDecl handlers for AST mutation. 176 | func (g *Generator) Funcs() map[string]func(*ast.FuncDecl) { 177 | nop := func(*ast.FuncDecl) {} 178 | return map[string]func(*ast.FuncDecl){ 179 | "Load": func(f *ast.FuncDecl) { 180 | g.replaceKey(f.Type.Params) 181 | g.replaceValue(f.Type.Results) 182 | renameNil(f.Body, f.Type.Results.List[0].Names[0].Name) 183 | }, 184 | "load": func(f *ast.FuncDecl) { 185 | g.replaceValue(f) 186 | renameNil(f.Body, f.Type.Results.List[0].Names[0].Name) 187 | }, 188 | "Store": func(f *ast.FuncDecl) { 189 | g.renameTuple(f.Type.Params) 190 | }, 191 | "LoadOrStore": func(f *ast.FuncDecl) { 192 | g.renameTuple(f.Type.Params) 193 | g.replaceValue(f.Type.Results) 194 | }, 195 | "LoadAndDelete": func(f *ast.FuncDecl) { 196 | g.replaceKey(f.Type.Params) 197 | g.replaceValue(f.Type.Results) 198 | renameNil(f.Body, f.Type.Results.List[0].Names[0].Name) 199 | }, 200 | "tryLoadOrStore": func(f *ast.FuncDecl) { 201 | g.replaceValue(f) 202 | renameNil(f.Body, f.Type.Results.List[0].Names[0].Name) 203 | }, 204 | "Range": func(f *ast.FuncDecl) { 205 | g.renameTuple(f.Type.Params.List[0].Type.(*ast.FuncType).Params) 206 | }, 207 | "Delete": func(f *ast.FuncDecl) { g.replaceKey(f) }, 208 | "newEntry": func(f *ast.FuncDecl) { g.replaceValue(f) }, 209 | "tryStore": func(f *ast.FuncDecl) { g.replaceValue(f) }, 210 | "dirtyLocked": func(f *ast.FuncDecl) { g.replaceKey(f) }, 211 | "storeLocked": func(f *ast.FuncDecl) { g.replaceValue(f) }, 212 | "delete": func(f *ast.FuncDecl) { 213 | g.replaceValue(f) 214 | renameNil(f.Body, f.Type.Results.List[0].Names[0].Name) 215 | }, 216 | "missLocked": nop, 217 | "unexpungeLocked": nop, 218 | "tryExpungeLocked": nop, 219 | } 220 | } 221 | 222 | // replaceKey replaces all `interface{}` occurrences in the given Node with the key node. 223 | func (g *Generator) replaceKey(n ast.Node) { replaceIface(n, g.key) } 224 | 225 | // replaceValue replaces all `interface{}` occurrences in the given Node with the value node. 
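// (Both `interface{}` literals and the `any` alias used in newer versions of sync/map.go are rewritten; see replaceIface below.)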
226 | func (g *Generator) replaceValue(n ast.Node) { replaceIface(n, g.value) } 227 | 228 | func (g *Generator) renameTuple(l *ast.FieldList) { 229 | if g.key == g.value { 230 | g.replaceKey(l.List[0]) 231 | return 232 | } 233 | l.List = append(l.List, &ast.Field{ 234 | Names: []*ast.Ident{l.List[0].Names[1]}, 235 | Type: l.List[0].Type, 236 | }) 237 | l.List[0].Names = l.List[0].Names[:1] 238 | g.replaceKey(l.List[0]) 239 | g.replaceValue(l.List[1]) 240 | } 241 | 242 | func replaceIface(n ast.Node, s string) { 243 | astutil.Apply(n, func(c *astutil.Cursor) bool { 244 | n := c.Node() 245 | if it, ok := n.(*ast.InterfaceType); ok { 246 | c.Replace(expr(s, it.Interface)) 247 | } 248 | if it, ok := n.(*ast.Ident); ok && it.Name == "any" { 249 | c.Replace(expr(s, it.Pos())) 250 | } 251 | return true 252 | }, nil) 253 | } 254 | 255 | func rename(f *ast.File, oldnew map[string]string) { 256 | astutil.Apply(f, func(c *astutil.Cursor) bool { 257 | switch n := c.Node().(type) { 258 | case *ast.Ident: 259 | if name, ok := oldnew[n.Name]; ok { 260 | n.Name = name 261 | n.Obj.Name = name 262 | } 263 | case *ast.FuncDecl: 264 | if name, ok := oldnew[n.Name.Name]; ok { 265 | n.Name.Name = name 266 | } 267 | } 268 | return true 269 | }, nil) 270 | } 271 | 272 | func renameNil(n ast.Node, name string) { 273 | astutil.Apply(n, func(c *astutil.Cursor) bool { 274 | if _, ok := c.Parent().(*ast.ReturnStmt); ok { 275 | if i, ok := c.Node().(*ast.Ident); ok && i.Name == new(types.Nil).String() { 276 | i.Name = name 277 | } 278 | } 279 | return true 280 | }, nil) 281 | } 282 | 283 | func expr(s string, pos token.Pos) ast.Expr { 284 | exp, err := parser.ParseExpr(s) 285 | check(err, "parse expr: %q", s) 286 | setPos(exp, pos) 287 | return exp 288 | } 289 | 290 | func setPos(n ast.Node, p token.Pos) { 291 | if reflect.ValueOf(n).IsNil() { 292 | return 293 | } 294 | switch n := n.(type) { 295 | case *ast.Ident: 296 | n.NamePos = p 297 | case *ast.MapType: 298 | n.Map = p 299 | setPos(n.Key, p) 300 | setPos(n.Value, p) 301 | case *ast.FieldList: 302 | n.Closing = p 303 | n.Opening = p 304 | if len(n.List) > 0 { 305 | setPos(n.List[0], p) 306 | } 307 | case *ast.Field: 308 | setPos(n.Type, p) 309 | if len(n.Names) > 0 { 310 | setPos(n.Names[0], p) 311 | } 312 | case *ast.FuncType: 313 | n.Func = p 314 | setPos(n.Params, p) 315 | setPos(n.Results, p) 316 | case *ast.ArrayType: 317 | n.Lbrack = p 318 | setPos(n.Elt, p) 319 | case *ast.StructType: 320 | n.Struct = p 321 | setPos(n.Fields, p) 322 | case *ast.SelectorExpr: 323 | setPos(n.X, p) 324 | n.Sel.NamePos = p 325 | case *ast.InterfaceType: 326 | n.Interface = p 327 | setPos(n.Methods, p) 328 | case *ast.StarExpr: 329 | n.Star = p 330 | setPos(n.X, p) 331 | case *ast.ChanType: 332 | setPos(n.Value, p) 333 | case *ast.ParenExpr: 334 | setPos(n.X, p) 335 | default: 336 | panic(fmt.Sprintf("unknown type: %v", n)) 337 | } 338 | } 339 | 340 | // check panics if the error is not nil. 341 | func check(err error, msg string, args ...interface{}) { 342 | if err != nil { 343 | args = append(args, err) 344 | panic(genError{fmt.Sprintf(msg+": %s", args...)}) 345 | } 346 | } 347 | 348 | // expect panic if the condition is false. 
349 | func expect(cond bool, msg string, args ...interface{}) { 350 | if !cond { 351 | panic(genError{fmt.Sprintf(msg, args...)}) 352 | } 353 | } 354 | 355 | type genError struct { 356 | msg string 357 | } 358 | 359 | func (p genError) Error() string { return fmt.Sprintf("syncmap: %s", p.msg) } 360 | 361 | func catch(err *error) { 362 | if e := recover(); e != nil { 363 | gerr, ok := e.(genError) 364 | if !ok { 365 | panic(e) 366 | } 367 | *err = gerr 368 | } 369 | } 370 | 371 | func failOnErr(err error) { 372 | if err != nil { 373 | fmt.Fprintf(os.Stderr, "%v\n\n", err.Error()) 374 | os.Exit(1) 375 | } 376 | } 377 | -------------------------------------------------------------------------------- /testdata/intmap.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type IntMap struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[int]*entryIntMap 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 
58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyIntMap struct { 67 | m map[int]*entryIntMap 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedIntMap = unsafe.Pointer(new(int)) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryIntMap struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryIntMap(i int) *entryIntMap { 99 | return &entryIntMap{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *IntMap) Load(key int) (value int, ok bool) { 106 | read, _ := m.read.Load().(readOnlyIntMap) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. (If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyIntMap) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryIntMap) load() (value int, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedIntMap { 133 | return value, false 134 | } 135 | return *(*int)(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *IntMap) Store(key, value int) { 140 | read, _ := m.read.Load().(readOnlyIntMap) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyIntMap) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 
151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyIntMap{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryIntMap(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryIntMap) tryStore(i *int) bool { 173 | for { 174 | p := atomic.LoadPointer(&e.p) 175 | if p == expungedIntMap { 176 | return false 177 | } 178 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 179 | return true 180 | } 181 | } 182 | } 183 | 184 | // unexpungeLocked ensures that the entry is not marked as expunged. 185 | // 186 | // If the entry was previously expunged, it must be added to the dirty map 187 | // before m.mu is unlocked. 188 | func (e *entryIntMap) unexpungeLocked() (wasExpunged bool) { 189 | return atomic.CompareAndSwapPointer(&e.p, expungedIntMap, nil) 190 | } 191 | 192 | // storeLocked unconditionally stores a value to the entry. 193 | // 194 | // The entry must be known not to be expunged. 195 | func (e *entryIntMap) storeLocked(i *int) { 196 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 197 | } 198 | 199 | // LoadOrStore returns the existing value for the key if present. 200 | // Otherwise, it stores and returns the given value. 201 | // The loaded result is true if the value was loaded, false if stored. 202 | func (m *IntMap) LoadOrStore(key, value int) (actual int, loaded bool) { 203 | // Avoid locking if it's a clean hit. 204 | read, _ := m.read.Load().(readOnlyIntMap) 205 | if e, ok := read.m[key]; ok { 206 | actual, loaded, ok := e.tryLoadOrStore(value) 207 | if ok { 208 | return actual, loaded 209 | } 210 | } 211 | 212 | m.mu.Lock() 213 | read, _ = m.read.Load().(readOnlyIntMap) 214 | if e, ok := read.m[key]; ok { 215 | if e.unexpungeLocked() { 216 | m.dirty[key] = e 217 | } 218 | actual, loaded, _ = e.tryLoadOrStore(value) 219 | } else if e, ok := m.dirty[key]; ok { 220 | actual, loaded, _ = e.tryLoadOrStore(value) 221 | m.missLocked() 222 | } else { 223 | if !read.amended { 224 | // We're adding the first new key to the dirty map. 225 | // Make sure it is allocated and mark the read-only map as incomplete. 226 | m.dirtyLocked() 227 | m.read.Store(readOnlyIntMap{m: read.m, amended: true}) 228 | } 229 | m.dirty[key] = newEntryIntMap(value) 230 | actual, loaded = value, false 231 | } 232 | m.mu.Unlock() 233 | 234 | return actual, loaded 235 | } 236 | 237 | // tryLoadOrStore atomically loads or stores a value if the entry is not 238 | // expunged. 239 | // 240 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 241 | // returns with ok==false. 242 | func (e *entryIntMap) tryLoadOrStore(i int) (actual int, loaded, ok bool) { 243 | p := atomic.LoadPointer(&e.p) 244 | if p == expungedIntMap { 245 | return actual, false, false 246 | } 247 | if p != nil { 248 | return *(*int)(p), true, true 249 | } 250 | 251 | // Copy the interface after the first load to make this method more amenable 252 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 253 | // shouldn't bother heap-allocating. 
254 | ic := i 255 | for { 256 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 257 | return i, false, true 258 | } 259 | p = atomic.LoadPointer(&e.p) 260 | if p == expungedIntMap { 261 | return actual, false, false 262 | } 263 | if p != nil { 264 | return *(*int)(p), true, true 265 | } 266 | } 267 | } 268 | 269 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 270 | // The loaded result reports whether the key was present. 271 | func (m *IntMap) LoadAndDelete(key int) (value int, loaded bool) { 272 | read, _ := m.read.Load().(readOnlyIntMap) 273 | e, ok := read.m[key] 274 | if !ok && read.amended { 275 | m.mu.Lock() 276 | read, _ = m.read.Load().(readOnlyIntMap) 277 | e, ok = read.m[key] 278 | if !ok && read.amended { 279 | e, ok = m.dirty[key] 280 | delete(m.dirty, key) 281 | // Regardless of whether the entry was present, record a miss: this key 282 | // will take the slow path until the dirty map is promoted to the read 283 | // map. 284 | m.missLocked() 285 | } 286 | m.mu.Unlock() 287 | } 288 | if ok { 289 | return e.delete() 290 | } 291 | return value, false 292 | } 293 | 294 | // Delete deletes the value for a key. 295 | func (m *IntMap) Delete(key int) { 296 | m.LoadAndDelete(key) 297 | } 298 | 299 | func (e *entryIntMap) delete() (value int, ok bool) { 300 | for { 301 | p := atomic.LoadPointer(&e.p) 302 | if p == nil || p == expungedIntMap { 303 | return value, false 304 | } 305 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 306 | return *(*int)(p), true 307 | } 308 | } 309 | } 310 | 311 | // Range calls f sequentially for each key and value present in the map. 312 | // If f returns false, range stops the iteration. 313 | // 314 | // Range does not necessarily correspond to any consistent snapshot of the Map's 315 | // contents: no key will be visited more than once, but if the value for any key 316 | // is stored or deleted concurrently, Range may reflect any mapping for that key 317 | // from any point during the Range call. 318 | // 319 | // Range may be O(N) with the number of elements in the map even if f returns 320 | // false after a constant number of calls. 321 | func (m *IntMap) Range(f func(key, value int) bool) { 322 | // We need to be able to iterate over all of the keys that were already 323 | // present at the start of the call to Range. 324 | // If read.amended is false, then read.m satisfies that property without 325 | // requiring us to hold m.mu for a long time. 326 | read, _ := m.read.Load().(readOnlyIntMap) 327 | if read.amended { 328 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 329 | // (assuming the caller does not break out early), so a call to Range 330 | // amortizes an entire copy of the map: we can promote the dirty copy 331 | // immediately! 
332 | m.mu.Lock() 333 | read, _ = m.read.Load().(readOnlyIntMap) 334 | if read.amended { 335 | read = readOnlyIntMap{m: m.dirty} 336 | m.read.Store(read) 337 | m.dirty = nil 338 | m.misses = 0 339 | } 340 | m.mu.Unlock() 341 | } 342 | 343 | for k, e := range read.m { 344 | v, ok := e.load() 345 | if !ok { 346 | continue 347 | } 348 | if !f(k, v) { 349 | break 350 | } 351 | } 352 | } 353 | 354 | func (m *IntMap) missLocked() { 355 | m.misses++ 356 | if m.misses < len(m.dirty) { 357 | return 358 | } 359 | m.read.Store(readOnlyIntMap{m: m.dirty}) 360 | m.dirty = nil 361 | m.misses = 0 362 | } 363 | 364 | func (m *IntMap) dirtyLocked() { 365 | if m.dirty != nil { 366 | return 367 | } 368 | 369 | read, _ := m.read.Load().(readOnlyIntMap) 370 | m.dirty = make(map[int]*entryIntMap, len(read.m)) 371 | for k, e := range read.m { 372 | if !e.tryExpungeLocked() { 373 | m.dirty[k] = e 374 | } 375 | } 376 | } 377 | 378 | func (e *entryIntMap) tryExpungeLocked() (isExpunged bool) { 379 | p := atomic.LoadPointer(&e.p) 380 | for p == nil { 381 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedIntMap) { 382 | return true 383 | } 384 | p = atomic.LoadPointer(&e.p) 385 | } 386 | return p == expungedIntMap 387 | } 388 | -------------------------------------------------------------------------------- /testdata/intptrs.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type IntPtrs struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 
47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[*int]*entryIntPtrs 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyIntPtrs struct { 67 | m map[*int]*entryIntPtrs 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedIntPtrs = unsafe.Pointer(new(*int)) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryIntPtrs struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryIntPtrs(i *int) *entryIntPtrs { 99 | return &entryIntPtrs{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *IntPtrs) Load(key *int) (value *int, ok bool) { 106 | read, _ := m.read.Load().(readOnlyIntPtrs) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. (If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyIntPtrs) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 
120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryIntPtrs) load() (value *int, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedIntPtrs { 133 | return value, false 134 | } 135 | return *(**int)(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *IntPtrs) Store(key, value *int) { 140 | read, _ := m.read.Load().(readOnlyIntPtrs) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyIntPtrs) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyIntPtrs{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryIntPtrs(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryIntPtrs) tryStore(i **int) bool { 173 | for { 174 | p := atomic.LoadPointer(&e.p) 175 | if p == expungedIntPtrs { 176 | return false 177 | } 178 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 179 | return true 180 | } 181 | } 182 | } 183 | 184 | // unexpungeLocked ensures that the entry is not marked as expunged. 185 | // 186 | // If the entry was previously expunged, it must be added to the dirty map 187 | // before m.mu is unlocked. 188 | func (e *entryIntPtrs) unexpungeLocked() (wasExpunged bool) { 189 | return atomic.CompareAndSwapPointer(&e.p, expungedIntPtrs, nil) 190 | } 191 | 192 | // storeLocked unconditionally stores a value to the entry. 193 | // 194 | // The entry must be known not to be expunged. 195 | func (e *entryIntPtrs) storeLocked(i **int) { 196 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 197 | } 198 | 199 | // LoadOrStore returns the existing value for the key if present. 200 | // Otherwise, it stores and returns the given value. 201 | // The loaded result is true if the value was loaded, false if stored. 202 | func (m *IntPtrs) LoadOrStore(key, value *int) (actual *int, loaded bool) { 203 | // Avoid locking if it's a clean hit. 204 | read, _ := m.read.Load().(readOnlyIntPtrs) 205 | if e, ok := read.m[key]; ok { 206 | actual, loaded, ok := e.tryLoadOrStore(value) 207 | if ok { 208 | return actual, loaded 209 | } 210 | } 211 | 212 | m.mu.Lock() 213 | read, _ = m.read.Load().(readOnlyIntPtrs) 214 | if e, ok := read.m[key]; ok { 215 | if e.unexpungeLocked() { 216 | m.dirty[key] = e 217 | } 218 | actual, loaded, _ = e.tryLoadOrStore(value) 219 | } else if e, ok := m.dirty[key]; ok { 220 | actual, loaded, _ = e.tryLoadOrStore(value) 221 | m.missLocked() 222 | } else { 223 | if !read.amended { 224 | // We're adding the first new key to the dirty map. 225 | // Make sure it is allocated and mark the read-only map as incomplete. 
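// dirtyLocked allocates the dirty map (when it is nil) by copying every
// non-expunged entry out of the read map, so the first store of a new key
// after a promotion costs O(len(read.m)); later new-key stores reuse the
// same dirty map until it is promoted again.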
226 | m.dirtyLocked() 227 | m.read.Store(readOnlyIntPtrs{m: read.m, amended: true}) 228 | } 229 | m.dirty[key] = newEntryIntPtrs(value) 230 | actual, loaded = value, false 231 | } 232 | m.mu.Unlock() 233 | 234 | return actual, loaded 235 | } 236 | 237 | // tryLoadOrStore atomically loads or stores a value if the entry is not 238 | // expunged. 239 | // 240 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 241 | // returns with ok==false. 242 | func (e *entryIntPtrs) tryLoadOrStore(i *int) (actual *int, loaded, ok bool) { 243 | p := atomic.LoadPointer(&e.p) 244 | if p == expungedIntPtrs { 245 | return actual, false, false 246 | } 247 | if p != nil { 248 | return *(**int)(p), true, true 249 | } 250 | 251 | // Copy the interface after the first load to make this method more amenable 252 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 253 | // shouldn't bother heap-allocating. 254 | ic := i 255 | for { 256 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 257 | return i, false, true 258 | } 259 | p = atomic.LoadPointer(&e.p) 260 | if p == expungedIntPtrs { 261 | return actual, false, false 262 | } 263 | if p != nil { 264 | return *(**int)(p), true, true 265 | } 266 | } 267 | } 268 | 269 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 270 | // The loaded result reports whether the key was present. 271 | func (m *IntPtrs) LoadAndDelete(key *int) (value *int, loaded bool) { 272 | read, _ := m.read.Load().(readOnlyIntPtrs) 273 | e, ok := read.m[key] 274 | if !ok && read.amended { 275 | m.mu.Lock() 276 | read, _ = m.read.Load().(readOnlyIntPtrs) 277 | e, ok = read.m[key] 278 | if !ok && read.amended { 279 | e, ok = m.dirty[key] 280 | delete(m.dirty, key) 281 | // Regardless of whether the entry was present, record a miss: this key 282 | // will take the slow path until the dirty map is promoted to the read 283 | // map. 284 | m.missLocked() 285 | } 286 | m.mu.Unlock() 287 | } 288 | if ok { 289 | return e.delete() 290 | } 291 | return value, false 292 | } 293 | 294 | // Delete deletes the value for a key. 295 | func (m *IntPtrs) Delete(key *int) { 296 | m.LoadAndDelete(key) 297 | } 298 | 299 | func (e *entryIntPtrs) delete() (value *int, ok bool) { 300 | for { 301 | p := atomic.LoadPointer(&e.p) 302 | if p == nil || p == expungedIntPtrs { 303 | return value, false 304 | } 305 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 306 | return *(**int)(p), true 307 | } 308 | } 309 | } 310 | 311 | // Range calls f sequentially for each key and value present in the map. 312 | // If f returns false, range stops the iteration. 313 | // 314 | // Range does not necessarily correspond to any consistent snapshot of the Map's 315 | // contents: no key will be visited more than once, but if the value for any key 316 | // is stored or deleted concurrently, Range may reflect any mapping for that key 317 | // from any point during the Range call. 318 | // 319 | // Range may be O(N) with the number of elements in the map even if f returns 320 | // false after a constant number of calls. 321 | func (m *IntPtrs) Range(f func(key, value *int) bool) { 322 | // We need to be able to iterate over all of the keys that were already 323 | // present at the start of the call to Range. 324 | // If read.amended is false, then read.m satisfies that property without 325 | // requiring us to hold m.mu for a long time. 
326 | read, _ := m.read.Load().(readOnlyIntPtrs) 327 | if read.amended { 328 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 329 | // (assuming the caller does not break out early), so a call to Range 330 | // amortizes an entire copy of the map: we can promote the dirty copy 331 | // immediately! 332 | m.mu.Lock() 333 | read, _ = m.read.Load().(readOnlyIntPtrs) 334 | if read.amended { 335 | read = readOnlyIntPtrs{m: m.dirty} 336 | m.read.Store(read) 337 | m.dirty = nil 338 | m.misses = 0 339 | } 340 | m.mu.Unlock() 341 | } 342 | 343 | for k, e := range read.m { 344 | v, ok := e.load() 345 | if !ok { 346 | continue 347 | } 348 | if !f(k, v) { 349 | break 350 | } 351 | } 352 | } 353 | 354 | func (m *IntPtrs) missLocked() { 355 | m.misses++ 356 | if m.misses < len(m.dirty) { 357 | return 358 | } 359 | m.read.Store(readOnlyIntPtrs{m: m.dirty}) 360 | m.dirty = nil 361 | m.misses = 0 362 | } 363 | 364 | func (m *IntPtrs) dirtyLocked() { 365 | if m.dirty != nil { 366 | return 367 | } 368 | 369 | read, _ := m.read.Load().(readOnlyIntPtrs) 370 | m.dirty = make(map[*int]*entryIntPtrs, len(read.m)) 371 | for k, e := range read.m { 372 | if !e.tryExpungeLocked() { 373 | m.dirty[k] = e 374 | } 375 | } 376 | } 377 | 378 | func (e *entryIntPtrs) tryExpungeLocked() (isExpunged bool) { 379 | p := atomic.LoadPointer(&e.p) 380 | for p == nil { 381 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedIntPtrs) { 382 | return true 383 | } 384 | p = atomic.LoadPointer(&e.p) 385 | } 386 | return p == expungedIntPtrs 387 | } 388 | -------------------------------------------------------------------------------- /testdata/writermap.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "io" 11 | "sync" 12 | "sync/atomic" 13 | "unsafe" 14 | ) 15 | 16 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 17 | // by multiple goroutines without additional locking or coordination. 18 | // Loads, stores, and deletes run in amortized constant time. 19 | // 20 | // The Map type is specialized. Most code should use a plain Go map instead, 21 | // with separate locking or coordination, for better type safety and to make it 22 | // easier to maintain other invariants along with the map content. 23 | // 24 | // The Map type is optimized for two common use cases: (1) when the entry for a given 25 | // key is only ever written once but read many times, as in caches that only grow, 26 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 27 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 28 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 29 | // 30 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 31 | type WriterMap struct { 32 | mu sync.Mutex 33 | 34 | // read contains the portion of the map's contents that are safe for 35 | // concurrent access (with or without mu held). 36 | // 37 | // The read field itself is always safe to load, but must only be stored with 38 | // mu held. 
39 | // 40 | // Entries stored in read may be updated concurrently without mu, but updating 41 | // a previously-expunged entry requires that the entry be copied to the dirty 42 | // map and unexpunged with mu held. 43 | read atomic.Value // readOnly 44 | 45 | // dirty contains the portion of the map's contents that require mu to be 46 | // held. To ensure that the dirty map can be promoted to the read map quickly, 47 | // it also includes all of the non-expunged entries in the read map. 48 | // 49 | // Expunged entries are not stored in the dirty map. An expunged entry in the 50 | // clean map must be unexpunged and added to the dirty map before a new value 51 | // can be stored to it. 52 | // 53 | // If the dirty map is nil, the next write to the map will initialize it by 54 | // making a shallow copy of the clean map, omitting stale entries. 55 | dirty map[string]*entryWriterMap 56 | 57 | // misses counts the number of loads since the read map was last updated that 58 | // needed to lock mu to determine whether the key was present. 59 | // 60 | // Once enough misses have occurred to cover the cost of copying the dirty 61 | // map, the dirty map will be promoted to the read map (in the unamended 62 | // state) and the next store to the map will make a new dirty copy. 63 | misses int 64 | } 65 | 66 | // readOnly is an immutable struct stored atomically in the Map.read field. 67 | type readOnlyWriterMap struct { 68 | m map[string]*entryWriterMap 69 | amended bool // true if the dirty map contains some key not in m. 70 | } 71 | 72 | // expunged is an arbitrary pointer that marks entries which have been deleted 73 | // from the dirty map. 74 | var expungedWriterMap = unsafe.Pointer(new(io.Writer)) 75 | 76 | // An entry is a slot in the map corresponding to a particular key. 77 | type entryWriterMap struct { 78 | // p points to the interface{} value stored for the entry. 79 | // 80 | // If p == nil, the entry has been deleted and m.dirty == nil. 81 | // 82 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 83 | // is missing from m.dirty. 84 | // 85 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 86 | // != nil, in m.dirty[key]. 87 | // 88 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 89 | // next created, it will atomically replace nil with expunged and leave 90 | // m.dirty[key] unset. 91 | // 92 | // An entry's associated value can be updated by atomic replacement, provided 93 | // p != expunged. If p == expunged, an entry's associated value can be updated 94 | // only after first setting m.dirty[key] = e so that lookups using the dirty 95 | // map find the entry. 96 | p unsafe.Pointer // *interface{} 97 | } 98 | 99 | func newEntryWriterMap(i io.Writer) *entryWriterMap { 100 | return &entryWriterMap{p: unsafe.Pointer(&i)} 101 | } 102 | 103 | // Load returns the value stored in the map for a key, or nil if no 104 | // value is present. 105 | // The ok result indicates whether value was found in the map. 106 | func (m *WriterMap) Load(key string) (value io.Writer, ok bool) { 107 | read, _ := m.read.Load().(readOnlyWriterMap) 108 | e, ok := read.m[key] 109 | if !ok && read.amended { 110 | m.mu.Lock() 111 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 112 | // blocked on m.mu. (If further loads of the same key will not miss, it's 113 | // not worth copying the dirty map for this key.) 
114 | read, _ = m.read.Load().(readOnlyWriterMap) 115 | e, ok = read.m[key] 116 | if !ok && read.amended { 117 | e, ok = m.dirty[key] 118 | // Regardless of whether the entry was present, record a miss: this key 119 | // will take the slow path until the dirty map is promoted to the read 120 | // map. 121 | m.missLocked() 122 | } 123 | m.mu.Unlock() 124 | } 125 | if !ok { 126 | return value, false 127 | } 128 | return e.load() 129 | } 130 | 131 | func (e *entryWriterMap) load() (value io.Writer, ok bool) { 132 | p := atomic.LoadPointer(&e.p) 133 | if p == nil || p == expungedWriterMap { 134 | return value, false 135 | } 136 | return *(*io.Writer)(p), true 137 | } 138 | 139 | // Store sets the value for a key. 140 | func (m *WriterMap) Store(key string, value io.Writer) { 141 | read, _ := m.read.Load().(readOnlyWriterMap) 142 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 143 | return 144 | } 145 | 146 | m.mu.Lock() 147 | read, _ = m.read.Load().(readOnlyWriterMap) 148 | if e, ok := read.m[key]; ok { 149 | if e.unexpungeLocked() { 150 | // The entry was previously expunged, which implies that there is a 151 | // non-nil dirty map and this entry is not in it. 152 | m.dirty[key] = e 153 | } 154 | e.storeLocked(&value) 155 | } else if e, ok := m.dirty[key]; ok { 156 | e.storeLocked(&value) 157 | } else { 158 | if !read.amended { 159 | // We're adding the first new key to the dirty map. 160 | // Make sure it is allocated and mark the read-only map as incomplete. 161 | m.dirtyLocked() 162 | m.read.Store(readOnlyWriterMap{m: read.m, amended: true}) 163 | } 164 | m.dirty[key] = newEntryWriterMap(value) 165 | } 166 | m.mu.Unlock() 167 | } 168 | 169 | // tryStore stores a value if the entry has not been expunged. 170 | // 171 | // If the entry is expunged, tryStore returns false and leaves the entry 172 | // unchanged. 173 | func (e *entryWriterMap) tryStore(i *io.Writer) bool { 174 | for { 175 | p := atomic.LoadPointer(&e.p) 176 | if p == expungedWriterMap { 177 | return false 178 | } 179 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 180 | return true 181 | } 182 | } 183 | } 184 | 185 | // unexpungeLocked ensures that the entry is not marked as expunged. 186 | // 187 | // If the entry was previously expunged, it must be added to the dirty map 188 | // before m.mu is unlocked. 189 | func (e *entryWriterMap) unexpungeLocked() (wasExpunged bool) { 190 | return atomic.CompareAndSwapPointer(&e.p, expungedWriterMap, nil) 191 | } 192 | 193 | // storeLocked unconditionally stores a value to the entry. 194 | // 195 | // The entry must be known not to be expunged. 196 | func (e *entryWriterMap) storeLocked(i *io.Writer) { 197 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 198 | } 199 | 200 | // LoadOrStore returns the existing value for the key if present. 201 | // Otherwise, it stores and returns the given value. 202 | // The loaded result is true if the value was loaded, false if stored. 203 | func (m *WriterMap) LoadOrStore(key string, value io.Writer) (actual io.Writer, loaded bool) { 204 | // Avoid locking if it's a clean hit. 
205 | read, _ := m.read.Load().(readOnlyWriterMap) 206 | if e, ok := read.m[key]; ok { 207 | actual, loaded, ok := e.tryLoadOrStore(value) 208 | if ok { 209 | return actual, loaded 210 | } 211 | } 212 | 213 | m.mu.Lock() 214 | read, _ = m.read.Load().(readOnlyWriterMap) 215 | if e, ok := read.m[key]; ok { 216 | if e.unexpungeLocked() { 217 | m.dirty[key] = e 218 | } 219 | actual, loaded, _ = e.tryLoadOrStore(value) 220 | } else if e, ok := m.dirty[key]; ok { 221 | actual, loaded, _ = e.tryLoadOrStore(value) 222 | m.missLocked() 223 | } else { 224 | if !read.amended { 225 | // We're adding the first new key to the dirty map. 226 | // Make sure it is allocated and mark the read-only map as incomplete. 227 | m.dirtyLocked() 228 | m.read.Store(readOnlyWriterMap{m: read.m, amended: true}) 229 | } 230 | m.dirty[key] = newEntryWriterMap(value) 231 | actual, loaded = value, false 232 | } 233 | m.mu.Unlock() 234 | 235 | return actual, loaded 236 | } 237 | 238 | // tryLoadOrStore atomically loads or stores a value if the entry is not 239 | // expunged. 240 | // 241 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 242 | // returns with ok==false. 243 | func (e *entryWriterMap) tryLoadOrStore(i io.Writer) (actual io.Writer, loaded, ok bool) { 244 | p := atomic.LoadPointer(&e.p) 245 | if p == expungedWriterMap { 246 | return actual, false, false 247 | } 248 | if p != nil { 249 | return *(*io.Writer)(p), true, true 250 | } 251 | 252 | // Copy the interface after the first load to make this method more amenable 253 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 254 | // shouldn't bother heap-allocating. 255 | ic := i 256 | for { 257 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 258 | return i, false, true 259 | } 260 | p = atomic.LoadPointer(&e.p) 261 | if p == expungedWriterMap { 262 | return actual, false, false 263 | } 264 | if p != nil { 265 | return *(*io.Writer)(p), true, true 266 | } 267 | } 268 | } 269 | 270 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 271 | // The loaded result reports whether the key was present. 272 | func (m *WriterMap) LoadAndDelete(key string) (value io.Writer, loaded bool) { 273 | read, _ := m.read.Load().(readOnlyWriterMap) 274 | e, ok := read.m[key] 275 | if !ok && read.amended { 276 | m.mu.Lock() 277 | read, _ = m.read.Load().(readOnlyWriterMap) 278 | e, ok = read.m[key] 279 | if !ok && read.amended { 280 | e, ok = m.dirty[key] 281 | delete(m.dirty, key) 282 | // Regardless of whether the entry was present, record a miss: this key 283 | // will take the slow path until the dirty map is promoted to the read 284 | // map. 285 | m.missLocked() 286 | } 287 | m.mu.Unlock() 288 | } 289 | if ok { 290 | return e.delete() 291 | } 292 | return value, false 293 | } 294 | 295 | // Delete deletes the value for a key. 296 | func (m *WriterMap) Delete(key string) { 297 | m.LoadAndDelete(key) 298 | } 299 | 300 | func (e *entryWriterMap) delete() (value io.Writer, ok bool) { 301 | for { 302 | p := atomic.LoadPointer(&e.p) 303 | if p == nil || p == expungedWriterMap { 304 | return value, false 305 | } 306 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 307 | return *(*io.Writer)(p), true 308 | } 309 | } 310 | } 311 | 312 | // Range calls f sequentially for each key and value present in the map. 313 | // If f returns false, range stops the iteration. 
314 | // 315 | // Range does not necessarily correspond to any consistent snapshot of the Map's 316 | // contents: no key will be visited more than once, but if the value for any key 317 | // is stored or deleted concurrently, Range may reflect any mapping for that key 318 | // from any point during the Range call. 319 | // 320 | // Range may be O(N) with the number of elements in the map even if f returns 321 | // false after a constant number of calls. 322 | func (m *WriterMap) Range(f func(key string, value io.Writer) bool) { 323 | // We need to be able to iterate over all of the keys that were already 324 | // present at the start of the call to Range. 325 | // If read.amended is false, then read.m satisfies that property without 326 | // requiring us to hold m.mu for a long time. 327 | read, _ := m.read.Load().(readOnlyWriterMap) 328 | if read.amended { 329 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 330 | // (assuming the caller does not break out early), so a call to Range 331 | // amortizes an entire copy of the map: we can promote the dirty copy 332 | // immediately! 333 | m.mu.Lock() 334 | read, _ = m.read.Load().(readOnlyWriterMap) 335 | if read.amended { 336 | read = readOnlyWriterMap{m: m.dirty} 337 | m.read.Store(read) 338 | m.dirty = nil 339 | m.misses = 0 340 | } 341 | m.mu.Unlock() 342 | } 343 | 344 | for k, e := range read.m { 345 | v, ok := e.load() 346 | if !ok { 347 | continue 348 | } 349 | if !f(k, v) { 350 | break 351 | } 352 | } 353 | } 354 | 355 | func (m *WriterMap) missLocked() { 356 | m.misses++ 357 | if m.misses < len(m.dirty) { 358 | return 359 | } 360 | m.read.Store(readOnlyWriterMap{m: m.dirty}) 361 | m.dirty = nil 362 | m.misses = 0 363 | } 364 | 365 | func (m *WriterMap) dirtyLocked() { 366 | if m.dirty != nil { 367 | return 368 | } 369 | 370 | read, _ := m.read.Load().(readOnlyWriterMap) 371 | m.dirty = make(map[string]*entryWriterMap, len(read.m)) 372 | for k, e := range read.m { 373 | if !e.tryExpungeLocked() { 374 | m.dirty[k] = e 375 | } 376 | } 377 | } 378 | 379 | func (e *entryWriterMap) tryExpungeLocked() (isExpunged bool) { 380 | p := atomic.LoadPointer(&e.p) 381 | for p == nil { 382 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedWriterMap) { 383 | return true 384 | } 385 | p = atomic.LoadPointer(&e.p) 386 | } 387 | return p == expungedWriterMap 388 | } 389 | -------------------------------------------------------------------------------- /testdata/stringmap.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 
22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type StringMap struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[string]*entryStringMap 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyStringMap struct { 67 | m map[string]*entryStringMap 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedStringMap = unsafe.Pointer(new(interface{})) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryStringMap struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 
95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryStringMap(i interface{}) *entryStringMap { 99 | return &entryStringMap{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *StringMap) Load(key string) (value interface{}, ok bool) { 106 | read, _ := m.read.Load().(readOnlyStringMap) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. (If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyStringMap) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryStringMap) load() (value interface{}, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedStringMap { 133 | return value, false 134 | } 135 | return *(*interface{})(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *StringMap) Store(key string, value interface{}) { 140 | read, _ := m.read.Load().(readOnlyStringMap) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyStringMap) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyStringMap{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryStringMap(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryStringMap) tryStore(i *interface{}) bool { 173 | for { 174 | p := atomic.LoadPointer(&e.p) 175 | if p == expungedStringMap { 176 | return false 177 | } 178 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 179 | return true 180 | } 181 | } 182 | } 183 | 184 | // unexpungeLocked ensures that the entry is not marked as expunged. 185 | // 186 | // If the entry was previously expunged, it must be added to the dirty map 187 | // before m.mu is unlocked. 188 | func (e *entryStringMap) unexpungeLocked() (wasExpunged bool) { 189 | return atomic.CompareAndSwapPointer(&e.p, expungedStringMap, nil) 190 | } 191 | 192 | // storeLocked unconditionally stores a value to the entry. 193 | // 194 | // The entry must be known not to be expunged. 
195 | func (e *entryStringMap) storeLocked(i *interface{}) { 196 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 197 | } 198 | 199 | // LoadOrStore returns the existing value for the key if present. 200 | // Otherwise, it stores and returns the given value. 201 | // The loaded result is true if the value was loaded, false if stored. 202 | func (m *StringMap) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) { 203 | // Avoid locking if it's a clean hit. 204 | read, _ := m.read.Load().(readOnlyStringMap) 205 | if e, ok := read.m[key]; ok { 206 | actual, loaded, ok := e.tryLoadOrStore(value) 207 | if ok { 208 | return actual, loaded 209 | } 210 | } 211 | 212 | m.mu.Lock() 213 | read, _ = m.read.Load().(readOnlyStringMap) 214 | if e, ok := read.m[key]; ok { 215 | if e.unexpungeLocked() { 216 | m.dirty[key] = e 217 | } 218 | actual, loaded, _ = e.tryLoadOrStore(value) 219 | } else if e, ok := m.dirty[key]; ok { 220 | actual, loaded, _ = e.tryLoadOrStore(value) 221 | m.missLocked() 222 | } else { 223 | if !read.amended { 224 | // We're adding the first new key to the dirty map. 225 | // Make sure it is allocated and mark the read-only map as incomplete. 226 | m.dirtyLocked() 227 | m.read.Store(readOnlyStringMap{m: read.m, amended: true}) 228 | } 229 | m.dirty[key] = newEntryStringMap(value) 230 | actual, loaded = value, false 231 | } 232 | m.mu.Unlock() 233 | 234 | return actual, loaded 235 | } 236 | 237 | // tryLoadOrStore atomically loads or stores a value if the entry is not 238 | // expunged. 239 | // 240 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 241 | // returns with ok==false. 242 | func (e *entryStringMap) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { 243 | p := atomic.LoadPointer(&e.p) 244 | if p == expungedStringMap { 245 | return actual, false, false 246 | } 247 | if p != nil { 248 | return *(*interface{})(p), true, true 249 | } 250 | 251 | // Copy the interface after the first load to make this method more amenable 252 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 253 | // shouldn't bother heap-allocating. 254 | ic := i 255 | for { 256 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 257 | return i, false, true 258 | } 259 | p = atomic.LoadPointer(&e.p) 260 | if p == expungedStringMap { 261 | return actual, false, false 262 | } 263 | if p != nil { 264 | return *(*interface{})(p), true, true 265 | } 266 | } 267 | } 268 | 269 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 270 | // The loaded result reports whether the key was present. 271 | func (m *StringMap) LoadAndDelete(key string) (value interface{}, loaded bool) { 272 | read, _ := m.read.Load().(readOnlyStringMap) 273 | e, ok := read.m[key] 274 | if !ok && read.amended { 275 | m.mu.Lock() 276 | read, _ = m.read.Load().(readOnlyStringMap) 277 | e, ok = read.m[key] 278 | if !ok && read.amended { 279 | e, ok = m.dirty[key] 280 | delete(m.dirty, key) 281 | // Regardless of whether the entry was present, record a miss: this key 282 | // will take the slow path until the dirty map is promoted to the read 283 | // map. 284 | m.missLocked() 285 | } 286 | m.mu.Unlock() 287 | } 288 | if ok { 289 | return e.delete() 290 | } 291 | return value, false 292 | } 293 | 294 | // Delete deletes the value for a key. 
295 | func (m *StringMap) Delete(key string) { 296 | m.LoadAndDelete(key) 297 | } 298 | 299 | func (e *entryStringMap) delete() (value interface{}, ok bool) { 300 | for { 301 | p := atomic.LoadPointer(&e.p) 302 | if p == nil || p == expungedStringMap { 303 | return value, false 304 | } 305 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 306 | return *(*interface{})(p), true 307 | } 308 | } 309 | } 310 | 311 | // Range calls f sequentially for each key and value present in the map. 312 | // If f returns false, range stops the iteration. 313 | // 314 | // Range does not necessarily correspond to any consistent snapshot of the Map's 315 | // contents: no key will be visited more than once, but if the value for any key 316 | // is stored or deleted concurrently, Range may reflect any mapping for that key 317 | // from any point during the Range call. 318 | // 319 | // Range may be O(N) with the number of elements in the map even if f returns 320 | // false after a constant number of calls. 321 | func (m *StringMap) Range(f func(key string, value interface{}) bool) { 322 | // We need to be able to iterate over all of the keys that were already 323 | // present at the start of the call to Range. 324 | // If read.amended is false, then read.m satisfies that property without 325 | // requiring us to hold m.mu for a long time. 326 | read, _ := m.read.Load().(readOnlyStringMap) 327 | if read.amended { 328 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 329 | // (assuming the caller does not break out early), so a call to Range 330 | // amortizes an entire copy of the map: we can promote the dirty copy 331 | // immediately! 332 | m.mu.Lock() 333 | read, _ = m.read.Load().(readOnlyStringMap) 334 | if read.amended { 335 | read = readOnlyStringMap{m: m.dirty} 336 | m.read.Store(read) 337 | m.dirty = nil 338 | m.misses = 0 339 | } 340 | m.mu.Unlock() 341 | } 342 | 343 | for k, e := range read.m { 344 | v, ok := e.load() 345 | if !ok { 346 | continue 347 | } 348 | if !f(k, v) { 349 | break 350 | } 351 | } 352 | } 353 | 354 | func (m *StringMap) missLocked() { 355 | m.misses++ 356 | if m.misses < len(m.dirty) { 357 | return 358 | } 359 | m.read.Store(readOnlyStringMap{m: m.dirty}) 360 | m.dirty = nil 361 | m.misses = 0 362 | } 363 | 364 | func (m *StringMap) dirtyLocked() { 365 | if m.dirty != nil { 366 | return 367 | } 368 | 369 | read, _ := m.read.Load().(readOnlyStringMap) 370 | m.dirty = make(map[string]*entryStringMap, len(read.m)) 371 | for k, e := range read.m { 372 | if !e.tryExpungeLocked() { 373 | m.dirty[k] = e 374 | } 375 | } 376 | } 377 | 378 | func (e *entryStringMap) tryExpungeLocked() (isExpunged bool) { 379 | p := atomic.LoadPointer(&e.p) 380 | for p == nil { 381 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedStringMap) { 382 | return true 383 | } 384 | p = atomic.LoadPointer(&e.p) 385 | } 386 | return p == expungedStringMap 387 | } 388 | -------------------------------------------------------------------------------- /testdata/requests.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 
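// A rough usage sketch (an editorial addition, not generator output): the
// Store/Load/Range/Delete calls below match the methods defined in this file;
// the identifiers reqs and "home" are hypothetical, and a standalone example
// would also import "fmt" and "net/http/httptest".
//
//	var reqs Requests
//	reqs.Store("home", httptest.NewRequest("GET", "/", nil))
//	if r, ok := reqs.Load("home"); ok {
//		fmt.Println(r.Method, r.URL.Path) // "GET /"
//	}
//	reqs.Range(func(key string, r *http.Request) bool {
//		fmt.Println(key, "->", r.URL.String())
//		return true
//	})
//	reqs.Delete("home")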
6 | 7 | package main 8 | 9 | import ( 10 | "net/http" 11 | "sync" 12 | "sync/atomic" 13 | "unsafe" 14 | ) 15 | 16 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 17 | // by multiple goroutines without additional locking or coordination. 18 | // Loads, stores, and deletes run in amortized constant time. 19 | // 20 | // The Map type is specialized. Most code should use a plain Go map instead, 21 | // with separate locking or coordination, for better type safety and to make it 22 | // easier to maintain other invariants along with the map content. 23 | // 24 | // The Map type is optimized for two common use cases: (1) when the entry for a given 25 | // key is only ever written once but read many times, as in caches that only grow, 26 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 27 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 28 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 29 | // 30 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 31 | type Requests struct { 32 | mu sync.Mutex 33 | 34 | // read contains the portion of the map's contents that are safe for 35 | // concurrent access (with or without mu held). 36 | // 37 | // The read field itself is always safe to load, but must only be stored with 38 | // mu held. 39 | // 40 | // Entries stored in read may be updated concurrently without mu, but updating 41 | // a previously-expunged entry requires that the entry be copied to the dirty 42 | // map and unexpunged with mu held. 43 | read atomic.Value // readOnly 44 | 45 | // dirty contains the portion of the map's contents that require mu to be 46 | // held. To ensure that the dirty map can be promoted to the read map quickly, 47 | // it also includes all of the non-expunged entries in the read map. 48 | // 49 | // Expunged entries are not stored in the dirty map. An expunged entry in the 50 | // clean map must be unexpunged and added to the dirty map before a new value 51 | // can be stored to it. 52 | // 53 | // If the dirty map is nil, the next write to the map will initialize it by 54 | // making a shallow copy of the clean map, omitting stale entries. 55 | dirty map[string]*entryRequests 56 | 57 | // misses counts the number of loads since the read map was last updated that 58 | // needed to lock mu to determine whether the key was present. 59 | // 60 | // Once enough misses have occurred to cover the cost of copying the dirty 61 | // map, the dirty map will be promoted to the read map (in the unamended 62 | // state) and the next store to the map will make a new dirty copy. 63 | misses int 64 | } 65 | 66 | // readOnly is an immutable struct stored atomically in the Map.read field. 67 | type readOnlyRequests struct { 68 | m map[string]*entryRequests 69 | amended bool // true if the dirty map contains some key not in m. 70 | } 71 | 72 | // expunged is an arbitrary pointer that marks entries which have been deleted 73 | // from the dirty map. 74 | var expungedRequests = unsafe.Pointer(new(*http.Request)) 75 | 76 | // An entry is a slot in the map corresponding to a particular key. 77 | type entryRequests struct { 78 | // p points to the interface{} value stored for the entry. 79 | // 80 | // If p == nil, the entry has been deleted and m.dirty == nil. 81 | // 82 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 83 | // is missing from m.dirty. 
84 | // 85 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 86 | // != nil, in m.dirty[key]. 87 | // 88 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 89 | // next created, it will atomically replace nil with expunged and leave 90 | // m.dirty[key] unset. 91 | // 92 | // An entry's associated value can be updated by atomic replacement, provided 93 | // p != expunged. If p == expunged, an entry's associated value can be updated 94 | // only after first setting m.dirty[key] = e so that lookups using the dirty 95 | // map find the entry. 96 | p unsafe.Pointer // *interface{} 97 | } 98 | 99 | func newEntryRequests(i *http.Request) *entryRequests { 100 | return &entryRequests{p: unsafe.Pointer(&i)} 101 | } 102 | 103 | // Load returns the value stored in the map for a key, or nil if no 104 | // value is present. 105 | // The ok result indicates whether value was found in the map. 106 | func (m *Requests) Load(key string) (value *http.Request, ok bool) { 107 | read, _ := m.read.Load().(readOnlyRequests) 108 | e, ok := read.m[key] 109 | if !ok && read.amended { 110 | m.mu.Lock() 111 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 112 | // blocked on m.mu. (If further loads of the same key will not miss, it's 113 | // not worth copying the dirty map for this key.) 114 | read, _ = m.read.Load().(readOnlyRequests) 115 | e, ok = read.m[key] 116 | if !ok && read.amended { 117 | e, ok = m.dirty[key] 118 | // Regardless of whether the entry was present, record a miss: this key 119 | // will take the slow path until the dirty map is promoted to the read 120 | // map. 121 | m.missLocked() 122 | } 123 | m.mu.Unlock() 124 | } 125 | if !ok { 126 | return value, false 127 | } 128 | return e.load() 129 | } 130 | 131 | func (e *entryRequests) load() (value *http.Request, ok bool) { 132 | p := atomic.LoadPointer(&e.p) 133 | if p == nil || p == expungedRequests { 134 | return value, false 135 | } 136 | return *(**http.Request)(p), true 137 | } 138 | 139 | // Store sets the value for a key. 140 | func (m *Requests) Store(key string, value *http.Request) { 141 | read, _ := m.read.Load().(readOnlyRequests) 142 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 143 | return 144 | } 145 | 146 | m.mu.Lock() 147 | read, _ = m.read.Load().(readOnlyRequests) 148 | if e, ok := read.m[key]; ok { 149 | if e.unexpungeLocked() { 150 | // The entry was previously expunged, which implies that there is a 151 | // non-nil dirty map and this entry is not in it. 152 | m.dirty[key] = e 153 | } 154 | e.storeLocked(&value) 155 | } else if e, ok := m.dirty[key]; ok { 156 | e.storeLocked(&value) 157 | } else { 158 | if !read.amended { 159 | // We're adding the first new key to the dirty map. 160 | // Make sure it is allocated and mark the read-only map as incomplete. 161 | m.dirtyLocked() 162 | m.read.Store(readOnlyRequests{m: read.m, amended: true}) 163 | } 164 | m.dirty[key] = newEntryRequests(value) 165 | } 166 | m.mu.Unlock() 167 | } 168 | 169 | // tryStore stores a value if the entry has not been expunged. 170 | // 171 | // If the entry is expunged, tryStore returns false and leaves the entry 172 | // unchanged. 
173 | func (e *entryRequests) tryStore(i **http.Request) bool { 174 | for { 175 | p := atomic.LoadPointer(&e.p) 176 | if p == expungedRequests { 177 | return false 178 | } 179 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 180 | return true 181 | } 182 | } 183 | } 184 | 185 | // unexpungeLocked ensures that the entry is not marked as expunged. 186 | // 187 | // If the entry was previously expunged, it must be added to the dirty map 188 | // before m.mu is unlocked. 189 | func (e *entryRequests) unexpungeLocked() (wasExpunged bool) { 190 | return atomic.CompareAndSwapPointer(&e.p, expungedRequests, nil) 191 | } 192 | 193 | // storeLocked unconditionally stores a value to the entry. 194 | // 195 | // The entry must be known not to be expunged. 196 | func (e *entryRequests) storeLocked(i **http.Request) { 197 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 198 | } 199 | 200 | // LoadOrStore returns the existing value for the key if present. 201 | // Otherwise, it stores and returns the given value. 202 | // The loaded result is true if the value was loaded, false if stored. 203 | func (m *Requests) LoadOrStore(key string, value *http.Request) (actual *http.Request, loaded bool) { 204 | // Avoid locking if it's a clean hit. 205 | read, _ := m.read.Load().(readOnlyRequests) 206 | if e, ok := read.m[key]; ok { 207 | actual, loaded, ok := e.tryLoadOrStore(value) 208 | if ok { 209 | return actual, loaded 210 | } 211 | } 212 | 213 | m.mu.Lock() 214 | read, _ = m.read.Load().(readOnlyRequests) 215 | if e, ok := read.m[key]; ok { 216 | if e.unexpungeLocked() { 217 | m.dirty[key] = e 218 | } 219 | actual, loaded, _ = e.tryLoadOrStore(value) 220 | } else if e, ok := m.dirty[key]; ok { 221 | actual, loaded, _ = e.tryLoadOrStore(value) 222 | m.missLocked() 223 | } else { 224 | if !read.amended { 225 | // We're adding the first new key to the dirty map. 226 | // Make sure it is allocated and mark the read-only map as incomplete. 227 | m.dirtyLocked() 228 | m.read.Store(readOnlyRequests{m: read.m, amended: true}) 229 | } 230 | m.dirty[key] = newEntryRequests(value) 231 | actual, loaded = value, false 232 | } 233 | m.mu.Unlock() 234 | 235 | return actual, loaded 236 | } 237 | 238 | // tryLoadOrStore atomically loads or stores a value if the entry is not 239 | // expunged. 240 | // 241 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 242 | // returns with ok==false. 243 | func (e *entryRequests) tryLoadOrStore(i *http.Request) (actual *http.Request, loaded, ok bool) { 244 | p := atomic.LoadPointer(&e.p) 245 | if p == expungedRequests { 246 | return actual, false, false 247 | } 248 | if p != nil { 249 | return *(**http.Request)(p), true, true 250 | } 251 | 252 | // Copy the interface after the first load to make this method more amenable 253 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 254 | // shouldn't bother heap-allocating. 255 | ic := i 256 | for { 257 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 258 | return i, false, true 259 | } 260 | p = atomic.LoadPointer(&e.p) 261 | if p == expungedRequests { 262 | return actual, false, false 263 | } 264 | if p != nil { 265 | return *(**http.Request)(p), true, true 266 | } 267 | } 268 | } 269 | 270 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 271 | // The loaded result reports whether the key was present. 
272 | func (m *Requests) LoadAndDelete(key string) (value *http.Request, loaded bool) { 273 | read, _ := m.read.Load().(readOnlyRequests) 274 | e, ok := read.m[key] 275 | if !ok && read.amended { 276 | m.mu.Lock() 277 | read, _ = m.read.Load().(readOnlyRequests) 278 | e, ok = read.m[key] 279 | if !ok && read.amended { 280 | e, ok = m.dirty[key] 281 | delete(m.dirty, key) 282 | // Regardless of whether the entry was present, record a miss: this key 283 | // will take the slow path until the dirty map is promoted to the read 284 | // map. 285 | m.missLocked() 286 | } 287 | m.mu.Unlock() 288 | } 289 | if ok { 290 | return e.delete() 291 | } 292 | return value, false 293 | } 294 | 295 | // Delete deletes the value for a key. 296 | func (m *Requests) Delete(key string) { 297 | m.LoadAndDelete(key) 298 | } 299 | 300 | func (e *entryRequests) delete() (value *http.Request, ok bool) { 301 | for { 302 | p := atomic.LoadPointer(&e.p) 303 | if p == nil || p == expungedRequests { 304 | return value, false 305 | } 306 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 307 | return *(**http.Request)(p), true 308 | } 309 | } 310 | } 311 | 312 | // Range calls f sequentially for each key and value present in the map. 313 | // If f returns false, range stops the iteration. 314 | // 315 | // Range does not necessarily correspond to any consistent snapshot of the Map's 316 | // contents: no key will be visited more than once, but if the value for any key 317 | // is stored or deleted concurrently, Range may reflect any mapping for that key 318 | // from any point during the Range call. 319 | // 320 | // Range may be O(N) with the number of elements in the map even if f returns 321 | // false after a constant number of calls. 322 | func (m *Requests) Range(f func(key string, value *http.Request) bool) { 323 | // We need to be able to iterate over all of the keys that were already 324 | // present at the start of the call to Range. 325 | // If read.amended is false, then read.m satisfies that property without 326 | // requiring us to hold m.mu for a long time. 327 | read, _ := m.read.Load().(readOnlyRequests) 328 | if read.amended { 329 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 330 | // (assuming the caller does not break out early), so a call to Range 331 | // amortizes an entire copy of the map: we can promote the dirty copy 332 | // immediately! 
333 | m.mu.Lock() 334 | read, _ = m.read.Load().(readOnlyRequests) 335 | if read.amended { 336 | read = readOnlyRequests{m: m.dirty} 337 | m.read.Store(read) 338 | m.dirty = nil 339 | m.misses = 0 340 | } 341 | m.mu.Unlock() 342 | } 343 | 344 | for k, e := range read.m { 345 | v, ok := e.load() 346 | if !ok { 347 | continue 348 | } 349 | if !f(k, v) { 350 | break 351 | } 352 | } 353 | } 354 | 355 | func (m *Requests) missLocked() { 356 | m.misses++ 357 | if m.misses < len(m.dirty) { 358 | return 359 | } 360 | m.read.Store(readOnlyRequests{m: m.dirty}) 361 | m.dirty = nil 362 | m.misses = 0 363 | } 364 | 365 | func (m *Requests) dirtyLocked() { 366 | if m.dirty != nil { 367 | return 368 | } 369 | 370 | read, _ := m.read.Load().(readOnlyRequests) 371 | m.dirty = make(map[string]*entryRequests, len(read.m)) 372 | for k, e := range read.m { 373 | if !e.tryExpungeLocked() { 374 | m.dirty[k] = e 375 | } 376 | } 377 | } 378 | 379 | func (e *entryRequests) tryExpungeLocked() (isExpunged bool) { 380 | p := atomic.LoadPointer(&e.p) 381 | for p == nil { 382 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedRequests) { 383 | return true 384 | } 385 | p = atomic.LoadPointer(&e.p) 386 | } 387 | return p == expungedRequests 388 | } 389 | -------------------------------------------------------------------------------- /testdata/stringintchan.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type StringIntChan struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 
47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[string]*entryStringIntChan 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyStringIntChan struct { 67 | m map[string]*entryStringIntChan 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedStringIntChan = unsafe.Pointer(new((chan int))) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryStringIntChan struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryStringIntChan(i chan int) *entryStringIntChan { 99 | return &entryStringIntChan{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *StringIntChan) Load(key string) (value chan int, ok bool) { 106 | read, _ := m.read.Load().(readOnlyStringIntChan) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. (If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyStringIntChan) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 
120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryStringIntChan) load() (value chan int, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedStringIntChan { 133 | return value, false 134 | } 135 | return *(*(chan int))(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *StringIntChan) Store(key string, value chan int) { 140 | read, _ := m.read.Load().(readOnlyStringIntChan) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyStringIntChan) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyStringIntChan{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryStringIntChan(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryStringIntChan) tryStore(i *(chan int), 173 | 174 | ) bool { 175 | for { 176 | p := atomic.LoadPointer(&e.p) 177 | if p == expungedStringIntChan { 178 | return false 179 | } 180 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 181 | return true 182 | } 183 | } 184 | } 185 | 186 | // unexpungeLocked ensures that the entry is not marked as expunged. 187 | // 188 | // If the entry was previously expunged, it must be added to the dirty map 189 | // before m.mu is unlocked. 190 | func (e *entryStringIntChan) unexpungeLocked() (wasExpunged bool) { 191 | return atomic.CompareAndSwapPointer(&e.p, expungedStringIntChan, nil) 192 | } 193 | 194 | // storeLocked unconditionally stores a value to the entry. 195 | // 196 | // The entry must be known not to be expunged. 197 | func (e *entryStringIntChan) storeLocked(i *(chan int), 198 | 199 | ) { 200 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 201 | } 202 | 203 | // LoadOrStore returns the existing value for the key if present. 204 | // Otherwise, it stores and returns the given value. 205 | // The loaded result is true if the value was loaded, false if stored. 206 | func (m *StringIntChan) LoadOrStore(key string, value chan int) (actual chan int, loaded bool) { 207 | // Avoid locking if it's a clean hit. 208 | read, _ := m.read.Load().(readOnlyStringIntChan) 209 | if e, ok := read.m[key]; ok { 210 | actual, loaded, ok := e.tryLoadOrStore(value) 211 | if ok { 212 | return actual, loaded 213 | } 214 | } 215 | 216 | m.mu.Lock() 217 | read, _ = m.read.Load().(readOnlyStringIntChan) 218 | if e, ok := read.m[key]; ok { 219 | if e.unexpungeLocked() { 220 | m.dirty[key] = e 221 | } 222 | actual, loaded, _ = e.tryLoadOrStore(value) 223 | } else if e, ok := m.dirty[key]; ok { 224 | actual, loaded, _ = e.tryLoadOrStore(value) 225 | m.missLocked() 226 | } else { 227 | if !read.amended { 228 | // We're adding the first new key to the dirty map. 
229 | // Make sure it is allocated and mark the read-only map as incomplete. 230 | m.dirtyLocked() 231 | m.read.Store(readOnlyStringIntChan{m: read.m, amended: true}) 232 | } 233 | m.dirty[key] = newEntryStringIntChan(value) 234 | actual, loaded = value, false 235 | } 236 | m.mu.Unlock() 237 | 238 | return actual, loaded 239 | } 240 | 241 | // tryLoadOrStore atomically loads or stores a value if the entry is not 242 | // expunged. 243 | // 244 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 245 | // returns with ok==false. 246 | func (e *entryStringIntChan) tryLoadOrStore(i chan int) (actual chan int, loaded, ok bool) { 247 | p := atomic.LoadPointer(&e.p) 248 | if p == expungedStringIntChan { 249 | return actual, false, false 250 | } 251 | if p != nil { 252 | return *(*(chan int))(p), true, true 253 | } 254 | 255 | // Copy the interface after the first load to make this method more amenable 256 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 257 | // shouldn't bother heap-allocating. 258 | ic := i 259 | for { 260 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 261 | return i, false, true 262 | } 263 | p = atomic.LoadPointer(&e.p) 264 | if p == expungedStringIntChan { 265 | return actual, false, false 266 | } 267 | if p != nil { 268 | return *(*(chan int))(p), true, true 269 | } 270 | } 271 | } 272 | 273 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 274 | // The loaded result reports whether the key was present. 275 | func (m *StringIntChan) LoadAndDelete(key string) (value chan int, loaded bool) { 276 | read, _ := m.read.Load().(readOnlyStringIntChan) 277 | e, ok := read.m[key] 278 | if !ok && read.amended { 279 | m.mu.Lock() 280 | read, _ = m.read.Load().(readOnlyStringIntChan) 281 | e, ok = read.m[key] 282 | if !ok && read.amended { 283 | e, ok = m.dirty[key] 284 | delete(m.dirty, key) 285 | // Regardless of whether the entry was present, record a miss: this key 286 | // will take the slow path until the dirty map is promoted to the read 287 | // map. 288 | m.missLocked() 289 | } 290 | m.mu.Unlock() 291 | } 292 | if ok { 293 | return e.delete() 294 | } 295 | return value, false 296 | } 297 | 298 | // Delete deletes the value for a key. 299 | func (m *StringIntChan) Delete(key string) { 300 | m.LoadAndDelete(key) 301 | } 302 | 303 | func (e *entryStringIntChan) delete() (value chan int, ok bool) { 304 | for { 305 | p := atomic.LoadPointer(&e.p) 306 | if p == nil || p == expungedStringIntChan { 307 | return value, false 308 | } 309 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 310 | return *(*(chan int))(p), true 311 | } 312 | } 313 | } 314 | 315 | // Range calls f sequentially for each key and value present in the map. 316 | // If f returns false, range stops the iteration. 317 | // 318 | // Range does not necessarily correspond to any consistent snapshot of the Map's 319 | // contents: no key will be visited more than once, but if the value for any key 320 | // is stored or deleted concurrently, Range may reflect any mapping for that key 321 | // from any point during the Range call. 322 | // 323 | // Range may be O(N) with the number of elements in the map even if f returns 324 | // false after a constant number of calls. 325 | func (m *StringIntChan) Range(f func(key string, value chan int) bool) { 326 | // We need to be able to iterate over all of the keys that were already 327 | // present at the start of the call to Range. 
328 | // If read.amended is false, then read.m satisfies that property without 329 | // requiring us to hold m.mu for a long time. 330 | read, _ := m.read.Load().(readOnlyStringIntChan) 331 | if read.amended { 332 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 333 | // (assuming the caller does not break out early), so a call to Range 334 | // amortizes an entire copy of the map: we can promote the dirty copy 335 | // immediately! 336 | m.mu.Lock() 337 | read, _ = m.read.Load().(readOnlyStringIntChan) 338 | if read.amended { 339 | read = readOnlyStringIntChan{m: m.dirty} 340 | m.read.Store(read) 341 | m.dirty = nil 342 | m.misses = 0 343 | } 344 | m.mu.Unlock() 345 | } 346 | 347 | for k, e := range read.m { 348 | v, ok := e.load() 349 | if !ok { 350 | continue 351 | } 352 | if !f(k, v) { 353 | break 354 | } 355 | } 356 | } 357 | 358 | func (m *StringIntChan) missLocked() { 359 | m.misses++ 360 | if m.misses < len(m.dirty) { 361 | return 362 | } 363 | m.read.Store(readOnlyStringIntChan{m: m.dirty}) 364 | m.dirty = nil 365 | m.misses = 0 366 | } 367 | 368 | func (m *StringIntChan) dirtyLocked() { 369 | if m.dirty != nil { 370 | return 371 | } 372 | 373 | read, _ := m.read.Load().(readOnlyStringIntChan) 374 | m.dirty = make(map[string]*entryStringIntChan, len(read.m)) 375 | for k, e := range read.m { 376 | if !e.tryExpungeLocked() { 377 | m.dirty[k] = e 378 | } 379 | } 380 | } 381 | 382 | func (e *entryStringIntChan) tryExpungeLocked() (isExpunged bool) { 383 | p := atomic.LoadPointer(&e.p) 384 | for p == nil { 385 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedStringIntChan) { 386 | return true 387 | } 388 | p = atomic.LoadPointer(&e.p) 389 | } 390 | return p == expungedStringIntChan 391 | } 392 | -------------------------------------------------------------------------------- /testdata/structmap.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type StructMap struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 
35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[struct{ Name string }]*entryStructMap 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyStructMap struct { 67 | m map[struct{ Name string }]*entryStructMap 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedStructMap = unsafe.Pointer(new(struct{ Age int })) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryStructMap struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryStructMap(i struct{ Age int }) *entryStructMap { 99 | return &entryStructMap{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *StructMap) Load(key struct{ Name string }) (value struct{ Age int }, ok bool) { 106 | read, _ := m.read.Load().(readOnlyStructMap) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. 
(If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyStructMap) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryStructMap) load() (value struct{ Age int }, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedStructMap { 133 | return value, false 134 | } 135 | return *(*struct{ Age int })(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *StructMap) Store(key struct{ Name string }, value struct{ Age int }) { 140 | read, _ := m.read.Load().(readOnlyStructMap) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyStructMap) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyStructMap{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryStructMap(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryStructMap) tryStore(i *struct{ Age int }) bool { 173 | for { 174 | p := atomic.LoadPointer(&e.p) 175 | if p == expungedStructMap { 176 | return false 177 | } 178 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 179 | return true 180 | } 181 | } 182 | } 183 | 184 | // unexpungeLocked ensures that the entry is not marked as expunged. 185 | // 186 | // If the entry was previously expunged, it must be added to the dirty map 187 | // before m.mu is unlocked. 188 | func (e *entryStructMap) unexpungeLocked() (wasExpunged bool) { 189 | return atomic.CompareAndSwapPointer(&e.p, expungedStructMap, nil) 190 | } 191 | 192 | // storeLocked unconditionally stores a value to the entry. 193 | // 194 | // The entry must be known not to be expunged. 195 | func (e *entryStructMap) storeLocked(i *struct{ Age int }) { 196 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 197 | } 198 | 199 | // LoadOrStore returns the existing value for the key if present. 200 | // Otherwise, it stores and returns the given value. 201 | // The loaded result is true if the value was loaded, false if stored. 202 | func (m *StructMap) LoadOrStore(key struct{ Name string }, value struct{ Age int }) (actual struct{ Age int }, loaded bool) { 203 | // Avoid locking if it's a clean hit. 
204 | read, _ := m.read.Load().(readOnlyStructMap) 205 | if e, ok := read.m[key]; ok { 206 | actual, loaded, ok := e.tryLoadOrStore(value) 207 | if ok { 208 | return actual, loaded 209 | } 210 | } 211 | 212 | m.mu.Lock() 213 | read, _ = m.read.Load().(readOnlyStructMap) 214 | if e, ok := read.m[key]; ok { 215 | if e.unexpungeLocked() { 216 | m.dirty[key] = e 217 | } 218 | actual, loaded, _ = e.tryLoadOrStore(value) 219 | } else if e, ok := m.dirty[key]; ok { 220 | actual, loaded, _ = e.tryLoadOrStore(value) 221 | m.missLocked() 222 | } else { 223 | if !read.amended { 224 | // We're adding the first new key to the dirty map. 225 | // Make sure it is allocated and mark the read-only map as incomplete. 226 | m.dirtyLocked() 227 | m.read.Store(readOnlyStructMap{m: read.m, amended: true}) 228 | } 229 | m.dirty[key] = newEntryStructMap(value) 230 | actual, loaded = value, false 231 | } 232 | m.mu.Unlock() 233 | 234 | return actual, loaded 235 | } 236 | 237 | // tryLoadOrStore atomically loads or stores a value if the entry is not 238 | // expunged. 239 | // 240 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 241 | // returns with ok==false. 242 | func (e *entryStructMap) tryLoadOrStore(i struct{ Age int }) (actual struct{ Age int }, loaded, ok bool) { 243 | p := atomic.LoadPointer(&e.p) 244 | if p == expungedStructMap { 245 | return actual, false, false 246 | } 247 | if p != nil { 248 | return *(*struct{ Age int })(p), true, true 249 | } 250 | 251 | // Copy the interface after the first load to make this method more amenable 252 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 253 | // shouldn't bother heap-allocating. 254 | ic := i 255 | for { 256 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 257 | return i, false, true 258 | } 259 | p = atomic.LoadPointer(&e.p) 260 | if p == expungedStructMap { 261 | return actual, false, false 262 | } 263 | if p != nil { 264 | return *(*struct{ Age int })(p), true, true 265 | } 266 | } 267 | } 268 | 269 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 270 | // The loaded result reports whether the key was present. 271 | func (m *StructMap) LoadAndDelete(key struct{ Name string }) (value struct{ Age int }, loaded bool) { 272 | read, _ := m.read.Load().(readOnlyStructMap) 273 | e, ok := read.m[key] 274 | if !ok && read.amended { 275 | m.mu.Lock() 276 | read, _ = m.read.Load().(readOnlyStructMap) 277 | e, ok = read.m[key] 278 | if !ok && read.amended { 279 | e, ok = m.dirty[key] 280 | delete(m.dirty, key) 281 | // Regardless of whether the entry was present, record a miss: this key 282 | // will take the slow path until the dirty map is promoted to the read 283 | // map. 284 | m.missLocked() 285 | } 286 | m.mu.Unlock() 287 | } 288 | if ok { 289 | return e.delete() 290 | } 291 | return value, false 292 | } 293 | 294 | // Delete deletes the value for a key. 295 | func (m *StructMap) Delete(key struct{ Name string }) { 296 | m.LoadAndDelete(key) 297 | } 298 | 299 | func (e *entryStructMap) delete() (value struct{ Age int }, ok bool) { 300 | for { 301 | p := atomic.LoadPointer(&e.p) 302 | if p == nil || p == expungedStructMap { 303 | return value, false 304 | } 305 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 306 | return *(*struct{ Age int })(p), true 307 | } 308 | } 309 | } 310 | 311 | // Range calls f sequentially for each key and value present in the map. 312 | // If f returns false, range stops the iteration. 
313 | // 314 | // Range does not necessarily correspond to any consistent snapshot of the Map's 315 | // contents: no key will be visited more than once, but if the value for any key 316 | // is stored or deleted concurrently, Range may reflect any mapping for that key 317 | // from any point during the Range call. 318 | // 319 | // Range may be O(N) with the number of elements in the map even if f returns 320 | // false after a constant number of calls. 321 | func (m *StructMap) Range(f func(key struct{ Name string }, value struct{ Age int }) bool) { 322 | // We need to be able to iterate over all of the keys that were already 323 | // present at the start of the call to Range. 324 | // If read.amended is false, then read.m satisfies that property without 325 | // requiring us to hold m.mu for a long time. 326 | read, _ := m.read.Load().(readOnlyStructMap) 327 | if read.amended { 328 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 329 | // (assuming the caller does not break out early), so a call to Range 330 | // amortizes an entire copy of the map: we can promote the dirty copy 331 | // immediately! 332 | m.mu.Lock() 333 | read, _ = m.read.Load().(readOnlyStructMap) 334 | if read.amended { 335 | read = readOnlyStructMap{m: m.dirty} 336 | m.read.Store(read) 337 | m.dirty = nil 338 | m.misses = 0 339 | } 340 | m.mu.Unlock() 341 | } 342 | 343 | for k, e := range read.m { 344 | v, ok := e.load() 345 | if !ok { 346 | continue 347 | } 348 | if !f(k, v) { 349 | break 350 | } 351 | } 352 | } 353 | 354 | func (m *StructMap) missLocked() { 355 | m.misses++ 356 | if m.misses < len(m.dirty) { 357 | return 358 | } 359 | m.read.Store(readOnlyStructMap{m: m.dirty}) 360 | m.dirty = nil 361 | m.misses = 0 362 | } 363 | 364 | func (m *StructMap) dirtyLocked() { 365 | if m.dirty != nil { 366 | return 367 | } 368 | 369 | read, _ := m.read.Load().(readOnlyStructMap) 370 | m.dirty = make(map[struct{ Name string }]*entryStructMap, len(read.m)) 371 | for k, e := range read.m { 372 | if !e.tryExpungeLocked() { 373 | m.dirty[k] = e 374 | } 375 | } 376 | } 377 | 378 | func (e *entryStructMap) tryExpungeLocked() (isExpunged bool) { 379 | p := atomic.LoadPointer(&e.p) 380 | for p == nil { 381 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedStructMap) { 382 | return true 383 | } 384 | p = atomic.LoadPointer(&e.p) 385 | } 386 | return p == expungedStructMap 387 | } 388 | -------------------------------------------------------------------------------- /testdata/stringbytechan.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 
22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type StringByteChan struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[string]*entryStringByteChan 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyStringByteChan struct { 67 | m map[string]*entryStringByteChan 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedStringByteChan = unsafe.Pointer(new((chan []byte))) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryStringByteChan struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. 
If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryStringByteChan(i chan []byte) *entryStringByteChan { 99 | return &entryStringByteChan{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *StringByteChan) Load(key string) (value chan []byte, ok bool) { 106 | read, _ := m.read.Load().(readOnlyStringByteChan) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. (If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyStringByteChan) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryStringByteChan) load() (value chan []byte, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedStringByteChan { 133 | return value, false 134 | } 135 | return *(*(chan []byte))(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *StringByteChan) Store(key string, value chan []byte) { 140 | read, _ := m.read.Load().(readOnlyStringByteChan) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyStringByteChan) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyStringByteChan{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryStringByteChan(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryStringByteChan) tryStore(i *(chan []byte), 173 | 174 | ) bool { 175 | for { 176 | p := atomic.LoadPointer(&e.p) 177 | if p == expungedStringByteChan { 178 | return false 179 | } 180 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 181 | return true 182 | } 183 | } 184 | } 185 | 186 | // unexpungeLocked ensures that the entry is not marked as expunged. 187 | // 188 | // If the entry was previously expunged, it must be added to the dirty map 189 | // before m.mu is unlocked. 
190 | func (e *entryStringByteChan) unexpungeLocked() (wasExpunged bool) { 191 | return atomic.CompareAndSwapPointer(&e.p, expungedStringByteChan, nil) 192 | } 193 | 194 | // storeLocked unconditionally stores a value to the entry. 195 | // 196 | // The entry must be known not to be expunged. 197 | func (e *entryStringByteChan) storeLocked(i *(chan []byte), 198 | 199 | ) { 200 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 201 | } 202 | 203 | // LoadOrStore returns the existing value for the key if present. 204 | // Otherwise, it stores and returns the given value. 205 | // The loaded result is true if the value was loaded, false if stored. 206 | func (m *StringByteChan) LoadOrStore(key string, value chan []byte) (actual chan []byte, loaded bool) { 207 | // Avoid locking if it's a clean hit. 208 | read, _ := m.read.Load().(readOnlyStringByteChan) 209 | if e, ok := read.m[key]; ok { 210 | actual, loaded, ok := e.tryLoadOrStore(value) 211 | if ok { 212 | return actual, loaded 213 | } 214 | } 215 | 216 | m.mu.Lock() 217 | read, _ = m.read.Load().(readOnlyStringByteChan) 218 | if e, ok := read.m[key]; ok { 219 | if e.unexpungeLocked() { 220 | m.dirty[key] = e 221 | } 222 | actual, loaded, _ = e.tryLoadOrStore(value) 223 | } else if e, ok := m.dirty[key]; ok { 224 | actual, loaded, _ = e.tryLoadOrStore(value) 225 | m.missLocked() 226 | } else { 227 | if !read.amended { 228 | // We're adding the first new key to the dirty map. 229 | // Make sure it is allocated and mark the read-only map as incomplete. 230 | m.dirtyLocked() 231 | m.read.Store(readOnlyStringByteChan{m: read.m, amended: true}) 232 | } 233 | m.dirty[key] = newEntryStringByteChan(value) 234 | actual, loaded = value, false 235 | } 236 | m.mu.Unlock() 237 | 238 | return actual, loaded 239 | } 240 | 241 | // tryLoadOrStore atomically loads or stores a value if the entry is not 242 | // expunged. 243 | // 244 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 245 | // returns with ok==false. 246 | func (e *entryStringByteChan) tryLoadOrStore(i chan []byte) (actual chan []byte, loaded, ok bool) { 247 | p := atomic.LoadPointer(&e.p) 248 | if p == expungedStringByteChan { 249 | return actual, false, false 250 | } 251 | if p != nil { 252 | return *(*(chan []byte))(p), true, true 253 | } 254 | 255 | // Copy the interface after the first load to make this method more amenable 256 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 257 | // shouldn't bother heap-allocating. 258 | ic := i 259 | for { 260 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 261 | return i, false, true 262 | } 263 | p = atomic.LoadPointer(&e.p) 264 | if p == expungedStringByteChan { 265 | return actual, false, false 266 | } 267 | if p != nil { 268 | return *(*(chan []byte))(p), true, true 269 | } 270 | } 271 | } 272 | 273 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 274 | // The loaded result reports whether the key was present. 
275 | func (m *StringByteChan) LoadAndDelete(key string) (value chan []byte, loaded bool) { 276 | read, _ := m.read.Load().(readOnlyStringByteChan) 277 | e, ok := read.m[key] 278 | if !ok && read.amended { 279 | m.mu.Lock() 280 | read, _ = m.read.Load().(readOnlyStringByteChan) 281 | e, ok = read.m[key] 282 | if !ok && read.amended { 283 | e, ok = m.dirty[key] 284 | delete(m.dirty, key) 285 | // Regardless of whether the entry was present, record a miss: this key 286 | // will take the slow path until the dirty map is promoted to the read 287 | // map. 288 | m.missLocked() 289 | } 290 | m.mu.Unlock() 291 | } 292 | if ok { 293 | return e.delete() 294 | } 295 | return value, false 296 | } 297 | 298 | // Delete deletes the value for a key. 299 | func (m *StringByteChan) Delete(key string) { 300 | m.LoadAndDelete(key) 301 | } 302 | 303 | func (e *entryStringByteChan) delete() (value chan []byte, ok bool) { 304 | for { 305 | p := atomic.LoadPointer(&e.p) 306 | if p == nil || p == expungedStringByteChan { 307 | return value, false 308 | } 309 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 310 | return *(*(chan []byte))(p), true 311 | } 312 | } 313 | } 314 | 315 | // Range calls f sequentially for each key and value present in the map. 316 | // If f returns false, range stops the iteration. 317 | // 318 | // Range does not necessarily correspond to any consistent snapshot of the Map's 319 | // contents: no key will be visited more than once, but if the value for any key 320 | // is stored or deleted concurrently, Range may reflect any mapping for that key 321 | // from any point during the Range call. 322 | // 323 | // Range may be O(N) with the number of elements in the map even if f returns 324 | // false after a constant number of calls. 325 | func (m *StringByteChan) Range(f func(key string, value chan []byte) bool) { 326 | // We need to be able to iterate over all of the keys that were already 327 | // present at the start of the call to Range. 328 | // If read.amended is false, then read.m satisfies that property without 329 | // requiring us to hold m.mu for a long time. 330 | read, _ := m.read.Load().(readOnlyStringByteChan) 331 | if read.amended { 332 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 333 | // (assuming the caller does not break out early), so a call to Range 334 | // amortizes an entire copy of the map: we can promote the dirty copy 335 | // immediately! 
336 | m.mu.Lock() 337 | read, _ = m.read.Load().(readOnlyStringByteChan) 338 | if read.amended { 339 | read = readOnlyStringByteChan{m: m.dirty} 340 | m.read.Store(read) 341 | m.dirty = nil 342 | m.misses = 0 343 | } 344 | m.mu.Unlock() 345 | } 346 | 347 | for k, e := range read.m { 348 | v, ok := e.load() 349 | if !ok { 350 | continue 351 | } 352 | if !f(k, v) { 353 | break 354 | } 355 | } 356 | } 357 | 358 | func (m *StringByteChan) missLocked() { 359 | m.misses++ 360 | if m.misses < len(m.dirty) { 361 | return 362 | } 363 | m.read.Store(readOnlyStringByteChan{m: m.dirty}) 364 | m.dirty = nil 365 | m.misses = 0 366 | } 367 | 368 | func (m *StringByteChan) dirtyLocked() { 369 | if m.dirty != nil { 370 | return 371 | } 372 | 373 | read, _ := m.read.Load().(readOnlyStringByteChan) 374 | m.dirty = make(map[string]*entryStringByteChan, len(read.m)) 375 | for k, e := range read.m { 376 | if !e.tryExpungeLocked() { 377 | m.dirty[k] = e 378 | } 379 | } 380 | } 381 | 382 | func (e *entryStringByteChan) tryExpungeLocked() (isExpunged bool) { 383 | p := atomic.LoadPointer(&e.p) 384 | for p == nil { 385 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedStringByteChan) { 386 | return true 387 | } 388 | p = atomic.LoadPointer(&e.p) 389 | } 390 | return p == expungedStringByteChan 391 | } 392 | -------------------------------------------------------------------------------- /testdata/stringermap.go: -------------------------------------------------------------------------------- 1 | // Code generated by syncmap; DO NOT EDIT. 2 | 3 | // Copyright 2016 The Go Authors. All rights reserved. 4 | // Use of this source code is governed by a BSD-style 5 | // license that can be found in the LICENSE file. 6 | 7 | package main 8 | 9 | import ( 10 | "sync" 11 | "sync/atomic" 12 | "unsafe" 13 | ) 14 | 15 | // Map is like a Go map[interface{}]interface{} but is safe for concurrent use 16 | // by multiple goroutines without additional locking or coordination. 17 | // Loads, stores, and deletes run in amortized constant time. 18 | // 19 | // The Map type is specialized. Most code should use a plain Go map instead, 20 | // with separate locking or coordination, for better type safety and to make it 21 | // easier to maintain other invariants along with the map content. 22 | // 23 | // The Map type is optimized for two common use cases: (1) when the entry for a given 24 | // key is only ever written once but read many times, as in caches that only grow, 25 | // or (2) when multiple goroutines read, write, and overwrite entries for disjoint 26 | // sets of keys. In these two cases, use of a Map may significantly reduce lock 27 | // contention compared to a Go map paired with a separate Mutex or RWMutex. 28 | // 29 | // The zero Map is empty and ready for use. A Map must not be copied after first use. 30 | type stringerMap struct { 31 | mu sync.Mutex 32 | 33 | // read contains the portion of the map's contents that are safe for 34 | // concurrent access (with or without mu held). 35 | // 36 | // The read field itself is always safe to load, but must only be stored with 37 | // mu held. 38 | // 39 | // Entries stored in read may be updated concurrently without mu, but updating 40 | // a previously-expunged entry requires that the entry be copied to the dirty 41 | // map and unexpunged with mu held. 42 | read atomic.Value // readOnly 43 | 44 | // dirty contains the portion of the map's contents that require mu to be 45 | // held. 
To ensure that the dirty map can be promoted to the read map quickly, 46 | // it also includes all of the non-expunged entries in the read map. 47 | // 48 | // Expunged entries are not stored in the dirty map. An expunged entry in the 49 | // clean map must be unexpunged and added to the dirty map before a new value 50 | // can be stored to it. 51 | // 52 | // If the dirty map is nil, the next write to the map will initialize it by 53 | // making a shallow copy of the clean map, omitting stale entries. 54 | dirty map[string]*entryStringerMap 55 | 56 | // misses counts the number of loads since the read map was last updated that 57 | // needed to lock mu to determine whether the key was present. 58 | // 59 | // Once enough misses have occurred to cover the cost of copying the dirty 60 | // map, the dirty map will be promoted to the read map (in the unamended 61 | // state) and the next store to the map will make a new dirty copy. 62 | misses int 63 | } 64 | 65 | // readOnly is an immutable struct stored atomically in the Map.read field. 66 | type readOnlyStringerMap struct { 67 | m map[string]*entryStringerMap 68 | amended bool // true if the dirty map contains some key not in m. 69 | } 70 | 71 | // expunged is an arbitrary pointer that marks entries which have been deleted 72 | // from the dirty map. 73 | var expungedStringerMap = unsafe.Pointer(new(interface{ String() string })) 74 | 75 | // An entry is a slot in the map corresponding to a particular key. 76 | type entryStringerMap struct { 77 | // p points to the interface{} value stored for the entry. 78 | // 79 | // If p == nil, the entry has been deleted and m.dirty == nil. 80 | // 81 | // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry 82 | // is missing from m.dirty. 83 | // 84 | // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty 85 | // != nil, in m.dirty[key]. 86 | // 87 | // An entry can be deleted by atomic replacement with nil: when m.dirty is 88 | // next created, it will atomically replace nil with expunged and leave 89 | // m.dirty[key] unset. 90 | // 91 | // An entry's associated value can be updated by atomic replacement, provided 92 | // p != expunged. If p == expunged, an entry's associated value can be updated 93 | // only after first setting m.dirty[key] = e so that lookups using the dirty 94 | // map find the entry. 95 | p unsafe.Pointer // *interface{} 96 | } 97 | 98 | func newEntryStringerMap(i interface{ String() string }) *entryStringerMap { 99 | return &entryStringerMap{p: unsafe.Pointer(&i)} 100 | } 101 | 102 | // Load returns the value stored in the map for a key, or nil if no 103 | // value is present. 104 | // The ok result indicates whether value was found in the map. 105 | func (m *stringerMap) Load(key string) (value interface{ String() string }, ok bool) { 106 | read, _ := m.read.Load().(readOnlyStringerMap) 107 | e, ok := read.m[key] 108 | if !ok && read.amended { 109 | m.mu.Lock() 110 | // Avoid reporting a spurious miss if m.dirty got promoted while we were 111 | // blocked on m.mu. (If further loads of the same key will not miss, it's 112 | // not worth copying the dirty map for this key.) 113 | read, _ = m.read.Load().(readOnlyStringerMap) 114 | e, ok = read.m[key] 115 | if !ok && read.amended { 116 | e, ok = m.dirty[key] 117 | // Regardless of whether the entry was present, record a miss: this key 118 | // will take the slow path until the dirty map is promoted to the read 119 | // map. 
120 | m.missLocked() 121 | } 122 | m.mu.Unlock() 123 | } 124 | if !ok { 125 | return value, false 126 | } 127 | return e.load() 128 | } 129 | 130 | func (e *entryStringerMap) load() (value interface{ String() string }, ok bool) { 131 | p := atomic.LoadPointer(&e.p) 132 | if p == nil || p == expungedStringerMap { 133 | return value, false 134 | } 135 | return *(*interface{ String() string })(p), true 136 | } 137 | 138 | // Store sets the value for a key. 139 | func (m *stringerMap) Store(key string, value interface{ String() string }) { 140 | read, _ := m.read.Load().(readOnlyStringerMap) 141 | if e, ok := read.m[key]; ok && e.tryStore(&value) { 142 | return 143 | } 144 | 145 | m.mu.Lock() 146 | read, _ = m.read.Load().(readOnlyStringerMap) 147 | if e, ok := read.m[key]; ok { 148 | if e.unexpungeLocked() { 149 | // The entry was previously expunged, which implies that there is a 150 | // non-nil dirty map and this entry is not in it. 151 | m.dirty[key] = e 152 | } 153 | e.storeLocked(&value) 154 | } else if e, ok := m.dirty[key]; ok { 155 | e.storeLocked(&value) 156 | } else { 157 | if !read.amended { 158 | // We're adding the first new key to the dirty map. 159 | // Make sure it is allocated and mark the read-only map as incomplete. 160 | m.dirtyLocked() 161 | m.read.Store(readOnlyStringerMap{m: read.m, amended: true}) 162 | } 163 | m.dirty[key] = newEntryStringerMap(value) 164 | } 165 | m.mu.Unlock() 166 | } 167 | 168 | // tryStore stores a value if the entry has not been expunged. 169 | // 170 | // If the entry is expunged, tryStore returns false and leaves the entry 171 | // unchanged. 172 | func (e *entryStringerMap) tryStore(i *interface{ String() string }) bool { 173 | for { 174 | p := atomic.LoadPointer(&e.p) 175 | if p == expungedStringerMap { 176 | return false 177 | } 178 | if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { 179 | return true 180 | } 181 | } 182 | } 183 | 184 | // unexpungeLocked ensures that the entry is not marked as expunged. 185 | // 186 | // If the entry was previously expunged, it must be added to the dirty map 187 | // before m.mu is unlocked. 188 | func (e *entryStringerMap) unexpungeLocked() (wasExpunged bool) { 189 | return atomic.CompareAndSwapPointer(&e.p, expungedStringerMap, nil) 190 | } 191 | 192 | // storeLocked unconditionally stores a value to the entry. 193 | // 194 | // The entry must be known not to be expunged. 195 | func (e *entryStringerMap) storeLocked(i *interface{ String() string }) { 196 | atomic.StorePointer(&e.p, unsafe.Pointer(i)) 197 | } 198 | 199 | // LoadOrStore returns the existing value for the key if present. 200 | // Otherwise, it stores and returns the given value. 201 | // The loaded result is true if the value was loaded, false if stored. 202 | func (m *stringerMap) LoadOrStore(key string, value interface{ String() string }) (actual interface{ String() string }, loaded bool) { 203 | // Avoid locking if it's a clean hit. 
204 | read, _ := m.read.Load().(readOnlyStringerMap) 205 | if e, ok := read.m[key]; ok { 206 | actual, loaded, ok := e.tryLoadOrStore(value) 207 | if ok { 208 | return actual, loaded 209 | } 210 | } 211 | 212 | m.mu.Lock() 213 | read, _ = m.read.Load().(readOnlyStringerMap) 214 | if e, ok := read.m[key]; ok { 215 | if e.unexpungeLocked() { 216 | m.dirty[key] = e 217 | } 218 | actual, loaded, _ = e.tryLoadOrStore(value) 219 | } else if e, ok := m.dirty[key]; ok { 220 | actual, loaded, _ = e.tryLoadOrStore(value) 221 | m.missLocked() 222 | } else { 223 | if !read.amended { 224 | // We're adding the first new key to the dirty map. 225 | // Make sure it is allocated and mark the read-only map as incomplete. 226 | m.dirtyLocked() 227 | m.read.Store(readOnlyStringerMap{m: read.m, amended: true}) 228 | } 229 | m.dirty[key] = newEntryStringerMap(value) 230 | actual, loaded = value, false 231 | } 232 | m.mu.Unlock() 233 | 234 | return actual, loaded 235 | } 236 | 237 | // tryLoadOrStore atomically loads or stores a value if the entry is not 238 | // expunged. 239 | // 240 | // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and 241 | // returns with ok==false. 242 | func (e *entryStringerMap) tryLoadOrStore(i interface{ String() string }) (actual interface{ String() string }, loaded, ok bool) { 243 | p := atomic.LoadPointer(&e.p) 244 | if p == expungedStringerMap { 245 | return actual, false, false 246 | } 247 | if p != nil { 248 | return *(*interface{ String() string })(p), true, true 249 | } 250 | 251 | // Copy the interface after the first load to make this method more amenable 252 | // to escape analysis: if we hit the "load" path or the entry is expunged, we 253 | // shouldn't bother heap-allocating. 254 | ic := i 255 | for { 256 | if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { 257 | return i, false, true 258 | } 259 | p = atomic.LoadPointer(&e.p) 260 | if p == expungedStringerMap { 261 | return actual, false, false 262 | } 263 | if p != nil { 264 | return *(*interface{ String() string })(p), true, true 265 | } 266 | } 267 | } 268 | 269 | // LoadAndDelete deletes the value for a key, returning the previous value if any. 270 | // The loaded result reports whether the key was present. 271 | func (m *stringerMap) LoadAndDelete(key string) (value interface{ String() string }, loaded bool) { 272 | read, _ := m.read.Load().(readOnlyStringerMap) 273 | e, ok := read.m[key] 274 | if !ok && read.amended { 275 | m.mu.Lock() 276 | read, _ = m.read.Load().(readOnlyStringerMap) 277 | e, ok = read.m[key] 278 | if !ok && read.amended { 279 | e, ok = m.dirty[key] 280 | delete(m.dirty, key) 281 | // Regardless of whether the entry was present, record a miss: this key 282 | // will take the slow path until the dirty map is promoted to the read 283 | // map. 284 | m.missLocked() 285 | } 286 | m.mu.Unlock() 287 | } 288 | if ok { 289 | return e.delete() 290 | } 291 | return value, false 292 | } 293 | 294 | // Delete deletes the value for a key. 295 | func (m *stringerMap) Delete(key string) { 296 | m.LoadAndDelete(key) 297 | } 298 | 299 | func (e *entryStringerMap) delete() (value interface{ String() string }, ok bool) { 300 | for { 301 | p := atomic.LoadPointer(&e.p) 302 | if p == nil || p == expungedStringerMap { 303 | return value, false 304 | } 305 | if atomic.CompareAndSwapPointer(&e.p, p, nil) { 306 | return *(*interface{ String() string })(p), true 307 | } 308 | } 309 | } 310 | 311 | // Range calls f sequentially for each key and value present in the map. 
312 | // If f returns false, range stops the iteration. 313 | // 314 | // Range does not necessarily correspond to any consistent snapshot of the Map's 315 | // contents: no key will be visited more than once, but if the value for any key 316 | // is stored or deleted concurrently, Range may reflect any mapping for that key 317 | // from any point during the Range call. 318 | // 319 | // Range may be O(N) with the number of elements in the map even if f returns 320 | // false after a constant number of calls. 321 | func (m *stringerMap) Range(f func(key string, value interface{ String() string }) bool) { 322 | // We need to be able to iterate over all of the keys that were already 323 | // present at the start of the call to Range. 324 | // If read.amended is false, then read.m satisfies that property without 325 | // requiring us to hold m.mu for a long time. 326 | read, _ := m.read.Load().(readOnlyStringerMap) 327 | if read.amended { 328 | // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) 329 | // (assuming the caller does not break out early), so a call to Range 330 | // amortizes an entire copy of the map: we can promote the dirty copy 331 | // immediately! 332 | m.mu.Lock() 333 | read, _ = m.read.Load().(readOnlyStringerMap) 334 | if read.amended { 335 | read = readOnlyStringerMap{m: m.dirty} 336 | m.read.Store(read) 337 | m.dirty = nil 338 | m.misses = 0 339 | } 340 | m.mu.Unlock() 341 | } 342 | 343 | for k, e := range read.m { 344 | v, ok := e.load() 345 | if !ok { 346 | continue 347 | } 348 | if !f(k, v) { 349 | break 350 | } 351 | } 352 | } 353 | 354 | func (m *stringerMap) missLocked() { 355 | m.misses++ 356 | if m.misses < len(m.dirty) { 357 | return 358 | } 359 | m.read.Store(readOnlyStringerMap{m: m.dirty}) 360 | m.dirty = nil 361 | m.misses = 0 362 | } 363 | 364 | func (m *stringerMap) dirtyLocked() { 365 | if m.dirty != nil { 366 | return 367 | } 368 | 369 | read, _ := m.read.Load().(readOnlyStringerMap) 370 | m.dirty = make(map[string]*entryStringerMap, len(read.m)) 371 | for k, e := range read.m { 372 | if !e.tryExpungeLocked() { 373 | m.dirty[k] = e 374 | } 375 | } 376 | } 377 | 378 | func (e *entryStringerMap) tryExpungeLocked() (isExpunged bool) { 379 | p := atomic.LoadPointer(&e.p) 380 | for p == nil { 381 | if atomic.CompareAndSwapPointer(&e.p, nil, expungedStringerMap) { 382 | return true 383 | } 384 | p = atomic.LoadPointer(&e.p) 385 | } 386 | return p == expungedStringerMap 387 | } 388 | --------------------------------------------------------------------------------
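Usage note (editorial sketch, not a file in the repository): every generated map above exposes the same method set as sync.Map — Load, Store, LoadOrStore, LoadAndDelete, Delete, and Range — but with the concrete key and value types it was generated for, so call sites need no type assertions. Assuming the snippet below compiles alongside the generated package shown above, exercising two of the maps might look like this; the function name exampleUsage and the keys used are illustrative only.

// exampleUsage is an illustrative sketch, not part of the generated testdata.
// It assumes it lives in the same package as the generated types above.
func exampleUsage() {
	// StructMap behaves like a concurrent map[struct{ Name string }]struct{ Age int }.
	// The zero value is empty and ready for use, like sync.Map.
	var ages StructMap
	ages.Store(struct{ Name string }{Name: "alice"}, struct{ Age int }{Age: 30})
	if v, ok := ages.Load(struct{ Name string }{Name: "alice"}); ok {
		_ = v.Age // v is a struct{ Age int }; no type assertion needed
	}

	// StringIntChan behaves like a concurrent map[string]chan int.
	var jobs StringIntChan
	c, loaded := jobs.LoadOrStore("build", make(chan int, 1))
	if !loaded {
		c <- 1 // c is the freshly stored chan int with capacity 1
	}
	jobs.Range(func(key string, value chan int) bool {
		close(value) // visits every live entry; returning false would stop early
		return true
	})
	jobs.Delete("build")
}

Because the generated types carry over the read/dirty promotion scheme documented in the code above, the usual sync.Map guidance applies unchanged: they pay off for grow-mostly caches or goroutines working on disjoint key sets, while a plain map guarded by a mutex remains the simpler choice elsewhere.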