├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── bucket.go ├── const_check.go ├── go.mod ├── go.sum ├── hiter.go ├── map.go ├── map_benchmark_test.go └── map_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | .DS_Store 18 | art/ 19 | .vscode 20 | cmd 21 | .cpu.out -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | go test ./... -count=1 3 | 4 | bench: 5 | go test . -run=^$$ -bench . -benchmem 6 | 7 | bench-overflow: 8 | go test . -run=^$$ -bench ^BenchmarkPutWithOverflow$$ -benchmem 9 | 10 | bench-overflow-arena: 11 | GOEXPERIMENT=arenas go test . -run=^$$ -bench ^BenchmarkPutWithOverflow$$ -benchmem 12 | 13 | bench-overflow-profile: 14 | go test . -run=^$$ -bench ^BenchmarkPutWithOverflow$$ -benchmem -cpuprofile cpu.out && \ 15 | go tool pprof -http :8080 cpu.out 16 | 17 | bench-overflow-memprofile: 18 | go test . 
-run=^$$ -bench ^BenchmarkPutWithOverflow$$ -benchmem -memprofile mem.out && \ 19 | go tool pprof -http :8080 mem.out 20 | 21 | fuzz: 22 | go test -run=^$$ -fuzz FuzzMap -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-map 2 | 3 | Go's 1.19 hashmap implementation using pure Go with generics. 4 | 5 | ## About 6 | 7 | This is Go's 1.19 map implementation written with pure Go. Using this repo you can learn how maps work in Go internally and understand the main concept of a hashmap in general. This repo uses algorithms from an actual map implementation under the Go's hood, except some things due to better understanding. 8 | 9 | This code was written additionally to my [article](https://habr.com/ru/post/704796/) on Habr, where I've described maps internals - what is a hashmap, main terms, concepts, difference with Python and Java. 10 | 11 | [Here](https://prog.world/hashmap-according-to-golang-along-with-implementation-on-generics/) you can find some kind of auto-generated English translation. 12 | 13 | You can also use this repo as a starting point to improve/change a base implementation that we have in Go 1.19. 14 | 15 | # Contributions 16 | 17 | Any contributions are welcome. Don't hesitate to create PRs to improve similarity to a base implementation, to fix bugs, typos, readability etc. 
18 | 19 | # Links 20 | 21 | Articles/videos which will help you to delve into a hashmap: 22 | 23 | Go: 24 | [GopherCon 2016: Keith Randall - Inside the Map Implementation](https://www.youtube.com/watch?v=Tl7mi9QmLns) 25 | [How the Go runtime implements maps efficiently (without generics)](https://dave.cheney.net/2018/05/29/how-the-go-runtime-implements-maps-efficiently-without-generics) 26 | [Hacking Go's Runtime with Generics](https://www.dolthub.com/blog/2022-12-19-maphash/) 27 | 28 | Python: 29 | [Raymond Hettinger Modern Python Dictionaries A confluence of a dozen great ideas PyCon 2017 ](https://www.youtube.com/watch?v=npw4s1QTmPg) 30 | [Raymond Hettinger.More compact dictionaries with faster iteration](https://mail.python.org/pipermail/python-dev/2012-December/123028.html) 31 | 32 | Java: 33 | [The Java HashMap Under the Hood](https://www.baeldung.com/java-hashmap-advanced) 34 | [Linear probing lecture. cs166 stanford](https://web.stanford.edu/class/archive/cs/cs166/cs166.1166/lectures/12/Small12.pdf) 35 | [An Analysis of Hash Map Implementations in Popular Languages](https://rcoh.me/posts/hash-map-analysis/) 36 | -------------------------------------------------------------------------------- /bucket.go: -------------------------------------------------------------------------------- 1 | package gomap 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | const ( 9 | bucketSize = 8 10 | 11 | emptyRest = 0 // this and all other cells with bigger index are empty 12 | emptyCell = 1 // there is no value at that index 13 | evacuatedFirst = 2 // key/elem is valid. Entry has been evacuated to first half of larger table. 14 | evacuatedSecond = 3 // same as above, but evacuated to second half of larger table. 15 | evacuatedEmpty = 4 // cell is empty, bucket is evacuated. 16 | minTopHash = 5 // minimum topHash value for filled cell 17 | ) 18 | 19 | // bucket - the Go's bucket explicit representation. 
20 | type bucket[K comparable, V any] struct { 21 | tophash [bucketSize]uint8 22 | 23 | keys [bucketSize]K 24 | values [bucketSize]V 25 | 26 | overflow *bucket[K, V] 27 | } 28 | 29 | // Get - returns an element for the given key. 30 | // If an element doesn't exist for the given key returns zero value for and false. 31 | func (b *bucket[K, V]) Get(key K, topHash uint8) (V, bool) { 32 | bkt := b 33 | bucketLoop: 34 | for ; bkt != nil; bkt = bkt.overflow { 35 | for i := range bkt.tophash { 36 | top := bkt.tophash[i] 37 | if top != topHash { 38 | // if there are no filled cells we break the loop and return zero value 39 | if top == emptyRest { 40 | break bucketLoop 41 | } 42 | continue 43 | } 44 | 45 | if bkt.keys[i] == key { 46 | return bkt.values[i], true 47 | } 48 | } 49 | } 50 | 51 | return *new(V), false 52 | } 53 | 54 | // Put - adds value to the bucket. 55 | // if the value for a given key already exists, it'll be replaced 56 | // if there is no place in this bucket for a new value, new overflow bucket will be created 57 | func (b *bucket[K, V]) Put(key K, topHash uint8, value V) (isAdded bool) { 58 | var insertIdx int 59 | var insertBkt *bucket[K, V] 60 | 61 | bkt := b 62 | for bkt != nil { 63 | for i := range bkt.tophash { 64 | // comparing topHash bits, not keys 65 | // because we can store there flags describing cell state such as cell is empty, cell is evacuating etc. 
66 | // also it's faster than comparing keys 67 | top := bkt.tophash[i] 68 | if top != topHash { 69 | if top == emptyRest { 70 | insertBkt = bkt 71 | insertIdx = i 72 | break 73 | } 74 | 75 | if insertBkt == nil && isCellEmpty(top) { 76 | insertBkt = bkt 77 | insertIdx = i 78 | } 79 | continue 80 | } 81 | 82 | // when we have different keys but tophash is equal 83 | if bkt.keys[i] != key { 84 | continue 85 | } 86 | 87 | bkt.values[i] = value 88 | return false 89 | } 90 | 91 | if bkt.overflow == nil { 92 | // if we didn't find a place to put 93 | if insertBkt == nil { 94 | bkt.overflow = &bucket[K, V]{} 95 | insertBkt = bkt.overflow 96 | break 97 | } else { // break if we found a place for the value 98 | break 99 | } 100 | } 101 | 102 | bkt = bkt.overflow 103 | } 104 | 105 | insertBkt.keys[insertIdx] = key 106 | insertBkt.values[insertIdx] = value 107 | insertBkt.tophash[insertIdx] = topHash 108 | 109 | return true 110 | } 111 | 112 | func (b *bucket[K, V]) putAt(key K, topHash uint8, value V, idx uint) { 113 | b.tophash[idx] = topHash 114 | b.keys[idx] = key 115 | b.values[idx] = value 116 | } 117 | 118 | // Delete - deletes an element with the given key 119 | func (b *bucket[K, V]) Delete(key K, topHash uint8) (deleted bool) { 120 | bkt := b 121 | for bkt != nil { 122 | for i := range bkt.tophash { 123 | top := bkt.tophash[i] 124 | if top != topHash { 125 | // if there are no filled cells we return 126 | if top == emptyRest { 127 | return false 128 | } 129 | continue 130 | } 131 | 132 | if bkt.keys[i] == key { 133 | bkt.tophash[i] = emptyCell 134 | return true 135 | } 136 | } 137 | bkt = bkt.overflow 138 | } 139 | 140 | return false 141 | } 142 | 143 | func isCellEmpty(val uint8) bool { 144 | return val <= emptyCell 145 | } 146 | 147 | func (b bucket[K, V]) isEvacuated() bool { 148 | h := b.tophash[0] 149 | return h > emptyCell && h < minTopHash 150 | } 151 | 152 | func (b bucket[K, V]) debug() string { 153 | str := strings.Builder{} 154 | 
str.WriteString("bucket[") 155 | for i := range b.keys { 156 | str.WriteString(fmt.Sprintf("%v:%v ", b.keys[i], b.values[i])) 157 | } 158 | 159 | return str.String()[:str.Len()-1] + "]" 160 | } 161 | -------------------------------------------------------------------------------- /const_check.go: -------------------------------------------------------------------------------- 1 | package gomap 2 | 3 | // this is a compile time check for const values. instead a check below 4 | // wich happens every time during an evacuate() func under the hood. 5 | // 6 | // if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY { 7 | // throw("bad evacuatedN") 8 | // } 9 | func _() { 10 | var x [1]struct{} 11 | _ = x[evacuatedFirst-2] 12 | _ = x[evacuatedSecond-3] 13 | } 14 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/w1kend/go-map 2 | 3 | go 1.19 4 | 5 | require github.com/dolthub/maphash v0.0.0-20221220182448-74e1e1ea1577 6 | 7 | require ( 8 | github.com/klauspost/cpuid/v2 v2.0.9 // indirect 9 | github.com/tidwall/hashmap v1.8.0 // indirect 10 | github.com/zeebo/xxh3 v1.0.2 // indirect 11 | ) 12 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/dolthub/maphash v0.0.0-20221220182448-74e1e1ea1577 h1:SegEguMxToBn045KRHLIUlF2/jR7Y2qD6fF+3tdOfvI= 3 | github.com/dolthub/maphash v0.0.0-20221220182448-74e1e1ea1577/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4= 4 | github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= 5 | github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 6 | github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 7 | github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 8 | github.com/tidwall/hashmap v1.8.0 h1:e5vXVBTv8PZGyg8kxhrvb7uNrfZ3R+5KRHRHnVM+Rb4= 9 | github.com/tidwall/hashmap v1.8.0/go.mod h1:v+0qJrJn7l+l2dB8+fAFpC62p2G0SMP2Teu8ejkebg8= 10 | github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= 11 | github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= 12 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 13 | -------------------------------------------------------------------------------- /hiter.go: -------------------------------------------------------------------------------- 1 | package gomap 2 | 3 | import ( 4 | "math/rand" 5 | ) 6 | 7 | const noCheck uint64 = 1<<(8*ptrSize) - 1 8 | 9 | // A hash iteration structure. 10 | type hiter[K comparable, V any] struct { 11 | key *K 12 | elem *V 13 | m *hmap[K, V] 14 | buckets *[]bucket[K, V] // bucket ptr at hash_iter initialization time 15 | currBktPtr *bucket[K, V] // current bucket 16 | startBucket uint64 // bucket iteration started at 17 | offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1) 18 | wrapped bool // already wrapped around from end of bucket array to beginning 19 | B uint8 20 | i uint8 21 | currBucketNum uint64 22 | checkBucket uint64 23 | } 24 | 25 | func iterInit[K comparable, V any](m *hmap[K, V]) *hiter[K, V] { 26 | h := hiter[K, V]{} 27 | 28 | if m == nil || m.len == 0 { 29 | return &h 30 | } 31 | 32 | h.m = m 33 | h.B = m.B 34 | h.buckets = &m.buckets 35 | h.startBucket = rand.Uint64() & bucketMask(m.B) // pick random bucket 36 | // choose offset to start from inside a bucket 37 | h.offset = uint8(uint8(h.startBucket) >> h.B & (bucketSize - 1)) 38 | h.currBucketNum = h.startBucket 39 | 40 | h.m.flags |= iterator | oldIterator // set iterators flags 41 | h.next() 42 | 43 | return &h 44 | } 
45 | 46 | func (it *hiter[K, V]) next() { 47 | b := it.currBktPtr 48 | bucketNum := it.currBucketNum 49 | i := it.i 50 | checkBucket := it.checkBucket 51 | next: 52 | // choose bucket 53 | if b == nil { 54 | if bucketNum == it.startBucket && it.wrapped { 55 | // end of iteration 56 | it.key = nil 57 | it.elem = nil 58 | return 59 | } 60 | 61 | // check old buckets if gwoth is not done 62 | // skip it if growth started during iteration 63 | if it.m.isGrowing() && it.B == it.m.B { 64 | // runtime/map.go:890 65 | // Iterator was started in the middle of a grow, and the grow isn't done yet. 66 | // If the bucket we're looking at hasn't been filled in yet (i.e. the old 67 | // bucket hasn't been evacuated) then we need to iterate through the old 68 | // bucket and only return the ones that will be migrated to this bucket. 69 | oldBucketNum := bucketNum & it.m.oldBucketMask() 70 | b = &(*it.m.oldbuckets)[oldBucketNum] 71 | if !b.isEvacuated() { 72 | checkBucket = bucketNum 73 | } else { 74 | checkBucket = noCheck 75 | b = &it.m.buckets[bucketNum] 76 | } 77 | } else { 78 | checkBucket = noCheck 79 | b = &it.m.buckets[bucketNum] 80 | } 81 | 82 | bucketNum++ 83 | if bucketNum == bucketsNum(it.B) { 84 | bucketNum = 0 85 | it.wrapped = true 86 | } 87 | i = 0 88 | } 89 | 90 | // iterate over the bucket 91 | for ; i < bucketSize; i++ { 92 | // index with offset 93 | offI := (i + it.offset) & (bucketSize - 1) 94 | top := b.tophash[offI] 95 | // we don't check emptyRest as we start iterating in the middle of a bucket 96 | if isCellEmpty(top) || top == evacuatedEmpty { 97 | continue 98 | } 99 | key := &b.keys[offI] 100 | elem := &b.values[offI] 101 | 102 | if checkBucket != noCheck && !it.m.sameSizeGrow() { 103 | // runtime/map.go:925 104 | // Special case: iterator was started during a grow to a larger size 105 | // and the grow is not done yet. We're working on a bucket whose 106 | // oldbucket has not been evacuated yet. 
Or at least, it wasn't 107 | // evacuated when we started the bucket. So we're iterating 108 | // through the oldbucket, skipping any keys that will go 109 | // to the other new bucket (each oldbucket expands to two 110 | // buckets during a grow). 111 | 112 | if key == key { 113 | hash := it.m.hasher.Hash(*key) 114 | if hash&bucketMask(it.B) != checkBucket { 115 | continue 116 | } 117 | } else { 118 | // runtime/map.go:941 119 | // Hash isn't repeatable if k != k (NaNs). We need a 120 | // repeatable and randomish choice of which direction 121 | // to send NaNs during evacuation. We'll use the low 122 | // bit of tophash to decide which way NaNs go. 123 | // NOTE: this case is why we need two evacuate tophash 124 | // values, evacuatedX and evacuatedY, that differ in 125 | // their low bit. 126 | if checkBucket>>(it.B-1) != uint64(b.tophash[offI]&1) { 127 | continue 128 | } 129 | } 130 | } 131 | 132 | if (top != evacuatedFirst && top != evacuatedSecond) || key != key { 133 | // This is the golden data, we can return it. 134 | it.key = key 135 | it.elem = elem 136 | } else { 137 | // The hash table has grown since the iterator was started. 138 | // The golden data for this key is now somewhere else. 139 | // Check the current hash table for the data. 140 | // 141 | // This code handles the case where the key 142 | // has been deleted, updated, or deleted and reinserted. 143 | // NOTE: we need to regrab the key as it has potentially been 144 | // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). 
145 | re, ok := it.m.Get2(*key) // todo: add getK method 146 | if !ok { 147 | continue // key has been deleted 148 | } 149 | it.key = key 150 | it.elem = &re 151 | } 152 | 153 | // update iteration state and return 154 | it.currBucketNum = bucketNum 155 | if it.currBktPtr != b { 156 | it.currBktPtr = b 157 | } 158 | it.i = i + 1 159 | it.checkBucket = checkBucket 160 | return 161 | } 162 | 163 | // go to an overflow when finished with the current bucket 164 | b = b.overflow 165 | i = 0 166 | goto next 167 | } 168 | -------------------------------------------------------------------------------- /map.go: -------------------------------------------------------------------------------- 1 | package gomap 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/dolthub/maphash" 8 | ) 9 | 10 | const ( 11 | // Maximum average load of a bucket that triggers growth is 6.5. 12 | // Represent as loadFactorNum/loadFactorDen, to allow integer math. 13 | loadFactorNum = 13 14 | loadFactorDen = 2 15 | 16 | ptrSize = 4 << (^uintptr(0) >> 63) // pointer size 17 | 18 | // flags 19 | iterator = 1 // there may be an iterator using buckets 20 | oldIterator = 2 // there may be an iterator using oldbuckets 21 | hashWriting = 4 // a goroutine is writing to the map 22 | sameSizeGrow = 8 // the current map growth is to a new map of the same size 23 | ) 24 | 25 | // hmap - map struct 26 | type hmap[K comparable, V any] struct { 27 | len int 28 | B uint8 // log_2 of # of buckets 29 | 30 | buckets []bucket[K, V] 31 | hasher maphash.Hasher[K] // Go's runtime hasher 32 | 33 | oldbuckets *[]bucket[K, V] 34 | numEvacuated uint64 // progress counter for evacuation (buckets less than this have been evacuated) 35 | 36 | flags uint8 37 | } 38 | 39 | type Hashmap[K comparable, V any] interface { 40 | // gets the value for the given key. 
41 | // returns zero value for if there is no value for the given key 42 | Get(key K) V 43 | // gets the value for the given key and the flag indicating whether the value exists 44 | // returns zero value for and false if there is no value for the given key 45 | Get2(key K) (V, bool) 46 | // puts value into the map 47 | Put(key K, value V) 48 | // deletes an element from the map 49 | Delete(key K) 50 | // iterates through the map and calls the given func for each key, value. 51 | // if the given func returns false, loop breaks. 52 | Range(f func(k K, v V) bool) 53 | // returns the length of the map 54 | Len() int 55 | String() string 56 | } 57 | 58 | // New - creates a new map for elements 59 | func New[K comparable, V any](size int) Hashmap[K, V] { 60 | h := new(hmap[K, V]) 61 | 62 | B := uint8(0) 63 | for overLoadFactor(size, B) { 64 | B++ 65 | } 66 | h.B = B 67 | 68 | h.buckets = make([]bucket[K, V], bucketsNum(h.B)) 69 | h.hasher = maphash.NewHasher[K]() 70 | 71 | return h 72 | } 73 | 74 | func (h *hmap[K, V]) Get(key K) V { 75 | v, _ := h.Get2(key) 76 | return v 77 | } 78 | 79 | func (h *hmap[K, V]) Get2(key K) (V, bool) { 80 | if h.flags&hashWriting != 0 { 81 | panic("concurrent map access and write") 82 | } 83 | 84 | tophash, targetBucket := h.locateBucket(key) 85 | 86 | b := &h.buckets[targetBucket] 87 | 88 | if h.isGrowing() { 89 | oldB := &(*h.oldbuckets)[targetBucket&h.oldBucketMask()] 90 | if !oldB.isEvacuated() { 91 | b = oldB 92 | } 93 | } 94 | 95 | return b.Get(key, tophash) 96 | } 97 | 98 | func (h *hmap[K, V]) Put(key K, value V) { 99 | if h.flags&hashWriting != 0 { 100 | panic("concurrent map access and write") 101 | } 102 | h.flags ^= hashWriting 103 | 104 | tophash, targetBucket := h.locateBucket(key) 105 | 106 | // start growing if adding an element will trigger overload 107 | if !h.isGrowing() && overLoadFactor(h.len+1, h.B) { 108 | h.startGrowth() 109 | } 110 | 111 | // evacuate old bucket first 112 | if h.isGrowing() { 113 | 
h.growWork(targetBucket) 114 | } 115 | 116 | if h.buckets[targetBucket].Put(key, tophash, value) { 117 | h.len++ 118 | } 119 | if h.flags&hashWriting == 0 { 120 | panic("concurrent map writes") 121 | } 122 | h.flags &^= hashWriting 123 | } 124 | 125 | func (h *hmap[K, V]) Delete(key K) { 126 | if h.flags&hashWriting != 0 { 127 | panic("concurrent map writes") 128 | } 129 | 130 | h.flags ^= hashWriting 131 | 132 | tophash, targetBucket := h.locateBucket(key) 133 | 134 | b := &h.buckets[targetBucket] 135 | 136 | if h.isGrowing() { 137 | oldB := &(*h.oldbuckets)[targetBucket&h.oldBucketMask()] 138 | if !oldB.isEvacuated() { 139 | b = oldB 140 | } 141 | } 142 | 143 | if deleted := b.Delete(key, tophash); deleted { 144 | h.len-- 145 | } 146 | if h.flags&hashWriting == 0 { 147 | panic("concurrent map writes") 148 | } 149 | h.flags &^= hashWriting 150 | } 151 | 152 | // locateBucket - returns bucket index, where to put/search a value 153 | // and tophash value from hash of the given key 154 | func (h *hmap[K, V]) locateBucket(key K) (tophash uint8, targetBucket uint64) { 155 | hash := h.hasher.Hash(key) 156 | tophash = topHash(hash) 157 | mask := bucketMask(h.B) 158 | 159 | // calculate target bucket number, from N available 160 | // mask represents N-1 161 | // for N=9 it's 0111 162 | // for N=16 it's 1111, etc. 
163 | // then, using binary and (hash & mask) we can get up to N different values(index of bucket) 164 | // where to put/search a value for a given key 165 | targetBucket = hash & mask 166 | 167 | return tophash, targetBucket 168 | } 169 | 170 | func (h *hmap[K, V]) String() string { 171 | buf := strings.Builder{} 172 | buf.WriteString("go-map[") 173 | h.Range(func(k K, v V) bool { 174 | buf.WriteString(fmt.Sprintf("%v:%v ", k, v)) 175 | return true 176 | }) 177 | 178 | return strings.TrimRight(buf.String(), " ") + "]" 179 | } 180 | 181 | // returns first 8 bits from the val 182 | func topHash(val uint64) uint8 { 183 | tophash := uint8(val >> (ptrSize*8 - 8)) 184 | if tophash < minTopHash { 185 | tophash += minTopHash 186 | } 187 | return tophash 188 | } 189 | 190 | // bucketShift returns 1< bucketSize && uint64(size) > loadFactorNum*(bucketsNum(B)/loadFactorDen) 204 | } 205 | 206 | func (m *hmap[K, V]) Range(f func(k K, v V) bool) { 207 | iter := iterInit(m) 208 | for iter.key != nil && iter.elem != nil { 209 | if !f(*iter.key, *iter.elem) { 210 | break 211 | } 212 | iter.next() 213 | } 214 | } 215 | 216 | func (m *hmap[K, V]) Len() int { 217 | return m.len 218 | } 219 | 220 | // sameSizeGrow reports whether the current growth is to a map of the same size. 
221 | func (h *hmap[K, V]) sameSizeGrow() bool { 222 | return h.flags&sameSizeGrow != 0 223 | } 224 | 225 | func (m *hmap[K, V]) isGrowing() bool { 226 | return m.oldbuckets != nil 227 | } 228 | 229 | func (m *hmap[K, V]) growWork(bucket uint64) { 230 | // make sure we evacuate the oldbucket corresponding 231 | // to the bucket we're about to use 232 | m.evacuate(bucket & m.oldBucketMask()) 233 | 234 | // evacuate one more oldbucket to make progress on growing 235 | if m.isGrowing() { 236 | m.evacuate(m.numEvacuated) 237 | } 238 | } 239 | 240 | func (m *hmap[K, V]) evacuate(oldbucket uint64) { 241 | b := &(*m.oldbuckets)[oldbucket] 242 | newBit := m.numOldBuckets() 243 | 244 | if !b.isEvacuated() { 245 | // two halfs of the new buckets 246 | halfs := [2]evacDst[K, V]{{b: &m.buckets[oldbucket]}} 247 | 248 | if !m.sameSizeGrow() { 249 | // Only calculate y pointers if we're growing bigger. 250 | // Otherwise GC can see bad pointers. 251 | halfs[1].b = &m.buckets[oldbucket+newBit] 252 | } 253 | 254 | for ; b != nil; b = b.overflow { 255 | // moving all values from the old bucket to the new one 256 | for i := 0; i < bucketSize; i++ { 257 | top := b.tophash[i] 258 | 259 | if isCellEmpty(top) { 260 | b.tophash[i] = evacuatedEmpty 261 | continue 262 | } 263 | 264 | key := &b.keys[i] 265 | value := &b.values[i] 266 | 267 | hash := m.hasher.Hash(*key) 268 | 269 | // decide where to evacuate the element. 270 | // the first or the second half of the new buckets 271 | // 272 | // newBit == # of prev buckets. it's called like that because of it's purpose 273 | // the value represents new bit of our new mask(# of curr buckets - 1) 274 | // if newBit == 8 (1000) then newMask == 15(1111) and oldMask == 7(0111) 275 | // and in that case only the 4th bit(from the end) of mask matters 276 | // because it decides whether targetBucket changes or not. 
277 | 278 | var useSecond uint8 279 | if !m.sameSizeGrow() && hash&newBit != 0 { 280 | useSecond = 1 281 | } 282 | 283 | // evacuatedFirst + useSecond == evaluatedSecond 284 | b.tophash[i] = evacuatedFirst + useSecond 285 | dst := &halfs[useSecond] 286 | // check bounds 287 | if dst.i == bucketSize { 288 | dst.b = newOverflow(dst.b) 289 | dst.i = 0 290 | } 291 | dst.b.putAt(*key, top, *value, dst.i) 292 | dst.i++ 293 | } 294 | } 295 | } 296 | 297 | if oldbucket == m.numEvacuated { 298 | m.advanceEvacuationMark(newBit) 299 | } 300 | } 301 | 302 | func (m *hmap[K, V]) advanceEvacuationMark(newBit uint64) { 303 | m.numEvacuated++ 304 | 305 | stop := newBit + 1024 306 | if stop > newBit { 307 | stop = newBit 308 | } 309 | 310 | for m.numEvacuated != stop && (*m.oldbuckets)[m.numEvacuated].isEvacuated() { 311 | m.numEvacuated++ 312 | } 313 | 314 | if m.numEvacuated == newBit { // newbit == # of oldbuckets 315 | // Growing is all done. Free old main bucket array. 316 | m.oldbuckets = nil 317 | m.flags &^= sameSizeGrow 318 | } 319 | } 320 | 321 | // evacDst is an evacuation destination. 322 | type evacDst[K comparable, V any] struct { 323 | b *bucket[K, V] // pointer to the bucket 324 | i uint // index for the next element in the destination bucket 325 | } 326 | 327 | // noldbuckets calculates the number of buckets prior to the current map growth. 328 | func (m *hmap[K, V]) numOldBuckets() uint64 { 329 | oldB := m.B 330 | if !m.sameSizeGrow() { 331 | oldB-- 332 | } 333 | 334 | return bucketsNum(oldB) 335 | } 336 | 337 | // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets(). 
338 | func (m *hmap[K, V]) oldBucketMask() uint64 { 339 | return m.numOldBuckets() - 1 340 | } 341 | 342 | func (m *hmap[K, V]) startGrowth() { 343 | oldBuckets := m.buckets 344 | m.B++ 345 | m.buckets = make([]bucket[K, V], bucketsNum(m.B)) 346 | m.oldbuckets = &oldBuckets 347 | m.numEvacuated = 0 348 | 349 | flags := m.flags &^ (iterator | oldIterator) // remove iterators flags 350 | if m.flags&iterator != 0 { 351 | flags |= oldIterator 352 | } 353 | 354 | // actual growth happens in the evacuate() and growWork() functions 355 | } 356 | 357 | func newOverflow[K comparable, V any](b *bucket[K, V]) *bucket[K, V] { 358 | if b.overflow == nil { 359 | b.overflow = &bucket[K, V]{} 360 | } 361 | 362 | return b.overflow 363 | } 364 | 365 | func (m *hmap[K, V]) debug() { 366 | fmt.Println("main buckets:") 367 | for i, b := range m.buckets { 368 | bk := &b 369 | for bk != nil { 370 | fmt.Printf("\t\t%d - %s\n", i, bk.debug()) 371 | bk = bk.overflow 372 | } 373 | } 374 | 375 | if m.oldbuckets != nil { 376 | fmt.Println("old buckets:") 377 | for i, b := range *m.oldbuckets { 378 | bk := &b 379 | for bk != nil { 380 | fmt.Printf("\t\t%d - %s\n", i, bk.debug()) 381 | bk = bk.overflow 382 | } 383 | } 384 | } 385 | } 386 | -------------------------------------------------------------------------------- /map_benchmark_test.go: -------------------------------------------------------------------------------- 1 | package gomap 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/tidwall/hashmap" 8 | ) 9 | 10 | var sizes = []int{128, 8192, 32768, 131072} 11 | 12 | func BenchmarkGet(b *testing.B) { 13 | for _, n := range sizes { 14 | keys := make([]string, 0, n) 15 | mm := New[string, int64](n) 16 | openAddrMap := hashmap.New[string, int64](n) 17 | stdm := make(map[string]int64, n) 18 | 19 | for i := 0; i < n; i++ { 20 | k := fmt.Sprintf("key__%d", i) 21 | mm.Put(k, int64(i)*2) 22 | stdm[k] = int64(i) * 2 23 | openAddrMap.Set(k, int64(i)*2) 24 | keys = append(keys, k) 25 | } 
		// cycle through the prepared keys; j wraps so b.N can exceed n
		j := 0
		b.Run(fmt.Sprintf("generic-map %d", n), func(b *testing.B) {
			var got int64
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
				}
				got = mm.Get(keys[j])
				j++
			}
			_ = got // sink: keep the compiler from eliding the loads
		})

		j = 0
		b.Run(fmt.Sprintf("STD-map %d", n), func(b *testing.B) {
			var got int64
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
				}
				got = stdm[keys[j]]
				j++
			}
			_ = got
		})

		j = 0
		b.Run(fmt.Sprintf("tidwall-hashmap (open-addressing hashmap) %d", n), func(b *testing.B) {
			var got int64
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
				}
				got, _ = openAddrMap.Get(keys[j])
				j++
			}
			_ = got
		})

	}
}

// BenchmarkPut compares write throughput of this map and the built-in map;
// `multiplier` changes the stored value on every wrap so writes aren't no-ops.
func BenchmarkPut(b *testing.B) {
	for _, n := range sizes {
		keys := make([]string, 0, n)
		for i := 0; i < n; i++ {
			keys = append(keys, fmt.Sprintf("key__%d", i))
		}
		mm := New[string, int64](n)
		j := 0
		multiplier := 1
		b.Run(fmt.Sprintf("generic-map %d", n), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
					multiplier += 1
				}
				mm.Put(keys[j], int64(j*multiplier))
				j++
			}
		})

		j = 0
		multiplier = 1
		stdm := make(map[string]int64, n)
		b.Run(fmt.Sprintf("STD-map %d", n), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
					multiplier += 1
				}
				stdm[keys[j]] = int64(j * multiplier)
				j++
			}
		})
	}
}

// BenchmarkPutWithOverflow starts from a deliberately undersized map so the
// benchmark exercises incremental growth and overflow-bucket handling.
func BenchmarkPutWithOverflow(b *testing.B) {
	startSize := 1_000
	targetSize := []int{10_000, 100_000, 1_000_000, 10_000_000}
	type someStruct struct {
		x string
		y int
	}

	for _, n := range targetSize {
		keys := make([]string, 0, n)
		for i := 0; i < n; i++ {
			keys = append(keys, fmt.Sprintf("key__%d", i))
		}

		mm := New[string, someStruct](startSize)
		j := 0
		multiplier := 1
		b.Run(fmt.Sprintf("gen-map (string key)%d", n), func(b *testing.B) {
			var key string
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
					multiplier += 1
				}
				key = keys[j]
				mm.Put(key, someStruct{x: key, y: j * multiplier})
				j++
			}
		})

		stdm := make(map[string]someStruct, startSize)
		j = 0
		multiplier = 1
		b.Run(fmt.Sprintf("STD (string key)%d", n), func(b *testing.B) {
			var key string
			for i := 0; i < b.N; i++ {
				if j == n {
					j = 0
					multiplier += 1
				}
				key = keys[j]
				stdm[key] = someStruct{x: key, y: j * multiplier}
				j++
			}
		})
	}
}
--------------------------------------------------------------------------------
/map_test.go:
--------------------------------------------------------------------------------
package gomap

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
)

// TestMap covers the basic Get/Get2/Put/Delete contract, including the
// empty-string key and re-insertion after deletion.
func TestMap(t *testing.T) {
	mm := New[string, int64](8)

	v, ok := mm.Get2("123")
	isEqual(t, ok, false)
	isEqual(t, v, *new(int64))

	mm.Put("key1", 10)
	v = mm.Get("key1")
	isEqual(t, v, int64(10))

	mm.Put("", 144)
	isEqual(t, mm.Get(""), int64(144))

	mm.Put(" ", 145)
	isEqual(t, mm.Get(" "), int64(145))

	mm.Delete("123")
	v, ok = mm.Get2("123")
	isEqual(t, ok, false)
	isEqual(t, v, *new(int64))

	mm.Put("key1", 20)
	v = mm.Get("key1")
	isEqual(t, v, int64(20))

	t.Run("target value in overflow bucket", func(t *testing.T) {
		mm := New[string, int](8)
		mm.Put("key0", 20)

		for i := 0; i < 8; i++ {
			mm.Put(fmt.Sprintf("key_%d", i), i*10)
		}

		mm.Put("key__1", 10)
		// remove space for an element in the bucket
		mm.Delete("key0")

		// try to add a value in a hole. "key__1" now is stored in an overflow bucket
		mm.Put("key__1", 20)
		v := mm.Get("key__1")
		isEqual(t, v, 20)
		// the values must be deleted from the overflow bucket
		mm.Delete("key__1")

		v = mm.Get("key__1")
		isEqual(t, v, 0)
	})
}

// isEqual fails the test unless got deep-equals want.
func isEqual(t *testing.T, got interface{}, want interface{}) {
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("result is not equal\ngot: %+v\nwant: %+v\n", got, want)
	}
}

// TestBucketOverflow inserts more elements than a single bucket holds and
// verifies everything stays reachable through the overflow chain.
func TestBucketOverflow(t *testing.T) {
	// create map with 8 elements(1 bucket)
	mm := New[string, int](8)

	values := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
	prefix := "key_"

	for _, v := range values {
		mm.Put(fmt.Sprintf("%s%d", prefix, v), v)
	}

	dm := mm.(*hmap[string, int])
	dm.debug()

	for _, v := range values {
		got := mm.Get(fmt.Sprintf("%s%d", prefix, v))
		isEqual(t, got, v)
	}
}

// NestedStruct is a deeply nested value type used to exercise non-scalar values.
type NestedStruct struct {
	A int64
	B struct {
		C string
		D string
		E struct {
			F []int64
		}
	}
}

func TestGet2(t *testing.T) {
	m := New[string, NestedStruct](10)

	emptyStruct := NestedStruct{}
	m.Put("123", emptyStruct)
	got, ok := m.Get2("123")
	isEqual(t, ok, true)
	isEqual(t, got, emptyStruct)

	got, ok = m.Get2("random_key")
	isEqual(t, ok, false)
	isEqual(t, got, emptyStruct)
}

// FuzzMap checks the put/get round trip for arbitrary string keys.
func FuzzMap(f *testing.F) {
	f.Fuzz(func(t *testing.T, key string) {
		m := New[string, string](1)
		m.Put(key, key)
		if v := m.Get(key); v != key {
			t.Fatal(v, "!==", key)
		}
	})
}

// TestRange verifies Range visits every key/value exactly once
// (order-insensitive: both sides are sorted before comparison).
func TestRange(t *testing.T) {
	m := New[string, int64](100)

	n := 100
	wantKeys := make([]string, 0, n)
	wantValues := make([]int64, 0, n)

	for i := 0; i < n; i++ {
		k := fmt.Sprintf("k%d", i)
		v := int64(i) * 10
		m.Put(k, v)

		wantKeys = append(wantKeys, k)
		wantValues = append(wantValues, v)
	}

	gotKeys := make([]string, 0, n)
	gotValues := make([]int64, 0, n)
	m.Range(func(k string, v int64) bool {
		gotKeys = append(gotKeys, k)
		gotValues = append(gotValues, v)
		return true
	})

	sort.Strings(wantKeys)
	sort.Strings(gotKeys)
	isEqual(t, gotKeys, wantKeys)

	i64Less := func(s []int64) func(i, j int) bool {
		return func(i, j int) bool {
			return s[i] < s[j]
		}
	}
	sort.Slice(wantValues, i64Less(wantValues))
	sort.Slice(gotValues, i64Less(gotValues))
	// NOTE(review): arguments are (want, got) here while isEqual is (got, want)
	// everywhere else — a failure message would print the labels swapped.
	isEqual(t, wantValues, gotValues)
}

// testcase is a generic helper that round-trips each key/value pair.
type testcase[K comparable, V any] struct{}

func (tt testcase[K, V]) test(t *testing.T, keys []K, values []V) {
	if len(keys) != len(values) {
		t.Fatalf("lengths of keys(%d) and values(%d) must be equal", len(keys), len(values))
	}
	m := New[K, V](len(keys))

	for i, k := range keys {
		m.Put(k, values[i])

		got, ok := m.Get2(k)
		isEqual(t, ok, true)
		isEqual(t, got, values[i])
	}
}

// TestDifferentKeyTypes exercises every comparable key kind:
// structs, arrays, bools, numeric types, bytes, and channels.
func TestDifferentKeyTypes(t *testing.T) {
	t.Run("struct", func(t *testing.T) {
		type keyStruct struct {
			key     string
			anyData [1]int
		}

		tests := testcase[keyStruct, string]{}
		tests.test(
			t,
			[]keyStruct{
				{key: "k1"}, {key: "k2", anyData: [1]int{1}}, {key: "k3"}, {key: "k4", anyData: [1]int{2}},
				{key: "k5"}, {key: "k6", anyData: [1]int{3}}, {key: "k7"}, {key: "k8", anyData: [1]int{4}},
				{key: "k9"},
			},
			[]string{"val1", "val2", "val3", "val4", "val5", "val6", "val7", "val8", "val9"},
		)
	})

	t.Run("array", func(t *testing.T) {
		type keyArray [2]int

		tests := testcase[keyArray, string]{}

		tests.test(
			t,
			[]keyArray{{1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 9}, {9, 10}},
			[]string{"val1", "val2", "val3", "val4", "val5", "val6", "val7", "val8", "val9"},
		)
	})

	t.Run("bools", func(t *testing.T) {
		tests := testcase[bool, int]{}
		tests.test(t, []bool{true, true, false, false, true}, []int{1, 0, 1, 0, 20})
	})

	t.Run("numbers", func(t *testing.T) {
		t.Run("float64", func(t *testing.T) {
			tests := testcase[float64, int]{}
			tests.test(t, []float64{1.1, 2.2, 3.3, 4.4}, []int{1, 2, 3, 4})
		})
		t.Run("uint64", func(t *testing.T) {
			tests := testcase[uint64, int]{}
			tests.test(t, []uint64{1, 2, 3, 4}, []int{1, 2, 3, 4})
		})
		t.Run("int64", func(t *testing.T) {
			tests := testcase[int64, int]{}
			tests.test(t, []int64{1, 2, 3, 4}, []int{1, 2, 3, 4})
		})
		t.Run("complex64", func(t *testing.T) {
			tests := testcase[complex64, int]{}
			tests.test(t, []complex64{1 + 1i, 2 + 2i, 3 + 3i, 4 + 4i}, []int{1, 2, 3, 4})
		})
	})

	t.Run("byte", func(t *testing.T) {
		tests := testcase[byte, int]{}
		tests.test(t, []byte{'1', '2', '3', '4'}, []int{1, 2, 3, 4})
	})

	t.Run("channel", func(t *testing.T) {
		tests := testcase[chan int, int]{}
		ch1, ch2, ch3, ch4 := make(chan int, 1), make(chan int, 1), make(chan int, 1), make(chan int, 1)
		ch2 <- 2
		ch3 <- 3
		ch4 <- 4
		tests.test(t, []chan int{ch1, ch2, ch3, ch4}, []int{1, 2, 3, 4})
	})
}
--------------------------------------------------------------------------------