├── .gitignore ├── LICENSE.txt ├── .travis.yml ├── licenses ├── BSL-Couchbase.txt └── APL2.txt ├── README.md ├── slab.go └── slab_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.out 3 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Source code in this repository is licensed under various licenses. The 2 | Business Source License 1.1 (BSL) is one such license. Each file indicates in 3 | a section at the beginning of the file the name of the license that applies to 4 | it. All licenses used in this repository can be found in the top-level 5 | licenses directory. 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2013-Present Couchbase, Inc. 2 | # 3 | # Use of this software is governed by the Business Source License included in 4 | # the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that 5 | # file, in accordance with the Business Source License, use of this software 6 | # will be governed by the Apache License, Version 2.0, included in the file 7 | # licenses/APL2.txt. 8 | 9 | language: go 10 | 11 | go: 12 | - 1.1 13 | - tip 14 | 15 | -------------------------------------------------------------------------------- /licenses/BSL-Couchbase.txt: -------------------------------------------------------------------------------- 1 | COUCHBASE BUSINESS SOURCE LICENSE AGREEMENT 2 | 3 | Business Source License 1.1 4 | Licensor: Couchbase, Inc. 5 | Licensed Work: Couchbase Server Version 7.2 6 | The Licensed Work is © 2021-Present Couchbase, Inc. 
7 | 8 | Additional Use Grant: You may make production use of the Licensed Work, provided 9 | you comply with the following conditions: 10 | 11 | (i) You may not prepare a derivative work based upon the Licensed Work and 12 | distribute or otherwise offer such derivative work, whether on a standalone 13 | basis or in combination with other products, applications, or services 14 | (including in any "as-a-service" offering, such as, by way of example, a 15 | software-as-a-service, database-as-a-service, or infrastructure-as-a-service 16 | offering, or any other offering based on a cloud computing or other type of 17 | hosted distribution model (collectively, "Hosted Offerings")), for a fee or 18 | otherwise on a commercial or other for-profit basis. 19 | 20 | (ii) You may not link the Licensed Work to, or otherwise include the Licensed 21 | Work in or with, any product, application, or service (including in any Hosted 22 | Offering) that is distributed or otherwise offered, whether on a standalone 23 | basis or in combination with other products, applications, or services for a fee 24 | or otherwise on a commercial or other for-profit basis. Condition (ii) shall not 25 | limit the generality of condition (i) above. 26 | 27 | 28 | Change Date: February 1, 2029 29 | 30 | Change License: Apache License, Version 2.0 31 | 32 | 33 | Notice 34 | 35 | The Business Source License (this document, or the "License") is not an Open 36 | Source license. However, the Licensed Work will eventually be made available 37 | under an Open Source License, as stated in this License. License text copyright 38 | © 2017 MariaDB Corporation Ab, All Rights Reserved. "Business Source License" is 39 | a trademark of MariaDB Corporation Ab. 40 | 41 | Terms 42 | 43 | The Licensor hereby grants You the right to copy, modify, create derivative 44 | works, redistribute, and make non-production use of the Licensed Work. 
The 45 | Licensor may make an Additional Use Grant, above, permitting limited production 46 | use. 47 | 48 | Effective on the Change Date, or the fourth anniversary of the first publicly 49 | available distribution of a specific version of the Licensed Work under this 50 | License, whichever comes first, the Licensor hereby grants you rights under the 51 | terms of the Change License, and the rights granted in the paragraph above 52 | terminate. 53 | 54 | If your use of the Licensed Work does not comply with the requirements currently 55 | in effect as described in this License, you must purchase a commercial license 56 | from the Licensor, its affiliated entities, or authorized resellers, or you must 57 | refrain from using the Licensed Work. 58 | 59 | All copies of the original and modified Licensed Work, and derivative works of 60 | the Licensed Work, are subject to this License. This License applies separately 61 | for each version of the Licensed Work and the Change Date may vary for each 62 | version of the Licensed Work released by Licensor. 63 | 64 | You must conspicuously display this License on each original or modified copy of 65 | the Licensed Work. If you receive the Licensed Work in original or modified form 66 | from a third party, the terms and conditions set forth in this License apply to 67 | your use of that work. 68 | 69 | Any use of the Licensed Work in violation of this License will automatically 70 | terminate your rights under this License for the current and all other versions 71 | of the Licensed Work. 72 | 73 | This License does not grant you any right in any trademark or logo of Licensor 74 | or its affiliates (provided that you may use a trademark or logo of Licensor as 75 | expressly required by this License). 76 | 77 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN 78 | "AS IS" BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS 79 | OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, 80 | FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 81 | 82 | MariaDB hereby grants you permission to use this License's text to license your 83 | works, and to refer to it using the trademark "Business Source License", as long 84 | as you comply with the Covenants of Licensor below. 85 | 86 | Covenants of Licensor 87 | 88 | In consideration of the right to use this License's text and the "Business 89 | Source License" name and trademark, Licensor covenants to MariaDB, and to all 90 | other recipients of the licensed work to be provided by Licensor: 91 | 92 | 1. To specify as the Change License the GPL Version 2.0 or any later version, or 93 | a license that is compatible with GPL Version 2.0 or a later version, where 94 | "compatible" means that software provided under the Change License can be 95 | included in a program with software provided under GPL Version 2.0 or a later 96 | version. Licensor may specify additional Change Licenses without limitation. 97 | 98 | 2. To either: (a) specify an additional grant of rights to use that does not 99 | impose any additional restriction on the right granted in this License, as the 100 | Additional Use Grant; or (b) insert the text "None". 101 | 102 | 3. To specify a Change Date. 103 | 104 | 4. Not to modify this License in any other way. 105 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-slab - slab allocator in go 2 | 3 | A slab allocator library in the Go Programming Language. 
4 | 5 | [![GoDoc](https://godoc.org/github.com/steveyen/go-slab?status.svg)](https://godoc.org/github.com/steveyen/go-slab) [![Build Status](https://drone.io/github.com/steveyen/go-slab/status.png)](https://drone.io/github.com/steveyen/go-slab/latest) [![Coverage Status](https://coveralls.io/repos/steveyen/go-slab/badge.png)](https://coveralls.io/r/steveyen/go-slab) 6 | 7 | # Who is this for 8 | 9 | This library may be interesting to you if you wish to reduce garbage 10 | collection (e.g. stop-the-world GC) performance issues in your golang 11 | programs, allowing you to switch to explicit byte array memory 12 | management techniques. 13 | 14 | This can be useful, for example, for long-running server programs that 15 | manage lots of in-memory data items, such as caches and databases. 16 | 17 | # Example usage 18 | 19 | arena := NewArena(48, // The smallest slab class "chunk size" is 48 bytes. 20 | 1024*1024, // Each slab will be 1MB in size. 21 | 2, // Power of 2 growth in "chunk sizes". 22 | nil) // Use default make([]byte) for slab memory. 23 | 24 | var buf []byte 25 | 26 | buf = arena.Alloc(64) // Allocate 64 bytes. 27 | ... use the buf ... 28 | arena.DecRef(buf) // Release our ref-count when we're done with buf. 29 | 30 | buf = arena.Alloc(1024) // Allocate another 1K byte array. 31 | ... use the buf ... 32 | arena.AddRef(buf) // The buf's ref-count now goes to 2. 33 | ... use the buf some more ... 34 | arena.DecRef(buf) 35 | ... still can use the buf since we still have 1 ref-count ... 36 | arena.DecRef(buf) // We shouldn't use the buf after this last DecRef(), 37 | // as the library might recycle it for a future Alloc(). 38 | 39 | # Design concepts 40 | 41 | The byte arrays ([]byte) that are allocated by this library are 42 | reference-counted. When a byte array's reference count drops to 0, it 43 | will be placed onto a free-list for later re-use. 
This can reduce the 44 | need to ask the go runtime to allocate new memory and perhaps delay 45 | the need for a full stop-the-world GC. 46 | 47 | The AddRef()/DecRef() functions use slice/capacity math instead of 48 | "large" additional tracking data structures (e.g., no extra 49 | hashtables) in order to reach the right ref-counter metadata. 50 | 51 | This implementation also does not use any of go's "unsafe" 52 | capabilities, allowing it to remain relatively simple. 53 | 54 | Memory is managed via a simple slab allocator algorithm. See: 55 | http://en.wikipedia.org/wiki/Slab_allocation 56 | 57 | Each arena tracks one or more slabClass structs. Each slabClass 58 | manages a different "chunk size", where chunk sizes are computed using 59 | a simple "growth factor" (e.g., the "power of 2 growth" in the above 60 | example). Each slabClass also tracks zero or more slabs, where every 61 | slab tracked by a slabClass will all have the same chunk size. A slab 62 | manages a (usually large) contiguous array of memory bytes (1MB from 63 | the above example), and the slab's memory is subdivided into many 64 | chunks of the same chunk size. All the chunks in a new slab are 65 | placed on a free-list that's part of the slabClass. 66 | 67 | When Alloc() is invoked, the first "large enough" slabClass is found, 68 | and a chunk from the free-list is taken to service the allocation. If 69 | there are no more free chunks available in a slabClass, then a new 70 | slab (e.g., 1MB) is allocated, chunk'ified, and the request is 71 | processed as before. 72 | 73 | # Concurrency 74 | 75 | The Arena returned from NewArena() is not concurrency safe. 76 | Please use your own locking. 77 | 78 | # Chainability 79 | 80 | The []byte buf's can be chained via the SetNext()/GetNext() functions. 81 | This may be useful for developers wishing to reduce fragmentation when 82 | they have wildly varying byte array sizes.
83 | 84 | For example, a server cache may need to manage many items whose sizes 85 | range from small to large (16 bytes to 1MB). Instead of invoking 86 | Arena.Alloc() on the exact item size, the developer may wish to 87 | consider slicing an item into many more smaller 4KB byte arrays. 88 | 89 | For a 1MB item, for example, the application can instead invoke 90 | Arena.Alloc(4096) for 256 times and use the Arena.SetNext() function 91 | to chain those smaller 4KB buffers together. By slicing memory into 92 | uniform-sized, smaller-sized buffers, there may be less fragmentation 93 | and better overall re-use of slabs. Additionally, the last []byte 94 | buffer in the chain may be smaller than 4KB to not waste space. 95 | 96 | # Application specific slab memory allocator 97 | 98 | The NewArena() function takes an optional malloc() callback function, 99 | which will be invoked whenever the arena needs more memory for a new 100 | slab. If the malloc() func is nil, the arena will default to using 101 | the builtin make([]byte, sizeNeeded). 102 | 103 | An application-specific malloc() func can be useful for tracking 104 | and/or limiting the amount of slab memory that an Arena uses. It can 105 | also be used by advanced applications to supply mmap()'ed memory to an 106 | Arena. 107 | 108 | # Rules 109 | 110 | * You need to invoke AddRef()/DecRef() with the exact same buf 111 | that you received from Alloc(), from the same arena. 112 | * Don't call Alloc() with a size greater than the arena's slab size. 113 | e.g., if your slab size is 1MB, then Alloc(1024 * 1024 + 1) will fail. 114 | * Careful with your ref-counting -- that's the fundamental tradeoff 115 | with trying to avoid GC. 116 | * Do not grow or append() on the slices returned by Alloc(). 117 | * Do not use cap() on slices returned by Alloc(), as that has 118 | information / abstraction "leakage" and should not be depended on. 119 | 120 | # LICENSE 121 | 122 | Apache 2 license.
123 | 124 | # Testing 125 | 126 | Unit test code coverage, as of version 0.0.0-42-g60296ca, is 99.4%. 127 | 128 | # TODO 129 | 130 | * Currently, slabs that are allocated are never freed. 131 | * Memory for one slabClass is never reassigned to another slabClass. 132 | Memory reassignment might be useful whenever data sizes of items in 133 | long-running systems change over time. For example, sessions in an 134 | online game may initially fit fine into a 1K slab class, but start 135 | getting larger than 1K as long time players acquire more inventory. 136 | Meanwhile, most of the slab memory is "stuck" in the 1K slab class 137 | when it's now needed in the 2K slab class. The chainability features 138 | of go-slab, of note, should also be considered in these cases. 139 | -------------------------------------------------------------------------------- /licenses/APL2.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /slab.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013-Present Couchbase, Inc. 
2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 8 | 9 | // Package slab provides a 100% golang slab allocator for byte slices. 10 | package slab 11 | 12 | import ( 13 | "encoding/binary" 14 | "fmt" 15 | "math" 16 | "math/rand" 17 | "sort" 18 | ) 19 | 20 | // An opaque reference to bytes managed by an Arena. See 21 | // Arena.BufToLoc/LocToBuf(). A Loc struct is GC friendly in that a 22 | // Loc does not have direct pointer fields into the Arena's memory 23 | // that the GC's scanner must traverse. 24 | type Loc struct { 25 | slabClassIndex int 26 | slabIndex int 27 | chunkIndex int 28 | bufStart int 29 | bufLen int 30 | } 31 | 32 | // NilLoc returns a Loc where Loc.IsNil() is true. 33 | func NilLoc() Loc { 34 | return nilLoc 35 | } 36 | 37 | var nilLoc = Loc{-1, -1, -1, -1, -1} // A sentinel. 38 | 39 | // IsNil returns true if the Loc came from NilLoc(). 40 | func (cl Loc) IsNil() bool { 41 | return cl.slabClassIndex < 0 && cl.slabIndex < 0 && 42 | cl.chunkIndex < 0 && cl.bufStart < 0 && cl.bufLen < 0 43 | } 44 | 45 | // Slice returns a Loc that a represents a different slice of the 46 | // backing buffer, where the bufStart and bufLen are relative to the 47 | // backing buffer. Does not change the ref-count of the underlying 48 | // buffer. 49 | // 50 | // NOTE: Many API's (such as BufToLoc) do not correctly handle Loc's 51 | // with non-zero bufStart, so please be careful with using sliced 52 | // Loc's. 53 | func (cl Loc) Slice(bufStart, bufLen int) Loc { 54 | rv := cl // Makes a copy. 55 | rv.bufStart = bufStart 56 | rv.bufLen = bufLen 57 | return rv 58 | } 59 | 60 | // An Arena manages a set of slab classes and memory. 
61 | type Arena struct { 62 | growthFactor float64 63 | slabClasses []slabClass // slabClasses's chunkSizes grow by growthFactor. 64 | slabMagic int32 // Magic # suffix on each slab memory []byte. 65 | slabSize int 66 | malloc func(size int) []byte // App-specific allocator. 67 | 68 | totAllocs int64 69 | totAddRefs int64 70 | totDecRefs int64 71 | totDecRefZeroes int64 // Inc'ed when a ref-count reaches zero. 72 | totGetNexts int64 73 | totSetNexts int64 74 | totMallocs int64 75 | totMallocErrs int64 76 | totTooBigErrs int64 77 | totAddSlabErrs int64 78 | totPushFreeChunks int64 // Inc'ed when chunk added to free list. 79 | totPopFreeChunks int64 // Inc'ed when chunk removed from free list. 80 | totPopFreeChunkErrs int64 81 | } 82 | 83 | type slabClass struct { 84 | slabs []*slab // A growing array of slabs. 85 | chunkSize int // Each slab is sliced into fixed-sized chunks. 86 | chunkFree Loc // Chunks are tracked in a free-list per slabClass. 87 | 88 | numChunks int64 89 | numChunksFree int64 90 | } 91 | 92 | type slab struct { 93 | // len(memory) == slabSize + slabMemoryFooterLen. 94 | memory []byte 95 | 96 | // Matching array of chunk metadata, and len(memory) == len(chunks). 97 | chunks []chunk 98 | } 99 | 100 | // Based on slabClassIndex + slabIndex + slabMagic. 101 | const slabMemoryFooterLen int = 4 + 4 + 4 102 | 103 | type chunk struct { 104 | refs int32 // Ref-count. 105 | self Loc // The self is the Loc for this chunk. 106 | next Loc // Used when chunk is in the free-list or when chained. 107 | } 108 | 109 | // NewArena returns an Arena to manage byte slice memory based on a 110 | // slab allocator approach. 111 | // 112 | // The startChunkSize and slabSize should be > 0. 113 | // The growthFactor should be > 1.0. 114 | // The malloc() func is invoked when Arena needs memory for a new slab. 115 | // When malloc() is nil, then Arena defaults to make([]byte, size). 
116 | func NewArena(startChunkSize int, slabSize int, growthFactor float64, 117 | malloc func(size int) []byte) *Arena { 118 | if malloc == nil { 119 | malloc = defaultMalloc 120 | } 121 | s := &Arena{ 122 | growthFactor: growthFactor, 123 | slabMagic: rand.Int31(), 124 | slabSize: slabSize, 125 | malloc: malloc, 126 | } 127 | s.addSlabClass(startChunkSize) 128 | return s 129 | } 130 | 131 | func defaultMalloc(size int) []byte { 132 | return make([]byte, size) 133 | } 134 | 135 | // Alloc may return nil on errors, such as if no more free chunks are 136 | // available and new slab memory was not allocatable (such as if 137 | // malloc() returns nil). The returned buf may not be append()'ed to 138 | // for growth. The returned buf must be DecRef()'ed for memory reuse. 139 | func (s *Arena) Alloc(bufLen int) (buf []byte) { 140 | sc, chunk := s.allocChunk(bufLen) 141 | if sc == nil || chunk == nil { 142 | return nil 143 | } 144 | return sc.chunkMem(chunk)[0:bufLen] 145 | } 146 | 147 | // Owns returns true if this Arena owns the buf. 148 | func (s *Arena) Owns(buf []byte) bool { 149 | sc, c := s.bufChunk(buf) 150 | return sc != nil && c != nil 151 | } 152 | 153 | // AddRef increase the ref count on a buf. The input buf must be from 154 | // an Alloc() from the same Arena. 155 | func (s *Arena) AddRef(buf []byte) { 156 | s.totAddRefs++ 157 | sc, c := s.bufChunk(buf) 158 | if sc == nil || c == nil { 159 | panic("buf not from this arena") 160 | } 161 | c.addRef() 162 | } 163 | 164 | // DecRef decreases the ref count on a buf. The input buf must be 165 | // from an Alloc() from the same Arena. Once the buf's ref-count 166 | // drops to 0, the Arena may reuse the buf. Returns true if this was 167 | // the last DecRef() invocation (ref count reached 0). 
168 | func (s *Arena) DecRef(buf []byte) bool { 169 | s.totDecRefs++ 170 | sc, c := s.bufChunk(buf) 171 | if sc == nil || c == nil { 172 | panic("buf not from this arena") 173 | } 174 | return s.decRef(sc, c) 175 | } 176 | 177 | // GetNext returns the next chained buf for the given input buf. The 178 | // buf's managed by an Arena can be chained. The returned bufNext may 179 | // be nil. When the returned bufNext is non-nil, the caller owns a 180 | // ref-count on bufNext and must invoke DecRef(bufNext) when the 181 | // caller is finished using bufNext. 182 | func (s *Arena) GetNext(buf []byte) (bufNext []byte) { 183 | s.totGetNexts++ 184 | sc, c := s.bufChunk(buf) 185 | if sc == nil || c == nil { 186 | panic("buf not from this arena") 187 | } 188 | if c.refs <= 0 { 189 | panic(fmt.Sprintf("unexpected ref-count during GetNext: %#v", c)) 190 | } 191 | 192 | scNext, cNext := s.chunk(c.next) 193 | if scNext == nil || cNext == nil { 194 | return nil 195 | } 196 | 197 | cNext.addRef() 198 | 199 | return scNext.chunkMem(cNext)[c.next.bufStart : c.next.bufStart+c.next.bufLen] 200 | } 201 | 202 | // SetNext associates the next chain buf following the input buf to be 203 | // bufNext. The buf's from an Arena can be chained, where buf will 204 | // own an AddRef() on bufNext. When buf's ref-count goes to zero, it 205 | // will call DecRef() on bufNext. The bufNext may be nil. The 206 | // bufNext must have start position 0 (or bufStart of 0) with respect 207 | // to its backing buffer. 
208 | func (s *Arena) SetNext(buf, bufNext []byte) { 209 | s.totSetNexts++ 210 | sc, c := s.bufChunk(buf) 211 | if sc == nil || c == nil { 212 | panic("buf not from this arena") 213 | } 214 | if c.refs <= 0 { 215 | panic(fmt.Sprintf("refs <= 0 during SetNext: %#v", c)) 216 | } 217 | 218 | scOldNext, cOldNext := s.chunk(c.next) 219 | if scOldNext != nil && cOldNext != nil { 220 | s.decRef(scOldNext, cOldNext) 221 | } 222 | 223 | c.next = nilLoc 224 | if bufNext != nil { 225 | scNewNext, cNewNext := s.bufChunk(bufNext) 226 | if scNewNext == nil || cNewNext == nil { 227 | panic("bufNext not from this arena") 228 | } 229 | cNewNext.addRef() 230 | 231 | c.next = cNewNext.self 232 | c.next.bufStart = 0 233 | c.next.bufLen = len(bufNext) 234 | } 235 | } 236 | 237 | // BufToLoc returns a Loc that represents an Arena-managed buf. Does 238 | // not affect the reference count of the buf. The buf slice must have 239 | // start position 0 (must not be a sliced Loc with non-zero bufStart). 240 | func (s *Arena) BufToLoc(buf []byte) Loc { 241 | sc, c := s.bufChunk(buf) 242 | if sc == nil || c == nil { 243 | return NilLoc() 244 | } 245 | 246 | var loc = c.self // Makes a copy. 247 | loc.bufStart = 0 248 | loc.bufLen = len(buf) 249 | return loc 250 | } 251 | 252 | // LocToBuf returns a buf for an Arena-managed Loc. Does not affect 253 | // the reference count of the buf. The Loc may have come from 254 | // Loc.Slice(). 
255 | func (s *Arena) LocToBuf(loc Loc) []byte { 256 | sc, chunk := s.chunk(loc) 257 | if sc == nil || chunk == nil { 258 | return nil 259 | } 260 | return sc.chunkMem(chunk)[loc.bufStart : loc.bufStart+loc.bufLen] 261 | } 262 | 263 | func (s *Arena) LocAddRef(loc Loc) { 264 | s.totAddRefs++ 265 | sc, chunk := s.chunk(loc) 266 | if sc == nil || chunk == nil { 267 | return 268 | } 269 | chunk.addRef() 270 | } 271 | 272 | func (s *Arena) LocDecRef(loc Loc) { 273 | s.totDecRefs++ 274 | sc, chunk := s.chunk(loc) 275 | if sc == nil || chunk == nil { 276 | return 277 | } 278 | s.decRef(sc, chunk) 279 | } 280 | 281 | // --------------------------------------------------------------- 282 | 283 | func (s *Arena) allocChunk(bufLen int) (*slabClass, *chunk) { 284 | s.totAllocs++ 285 | 286 | if bufLen > s.slabSize { 287 | s.totTooBigErrs++ 288 | return nil, nil 289 | } 290 | 291 | slabClassIndex := s.findSlabClassIndex(bufLen) 292 | sc := &(s.slabClasses[slabClassIndex]) 293 | if sc.chunkFree.IsNil() { 294 | if !s.addSlab(slabClassIndex, s.slabSize, s.slabMagic) { 295 | s.totAddSlabErrs++ 296 | return nil, nil 297 | } 298 | } 299 | 300 | s.totPopFreeChunks++ 301 | chunk := sc.popFreeChunk() 302 | if chunk == nil { 303 | s.totPopFreeChunkErrs++ 304 | return nil, nil 305 | } 306 | 307 | return sc, chunk 308 | } 309 | 310 | func (s *Arena) findSlabClassIndex(bufLen int) int { 311 | i := sort.Search(len(s.slabClasses), 312 | func(i int) bool { return bufLen <= s.slabClasses[i].chunkSize }) 313 | if i >= len(s.slabClasses) { 314 | slabClass := &(s.slabClasses[len(s.slabClasses)-1]) 315 | nextChunkSize := float64(slabClass.chunkSize) * s.growthFactor 316 | s.addSlabClass(int(math.Ceil(nextChunkSize))) 317 | return s.findSlabClassIndex(bufLen) 318 | } 319 | return i 320 | } 321 | 322 | func (s *Arena) addSlabClass(chunkSize int) { 323 | s.slabClasses = append(s.slabClasses, slabClass{ 324 | chunkSize: chunkSize, 325 | chunkFree: nilLoc, 326 | }) 327 | } 328 | 329 | func (s *Arena) 
addSlab( 330 | slabClassIndex, slabSize int, slabMagic int32) bool { 331 | sc := &(s.slabClasses[slabClassIndex]) 332 | 333 | chunksPerSlab := slabSize / sc.chunkSize 334 | if chunksPerSlab <= 0 { 335 | chunksPerSlab = 1 336 | } 337 | 338 | slabIndex := len(sc.slabs) 339 | 340 | s.totMallocs++ 341 | // Re-multiplying to avoid any extra fractional chunk memory. 342 | memorySize := (sc.chunkSize * chunksPerSlab) + slabMemoryFooterLen 343 | memory := s.malloc(memorySize) 344 | if memory == nil { 345 | s.totMallocErrs++ 346 | return false 347 | } 348 | 349 | slab := &slab{ 350 | memory: memory, 351 | chunks: make([]chunk, chunksPerSlab), 352 | } 353 | 354 | footer := slab.memory[len(slab.memory)-slabMemoryFooterLen:] 355 | binary.BigEndian.PutUint32(footer[0:4], uint32(slabClassIndex)) 356 | binary.BigEndian.PutUint32(footer[4:8], uint32(slabIndex)) 357 | binary.BigEndian.PutUint32(footer[8:12], uint32(slabMagic)) 358 | 359 | sc.slabs = append(sc.slabs, slab) 360 | 361 | for i := 0; i < len(slab.chunks); i++ { 362 | c := &(slab.chunks[i]) 363 | c.self.slabClassIndex = slabClassIndex 364 | c.self.slabIndex = slabIndex 365 | c.self.chunkIndex = i 366 | c.self.bufStart = 0 367 | c.self.bufLen = sc.chunkSize 368 | sc.pushFreeChunk(c) 369 | } 370 | sc.numChunks += int64(len(slab.chunks)) 371 | 372 | return true 373 | } 374 | 375 | func (sc *slabClass) pushFreeChunk(c *chunk) { 376 | if c.refs != 0 { 377 | panic(fmt.Sprintf("pushFreeChunk() non-zero refs: %v", c.refs)) 378 | } 379 | c.next = sc.chunkFree 380 | sc.chunkFree = c.self 381 | sc.numChunksFree++ 382 | } 383 | 384 | func (sc *slabClass) popFreeChunk() *chunk { 385 | if sc.chunkFree.IsNil() { 386 | panic("popFreeChunk() when chunkFree is nil") 387 | } 388 | c := sc.chunk(sc.chunkFree) 389 | if c.refs != 0 { 390 | panic(fmt.Sprintf("popFreeChunk() non-zero refs: %v", c.refs)) 391 | } 392 | c.refs = 1 393 | sc.chunkFree = c.next 394 | c.next = nilLoc 395 | sc.numChunksFree-- 396 | if sc.numChunksFree < 0 { 397 | 
panic("popFreeChunk() got < 0 numChunksFree") 398 | } 399 | return c 400 | } 401 | 402 | func (sc *slabClass) chunkMem(c *chunk) []byte { 403 | if c == nil || c.self.IsNil() { 404 | return nil 405 | } 406 | beg := sc.chunkSize * c.self.chunkIndex 407 | return sc.slabs[c.self.slabIndex].memory[beg : beg+sc.chunkSize] 408 | } 409 | 410 | func (sc *slabClass) chunk(cl Loc) *chunk { 411 | if cl.IsNil() { 412 | return nil 413 | } 414 | return &(sc.slabs[cl.slabIndex].chunks[cl.chunkIndex]) 415 | } 416 | 417 | func (s *Arena) chunk(cl Loc) (*slabClass, *chunk) { 418 | if cl.IsNil() { 419 | return nil, nil 420 | } 421 | sc := &(s.slabClasses[cl.slabClassIndex]) 422 | return sc, sc.chunk(cl) 423 | } 424 | 425 | // Determine the slabClass & chunk for an Arena managed buf []byte. 426 | func (s *Arena) bufChunk(buf []byte) (*slabClass, *chunk) { 427 | if buf == nil || cap(buf) <= slabMemoryFooterLen { 428 | return nil, nil 429 | } 430 | 431 | rest := buf[:cap(buf)] 432 | footerDistance := len(rest) - slabMemoryFooterLen 433 | footer := rest[footerDistance:] 434 | 435 | slabClassIndex := binary.BigEndian.Uint32(footer[0:4]) 436 | slabIndex := binary.BigEndian.Uint32(footer[4:8]) 437 | slabMagic := binary.BigEndian.Uint32(footer[8:12]) 438 | if slabMagic != uint32(s.slabMagic) { 439 | return nil, nil 440 | } 441 | 442 | sc := &(s.slabClasses[slabClassIndex]) 443 | slab := sc.slabs[slabIndex] 444 | chunkIndex := len(slab.chunks) - 445 | int(math.Ceil(float64(footerDistance)/float64(sc.chunkSize))) 446 | 447 | return sc, &(slab.chunks[chunkIndex]) 448 | } 449 | 450 | func (c *chunk) addRef() *chunk { 451 | c.refs++ 452 | if c.refs <= 1 { 453 | panic(fmt.Sprintf("refs <= 1 during addRef: %#v", c)) 454 | } 455 | return c 456 | } 457 | 458 | func (s *Arena) decRef(sc *slabClass, c *chunk) bool { 459 | c.refs-- 460 | if c.refs < 0 { 461 | panic(fmt.Sprintf("refs < 0 during decRef: %#v", c)) 462 | } 463 | if c.refs == 0 { 464 | s.totDecRefZeroes++ 465 | scNext, cNext := 
s.chunk(c.next) 466 | if scNext != nil && cNext != nil { 467 | s.decRef(scNext, cNext) 468 | } 469 | c.next = nilLoc 470 | s.totPushFreeChunks++ 471 | sc.pushFreeChunk(c) 472 | return true 473 | } 474 | return false 475 | } 476 | 477 | // Stats fills an input map with runtime metrics about the Arena. 478 | func (s *Arena) Stats(m map[string]int64) map[string]int64 { 479 | m["totSlabClasses"] = int64(len(s.slabClasses)) 480 | m["totAllocs"] = s.totAllocs 481 | m["totAddRefs"] = s.totAddRefs 482 | m["totDecRefs"] = s.totDecRefs 483 | m["totDecRefZeroes"] = s.totDecRefZeroes 484 | m["totGetNexts"] = s.totGetNexts 485 | m["totSetNexts"] = s.totSetNexts 486 | m["totMallocs"] = s.totMallocs 487 | m["totMallocErrs"] = s.totMallocErrs 488 | m["totTooBigErrs"] = s.totTooBigErrs 489 | m["totAddSlabErrs"] = s.totAddSlabErrs 490 | m["totPushFreeChunks"] = s.totPushFreeChunks 491 | m["totPopFreeChunks"] = s.totPopFreeChunks 492 | m["totPopFreeChunkErrs"] = s.totPopFreeChunkErrs 493 | for i, sc := range s.slabClasses { 494 | prefix := fmt.Sprintf("slabClass-%06d-", i) 495 | m[prefix+"numSlabs"] = int64(len(sc.slabs)) 496 | m[prefix+"chunkSize"] = int64(sc.chunkSize) 497 | m[prefix+"numChunks"] = int64(sc.numChunks) 498 | m[prefix+"numChunksFree"] = int64(sc.numChunksFree) 499 | m[prefix+"numChunksInUse"] = int64(sc.numChunks - sc.numChunksFree) 500 | } 501 | return m 502 | } 503 | -------------------------------------------------------------------------------- /slab_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2013-Present Couchbase, Inc. 3 | 4 | Use of this software is governed by the Business Source License included in 5 | the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that 6 | file, in accordance with the Business Source License, use of this software will 7 | be governed by the Apache License, Version 2.0, included in the file 8 | licenses/APL2.txt. 
9 | */ 10 | 11 | package slab 12 | 13 | import ( 14 | "sort" 15 | "testing" 16 | ) 17 | 18 | func TestBasics(t *testing.T) { 19 | s := NewArena(1, 1024, 2, nil) 20 | if s == nil { 21 | t.Errorf("expected new slab arena to work") 22 | } 23 | a := s.Alloc(1) 24 | if a == nil { 25 | t.Errorf("expected alloc to work") 26 | } 27 | if len(a) != 1 { 28 | t.Errorf("expected alloc to give right size buf") 29 | } 30 | if cap(a) != 1+slabMemoryFooterLen { 31 | t.Errorf("expected alloc cap to match algorithm, got: %v vs %v", 32 | cap(a), 1+slabMemoryFooterLen) 33 | } 34 | a[0] = 66 35 | if s.DecRef(a) != true { 36 | t.Errorf("expected DecRef to be the last one") 37 | } 38 | b := s.Alloc(1) 39 | if b == nil { 40 | t.Errorf("expected alloc to work") 41 | } 42 | if len(b) != 1 { 43 | t.Errorf("expected alloc to give right size buf") 44 | } 45 | if cap(b) != 1+slabMemoryFooterLen { 46 | t.Errorf("expected alloc cap to match algorithm, got: %v vs %v", 47 | cap(b), 1+slabMemoryFooterLen) 48 | } 49 | if b[0] != 66 { 50 | t.Errorf("expected alloc to return last freed buf") 51 | } 52 | s.AddRef(b) 53 | if s.DecRef(b) != false { 54 | t.Errorf("expected DecRef() to not be the last") 55 | } 56 | if s.DecRef(b) != true { 57 | t.Errorf("expected DecRef() to be the last") 58 | } 59 | c := s.Alloc(1) 60 | if c[0] != 66 { 61 | t.Errorf("expected alloc to return last freed buf") 62 | } 63 | } 64 | 65 | func TestSlabClassGrowth(t *testing.T) { 66 | s := NewArena(1, 8, 2, nil) 67 | expectSlabClasses := func(numSlabClasses int) { 68 | if len(s.slabClasses) != numSlabClasses { 69 | t.Errorf("expected %v slab classses, got: %v", 70 | numSlabClasses, len(s.slabClasses)) 71 | } 72 | } 73 | expectSlabClasses(1) 74 | s.Alloc(1) 75 | expectSlabClasses(1) 76 | s.Alloc(1) 77 | expectSlabClasses(1) 78 | s.Alloc(2) 79 | expectSlabClasses(2) 80 | s.Alloc(1) 81 | s.Alloc(2) 82 | expectSlabClasses(2) 83 | s.Alloc(3) 84 | s.Alloc(4) 85 | expectSlabClasses(3) 86 | s.Alloc(5) 87 | s.Alloc(8) 88 | 
expectSlabClasses(4) 89 | } 90 | 91 | func TestDecRef(t *testing.T) { 92 | s := NewArena(1, 8, 2, nil) 93 | expectSlabClasses := func(numSlabClasses int) { 94 | if len(s.slabClasses) != numSlabClasses { 95 | t.Errorf("expected %v slab classses, got: %v", 96 | numSlabClasses, len(s.slabClasses)) 97 | } 98 | } 99 | a := make([][]byte, 128) 100 | for j := 0; j < 100; j++ { 101 | for i := 0; i < len(a); i++ { 102 | a[i] = s.Alloc(i % 8) 103 | } 104 | for i := 0; i < len(a); i++ { 105 | s.DecRef(a[i]) 106 | } 107 | } 108 | expectSlabClasses(4) 109 | } 110 | 111 | func TestAddRef(t *testing.T) { 112 | s := NewArena(1, 1, 2, nil) 113 | if !s.slabClasses[0].chunkFree.IsNil() { 114 | t.Errorf("expected no free chunks") 115 | } 116 | a := s.Alloc(1) 117 | a[0] = 123 118 | if !s.slabClasses[0].chunkFree.IsNil() { 119 | t.Errorf("expected no free chunks") 120 | } 121 | s.AddRef(a) 122 | if !s.slabClasses[0].chunkFree.IsNil() { 123 | t.Errorf("expected no free chunks") 124 | } 125 | s.DecRef(a) 126 | if !s.slabClasses[0].chunkFree.IsNil() { 127 | t.Errorf("expected no free chunks") 128 | } 129 | s.DecRef(a) 130 | if s.slabClasses[0].chunkFree.IsNil() { 131 | t.Errorf("expected 1 free chunk") 132 | } 133 | b := s.Alloc(1) 134 | if b[0] != 123 { 135 | t.Errorf("expected chunk to be reused") 136 | } 137 | } 138 | 139 | func TestLargeAlloc(t *testing.T) { 140 | s := NewArena(1, 1, 2, nil) 141 | if s.Alloc(2) != nil { 142 | t.Errorf("expected alloc larger than slab size to fail") 143 | } 144 | } 145 | 146 | func TestEmptyChunk(t *testing.T) { 147 | s := NewArena(1, 1, 2, nil) 148 | sc := s.slabClasses[0] 149 | if sc.chunk(nilLoc) != nil { 150 | t.Errorf("expected empty chunk to not have a chunk()") 151 | } 152 | sc1, c1 := s.chunk(nilLoc) 153 | if sc1 != nil || c1 != nil { 154 | t.Errorf("expected empty chunk to not have a chunk()") 155 | } 156 | } 157 | 158 | func TestEmptyChunkMem(t *testing.T) { 159 | s := NewArena(1, 1, 2, nil) 160 | sc := s.slabClasses[0] 161 | if 
sc.chunkMem(nil) != nil { 162 | t.Errorf("expected nil chunk to not have a chunk()") 163 | } 164 | if sc.chunkMem(&chunk{self: nilLoc}) != nil { 165 | t.Errorf("expected empty chunk to not have a chunk()") 166 | } 167 | } 168 | 169 | func TestAddRefOnAlreadyReleasedBuf(t *testing.T) { 170 | s := NewArena(1, 1, 2, nil) 171 | a := s.Alloc(1) 172 | s.DecRef(a) 173 | var err interface{} 174 | func() { 175 | defer func() { err = recover() }() 176 | s.AddRef(a) 177 | }() 178 | if err == nil { 179 | t.Errorf("expected panic on AddRef on already release buf") 180 | } 181 | } 182 | 183 | func TestDecRefOnAlreadyReleasedBuf(t *testing.T) { 184 | s := NewArena(1, 1, 2, nil) 185 | a := s.Alloc(1) 186 | s.DecRef(a) 187 | var err interface{} 188 | func() { 189 | defer func() { err = recover() }() 190 | s.DecRef(a) 191 | }() 192 | if err == nil { 193 | t.Errorf("expected panic on DecRef on already release buf") 194 | } 195 | } 196 | 197 | func TestPushFreeChunkOnReferencedChunk(t *testing.T) { 198 | s := NewArena(1, 1, 2, nil) 199 | sc := s.slabClasses[0] 200 | var err interface{} 201 | func() { 202 | defer func() { err = recover() }() 203 | sc.pushFreeChunk(&chunk{refs: 1}) 204 | }() 205 | if err == nil { 206 | t.Errorf("expected panic when free'ing a ref-counted chunk") 207 | } 208 | } 209 | 210 | func TestPopFreeChunkOnFreeChunk(t *testing.T) { 211 | s := NewArena(1, 1, 2, nil) 212 | sc := s.slabClasses[0] 213 | sc.chunkFree = nilLoc 214 | var err interface{} 215 | func() { 216 | defer func() { err = recover() }() 217 | sc.popFreeChunk() 218 | }() 219 | if err == nil { 220 | t.Errorf("expected panic when popFreeChunk() on free chunk") 221 | } 222 | } 223 | 224 | func TestPopFreeChunkOnReferencedFreeChunk(t *testing.T) { 225 | s := NewArena(1, 1024, 2, nil) 226 | s.Alloc(1) 227 | sc := s.slabClasses[0] 228 | sc.chunk(sc.chunkFree).refs = 1 229 | var err interface{} 230 | func() { 231 | defer func() { err = recover() }() 232 | sc.popFreeChunk() 233 | }() 234 | if err == nil { 
235 | t.Errorf("expected panic when popFreeChunk() on ref'ed chunk") 236 | } 237 | } 238 | 239 | func TestOwns(t *testing.T) { 240 | s := NewArena(1, 1024, 2, nil) 241 | if s.Owns(nil) { 242 | t.Errorf("expected false when Owns on nil buf") 243 | } 244 | if s.Owns(make([]byte, 1)) { 245 | t.Errorf("expected false when Owns on small buf") 246 | } 247 | if s.Owns(make([]byte, 1+slabMemoryFooterLen)) { 248 | t.Errorf("expected false whens Owns on non-magic buf") 249 | } 250 | if !s.Owns(s.Alloc(1)) { 251 | t.Errorf("expected Owns on Alloc'ed buf") 252 | } 253 | } 254 | 255 | func TestAddDecRefOnUnowned(t *testing.T) { 256 | s := NewArena(1, 1024, 2, nil) 257 | var err interface{} 258 | func() { 259 | defer func() { err = recover() }() 260 | s.AddRef(make([]byte, 1000)) 261 | }() 262 | if err == nil { 263 | t.Errorf("expected panic when AddRef() on unowned buf") 264 | } 265 | err = nil 266 | func() { 267 | defer func() { err = recover() }() 268 | s.DecRef(make([]byte, 1000)) 269 | }() 270 | if err == nil { 271 | t.Errorf("expected panic when DecRef() on unowned buf") 272 | } 273 | } 274 | 275 | func TestArenaChunk(t *testing.T) { 276 | s := NewArena(1, 100, 2, nil) 277 | s.Alloc(1) 278 | sc := &(s.slabClasses[0]) 279 | c := sc.popFreeChunk() 280 | if sc.chunk(c.self) != c { 281 | t.Errorf("expected chunk to be the same") 282 | } 283 | sc1, c1 := s.chunk(c.self) 284 | if sc1 != sc || c1 != c { 285 | t.Errorf("expected chunk to be the same") 286 | } 287 | } 288 | 289 | func TestArenaChunkMem(t *testing.T) { 290 | s := NewArena(1, 100, 2, nil) 291 | s.Alloc(1) 292 | sc := s.slabClasses[0] 293 | c := sc.popFreeChunk() 294 | if sc.chunkMem(c) == nil { 295 | t.Errorf("expected chunkMem to be non-nil") 296 | } 297 | } 298 | 299 | func TestMalloc(t *testing.T) { 300 | mallocWorks := true 301 | mallocCalls := 0 302 | malloc := func(sizeNeeded int) []byte { 303 | mallocCalls++ 304 | if mallocWorks { 305 | return make([]byte, sizeNeeded) 306 | } 307 | return nil 308 | } 309 | 
mustNil := func(aaa []byte) { 310 | if aaa != nil { 311 | t.Errorf("expected array to be nil") 312 | } 313 | } 314 | notNil := func(aaa []byte) { 315 | if aaa == nil { 316 | t.Errorf("expected array to be not nil") 317 | } 318 | } 319 | s := NewArena(1, 4, 2, malloc) 320 | if mallocCalls != 0 { 321 | t.Errorf("expect no mallocs yet") 322 | } 323 | a := s.Alloc(1) 324 | notNil(a) 325 | if mallocCalls != 1 { 326 | t.Errorf("expect 1 malloc") 327 | } 328 | a = s.Alloc(1) 329 | notNil(a) 330 | if mallocCalls != 1 { 331 | t.Errorf("expect 1 malloc still, since we don't need another slab yet") 332 | } 333 | a = s.Alloc(2) 334 | notNil(a) 335 | if mallocCalls != 2 { 336 | t.Errorf("expect 2 mallocs, since we need another slab") 337 | } 338 | a = s.Alloc(1) 339 | notNil(a) 340 | if mallocCalls != 2 { 341 | t.Errorf("expect 2 malloc still, since we don't need another slab yet") 342 | } 343 | a = s.Alloc(1) 344 | notNil(a) 345 | if mallocCalls != 2 { 346 | t.Errorf("expect 2 malloc still, since we don't need another slab yet") 347 | } 348 | a = s.Alloc(1) 349 | notNil(a) 350 | if mallocCalls != 3 { 351 | t.Errorf("expect 3 mallocs, since we need another slab") 352 | } 353 | mallocWorks = false // Now we pretend to run out of memory. 354 | a = s.Alloc(2) 355 | notNil(a) 356 | if mallocCalls != 3 { 357 | t.Errorf("expect 3 mallocs, since don't need another slab yet") 358 | } 359 | a = s.Alloc(2) 360 | mustNil(a) 361 | if mallocCalls != 4 { 362 | t.Errorf("expect 4 mallocs, since needed another slab") 363 | } 364 | a = s.Alloc(3) 365 | mustNil(a) 366 | if mallocCalls != 5 { 367 | t.Errorf("expect 5 mallocs, since needed another slab") 368 | } 369 | } 370 | 371 | func TestChaining(t *testing.T) { 372 | testChaining(t, NewArena(1, 1, 2, nil)) 373 | testChaining(t, NewArena(1, 100, 2, nil)) 374 | } 375 | 376 | func testChaining(t *testing.T, s *Arena) { 377 | a := s.Alloc(1) 378 | f := s.Alloc(1) 379 | s.DecRef(f) // The f buf is now freed. 
380 | if s.GetNext(a) != nil { 381 | t.Errorf("expected nil GetNext()") 382 | } 383 | s.SetNext(a, nil) 384 | if s.GetNext(a) != nil { 385 | t.Errorf("expected nil GetNext()") 386 | } 387 | var err interface{} 388 | func() { 389 | defer func() { err = recover() }() 390 | s.GetNext(nil) 391 | }() 392 | if err == nil { 393 | t.Errorf("expected panic when GetNext(nil)") 394 | } 395 | err = nil 396 | func() { 397 | defer func() { err = recover() }() 398 | s.GetNext(make([]byte, 1)) 399 | }() 400 | if err == nil { 401 | t.Errorf("expected panic when GetNext(non-arena-buf)") 402 | } 403 | err = nil 404 | func() { 405 | defer func() { err = recover() }() 406 | s.GetNext(f) 407 | }() 408 | if err == nil { 409 | t.Errorf("expected panic when GetNext(already-freed-buf)") 410 | } 411 | err = nil 412 | func() { 413 | defer func() { err = recover() }() 414 | s.SetNext(nil, make([]byte, 1)) 415 | }() 416 | if err == nil { 417 | t.Errorf("expected panic when SetNext(nil)") 418 | } 419 | err = nil 420 | func() { 421 | defer func() { err = recover() }() 422 | s.SetNext(a, make([]byte, 1)) 423 | }() 424 | if err == nil { 425 | t.Errorf("expected panic when SetNext(non-arena-buf)") 426 | } 427 | err = nil 428 | func() { 429 | defer func() { err = recover() }() 430 | s.SetNext(f, nil) 431 | }() 432 | if err == nil { 433 | t.Errorf("expected panic when SetNext(already-freed-buf)") 434 | } 435 | b0 := s.Alloc(1) 436 | b1 := s.Alloc(1) 437 | b1[0] = 201 438 | s.SetNext(b0, b1) 439 | bx := s.GetNext(b0) 440 | if bx[0] != 201 { 441 | t.Errorf("expected chain to work") 442 | } 443 | s.DecRef(bx) 444 | s.DecRef(b1) 445 | bx = s.GetNext(b0) 446 | if bx[0] != 201 { 447 | t.Errorf("expected chain to still work") 448 | } 449 | s.DecRef(bx) 450 | _, b0chunk := s.bufChunk(b0) 451 | if b0chunk.refs != 1 { 452 | t.Errorf("expected b0chunk to still be alive") 453 | } 454 | _, b1chunk := s.bufChunk(b1) 455 | if b1chunk == nil { 456 | t.Errorf("expected b1chunk to still be alive") 457 | } 458 | if 
b1chunk.refs != 1 { 459 | t.Errorf("expected b1chunk to still be ref'ed") 460 | } 461 | if b0chunk.next.IsNil() { 462 | t.Errorf("expected b0chunk to not be empty") 463 | } 464 | if !b1chunk.next.IsNil() { 465 | t.Errorf("expected b1chunk to have no next") 466 | } 467 | s.DecRef(b0) 468 | if b0chunk.refs != 0 { 469 | t.Errorf("expected b0chunk to not be ref'ed") 470 | } 471 | if b1chunk.refs != 0 { 472 | t.Errorf("expected b1chunk to not be ref'ed") 473 | } 474 | alice := s.Alloc(1) 475 | bob := s.Alloc(1) 476 | betty := s.Alloc(1) 477 | _, bobChunk := s.bufChunk(bob) 478 | _, bettyChunk := s.bufChunk(betty) 479 | s.SetNext(alice, bob) 480 | if bobChunk.refs != 2 { 481 | t.Errorf("expected bob to have 2 refs") 482 | } 483 | if bettyChunk.refs != 1 { 484 | t.Errorf("expected betty to have 1 ref") 485 | } 486 | s.DecRef(bob) 487 | if bobChunk.refs != 1 { 488 | t.Errorf("expected bob to have 1 ref (from alice)") 489 | } 490 | if bettyChunk.refs != 1 { 491 | t.Errorf("expected betty to have 1 ref") 492 | } 493 | s.SetNext(alice, betty) 494 | if bobChunk.refs != 0 { 495 | t.Errorf("expected bob to have 0 ref's (alice dropped bob for betty)") 496 | } 497 | if bettyChunk.refs != 2 { 498 | t.Errorf("expected betty to have 2 ref (1 from alice)") 499 | } 500 | s.DecRef(betty) 501 | if bobChunk.refs != 0 { 502 | t.Errorf("expected bob to have 0 ref's (alice dropped bob for betty)") 503 | } 504 | if bettyChunk.refs != 1 { 505 | t.Errorf("expected betty to have 1 ref (from alice)") 506 | } 507 | s.DecRef(alice) 508 | if bobChunk.refs != 0 { 509 | t.Errorf("expected bob to have 0 ref's (alice dropped bob for betty)") 510 | } 511 | if bettyChunk.refs != 0 { 512 | t.Errorf("expected betty to have 0 ref (alice dropped betty)") 513 | } 514 | } 515 | 516 | func TestFindSlabClassIndex(t *testing.T) { 517 | s := NewArena(1, 1024, 2, nil) 518 | test := func(bufLen, idxExp int) { 519 | idxAct := s.findSlabClassIndex(bufLen) 520 | if idxExp != idxAct { 521 | t.Errorf("expected slab class 
index: %v, got: %v, bufLen: %v", 522 | idxExp, idxAct, bufLen) 523 | } 524 | } 525 | test(0, 0) 526 | test(1, 0) 527 | test(2, 1) 528 | test(3, 2) 529 | test(4, 2) 530 | test(5, 3) 531 | test(256, 8) 532 | } 533 | 534 | func TestGrowthFactors(t *testing.T) { 535 | for gf := 1.1; gf < 16.7; gf = gf + 0.1 { 536 | s := NewArena(1, 1024, gf, nil) 537 | a := s.Alloc(1024) 538 | a[0] = 123 539 | s.DecRef(a) 540 | b := s.Alloc(1024) 541 | if b[0] != 123 { 542 | t.Errorf("expected re-used alloc mem") 543 | } 544 | } 545 | } 546 | 547 | func BenchmarkReffing(b *testing.B) { 548 | a := NewArena(1, 1024, 2, nil) 549 | 550 | data := a.Alloc(1) 551 | 552 | b.ResetTimer() 553 | for i := 0; i < b.N; i++ { 554 | a.AddRef(data) 555 | a.DecRef(data) 556 | } 557 | } 558 | 559 | func BenchmarkAllocingSize1(b *testing.B) { 560 | benchmarkAllocingConstant(b, NewArena(1, 1024, 2, nil), 1) 561 | } 562 | 563 | func BenchmarkAllocingSize128(b *testing.B) { 564 | benchmarkAllocingConstant(b, NewArena(1, 1024, 2, nil), 128) 565 | } 566 | 567 | func BenchmarkAllocingSize256(b *testing.B) { 568 | benchmarkAllocingConstant(b, NewArena(1, 1024, 2, nil), 256) 569 | } 570 | 571 | func benchmarkAllocingConstant(b *testing.B, a *Arena, allocSize int) { 572 | stuff := [][]byte{} 573 | for i := 0; i < 1024; i++ { 574 | stuff = append(stuff, a.Alloc(allocSize)) 575 | } 576 | for _, x := range stuff { 577 | a.DecRef(x) 578 | } 579 | 580 | b.ResetTimer() 581 | for i := 0; i < b.N; i++ { 582 | a.DecRef(a.Alloc(allocSize)) 583 | } 584 | } 585 | 586 | func BenchmarkAllocingModSizes(b *testing.B) { 587 | benchmarkAllocingFunc(b, NewArena(1, 1024, 2, nil), 588 | func(i int) int { return i % 1024 }) 589 | } 590 | 591 | func BenchmarkAllocingModSizesGrowthFactor1Dot1(b *testing.B) { 592 | benchmarkAllocingFunc(b, NewArena(1, 1024, 1.1, nil), 593 | func(i int) int { return i % 1024 }) 594 | } 595 | 596 | func benchmarkAllocingFunc(b *testing.B, a *Arena, 597 | allocSize func(i int) int) { 598 | stuff := 
[][]byte{} 599 | for i := 0; i < 1024; i++ { 600 | stuff = append(stuff, a.Alloc(allocSize(i))) 601 | } 602 | for _, x := range stuff { 603 | a.DecRef(x) 604 | } 605 | 606 | b.ResetTimer() 607 | for i := 0; i < b.N; i++ { 608 | a.DecRef(a.Alloc(allocSize(i))) 609 | } 610 | } 611 | 612 | func TestChainingSizes(t *testing.T) { 613 | testChainingSizes(t, NewArena(1, 100, 2, nil)) 614 | testChainingSizes(t, NewArena(1, 200, 8, nil)) 615 | } 616 | 617 | func testChainingSizes(t *testing.T, s *Arena) { 618 | curr := s.Alloc(91) 619 | for i := 90; i > 0; i-- { 620 | next := s.Alloc(100)[0:i] 621 | s.SetNext(next, curr) 622 | curr = next 623 | } 624 | i := 1 625 | for x := curr; x != nil; x = s.GetNext(x) { 626 | if len(x) != i { 627 | t.Fatalf("expected len(x): %d, got: %d", i, len(x)) 628 | } 629 | i++ 630 | } 631 | 632 | b := s.Alloc(5) 633 | if s.GetNext(b) != nil { 634 | t.Fatalf("expected nil") 635 | } 636 | } 637 | 638 | func TestStats(t *testing.T) { 639 | a := NewArena(1, 1024*1024, 2, nil) 640 | a.Alloc(3) 641 | aa := a.Alloc(17) 642 | if len(aa) != 17 { 643 | t.Errorf("expected 17") 644 | } 645 | aaloc := a.BufToLoc(aa) 646 | aaloc45 := aaloc.Slice(4, 5) 647 | bb := a.LocToBuf(aaloc45) 648 | if len(bb) != 5 { 649 | t.Errorf("expected 5") 650 | } 651 | a.DecRef(bb) 652 | a.Alloc(4096) 653 | stats := a.Stats(map[string]int64{}) 654 | if len(stats) == 0 { 655 | t.Errorf("expected some stats") 656 | } 657 | if stats["totSlabClasses"] != 13 || 658 | stats["totAllocs"] != 3 || 659 | stats["totAddRefs"] != 0 || 660 | stats["totDecRefs"] != 1 || 661 | stats["totDecRefZeroes"] != 1 || 662 | stats["totGetNexts"] != 0 || 663 | stats["totSetNexts"] != 0 || 664 | stats["totMallocs"] != 3 || 665 | stats["totMallocErrs"] != 0 || 666 | stats["totTooBigErrs"] != 0 || 667 | stats["totAddSlabErrs"] != 0 || 668 | stats["totPushFreeChunks"] != 1 || 669 | stats["totPopFreeChunks"] != 3 || 670 | stats["totPopFreeChunkErrs"] != 0 { 671 | t.Errorf("expected stats did not match") 672 | } 
673 | if stats["slabClass-000002-numChunksInUse"] != 1 || 674 | stats["slabClass-000005-numChunksInUse"] != 0 { 675 | t.Errorf("expected stats did not match InUse") 676 | } 677 | if stats["slabClass-000012-chunkSize"] != 4096 || 678 | stats["slabClass-000012-numChunks"] != 256 || 679 | stats["slabClass-000012-numChunksFree"] != 255 || 680 | stats["slabClass-000012-numChunksInUse"] != 1 || 681 | stats["slabClass-000012-numSlabs"] != 1 { 682 | t.Errorf("expected stats did not match slabClass 12") 683 | } 684 | 685 | mk := []string{} 686 | for k := range stats { 687 | mk = append(mk, k) 688 | } 689 | sort.Strings(mk) 690 | for _, k := range mk { 691 | t.Logf("%s = %d", k, stats[k]) 692 | } 693 | } 694 | 695 | func TestLoc(t *testing.T) { 696 | loc := NilLoc() 697 | if !loc.IsNil() { 698 | t.Errorf("expected nil loc to be nil") 699 | } 700 | 701 | a := NewArena(1, 1024*1024, 2, nil) 702 | 703 | loc = a.BufToLoc(nil) 704 | if !loc.IsNil() { 705 | t.Errorf("expected loc to buf to nil on bad buf") 706 | } 707 | 708 | b := a.Alloc(3) 709 | if len(b) != 3 { 710 | t.Errorf("expected len 3") 711 | } 712 | 713 | b[0] = 'a' 714 | b[1] = 'b' 715 | b[2] = 'c' 716 | 717 | loc = a.BufToLoc(b) 718 | if loc.IsNil() { 719 | t.Errorf("expected non nil loc") 720 | } 721 | 722 | b2 := a.LocToBuf(loc) 723 | if b2 == nil { 724 | t.Errorf("expected loc to buf to work") 725 | } 726 | if len(b2) != len(b) { 727 | t.Errorf("expected len(b2) to be len(b)") 728 | } 729 | if string(b) != string(b2) { 730 | t.Errorf("expected b == b2") 731 | } 732 | 733 | loc = a.BufToLoc(b[0:1]) 734 | if loc.IsNil() { 735 | t.Errorf("expected non nil loc") 736 | } 737 | 738 | b3 := a.LocToBuf(loc) 739 | if b3 == nil { 740 | t.Errorf("expected loc to buf to work") 741 | } 742 | if len(b3) != 1 { 743 | t.Errorf("expected len(b2) to be len(b)") 744 | } 745 | if string(b3) != "a" { 746 | t.Errorf("expected b3 to be a") 747 | } 748 | 749 | b4 := a.LocToBuf(NilLoc()) 750 | if b4 != nil { 751 | t.Errorf("expected nil 
loc to have nil buf") 752 | } 753 | 754 | loc = a.BufToLoc([]byte("hello")) 755 | if !loc.IsNil() { 756 | t.Errorf("expected loc nil on non-arena buf") 757 | } 758 | 759 | loc = a.BufToLoc(b) 760 | 761 | a.LocAddRef(loc) 762 | a.LocDecRef(loc) 763 | 764 | bx := a.LocToBuf(loc) 765 | if string(bx) != "abc" { 766 | t.Errorf("expected loc to be abc, got: %s", bx) 767 | } 768 | 769 | a.LocAddRef(NilLoc()) 770 | a.LocDecRef(NilLoc()) 771 | } 772 | --------------------------------------------------------------------------------