├── version.json ├── codecov.yml ├── go.mod ├── .github └── workflows │ ├── stale.yml │ ├── generated-pr.yml │ ├── tagpush.yml │ ├── releaser.yml │ ├── go-check.yml │ ├── release-check.yml │ └── go-test.yml ├── LICENSE ├── LICENSE-BSD ├── writer_test.go ├── README.md ├── writer.go ├── pool.go ├── pool_test.go ├── buffer.go └── buffer_test.go /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v0.1.0" 3 | } 4 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | range: "50...100" 3 | comment: off 4 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/libp2p/go-buffer-pool 2 | 3 | go 1.24 4 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/tagpush.yml: -------------------------------------------------------------------------------- 1 | name: Tag Push Checker 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | permissions: 9 | contents: read 10 | issues: write 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | releaser: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 19 | -------------------------------------------------------------------------------- /.github/workflows/releaser.yml: -------------------------------------------------------------------------------- 1 | name: Releaser 2 | 3 | on: 4 | push: 5 | paths: [ 'version.json' ] 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: write 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.sha }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | releaser: 17 | uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 18 | -------------------------------------------------------------------------------- /.github/workflows/go-check.yml: -------------------------------------------------------------------------------- 1 | name: Go Checks 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["master"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | 
go-check: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 19 | -------------------------------------------------------------------------------- /.github/workflows/release-check.yml: -------------------------------------------------------------------------------- 1 | name: Release Checker 2 | 3 | on: 4 | pull_request_target: 5 | paths: [ 'version.json' ] 6 | types: [ opened, synchronize, reopened, labeled, unlabeled ] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | release-check: 19 | uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 20 | -------------------------------------------------------------------------------- /.github/workflows/go-test.yml: -------------------------------------------------------------------------------- 1 | name: Go Test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["master"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | go-test: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 19 | secrets: 20 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Juan Batiz-Benet 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /LICENSE-BSD: -------------------------------------------------------------------------------- 1 | ### Applies to buffer.go and buffer_test.go ### 2 | 3 | Copyright (c) 2009 The Go Authors. All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are 7 | met: 8 | 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 
11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following disclaimer 13 | in the documentation and/or other materials provided with the 14 | distribution. 15 | * Neither the name of Google Inc. nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /writer_test.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | func checkSize(t *testing.T, w *Writer) { 9 | if w.Size()-w.Buffered() != w.Available() { 10 | t.Fatalf("size (%d), buffered (%d), available (%d) mismatch", w.Size(), w.Buffered(), w.Available()) 11 | } 12 | } 13 | 14 | func TestWriter(t *testing.T) { 15 | var b bytes.Buffer 16 | w := Writer{W: &b} 17 | n, err := w.Write([]byte("foobar")) 18 | checkSize(t, &w) 19 | 20 | if err != nil || n != 6 { 21 | t.Fatalf("write failed: %d, %s", n, err) 22 | } 23 | if b.Len() != 0 { 24 | t.Fatal("expected the buffer to be empty") 25 | } 26 | if w.Buffered() != 6 { 27 | t.Fatalf("expected 6 bytes to be buffered, got %d", w.Buffered()) 28 | } 29 | checkSize(t, &w) 30 | if err := w.Flush(); err != nil { 31 | t.Fatal(err) 32 | } 33 | checkSize(t, &w) 34 | if err := w.Flush(); err != nil { 35 | t.Fatal(err) 36 | } 37 | checkSize(t, &w) 38 | if b.String() != "foobar" { 39 | t.Fatal("expected to have written foobar") 40 | } 41 | b.Reset() 42 | 43 | buf := make([]byte, WriterBufferSize) 44 | n, err = w.Write(buf) 45 | if n != WriterBufferSize || err != nil { 46 | t.Fatalf("write failed: %d, %s", n, err) 47 | } 48 | checkSize(t, &w) 49 | if b.Len() != WriterBufferSize { 50 | t.Fatal("large write should have gone through directly") 51 | } 52 | if err := w.Flush(); err != nil { 53 | t.Fatal(err) 54 | } 55 | checkSize(t, &w) 56 | 57 | b.Reset() 58 | if err := w.WriteByte(1); err != nil { 59 | t.Fatal(err) 60 | } 61 | if w.Buffered() != 1 { 62 | t.Fatalf("expected 1 byte to be buffered, got %d", w.Buffered()) 63 | } 64 | if n, err := w.WriteRune('1'); err != nil || n != 1 { 65 | t.Fatal(err) 66 | } 67 | if w.Buffered() != 2 { 68 | t.Fatalf("expected 2 bytes to be buffered, got %d", w.Buffered()) 69 | } 70 | checkSize(t, &w) 71 | if n, err := w.WriteString("foobar"); err != nil || n != 6 { 72 | t.Fatal(err) 73 | } 74 | if w.Buffered() != 8 { 75 | t.Fatalf("expected 8 bytes to be buffered, got %d", w.Buffered()) 76 | } 77 | checkSize(t, &w) 78 | if b.Len() != 0 { 79 | t.Fatal("write should have been buffered") 80 | } 81 | n, err = w.Write(buf) 82 | if 
n != WriterBufferSize || err != nil { 83 | t.Fatalf("write failed: %d, %s", n, err) 84 | } 85 | if b.Len() != WriterBufferSize || b.Bytes()[0] != 1 || b.String()[1:8] != "1foobar" { 86 | t.Fatalf("failed to flush properly: len:%d, prefix:%#v", b.Len(), b.Bytes()[:10]) 87 | } 88 | if err := w.Close(); err != nil { 89 | t.Fatal(err) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | go-buffer-pool 2 | ================== 3 | 4 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) 5 | [![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/) 6 | [![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p) 7 | [![codecov](https://codecov.io/gh/libp2p/go-buffer-pool/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-buffer-pool) 8 | [![Travis CI](https://travis-ci.org/libp2p/go-buffer-pool.svg?branch=master)](https://travis-ci.org/libp2p/go-buffer-pool) 9 | [![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) 10 | 11 | > A variable size buffer pool for go. 12 | 13 | ## Table of Contents 14 | 15 | - [About](#about) 16 | - [Advantages over GC](#advantages-over-gc) 17 | - [Disadvantages over GC:](#disadvantages-over-gc) 18 | - [Contribute](#contribute) 19 | - [License](#license) 20 | 21 | ## About 22 | 23 | This library provides: 24 | 25 | 1. `BufferPool`: A pool for re-using byte slices of varied sizes. This pool will always return a slice with at least the size requested and a capacity up to the next power of two. Each size class is pooled independently which makes the `BufferPool` more space efficient than a plain `sync.Pool` when used in situations where data size may vary over an arbitrary range. 26 | 2. `Buffer`: a buffer compatible with `bytes.Buffer` but backed by a `BufferPool`. Unlike `bytes.Buffer`, `Buffer` will automatically "shrink" on read, using the buffer pool to avoid causing too much work for the allocator. This is primarily useful for long lived buffers that usually sit empty. 27 | 28 | ### Advantages over GC 29 | 30 | * Reduces Memory Usage: 31 | * We don't have to wait for a GC to run before we can reuse memory. This is essential if you're repeatedly allocating large short-lived buffers. 32 | 33 | * Reduces CPU usage: 34 | * It takes some load off of the GC (due to buffer reuse). 35 | * We don't have to zero buffers (fewer wasteful memory writes). 36 | 37 | ### Disadvantages over GC: 38 | 39 | * Can leak memory contents. Unlike the go GC, we *don't* zero memory. 40 | * All buffers have a capacity of a power of 2. This is fine if you either expect these buffers to be temporary or you need buffers of this size. 41 | * Requires that buffers be explicitly put back into the pool. This can lead to race conditions and memory corruption if the buffer is released while it's still in use. 42 | 43 | ## Contribute 44 | 45 | PRs are welcome! 46 | 47 | Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
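For a quick feel for how the two types described in [About](#about) fit together, here is a minimal sketch (illustrative only; the import alias and the sizes are arbitrary, and, as the caveats above note, a slice must never be used again after it has been `Put` back):

```go
package main

import (
	"fmt"

	pool "github.com/libp2p/go-buffer-pool"
)

func main() {
	// BufferPool: borrow a slice, use it, and hand it back.
	buf := pool.Get(1024) // len(buf) == 1024, cap rounded up to a power of two
	copy(buf, "hello")
	pool.Put(buf) // buf must not be touched after this point

	// Buffer: a bytes.Buffer work-alike whose storage comes from the pool.
	var b pool.Buffer
	b.WriteString("hello world")
	out := make([]byte, 5)
	n, _ := b.Read(out)
	fmt.Println(string(out[:n]), b.Len()) // prints "hello 6"
}
```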
48 | 49 | ## License 50 | 51 | MIT © Protocol Labs 52 | BSD © The Go Authors 53 | 54 | --- 55 | 56 | The last gx published version of this module was: 0.1.3: QmQDvJoB6aJWN3sjr3xsgXqKCXf4jU5zdMXpDMsBkYVNqa 57 | -------------------------------------------------------------------------------- /writer.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "sync" 7 | ) 8 | 9 | const WriterBufferSize = 4096 10 | 11 | var bufioWriterPool = sync.Pool{ 12 | New: func() interface{} { 13 | return bufio.NewWriterSize(nil, WriterBufferSize) 14 | }, 15 | } 16 | 17 | // Writer is a buffered writer that returns its internal buffer in a pool when 18 | // not in use. 19 | type Writer struct { 20 | W io.Writer 21 | bufw *bufio.Writer 22 | } 23 | 24 | func (w *Writer) ensureBuffer() { 25 | if w.bufw == nil { 26 | w.bufw = bufioWriterPool.Get().(*bufio.Writer) 27 | w.bufw.Reset(w.W) 28 | } 29 | } 30 | 31 | // Write writes the given byte slice to the underlying connection. 32 | // 33 | // Note: Write won't return the write buffer to the pool even if it ends up 34 | // being empty after the write. You must call Flush() to do that. 35 | func (w *Writer) Write(b []byte) (int, error) { 36 | if w.bufw == nil { 37 | if len(b) >= WriterBufferSize { 38 | return w.W.Write(b) 39 | } 40 | w.bufw = bufioWriterPool.Get().(*bufio.Writer) 41 | w.bufw.Reset(w.W) 42 | } 43 | return w.bufw.Write(b) 44 | } 45 | 46 | // Size returns the size of the underlying buffer. 47 | func (w *Writer) Size() int { 48 | return WriterBufferSize 49 | } 50 | 51 | // Available returns the amount buffer space available. 52 | func (w *Writer) Available() int { 53 | if w.bufw != nil { 54 | return w.bufw.Available() 55 | } 56 | return WriterBufferSize 57 | } 58 | 59 | // Buffered returns the amount of data buffered. 60 | func (w *Writer) Buffered() int { 61 | if w.bufw != nil { 62 | return w.bufw.Buffered() 63 | } 64 | return 0 65 | } 66 | 67 | // WriteByte writes a single byte. 68 | func (w *Writer) WriteByte(b byte) error { 69 | w.ensureBuffer() 70 | return w.bufw.WriteByte(b) 71 | } 72 | 73 | // WriteRune writes a single rune, returning the number of bytes written. 74 | func (w *Writer) WriteRune(r rune) (int, error) { 75 | w.ensureBuffer() 76 | return w.bufw.WriteRune(r) 77 | } 78 | 79 | // WriteString writes a string, returning the number of bytes written. 80 | func (w *Writer) WriteString(s string) (int, error) { 81 | w.ensureBuffer() 82 | return w.bufw.WriteString(s) 83 | } 84 | 85 | // Flush flushes the write buffer, if any, and returns it to the pool. 86 | func (w *Writer) Flush() error { 87 | if w.bufw == nil { 88 | return nil 89 | } 90 | if err := w.bufw.Flush(); err != nil { 91 | return err 92 | } 93 | w.bufw.Reset(nil) 94 | bufioWriterPool.Put(w.bufw) 95 | w.bufw = nil 96 | return nil 97 | } 98 | 99 | // Close flushes the underlying writer and closes it if it implements the 100 | // io.Closer interface. 101 | // 102 | // Note: Close() closes the writer even if Flush() fails to avoid leaking system 103 | // resources. If you want to make sure Flush() succeeds, call it first. 104 | func (w *Writer) Close() error { 105 | var ( 106 | ferr, cerr error 107 | ) 108 | ferr = w.Flush() 109 | 110 | // always close even if flush fails. 
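// Otherwise a failed Flush would leave the underlying io.Closer (a socket, file, etc.) open and leak it.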
111 | if closer, ok := w.W.(io.Closer); ok { 112 | cerr = closer.Close() 113 | } 114 | 115 | if ferr != nil { 116 | return ferr 117 | } 118 | return cerr 119 | } 120 | -------------------------------------------------------------------------------- /pool.go: -------------------------------------------------------------------------------- 1 | // Package pool provides a sync.Pool equivalent that buckets incoming 2 | // requests to one of 32 sub-pools, one for each power of 2 from 0 to 31. 3 | // 4 | // import (pool "github.com/libp2p/go-buffer-pool") 5 | // var p pool.BufferPool 6 | // 7 | // small := make([]byte, 1024) 8 | // large := make([]byte, 4194304) 9 | // p.Put(small) 10 | // p.Put(large) 11 | // 12 | // small2 := p.Get(1024) 13 | // large2 := p.Get(4194304) 14 | // fmt.Println("small2 len:", len(small2)) 15 | // fmt.Println("large2 len:", len(large2)) 16 | // 17 | // // Output: 18 | // // small2 len: 1024 19 | // // large2 len: 4194304 20 | package pool 21 | 22 | import ( 23 | "math" 24 | "math/bits" 25 | "sync" 26 | ) 27 | 28 | // GlobalPool is a static Pool for reusing byte slices of various sizes. 29 | var GlobalPool = new(BufferPool) 30 | 31 | // MaxLength is the maximum length of an element that can be added to the Pool. 32 | const MaxLength = math.MaxInt32 33 | 34 | // BufferPool is a pool for reusing byte slices of varying sizes. It 35 | // maintains 32 internal pools, one for each power of 2 from 0 to 31. 36 | // 37 | // You should generally just call the package level Get and Put methods or use 38 | // the GlobalPool BufferPool instead of constructing your own. 39 | // 40 | // You MUST NOT copy a BufferPool after first use. 41 | type BufferPool struct { 42 | pools [32]sync.Pool // a list of singlePools 43 | ptrs sync.Pool 44 | } 45 | 46 | type bufp struct { 47 | buf []byte 48 | } 49 | 50 | // Get retrieves a buffer of the appropriate length from the buffer pool or 51 | // allocates a new one. Get may choose to ignore the pool and treat it as empty. 52 | // Callers should not assume any relation between values passed to Put and the 53 | // values returned by Get. 54 | // 55 | // If no suitable buffer exists in the pool, Get creates one. 56 | func (p *BufferPool) Get(length int) []byte { 57 | if length == 0 { 58 | return nil 59 | } 60 | // Calling this function with a negative length is invalid. 61 | // make will panic if length is negative, so we don't have to. 62 | if length > MaxLength || length < 0 { 63 | return make([]byte, length) 64 | } 65 | idx := nextLogBase2(uint32(length)) 66 | if ptr := p.pools[idx].Get(); ptr != nil { 67 | bp := ptr.(*bufp) 68 | buf := bp.buf[:uint32(length)] 69 | bp.buf = nil 70 | p.ptrs.Put(ptr) 71 | return buf 72 | } 73 | return make([]byte, 1<<idx) 74 | } 75 | 76 | // Put adds buf to the pool. 77 | func (p *BufferPool) Put(buf []byte) { 78 | capacity := cap(buf) 79 | if capacity == 0 || capacity > MaxLength { 80 | return // drop it 81 | } 82 | idx := prevLogBase2(uint32(capacity)) 83 | var bp *bufp 84 | if ptr := p.ptrs.Get(); ptr != nil { 85 | bp = ptr.(*bufp) 86 | } else { 87 | bp = new(bufp) 88 | } 89 | bp.buf = buf 90 | p.pools[idx].Put(bp) 91 | } 92 | 93 | // Get retrieves a buffer of the appropriate length from the global buffer pool 94 | // (or allocates a new one). 95 | func Get(length int) []byte { 96 | return GlobalPool.Get(length) 97 | } 98 | 99 | // Put returns a buffer to the global buffer pool. 100 | func Put(slice []byte) { 101 | GlobalPool.Put(slice) 102 | } 103 | 104 | // Log of base two, round up (for v > 0).
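// bits.Len32(x) is the number of bits needed to represent x, so for v > 0 bits.Len32(v-1) equals ceil(log2(v)): for example, nextLogBase2(1024) = bits.Len32(1023) = 10, nextLogBase2(1025) = bits.Len32(1024) = 11, and nextLogBase2(1) = bits.Len32(0) = 0. Get relies on this to pick the smallest power-of-two size class that can hold the requested length.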
105 | func nextLogBase2(v uint32) uint32 { 106 | return uint32(bits.Len32(v - 1)) 107 | } 108 | 109 | // Log of base two, round down (for v > 0) 110 | func prevLogBase2(num uint32) uint32 { 111 | next := nextLogBase2(num) 112 | if num == (1 << uint32(next)) { 113 | return next 114 | } 115 | return next - 1 116 | } 117 | -------------------------------------------------------------------------------- /pool_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Pool is no-op under race detector, so all these tests do not work. 6 | //go:build !race 7 | 8 | package pool 9 | 10 | import ( 11 | "bytes" 12 | "fmt" 13 | "math/rand" 14 | "runtime" 15 | "runtime/debug" 16 | "testing" 17 | ) 18 | 19 | func TestAllocations(t *testing.T) { 20 | var m1, m2 runtime.MemStats 21 | runtime.ReadMemStats(&m1) 22 | runtime.GC() 23 | for i := 0; i < 10000; i++ { 24 | b := Get(1010) 25 | Put(b) 26 | } 27 | runtime.GC() 28 | runtime.ReadMemStats(&m2) 29 | frees := m2.Frees - m1.Frees 30 | if frees > 1000 { 31 | t.Fatalf("expected less than 100 frees after GC, got %d", frees) 32 | } 33 | } 34 | 35 | func TestRange(t *testing.T) { 36 | min := nextLogBase2(1) 37 | max := nextLogBase2(uint32(MaxLength)) 38 | if int(max) != len(GlobalPool.pools)-1 { 39 | t.Errorf("expected %d pools, found %d", max, len(GlobalPool.pools)) 40 | } 41 | if min != 0 { 42 | t.Errorf("unused min pool") 43 | } 44 | } 45 | 46 | func TestPool(t *testing.T) { 47 | // disable GC so we can control when it happens. 48 | defer debug.SetGCPercent(debug.SetGCPercent(-1)) 49 | var p BufferPool 50 | 51 | a := make([]byte, 21) 52 | a[0] = 1 53 | b := make([]byte, 2050) 54 | b[0] = 2 55 | p.Put(a) 56 | p.Put(b) 57 | if g := p.Get(16); &g[0] != &a[0] { 58 | t.Fatalf("got [%d,...]; want [1,...]", g[0]) 59 | } 60 | if g := p.Get(2048); &g[0] != &b[0] { 61 | t.Fatalf("got [%d,...]; want [2,...]", g[0]) 62 | } 63 | if g := p.Get(16); cap(g) != 16 || !bytes.Equal(g[:16], make([]byte, 16)) { 64 | t.Fatalf("got existing slice; want new slice") 65 | } 66 | if g := p.Get(2048); cap(g) != 2048 || !bytes.Equal(g[:2048], make([]byte, 2048)) { 67 | t.Fatalf("got existing slice; want new slice") 68 | } 69 | if g := p.Get(1); cap(g) != 1 || !bytes.Equal(g[:1], make([]byte, 1)) { 70 | t.Fatalf("got existing slice; want new slice") 71 | } 72 | d := make([]byte, 1023) 73 | d[0] = 3 74 | p.Put(d) 75 | if g := p.Get(1024); cap(g) != 1024 || !bytes.Equal(g, make([]byte, 1024)) { 76 | t.Fatalf("got existing slice; want new slice") 77 | } 78 | if g := p.Get(512); cap(g) != 1023 || g[0] != 3 { 79 | t.Fatalf("got [%d,...]; want [3,...]", g[0]) 80 | } 81 | p.Put(a) 82 | 83 | debug.SetGCPercent(100) // to allow following GC to actually run 84 | runtime.GC() 85 | // For some reason, you need to run GC twice on go 1.16 if you want it to reliably work. 
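// (Since Go 1.13, sync.Pool keeps evicted items in a victim cache for one extra GC cycle: the first collection only moves pooled buffers into the victim cache and the second one actually frees them.)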
86 | runtime.GC() 87 | if g := p.Get(10); &g[0] == &a[0] { 88 | t.Fatalf("got a; want new slice after GC") 89 | } 90 | } 91 | 92 | func TestPoolStressByteSlicePool(t *testing.T) { 93 | var p BufferPool 94 | 95 | const P = 10 96 | chs := 10 97 | maxSize := 1 << 16 98 | N := int(1e4) 99 | if testing.Short() { 100 | N /= 100 101 | } 102 | done := make(chan bool) 103 | errs := make(chan error) 104 | for i := 0; i < P; i++ { 105 | go func() { 106 | ch := make(chan []byte, chs+1) 107 | 108 | for i := 0; i < chs; i++ { 109 | j := rand.Int() % maxSize 110 | ch <- p.Get(j) 111 | } 112 | 113 | for j := 0; j < N; j++ { 114 | r := 0 115 | for i := 0; i < chs; i++ { 116 | v := <-ch 117 | p.Put(v) 118 | r = rand.Int() % maxSize 119 | v = p.Get(r) 120 | if len(v) < r { 121 | errs <- fmt.Errorf("expect len(v) >= %d, got %d", r, len(v)) 122 | } 123 | ch <- v 124 | } 125 | 126 | if r%1000 == 0 { 127 | runtime.GC() 128 | } 129 | } 130 | done <- true 131 | }() 132 | } 133 | 134 | for i := 0; i < P; { 135 | select { 136 | case <-done: 137 | i++ 138 | case err := <-errs: 139 | t.Error(err) 140 | } 141 | } 142 | } 143 | 144 | func BenchmarkPool(b *testing.B) { 145 | var p BufferPool 146 | b.RunParallel(func(pb *testing.PB) { 147 | i := 7 148 | for pb.Next() { 149 | if i > 1<<20 { 150 | i = 7 151 | } else { 152 | i = i << 1 153 | } 154 | b := p.Get(i) 155 | b[0] = byte(i) 156 | p.Put(b) 157 | } 158 | }) 159 | } 160 | 161 | func BenchmarkAlloc(b *testing.B) { 162 | b.RunParallel(func(pb *testing.PB) { 163 | i := 7 164 | for pb.Next() { 165 | if i > 1<<20 { 166 | i = 7 167 | } else { 168 | i = i << 1 169 | } 170 | b := make([]byte, i) 171 | b[1] = byte(i) 172 | } 173 | }) 174 | } 175 | 176 | func BenchmarkPoolOverflow(b *testing.B) { 177 | var p BufferPool 178 | b.RunParallel(func(pb *testing.PB) { 179 | for pb.Next() { 180 | bufs := make([][]byte, 2100) 181 | for pow := uint32(0); pow < 21; pow++ { 182 | for i := 0; i < 100; i++ { 183 | bufs = append(bufs, p.Get(1<<pow)) } } for _, buf := range bufs { p.Put(buf) } } }) } -------------------------------------------------------------------------------- /buffer.go: -------------------------------------------------------------------------------- package pool import ( "io" ) // Buffer is a bytes.Buffer-compatible buffer backed by a BufferPool. Unlike bytes.Buffer, it shrinks on read, returning storage it no longer needs to the pool. type Buffer struct { // Pool is the buffer pool to use. If nil, the global buffer pool (GlobalPool) is used. Pool *BufferPool // rOff is the read offset into buf. rOff int // bootstrap is a small pre-allocated array used for buffers that stay small, so they never touch the pool. bootstrap [64]byte // buf holds the buffered data; bytes before rOff have already been read. buf []byte } // NewBuffer constructs a new Buffer whose initial contents are copied from buf. func NewBuffer(buf []byte) *Buffer { b := new(Buffer) if len(buf) > 0 { 49 | b.buf = b.getBuf(len(buf)) 50 | copy(b.buf, buf) 51 | } 52 | return b 53 | } 54 | 55 | // NewBufferString is identical to NewBuffer *except* that it allows one to 56 | // initialize the buffer from a string (without having to allocate an 57 | // intermediate bytes slice). 58 | func NewBufferString(buf string) *Buffer { 59 | b := new(Buffer) 60 | if len(buf) > 0 { 61 | b.buf = b.getBuf(len(buf)) 62 | copy(b.buf, buf) 63 | } 64 | return b 65 | } 66 | 67 | func (b *Buffer) grow(n int) int { 68 | wOff := len(b.buf) 69 | bCap := cap(b.buf) 70 | 71 | if bCap >= wOff+n { 72 | b.buf = b.buf[:wOff+n] 73 | return wOff 74 | } 75 | 76 | bSize := b.Len() 77 | 78 | minCap := 2*bSize + n 79 | 80 | // Slide if cap >= minCap. 81 | // Reallocate otherwise. 82 | if bCap >= minCap { 83 | copy(b.buf, b.buf[b.rOff:]) 84 | } else { 85 | // Needs new buffer. 86 | newBuf := b.getBuf(minCap) 87 | copy(newBuf, b.buf[b.rOff:]) 88 | b.returnBuf() 89 | b.buf = newBuf 90 | } 91 | 92 | b.rOff = 0 93 | b.buf = b.buf[:bSize+n] 94 | return bSize 95 | } 96 | 97 | func (b *Buffer) getPool() *BufferPool { 98 | if b.Pool == nil { 99 | return GlobalPool 100 | } 101 | return b.Pool 102 | } 103 | 104 | func (b *Buffer) returnBuf() { 105 | if cap(b.buf) > len(b.bootstrap) { 106 | b.getPool().Put(b.buf) 107 | } 108 | b.buf = nil 109 | } 110 | 111 | func (b *Buffer) getBuf(n int) []byte { 112 | if n <= len(b.bootstrap) { 113 | return b.bootstrap[:n] 114 | } 115 | return b.getPool().Get(n) 116 | } 117 | 118 | // Len returns the number of bytes that can be read from this buffer.
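// Bytes that have already been consumed by Read, ReadByte, or Next are not counted; Len reports only what is still unread.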
119 | func (b *Buffer) Len() int { 120 | return len(b.buf) - b.rOff 121 | } 122 | 123 | // Cap returns the current capacity of the buffer. 124 | // 125 | // Note: Buffer *may* re-allocate when writing (or growing by) `n` bytes even if 126 | // `Cap() >= Len() + n`, to avoid excessive copying. 127 | func (b *Buffer) Cap() int { 128 | return cap(b.buf) 129 | } 130 | 131 | // Bytes returns the slice of bytes currently buffered in the Buffer. 132 | // 133 | // The buffer returned by Bytes is valid until the next call to grow, truncate, 134 | // read, or write. Really, just don't touch the Buffer until you're done with 135 | // the return value of this function. 136 | func (b *Buffer) Bytes() []byte { 137 | return b.buf[b.rOff:] 138 | } 139 | 140 | // String returns the string representation of the buffer. 141 | // 142 | // It returns `<nil>` if the buffer is a nil pointer. 143 | func (b *Buffer) String() string { 144 | if b == nil { 145 | return "<nil>" 146 | } 147 | return string(b.buf[b.rOff:]) 148 | } 149 | 150 | // WriteString writes a string to the buffer. 151 | // 152 | // This function is identical to Write except that it allows one to write a 153 | // string directly without allocating an intermediate byte slice. 154 | func (b *Buffer) WriteString(buf string) (int, error) { 155 | wOff := b.grow(len(buf)) 156 | return copy(b.buf[wOff:], buf), nil 157 | } 158 | 159 | // Truncate truncates the Buffer. 160 | // 161 | // Panics if `n > b.Len()`. 162 | // 163 | // This function may free memory by shrinking the internal buffer. 164 | func (b *Buffer) Truncate(n int) { 165 | if n < 0 || n > b.Len() { 166 | panic("truncation out of range") 167 | } 168 | b.buf = b.buf[:b.rOff+n] 169 | b.shrink() 170 | } 171 | 172 | // Reset is equivalent to Truncate(0). 173 | func (b *Buffer) Reset() { 174 | b.returnBuf() 175 | b.rOff = 0 176 | } 177 | 178 | // ReadByte reads a single byte from the Buffer. 179 | func (b *Buffer) ReadByte() (byte, error) { 180 | if b.rOff >= len(b.buf) { 181 | return 0, io.EOF 182 | } 183 | c := b.buf[b.rOff] 184 | b.rOff++ 185 | return c, nil 186 | } 187 | 188 | // WriteByte writes a single byte to the Buffer. 189 | func (b *Buffer) WriteByte(c byte) error { 190 | wOff := b.grow(1) 191 | b.buf[wOff] = c 192 | return nil 193 | } 194 | 195 | // Grow grows the internal buffer such that `n` bytes can be written without 196 | // reallocating. 197 | func (b *Buffer) Grow(n int) { 198 | wOff := b.grow(n) 199 | b.buf = b.buf[:wOff] 200 | } 201 | 202 | // Next is an alternative to `Read` that returns a byte slice instead of taking 203 | // one. 204 | // 205 | // The returned byte slice is valid until the next read, write, grow, or 206 | // truncate. 207 | func (b *Buffer) Next(n int) []byte { 208 | m := b.Len() 209 | if m < n { 210 | n = m 211 | } 212 | data := b.buf[b.rOff : b.rOff+n] 213 | b.rOff += n 214 | return data 215 | } 216 | 217 | // Write writes the byte slice to the buffer. 218 | func (b *Buffer) Write(buf []byte) (int, error) { 219 | wOff := b.grow(len(buf)) 220 | return copy(b.buf[wOff:], buf), nil 221 | } 222 | 223 | // WriteTo copies from the buffer into the given writer until the buffer is 224 | // empty.
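// It implements io.WriterTo. Like Read, it may shrink the internal buffer afterwards, returning storage that is no longer needed to the pool.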
225 | func (b *Buffer) WriteTo(w io.Writer) (int64, error) { 226 | if b.rOff < len(b.buf) { 227 | n, err := w.Write(b.buf[b.rOff:]) 228 | b.rOff += n 229 | if b.rOff > len(b.buf) { 230 | panic("invalid write count") 231 | } 232 | b.shrink() 233 | return int64(n), err 234 | } 235 | return 0, nil 236 | } 237 | 238 | // MinRead is the minimum slice size passed to a Read call by 239 | // Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond 240 | // what is required to hold the contents of r, ReadFrom will not grow the 241 | // underlying buffer. 242 | const MinRead = 512 243 | 244 | // ReadFrom reads from the given reader into the buffer. 245 | func (b *Buffer) ReadFrom(r io.Reader) (int64, error) { 246 | n := int64(0) 247 | for { 248 | wOff := b.grow(MinRead) 249 | // Use *entire* buffer. 250 | b.buf = b.buf[:cap(b.buf)] 251 | 252 | read, err := r.Read(b.buf[wOff:]) 253 | b.buf = b.buf[:wOff+read] 254 | n += int64(read) 255 | switch err { 256 | case nil: 257 | case io.EOF: 258 | err = nil 259 | fallthrough 260 | default: 261 | b.shrink() 262 | return n, err 263 | } 264 | } 265 | } 266 | 267 | // Read reads at most `len(buf)` bytes from the internal buffer into the given 268 | // buffer. 269 | func (b *Buffer) Read(buf []byte) (int, error) { 270 | if len(buf) == 0 { 271 | return 0, nil 272 | } 273 | if b.rOff >= len(b.buf) { 274 | return 0, io.EOF 275 | } 276 | n := copy(buf, b.buf[b.rOff:]) 277 | b.rOff += n 278 | b.shrink() 279 | return n, nil 280 | } 281 | 282 | func (b *Buffer) shrink() { 283 | c := b.Cap() 284 | // Either nil or bootstrap. 285 | if c <= len(b.bootstrap) { 286 | return 287 | } 288 | 289 | l := b.Len() 290 | if l == 0 { 291 | // Shortcut if empty. 292 | b.returnBuf() 293 | b.rOff = 0 294 | } else if l*8 < c { 295 | // Only shrink when capacity > 8x length. Avoids shrinking too aggressively. 296 | newBuf := b.getBuf(l) 297 | copy(newBuf, b.buf[b.rOff:]) 298 | b.returnBuf() 299 | b.rOff = 0 300 | b.buf = newBuf[:l] 301 | } 302 | } 303 | -------------------------------------------------------------------------------- /buffer_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2009 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Modified by stebalien, 2018 6 | 7 | package pool 8 | 9 | import ( 10 | "bytes" 11 | "math/rand" 12 | "runtime" 13 | "testing" 14 | ) 15 | 16 | const N = 10000 // make this bigger for a larger (and slower) test 17 | var data string // test data for write tests 18 | var testBytes []byte // test data; same as data but as a slice. 19 | 20 | func init() { 21 | testBytes = make([]byte, N) 22 | for i := 0; i < N; i++ { 23 | testBytes[i] = 'a' + byte(i%26) 24 | } 25 | data = string(testBytes) 26 | } 27 | 28 | // Verify that contents of buf match the string s. 
29 | func check(t *testing.T, testname string, buf *Buffer, s string) { 30 | bytes := buf.Bytes() 31 | str := buf.String() 32 | if buf.Len() != len(bytes) { 33 | t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) 34 | } 35 | 36 | if buf.Len() != len(str) { 37 | t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) 38 | } 39 | 40 | if buf.Len() != len(s) { 41 | t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) 42 | } 43 | 44 | if string(bytes) != s { 45 | t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) 46 | } 47 | } 48 | 49 | // Fill buf through n writes of string fus. 50 | // The initial contents of buf corresponds to the string s; 51 | // the result is the final contents of buf returned as a string. 52 | func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string { 53 | check(t, testname+" (fill 1)", buf, s) 54 | for ; n > 0; n-- { 55 | m, err := buf.WriteString(fus) 56 | if m != len(fus) { 57 | t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus)) 58 | } 59 | if err != nil { 60 | t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) 61 | } 62 | s += fus 63 | check(t, testname+" (fill 4)", buf, s) 64 | } 65 | return s 66 | } 67 | 68 | // Fill buf through n writes of byte slice fub. 69 | // The initial contents of buf corresponds to the string s; 70 | // the result is the final contents of buf returned as a string. 71 | func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { 72 | check(t, testname+" (fill 1)", buf, s) 73 | for ; n > 0; n-- { 74 | m, err := buf.Write(fub) 75 | if m != len(fub) { 76 | t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) 77 | } 78 | if err != nil { 79 | t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) 80 | } 81 | s += string(fub) 82 | check(t, testname+" (fill 4)", buf, s) 83 | } 84 | return s 85 | } 86 | 87 | func TestNewBuffer(t *testing.T) { 88 | buf := NewBuffer(testBytes) 89 | check(t, "NewBuffer", buf, data) 90 | } 91 | 92 | func TestNewBufferString(t *testing.T) { 93 | buf := NewBufferString(data) 94 | check(t, "NewBufferString", buf, data) 95 | } 96 | 97 | // Empty buf through repeated reads into fub. 98 | // The initial contents of buf corresponds to the string s. 
99 | func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { 100 | check(t, testname+" (empty 1)", buf, s) 101 | 102 | for { 103 | n, err := buf.Read(fub) 104 | if n == 0 { 105 | break 106 | } 107 | if err != nil { 108 | t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) 109 | } 110 | s = s[n:] 111 | check(t, testname+" (empty 3)", buf, s) 112 | } 113 | 114 | check(t, testname+" (empty 4)", buf, "") 115 | } 116 | 117 | func TestBasicOperations(t *testing.T) { 118 | var buf Buffer 119 | 120 | for i := 0; i < 5; i++ { 121 | check(t, "TestBasicOperations (1)", &buf, "") 122 | 123 | buf.Reset() 124 | check(t, "TestBasicOperations (2)", &buf, "") 125 | 126 | buf.Truncate(0) 127 | check(t, "TestBasicOperations (3)", &buf, "") 128 | 129 | n, err := buf.Write([]byte(data[0:1])) 130 | if n != 1 { 131 | t.Errorf("wrote 1 byte, but n == %d", n) 132 | } 133 | if err != nil { 134 | t.Errorf("err should always be nil, but err == %s", err) 135 | } 136 | check(t, "TestBasicOperations (4)", &buf, "a") 137 | 138 | buf.WriteByte(data[1]) 139 | check(t, "TestBasicOperations (5)", &buf, "ab") 140 | 141 | n, err = buf.Write([]byte(data[2:26])) 142 | if err != nil { 143 | t.Fatal(err) 144 | } 145 | if n != 24 { 146 | t.Errorf("wrote 25 bytes, but n == %d", n) 147 | } 148 | check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) 149 | 150 | buf.Truncate(26) 151 | check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) 152 | 153 | buf.Truncate(20) 154 | check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) 155 | 156 | empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) 157 | empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) 158 | 159 | buf.WriteByte(data[1]) 160 | c, err := buf.ReadByte() 161 | if err != nil { 162 | t.Error("ReadByte unexpected eof") 163 | } 164 | if c != data[1] { 165 | t.Errorf("ReadByte wrong value c=%v", c) 166 | } 167 | if _, err = buf.ReadByte(); err == nil { 168 | t.Error("ReadByte unexpected not eof") 169 | } 170 | } 171 | } 172 | 173 | func TestLargeStringWrites(t *testing.T) { 174 | var buf Buffer 175 | limit := 30 176 | if testing.Short() { 177 | limit = 9 178 | } 179 | for i := 3; i < limit; i += 3 { 180 | s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data) 181 | empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i)) 182 | } 183 | check(t, "TestLargeStringWrites (3)", &buf, "") 184 | } 185 | 186 | func TestLargeByteWrites(t *testing.T) { 187 | var buf Buffer 188 | limit := 30 189 | if testing.Short() { 190 | limit = 9 191 | } 192 | for i := 3; i < limit; i += 3 { 193 | s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) 194 | empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) 195 | } 196 | check(t, "TestLargeByteWrites (3)", &buf, "") 197 | } 198 | 199 | func TestLargeStringReads(t *testing.T) { 200 | var buf Buffer 201 | for i := 3; i < 30; i += 3 { 202 | s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i]) 203 | empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) 204 | } 205 | check(t, "TestLargeStringReads (3)", &buf, "") 206 | } 207 | 208 | func TestLargeByteReads(t *testing.T) { 209 | var buf Buffer 210 | for i := 3; i < 30; i += 3 { 211 | s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) 212 | empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) 213 | } 214 | check(t, "TestLargeByteReads (3)", &buf, "") 215 | } 216 | 217 | 
func TestMixedReadsAndWrites(t *testing.T) { 218 | var buf Buffer 219 | s := "" 220 | for i := 0; i < 50; i++ { 221 | wlen := rand.Intn(len(data)) 222 | if i%2 == 0 { 223 | s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen]) 224 | } else { 225 | s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) 226 | } 227 | 228 | rlen := rand.Intn(len(data)) 229 | fub := make([]byte, rlen) 230 | n, _ := buf.Read(fub) 231 | s = s[n:] 232 | } 233 | empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) 234 | } 235 | 236 | func TestNil(t *testing.T) { 237 | var b *Buffer 238 | if b.String() != "<nil>" { 239 | t.Errorf("expected <nil>; got %q", b.String()) 240 | } 241 | } 242 | 243 | func TestReadFrom(t *testing.T) { 244 | var buf Buffer 245 | for i := 3; i < 30; i += 3 { 246 | s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) 247 | var b Buffer 248 | b.ReadFrom(&buf) 249 | empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) 250 | } 251 | } 252 | 253 | func TestWriteTo(t *testing.T) { 254 | var buf Buffer 255 | for i := 3; i < 30; i += 3 { 256 | s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) 257 | var b Buffer 258 | buf.WriteTo(&b) 259 | empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) 260 | } 261 | } 262 | 263 | func TestNext(t *testing.T) { 264 | b := []byte{0, 1, 2, 3, 4} 265 | tmp := make([]byte, 5) 266 | for i := 0; i <= 5; i++ { 267 | for j := i; j <= 5; j++ { 268 | for k := 0; k <= 6; k++ { 269 | // 0 <= i <= j <= 5; 0 <= k <= 6 270 | // Check that if we start with a buffer 271 | // of length j at offset i and ask for 272 | // Next(k), we get the right bytes. 273 | buf := NewBuffer(b[0:j]) 274 | n, _ := buf.Read(tmp[0:i]) 275 | if n != i { 276 | t.Fatalf("Read %d returned %d", i, n) 277 | } 278 | bb := buf.Next(k) 279 | want := k 280 | if want > j-i { 281 | want = j - i 282 | } 283 | if len(bb) != want { 284 | t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) 285 | } 286 | for l, v := range bb { 287 | if v != byte(l+i) { 288 | t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) 289 | } 290 | } 291 | } 292 | } 293 | } 294 | } 295 | 296 | func TestGrow(t *testing.T) { 297 | x := []byte{'x'} 298 | y := []byte{'y'} 299 | tmp := make([]byte, 72) 300 | for _, startLen := range []int{0, 100, 1000, 10000, 100000} { 301 | xBytes := bytes.Repeat(x, startLen) 302 | for _, growLen := range []int{0, 100, 1000, 10000, 100000} { 303 | buf := NewBuffer(xBytes) 304 | // If we read, this affects buf.rOff, which is good to test. 305 | readBytes, _ := buf.Read(tmp) 306 | buf.Grow(growLen) 307 | yBytes := bytes.Repeat(y, growLen) 308 | // Check no allocation occurs in write, as long as we're single-threaded. 309 | var m1, m2 runtime.MemStats 310 | runtime.ReadMemStats(&m1) 311 | buf.Write(yBytes) 312 | runtime.ReadMemStats(&m2) 313 | if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { 314 | t.Errorf("allocation occurred during write") 315 | } 316 | // Check that buffer has correct data. 317 | if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { 318 | t.Errorf("bad initial data at %d %d", startLen, growLen) 319 | } 320 | if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { 321 | t.Errorf("bad written data at %d %d", startLen, growLen) 322 | } 323 | } 324 | } 325 | } 326 | 327 | // Was a bug: used to give EOF reading empty slice at EOF.
328 | func TestReadEmptyAtEOF(t *testing.T) { 329 | b := new(Buffer) 330 | slice := make([]byte, 0) 331 | n, err := b.Read(slice) 332 | if err != nil { 333 | t.Errorf("read error: %v", err) 334 | } 335 | if n != 0 { 336 | t.Errorf("wrong count; got %d want 0", n) 337 | } 338 | } 339 | 340 | // Tests that we occasionally compact. Issue 5154. 341 | func TestBufferGrowth(t *testing.T) { 342 | var b Buffer 343 | buf := make([]byte, 1024) 344 | b.Write(buf[0:1]) 345 | var cap0 int 346 | for i := 0; i < 5<<10; i++ { 347 | b.Write(buf) 348 | b.Read(buf) 349 | if i == 0 { 350 | cap0 = b.Cap() 351 | } 352 | } 353 | cap1 := b.Cap() 354 | // (*Buffer).grow allows for 2x capacity slop before sliding, 355 | // so set our error threshold at 3x. 356 | if cap1 > cap0*3 { 357 | t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) 358 | } 359 | } 360 | 361 | func BenchmarkWriteByte(b *testing.B) { 362 | const n = 4 << 10 363 | b.SetBytes(n) 364 | buf := NewBuffer(make([]byte, n)) 365 | for i := 0; i < b.N; i++ { 366 | buf.Reset() 367 | for i := 0; i < n; i++ { 368 | buf.WriteByte('x') 369 | } 370 | } 371 | } 372 | 373 | // From Issue 5154. 374 | func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { 375 | buf := make([]byte, 1024) 376 | for i := 0; i < b.N; i++ { 377 | var b Buffer 378 | b.Write(buf[0:1]) 379 | for i := 0; i < 5<<10; i++ { 380 | b.Write(buf) 381 | b.Read(buf) 382 | } 383 | } 384 | } 385 | 386 | // Check that we don't compact too often. From Issue 5154. 387 | func BenchmarkBufferFullSmallReads(b *testing.B) { 388 | buf := make([]byte, 1024) 389 | for i := 0; i < b.N; i++ { 390 | var b Buffer 391 | b.Write(buf) 392 | for b.Len()+20 < b.Cap() { 393 | b.Write(buf[:10]) 394 | } 395 | for i := 0; i < 5<<10; i++ { 396 | b.Read(buf[:1]) 397 | b.Write(buf[:1]) 398 | } 399 | } 400 | } 401 | --------------------------------------------------------------------------------