├── .gitignore ├── go.mod ├── internal └── goid │ ├── getg_arm.s │ ├── getg_arm64.s │ ├── getg_riscv64.s │ ├── getg_s390x.s │ ├── getg_wasm.s │ ├── getg_386.s │ ├── getg_amd64.s │ ├── getg_loong64.s │ ├── getg_mipsx.s │ ├── getg_ppc64x.s │ ├── getg_mips64x.s │ ├── go_tls.h │ └── goid.go ├── util.go ├── .github └── workflows │ └── go.yml ├── LICENSE ├── util_test.go ├── counter_test.go ├── Makefile ├── counter_bench_test.go ├── counter.go ├── .golangci.json ├── chan_bench_test.go ├── README.md ├── chan_test.go └── chan.go /.gitignore: -------------------------------------------------------------------------------- 1 | .fleet 2 | .idea 3 | *.out 4 | *_stat* 5 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/storozhukBM/thp 2 | 3 | go 1.19 4 | -------------------------------------------------------------------------------- /internal/goid/getg_arm.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "textflag.h" 4 | 5 | TEXT ·getg(SB), NOSPLIT, $0-4 6 | MOVW g, R8 7 | MOVW R8, ret+0(FP) 8 | RET 9 | -------------------------------------------------------------------------------- /internal/goid/getg_arm64.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "textflag.h" 4 | 5 | TEXT ·getg(SB), NOSPLIT, $0-8 6 | MOVD g, R8 7 | MOVD R8, ret+0(FP) 8 | RET 9 | -------------------------------------------------------------------------------- /internal/goid/getg_riscv64.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "textflag.h" 4 | 5 | TEXT ·getg(SB), NOSPLIT, $0-8 6 | MOV g, X10 7 | MOV X10, ret+0(FP) 8 | RET 9 | 
-------------------------------------------------------------------------------- /internal/goid/getg_s390x.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "textflag.h" 4 | 5 | TEXT ·getg(SB), NOSPLIT, $0-8 6 | MOVD g, R8 7 | MOVD R8, ret+0(FP) 8 | RET 9 | -------------------------------------------------------------------------------- /internal/goid/getg_wasm.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "textflag.h" 4 | 5 | TEXT ·getg(SB), NOSPLIT, $0-8 6 | MOVD g, R8 7 | MOVD R8, ret+0(FP) 8 | RET 9 | -------------------------------------------------------------------------------- /internal/goid/getg_386.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "go_tls.h" 4 | #include "textflag.h" 5 | 6 | TEXT ·getg(SB), NOSPLIT, $0-4 7 | get_tls(CX) 8 | MOVL g(CX), AX 9 | MOVL AX, ret+0(FP) 10 | RET 11 | -------------------------------------------------------------------------------- /internal/goid/getg_amd64.s: -------------------------------------------------------------------------------- 1 | #include "funcdata.h" 2 | #include "go_asm.h" 3 | #include "go_tls.h" 4 | #include "textflag.h" 5 | 6 | TEXT ·getg(SB), NOSPLIT, $0-8 7 | get_tls(CX) 8 | MOVQ g(CX), AX 9 | MOVQ AX, ret+0(FP) 10 | RET 11 | -------------------------------------------------------------------------------- /internal/goid/getg_loong64.s: -------------------------------------------------------------------------------- 1 | //go:build loong64 2 | // +build loong64 3 | 4 | #include "funcdata.h" 5 | #include "go_asm.h" 6 | #include "textflag.h" 7 | 8 | TEXT ·getg(SB), NOSPLIT, $0-8 9 | MOVV g, R8 10 | MOVV R8, ret+0(FP) 11 | RET 12 | -------------------------------------------------------------------------------- 
/internal/goid/getg_mipsx.s: -------------------------------------------------------------------------------- 1 | //go:build mips || mipsle 2 | // +build mips mipsle 3 | 4 | #include "funcdata.h" 5 | #include "go_asm.h" 6 | #include "textflag.h" 7 | 8 | TEXT ·getg(SB), NOSPLIT, $0-4 9 | MOVW g, R8 10 | MOVW R8, ret+0(FP) 11 | RET 12 | -------------------------------------------------------------------------------- /internal/goid/getg_ppc64x.s: -------------------------------------------------------------------------------- 1 | //go:build ppc64 || ppc64le 2 | // +build ppc64 ppc64le 3 | 4 | #include "funcdata.h" 5 | #include "go_asm.h" 6 | #include "textflag.h" 7 | 8 | TEXT ·getg(SB), NOSPLIT, $0-8 9 | MOVD g, R8 10 | MOVD R8, ret+0(FP) 11 | RET 12 | -------------------------------------------------------------------------------- /internal/goid/getg_mips64x.s: -------------------------------------------------------------------------------- 1 | //go:build mips64 || mips64le 2 | // +build mips64 mips64le 3 | 4 | #include "funcdata.h" 5 | #include "go_asm.h" 6 | #include "textflag.h" 7 | 8 | TEXT ·getg(SB), NOSPLIT, $0-8 9 | MOVV g, R8 10 | MOVV R8, ret+0(FP) 11 | RET 12 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package thp 2 | 3 | const cacheLineSize = 64 4 | 5 | func zero[T any]() T { 6 | return *new(T) 7 | } 8 | 9 | //nolint:gomnd // pure magic, described here https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 10 | func nextHighestPowerOf2(wideness int32) int32 { 11 | n := wideness 12 | n-- 13 | n |= n >> 1 14 | n |= n >> 2 15 | n |= n >> 4 16 | n |= n >> 8 17 | n |= n >> 16 18 | n++ 19 | return n 20 | } 21 | -------------------------------------------------------------------------------- /internal/goid/go_tls.h: -------------------------------------------------------------------------------- 1 | // Copyright 2014 
The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | #ifdef GOARCH_arm 6 | #define LR R14 7 | #endif 8 | 9 | #ifdef GOARCH_amd64 10 | #define get_tls(r) MOVQ TLS, r 11 | #define g(r) 0(r)(TLS*1) 12 | #endif 13 | 14 | #ifdef GOARCH_386 15 | #define get_tls(r) MOVL TLS, r 16 | #define g(r) 0(r)(TLS*1) 17 | #endif 18 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | workflow_dispatch: 9 | inputs: 10 | tag: 11 | description: 'Tag to create' 12 | required: true 13 | default: 'v0.0.0' 14 | 15 | jobs: 16 | build: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Set up Go 1.x 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: ^1.20 23 | 24 | - name: Check out code 25 | uses: actions/checkout@v3 26 | 27 | - name: Lint 28 | run: make lint 29 | 30 | - name: Test 31 | run: make test 32 | 33 | - name: Coverage 34 | run: make coverage 35 | 36 | - name: Send coverage 37 | uses: shogo82148/actions-goveralls@v1 38 | with: 39 | path-to-profile: coverage.out 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Bohdan Storozhuk 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the 
following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package thp_test 2 | 3 | import ( 4 | "errors" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func expectPanic(t *testing.T, f func(), expectedError error) { 10 | t.Helper() 11 | var caughtPanic error 12 | func() { 13 | defer func() { 14 | actualPanic, ok := recover().(error) 15 | if !ok { 16 | t.Fatal("recovered panic is not error") 17 | } 18 | caughtPanic = actualPanic 19 | if expectedError != nil { 20 | if actualPanic == nil { 21 | t.Fatalf( 22 | "expected error didn't happen. expected %T(%v)", 23 | expectedError, expectedError, 24 | ) 25 | } 26 | if !errors.Is(actualPanic, expectedError) { 27 | t.Fatalf( 28 | "unexpected error type. expected %T(%v); actual: %T(%v)", 29 | expectedError, expectedError, actualPanic, actualPanic, 30 | ) 31 | } 32 | if actualPanic.Error() != expectedError.Error() { 33 | t.Fatalf( 34 | "unexpected error formatting. 
expected %T(%v); actual: %T(%v)", 35 | expectedError, expectedError, actualPanic, actualPanic, 36 | ) 37 | } 38 | } 39 | }() 40 | f() 41 | }() 42 | if caughtPanic == nil { 43 | t.Fatal("panic isn't detected") 44 | } 45 | } 46 | 47 | func eq[V any](t *testing.T, expected V, actual V) { 48 | t.Helper() 49 | if !reflect.DeepEqual(expected, actual) { 50 | t.Fatalf("\nexp: %T:`%#v`\nact: %T:`%#v`", expected, expected, actual, actual) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /internal/goid/goid.go: -------------------------------------------------------------------------------- 1 | package goid 2 | 3 | import ( 4 | "reflect" 5 | "unsafe" 6 | ) 7 | 8 | //go:linkname typelinks reflect.typelinks 9 | func typelinks() ([]unsafe.Pointer, [][]int32) 10 | 11 | //go:linkname resolveTypeOff reflect.resolveTypeOff 12 | func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 13 | 14 | // getg returns the pointer to the current runtime.g. 
15 | // 16 | //go:nosplit 17 | func getg() unsafe.Pointer 18 | 19 | type iface struct { 20 | tab unsafe.Pointer 21 | data unsafe.Pointer 22 | } 23 | 24 | //nolint:gochecknoglobals // this is better than hardcoding, `goid` field offset 25 | var _goIDOffset uintptr 26 | 27 | //nolint:gochecknoinits // this is better than hardcoding, `goid` field offset 28 | func init() { 29 | _goIDOffset = getGoroutineIDOffsetInRuntimeGStruct() 30 | } 31 | 32 | func getGoroutineIDOffsetInRuntimeGStruct() uintptr { 33 | typ := reflect.TypeOf(0) 34 | face := (*iface)(unsafe.Pointer(&typ)) 35 | 36 | sections, offset := typelinks() 37 | for i, offs := range offset { 38 | rodata := sections[i] 39 | for _, off := range offs { 40 | face.data = resolveTypeOff(rodata, off) 41 | if typ.Kind() != reflect.Ptr || len(typ.Elem().Name()) == 0 { 42 | continue 43 | } 44 | if typ.Elem().String() == "runtime.g" { 45 | typ = typ.Elem() 46 | } 47 | if typ.String() == "runtime.g" { 48 | for i := 0; i < typ.NumField(); i++ { 49 | f := typ.Field(i) 50 | if f.Name == "goid" && f.Type == reflect.TypeOf(uint64(0)) { 51 | return f.Offset 52 | } 53 | } 54 | } 55 | } 56 | } 57 | panic("runtime.g.goid not found") 58 | } 59 | 60 | // ID returns current goroutine's runtime ID. 
61 | func ID() uint64 { 62 | gp := getg() 63 | return *(*uint64)(unsafe.Pointer(uintptr(gp) + _goIDOffset)) 64 | } 65 | -------------------------------------------------------------------------------- /counter_test.go: -------------------------------------------------------------------------------- 1 | package thp_test 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "runtime" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/storozhukBM/thp" 11 | ) 12 | 13 | func TestCounterExample(t *testing.T) { 14 | counter := thp.NewCounter() 15 | incsPerGoroutine := 1_000_000 16 | wg := &sync.WaitGroup{} 17 | wg.Add(runtime.NumCPU()) 18 | for i := 0; i < runtime.NumCPU(); i++ { 19 | go func() { 20 | defer wg.Done() 21 | for j := 0; j < incsPerGoroutine; j++ { 22 | counter.Add(1) 23 | } 24 | }() 25 | } 26 | wg.Wait() 27 | expectedResult := int64(runtime.NumCPU() * incsPerGoroutine) 28 | if counter.Load() != expectedResult { 29 | t.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 30 | } 31 | } 32 | 33 | func TestCounter(t *testing.T) { 34 | t.Parallel() 35 | 36 | t.Run("default", func(t *testing.T) { 37 | t.Parallel() 38 | defaultCounter := thp.NewCounter() 39 | runCounterTest(t, defaultCounter) 40 | }) 41 | 42 | counterWidenessList := []int{-1, 0, 1, 2, runtime.NumCPU(), runtime.NumCPU() + 1} 43 | for _, w := range counterWidenessList { 44 | wideness := w 45 | t.Run( 46 | fmt.Sprintf("wideness: %v", wideness), 47 | func(t *testing.T) { 48 | t.Parallel() 49 | counter := thp.NewCounterWithWideness(wideness) 50 | runCounterTest(t, counter) 51 | }, 52 | ) 53 | } 54 | } 55 | 56 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 57 | func runCounterTest(t *testing.T, counter *thp.Counter) { 58 | result := 256 + (rand.Int31() / 256) 59 | perGoroutineIncs := int(result) / runtime.NumCPU() 60 | counter.Add(int64(result) % int64(runtime.NumCPU())) 61 | 62 | wg := &sync.WaitGroup{} 63 | wg.Add(runtime.NumCPU()) 64 | for i := 
0; i < runtime.NumCPU(); i++ { 65 | go func() { 66 | defer wg.Done() 67 | for j := 0; j < perGoroutineIncs; j++ { 68 | counter.Add(1) 69 | } 70 | }() 71 | } 72 | 73 | wg.Wait() 74 | 75 | eq(t, int64(result), counter.Load()) 76 | counter.Clear() 77 | eq(t, 0, counter.Load()) 78 | } 79 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | benchstat := go run golang.org/x/perf/cmd/benchstat@v0.0.0-20220920022801-e8d778a60d07 2 | benchart := go run github.com/storozhukBM/benchart@v1.0.0 3 | golangci := go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2 4 | gofumpt := go run mvdan.cc/gofumpt@v0.4.0 5 | gci := go run github.com/daixiang0/gci@v0.10.1 6 | 7 | BOLD = \033[1m 8 | CLEAR = \033[0m 9 | CYAN = \033[36m 10 | 11 | help: ## Display this help 12 | @awk '\ 13 | BEGIN {FS = ":.*##"; printf "Usage: make $(CYAN)$(CLEAR)\n"} \ 14 | /^[a-z0-9]+([\/]%)?([\/](%-)?[a-z\-0-9%]+)*:.*? ##/ { printf " $(CYAN)%-15s$(CLEAR) %s\n", $$1, $$2 } \ 15 | /^##@/ { printf "\n$(BOLD)%s$(CLEAR)\n", substr($$0, 5) }' \ 16 | $(MAKEFILE_LIST) 17 | 18 | clean: ## Clean intermediate coverage, profiler and benchmark result files 19 | @go clean 20 | @rm -f profile.out 21 | @rm -f coverage.out 22 | @rm -f result.html 23 | 24 | gci: ## Fix imports order 25 | $(gci) write . 26 | 27 | format: gci ## Run formatting 28 | $(gofumpt) -l -w . 29 | 30 | lint: clean ## Run linters 31 | $(golangci) run ./... 32 | 33 | test: clean format ## Run tests 34 | go test -race -count 1 ./... 35 | 36 | qtest: clean ## Run quick tests 37 | go test ./... 38 | 39 | coverage: ## Measure and show coverage profile 40 | go test -coverprofile coverage.out ./... 
41 | go tool cover -html=coverage.out 42 | 43 | cntprofile: clean ## Get counter CPU profile 44 | go test -run=xxx -bench=BenchmarkCounterThroughput -cpuprofile profile.out 45 | go tool pprof -http=:8080 profile.out 46 | 47 | 48 | chanbench: ## Run channel benchmarks and show benchart 49 | go test -timeout 3h -count=5 -run=xxx -bench=BenchmarkChanThroughput ./... | tee chan_stat.txt 50 | $(benchstat) chan_stat.txt 51 | $(benchstat) -csv chan_stat.txt > chan_stat.csv 52 | $(benchart) 'ChanThroughput;xAxisType=log' chan_stat.csv chan_stat.html 53 | open chan_stat.html 54 | 55 | cntbench: ## Run counter benchmarks and show benchart 56 | go test -timeout 3h -count=5 -run=xxx -bench=BenchmarkCounterThroughput ./... | tee cnt_stat.txt 57 | $(benchstat) cnt_stat.txt 58 | $(benchstat) -csv cnt_stat.txt > cnt_stat.csv 59 | $(benchart) 'CounterThroughput;xAxisType=log' cnt_stat.csv cnt_stat.html 60 | open cnt_stat.html 61 | -------------------------------------------------------------------------------- /counter_bench_test.go: -------------------------------------------------------------------------------- 1 | package thp_test 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "sync/atomic" 7 | "testing" 8 | 9 | "github.com/storozhukBM/thp" 10 | ) 11 | 12 | func BenchmarkCounterThroughput(b *testing.B) { 13 | for pIdx := 1; pIdx <= 32; pIdx *= 2 { 14 | b.Run(fmt.Sprintf("type:%s;goroutines:%d", "atomic", pIdx), func(b *testing.B) { 15 | regularAtomicCnt(b, pIdx) 16 | }) 17 | b.Run(fmt.Sprintf("type:%s;goroutines:%d", "thp", pIdx), func(b *testing.B) { 18 | thpCnt(b, pIdx) 19 | }) 20 | } 21 | } 22 | 23 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 24 | func thpCnt(b *testing.B, goroutines int) { 25 | counter := thp.NewCounterWithWideness(goroutines) 26 | 27 | canRun := &sync.WaitGroup{} 28 | canRun.Add(1) 29 | 30 | wg := &sync.WaitGroup{} 31 | wg.Add(goroutines) 32 | 33 | incsPerGoroutine := b.N / goroutines 34 | for i := 0; i < goroutines; 
i++ { 35 | go func() { 36 | defer wg.Done() 37 | canRun.Wait() 38 | 39 | for j := 0; j < incsPerGoroutine; j++ { 40 | counter.Add(1) 41 | } 42 | }() 43 | } 44 | 45 | b.ResetTimer() 46 | b.ReportAllocs() 47 | canRun.Done() 48 | 49 | wg.Wait() 50 | b.StopTimer() 51 | 52 | expectedResult := int64(goroutines * incsPerGoroutine) 53 | if counter.Load() != expectedResult { 54 | b.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 55 | } 56 | } 57 | 58 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 59 | func regularAtomicCnt(b *testing.B, goroutines int) { 60 | counter := atomic.Int64{} 61 | 62 | canRun := &sync.WaitGroup{} 63 | canRun.Add(1) 64 | 65 | wg := &sync.WaitGroup{} 66 | wg.Add(goroutines) 67 | 68 | incsPerGoroutine := b.N / goroutines 69 | for i := 0; i < goroutines; i++ { 70 | go func() { 71 | defer wg.Done() 72 | canRun.Wait() 73 | 74 | for j := 0; j < incsPerGoroutine; j++ { 75 | counter.Add(1) 76 | } 77 | }() 78 | } 79 | 80 | b.ResetTimer() 81 | b.ReportAllocs() 82 | canRun.Done() 83 | 84 | wg.Wait() 85 | b.StopTimer() 86 | 87 | expectedResult := int64(goroutines * incsPerGoroutine) 88 | if counter.Load() != expectedResult { 89 | b.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /counter.go: -------------------------------------------------------------------------------- 1 | package thp 2 | 3 | import ( 4 | "runtime" 5 | "sync/atomic" 6 | 7 | "github.com/storozhukBM/thp/internal/goid" 8 | ) 9 | 10 | // wideInt64 struct represents a 64-bit integer value, 11 | // padded with an array of bytes to align it with the size of a cache line. 12 | // This padding helps prevent false sharing, which can occur when 13 | // multiple CPU cores access nearby memory locations simultaneously. 
14 | type wideInt64 struct { 15 | _ [cacheLineSize - 8]byte 16 | v atomic.Int64 17 | } 18 | 19 | // Counter is a concurrent counter implementation with striping, 20 | // designed to enhance performance in write-heavy and contended workloads. 21 | // It reduces contention by distributing the workload across multiple internal counters. 22 | // Compared to the atomic.Int64 type, this counter may use more memory 23 | // and have a slower Load operation. 24 | // However, its Add operations scales better under high load and contention. 25 | // To balance scalability and memory overhead, you can adjust the level of striping 26 | // by using the NewCounterWithWideness function and specifying your desired wideness. 27 | // 28 | // NOTE: zero value of Counter is NOT valid, please create new counters using methods provided below. 29 | type Counter struct { 30 | stripedValues []wideInt64 31 | } 32 | 33 | // NewCounter create new instance of Counter optimised for maximum scalability of write operations. 34 | func NewCounter() *Counter { 35 | return NewCounterWithWideness(runtime.NumCPU()) 36 | } 37 | 38 | // NewCounterWithWideness creates new instance of Counter with specified wideness. 39 | // Using this method you can balance scalability and memory overhead. 40 | func NewCounterWithWideness(wideness int) *Counter { 41 | if wideness > runtime.NumCPU() || wideness <= 0 { 42 | wideness = runtime.NumCPU() 43 | } 44 | n := nextHighestPowerOf2(int32(wideness)) 45 | return &Counter{ 46 | stripedValues: make([]wideInt64, n), 47 | } 48 | } 49 | 50 | // Add atomically adds x to current Counter value. 51 | func (c *Counter) Add(x int64) { 52 | // put our value into stripped slice of values 53 | localStripeIdx := (len(c.stripedValues) - 1) & int(goid.ID()) 54 | c.stripedValues[localStripeIdx].v.Add(x) 55 | } 56 | 57 | // Load calculates current Counter value, but can omit concurrent updates that happen during Load. 
58 | func (c *Counter) Load() int64 { 59 | result := int64(0) 60 | for i := range c.stripedValues { 61 | result += c.stripedValues[i].v.Load() 62 | } 63 | return result 64 | } 65 | 66 | // Clear sets counter to 0. 67 | func (c *Counter) Clear() { 68 | for i := range c.stripedValues { 69 | c.stripedValues[i].v.Store(0) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /.golangci.json: -------------------------------------------------------------------------------- 1 | { 2 | "linters-settings": { 3 | "govet": { 4 | "check-shadowing": true 5 | }, 6 | "nakedret": { 7 | "lines": 1 8 | }, 9 | "godox": { 10 | "keywords": [ 11 | "NOCOMMIT", 12 | "NOCOMIT" 13 | ] 14 | } 15 | }, 16 | "issues": { 17 | "exclude-rules": [ 18 | { 19 | "path": "_test.go", 20 | "linters": [ 21 | "dupl", 22 | "goconst", 23 | "gomnd", 24 | "structcheck", 25 | "unused", 26 | "gochecknoglobals", 27 | "gosec", 28 | "gocognit", 29 | "wsl" 30 | ] 31 | } 32 | ] 33 | }, 34 | "linters": { 35 | "enable": [ 36 | "asciicheck", 37 | "bidichk", 38 | "bodyclose", 39 | "bodyclose", 40 | "decorder", 41 | "depguard", 42 | "dogsled", 43 | "dupl", 44 | "durationcheck", 45 | "errcheck", 46 | "errchkjson", 47 | "errname", 48 | "errorlint", 49 | "execinquery", 50 | "exhaustive", 51 | "exhaustruct", 52 | "exportloopref", 53 | "exportloopref", 54 | "forcetypeassert", 55 | "gci", 56 | "gochecknoglobals", 57 | "gochecknoinits", 58 | "gocognit", 59 | "goconst", 60 | "gocritic", 61 | "gocyclo", 62 | "godot", 63 | "godox", 64 | "goerr113", 65 | "gofmt", 66 | "gofumpt", 67 | "goheader", 68 | "goimports", 69 | "gomnd", 70 | "gomoddirectives", 71 | "gomodguard", 72 | "goprintffuncname", 73 | "gosec", 74 | "gosimple", 75 | "govet", 76 | "grouper", 77 | "ineffassign", 78 | "lll", 79 | "misspell", 80 | "nakedret", 81 | "nestif", 82 | "nilerr", 83 | "noctx", 84 | "nolintlint", 85 | "nonamedreturns", 86 | "prealloc", 87 | "revive", 88 | "rowserrcheck", 89 | "staticcheck", 90 | 
"stylecheck", 91 | "tagliatelle", 92 | "tenv", 93 | "thelper", 94 | "tparallel", 95 | "typecheck", 96 | "unconvert", 97 | "unparam", 98 | "unused", 99 | "whitespace" 100 | ] 101 | } 102 | } -------------------------------------------------------------------------------- /chan_bench_test.go: -------------------------------------------------------------------------------- 1 | package thp_test 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "sync/atomic" 7 | "testing" 8 | 9 | "github.com/storozhukBM/thp" 10 | ) 11 | 12 | func BenchmarkChanThroughput(b *testing.B) { 13 | for pIdx := 1; pIdx < 32; pIdx *= 2 { 14 | for bufSize := 1; bufSize <= 1024; bufSize *= 2 { 15 | b.Run(fmt.Sprintf("type:%s;pCnt:%d;cCnt:%d;buf:%d", "standard", pIdx, 8, bufSize), func(b *testing.B) { 16 | runStandardChan(b, pIdx, 8, bufSize) 17 | }) 18 | } 19 | for bufSize := 1; bufSize <= 1024; bufSize *= 2 { 20 | b.Run(fmt.Sprintf("type:%s;pCnt:%d;cCnt:%d;buf:%d", "thp", pIdx, 8, bufSize), func(b *testing.B) { 21 | runThpChan(b, pIdx, 8, bufSize) 22 | }) 23 | } 24 | } 25 | } 26 | 27 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 28 | func runStandardChan(b *testing.B, producersCnt int, consumersCnt int, bufferSize int) { 29 | canRun := &sync.WaitGroup{} 30 | canRun.Add(1) 31 | 32 | ch := make(chan int, bufferSize) 33 | 34 | itemsPerProducer := b.N / producersCnt 35 | producersWg := &sync.WaitGroup{} 36 | producersWg.Add(producersCnt) 37 | for i := 0; i < producersCnt; i++ { 38 | go func() { 39 | defer producersWg.Done() 40 | canRun.Wait() 41 | for j := 0; j < itemsPerProducer; j++ { 42 | ch <- 1 43 | } 44 | }() 45 | } 46 | 47 | consumersWg := &sync.WaitGroup{} 48 | consumersWg.Add(consumersCnt) 49 | counter := &atomic.Int64{} 50 | for i := 0; i < consumersCnt; i++ { 51 | go func() { 52 | defer consumersWg.Done() 53 | result := 0 54 | canRun.Wait() 55 | for item := range ch { 56 | result += item 57 | } 58 | counter.Add(int64(result)) 59 | }() 60 | } 61 | 62 | 
b.ResetTimer() 63 | b.ReportAllocs() 64 | canRun.Done() 65 | 66 | producersWg.Wait() 67 | close(ch) 68 | consumersWg.Wait() 69 | b.StopTimer() 70 | 71 | expectedResult := int64(producersCnt * itemsPerProducer) 72 | if counter.Load() != expectedResult { 73 | b.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 74 | } 75 | } 76 | 77 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 78 | func runThpChan(b *testing.B, producersCnt int, consumersCnt int, bufferSize int) { 79 | canRun := &sync.WaitGroup{} 80 | canRun.Add(1) 81 | 82 | ch, chCloser := thp.NewChan[int](bufferSize) 83 | 84 | itemsPerProducer := b.N / producersCnt 85 | producersWg := &sync.WaitGroup{} 86 | producersWg.Add(producersCnt) 87 | for i := 0; i < producersCnt; i++ { 88 | go func() { 89 | defer producersWg.Done() 90 | producer, flush := ch.Producer() 91 | defer flush() 92 | canRun.Wait() 93 | for j := 0; j < itemsPerProducer; j++ { 94 | producer.Put(1) 95 | } 96 | }() 97 | } 98 | 99 | consumersWg := &sync.WaitGroup{} 100 | consumersWg.Add(consumersCnt) 101 | counter := &atomic.Int64{} 102 | for i := 0; i < consumersCnt; i++ { 103 | go func() { 104 | defer consumersWg.Done() 105 | consumer := ch.Consumer() 106 | result := 0 107 | canRun.Wait() 108 | for item, ok := consumer.Poll(); ok; item, ok = consumer.Poll() { 109 | result += item 110 | } 111 | counter.Add(int64(result)) 112 | }() 113 | } 114 | 115 | b.ResetTimer() 116 | b.ReportAllocs() 117 | canRun.Done() 118 | 119 | producersWg.Wait() 120 | chCloser() 121 | consumersWg.Wait() 122 | b.StopTimer() 123 | 124 | expectedResult := int64(producersCnt * itemsPerProducer) 125 | if counter.Load() != expectedResult { 126 | b.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # **thp** - High throughput primitives library 2 | [![Go Reference](https://pkg.go.dev/badge/github.com/storozhukBM/thp.svg)](https://pkg.go.dev/github.com/storozhukBM/thp) 3 | ![Build](https://github.com/storozhukBM/thp/actions/workflows/go.yml/badge.svg) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/storozhukBM/thp)](https://goreportcard.com/report/github.com/storozhukBM/thp) 5 | [![Coverage Status](https://coveralls.io/repos/github/storozhukBM/thp/badge.svg)](https://coveralls.io/github/storozhukBM/thp) 6 | 7 | ## **thp.Chan[T any]** 8 | 9 | **Chan** represents a concurrent channel with batching capability. 10 | It allows efficient batched communication between producers and consumers, 11 | reducing the overhead of individual item transfers. 12 | 13 | The channel operates in a concurrent manner, but each producer and consumer 14 | should be exclusively used by a single goroutine to ensure thread safety, 15 | so create separate **Producer[T any]** or **Consumer[T any]** for every goroutine 16 | that sends or receives messages. 17 | The producer is responsible for adding items to the channel's buffer 18 | and flushing them when the batch size is reached. The consumer 19 | retrieves items from the channel's buffer and processes them sequentially. 20 | 21 | The channel's batch size determines the number of items accumulated in the buffer 22 | before a flush operation is triggered. Adjusting the batch size can impact 23 | the trade-off between throughput and latency. Smaller batch sizes result in more 24 | frequent flushes and lower latency, while larger batch sizes increase throughput 25 | at the cost of higher latency. 26 | You can also manually trigger flushes. 27 | 28 | The channel internally manages a sync.Pool to reuse batch buffers and avoid 29 | unnecessary allocations. 
This optimization improves performance by reducing 30 | memory allocations during batch creation and disposal. 31 | 32 | ### Example with comparison to built-in channel: 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 93 | 146 | 147 |
Built-in channelthp.Chan
41 | 42 | ```go 43 | ch := make(chan int, 1024) 44 | producersWg := &sync.WaitGroup{} 45 | producersCount := 16 46 | itemsPerProducer := 1_000_000 47 | producersWg.Add(producersCount) 48 | 49 | for i := 0; i < producersCount; i++ { 50 | go func() { 51 | defer producersWg.Done() 52 | for j := 0; j < itemsPerProducer; j++ { 53 | ch <- 1 54 | } 55 | }() 56 | } 57 | 58 | 59 | 60 | consumersCount := 16 61 | consumersWg := &sync.WaitGroup{} 62 | consumersWg.Add(consumersCount) 63 | counter := &atomic.Int64{} 64 | for i := 0; i < consumersCount; i++ { 65 | go func() { 66 | defer consumersWg.Done() 67 | result := 0 68 | for item := range ch { 69 | result += item 70 | } 71 | counter.Add(int64(result)) 72 | }() 73 | } 74 | 75 | 76 | 77 | producersWg.Wait() 78 | close(ch) 79 | consumersWg.Wait() 80 | 81 | expectedResult := int64( 82 | producersCount * itemsPerProducer 83 | ) 84 | if counter.Load() != expectedResult { 85 | t.Errorf( 86 | "result is not as expected: %v != %v", 87 | counter.Load(), expectedResult, 88 | ) 89 | } 90 | ``` 91 | 92 | 94 | 95 | ```go 96 | ch, chCloser := thp.NewChan[int](1024) 97 | producersWg := &sync.WaitGroup{} 98 | producersCount := 16 99 | itemsPerProducer := 1_000_000 100 | producersWg.Add(producersCount) 101 | 102 | for i := 0; i < producersCount; i++ { 103 | go func() { 104 | defer producersWg.Done() 105 | producer, flush := ch.Producer() 106 | defer flush() 107 | for j := 0; j < itemsPerProducer; j++ { 108 | producer.Put(1) 109 | } 110 | }() 111 | } 112 | 113 | consumersCount := 16 114 | consumersWg := &sync.WaitGroup{} 115 | consumersWg.Add(consumersCount) 116 | counter := &atomic.Int64{} 117 | for i := 0; i < consumersCount; i++ { 118 | go func() { 119 | defer consumersWg.Done() 120 | consumer := ch.Consumer() 121 | result := 0 122 | item, ok := consumer.Poll() 123 | for ; ok; item, ok = consumer.Poll() { 124 | result += item 125 | } 126 | counter.Add(int64(result)) 127 | }() 128 | } 129 | 130 | producersWg.Wait() 131 | chCloser() 132 | 
consumersWg.Wait() 133 | 134 | expectedResult := int64( 135 | producersCount * itemsPerProducer 136 | ) 137 | if counter.Load() != expectedResult { 138 | t.Errorf( 139 | "result is not as expected: %v != %v", 140 | counter.Load(), expectedResult, 141 | ) 142 | } 143 | ``` 144 | 145 |
148 | 149 | ### Performance 150 | 151 | Run `make chanbench` to get results on your machine. 152 | 153 | Benchmark results 154 | 155 | ## **thp.Counter** 156 | 157 | Counter is a concurrent counter implementation with striping, 158 | designed to enhance performance in write-heavy and contended workloads. 159 | 160 | It reduces contention by distributing the workload across multiple internal counters. 161 | Compared to the atomic.Int64 type, this counter may use more memory 162 | and have a slower Load operation. 163 | 164 | However, its Add operations scales better under high load and contention. 165 | To balance scalability and memory overhead, you can adjust the level of striping 166 | by using the NewCounterWithWideness function and specifying your desired wideness. 167 | 168 | NOTE: zero value of Counter is NOT valid, please create new counters using methods provided below. 169 | 170 | ### Example: 171 | 172 | ```go 173 | counter := thp.NewCounter() 174 | incsPerGoroutine := 1_000_000 175 | wg := &sync.WaitGroup{} 176 | wg.Add(runtime.NumCPU()) 177 | for i := 0; i < runtime.NumCPU(); i++ { 178 | go func() { 179 | defer wg.Done() 180 | for j := 0; j < incsPerGoroutine; j++ { 181 | counter.Add(1) 182 | } 183 | }() 184 | } 185 | wg.Wait() 186 | expectedResult := int64(runtime.NumCPU() * incsPerGoroutine) 187 | if counter.Load() != expectedResult { 188 | t.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 189 | } 190 | ``` 191 | 192 | ### Performance 193 | 194 | Run `make cntbench` to get results on your machine. 
195 | Counter benchmark results 196 | -------------------------------------------------------------------------------- /chan_test.go: -------------------------------------------------------------------------------- 1 | package thp_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "runtime" 7 | "sync" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/storozhukBM/thp" 13 | ) 14 | 15 | func TestExample(t *testing.T) { 16 | ch, chCloser := thp.NewChan[int](1024) 17 | producersWg := &sync.WaitGroup{} 18 | producersCount := 16 19 | itemsPerProducer := 1_000_000 20 | producersWg.Add(producersCount) 21 | for i := 0; i < producersCount; i++ { 22 | go func() { 23 | defer producersWg.Done() 24 | producer, flush := ch.Producer() 25 | defer flush() 26 | for j := 0; j < itemsPerProducer; j++ { 27 | producer.Put(1) 28 | } 29 | }() 30 | } 31 | 32 | consumersCount := 16 33 | consumersWg := &sync.WaitGroup{} 34 | consumersWg.Add(consumersCount) 35 | counter := &atomic.Int64{} 36 | for i := 0; i < consumersCount; i++ { 37 | go func() { 38 | defer consumersWg.Done() 39 | consumer := ch.Consumer() 40 | result := 0 41 | item, ok := consumer.Poll() 42 | for ; ok; item, ok = consumer.Poll() { 43 | result += item 44 | } 45 | counter.Add(int64(result)) 46 | }() 47 | } 48 | 49 | producersWg.Wait() 50 | chCloser() 51 | consumersWg.Wait() 52 | 53 | expectedResult := int64(producersCount * itemsPerProducer) 54 | if counter.Load() != expectedResult { 55 | t.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 56 | } 57 | } 58 | 59 | func TestStandardExample(t *testing.T) { 60 | ch := make(chan int, 1024) 61 | producersWg := &sync.WaitGroup{} 62 | producersCount := 16 63 | itemsPerProducer := 1_000_000 64 | producersWg.Add(producersCount) 65 | for i := 0; i < producersCount; i++ { 66 | go func() { 67 | defer producersWg.Done() 68 | for j := 0; j < itemsPerProducer; j++ { 69 | ch <- 1 70 | } 71 | }() 72 | } 73 | 74 | consumersCount := 16 75 | consumersWg := 
&sync.WaitGroup{} 76 | consumersWg.Add(consumersCount) 77 | counter := &atomic.Int64{} 78 | for i := 0; i < consumersCount; i++ { 79 | go func() { 80 | defer consumersWg.Done() 81 | result := 0 82 | for item := range ch { 83 | result += item 84 | } 85 | counter.Add(int64(result)) 86 | }() 87 | } 88 | 89 | producersWg.Wait() 90 | close(ch) 91 | consumersWg.Wait() 92 | 93 | expectedResult := int64(producersCount * itemsPerProducer) 94 | if counter.Load() != expectedResult { 95 | t.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 96 | } 97 | } 98 | 99 | func TestNewChan(t *testing.T) { 100 | t.Parallel() 101 | 102 | expectPanic(t, func() { 103 | thp.NewChan[*int](-1) 104 | }, thp.ErrChanBatchSize) 105 | expectPanic(t, func() { 106 | thp.NewChan[*int](0) 107 | }, thp.ErrChanBatchSize) 108 | _, _ = thp.NewChan[*int](1) 109 | } 110 | 111 | func TestPrefetch(t *testing.T) { 112 | t.Parallel() 113 | 114 | ch, chCloser := thp.NewChan[int](3) 115 | defer chCloser() 116 | 117 | producer, flush := ch.Producer() 118 | go func() { 119 | defer flush() 120 | for i := 0; i < 3; i++ { 121 | producer.Put(1) 122 | } 123 | }() 124 | 125 | res := 0 126 | ctx, cancel := context.WithCancel(context.Background()) 127 | consumer := ch.Consumer() 128 | for i := 0; i < 3; i++ { 129 | value, ok, err := consumer.PollCtx(ctx) 130 | eq(t, true, ok) 131 | eq(t, nil, err) 132 | res += value 133 | } 134 | eq(t, 3, res) 135 | 136 | cancel() 137 | value, ok, err := consumer.PollCtx(ctx) 138 | eq(t, true, err != nil) 139 | eq(t, "context canceled", err.Error()) 140 | eq(t, false, ok) 141 | eq(t, 0, value) 142 | 143 | oneMoreValue, ok, err := consumer.PollCtx(ctx) 144 | eq(t, true, err != nil) 145 | eq(t, "context canceled", err.Error()) 146 | eq(t, false, ok) 147 | eq(t, 0, oneMoreValue) 148 | } 149 | 150 | func TestFlushCtx(t *testing.T) { 151 | t.Parallel() 152 | 153 | ch, chCloser := thp.NewChan[int](3) 154 | defer chCloser() 155 | 156 | consumerCtx, consumerCtxCancel := 
context.WithCancel(context.Background()) 157 | consumer := ch.Consumer() 158 | 159 | producerCtx, producerCtxCancel := context.WithCancel(context.Background()) 160 | producer, _ := ch.Producer() 161 | 162 | // FlushCtx of empty context returns no error 163 | { 164 | errFlush := producer.FlushCtx(producerCtx) 165 | eq(t, nil, errFlush) 166 | } 167 | 168 | { 169 | errPut := producer.PutCtx(producerCtx, 1) 170 | eq(t, nil, errPut) 171 | } 172 | 173 | // Check FlushCtx goes through with first item 174 | { 175 | errFlush := producer.FlushCtx(producerCtx) 176 | eq(t, nil, errFlush) 177 | } 178 | 179 | // thp.Chan internally has capacity == runtime.NumCPU 180 | for i := 1; i < runtime.NumCPU(); i++ { 181 | { 182 | errPut := producer.PutCtx(producerCtx, i+1) 183 | eq(t, nil, errPut) 184 | } 185 | { 186 | errPut := producer.PutCtx(producerCtx, i+1) 187 | eq(t, nil, errPut) 188 | } 189 | { 190 | errPut := producer.PutCtx(producerCtx, i+1) 191 | eq(t, nil, errPut) 192 | } 193 | } 194 | 195 | pwg := &sync.WaitGroup{} 196 | pwg.Add(1) 197 | // Next FlushCtx should block, but context cancellation unblocks it 198 | go func() { 199 | defer pwg.Done() 200 | errPut := producer.PutCtx(producerCtx, runtime.NumCPU()) 201 | eq(t, nil, errPut) 202 | errFlush := producer.FlushCtx(producerCtx) 203 | eq(t, true, errFlush != nil) 204 | eq(t, "context canceled", errFlush.Error()) 205 | }() 206 | 207 | time.Sleep(10 * time.Millisecond) 208 | producerCtxCancel() 209 | pwg.Wait() 210 | 211 | // FlushCtx on canceled ctx returns error right away 212 | { 213 | errFlush := producer.FlushCtx(producerCtx) 214 | eq(t, true, errFlush != nil) 215 | eq(t, "context canceled", errFlush.Error()) 216 | } 217 | 218 | for { 219 | _, success, _ := consumer.NonBlockingPoll() 220 | if !success { 221 | break 222 | } 223 | } 224 | 225 | cwg := &sync.WaitGroup{} 226 | cwg.Add(1) 227 | // Next PollCtx should block, but context cancellation unblocks it 228 | go func() { 229 | defer cwg.Done() 230 | value, success, 
errPoll := consumer.PollCtx(consumerCtx) 231 | eq(t, 0, value) 232 | eq(t, false, success) 233 | eq(t, true, errPoll != nil) 234 | eq(t, "context canceled", errPoll.Error()) 235 | }() 236 | 237 | time.Sleep(10 * time.Millisecond) 238 | consumerCtxCancel() 239 | cwg.Wait() 240 | } 241 | 242 | func TestNonBlockingFlush(t *testing.T) { 243 | t.Parallel() 244 | 245 | ch, chCloser := thp.NewChan[int](3) 246 | defer chCloser() 247 | 248 | consumer := ch.Consumer() 249 | // Check NonBlockingPoll is empty on empty channel 250 | { 251 | s, ok, stillOpen := consumer.NonBlockingPoll() 252 | eq(t, 0, s) 253 | eq(t, false, ok) 254 | eq(t, true, stillOpen) 255 | } 256 | 257 | producer, flush := ch.Producer() 258 | flush() 259 | 260 | // Check NonBlockingPoll is empty after empty flush 261 | { 262 | s, ok, stillOpen := consumer.NonBlockingPoll() 263 | eq(t, 0, s) 264 | eq(t, false, ok) 265 | eq(t, true, stillOpen) 266 | } 267 | 268 | // Check NonBlockingFlush goes returns false on empty batch 269 | { 270 | result := producer.NonBlockingFlush() 271 | eq(t, false, result) 272 | } 273 | 274 | producer.Put(1) 275 | 276 | // Check NonBlockingPoll is empty without flush 277 | { 278 | s, ok, stillOpen := consumer.NonBlockingPoll() 279 | eq(t, 0, s) 280 | eq(t, false, ok) 281 | eq(t, true, stillOpen) 282 | } 283 | // Check NonBlockingPoll goes through with first item 284 | { 285 | result := producer.NonBlockingFlush() 286 | eq(t, true, result) 287 | } 288 | 289 | // thp.Chan internally has capacity == runtime.NumCPU 290 | for i := 1; i < runtime.NumCPU(); i++ { 291 | producer.Put(i + 1) 292 | result := producer.NonBlockingFlush() 293 | eq(t, true, result) 294 | } 295 | 296 | // Next Flush should block, but non-blocking flush just returns false 297 | { 298 | producer.Put(runtime.NumCPU()) 299 | result := producer.NonBlockingFlush() 300 | eq(t, false, result) 301 | } 302 | } 303 | 304 | func TestNonBlockingPut(t *testing.T) { 305 | t.Parallel() 306 | 307 | ch, chCloser := 
thp.NewChan[int](3) 308 | defer chCloser() 309 | 310 | consumer := ch.Consumer() 311 | // Check NonBlockingPoll is empty on empty channel 312 | { 313 | s, ok, stillOpen := consumer.NonBlockingPoll() 314 | eq(t, 0, s) 315 | eq(t, false, ok) 316 | eq(t, true, stillOpen) 317 | } 318 | 319 | producer, flush := ch.Producer() 320 | flush() 321 | 322 | // Check NonBlockingPoll is empty after empty flush 323 | { 324 | s, ok, stillOpen := consumer.NonBlockingPoll() 325 | eq(t, 0, s) 326 | eq(t, false, ok) 327 | eq(t, true, stillOpen) 328 | } 329 | 330 | // Check NonBlockingFlush goes returns false on empty batch 331 | { 332 | result := producer.NonBlockingFlush() 333 | eq(t, false, result) 334 | } 335 | 336 | { 337 | ok := producer.NonBlockingPut(1) 338 | eq(t, true, ok) 339 | } 340 | 341 | // Check NonBlockingPoll is empty without flush 342 | { 343 | s, ok, stillOpen := consumer.NonBlockingPoll() 344 | eq(t, 0, s) 345 | eq(t, false, ok) 346 | eq(t, true, stillOpen) 347 | } 348 | // Check NonBlockingPoll goes through with first item 349 | { 350 | result := producer.NonBlockingFlush() 351 | eq(t, true, result) 352 | } 353 | 354 | // thp.Chan internally has capacity == runtime.NumCPU 355 | for i := 1; i < runtime.NumCPU(); i++ { 356 | eq(t, true, producer.NonBlockingPut(i+1)) 357 | eq(t, true, producer.NonBlockingPut(i+1)) 358 | eq(t, true, producer.NonBlockingPut(i+1)) 359 | } 360 | 361 | // Next Flush should block, but non-blocking flush just returns false 362 | { 363 | eq(t, true, producer.NonBlockingPut(runtime.NumCPU()+1)) 364 | eq(t, true, producer.NonBlockingPut(runtime.NumCPU()+1)) 365 | result := producer.NonBlockingPut(runtime.NumCPU() + 1) 366 | eq(t, false, result) 367 | } 368 | } 369 | 370 | func TestNonBlockingFetch(t *testing.T) { 371 | t.Parallel() 372 | 373 | // New channel 374 | ch, chCloser := thp.NewChan[string](3) 375 | 376 | consumer := ch.Consumer() 377 | // Check NonBlockingPoll is empty on empty channel 378 | { 379 | s, ok, stillOpen := 
consumer.NonBlockingPoll() 380 | eq(t, "", s) 381 | eq(t, false, ok) 382 | eq(t, true, stillOpen) 383 | } 384 | 385 | // Put one item into a batch, but don't flush 386 | producer, flush := ch.Producer() 387 | producer.Put("a") 388 | 389 | // Check that NonBlockingPoll is still empty on empty channel 390 | { 391 | s, ok, stillOpen := consumer.NonBlockingPoll() 392 | eq(t, "", s) 393 | eq(t, false, ok) 394 | eq(t, true, stillOpen) 395 | } 396 | 397 | // Flush to commit batch 398 | flush() 399 | 400 | // Check that NonBlockingPoll returns expected value 401 | { 402 | s, ok, stillOpen := consumer.NonBlockingPoll() 403 | eq(t, "a", s) 404 | eq(t, true, ok) 405 | eq(t, true, stillOpen) 406 | } 407 | // Now check that channel is empty 408 | { 409 | s, ok, stillOpen := consumer.NonBlockingPoll() 410 | eq(t, "", s) 411 | eq(t, false, ok) 412 | eq(t, true, stillOpen) 413 | } 414 | 415 | // Empty batch flush 416 | flush() 417 | 418 | // Check that NonBlockingPoll is still empty on empty channel 419 | { 420 | s, ok, stillOpen := consumer.NonBlockingPoll() 421 | eq(t, "", s) 422 | eq(t, false, ok) 423 | eq(t, true, stillOpen) 424 | } 425 | 426 | chCloser() 427 | 428 | // Check that NonBlockingPoll is still empty on closed channel 429 | { 430 | s, ok, stillOpen := consumer.NonBlockingPoll() 431 | eq(t, "", s) 432 | eq(t, false, ok) 433 | eq(t, false, stillOpen) 434 | } 435 | } 436 | 437 | func TestChan(t *testing.T) { 438 | t.Parallel() 439 | 440 | poolSizes := []int{1, 2, 4, 8, 9, 16, 31} 441 | batchSizes := []int{1, 2, 4, 8, 10, 1024} 442 | itemsPerProducers := []int{1, 2, 4, 5, 10, 31, 33, 100, 1024} 443 | 444 | for _, p := range poolSizes { 445 | for _, c := range poolSizes { 446 | for _, batchSize := range batchSizes { 447 | for _, itemsPerProducer := range itemsPerProducers { 448 | iPP := itemsPerProducer 449 | t.Run( 450 | fmt.Sprintf( 451 | "primitive;p:%v;c:%v;bSz:%v;iPP:%v", 452 | p, c, batchSize, iPP, 453 | ), func(t *testing.T) { 454 | t.Parallel() 455 | 
runPrimitiveChanTest(t, batchSize, p, c, iPP) 456 | }, 457 | ) 458 | t.Run( 459 | fmt.Sprintf( 460 | "obj;p:%v;c:%v;bSz:%v;iPP:%v", 461 | p, c, batchSize, iPP, 462 | ), 463 | func(t *testing.T) { 464 | t.Parallel() 465 | runObjChanTest(t, batchSize, p, c, iPP) 466 | }, 467 | ) 468 | } 469 | } 470 | } 471 | } 472 | } 473 | 474 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 475 | func runPrimitiveChanTest(t *testing.T, batchSize int, producersCnt int, consumersCnt int, itemsPerProducer int) { 476 | ch, chCloser := thp.NewChan[int](batchSize) 477 | producersWg := &sync.WaitGroup{} 478 | producersWg.Add(producersCnt) 479 | for i := 0; i < producersCnt; i++ { 480 | go func() { 481 | defer producersWg.Done() 482 | producer, flush := ch.Producer() 483 | defer flush() 484 | for j := 0; j < itemsPerProducer; j++ { 485 | producer.Put(1) 486 | } 487 | }() 488 | } 489 | 490 | consumersWg := &sync.WaitGroup{} 491 | consumersWg.Add(consumersCnt) 492 | counter := &atomic.Int64{} 493 | for i := 0; i < consumersCnt; i++ { 494 | go func() { 495 | defer consumersWg.Done() 496 | consumer := ch.Consumer() 497 | result := 0 498 | item, ok := consumer.Poll() 499 | for ; ok; item, ok = consumer.Poll() { 500 | result += item 501 | } 502 | counter.Add(int64(result)) 503 | }() 504 | } 505 | 506 | producersWg.Wait() 507 | chCloser() 508 | consumersWg.Wait() 509 | 510 | expectedResult := int64(producersCnt * itemsPerProducer) 511 | if counter.Load() != expectedResult { 512 | t.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 513 | } 514 | } 515 | 516 | //nolint:thelper // This is not exactly helper and in case of error we want to know line 517 | func runObjChanTest(t *testing.T, batchSize int, producersCnt int, consumersCnt int, itemsPerProducer int) { 518 | ch, chCloser := thp.NewChan[*int](batchSize) 519 | producersWg := &sync.WaitGroup{} 520 | producersWg.Add(producersCnt) 521 | for i := 0; i < producersCnt; i++ { 522 
| go func() { 523 | defer producersWg.Done() 524 | producer, flush := ch.Producer() 525 | defer flush() 526 | for j := 0; j < itemsPerProducer; j++ { 527 | msg := 1 528 | producer.Put(&msg) 529 | } 530 | }() 531 | } 532 | 533 | consumersWg := &sync.WaitGroup{} 534 | consumersWg.Add(consumersCnt) 535 | counter := &atomic.Int64{} 536 | for i := 0; i < consumersCnt; i++ { 537 | go func() { 538 | defer consumersWg.Done() 539 | consumer := ch.Consumer() 540 | result := 0 541 | item, ok := consumer.Poll() 542 | for ; ok; item, ok = consumer.Poll() { 543 | result += *item 544 | } 545 | counter.Add(int64(result)) 546 | }() 547 | } 548 | 549 | producersWg.Wait() 550 | chCloser() 551 | consumersWg.Wait() 552 | 553 | expectedResult := int64(producersCnt * itemsPerProducer) 554 | if counter.Load() != expectedResult { 555 | t.Errorf("result is not as expected: %v != %v", counter.Load(), expectedResult) 556 | } 557 | } 558 | -------------------------------------------------------------------------------- /chan.go: -------------------------------------------------------------------------------- 1 | package thp 2 | 3 | import ( 4 | "context" 5 | "runtime" 6 | "sync" 7 | ) 8 | 9 | const ErrChanBatchSize chanError = "Batch size for thp.Chan can't be lower than 1" 10 | 11 | // Chan represents a concurrent channel with batching capability. 12 | // It allows efficient batched communication between producers and consumers, 13 | // reducing the overhead of individual item transfers. 14 | // 15 | // The channel operates in a concurrent manner, but each producer and consumer 16 | // should be exclusively used by a single goroutine to ensure thread safety, 17 | // so create separate Producer[T any] or Consumer[T any] for every goroutine 18 | // that sends or receives messages. 19 | // The producer is responsible for adding items to the channel's buffer 20 | // and flushing them when the batch size is reached. 
The consumer 21 | // retrieves items from the channel's buffer and processes them sequentially. 22 | // 23 | // The channel's batch size determines the number of items accumulated in the buffer 24 | // before a flush operation is triggered. Adjusting the batch size can impact 25 | // the trade-off between throughput and latency. Smaller batch sizes result in more 26 | // frequent flushes and lower latency, while larger batch sizes increase throughput 27 | // at the cost of higher latency. 28 | // You can also manually trigger flushes. 29 | // 30 | // Context cancellation is supported via separate methods, 31 | // allowing graceful termination of producers and consumers. 32 | // 33 | // The channel internally manages a sync.Pool to reuse batch buffers and avoid 34 | // unnecessary allocations. This optimization improves performance by reducing 35 | // memory allocations during batch creation and disposal. 36 | type Chan[T any] struct { 37 | // the number of items to accumulate in the buffer 38 | // before triggering a flush operation to the internal channel. 39 | batchSize int 40 | // the internal channel used for communication between producers and consumers. 41 | internalChan chan *batch[T] 42 | // a sync.Pool used to reuse batch buffers and avoid unnecessary allocations. 43 | batchPool sync.Pool 44 | } 45 | 46 | // batch represents a batch of elements. 47 | // We use a pointer to a batch instead of a regular slice to place it into sync.Pool. 48 | // This helps avoid extra allocation when storing it as interface{}/any. 49 | type batch[T any] struct { 50 | buf []T 51 | } 52 | 53 | // NewChan creates a new concurrent channel. 54 | // batchSize specifies the number of elements to batch together before sending them. 55 | // It returns a pointer to Chan[T] and a cleanup function to close the channel. 
56 | func NewChan[T any](batchSize int) (*Chan[T], func()) { 57 | if batchSize < 1 { 58 | panic(ErrChanBatchSize) 59 | } 60 | 61 | ch := &Chan[T]{ 62 | batchSize: batchSize, 63 | internalChan: make(chan *batch[T], runtime.NumCPU()), 64 | batchPool: sync.Pool{ 65 | New: func() any { 66 | // We use a pointer to a batch instead of a regular slice to place it into sync.Pool. 67 | // This helps avoid extra allocation when storing it as interface{}/any. 68 | return &batch[T]{buf: make([]T, 0, batchSize)} 69 | }, 70 | }, 71 | } 72 | 73 | return ch, ch.Close 74 | } 75 | 76 | // Close closes the concurrent channel. 77 | // Close panics on attempted close of already close Chan. 78 | func (ch *Chan[T]) Close() { 79 | close(ch.internalChan) 80 | } 81 | 82 | // getBatchFromPool retrieves a batch from the sync.Pool. 83 | // It returns a pointer to the batch. 84 | // Note: It is assumed that this method is called exclusively by producers, 85 | // and it should return initialised batch with 86 | // cap(batch.buf) == ch.batchSize and len(batch.buf) == 0. 87 | func (ch *Chan[T]) getBatchFromPool() *batch[T] { 88 | //nolint:forcetypeassert // Panic on type mismatch is fine here. 89 | return ch.batchPool.Get().(*batch[T]) 90 | } 91 | 92 | // putBatchToPool returns a batch to the sync.Pool. 93 | // It takes a pointer to the batch. 94 | // Note: It is assumed that this method is called exclusively by consumers, 95 | // when all batch items are consumed and/or not required anymore. 96 | func (ch *Chan[T]) putBatchToPool(batch *batch[T]) { 97 | batch.buf = batch.buf[:0] 98 | ch.batchPool.Put(batch) 99 | } 100 | 101 | // Producer represents a producer for the concurrent channel. 102 | // Each producer should be exclusively used by a single goroutine to ensure thread safety. 103 | // Create separate Producer instance for every goroutine that sends messages. 
type Producer[T any] struct {
	parent *Chan[T]
	batch  *batch[T]
	// We unpack the batch.buf into a separate field to avoid extra memory hop
	// every time we access it, this yields significant speedup on our tests.
	// But we still need to store pointer to a batch to avoid allocations when working with sync.Pool.
	buf []T
}

// Producer creates a producer for the concurrent channel.
// The producer is responsible for adding items to the channel's buffer and flushing
// them when the batch size is reached.
//
// Note: flush method should be called by the same goroutine that will use the producer.
//
// Example usage:
//
//	producer, flush := channel.Producer()
//	defer flush() // Ensure sending items through the channel
//	producer.Put(item1)
//	producer.Put(item2)
//
// Methods with provided `ctx` allow for graceful termination of the producer. If the
// context is canceled, the producer stops accepting new items, any remaining items stay in
// the buffer.
// WARNING: do not use returned flush method if you want context aware operations,
// use FlushCtx instead.
//
// Example of ctx aware operations usage:
//
//	producer, _ := channel.Producer()
//	defer producer.FlushCtx(ctx) // Ensure sending items through the channel
//	err := producer.PutCtx(ctx, item)
//	if err != nil {
//		return err
//	}
//
// Returns:
//   - producer: The created producer instance.
//   - flush: A function to send any remaining items.
func (ch *Chan[T]) Producer() (*Producer[T], func()) {
	initialBatch := ch.getBatchFromPool()
	result := &Producer[T]{
		parent: ch,
		batch:  initialBatch,
		// Mirror the batch buffer locally; flush methods re-sync it into the batch.
		buf: initialBatch.buf,
	}
	return result, result.Flush
}

// NonBlockingFlush attempts to flush the items in the buffer to the channel without blocking.
// It returns true if the flush was successful, or false if the internal channel is full
// or the buffer is currently empty (nothing to flush).
// In most cases you should use regular flush method provided to you upon Producer creation.
// Note: This method is intended to be used exclusively by a goroutine that owns this Producer.
func (p *Producer[T]) NonBlockingFlush() bool {
	if len(p.buf) == 0 {
		return false
	}
	// Re-sync the locally unpacked buffer back into the batch before sending it.
	p.batch.buf = p.buf
	select {
	case p.parent.internalChan <- p.batch:
		// Batch sent successfully, get a new batch from the pool for the next flush.
		p.batch = p.parent.getBatchFromPool()
		p.buf = p.batch.buf
		return true
	default:
		// Channel is full, unable to flush at the moment.
		return false
	}
}

// Flush flushes the items in the buffer to the channel, blocking if necessary.
// If the channel is full, it blocks until there is space available.
// Flushing an empty buffer is a no-op.
// Note: This method is intended to be used exclusively by a goroutine that owns this Producer.
func (p *Producer[T]) Flush() {
	if len(p.buf) == 0 {
		return
	}
	// Re-sync the locally unpacked buffer back into the batch before sending it.
	p.batch.buf = p.buf
	p.parent.internalChan <- p.batch
	// Batch sent successfully, get a new batch from the pool for the next flush.
	p.batch = p.parent.getBatchFromPool()
	p.buf = p.batch.buf
}

// FlushCtx flushes the items in the buffer to the channel, blocking if necessary.
// If the channel is full, it blocks until there is space available.
// It returns error if context gets canceled during flush operation.
// If the provided context is canceled, the remaining items stay in the buffer.
// Note: This method is intended to be used exclusively by a goroutine that owns this Producer.
func (p *Producer[T]) FlushCtx(ctx context.Context) error {
	ctxErr := ctx.Err()
	if ctxErr != nil {
		return ctxErr
	}
	if len(p.buf) == 0 {
		return nil
	}

	p.batch.buf = p.buf
	select {
	case p.parent.internalChan <- p.batch:
		// Batch sent successfully, get a new batch from the pool for the next flush.
		p.batch = p.parent.getBatchFromPool()
		p.buf = p.batch.buf
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// NonBlockingPut adds an item to the producer's buffer without blocking.
// If the buffer reaches the batchSize, it attempts a non-blocking flush.
// It returns true if the item was successfully added, or false if the channel is full.
// Note: even when false is returned the item stays in the buffer and will be sent
// by a later successful flush.
// Note: This method is intended to be used exclusively by a goroutine that owns this Producer.
func (p *Producer[T]) NonBlockingPut(v T) bool {
	p.buf = append(p.buf, v)
	if len(p.buf) >= p.parent.batchSize {
		return p.NonBlockingFlush()
	}
	return true
}

// Put adds an item to the producer's buffer.
// If the buffer reaches the batchSize, it triggers a flush to the channel.
// Note: This method is intended to be used exclusively by a goroutine that owns this Producer.
func (p *Producer[T]) Put(v T) {
	p.buf = append(p.buf, v)
	if len(p.buf) >= p.parent.batchSize {
		p.Flush()
	}
}

// PutCtx adds an item to the producer's buffer.
// If the buffer reaches the batchSize, it triggers a flush to the channel.
// It returns error if context gets canceled during flush operation.
// Note: This method is intended to be used exclusively by a goroutine that owns this Producer.
func (p *Producer[T]) PutCtx(ctx context.Context, v T) error {
	p.buf = append(p.buf, v)
	if len(p.buf) >= p.parent.batchSize {
		return p.FlushCtx(ctx)
	}
	return nil
}

// Consumer represents a consumer for the concurrent channel.
// It retrieves items from the channel's buffer and processes them sequentially.
//
// The consumer operates in a concurrent manner, but it should be exclusively used
// by a single goroutine to ensure thread safety.
// Create a separate Consumer instance for every goroutine that receives messages.
//
// The consumer retrieves items by calling the Poll method,
// which returns the next item from the buffer. If the
// buffer is empty, the consumer will prefetch the next batch of items from the
// internal channel to ensure a continuous supply.
//
// The consumer supports both blocking and non-blocking operations.
//
// Context cancellation is supported via separate method,
// allowing graceful termination of the consumer.
// When the consumer's context is canceled, it stops fetching new batches from the
// internal channel and signals the end of consumption.
type Consumer[T any] struct {
	parent *Chan[T]
	// idx is the read position within buf; items before idx are already consumed.
	idx   int
	batch *batch[T]
	// We unpack the batch.buf into a separate field to avoid extra memory hop
	// every time we access it, this yields significant speedup on our tests.
	// But we still need to store pointer to a batch to avoid allocations when working with sync.Pool.
	buf []T
}

// Consumer creates a consumer for the concurrent channel.
// The consumer is responsible for retrieving items from the channel's buffer and
// processing them sequentially.
//
// Note: This method should be called by the same goroutine that will use the consumer.
//
// Example usage:
//
//	consumer := channel.Consumer()
//	for {
//		item, ok := consumer.Poll()
//		if !ok {
//			break
//		}
//		// Process the item
//	}
//
// Returns:
//   - consumer: The created consumer instance.
func (ch *Chan[T]) Consumer() *Consumer[T] {
	result := &Consumer[T]{
		parent: ch,
		idx:    0,
		// Start with an empty placeholder batch (cap(buf) == 0), so the first
		// prefetch does not return a foreign batch to the pool.
		batch: &batch[T]{},
		buf:   nil,
	}
	return result
}

// nonBlockingPrefetch attempts to prefetch the next batch of items from the internal channel
// in a non-blocking manner, ensuring a continuous supply of items for consumption.
// Returns:
//   - readSuccess: A boolean indicating whether a new batch was successfully fetched.
//   - channelIsOpen: A boolean indicating whether the internal channel is still open for
//     further consumption. If false, no more batches will be available.
//
//nolint:nonamedreturns // here we use named returns to document the meaning of the two returned booleans
func (c *Consumer[T]) nonBlockingPrefetch() (readSuccess bool, channelIsOpen bool) {
	// cap(c.buf) > 0 means we hold a real pooled batch (not the initial
	// placeholder), so recycle it before fetching the next one.
	if cap(c.buf) > 0 {
		c.parent.putBatchToPool(c.batch)
	}

	c.idx = 0
	c.batch = nil
	c.buf = nil

	select {
	case batch, ok := <-c.parent.internalChan:
		c.batch = batch
		if batch != nil {
			c.buf = batch.buf
		}
		// ok is false only when the channel is closed and drained.
		return ok, ok
	default:
		// No batch is available right now, but the channel may still be open.
		return false, true
	}
}

// prefetch fetches the next batch from the channel and prepares the consumer for reading.
// It returns true if a new batch is fetched successfully,
// or false if the channel is closed.
func (c *Consumer[T]) prefetch() bool {
	if cap(c.buf) > 0 {
		// Return the current batch to the pool.
		c.parent.putBatchToPool(c.batch)
	}

	c.idx = 0
	c.batch = nil
	c.buf = nil

	// Blocks until a batch arrives or the channel is closed.
	batch, ok := <-c.parent.internalChan
	c.batch = batch
	if batch != nil {
		c.buf = batch.buf
	}
	return ok
}

// prefetchCtx fetches the next batch from the channel and prepares the consumer for reading.
// It returns (true, nil) if a new batch is fetched successfully,
// or (false, nil) if the channel is closed
// or (false, error) if the context is canceled.
func (c *Consumer[T]) prefetchCtx(ctx context.Context) (bool, error) {
	if cap(c.buf) > 0 {
		// Return the current batch to the pool.
		c.parent.putBatchToPool(c.batch)
	}

	c.idx = 0
	c.batch = nil
	c.buf = nil

	select {
	case batch, ok := <-c.parent.internalChan:
		c.batch = batch
		if batch != nil {
			c.buf = batch.buf
		}
		return ok, nil
	case <-ctx.Done():
		return false, ctx.Err()
	}
}

// NonBlockingPoll retrieves the next item from the consumer's buffer in a non-blocking manner.
// It returns the item, a boolean indicating whether the retrieval was successful, and a boolean
// indicating whether the internal channel is still open for further consumption.
//
// Note: This method is intended to be used exclusively by a goroutine that owns this Consumer.
//
//nolint:nonamedreturns // here we use named returns to document the meaning of the two last returned booleans
func (c *Consumer[T]) NonBlockingPoll() (value T, readSuccess bool, channelIsOpen bool) {
	if c.idx >= len(c.buf) {
		success, open := c.nonBlockingPrefetch()
		if !success {
			return zero[T](), success, open
		}
	}
	item := c.buf[c.idx]
	c.idx++
	return item, true, true
}

// Poll retrieves the next item from the consumer's buffer.
402 | // It returns the item and true if successful, or a zero value and false if there are no more items. 403 | // Note: This method is intended to be used exclusively by a goroutine that owns this Consumer. 404 | func (c *Consumer[T]) Poll() (T, bool) { 405 | if c.idx >= len(c.buf) { 406 | ok := c.prefetch() 407 | if !ok { 408 | return zero[T](), false 409 | } 410 | } 411 | item := c.buf[c.idx] 412 | c.idx++ 413 | return item, true 414 | } 415 | 416 | // PollCtx retrieves the next item from the consumer's buffer. 417 | // It returns the item and true if successful, 418 | // or a (zero value, false, nil) if there are no more items 419 | // or a (zero value, false, error) if context is canceled. 420 | // Note: This method is intended to be used exclusively by a goroutine that owns this Consumer. 421 | func (c *Consumer[T]) PollCtx(ctx context.Context) (T, bool, error) { 422 | if c.idx >= len(c.buf) { 423 | ok, err := c.prefetchCtx(ctx) 424 | if err != nil { 425 | return zero[T](), false, err 426 | } 427 | if !ok { 428 | return zero[T](), false, nil 429 | } 430 | } 431 | item := c.buf[c.idx] 432 | c.idx++ 433 | return item, true, nil 434 | } 435 | 436 | type chanError string 437 | 438 | func (e chanError) Error() string { 439 | return string(e) 440 | } 441 | --------------------------------------------------------------------------------