├── .gitignore ├── go.mod ├── .github ├── dependabot.yml ├── release.yaml └── workflows │ ├── release.yaml │ └── check.yaml ├── wrap124.go ├── mockapi ├── README.md ├── files.go └── users.go ├── util_test.go ├── internal ├── core │ ├── util.go │ ├── transform.go │ ├── once.go │ ├── util_test.go │ ├── once_test.go │ ├── merge_test.go │ ├── merge.go │ ├── transform_test.go │ ├── delay.go │ ├── batch.go │ ├── delay_test.go │ ├── batch_test.go │ ├── loops_test.go │ ├── reduce_test.go │ ├── loops.go │ └── reduce.go ├── th │ ├── helpers.go │ ├── concurrency_monitor.go │ └── assertions.go └── ringbuffer │ ├── buffer.go │ └── buffer_test.go ├── helpers_test.go ├── LICENSE ├── util.go ├── batch_test.go ├── batch.go ├── example123_test.go ├── iter.go ├── benchmark_test.go ├── iter_test.go ├── merge.go ├── reduce.go ├── consume.go ├── doc.go ├── merge_test.go ├── wrap_test.go ├── wrap.go ├── transform.go ├── reduce_test.go ├── consume_test.go ├── transform_test.go ├── README.md └── example_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | .idea -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/destel/rill 2 | 3 | go 1.20 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" -------------------------------------------------------------------------------- /.github/release.yaml: -------------------------------------------------------------------------------- 1 | changelog: 2 | categories: 3 | - title: 🟢 New 4 | labels: ["new"] 5 | - title: 🔴 Fixes 6 | labels: ["fix", "bug"] 7 | - title: 🟣 Documentation 8 | labels: ["documentation", "docs"] 9 | - title: 🟠 Dependencies 10 | labels: ["dependencies"] 11 | - title: 🔵 Other 12 | labels: ["*"] -------------------------------------------------------------------------------- /wrap124.go: -------------------------------------------------------------------------------- 1 | //go:build go1.24 2 | 3 | package rill 4 | 5 | // Stream is a type alias for a channel of [Try] containers. 6 | // This alias is optional, but it can make the code more readable. 7 | // 8 | // Before: 9 | // 10 | // func StreamUsers() <-chan rill.Try[*User] { 11 | // ... 12 | // } 13 | // 14 | // After: 15 | // 16 | // func StreamUsers() rill.Stream[*User] { 17 | // ... 18 | // } 19 | type Stream[T any] = <-chan Try[T] 20 | -------------------------------------------------------------------------------- /mockapi/README.md: -------------------------------------------------------------------------------- 1 | # Mock API 2 | 3 | A minimal mock API implementation used in runnable examples on [pkg.go.dev](https://pkg.go.dev/github.com/destel/rill). 4 | 5 | This package simulates common API patterns like: 6 | - Single and bulk item fetching 7 | - Pagination 8 | - Network delay simulation 9 | - Realistic error scenarios 10 | 11 | The package is intentionally kept public to enable running and experimenting with examples in the Go Playground. 
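
The sketch below shows how this package is typically wired into a rill pipeline, similar to the runnable examples. It is illustrative only: `mockapi.DownloadFile`, `rill.FromSlice`, `rill.Map` and `rill.ForEach` are real functions, while the URLs and the `main` wrapper are made up for this example.

```go
package main

import (
	"context"
	"fmt"

	"github.com/destel/rill"
	"github.com/destel/rill/mockapi"
)

func main() {
	ctx := context.Background()

	// Convert a slice of (made-up) URLs into a stream
	urls := rill.FromSlice([]string{"files/a.txt", "files/b.txt", "files/c.txt"}, nil)

	// Download up to 3 files concurrently using the mock API
	contents := rill.Map(urls, 3, func(url string) ([]byte, error) {
		return mockapi.DownloadFile(ctx, url)
	})

	// Print the downloaded contents; ForEach returns the error, if any
	err := rill.ForEach(contents, 1, func(body []byte) error {
		fmt.Println(string(body))
		return nil
	})
	if err != nil {
		fmt.Println("Error:", err)
	}
}
```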
-------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/destel/rill/internal/th" 7 | ) 8 | 9 | func TestDrain(t *testing.T) { 10 | // real tests are in another package 11 | Drain[int](th.FromRange(0, 10)) 12 | Discard[int](th.FromRange(0, 10)) 13 | DrainNB[int](th.FromRange(0, 10)) 14 | } 15 | 16 | func TestBuffer(t *testing.T) { 17 | // real tests are in another package 18 | Buffer[int](th.FromRange(0, 10), 5) 19 | } 20 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | - "v*.*.*-*" 8 | 9 | jobs: 10 | release: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v5 14 | 15 | - name: Create Release 16 | uses: softprops/action-gh-release@v2 17 | with: 18 | prerelease: ${{ contains(github.ref, '-') }} 19 | generate_release_notes: true 20 | -------------------------------------------------------------------------------- /mockapi/files.go: -------------------------------------------------------------------------------- 1 | package mockapi 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | // DownloadFile simulates a file download. It returns the whole content as []byte. 10 | func DownloadFile(ctx context.Context, url string) ([]byte, error) { 11 | randomSleep(ctx, 1000*time.Millisecond) 12 | if err := ctx.Err(); err != nil { 13 | return nil, err 14 | } 15 | 16 | return []byte(fmt.Sprintf("This is the content of %s", url)), nil 17 | } 18 | -------------------------------------------------------------------------------- /internal/core/util.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | func Drain[A any](in <-chan A) { 4 | for range in { 5 | } 6 | } 7 | 8 | func Discard[A any](in <-chan A) { 9 | go Drain(in) 10 | } 11 | 12 | func Buffer[A any](in <-chan A, size int) <-chan A { 13 | // we use size-1 since 1 additional item is held on the stack (x variable) 14 | out := make(chan A, size-1) 15 | 16 | go func() { 17 | defer close(out) 18 | for x := range in { 19 | out <- x 20 | } 21 | }() 22 | 23 | return out 24 | } 25 | -------------------------------------------------------------------------------- /internal/core/transform.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | func FilterMap[A, B any](in <-chan A, n int, f func(A) (B, bool)) <-chan B { 4 | if in == nil { 5 | return nil 6 | } 7 | 8 | out := make(chan B) 9 | 10 | Loop(in, out, n, func(a A) { 11 | b, keep := f(a) 12 | if keep { 13 | out <- b 14 | } 15 | }) 16 | 17 | return out 18 | } 19 | 20 | func OrderedFilterMap[A, B any](in <-chan A, n int, f func(A) (B, bool)) <-chan B { 21 | if in == nil { 22 | return nil 23 | } 24 | 25 | out := make(chan B) 26 | OrderedLoop(in, out, n, func(a A, canWrite <-chan struct{}) { 27 | y, keep := f(a) 28 | <-canWrite 29 | if keep { 30 | out <- y 31 | } 32 | }) 33 | 34 | return out 35 | } 36 | -------------------------------------------------------------------------------- /helpers_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | func toSliceAndErrors[A any](in <-chan Try[A]) ([]A, []string) { 4 
| var values []A 5 | var errors []string 6 | 7 | for x := range in { 8 | if x.Error != nil { 9 | errors = append(errors, x.Error.Error()) 10 | continue 11 | } 12 | 13 | values = append(values, x.Value) 14 | } 15 | 16 | return values, errors 17 | } 18 | 19 | func replaceWithError[A comparable](in <-chan Try[A], value A, err error) <-chan Try[A] { 20 | out := make(chan Try[A]) 21 | 22 | go func() { 23 | defer close(out) 24 | 25 | for x := range in { 26 | if x.Error == nil && x.Value == value { 27 | x.Error = err 28 | } 29 | out <- x 30 | } 31 | }() 32 | 33 | return out 34 | } 35 | -------------------------------------------------------------------------------- /internal/core/once.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // OnceWithWait is like sync.Once, but also allows waiting until the first call is complete. 9 | type OnceWithWait struct { 10 | once sync.Once 11 | done chan struct{} // used in Wait 12 | fastDone atomic.Bool // used in WasCalled 13 | initOnce sync.Once 14 | } 15 | 16 | func (o *OnceWithWait) init() { 17 | o.initOnce.Do(func() { 18 | o.done = make(chan struct{}) 19 | }) 20 | } 21 | 22 | // Do executes the function f only once, no matter how many times Do is called. 23 | // It also signals any goroutines waiting on Wait(). 24 | func (o *OnceWithWait) Do(f func()) { 25 | o.once.Do(func() { 26 | o.init() 27 | f() 28 | o.fastDone.Store(true) 29 | close(o.done) 30 | }) 31 | } 32 | 33 | // Wait blocks until the first call to Do is complete. 34 | // It returns immediately if Do has already been called. 35 | func (o *OnceWithWait) Wait() { 36 | o.init() 37 | <-o.done 38 | } 39 | 40 | // WasCalled returns true if Do has been called. 41 | func (o *OnceWithWait) WasCalled() bool { 42 | return o.fastDone.Load() 43 | } 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Viktor Nikolaiev 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /internal/core/util_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func TestDrain(t *testing.T) { 11 | in := th.FromRange(0, 100) 12 | Drain(in) 13 | th.ExpectDrainedChan(t, in) 14 | } 15 | 16 | func TestDiscard(t *testing.T) { 17 | th.ExpectNotHang(t, 10*time.Second, func() { 18 | in := make(chan int) 19 | Discard(in) 20 | 21 | // able write in the main goroutine 22 | in <- 1 23 | in <- 2 24 | close(in) 25 | }) 26 | } 27 | 28 | func TestBuffer(t *testing.T) { 29 | trySend := func(ch chan<- int, x int) bool { 30 | select { 31 | case ch <- x: 32 | return true 33 | case <-time.After(100 * time.Millisecond): 34 | return false 35 | } 36 | } 37 | 38 | in := make(chan int) 39 | inBuf := Buffer(in, 2) 40 | _ = inBuf 41 | 42 | th.ExpectValue(t, trySend(in, 1), true) 43 | th.ExpectValue(t, trySend(in, 2), true) 44 | th.ExpectValue(t, trySend(in, 3), false) 45 | 46 | x, ok := <-inBuf 47 | th.ExpectValue(t, x, 1) 48 | th.ExpectValue(t, ok, true) 49 | 50 | th.ExpectValue(t, trySend(in, 4), true) 51 | 52 | close(in) 53 | inSlice := th.ToSlice(inBuf) 54 | th.ExpectSlice(t, inSlice, []int{2, 4}) 55 | } 56 | -------------------------------------------------------------------------------- /internal/core/once_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync/atomic" 5 | "testing" 6 | "time" 7 | 8 | "github.com/destel/rill/internal/th" 9 | ) 10 | 11 | func TestOnceWithWait(t *testing.T) { 12 | t.Run("Do called once", func(t *testing.T) { 13 | var o OnceWithWait 14 | var calls atomic.Int64 15 | 16 | th.ExpectValue(t, o.WasCalled(), false) 17 | 18 | for i := 0; i < 5; i++ { 19 | go func() { 20 | o.Do(func() { 21 | calls.Add(1) 22 | }) 23 | }() 24 | } 25 | 26 | time.Sleep(1 * time.Second) 27 | 28 | th.ExpectValue(t, calls.Load(), 1) 29 | th.ExpectValue(t, o.WasCalled(), true) 30 | }) 31 | 32 | t.Run("Wait after Do", func(t *testing.T) { 33 | th.ExpectNotHang(t, 1*time.Second, func() { 34 | var o OnceWithWait 35 | o.Do(func() {}) 36 | o.Wait() 37 | }) 38 | }) 39 | 40 | t.Run("Wait before Do", func(t *testing.T) { 41 | th.ExpectNotHang(t, 1*time.Second, func() { 42 | var o OnceWithWait 43 | 44 | go func() { 45 | time.Sleep(500 * time.Millisecond) 46 | o.Do(func() {}) 47 | }() 48 | 49 | o.Wait() 50 | }) 51 | }) 52 | 53 | t.Run("Wait without Do", func(t *testing.T) { 54 | th.ExpectHang(t, 1*time.Second, func() { 55 | var o OnceWithWait 56 | o.Wait() 57 | }) 58 | }) 59 | } 60 | -------------------------------------------------------------------------------- /internal/core/merge_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func TestMerge(t *testing.T) { 11 | t.Run("empty", func(t *testing.T) { 12 | out := Merge[string]() 13 | th.ExpectValue(t, out, nil) 14 | }) 15 | 16 | for _, numChans := range []int{1, 3, 5, 10} { 17 | t.Run(th.Name("correctness", numChans), func(t *testing.T) { 18 | ins := make([]<-chan int, numChans) 19 | 20 | for i := 0; i < numChans; i++ { 21 | ins[i] = th.FromRange(i*10, (i+1)*10) 22 | } 23 | 24 | out := Merge(ins...) 
25 | outSlice := th.ToSlice(out) 26 | 27 | expectedSlice := make([]int, 0, numChans*10) 28 | for i := 0; i < numChans*10; i++ { 29 | expectedSlice = append(expectedSlice, i) 30 | } 31 | 32 | th.Sort(outSlice) 33 | th.ExpectSlice(t, outSlice, expectedSlice) 34 | }) 35 | 36 | t.Run(th.Name("nil hang", numChans), func(t *testing.T) { 37 | ins := make([]<-chan int, numChans) 38 | 39 | for i := 0; i < numChans-1; i++ { 40 | ins[i] = th.FromRange(i*10, (i+1)*10) 41 | } 42 | 43 | // make last channel nil 44 | ins[numChans-1] = nil 45 | 46 | out := Merge(ins...) 47 | 48 | th.ExpectNeverClosedChan(t, out, 1*time.Second) 49 | }) 50 | 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /.github/workflows/check.yaml: -------------------------------------------------------------------------------- 1 | name: Check 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | tags-ignore: [ "**" ] 7 | pull_request: 8 | 9 | 10 | jobs: 11 | gotest: 12 | strategy: 13 | matrix: 14 | go-version: [1.21.x, 1.25.x] 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v5 18 | 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version: ${{ matrix.go-version }} 22 | 23 | - name: Run tests 24 | run: go test -race ./... 25 | 26 | 27 | coverage: 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v5 31 | 32 | - uses: actions/setup-go@v5 33 | with: 34 | go-version: '1.25.x' 35 | 36 | - name: Run coverage 37 | run: go test -coverprofile=coverage.out -covermode=atomic $(go list ./... | grep -v internal/th | grep -v mockapi) 38 | 39 | - name: Upload coverage reports to Codecov 40 | uses: codecov/codecov-action@v5.4.3 41 | with: 42 | token: ${{ secrets.CODECOV_TOKEN }} 43 | slug: destel/rill 44 | 45 | - name: Upload coverage reports to Coveralls 46 | uses: coverallsapp/github-action@v2.3.6 47 | with: 48 | github-token: ${{ secrets.github_token }} 49 | file: coverage.out 50 | format: golang 51 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import "github.com/destel/rill/internal/core" 4 | 5 | // Drain consumes and discards all items from an input channel, blocking until the channel is closed. 6 | func Drain[A any](in <-chan A) { 7 | core.Drain(in) 8 | } 9 | 10 | // Discard is a non-blocking function that discards all items from an input channel. 11 | func Discard[A any](in <-chan A) { 12 | core.Discard(in) 13 | } 14 | 15 | // DrainNB is a non-blocking version of [Drain]. It does draining in a separate goroutine. 16 | // 17 | // Deprecated: use [Discard] instead 18 | func DrainNB[A any](in <-chan A) { 19 | core.Discard(in) 20 | } 21 | 22 | // Buffer takes a channel of items and returns a buffered channel of exact same items in the same order. 23 | // This can be useful for preventing write operations on the input channel from blocking, especially if subsequent stages 24 | // in the processing pipeline are slow. 25 | // Buffering allows up to size items to be held in memory before back pressure is applied to the upstream producer. 26 | // 27 | // Typical usage of Buffer might look like this: 28 | // 29 | // users := getUsers(ctx, companyID) 30 | // users = rill.Buffer(users, 100) 31 | // // Now work with the users channel as usual. 32 | // // Up to 100 users can be buffered if subsequent stages of the pipeline are slow. 
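//
// As a hedged continuation of the example above (getUsers, processUser and the *User type are
// hypothetical and not part of this package), the buffered stream is then consumed as usual:
//
//	err := rill.ForEach(users, 5, func(u *User) error {
//		return processUser(ctx, u) // hypothetical processing function
//	})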
33 | func Buffer[A any](in <-chan A, size int) <-chan A { 34 | return core.Buffer(in, size) 35 | } 36 | -------------------------------------------------------------------------------- /batch_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func TestBatch(t *testing.T) { 11 | // most logic is covered by the chans pkg tests 12 | 13 | t.Run("correctness", func(t *testing.T) { 14 | in := FromChan(th.FromRange(0, 10), fmt.Errorf("err0")) 15 | in = replaceWithError(in, 5, fmt.Errorf("err5")) 16 | in = replaceWithError(in, 7, fmt.Errorf("err7")) 17 | 18 | batches, errs := toSliceAndErrors(Batch(in, 3, -1)) 19 | 20 | th.ExpectValue(t, len(batches), 3) 21 | th.ExpectSlice(t, batches[0], []int{0, 1, 2}) 22 | th.ExpectSlice(t, batches[1], []int{3, 4, 6}) 23 | th.ExpectSlice(t, batches[2], []int{8, 9}) 24 | 25 | th.ExpectSlice(t, errs, []string{"err0", "err5", "err7"}) 26 | }) 27 | 28 | } 29 | 30 | func TestUnbatch(t *testing.T) { 31 | // most logic is covered by the common package tests 32 | 33 | t.Run("correctness", func(t *testing.T) { 34 | in := FromSlice([][]int{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}}, nil) 35 | in = OrderedMap(in, 1, func(x []int) ([]int, error) { 36 | if x[0] == 3 { 37 | return nil, fmt.Errorf("err3") 38 | } 39 | if x[0] == 7 { 40 | return nil, fmt.Errorf("err7") 41 | } 42 | return x, nil 43 | }) 44 | 45 | values, errs := toSliceAndErrors(Unbatch(in)) 46 | 47 | th.ExpectSlice(t, values, []int{1, 2, 5, 6, 9, 10}) 48 | th.ExpectSlice(t, errs, []string{"err3", "err7"}) 49 | }) 50 | } 51 | -------------------------------------------------------------------------------- /internal/core/merge.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | func fastMerge[A any](ins []<-chan A) <-chan A { 8 | // len(ins) must be between 2 and 5 9 | 10 | remaining := len(ins) 11 | for len(ins) < 5 { 12 | ins = append(ins, nil) 13 | } 14 | 15 | out := make(chan A) 16 | 17 | go func() { 18 | defer close(out) 19 | 20 | var a A 21 | var ok bool 22 | var i int 23 | 24 | for { 25 | if remaining == 0 { 26 | return 27 | } 28 | 29 | select { 30 | case a, ok = <-ins[0]: 31 | i = 0 32 | case a, ok = <-ins[1]: 33 | i = 1 34 | case a, ok = <-ins[2]: 35 | i = 2 36 | case a, ok = <-ins[3]: 37 | i = 3 38 | case a, ok = <-ins[4]: 39 | i = 4 40 | } 41 | 42 | if !ok { 43 | remaining-- 44 | ins[i] = nil 45 | continue 46 | } 47 | 48 | out <- a 49 | } 50 | }() 51 | 52 | return out 53 | } 54 | 55 | func slowMerge[A any](ins []<-chan A) <-chan A { 56 | out := make(chan A) 57 | 58 | var wg sync.WaitGroup 59 | for _, in := range ins { 60 | in1 := in 61 | wg.Add(1) 62 | go func() { 63 | defer wg.Done() 64 | for x := range in1 { 65 | out <- x 66 | } 67 | }() 68 | } 69 | 70 | go func() { 71 | wg.Wait() 72 | close(out) 73 | }() 74 | 75 | return out 76 | } 77 | 78 | func Merge[A any](ins ...<-chan A) <-chan A { 79 | switch len(ins) { 80 | case 0: 81 | return nil 82 | case 1: 83 | return ins[0] 84 | case 2, 3, 4, 5: 85 | return fastMerge(ins) 86 | default: 87 | return slowMerge(ins) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /batch.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/destel/rill/internal/core" 7 | ) 8 | 9 | // 
Batch take a stream of items and returns a stream of batches based on a maximum size and a timeout. 10 | // 11 | // A batch is emitted when one of the following conditions is met: 12 | // - The batch reaches the maximum size 13 | // - The time since the first item was added to the batch exceeds the timeout 14 | // - The input stream is closed 15 | // 16 | // This function never emits empty batches. To disable the timeout and emit batches only based on the size, 17 | // set the timeout to -1. Setting the timeout to zero is not supported and will result in a panic 18 | // 19 | // This is a non-blocking ordered function that processes items sequentially. 20 | // 21 | // See the package documentation for more information on non-blocking ordered functions and error handling. 22 | func Batch[A any](in <-chan Try[A], size int, timeout time.Duration) <-chan Try[[]A] { 23 | values, errs := ToChans(in) 24 | batches := core.Batch(values, size, timeout) 25 | return FromChans(batches, errs) 26 | } 27 | 28 | // Unbatch is the inverse of [Batch]. It takes a stream of batches and returns a stream of individual items. 29 | // 30 | // This is a non-blocking ordered function that processes items sequentially. 31 | // See the package documentation for more information on non-blocking ordered functions and error handling. 32 | func Unbatch[A any](in <-chan Try[[]A]) <-chan Try[A] { 33 | batches, errs := ToChans(in) 34 | values := core.Unbatch(batches) 35 | return FromChans(values, errs) 36 | } 37 | -------------------------------------------------------------------------------- /internal/core/transform_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func universalFilterMap[A, B any](ord bool, in <-chan A, n int, f func(A) (B, bool)) <-chan B { 11 | if ord { 12 | return OrderedFilterMap(in, n, f) 13 | } 14 | return FilterMap(in, n, f) 15 | } 16 | 17 | func TestFilterMap(t *testing.T) { 18 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 19 | for _, n := range []int{1, 5} { 20 | 21 | t.Run(th.Name("nil", n), func(t *testing.T) { 22 | out := universalFilterMap(ord, nil, n, func(x int) (int, bool) { return x, true }) 23 | th.ExpectValue(t, out, nil) 24 | }) 25 | 26 | t.Run(th.Name("correctness", n), func(t *testing.T) { 27 | in := th.FromRange(0, 20) 28 | out := universalFilterMap(ord, in, n, func(x int) (string, bool) { 29 | return fmt.Sprintf("%03d", x), x%2 == 0 30 | }) 31 | 32 | outSlice := th.ToSlice(out) 33 | 34 | expectedSlice := make([]string, 0, 20) 35 | for i := 0; i < 20; i++ { 36 | if i%2 != 0 { 37 | continue 38 | } 39 | expectedSlice = append(expectedSlice, fmt.Sprintf("%03d", i)) 40 | } 41 | 42 | th.Sort(outSlice) 43 | th.ExpectSlice(t, outSlice, expectedSlice) 44 | }) 45 | 46 | t.Run(th.Name("ordering", n), func(t *testing.T) { 47 | in := th.FromRange(0, 20000) 48 | 49 | out := universalFilterMap(ord, in, n, func(x int) (int, bool) { 50 | return x, x%2 == 0 51 | }) 52 | 53 | outSlice := th.ToSlice(out) 54 | 55 | if ord || n == 1 { 56 | th.ExpectSorted(t, outSlice) 57 | } else { 58 | th.ExpectUnsorted(t, outSlice) 59 | } 60 | }) 61 | 62 | } 63 | }) 64 | } 65 | -------------------------------------------------------------------------------- /example123_test.go: -------------------------------------------------------------------------------- 1 | //go:build go1.23 2 | 3 | package rill_test 4 | 5 | import ( 6 | "fmt" 7 | "slices" 8 | 9 | 
"github.com/destel/rill" 10 | ) 11 | 12 | func ExampleFromSeq() { 13 | // Start with an iterator that yields numbers from 1 to 10 14 | numbersSeq := slices.Values([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) 15 | 16 | // Convert the iterator into a stream 17 | numbers := rill.FromSeq(numbersSeq, nil) 18 | 19 | // Transform each number 20 | // Concurrency = 3 21 | squares := rill.Map(numbers, 3, func(x int) (int, error) { 22 | return square(x), nil 23 | }) 24 | 25 | printStream(squares) 26 | } 27 | 28 | func ExampleFromSeq2() { 29 | // Create an iter.Seq2 iterator that yields numbers from 1 to 10 30 | numberSeq := func(yield func(int, error) bool) { 31 | for i := 1; i <= 10; i++ { 32 | if !yield(i, nil) { 33 | return 34 | } 35 | } 36 | } 37 | 38 | // Convert the iterator into a stream 39 | numbers := rill.FromSeq2(numberSeq) 40 | 41 | // Transform each number 42 | // Concurrency = 3 43 | squares := rill.Map(numbers, 3, func(x int) (int, error) { 44 | return square(x), nil 45 | }) 46 | 47 | printStream(squares) 48 | } 49 | 50 | func ExampleToSeq2() { 51 | // Convert a slice of numbers into a stream 52 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 53 | 54 | // Transform each number 55 | // Concurrency = 3 56 | squares := rill.Map(numbers, 3, func(x int) (int, error) { 57 | return square(x), nil 58 | }) 59 | 60 | // Convert the stream into an iterator and use for-range to print the results 61 | for val, err := range rill.ToSeq2(squares) { 62 | if err != nil { 63 | fmt.Println("Error:", err) 64 | break // cleanup is done regardless of early exit 65 | } 66 | fmt.Printf("%+v\n", val) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /internal/th/helpers.go: -------------------------------------------------------------------------------- 1 | package th 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | "sync" 8 | "testing" 9 | ) 10 | 11 | func FromSlice[A any](slice []A) <-chan A { 12 | out := make(chan A, len(slice)) 13 | for _, a := range slice { 14 | out <- a 15 | } 16 | close(out) 17 | return out 18 | } 19 | 20 | func ToSlice[A any](in <-chan A) []A { 21 | var res []A 22 | for x := range in { 23 | res = append(res, x) 24 | } 25 | return res 26 | } 27 | 28 | func FromRange(start, end int) <-chan int { 29 | ch := make(chan int, end-start) 30 | for i := start; i < end; i++ { 31 | ch <- i 32 | } 33 | close(ch) 34 | return ch 35 | } 36 | 37 | func Send[T any](ch chan<- T, items ...T) { 38 | for _, item := range items { 39 | ch <- item 40 | } 41 | } 42 | 43 | func Sort[A ordered](s []A) { 44 | sort.Slice(s, func(i, j int) bool { 45 | return s[i] < s[j] 46 | }) 47 | } 48 | 49 | func DoConcurrently(ff ...func()) { 50 | var wg sync.WaitGroup 51 | 52 | for _, f := range ff { 53 | f := f 54 | wg.Add(1) 55 | go func() { 56 | defer wg.Done() 57 | f() 58 | }() 59 | } 60 | 61 | wg.Wait() 62 | } 63 | 64 | func DoConcurrentlyN(n int, f func(i int)) { 65 | var wg sync.WaitGroup 66 | 67 | for i := 0; i < n; i++ { 68 | i := i 69 | wg.Add(1) 70 | go func() { 71 | defer wg.Done() 72 | f(i) 73 | }() 74 | } 75 | 76 | wg.Wait() 77 | } 78 | 79 | // Name generates a test name. 80 | // Works the same way as fmt.Sprint, but adds spaces between all arguments. 81 | func Name(args ...any) string { 82 | res := fmt.Sprintln(args...) 
83 | return strings.TrimSpace(res) 84 | } 85 | 86 | func TestBothOrderings(t *testing.T, f func(t *testing.T, ord bool)) { 87 | t.Run("unordered", func(t *testing.T) { 88 | f(t, false) 89 | }) 90 | 91 | t.Run("ordered", func(t *testing.T) { 92 | f(t, true) 93 | }) 94 | } 95 | -------------------------------------------------------------------------------- /iter.go: -------------------------------------------------------------------------------- 1 | //go:build go1.23 2 | 3 | package rill 4 | 5 | import ( 6 | "iter" 7 | ) 8 | 9 | // FromSeq converts an iterator into a stream. 10 | // If err is not nil function returns a stream with a single error. 11 | // 12 | // Such function signature allows concise wrapping of functions that return an 13 | // iterator and an error: 14 | // 15 | // stream := rill.FromSeq(someFunc()) 16 | func FromSeq[A any](seq iter.Seq[A], err error) <-chan Try[A] { 17 | if seq == nil && err == nil { 18 | return nil 19 | } 20 | if err != nil { 21 | out := make(chan Try[A], 1) 22 | out <- Try[A]{Error: err} 23 | close(out) 24 | return out 25 | 26 | } 27 | 28 | out := make(chan Try[A]) 29 | go func() { 30 | for val := range seq { 31 | out <- Wrap(val, nil) 32 | } 33 | close(out) 34 | }() 35 | return out 36 | } 37 | 38 | // FromSeq2 converts an iterator of value-error pairs into a stream. 39 | func FromSeq2[A any](seq iter.Seq2[A, error]) <-chan Try[A] { 40 | if seq == nil { 41 | return nil 42 | } 43 | 44 | out := make(chan Try[A]) 45 | go func() { 46 | for val, err := range seq { 47 | out <- Wrap(val, err) 48 | } 49 | close(out) 50 | }() 51 | return out 52 | } 53 | 54 | // ToSeq2 converts an input stream into an iterator of value-error pairs. 55 | // 56 | // This is a blocking ordered function that processes items sequentially. 57 | // It does not return on the first encountered error. Instead, it iterates over all value-error 58 | // pairs, either until the input stream is fully consumed or the loop is broken by the caller. 59 | // So all error handling, if needed, should be done inside the iterator (for-range loop body). 60 | // 61 | // See the package documentation for more information on blocking ordered functions. 
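//
// A minimal usage sketch (stream is any <-chan Try[A]; breaking on the first error is just one
// possible policy, not a requirement of this function):
//
//	for v, err := range ToSeq2(stream) {
//		if err != nil {
//			break // safe: the input stream is discarded on early exit
//		}
//		fmt.Println(v)
//	}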
62 | func ToSeq2[A any](in <-chan Try[A]) iter.Seq2[A, error] { 63 | return func(yield func(A, error) bool) { 64 | defer Discard(in) 65 | for x := range in { 66 | if !yield(x.Value, x.Error) { 67 | return 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /internal/core/delay.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/destel/rill/internal/ringbuffer" 8 | ) 9 | 10 | func infiniteBuffer[A any](in <-chan A) <-chan A { 11 | const shrinkInterval = 60 * time.Second 12 | 13 | out := make(chan A) 14 | go func() { 15 | defer close(out) 16 | 17 | buf := ringbuffer.Buffer[A]{} 18 | 19 | var nextValue A 20 | var hasNextValue bool 21 | 22 | var out1 chan<- A 23 | 24 | shrinkTicker := time.NewTicker(shrinkInterval) 25 | defer shrinkTicker.Stop() 26 | canShrink := true 27 | 28 | MainLoop: 29 | for { 30 | if !hasNextValue { 31 | nextValue, hasNextValue = buf.Read() 32 | } 33 | 34 | if !hasNextValue { 35 | if in == nil { 36 | return 37 | } 38 | out1 = nil 39 | } else { 40 | out1 = out 41 | } 42 | 43 | select { 44 | case v, ok := <-in: 45 | if !ok { 46 | in = nil 47 | continue MainLoop 48 | } 49 | buf.Write(v) 50 | canShrink = canShrink && buf.CanShrink() 51 | 52 | case out1 <- nextValue: 53 | hasNextValue = false 54 | 55 | case <-shrinkTicker.C: 56 | fmt.Println("<-shrinkTicker.C") 57 | if canShrink { 58 | buf.Shrink() 59 | } 60 | canShrink = true 61 | 62 | } 63 | } 64 | }() 65 | 66 | return out 67 | } 68 | 69 | type delayedValue[A any] struct { 70 | Value A 71 | SendAt time.Time 72 | } 73 | 74 | // Delay postpones the delivery of items from an input channel by a specified duration, maintaining the order. 75 | // Useful for adding delays in processing or simulating latency. 76 | func Delay[A any](in <-chan A, delay time.Duration) <-chan A { 77 | wrapped := make(chan delayedValue[A]) 78 | go func() { 79 | defer close(wrapped) 80 | for v := range in { 81 | wrapped <- delayedValue[A]{v, time.Now().Add(delay)} 82 | } 83 | }() 84 | 85 | // buffering is needed to freely use sleeps in the loop below 86 | buffered := infiniteBuffer(wrapped) 87 | 88 | out := make(chan A) 89 | go func() { 90 | defer close(out) 91 | for item := range buffered { 92 | sendIn := item.SendAt.Sub(time.Now()) 93 | if sendIn > 0 { 94 | time.Sleep(sendIn) 95 | } 96 | out <- item.Value 97 | } 98 | }() 99 | 100 | return out 101 | } 102 | -------------------------------------------------------------------------------- /internal/th/concurrency_monitor.go: -------------------------------------------------------------------------------- 1 | package th 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | // ConcurrencyMonitor measures the maximum concurrency level reached by goroutines. 9 | // It enforces maximum possible concurrency by requiring each goroutine to call Inc() at the start and Dec() at the end of its execution. 10 | // Goroutines calling Inc() are blocked until the concurrency level remains stable for a specified time window, ensuring that concurrency peaks are accurately captured. 11 | // The highest level of concurrency observed can be retrieved using the Max() method. 
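//
// A typical usage sketch, mirroring how the tests in this repository use it:
//
//	monitor := NewConcurrencyMonitor(1 * time.Second)
//
//	// inside each concurrently executed callback:
//	monitor.Inc()
//	defer monitor.Dec()
//
//	// after all goroutines have finished:
//	observed := monitor.Max()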
12 | type ConcurrencyMonitor struct { 13 | cond *sync.Cond 14 | current int 15 | max int 16 | 17 | target int 18 | window time.Duration 19 | 20 | lastChangeAt time.Time 21 | timer *time.Timer 22 | timerFired bool 23 | } 24 | 25 | func NewConcurrencyMonitor(window time.Duration) *ConcurrencyMonitor { 26 | c := &ConcurrencyMonitor{ 27 | cond: sync.NewCond(&sync.Mutex{}), 28 | window: window, 29 | } 30 | 31 | c.timer = time.AfterFunc(1*time.Hour, func() { 32 | c.cond.L.Lock() 33 | defer c.cond.L.Unlock() 34 | 35 | c.timerFired = true 36 | c.cond.Broadcast() 37 | }) 38 | 39 | return c 40 | } 41 | 42 | func (c *ConcurrencyMonitor) Inc() { 43 | c.cond.L.Lock() 44 | defer c.cond.L.Unlock() 45 | 46 | c.lastChangeAt = time.Now() 47 | if !c.timerFired { 48 | c.timer.Reset(c.window) 49 | } 50 | 51 | c.current++ 52 | if c.max < c.current { 53 | c.max = c.current 54 | } 55 | 56 | // block all goroutines unless "window" has passed since the last counter change 57 | for !c.timerFired && time.Since(c.lastChangeAt) < c.window { 58 | c.cond.Wait() 59 | } 60 | } 61 | 62 | func (c *ConcurrencyMonitor) Dec() { 63 | c.cond.L.Lock() 64 | defer c.cond.L.Unlock() 65 | 66 | c.lastChangeAt = time.Now() 67 | if !c.timerFired { 68 | c.timer.Reset(c.window) 69 | } 70 | 71 | c.current-- 72 | c.cond.Broadcast() 73 | } 74 | 75 | func (c *ConcurrencyMonitor) Reset() int { 76 | c.cond.L.Lock() 77 | defer c.cond.L.Unlock() 78 | 79 | if c.timer != nil { 80 | c.timer.Stop() 81 | } 82 | 83 | c.current = 0 84 | c.max = 0 85 | c.lastChangeAt = time.Time{} 86 | c.timer.Reset(1 * time.Hour) 87 | c.timerFired = false 88 | return c.max 89 | } 90 | 91 | func (c *ConcurrencyMonitor) Max() int { 92 | c.cond.L.Lock() 93 | defer c.cond.L.Unlock() 94 | 95 | return c.max 96 | } 97 | -------------------------------------------------------------------------------- /internal/core/batch.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | // Batch groups items from an input channel into batches based on a maximum size and a timeout. 9 | // A batch is emitted when it reaches the maximum size, the timeout expires, or the input channel closes. 10 | // This function never emits empty batches. The timeout countdown starts when the first item is added to a new batch. 11 | // To emit batches only when full, set the timeout to -1. Zero timeout is not supported and will panic. 
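//
// A minimal usage sketch (size and timeout values are arbitrary):
//
//	in := make(chan int)
//	batches := Batch(in, 10, time.Second) // emits []int slices of up to 10 items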
12 | func Batch[A any](in <-chan A, size int, timeout time.Duration) <-chan []A { 13 | if in == nil { 14 | return nil 15 | } 16 | 17 | out := make(chan []A) 18 | 19 | switch { 20 | case timeout == 0: 21 | panic(fmt.Errorf("zero timeout is not supported yet")) 22 | 23 | case timeout < 0: 24 | // infinite timeout 25 | go func() { 26 | defer close(out) 27 | var batch []A 28 | for a := range in { 29 | batch = append(batch, a) 30 | if len(batch) >= size { 31 | out <- batch 32 | batch = make([]A, 0, size) 33 | } 34 | } 35 | if len(batch) > 0 { 36 | out <- batch 37 | } 38 | }() 39 | 40 | default: 41 | // finite timeout 42 | go func() { 43 | batch := make([]A, 0, size) 44 | t := time.NewTicker(1 * time.Hour) 45 | t.Stop() 46 | 47 | flush := func() { 48 | if len(batch) > 0 { 49 | out <- batch 50 | batch = make([]A, 0, size) 51 | } 52 | 53 | t.Stop() 54 | // consume a tick that might have been sent while we were flushing 55 | select { 56 | case <-t.C: 57 | default: 58 | } 59 | } 60 | 61 | for { 62 | select { 63 | case <-t.C: 64 | // timeout 65 | flush() 66 | 67 | case a, ok := <-in: 68 | if !ok { 69 | // end of input 70 | flush() 71 | close(out) 72 | return 73 | } 74 | 75 | // got new item 76 | batch = append(batch, a) 77 | 78 | if len(batch) == 1 { 79 | // we've just started collecting a new batch. 80 | // start the timer to flush the batch after the timeout. 81 | t.Reset(timeout) 82 | } 83 | 84 | if len(batch) >= size { 85 | // batch is full 86 | flush() 87 | } 88 | } 89 | 90 | } 91 | }() 92 | 93 | } 94 | 95 | return out 96 | } 97 | 98 | // Unbatch is the inverse of Batch. It takes a channel of batches and emits individual items. 99 | func Unbatch[A any](in <-chan []A) <-chan A { 100 | if in == nil { 101 | return nil 102 | } 103 | 104 | out := make(chan A) 105 | 106 | go func() { 107 | defer close(out) 108 | for batch := range in { 109 | for _, a := range batch { 110 | out <- a 111 | } 112 | } 113 | }() 114 | 115 | return out 116 | } 117 | -------------------------------------------------------------------------------- /internal/ringbuffer/buffer.go: -------------------------------------------------------------------------------- 1 | package ringbuffer 2 | 3 | const minCap = 16 4 | 5 | type Buffer[T any] struct { 6 | data []T 7 | offset, size int 8 | } 9 | 10 | func (b *Buffer[T]) Cap() int { 11 | return len(b.data) 12 | } 13 | 14 | func (b *Buffer[T]) Len() int { 15 | return b.size 16 | } 17 | 18 | // write to end 19 | func (b *Buffer[T]) Write(v T) { 20 | b.Grow(1) 21 | 22 | pos := (b.offset + b.size) % len(b.data) 23 | b.data[pos] = v 24 | b.size++ 25 | } 26 | 27 | // read from start 28 | func (b *Buffer[T]) Read() (T, bool) { 29 | if b.size == 0 { 30 | var zero T 31 | return zero, false 32 | } 33 | 34 | v := b.data[b.offset] 35 | b.Discard() 36 | return v, true 37 | } 38 | 39 | func (b *Buffer[T]) Peek() (T, bool) { 40 | if b.size == 0 { 41 | var zero T 42 | return zero, false 43 | } 44 | 45 | return b.data[b.offset], true 46 | } 47 | 48 | func (b *Buffer[T]) Discard() bool { 49 | if b.size == 0 { 50 | return false 51 | } 52 | 53 | var zero T 54 | b.data[b.offset] = zero // let GC do its work 55 | 56 | b.offset = (b.offset + 1) % len(b.data) 57 | b.size-- 58 | return true 59 | } 60 | 61 | // change the capacity and defragment the buffer 62 | // panics if newCap is less than buf.size 63 | func (b *Buffer[T]) setCap(newCap int) { 64 | newData := make([]T, newCap) 65 | 66 | end := b.offset + b.size 67 | if end <= len(b.data) { 68 | copy(newData, b.data[b.offset:end]) 69 | } else { 70 | copied := 
copy(newData, b.data[b.offset:]) 71 | copy(newData[copied:], b.data[:b.size-copied]) 72 | } 73 | 74 | b.data = newData 75 | b.offset = 0 76 | } 77 | 78 | func (b *Buffer[T]) Grow(n int) { 79 | targetSize := b.size + n 80 | targetCap := cap(b.data) 81 | 82 | if targetCap >= targetSize { 83 | return // enough 84 | } 85 | 86 | if targetCap < minCap { 87 | targetCap = minCap 88 | } 89 | for targetCap < targetSize { 90 | targetCap <<= 1 // double the capacity 91 | } 92 | 93 | b.setCap(targetCap) 94 | } 95 | 96 | func (b *Buffer[T]) CanShrink() bool { 97 | half := cap(b.data) >> 1 98 | return half >= minCap && half >= b.size 99 | } 100 | 101 | func (b *Buffer[T]) Shrink() { 102 | if b.CanShrink() { 103 | b.setCap(cap(b.data) >> 1) 104 | } 105 | } 106 | 107 | func (b *Buffer[T]) Compact() { 108 | targetCap := cap(b.data) 109 | for { 110 | half := targetCap >> 1 111 | if half >= minCap && half >= b.size { 112 | targetCap = half 113 | } else { 114 | break 115 | } 116 | } 117 | 118 | if targetCap < cap(b.data) { 119 | b.setCap(targetCap) 120 | } 121 | } 122 | 123 | func (b *Buffer[T]) Reset() { 124 | var zero T 125 | for i := 0; i < b.size; i++ { 126 | b.data[(b.offset+i)%len(b.data)] = zero 127 | } 128 | b.offset = 0 129 | b.size = 0 130 | } 131 | -------------------------------------------------------------------------------- /internal/core/delay_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func TestInfiniteBuffer(t *testing.T) { 11 | in := make(chan int) 12 | out := infiniteBuffer(in) 13 | 14 | for i := 0; i < 1000; i++ { 15 | in <- i 16 | } 17 | close(in) 18 | 19 | i := -1 20 | for v := range out { 21 | i++ 22 | th.ExpectValue(t, v, i) 23 | } 24 | th.ExpectValue(t, i, 1000-1) 25 | } 26 | 27 | func TestDelay(t *testing.T) { 28 | type Item struct { 29 | Value int 30 | SentAt time.Time 31 | } 32 | 33 | t.Run("correctness", func(t *testing.T) { 34 | const delay = 5 * time.Second 35 | const eps = 1000 * time.Millisecond // Race detector slows down the execution. 
Need to use larger epsilon 36 | 37 | in := make(chan Item) 38 | out := Delay(in, delay) 39 | 40 | go func() { 41 | defer close(in) 42 | for i := 0; i < 100000; i++ { 43 | in <- Item{Value: i, SentAt: time.Now()} 44 | } 45 | }() 46 | 47 | i := -1 48 | for item := range out { 49 | i++ 50 | th.ExpectValue(t, item.Value, i) 51 | th.ExpectValueInDelta(t, time.Since(item.SentAt), delay, eps) 52 | if t.Failed() { 53 | t.FailNow() 54 | } 55 | } 56 | th.ExpectValue(t, i, 100000-1) 57 | }) 58 | 59 | t.Run("slow producer", func(t *testing.T) { 60 | const delay = 5 * time.Second 61 | const eps = 1000 * time.Millisecond 62 | 63 | in := make(chan Item) 64 | out := Delay(in, delay) 65 | 66 | go func() { 67 | defer close(in) 68 | for i := 0; i < 100; i++ { 69 | in <- Item{Value: i, SentAt: time.Now()} 70 | 71 | if i == 30 || i == 50 { 72 | time.Sleep(1 * time.Second) // make producer slow 73 | } 74 | } 75 | 76 | // let buffer be fully consumed, by the time we close the channel 77 | time.Sleep(3 * time.Second) 78 | }() 79 | 80 | i := -1 81 | for item := range out { 82 | i++ 83 | th.ExpectValue(t, item.Value, i) 84 | th.ExpectValueInDelta(t, time.Since(item.SentAt), delay, eps) 85 | } 86 | th.ExpectValue(t, i, 100-1) 87 | }) 88 | 89 | t.Run("slow consumer", func(t *testing.T) { 90 | const delay = 5 * time.Second 91 | 92 | in := make(chan Item) 93 | out := Delay(in, delay) 94 | 95 | go func() { 96 | defer close(in) 97 | for i := 0; i < 100; i++ { 98 | in <- Item{Value: i, SentAt: time.Now()} 99 | } 100 | }() 101 | 102 | i := -1 103 | for item := range out { 104 | i++ 105 | th.ExpectValue(t, item.Value, i) 106 | 107 | if i == 30 || i == 50 { 108 | time.Sleep(1 * time.Second) // make consumer slow 109 | } 110 | 111 | // less strict condition than for the fast consumer 112 | th.ExpectValueGTE(t, time.Since(item.SentAt), delay) 113 | } 114 | th.ExpectValue(t, i, 100-1) 115 | }) 116 | } 117 | -------------------------------------------------------------------------------- /internal/core/batch_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func TestBatch(t *testing.T) { 11 | t.Run("nil", func(t *testing.T) { 12 | var nilChan chan []string 13 | th.ExpectValue(t, Batch(nilChan, 10, 10*time.Second), nil) 14 | }) 15 | 16 | t.Run("fast", func(t *testing.T) { 17 | in := make(chan int) 18 | go func() { 19 | defer close(in) 20 | th.Send(in, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) 21 | }() 22 | 23 | out := Batch(in, 4, 500*time.Millisecond) 24 | 25 | outSlice := th.ToSlice(out) 26 | th.ExpectValue(t, len(outSlice), 3) 27 | th.ExpectSlice(t, outSlice[0], []int{1, 2, 3, 4}) 28 | th.ExpectSlice(t, outSlice[1], []int{5, 6, 7, 8}) 29 | th.ExpectSlice(t, outSlice[2], []int{9, 10}) 30 | }) 31 | 32 | t.Run("slow", func(t *testing.T) { 33 | in := make(chan int) 34 | go func() { 35 | defer close(in) 36 | th.Send(in, 1, 2, 3, 4, 5) 37 | time.Sleep(1 * time.Second) 38 | th.Send(in, 6, 7, 8, 9, 10) 39 | }() 40 | 41 | out := Batch(in, 4, 500*time.Millisecond) 42 | 43 | outSlice := th.ToSlice(out) 44 | th.ExpectValue(t, len(outSlice), 4) 45 | th.ExpectSlice(t, outSlice[0], []int{1, 2, 3, 4}) 46 | th.ExpectSlice(t, outSlice[1], []int{5}) 47 | th.ExpectSlice(t, outSlice[2], []int{6, 7, 8, 9}) 48 | th.ExpectSlice(t, outSlice[3], []int{10}) 49 | }) 50 | 51 | t.Run("slow wo timeout", func(t *testing.T) { 52 | in := make(chan int) 53 | go func() { 54 | defer close(in) 55 | th.Send(in, 1, 2, 3, 
4, 5) 56 | time.Sleep(1 * time.Second) 57 | th.Send(in, 6, 7, 8, 9, 10) 58 | }() 59 | 60 | out := Batch(in, 4, -1) 61 | 62 | outSlice := th.ToSlice(out) 63 | th.ExpectValue(t, len(outSlice), 3) 64 | th.ExpectSlice(t, outSlice[0], []int{1, 2, 3, 4}) 65 | th.ExpectSlice(t, outSlice[1], []int{5, 6, 7, 8}) 66 | th.ExpectSlice(t, outSlice[2], []int{9, 10}) 67 | }) 68 | 69 | for _, timeout := range []time.Duration{-1, 10 * time.Second} { 70 | t.Run(th.Name("ordering", timeout), func(t *testing.T) { 71 | in := th.FromRange(0, 20000) 72 | 73 | out := Batch(in, 1000, timeout) 74 | 75 | for batch := range out { 76 | th.ExpectSorted(t, batch) 77 | } 78 | 79 | }) 80 | } 81 | } 82 | 83 | func TestUnbatch(t *testing.T) { 84 | t.Run("nil", func(t *testing.T) { 85 | var nilChan chan []string 86 | th.ExpectValue(t, Unbatch(nilChan), nil) 87 | }) 88 | 89 | t.Run("normal", func(t *testing.T) { 90 | in := make(chan []int) 91 | go func() { 92 | defer close(in) 93 | th.Send(in, []int{1, 2, 3}, []int{4}, []int{5, 6, 7, 8}, []int{9, 10}) 94 | }() 95 | 96 | out := Unbatch(in) 97 | outSlice := th.ToSlice(out) 98 | 99 | th.ExpectSlice(t, outSlice, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) 100 | }) 101 | } 102 | -------------------------------------------------------------------------------- /internal/core/loops_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync/atomic" 5 | "testing" 6 | "time" 7 | 8 | "github.com/destel/rill/internal/th" 9 | ) 10 | 11 | func universalLoop[A, B any](ord bool, in <-chan A, done chan<- B, n int, f func(a A, canWrite <-chan struct{})) { 12 | if ord { 13 | OrderedLoop(in, done, n, f) 14 | } else { 15 | canWrite := make(chan struct{}, n) 16 | close(canWrite) 17 | 18 | Loop(in, done, n, func(a A) { 19 | f(a, canWrite) 20 | }) 21 | } 22 | } 23 | 24 | func TestLoop(t *testing.T) { 25 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 26 | for _, n := range []int{1, 5} { 27 | t.Run(th.Name("correctness", n), func(t *testing.T) { 28 | in := th.FromRange(0, 20) 29 | done := make(chan struct{}) 30 | 31 | var sum atomic.Int64 32 | 33 | universalLoop(ord, in, done, n, func(x int, canWrite <-chan struct{}) { 34 | <-canWrite 35 | sum.Add(int64(x)) 36 | }) 37 | 38 | <-done 39 | th.ExpectValue(t, sum.Load(), 19*20/2) 40 | }) 41 | 42 | t.Run(th.Name("concurrency", n), func(t *testing.T) { 43 | in := th.FromRange(0, 100) 44 | out := make(chan int) 45 | 46 | monitor := th.NewConcurrencyMonitor(1 * time.Second) 47 | 48 | universalLoop(ord, in, out, n, func(x int, canWrite <-chan struct{}) { 49 | monitor.Inc() 50 | defer monitor.Dec() 51 | 52 | <-canWrite 53 | 54 | out <- x 55 | }) 56 | 57 | Drain(out) 58 | 59 | th.ExpectValue(t, monitor.Max(), n) 60 | }) 61 | 62 | t.Run(th.Name("ordering", n), func(t *testing.T) { 63 | in := th.FromRange(0, 20000) 64 | out := make(chan int) 65 | 66 | universalLoop(ord, in, out, n, func(x int, canWrite <-chan struct{}) { 67 | 68 | <-canWrite 69 | 70 | out <- x 71 | }) 72 | 73 | outSlice := th.ToSlice(out) 74 | 75 | if ord || n == 1 { 76 | th.ExpectSorted(t, outSlice) 77 | } else { 78 | th.ExpectUnsorted(t, outSlice) 79 | } 80 | 81 | }) 82 | } 83 | 84 | }) 85 | } 86 | 87 | func TestForEach(t *testing.T) { 88 | for _, n := range []int{1, 5} { 89 | t.Run(th.Name("correctness", n), func(t *testing.T) { 90 | in := th.FromRange(0, 20) 91 | 92 | var sum atomic.Int64 93 | 94 | ForEach(in, n, func(x int) { 95 | sum.Add(int64(x)) 96 | }) 97 | 98 | th.ExpectValue(t, sum.Load(), 19*20/2) 99 | }) 100 | 
101 | t.Run(th.Name("concurrency", n), func(t *testing.T) { 102 | in := th.FromRange(0, 100) 103 | 104 | mon := th.NewConcurrencyMonitor(1 * time.Second) 105 | 106 | ForEach(in, n, func(x int) { 107 | mon.Inc() 108 | defer mon.Dec() 109 | }) 110 | 111 | th.ExpectValue(t, mon.Max(), n) 112 | }) 113 | 114 | t.Run(th.Name("ordering", n), func(t *testing.T) { 115 | in := th.FromRange(0, 20000) 116 | out := make(chan int) 117 | 118 | go func() { 119 | ForEach(in, n, func(x int) { 120 | out <- x 121 | }) 122 | close(out) 123 | }() 124 | 125 | outSlice := th.ToSlice(out) 126 | 127 | if n == 1 { 128 | th.ExpectSorted(t, outSlice) 129 | } else { 130 | th.ExpectUnsorted(t, outSlice) 131 | } 132 | }) 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /benchmark_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | const benchmarkInputSize = 100000 11 | 12 | // code called on each benchmark iteration 13 | func benchmarkIteration() { 14 | busySleep(1 * time.Microsecond) 15 | //time.Sleep(1 * time.Microsecond) 16 | //busySleep(10 * time.Microsecond) 17 | //time.Sleep(10 * time.Microsecond) 18 | } 19 | 20 | func busySleep(d time.Duration) { 21 | if d == 0 { 22 | return 23 | } 24 | 25 | start := time.Now() 26 | for time.Since(start) < d { 27 | } 28 | } 29 | 30 | func runBenchmark(b *testing.B, name string, body func(in <-chan Try[int])) { 31 | b.Run(name, func(b *testing.B) { 32 | for i := 0; i < b.N; i++ { 33 | b.StopTimer() 34 | 35 | in := make(chan Try[int]) 36 | done := make(chan struct{}) 37 | 38 | go func() { 39 | defer close(done) 40 | body(in) 41 | }() 42 | 43 | // Give body a some time to spawn goroutines 44 | time.Sleep(100 * time.Millisecond) 45 | 46 | b.StartTimer() 47 | 48 | // write to input 49 | for k := 0; k < benchmarkInputSize; k++ { 50 | in <- Try[int]{Value: k} 51 | } 52 | close(in) 53 | 54 | // wait for body to finish 55 | <-done 56 | b.StopTimer() 57 | } 58 | }) 59 | } 60 | 61 | // Benchmarks below are commented out to remove dependency on errgroup 62 | 63 | //// This benchmark uses classic goroutine-per-item + semaphore pattern. 64 | //func BenchmarkErrGroupWithSetLimit(b *testing.B) { 65 | // for _, n := range []int{1, 2, 4, 8} { 66 | // runBenchmark(b, th.Name(n), func(in <-chan Try[int]) { 67 | // var eg errgroup.Group 68 | // eg.SetLimit(n) 69 | // 70 | // for x := range in { 71 | // x := x 72 | // eg.Go(func() error { 73 | // if err := x.Error; err != nil { 74 | // return err 75 | // } 76 | // benchmarkIteration() 77 | // return nil 78 | // }) 79 | // } 80 | // 81 | // _ = eg.Wait() 82 | // }) 83 | // } 84 | //} 85 | // 86 | //// This benchmark uses much less common worker pool pattern. 
87 | //func BenchmarkErrGroupWithWorkerPool(b *testing.B) { 88 | // for _, n := range []int{1, 2, 4, 8} { 89 | // runBenchmark(b, th.Name(n), func(in <-chan Try[int]) { 90 | // var eg errgroup.Group 91 | // for i := 0; i < n; i++ { 92 | // eg.Go(func() error { 93 | // for x := range in { 94 | // if err := x.Error; err != nil { 95 | // return err 96 | // } 97 | // benchmarkIteration() 98 | // } 99 | // return nil 100 | // }) 101 | // } 102 | // _ = eg.Wait() 103 | // }) 104 | // } 105 | //} 106 | 107 | func BenchmarkForEach(b *testing.B) { 108 | for _, n := range []int{1, 2, 4, 8} { 109 | runBenchmark(b, th.Name(n), func(in <-chan Try[int]) { 110 | _ = ForEach(in, n, func(x int) error { 111 | benchmarkIteration() 112 | return nil 113 | }) 114 | }) 115 | } 116 | } 117 | 118 | func BenchmarkMapAndDrain(b *testing.B) { 119 | for _, n := range []int{1, 2, 4, 8} { 120 | runBenchmark(b, th.Name(n), func(in <-chan Try[int]) { 121 | out := Map(in, n, func(x int) (int, error) { 122 | benchmarkIteration() 123 | return x, nil 124 | }) 125 | 126 | Drain(out) 127 | }) 128 | } 129 | } 130 | 131 | func BenchmarkReduce(b *testing.B) { 132 | for _, n := range []int{1, 2, 4, 8} { 133 | runBenchmark(b, th.Name(n), func(in <-chan Try[int]) { 134 | _, _, _ = Reduce(in, n, func(x, y int) (int, error) { 135 | benchmarkIteration() 136 | return x, nil 137 | }) 138 | }) 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /iter_test.go: -------------------------------------------------------------------------------- 1 | //go:build go1.23 2 | 3 | package rill 4 | 5 | import ( 6 | "errors" 7 | "fmt" 8 | "iter" 9 | "testing" 10 | "time" 11 | 12 | "github.com/destel/rill/internal/th" 13 | ) 14 | 15 | func rangeInt(from, to int) iter.Seq[int] { 16 | return func(yield func(i int) bool) { 17 | for i := from; i < to; i++ { 18 | if !yield(i) { 19 | break 20 | } 21 | } 22 | } 23 | } 24 | 25 | func TestToSeq2(t *testing.T) { 26 | t.Run("errors", func(t *testing.T) { 27 | in := FromSeq(rangeInt(0, 20), nil) 28 | expectedErrs := []error{fmt.Errorf("err15"), fmt.Errorf("err18")} 29 | in = replaceWithError(in, 15, expectedErrs[0]) 30 | in = replaceWithError(in, 18, expectedErrs[1]) 31 | 32 | var outSlice []int 33 | var outErrs []error 34 | for i, err := range ToSeq2(in) { 35 | outSlice = append(outSlice, i) 36 | if err != nil { 37 | outErrs = append(outErrs, err) 38 | } 39 | } 40 | 41 | th.ExpectSlice(t, outSlice, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}) 42 | th.ExpectSlice(t, outErrs, expectedErrs) 43 | 44 | time.Sleep(1 * time.Second) 45 | th.ExpectDrainedChan(t, in) 46 | }) 47 | 48 | t.Run("errors with break", func(t *testing.T) { 49 | in := FromSeq(rangeInt(0, 20), nil) 50 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 51 | in = replaceWithError(in, 18, fmt.Errorf("err18")) 52 | 53 | var outSlice []int 54 | var outErr error 55 | 56 | // sceneraio: let's client side determine when to break 57 | for i, err := range ToSeq2(in) { 58 | if err != nil { 59 | outErr = err 60 | break 61 | } 62 | outSlice = append(outSlice, i) 63 | } 64 | 65 | th.ExpectSlice(t, outSlice, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}) 66 | th.ExpectError(t, outErr, "err15") 67 | 68 | time.Sleep(1 * time.Second) 69 | th.ExpectDrainedChan(t, in) 70 | }) 71 | } 72 | 73 | func TestFromSeq(t *testing.T) { 74 | t.Run("nil", func(t *testing.T) { 75 | in := FromSeq[int](nil, nil) 76 | th.ExpectValue(t, in, nil) 77 | }) 78 | 79 | t.Run("normal ", func(t 
*testing.T) { 80 | in := FromSeq(rangeInt(0, 20), nil) 81 | 82 | outSlice, outErrs := toSliceAndErrors(in) 83 | th.Sort(outSlice) 84 | 85 | th.ExpectSlice(t, outSlice, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}) 86 | th.ExpectSlice(t, outErrs, nil) 87 | }) 88 | 89 | t.Run("with error", func(t *testing.T) { 90 | in := FromSeq(rangeInt(0, 20), errors.New("err")) 91 | a := <-in 92 | th.ExpectDrainedChan(t, in) 93 | th.ExpectError(t, a.Error, "err") 94 | }) 95 | } 96 | 97 | func TestFromSeq2(t *testing.T) { 98 | t.Run("nil", func(t *testing.T) { 99 | in := FromSeq2[int](nil) 100 | th.ExpectValue(t, in, nil) 101 | }) 102 | 103 | t.Run("normal", func(t *testing.T) { 104 | // generate from 0 to 7, and when the value is 5, yield error 105 | err5 := errors.New("err5") 106 | gen := func(yield func(x int, err error) bool) { 107 | for i := 0; i < 8; i++ { 108 | var err error 109 | if i == 5 { 110 | err = err5 111 | } 112 | if !yield(i, err) { 113 | break 114 | } 115 | } 116 | } 117 | 118 | in := FromSeq2(gen) 119 | 120 | var outSlice []int 121 | var outError []error 122 | for a := range in { 123 | outSlice = append(outSlice, a.Value) 124 | outError = append(outError, a.Error) 125 | } 126 | 127 | th.ExpectSlice(t, outSlice, []int{0, 1, 2, 3, 4, 5, 6, 7}) 128 | th.ExpectSlice(t, outError, []error{nil, nil, nil, nil, nil, err5, nil, nil}) 129 | }) 130 | } 131 | -------------------------------------------------------------------------------- /internal/core/reduce_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/destel/rill/internal/th" 9 | ) 10 | 11 | func TestReduce(t *testing.T) { 12 | for _, n := range []int{1, 4, 8} { 13 | t.Run(th.Name("nil", n), func(t *testing.T) { 14 | n := n 15 | th.ExpectHang(t, 1*time.Second, func() { 16 | _, _ = Reduce[int](nil, n, func(a, b int) int { 17 | return a + b 18 | }) 19 | }) 20 | }) 21 | 22 | t.Run(th.Name("empty", n), func(t *testing.T) { 23 | in := th.FromSlice([]int{}) 24 | _, ok := Reduce(in, n, func(a, b int) int { 25 | return a + b 26 | }) 27 | 28 | th.ExpectValue(t, ok, false) 29 | }) 30 | 31 | t.Run(th.Name("correctness", n), func(t *testing.T) { 32 | in := th.FromRange(0, 100) 33 | out, ok := Reduce(in, n, func(a, b int) int { 34 | return a + b 35 | }) 36 | 37 | th.ExpectValue(t, out, 99*100/2) 38 | th.ExpectValue(t, ok, true) 39 | }) 40 | 41 | t.Run(th.Name("concurrency", n), func(t *testing.T) { 42 | in := th.FromRange(0, 100) 43 | 44 | monitor := th.NewConcurrencyMonitor(1 * time.Second) 45 | 46 | _, _ = Reduce(in, n, func(a, b int) int { 47 | monitor.Inc() 48 | defer monitor.Dec() 49 | 50 | return a + b 51 | }) 52 | 53 | th.ExpectValue(t, monitor.Max(), n) 54 | }) 55 | } 56 | } 57 | 58 | func TestMapReduce(t *testing.T) { 59 | for _, nm := range []int{1, 4} { 60 | for _, nr := range []int{1, 4, 8} { 61 | t.Run(th.Name("nil", nm, nr), func(t *testing.T) { 62 | nm, nr := nm, nr 63 | th.ExpectHang(t, 1*time.Second, func() { 64 | var in chan int 65 | _ = MapReduce(in, 66 | nm, func(x int) (string, int) { 67 | return "", 1 68 | }, 69 | nr, func(a, b int) int { 70 | return a + b 71 | }, 72 | ) 73 | }) 74 | }) 75 | 76 | t.Run(th.Name("empty", nm, nr), func(t *testing.T) { 77 | in := th.FromSlice([]int{}) 78 | out := MapReduce(in, 79 | nm, func(x int) (string, int) { 80 | return fmt.Sprintf("%d mod 3", x%3), 1 81 | }, 82 | nr, func(a, b int) int { 83 | return a + b 84 | }, 85 | ) 86 | 87 | 
th.ExpectMap(t, out, map[string]int{}) 88 | }) 89 | 90 | t.Run(th.Name("correctness", nm, nr), func(t *testing.T) { 91 | in := th.FromRange(0, 200) 92 | out := MapReduce(in, 93 | nm, func(x int) (string, int) { 94 | s := fmt.Sprint(x) 95 | return fmt.Sprintf("%d-digit", len(s)), x 96 | }, 97 | nr, func(a, b int) int { 98 | return a + b 99 | }, 100 | ) 101 | 102 | th.ExpectMap(t, out, map[string]int{ 103 | "1-digit": (0 + 9) * 10 / 2, 104 | "2-digit": (10 + 99) * 90 / 2, 105 | "3-digit": (100 + 199) * 100 / 2, 106 | }) 107 | }) 108 | 109 | t.Run(th.Name("concurrency", nm, nr), func(t *testing.T) { 110 | // Need a really high number of items to reliably "catch" the max concurrency. 111 | in := th.FromRange(0, 100) 112 | 113 | mapMonitor := th.NewConcurrencyMonitor(1 * time.Second) 114 | reduceMonitor := th.NewConcurrencyMonitor(1 * time.Second) 115 | 116 | _ = MapReduce(in, 117 | nm, func(x int) (string, int) { 118 | mapMonitor.Inc() 119 | defer mapMonitor.Dec() 120 | 121 | return fmt.Sprintf("%d mod 3", x%3), 1 122 | }, 123 | nr, func(a, b int) int { 124 | reduceMonitor.Inc() 125 | defer reduceMonitor.Dec() 126 | 127 | return a + b 128 | }, 129 | ) 130 | 131 | th.ExpectValue(t, mapMonitor.Max(), nm) 132 | th.ExpectValue(t, reduceMonitor.Max(), nr) 133 | 134 | }) 135 | 136 | } 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /internal/core/loops.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // Loop allows to process items from the input channel concurrently using n goroutines. 8 | // If done channel is not nil, it will be closed after all items are processed. 9 | func Loop[A, B any](in <-chan A, done chan<- B, n int, f func(A)) { 10 | if n == 1 { 11 | go func() { 12 | if done != nil { 13 | defer close(done) 14 | } 15 | 16 | for a := range in { 17 | f(a) 18 | } 19 | }() 20 | return 21 | } 22 | 23 | var wg sync.WaitGroup 24 | 25 | for i := 0; i < n; i++ { 26 | wg.Add(1) 27 | go func() { 28 | defer wg.Done() 29 | 30 | for a := range in { 31 | f(a) 32 | } 33 | return 34 | }() 35 | } 36 | 37 | if done != nil { 38 | go func() { 39 | wg.Wait() 40 | close(done) 41 | }() 42 | } 43 | } 44 | 45 | type orderedValue[A any] struct { 46 | Value A 47 | CanWrite chan struct{} 48 | NextCanWrite chan struct{} 49 | } 50 | 51 | var canWritePool sync.Pool 52 | 53 | func makeCanWriteChan() chan struct{} { 54 | ch := canWritePool.Get() 55 | if ch == nil { 56 | return make(chan struct{}, 1) 57 | } 58 | return ch.(chan struct{}) 59 | } 60 | 61 | func releaseCanWriteChan(ch chan struct{}) { 62 | canWritePool.Put(ch) 63 | } 64 | 65 | // OrderedLoop is similar to Loop, but it allows to write results to some channel in the same order as items were read from the input. 66 | // If done channel is not nil, it will be closed after all items are processed. 67 | // Special "canWrite" channel is passed to user's function f. Typical f function looks like this: 68 | // - Do some processing (this part is executed concurrently). 69 | // - Read from canWrite channel exactly once. This step is required. Otherwise, behavior is undefined. 70 | // - Write result of the processing somewhere. This step is optional. 71 | // This way processing is done concurrently, but results are written in order. 
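//
// A minimal sketch of a typical caller, following the steps above (in, process
// and the concrete element types are placeholders, not part of this package):
//
//	// in is a <-chan int, process is some func(int) string
//	out := make(chan string)
//	OrderedLoop(in, out, 4, func(x int, canWrite <-chan struct{}) {
//		s := process(x) // done concurrently
//		<-canWrite      // wait for this item's turn to be written
//		out <- s        // results appear in the same order as the input
//	})
//	// out is also passed as the done channel, so it is closed automatically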
72 | func OrderedLoop[A, B any](in <-chan A, done chan<- B, n int, f func(a A, canWrite <-chan struct{})) { 73 | if n == 1 { 74 | canWrite := makeCanWriteChan() 75 | close(canWrite) 76 | 77 | go func() { 78 | if done != nil { 79 | defer close(done) 80 | } 81 | 82 | for a := range in { 83 | f(a, canWrite) 84 | } 85 | }() 86 | return 87 | } 88 | 89 | // High level idea: 90 | // Each item holds its own canWrite channel and a reference to the next item's canWrite channel. 91 | // After item is processed and written, it sends a signal to the next item that it can also be written. 92 | 93 | orderedIn := make(chan orderedValue[A]) 94 | 95 | go func() { 96 | defer close(orderedIn) 97 | 98 | var canWrite, nextCanWrite chan struct{} 99 | nextCanWrite = makeCanWriteChan() 100 | nextCanWrite <- struct{}{} // first item can be written immediately 101 | 102 | for a := range in { 103 | canWrite, nextCanWrite = nextCanWrite, makeCanWriteChan() 104 | orderedIn <- orderedValue[A]{a, canWrite, nextCanWrite} 105 | } 106 | }() 107 | 108 | var wg sync.WaitGroup 109 | for i := 0; i < n; i++ { 110 | wg.Add(1) 111 | go func() { 112 | defer wg.Done() 113 | for a := range orderedIn { 114 | f(a.Value, a.CanWrite) 115 | 116 | releaseCanWriteChan(a.CanWrite) 117 | a.NextCanWrite <- struct{}{} 118 | } 119 | }() 120 | } 121 | 122 | if done != nil { 123 | go func() { 124 | wg.Wait() 125 | close(done) 126 | }() 127 | } 128 | } 129 | 130 | // ForEach is a blocking function that processes input channel concurrently using n goroutines 131 | func ForEach[A any](in <-chan A, n int, f func(A)) { 132 | if n == 1 { 133 | for a := range in { 134 | f(a) 135 | } 136 | return 137 | } 138 | 139 | done := make(chan struct{}) 140 | Loop(in, done, n, f) 141 | <-done 142 | } 143 | -------------------------------------------------------------------------------- /internal/ringbuffer/buffer_test.go: -------------------------------------------------------------------------------- 1 | package ringbuffer 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/destel/rill/internal/th" 7 | ) 8 | 9 | func makeRwHelpers(buf *Buffer[int]) (read func(t *testing.T, cnt int), write func(t *testing.T, cnt int)) { 10 | var ir, iw int 11 | 12 | write = func(t *testing.T, cnt int) { 13 | t.Helper() 14 | for k := 0; k < cnt; k++ { 15 | buf.Write(iw) 16 | iw++ 17 | } 18 | } 19 | 20 | read = func(t *testing.T, cnt int) { 21 | t.Helper() 22 | 23 | if ir >= iw { 24 | _, ok := buf.Read() 25 | th.ExpectValue(t, ok, false) 26 | return 27 | } 28 | 29 | for k := 0; k < cnt; k++ { 30 | v, ok := buf.Read() 31 | 32 | if ir < iw { 33 | th.ExpectValue(t, ok, true) 34 | th.ExpectValue(t, v, ir) 35 | ir++ 36 | } else { 37 | th.ExpectValue(t, ok, false) 38 | } 39 | } 40 | } 41 | 42 | return 43 | } 44 | 45 | func TestReadWrite(t *testing.T) { 46 | var buf Buffer[int] 47 | read, write := makeRwHelpers(&buf) 48 | 49 | th.ExpectValue(t, buf.Len(), 0) 50 | th.ExpectValue(t, buf.Cap(), 0) 51 | 52 | read(t, 5) // read from empty buffer 53 | 54 | th.ExpectValue(t, buf.Len(), 0) 55 | th.ExpectValue(t, buf.Cap(), 0) 56 | 57 | write(t, 100) 58 | 59 | th.ExpectValue(t, buf.Len(), 100) 60 | th.ExpectValue(t, buf.Cap(), 128) 61 | 62 | read(t, 50) 63 | 64 | th.ExpectValue(t, buf.Len(), 50) 65 | th.ExpectValue(t, buf.Cap(), 128) 66 | 67 | write(t, 50) 68 | 69 | th.ExpectValue(t, buf.Len(), 100) 70 | th.ExpectValue(t, buf.Cap(), 128) 71 | 72 | read(t, 100) 73 | 74 | th.ExpectValue(t, buf.Len(), 0) 75 | th.ExpectValue(t, buf.Cap(), 128) 76 | } 77 | 78 | func TestGrowAndShrink(t *testing.T) { 79 | 
var buf Buffer[int] 80 | read, write := makeRwHelpers(&buf) 81 | 82 | write(t, 120) 83 | read(t, 120) 84 | write(t, 20) 85 | 86 | if buf.offset+buf.size < len(buf.data) { 87 | t.Fatalf("test is not properly set up, buffer must be wrapped around") 88 | } 89 | 90 | th.ExpectValue(t, buf.Len(), 20) 91 | th.ExpectValue(t, buf.Cap(), 128) 92 | 93 | buf.Shrink() 94 | th.ExpectValue(t, buf.Cap(), 64) 95 | 96 | buf.Shrink() 97 | th.ExpectValue(t, buf.Cap(), 32) 98 | 99 | buf.Shrink() 100 | th.ExpectValue(t, buf.Cap(), 32) 101 | 102 | // empty buffer and try to shrink to min size 103 | read(t, 20) 104 | th.ExpectValue(t, buf.Len(), 0) 105 | 106 | buf.Shrink() 107 | th.ExpectValue(t, buf.Cap(), 16) 108 | 109 | buf.Shrink() 110 | th.ExpectValue(t, buf.Cap(), 16) 111 | } 112 | 113 | func TestCompact(t *testing.T) { 114 | var buf Buffer[int] 115 | read, write := makeRwHelpers(&buf) 116 | 117 | write(t, 120) 118 | read(t, 120) 119 | write(t, 20) 120 | 121 | th.ExpectValue(t, buf.Len(), 20) 122 | th.ExpectValue(t, buf.Cap(), 128) 123 | 124 | buf.Compact() 125 | th.ExpectValue(t, buf.Cap(), 32) 126 | 127 | // empty buffer and try to shrink to min size 128 | read(t, 20) 129 | th.ExpectValue(t, buf.Len(), 0) 130 | 131 | buf.Compact() 132 | th.ExpectValue(t, buf.Cap(), 16) 133 | } 134 | 135 | func TestPeekAndDiscard(t *testing.T) { 136 | var buf Buffer[int] 137 | 138 | buf.Write(10) 139 | buf.Write(11) 140 | 141 | v, ok := buf.Peek() 142 | th.ExpectValue(t, ok, true) 143 | th.ExpectValue(t, v, 10) 144 | 145 | buf.Discard() 146 | 147 | v, ok = buf.Peek() 148 | th.ExpectValue(t, ok, true) 149 | th.ExpectValue(t, v, 11) 150 | 151 | buf.Discard() 152 | 153 | _, ok = buf.Peek() 154 | th.ExpectValue(t, ok, false) 155 | 156 | buf.Discard() 157 | 158 | _, ok = buf.Peek() 159 | th.ExpectValue(t, ok, false) 160 | 161 | } 162 | 163 | func TestReset(t *testing.T) { 164 | var buf Buffer[int] 165 | 166 | for i := 0; i < 100; i++ { 167 | buf.Write(i) 168 | } 169 | 170 | buf.Reset() 171 | 172 | th.ExpectValue(t, buf.Len(), 0) 173 | th.ExpectValue(t, buf.Cap(), 128) 174 | } 175 | -------------------------------------------------------------------------------- /merge.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "github.com/destel/rill/internal/core" 5 | ) 6 | 7 | // Merge performs a fan-in operation on the list of input channels, returning a single output channel. 8 | // The resulting channel will contain all items from all inputs, 9 | // and will be closed when all inputs are fully consumed. 10 | // 11 | // This is a non-blocking function that processes items from each input sequentially. 12 | // 13 | // See the package documentation for more information on non-blocking functions and error handling. 14 | func Merge[A any](ins ...<-chan A) <-chan A { 15 | return core.Merge(ins...) 16 | } 17 | 18 | // Split2 divides the input stream into two output streams based on the predicate function f: 19 | // The splitting behavior is determined by the boolean return value of f. When f returns true, the item is sent to the outTrue stream, 20 | // otherwise it is sent to the outFalse stream. In case of any error, the item is sent to both output streams. 21 | // Both output streams must be consumed independently to avoid deadlocks. 22 | // 23 | // This is a non-blocking unordered function that processes items concurrently using n goroutines. 24 | // An ordered version of this function, [OrderedSplit2], is also available. 
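//
// A minimal usage sketch, assuming users is a stream of *mockapi.User values:
//
//	active, inactive := rill.Split2(users, 5, func(u *mockapi.User) (bool, error) {
//		return u.IsActive, nil
//	})
//	// active and inactive must then be consumed independently,
//	// for example in two separate goroutines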
25 | // 26 | // See the package documentation for more information on non-blocking unordered functions and error handling. 27 | func Split2[A any](in <-chan Try[A], n int, f func(A) (bool, error)) (outTrue <-chan Try[A], outFalse <-chan Try[A]) { 28 | if in == nil { 29 | return nil, nil 30 | } 31 | 32 | resOutTrue := make(chan Try[A]) 33 | resOutFalse := make(chan Try[A]) 34 | done := make(chan struct{}) 35 | 36 | core.Loop(in, done, n, func(a Try[A]) { 37 | if a.Error != nil { 38 | resOutTrue <- a 39 | resOutFalse <- a 40 | return 41 | } 42 | 43 | isTrue, err := f(a.Value) 44 | switch { 45 | case err != nil: 46 | resOutTrue <- Try[A]{Error: err} 47 | resOutFalse <- Try[A]{Error: err} 48 | case isTrue: 49 | resOutTrue <- a 50 | default: 51 | resOutFalse <- a 52 | } 53 | }) 54 | 55 | go func() { 56 | <-done 57 | close(resOutTrue) 58 | close(resOutFalse) 59 | }() 60 | 61 | return resOutTrue, resOutFalse 62 | } 63 | 64 | // OrderedSplit2 is the ordered version of [Split2]. 65 | func OrderedSplit2[A any](in <-chan Try[A], n int, f func(A) (bool, error)) (outTrue <-chan Try[A], outFalse <-chan Try[A]) { 66 | if in == nil { 67 | return nil, nil 68 | } 69 | 70 | resOutTrue := make(chan Try[A]) 71 | resOutFalse := make(chan Try[A]) 72 | done := make(chan struct{}) 73 | 74 | core.OrderedLoop(in, done, n, func(a Try[A], canWrite <-chan struct{}) { 75 | if a.Error != nil { 76 | <-canWrite 77 | resOutTrue <- a 78 | resOutFalse <- a 79 | return 80 | } 81 | 82 | dir, err := f(a.Value) 83 | <-canWrite 84 | switch { 85 | case err != nil: 86 | resOutTrue <- Try[A]{Error: err} 87 | resOutFalse <- Try[A]{Error: err} 88 | case dir: 89 | resOutTrue <- a 90 | default: 91 | resOutFalse <- a 92 | } 93 | }) 94 | 95 | go func() { 96 | <-done 97 | close(resOutTrue) 98 | close(resOutFalse) 99 | }() 100 | 101 | return resOutTrue, resOutFalse 102 | } 103 | 104 | // Tee returns two streams that are identical to the input stream (both errors and values). 105 | // Both output streams must be consumed independently to avoid deadlocks. 106 | // 107 | // This is a non-blocking function that processes items in a single goroutine. 108 | // See the package documentation for more information on non-blocking functions and error handling. 109 | // 110 | // If deep copying of values is needed, use [Map] on one or both outputs: 111 | // 112 | // out1, out2 := rill.Tee(in) 113 | // out2 = rill.Map(out2, 1, func(x A) (A, error) { 114 | // return deepCopy(x), nil 115 | // }) 116 | func Tee[A any](in <-chan Try[A]) (<-chan Try[A], <-chan Try[A]) { 117 | if in == nil { 118 | return nil, nil 119 | } 120 | 121 | out1 := make(chan Try[A]) 122 | out2 := make(chan Try[A]) 123 | 124 | go func() { 125 | defer close(out1) 126 | defer close(out2) 127 | 128 | for x := range in { 129 | out1 <- x 130 | out2 <- x 131 | } 132 | }() 133 | 134 | return out1, out2 135 | } 136 | -------------------------------------------------------------------------------- /reduce.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "github.com/destel/rill/internal/core" 5 | ) 6 | 7 | // Reduce combines all items from the input stream into a single value using a binary function f. 8 | // The function f is called for pairs of items, progressively reducing the stream contents until only one value remains. 
9 | // 10 | // As an unordered function, Reduce can apply f to any pair of items in any order, which requires f to be: 11 | // - Associative: f(a, f(b, c)) == f(f(a, b), c) 12 | // - Commutative: f(a, b) == f(b, a) 13 | // 14 | // The hasResult return flag is set to false if the stream was empty, otherwise it is set to true. 15 | // 16 | // Reduce is a blocking unordered function that processes items concurrently using n goroutines. 17 | // The case when n = 1 is optimized: it does not spawn additional goroutines and processes items sequentially, 18 | // making the function ordered. This also removes the need for the function f to be commutative. 19 | // 20 | // See the package documentation for more information on blocking unordered functions and error handling. 21 | func Reduce[A any](in <-chan Try[A], n int, f func(A, A) (A, error)) (result A, hasResult bool, err error) { 22 | var once core.OnceWithWait 23 | setReturns := func(result1 A, hasResult1 bool, err1 error) { 24 | once.Do(func() { 25 | result = result1 26 | hasResult = hasResult1 27 | err = err1 28 | }) 29 | } 30 | 31 | go func() { 32 | var zero A 33 | var zeroTry Try[A] 34 | 35 | res, ok := core.Reduce(in, n, func(a1, a2 Try[A]) Try[A] { 36 | if once.WasCalled() { 37 | return zeroTry 38 | } 39 | 40 | if err := a1.Error; err != nil { 41 | setReturns(zero, false, err) 42 | return zeroTry 43 | } 44 | 45 | if err := a2.Error; err != nil { 46 | setReturns(zero, false, err) 47 | return zeroTry 48 | } 49 | 50 | res, err := f(a1.Value, a2.Value) 51 | if err != nil { 52 | setReturns(zero, false, err) 53 | return zeroTry 54 | } 55 | 56 | return Try[A]{Value: res} // the only non-dummy return 57 | }) 58 | 59 | setReturns(res.Value, ok, nil) 60 | }() 61 | 62 | once.Wait() 63 | return 64 | } 65 | 66 | // MapReduce transforms the input stream into a Go map using a mapper and a reducer functions. 67 | // The transformation is performed in two concurrent phases. 68 | // 69 | // - The mapper function transforms each input item into a key-value pair. 70 | // - The reducer function reduces values for the same key into a single value. 71 | // This phase has the same semantics as the [Reduce] function, in particular 72 | // the reducer function must be commutative and associative. 73 | // 74 | // MapReduce is a blocking unordered function that processes items concurrently using nm and nr goroutines 75 | // for the mapper and reducer functions respectively. Setting nr = 1 will make the reduce phase sequential and ordered, 76 | // see [Reduce] for more information. 77 | // 78 | // See the package documentation for more information on blocking unordered functions and error handling. 
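//
// A minimal word-count sketch (the input values are just example data):
//
//	words := rill.FromSlice([]string{"foo", "bar", "foo", "baz", "foo"}, nil)
//	counts, err := rill.MapReduce(words,
//		3, func(w string) (string, int, error) {
//			return w, 1, nil
//		},
//		2, func(x, y int) (int, error) {
//			return x + y, nil
//		},
//	)
//	// counts == map[string]int{"foo": 3, "bar": 1, "baz": 1}, err == nil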
79 | func MapReduce[A any, K comparable, V any](in <-chan Try[A], nm int, mapper func(A) (K, V, error), nr int, reducer func(V, V) (V, error)) (map[K]V, error) { 80 | var retMap map[K]V 81 | var retErr error 82 | var once core.OnceWithWait 83 | setReturns := func(m map[K]V, err error) { 84 | once.Do(func() { 85 | retMap = m 86 | retErr = err 87 | }) 88 | } 89 | 90 | go func() { 91 | var zeroKey K 92 | var zeroVal V 93 | 94 | res := core.MapReduce(in, 95 | nm, func(a Try[A]) (K, V) { 96 | if once.WasCalled() { 97 | return zeroKey, zeroVal 98 | } 99 | 100 | if a.Error != nil { 101 | setReturns(nil, a.Error) 102 | return zeroKey, zeroVal 103 | } 104 | 105 | k, v, err := mapper(a.Value) 106 | if err != nil { 107 | setReturns(nil, err) 108 | return zeroKey, zeroVal 109 | } 110 | 111 | return k, v 112 | }, 113 | nr, func(v1, v2 V) V { 114 | if once.WasCalled() { 115 | return zeroVal 116 | } 117 | 118 | res, err := reducer(v1, v2) 119 | if err != nil { 120 | setReturns(nil, err) 121 | return zeroVal 122 | } 123 | 124 | return res 125 | }, 126 | ) 127 | 128 | setReturns(res, nil) 129 | }() 130 | 131 | once.Wait() 132 | return retMap, retErr 133 | } 134 | -------------------------------------------------------------------------------- /consume.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "github.com/destel/rill/internal/core" 5 | ) 6 | 7 | // ForEach applies a function f to each item in an input stream. 8 | // 9 | // This is a blocking unordered function that processes items concurrently using n goroutines. 10 | // When n = 1, processing becomes sequential, making the function ordered and similar to a regular for-range loop. 11 | // 12 | // See the package documentation for more information on blocking unordered functions and error handling. 13 | func ForEach[A any](in <-chan Try[A], n int, f func(A) error) error { 14 | var retErr error 15 | var once core.OnceWithWait 16 | setReturns := func(err error) { 17 | once.Do(func() { 18 | retErr = err 19 | }) 20 | } 21 | 22 | go func() { 23 | core.ForEach(in, n, func(a Try[A]) { 24 | if once.WasCalled() { 25 | return // drain 26 | } 27 | 28 | err := a.Error 29 | if err == nil { 30 | err = f(a.Value) 31 | } 32 | if err != nil { 33 | setReturns(err) 34 | } 35 | }) 36 | 37 | setReturns(nil) 38 | }() 39 | 40 | once.Wait() 41 | return retErr 42 | } 43 | 44 | // Err returns the first error encountered in the input stream or nil if there were no errors. 45 | // 46 | // This is a blocking ordered function that processes items sequentially. 47 | // See the package documentation for more information on blocking ordered functions and error handling. 48 | func Err[A any](in <-chan Try[A]) error { 49 | defer Discard(in) 50 | 51 | for a := range in { 52 | if a.Error != nil { 53 | return a.Error 54 | } 55 | } 56 | 57 | return nil 58 | } 59 | 60 | // First returns the first item or error encountered in the input stream, whichever comes first. 61 | // The found return flag is set to false if the stream was empty, otherwise it is set to true. 62 | // 63 | // This is a blocking ordered function that processes items sequentially. 64 | // See the package documentation for more information on blocking ordered functions and error handling. 
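//
// A minimal usage sketch (the input values are just example data):
//
//	nums := rill.FromSlice([]int{10, 20, 30}, nil)
//	v, found, err := rill.First(nums)
//	// v == 10, found == true, err == nil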
65 | func First[A any](in <-chan Try[A]) (value A, found bool, err error) { 66 | defer Discard(in) 67 | 68 | for a := range in { 69 | return a.Value, true, a.Error 70 | } 71 | 72 | found = false 73 | return 74 | } 75 | 76 | // Any checks if there is an item in the input stream that satisfies the condition f. 77 | // This function returns true as soon as it finds such an item. Otherwise, it returns false. 78 | // 79 | // Any is a blocking unordered function that processes items concurrently using n goroutines. 80 | // When n = 1, processing becomes sequential, making the function ordered. 81 | // 82 | // See the package documentation for more information on blocking unordered functions and error handling. 83 | func Any[A any](in <-chan Try[A], n int, f func(A) (bool, error)) (bool, error) { 84 | var retFound bool 85 | var retErr error 86 | var once core.OnceWithWait 87 | setReturns := func(found bool, err error) { 88 | once.Do(func() { 89 | retFound = found 90 | retErr = err 91 | }) 92 | } 93 | 94 | go func() { 95 | core.ForEach(in, n, func(a Try[A]) { 96 | if once.WasCalled() { 97 | return // drain 98 | } 99 | 100 | if err := a.Error; err != nil { 101 | setReturns(false, err) 102 | return 103 | } 104 | 105 | ok, err := f(a.Value) 106 | if err != nil { 107 | setReturns(false, err) 108 | return 109 | } 110 | if ok { 111 | setReturns(true, nil) 112 | return 113 | } 114 | }) 115 | 116 | setReturns(false, nil) 117 | }() 118 | 119 | once.Wait() 120 | return retFound, retErr 121 | } 122 | 123 | // All checks if all items in the input stream satisfy the condition f. 124 | // This function returns false as soon as it finds an item that does not satisfy the condition. Otherwise, it returns true, 125 | // including the case when the stream was empty. 126 | // 127 | // This is a blocking unordered function that processes items concurrently using n goroutines. 128 | // When n = 1, processing becomes sequential, making the function ordered. 129 | // 130 | // See the package documentation for more information on blocking unordered functions and error handling. 131 | func All[A any](in <-chan Try[A], n int, f func(A) (bool, error)) (bool, error) { 132 | // Idea: x && y && z is the same as !(!x || !y || !z) 133 | // So we can use Any with a negated condition to implement All 134 | res, err := Any(in, n, func(a A) (bool, error) { 135 | ok, err := f(a) 136 | return !ok, err // negate 137 | }) 138 | return !res, err // negate 139 | } 140 | -------------------------------------------------------------------------------- /internal/th/assertions.go: -------------------------------------------------------------------------------- 1 | // Package th provides basic test helpers. 
2 | package th 3 | 4 | import ( 5 | "sort" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func ExpectValue[A comparable](t *testing.T, actual A, expected A) { 11 | t.Helper() 12 | if expected != actual { 13 | t.Errorf("expected %v, got %v", expected, actual) 14 | } 15 | } 16 | 17 | func ExpectValueLTE[A number](t *testing.T, actual A, expected A) { 18 | t.Helper() 19 | if actual > expected { 20 | t.Errorf("expected %v <= %v", actual, expected) 21 | } 22 | } 23 | 24 | func ExpectValueGTE[A number](t *testing.T, actual A, expected A) { 25 | t.Helper() 26 | if actual < expected { 27 | t.Errorf("expected %v >= %v", actual, expected) 28 | } 29 | } 30 | 31 | func ExpectValueInDelta[A number](t *testing.T, actual A, expected A, delta A) { 32 | t.Helper() 33 | diff := actual - expected 34 | if diff < 0 { 35 | diff = -diff 36 | } 37 | 38 | if diff > delta { 39 | t.Errorf("expected %v in [%v-%v]", actual, expected-delta, expected+delta) 40 | } 41 | } 42 | 43 | func ExpectSlice[A comparable](t *testing.T, actual []A, expected []A) { 44 | t.Helper() 45 | if len(expected) != len(actual) { 46 | t.Errorf("expected %v, got %v", expected, actual) 47 | return 48 | } 49 | 50 | for i := range expected { 51 | if expected[i] != actual[i] { 52 | t.Errorf("expected %v, got %v, mismatch at pos %d: %v != %v", expected, actual, i, expected[i], actual[i]) 53 | return 54 | } 55 | } 56 | } 57 | 58 | func ExpectMap[K, V comparable](t *testing.T, actual map[K]V, expected map[K]V) { 59 | t.Helper() 60 | if len(expected) != len(actual) { 61 | t.Errorf("expected %v, got %v", expected, actual) 62 | return 63 | } 64 | 65 | for k, v := range expected { 66 | actualV, ok := actual[k] 67 | if !ok { 68 | t.Errorf("expected %v, got %v", expected, actual) 69 | return 70 | } 71 | 72 | if v != actualV { 73 | t.Errorf("expected %v, got %v", expected, actual) 74 | return 75 | } 76 | } 77 | } 78 | 79 | type number interface { 80 | ~int | ~int64 81 | } 82 | 83 | type ordered interface { 84 | ~int | ~int64 | ~string 85 | } 86 | 87 | func ExpectSorted[T ordered](t *testing.T, arr []T) { 88 | t.Helper() 89 | isSorted := sort.SliceIsSorted(arr, func(i, j int) bool { 90 | return arr[i] <= arr[j] 91 | }) 92 | if !isSorted { 93 | t.Errorf("expected sorted slice") 94 | } 95 | } 96 | 97 | func ExpectUnsorted[T ordered](t *testing.T, arr []T) { 98 | t.Helper() 99 | isSorted := sort.SliceIsSorted(arr, func(i, j int) bool { 100 | return arr[i] <= arr[j] 101 | }) 102 | if isSorted { 103 | t.Errorf("expected unsorted slice") 104 | } 105 | } 106 | 107 | func ExpectDrainedChan[A any](t *testing.T, ch <-chan A) { 108 | t.Helper() 109 | select { 110 | case x, ok := <-ch: 111 | if ok { 112 | t.Errorf("expected channel to be closed, but got %v", x) 113 | } 114 | default: 115 | t.Errorf("expected channel to be closed, but it's blocked") 116 | } 117 | } 118 | 119 | func ExpectNeverClosedChan[A any](t *testing.T, ch <-chan A, waitFor time.Duration) { 120 | t.Helper() 121 | timeout := time.After(waitFor) 122 | for { 123 | select { 124 | case _, ok := <-ch: 125 | if !ok { 126 | t.Errorf("expected channel to be never closed") 127 | return 128 | } 129 | case <-timeout: 130 | return 131 | } 132 | } 133 | } 134 | 135 | func ExpectHang(t *testing.T, waitFor time.Duration, f func()) { 136 | t.Helper() 137 | done := make(chan struct{}) 138 | 139 | go func() { 140 | defer close(done) 141 | f() 142 | }() 143 | 144 | select { 145 | case <-done: 146 | t.Errorf("expected hang") 147 | case <-time.After(waitFor): 148 | } 149 | } 150 | 151 | func ExpectNotHang(t *testing.T, waitFor 
time.Duration, f func()) { 152 | t.Helper() 153 | done := make(chan struct{}) 154 | 155 | go func() { 156 | defer close(done) 157 | f() 158 | }() 159 | 160 | select { 161 | case <-done: 162 | case <-time.After(waitFor): 163 | t.Errorf("test hanged") 164 | } 165 | } 166 | 167 | func ExpectError(t *testing.T, err error, message string) { 168 | t.Helper() 169 | if err == nil { 170 | t.Errorf("expected error '%s', got nil", message) 171 | return 172 | } 173 | 174 | if err.Error() != message { 175 | t.Errorf("expected error '%s', got '%s'", message, err.Error()) 176 | } 177 | } 178 | 179 | func ExpectNoError(t *testing.T, err error) { 180 | t.Helper() 181 | if err != nil { 182 | t.Errorf("unexpected error '%v'", err) 183 | } 184 | } 185 | 186 | func ExpectNotPanic(t *testing.T, f func()) { 187 | t.Helper() 188 | defer func() { 189 | if r := recover(); r != nil { 190 | t.Errorf("unexpected panic: %v", r) 191 | } 192 | }() 193 | f() 194 | } 195 | -------------------------------------------------------------------------------- /internal/core/reduce.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // nonConcurrentReduce is a non-concurrent version of Reduce. 8 | func nonConcurrentReduce[A any](in <-chan A, f func(A, A) A) (A, bool) { 9 | res, ok := <-in 10 | if !ok { 11 | return res, false 12 | } 13 | 14 | for a := range in { 15 | res = f(res, a) 16 | } 17 | 18 | return res, true 19 | } 20 | 21 | // Reduce reduces the input channel into a single value using the provided function, 22 | // using n goroutines for concurrency 23 | func Reduce[A any](in <-chan A, n int, f func(A, A) A) (A, bool) { 24 | if in == nil { 25 | <-in 26 | } 27 | 28 | // Phase 0: Optimized non-concurrent case 29 | if n == 1 { 30 | return nonConcurrentReduce(in, f) 31 | } 32 | 33 | // Phase 1: Each goroutine calculates its own partial result 34 | partialResults := make(chan A, n) 35 | var wg sync.WaitGroup 36 | 37 | for i := 0; i < n; i++ { 38 | wg.Add(1) 39 | go func() { 40 | defer wg.Done() 41 | 42 | res, ok := nonConcurrentReduce(in, f) 43 | if ok { 44 | partialResults <- res 45 | } 46 | }() 47 | } 48 | 49 | go func() { 50 | wg.Wait() 51 | close(partialResults) 52 | }() 53 | 54 | // Phase 2: Recursive call. Reduce partialResults into a single value. 55 | // Both the number of goroutines and the recursion depth are independent of the input size. 56 | // 57 | // The partialResults channel contains at most n elements, which will be grouped into at most n/2 pairs in the next recursion level. 58 | // The total number of concurrent goroutines is n + n/2 + n/4 + ... = 2n. However, due to integer division, it's actually less than 2n. 59 | // The number of concurrent reductions at any given moment is at most n (see below). 60 | // The recursion depth is at most log2(n). 61 | // 62 | // Number of concurrent reductions: 63 | // - At the current level, there are at most n concurrent reductions. 64 | // - For each additional concurrent reduction at the next level, at least two goroutines from the current level need to 65 | // finish and send their partial results through the partialResults channel. 66 | // - This implies that when the number of concurrent reductions increases by 1 at the next level, it decreases by at least 2 at the current level. 67 | // - Consequently, the total number of concurrent reductions across all levels starts from n and decreases as data travels down the stack. 
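	//
	// For example, with n = 8 the recursive calls run with n = 4, 2 and finally 1,
	// so at most 8 + 4 + 2 + 1 = 15 < 2*8 goroutines are used in total,
	// and the recursion depth is log2(8) = 3.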
68 | return Reduce(partialResults, n/2, f) 69 | } 70 | 71 | type keyValue[K, V any] struct { 72 | Key K 73 | Value V 74 | } 75 | 76 | // reduceIntoMap is a helper function that adds a new key-value pair to the map or reduces the value of an existing key. 77 | func reduceIntoMap[K comparable, V any](m map[K]V, k K, v V, f func(V, V) V) { 78 | if oldV, ok := m[k]; ok { 79 | m[k] = f(oldV, v) 80 | } else { 81 | m[k] = v 82 | } 83 | } 84 | 85 | // MapReduce applies a map-reduce pattern to the input channel. 86 | // First inout is converted into key-value pairs using the mapper function and nm goroutines. 87 | // If there are multiple values for the same key, they are reduced into a single value using the reducer function and nr goroutines. 88 | // The result is a map where each key is associated with a single value. 89 | func MapReduce[A any, K comparable, V any](in <-chan A, nm int, mapper func(A) (K, V), nr int, reducer func(V, V) V) map[K]V { 90 | if in == nil { 91 | <-in 92 | } 93 | 94 | // Phase 1: Map 95 | mapped := FilterMap(in, nm, func(a A) (keyValue[K, V], bool) { 96 | k, v := mapper(a) 97 | return keyValue[K, V]{k, v}, true 98 | }) 99 | 100 | // Phase 2.1: Optimized non-concurrent reduce. Build a final map right away. 101 | if nr == 1 { 102 | res := make(map[K]V) 103 | for kv := range mapped { 104 | reduceIntoMap(res, kv.Key, kv.Value, reducer) 105 | } 106 | return res 107 | } 108 | 109 | // Phase 2.2: Each goroutine builds its own partial map 110 | partialResults := make(chan map[K]V, nr) 111 | var wg sync.WaitGroup 112 | 113 | for i := 0; i < nr; i++ { 114 | wg.Add(1) 115 | go func() { 116 | defer wg.Done() 117 | 118 | res := make(map[K]V) 119 | for kv := range mapped { 120 | reduceIntoMap(res, kv.Key, kv.Value, reducer) 121 | } 122 | partialResults <- res 123 | }() 124 | } 125 | 126 | go func() { 127 | wg.Wait() 128 | close(partialResults) 129 | }() 130 | 131 | // Phase 3: Merge all partial maps into a single one 132 | res, _ := Reduce(partialResults, nr/2, func(m1, m2 map[K]V) map[K]V { 133 | // Always merge smaller map into a bigger one 134 | if len(m2) > len(m1) { 135 | m1, m2 = m2, m1 136 | } 137 | 138 | for k, v := range m2 { 139 | reduceIntoMap(m1, k, v, reducer) 140 | } 141 | return m1 142 | }) 143 | 144 | return res 145 | } 146 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // Package rill provides composable channel-based concurrency primitives for Go that simplify parallel processing, 2 | // batching, and stream handling. It offers building blocks for constructing concurrent pipelines from 3 | // reusable parts while maintaining precise control over concurrency levels. The package reduces boilerplate, 4 | // abstracts away goroutine orchestration, features centralized error handling, and has zero external dependencies. 5 | // 6 | // # Streams and Try Containers 7 | // 8 | // In this package, a stream refers to a channel of [Try] containers. A Try container is a simple struct that holds a value and an error. 9 | // When an "empty stream" is referred to, it means a channel of Try containers that has been closed and was never written to. 10 | // 11 | // Most functions in this package are concurrent, and the level of concurrency can be controlled by the argument n. 12 | // Some functions share common behaviors and characteristics, which are described below. 
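//
// For illustration, a stream of integers has the type <-chan Try[int]; each item in it
// is a Try container holding either a value or an error:
//
//	okItem := rill.Try[int]{Value: 42}
//	errItem := rill.Try[int]{Error: fmt.Errorf("something went wrong")}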
13 | // 14 | // # Non-blocking functions 15 | // 16 | // Functions such as [Map], [Filter], and [Batch] take a stream as an input and return a new stream as an output. 17 | // They do not block and return the output stream immediately. All the processing is done in the background by the goroutine pools they spawn. 18 | // These functions forward all errors from the input stream to the output stream. 19 | // Any errors returned by the user-provided functions are also sent to the output stream. 20 | // When such a function reaches the end of the input stream, it closes the output stream, stops processing and cleans up resources. 21 | // 22 | // Such functions are designed to be composed together to build complex processing pipelines: 23 | // 24 | // stage2 := rill.Map(input, ...) 25 | // stage3 := rill.Batch(stage2, ...) 26 | // stage4 := rill.Map(stage3, ...) 27 | // results := rill.Unbatch(stage4, ...) 28 | // // consume the results and handle errors with some blocking function 29 | // 30 | // # Blocking functions 31 | // 32 | // Functions such as [ForEach], [Reduce] and [MapReduce] are used at the last stage of the pipeline 33 | // to consume the stream and return the final result or error. 34 | // 35 | // Usually, these functions block until one of the following conditions is met: 36 | // - The end of the stream is reached. In this case, the function returns the final result. 37 | // - An error is encountered either in the input stream or in some user-provided function. In this case, the function returns the error. 38 | // 39 | // In case of an early termination (before reaching the end of the input stream), such functions return immediately 40 | // but spawn a background goroutine that discards the remaining items from the input channel. This is done to prevent goroutine 41 | // leaks by ensuring that all goroutines feeding the stream are allowed to complete. 42 | // The input stream should not be used anymore after calling such functions. 43 | // 44 | // It's also possible to consume the pipeline results manually, for example using a for-range loop. 45 | // In this case, add a deferred call to [Discard] before the loop to ensure that goroutines are not leaked. 46 | // 47 | // defer rill.Discard(results) 48 | // 49 | // for res := range results { 50 | // if res.Error != nil { 51 | // return res.Error 52 | // } 53 | // // process res.Value 54 | // } 55 | // 56 | // # Unordered functions 57 | // 58 | // Functions such as [Map], [Filter], and [FlatMap] write items to the output stream as soon as they become available. 59 | // Due to the concurrent nature of these functions, the order of items in the output stream may not match the order of items in the input stream. 60 | // These functions prioritize performance and concurrency over maintaining the original order. 61 | // 62 | // # Ordered functions 63 | // 64 | // Functions such as [OrderedMap] or [OrderedFilter] preserve the order of items from the input stream. 65 | // These functions are still concurrent, but use special synchronization techniques to ensure that 66 | // items are written to the output stream in the same order as they were read from the input stream. 67 | // This additional synchronization has some overhead, but it is negligible for i/o bound workloads. 68 | // 69 | // Some other functions, such as [ToSlice], [Batch] or [First] are not concurrent and are ordered by nature. 70 | // 71 | // # Error handling 72 | // 73 | // Error handling can be non-trivial in concurrent applications. 
Rill simplifies this by providing a structured error handling approach. 74 | // As described above, all errors are automatically propagated down the pipeline to the final stage, where they can be caught. 75 | // This allows the pipeline to terminate after the first error is encountered and return it to the caller. 76 | // 77 | // In cases where more complex error handling logic is required, the [Catch] function can be used. 78 | // It can catch and handle errors at any point in the pipeline, providing the flexibility to handle not only the first error, but any of them. 79 | package rill 80 | -------------------------------------------------------------------------------- /merge_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/destel/rill/internal/th" 8 | ) 9 | 10 | func TestMerge(t *testing.T) { 11 | // real tests are in another package 12 | Merge[int](nil) 13 | } 14 | 15 | func universalSplit2[A any](ord bool, in <-chan Try[A], n int, f func(A) (bool, error)) (outTrue <-chan Try[A], outFalse <-chan Try[A]) { 16 | if ord { 17 | return OrderedSplit2(in, n, f) 18 | } 19 | return Split2(in, n, f) 20 | } 21 | 22 | func TestSplit2(t *testing.T) { 23 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 24 | for _, n := range []int{1, 5} { 25 | t.Run(th.Name("nil", n), func(t *testing.T) { 26 | outTrue, outFalse := universalSplit2(ord, nil, n, func(string) (bool, error) { return true, nil }) 27 | th.ExpectValue(t, outTrue, nil) 28 | th.ExpectValue(t, outFalse, nil) 29 | }) 30 | 31 | t.Run(th.Name("correctness", n), func(t *testing.T) { 32 | // idea: split input into 4 groups 33 | // - first 2 groups are sent into corresponding outputs 34 | // - 3rd would cause error during splitting 35 | // - 4th would be errors even before splitting 36 | 37 | in := FromChan(th.FromRange(0, 20*4), nil) 38 | in = OrderedMap(in, 1, func(x int) (int, error) { 39 | if x%4 == 3 { 40 | return 0, fmt.Errorf("err%03d", x) 41 | } 42 | return x, nil 43 | }) 44 | 45 | outTrue, outFalse := universalSplit2(ord, in, n, func(x int) (bool, error) { 46 | switch x % 4 { 47 | case 0: 48 | return true, nil 49 | case 1: 50 | return false, nil 51 | case 2: 52 | return true, fmt.Errorf("err%03d", x) 53 | default: 54 | return true, nil // this should not be called 55 | } 56 | }) 57 | 58 | var outSliceTrue, outSliceFalse []int 59 | var errSliceTrue, errSliceFalse []string 60 | 61 | th.DoConcurrently( 62 | func() { outSliceTrue, errSliceTrue = toSliceAndErrors(outTrue) }, 63 | func() { outSliceFalse, errSliceFalse = toSliceAndErrors(outFalse) }, 64 | ) 65 | 66 | var expectedOutSliceTrue, expectedOutSliceFalse []int 67 | var expectedErrSlice []string 68 | 69 | for i := 0; i < 20*4; i++ { 70 | switch i % 4 { 71 | case 0: 72 | expectedOutSliceTrue = append(expectedOutSliceTrue, i) 73 | case 1: 74 | expectedOutSliceFalse = append(expectedOutSliceFalse, i) 75 | default: 76 | expectedErrSlice = append(expectedErrSlice, fmt.Sprintf("err%03d", i)) 77 | } 78 | } 79 | 80 | th.Sort(outSliceTrue) 81 | th.Sort(outSliceFalse) 82 | th.Sort(errSliceTrue) 83 | th.Sort(errSliceFalse) 84 | 85 | th.ExpectSlice(t, outSliceTrue, expectedOutSliceTrue) 86 | th.ExpectSlice(t, outSliceFalse, expectedOutSliceFalse) 87 | th.ExpectSlice(t, errSliceTrue, expectedErrSlice) 88 | th.ExpectSlice(t, errSliceFalse, expectedErrSlice) 89 | }) 90 | 91 | t.Run(th.Name("ordering", n), func(t *testing.T) { 92 | in := FromChan(th.FromRange(0, 10000*4), nil) 93 
| 94 | outTrue, outFalse := universalSplit2(ord, in, n, func(x int) (bool, error) { 95 | switch x % 3 { 96 | case 0: 97 | return true, nil 98 | case 1: 99 | return false, nil 100 | default: 101 | return true, fmt.Errorf("err%06d", x) 102 | } 103 | }) 104 | 105 | var outSliceTrue, outSliceFalse []int 106 | var errSliceTrue, errSliceFalse []string 107 | 108 | th.DoConcurrently( 109 | func() { outSliceTrue, errSliceTrue = toSliceAndErrors(outTrue) }, 110 | func() { outSliceFalse, errSliceFalse = toSliceAndErrors(outFalse) }, 111 | ) 112 | 113 | if ord || n == 1 { 114 | th.ExpectSorted(t, outSliceTrue) 115 | th.ExpectSorted(t, outSliceFalse) 116 | th.ExpectSorted(t, errSliceTrue) 117 | th.ExpectSorted(t, errSliceFalse) 118 | } else { 119 | th.ExpectUnsorted(t, outSliceTrue) 120 | th.ExpectUnsorted(t, outSliceFalse) 121 | th.ExpectUnsorted(t, errSliceTrue) 122 | th.ExpectUnsorted(t, errSliceFalse) 123 | } 124 | }) 125 | 126 | } 127 | }) 128 | } 129 | 130 | func TestTee(t *testing.T) { 131 | t.Run("nil", func(t *testing.T) { 132 | out1, out2 := Tee[int](nil) 133 | th.ExpectValue(t, out1, nil) 134 | th.ExpectValue(t, out2, nil) 135 | }) 136 | 137 | t.Run("correctness", func(t *testing.T) { 138 | // Create input with mixed values and errors 139 | in := FromChan(th.FromRange(0, 10), nil) 140 | in = replaceWithError(in, 2, fmt.Errorf("err2")) 141 | in = replaceWithError(in, 7, fmt.Errorf("err7")) 142 | 143 | out1, out2 := Tee(in) 144 | 145 | var out1Slice, out2Slice []int 146 | var out1Err, out2Err []string 147 | 148 | th.DoConcurrently( 149 | func() { out1Slice, out1Err = toSliceAndErrors(out1) }, 150 | func() { out2Slice, out2Err = toSliceAndErrors(out2) }, 151 | ) 152 | 153 | expected := []int{0, 1, 3, 4, 5, 6, 8, 9} 154 | expectedErr := []string{"err2", "err7"} 155 | 156 | // Both outputs should be identical 157 | th.ExpectSlice(t, out1Slice, expected) 158 | th.ExpectSlice(t, out2Slice, expected) 159 | th.ExpectSlice(t, out1Err, expectedErr) 160 | th.ExpectSlice(t, out2Err, expectedErr) 161 | }) 162 | } 163 | -------------------------------------------------------------------------------- /mockapi/users.go: -------------------------------------------------------------------------------- 1 | // Package mockapi provides a very basic mock API for examples and demos. 2 | // It's intentionally kept public to enable running and experimenting with examples in the Go Playground. 3 | // The implementation is naive and uses full scan for all operations. 
4 | package mockapi 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | "hash/fnv" 10 | "math/rand" 11 | "sync" 12 | "time" 13 | ) 14 | 15 | type User struct { 16 | ID int 17 | Name string 18 | Age int 19 | Department string 20 | IsActive bool 21 | } 22 | 23 | // don't use pointers here, to make sure that raw data is not accessible from outside 24 | var departments []string 25 | var users []User 26 | 27 | var mu sync.RWMutex 28 | 29 | func init() { 30 | const usersCount = 100 31 | 32 | var adjs = []string{"Big", "Small", "Fast", "Slow", "Smart", "Happy", "Sad", "Funny", "Serious", "Angry"} 33 | var nouns = []string{"Dog", "Cat", "Bird", "Fish", "Mouse", "Elephant", "Lion", "Tiger", "Bear", "Wolf"} 34 | 35 | mu.Lock() 36 | defer mu.Unlock() 37 | 38 | departments = []string{"HR", "IT", "Finance", "Marketing", "Sales", "Support", "Engineering", "Management"} 39 | 40 | // Generate users 41 | // Use deterministic values for all fields to make examples reproducible 42 | users = make([]User, 0, usersCount) 43 | 44 | for i := 1; i <= usersCount; i++ { 45 | user := User{ 46 | ID: i, 47 | Name: adjs[hash(i, "name1")%len(adjs)] + " " + nouns[hash(i, "name2")%len(nouns)], // adj + noun 48 | Age: hash(i, "age")%20 + 30, // 20-50 49 | Department: departments[hash(i, "dep")%len(departments)], // one of 50 | IsActive: hash(i, "active")%100 < 60, // 60% 51 | } 52 | 53 | users = append(users, user) 54 | } 55 | } 56 | 57 | func GetDepartments() ([]string, error) { 58 | res := make([]string, len(departments)) 59 | copy(res, departments) 60 | return res, nil 61 | } 62 | 63 | // GetUser returns a user by ID. 64 | func GetUser(ctx context.Context, id int) (*User, error) { 65 | if err := ctx.Err(); err != nil { 66 | return nil, err 67 | } 68 | randomSleep(ctx, 500*time.Millisecond) 69 | 70 | mu.RLock() 71 | defer mu.RUnlock() 72 | 73 | idx, err := getUserIndex(id) 74 | if err != nil { 75 | return nil, err 76 | } 77 | 78 | user := users[idx] 79 | return &user, nil 80 | } 81 | 82 | // GetUsers returns a list of users by IDs. 83 | // If a user is not found, nil is returned in the corresponding position. 84 | func GetUsers(ctx context.Context, ids []int) ([]*User, error) { 85 | if err := ctx.Err(); err != nil { 86 | return nil, err 87 | } 88 | randomSleep(ctx, 1000*time.Millisecond) 89 | 90 | mu.RLock() 91 | defer mu.RUnlock() 92 | 93 | res := make([]*User, 0, len(ids)) 94 | for _, id := range ids { 95 | idx, err := getUserIndex(id) 96 | if err != nil { 97 | res = append(res, nil) 98 | } else { 99 | user := users[idx] 100 | res = append(res, &user) 101 | } 102 | } 103 | 104 | return res, nil 105 | } 106 | 107 | type UserQuery struct { 108 | Department string 109 | Page int 110 | } 111 | 112 | // ListUsers returns a paginated list of users optionally filtered by department. 
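//
// A minimal sketch of fetching all pages for a single department
// ("IT" is just an example value):
//
//	for page := 0; ; page++ {
//		users, err := mockapi.ListUsers(ctx, &mockapi.UserQuery{Department: "IT", Page: page})
//		if err != nil {
//			return err
//		}
//		if len(users) == 0 {
//			break
//		}
//		// process the current page of users
//	}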
113 | func ListUsers(ctx context.Context, query *UserQuery) ([]*User, error) { 114 | if err := ctx.Err(); err != nil { 115 | return nil, err 116 | } 117 | randomSleep(ctx, 1000*time.Millisecond) 118 | 119 | const pageSize = 10 120 | if query == nil { 121 | query = &UserQuery{} 122 | } 123 | 124 | offset := query.Page * pageSize 125 | 126 | mu.RLock() 127 | defer mu.RUnlock() 128 | 129 | res := make([]*User, 0, 10) 130 | for _, user := range users { 131 | if query.Department != "" && user.Department != query.Department { 132 | continue 133 | } 134 | 135 | if offset > 0 { 136 | offset-- 137 | continue 138 | } 139 | 140 | if len(res) >= pageSize { 141 | break 142 | } 143 | 144 | userCopy := user 145 | res = append(res, &userCopy) 146 | } 147 | 148 | return res, nil 149 | } 150 | 151 | // SaveUser saves a user. 152 | func SaveUser(ctx context.Context, user *User) error { 153 | if err := ctx.Err(); err != nil { 154 | return err 155 | } 156 | randomSleep(ctx, 1000*time.Millisecond) 157 | 158 | if user == nil { 159 | return fmt.Errorf("user is nil") 160 | } 161 | 162 | if user.Name == "" { 163 | return fmt.Errorf("username is empty") 164 | } 165 | if user.Age <= 0 { 166 | return fmt.Errorf("age is invalid") 167 | } 168 | 169 | mu.Lock() 170 | defer mu.Unlock() 171 | 172 | idx, err := getUserIndex(user.ID) 173 | if err != nil { 174 | users = append(users, *user) 175 | } else { 176 | users[idx] = *user 177 | } 178 | 179 | return nil 180 | } 181 | 182 | func getUserIndex(id int) (int, error) { 183 | for i, u := range users { 184 | if u.ID == id { 185 | return i, nil 186 | } 187 | } 188 | 189 | return -1, fmt.Errorf("user not found") 190 | } 191 | 192 | func hash(input ...any) int { 193 | hasher := fnv.New32() 194 | fmt.Fprintln(hasher, input...) 195 | return int(hasher.Sum32()) 196 | } 197 | 198 | func randomSleep(ctx context.Context, max time.Duration) { 199 | dur := time.Duration(rand.Intn(int(max))) 200 | t := time.NewTimer(dur) 201 | defer t.Stop() 202 | 203 | select { 204 | case <-t.C: 205 | case <-ctx.Done(): 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /wrap_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/destel/rill/internal/th" 9 | ) 10 | 11 | func TestWrap(t *testing.T) { 12 | _ = Wrap(10, nil) 13 | } 14 | 15 | func TestFromSlice(t *testing.T) { 16 | t.Run("empty", func(t *testing.T) { 17 | in := FromSlice[int](nil, nil) 18 | outSlice, err := ToSlice(in) 19 | 20 | th.ExpectSlice(t, outSlice, nil) 21 | th.ExpectNoError(t, err) 22 | }) 23 | 24 | t.Run("error in second arg", func(t *testing.T) { 25 | in := FromSlice([]int{1, 2, 3, 4}, fmt.Errorf("err0")) 26 | outSlice, errs := toSliceAndErrors(in) 27 | 28 | th.ExpectSlice(t, outSlice, nil) 29 | th.ExpectSlice(t, errs, []string{"err0"}) 30 | }) 31 | 32 | t.Run("no errors", func(t *testing.T) { 33 | inSlice := make([]int, 20) 34 | for i := range inSlice { 35 | inSlice[i] = i 36 | } 37 | 38 | in := FromSlice(inSlice, nil) 39 | outSlice, err := ToSlice(in) 40 | 41 | th.ExpectSlice(t, outSlice, inSlice) 42 | th.ExpectNoError(t, err) 43 | }) 44 | 45 | t.Run("no errors large", func(t *testing.T) { 46 | inSlice := make([]int, 4000) 47 | for i := range inSlice { 48 | inSlice[i] = i 49 | } 50 | 51 | in := FromSlice(inSlice, nil) 52 | outSlice, err := ToSlice(in) 53 | 54 | th.ExpectSlice(t, outSlice, inSlice) 55 | th.ExpectNoError(t, err) 56 | }) 57 | 58 | 
t.Run("errors", func(t *testing.T) { 59 | inSlice := make([]int, 20) 60 | for i := 0; i < 20; i++ { 61 | inSlice[i] = i 62 | } 63 | 64 | in := FromSlice(inSlice, nil) 65 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 66 | in = replaceWithError(in, 18, fmt.Errorf("err18")) 67 | 68 | outSlice, err := ToSlice(in) 69 | 70 | th.ExpectSlice(t, outSlice, inSlice[:15]) 71 | th.ExpectError(t, err, "err15") 72 | 73 | time.Sleep(1 * time.Second) 74 | th.ExpectDrainedChan(t, in) 75 | }) 76 | } 77 | 78 | func TestFromChan(t *testing.T) { 79 | t.Run("nil", func(t *testing.T) { 80 | res := FromChan[int](nil, nil) 81 | th.ExpectValue(t, res, nil) 82 | }) 83 | 84 | t.Run("no error", func(t *testing.T) { 85 | var inSlice []int 86 | var expectedOutSlice []Try[int] 87 | 88 | for i := 0; i < 20000; i++ { 89 | inSlice = append(inSlice, i) 90 | expectedOutSlice = append(expectedOutSlice, Try[int]{Value: i}) 91 | } 92 | 93 | wrapped := FromChan(th.FromSlice(inSlice), nil) 94 | outSlice := th.ToSlice(wrapped) 95 | 96 | th.ExpectSlice(t, outSlice, expectedOutSlice) 97 | }) 98 | 99 | t.Run("with error", func(t *testing.T) { 100 | var inSlice []int 101 | var expectedOutSlice []Try[int] 102 | 103 | err := fmt.Errorf("err") 104 | expectedOutSlice = append(expectedOutSlice, Try[int]{Error: err}) 105 | 106 | for i := 0; i < 20000; i++ { 107 | inSlice = append(inSlice, i) 108 | expectedOutSlice = append(expectedOutSlice, Try[int]{Value: i}) 109 | } 110 | 111 | wrapped := FromChan(th.FromSlice(inSlice), err) 112 | outSlice := th.ToSlice(wrapped) 113 | 114 | th.ExpectSlice(t, outSlice, expectedOutSlice) 115 | }) 116 | } 117 | 118 | func TestFromChans(t *testing.T) { 119 | // slices -> FromSlice -> FromChans -> ToChans -> ToSlice -> compare 120 | runTest := func(name string, valsIn []int, errsIn []error) { 121 | t.Run(name, func(t *testing.T) { 122 | var valsInChan <-chan int 123 | if len(valsIn) > 0 { 124 | valsInChan = th.FromSlice(valsIn) 125 | } 126 | 127 | var errsInChan <-chan error 128 | if len(errsIn) > 0 { 129 | errsInChan = th.FromSlice(errsIn) 130 | } 131 | 132 | valsOutChan, errsOutChan := ToChans(FromChans(valsInChan, errsInChan)) 133 | 134 | if valsInChan == nil && errsInChan == nil { 135 | th.ExpectValue(t, valsOutChan, nil) 136 | th.ExpectValue(t, errsOutChan, nil) 137 | return 138 | } 139 | 140 | var valsOut []int 141 | var errsOut []error 142 | 143 | th.DoConcurrently( 144 | func() { valsOut = th.ToSlice(valsOutChan) }, 145 | func() { errsOut = th.ToSlice(errsOutChan) }, 146 | ) 147 | 148 | // nil errors are not expected in the output 149 | var expectedErrors []error 150 | for _, err := range errsIn { 151 | if err != nil { 152 | expectedErrors = append(expectedErrors, err) 153 | } 154 | } 155 | 156 | th.ExpectSlice(t, valsOut, valsIn) 157 | th.ExpectSlice(t, errsOut, expectedErrors) 158 | }) 159 | } 160 | 161 | makeSlice := func(n int) []int { 162 | out := make([]int, n) 163 | for i := 0; i < n; i++ { 164 | out[i] = i 165 | } 166 | return out 167 | } 168 | 169 | makeErrSlice := func(n int) []error { 170 | out := make([]error, n) 171 | for i := 0; i < n; i++ { 172 | out[i] = fmt.Errorf("err%06d", i) 173 | } 174 | return out 175 | } 176 | 177 | runTest("nil", nil, nil) 178 | runTest("no errors", makeSlice(10000), nil) 179 | runTest("only errors", nil, makeErrSlice(10000)) 180 | runTest("values and errors", makeSlice(10000), makeErrSlice(10000)) 181 | runTest("values and nil errors", makeSlice(10), []error{nil, nil, fmt.Errorf("err"), nil}) 182 | } 183 | 184 | func TestGenerate(t *testing.T) { 185 | in 
:= Generate(func(send func(int), sendErr func(error)) { 186 | for i := 0; i < 10; i++ { 187 | if i%2 == 0 { 188 | send(i) 189 | } else { 190 | sendErr(fmt.Errorf("err%d", i)) 191 | } 192 | } 193 | }) 194 | 195 | outSlice, errSlice := toSliceAndErrors(in) 196 | 197 | th.ExpectSlice(t, outSlice, []int{0, 2, 4, 6, 8}) 198 | th.ExpectSlice(t, errSlice, []string{"err1", "err3", "err5", "err7", "err9"}) 199 | } 200 | -------------------------------------------------------------------------------- /wrap.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | // Try is a container holding a value of type A or an error 4 | type Try[A any] struct { 5 | Value A 6 | Error error 7 | } 8 | 9 | // Wrap converts a value and/or error into a [Try] container. 10 | // It's a convenience function to avoid creating a [Try] container manually and benefit from type inference. 11 | // 12 | // Such function signature also allows concise wrapping of functions that return a value and an error: 13 | // 14 | // item := rill.Wrap(strconv.ParseInt("42")) 15 | func Wrap[A any](value A, err error) Try[A] { 16 | return Try[A]{Value: value, Error: err} 17 | } 18 | 19 | // FromSlice converts a slice into a stream. 20 | // If err is not nil function returns a stream with a single error. 21 | // 22 | // Such function signature allows concise wrapping of functions that return a slice and an error: 23 | // 24 | // stream := rill.FromSlice(someFunc()) 25 | func FromSlice[A any](slice []A, err error) <-chan Try[A] { 26 | const maxBufferSize = 512 27 | 28 | if err != nil { 29 | out := make(chan Try[A], 1) 30 | out <- Try[A]{Error: err} 31 | close(out) 32 | return out 33 | } 34 | 35 | sendAll := func(in []A, out chan Try[A]) { 36 | for _, a := range in { 37 | out <- Try[A]{Value: a} 38 | } 39 | close(out) 40 | } 41 | 42 | if len(slice) <= maxBufferSize { 43 | out := make(chan Try[A], len(slice)) 44 | sendAll(slice, out) 45 | return out 46 | } 47 | 48 | out := make(chan Try[A], maxBufferSize) 49 | go sendAll(slice, out) 50 | return out 51 | } 52 | 53 | // ToSlice converts an input stream into a slice. 54 | // 55 | // This is a blocking ordered function that processes items sequentially. 56 | // See the package documentation for more information on blocking ordered functions and error handling. 57 | func ToSlice[A any](in <-chan Try[A]) ([]A, error) { 58 | var res []A 59 | 60 | for x := range in { 61 | if err := x.Error; err != nil { 62 | Discard(in) 63 | return res, err 64 | } 65 | res = append(res, x.Value) 66 | } 67 | 68 | return res, nil 69 | } 70 | 71 | // FromChan converts a regular channel into a stream. 72 | // Additionally, this function can take an error, that will be added to the output stream alongside the values. 73 | // Either argument can be nil, in which case it is ignored. If both arguments are nil, the function returns nil. 74 | // 75 | // Such function signature allows concise wrapping of functions that return a channel and an error: 76 | // 77 | // stream := rill.FromChan(someFunc()) 78 | func FromChan[A any](values <-chan A, err error) <-chan Try[A] { 79 | if values == nil && err == nil { 80 | return nil 81 | } 82 | 83 | out := make(chan Try[A]) 84 | go func() { 85 | defer close(out) 86 | 87 | // error goes first 88 | if err != nil { 89 | out <- Try[A]{Error: err} 90 | } 91 | 92 | for x := range values { 93 | out <- Try[A]{Value: x} 94 | } 95 | }() 96 | 97 | return out 98 | } 99 | 100 | // FromChans converts a regular channel into a stream. 
101 | // Additionally, this function can take a channel of errors, which will be added to 102 | // the output stream alongside the values. 103 | // Either argument can be nil, in which case it is ignored. If both arguments are nil, the function returns nil. 104 | // 105 | // Such function signature allows concise wrapping of functions that return two channels: 106 | // 107 | // stream := rill.FromChans(someFunc()) 108 | func FromChans[A any](values <-chan A, errs <-chan error) <-chan Try[A] { 109 | if values == nil && errs == nil { 110 | return nil 111 | } 112 | 113 | out := make(chan Try[A]) 114 | 115 | go func() { 116 | defer close(out) 117 | for { 118 | select { 119 | case err, ok := <-errs: 120 | if ok { 121 | if err != nil { 122 | out <- Try[A]{Error: err} 123 | } 124 | } else { 125 | errs = nil 126 | if values == nil && errs == nil { 127 | return 128 | } 129 | } 130 | 131 | case v, ok := <-values: 132 | if ok { 133 | out <- Try[A]{Value: v} 134 | } else { 135 | values = nil 136 | if values == nil && errs == nil { 137 | return 138 | } 139 | } 140 | } 141 | } 142 | }() 143 | 144 | return out 145 | } 146 | 147 | // ToChans splits an input stream into two channels: one for values and one for errors. 148 | // It's an inverse of [FromChans]. Returns two nil channels if the input is nil. 149 | func ToChans[A any](in <-chan Try[A]) (<-chan A, <-chan error) { 150 | if in == nil { 151 | return nil, nil 152 | } 153 | 154 | out := make(chan A) 155 | errs := make(chan error) 156 | 157 | go func() { 158 | defer close(out) 159 | defer close(errs) 160 | 161 | for x := range in { 162 | if x.Error != nil { 163 | errs <- x.Error 164 | } else { 165 | out <- x.Value 166 | } 167 | } 168 | }() 169 | 170 | return out, errs 171 | } 172 | 173 | // Generate is a shorthand for creating streams. 174 | // It provides a more ergonomic way of sending both values and errors to a stream, manages goroutine and channel lifecycle. 175 | // 176 | // stream := rill.Generate(func(send func(int), sendErr func(error)) { 177 | // for i := 0; i < 100; i++ { 178 | // send(i) 179 | // } 180 | // sendErr(someError) 181 | // }) 182 | // 183 | // Here's how the same code would look without Generate: 184 | // 185 | // stream := make(chan rill.Try[int]) 186 | // go func() { 187 | // defer close(stream) 188 | // for i := 0; i < 100; i++ { 189 | // stream <- rill.Try[int]{Value: i} 190 | // } 191 | // stream <- rill.Try[int]{Error: someError} 192 | // }() 193 | func Generate[A any](f func(send func(A), sendErr func(error))) <-chan Try[A] { 194 | out := make(chan Try[A]) 195 | go func() { 196 | defer close(out) 197 | 198 | send := func(a A) { 199 | out <- Try[A]{Value: a} 200 | } 201 | sendErr := func(err error) { 202 | out <- Try[A]{Error: err} 203 | } 204 | 205 | f(send, sendErr) 206 | }() 207 | return out 208 | } 209 | -------------------------------------------------------------------------------- /transform.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "github.com/destel/rill/internal/core" 5 | ) 6 | 7 | // Map takes a stream of items of type A and transforms them into items of type B using a function f. 8 | // Returns a new stream of transformed items. 9 | // 10 | // This is a non-blocking unordered function that processes items concurrently using n goroutines. 11 | // An ordered version of this function, [OrderedMap], is also available. 
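//
// A minimal usage sketch, where ids is assumed to be a stream of user IDs and
// fetchUser is a hypothetical lookup function returning (*User, error):
//
//	users := rill.Map(ids, 5, func(id int) (*User, error) {
//		return fetchUser(ctx, id)
//	})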
12 | // 13 | // See the package documentation for more information on non-blocking unordered functions and error handling. 14 | func Map[A, B any](in <-chan Try[A], n int, f func(A) (B, error)) <-chan Try[B] { 15 | return core.FilterMap(in, n, func(a Try[A]) (Try[B], bool) { 16 | if a.Error != nil { 17 | return Try[B]{Error: a.Error}, true 18 | } 19 | 20 | b, err := f(a.Value) 21 | if err != nil { 22 | return Try[B]{Error: err}, true 23 | } 24 | 25 | return Try[B]{Value: b}, true 26 | }) 27 | } 28 | 29 | // OrderedMap is the ordered version of [Map]. 30 | func OrderedMap[A, B any](in <-chan Try[A], n int, f func(A) (B, error)) <-chan Try[B] { 31 | return core.OrderedFilterMap(in, n, func(a Try[A]) (Try[B], bool) { 32 | if a.Error != nil { 33 | return Try[B]{Error: a.Error}, true 34 | } 35 | 36 | b, err := f(a.Value) 37 | if err != nil { 38 | return Try[B]{Error: err}, true 39 | } 40 | 41 | return Try[B]{Value: b}, true 42 | }) 43 | } 44 | 45 | // Filter takes a stream of items of type A and filters them using a predicate function f. 46 | // Returns a new stream of items that passed the filter. 47 | // 48 | // This is a non-blocking unordered function that processes items concurrently using n goroutines. 49 | // An ordered version of this function, [OrderedFilter], is also available. 50 | // 51 | // See the package documentation for more information on non-blocking unordered functions and error handling. 52 | func Filter[A any](in <-chan Try[A], n int, f func(A) (bool, error)) <-chan Try[A] { 53 | return core.FilterMap(in, n, func(a Try[A]) (Try[A], bool) { 54 | if a.Error != nil { 55 | return a, true // never filter out errors 56 | } 57 | 58 | keep, err := f(a.Value) 59 | if err != nil { 60 | return Try[A]{Error: err}, true // never filter out errors 61 | } 62 | 63 | return a, keep 64 | }) 65 | } 66 | 67 | // OrderedFilter is the ordered version of [Filter]. 68 | func OrderedFilter[A any](in <-chan Try[A], n int, f func(A) (bool, error)) <-chan Try[A] { 69 | return core.OrderedFilterMap(in, n, func(a Try[A]) (Try[A], bool) { 70 | if a.Error != nil { 71 | return a, true // never filter out errors 72 | } 73 | 74 | keep, err := f(a.Value) 75 | if err != nil { 76 | return Try[A]{Error: err}, true // never filter out errors 77 | } 78 | 79 | return a, keep 80 | }) 81 | } 82 | 83 | // FilterMap takes a stream of items of type A, applies a function f that can filter and transform them into items of type B. 84 | // Returns a new stream of transformed items that passed the filter. This operation is equivalent to a 85 | // [Filter] followed by a [Map]. 86 | // 87 | // This is a non-blocking unordered function that processes items concurrently using n goroutines. 88 | // An ordered version of this function, [OrderedFilterMap], is also available. 89 | // 90 | // See the package documentation for more information on non-blocking unordered functions and error handling. 91 | func FilterMap[A, B any](in <-chan Try[A], n int, f func(A) (B, bool, error)) <-chan Try[B] { 92 | return core.FilterMap(in, n, func(a Try[A]) (Try[B], bool) { 93 | if a.Error != nil { 94 | return Try[B]{Error: a.Error}, true 95 | } 96 | 97 | b, keep, err := f(a.Value) 98 | if err != nil { 99 | return Try[B]{Error: err}, true 100 | } 101 | 102 | return Try[B]{Value: b}, keep 103 | }) 104 | } 105 | 106 | // OrderedFilterMap is the ordered version of [FilterMap]. 
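//
// A minimal usage sketch (illustrative only; ids, ctx, GetUser and User are hypothetical and not part of this package):
//
//	// Fetch users by ID and keep only the active ones, preserving the input order.
//	users := rill.OrderedFilterMap(ids, 3, func(id int) (*User, bool, error) {
//		u, err := GetUser(ctx, id)
//		if err != nil {
//			return nil, false, err // errors are passed through, never filtered out
//		}
//		return u, u.IsActive, nil
//	})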
107 | func OrderedFilterMap[A, B any](in <-chan Try[A], n int, f func(A) (B, bool, error)) <-chan Try[B] { 108 | return core.OrderedFilterMap(in, n, func(a Try[A]) (Try[B], bool) { 109 | if a.Error != nil { 110 | return Try[B]{Error: a.Error}, true 111 | } 112 | 113 | b, keep, err := f(a.Value) 114 | if err != nil { 115 | return Try[B]{Error: err}, true 116 | } 117 | 118 | return Try[B]{Value: b}, keep 119 | }) 120 | } 121 | 122 | // FlatMap takes a stream of items of type A and transforms each item into a new sub-stream of items of type B using a function f. 123 | // Those sub-streams are then flattened into a single output stream, which is returned. 124 | // 125 | // This is a non-blocking unordered function that processes items concurrently using n goroutines. 126 | // An ordered version of this function, [OrderedFlatMap], is also available. 127 | // 128 | // See the package documentation for more information on non-blocking unordered functions and error handling. 129 | func FlatMap[A, B any](in <-chan Try[A], n int, f func(A) <-chan Try[B]) <-chan Try[B] { 130 | if in == nil { 131 | return nil 132 | } 133 | 134 | out := make(chan Try[B]) 135 | 136 | core.Loop(in, out, n, func(a Try[A]) { 137 | if a.Error != nil { 138 | out <- Try[B]{Error: a.Error} 139 | return 140 | } 141 | 142 | bb := f(a.Value) 143 | for b := range bb { 144 | out <- b 145 | } 146 | }) 147 | 148 | return out 149 | } 150 | 151 | // OrderedFlatMap is the ordered version of [FlatMap]. 152 | func OrderedFlatMap[A, B any](in <-chan Try[A], n int, f func(A) <-chan Try[B]) <-chan Try[B] { 153 | if in == nil { 154 | return nil 155 | } 156 | 157 | out := make(chan Try[B]) 158 | 159 | core.OrderedLoop(in, out, n, func(a Try[A], canWrite <-chan struct{}) { 160 | if a.Error != nil { 161 | <-canWrite 162 | out <- Try[B]{Error: a.Error} 163 | return 164 | } 165 | 166 | bb := f(a.Value) 167 | <-canWrite 168 | for b := range bb { 169 | out <- b 170 | } 171 | }) 172 | 173 | return out 174 | } 175 | 176 | // Catch allows handling errors in the middle of a stream processing pipeline. 177 | // Every error encountered in the input stream is passed to the function f for handling. 178 | // 179 | // The outcome depends on the return value of f: 180 | // - If f returns nil, the error is considered handled and filtered out from the output stream. 181 | // - If f returns a non-nil error, the original error is replaced with the result of f. 182 | // 183 | // This is a non-blocking unordered function that handles errors concurrently using n goroutines. 184 | // An ordered version of this function, [OrderedCatch], is also available. 185 | // 186 | // See the package documentation for more information on non-blocking unordered functions and error handling. 187 | func Catch[A any](in <-chan Try[A], n int, f func(error) error) <-chan Try[A] { 188 | return core.FilterMap(in, n, func(a Try[A]) (Try[A], bool) { 189 | if a.Error == nil { 190 | return a, true 191 | } 192 | 193 | err := f(a.Error) 194 | if err == nil { 195 | return a, false // error handled, filter out 196 | } 197 | 198 | return Try[A]{Error: err}, true // error replaced by f(a.Error) 199 | }) 200 | } 201 | 202 | // OrderedCatch is the ordered version of [Catch]. 
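//
// A minimal usage sketch (illustrative only; errNotFound is hypothetical):
//
//	// Ignore "not found" errors and wrap everything else, keeping the stream order.
//	results = rill.OrderedCatch(results, 1, func(err error) error {
//		if errors.Is(err, errNotFound) {
//			return nil // handled: the error is filtered out of the stream
//		}
//		return fmt.Errorf("pipeline failed: %w", err) // replace the original error
//	})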
203 | func OrderedCatch[A any](in <-chan Try[A], n int, f func(error) error) <-chan Try[A] { 204 | return core.OrderedFilterMap(in, n, func(a Try[A]) (Try[A], bool) { 205 | if a.Error == nil { 206 | return a, true 207 | } 208 | 209 | err := f(a.Error) 210 | if err == nil { 211 | return a, false // error handled, filter out 212 | } 213 | 214 | return Try[A]{Error: err}, true // error replaced by f(a.Error) 215 | }) 216 | } 217 | -------------------------------------------------------------------------------- /reduce_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "fmt" 5 | "sync/atomic" 6 | "testing" 7 | "time" 8 | 9 | "github.com/destel/rill/internal/th" 10 | ) 11 | 12 | func TestReduce(t *testing.T) { 13 | for _, n := range []int{1, 4} { 14 | t.Run(th.Name("empty", n), func(t *testing.T) { 15 | in := FromSlice([]int{}, nil) 16 | 17 | _, ok, err := Reduce(in, n, func(x, y int) (int, error) { 18 | 19 | return x + y, nil 20 | }) 21 | 22 | th.ExpectNoError(t, err) 23 | th.ExpectValue(t, ok, false) 24 | th.ExpectDrainedChan(t, in) 25 | }) 26 | 27 | t.Run(th.Name("no errors", n), func(t *testing.T) { 28 | in := FromChan(th.FromRange(0, 100), nil) 29 | 30 | var cnt atomic.Int64 31 | out, ok, err := Reduce(in, n, func(x, y int) (int, error) { 32 | cnt.Add(1) 33 | return x + y, nil 34 | }) 35 | 36 | th.ExpectNoError(t, err) 37 | th.ExpectValue(t, out, 99*100/2) 38 | th.ExpectValue(t, ok, true) 39 | th.ExpectValue(t, cnt.Load(), 99) 40 | th.ExpectDrainedChan(t, in) 41 | }) 42 | 43 | t.Run(th.Name("error in input", n), func(t *testing.T) { 44 | in := FromChan(th.FromRange(0, 1000), nil) 45 | in = replaceWithError(in, 100, fmt.Errorf("err100")) 46 | 47 | var cnt atomic.Int64 48 | _, _, err := Reduce(in, n, func(x, y int) (int, error) { 49 | cnt.Add(1) 50 | return x + y, nil 51 | }) 52 | 53 | th.ExpectError(t, err, "err100") 54 | if cnt.Load() > 900 { 55 | t.Errorf("early exit did not happen") 56 | } 57 | 58 | time.Sleep(1 * time.Second) 59 | 60 | th.ExpectDrainedChan(t, in) 61 | if cnt.Load() > 900 { 62 | t.Errorf("extra calls to f were made") 63 | } 64 | }) 65 | 66 | // This one is needed to cover the case when the first argument 67 | // of user function is an error. 
68 | t.Run(th.Name("error in first input item", n), func(t *testing.T) { 69 | in := FromChan(th.FromRange(0, 1000), nil) 70 | in = replaceWithError(in, 0, fmt.Errorf("err0")) 71 | 72 | var cnt atomic.Int64 73 | _, _, err := Reduce(in, n, func(x, y int) (int, error) { 74 | cnt.Add(1) 75 | return x + y, nil 76 | }) 77 | 78 | th.ExpectError(t, err, "err0") 79 | if cnt.Load() > 100 { 80 | t.Errorf("early exit did not happen") 81 | } 82 | 83 | time.Sleep(1 * time.Second) 84 | 85 | th.ExpectDrainedChan(t, in) 86 | if cnt.Load() > 100 { 87 | t.Errorf("extra calls to f were made") 88 | } 89 | }) 90 | 91 | t.Run(th.Name("error in func", n), func(t *testing.T) { 92 | in := FromChan(th.FromRange(0, 1000), nil) 93 | 94 | var cnt atomic.Int64 95 | _, _, err := Reduce(in, n, func(x, y int) (int, error) { 96 | if cnt.Add(1) == 100 { 97 | return 0, fmt.Errorf("err100") 98 | } 99 | 100 | return x + y, nil 101 | }) 102 | 103 | th.ExpectError(t, err, "err100") 104 | if cnt.Load() > 900 { 105 | t.Errorf("early exit did not happen") 106 | } 107 | 108 | time.Sleep(1 * time.Second) 109 | 110 | th.ExpectDrainedChan(t, in) 111 | if cnt.Load() > 900 { 112 | t.Errorf("extra calls to f were made") 113 | } 114 | }) 115 | } 116 | } 117 | 118 | func TestMapReduce(t *testing.T) { 119 | for _, nm := range []int{1, 4} { 120 | for _, nr := range []int{1, 4} { 121 | t.Run(th.Name("empty", nm, nr), func(t *testing.T) { 122 | in := FromSlice([]int{}, nil) 123 | 124 | out, err := MapReduce(in, 125 | nm, func(x int) (string, int, error) { 126 | s := fmt.Sprint(x) 127 | return fmt.Sprintf("%d-digit", len(s)), x, nil 128 | }, 129 | nr, func(x, y int) (int, error) { 130 | return x + y, nil 131 | }) 132 | 133 | th.ExpectNoError(t, err) 134 | th.ExpectMap(t, out, map[string]int{}) 135 | th.ExpectDrainedChan(t, in) 136 | }) 137 | 138 | t.Run(th.Name("no errors", nm, nr), func(t *testing.T) { 139 | in := FromChan(th.FromRange(0, 1000), nil) 140 | 141 | var cntMap, cntReduce atomic.Int64 142 | out, err := MapReduce(in, 143 | nm, func(x int) (string, int, error) { 144 | cntMap.Add(1) 145 | s := fmt.Sprint(x) 146 | return fmt.Sprintf("%d-digit", len(s)), x, nil 147 | }, 148 | nr, func(x, y int) (int, error) { 149 | cntReduce.Add(1) 150 | return x + y, nil 151 | }, 152 | ) 153 | 154 | th.ExpectNoError(t, err) 155 | th.ExpectMap(t, out, map[string]int{ 156 | "1-digit": (0 + 9) * 10 / 2, 157 | "2-digit": (10 + 99) * 90 / 2, 158 | "3-digit": (100 + 999) * 900 / 2, 159 | }) 160 | th.ExpectValue(t, cntMap.Load(), 1000) 161 | th.ExpectValue(t, cntReduce.Load(), 9+89+899) 162 | th.ExpectDrainedChan(t, in) 163 | }) 164 | 165 | t.Run(th.Name("error in input", nm, nr), func(t *testing.T) { 166 | in := FromChan(th.FromRange(0, 1000), nil) 167 | in = replaceWithError(in, 100, fmt.Errorf("err100")) 168 | 169 | var cntMap, cntReduce atomic.Int64 170 | _, err := MapReduce(in, 171 | nm, func(x int) (string, int, error) { 172 | cntMap.Add(1) 173 | s := fmt.Sprint(x) 174 | return fmt.Sprintf("%d-digit", len(s)), x, nil 175 | }, 176 | nr, func(x, y int) (int, error) { 177 | cntReduce.Add(1) 178 | return x + y, nil 179 | }, 180 | ) 181 | 182 | th.ExpectError(t, err, "err100") 183 | if cntMap.Load() > 900 { 184 | t.Errorf("early exit did not happen") 185 | } 186 | if cntReduce.Load() > 900 { 187 | t.Errorf("early exit did not happen") 188 | } 189 | 190 | time.Sleep(1 * time.Second) 191 | 192 | th.ExpectDrainedChan(t, in) 193 | if cntMap.Load() > 900 { 194 | t.Errorf("extra calls to f were made") 195 | } 196 | if cntReduce.Load() > 900 { 197 | t.Errorf("extra 
calls to f were made") 198 | } 199 | }) 200 | 201 | t.Run(th.Name("error in mapper", nm, nr), func(t *testing.T) { 202 | in := FromChan(th.FromRange(0, 1000), nil) 203 | 204 | var cntMap, cntReduce atomic.Int64 205 | _, err := MapReduce(in, 206 | nm, func(x int) (string, int, error) { 207 | if cntMap.Add(1) == 100 { 208 | return "", 0, fmt.Errorf("err100") 209 | } 210 | s := fmt.Sprint(x) 211 | return fmt.Sprintf("%d-digit", len(s)), x, nil 212 | }, 213 | nr, func(x, y int) (int, error) { 214 | cntReduce.Add(1) 215 | return x + y, nil 216 | }, 217 | ) 218 | 219 | th.ExpectError(t, err, "err100") 220 | if cntMap.Load() > 900 { 221 | t.Errorf("early exit did not happen") 222 | } 223 | if cntReduce.Load() > 900 { 224 | t.Errorf("early exit did not happen") 225 | } 226 | 227 | time.Sleep(1 * time.Second) 228 | 229 | th.ExpectDrainedChan(t, in) 230 | if cntMap.Load() > 900 { 231 | t.Errorf("extra calls to f were made") 232 | } 233 | if cntReduce.Load() > 900 { 234 | t.Errorf("extra calls to f were made") 235 | } 236 | }) 237 | 238 | t.Run(th.Name("error in reducer", nm, nr), func(t *testing.T) { 239 | in := FromChan(th.FromRange(0, 1000), nil) 240 | 241 | var cntMap, cntReduce atomic.Int64 242 | _, err := MapReduce(in, 243 | nm, func(x int) (string, int, error) { 244 | cntMap.Add(1) 245 | s := fmt.Sprint(x) 246 | return fmt.Sprintf("%d-digit", len(s)), x, nil 247 | }, 248 | nr, func(x, y int) (int, error) { 249 | if cntReduce.Add(1) == 100 { 250 | return 0, fmt.Errorf("err100") 251 | } 252 | return x + y, nil 253 | }, 254 | ) 255 | 256 | th.ExpectError(t, err, "err100") 257 | if cntMap.Load() > 900 { 258 | t.Errorf("early exit did not happen") 259 | } 260 | if cntReduce.Load() > 900 { 261 | t.Errorf("early exit did not happen") 262 | } 263 | 264 | time.Sleep(1 * time.Second) 265 | 266 | th.ExpectDrainedChan(t, in) 267 | if cntMap.Load() > 900 { 268 | t.Errorf("extra calls to f were made") 269 | } 270 | if cntReduce.Load() > 900 { 271 | t.Errorf("extra calls to f were made") 272 | } 273 | }) 274 | 275 | } 276 | } 277 | } 278 | -------------------------------------------------------------------------------- /consume_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "fmt" 5 | "sync/atomic" 6 | "testing" 7 | "time" 8 | 9 | "github.com/destel/rill/internal/th" 10 | ) 11 | 12 | func TestErr(t *testing.T) { 13 | t.Run("empty", func(t *testing.T) { 14 | in := FromChan(th.FromSlice([]int{}), nil) 15 | err := Err(in) 16 | 17 | th.ExpectNoError(t, err) 18 | }) 19 | 20 | t.Run("no errors", func(t *testing.T) { 21 | in := FromChan(th.FromRange(0, 100), nil) 22 | err := Err(in) 23 | 24 | th.ExpectNoError(t, err) 25 | }) 26 | 27 | t.Run("error", func(t *testing.T) { 28 | in := FromChan(th.FromRange(0, 1000), nil) 29 | in = replaceWithError(in, 100, fmt.Errorf("err100")) 30 | 31 | err := Err(in) 32 | th.ExpectError(t, err, "err100") 33 | 34 | // wait until it drained 35 | time.Sleep(1 * time.Second) 36 | th.ExpectDrainedChan(t, in) 37 | }) 38 | } 39 | 40 | func TestFirst(t *testing.T) { 41 | t.Run("empty", func(t *testing.T) { 42 | in := FromChan(th.FromSlice([]int{}), nil) 43 | _, ok, err := First(in) 44 | 45 | th.ExpectNoError(t, err) 46 | th.ExpectValue(t, ok, false) 47 | }) 48 | 49 | t.Run("value is first", func(t *testing.T) { 50 | in := FromChan(th.FromRange(1, 1000), nil) 51 | in = replaceWithError(in, 100, fmt.Errorf("err100")) 52 | x, ok, err := First(in) 53 | 54 | th.ExpectNoError(t, err) 55 | th.ExpectValue(t, ok, true) 56 
| th.ExpectValue(t, x, 1) 57 | 58 | // wait until it drained 59 | time.Sleep(1 * time.Second) 60 | th.ExpectDrainedChan(t, in) 61 | }) 62 | 63 | t.Run("error is first", func(t *testing.T) { 64 | in := FromChan(th.FromRange(1, 1000), nil) 65 | in = replaceWithError(in, 1, fmt.Errorf("err1")) 66 | _, _, err := First(in) 67 | 68 | th.ExpectError(t, err, "err1") 69 | 70 | // wait until it drained 71 | time.Sleep(1 * time.Second) 72 | th.ExpectDrainedChan(t, in) 73 | }) 74 | } 75 | 76 | func TestForEach(t *testing.T) { 77 | for _, n := range []int{1, 5} { 78 | 79 | t.Run(th.Name("no errors", n), func(t *testing.T) { 80 | in := FromChan(th.FromRange(0, 10), nil) 81 | 82 | var sum atomic.Int64 83 | err := ForEach(in, n, func(x int) error { 84 | sum.Add(int64(x)) 85 | return nil 86 | }) 87 | 88 | th.ExpectNoError(t, err) 89 | th.ExpectValue(t, sum.Load(), int64(9*10/2)) 90 | }) 91 | 92 | t.Run(th.Name("error in input", n), func(t *testing.T) { 93 | th.ExpectNotHang(t, 10*time.Second, func() { 94 | in := FromChan(th.FromRange(0, 1000), nil) 95 | in = replaceWithError(in, 100, fmt.Errorf("err100")) 96 | 97 | var cnt atomic.Int64 98 | err := ForEach(in, n, func(x int) error { 99 | cnt.Add(1) 100 | return nil 101 | }) 102 | 103 | th.ExpectError(t, err, "err100") 104 | if cnt.Load() > 900 { 105 | t.Errorf("early return did not happen") 106 | } 107 | 108 | time.Sleep(1 * time.Second) 109 | 110 | th.ExpectDrainedChan(t, in) 111 | if cnt.Load() > 900 { 112 | t.Errorf("extra calls to f were made") 113 | } 114 | }) 115 | }) 116 | 117 | t.Run(th.Name("error in func", n), func(t *testing.T) { 118 | th.ExpectNotHang(t, 10*time.Second, func() { 119 | in := FromChan(th.FromRange(0, 1000), nil) 120 | 121 | var cnt atomic.Int64 122 | err := ForEach(in, n, func(x int) error { 123 | if x == 100 { 124 | return fmt.Errorf("err100") 125 | } 126 | cnt.Add(1) 127 | return nil 128 | }) 129 | 130 | th.ExpectError(t, err, "err100") 131 | if cnt.Load() > 900 { 132 | t.Errorf("early return did not happen") 133 | } 134 | 135 | // wait until it drained 136 | time.Sleep(1 * time.Second) 137 | 138 | th.ExpectDrainedChan(t, in) 139 | if cnt.Load() > 900 { 140 | t.Errorf("extra calls to f were made") 141 | } 142 | }) 143 | }) 144 | } 145 | } 146 | 147 | func TestAnyAll(t *testing.T) { 148 | for _, n := range []int{1, 5} { 149 | t.Run(th.Name("empty", n), func(t *testing.T) { 150 | in := FromSlice([]int{}, nil) 151 | 152 | res, err := All(in, 1, func(int) (bool, error) { 153 | return false, nil 154 | }) 155 | 156 | th.ExpectNoError(t, err) 157 | th.ExpectValue(t, res, true) 158 | }) 159 | 160 | t.Run(th.Name("no errors,false", n), func(t *testing.T) { 161 | in := FromChan(th.FromRange(0, 1000), nil) 162 | 163 | var cnt atomic.Int64 164 | ok, err := All(in, n, func(x int) (bool, error) { 165 | cnt.Add(1) 166 | return x < 100, nil 167 | }) 168 | 169 | th.ExpectNoError(t, err) 170 | th.ExpectValue(t, ok, false) 171 | if cnt.Load() > 900 { 172 | t.Errorf("early exit did not happen") 173 | } 174 | 175 | // wait until it drained 176 | time.Sleep(1 * time.Second) 177 | 178 | th.ExpectDrainedChan(t, in) 179 | if cnt.Load() > 900 { 180 | t.Errorf("extra calls to f were made") 181 | } 182 | }) 183 | 184 | t.Run(th.Name("error in input,false", n), func(t *testing.T) { 185 | in := FromChan(th.FromRange(0, 1000), nil) 186 | in = replaceWithError(in, 500, fmt.Errorf("err500")) 187 | 188 | var cnt atomic.Int64 189 | ok, err := All(in, n, func(x int) (bool, error) { 190 | cnt.Add(1) 191 | return x < 100, nil 192 | }) 193 | 194 | th.ExpectNoError(t, 
err) // error was swallowed by early exit 195 | th.ExpectValue(t, ok, false) 196 | if cnt.Load() > 900 { 197 | t.Errorf("early exit did not happen") 198 | } 199 | 200 | // wait until it drained 201 | time.Sleep(1 * time.Second) 202 | 203 | th.ExpectDrainedChan(t, in) 204 | if cnt.Load() > 900 { 205 | t.Errorf("extra calls to f were made") 206 | } 207 | }) 208 | 209 | t.Run(th.Name("error in func,false", n), func(t *testing.T) { 210 | in := FromChan(th.FromRange(0, 1000), nil) 211 | 212 | var cnt atomic.Int64 213 | ok, err := All(in, n, func(x int) (bool, error) { 214 | cnt.Add(1) 215 | if x == 500 { 216 | return false, fmt.Errorf("err500") 217 | } 218 | return x < 100, nil 219 | }) 220 | 221 | th.ExpectNoError(t, err) // error was swallowed by early exit 222 | th.ExpectValue(t, ok, false) 223 | if cnt.Load() > 900 { 224 | t.Errorf("early exit did not happen") 225 | } 226 | 227 | // wait until it drained 228 | time.Sleep(1 * time.Second) 229 | 230 | th.ExpectDrainedChan(t, in) 231 | if cnt.Load() > 900 { 232 | t.Errorf("extra calls to f were made") 233 | } 234 | }) 235 | 236 | //--- 237 | 238 | t.Run(th.Name("no errors,true", n), func(t *testing.T) { 239 | in := FromChan(th.FromRange(0, 1000), nil) 240 | 241 | var cnt atomic.Int64 242 | ok, err := All(in, n, func(x int) (bool, error) { 243 | cnt.Add(1) 244 | return x < 10000, nil 245 | }) 246 | 247 | th.ExpectNoError(t, err) 248 | th.ExpectValue(t, ok, true) 249 | 250 | th.ExpectDrainedChan(t, in) 251 | }) 252 | 253 | t.Run(th.Name("error in input,true", n), func(t *testing.T) { 254 | in := FromChan(th.FromRange(0, 1000), nil) 255 | in = replaceWithError(in, 500, fmt.Errorf("err500")) 256 | 257 | var cnt atomic.Int64 258 | _, err := All(in, n, func(x int) (bool, error) { 259 | cnt.Add(1) 260 | return x < 10000, nil 261 | }) 262 | 263 | th.ExpectError(t, err, "err500") 264 | if cnt.Load() > 900 { 265 | t.Errorf("early exit did not happen") 266 | } 267 | 268 | // wait until it drained 269 | time.Sleep(1 * time.Second) 270 | 271 | th.ExpectDrainedChan(t, in) 272 | if cnt.Load() > 900 { 273 | t.Errorf("extra calls to f were made") 274 | } 275 | }) 276 | 277 | t.Run(th.Name("error in func,true", n), func(t *testing.T) { 278 | in := FromChan(th.FromRange(0, 1000), nil) 279 | 280 | var cnt atomic.Int64 281 | _, err := All(in, n, func(x int) (bool, error) { 282 | cnt.Add(1) 283 | if x == 500 { 284 | return false, fmt.Errorf("err500") 285 | } 286 | return x < 10000, nil 287 | }) 288 | 289 | th.ExpectError(t, err, "err500") 290 | if cnt.Load() > 900 { 291 | t.Errorf("early exit did not happen") 292 | } 293 | 294 | // wait until it drained 295 | time.Sleep(1 * time.Second) 296 | 297 | th.ExpectDrainedChan(t, in) 298 | if cnt.Load() > 900 { 299 | t.Errorf("extra calls to f were made") 300 | } 301 | }) 302 | 303 | } 304 | 305 | } 306 | -------------------------------------------------------------------------------- /transform_test.go: -------------------------------------------------------------------------------- 1 | package rill 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "testing" 7 | 8 | "github.com/destel/rill/internal/th" 9 | ) 10 | 11 | func universalMap[A, B any](ord bool, in <-chan Try[A], n int, f func(A) (B, error)) <-chan Try[B] { 12 | if ord { 13 | return OrderedMap(in, n, f) 14 | } 15 | return Map(in, n, f) 16 | } 17 | 18 | func TestMap(t *testing.T) { 19 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 20 | for _, n := range []int{1, 5} { 21 | 22 | t.Run(th.Name("nil", n), func(t *testing.T) { 23 | out := universalMap(ord, nil, n, 
func(x int) (int, error) { return x, nil }) 24 | th.ExpectValue(t, out, nil) 25 | }) 26 | 27 | t.Run(th.Name("correctness", n), func(t *testing.T) { 28 | in := FromChan(th.FromRange(0, 20), nil) 29 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 30 | 31 | out := universalMap(ord, in, n, func(x int) (string, error) { 32 | if x == 5 { 33 | return "", fmt.Errorf("err05") 34 | } 35 | if x == 6 { 36 | return "", fmt.Errorf("err06") 37 | } 38 | 39 | return fmt.Sprintf("%03d", x), nil 40 | }) 41 | 42 | outSlice, errSlice := toSliceAndErrors(out) 43 | 44 | expectedSlice := make([]string, 0, 20) 45 | for i := 0; i < 20; i++ { 46 | if i == 5 || i == 6 || i == 15 { 47 | continue 48 | } 49 | expectedSlice = append(expectedSlice, fmt.Sprintf("%03d", i)) 50 | } 51 | 52 | sort.Strings(outSlice) 53 | sort.Strings(errSlice) 54 | 55 | th.ExpectSlice(t, outSlice, expectedSlice) 56 | th.ExpectSlice(t, errSlice, []string{"err05", "err06", "err15"}) 57 | }) 58 | 59 | t.Run(th.Name("ordering", n), func(t *testing.T) { 60 | in := FromChan(th.FromRange(0, 20000), nil) 61 | 62 | out := universalMap(ord, in, n, func(x int) (int, error) { 63 | if x%2 == 0 { 64 | return x, fmt.Errorf("err%06d", x) 65 | } 66 | 67 | return x, nil 68 | }) 69 | 70 | outSlice, errSlice := toSliceAndErrors(out) 71 | 72 | if ord || n == 1 { 73 | th.ExpectSorted(t, outSlice) 74 | th.ExpectSorted(t, errSlice) 75 | } else { 76 | th.ExpectUnsorted(t, outSlice) 77 | th.ExpectUnsorted(t, errSlice) 78 | } 79 | 80 | }) 81 | 82 | } 83 | }) 84 | } 85 | 86 | func universalFilter(ord bool, in <-chan Try[int], n int, f func(int) (bool, error)) <-chan Try[int] { 87 | if ord { 88 | return OrderedFilter(in, n, f) 89 | } 90 | return Filter(in, n, f) 91 | } 92 | 93 | func TestFilter(t *testing.T) { 94 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 95 | for _, n := range []int{1, 5} { 96 | 97 | t.Run(th.Name("nil", n), func(t *testing.T) { 98 | out := universalFilter(ord, nil, n, func(x int) (bool, error) { return true, nil }) 99 | th.ExpectValue(t, out, nil) 100 | }) 101 | 102 | t.Run(th.Name("correctness", n), func(t *testing.T) { 103 | in := FromChan(th.FromRange(0, 20), nil) 104 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 105 | 106 | out := universalFilter(ord, in, n, func(x int) (bool, error) { 107 | if x == 5 { 108 | return false, fmt.Errorf("err05") 109 | } 110 | if x == 6 { 111 | return true, fmt.Errorf("err06") 112 | } 113 | 114 | return x%2 == 0, nil 115 | }) 116 | 117 | outSlice, errSlice := toSliceAndErrors(out) 118 | 119 | expectedSlice := make([]int, 0, 20) 120 | for i := 0; i < 20; i++ { 121 | if i%2 == 1 || i == 5 || i == 6 || i == 15 { 122 | continue 123 | } 124 | expectedSlice = append(expectedSlice, i) 125 | } 126 | 127 | th.Sort(outSlice) 128 | th.Sort(errSlice) 129 | 130 | th.ExpectSlice(t, outSlice, expectedSlice) 131 | th.ExpectSlice(t, errSlice, []string{"err05", "err06", "err15"}) 132 | }) 133 | 134 | t.Run(th.Name("ordering", n), func(t *testing.T) { 135 | in := FromChan(th.FromRange(0, 20000), nil) 136 | 137 | out := universalFilter(ord, in, n, func(x int) (bool, error) { 138 | switch x % 3 { 139 | case 2: 140 | return false, fmt.Errorf("err%06d", x) 141 | case 1: 142 | return false, nil 143 | default: 144 | return true, nil 145 | 146 | } 147 | }) 148 | 149 | outSlice, errSlice := toSliceAndErrors(out) 150 | 151 | if ord || n == 1 { 152 | th.ExpectSorted(t, outSlice) 153 | th.ExpectSorted(t, errSlice) 154 | } else { 155 | th.ExpectUnsorted(t, outSlice) 156 | th.ExpectUnsorted(t, errSlice) 157 | } 158 | }) 159 
| 160 | } 161 | }) 162 | } 163 | 164 | func universalFilterMap[A, B any](ord bool, in <-chan Try[A], n int, f func(A) (B, bool, error)) <-chan Try[B] { 165 | if ord { 166 | return OrderedFilterMap(in, n, f) 167 | } 168 | return FilterMap(in, n, f) 169 | } 170 | 171 | func TestFilterMap(t *testing.T) { 172 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 173 | for _, n := range []int{1, 5} { 174 | 175 | t.Run(th.Name("nil", n), func(t *testing.T) { 176 | out := universalFilterMap(ord, nil, n, func(x int) (int, bool, error) { return x, true, nil }) 177 | th.ExpectValue(t, out, nil) 178 | }) 179 | 180 | t.Run(th.Name("correctness", n), func(t *testing.T) { 181 | in := FromChan(th.FromRange(0, 20), nil) 182 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 183 | 184 | out := universalFilterMap(ord, in, n, func(x int) (string, bool, error) { 185 | if x == 5 { 186 | return "", false, fmt.Errorf("err05") 187 | } 188 | if x == 6 { 189 | return "", true, fmt.Errorf("err06") 190 | } 191 | 192 | return fmt.Sprintf("%03d", x), x%2 == 0, nil 193 | }) 194 | 195 | outSlice, errSlice := toSliceAndErrors(out) 196 | 197 | expectedSlice := make([]string, 0, 20) 198 | for i := 0; i < 20; i++ { 199 | if i == 5 || i == 6 || i == 15 || i%2 == 1 { 200 | continue 201 | } 202 | expectedSlice = append(expectedSlice, fmt.Sprintf("%03d", i)) 203 | } 204 | 205 | sort.Strings(outSlice) 206 | sort.Strings(errSlice) 207 | 208 | th.ExpectSlice(t, outSlice, expectedSlice) 209 | th.ExpectSlice(t, errSlice, []string{"err05", "err06", "err15"}) 210 | }) 211 | 212 | t.Run(th.Name("ordering", n), func(t *testing.T) { 213 | in := FromChan(th.FromRange(0, 20000), nil) 214 | 215 | out := universalFilterMap(ord, in, n, func(x int) (int, bool, error) { 216 | switch x % 3 { 217 | case 2: 218 | return x, false, fmt.Errorf("err%06d", x) 219 | case 1: 220 | return x, false, nil 221 | default: 222 | return x, true, nil 223 | 224 | } 225 | }) 226 | 227 | outSlice, errSlice := toSliceAndErrors(out) 228 | 229 | if ord || n == 1 { 230 | th.ExpectSorted(t, outSlice) 231 | th.ExpectSorted(t, errSlice) 232 | } else { 233 | th.ExpectUnsorted(t, outSlice) 234 | th.ExpectUnsorted(t, errSlice) 235 | } 236 | 237 | }) 238 | 239 | } 240 | }) 241 | } 242 | 243 | func universalFlatMap[A, B any](ord bool, in <-chan Try[A], n int, f func(A) <-chan Try[B]) <-chan Try[B] { 244 | if ord { 245 | return OrderedFlatMap(in, n, f) 246 | } 247 | return FlatMap(in, n, f) 248 | } 249 | 250 | func TestFlatMap(t *testing.T) { 251 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 252 | for _, n := range []int{1, 5} { 253 | 254 | t.Run(th.Name("nil", n), func(t *testing.T) { 255 | out := universalFlatMap(ord, nil, n, func(x int) <-chan Try[string] { return nil }) 256 | th.ExpectValue(t, out, nil) 257 | }) 258 | 259 | t.Run(th.Name("correctness", n), func(t *testing.T) { 260 | in := FromChan(th.FromRange(0, 20), nil) 261 | in = replaceWithError(in, 5, fmt.Errorf("err05")) 262 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 263 | 264 | out := universalFlatMap(ord, in, n, func(x int) <-chan Try[string] { 265 | return FromSlice([]string{ 266 | fmt.Sprintf("%03dA", x), 267 | fmt.Sprintf("%03dB", x), 268 | }, nil) 269 | }) 270 | 271 | outSlice, errSlice := toSliceAndErrors(out) 272 | 273 | expectedSlice := make([]string, 0, 20*2) 274 | for i := 0; i < 20; i++ { 275 | if i == 5 || i == 15 { 276 | continue 277 | } 278 | expectedSlice = append(expectedSlice, fmt.Sprintf("%03dA", i), fmt.Sprintf("%03dB", i)) 279 | } 280 | 281 | sort.Strings(outSlice) 282 | 
sort.Strings(errSlice) 283 | 284 | th.ExpectSlice(t, outSlice, expectedSlice) 285 | th.ExpectSlice(t, errSlice, []string{"err05", "err15"}) 286 | }) 287 | 288 | t.Run(th.Name("ordering", n), func(t *testing.T) { 289 | in := FromChan(th.FromRange(0, 20000), nil) 290 | in = OrderedMap(in, 1, func(x int) (int, error) { 291 | if x%2 == 0 { 292 | return x, fmt.Errorf("err%06d", x) 293 | } 294 | return x, nil 295 | }) 296 | 297 | out := universalFlatMap(ord, in, n, func(x int) <-chan Try[string] { 298 | return FromSlice([]string{ 299 | fmt.Sprintf("%06dA", x), 300 | fmt.Sprintf("%06dB", x), 301 | }, nil) 302 | }) 303 | 304 | outSlice, errSlice := toSliceAndErrors(out) 305 | 306 | if ord || n == 1 { 307 | th.ExpectSorted(t, outSlice) 308 | th.ExpectSorted(t, errSlice) 309 | } else { 310 | th.ExpectUnsorted(t, outSlice) 311 | th.ExpectUnsorted(t, errSlice) 312 | } 313 | }) 314 | 315 | } 316 | }) 317 | } 318 | 319 | func universalCatch(ord bool, in <-chan Try[int], n int, f func(error) error) <-chan Try[int] { 320 | if ord { 321 | return OrderedCatch(in, n, f) 322 | } 323 | return Catch(in, n, f) 324 | } 325 | 326 | func TestCatch(t *testing.T) { 327 | th.TestBothOrderings(t, func(t *testing.T, ord bool) { 328 | for _, n := range []int{1, 5} { 329 | t.Run(th.Name("nil", n), func(t *testing.T) { 330 | out := universalCatch(ord, nil, n, func(err error) error { return nil }) 331 | th.ExpectValue(t, out, nil) 332 | }) 333 | 334 | t.Run(th.Name("correctness", n), func(t *testing.T) { 335 | in := FromChan(th.FromRange(0, 20), nil) 336 | in = replaceWithError(in, 5, fmt.Errorf("err05")) 337 | in = replaceWithError(in, 10, fmt.Errorf("err10")) 338 | in = replaceWithError(in, 15, fmt.Errorf("err15")) 339 | 340 | out := universalCatch(ord, in, n, func(err error) error { 341 | if err.Error() == "err05" { 342 | return nil // handled 343 | } 344 | if err.Error() == "err10" { 345 | return fmt.Errorf("%w wrapped", err) // wrapped/replaced 346 | } 347 | 348 | return err // leave as is 349 | }) 350 | 351 | outSlice, errSlice := toSliceAndErrors(out) 352 | 353 | expectedSlice := make([]int, 0, 20) 354 | for i := 0; i < 20; i++ { 355 | if i == 5 || i == 10 || i == 15 { 356 | continue 357 | } 358 | expectedSlice = append(expectedSlice, i) 359 | } 360 | 361 | th.Sort(outSlice) 362 | th.Sort(errSlice) 363 | 364 | th.ExpectSlice(t, outSlice, expectedSlice) 365 | th.ExpectSlice(t, errSlice, []string{"err10 wrapped", "err15"}) 366 | }) 367 | 368 | t.Run(th.Name("ordering", n), func(t *testing.T) { 369 | in := FromChan(th.FromRange(0, 20000), nil) 370 | in = OrderedMap(in, 1, func(x int) (int, error) { 371 | if x%2 == 0 { 372 | return x, fmt.Errorf("err%06d", x) 373 | } 374 | return x, nil 375 | }) 376 | 377 | out := universalCatch(ord, in, n, func(err error) error { 378 | return fmt.Errorf("%w wrapped", err) 379 | }) 380 | 381 | outSlice, errSlice := toSliceAndErrors(out) 382 | 383 | if ord || n == 1 { 384 | th.ExpectSorted(t, outSlice) 385 | th.ExpectSorted(t, errSlice) 386 | } else { 387 | th.ExpectUnsorted(t, outSlice) 388 | th.ExpectUnsorted(t, errSlice) 389 | } 390 | }) 391 | 392 | } 393 | }) 394 | } 395 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rill [![GoDoc](https://pkg.go.dev/badge/github.com/destel/rill)](https://pkg.go.dev/github.com/destel/rill) [![Go Report Card](https://goreportcard.com/badge/github.com/destel/rill)](https://goreportcard.com/report/github.com/destel/rill) 
[![Coverage Status](https://coveralls.io/repos/github/destel/rill/badge.svg?branch=main)](https://coveralls.io/github/destel/rill?branch=main) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) 2 | 3 | Rill is a toolkit that brings composable concurrency to Go, making it easier to build concurrent programs from simple, reusable parts. 4 | It reduces boilerplate while preserving Go's natural channel-based model and backpressure behavior. 5 | 6 | ```bash 7 | go get -u github.com/destel/rill 8 | ``` 9 | 10 | 11 | ## Goals 12 | 13 | - **Make common tasks easier.** 14 | Rill provides a cleaner and safer way of solving common concurrency problems, such as parallel job execution or 15 | real-time event processing. 16 | It removes boilerplate and abstracts away the complexities of goroutine, channel, and error management. 17 | At the same time, developers retain full control over the concurrency level of all operations. 18 | 19 | - **Make concurrent code composable and clean.** 20 | Most functions in the library take Go channels as inputs and return new, transformed channels as outputs. 21 | This allows them to be chained in various ways to build reusable pipelines from simpler parts, 22 | similar to Unix pipes. 23 | As a result, concurrent programs become clear sequences of reusable operations. 24 | 25 | - **Centralize error handling.** 26 | Errors are automatically propagated through a pipeline and can be handled in a single place at the end. 27 | For more complex scenarios, Rill also provides tools to intercept and handle errors at any point in a pipeline. 28 | 29 | - **Simplify stream processing.** 30 | Thanks to Go channels, built-in functions can handle potentially infinite streams, processing items as they arrive. 31 | This makes Rill a convenient tool for real-time processing or handling large datasets that don't fit in memory. 32 | 33 | - **Provide solutions for advanced tasks.** 34 | Beyond basic operations, the library includes ready-to-use functions for batching, ordered fan-in, map-reduce, 35 | stream splitting, merging, and more. Pipelines, while usually linear, can have any cycle-free topology (DAG). 36 | 37 | - **Support custom extensions.** 38 | Since Rill operates on standard Go channels, it's easy to write custom functions compatible with the library. 39 | 40 | - **Keep it lightweight.** 41 | Rill has a small, type-safe, channel-based API, and zero dependencies, making it straightforward to integrate into existing projects. 42 | It's also lightweight in terms of resource usage, ensuring that the number of memory allocations and goroutines 43 | does not grow with the input size. 44 | 45 | 46 | ## Quick Start 47 | Let's look at a practical example: fetch users from an API, activate them, and save the changes back. 48 | It shows how to control concurrency at each step while keeping the code clean and manageable. 49 | **ForEach** returns on the first error, and context cancellation via defer stops all remaining fetches. 50 | 51 | 52 | [Try in Go playground ↗](https://goplay.tools/snippet/xN_1zaBzfkq) 53 | ```go 54 | func main() { 55 | ctx, cancel := context.WithCancel(context.Background()) 56 | defer cancel() 57 | 58 | // Convert a slice of user IDs into a channel 59 | ids := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 60 | 61 | // Read users from the API. 
62 | // Concurrency = 3 63 | users := rill.Map(ids, 3, func(id int) (*mockapi.User, error) { 64 | return mockapi.GetUser(ctx, id) 65 | }) 66 | 67 | // Activate users. 68 | // Concurrency = 2 69 | err := rill.ForEach(users, 2, func(u *mockapi.User) error { 70 | if u.IsActive { 71 | fmt.Printf("User %d is already active\n", u.ID) 72 | return nil 73 | } 74 | 75 | u.IsActive = true 76 | err := mockapi.SaveUser(ctx, u) 77 | if err != nil { 78 | return err 79 | } 80 | 81 | fmt.Printf("User saved: %+v\n", u) 82 | return nil 83 | }) 84 | 85 | // Handle errors 86 | fmt.Println("Error:", err) 87 | } 88 | ``` 89 | 90 | 91 | ## Batching 92 | Processing items in batches rather than individually can significantly improve performance in many scenarios, 93 | particularly when working with external services or databases. Batching reduces the number of queries and API calls, 94 | increases throughput, and typically lowers costs. 95 | 96 | To demonstrate batching, let's improve the previous example by using the API's bulk fetching capability. 97 | The **Batch** function transforms a stream of individual IDs into a stream of slices. This enables the use of `GetUsers` API 98 | to fetch multiple users in a single call, instead of making individual `GetUser` calls. 99 | 100 | 101 | 102 | [Try in Go playground ↗](https://goplay.tools/snippet/fpltOjeX-Le) 103 | ```go 104 | func main() { 105 | ctx, cancel := context.WithCancel(context.Background()) 106 | defer cancel() 107 | 108 | // Convert a slice of user IDs into a channel 109 | ids := rill.FromSlice([]int{ 110 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 111 | 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 112 | }, nil) 113 | 114 | // Group IDs into batches of 5 115 | idBatches := rill.Batch(ids, 5, -1) 116 | 117 | // Bulk fetch users from the API 118 | // Concurrency = 3 119 | userBatches := rill.Map(idBatches, 3, func(ids []int) ([]*mockapi.User, error) { 120 | return mockapi.GetUsers(ctx, ids) 121 | }) 122 | 123 | // Transform the stream of batches back into a flat stream of users 124 | users := rill.Unbatch(userBatches) 125 | 126 | // Activate users. 127 | // Concurrency = 2 128 | err := rill.ForEach(users, 2, func(u *mockapi.User) error { 129 | if u.IsActive { 130 | fmt.Printf("User %d is already active\n", u.ID) 131 | return nil 132 | } 133 | 134 | u.IsActive = true 135 | err := mockapi.SaveUser(ctx, u) 136 | if err != nil { 137 | return err 138 | } 139 | 140 | fmt.Printf("User saved: %+v\n", u) 141 | return nil 142 | }) 143 | 144 | // Handle errors 145 | fmt.Println("Error:", err) 146 | } 147 | ``` 148 | 149 | 150 | ## Real-Time Batching 151 | Real-world applications often need to handle events or data that arrives at unpredictable rates. While batching is still 152 | desirable for efficiency, waiting to collect a full batch might introduce unacceptable delays when 153 | the input stream becomes slow or sparse. 154 | 155 | Rill solves this with timeout-based batching: batches are emitted either when they're full or after a specified timeout, 156 | whichever comes first. This approach ensures optimal batch sizes during high load while maintaining responsiveness during quiet periods. 157 | 158 | Consider an application that needs to update users' _last_active_at_ timestamps in a database. The function responsible 159 | for this - `UpdateUserTimestamp` can be called concurrently, at unpredictable rates, and from different parts of the application. 
160 | Performing all these updates individually may create too many concurrent queries, potentially overwhelming the database. 161 | 162 | In the example below, the updates are queued into `userIDsToUpdate` channel and then grouped into batches of up to 5 items, 163 | with each batch sent to the database as a single query. 164 | The **Batch** function is used with a timeout of 100ms, ensuring zero latency during high load, 165 | and up to 100ms latency with smaller batches during quiet periods. 166 | 167 | [Try in Go playground ↗](https://goplay.tools/snippet/w0xsLilX1ca) 168 | ```go 169 | func main() { 170 | // Start the background worker that processes the updates 171 | go updateUserTimestampWorker() 172 | 173 | // Do some updates. They'll be automatically grouped into 174 | // batches: [1,2,3,4,5], [6,7], [8] 175 | UpdateUserTimestamp(1) 176 | UpdateUserTimestamp(2) 177 | UpdateUserTimestamp(3) 178 | UpdateUserTimestamp(4) 179 | UpdateUserTimestamp(5) 180 | UpdateUserTimestamp(6) 181 | UpdateUserTimestamp(7) 182 | time.Sleep(500 * time.Millisecond) // simulate sparse updates 183 | UpdateUserTimestamp(8) 184 | } 185 | 186 | // This is the queue of user IDs to update. 187 | var userIDsToUpdate = make(chan int) 188 | 189 | // UpdateUserTimestamp is the public API for updating the last_active_at column in the users table 190 | func UpdateUserTimestamp(userID int) { 191 | userIDsToUpdate <- userID 192 | } 193 | 194 | // This is a background worker that sends queued updates to the database in batches. 195 | // For simplicity, there are no retries, error handling and synchronization 196 | func updateUserTimestampWorker() { 197 | 198 | ids := rill.FromChan(userIDsToUpdate, nil) 199 | 200 | idBatches := rill.Batch(ids, 5, 100*time.Millisecond) 201 | 202 | _ = rill.ForEach(idBatches, 1, func(batch []int) error { 203 | fmt.Printf("Executed: UPDATE users SET last_active_at = NOW() WHERE id IN (%v)\n", batch) 204 | return nil 205 | }) 206 | } 207 | ``` 208 | 209 | 210 | 211 | ## Errors, Termination and Contexts 212 | Error handling can be non-trivial in concurrent applications. Rill simplifies this by providing a structured approach to the problem. 213 | Pipelines typically consist of a sequence of non-blocking channel transformations, followed by a blocking stage that returns a final result and an error. 214 | The general rule is: any error occurring anywhere in a pipeline is propagated down to the final stage, 215 | where it's caught by some blocking function and returned to the caller. 216 | 217 | Rill provides a wide selection of blocking functions. Here are some commonly used ones: 218 | 219 | - **ForEach:** Concurrently applies a user function to each item in the stream. 220 | [Example](https://pkg.go.dev/github.com/destel/rill#example-ForEach) 221 | - **ToSlice:** Collects all stream items into a slice. 222 | [Example](https://pkg.go.dev/github.com/destel/rill#example-ToSlice) 223 | - **First:** Returns the first item or error encountered in the stream and discards the rest 224 | [Example](https://pkg.go.dev/github.com/destel/rill#example-First) 225 | - **Reduce:** Concurrently reduces the stream to a single value, using a user provided reducer function. 226 | [Example](https://pkg.go.dev/github.com/destel/rill#example-Reduce) 227 | - **All:** Concurrently checks if all items in the stream satisfy a user provided condition. 
228 | [Example](https://pkg.go.dev/github.com/destel/rill#example-All) 229 | - **Err:** Returns the first error encountered in the stream or nil, and discards the rest of the stream. 230 | [Example](https://pkg.go.dev/github.com/destel/rill#example-Err) 231 | 232 | 233 | All blocking functions share a common behavior. When they terminate early (before reaching the end of the input stream or when an error occurs), 234 | they return immediately but spawn a background goroutine that discards the remaining items from the input channel. This prevents goroutine leaks by ensuring that 235 | all goroutines feeding the stream are allowed to complete. 236 | 237 | Rill is context-agnostic, meaning that it does not enforce any specific context usage. 238 | However, it's recommended to make user-defined pipeline stages context-aware. 239 | This is especially important for the initial stage, as it allows to stop feeding the pipeline with new items after the context cancellation. 240 | In practice the first stage is often naturally context-aware through Go's standard APIs for databases, HTTP clients, and other external sources. 241 | 242 | In the example below the `CheckAllUsersExist` function uses several concurrent workers to check if all users 243 | from the given list exist. When an error occurs (like a non-existent user), the function returns that error 244 | and cancels the context, which in turn stops all remaining user fetches. 245 | 246 | [Try in Go playground ↗](https://goplay.tools/snippet/AVigyK2JFLC) 247 | ```go 248 | func main() { 249 | ctx := context.Background() 250 | 251 | // ID 999 doesn't exist, so fetching will stop after hitting it. 252 | err := CheckAllUsersExist(ctx, 3, []int{1, 2, 3, 4, 5, 999, 7, 8, 9, 10, 11, 12, 13, 14, 15}) 253 | fmt.Printf("Check result: %v\n", err) 254 | } 255 | 256 | // CheckAllUsersExist uses several concurrent workers to check if all users with given IDs exist. 257 | func CheckAllUsersExist(ctx context.Context, concurrency int, ids []int) error { 258 | // Create new context that will be canceled when this function returns 259 | ctx, cancel := context.WithCancel(ctx) 260 | defer cancel() 261 | 262 | // Convert the slice into a stream 263 | idsStream := rill.FromSlice(ids, nil) 264 | 265 | // Fetch users concurrently. 266 | users := rill.Map(idsStream, concurrency, func(id int) (*mockapi.User, error) { 267 | u, err := mockapi.GetUser(ctx, id) 268 | if err != nil { 269 | return nil, fmt.Errorf("failed to fetch user %d: %w", id, err) 270 | } 271 | 272 | fmt.Printf("Fetched user %d\n", id) 273 | return u, nil 274 | }) 275 | 276 | // Return the first error (if any) and cancel remaining fetches via context 277 | return rill.Err(users) 278 | } 279 | ``` 280 | 281 | In the example above only the second stage (`mockapi.GetUser`) of the pipeline is context-aware. 282 | **FromSlice** works well here since the input is small, iteration is fast and context cancellation prevents expensive API calls regardless. 283 | The following code demonstrates how to replace **FromSlice** with **Generate** when full context awareness becomes important. 
284 | 285 | ```go 286 | idsStream := rill.Generate(func(send func(int), sendErr func(error)) { 287 | for _, id := range ids { 288 | if ctx.Err() != nil { 289 | return 290 | } 291 | send(id) 292 | } 293 | }) 294 | ``` 295 | 296 | 297 | 298 | ## Order Preservation (Ordered Fan-In) 299 | Concurrent processing can boost performance, but since tasks take different amounts of time to complete, 300 | the results' order usually differs from the input order. While out-of-order results are acceptable in many scenarios, 301 | some cases require preserving the original order. This seemingly simple problem is deceptively challenging to solve correctly. 302 | 303 | To address this, Rill provides ordered versions of its core functions, such as **OrderedMap** or **OrderedFilter**. 304 | These functions perform additional synchronization under the hood to ensure that if value **x** precedes value **y** in the input stream, 305 | then **f(x)** will precede **f(y)** in the output. 306 | 307 | Here's a practical example: finding the first occurrence of a specific string among 1000 large files hosted online. 308 | Downloading all files at once would consume too much memory, processing them sequentially would be too slow, 309 | and traditional concurrency patterns do not preserve the order of files, making it challenging to find the first match. 310 | 311 | The combination of **OrderedFilter** and **First** functions solves this elegantly, 312 | while downloading and keeping in memory at most 5 files at a time. **First** returns on the first match, 313 | this triggers the context cancellation via defer, stopping URL generation and file downloads. 314 | 315 | [Try in Go playground ↗](https://goplay.tools/snippet/UuuV2t5xbN2) 316 | 317 | ```go 318 | func main() { 319 | ctx, cancel := context.WithCancel(context.Background()) 320 | defer cancel() 321 | 322 | // The string to search for in the downloaded files 323 | needle := []byte("26") 324 | 325 | // Generate a stream of URLs from https://example.com/file-0.txt 326 | // to https://example.com/file-999.txt 327 | // Stop generating URLs if the context is canceled 328 | urls := rill.Generate(func(send func(string), sendErr func(error)) { 329 | for i := 0; i < 1000 && ctx.Err() == nil; i++ { 330 | send(fmt.Sprintf("https://example.com/file-%d.txt", i)) 331 | } 332 | }) 333 | 334 | // Download and process the files 335 | // At most 5 files are downloaded and held in memory at the same time 336 | matchedUrls := rill.OrderedFilter(urls, 5, func(url string) (bool, error) { 337 | fmt.Println("Downloading:", url) 338 | 339 | content, err := mockapi.DownloadFile(ctx, url) 340 | if err != nil { 341 | return false, err 342 | } 343 | 344 | // keep only URLs of files that contain the needle 345 | return bytes.Contains(content, needle), nil 346 | }) 347 | 348 | // Find the first matched URL 349 | firstMatchedUrl, found, err := rill.First(matchedUrls) 350 | if err != nil { 351 | fmt.Println("Error:", err) 352 | return 353 | } 354 | 355 | // Print the result 356 | if found { 357 | fmt.Println("Found in:", firstMatchedUrl) 358 | } else { 359 | fmt.Println("Not found") 360 | } 361 | } 362 | ``` 363 | 364 | 365 | ## Parallel Streaming and FlatMap 366 | Sometimes operations that appear inherently sequential can be parallelized by partitioning the problem space. 367 | This can dramatically speed up data processing by allowing multiple streams to work concurrently instead of waiting 368 | for each to complete sequentially. 
369 | 370 | **FlatMap** is particularly powerful for this pattern. It transforms each input item into its own stream, then merges 371 | all these streams together, giving you full control over the level of concurrency. 372 | 373 | In the example below, **FlatMap** transforms each department into a stream of users, then merges these streams into one. 374 | Like other Rill functions, **FlatMap** gives full control over concurrency. 375 | In this particular case the concurrency level is 3, meaning that users are fetched from at most 3 departments at the same time. 376 | 377 | Additionally, this example demonstrates how to write a reusable streaming wrapper over paginated API calls - the `StreamUsers` function. 378 | This wrapper can be useful both on its own and as part of larger pipelines. 379 | 380 | [Try in Go playground ↗](https://goplay.tools/snippet/ckenCrDV3eN) 381 | ```go 382 | func main() { 383 | ctx, cancel := context.WithCancel(context.Background()) 384 | defer cancel() 385 | 386 | // Start with a stream of department names 387 | departments := rill.FromSlice([]string{"IT", "Finance", "Marketing", "Support", "Engineering"}, nil) 388 | 389 | // Stream users from all departments concurrently. 390 | // At most 3 departments at the same time. 391 | users := rill.FlatMap(departments, 3, func(department string) <-chan rill.Try[*mockapi.User] { 392 | return StreamUsers(ctx, &mockapi.UserQuery{Department: department}) 393 | }) 394 | 395 | // Print the users from the combined stream 396 | err := rill.ForEach(users, 1, func(user *mockapi.User) error { 397 | fmt.Printf("%+v\n", user) 398 | return nil 399 | }) 400 | fmt.Println("Error:", err) 401 | } 402 | 403 | // StreamUsers is a reusable streaming wrapper around the mockapi.ListUsers function. 404 | // It iterates through all listing pages and uses [Generate] to simplify sending users and errors to the resulting stream. 405 | // This function is useful both on its own and as part of larger pipelines. 406 | func StreamUsers(ctx context.Context, query *mockapi.UserQuery) <-chan rill.Try[*mockapi.User] { 407 | return rill.Generate(func(send func(*mockapi.User), sendErr func(error)) { 408 | var currentQuery mockapi.UserQuery 409 | if query != nil { 410 | currentQuery = *query 411 | } 412 | 413 | for page := 0; ; page++ { 414 | currentQuery.Page = page 415 | 416 | users, err := mockapi.ListUsers(ctx, ¤tQuery) 417 | if err != nil { 418 | sendErr(err) 419 | return 420 | } 421 | 422 | if len(users) == 0 { 423 | break 424 | } 425 | 426 | for _, user := range users { 427 | send(user) 428 | } 429 | } 430 | }) 431 | } 432 | ``` 433 | 434 | **Note:** Starting from Go 1.24, thanks to generic type aliases, the return type of the `StreamUsers` function 435 | can optionally be simplified to `rill.Stream[*mockapi.User]` 436 | 437 | ```go 438 | func StreamUsers(ctx context.Context, query *mockapi.UserQuery) rill.Stream[*mockapi.User] { 439 | ... 440 | } 441 | ``` 442 | 443 | 444 | ## Go 1.23 Iterators 445 | Starting from Go 1.23, the language added *range-over-function* feature, allowing users to define custom iterators 446 | for use in for-range loops. This feature enables Rill to integrate seamlessly with existing iterator-based functions 447 | in the standard library and third-party packages. 448 | 449 | Rill provides **FromSeq** and **FromSeq2** functions to convert an iterator into a stream, 450 | and **ToSeq2** function to convert a stream back into an iterator. 
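For illustration, here is a minimal sketch of the opposite direction: converting an iterator into a stream and processing it concurrently. The `words` iterator below is a made-up source, and the exact **FromSeq2** signature (assumed here to take an `iter.Seq2[T, error]`) is best checked in the package documentation.

```go
// words is a hypothetical iterator that yields a few strings and no errors.
func words() iter.Seq2[string, error] {
	return func(yield func(string, error) bool) {
		for _, w := range []string{"foo", "bar", "baz"} {
			if !yield(w, nil) {
				return
			}
		}
	}
}

func main() {
	// Convert the iterator into a stream
	in := rill.FromSeq2(words())

	// Transform the values
	// Concurrency = 3
	upper := rill.Map(in, 3, func(s string) (string, error) {
		return strings.ToUpper(s), nil
	})

	// Collect the results into a slice
	res, err := rill.ToSlice(upper)
	fmt.Println(res, err)
}
```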
451 | 452 | **ToSeq2** can be a good alternative to **ForEach** when concurrency is not needed. 453 | It gives more control and performs all necessary cleanup and draining, even if the loop is terminated early using *break* or *return*. 454 | 455 | [Try in Go playground ↗](https://goplay.tools/snippet/M8B0xJj8btk) 456 | 457 | ```go 458 | func main() { 459 | // Convert a slice of numbers into a stream 460 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 461 | 462 | // Transform each number 463 | // Concurrency = 3 464 | squares := rill.Map(numbers, 3, func(x int) (int, error) { 465 | return square(x), nil 466 | }) 467 | 468 | // Convert the stream into an iterator and use for-range to print the results 469 | for val, err := range rill.ToSeq2(squares) { 470 | if err != nil { 471 | fmt.Println("Error:", err) 472 | break // cleanup is done regardless of early exit 473 | } 474 | fmt.Printf("%+v\n", val) 475 | } 476 | } 477 | ``` 478 | 479 | 480 | ## Testing Strategy 481 | Rill has a test coverage of over 95%, with testing focused on: 482 | - **Correctness**: ensuring that functions produce accurate results at different levels of concurrency 483 | - **Concurrency**: confirming that correct number of goroutines is spawned and utilized 484 | - **Ordering**: ensuring that ordered versions of functions preserve the order, while basic versions do not 485 | 486 | 487 | ## Blog Posts 488 | Technical articles exploring different aspects and applications of Rill's concurrency patterns: 489 | - [Real-Time Batching in Go](https://destel.dev/blog/real-time-batching-in-go) 490 | - [Parallel Streaming Pattern in Go: How to Scan Large S3 or GCS Buckets Significantly Faster](https://destel.dev/blog/fast-listing-of-files-from-s3-gcs-and-other-object-storages) 491 | 492 | 493 | ## Contributing 494 | Thank you for your interest in improving Rill! Before submitting your pull request, please consider: 495 | 496 | - Focus on generic, widely applicable solutions 497 | - Consider use cases. Try to avoid highly specialized features that could be separate packages 498 | - Keep the API surface clean and focused 499 | - Try to avoid adding functions that can be easily misused 500 | - Avoid external dependencies 501 | - Add tests and documentation 502 | - For major changes, prefer opening an issue first to discuss the approach 503 | 504 | For bug reports and feature requests, please include a clear description and minimal example when possible. 505 | -------------------------------------------------------------------------------- /example_test.go: -------------------------------------------------------------------------------- 1 | package rill_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "math/rand" 9 | "regexp" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "github.com/destel/rill" 15 | "github.com/destel/rill/mockapi" 16 | ) 17 | 18 | // --- Package examples --- 19 | 20 | // This example demonstrates a Rill pipeline that fetches users from an API, 21 | // updates their status to active and saves them back. 22 | // Both operations are performed concurrently. 23 | // [ForEach] returns on the first error, and context cancellation via defer stops all remaining fetches. 24 | func Example() { 25 | ctx, cancel := context.WithCancel(context.Background()) 26 | defer cancel() 27 | 28 | // Convert a slice of user IDs into a stream 29 | ids := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 30 | 31 | // Read users from the API. 
32 | 	// Concurrency = 3
33 | 	users := rill.Map(ids, 3, func(id int) (*mockapi.User, error) {
34 | 		return mockapi.GetUser(ctx, id)
35 | 	})
36 | 
37 | 	// Activate users.
38 | 	// Concurrency = 2
39 | 	err := rill.ForEach(users, 2, func(u *mockapi.User) error {
40 | 		if u.IsActive {
41 | 			fmt.Printf("User %d is already active\n", u.ID)
42 | 			return nil
43 | 		}
44 | 
45 | 		u.IsActive = true
46 | 		err := mockapi.SaveUser(ctx, u)
47 | 		if err != nil {
48 | 			return err
49 | 		}
50 | 
51 | 		fmt.Printf("User saved: %+v\n", u)
52 | 		return nil
53 | 	})
54 | 
55 | 	// Handle errors
56 | 	fmt.Println("Error:", err)
57 | }
58 | 
59 | // This example demonstrates a Rill pipeline that fetches users from an API,
60 | // updates their status to active, and saves them back.
61 | // Users are fetched concurrently and in batches to reduce the number of API calls.
62 | func Example_batching() {
63 | 	ctx, cancel := context.WithCancel(context.Background())
64 | 	defer cancel()
65 | 
66 | 	// Convert a slice of user IDs into a stream
67 | 	ids := rill.FromSlice([]int{
68 | 		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
69 | 		21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
70 | 	}, nil)
71 | 
72 | 	// Group IDs into batches of 5
73 | 	idBatches := rill.Batch(ids, 5, -1)
74 | 
75 | 	// Bulk fetch users from the API
76 | 	// Concurrency = 3
77 | 	userBatches := rill.Map(idBatches, 3, func(ids []int) ([]*mockapi.User, error) {
78 | 		return mockapi.GetUsers(ctx, ids)
79 | 	})
80 | 
81 | 	// Transform the stream of batches back into a flat stream of users
82 | 	users := rill.Unbatch(userBatches)
83 | 
84 | 	// Activate users.
85 | 	// Concurrency = 2
86 | 	err := rill.ForEach(users, 2, func(u *mockapi.User) error {
87 | 		if u.IsActive {
88 | 			fmt.Printf("User %d is already active\n", u.ID)
89 | 			return nil
90 | 		}
91 | 
92 | 		u.IsActive = true
93 | 		err := mockapi.SaveUser(ctx, u)
94 | 		if err != nil {
95 | 			return err
96 | 		}
97 | 
98 | 		fmt.Printf("User saved: %+v\n", u)
99 | 		return nil
100 | 	})
101 | 
102 | 	// Handle errors
103 | 	fmt.Println("Error:", err)
104 | }
105 | 
106 | // This example demonstrates how batching can be used to group similar concurrent database updates into a single query.
107 | // The UpdateUserTimestamp function is used to update the last_active_at column in the users table. Updates are not
108 | // executed immediately, but are instead queued and then sent to the database in batches of up to 5.
109 | //
110 | // When updates are sparse, it can take some time to collect a full batch. In this case the [Batch] function
111 | // emits partial batches, ensuring that updates are delayed by at most 100ms.
112 | //
113 | // For simplicity, this example does not include retries, error handling, or synchronization
114 | func Example_batchingRealTime() {
115 | 	// Start the background worker that processes the updates
116 | 	go updateUserTimestampWorker()
117 | 
118 | 	// Do some updates. They'll be automatically grouped into
119 | 	// batches: [1,2,3,4,5], [6,7], [8]
120 | 	UpdateUserTimestamp(1)
121 | 	UpdateUserTimestamp(2)
122 | 	UpdateUserTimestamp(3)
123 | 	UpdateUserTimestamp(4)
124 | 	UpdateUserTimestamp(5)
125 | 	UpdateUserTimestamp(6)
126 | 	UpdateUserTimestamp(7)
127 | 	time.Sleep(500 * time.Millisecond) // simulate sparse updates
128 | 	UpdateUserTimestamp(8)
129 | 
130 | 	// Wait for the updates to be processed
131 | 	// In a real-world application, different synchronization mechanisms would be used.
132 | 	time.Sleep(1 * time.Second)
133 | }
134 | 
135 | // This is the queue of user IDs to update.
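// The channel is unbuffered, so each UpdateUserTimestamp call blocks until the background worker receives the ID.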
136 | var userIDsToUpdate = make(chan int)
137 | 
138 | // UpdateUserTimestamp is the public API for updating the last_active_at column in the users table
139 | func UpdateUserTimestamp(userID int) {
140 | 	userIDsToUpdate <- userID
141 | }
142 | 
143 | // This is a background worker that sends queued updates to the database in batches.
144 | // For simplicity, there are no retries, error handling, or synchronization
145 | func updateUserTimestampWorker() {
146 | 	// Convert the userIDsToUpdate channel into a stream
147 | 	ids := rill.FromChan(userIDsToUpdate, nil)
148 | 
149 | 	// Group IDs into batches of 5 for bulk processing
150 | 	// In case of sparse updates, we want to send them to the database no later than 100ms after they were queued.
151 | 	idBatches := rill.Batch(ids, 5, 100*time.Millisecond)
152 | 
153 | 	// Send updates to the database
154 | 	// Concurrency = 1 (this controls the max number of concurrent updates)
155 | 	_ = rill.ForEach(idBatches, 1, func(batch []int) error {
156 | 		fmt.Printf("Executed: UPDATE users SET last_active_at = NOW() WHERE id IN (%v)\n", batch)
157 | 		return nil
158 | 	})
159 | }
160 | 
161 | // This example demonstrates how to find the first file containing a specific string among 1000 large files
162 | // hosted online.
163 | //
164 | // Downloading all files at once would consume too much memory, while processing
165 | // them one-by-one would take too long. Traditional concurrency patterns do not preserve the order of files,
166 | // which would make it challenging to find the first match.
167 | //
168 | // The combination of [OrderedFilter] and [First] functions solves the problem,
169 | // while downloading and holding in memory at most 5 files at the same time.
170 | // [First] returns on the first match; this triggers the context cancellation via defer,
171 | // stopping URL generation and file downloads.
172 | func Example_ordering() {
173 | 	ctx, cancel := context.WithCancel(context.Background())
174 | 	defer cancel()
175 | 
176 | 	// The string to search for in the downloaded files
177 | 	needle := []byte("26")
178 | 
179 | 	// Generate a stream of URLs from https://example.com/file-0.txt
180 | 	// to https://example.com/file-999.txt
181 | 	// Stop generating URLs if the context is canceled
182 | 	urls := rill.Generate(func(send func(string), sendErr func(error)) {
183 | 		for i := 0; i < 1000 && ctx.Err() == nil; i++ {
184 | 			send(fmt.Sprintf("https://example.com/file-%d.txt", i))
185 | 		}
186 | 	})
187 | 
188 | 	// Download and process the files
189 | 	// At most 5 files are downloaded and held in memory at the same time
190 | 	matchedUrls := rill.OrderedFilter(urls, 5, func(url string) (bool, error) {
191 | 		fmt.Println("Downloading:", url)
192 | 
193 | 		content, err := mockapi.DownloadFile(ctx, url)
194 | 		if err != nil {
195 | 			return false, err
196 | 		}
197 | 
198 | 		// keep only URLs of files that contain the needle
199 | 		return bytes.Contains(content, needle), nil
200 | 	})
201 | 
202 | 	// Find the first matched URL
203 | 	firstMatchedUrl, found, err := rill.First(matchedUrls)
204 | 	if err != nil {
205 | 		fmt.Println("Error:", err)
206 | 		return
207 | 	}
208 | 
209 | 	// Print the result
210 | 	if found {
211 | 		fmt.Println("Found in:", firstMatchedUrl)
212 | 	} else {
213 | 		fmt.Println("Not found")
214 | 	}
215 | }
216 | 
217 | // This example demonstrates how to use the Fan-in and Fan-out patterns
218 | // to send messages through multiple servers concurrently.
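// Since all three Map stages read from the same input stream, each message is sent through exactly one server (fan-out).
// Merge then combines the results from all servers into a single stream (fan-in).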
219 | func Example_fanIn_FanOut() {
220 | 	// Convert a slice of messages into a stream
221 | 	messages := rill.FromSlice([]string{
222 | 		"message1", "message2", "message3", "message4", "message5",
223 | 		"message6", "message7", "message8", "message9", "message10",
224 | 	}, nil)
225 | 
226 | 	// Fan-out the messages to three servers
227 | 	results1 := rill.Map(messages, 2, func(message string) (string, error) {
228 | 		return message, sendMessage(message, "server1")
229 | 	})
230 | 
231 | 	results2 := rill.Map(messages, 2, func(message string) (string, error) {
232 | 		return message, sendMessage(message, "server2")
233 | 	})
234 | 
235 | 	results3 := rill.Map(messages, 2, func(message string) (string, error) {
236 | 		return message, sendMessage(message, "server3")
237 | 	})
238 | 
239 | 	// Fan-in the results from all servers into a single stream
240 | 	results := rill.Merge(results1, results2, results3)
241 | 
242 | 	// Handle errors
243 | 	err := rill.Err(results)
244 | 	fmt.Println("Error:", err)
245 | }
246 | 
247 | // Helper function that simulates sending a message through a server
248 | func sendMessage(message string, server string) error {
249 | 	randomSleep(500 * time.Millisecond) // simulate some additional work
250 | 	fmt.Printf("Sent through %s: %s\n", server, message)
251 | 	return nil
252 | }
253 | 
254 | // This example demonstrates using [FlatMap] to fetch users from multiple departments concurrently.
255 | // Additionally, it demonstrates how to write a reusable streaming wrapper over paginated API calls - the StreamUsers function.
256 | func Example_flatMap() {
257 | 	ctx, cancel := context.WithCancel(context.Background())
258 | 	defer cancel()
259 | 
260 | 	// Start with a stream of department names
261 | 	departments := rill.FromSlice([]string{"IT", "Finance", "Marketing", "Support", "Engineering"}, nil)
262 | 
263 | 	// Stream users from all departments concurrently.
264 | 	// At most 3 departments at the same time.
265 | 	users := rill.FlatMap(departments, 3, func(department string) <-chan rill.Try[*mockapi.User] {
266 | 		return StreamUsers(ctx, &mockapi.UserQuery{Department: department})
267 | 	})
268 | 
269 | 	// Print the users from the combined stream
270 | 	err := rill.ForEach(users, 1, func(user *mockapi.User) error {
271 | 		fmt.Printf("%+v\n", user)
272 | 		return nil
273 | 	})
274 | 	fmt.Println("Error:", err)
275 | }
276 | 
277 | // StreamUsers is a reusable streaming wrapper around the mockapi.ListUsers function.
278 | // It iterates through all listing pages and uses [Generate] to simplify sending users and errors to the resulting stream.
279 | // This function is useful both on its own and as part of larger pipelines.
280 | func StreamUsers(ctx context.Context, query *mockapi.UserQuery) <-chan rill.Try[*mockapi.User] {
281 | 	return rill.Generate(func(send func(*mockapi.User), sendErr func(error)) {
282 | 		var currentQuery mockapi.UserQuery
283 | 		if query != nil {
284 | 			currentQuery = *query
285 | 		}
286 | 
287 | 		for page := 0; ; page++ {
288 | 			currentQuery.Page = page
289 | 
290 | 			users, err := mockapi.ListUsers(ctx, &currentQuery)
291 | 			if err != nil {
292 | 				sendErr(err)
293 | 				return
294 | 			}
295 | 
296 | 			if len(users) == 0 {
297 | 				break
298 | 			}
299 | 
300 | 			for _, user := range users {
301 | 				send(user)
302 | 			}
303 | 		}
304 | 	})
305 | }
306 | 
307 | // This example demonstrates how to gracefully stop a pipeline on the first error.
308 | // CheckAllUsersExist uses several concurrent workers and returns an error as soon as it encounters a non-existent user.
309 | // Such an early return triggers the context cancellation, which in turn stops all remaining user fetches.
310 | func Example_context() {
311 | 	ctx := context.Background()
312 | 
313 | 	// ID 999 doesn't exist, so fetching will stop after hitting it.
314 | 	err := CheckAllUsersExist(ctx, 3, []int{1, 2, 3, 4, 5, 999, 7, 8, 9, 10, 11, 12, 13, 14, 15})
315 | 	fmt.Printf("Check result: %v\n", err)
316 | }
317 | 
318 | // CheckAllUsersExist uses several concurrent workers to check whether all users with the given IDs exist.
319 | func CheckAllUsersExist(ctx context.Context, concurrency int, ids []int) error {
320 | 	// Create a new context that will be canceled when this function returns
321 | 	ctx, cancel := context.WithCancel(ctx)
322 | 	defer cancel()
323 | 
324 | 	// Convert the slice into a stream
325 | 	// Use Generate instead of FromSlice to make the first stage context-aware
326 | 	idsStream := rill.Generate(func(send func(int), sendErr func(error)) {
327 | 		for _, id := range ids {
328 | 			if ctx.Err() != nil {
329 | 				return
330 | 			}
331 | 			send(id)
332 | 		}
333 | 	})
334 | 
335 | 	// Fetch users concurrently.
336 | 	users := rill.Map(idsStream, concurrency, func(id int) (*mockapi.User, error) {
337 | 		u, err := mockapi.GetUser(ctx, id)
338 | 		if err != nil {
339 | 			return nil, fmt.Errorf("failed to fetch user %d: %w", id, err)
340 | 		}
341 | 
342 | 		fmt.Printf("Fetched user %d\n", id)
343 | 		return u, nil
344 | 	})
345 | 
346 | 	// Return the first error (if any) and cancel remaining fetches via context
347 | 	return rill.Err(users)
348 | }
349 | 
350 | // --- Function examples ---
351 | 
352 | func ExampleAll() {
353 | 	// Convert a slice of numbers into a stream
354 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
355 | 
356 | 	// Are all numbers prime?
357 | 	// Concurrency = 3
358 | 	ok, err := rill.All(numbers, 3, func(x int) (bool, error) {
359 | 		return isPrime(x), nil
360 | 	})
361 | 
362 | 	fmt.Println("Result:", ok)
363 | 	fmt.Println("Error:", err)
364 | }
365 | 
366 | func ExampleAny() {
367 | 	// Convert a slice of numbers into a stream
368 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
369 | 
370 | 	// Is there at least one prime number?
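	// Any can return early, as soon as the first matching item is found.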
371 | // Concurrency = 3 372 | ok, err := rill.Any(numbers, 3, func(x int) (bool, error) { 373 | return isPrime(x), nil 374 | }) 375 | 376 | fmt.Println("Result: ", ok) 377 | fmt.Println("Error: ", err) 378 | } 379 | 380 | // Also check out the package level examples to see Batch in action 381 | func ExampleBatch() { 382 | // Generate a stream of numbers 0 to 49, where a new number is emitted every 50ms 383 | numbers := make(chan rill.Try[int]) 384 | go func() { 385 | defer close(numbers) 386 | for i := 0; i < 50; i++ { 387 | numbers <- rill.Wrap(i, nil) 388 | time.Sleep(50 * time.Millisecond) 389 | } 390 | }() 391 | 392 | // Group numbers into batches of up to 5 393 | batches := rill.Batch(numbers, 5, 1*time.Second) 394 | 395 | printStream(batches) 396 | } 397 | 398 | func ExampleCatch() { 399 | // Convert a slice of strings into a stream 400 | strs := rill.FromSlice([]string{"1", "2", "3", "4", "5", "not a number 6", "7", "8", "9", "10"}, nil) 401 | 402 | // Convert strings to ints 403 | // Concurrency = 3 404 | ids := rill.Map(strs, 3, func(s string) (int, error) { 405 | randomSleep(500 * time.Millisecond) // simulate some additional work 406 | return strconv.Atoi(s) 407 | }) 408 | 409 | // Catch and ignore number parsing errors 410 | // Concurrency = 2 411 | ids = rill.Catch(ids, 2, func(err error) error { 412 | if errors.Is(err, strconv.ErrSyntax) { 413 | return nil // Ignore this error 414 | } 415 | return err 416 | }) 417 | 418 | // No error will be printed 419 | printStream(ids) 420 | } 421 | 422 | // The same example as for the [Catch], but using ordered versions of functions. 423 | func ExampleOrderedCatch() { 424 | // Convert a slice of strings into a stream 425 | strs := rill.FromSlice([]string{"1", "2", "3", "4", "5", "not a number 6", "7", "8", "9", "10"}, nil) 426 | 427 | // Convert strings to ints 428 | // Concurrency = 3; Ordered 429 | ids := rill.OrderedMap(strs, 3, func(s string) (int, error) { 430 | randomSleep(500 * time.Millisecond) // simulate some additional work 431 | return strconv.Atoi(s) 432 | }) 433 | 434 | // Catch and ignore number parsing errors 435 | // Concurrency = 2; Ordered 436 | ids = rill.OrderedCatch(ids, 2, func(err error) error { 437 | if errors.Is(err, strconv.ErrSyntax) { 438 | return nil // Ignore this error 439 | } 440 | return err 441 | }) 442 | 443 | // No error will be printed 444 | printStream(ids) 445 | } 446 | 447 | func ExampleErr() { 448 | ctx := context.Background() 449 | 450 | // Convert a slice of users into a stream 451 | users := rill.FromSlice([]*mockapi.User{ 452 | {ID: 1, Name: "foo", Age: 25}, 453 | {ID: 2, Name: "bar", Age: 30}, 454 | {ID: 3}, // empty username is invalid 455 | {ID: 4, Name: "baz", Age: 35}, 456 | {ID: 5, Name: "qux", Age: 26}, 457 | {ID: 6, Name: "quux", Age: 27}, 458 | }, nil) 459 | 460 | // Save users. 
Use struct{} as a result type
461 | 	// Concurrency = 2
462 | 	results := rill.Map(users, 2, func(user *mockapi.User) (struct{}, error) {
463 | 		return struct{}{}, mockapi.SaveUser(ctx, user)
464 | 	})
465 | 
466 | 	// We only need to know whether all users were saved successfully
467 | 	err := rill.Err(results)
468 | 	fmt.Println("Error:", err)
469 | }
470 | 
471 | func ExampleFilter() {
472 | 	// Convert a slice of numbers into a stream
473 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
474 | 
475 | 	// Keep only prime numbers
476 | 	// Concurrency = 3
477 | 	primes := rill.Filter(numbers, 3, func(x int) (bool, error) {
478 | 		return isPrime(x), nil
479 | 	})
480 | 
481 | 	printStream(primes)
482 | }
483 | 
484 | // The same example as for the [Filter], but using ordered versions of functions.
485 | func ExampleOrderedFilter() {
486 | 	// Convert a slice of numbers into a stream
487 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
488 | 
489 | 	// Keep only prime numbers
490 | 	// Concurrency = 3; Ordered
491 | 	primes := rill.OrderedFilter(numbers, 3, func(x int) (bool, error) {
492 | 		return isPrime(x), nil
493 | 	})
494 | 
495 | 	printStream(primes)
496 | }
497 | 
498 | func ExampleFilterMap() {
499 | 	// Convert a slice of numbers into a stream
500 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
501 | 
502 | 	// Keep only prime numbers and square them
503 | 	// Concurrency = 3
504 | 	squares := rill.FilterMap(numbers, 3, func(x int) (int, bool, error) {
505 | 		if !isPrime(x) {
506 | 			return 0, false, nil
507 | 		}
508 | 
509 | 		return x * x, true, nil
510 | 	})
511 | 
512 | 	printStream(squares)
513 | }
514 | 
515 | // The same example as for the [FilterMap], but using ordered versions of functions.
516 | func ExampleOrderedFilterMap() {
517 | 	// Convert a slice of numbers into a stream
518 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
519 | 
520 | 	// Keep only prime numbers and square them
521 | 	// Concurrency = 3; Ordered
522 | 	squares := rill.OrderedFilterMap(numbers, 3, func(x int) (int, bool, error) {
523 | 		if !isPrime(x) {
524 | 			return 0, false, nil
525 | 		}
526 | 
527 | 		return x * x, true, nil
528 | 	})
529 | 
530 | 	printStream(squares)
531 | }
532 | 
533 | func ExampleFirst() {
534 | 	// Convert a slice of numbers into a stream
535 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)
536 | 
537 | 	// Keep only the numbers divisible by 4
538 | 	// Concurrency = 3; Ordered
539 | 	divisibleBy4 := rill.OrderedFilter(numbers, 3, func(x int) (bool, error) {
540 | 		return x%4 == 0, nil
541 | 	})
542 | 
543 | 	// Get the first number divisible by 4
544 | 	first, ok, err := rill.First(divisibleBy4)
545 | 
546 | 	fmt.Println("Result:", first, ok)
547 | 	fmt.Println("Error:", err)
548 | }
549 | 
550 | func ExampleFlatMap() {
551 | 	// Convert a slice of numbers into a stream
552 | 	numbers := rill.FromSlice([]int{1, 2, 3, 4, 5}, nil)
553 | 
554 | 	// Replace each number in the input stream with three strings
555 | 	// Concurrency = 2
556 | 	result := rill.FlatMap(numbers, 2, func(x int) <-chan rill.Try[string] {
557 | 		randomSleep(500 * time.Millisecond) // simulate some additional work
558 | 
559 | 		return rill.FromSlice([]string{
560 | 			fmt.Sprintf("foo%d", x),
561 | 			fmt.Sprintf("bar%d", x),
562 | 			fmt.Sprintf("baz%d", x),
563 | 		}, nil)
564 | 	})
565 | 
566 | 	printStream(result)
567 | }
568 | 
569 | // The same example as for the [FlatMap], but using ordered versions of functions.
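// OrderedFlatMap still runs the callback concurrently, but the resulting substreams are emitted in the order of the input items.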
570 | func ExampleOrderedFlatMap() { 571 | // Convert a slice of numbers into a stream 572 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5}, nil) 573 | 574 | // Replace each number in the input stream with three strings 575 | // Concurrency = 2; Ordered 576 | result := rill.OrderedFlatMap(numbers, 2, func(x int) <-chan rill.Try[string] { 577 | randomSleep(500 * time.Millisecond) // simulate some additional work 578 | 579 | return rill.FromSlice([]string{ 580 | fmt.Sprintf("foo%d", x), 581 | fmt.Sprintf("bar%d", x), 582 | fmt.Sprintf("baz%d", x), 583 | }, nil) 584 | }) 585 | 586 | printStream(result) 587 | } 588 | 589 | func ExampleForEach() { 590 | // Convert a slice of numbers into a stream 591 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 592 | 593 | // Square each number and print the result 594 | // Concurrency = 3 595 | err := rill.ForEach(numbers, 3, func(x int) error { 596 | y := square(x) 597 | fmt.Println(y) 598 | return nil 599 | }) 600 | 601 | // Handle errors 602 | fmt.Println("Error:", err) 603 | } 604 | 605 | // There is no ordered version of the ForEach function. To achieve ordered processing, use concurrency set to 1. 606 | // If you need a concurrent and ordered ForEach, then do all processing with the [OrderedMap], 607 | // and then use ForEach with concurrency set to 1 at the final stage. 608 | func ExampleForEach_ordered() { 609 | // Convert a slice of numbers into a stream 610 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 611 | 612 | // Square each number 613 | // Concurrency = 3; Ordered 614 | squares := rill.OrderedMap(numbers, 3, func(x int) (int, error) { 615 | return square(x), nil 616 | }) 617 | 618 | // Print results. 619 | // Concurrency = 1; Ordered 620 | err := rill.ForEach(squares, 1, func(y int) error { 621 | fmt.Println(y) 622 | return nil 623 | }) 624 | 625 | // Handle errors 626 | fmt.Println("Error:", err) 627 | } 628 | 629 | // Generate a stream of URLs from https://example.com/file-0.txt to https://example.com/file-9.txt 630 | func ExampleGenerate() { 631 | urls := rill.Generate(func(send func(string), sendErr func(error)) { 632 | for i := 0; i < 10; i++ { 633 | send(fmt.Sprintf("https://example.com/file-%d.txt", i)) 634 | } 635 | }) 636 | 637 | printStream(urls) 638 | } 639 | 640 | // Generate an infinite stream of natural numbers (1, 2, 3, ...). 641 | // New numbers are sent to the stream every 500ms until the context is canceled 642 | func ExampleGenerate_context() { 643 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 644 | defer cancel() 645 | 646 | numbers := rill.Generate(func(send func(int), sendErr func(error)) { 647 | for i := 1; ctx.Err() == nil; i++ { 648 | send(i) 649 | time.Sleep(500 * time.Millisecond) 650 | } 651 | }) 652 | 653 | printStream(numbers) 654 | } 655 | 656 | func ExampleMap() { 657 | // Convert a slice of numbers into a stream 658 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 659 | 660 | // Transform each number 661 | // Concurrency = 3 662 | squares := rill.Map(numbers, 3, func(x int) (int, error) { 663 | return square(x), nil 664 | }) 665 | 666 | printStream(squares) 667 | } 668 | 669 | // The same example as for the [Map], but using ordered versions of functions. 
670 | func ExampleOrderedMap() { 671 | // Convert a slice of numbers into a stream 672 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 673 | 674 | // Transform each number 675 | // Concurrency = 3; Ordered 676 | squares := rill.OrderedMap(numbers, 3, func(x int) (int, error) { 677 | return square(x), nil 678 | }) 679 | 680 | printStream(squares) 681 | } 682 | 683 | func ExampleMapReduce() { 684 | var re = regexp.MustCompile(`\w+`) 685 | text := "Early morning brings early birds to the early market. Birds sing, the market buzzes, and the morning shines." 686 | 687 | // Convert a text into a stream of words 688 | words := rill.FromSlice(re.FindAllString(text, -1), nil) 689 | 690 | // Count the number of occurrences of each word 691 | mr, err := rill.MapReduce(words, 692 | // Map phase: Use the word as key and "1" as value 693 | // Concurrency = 3 694 | 3, func(word string) (string, int, error) { 695 | return strings.ToLower(word), 1, nil 696 | }, 697 | // Reduce phase: Sum all "1" values for the same key 698 | // Concurrency = 2 699 | 2, func(x, y int) (int, error) { 700 | return x + y, nil 701 | }, 702 | ) 703 | 704 | fmt.Println("Result:", mr) 705 | fmt.Println("Error:", err) 706 | } 707 | 708 | func ExampleMerge() { 709 | // Convert slices of numbers into streams 710 | numbers1 := rill.FromSlice([]int{1, 2, 3, 4, 5}, nil) 711 | numbers2 := rill.FromSlice([]int{6, 7, 8, 9, 10}, nil) 712 | numbers3 := rill.FromSlice([]int{11, 12}, nil) 713 | 714 | numbers := rill.Merge(numbers1, numbers2, numbers3) 715 | 716 | printStream(numbers) 717 | } 718 | 719 | func ExampleReduce() { 720 | // Convert a slice of numbers into a stream 721 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 722 | 723 | // Sum all numbers 724 | sum, ok, err := rill.Reduce(numbers, 3, func(a, b int) (int, error) { 725 | return a + b, nil 726 | }) 727 | 728 | fmt.Println("Result:", sum, ok) 729 | fmt.Println("Error:", err) 730 | } 731 | 732 | func ExampleToSlice() { 733 | // Convert a slice of numbers into a stream 734 | numbers := rill.FromSlice([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil) 735 | 736 | // Transform each number 737 | // Concurrency = 3; Ordered 738 | squares := rill.OrderedMap(numbers, 3, func(x int) (int, error) { 739 | return square(x), nil 740 | }) 741 | 742 | resultsSlice, err := rill.ToSlice(squares) 743 | 744 | fmt.Println("Result:", resultsSlice) 745 | fmt.Println("Error:", err) 746 | } 747 | 748 | func ExampleUnbatch() { 749 | // Create a stream of batches 750 | batches := rill.FromSlice([][]int{ 751 | {1, 2, 3}, 752 | {4, 5}, 753 | {6, 7, 8, 9}, 754 | {10}, 755 | }, nil) 756 | 757 | numbers := rill.Unbatch(batches) 758 | 759 | printStream(numbers) 760 | } 761 | 762 | // --- Helpers --- 763 | 764 | // helper function that checks if a number is prime 765 | // and simulates some additional work using sleep 766 | func isPrime(n int) bool { 767 | randomSleep(500 * time.Millisecond) // simulate some additional work 768 | 769 | if n < 2 { 770 | return false 771 | } 772 | for i := 2; i*i <= n; i++ { 773 | if n%i == 0 { 774 | return false 775 | } 776 | } 777 | return true 778 | } 779 | 780 | // helper function that squares the number 781 | // and simulates some additional work using sleep 782 | func square(x int) int { 783 | randomSleep(500 * time.Millisecond) // simulate some additional work 784 | return x * x 785 | } 786 | 787 | // printStream prints all items from a stream (one per line) and an error if any. 
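// It drains the stream using ForEach with concurrency 1, so items are printed in the order they arrive.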
788 | func printStream[A any](stream <-chan rill.Try[A]) { 789 | fmt.Println("Result:") 790 | err := rill.ForEach(stream, 1, func(x A) error { 791 | fmt.Printf("%+v\n", x) 792 | return nil 793 | }) 794 | fmt.Println("Error:", err) 795 | } 796 | 797 | func randomSleep(max time.Duration) { 798 | time.Sleep(time.Duration(rand.Intn(int(max)))) 799 | } 800 | --------------------------------------------------------------------------------