├── .envrc ├── .github ├── CODEOWNERS ├── workflows │ ├── build.yml │ └── automerge.yml └── dependabot.yml ├── go.mod ├── testdata └── fuzz │ ├── FuzzTick │ ├── eaae912ff48d75e8 │ └── db1d459b216861c8 │ └── Fuzz_Scaler │ ├── 555659aba42d18b7 │ ├── 62629b316805e69d │ └── b9bef74a2c85cbf3 ├── go.sum ├── .env.act ├── .goreleaser.yaml ├── .gitignore ├── helpers.go ├── .pre-commit-config.yaml ├── docs.go ├── helper_test.go ├── .cz.yaml ├── flake.nix ├── .secrets.baseline ├── stream_bench_test.go ├── Makefile ├── .golangci.yml ├── stream.go ├── README.md ├── scaler.go ├── LICENSE ├── stream_test.go └── scaler_test.go /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @benjivesterby -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module go.devnw.com //stream 2 | 3 | go 1.24 4 | 5 | require go.devnw.com/gen v1.2.0 6 | -------------------------------------------------------------------------------- /testdata/fuzz/FuzzTick/eaae912ff48d75e8: -------------------------------------------------------------------------------- 1 | go test fuzz v1 2 | int(0) 3 | int(0) 4 | float64(0) 5 | int64(0) 6 | int64(-90) 7 | -------------------------------------------------------------------------------- /testdata/fuzz/FuzzTick/db1d459b216861c8: -------------------------------------------------------------------------------- 1 | go test fuzz v1 2 | int(-84) 3 | int(0) 4 | float64(-0.625) 5 | int64(0) 6 | int64(-94) 7 | -------------------------------------------------------------------------------- /testdata/fuzz/Fuzz_Scaler/555659aba42d18b7: -------------------------------------------------------------------------------- 1 | go test fuzz v1 2 | int64(0) 3 | int64(0) 4 | int(0) 5 | int(29) 6 | float64(0) 7 | uint(36) 8 | int(0) 9 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | go.devnw.com/gen v1.2.0 h1:y3G/Rb3Ih5FZErDZG+QLawwH72mit/V839s17PavXtY= 2 | go.devnw.com/gen v1.2.0/go.mod h1:F4wVSNHandXO/3O8QQb/fjWJ020Er2VE3c16wTL7/gE= 3 | -------------------------------------------------------------------------------- /testdata/fuzz/Fuzz_Scaler/62629b316805e69d: -------------------------------------------------------------------------------- 1 | go test fuzz v1 2 | int64(-62) 3 | int64(-78) 4 | int(54) 5 | int(103) 6 | float64(-0.16666666666666666) 7 | uint(33) 8 | int(76) 9 | -------------------------------------------------------------------------------- /testdata/fuzz/Fuzz_Scaler/b9bef74a2c85cbf3: -------------------------------------------------------------------------------- 1 | go test fuzz v1 2 | int64(-158) 3 | int64(-95) 4 | int(54) 5 | int(19) 6 | float64(-0.16666666666666666) 7 | uint(33) 8 | int(76) 9 | -------------------------------------------------------------------------------- /.env.act: -------------------------------------------------------------------------------- 1 | ALERT_CC_USERS=@benjivesterby 2 | GO_VERSION=1.22.x 3 | PYTHON_VERSION=3.12 4 | 5 | # https://$username:$token@github.com/ 6 | GIT_CREDENTIALS=op://act_test/git_credentials/credential 7 | 8 | GH_PAT=op://act_test/gh_pat/credential 9 | 10 | 
CODECOV_TOKEN=op://act_test/codecov_token/credential 11 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Lint, Build & Test 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | GH_ACCESS_TOKEN: ${{ secrets.GH_PAT }} 7 | 8 | jobs: 9 | lint-build-test: 10 | name: Lint, Build & Test 11 | uses: devnw/workflows/.github/workflows/make-build.yml@main 12 | secrets: inherit # pragma: allowlist secret 13 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | before: 3 | hooks: 4 | - go mod tidy 5 | builds: 6 | - skip: true 7 | checksum: 8 | name_template: 'checksums.txt' 9 | snapshot: 10 | name_template: "{{ incpatch .Version }}-{{ .ShortCommit }}" 11 | changelog: 12 | sort: asc 13 | filters: 14 | exclude: 15 | - '^docs:' 16 | - '^test:' 17 | release: 18 | prerelease: auto 19 | -------------------------------------------------------------------------------- /.github/workflows/automerge.yml: -------------------------------------------------------------------------------- 1 | name: automerge 2 | on: 3 | pull_request: 4 | types: 5 | - labeled 6 | jobs: 7 | automerge: 8 | runs-on: nixos 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | steps: 13 | - id: automerge 14 | name: automerge 15 | uses: "pascalgn/automerge-action@v0.16.4" 16 | env: 17 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 18 | MERGE_METHOD: squash 19 | MERGE_LABELS: automerge 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | *cweb.log 3 | *.tmp 4 | 5 | external/ 6 | dist/ 7 | out/ 8 | 9 | .direnv/ 10 | 11 | # build folders 12 | bin/ 13 | ui/public/ 14 | ui/build/ 15 | ui/node_modules/ 16 | **/node_modules/ 17 | 18 | # Removing Vendor 19 | vendor/ 20 | 21 | .vscode/ 22 | 23 | # Binaries for programs and plugins 24 | *.exe 25 | *.exe~ 26 | *.dll 27 | *.so 28 | *.dylib 29 | 30 | # Test binary, build with `go test -c` 31 | *.test 32 | 33 | # Output of the go coverage tool, specifically when used with LiteIDE 34 | *.out 35 | 36 | *.crt 37 | *.key 38 | *.pem 39 | 40 | # ignore mac files 41 | .DS_Store 42 | -------------------------------------------------------------------------------- /helpers.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import "context" 4 | 5 | // defaultCtx is the default context used by the stream package. This is 6 | // hardcoded to context.Background() but can be overridden by the unit tests. 7 | // 8 | //nolint:gochecknoglobals // this is on purpose 9 | var defaultCtx = context.Background() 10 | 11 | // _ctx returns a valid Context even if the 12 | // supplied context is initially nil. If the supplied context 13 | // is nil it uses the default context.
14 | func _ctx(c context.Context) context.Context { 15 | if c == nil { 16 | c = defaultCtx 17 | } 18 | 19 | return c 20 | } 21 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: '^$' 2 | fail_fast: true 3 | repos: 4 | - repo: https://github.com/Yelp/detect-secrets 5 | rev: v1.5.0 6 | hooks: 7 | - id: detect-secrets 8 | name: Detect secrets 9 | language: python 10 | entry: detect-secrets-hook 11 | args: [ 12 | '--baseline', 13 | '.secrets.baseline', 14 | '--exclude-files', 15 | '(_test\.go$|/testdata/|gomod2nix.toml)', 16 | ] 17 | - repo: https://github.com/mrtazz/checkmake.git 18 | rev: 0.2.2 19 | hooks: 20 | - id: checkmake 21 | - repo: https://github.com/pre-commit/pre-commit-hooks 22 | rev: v5.0.0 23 | hooks: 24 | - id: check-json 25 | - id: check-merge-conflict 26 | - id: check-yaml 27 | - id: end-of-file-fixer 28 | - id: check-symlinks 29 | - repo: https://github.com/markdownlint/markdownlint 30 | rev: v0.12.0 31 | hooks: 32 | - id: markdownlint 33 | - repo: https://github.com/commitizen-tools/commitizen 34 | rev: v3.31.0 35 | hooks: 36 | - id: commitizen 37 | - id: commitizen-branch 38 | stages: [push] 39 | - repo: local 40 | hooks: 41 | - id: makefile 42 | name: Run Makefile Lint 43 | entry: make 44 | args: [pre-commit] 45 | language: system 46 | pass_filenames: false 47 | -------------------------------------------------------------------------------- /docs.go: -------------------------------------------------------------------------------- 1 | // Package stream provides a set of generic functions for working concurrent 2 | // design patterns in Go. 3 | // 4 | // [![Build & Test Action Status](https://github.com/devnw/stream/actions/workflows/build.yml/badge.svg)](https://github.com/devnw/stream/actions) 5 | // [![Go Report Card](https://goreportcard.com/badge/go.atomizer.io/stream)](https://goreportcard.com/report/go.atomizer.io/stream) 6 | // [![codecov](https://codecov.io/gh/devnw/stream/branch/main/graph/badge.svg)](https://codecov.io/gh/devnw/stream) 7 | // [![Go Reference](https://pkg.go.dev/badge/go.atomizer.io/stream.svg)](https://pkg.go.dev/go.atomizer.io/stream) 8 | // [![License: Apache 2.0](https://img.shields.io/badge/license-Apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) 9 | // [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](http://makeapullrequest.com) 10 | // 11 | // ## Installation 12 | // 13 | // To install the package, run: 14 | // 15 | // go get -u go.atomizer.io/stream@latest 16 | // 17 | // ## Usage 18 | // 19 | // import "go.atomizer.io/stream" 20 | // 21 | // ## Benchmarks 22 | // 23 | // To execute the benchmarks, run the following command: 24 | // 25 | // go test -bench=. ./... 26 | // 27 | // To view benchmarks over time for the `main` branch of the repository they can 28 | // be seen on our [Benchmark Report Card]. 
29 | // 30 | // [Benchmark Report Card]: https://devnw.github.io/stream/dev/bench/ 31 | package stream 32 | -------------------------------------------------------------------------------- /helper_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | type signed interface { 11 | ~int | ~int8 | ~int16 | ~int32 | ~int64 12 | } 13 | 14 | type integer interface { 15 | ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 16 | } 17 | 18 | type float interface { 19 | ~float32 | ~float64 20 | } 21 | 22 | // Initialize the random number generator. 23 | func init() { rand.Seed(time.Now().Unix()) } 24 | 25 | func Tst[U ~[]T, T any]( 26 | t *testing.T, 27 | name string, 28 | data []U, 29 | f func(t *testing.T, test []T), 30 | ) { 31 | for _, test := range data { 32 | testname := fmt.Sprintf("%s-tests[%v]-values[%v]", name, len(data), len(test)) 33 | t.Run(testname, func(t *testing.T) { 34 | f(t, test) 35 | }) 36 | } 37 | } 38 | 39 | func Int[T integer]() T { 40 | value := rand.Int() 41 | return T(value) 42 | } 43 | 44 | func Float[T float]() T { 45 | return T(rand.Float64()) 46 | } 47 | 48 | func Ints[T integer](size int) []T { 49 | out := make([]T, size) 50 | 51 | for i := range out { 52 | out[i] = Int[T]() 53 | } 54 | 55 | return out 56 | } 57 | 58 | func Floats[T float](size int) []T { 59 | out := make([]T, size) 60 | 61 | for i := range out { 62 | out[i] = Float[T]() 63 | } 64 | 65 | return out 66 | } 67 | 68 | func IntTests[T integer](tests, max int) [][]T { 69 | out := make([][]T, tests) 70 | 71 | for i := range out { 72 | out[i] = Ints[T](max) 73 | } 74 | 75 | return out 76 | } 77 | 78 | func FloatTests[T float](tests, max int) [][]T { 79 | out := make([][]T, tests) 80 | 81 | for i := range out { 82 | out[i] = Floats[T](max) 83 | } 84 | 85 | return out 86 | } 87 | -------------------------------------------------------------------------------- /.cz.yaml: -------------------------------------------------------------------------------- 1 | commitizen: 2 | name: cz_customize 3 | customize: 4 | message_template: "{{change_type}}:{% if show_message %} {{message}}{% endif %}" 5 | example: 'feature: this feature enable customize through config file' 6 | schema: ": " 7 | schema_pattern: "(feature|bug fix):(\\s.*)" 8 | bump_pattern: "^(break|new|fix|hotfix)" 9 | commit_parser: "^(?Pfeature|bug fix):\\s(?P.*)?" 10 | changelog_pattern: "^(feature|bug fix)?(!)?" 11 | change_type_map: 12 | feature: Feat 13 | bug fix: Fix 14 | bump_map: 15 | break: MAJOR 16 | new: MINOR 17 | fix: PATCH 18 | hotfix: PATCH 19 | change_type_order: ["BREAKING CHANGE", "feat", "fix", "refactor", "perf"] 20 | info_path: cz_customize_info.txt 21 | info: This is customized info 22 | questions: 23 | - type: list 24 | name: change_type 25 | choices: 26 | - value: feat 27 | name: 'feat: A new feature.' 28 | - value: fix 29 | name: 'fix: A bug fix.' 30 | - value: refactor 31 | name: 'refactor: A code change that neither fixes a bug nor adds a feature.' 32 | - value: perf 33 | name: 'perf: A code change that improves performance.' 34 | - value: test 35 | name: 'test: Adding missing tests or correcting existing tests.' 36 | - value: docs 37 | name: 'docs: Documentation only changes.' 38 | - value: chore 39 | name: 'chore: Changes that don''t modify src or test files.' 40 | - value: cicd 41 | name: 'cicd: Changes to CI/CD configuration files and scripts.' 
42 | message: Select the type of change you are committing 43 | - type: input 44 | name: message 45 | message: Body. 46 | - type: confirm 47 | name: show_message 48 | message: Do you want to add body message in commit? 49 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "development flake"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 6 | _1password-shell-plugins.url = "github:1Password/shell-plugins"; 7 | gomod2nix = { 8 | url = "github:tweag/gomod2nix"; 9 | inputs.nixpkgs.follows = "nixpkgs"; 10 | }; 11 | }; 12 | 13 | outputs = 14 | { 15 | self, 16 | nixpkgs, 17 | flake-utils, 18 | gomod2nix, 19 | ... 20 | }: 21 | flake-utils.lib.eachDefaultSystem ( 22 | system: 23 | let 24 | pkgs = import nixpkgs { 25 | config = { 26 | allowUnfree = true; 27 | }; 28 | system = system; 29 | overlays = [ 30 | gomod2nix.overlays.default 31 | (self: super: { 32 | go = super.go_1_23; 33 | python = super.python3.withPackages ( 34 | subpkgs: with subpkgs; [ 35 | openapi-spec-validator 36 | detect-secrets 37 | requests 38 | python-dotenv 39 | ] 40 | ); 41 | }) 42 | ]; 43 | }; 44 | 45 | pkglist = with pkgs; [ 46 | # system tools 47 | automake 48 | curl 49 | which 50 | act 51 | gcc 52 | ruby 53 | git 54 | sqlite-interactive 55 | _1password 56 | 57 | # lint tools 58 | gibberish-detector 59 | addlicense 60 | shfmt 61 | pre-commit 62 | shellcheck 63 | 64 | # python 65 | python 66 | 67 | # go tools 68 | go 69 | gopls 70 | gotools 71 | go-tools 72 | gomod2nix.packages.${system}.default 73 | delve 74 | golangci-lint 75 | goreleaser 76 | go-licenses 77 | 78 | arangodb 79 | #go-jsonschema 80 | ]; 81 | in 82 | { 83 | packages.default = pkgs.buildGoApplication { 84 | pname = "nist"; 85 | version = "0.1"; 86 | 87 | pwd = ./.; 88 | src = ./.; 89 | modules = ./gomod2nix.toml; 90 | buildInputs = pkglist; 91 | 92 | buildPhase = '' 93 | make build 94 | ''; 95 | installPhase = '' 96 | make install 97 | ''; 98 | }; 99 | 100 | devShells.default = pkgs.mkShell { buildInputs = pkglist; }; 101 | } 102 | ); 103 | } 104 | -------------------------------------------------------------------------------- /.secrets.baseline: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.4.0", 3 | "plugins_used": [ 4 | { 5 | "name": "ArtifactoryDetector" 6 | }, 7 | { 8 | "name": "AWSKeyDetector" 9 | }, 10 | { 11 | "name": "AzureStorageKeyDetector" 12 | }, 13 | { 14 | "name": "Base64HighEntropyString", 15 | "limit": 4.5 16 | }, 17 | { 18 | "name": "BasicAuthDetector" 19 | }, 20 | { 21 | "name": "CloudantDetector" 22 | }, 23 | { 24 | "name": "GitHubTokenDetector" 25 | }, 26 | { 27 | "name": "HexHighEntropyString", 28 | "limit": 3.0 29 | }, 30 | { 31 | "name": "IbmCloudIamDetector" 32 | }, 33 | { 34 | "name": "IbmCosHmacDetector" 35 | }, 36 | { 37 | "name": "JwtTokenDetector" 38 | }, 39 | { 40 | "name": "KeywordDetector", 41 | "keyword_exclude": "" 42 | }, 43 | { 44 | "name": "MailchimpDetector" 45 | }, 46 | { 47 | "name": "NpmDetector" 48 | }, 49 | { 50 | "name": "PrivateKeyDetector" 51 | }, 52 | { 53 | "name": "SendGridDetector" 54 | }, 55 | { 56 | "name": "SlackDetector" 57 | }, 58 | { 59 | "name": "SoftlayerDetector" 60 | }, 61 | { 62 | "name": "SquareOAuthDetector" 63 | }, 64 | { 65 | "name": "StripeDetector" 66 | }, 67 | { 68 | "name": "TwilioKeyDetector" 69 | } 70 | ], 71 | "filters_used": [ 72 | { 73 | "path": 
"detect_secrets.filters.allowlist.is_line_allowlisted" 74 | }, 75 | { 76 | "path": "detect_secrets.filters.common.is_baseline_file", 77 | "filename": ".secrets.baseline" 78 | }, 79 | { 80 | "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies", 81 | "min_level": 2 82 | }, 83 | { 84 | "path": "detect_secrets.filters.heuristic.is_indirect_reference" 85 | }, 86 | { 87 | "path": "detect_secrets.filters.heuristic.is_lock_file" 88 | }, 89 | { 90 | "path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string" 91 | }, 92 | { 93 | "path": "detect_secrets.filters.heuristic.is_potential_uuid" 94 | }, 95 | { 96 | "path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign" 97 | }, 98 | { 99 | "path": "detect_secrets.filters.heuristic.is_sequential_string" 100 | }, 101 | { 102 | "path": "detect_secrets.filters.heuristic.is_swagger_file" 103 | }, 104 | { 105 | "path": "detect_secrets.filters.heuristic.is_templated_secret" 106 | } 107 | ], 108 | "results": {}, 109 | "generated_at": "2023-03-27T20:44:00Z" 110 | } 111 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | labels: 8 | - "go" 9 | - "deps" 10 | groups: 11 | production-dependencies: 12 | dependency-type: "production" 13 | development-dependencies: 14 | dependency-type: "development" 15 | 16 | - package-ecosystem: "github-actions" 17 | directory: "/" 18 | schedule: 19 | interval: "daily" 20 | labels: 21 | - "github-actions" 22 | - "deps" 23 | groups: 24 | production-dependencies: 25 | dependency-type: "production" 26 | development-dependencies: 27 | dependency-type: "development" 28 | 29 | - package-ecosystem: "docker-compose" 30 | directory: "/" 31 | schedule: 32 | interval: "daily" 33 | labels: 34 | - "docker-compose" 35 | - "deps" 36 | groups: 37 | production-dependencies: 38 | dependency-type: "production" 39 | development-dependencies: 40 | dependency-type: "development" 41 | 42 | - package-ecosystem: "docker" 43 | directory: "/" 44 | schedule: 45 | interval: "daily" 46 | labels: 47 | - "docker" 48 | - "deps" 49 | groups: 50 | production-dependencies: 51 | dependency-type: "production" 52 | development-dependencies: 53 | dependency-type: "development" 54 | 55 | - package-ecosystem: "npm" 56 | directory: "/" 57 | schedule: 58 | interval: "daily" 59 | labels: 60 | - "npm" 61 | - "deps" 62 | groups: 63 | production-dependencies: 64 | dependency-type: "production" 65 | development-dependencies: 66 | dependency-type: "development" 67 | 68 | - package-ecosystem: "pip" 69 | directory: "/" 70 | schedule: 71 | interval: "daily" 72 | labels: 73 | - "python" 74 | - "deps" 75 | groups: 76 | production-dependencies: 77 | dependency-type: "production" 78 | development-dependencies: 79 | dependency-type: "development" 80 | 81 | - package-ecosystem: "cargo" 82 | directory: "/" 83 | schedule: 84 | interval: "daily" 85 | labels: 86 | - "rust" 87 | - "deps" 88 | groups: 89 | production-dependencies: 90 | dependency-type: "production" 91 | development-dependencies: 92 | dependency-type: "development" 93 | 94 | - package-ecosystem: "terraform" 95 | directory: "/" 96 | schedule: 97 | interval: "daily" 98 | labels: 99 | - "terraform" 100 | - "deps" 101 | groups: 102 | production-dependencies: 103 | dependency-type: "production" 104 | development-dependencies: 105 | 
dependency-type: "development" 106 | 107 | - package-ecosystem: "gitsubmodule" 108 | directory: "/" 109 | schedule: 110 | interval: "daily" 111 | labels: 112 | - "git" 113 | - "submodule" 114 | - "deps" 115 | groups: 116 | production-dependencies: 117 | dependency-type: "production" 118 | development-dependencies: 119 | dependency-type: "development" 120 | 121 | -------------------------------------------------------------------------------- /stream_bench_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "go.devnw.com/gen" 8 | ) 9 | 10 | func Benchmark_Pipe(b *testing.B) { 11 | ctx, cancel := context.WithCancel(context.Background()) 12 | defer cancel() 13 | 14 | c1, c2 := make(chan int), make(chan int) 15 | value := 10 16 | 17 | go Pipe(ctx, c1, c2) 18 | 19 | for n := 0; n < b.N; n++ { 20 | select { 21 | case <-ctx.Done(): 22 | b.Fatal("context canceled") 23 | case c1 <- value: 24 | case out, ok := <-c2: 25 | if !ok { 26 | b.Fatal("c2 closed prematurely") 27 | } 28 | 29 | if out != value { 30 | b.Errorf("expected %v, got %v", value, out) 31 | } 32 | } 33 | } 34 | } 35 | 36 | func Benchmark_Intercept(b *testing.B) { 37 | ctx, cancel := context.WithCancel(context.Background()) 38 | defer cancel() 39 | 40 | in := make(chan int) 41 | value := 10 42 | 43 | out := Intercept(ctx, in, func(_ context.Context, v int) (int, bool) { 44 | return v % 3, true 45 | }) 46 | 47 | for n := 0; n < b.N; n++ { 48 | select { 49 | case <-ctx.Done(): 50 | b.Fatal("context canceled") 51 | case in <- value: 52 | case out, ok := <-out: 53 | if !ok { 54 | b.Fatal("c2 closed prematurely") 55 | } 56 | 57 | if out != value%3 { 58 | b.Errorf("expected %v, got %v", value, out) 59 | } 60 | } 61 | } 62 | } 63 | 64 | func Benchmark_FanIn(b *testing.B) { 65 | ctx, cancel := context.WithCancel(context.Background()) 66 | defer cancel() 67 | 68 | c1, c2 := make(chan int), make(chan int) 69 | out := FanIn(ctx, c1, c2) 70 | 71 | for n := 0; n < b.N; n++ { 72 | c1 <- 1 73 | c2 <- 2 74 | 75 | for i := 0; i < 2; i++ { 76 | select { 77 | case <-ctx.Done(): 78 | b.Fatal("context canceled") 79 | case _, ok := <-out: 80 | if !ok { 81 | b.Fatal("out closed prematurely") 82 | } 83 | } 84 | } 85 | } 86 | } 87 | 88 | func Benchmark_FanOut(b *testing.B) { 89 | ctx, cancel := context.WithCancel(context.Background()) 90 | defer cancel() 91 | 92 | in, out1, out2 := make(chan int), make(chan int), make(chan int) 93 | 94 | go FanOut(ctx, in, out1, out2) 95 | 96 | for n := 0; n < b.N; n++ { 97 | in <- 1 98 | <-out1 99 | <-out2 100 | } 101 | } 102 | 103 | func Benchmark_Distribute(b *testing.B) { 104 | ctx, cancel := context.WithCancel(context.Background()) 105 | defer cancel() 106 | 107 | in, out1, out2 := make(chan int), make(chan int), make(chan int) 108 | 109 | go Distribute(ctx, in, out1, out2) 110 | 111 | for n := 0; n < b.N; n++ { 112 | in <- 1 113 | 114 | select { 115 | case <-out1: 116 | case <-out2: 117 | } 118 | } 119 | } 120 | 121 | func Benchmark_Scaler(b *testing.B) { 122 | ctx, cancel := context.WithCancel(context.Background()) 123 | defer cancel() 124 | 125 | testdata := gen.Slice[int](Ints[int](100)) 126 | 127 | s := Scaler[int, int]{ 128 | Fn: func(_ context.Context, in int) (int, bool) { 129 | return in, true 130 | }, 131 | } 132 | 133 | b.ResetTimer() 134 | 135 | for n := 0; n < b.N; n++ { 136 | // Test that the scaler can be used with a nil context. 
137 | //nolint:staticcheck // nil context on purpose 138 | out, err := s.Exec(nil, testdata.Chan(ctx)) 139 | if err != nil { 140 | b.Errorf("expected no error, got %v", err) 141 | } 142 | 143 | seen := 0 144 | 145 | tloop: 146 | for { 147 | select { 148 | case <-ctx.Done(): 149 | b.Fatal("context closed") 150 | case _, ok := <-out: 151 | if !ok { 152 | break tloop 153 | } 154 | seen++ 155 | } 156 | } 157 | 158 | if seen != len(testdata) { 159 | b.Errorf("expected %v, got %v", len(testdata), seen) 160 | } 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: deps tidy fmt build test lint 2 | 3 | op=op run --env-file="./.env" -- 4 | 5 | opact=op run --env-file="./.env.act" -- 6 | 7 | #------------------------------------------------------------------------- 8 | # Variables 9 | # ------------------------------------------------------------------------ 10 | SHELL := $(shell which bash) 11 | fuzzsh=https://raw.githubusercontent.com/devnw/workflows/refs/heads/main/fuzz.sh 12 | env=CGO_ENABLED=0 13 | pyenv=.venv/bin 14 | 15 | #------------------------------------------------------------------------- 16 | # Targets 17 | #------------------------------------------------------------------------- 18 | deps: 19 | python3 -m venv .venv 20 | 21 | $(pyenv)/pip install --upgrade pre-commit 22 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest 23 | go install github.com/goreleaser/goreleaser@latest 24 | 25 | ci-test-deps: 26 | # install act 27 | if [ ! -d .act ]; then make install-act; git clone git@github.com:nektos/act.git .act; fi 28 | cd .act && git pull && sudo make install 29 | 30 | test: lint 31 | CGO_ENABLED=1 go test -cover -failfast -race ./... 32 | 33 | fuzz: 34 | curl -fsSL $(fuzzsh) | $(SHELL) 35 | 36 | bench: 37 | go test -bench=. -benchmem ./... 38 | 39 | lint: tidy 40 | golangci-lint run 41 | $(pyenv)/pre-commit run --all-files 42 | 43 | build: update upgrade tidy lint test 44 | $(env) go build ./... 45 | 46 | release: build-ci 47 | goreleaser release --snapshot --clean 48 | 49 | upgrade: 50 | $(pyenv)/pre-commit autoupdate 51 | go get -u ./... 52 | 53 | update: 54 | git submodule update --recursive 55 | 56 | fmt: 57 | gofmt -s -w . 58 | 59 | tidy: fmt 60 | go mod tidy 61 | 62 | clean: 63 | rm -rf dist 64 | rm -rf coverage 65 | rm -rf .act 66 | rm -rf .venv 67 | 68 | #------------------------------------------------------------------------- 69 | # Git targets 70 | #------------------------------------------------------------------------- 71 | 72 | tag: 73 | @latest_tag=$$(git describe --tags `git rev-list --tags --max-count=1`); \ 74 | current_major=$$(echo $$latest_tag | cut -d. -f1); \ 75 | current_minor=$$(echo $$latest_tag | cut -d. -f2); \ 76 | current_patch=$$(echo $$latest_tag | cut -d. 
-f3); \ 77 | next_minor=$$((current_minor + 1)); \ 78 | default_version="$$current_major.$$next_minor.$$current_patch"; \ 79 | read -p "Enter the version number [$$default_version]: " version; \ 80 | version=$${version:-$$default_version}; \ 81 | commits=$$(git log $$latest_tag..HEAD --pretty=format:"%h %s" | awk '{print "- " $$0}'); \ 82 | git tag -a $$version -m "Release $$version" -m "$$commits"; \ 83 | git push origin $$version 84 | 85 | #------------------------------------------------------------------------- 86 | # CI targets 87 | #------------------------------------------------------------------------- 88 | build-ci: deps 89 | $(env) go build ./... 90 | CGO_ENABLED=1 go test \ 91 | -cover \ 92 | -covermode=atomic \ 93 | -coverprofile=coverage.txt \ 94 | -failfast \ 95 | -race ./... 96 | make fuzz FUZZ_TIME=10 97 | 98 | bench-ci: build-ci 99 | go test -bench=. ./... | tee output.txt 100 | 101 | release-ci: build-ci 102 | goreleaser release --clean 103 | 104 | test-ci: 105 | DOCKER_HOST=$(shell docker context inspect --format='{{json .Endpoints.docker.Host}}' $(shell docker context show)) \ 106 | $(opact) act \ 107 | -s GIT_CREDENTIALS \ 108 | -s GITHUB_TOKEN="$(shell gh auth token)" \ 109 | --var GO_VERSION \ 110 | --var ALERT_CC_USERS 111 | 112 | #------------------------------------------------------------------------- 113 | # Force targets 114 | #------------------------------------------------------------------------- 115 | 116 | FORCE: 117 | 118 | #------------------------------------------------------------------------- 119 | # Phony targets 120 | #------------------------------------------------------------------------- 121 | 122 | .PHONY: build test lint fuzz bench fmt tidy clean release update upgrade deps translate test-act ci-test-deps 123 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 3m 3 | linters-settings: 4 | cyclop: 5 | max-complexity: 30 6 | package-average: 10.0 7 | 8 | errcheck: 9 | check-type-assertions: true 10 | 11 | exhaustive: 12 | check: 13 | - switch 14 | - map 15 | 16 | funlen: 17 | lines: 100 18 | statements: 50 19 | 20 | gocognit: 21 | min-complexity: 30 22 | 23 | gocritic: 24 | settings: 25 | captLocal: 26 | paramsOnly: false 27 | underef: 28 | skipRecvDeref: false 29 | 30 | mnd: 31 | ignored-functions: 32 | - os.Chmod 33 | - os.Mkdir 34 | - os.MkdirAll 35 | - os.OpenFile 36 | - os.WriteFile 37 | - prometheus.ExponentialBuckets 38 | - prometheus.ExponentialBucketsRange 39 | - prometheus.LinearBuckets 40 | 41 | gomodguard: 42 | blocked: 43 | modules: 44 | - github.com/golang/protobuf: 45 | recommendations: 46 | - google.golang.org/protobuf 47 | reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" 48 | - github.com/satori/go.uuid: 49 | recommendations: 50 | - github.com/google/uuid 51 | reason: "satori's package is not maintained" 52 | - github.com/gofrs/uuid: 53 | recommendations: 54 | - github.com/google/uuid 55 | reason: "see recommendation from dev-infra team: https://confluence.gtforge.com/x/gQI6Aw" 56 | 57 | govet: 58 | enable-all: true 59 | disable: 60 | - fieldalignment 61 | - shadow 62 | nakedret: 63 | max-func-lines: 0 64 | 65 | nolintlint: 66 | allow-no-explanation: [ funlen, gocognit, lll ] 67 | require-explanation: true 68 | require-specific: true 69 | 70 | rowserrcheck: 71 | packages: 72 | - github.com/jmoiron/sqlx 73 | 74 | tenv: 75 | all: true 76 | 
77 | tagliatelle: 78 | case: 79 | rules: 80 | json: snake 81 | yaml: camel 82 | xml: camel 83 | 84 | 85 | linters: 86 | disable-all: true 87 | enable: 88 | - errcheck 89 | - gosimple 90 | - govet 91 | - ineffassign 92 | - staticcheck 93 | - typecheck 94 | - unused 95 | - asasalint 96 | - asciicheck 97 | - bidichk 98 | - bodyclose 99 | - cyclop 100 | - dupl 101 | - durationcheck 102 | - errname 103 | - exhaustive 104 | - copyloopvar 105 | - forbidigo 106 | - funlen 107 | - gochecknoglobals 108 | - gocognit 109 | - goconst 110 | - gocritic 111 | - gocyclo 112 | - goimports 113 | - mnd 114 | - gomoddirectives 115 | - gomodguard 116 | - goprintffuncname 117 | - gosec 118 | - lll 119 | - loggercheck 120 | - makezero 121 | - nakedret 122 | - nestif 123 | - nilerr 124 | - nilnil 125 | - noctx 126 | - nolintlint 127 | - nosprintfhostport 128 | - predeclared 129 | - promlinter 130 | - reassign 131 | - revive 132 | - rowserrcheck 133 | - sqlclosecheck 134 | - stylecheck 135 | - tenv 136 | - testableexamples 137 | - tparallel 138 | - unconvert 139 | - unparam 140 | - usestdlibvars 141 | - wastedassign 142 | - whitespace 143 | issues: 144 | max-same-issues: 50 145 | 146 | exclude-dirs: 147 | - testdata 148 | 149 | exclude-rules: 150 | - source: "^//\\s*go:generate\\s" 151 | linters: [ lll ] 152 | - source: "(noinspection|TODO)" 153 | linters: [ godot ] 154 | - source: "//noinspection" 155 | linters: [ gocritic ] 156 | - source: "^\\s+if _, ok := err\\.\\([^.]+\\.InternalError\\); ok {" 157 | linters: [ errorlint ] 158 | - path: "_test\\.go" 159 | linters: 160 | - bodyclose 161 | - dupl 162 | - funlen 163 | - goconst 164 | - gosec 165 | - noctx 166 | - wrapcheck 167 | - mnd 168 | - copyloopref 169 | - gocyclo 170 | - errcheck 171 | - lll 172 | - gochecknoglobals 173 | -------------------------------------------------------------------------------- /stream.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "sync" 7 | 8 | "go.devnw.com/gen" 9 | ) 10 | 11 | // Pipe accepts an incoming data channel and pipes it to the supplied 12 | // outgoing data channel. 13 | // 14 | // NOTE: Execute the Pipe function in a goroutine if parallel execution is 15 | // desired. Canceling the context or closing the incoming channel is important 16 | // to ensure that the goroutine is properly terminated. 17 | func Pipe[T any]( 18 | ctx context.Context, in <-chan T, out chan<- T, 19 | ) { 20 | ctx = _ctx(ctx) 21 | 22 | for { 23 | select { 24 | case <-ctx.Done(): 25 | return 26 | case v, ok := <-in: 27 | if !ok { 28 | return 29 | } 30 | 31 | select { 32 | case <-ctx.Done(): 33 | return 34 | case out <- v: 35 | } 36 | } 37 | } 38 | } 39 | 40 | type InterceptFunc[T, U any] func(context.Context, T) (U, bool) 41 | 42 | // Intercept accepts an incoming data channel and a function literal that 43 | // accepts the incoming data and returns data of the same type and a boolean 44 | // indicating whether the data should be forwarded to the output channel. 45 | // The function is executed for each data item in the incoming channel as long 46 | // as the context is not canceled or the incoming channel remains open. 
47 | func Intercept[T, U any]( 48 | ctx context.Context, 49 | in <-chan T, 50 | fn InterceptFunc[T, U], 51 | ) <-chan U { 52 | ctx = _ctx(ctx) 53 | out := make(chan U) 54 | 55 | go func() { 56 | defer close(out) 57 | 58 | for { 59 | select { 60 | case <-ctx.Done(): 61 | return 62 | case v, ok := <-in: 63 | if !ok { 64 | return 65 | } 66 | 67 | // Executing this in a function literal ensures that any panic 68 | // will be caught during execution of the function 69 | func() { 70 | // Determine if the function was successful 71 | result, ok := fn(ctx, v) 72 | if !ok { 73 | return 74 | } 75 | 76 | // Execute the function against the incoming value 77 | // and send the result to the output channel. 78 | select { 79 | case <-ctx.Done(): 80 | return 81 | case out <- result: 82 | } 83 | }() 84 | } 85 | } 86 | }() 87 | 88 | return out 89 | } 90 | 91 | // FanIn accepts incoming data channels and forwards returns a single channel 92 | // that receives all the data from the supplied channels. 93 | // 94 | // NOTE: The transfer takes place in a goroutine for each channel 95 | // so ensuring that the context is canceled or the incoming channels 96 | // are closed is important to ensure that the goroutine is terminated. 97 | func FanIn[T any](ctx context.Context, in ...<-chan T) <-chan T { 98 | ctx = _ctx(ctx) 99 | out := make(chan T) 100 | 101 | if len(in) == 0 { 102 | defer close(out) 103 | return out 104 | } 105 | 106 | var wg sync.WaitGroup 107 | defer func() { 108 | go func() { 109 | wg.Wait() 110 | close(out) 111 | }() 112 | }() 113 | 114 | wg.Add(len(in)) 115 | for _, i := range in { 116 | // Pipe the result of the channel to the output channel. 117 | go func(i <-chan T) { 118 | defer wg.Done() 119 | Pipe(ctx, i, out) 120 | }(i) 121 | } 122 | 123 | return out 124 | } 125 | 126 | // FanOut accepts an incoming data channel and copies the data to each of the 127 | // supplied outgoing data channels. 128 | // 129 | // NOTE: Execute the FanOut function in a goroutine if parallel execution is 130 | // desired. Canceling the context or closing the incoming channel is important 131 | // to ensure that the goroutine is properly terminated. 132 | func FanOut[T any]( 133 | ctx context.Context, in <-chan T, out ...chan<- T, 134 | ) { 135 | ctx = _ctx(ctx) 136 | 137 | if len(out) == 0 { 138 | return 139 | } 140 | 141 | for { 142 | select { 143 | case <-ctx.Done(): 144 | return 145 | case v, ok := <-in: 146 | if !ok { 147 | return 148 | } 149 | 150 | // Closure to catch panic on closed channel write. 151 | selectCases := make([]reflect.SelectCase, 0, len(out)+1) 152 | 153 | // 0 index is context 154 | selectCases = append(selectCases, reflect.SelectCase{ 155 | Dir: reflect.SelectRecv, 156 | Chan: reflect.ValueOf(ctx.Done()), 157 | }) 158 | 159 | for _, outc := range out { 160 | // Skip nil channels until they are non-nil 161 | if outc == nil { 162 | continue 163 | } 164 | 165 | selectCases = append(selectCases, reflect.SelectCase{ 166 | Dir: reflect.SelectSend, 167 | Chan: reflect.ValueOf(outc), 168 | Send: reflect.ValueOf(v), 169 | }) 170 | } 171 | 172 | for len(selectCases) > 1 { 173 | chosen, _, _ := reflect.Select(selectCases) 174 | 175 | // The context was canceled. 176 | if chosen == 0 { 177 | return 178 | } 179 | 180 | selectCases = gen.Exclude(selectCases, selectCases[chosen]) 181 | } 182 | } 183 | } 184 | } 185 | 186 | // Distribute accepts an incoming data channel and distributes the data among 187 | // the supplied outgoing data channels using a dynamic select statement. 
188 | // 189 | // NOTE: Execute the Distribute function in a goroutine if parallel execution is 190 | // desired. Canceling the context or closing the incoming channel is important 191 | // to ensure that the goroutine is properly terminated. 192 | func Distribute[T any]( 193 | ctx context.Context, in <-chan T, out ...chan<- T, 194 | ) { 195 | ctx = _ctx(ctx) 196 | 197 | if len(out) == 0 { 198 | return 199 | } 200 | 201 | for { 202 | select { 203 | case <-ctx.Done(): 204 | return 205 | case v, ok := <-in: 206 | if !ok { 207 | return 208 | } 209 | 210 | selectCases := make([]reflect.SelectCase, 0, len(out)+1) 211 | for _, outc := range out { 212 | selectCases = append(selectCases, reflect.SelectCase{ 213 | Dir: reflect.SelectSend, 214 | Chan: reflect.ValueOf(outc), 215 | Send: reflect.ValueOf(v), 216 | }) 217 | } 218 | selectCases = append(selectCases, reflect.SelectCase{ 219 | Dir: reflect.SelectRecv, 220 | Chan: reflect.ValueOf(ctx.Done()), 221 | }) 222 | _, _, _ = reflect.Select(selectCases) 223 | } 224 | } 225 | } 226 | 227 | // Drain accepts a channel and drains the channel until the channel is closed 228 | // or the context is canceled. 229 | func Drain[T any](ctx context.Context, in <-chan T) { 230 | ctx = _ctx(ctx) 231 | 232 | go func() { 233 | for { 234 | select { 235 | case <-ctx.Done(): 236 | return 237 | case _, ok := <-in: 238 | if !ok { 239 | return 240 | } 241 | } 242 | } 243 | }() 244 | } 245 | 246 | // Any accepts an incoming data channel and converts the channel to a readonly 247 | // channel of the `any` type. 248 | func Any[T any](ctx context.Context, in <-chan T) <-chan any { 249 | return Intercept(ctx, in, func(_ context.Context, in T) (any, bool) { 250 | return in, true 251 | }) 252 | } 253 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # stream 2 | -- 3 | import "." 4 | 5 | Package stream provides a set of generic functions for working concurrent design 6 | patterns in Go. 7 | 8 | [![Build & Test Action 9 | Status](https://github.com/devnw/stream/actions/workflows/build.yml/badge.svg)](https://github.com/devnw/stream/actions) 10 | [![Go Report 11 | Card](https://goreportcard.com/badge/go.atomizer.io/stream)](https://goreportcard.com/report/go.atomizer.io/stream) 12 | [![codecov](https://codecov.io/gh/devnw/stream/branch/main/graph/badge.svg)](https://codecov.io/gh/devnw/stream) 13 | [![Go 14 | Reference](https://pkg.go.dev/badge/go.atomizer.io/stream.svg)](https://pkg.go.dev/go.atomizer.io/stream) 15 | [![License: Apache 16 | 2.0](https://img.shields.io/badge/license-Apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) 17 | [![PRs 18 | Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](http://makeapullrequest.com) 19 | 20 | ## Installation 21 | 22 | To install the package, run: 23 | 24 | go get -u go.atomizer.io/stream@latest 25 | 26 | ## Usage 27 | 28 | import "go.atomizer.io/stream" 29 | 30 | ## Benchmarks 31 | 32 | To execute the benchmarks, run the following command: 33 | 34 | go test -bench=. ./... 35 | 36 | To view benchmarks over time for the `main` branch of the repository they can be 37 | seen on our [Benchmark Report Card]. 38 | 39 | [Benchmark Report Card]: https://devnw.github.io/stream/dev/bench/ 40 | 41 | ## Usage 42 | 43 | ```go 44 | const MinLife = time.Millisecond 45 | ``` 46 | MinLife is the minimum life time for the scaler. 
This is used to prevent the 47 | scaler from exiting too quickly, and causing too small of a lifetime. 48 | 49 | ```go 50 | const MinWait = time.Millisecond 51 | ``` 52 | MinWait is the absolute minimum wait time for the ticker. This is used to 53 | prevent the ticker from firing too often and causing too small of a wait time. 54 | 55 | ```go 56 | var ErrFnRequired = fmt.Errorf("nil InterceptFunc, Fn is required") 57 | ``` 58 | 59 | #### func Any 60 | 61 | ```go 62 | func Any[T any](ctx context.Context, in <-chan T) <-chan any 63 | ``` 64 | Any accepts an incoming data channel and converts the channel to a readonly 65 | channel of the `any` type. 66 | 67 | #### func Distribute 68 | 69 | ```go 70 | func Distribute[T any]( 71 | ctx context.Context, in <-chan T, out ...chan<- T, 72 | ) 73 | ``` 74 | Distribute accepts an incoming data channel and distributes the data among the 75 | supplied outgoing data channels using a dynamic select statement. 76 | 77 | NOTE: Execute the Distribute function in a goroutine if parallel execution is 78 | desired. Canceling the context or closing the incoming channel is important to 79 | ensure that the goroutine is properly terminated. 80 | 81 | #### func Drain 82 | 83 | ```go 84 | func Drain[T any](ctx context.Context, in <-chan T) 85 | ``` 86 | Drain accepts a channel and drains the channel until the channel is closed or 87 | the context is canceled. 88 | 89 | #### func FanIn 90 | 91 | ```go 92 | func FanIn[T any](ctx context.Context, in ...<-chan T) <-chan T 93 | ``` 94 | FanIn accepts incoming data channels and returns a single channel that 95 | receives all the data from the supplied channels. 96 | 97 | NOTE: The transfer takes place in a goroutine for each channel so ensuring that 98 | the context is canceled or the incoming channels are closed is important to 99 | ensure that the goroutine is terminated. 100 | 101 | #### func FanOut 102 | 103 | ```go 104 | func FanOut[T any]( 105 | ctx context.Context, in <-chan T, out ...chan<- T, 106 | ) 107 | ``` 108 | FanOut accepts an incoming data channel and copies the data to each of the 109 | supplied outgoing data channels. 110 | 111 | NOTE: Execute the FanOut function in a goroutine if parallel execution is 112 | desired. Canceling the context or closing the incoming channel is important to 113 | ensure that the goroutine is properly terminated. 114 | 115 | #### func Intercept 116 | 117 | ```go 118 | func Intercept[T, U any]( 119 | ctx context.Context, 120 | in <-chan T, 121 | fn InterceptFunc[T, U], 122 | ) <-chan U 123 | ``` 124 | Intercept accepts an incoming data channel and a function literal that accepts 125 | the incoming data and returns the transformed data and a boolean indicating 126 | whether the data should be forwarded to the output channel. The function is 127 | executed for each data item in the incoming channel as long as the context is 128 | not canceled or the incoming channel remains open. 129 | 130 | #### func Pipe 131 | 132 | ```go 133 | func Pipe[T any]( 134 | ctx context.Context, in <-chan T, out chan<- T, 135 | ) 136 | ``` 137 | Pipe accepts an incoming data channel and pipes it to the supplied outgoing data 138 | channel. 139 | 140 | NOTE: Execute the Pipe function in a goroutine if parallel execution is desired. 141 | Canceling the context or closing the incoming channel is important to ensure 142 | that the goroutine is properly terminated.
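As a quick, hypothetical illustration of composing these helpers, the sketch below merges two producer channels with `FanIn` and forwards the merged stream with `Pipe`; the channel names and values are illustrative only and are not part of the package:

```go
package main

import (
	"context"
	"fmt"

	"go.atomizer.io/stream"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Two producers merged into a single stream.
	a, b := make(chan int), make(chan int)
	merged := stream.FanIn(ctx, a, b)

	// Pipe blocks, so run it in its own goroutine (see the NOTE above).
	out := make(chan int)
	go stream.Pipe(ctx, merged, out)

	go func() {
		a <- 1
		b <- 2
		close(a)
		close(b)
	}()

	for i := 0; i < 2; i++ {
		fmt.Println(<-out) // prints 1 and 2 in either order
	}
}
```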
143 | 144 | #### type DurationScaler 145 | 146 | ```go 147 | type DurationScaler struct { 148 | // Interval is the number that the current step must be divisible by in 149 | // order to modify the time.Duration. 150 | Interval int 151 | 152 | // ScalingFactor is a value between -1 and 1 that is used to modify the 153 | // time.Duration of a ticker or timer. The ScalingFactor is multiplied by 154 | // the original duration and the result is applied to the current duration when scaling occurs. 155 | // 156 | // For example, if the ScalingFactor is 0.5, the duration grows by half of 157 | // the original duration each time the Interval is reached while scaling up. 158 | // If the ScalingFactor is -0.5, the duration shrinks by the same amount. If 159 | // the ScalingFactor is 0, then the duration will not be modified. 160 | // 161 | // A negative ScalingFactor will cause the duration to decrease as the 162 | // step value increases causing the ticker or timer to fire more often 163 | // and create more routines. A positive ScalingFactor will cause the 164 | // duration to increase as the step value increases causing the ticker 165 | // or timer to fire less often and create fewer routines. 166 | ScalingFactor float64 167 | } 168 | ``` 169 | 170 | DurationScaler is used to modify the time.Duration of a ticker or timer based on 171 | a configured step interval and a scaling factor (between -1 and 1). 172 | 173 | #### type InterceptFunc 174 | 175 | ```go 176 | type InterceptFunc[T, U any] func(context.Context, T) (U, bool) 177 | ``` 178 | 179 | 180 | #### type Scaler 181 | 182 | ```go 183 | type Scaler[T, U any] struct { 184 | Wait time.Duration 185 | Life time.Duration 186 | Fn InterceptFunc[T, U] 187 | 188 | // WaitModifier is used to modify the Wait time based on the number of 189 | // times the Scaler has scaled up. This is useful for systems 190 | // that are CPU bound and need to scale up more/less quickly. 191 | WaitModifier DurationScaler 192 | 193 | // Max is the maximum number of layer2 routines that will be spawned. 194 | // If Max is set to 0, then there is no limit. 195 | Max uint 196 | } 197 | ``` 198 | 199 | Scaler implements generic auto-scaling logic which starts with a net-zero set of 200 | processing routines (with the exception of the channel listener) and then scales 201 | up and down based on the CPU contention of a system and the speed at which the 202 | InterceptFunc is able to process data. Once the incoming channel becomes 203 | blocked (due to nothing being sent) each of the spawned routines will finish out 204 | their execution of Fn and then the internal timer will collapse bringing the 205 | routine count back to zero until there is more to be done. 206 | 207 | To use Scaler, simply create a new Scaler[T, U], configuring the Wait, Life, and 208 | Fn fields. These fields are what configure the functionality of the 209 | Scaler. 210 | 211 | NOTE: Fn is REQUIRED! Defaults: Wait = 1ms (MinWait), Life = 1ms (MinLife) 212 | 213 | After creating the Scaler instance and configuring it, call the Exec method 214 | passing the appropriate context and input channel. 215 | 216 | Internally the Scaler implementation will wait for data on the incoming channel 217 | and attempt to send it to a layer2 channel. If the layer2 channel is blocking 218 | and the Wait time has been reached, then the Scaler will spawn a new layer2 219 | which will increase throughput for the Scaler, and Scaler will attempt to send 220 | the data to the layer2 channel once more. This process will repeat until a 221 | successful send occurs. (This should only loop twice).
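A minimal usage sketch follows; the Wait, Life, and Fn values are illustrative only and not recommended defaults:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.atomizer.io/stream"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	in := make(chan int)

	s := stream.Scaler[int, int]{
		Wait: time.Millisecond,       // how long to block before scaling up
		Life: 100 * time.Millisecond, // how long an idle routine lives
		Fn: func(_ context.Context, v int) (int, bool) {
			return v * 2, true // forward the doubled value
		},
	}

	out, err := s.Exec(ctx, in)
	if err != nil {
		panic(err)
	}

	go func() {
		defer close(in)
		for i := 1; i <= 3; i++ {
			in <- i
		}
	}()

	for v := range out {
		fmt.Println(v) // doubled values; order is not guaranteed across routines
	}
}
```

Closing the input channel (or canceling the context) is what allows the output channel to close and the range loop to finish.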
222 | 223 | #### func (Scaler[T, U]) Exec 224 | 225 | ```go 226 | func (s Scaler[T, U]) Exec(ctx context.Context, in <-chan T) (<-chan U, error) 227 | ``` 228 | Exec starts the internal Scaler routine (the first layer of processing) and 229 | returns the output channel where the resulting data from the Fn function will be 230 | sent. 231 | -------------------------------------------------------------------------------- /scaler.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | // MinWait is the absolute minimum wait time for the ticker. This is used to 11 | // prevent the ticker from firing too often and causing too small of a wait 12 | // time. 13 | const MinWait = time.Millisecond 14 | 15 | // MinLife is the minimum life time for the scaler. This is used to prevent 16 | // the scaler from exiting too quickly, and causing too small of a lifetime. 17 | const MinLife = time.Millisecond 18 | 19 | // Scaler implements generic auto-scaling logic which starts with a net-zero 20 | // set of processing routines (with the exception of the channel listener) and 21 | // then scales up and down based on the CPU contention of a system and the speed 22 | // at which the InterceptFunc is able to process data. Once the incoming 23 | // channel becomes blocked (due to nothing being sent) each of the spawned 24 | // routines will finish out their execution of Fn and then the internal timer 25 | // will collapse bringing the routine count back to zero until there is more to 26 | // be done. 27 | // 28 | // To use Scaler, simply create a new Scaler[T, U], configuring the Wait, Life, 29 | // and Fn fields. These fields are what configure the functionality 30 | // of the Scaler. 31 | // 32 | // NOTE: Fn is REQUIRED! 33 | // Defaults: Wait = 1ms (MinWait), Life = 1ms (MinLife) 34 | // 35 | // After creating the Scaler instance and configuring it, call the Exec method 36 | // passing the appropriate context and input channel. 37 | // 38 | // Internally the Scaler implementation will wait for data on the incoming 39 | // channel and attempt to send it to a layer2 channel. If the layer2 channel 40 | // is blocking and the Wait time has been reached, then the Scaler will spawn 41 | // a new layer2 which will increase throughput for the Scaler, and Scaler 42 | // will attempt to send the data to the layer2 channel once more. This process 43 | // will repeat until a successful send occurs. (This should only loop twice). 44 | type Scaler[T, U any] struct { 45 | Wait time.Duration 46 | Life time.Duration 47 | Fn InterceptFunc[T, U] 48 | 49 | // WaitModifier is used to modify the Wait time based on the number of 50 | // times the Scaler has scaled up. This is useful for systems 51 | // that are CPU bound and need to scale up more/less quickly. 52 | WaitModifier DurationScaler 53 | 54 | // Max is the maximum number of layer2 routines that will be spawned. 55 | // If Max is set to 0, then there is no limit. 56 | Max uint 57 | 58 | wScale *DurationScaler 59 | } 60 | 61 | var ErrFnRequired = fmt.Errorf("nil InterceptFunc, Fn is required") 62 | 63 | // Exec starts the internal Scaler routine (the first layer of processing) and 64 | // returns the output channel where the resulting data from the Fn function 65 | // will be sent.
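//
// A minimal, illustrative sketch (the field values below are hypothetical,
// not defaults, and ctx/in are assumed to be an existing context and input
// channel):
//
//	s := Scaler[int, string]{
//		Wait: 5 * time.Millisecond,
//		Life: 250 * time.Millisecond,
//		Fn: func(_ context.Context, v int) (string, bool) {
//			return fmt.Sprintf("%d", v), true
//		},
//		WaitModifier: DurationScaler{Interval: 10, ScalingFactor: 0.1},
//		Max:          8,
//	}
//	out, err := s.Exec(ctx, in)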
66 | // 67 | //nolint:funlen,gocognit // This really can't be broken up any further 68 | func (s Scaler[T, U]) Exec(ctx context.Context, in <-chan T) (<-chan U, error) { 69 | ctx = _ctx(ctx) 70 | 71 | // set the configured tick as a pointer for execution 72 | s.wScale = &s.WaitModifier 73 | // set the original wait time on the ticker 74 | s.wScale.originalDuration = s.Wait 75 | 76 | // Fn is REQUIRED! 77 | if s.Fn == nil { 78 | return nil, ErrFnRequired 79 | } 80 | 81 | // Create outbound channel 82 | out := make(chan U) 83 | 84 | // nano-second precision really isn't feasible here, so this is arbitrary 85 | // because the caller did not specify a wait time. This means Scaler will 86 | // likely always scale up rather than waiting for an existing layer2 routine 87 | // to pick up data. 88 | if s.Wait <= MinWait { 89 | s.Wait = MinWait 90 | } 91 | 92 | // Minimum life of a spawned layer2 should be 1ms 93 | if s.Life < MinLife { 94 | s.Life = MinLife 95 | } 96 | 97 | go func() { 98 | defer close(out) 99 | 100 | wg := sync.WaitGroup{} 101 | wgMu := sync.Mutex{} 102 | 103 | // Ensure that the method does not close 104 | // until all layer2 routines have exited 105 | defer func() { 106 | wgMu.Lock() 107 | wg.Wait() 108 | wgMu.Unlock() 109 | }() 110 | 111 | l2 := make(chan T) 112 | ticker := time.NewTicker(s.Wait) 113 | defer ticker.Stop() 114 | step := 0 115 | stepMu := sync.RWMutex{} 116 | 117 | var max chan struct{} 118 | 119 | if s.Max > 0 { 120 | max = make(chan struct{}, s.Max) 121 | for i := uint(0); i < s.Max; i++ { 122 | max <- struct{}{} 123 | } 124 | } 125 | 126 | scaleLoop: 127 | for { 128 | select { 129 | case <-ctx.Done(): 130 | return 131 | case v, ok := <-in: 132 | if !ok { 133 | break scaleLoop 134 | } 135 | 136 | l2loop: 137 | for { 138 | select { 139 | case <-ctx.Done(): 140 | return 141 | case <-ticker.C: 142 | if max != nil { 143 | select { 144 | case <-ctx.Done(): 145 | return 146 | case <-max: // start a new layer2 routine 147 | default: 148 | // wait for a layer2 routine to finish 149 | continue l2loop 150 | } 151 | } 152 | 153 | wgMu.Lock() 154 | wg.Add(1) 155 | wgMu.Unlock() 156 | 157 | if !s.WaitModifier.inactive() { 158 | stepMu.Lock() 159 | step++ 160 | stepMu.Unlock() 161 | } 162 | 163 | go func() { 164 | defer wg.Done() 165 | 166 | if s.Max > 0 { 167 | defer func() { 168 | select { 169 | case <-ctx.Done(): 170 | case max <- struct{}{}: 171 | } 172 | }() 173 | } 174 | 175 | if !s.WaitModifier.inactive() { 176 | defer func() { 177 | stepMu.Lock() 178 | step-- 179 | stepMu.Unlock() 180 | }() 181 | } 182 | 183 | Pipe(ctx, s.layer2(ctx, l2), out) 184 | }() 185 | case l2 <- v: 186 | break l2loop 187 | } 188 | } 189 | 190 | stepN := 0 191 | if !s.WaitModifier.inactive() { 192 | stepMu.RLock() 193 | stepN = step 194 | stepMu.RUnlock() 195 | } 196 | 197 | // Reset the ticker so that it does not immediately trip the 198 | // case statement on loop. 199 | ticker.Reset(s.wScale.scaledDuration(s.Wait, stepN)) 200 | } 201 | } 202 | }() 203 | 204 | return out, nil 205 | } 206 | 207 | // layer2 manages the execution of the InterceptFunc. layer2 has a life time 208 | // of s.Life and will exit if the context is canceled, the timer has reached 209 | // its life time, or the incoming channel has been closed. 210 | // 211 | // If the case statement which reads from the in channel is executed, then 212 | // layer2 will execute the Scaler function and send the result to the out 213 | // channel. 
Afterward, layer2 will reset the internal timer, expanding the 214 | // life time of the layer2, and continue to attempt another read from the in 215 | // channel until the in channel is closed, the context is canceled, or the 216 | // timer has reached its life time. 217 | func (s Scaler[T, U]) layer2(ctx context.Context, in <-chan T) <-chan U { 218 | out := make(chan U) 219 | 220 | go func() { 221 | defer close(out) 222 | 223 | timer := time.NewTimer(s.Life) 224 | defer timer.Stop() 225 | 226 | for { 227 | select { 228 | case <-ctx.Done(): 229 | return 230 | case <-timer.C: 231 | return 232 | case t, ok := <-in: 233 | if !ok { 234 | return 235 | } 236 | 237 | // If the function returns false, then don't send the data 238 | // but break out of the select statement to ensure the timer 239 | // is reset. 240 | u, send := s.Fn(ctx, t) 241 | if !send { 242 | break 243 | } 244 | 245 | // Send the resulting value to the output channel 246 | select { 247 | case <-ctx.Done(): 248 | return 249 | case out <- u: 250 | } 251 | } 252 | 253 | // NOTE: This code is based on the doc comment for time.Timer.Stop 254 | // which ensures that the channel of the timer is drained before 255 | // resetting the timer so that it doesn't immediately trip the 256 | // case statement. 257 | if !timer.Stop() { 258 | <-timer.C 259 | } 260 | timer.Reset(s.Life) 261 | } 262 | }() 263 | 264 | return out 265 | } 266 | 267 | // DurationScaler is used to modify the time.Duration of a ticker or timer based on 268 | // a configured step interval and a scaling factor (between -1 and 1). 269 | type DurationScaler struct { 270 | // Interval is the number that the current step must be divisible by in 271 | // order to modify the time.Duration. 272 | Interval int 273 | 274 | // ScalingFactor is a value between -1 and 1 that is used to modify the 275 | // time.Duration of a ticker or timer. The ScalingFactor is multiplied by 276 | // the original duration and the result is applied to the current duration when scaling occurs. 277 | // 278 | // For example, if the ScalingFactor is 0.5, the duration grows by half of 279 | // the original duration each time the Interval is reached while scaling up. 280 | // If the ScalingFactor is -0.5, the duration shrinks by the same amount. If 281 | // the ScalingFactor is 0, then the duration will not be modified. 282 | // 283 | // A negative ScalingFactor will cause the duration to decrease as the 284 | // step value increases causing the ticker or timer to fire more often 285 | // and create more routines. A positive ScalingFactor will cause the 286 | // duration to increase as the step value increases causing the ticker 287 | // or timer to fire less often and create fewer routines. 288 | ScalingFactor float64 289 | 290 | // originalDuration is the time.Duration that was passed to the 291 | // Scaler. This is used to reset the time.Duration of the ticker 292 | // or timer. 293 | originalDuration time.Duration 294 | 295 | // lastInterval is the last interval step that was used to modify 296 | // the time.Duration. 297 | lastInterval int 298 | } 299 | 300 | func (t *DurationScaler) inactive() bool { 301 | return t.Interval == 0 || 302 | (t.ScalingFactor == 0 || 303 | t.ScalingFactor <= -1 || 304 | t.ScalingFactor >= 1) 305 | } 306 | 307 | // scaledDuration returns the modified time.Duration based on the current interval.
308 | func (t *DurationScaler) scaledDuration( 309 | dur time.Duration, 310 | currentInterval int, 311 | ) time.Duration { 312 | if dur < MinWait { 313 | dur = MinWait 314 | } 315 | 316 | if t.inactive() { 317 | return dur 318 | } 319 | 320 | mod := t.ScalingFactor 321 | if currentInterval <= t.lastInterval { 322 | mod = -mod 323 | } 324 | 325 | if currentInterval%t.Interval == 0 { 326 | t.lastInterval = currentInterval 327 | out := dur + time.Duration(float64(t.originalDuration)*mod) 328 | if out < MinWait { 329 | return MinWait 330 | } 331 | 332 | return out 333 | } 334 | 335 | return dur 336 | } 337 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /stream_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "go.devnw.com/gen" 9 | ) 10 | 11 | func PipeTest[U ~[]T, T comparable]( 12 | t *testing.T, 13 | name string, 14 | data []U, 15 | ) { 16 | Tst( 17 | t, 18 | name, 19 | data, 20 | func(t *testing.T, data []T) { 21 | ctx, cancel := context.WithCancel(context.Background()) 22 | defer cancel() 23 | 24 | c1, c2 := make(chan T), make(chan T) 25 | 26 | go Pipe(ctx, c1, c2) 27 | 28 | go func() { 29 | for _, v := range data { 30 | select { 31 | case <-ctx.Done(): 32 | return 33 | case c1 <- v: 34 | } 35 | } 36 | }() 37 | 38 | for i := 0; i < len(data); i++ { 39 | select { 40 | case <-ctx.Done(): 41 | t.Error("context canceled") 42 | return 43 | case out, ok := <-c2: 44 | if !ok { 45 | if i != len(data)-1 { 46 | t.Fatal("c2 closed prematurely") 47 | } 48 | return 49 | } 50 | 51 | if out != data[i] { 52 | t.Errorf("expected %v, got %v", data[i], out) 53 | } 54 | } 55 | } 56 | }) 57 | } 58 | 59 | func Test_Pipe(t *testing.T) { 60 | PipeTest(t, "int8", IntTests[int8](100, 1000)) 61 | PipeTest(t, "uint8", IntTests[uint8](100, 1000)) 62 | PipeTest(t, "uint8", IntTests[uint8](100, 1000)) 63 | PipeTest(t, "uint16", IntTests[uint16](100, 1000)) 64 | PipeTest(t, "int32", IntTests[int32](100, 1000)) 65 | PipeTest(t, "uint32", IntTests[uint32](100, 1000)) 66 | PipeTest(t, "int64", IntTests[int64](100, 1000)) 67 | PipeTest(t, "uint64", IntTests[uint64](100, 1000)) 68 | PipeTest(t, "float32", FloatTests[float32](100, 1000)) 69 | PipeTest(t, "float64", FloatTests[float64](100, 1000)) 70 | } 71 | 72 | func FanInTest[U ~[]T, T comparable]( 73 | t *testing.T, 74 | name string, 75 | data []U, 76 | ) { 77 | Tst( 78 | t, 79 | name, 80 | data, 81 | func(t *testing.T, data []T) { 82 | ctx, cancel := context.WithCancel(context.Background()) 83 | defer cancel() 84 | 85 | divisor := 5 86 | 87 | if len(data)%divisor != 0 { 88 | t.Fatalf("data length must be divisible by %v", divisor) 89 | } 90 | 91 | out := make([]chan T, divisor) 92 | 93 | // Initialize channels 94 | for i := range out { 95 | out[i] = make(chan T) 96 | } 97 | 98 | fan := FanIn(ctx, gen.ReadOnly(out...)...) 
99 | 100 | ichan := 0 101 | cursor := 0 102 | for i := len(data) / divisor; i <= len(data); i += len(data) / divisor { 103 | go func(out chan<- T, data []T) { 104 | defer close(out) 105 | 106 | for _, v := range data { 107 | select { 108 | case <-ctx.Done(): 109 | return 110 | case out <- v: 111 | } 112 | } 113 | }(out[ichan], data[cursor:i]) 114 | 115 | cursor = i 116 | ichan++ 117 | } 118 | 119 | returned := make([]T, len(data)) 120 | for i := 0; ; i++ { 121 | select { 122 | case <-ctx.Done(): 123 | t.Error("context canceled") 124 | return 125 | case out, ok := <-fan: 126 | if !ok { 127 | if i != len(data) { 128 | t.Fatalf("c2 closed prematurely; index %v", i) 129 | } 130 | 131 | return 132 | } 133 | 134 | returned[i] = out 135 | } 136 | } 137 | }) 138 | } 139 | 140 | func Test_FanIn(t *testing.T) { 141 | FanInTest(t, "int8", IntTests[int8](100, 1000)) 142 | FanInTest(t, "uint8", IntTests[uint8](100, 1000)) 143 | FanInTest(t, "uint8", IntTests[uint8](100, 1000)) 144 | FanInTest(t, "uint16", IntTests[uint16](100, 1000)) 145 | FanInTest(t, "int32", IntTests[int32](100, 1000)) 146 | FanInTest(t, "uint32", IntTests[uint32](100, 1000)) 147 | FanInTest(t, "int64", IntTests[int64](100, 1000)) 148 | FanInTest(t, "uint64", IntTests[uint64](100, 1000)) 149 | FanInTest(t, "float32", FloatTests[float32](100, 1000)) 150 | FanInTest(t, "float64", FloatTests[float64](100, 1000)) 151 | } 152 | 153 | func InterceptTest[U ~[]T, T signed]( 154 | t *testing.T, 155 | name string, 156 | data []U, 157 | ) { 158 | Tst( 159 | t, 160 | name, 161 | data, 162 | func(t *testing.T, data []T) { 163 | ctx, cancel := context.WithCancel(context.Background()) 164 | defer cancel() 165 | 166 | in := make(chan T) 167 | defer close(in) 168 | 169 | out := Intercept(ctx, in, func(_ context.Context, in T) (T, bool) { 170 | return in % 3, true 171 | }) 172 | 173 | go func() { 174 | for _, v := range data { 175 | select { 176 | case <-ctx.Done(): 177 | return 178 | case in <- v: 179 | } 180 | } 181 | }() 182 | 183 | for i := 0; i < len(data); i++ { 184 | select { 185 | case <-ctx.Done(): 186 | t.Error("context canceled") 187 | return 188 | case out, ok := <-out: 189 | if !ok { 190 | if i != len(data)-1 { 191 | t.Fatal("c2 closed prematurely") 192 | } 193 | } 194 | 195 | if out != data[i]%3 { 196 | t.Errorf("expected %v, got %v", data[i], out) 197 | } 198 | } 199 | } 200 | }) 201 | } 202 | 203 | func Test_Intercept(t *testing.T) { 204 | InterceptTest(t, "int8", IntTests[int8](100, 1000)) 205 | InterceptTest(t, "int8", IntTests[int8](100, 1000)) 206 | InterceptTest(t, "int32", IntTests[int32](100, 1000)) 207 | InterceptTest(t, "int64", IntTests[int64](100, 1000)) 208 | } 209 | 210 | func Test_Intercept_ChangeType(t *testing.T) { 211 | ctx, cancel := context.WithCancel(context.Background()) 212 | defer cancel() 213 | 214 | integers := Ints[int](100) 215 | booleans := make([]bool, len(integers)) 216 | 217 | for i, v := range integers { 218 | booleans[i] = v%2 == 0 219 | } 220 | 221 | out := Intercept( 222 | ctx, 223 | gen.Slice[int](integers).Chan(ctx), 224 | func(_ context.Context, in int) (bool, bool) { 225 | return in%2 == 0, true 226 | }) 227 | 228 | for i := 0; ; i++ { 229 | select { 230 | case <-ctx.Done(): 231 | return 232 | case out, ok := <-out: 233 | if !ok { 234 | return 235 | } 236 | 237 | if out != booleans[i] { 238 | t.Errorf("expected %v, got %v", booleans[0], out) 239 | } 240 | } 241 | } 242 | } 243 | 244 | func Test_Intercept_NotOk(t *testing.T) { 245 | ctx, cancel := context.WithCancel(context.Background()) 246 | 
defer cancel() 247 | 248 | in := make(chan int) 249 | defer close(in) 250 | 251 | go func() { 252 | in <- 1 253 | in <- 2 254 | in <- 3 255 | in <- 0 256 | }() 257 | 258 | out := Intercept(ctx, in, func(_ context.Context, v int) (int, bool) { 259 | if v == 0 { 260 | return 0, true 261 | } 262 | 263 | return v, false 264 | }) 265 | 266 | select { 267 | case <-ctx.Done(): 268 | t.Fatal("context canceled") 269 | case out, ok := <-out: 270 | if !ok { 271 | t.Fatal("in closed prematurely") 272 | } 273 | 274 | if out != 0 { 275 | t.Errorf("expected 0, got %v", out) 276 | } 277 | } 278 | } 279 | 280 | func Test_Intercept_ClosedChan(_ *testing.T) { 281 | ctx, cancel := context.WithCancel(context.Background()) 282 | defer cancel() 283 | 284 | in := make(chan int) 285 | 286 | out := Intercept( 287 | ctx, 288 | in, 289 | func(_ context.Context, v int) (int, bool) { 290 | return v, false 291 | }) 292 | 293 | close(in) 294 | 295 | <-out 296 | } 297 | 298 | func Test_Intercept_Canceled_On_Wait(t *testing.T) { 299 | ctx, cancel := context.WithCancel(context.Background()) 300 | defer cancel() 301 | 302 | in := make(chan int) 303 | defer close(in) 304 | 305 | // Setup intercept 306 | out := Intercept(ctx, in, func(_ context.Context, v int) (int, bool) { 307 | return v, true 308 | }) 309 | 310 | // Push to in 311 | in <- 1 312 | 313 | // Wait for intercept routine to be scheduled 314 | time.Sleep(time.Millisecond) 315 | 316 | // Cancel the routine 317 | cancel() 318 | 319 | _, ok := <-out 320 | if ok { 321 | t.Error("expected closed channel") 322 | } 323 | } 324 | 325 | func Test_FanOut_Canceled_On_Wait(_ *testing.T) { 326 | ctx, cancel := context.WithCancel(context.Background()) 327 | defer cancel() 328 | 329 | in, out := make(chan int), make(chan int) 330 | defer close(in) 331 | defer close(out) 332 | 333 | go func() { 334 | defer cancel() 335 | in <- 1 336 | }() 337 | 338 | FanOut(ctx, in, out) 339 | } 340 | 341 | //nolint:gocognit // This is a test function 342 | func DistributeTest[U ~[]T, T comparable]( 343 | t *testing.T, 344 | name string, 345 | data []U, 346 | ) { 347 | Tst( 348 | t, 349 | name, 350 | data, 351 | func(t *testing.T, data []T) { 352 | ctx, cancel := context.WithCancel(context.Background()) 353 | defer cancel() 354 | 355 | c1, c2, c3 := make(chan T), make(chan T), make(chan T) 356 | 357 | go Distribute(ctx, gen.Slice[T](data).Chan(ctx), c1, c2, c3) 358 | 359 | c1total, c2total, c3total := 0, 0, 0 360 | for i := 0; i < len(data); i++ { 361 | select { 362 | case <-ctx.Done(): 363 | t.Error("context canceled") 364 | return 365 | case out, ok := <-c1: 366 | if !ok { 367 | return 368 | } 369 | 370 | if out != data[i] { 371 | t.Errorf("expected %v, got %v", data[i], out) 372 | } 373 | c1total++ 374 | case out, ok := <-c2: 375 | if !ok { 376 | return 377 | } 378 | 379 | if out != data[i] { 380 | t.Errorf("expected %v, got %v", data[i], out) 381 | } 382 | c2total++ 383 | case out, ok := <-c3: 384 | if !ok { 385 | return 386 | } 387 | 388 | if out != data[i] { 389 | t.Errorf("expected %v, got %v", data[i], out) 390 | } 391 | c3total++ 392 | } 393 | } 394 | 395 | t.Logf("c1: %v", c1total) 396 | t.Logf("c2: %v", c2total) 397 | t.Logf("c3: %v", c3total) 398 | 399 | ctotal := c1total + c2total + c3total 400 | if ctotal != len(data) { 401 | t.Errorf("expected %v, got %v", len(data), ctotal) 402 | } 403 | }) 404 | } 405 | 406 | func Test_Distribute(t *testing.T) { 407 | DistributeTest(t, "int8", IntTests[int8](100, 1000)) 408 | DistributeTest(t, "uint8", IntTests[uint8](100, 1000)) 409 | 
DistributeTest(t, "uint8", IntTests[uint8](100, 1000)) 410 | DistributeTest(t, "uint16", IntTests[uint16](100, 1000)) 411 | DistributeTest(t, "int32", IntTests[int32](100, 1000)) 412 | DistributeTest(t, "uint32", IntTests[uint32](100, 1000)) 413 | DistributeTest(t, "int64", IntTests[int64](100, 1000)) 414 | DistributeTest(t, "uint64", IntTests[uint64](100, 1000)) 415 | DistributeTest(t, "float32", FloatTests[float32](100, 1000)) 416 | DistributeTest(t, "float64", FloatTests[float64](100, 1000)) 417 | } 418 | 419 | func Test_Distribute_Canceled_On_Wait(_ *testing.T) { 420 | ctx, cancel := context.WithCancel(context.Background()) 421 | defer cancel() 422 | 423 | in, out := make(chan int), make(chan int) 424 | defer close(in) 425 | defer close(out) 426 | 427 | go func() { 428 | defer cancel() 429 | in <- 1 430 | }() 431 | 432 | Distribute(ctx, in, out) 433 | } 434 | 435 | func Test_Distribute_ZeroOut(_ *testing.T) { 436 | ctx, cancel := context.WithCancel(context.Background()) 437 | defer cancel() 438 | 439 | in := make(chan int) 440 | defer close(in) 441 | 442 | Distribute(ctx, in) 443 | } 444 | 445 | func Test_FanOut(t *testing.T) { 446 | ctx, cancel := context.WithCancel(context.Background()) 447 | defer cancel() 448 | 449 | c1, c2, c3 := make(chan int), make(chan int), make(chan int) 450 | var c4 chan int 451 | data := Ints[int](1000) 452 | 453 | go FanOut(ctx, gen.Slice[int](data).Chan(ctx), c1, c2, c3, c4) 454 | 455 | seen := make(map[int]int) 456 | for i := 0; i < len(data)*3; i++ { 457 | select { 458 | case <-ctx.Done(): 459 | t.Fatal("context canceled") 460 | return 461 | case _, ok := <-c1: 462 | if !ok { 463 | return 464 | } 465 | 466 | seen[1]++ 467 | case _, ok := <-c2: 468 | if !ok { 469 | return 470 | } 471 | 472 | seen[2]++ 473 | case _, ok := <-c3: 474 | if !ok { 475 | return 476 | } 477 | 478 | seen[3]++ 479 | case _, ok := <-c4: 480 | if !ok { 481 | return 482 | } 483 | 484 | seen[4]++ 485 | } 486 | } 487 | 488 | if len(seen) != 3 { 489 | t.Fatalf("expected %v, got %v", len(data)-1, len(seen)) 490 | } 491 | 492 | for k, v := range seen { 493 | if k == 4 { 494 | if v > 0 { 495 | t.Fatalf("expected %v, got %v", 0, v) 496 | } 497 | } 498 | 499 | if v != len(data) { 500 | t.Fatalf("Chan C%v: expected %v, got %v", k, len(data), v) 501 | } 502 | } 503 | } 504 | 505 | func Test_FanOut_ZeroOut(_ *testing.T) { 506 | ctx, cancel := context.WithCancel(context.Background()) 507 | defer cancel() 508 | 509 | in := make(chan int) 510 | defer close(in) 511 | 512 | FanOut(ctx, in) 513 | } 514 | 515 | func Test_FanIn_ZeroIn(_ *testing.T) { 516 | ctx, cancel := context.WithCancel(context.Background()) 517 | defer cancel() 518 | 519 | FanIn[int](ctx) 520 | } 521 | 522 | func Test_Drain(t *testing.T) { 523 | ctx, cancel := context.WithTimeout( 524 | context.Background(), 525 | time.Second*5, 526 | ) 527 | 528 | count := 1000 529 | in := make(chan int, count) 530 | start := make(chan struct{}) 531 | 532 | go func() { 533 | defer cancel() 534 | defer close(in) 535 | <-start 536 | 537 | for i := 0; i < count; i++ { 538 | select { 539 | case <-ctx.Done(): 540 | return 541 | case in <- i: 542 | } 543 | } 544 | }() 545 | 546 | Drain(ctx, in) 547 | 548 | close(start) 549 | <-ctx.Done() 550 | if ctx.Err() != context.Canceled { 551 | t.Fatalf("expected %v, got %v", context.Canceled, ctx.Err()) 552 | } 553 | } 554 | 555 | func Test_Any(t *testing.T) { 556 | ctx, cancel := context.WithCancel(context.Background()) 557 | defer cancel() 558 | 559 | count := 1000 560 | in := make(chan int, count) 561 | 562 | 
go func() { 563 | defer close(in) 564 | 565 | for i := 0; i < count; i++ { 566 | select { 567 | case <-ctx.Done(): 568 | return 569 | case in <- i: 570 | } 571 | } 572 | }() 573 | 574 | out := Any(ctx, in) 575 | 576 | for i := 0; i < count; i++ { 577 | select { 578 | case <-ctx.Done(): 579 | return 580 | case v, ok := <-out: 581 | if !ok { 582 | return 583 | } 584 | 585 | value, ok := v.(int) 586 | if !ok || value != i { 587 | t.Fatalf("expected %v, got %v", i, value) 588 | } 589 | } 590 | } 591 | } 592 | -------------------------------------------------------------------------------- /scaler_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "testing" 8 | "time" 9 | 10 | "go.devnw.com/gen" 11 | ) 12 | 13 | var emptyFn = func(context.Context, any) (any, bool) { return 0, true } 14 | var nosendFn = func(context.Context, any) (any, bool) { return 0, false } 15 | 16 | func ScalerTest[U ~[]T, T comparable]( 17 | t *testing.T, 18 | name string, 19 | data []U, 20 | ) { 21 | Tst( 22 | t, 23 | name, 24 | data, 25 | func(t *testing.T, data []T) { 26 | ctx, cancel := context.WithCancel(context.Background()) 27 | defer cancel() 28 | 29 | testdata := gen.Slice[T](data) 30 | 31 | integers := testdata.Map() 32 | 33 | s := Scaler[T, T]{ 34 | Fn: func(_ context.Context, in T) (T, bool) { 35 | return in, true 36 | }, 37 | } 38 | 39 | // Test that the scaler can be used with a nil context. 40 | //nolint:staticcheck // nil context on purpose 41 | out, err := s.Exec(nil, testdata.Chan(ctx)) 42 | if err != nil { 43 | t.Errorf("expected no error, got %v", err) 44 | } 45 | 46 | tloop: 47 | for { 48 | select { 49 | case <-ctx.Done(): 50 | t.Fatal("context closed") 51 | case v, ok := <-out: 52 | if !ok { 53 | break tloop 54 | } 55 | 56 | integers[v] = true 57 | } 58 | } 59 | 60 | for k, v := range integers { 61 | seen, ok := v.(bool) 62 | if !ok { 63 | t.Errorf("expected bool, got %T", v) 64 | } 65 | 66 | if !seen { 67 | t.Errorf("expected %v, got %v for %v", true, v, k) 68 | } 69 | } 70 | }) 71 | } 72 | 73 | func Test_Scaler_Exec(t *testing.T) { 74 | ScalerTest(t, "int8", IntTests[int8](10, 100)) 75 | ScalerTest(t, "uint8", IntTests[uint8](10, 100)) 76 | ScalerTest(t, "uint8", IntTests[uint8](10, 100)) 77 | ScalerTest(t, "uint16", IntTests[uint16](10, 100)) 78 | ScalerTest(t, "int32", IntTests[int32](10, 100)) 79 | ScalerTest(t, "uint32", IntTests[uint32](10, 100)) 80 | ScalerTest(t, "int64", IntTests[int64](10, 100)) 81 | ScalerTest(t, "uint64", IntTests[uint64](10, 100)) 82 | ScalerTest(t, "float32", FloatTests[float32](10, 100)) 83 | ScalerTest(t, "float64", FloatTests[float64](10, 100)) 84 | } 85 | 86 | func Test_Scaler_NilFn(t *testing.T) { 87 | s := Scaler[any, any]{} 88 | 89 | //nolint:staticcheck // nil context on purpose 90 | _, err := s.Exec(nil, nil) 91 | if err == nil { 92 | t.Error("Expected error, got nil") 93 | } 94 | } 95 | 96 | func Test_Scaler_NilCtx(t *testing.T) { 97 | s := Scaler[any, any]{ 98 | Fn: emptyFn, 99 | } 100 | 101 | // Overwrite the default context with a cancelable context. 102 | var cancel context.CancelFunc 103 | defaultCtx, cancel = context.WithCancel(context.Background()) 104 | 105 | // Fix the default context after the test completes 106 | t.Cleanup(func() { 107 | defaultCtx = context.Background() 108 | }) 109 | 110 | cancel() 111 | 112 | // Test that the scaler can be used with a nil context. 
113 | //nolint:staticcheck // nil context on purpose 114 | out, err := s.Exec(nil, nil) 115 | if err != nil { 116 | t.Errorf("expected no error, got %v", err) 117 | } 118 | 119 | select { 120 | case <-time.After(time.Second): 121 | t.Errorf("expected no timeout, got timeout") 122 | case _, ok := <-out: 123 | if ok { 124 | t.Errorf("expected out to be closed") 125 | } 126 | } 127 | } 128 | 129 | func Test_Scaler_CloseIn(t *testing.T) { 130 | s := Scaler[any, any]{ 131 | Fn: emptyFn, 132 | } 133 | 134 | in := make(chan any) 135 | close(in) 136 | 137 | // Test that the scaler can be used with a nil context. 138 | //nolint:staticcheck // nil context on purpose 139 | out, err := s.Exec(nil, in) 140 | if err != nil { 141 | t.Errorf("expected no error, got %v", err) 142 | } 143 | 144 | select { 145 | case <-time.After(time.Second): 146 | t.Errorf("expected no timeout, got timeout") 147 | case _, ok := <-out: 148 | if ok { 149 | t.Errorf("expected out to be closed") 150 | } 151 | } 152 | } 153 | 154 | func Test_Scaler_l2ctx(t *testing.T) { 155 | ctx, cancel := context.WithCancel(context.Background()) 156 | 157 | s := Scaler[any, any]{ 158 | Wait: time.Minute, 159 | Fn: emptyFn, 160 | } 161 | 162 | in := make(chan any) 163 | 164 | // Test that the scaler can be used with a nil context. 165 | out, err := s.Exec(ctx, in) 166 | if err != nil { 167 | t.Errorf("expected no error, got %v", err) 168 | } 169 | 170 | // Trigger the internal loop of the scaler 171 | in <- 1 172 | 173 | // Cancel the context while it's waiting to 174 | // scale to layer 2. 175 | cancel() 176 | 177 | select { 178 | case <-time.After(time.Second): 179 | t.Errorf("expected no timeout, got timeout") 180 | case _, ok := <-out: 181 | if ok { 182 | t.Errorf("expected out to be closed") 183 | } 184 | } 185 | } 186 | 187 | func Test_Scaler_layer2_ctx1(t *testing.T) { 188 | ctx, cancel := context.WithCancel(context.Background()) 189 | cancel() 190 | 191 | s := Scaler[any, any]{ 192 | Wait: time.Minute, 193 | Life: time.Minute, 194 | Fn: emptyFn, 195 | } 196 | 197 | out := s.layer2(ctx, nil) 198 | 199 | select { 200 | case <-time.After(time.Second): 201 | t.Errorf("expected no timeout, got timeout") 202 | case _, ok := <-out: 203 | if ok { 204 | t.Errorf("expected out to be closed") 205 | } 206 | } 207 | } 208 | 209 | func Test_Scaler_layer2_closeIn(t *testing.T) { 210 | ctx, cancel := context.WithCancel(context.Background()) 211 | defer cancel() 212 | 213 | s := Scaler[any, any]{ 214 | Wait: time.Minute, 215 | Life: time.Minute, 216 | Fn: emptyFn, 217 | } 218 | 219 | in := make(chan any) 220 | close(in) 221 | 222 | out := s.layer2(ctx, in) 223 | 224 | select { 225 | case <-time.After(time.Second): 226 | t.Errorf("expected no timeout, got timeout") 227 | case _, ok := <-out: 228 | if ok { 229 | t.Errorf("expected out to be closed") 230 | } 231 | } 232 | } 233 | 234 | func Test_Scaler_layer2_ctx2(t *testing.T) { 235 | ctx, cancel := context.WithCancel(context.Background()) 236 | 237 | s := Scaler[any, any]{ 238 | Wait: time.Minute, 239 | Life: time.Minute, 240 | Fn: emptyFn, 241 | } 242 | 243 | in := make(chan any) 244 | defer close(in) 245 | 246 | out := s.layer2(ctx, in) 247 | 248 | // Push data to the channel to trigger the internal loop and block 249 | in <- 1 250 | cancel() 251 | <-ctx.Done() 252 | 253 | for i := 0; i < 1000; i++ { 254 | select { 255 | case <-time.After(time.Second): 256 | t.Errorf("expected no timeout, got timeout") 257 | case _, ok := <-out: 258 | if !ok { 259 | return 260 | } 261 | } 262 | } 263 | 264 | 
t.Errorf("expected out to be closed") 265 | } 266 | 267 | func Test_Scaler_layer2_nosend(t *testing.T) { 268 | ctx, cancel := context.WithCancel(context.Background()) 269 | defer cancel() 270 | 271 | s := Scaler[any, any]{ 272 | Wait: time.Minute, 273 | Life: time.Minute, 274 | Fn: nosendFn, 275 | } 276 | 277 | in := make(chan any) 278 | defer close(in) 279 | 280 | out := s.layer2(ctx, in) 281 | 282 | // Push data to the channel to trigger the internal loop and block 283 | in <- 1 284 | 285 | select { 286 | case <-time.After(time.Millisecond): 287 | case <-out: 288 | t.Fatalf("expected 0 data to be sent, got 1") 289 | } 290 | } 291 | 292 | func TestTickDur(t *testing.T) { 293 | testCases := []struct { 294 | name string 295 | tick DurationScaler 296 | duration time.Duration 297 | currentStep int 298 | expected time.Duration 299 | }{ 300 | { 301 | name: "Test case 1", 302 | tick: DurationScaler{Interval: 3, ScalingFactor: 0.1, originalDuration: 10 * time.Second}, 303 | duration: 10 * time.Second, 304 | currentStep: 3, 305 | expected: 11 * time.Second, 306 | }, 307 | { 308 | name: "Test case 2", 309 | tick: DurationScaler{Interval: 5, ScalingFactor: -0.1, originalDuration: 20 * time.Second}, 310 | duration: 20 * time.Second, 311 | currentStep: 10, 312 | expected: 18 * time.Second, 313 | }, 314 | { 315 | name: "Test case 3", 316 | tick: DurationScaler{Interval: 2, ScalingFactor: 0.5, originalDuration: 10 * time.Second}, 317 | duration: 10 * time.Second, 318 | currentStep: 4, 319 | expected: 15 * time.Second, 320 | }, 321 | { 322 | name: "Test case 4", 323 | tick: DurationScaler{Interval: 4, ScalingFactor: -0.5, originalDuration: 30 * time.Second}, 324 | duration: 30 * time.Second, 325 | currentStep: 8, 326 | expected: 15 * time.Second, 327 | }, 328 | { 329 | name: "Test case 5", 330 | tick: DurationScaler{Interval: 3, ScalingFactor: 0.1, originalDuration: 10 * time.Second}, 331 | duration: 10 * time.Second, 332 | currentStep: 2, 333 | expected: 10 * time.Second, 334 | }, 335 | { 336 | name: "Test case 6: Step is divisible, modifier in range", 337 | tick: DurationScaler{Interval: 3, ScalingFactor: 0.1, originalDuration: 10 * time.Second}, 338 | duration: 10 * time.Second, 339 | currentStep: 3, 340 | expected: 11 * time.Second, 341 | }, 342 | { 343 | name: "Test case 7: Step is not divisible, modifier in range", 344 | tick: DurationScaler{Interval: 3, ScalingFactor: 0.1, originalDuration: 10 * time.Second}, 345 | duration: 10 * time.Second, 346 | currentStep: 2, 347 | expected: 10 * time.Second, 348 | }, 349 | { 350 | name: "Test case 8: Step is divisible, modifier is zero", 351 | tick: DurationScaler{Interval: 3, ScalingFactor: 0, originalDuration: 10 * time.Second}, 352 | duration: 10 * time.Second, 353 | currentStep: 3, 354 | expected: 10 * time.Second, 355 | }, 356 | { 357 | name: "Test case 9: Step is divisible, modifier is out of range", 358 | tick: DurationScaler{Interval: 3, ScalingFactor: 1, originalDuration: 10 * time.Second}, 359 | duration: 10 * time.Second, 360 | currentStep: 3, 361 | expected: 10 * time.Second, 362 | }, 363 | { 364 | name: "Test case 10: Step is zero, modifier in range", 365 | tick: DurationScaler{Interval: 0, ScalingFactor: 0.1, originalDuration: 10 * time.Second}, 366 | duration: 10 * time.Second, 367 | currentStep: 3, 368 | expected: 10 * time.Second, 369 | }, 370 | { 371 | name: "Test case 6: Step number decreases", 372 | tick: DurationScaler{ 373 | Interval: 2, 374 | ScalingFactor: 0.5, 375 | originalDuration: 10 * time.Second, 376 | lastInterval: 4, 377 | }, 
378 | duration: 15 * time.Second, 379 | currentStep: 2, 380 | expected: 10 * time.Second, 381 | }, 382 | { 383 | name: "Test case 7: testing below minwait", 384 | tick: DurationScaler{ 385 | Interval: 1, 386 | ScalingFactor: -0.999, 387 | originalDuration: time.Millisecond * 2, 388 | lastInterval: 0, 389 | }, 390 | duration: MinWait, 391 | currentStep: 1, 392 | expected: MinWait, 393 | }, 394 | { 395 | name: "Test case 8: testing below minwait", 396 | tick: DurationScaler{ 397 | Interval: 1, 398 | ScalingFactor: -0.999, 399 | originalDuration: time.Millisecond * 900, 400 | lastInterval: 0, 401 | }, 402 | duration: MinWait, 403 | currentStep: 1, 404 | expected: MinWait, 405 | }, 406 | } 407 | 408 | for _, tc := range testCases { 409 | t.Run(tc.name, func(t *testing.T) { 410 | result := (&tc.tick).scaledDuration(tc.duration, tc.currentStep) 411 | if result != tc.expected { 412 | t.Errorf("Expected: %v, got: %v", tc.expected, result) 413 | } 414 | }) 415 | } 416 | } 417 | 418 | func FuzzTick(f *testing.F) { 419 | f.Fuzz(func(t *testing.T, step, cStep int, mod float64, orig, dur int64) { 420 | tick := &DurationScaler{ 421 | Interval: step, 422 | ScalingFactor: mod, 423 | originalDuration: time.Duration(orig), 424 | } 425 | 426 | v := tick.scaledDuration(time.Duration(dur), cStep) 427 | if v < 0 { 428 | t.Fatalf("negative duration: %v", v) 429 | } 430 | }) 431 | } 432 | 433 | func FuzzScaler(f *testing.F) { 434 | interceptFunc := func(_ context.Context, t int) (string, bool) { 435 | return fmt.Sprintf("%d", t), true 436 | } 437 | 438 | f.Fuzz(func( 439 | t *testing.T, 440 | wait, life int64, 441 | step, _ int, 442 | mod float64, 443 | max uint, 444 | in int, 445 | ) { 446 | ctx, cancel := context.WithCancel(context.Background()) 447 | defer cancel() 448 | 449 | tick := DurationScaler{ 450 | Interval: step, 451 | ScalingFactor: mod, 452 | } 453 | 454 | // Initialize Scaler 455 | scaler := Scaler[int, string]{ 456 | Wait: time.Millisecond * time.Duration(wait), 457 | Life: time.Millisecond * time.Duration(life), 458 | Fn: interceptFunc, 459 | WaitModifier: tick, 460 | Max: max, 461 | } 462 | 463 | // Create a simple input channel 464 | input := make(chan int, 1) 465 | defer close(input) 466 | 467 | // Execute the Scaler 468 | out, err := scaler.Exec(ctx, input) 469 | if err != nil { 470 | t.Errorf("Scaler Exec failed: %v", err) 471 | t.Fail() 472 | } 473 | 474 | // Send input value and check output 475 | input <- in 476 | 477 | select { 478 | case <-ctx.Done(): 479 | t.Errorf("Scaler Exec timed out") 480 | t.Fail() 481 | case res := <-out: 482 | if res != fmt.Sprintf("%d", in) { 483 | t.Errorf("Scaler Exec failed: expected %d, got %s", in, res) 484 | t.Fail() 485 | } 486 | 487 | t.Logf("Scaler Exec succeeded: expected %d, got %s", in, res) 488 | } 489 | }) 490 | } 491 | 492 | //nolint:gocognit // This is a test function 493 | func Test_Scaler_Max(t *testing.T) { 494 | tests := map[string]struct { 495 | max uint 496 | send int 497 | expected int 498 | }{ 499 | "max 0": { 500 | max: 0, 501 | send: 1000, 502 | expected: 1000, 503 | }, 504 | "max 1": { 505 | max: 1, 506 | send: 10, 507 | expected: 10, 508 | }, 509 | "max 2": { 510 | max: 2, 511 | send: 10, 512 | expected: 10, 513 | }, 514 | "max 3": { 515 | max: 3, 516 | send: 10, 517 | expected: 10, 518 | }, 519 | "max 4": { 520 | max: 4, 521 | send: 100, 522 | expected: 100, 523 | }, 524 | "max 1000": { 525 | max: 1000, 526 | send: 10000, 527 | expected: 10000, 528 | }, 529 | } 530 | 531 | for name, test := range tests { 532 | t.Run(name, func(t 
*testing.T) { 533 | ctx, cancel := context.WithCancel(context.Background()) 534 | defer cancel() 535 | 536 | inited := 0 537 | initedMu := sync.Mutex{} 538 | release := make(chan struct{}) 539 | 540 | interceptFunc := func(_ context.Context, t int) (int, bool) { 541 | defer func() { 542 | initedMu.Lock() 543 | defer initedMu.Unlock() 544 | inited-- 545 | }() 546 | 547 | initedMu.Lock() 548 | inited++ 549 | initedMu.Unlock() 550 | 551 | <-release 552 | 553 | return t, true 554 | } 555 | 556 | // Initialize Scaler 557 | scaler := Scaler[int, int]{ 558 | Wait: time.Millisecond, 559 | Life: time.Millisecond, 560 | Fn: interceptFunc, 561 | Max: test.max, 562 | } 563 | 564 | // Create a simple input channel 565 | input := make(chan int, test.send) 566 | defer close(input) 567 | 568 | for i := 0; i < test.send; i++ { 569 | input <- i 570 | } 571 | 572 | // Execute the Scaler 573 | out, err := scaler.Exec(ctx, input) 574 | if err != nil { 575 | t.Errorf("Scaler Exec failed: %v", err) 576 | t.Fail() 577 | } 578 | 579 | recv := 1 580 | 581 | tloop: 582 | for { 583 | select { 584 | case <-ctx.Done(): 585 | t.Errorf("Scaler Exec timed out") 586 | case _, ok := <-out: 587 | if !ok { 588 | break tloop 589 | } 590 | 591 | recv++ 592 | t.Logf("received %d", recv) 593 | if recv >= test.expected { 594 | break tloop 595 | } 596 | default: 597 | time.Sleep(time.Millisecond) 598 | 599 | initedMu.Lock() 600 | if test.max > 0 && inited > int(test.max) { 601 | t.Errorf("Scaler Exec failed: expected %d, got %d", test.max, inited) 602 | t.Fail() 603 | } 604 | initedMu.Unlock() 605 | 606 | // Release one goroutine 607 | release <- struct{}{} 608 | } 609 | } 610 | 611 | if recv != test.expected { 612 | t.Errorf("Scaler Exec failed: expected %d, got %d", test.expected, recv) 613 | t.Fail() 614 | } 615 | }) 616 | } 617 | } 618 | --------------------------------------------------------------------------------
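
Usage sketch (not part of the repository): the following is a minimal, hypothetical example of wiring together the Scaler API defined in scaler.go, based only on the fields and signatures shown above (Wait, Life, Max, Fn, WaitModifier, Exec). The import path is assumed from go.mod and may differ from the published module path; all durations and the Max value are illustrative placeholders, not recommendations.

package main

import (
	"context"
	"fmt"
	"time"

	// Assumed import path derived from go.mod; adjust to the real module path.
	stream "go.devnw.com/stream"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// A Scaler that fans work out to short-lived layer2 routines,
	// converting ints to strings. Values below are placeholders.
	s := stream.Scaler[int, string]{
		Wait: time.Millisecond,      // wait before spawning another layer2 routine
		Life: 10 * time.Millisecond, // idle lifetime of each layer2 routine
		Max:  4,                     // cap on concurrent layer2 routines (0 = unbounded)
		WaitModifier: stream.DurationScaler{
			Interval:      10,  // adjust the wait every 10 scaling steps
			ScalingFactor: 0.1, // each adjustment adds 10% of the original wait
		},
		Fn: func(_ context.Context, v int) (string, bool) {
			return fmt.Sprintf("%d", v), true // returning false drops the value
		},
	}

	in := make(chan int)
	go func() {
		defer close(in)
		for i := 0; i < 100; i++ {
			in <- i
		}
	}()

	out, err := s.Exec(ctx, in)
	if err != nil {
		// ErrFnRequired is returned when Fn is nil.
		panic(err)
	}

	// out is closed once in is closed and all layer2 routines have exited.
	for v := range out {
		fmt.Println(v)
	}
}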