├── .github
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── .golangci.yml
├── .goreleaser.yml
├── BENCHMARK.md
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── circuit.go
├── circuit_test.go
├── errors.go
├── example
│   ├── buffer
│   │   └── buffer.go
│   ├── circuit
│   │   └── circuit.go
│   ├── db
│   │   ├── db.go
│   │   └── db_test.go
│   ├── etl
│   │   └── etl.go
│   ├── fanout
│   │   └── fanout.go
│   ├── metrics
│   │   └── metrics.go
│   ├── pipeline
│   │   └── pipeline.go
│   ├── pooling
│   │   └── pooling.go
│   ├── ratelimit
│   │   └── ratelimit.go
│   ├── retry
│   │   └── retry.go
│   ├── timeout
│   │   └── timeout.go
│   └── tracing
│       └── tracing.go
├── fluxus.go
├── fluxus_test.go
├── go.mod
├── go.sum
├── logo.svg
├── metrics.go
├── metrics_test.go
├── pool.go
├── pool_test.go
├── rate_limiter.go
├── rate_limiter_test.go
├── tracing.go
└── tracing_test.go

/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Go CI
2 | 
3 | on:
4 |   push:
5 |     branches: [main] # Or your default branch
6 |   pull_request:
7 |     branches: [main] # Or your default branch
8 | 
9 | jobs:
10 |   build-and-test:
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - name: Checkout code
14 |         uses: actions/checkout@v4
15 | 
16 |       - name: Set up Go
17 |         uses: actions/setup-go@v5
18 |         with:
19 |           go-version: "stable" # Or your specific Go version
20 | 
21 |       - name: Build
22 |         run: go build ./... # Check if the code builds
23 | 
24 |       - name: Test
25 |         run: go test -race -vet=off ./... # Run tests with race detector
26 |         # -vet=off might be needed if fuzz tests cause issues with vet
27 | 
28 |       - name: Lint
29 |         uses: golangci/golangci-lint-action@v7.0.0 # Pin the action to a specific release
30 |         with:
31 |           version: latest # Or your specific linter version
32 | 
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | 
3 | on:
4 |   push:
5 |     tags:
6 |       - "v*" # Trigger on tags like v1.0.0, v0.1.1, etc.
7 | 
8 | jobs:
9 |   goreleaser:
10 |     runs-on: ubuntu-latest
11 |     permissions:
12 |       contents: write # Required to upload release assets
13 |     steps:
14 |       - name: Checkout code
15 |         uses: actions/checkout@v4
16 |         with:
17 |           fetch-depth: 0 # Fetches all history for changelog generation
18 | 
19 |       - name: Set up Go
20 |         uses: actions/setup-go@v5
21 |         with:
22 |           go-version: "stable" # Or specify your Go version
23 | 
24 |       # --- ADD THIS STEP ---
25 |       - name: Run tests
26 |         run: go test ./...
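27 |       # Gating the release on the test suite: GitHub Actions stops the job if
28 |       # this step fails, so the GoReleaser step below never runs on red tests.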
29 |       # --- END OF ADDED STEP ---
30 | 
31 |       - name: Run GoReleaser
32 |         uses: goreleaser/goreleaser-action@v5
33 |         with:
34 |           # Optional: specify GoReleaser version, otherwise latest is used
35 |           # version: latest
36 |           args: release --clean # Run the release command
37 |         env:
38 |           # GITHUB_TOKEN is automatically provided by GitHub Actions
39 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
40 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # If you prefer the allow list template instead of the deny list, see community template:
2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
3 | #
4 | # Binaries for programs and plugins
5 | *.exe
6 | *.exe~
7 | *.dll
8 | *.so
9 | *.dylib
10 | 
11 | # Test binary, built with `go test -c`
12 | *.test
13 | 
14 | # Output of the go coverage tool, specifically when used with LiteIDE
15 | *.out
16 | 
17 | # Dependency directories (remove the comment below to include it)
18 | # vendor/
19 | 
20 | # Go workspace file
21 | go.work
22 | go.work.sum
23 | 
24 | # env file
25 | .env
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | project_name: fluxus
2 | 
3 | builds:
4 |   - skip: true
5 | 
6 | # Optional: Run go mod tidy before release
7 | before:
8 |   hooks:
9 |     - go mod tidy
10 | 
11 | # Keep snapshot config for pre-releases
12 | snapshot:
13 |   name_template: "{{ incpatch .Version }}-next"
14 | 
15 | # Keep changelog generation
16 | changelog:
17 |   sort: asc
18 |   filters:
19 |     exclude:
20 |       - "^docs:"
21 |       - "^test:"
22 |       - "^ci:"
23 |       - "^chore:"
24 |       - Merge pull request
25 |       - Merge branch
26 | 
--------------------------------------------------------------------------------
/BENCHMARK.md:
--------------------------------------------------------------------------------
1 | ## Benchmark Results
2 | 
3 | Below are the benchmark results from my machine, an `AMD Ryzen 5 5600H`:
4 | 
5 | ```
6 | BenchmarkCircuitBreaker-12                                   100000000    100.1 ns/op     40 B/op    1 allocs/op
7 | BenchmarkSimplePipeline-12                                    23377321    50.60 ns/op     16 B/op    1 allocs/op
8 | BenchmarkChainedPipeline-12                                    8355632    142.4 ns/op     72 B/op    2 allocs/op
9 | BenchmarkParallelProcessing-12                                  387948     3060 ns/op    904 B/op   16 allocs/op
10 | BenchmarkFanOut-12                                              190591     5911 ns/op   2083 B/op   39 allocs/op
11 | BenchmarkFanOutConcurrencyLimited-12                             13261    89991 ns/op   6670 B/op  292 allocs/op
12 | BenchmarkRetry-12                                              2800845    428.4 ns/op    176 B/op    8 allocs/op
13 | BenchmarkBuffer-12                                              301753     3575 ns/op  17152 B/op   11 allocs/op
14 | BenchmarkComplexPipeline-12                                    2250320    531.8 ns/op    336 B/op   11 allocs/op
15 | BenchmarkChainVsDirectCalls/DirectCalls-12                    17599461    63.21 ns/op     16 B/op    1 allocs/op
16 | BenchmarkChainVsDirectCalls/ChainedStages-12                  16818004    70.94 ns/op     16 B/op    1 allocs/op
17 | BenchmarkConcurrencyScaling/Concurrency-1-12                      3802   319876 ns/op   2384 B/op   10 allocs/op
18 | BenchmarkConcurrencyScaling/Concurrency-2-12                      7035   170881 ns/op   2480 B/op   11 allocs/op
19 | BenchmarkConcurrencyScaling/Concurrency-4-12                     13096    91488 ns/op   2672 B/op   13 allocs/op
20 | BenchmarkConcurrencyScaling/Concurrency-8-12                     17648    67781 ns/op   3056 B/op   17 allocs/op
21 | BenchmarkConcurrencyScaling/Concurrency-16-12                    17414    68519 ns/op   3828 B/op   25 allocs/op
22 | BenchmarkConcurrencyScaling/Concurrency-32-12                    17186    69543 ns/op   5375 B/op   41 allocs/op
23 | BenchmarkConcurrencyScaling/Concurrency-Unlimited-12             14366    82260 ns/op  11949 B/op  109 allocs/op
24 | BenchmarkChainDepth/ChainDepth-1-12                          232961623    5.171 ns/op      0 B/op    0 allocs/op
25 | BenchmarkChainDepth/ChainDepth-5-12                           44821777    26.21 ns/op      0 B/op    0 allocs/op
26 | BenchmarkChainDepth/ChainDepth-10-12                          20625679    56.83 ns/op      0 B/op    0 allocs/op
27 | BenchmarkChainDepth/ChainDepth-20-12                           8324926    152.6 ns/op      0 B/op    0 allocs/op
28 | BenchmarkChainDepth/ChainDepth-50-12                           2514831    510.9 ns/op      0 B/op    0 allocs/op
29 | BenchmarkMetricatedStage/WithoutMetrics-12                    28803571    40.90 ns/op     16 B/op    1 allocs/op
30 | BenchmarkMetricatedStage/WithMetrics-12                       21703272    57.36 ns/op     16 B/op    1 allocs/op
31 | BenchmarkRateLimiter/NoLimiter-12                             25708868    44.72 ns/op     16 B/op    1 allocs/op
32 | BenchmarkRateLimiter/RateInf_Burst1-12                         2479399    550.0 ns/op    400 B/op    6 allocs/op
33 | BenchmarkRateLimiter/RateInf_Burst100-12                       2504014    499.6 ns/op    400 B/op    6 allocs/op
34 | BenchmarkRateLimiterAllowCheck/RateInf_Burst1-12              60086824    19.71 ns/op      0 B/op    0 allocs/op
35 | BenchmarkRateLimiterAllowCheck/RateInf_Burst100-12            59908339    19.69 ns/op      0 B/op    0 allocs/op
36 | BenchmarkRateLimiterRealWorld/NormalPipeline_short-12         19974066    59.67 ns/op      8 B/op    1 allocs/op
37 | BenchmarkRateLimiterRealWorld/RateLimitedPipeline_short-12     2565648    500.5 ns/op    392 B/op    6 allocs/op
38 | BenchmarkRateLimiterRealWorld/NormalPipeline_medium-12         2829969    442.4 ns/op    272 B/op    3 allocs/op
39 | BenchmarkRateLimiterRealWorld/RateLimitedPipeline_medium-12    1319850    855.0 ns/op    656 B/op    8 allocs/op
40 | BenchmarkRateLimiterRealWorld/NormalPipeline_long-12           1389391    930.5 ns/op    528 B/op    2 allocs/op
41 | BenchmarkRateLimiterRealWorld/RateLimitedPipeline_long-12       864838     1412 ns/op    912 B/op    7 allocs/op
42 | BenchmarkComponentComparison/BaseStage-12                     16438626    75.16 ns/op     24 B/op    1 allocs/op
43 | BenchmarkComponentComparison/MetricStage-12                   13914414    89.06 ns/op     24 B/op    1 allocs/op
44 | BenchmarkComponentComparison/RateLimitStage-12                 2420282    581.4 ns/op    408 B/op    6 allocs/op
45 | BenchmarkComponentComparison/SimplePipeline-12                13281608    89.45 ns/op     24 B/op    1 allocs/op
46 | BenchmarkComponentComparison/Combined_MetricsAndRateLimit-12   2109991    595.6 ns/op    408 B/op    6 allocs/op
47 | BenchmarkTracing/SimpleStage_NoTracing_short-12               37968184    33.81 ns/op      8 B/op    1 allocs/op
48 | BenchmarkTracing/SimpleStage_NoopTracing_short-12              2094750    540.7 ns/op    320 B/op    7 allocs/op
49 | BenchmarkTracing/SimpleStage_Recording_short-12                 877770     1455 ns/op   1511 B/op   10 allocs/op
50 | BenchmarkTracing/ComplexStage_NoTracing_short-12               8414562    156.3 ns/op     24 B/op    3 allocs/op
51 | BenchmarkTracing/ComplexStage_NoopTracing_short-12             1788638    611.6 ns/op    336 B/op    9 allocs/op
52 | BenchmarkTracing/ComplexStage_Recording_short-12               1000000     1430 ns/op   1497 B/op   12 allocs/op
53 | BenchmarkTracing/SimpleStage_NoTracing_medium-12               8619772    141.3 ns/op     48 B/op    1 allocs/op
54 | BenchmarkTracing/SimpleStage_NoopTracing_medium-12             2189911    595.9 ns/op    360 B/op    7 allocs/op
55 | BenchmarkTracing/SimpleStage_Recording_medium-12               1000000     1314 ns/op   1547 B/op   10 allocs/op
56 | BenchmarkTracing/ComplexStage_NoTracing_medium-12              2277123    468.7 ns/op    240 B/op    5 allocs/op
57 | BenchmarkTracing/ComplexStage_NoopTracing_medium-12            1340713    950.4 ns/op    552 B/op   11 allocs/op
58 | BenchmarkTracing/ComplexStage_Recording_medium-12               797193     1765 ns/op   1714 B/op   14 allocs/op
59 | BenchmarkTracing/SimpleStage_NoTracing_long-12                 2037211    572.0 ns/op    288 B/op    1 allocs/op
60 | BenchmarkTracing/SimpleStage_NoopTracing_long-12               1200326    981.4 ns/op    600 B/op    7 allocs/op
61 | BenchmarkTracing/SimpleStage_Recording_long-12                  844416     1784 ns/op   1792 B/op   10 allocs/op
62 | BenchmarkTracing/ComplexStage_NoTracing_long-12                 767425     2059 ns/op   1728 B/op    6 allocs/op
63 | BenchmarkTracing/ComplexStage_NoopTracing_long-12               453865     3074 ns/op   2040 B/op   12 allocs/op
64 | BenchmarkTracing/ComplexStage_Recording_long-12                 496234     2732 ns/op   3032 B/op   15 allocs/op
65 | BenchmarkTracedComponents/Baseline_NoTracing-12               25623832    47.82 ns/op     16 B/op    1 allocs/op
66 | BenchmarkTracedComponents/TracedStage-12                       2827249    433.4 ns/op    264 B/op    6 allocs/op
67 | BenchmarkTracedComponents/TracedFanOut-12                       239308     4603 ns/op   1352 B/op   21 allocs/op
68 | BenchmarkTracedComponents/TracedRetry-12                        897397     1277 ns/op    936 B/op   19 allocs/op
69 | BenchmarkTracedComponents/TracedPipeline-12                    2915329    420.2 ns/op    240 B/op    5 allocs/op
70 | BenchmarkTracingWithContext/EmptyContext-12                    2724776    438.4 ns/op    264 B/op    6 allocs/op
71 | BenchmarkTracingWithContext/ContextWithValues-12               2599760    459.1 ns/op    264 B/op    6 allocs/op
72 | BenchmarkTracingWithContext/ContextWithSpan-12                 2769135    518.0 ns/op    264 B/op    6 allocs/op
73 | ```
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | 
3 | All notable changes to the Fluxus project will be documented in this file.
4 | 
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7 | 
8 | ## [1.1.1] - 2025-04-11
9 | 
10 | ### Added
11 | - N/A
12 | 
13 | ### Changed
14 | - Moved from GPL-3 to MIT license for better compatibility with other projects
15 | 
16 | ### Removed
17 | - N/A
18 | 
19 | [1.1.1]: https://github.com/synoptiq/go-fluxus/releases/tag/v1.1.1
20 | 
21 | ## [1.1.0] - 2025-04-10
22 | 
23 | ### Added
24 | - Added new MultiError and MapItemError types for better error handling
25 | - Added new Map stage for processing items in parallel
26 | - Added more examples to the documentation
27 | 
28 | ### Changed
29 | - N/A
30 | 
31 | ### Removed
32 | - N/A
33 | 
34 | [1.1.0]: https://github.com/synoptiq/go-fluxus/releases/tag/v1.1.0
35 | 
36 | ## [1.0.0] - 2025-04-10
37 | 
38 | ### Added
39 | - Initial release of the Fluxus pipeline orchestration library
40 | - Core pipeline components: `Stage`, `Chain`, `Pipeline`
41 | - Fan-out/fan-in patterns for parallel processing
42 | - Retry mechanism with backoff strategies
43 | - Buffer for batch processing
44 | - Timeout handling for stages
45 | - Comprehensive test suite
46 | - Benchmark suite for performance analysis
47 | - Structured error types for better error handling
48 | - Metrics collection interface for monitoring pipeline performance
49 | - Prometheus metrics implementation
50 | - Circuit breaker pattern for increased resilience
51 | - Rate limiter functionality for controlled resource usage
52 | - Fuzzing tests to catch edge cases
53 | - ETL pipeline example showcasing library features
54 | - GitHub Actions CI/CD workflow
55 | - Makefile with common development tasks
56 | - Documentation improvements
57 | 
58 | ### Changed
59 | - N/A (initial release)
60 | 
61 | ### Removed
62 | - N/A (initial release)
63 | 
64 | [1.0.0]: https://github.com/synoptiq/go-fluxus/releases/tag/v1.0.0
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Fluxus
2 | 
3 | Thank you for your interest in contributing to Fluxus! This document provides guidelines and instructions for contributing to the project.
4 | 
5 | ## Code of Conduct
6 | 
7 | By participating in this project, you agree to abide by our Code of Conduct. Please read it before contributing.
8 | 
9 | ## Getting Started
10 | 
11 | 1. **Fork the repository** on GitHub.
12 | 2. **Clone your fork** locally:
13 |    ```bash
14 |    git clone https://github.com/<your-username>/go-fluxus.git
15 |    cd go-fluxus
16 |    ```
17 | 3. **Add the upstream repository** as a remote:
18 |    ```bash
19 |    git remote add upstream https://github.com/synoptiq/go-fluxus.git
20 |    ```
21 | 4. **Set up your development environment**:
22 |    ```bash
23 |    go mod download
24 |    ```
25 | 
26 | ## Development Workflow
27 | 
28 | 1. **Create a branch** for your feature or bugfix:
29 |    ```bash
30 |    git checkout -b feature/your-feature-name
31 |    ```
32 | 
33 | 2. **Make your changes** and write tests for them.
34 | 
35 | 3. **Run tests locally** to ensure they pass:
36 |    ```bash
37 |    go test -v ./...
38 |    ```
39 | 
40 | 4. **Run linters** to ensure code quality:
41 |    ```bash
42 |    golangci-lint run
43 |    ```
44 | 
45 | 5. **Commit your changes** with a clear, descriptive message:
46 |    ```bash
47 |    git commit -m "feat: add new feature X"
48 |    ```
49 | 
50 |    We follow [Conventional Commits](https://www.conventionalcommits.org/) for commit messages.
51 | 
52 | 6. **Push your branch** to your fork:
53 |    ```bash
54 |    git push origin feature/your-feature-name
55 |    ```
56 | 
57 | 7. **Create a Pull Request** from your fork to the upstream repository.
58 | 
59 | ## Pull Request Guidelines
60 | 
61 | When submitting a pull request:
62 | 
63 | 1. **Include tests** for any new functionality.
64 | 2. **Update documentation** if needed.
65 | 3. **Describe the changes** in detail in the PR description.
66 | 4. **Link any related issues** using keywords like "Fixes #123" or "Closes #456".
67 | 5. **Ensure all CI checks pass** before requesting a review.
68 | 
69 | ## Code Style
70 | 
71 | We follow standard Go code style conventions:
72 | 
73 | 1. Run `go fmt` on your code before committing.
74 | 2. Use meaningful variable and function names.
75 | 3. Write godoc-style comments for exported functions, types, and constants.
76 | 4. Follow the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) guidelines.
77 | 
78 | ## Testing
79 | 
80 | - Every new feature or bug fix should include tests.
81 | - Run the full test suite with race detection before submitting your PR:
82 |   ```bash
83 |   go test -race ./...
84 |   ```
85 | - For performance-critical changes, include benchmarks:
86 |   ```bash
87 |   go test -bench=Bench -v .
88 |   ```
89 | - A minimal test-and-benchmark sketch is included at the end of this guide.
90 | 
91 | ## Documentation
92 | 
93 | - Update the README.md if your changes affect the public API or user experience.
94 | - Add godoc-style comments to any new exported functions, types, or constants.
95 | - Consider adding examples for new features in the `example/` directory.
96 | 
97 | ## Reporting Bugs
98 | 
99 | When reporting bugs:
100 | 
101 | 1. **Use the issue tracker** on GitHub.
102 | 2. **Describe the bug** in detail.
103 | 3. **Provide a minimal reproduction** case, if possible.
104 | 4. **Include your environment details** (Go version, OS, etc.).
105 | 
106 | ## Feature Requests
107 | 
108 | We welcome feature requests! When submitting a feature request:
109 | 
110 | 1. **Check existing issues** to see if it's already been requested.
111 | 2. **Describe the feature** in detail.
112 | 3. **Explain the use case** for the feature.
113 | 4. **Consider contributing** the feature yourself if possible.
114 | 
115 | ## License
116 | 
117 | By contributing to Fluxus, you agree that your contributions will be licensed under the project's [MIT License](LICENSE).
118 | 
119 | ## Questions
120 | 
121 | If you have any questions about contributing, please open an issue or reach out to the maintainers.
122 | 
123 | Thank you for your contributions!
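124 | 
125 | ## Example: Testing and Benchmarking a Stage
126 | 
127 | The sketch below shows the general shape we look for in contributions. `upperStage` is a hypothetical stage invented purely for illustration; adapt the pattern to the stage you are actually contributing:
128 | 
129 | ```go
130 | package fluxus_test
131 | 
132 | import (
133 | 	"context"
134 | 	"strings"
135 | 	"testing"
136 | 
137 | 	"github.com/synoptiq/go-fluxus"
138 | )
139 | 
140 | // upperStage exists only to illustrate the pattern; it is not part of the library.
141 | var upperStage = fluxus.StageFunc[string, string](func(_ context.Context, in string) (string, error) {
142 | 	return strings.ToUpper(in), nil
143 | })
144 | 
145 | // A focused unit test: exercise one behavior per assertion.
146 | func TestUpperStage(t *testing.T) {
147 | 	got, err := upperStage.Process(context.Background(), "fluxus")
148 | 	if err != nil {
149 | 		t.Fatalf("unexpected error: %v", err)
150 | 	}
151 | 	if got != "FLUXUS" {
152 | 		t.Errorf("expected %q, got %q", "FLUXUS", got)
153 | 	}
154 | }
155 | 
156 | // A matching benchmark for performance-sensitive changes.
157 | func BenchmarkUpperStage(b *testing.B) {
158 | 	ctx := context.Background()
159 | 	b.ResetTimer()
160 | 	for i := 0; i < b.N; i++ {
161 | 		_, _ = upperStage.Process(ctx, "fluxus")
162 | 	}
163 | }
164 | ```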
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Daniele 'dzonerzy' Linguaglossa 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /circuit.go: -------------------------------------------------------------------------------- 1 | package fluxus 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | // CircuitBreakerState represents the state of a circuit breaker. 11 | type CircuitBreakerState int 12 | 13 | const ( 14 | // CircuitClosed is the normal state where requests are allowed through. 15 | CircuitClosed CircuitBreakerState = iota 16 | // CircuitOpen is the state where requests are blocked. 17 | CircuitOpen 18 | // CircuitHalfOpen is the state where a limited number of requests are allowed through to test the system. 19 | CircuitHalfOpen 20 | ) 21 | 22 | // ErrCircuitOpen is returned when the circuit is open and requests are blocked. 23 | var ErrCircuitOpen = errors.New("circuit breaker is open") 24 | 25 | // CircuitBreaker represents a circuit breaker that can prevent requests to a failing service. 26 | type CircuitBreaker[I, O any] struct { 27 | stage Stage[I, O] 28 | failureThreshold int 29 | resetTimeout time.Duration 30 | halfOpenMax int 31 | 32 | state CircuitBreakerState 33 | failures int 34 | lastError error 35 | openTime time.Time 36 | halfOpenCount int 37 | consecutiveSuccesses int 38 | successThreshold int 39 | 40 | mu sync.RWMutex 41 | } 42 | 43 | // CircuitBreakerOption is a function that configures a CircuitBreaker. 44 | type CircuitBreakerOption[I, O any] func(*CircuitBreaker[I, O]) 45 | 46 | // WithSuccessThreshold sets the number of consecutive successes needed to close the circuit. 47 | func WithSuccessThreshold[I, O any](threshold int) CircuitBreakerOption[I, O] { 48 | return func(cb *CircuitBreaker[I, O]) { 49 | cb.successThreshold = threshold 50 | } 51 | } 52 | 53 | // WithHalfOpenMaxRequests sets the maximum number of requests allowed when half-open. 54 | func WithHalfOpenMaxRequests[I, O any](maxReq int) CircuitBreakerOption[I, O] { 55 | return func(cb *CircuitBreaker[I, O]) { 56 | cb.halfOpenMax = maxReq 57 | } 58 | } 59 | 60 | // NewCircuitBreaker creates a new circuit breaker with the given stage. 
61 | func NewCircuitBreaker[I, O any]( 62 | stage Stage[I, O], 63 | failureThreshold int, 64 | resetTimeout time.Duration, 65 | options ...CircuitBreakerOption[I, O], 66 | ) *CircuitBreaker[I, O] { 67 | cb := &CircuitBreaker[I, O]{ 68 | stage: stage, 69 | failureThreshold: failureThreshold, 70 | resetTimeout: resetTimeout, 71 | state: CircuitClosed, 72 | halfOpenMax: 1, 73 | successThreshold: 1, 74 | } 75 | 76 | // Apply options 77 | for _, option := range options { 78 | option(cb) 79 | } 80 | 81 | return cb 82 | } 83 | 84 | // Process implements the Stage interface for CircuitBreaker. 85 | func (cb *CircuitBreaker[I, O]) Process(ctx context.Context, input I) (O, error) { 86 | var zero O 87 | 88 | // Check if the circuit is open 89 | if !cb.allowRequest() { 90 | return zero, ErrCircuitOpen 91 | } 92 | 93 | // Track starting time for metrics 94 | startTime := time.Now() 95 | 96 | // Process the request 97 | output, err := cb.stage.Process(ctx, input) 98 | 99 | // Update circuit state based on the result 100 | cb.recordResult(err, time.Since(startTime)) 101 | 102 | return output, err 103 | } 104 | 105 | // allowRequest checks if a request should be allowed through the circuit breaker. 106 | func (cb *CircuitBreaker[I, O]) allowRequest() bool { 107 | cb.mu.RLock() 108 | defer cb.mu.RUnlock() 109 | 110 | switch cb.state { 111 | case CircuitClosed: 112 | // When closed, allow all requests 113 | return true 114 | case CircuitOpen: 115 | // When open, check if enough time has passed to try again 116 | if time.Since(cb.openTime) > cb.resetTimeout { 117 | // Need to transition to half-open, but require a write lock 118 | cb.mu.RUnlock() 119 | cb.transitionToHalfOpen() 120 | cb.mu.RLock() 121 | return cb.halfOpenCount < cb.halfOpenMax 122 | } 123 | return false 124 | case CircuitHalfOpen: 125 | // When half-open, allow a limited number of requests 126 | return cb.halfOpenCount < cb.halfOpenMax 127 | default: 128 | return false 129 | } 130 | } 131 | 132 | // transitionToHalfOpen transitions the circuit from open to half-open. 133 | func (cb *CircuitBreaker[I, O]) transitionToHalfOpen() { 134 | cb.mu.Lock() 135 | defer cb.mu.Unlock() 136 | 137 | // Only transition if we're still open and enough time has passed 138 | if cb.state == CircuitOpen && time.Since(cb.openTime) > cb.resetTimeout { 139 | cb.state = CircuitHalfOpen 140 | cb.halfOpenCount = 0 141 | cb.consecutiveSuccesses = 0 142 | } 143 | } 144 | 145 | // recordResult updates the circuit state based on the result of a request. 146 | func (cb *CircuitBreaker[I, O]) recordResult(err error, _ time.Duration) { 147 | cb.mu.Lock() 148 | defer cb.mu.Unlock() 149 | 150 | if err != nil { 151 | // Record failure 152 | switch cb.state { 153 | case CircuitClosed: 154 | cb.failures++ 155 | cb.lastError = err 156 | if cb.failures >= cb.failureThreshold { 157 | cb.tripOpen() 158 | } 159 | case CircuitHalfOpen: 160 | cb.tripOpen() 161 | case CircuitOpen: 162 | // No state change needed 163 | } 164 | cb.consecutiveSuccesses = 0 165 | } else { 166 | // Record success 167 | switch cb.state { 168 | case CircuitClosed: 169 | cb.failures = 0 170 | case CircuitHalfOpen: 171 | cb.consecutiveSuccesses++ 172 | cb.halfOpenCount++ 173 | if cb.consecutiveSuccesses >= cb.successThreshold { 174 | cb.closeCircuit() 175 | } 176 | case CircuitOpen: 177 | // No state change needed 178 | } 179 | } 180 | } 181 | 182 | // tripOpen transitions the circuit to the open state. 
183 | func (cb *CircuitBreaker[I, O]) tripOpen() { 184 | cb.state = CircuitOpen 185 | cb.openTime = time.Now() 186 | cb.halfOpenCount = 0 187 | cb.consecutiveSuccesses = 0 188 | } 189 | 190 | // closeCircuit transitions the circuit to the closed state. 191 | func (cb *CircuitBreaker[I, O]) closeCircuit() { 192 | cb.state = CircuitClosed 193 | cb.failures = 0 194 | cb.halfOpenCount = 0 195 | cb.consecutiveSuccesses = 0 196 | } 197 | 198 | // State returns the current state of the circuit breaker. 199 | func (cb *CircuitBreaker[I, O]) State() CircuitBreakerState { 200 | cb.mu.RLock() 201 | defer cb.mu.RUnlock() 202 | return cb.state 203 | } 204 | 205 | // LastError returns the last error that caused the circuit to open. 206 | func (cb *CircuitBreaker[I, O]) LastError() error { 207 | cb.mu.RLock() 208 | defer cb.mu.RUnlock() 209 | return cb.lastError 210 | } 211 | 212 | // Reset forces the circuit breaker back to the closed state. 213 | func (cb *CircuitBreaker[I, O]) Reset() { 214 | cb.mu.Lock() 215 | defer cb.mu.Unlock() 216 | cb.state = CircuitClosed 217 | cb.failures = 0 218 | cb.halfOpenCount = 0 219 | cb.consecutiveSuccesses = 0 220 | cb.lastError = nil 221 | } 222 | -------------------------------------------------------------------------------- /circuit_test.go: -------------------------------------------------------------------------------- 1 | package fluxus_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "testing" 8 | "time" 9 | 10 | "github.com/synoptiq/go-fluxus" 11 | ) 12 | 13 | // mockExternalService simulates an unreliable service 14 | type mockExternalService struct { 15 | failureCount int 16 | maxFailures int 17 | mu sync.Mutex 18 | } 19 | 20 | // Process simulates an external service call that can fail 21 | func (s *mockExternalService) Process(_ context.Context, input string) (string, error) { 22 | s.mu.Lock() 23 | defer s.mu.Unlock() 24 | 25 | s.failureCount++ 26 | if s.failureCount <= s.maxFailures { 27 | return "", fmt.Errorf("service failure %d", s.failureCount) 28 | } 29 | 30 | return fmt.Sprintf("processed: %s", input), nil 31 | } 32 | 33 | // TestCircuitBreakerPipeline demonstrates circuit breaker in a pipeline context 34 | func TestCircuitBreakerPipeline(t *testing.T) { 35 | scenarios := []struct { 36 | name string 37 | maxFailures int 38 | failureThreshold int 39 | resetTimeout time.Duration 40 | expectedErrors int 41 | expectedResults int 42 | expectedState fluxus.CircuitBreakerState 43 | }{ 44 | { 45 | name: "Fails before threshold", 46 | maxFailures: 2, 47 | failureThreshold: 3, 48 | resetTimeout: 100 * time.Millisecond, 49 | expectedErrors: 2, 50 | expectedResults: 0, 51 | expectedState: fluxus.CircuitClosed, 52 | }, 53 | { 54 | name: "Opens after threshold", 55 | maxFailures: 5, 56 | failureThreshold: 3, 57 | resetTimeout: 100 * time.Millisecond, 58 | expectedErrors: 3, 59 | expectedResults: 0, 60 | expectedState: fluxus.CircuitOpen, 61 | }, 62 | } 63 | 64 | for _, tc := range scenarios { 65 | t.Run(tc.name, func(t *testing.T) { 66 | // Create a mock external service 67 | service := &mockExternalService{ 68 | maxFailures: tc.maxFailures, 69 | } 70 | 71 | // Create a stage from the service 72 | serviceStage := fluxus.StageFunc[string, string](service.Process) 73 | 74 | // Wrap the stage with a circuit breaker 75 | circuitBreaker := fluxus.NewCircuitBreaker( 76 | serviceStage, 77 | tc.failureThreshold, 78 | tc.resetTimeout, 79 | ) 80 | 81 | // Create a pipeline with the circuit breaker 82 | pipeline := fluxus.NewPipeline(circuitBreaker) 83 | 84 | 
ctx := context.Background() 85 | results := make([]string, 0) 86 | errors := make([]error, 0) 87 | 88 | // Try to process multiple times 89 | for i := 0; i < 10; i++ { 90 | result, err := pipeline.Process(ctx, fmt.Sprintf("input-%d", i)) 91 | 92 | if err != nil { 93 | errors = append(errors, err) 94 | 95 | // For "Fails before threshold" scenario 96 | if tc.name == "Fails before threshold" && len(errors) == tc.maxFailures { 97 | break 98 | } 99 | 100 | // For "Opens after threshold" scenario 101 | if tc.name == "Opens after threshold" && len(errors) == tc.failureThreshold { 102 | break 103 | } 104 | } else { 105 | results = append(results, result) 106 | } 107 | } 108 | 109 | // Verify errors 110 | if len(errors) != tc.expectedErrors { 111 | t.Errorf("Expected %d errors, got %d (errors: %v)", 112 | tc.expectedErrors, len(errors), errors) 113 | } 114 | 115 | // Verify results 116 | if len(results) != tc.expectedResults { 117 | t.Errorf("Expected %d results, got %d (results: %v)", 118 | tc.expectedResults, len(results), results) 119 | } 120 | 121 | // Check circuit breaker state 122 | currentState := circuitBreaker.State() 123 | if currentState != tc.expectedState { 124 | t.Errorf("Expected circuit to be %v, got %v", tc.expectedState, currentState) 125 | } 126 | }) 127 | } 128 | } 129 | 130 | // TestCircuitBreakerRecovery tests the recovery mechanism 131 | func TestCircuitBreakerRecovery(t *testing.T) { 132 | service := &mockExternalService{ 133 | maxFailures: 3, 134 | } 135 | 136 | serviceStage := fluxus.StageFunc[string, string](service.Process) 137 | 138 | circuitBreaker := fluxus.NewCircuitBreaker( 139 | serviceStage, 140 | 3, 141 | 50*time.Millisecond, 142 | ) 143 | 144 | ctx := context.Background() 145 | 146 | // Trigger failures 147 | for i := 0; i < 3; i++ { 148 | _, err := circuitBreaker.Process(ctx, "input") 149 | if err == nil { 150 | t.Fatalf("Expected error on iteration %d", i) 151 | } 152 | } 153 | 154 | // Verify circuit is open 155 | if circuitBreaker.State() != fluxus.CircuitOpen { 156 | t.Errorf("Expected circuit to be open, got %v", circuitBreaker.State()) 157 | } 158 | 159 | // Wait for reset timeout 160 | time.Sleep(100 * time.Millisecond) 161 | 162 | // First request should be allowed in half-open state 163 | result, err := circuitBreaker.Process(ctx, "recovery-input") 164 | if err != nil { 165 | t.Errorf("Expected successful request in half-open state, got %v", err) 166 | } 167 | 168 | if result != "processed: recovery-input" { 169 | t.Errorf("Unexpected result: %s", result) 170 | } 171 | 172 | // Verify circuit is now closed 173 | if circuitBreaker.State() != fluxus.CircuitClosed { 174 | t.Errorf("Expected circuit to close after successful request, got %v", circuitBreaker.State()) 175 | } 176 | } 177 | 178 | // Benchmark circuit breaker performance 179 | func BenchmarkCircuitBreaker(b *testing.B) { 180 | service := &mockExternalService{ 181 | maxFailures: 5, 182 | } 183 | 184 | serviceStage := fluxus.StageFunc[string, string](service.Process) 185 | 186 | circuitBreaker := fluxus.NewCircuitBreaker( 187 | serviceStage, 188 | 3, 189 | 50*time.Millisecond, 190 | ) 191 | 192 | ctx := context.Background() 193 | 194 | b.ResetTimer() 195 | for i := 0; i < b.N; i++ { 196 | _, _ = circuitBreaker.Process(ctx, "benchmark-input") 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package fluxus 2 | 3 | import ( 4 | "fmt" 5 | "strings" 
6 | ) 7 | 8 | // Error types for specific failure scenarios in pipeline processing 9 | 10 | // StageError represents an error that occurred in a specific pipeline stage. 11 | type StageError struct { 12 | // StageName is an optional identifier for the stage where the error occurred 13 | StageName string 14 | // StageIndex is the index of the stage in a multi-stage operation (like FanOut) 15 | StageIndex int 16 | // OriginalError is the underlying error that occurred 17 | OriginalError error 18 | } 19 | 20 | // Error implements the error interface for StageError. 21 | func (e *StageError) Error() string { 22 | if e.StageName != "" { 23 | return fmt.Sprintf("stage %q (index %d): %v", e.StageName, e.StageIndex, e.OriginalError) 24 | } 25 | return fmt.Sprintf("stage %d: %v", e.StageIndex, e.OriginalError) 26 | } 27 | 28 | // Unwrap returns the underlying error for compatibility with errors.Is and errors.As. 29 | func (e *StageError) Unwrap() error { 30 | return e.OriginalError 31 | } 32 | 33 | // NewStageError creates a new StageError with the provided details. 34 | func NewStageError(stageName string, stageIndex int, err error) *StageError { 35 | return &StageError{ 36 | StageName: stageName, 37 | StageIndex: stageIndex, 38 | OriginalError: err, 39 | } 40 | } 41 | 42 | // FanOutError represents an error that occurred during a fan-out operation. 43 | type FanOutError struct { 44 | // FailedStages maps stage indices to their corresponding errors 45 | FailedStages map[int]error 46 | } 47 | 48 | // Error implements the error interface for FanOutError. 49 | func (e *FanOutError) Error() string { 50 | if len(e.FailedStages) == 1 { 51 | for idx, err := range e.FailedStages { 52 | return fmt.Sprintf("fan-out stage %d failed: %v", idx, err) 53 | } 54 | } 55 | 56 | return fmt.Sprintf("%d fan-out stages failed", len(e.FailedStages)) 57 | } 58 | 59 | // NewFanOutError creates a new FanOutError with the provided failed stages. 60 | func NewFanOutError(failedStages map[int]error) *FanOutError { 61 | return &FanOutError{ 62 | FailedStages: failedStages, 63 | } 64 | } 65 | 66 | // RetryExhaustedError occurs when all retry attempts have been exhausted without success. 67 | type RetryExhaustedError struct { 68 | // MaxAttempts is the maximum number of attempts that were made 69 | MaxAttempts int 70 | // LastError is the last error that occurred before giving up 71 | LastError error 72 | } 73 | 74 | // Error implements the error interface for RetryExhaustedError. 75 | func (e *RetryExhaustedError) Error() string { 76 | return fmt.Sprintf("retry exhausted %d attempts: %v", e.MaxAttempts, e.LastError) 77 | } 78 | 79 | // Unwrap returns the underlying error for compatibility with errors.Is and errors.As. 80 | func (e *RetryExhaustedError) Unwrap() error { 81 | return e.LastError 82 | } 83 | 84 | // NewRetryExhaustedError creates a new RetryExhaustedError with the provided details. 85 | func NewRetryExhaustedError(maxAttempts int, lastError error) *RetryExhaustedError { 86 | return &RetryExhaustedError{ 87 | MaxAttempts: maxAttempts, 88 | LastError: lastError, 89 | } 90 | } 91 | 92 | // TimeoutError occurs when a stage execution times out. 93 | type TimeoutError struct { 94 | // StageName is an optional identifier for the stage where the timeout occurred 95 | StageName string 96 | // Duration is the timeout duration 97 | Duration string 98 | // OriginalError is the underlying timeout error 99 | OriginalError error 100 | } 101 | 102 | // Error implements the error interface for TimeoutError. 
103 | func (e *TimeoutError) Error() string { 104 | if e.StageName != "" { 105 | return fmt.Sprintf("stage %q timed out after %s: %v", e.StageName, e.Duration, e.OriginalError) 106 | } 107 | return fmt.Sprintf("stage timed out after %s: %v", e.Duration, e.OriginalError) 108 | } 109 | 110 | // Unwrap returns the underlying error for compatibility with errors.Is and errors.As. 111 | func (e *TimeoutError) Unwrap() error { 112 | return e.OriginalError 113 | } 114 | 115 | // NewTimeoutError creates a new TimeoutError with the provided details. 116 | func NewTimeoutError(stageName string, duration string, err error) *TimeoutError { 117 | return &TimeoutError{ 118 | StageName: stageName, 119 | Duration: duration, 120 | OriginalError: err, 121 | } 122 | } 123 | 124 | // BufferError occurs during batch processing operations. 125 | type BufferError struct { 126 | // BatchIndex is the index of the batch where the error occurred 127 | BatchIndex int 128 | // Offset is the offset in the input where the batch starts 129 | Offset int 130 | // OriginalError is the underlying error 131 | OriginalError error 132 | } 133 | 134 | // Error implements the error interface for BufferError. 135 | func (e *BufferError) Error() string { 136 | return fmt.Sprintf("batch processing error at batch %d (offset %d): %v", e.BatchIndex, e.Offset, e.OriginalError) 137 | } 138 | 139 | // Unwrap returns the underlying error for compatibility with errors.Is and errors.As. 140 | func (e *BufferError) Unwrap() error { 141 | return e.OriginalError 142 | } 143 | 144 | // NewBufferError creates a new BufferError with the provided details. 145 | func NewBufferError(batchIndex, offset int, err error) *BufferError { 146 | return &BufferError{ 147 | BatchIndex: batchIndex, 148 | Offset: offset, 149 | OriginalError: err, 150 | } 151 | } 152 | 153 | // MultiError holds multiple errors, e.g., from Map or FanOut with CollectErrors. 154 | type MultiError struct { 155 | Errors []error 156 | } 157 | 158 | // NewMultiError creates a new MultiError. 159 | func NewMultiError(errs []error) *MultiError { 160 | // Filter out nil errors 161 | nonNilErrs := make([]error, 0, len(errs)) 162 | for _, err := range errs { 163 | if err != nil { 164 | nonNilErrs = append(nonNilErrs, err) 165 | } 166 | } 167 | if len(nonNilErrs) == 0 { 168 | return nil // Return nil if there are no actual errors 169 | } 170 | return &MultiError{Errors: nonNilErrs} 171 | } 172 | 173 | // Error implements the error interface. 174 | func (m *MultiError) Error() string { 175 | if m == nil || len(m.Errors) == 0 { 176 | return "no errors" 177 | } 178 | if len(m.Errors) == 1 { 179 | return m.Errors[0].Error() 180 | } 181 | var b strings.Builder 182 | fmt.Fprintf(&b, "%d errors occurred:", len(m.Errors)) 183 | for i, err := range m.Errors { 184 | fmt.Fprintf(&b, "\n [%d] %v", i, err) 185 | } 186 | return b.String() 187 | } 188 | 189 | // Unwrap provides compatibility with errors.Is/As by returning the first error. 190 | // You might choose a different behavior depending on your needs. 191 | func (m *MultiError) Unwrap() error { 192 | if m == nil || len(m.Errors) == 0 { 193 | return nil 194 | } 195 | return m.Errors[0] 196 | } 197 | 198 | // MapItemError represents an error that occurred while processing a specific item 199 | // within a Map stage. 
200 | type MapItemError struct { 201 | // ItemIndex is the index of the input item that caused the error 202 | ItemIndex int 203 | // OriginalError is the underlying error that occurred 204 | OriginalError error 205 | } 206 | 207 | // Error implements the error interface for MapItemError. 208 | func (e *MapItemError) Error() string { 209 | return fmt.Sprintf("map stage item %d failed: %v", e.ItemIndex, e.OriginalError) 210 | } 211 | 212 | // Unwrap returns the underlying error for compatibility with errors.Is and errors.As. 213 | func (e *MapItemError) Unwrap() error { 214 | return e.OriginalError 215 | } 216 | 217 | // NewMapItemError creates a new MapItemError with the provided details. 218 | func NewMapItemError(itemIndex int, err error) *MapItemError { 219 | return &MapItemError{ 220 | ItemIndex: itemIndex, 221 | OriginalError: err, 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /example/circuit/circuit.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "math/rand" 9 | "net/http" 10 | "time" 11 | 12 | "github.com/synoptiq/go-fluxus" 13 | ) 14 | 15 | // UnreliableService simulates an external service that occasionally fails 16 | type UnreliableService struct { 17 | failureRate float64 18 | responseTimeMs int 19 | requestCount int 20 | recoveryTimeMs int 21 | isInFailureMode bool 22 | failureModeTime time.Time 23 | } 24 | 25 | // NewUnreliableService creates a new service simulator 26 | func NewUnreliableService(failureRate float64, responseTimeMs, recoveryTimeMs int) *UnreliableService { 27 | return &UnreliableService{ 28 | failureRate: failureRate, 29 | responseTimeMs: responseTimeMs, 30 | recoveryTimeMs: recoveryTimeMs, 31 | } 32 | } 33 | 34 | // Call simulates calling the external service, which may fail 35 | func (s *UnreliableService) Call(ctx context.Context, request string) (string, error) { 36 | s.requestCount++ 37 | 38 | // Check if the service was in failure mode but has recovered 39 | if s.isInFailureMode && time.Since(s.failureModeTime) > time.Duration(s.recoveryTimeMs)*time.Millisecond { 40 | s.isInFailureMode = false 41 | log.Printf("Service recovered after %d ms", s.recoveryTimeMs) 42 | } 43 | 44 | // Simulate delay 45 | time.Sleep(time.Duration(s.responseTimeMs) * time.Millisecond) 46 | 47 | // Either simulate failure or success 48 | if s.isInFailureMode || rand.Float64() < s.failureRate { 49 | if !s.isInFailureMode { 50 | s.isInFailureMode = true 51 | s.failureModeTime = time.Now() 52 | log.Printf("Service entered failure mode! 
Will recover in %d ms", s.recoveryTimeMs) 53 | } 54 | return "", fmt.Errorf("service unavailable (request: %s)", request) 55 | } 56 | 57 | // Service worked correctly 58 | return fmt.Sprintf("Response for: %s", request), nil 59 | } 60 | 61 | // ServiceClient represents a client that uses the circuit breaker pattern 62 | type ServiceClient struct { 63 | service *UnreliableService 64 | circuitBreaker *fluxus.CircuitBreaker[string, string] 65 | } 66 | 67 | // NewServiceClient creates a new client with circuit breaker protection 68 | func NewServiceClient(service *UnreliableService) *ServiceClient { 69 | // Create a stage that calls the service 70 | serviceStage := fluxus.StageFunc[string, string](service.Call) 71 | 72 | // Wrap the service stage with a circuit breaker 73 | circuitBreaker := fluxus.NewCircuitBreaker( 74 | serviceStage, 75 | 3, // Failures threshold: open after 3 failures 76 | 5*time.Second, // Reset timeout: try again after 5 seconds 77 | fluxus.WithSuccessThreshold[string, string](2), // Require 2 successes to close again 78 | fluxus.WithHalfOpenMaxRequests[string, string](3), // Allow 3 test requests when half-open 79 | ) 80 | 81 | return &ServiceClient{ 82 | service: service, 83 | circuitBreaker: circuitBreaker, 84 | } 85 | } 86 | 87 | // CallService calls the service with circuit breaker protection 88 | func (c *ServiceClient) CallService(ctx context.Context, request string) (string, error) { 89 | return c.circuitBreaker.Process(ctx, request) 90 | } 91 | 92 | // GetState returns the current state of the circuit breaker 93 | func (c *ServiceClient) GetState() fluxus.CircuitBreakerState { 94 | return c.circuitBreaker.State() 95 | } 96 | 97 | // GetLastError returns the last error that caused the circuit to open 98 | func (c *ServiceClient) GetLastError() error { 99 | return c.circuitBreaker.LastError() 100 | } 101 | 102 | // HTTP handler for the example 103 | func serviceHandler(client *ServiceClient) http.HandlerFunc { 104 | return func(w http.ResponseWriter, r *http.Request) { 105 | // Extract request ID from query params or use default 106 | requestID := r.URL.Query().Get("id") 107 | if requestID == "" { 108 | requestID = fmt.Sprintf("req-%d", rand.Intn(1000)) 109 | } 110 | 111 | // Call service with circuit breaker protection 112 | response, err := client.CallService(r.Context(), requestID) 113 | 114 | // Get circuit state for display 115 | circuitState := client.GetState() 116 | var stateStr string 117 | 118 | switch circuitState { 119 | case fluxus.CircuitOpen: 120 | stateStr = "OPEN" 121 | case fluxus.CircuitHalfOpen: 122 | stateStr = "HALF-OPEN" 123 | case fluxus.CircuitClosed: 124 | stateStr = "CLOSED" 125 | } 126 | 127 | // Respond based on result 128 | if err != nil { 129 | w.WriteHeader(http.StatusServiceUnavailable) 130 | fmt.Fprintf(w, "Error: %v\nCircuit State: %s\n", err, stateStr) 131 | if errors.Is(err, fluxus.ErrCircuitOpen) { 132 | lastErr := client.GetLastError() 133 | fmt.Fprintf(w, "Circuit is open due to: %v\n", lastErr) 134 | } 135 | return 136 | } 137 | 138 | w.WriteHeader(http.StatusOK) 139 | fmt.Fprintf(w, "Success: %s\nCircuit State: %s\n", response, stateStr) 140 | } 141 | } 142 | 143 | func resetHandler(client *ServiceClient) http.HandlerFunc { 144 | return func(w http.ResponseWriter, r *http.Request) { 145 | client.circuitBreaker.Reset() 146 | w.WriteHeader(http.StatusOK) 147 | fmt.Fprintf(w, "Circuit breaker has been reset\n") 148 | } 149 | } 150 | 151 | func main() { 152 | // Seed random for consistent demo 153 | 
rand.Seed(time.Now().UnixNano()) // seed the global source used by rand.Float64/rand.Intn below (Go 1.20+ already auto-seeds)
154 | 
155 | 	// Create an unreliable service (40% failures, 200ms response time, recovers after 8 seconds)
156 | 	service := NewUnreliableService(0.4, 200, 8000)
157 | 
158 | 	// Create a client with circuit breaker
159 | 	client := NewServiceClient(service)
160 | 
161 | 	// Setup HTTP server
162 | 	http.HandleFunc("/call", serviceHandler(client))
163 | 	http.HandleFunc("/reset", resetHandler(client))
164 | 
165 | 	// Instructions
166 | 	port := 8080
167 | 	fmt.Printf("Circuit Breaker Demo Server\n")
168 | 	fmt.Printf("---------------------------\n")
169 | 	fmt.Printf("The server simulates an unreliable service with a 40%% failure rate.\n")
170 | 	fmt.Printf("The circuit breaker will open after 3 failures and try again after 5 seconds.\n\n")
171 | 	fmt.Printf("Endpoints:\n")
172 | 	fmt.Printf("  http://localhost:%d/call?id=your-request-id - Call the service\n", port)
173 | 	fmt.Printf("  http://localhost:%d/reset - Reset the circuit breaker\n\n", port)
174 | 	fmt.Printf("Try making several calls rapidly to see the circuit breaker in action!\n\n")
175 | 
176 | 	// Start server
177 | 	fmt.Printf("Server started on port %d\n", port)
178 | 	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
179 | }
180 | 
--------------------------------------------------------------------------------
/example/db/db.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"context"
5 | 	"database/sql"
6 | 	"errors"
7 | 	"fmt"
8 | 	"log"
9 | 	"os"
10 | 	"runtime" // For concurrency setting
11 | 	"sync"    // For mock repo locking
12 | 	"time"
13 | 
14 | 	_ "github.com/mattn/go-sqlite3" // SQLite driver, registered via blank import
15 | 	"github.com/synoptiq/go-fluxus"
16 | )
17 | 
18 | // --- 1. Define the Dependency Interface (Unchanged) ---
19 | 
20 | type User struct {
21 | 	ID        int
22 | 	Email     string
23 | 	LastLogin time.Time
24 | }
25 | 
26 | type UserRepository interface {
27 | 	GetUserByID(ctx context.Context, id int) (*User, error)
28 | 	UpdateLastLogin(ctx context.Context, id int, loginTime time.Time) error
29 | }
30 | 
31 | // --- 2. Define the Stage Struct (Unchanged) ---
32 | 
33 | type UserProcessingStage struct {
34 | 	repo UserRepository
35 | }
36 | 
37 | func NewUserProcessingStage(repo UserRepository) *UserProcessingStage {
38 | 	if repo == nil {
39 | 		panic("UserRepository cannot be nil")
40 | 	}
41 | 	return &UserProcessingStage{
42 | 		repo: repo,
43 | 	}
44 | }
45 | 
46 | // --- 3. Implement the Stage Interface (Minor Logging Change) ---
47 | 
48 | func (s *UserProcessingStage) Process(ctx context.Context, userID int) (string, error) {
49 | 	// Use a logger or structured logging in real apps
50 | 	// fmt.Printf("  [Stage] Processing user ID: %d\n", userID) // Reduced verbosity for concurrent runs
51 | 
52 | 	user, err := s.repo.GetUserByID(ctx, userID)
53 | 	if err != nil {
54 | 		if errors.Is(err, sql.ErrNoRows) {
55 | 			return "", fmt.Errorf("user %d not found: %w", userID, err)
56 | 		}
57 | 		return "", fmt.Errorf("failed to get user %d: %w", userID, err)
58 | 	}
59 | 
60 | 	now := time.Now()
61 | 	if err := s.repo.UpdateLastLogin(ctx, userID, now); err != nil {
62 | 		// Log the error but maybe continue? Depends on requirements.
63 | log.Printf("Warning: failed to update last login for user %d: %v", userID, err) 64 | } 65 | 66 | summary := fmt.Sprintf("User %d (%s) login updated to %s", 67 | user.ID, user.Email, now.Format(time.RFC3339)) 68 | 69 | // fmt.Printf(" [Stage] Finished processing user ID: %d\n", userID) // Reduced verbosity 70 | return summary, nil 71 | } 72 | 73 | var _ fluxus.Stage[int, string] = (*UserProcessingStage)(nil) 74 | 75 | // --- 4a. Concrete Dependency Implementation (SQLite - Unchanged) --- 76 | 77 | type SQLiteUserRepository struct { 78 | db *sql.DB 79 | } 80 | 81 | func NewSQLiteUserRepository(db *sql.DB) *SQLiteUserRepository { 82 | if db == nil { 83 | panic("sql.DB cannot be nil") 84 | } 85 | return &SQLiteUserRepository{db: db} 86 | } 87 | 88 | func (r *SQLiteUserRepository) GetUserByID(ctx context.Context, id int) (*User, error) { 89 | query := "SELECT id, email, last_login FROM users WHERE id = ?" 90 | row := r.db.QueryRowContext(ctx, query, id) 91 | 92 | var user User 93 | var lastLoginStr string 94 | 95 | err := row.Scan(&user.ID, &user.Email, &lastLoginStr) 96 | if err != nil { 97 | if errors.Is(err, sql.ErrNoRows) { 98 | return nil, sql.ErrNoRows 99 | } 100 | return nil, fmt.Errorf("query user %d failed: %w", id, err) 101 | } 102 | 103 | user.LastLogin, err = time.Parse(time.RFC3339, lastLoginStr) 104 | if err != nil { 105 | // Handle potential empty string if last_login is NULL 106 | if lastLoginStr == "" { 107 | // Assign a zero time or handle as appropriate 108 | user.LastLogin = time.Time{} 109 | } else { 110 | return nil, fmt.Errorf("parsing last_login '%s' for user %d failed: %w", lastLoginStr, id, err) 111 | } 112 | } 113 | 114 | return &user, nil 115 | } 116 | 117 | func (r *SQLiteUserRepository) UpdateLastLogin(ctx context.Context, id int, loginTime time.Time) error { 118 | query := "UPDATE users SET last_login = ? WHERE id = ?" 119 | _, err := r.db.ExecContext(ctx, query, loginTime.Format(time.RFC3339), id) 120 | if err != nil { 121 | return fmt.Errorf("update last_login for user %d failed: %w", id, err) 122 | } 123 | // fmt.Printf(" [SQLiteDB] Updated last login for user %d\n", id) // Reduced verbosity 124 | return nil 125 | } 126 | 127 | // --- 4b. 
Concrete Dependency Implementation (Mock - Added Mutex for Concurrency) --- 128 | 129 | type MockUserRepository struct { 130 | mu sync.RWMutex // Added mutex for safe concurrent access in tests 131 | users map[int]*User 132 | 133 | GetUserByIDFunc func(ctx context.Context, id int) (*User, error) 134 | UpdateLastLoginFunc func(ctx context.Context, id int, loginTime time.Time) error 135 | } 136 | 137 | func NewMockUserRepository() *MockUserRepository { 138 | m := &MockUserRepository{ 139 | users: map[int]*User{ 140 | 1: {ID: 1, Email: "alice@example.com", LastLogin: time.Now().Add(-24 * time.Hour)}, 141 | 2: {ID: 2, Email: "bob@example.com", LastLogin: time.Now().Add(-48 * time.Hour)}, 142 | 4: {ID: 4, Email: "charlie@example.com", LastLogin: time.Now().Add(-72 * time.Hour)}, // Added more users 143 | 5: {ID: 5, Email: "dave@example.com", LastLogin: time.Now().Add(-96 * time.Hour)}, 144 | }, 145 | } 146 | 147 | // Default implementations using the mutex 148 | m.GetUserByIDFunc = func(_ context.Context, id int) (*User, error) { 149 | m.mu.RLock() 150 | defer m.mu.RUnlock() 151 | user, exists := m.users[id] 152 | if !exists { 153 | return nil, sql.ErrNoRows 154 | } 155 | userCopy := *user // Return a copy 156 | return &userCopy, nil 157 | } 158 | 159 | m.UpdateLastLoginFunc = func(_ context.Context, id int, loginTime time.Time) error { 160 | m.mu.Lock() 161 | defer m.mu.Unlock() 162 | user, exists := m.users[id] 163 | if !exists { 164 | return fmt.Errorf("user %d not found for update", id) 165 | } 166 | user.LastLogin = loginTime 167 | // fmt.Printf(" [MockDB] Updated last login for user %d to %s\n", id, loginTime.Format(time.RFC3339)) 168 | return nil 169 | } 170 | 171 | return m 172 | } 173 | 174 | func (m *MockUserRepository) GetUserByID(ctx context.Context, id int) (*User, error) { 175 | return m.GetUserByIDFunc(ctx, id) 176 | } 177 | 178 | func (m *MockUserRepository) UpdateLastLogin(ctx context.Context, id int, loginTime time.Time) error { 179 | return m.UpdateLastLoginFunc(ctx, id, loginTime) 180 | } 181 | 182 | // --- Database Setup Helper (Unchanged) --- 183 | 184 | const dbFile = "./fluxus_di_example.db" 185 | 186 | func setupDatabase(ctx context.Context) (*sql.DB, error) { 187 | _ = os.Remove(dbFile) 188 | db, err := sql.Open("sqlite3", dbFile+"?_journal_mode=WAL&_busy_timeout=5000") // Added WAL mode and busy timeout for better concurrency 189 | if err != nil { 190 | return nil, fmt.Errorf("failed to open database: %w", err) 191 | } 192 | 193 | createTableSQL := ` 194 | CREATE TABLE users ( 195 | id INTEGER PRIMARY KEY, 196 | email TEXT NOT NULL UNIQUE, 197 | last_login TEXT 198 | );` 199 | if _, err := db.ExecContext(ctx, createTableSQL); err != nil { 200 | db.Close() 201 | return nil, fmt.Errorf("failed to create users table: %w", err) 202 | } 203 | 204 | insertSQL := "INSERT INTO users (id, email, last_login) VALUES (?, ?, ?)" 205 | usersToInsert := []User{ 206 | {ID: 1, Email: "alice@example.com", LastLogin: time.Now().Add(-24 * time.Hour)}, 207 | {ID: 2, Email: "bob@example.com", LastLogin: time.Now().Add(-48 * time.Hour)}, 208 | {ID: 4, Email: "charlie@example.com", LastLogin: time.Now().Add(-72 * time.Hour)}, 209 | {ID: 5, Email: "dave@example.com", LastLogin: time.Now().Add(-96 * time.Hour)}, 210 | } 211 | tx, err := db.BeginTx(ctx, nil) 212 | if err != nil { 213 | db.Close() 214 | return nil, fmt.Errorf("failed to begin transaction: %w", err) 215 | } 216 | stmt, err := tx.PrepareContext(ctx, insertSQL) 217 | if err != nil { 218 | tx.Rollback() 219 | db.Close() 220 | return 
nil, fmt.Errorf("failed to prepare insert statement: %w", err) 221 | } 222 | defer stmt.Close() 223 | 224 | for _, user := range usersToInsert { 225 | _, err := stmt.ExecContext(ctx, user.ID, user.Email, user.LastLogin.Format(time.RFC3339)) 226 | if err != nil { 227 | tx.Rollback() 228 | db.Close() 229 | return nil, fmt.Errorf("failed to insert user %d: %w", user.ID, err) 230 | } 231 | } 232 | if err := tx.Commit(); err != nil { 233 | db.Close() 234 | return nil, fmt.Errorf("failed to commit transaction: %w", err) 235 | } 236 | 237 | fmt.Println("✅ SQLite database initialized.") 238 | return db, nil 239 | } 240 | 241 | // --- 5. Example Usage (Using fluxus.Map) --- 242 | 243 | func main() { 244 | fmt.Println("🚀 Fluxus Dependency Injection Example (with SQLite & Map Stage)") 245 | fmt.Println("==============================================================") 246 | 247 | ctx := context.Background() 248 | 249 | // --- Setup Real Database --- 250 | db, err := setupDatabase(ctx) 251 | if err != nil { 252 | log.Fatalf("Database setup failed: %v", err) 253 | } 254 | defer db.Close() 255 | defer os.Remove(dbFile) 256 | 257 | // --- Create the Real Repository --- 258 | userRepo := NewSQLiteUserRepository(db) 259 | 260 | // --- Create the Stage to be Mapped --- 261 | // Create the single stage instance that will process each user ID. 262 | // It's instantiated once with its dependency. 263 | processStage := NewUserProcessingStage(userRepo) 264 | 265 | // --- Create the Map Stage --- 266 | // Map takes a slice of inputs ([]int) and applies the processStage 267 | // to each element concurrently. 268 | // Limit concurrency to avoid overwhelming the DB connection pool or CPU. 269 | concurrency := runtime.NumCPU() * 2 // Example concurrency limit 270 | mapStage := fluxus.NewMap(processStage). 271 | WithConcurrency(concurrency). 272 | WithCollectErrors(true) // Collect all errors instead of failing on the first one 273 | 274 | // --- Create and Run Pipeline --- 275 | // The pipeline now takes []int as input and produces []string as output. 
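276 | 	// NOTE: The index-based error handling below assumes that Map with WithCollectErrors(true)
277 | 	// yields a *fluxus.MultiError whose Errors slice is aligned with the input slice, with nil
278 | 	// entries for successes; verify against fluxus.NewMap, since NewMultiError filters out nils.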
276 | 	pipeline := fluxus.NewPipeline(mapStage)
277 | 
278 | 	// Process a slice of user IDs
279 | 	userIDs := []int{1, 3, 2, 99, 4, 5} // Includes non-existent IDs
280 | 
281 | 	fmt.Printf("\nProcessing %d user IDs concurrently (limit %d)...\n", len(userIDs), concurrency)
282 | 	startTime := time.Now()
283 | 	results, err := pipeline.Process(ctx, userIDs)
284 | 	duration := time.Since(startTime)
285 | 	fmt.Printf("Processing finished in %v\n", duration)
286 | 
287 | 	// Track which input indices failed so the results loop below can skip them.
288 | 	failedIdx := make(map[int]bool)
289 | 	if err != nil {
290 | 		// With WithCollectErrors, 'err' will be a *fluxus.MultiError. Its Errors
291 | 		// slice holds only the actual failures (see db_test.go), each wrapped as
292 | 		// a *fluxus.MapItemError carrying the index of the input that failed.
293 | 		fmt.Printf("❌ Errors occurred during processing:\n")
294 | 		if merr, ok := err.(*fluxus.MultiError); ok {
295 | 			for _, e := range merr.Errors {
296 | 				var itemErr *fluxus.MapItemError
297 | 				if errors.As(e, &itemErr) { // needs the standard "errors" import
298 | 					failedIdx[itemErr.ItemIndex] = true
299 | 					fmt.Printf(" - Input index %d (ID %d): %v\n", itemErr.ItemIndex, userIDs[itemErr.ItemIndex], e)
300 | 				} else {
301 | 					fmt.Printf(" - %v\n", e)
302 | 				}
303 | 			}
304 | 		} else {
305 | 			fmt.Printf(" - Unexpected error type: %v\n", err) // Should not happen with WithCollectErrors
306 | 		}
307 | 	}
308 | 
309 | 	fmt.Println("\n✅ Successful Results:")
310 | 	if len(results) > 0 {
311 | 		for i, result := range results {
312 | 			// The results slice keeps one slot per input (zero value on failure),
313 | 			// so skip the slots whose input failed.
314 | 			if !failedIdx[i] {
315 | 				fmt.Printf(" - Input index %d (ID %d): %s\n", i, userIDs[i], result)
316 | 			}
317 | 		}
318 | 	} else if err == nil {
319 | 		fmt.Println(" (No results - perhaps all inputs failed?)")
320 | 	}
321 | 
322 | 	// --- Verify DB State (Optional) ---
323 | 	fmt.Println("\nVerifying SQLite DB state:")
324 | 	// ... (verification logic remains the same) ...
325 | 	rows, err := db.QueryContext(ctx, "SELECT id, email, last_login FROM users ORDER BY id")
326 | 	if err != nil {
327 | 		log.Printf("Warning: Failed to query DB for verification: %v", err)
328 | 	} else {
329 | 		defer rows.Close()
330 | 		for rows.Next() {
331 | 			var user User
332 | 			var lastLoginStr string
333 | 			if err := rows.Scan(&user.ID, &user.Email, &lastLoginStr); err != nil {
334 | 				log.Printf("Warning: Failed to scan row: %v", err)
335 | 				continue
336 | 			}
337 | 			user.LastLogin, _ = time.Parse(time.RFC3339, lastLoginStr)
338 | 			fmt.Printf(" User %d (%s) - Last Login: %s\n", user.ID, user.Email, user.LastLogin.Format(time.RFC3339))
339 | 		}
340 | 	}
341 | }
342 | 
--------------------------------------------------------------------------------
/example/db/db_test.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"context"
5 | 	"database/sql"
6 | 	"errors"
7 | 	"testing"
8 | 	"time"
9 | 
10 | 	"github.com/stretchr/testify/assert"
11 | 	"github.com/stretchr/testify/require"
12 | 	"github.com/synoptiq/go-fluxus"
13 | )
14 | 
15 | func TestUserProcessingStage(t *testing.T) {
16 | 	mockRepo := NewMockUserRepository()
17 | 	stage := NewUserProcessingStage(mockRepo)
18 | 	ctx := context.Background()
19 | 
20 | 	t.Run("UserExists", func(t *testing.T) {
21 | 		userID := 1
22 | 		result, err := stage.Process(ctx, userID)
23 | 		require.NoError(t, err)
24 | 		assert.Contains(t, result, "User 1 (alice@example.com)")
25 | 
26 | 		updatedUser, _ := mockRepo.GetUserByID(ctx, userID)
27 | 		assert.WithinDuration(t, time.Now(), updatedUser.LastLogin, 1*time.Second)
28 | 	})
29 | 
30 | 	t.Run("UserNotFound", func(t *testing.T) {
31 | 		userID := 99
32 | 		_, err := stage.Process(ctx, userID)
33 | 		require.Error(t, err)
34 | 		assert.Contains(t, err.Error(), "user 99 not found")
35 | 		assert.ErrorIs(t, err, 
sql.ErrNoRows) // Check underlying error type if needed
36 | 	})
37 | 
38 | 	t.Run("GetUserError", func(t *testing.T) {
39 | 		originalGetUserFunc := mockRepo.GetUserByIDFunc
40 | 		simulatedErr := errors.New("simulated DB connection error")
41 | 		mockRepo.GetUserByIDFunc = func(ctx context.Context, id int) (*User, error) {
42 | 			return nil, simulatedErr
43 | 		}
44 | 		defer func() { mockRepo.GetUserByIDFunc = originalGetUserFunc }()
45 | 
46 | 		userID := 1
47 | 		_, err := stage.Process(ctx, userID)
48 | 		require.Error(t, err)
49 | 		assert.ErrorIs(t, err, simulatedErr)
50 | 		assert.Contains(t, err.Error(), "failed to get user 1")
51 | 	})
52 | 
53 | 	t.Run("UpdateUserError", func(t *testing.T) {
54 | 		originalUpdateFunc := mockRepo.UpdateLastLoginFunc
55 | 		simulatedErr := errors.New("simulated update error")
56 | 		mockRepo.UpdateLastLoginFunc = func(ctx context.Context, id int, loginTime time.Time) error {
57 | 			return simulatedErr
58 | 		}
59 | 		defer func() { mockRepo.UpdateLastLoginFunc = originalUpdateFunc }()
60 | 
61 | 		userID := 1
62 | 		// We still expect success from Process, as the error is only logged
63 | 		result, err := stage.Process(ctx, userID)
64 | 		require.NoError(t, err) // Error is logged, not returned by Process
65 | 		assert.Contains(t, result, "User 1")
66 | 		// Note: Testing log output requires more setup (capturing log output)
67 | 	})
68 | }
69 | 
70 | // --- 7. Unit Testing Example (Pipeline with Map Stage) ---
71 | 
72 | func TestUserProcessingPipelineWithMap(t *testing.T) {
73 | 	mockRepo := NewMockUserRepository()
74 | 	processStage := NewUserProcessingStage(mockRepo) // Stage with mock repo
75 | 	mapStage := fluxus.NewMap(processStage).
76 | 		WithConcurrency(2).     // Use some concurrency for the test
77 | 		WithCollectErrors(true) // Important for testing multiple outcomes
78 | 
79 | 	ctx := context.Background()
80 | 
81 | 	t.Run("MixedSuccessAndFailure", func(t *testing.T) {
82 | 		userIDs := []int{1, 99, 2, 100} // Good, Not Found, Good, Not Found
83 | 		expectedResults := []string{"User 1 (alice@example.com)", "", "User 2 (bob@example.com)", ""} // Expected results (empty for errors)
84 | 
85 | 		// We call mapStage.Process directly instead of pipeline.Process because
86 | 		// we want both the errors and the partial results; pipeline.Process
87 | 		// would return only the error and nil results.
88 | 		// This is a common pattern when testing stages that can fail independently.
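		// Sketch of the expected shape of the outcome (grounded in the
		// assertions below): results keeps one slot per input, holding the
		// zero value ("") where that input failed, while merr.Errors holds
		// only the failures, each a *fluxus.MapItemError recording the
		// original input index:
		//
		//	results     -> ["User 1 (...)", "", "User 2 (...)", ""]
		//	merr.Errors -> [{ItemIndex: 1, ...}, {ItemIndex: 3, ...}]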
89 | results, err := mapStage.Process(ctx, userIDs) 90 | 91 | // Check for MultiError 92 | require.Error(t, err, "Expected an error because some inputs failed") 93 | merr, ok := err.(*fluxus.MultiError) 94 | require.True(t, ok, "Expected error to be a MultiError") 95 | 96 | // --- FIX: Assert the number of actual errors --- 97 | require.Len(t, merr.Errors, 2, "MultiError should contain 2 actual errors") 98 | 99 | // --- FIX: Check results slice (which has length matching input) --- 100 | require.Len(t, results, len(userIDs)) 101 | assert.Contains(t, results[0], expectedResults[0], "Result for ID 1 mismatch") 102 | assert.Equal(t, "", results[1], "Result for ID 99 should be empty string on error") 103 | assert.Contains(t, results[2], expectedResults[2], "Result for ID 2 mismatch") 104 | assert.Equal(t, "", results[3], "Result for ID 100 should be empty string on error") 105 | 106 | // --- FIX: Check the content of the collected errors --- 107 | foundErr99 := false 108 | foundErr100 := false 109 | for _, itemErr := range merr.Errors { 110 | // Check if it's a MapItemError 111 | var mapItemErr *fluxus.MapItemError 112 | if errors.As(itemErr, &mapItemErr) { 113 | if mapItemErr.ItemIndex == 1 { // Error for input index 1 (value 99) 114 | assert.ErrorIs(t, mapItemErr.OriginalError, sql.ErrNoRows) 115 | assert.Contains(t, mapItemErr.Error(), "user 99 not found") 116 | foundErr99 = true 117 | } else if mapItemErr.ItemIndex == 3 { // Error for input index 3 (value 100) 118 | assert.ErrorIs(t, mapItemErr.OriginalError, sql.ErrNoRows) 119 | assert.Contains(t, mapItemErr.Error(), "user 100 not found") 120 | foundErr100 = true 121 | } 122 | } 123 | } 124 | assert.True(t, foundErr99, "Did not find expected error for user 99") 125 | assert.True(t, foundErr100, "Did not find expected error for user 100") 126 | }) 127 | 128 | t.Run("SimulatedGetErrorDuringMap", func(t *testing.T) { 129 | userIDs := []int{1, 4} // Process two users 130 | 131 | // Simulate GetUserByID error only for the second user (ID 4) 132 | originalGetUserFunc := mockRepo.GetUserByIDFunc 133 | simulatedErr := errors.New("simulated get error for ID 4") 134 | mockRepo.GetUserByIDFunc = func(ctx context.Context, id int) (*User, error) { 135 | if id == 4 { 136 | return nil, simulatedErr // Fail only for ID 4 137 | } 138 | // Use original for others (make sure it's correctly captured) 139 | // Need to ensure mockRepo is accessible here or capture originalGetUserFunc correctly 140 | // Assuming originalGetUserFunc correctly points to the initial mock function 141 | return originalGetUserFunc(ctx, id) 142 | } 143 | defer func() { mockRepo.GetUserByIDFunc = originalGetUserFunc }() // Restore 144 | 145 | results, err := mapStage.Process(ctx, userIDs) 146 | 147 | require.Error(t, err) 148 | merr, ok := err.(*fluxus.MultiError) 149 | require.True(t, ok) 150 | 151 | // --- FIX: Assert the number of actual errors --- 152 | require.Len(t, merr.Errors, 1, "MultiError should contain 1 actual error") 153 | 154 | // --- FIX: Check results slice --- 155 | require.Len(t, results, 2) 156 | assert.Contains(t, results[0], "User 1", "Result for ID 1 should be present") 157 | assert.Equal(t, "", results[1], "Result for ID 4 should be empty string") 158 | 159 | // --- FIX: Check the single error in merr.Errors --- 160 | singleErr := merr.Errors[0] 161 | 162 | // Check if it's a MapItemError and for the correct index 163 | var mapItemErr *fluxus.MapItemError 164 | require.ErrorAs(t, singleErr, &mapItemErr, "Error should be a MapItemError") 165 | assert.Equal(t, 1, 
mapItemErr.ItemIndex, "Error should be for item index 1") // Index 1 corresponds to input user ID 4 166 | assert.ErrorIs(t, mapItemErr.OriginalError, simulatedErr, "Original error should be the simulated one") 167 | }) 168 | 169 | t.Run("AllSuccess", func(t *testing.T) { 170 | userIDs := []int{1, 2, 4, 5} // All exist in mock repo 171 | 172 | results, err := mapStage.Process(ctx, userIDs) 173 | 174 | require.NoError(t, err, "Expected no error when all inputs succeed") 175 | require.Len(t, results, len(userIDs)) 176 | assert.Contains(t, results[0], "User 1") 177 | assert.Contains(t, results[1], "User 2") 178 | assert.Contains(t, results[2], "User 4") 179 | assert.Contains(t, results[3], "User 5") 180 | }) 181 | } 182 | -------------------------------------------------------------------------------- /example/fanout/fanout.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "math/rand" 9 | "runtime" 10 | "strings" 11 | "time" 12 | 13 | "github.com/synoptiq/go-fluxus" 14 | ) 15 | 16 | // --- Stages for Demonstration --- 17 | 18 | // stageToUpper converts input string to uppercase. 19 | var stageToUpper = fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 20 | fmt.Printf(" [ToUpper] Processing '%s'...\n", input) 21 | time.Sleep(50 * time.Millisecond) // Simulate work 22 | return strings.ToUpper(input), nil 23 | }) 24 | 25 | // stageToLower converts input string to lowercase. 26 | var stageToLower = fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 27 | fmt.Printf(" [ToLower] Processing '%s'...\n", input) 28 | time.Sleep(70 * time.Millisecond) // Simulate work 29 | return strings.ToLower(input), nil 30 | }) 31 | 32 | // stageReverse reverses the input string. 33 | var stageReverse = fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 34 | fmt.Printf(" [Reverse] Processing '%s'...\n", input) 35 | time.Sleep(60 * time.Millisecond) // Simulate work 36 | runes := []rune(input) 37 | for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { 38 | runes[i], runes[j] = runes[j], runes[i] 39 | } 40 | return string(runes), nil 41 | }) 42 | 43 | // stageWithError sometimes returns an error. 44 | var stageWithError = fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 45 | fmt.Printf(" [WithError] Processing '%s'...\n", input) 46 | time.Sleep(40 * time.Millisecond) // Simulate work 47 | if rand.Intn(2) == 0 { // 50% chance of error 48 | return "", errors.New("simulated error from WithError stage") 49 | } 50 | return fmt.Sprintf("NoError: %s", input), nil 51 | }) 52 | 53 | // stageWithDelay simulates a longer running stage. 54 | func stageWithDelay(id int, delay time.Duration) fluxus.Stage[string, string] { 55 | return fluxus.StageFunc[string, string](func(ctx context.Context, input string) (string, error) { 56 | fmt.Printf(" [Delay %d] Processing '%s', sleeping for %v...\n", id, input, delay) 57 | select { 58 | case <-time.After(delay): 59 | fmt.Printf(" [Delay %d] Finished processing '%s'\n", id, input) 60 | return fmt.Sprintf("Delayed-%d: %s", id, input), nil 61 | case <-ctx.Done(): 62 | fmt.Printf(" [Delay %d] Cancelled processing '%s'\n", id, input) 63 | return "", ctx.Err() 64 | } 65 | }) 66 | } 67 | 68 | // --- Demonstration Functions --- 69 | 70 | // runBasicFanOutDemo demonstrates basic fan-out functionality. 
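//
// A minimal sketch of the shape involved, assuming NewFanOut's generic
// signature matches its use below (stages ...Stage[I, O] -> Stage[I, []O]):
//
//	fanOut := fluxus.NewFanOut(stageA, stageB) // a Stage[string, []string]
//	outs, err := fanOut.Process(ctx, "input")  // outs[i] from the i-th stage
//
// The demo below relies on outs[i] lining up with the i-th stage passed in,
// regardless of the order in which the stages actually finish.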
71 | func runBasicFanOutDemo() { 72 | fmt.Println("\n🔄 Running Basic FanOut Demo") 73 | fmt.Println("============================") 74 | 75 | // Create a FanOut stage with multiple processing functions 76 | fanOut := fluxus.NewFanOut( 77 | stageToUpper, 78 | stageToLower, 79 | stageReverse, 80 | ) 81 | 82 | input := "Hello Fluxus" 83 | fmt.Printf("Processing input '%s' with %d stages concurrently...\n", input, 3) 84 | 85 | startTime := time.Now() 86 | results, err := fanOut.Process(context.Background(), input) 87 | duration := time.Since(startTime) 88 | 89 | if err != nil { 90 | log.Fatalf("Basic FanOut failed: %v", err) 91 | } 92 | 93 | fmt.Printf("\n📊 Basic FanOut Results (took %v):\n", duration) 94 | for i, result := range results { 95 | fmt.Printf(" Result from stage %d: %s\n", i, result) 96 | } 97 | } 98 | 99 | // runConcurrencyLimitedFanOutDemo demonstrates limiting concurrency. 100 | func runConcurrencyLimitedFanOutDemo() { 101 | fmt.Println("\n🚦 Running Concurrency Limited FanOut Demo") 102 | fmt.Println("==========================================") 103 | 104 | numStages := 8 105 | stages := make([]fluxus.Stage[string, string], numStages) 106 | for i := 0; i < numStages; i++ { 107 | stages[i] = stageWithDelay(i, time.Duration(50+rand.Intn(100))*time.Millisecond) 108 | } 109 | 110 | input := "Concurrency Test" 111 | 112 | // --- Run with unlimited concurrency --- 113 | fmt.Printf("\nProcessing '%s' with %d stages (unlimited concurrency)...\n", input, numStages) 114 | fanOutUnlimited := fluxus.NewFanOut(stages...) 115 | startTimeUnlimited := time.Now() 116 | resultsUnlimited, err := fanOutUnlimited.Process(context.Background(), input) 117 | durationUnlimited := time.Since(startTimeUnlimited) 118 | 119 | if err != nil { 120 | log.Fatalf("Unlimited FanOut failed: %v", err) 121 | } 122 | fmt.Printf("✅ Unlimited concurrency finished in %v. Results: %d\n", durationUnlimited, len(resultsUnlimited)) 123 | 124 | // --- Run with limited concurrency --- 125 | concurrencyLimit := runtime.NumCPU() // Limit to number of CPU cores 126 | if concurrencyLimit > numStages { 127 | concurrencyLimit = numStages 128 | } 129 | if concurrencyLimit < 1 { 130 | concurrencyLimit = 1 131 | } 132 | 133 | fmt.Printf("\nProcessing '%s' with %d stages (concurrency limited to %d)...\n", input, numStages, concurrencyLimit) 134 | fanOutLimited := fluxus.NewFanOut(stages...).WithConcurrency(concurrencyLimit) 135 | startTimeLimited := time.Now() 136 | resultsLimited, err := fanOutLimited.Process(context.Background(), input) 137 | durationLimited := time.Since(startTimeLimited) 138 | 139 | if err != nil { 140 | log.Fatalf("Limited FanOut failed: %v", err) 141 | } 142 | fmt.Printf("✅ Limited concurrency (%d) finished in %v. Results: %d\n", concurrencyLimit, durationLimited, len(resultsLimited)) 143 | 144 | fmt.Printf("\n📊 Concurrency Comparison:\n") 145 | fmt.Printf(" Unlimited: %v\n", durationUnlimited) 146 | fmt.Printf(" Limited (%d): %v\n", concurrencyLimit, durationLimited) 147 | // Note: Limited concurrency might be slower if stages are short and CPU-bound, 148 | // but can be faster or use fewer resources if stages involve I/O or contention. 149 | } 150 | 151 | // runErrorHandlingFanOutDemo demonstrates error handling. 
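//
// Sketch of the two error paths exercised below (the exact message format is
// an assumption that the demo itself checks): by default a failing stage
// surfaces as an error mentioning its index, e.g. "fan-out stage 1: ...",
// and WithErrorHandler lets you wrap that error before it is returned:
//
//	wrapped := fanOut.WithErrorHandler(func(e error) error {
//		return fmt.Errorf("custom fan-out handler: %w", e)
//	})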
152 | func runErrorHandlingFanOutDemo() { 153 | fmt.Println("\n⚠️ Running Error Handling FanOut Demo") 154 | fmt.Println("=====================================") 155 | 156 | // Create a FanOut stage including one that might error 157 | fanOut := fluxus.NewFanOut( 158 | stageToUpper, 159 | stageWithError, // This one might fail 160 | stageReverse, 161 | ) 162 | 163 | input := "Error Test" 164 | fmt.Printf("Processing input '%s' with stages (one might error)...\n", input) 165 | 166 | // --- Default error handling --- 167 | fmt.Println("\nAttempt 1 (Default Error Handling):") 168 | _, err := fanOut.Process(context.Background(), input) 169 | 170 | if err != nil { 171 | fmt.Printf("❌ Received expected error: %v\n", err) 172 | // Check if the error message indicates which stage failed 173 | if strings.Contains(err.Error(), "fan-out stage 1") { 174 | fmt.Println("✅ Error message correctly identifies the failing stage index.") 175 | } else { 176 | fmt.Println("❌ Error message does not clearly identify the failing stage index.") 177 | } 178 | } else { 179 | fmt.Println("❓ Expected an error but got none (stageWithError might have succeeded). Run again?") 180 | } 181 | 182 | // --- Custom error handling --- 183 | fmt.Println("\nAttempt 2 (Custom Error Handling):") 184 | customFanOut := fanOut.WithErrorHandler(func(err error) error { 185 | // Wrap the original error 186 | return fmt.Errorf("custom fan-out handler: %w", err) 187 | }) 188 | 189 | _, err = customFanOut.Process(context.Background(), input) 190 | 191 | if err != nil { 192 | fmt.Printf("❌ Received expected error: %v\n", err) 193 | // Check if the error is wrapped 194 | if strings.Contains(err.Error(), "custom fan-out handler:") { 195 | fmt.Println("✅ Error was properly wrapped by the custom handler.") 196 | // You can use errors.Unwrap to get the original error 197 | originalErr := errors.Unwrap(err) 198 | fmt.Printf(" Original error: %v\n", originalErr) 199 | } else { 200 | fmt.Println("❌ Error was not wrapped by the custom handler.") 201 | } 202 | } else { 203 | fmt.Println("❓ Expected an error but got none (stageWithError might have succeeded). Run again?") 204 | } 205 | } 206 | 207 | func main() { 208 | // Set seed for reproducible error in stageWithError (optional) 209 | // rand.New(rand.NewSource(1)) // Use a fixed seed for consistent error behavior 210 | rand.New(rand.NewSource(time.Now().UnixNano())) // Use random seed 211 | 212 | fmt.Println("Fluxus FanOut Stage Demonstration") 213 | fmt.Println("=================================") 214 | fmt.Println("This example demonstrates processing a single input") 215 | fmt.Println("through multiple stages concurrently using FanOut.") 216 | 217 | // Run demos 218 | runBasicFanOutDemo() 219 | runConcurrencyLimitedFanOutDemo() 220 | runErrorHandlingFanOutDemo() 221 | 222 | fmt.Println("\nDemo Complete!") 223 | } 224 | -------------------------------------------------------------------------------- /example/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "strings" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | 13 | "github.com/synoptiq/go-fluxus" 14 | ) 15 | 16 | // --- Simple In-Memory Metrics Collector --- 17 | 18 | // InMemoryMetricsCollector implements the fluxus.MetricsCollector interface 19 | // and stores metrics counts in memory using atomic operations. 
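//
// This assumes fluxus.MetricsCollector is satisfied by the callback methods
// implemented below (StageStarted, StageCompleted, StageError, RetryAttempt,
// BufferBatchProcessed, FanOutStarted/Completed and FanInStarted/Completed).
// A compile-time assertion would keep that assumption honest:
//
//	var _ fluxus.MetricsCollector = (*InMemoryMetricsCollector)(nil)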
20 | type InMemoryMetricsCollector struct { 21 | stageStartedCount int64 22 | stageCompletedCount int64 23 | stageErrorCount int64 24 | retryAttemptCount int64 25 | fanOutStartedCount int64 26 | fanOutCompletedCount int64 27 | fanInStartedCount int64 28 | fanInCompletedCount int64 29 | bufferBatchCount int64 30 | 31 | // Store durations (use mutex for slice access) 32 | stageDurations map[string][]time.Duration 33 | mu sync.Mutex 34 | } 35 | 36 | // NewInMemoryMetricsCollector creates a new collector. 37 | func NewInMemoryMetricsCollector() *InMemoryMetricsCollector { 38 | return &InMemoryMetricsCollector{ 39 | stageDurations: make(map[string][]time.Duration), 40 | } 41 | } 42 | 43 | func (m *InMemoryMetricsCollector) StageStarted(ctx context.Context, stageName string) { 44 | atomic.AddInt64(&m.stageStartedCount, 1) 45 | fmt.Printf(" 📊 Metric: Stage '%s' started\n", stageName) 46 | } 47 | 48 | func (m *InMemoryMetricsCollector) StageCompleted(ctx context.Context, stageName string, duration time.Duration) { 49 | atomic.AddInt64(&m.stageCompletedCount, 1) 50 | m.mu.Lock() 51 | m.stageDurations[stageName] = append(m.stageDurations[stageName], duration) 52 | m.mu.Unlock() 53 | fmt.Printf(" 📊 Metric: Stage '%s' completed in %v\n", stageName, duration) 54 | } 55 | 56 | func (m *InMemoryMetricsCollector) StageError(ctx context.Context, stageName string, err error) { 57 | atomic.AddInt64(&m.stageErrorCount, 1) 58 | fmt.Printf(" 📊 Metric: Stage '%s' errored: %v\n", stageName, err) 59 | } 60 | 61 | func (m *InMemoryMetricsCollector) RetryAttempt(ctx context.Context, stageName string, attempt int, err error) { 62 | atomic.AddInt64(&m.retryAttemptCount, 1) 63 | fmt.Printf(" 📊 Metric: Retry attempt #%d for stage '%s' (error: %v)\n", attempt, stageName, err) 64 | } 65 | 66 | func (m *InMemoryMetricsCollector) BufferBatchProcessed(ctx context.Context, batchSize int, duration time.Duration) { 67 | atomic.AddInt64(&m.bufferBatchCount, 1) 68 | fmt.Printf(" 📊 Metric: Buffer batch processed (size %d) in %v\n", batchSize, duration) 69 | } 70 | 71 | func (m *InMemoryMetricsCollector) FanOutStarted(ctx context.Context, numStages int) { 72 | atomic.AddInt64(&m.fanOutStartedCount, 1) 73 | fmt.Printf(" 📊 Metric: FanOut started (%d stages)\n", numStages) 74 | } 75 | 76 | func (m *InMemoryMetricsCollector) FanOutCompleted(ctx context.Context, numStages int, duration time.Duration) { 77 | atomic.AddInt64(&m.fanOutCompletedCount, 1) 78 | fmt.Printf(" 📊 Metric: FanOut completed (%d stages) in %v\n", numStages, duration) 79 | } 80 | 81 | func (m *InMemoryMetricsCollector) FanInStarted(ctx context.Context, numInputs int) { 82 | atomic.AddInt64(&m.fanInStartedCount, 1) 83 | fmt.Printf(" 📊 Metric: FanIn started (%d inputs)\n", numInputs) 84 | } 85 | 86 | func (m *InMemoryMetricsCollector) FanInCompleted(ctx context.Context, numInputs int, duration time.Duration) { 87 | atomic.AddInt64(&m.fanInCompletedCount, 1) 88 | fmt.Printf(" 📊 Metric: FanIn completed (%d inputs) in %v\n", numInputs, duration) 89 | } 90 | 91 | // PrintStats displays the collected metrics. 
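//
// Counters written with atomic.AddInt64 are read back with atomic.LoadInt64,
// and stageDurations is guarded by the same mutex used in StageCompleted, so
// PrintStats is safe to call while a pipeline is still running.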
92 | func (m *InMemoryMetricsCollector) PrintStats() { 93 | fmt.Println("\n📈 Collected Metrics Summary:") 94 | fmt.Println("----------------------------") 95 | fmt.Printf("Stage Started: %d\n", atomic.LoadInt64(&m.stageStartedCount)) 96 | fmt.Printf("Stage Completed: %d\n", atomic.LoadInt64(&m.stageCompletedCount)) 97 | fmt.Printf("Stage Errors: %d\n", atomic.LoadInt64(&m.stageErrorCount)) 98 | fmt.Printf("Retry Attempts: %d\n", atomic.LoadInt64(&m.retryAttemptCount)) 99 | fmt.Printf("FanOut Started: %d\n", atomic.LoadInt64(&m.fanOutStartedCount)) 100 | fmt.Printf("FanOut Completed: %d\n", atomic.LoadInt64(&m.fanOutCompletedCount)) 101 | fmt.Printf("FanIn Started: %d\n", atomic.LoadInt64(&m.fanInStartedCount)) 102 | fmt.Printf("FanIn Completed: %d\n", atomic.LoadInt64(&m.fanInCompletedCount)) 103 | fmt.Printf("Buffer Batches: %d\n", atomic.LoadInt64(&m.bufferBatchCount)) 104 | 105 | m.mu.Lock() 106 | defer m.mu.Unlock() 107 | if len(m.stageDurations) > 0 { 108 | fmt.Println("\nStage Durations:") 109 | for name, durations := range m.stageDurations { 110 | var total time.Duration 111 | for _, d := range durations { 112 | total += d 113 | } 114 | avg := time.Duration(0) 115 | if len(durations) > 0 { 116 | avg = total / time.Duration(len(durations)) 117 | } 118 | fmt.Printf(" - %s: %d calls, Avg: %v, Total: %v\n", name, len(durations), avg, total) 119 | } 120 | } 121 | } 122 | 123 | // --- Pipeline Stages --- 124 | 125 | // Stage 1: Prepare the input string (e.g., trim spaces) 126 | func prepareStageFunc(ctx context.Context, input string) (string, error) { 127 | fmt.Println(" ⚙️ Running Prepare Stage...") 128 | time.Sleep(20 * time.Millisecond) // Simulate work 129 | return strings.TrimSpace(input), nil 130 | } 131 | 132 | // Stage 2a: Uppercase the input string (part of FanOut) 133 | func uppercaseStageFunc(ctx context.Context, input string) (string, error) { 134 | fmt.Println(" ⚙️ Running Uppercase Stage (Parallel)...") 135 | time.Sleep(100 * time.Millisecond) // Simulate work 136 | return strings.ToUpper(input), nil 137 | } 138 | 139 | // Stage 2b: Reverse the string (part of FanOut, simulates potential failure) 140 | var reverseAttemptCount int 141 | 142 | func reverseStageFunc(ctx context.Context, input string) (string, error) { 143 | fmt.Println(" ⚙️ Running Reverse Stage (Parallel)...") 144 | time.Sleep(150 * time.Millisecond) // Simulate work 145 | 146 | reverseAttemptCount++ 147 | // Fail the first time this stage is called in the demo 148 | if reverseAttemptCount <= 1 { 149 | fmt.Println(" ⚠️ Reverse Stage simulating transient failure...") 150 | return "", errors.New("reverse service temporarily unavailable") 151 | } 152 | 153 | runes := []rune(input) 154 | for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { 155 | runes[i], runes[j] = runes[j], runes[i] 156 | } 157 | fmt.Println(" ✅ Reverse Stage succeeded.") 158 | return string(runes), nil 159 | } 160 | 161 | // Stage 3: Aggregate results from FanOut (FanIn) 162 | func aggregateResultsFunc(results []string) (string, error) { 163 | fmt.Println(" ⚙️ Running Aggregate Stage (FanIn)...") 164 | time.Sleep(40 * time.Millisecond) // Simulate work 165 | if len(results) != 2 { 166 | return "", fmt.Errorf("aggregator expected 2 results, got %d", len(results)) 167 | } 168 | // Combine results: "UPPERCASE | DESREVER" 169 | return fmt.Sprintf("%s | %s", results[0], results[1]), nil 170 | } 171 | 172 | // Stage 4: Add a prefix to the final result 173 | func prefixStageFunc(ctx context.Context, input string) (string, error) { 174 | fmt.Println(" 
⚙️ Running Prefix Stage...") 175 | time.Sleep(30 * time.Millisecond) // Simulate work 176 | return "Final: " + input, nil 177 | } 178 | 179 | // --- Main Application --- 180 | 181 | func main() { 182 | rand.New(rand.NewSource(time.Now().UnixNano())) 183 | 184 | fmt.Println("📊 Fluxus Metrics Demonstration (with FanOut/FanIn)") 185 | fmt.Println("==================================================") 186 | fmt.Println("This example shows how to collect metrics from pipeline stages,") 187 | fmt.Println("including FanOut and FanIn operations.") 188 | 189 | // 1. Create our custom metrics collector 190 | collector := NewInMemoryMetricsCollector() 191 | 192 | // 2. Define the pipeline stages 193 | prepareStage := fluxus.StageFunc[string, string](prepareStageFunc) 194 | uppercaseStage := fluxus.StageFunc[string, string](uppercaseStageFunc) 195 | reverseStage := fluxus.StageFunc[string, string](reverseStageFunc) 196 | prefixStage := fluxus.StageFunc[string, string](prefixStageFunc) 197 | 198 | // 3. Wrap stages with metrics collection 199 | fmt.Println("\n🔧 Wrapping stages with metrics...") 200 | 201 | // Wrap initial stage 202 | metricatedPrepare := fluxus.NewMetricatedStage( 203 | prepareStage, 204 | fluxus.WithStageName[string, string]("prepare"), 205 | fluxus.WithMetricsCollector[string, string](collector), 206 | ) 207 | fmt.Println(" - Wrapped 'prepare' stage.") 208 | 209 | // Wrap stages for FanOut 210 | metricatedUppercase := fluxus.NewMetricatedStage( 211 | uppercaseStage, 212 | fluxus.WithStageName[string, string]("uppercase"), 213 | fluxus.WithMetricsCollector[string, string](collector), 214 | ) 215 | fmt.Println(" - Wrapped 'uppercase' stage (for FanOut).") 216 | 217 | // Wrap the reverse stage with Retry first, then metrics for the retry operation 218 | retryReverse := fluxus.NewRetry(reverseStage, 2).WithBackoff(func(attempt int) int { 219 | return 50 * (1 << attempt) // 50ms, 100ms 220 | }) 221 | metricatedRetryReverse := fluxus.NewMetricatedRetry( 222 | retryReverse, 223 | fluxus.WithStageName[string, string]("retry-reverse"), // Name for the retry wrapper 224 | fluxus.WithMetricsCollector[string, string](collector), 225 | ) 226 | fmt.Println(" - Wrapped 'reverse' stage with Retry and Metrics (for FanOut).") 227 | 228 | // Create FanOut stage 229 | fanOut := fluxus.NewFanOut(metricatedUppercase, metricatedRetryReverse) 230 | 231 | // Wrap FanOut with metrics 232 | metricatedFanOut := fluxus.NewMetricatedFanOut( 233 | fanOut, 234 | fluxus.WithStageName[string, []string]("parallel-transform"), 235 | fluxus.WithMetricsCollector[string, []string](collector), 236 | ) 237 | fmt.Println(" - Wrapped FanOut stage ('parallel-transform') with Metrics.") 238 | 239 | // Create FanIn stage 240 | fanIn := fluxus.NewFanIn(aggregateResultsFunc) 241 | 242 | // Wrap FanIn with metrics 243 | metricatedFanIn := fluxus.NewMetricatedFanIn( 244 | fanIn, 245 | fluxus.WithStageName[[]string, string]("aggregate-results"), 246 | fluxus.WithMetricsCollector[[]string, string](collector), 247 | ) 248 | fmt.Println(" - Wrapped FanIn stage ('aggregate-results') with Metrics.") 249 | 250 | // Wrap final stage 251 | metricatedPrefix := fluxus.NewMetricatedStage( 252 | prefixStage, 253 | fluxus.WithStageName[string, string]("prefix"), 254 | fluxus.WithMetricsCollector[string, string](collector), 255 | ) 256 | fmt.Println(" - Wrapped 'prefix' stage.") 257 | 258 | // 4. 
Chain the metricated stages: Prepare -> FanOut -> FanIn -> Prefix 259 | fmt.Println("\n🔗 Chaining metricated stages...") 260 | chainedStage := fluxus.Chain( 261 | metricatedPrepare, 262 | fluxus.Chain( 263 | metricatedFanOut, 264 | fluxus.Chain( 265 | metricatedFanIn, 266 | metricatedPrefix, 267 | ), 268 | ), 269 | ) 270 | 271 | // 5. Create the pipeline 272 | pipeline := fluxus.NewPipeline(chainedStage) 273 | fmt.Println("✅ Pipeline built.") 274 | 275 | // 6. Process some data 276 | fmt.Println("\n▶️ Processing input ' Hello Metrics World '...") 277 | ctx := context.Background() 278 | startTime := time.Now() 279 | 280 | result, err := pipeline.Process(ctx, " Hello Metrics World ") 281 | 282 | duration := time.Since(startTime) 283 | 284 | if err != nil { 285 | fmt.Printf("\n❌ Pipeline processing failed after %v: %v\n", duration, err) 286 | // Note: The StageError metric would have already been recorded by the failing stage's wrapper. 287 | } else { 288 | fmt.Printf("\n✅ Pipeline processing successful in %v\n", duration) 289 | fmt.Printf(" Result: %s\n", result) 290 | } 291 | 292 | // 7. Display collected metrics 293 | collector.PrintStats() 294 | 295 | fmt.Println("\nDemo Complete!") 296 | } 297 | -------------------------------------------------------------------------------- /example/pipeline/pipeline.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "regexp" 9 | "strings" 10 | "time" 11 | 12 | "github.com/synoptiq/go-fluxus" 13 | ) 14 | 15 | // Various data types for our pipeline stages 16 | type RawText string 17 | type TokenizedText []string 18 | type FilteredTokens []string 19 | type CountMap map[string]int 20 | type TextStats struct { 21 | WordCount int 22 | UniqueWords int 23 | TopWords []string 24 | LongestWord string 25 | ShortestWord string 26 | AverageLength float64 27 | ProcessingTime time.Duration 28 | } 29 | 30 | // Stage 1: Tokenize text into words 31 | func tokenizeText(ctx context.Context, text RawText) (TokenizedText, error) { 32 | startTime := time.Now() 33 | 34 | // Simulate processing delay 35 | time.Sleep(50 * time.Millisecond) 36 | 37 | // Check for context cancellation 38 | if ctx.Err() != nil { 39 | return nil, ctx.Err() 40 | } 41 | 42 | // Convert to lowercase 43 | lowerText := strings.ToLower(string(text)) 44 | 45 | // Remove punctuation and split into words 46 | re := regexp.MustCompile(`[^\w\s]`) 47 | cleanText := re.ReplaceAllString(lowerText, "") 48 | words := strings.Fields(cleanText) 49 | 50 | fmt.Printf("✓ Tokenized text into %d words (%.2fms)\n", 51 | len(words), float64(time.Since(startTime).Microseconds())/1000) 52 | 53 | return words, nil 54 | } 55 | 56 | // Stage 2: Filter out common stop words 57 | func filterStopWords(ctx context.Context, tokens TokenizedText) (FilteredTokens, error) { 58 | startTime := time.Now() 59 | 60 | // Simulate processing delay 61 | time.Sleep(30 * time.Millisecond) 62 | 63 | // Check for context cancellation 64 | if ctx.Err() != nil { 65 | return nil, ctx.Err() 66 | } 67 | 68 | // Common English stop words 69 | stopWords := map[string]bool{ 70 | "a": true, "an": true, "the": true, "and": true, "or": true, "but": true, 71 | "is": true, "are": true, "was": true, "were": true, "be": true, "been": true, 72 | "to": true, "of": true, "in": true, "on": true, "at": true, "by": true, 73 | "for": true, "with": true, "about": true, "from": true, "as": true, 74 | "this": true, "that": true, "these": true, 
"those": true, "it": true, 75 | "i": true, "he": true, "she": true, "they": true, "we": true, "you": true, 76 | } 77 | 78 | // Filter out stop words 79 | filtered := make(FilteredTokens, 0, len(tokens)) 80 | for _, word := range tokens { 81 | if !stopWords[word] && len(word) > 0 { 82 | filtered = append(filtered, word) 83 | } 84 | } 85 | 86 | fmt.Printf("✓ Filtered out stop words, %d words remaining (%.2fms)\n", 87 | len(filtered), float64(time.Since(startTime).Microseconds())/1000) 88 | 89 | return filtered, nil 90 | } 91 | 92 | // Stage 3: Count word frequencies 93 | func countWords(ctx context.Context, tokens FilteredTokens) (CountMap, error) { 94 | startTime := time.Now() 95 | 96 | // Simulate processing delay 97 | time.Sleep(70 * time.Millisecond) 98 | 99 | // Check for context cancellation 100 | if ctx.Err() != nil { 101 | return nil, ctx.Err() 102 | } 103 | 104 | // Count word frequencies 105 | wordCounts := make(CountMap) 106 | for _, word := range tokens { 107 | wordCounts[word]++ 108 | } 109 | 110 | fmt.Printf("✓ Counted frequencies of %d unique words (%.2fms)\n", 111 | len(wordCounts), float64(time.Since(startTime).Microseconds())/1000) 112 | 113 | return wordCounts, nil 114 | } 115 | 116 | // Stage 4: Analyze text statistics 117 | func analyzeText(ctx context.Context, counts CountMap) (TextStats, error) { 118 | startTime := time.Now() 119 | 120 | // Simulate processing delay 121 | time.Sleep(100 * time.Millisecond) 122 | 123 | // Check for context cancellation 124 | if ctx.Err() != nil { 125 | return TextStats{}, ctx.Err() 126 | } 127 | 128 | // For this example, randomly simulate an error 10% of the time 129 | if rand.Float32() < 0.1 { 130 | return TextStats{}, errors.New("random analysis error occurred") 131 | } 132 | 133 | // Calculate total words 134 | totalWords := 0 135 | for _, count := range counts { 136 | totalWords += count 137 | } 138 | 139 | // Find longest and shortest words 140 | var longestWord string 141 | shortestWord := "pneumonoultramicroscopicsilicovolcanoconiosis" // A very long word as default 142 | 143 | for word := range counts { 144 | if len(word) > len(longestWord) { 145 | longestWord = word 146 | } 147 | if len(word) < len(shortestWord) { 148 | shortestWord = word 149 | } 150 | } 151 | 152 | // Calculate average word length 153 | totalLength := 0 154 | for word, count := range counts { 155 | totalLength += len(word) * count 156 | } 157 | 158 | averageLength := 0.0 159 | if totalWords > 0 { 160 | averageLength = float64(totalLength) / float64(totalWords) 161 | } 162 | 163 | // Find top words (limited to top 5) 164 | type wordCount struct { 165 | word string 166 | count int 167 | } 168 | 169 | wordFreqs := make([]wordCount, 0, len(counts)) 170 | for word, count := range counts { 171 | wordFreqs = append(wordFreqs, wordCount{word, count}) 172 | } 173 | 174 | // Sort by count (simple bubble sort for demonstration) 175 | for i := 0; i < len(wordFreqs); i++ { 176 | for j := i + 1; j < len(wordFreqs); j++ { 177 | if wordFreqs[j].count > wordFreqs[i].count { 178 | wordFreqs[i], wordFreqs[j] = wordFreqs[j], wordFreqs[i] 179 | } 180 | } 181 | } 182 | 183 | // Get top words (up to 5) 184 | numTop := 5 185 | if len(wordFreqs) < numTop { 186 | numTop = len(wordFreqs) 187 | } 188 | 189 | topWords := make([]string, numTop) 190 | for i := 0; i < numTop; i++ { 191 | topWords[i] = fmt.Sprintf("%s (%d)", wordFreqs[i].word, wordFreqs[i].count) 192 | } 193 | 194 | stats := TextStats{ 195 | WordCount: totalWords, 196 | UniqueWords: len(counts), 197 | TopWords: topWords, 
198 | LongestWord: longestWord, 199 | ShortestWord: shortestWord, 200 | AverageLength: averageLength, 201 | ProcessingTime: time.Since(startTime), 202 | } 203 | 204 | fmt.Printf("✓ Analyzed text statistics (%.2fms)\n", 205 | float64(stats.ProcessingTime.Microseconds())/1000) 206 | 207 | return stats, nil 208 | } 209 | 210 | // errorHandler is a custom error handler for the pipeline 211 | func errorHandler(err error) error { 212 | // Log the error with a timestamp 213 | fmt.Printf("❌ [%s] Pipeline error: %v\n", time.Now().Format(time.RFC3339), err) 214 | 215 | // You could implement custom error classification and handling here 216 | // For example, retry on specific errors, ignore others, etc. 217 | 218 | // For this example, we'll just return the error 219 | return err 220 | } 221 | 222 | // displayStats prints the text statistics in a formatted way 223 | func displayStats(text RawText, stats TextStats) { 224 | fmt.Println("\n📊 Text Analysis Results") 225 | fmt.Println("=====================") 226 | 227 | // Print some of the original text (truncated if too long) 228 | preview := string(text) 229 | if len(preview) > 100 { 230 | preview = preview[:100] + "..." 231 | } 232 | fmt.Printf("Text: %s\n\n", preview) 233 | 234 | fmt.Printf("Word count: %d\n", stats.WordCount) 235 | fmt.Printf("Unique words: %d\n", stats.UniqueWords) 236 | fmt.Printf("Average word length: %.2f characters\n", stats.AverageLength) 237 | fmt.Printf("Longest word: \"%s\" (%d characters)\n", stats.LongestWord, len(stats.LongestWord)) 238 | fmt.Printf("Shortest word: \"%s\" (%d characters)\n", stats.ShortestWord, len(stats.ShortestWord)) 239 | 240 | fmt.Println("\nTop words:") 241 | for i, word := range stats.TopWords { 242 | fmt.Printf(" %d. %s\n", i+1, word) 243 | } 244 | 245 | fmt.Printf("\nTotal processing time: %.2f ms\n", 246 | float64(stats.ProcessingTime.Microseconds())/1000) 247 | } 248 | 249 | // buildSimplePipeline constructs a linear pipeline using Chain 250 | func buildSimplePipeline() *fluxus.Pipeline[RawText, TextStats] { 251 | // Create stages 252 | tokenizeStage := fluxus.StageFunc[RawText, TokenizedText](tokenizeText) 253 | filterStage := fluxus.StageFunc[TokenizedText, FilteredTokens](filterStopWords) 254 | countStage := fluxus.StageFunc[FilteredTokens, CountMap](countWords) 255 | analyzeStage := fluxus.StageFunc[CountMap, TextStats](analyzeText) 256 | 257 | // Chain stages together 258 | chainedStage := fluxus.Chain( 259 | tokenizeStage, 260 | fluxus.Chain( 261 | filterStage, 262 | fluxus.Chain( 263 | countStage, 264 | analyzeStage, 265 | ), 266 | ), 267 | ) 268 | 269 | // Create a pipeline with error handling 270 | pipeline := fluxus.NewPipeline(chainedStage).WithErrorHandler(errorHandler) 271 | 272 | return pipeline 273 | } 274 | 275 | // buildResilientPipeline constructs a pipeline with additional resilience features 276 | func buildResilientPipeline() *fluxus.Pipeline[RawText, TextStats] { 277 | // Create basic stages 278 | tokenizeStage := fluxus.StageFunc[RawText, TokenizedText](tokenizeText) 279 | filterStage := fluxus.StageFunc[TokenizedText, FilteredTokens](filterStopWords) 280 | countStage := fluxus.StageFunc[FilteredTokens, CountMap](countWords) 281 | analyzeStage := fluxus.StageFunc[CountMap, TextStats](analyzeText) 282 | 283 | // Add timeout to tokenize stage (tokenization should be quick) 284 | timedTokenizeStage := fluxus.NewTimeout(tokenizeStage, 200*time.Millisecond) 285 | 286 | // Add retry for the analyze stage (which has simulated random failures) 287 | retryAnalyzeStage := 
fluxus.NewRetry(analyzeStage, 3) 288 | 289 | // Use exponential backoff for retries 290 | retryAnalyzeStage.WithBackoff(func(attempt int) int { 291 | // 100ms, 200ms, 400ms 292 | return 100 * (1 << attempt) 293 | }) 294 | 295 | // Chain stages together 296 | chainedStage := fluxus.Chain( 297 | timedTokenizeStage, 298 | fluxus.Chain( 299 | filterStage, 300 | fluxus.Chain( 301 | countStage, 302 | retryAnalyzeStage, 303 | ), 304 | ), 305 | ) 306 | 307 | // Create a pipeline with error handling 308 | pipeline := fluxus.NewPipeline(chainedStage).WithErrorHandler(errorHandler) 309 | 310 | return pipeline 311 | } 312 | 313 | // runPipelineDemo runs a demonstration of a pipeline 314 | func runPipelineDemo(name string, pipeline *fluxus.Pipeline[RawText, TextStats], text RawText) { 315 | fmt.Printf("\n▶️ Running %s\n", name) 316 | fmt.Printf("===================%s\n", strings.Repeat("=", len(name))) 317 | 318 | // Create a context with cancellation 319 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 320 | defer cancel() 321 | 322 | // Process the text through the pipeline 323 | fmt.Println("Processing text through pipeline stages:") 324 | startTime := time.Now() 325 | 326 | stats, err := pipeline.Process(ctx, text) 327 | 328 | totalTime := time.Since(startTime) 329 | 330 | if err != nil { 331 | fmt.Printf("\n❌ Pipeline failed after %.2f ms: %v\n", 332 | float64(totalTime.Microseconds())/1000, err) 333 | } else { 334 | fmt.Printf("\n✅ Pipeline succeeded in %.2f ms\n", 335 | float64(totalTime.Microseconds())/1000) 336 | 337 | // Display the results 338 | displayStats(text, stats) 339 | } 340 | } 341 | 342 | // sampleTexts provides some examples for processing 343 | func sampleTexts() []RawText { 344 | return []RawText{ 345 | "The quick brown fox jumps over the lazy dog. 
This pangram contains every letter of the English alphabet.", 346 | 347 | "Four score and seven years ago our fathers brought forth on this continent, a new nation, " + 348 | "conceived in Liberty, and dedicated to the proposition that all men are created equal.", 349 | 350 | "It was the best of times, it was the worst of times, it was the age of wisdom, it was the age " + 351 | "of foolishness, it was the epoch of belief, it was the epoch of incredulity, it was the season " + 352 | "of Light, it was the season of Darkness, it was the spring of hope, it was the winter of despair, " + 353 | "we had everything before us, we had nothing before us, we were all going direct to Heaven, " + 354 | "we were all going direct the other way.", 355 | } 356 | } 357 | 358 | func main() { 359 | // Seed random for consistent results 360 | rand.New(rand.NewSource(time.Now().UnixNano())) 361 | 362 | fmt.Println("Fluxus Chain and Pipeline Composition Demonstration") 363 | fmt.Println("=================================================") 364 | fmt.Println("This example demonstrates how to build pipelines by chaining") 365 | fmt.Println("different stages together, with each stage transforming the data") 366 | fmt.Println("into a new format before passing it to the next stage.") 367 | 368 | // Get sample texts 369 | texts := sampleTexts() 370 | 371 | // Build different pipeline variations 372 | simplePipeline := buildSimplePipeline() 373 | resilientPipeline := buildResilientPipeline() 374 | 375 | // Process the first sample with simple pipeline 376 | fmt.Println("\n📝 Sample Text 1") 377 | runPipelineDemo("Simple Pipeline", simplePipeline, texts[0]) 378 | 379 | // Process the second sample with simple pipeline too 380 | fmt.Println("\n📝 Sample Text 2") 381 | runPipelineDemo("Simple Pipeline", simplePipeline, texts[1]) 382 | 383 | // Process the third sample with resilient pipeline 384 | fmt.Println("\n📝 Sample Text 3 (with Resilient Pipeline)") 385 | runPipelineDemo("Resilient Pipeline", resilientPipeline, texts[2]) 386 | 387 | fmt.Println("\nDemo Complete!") 388 | } 389 | -------------------------------------------------------------------------------- /example/pooling/pooling.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "math/rand" 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "strings" 12 | "time" 13 | 14 | "github.com/synoptiq/go-fluxus" 15 | ) 16 | 17 | // LargeObject simulates a large object that is expensive to create 18 | type LargeObject struct { 19 | ID int 20 | Name string 21 | Data []byte // Simulates large data 22 | Metadata map[string]string // Maps are expensive to allocate 23 | Timestamp time.Time 24 | Tags []string 25 | Processed bool 26 | } 27 | 28 | // ProcessedResult represents a transformed object 29 | type ProcessedResult struct { 30 | ObjectID int 31 | SizeBytes int 32 | TagCount int 33 | MetaCount int 34 | Status string // Keep status short for demo output 35 | ProcessedAt time.Time 36 | } 37 | 38 | // generateLargeObjects creates test data with larger objects 39 | func generateLargeObjects(count int) []LargeObject { 40 | fmt.Printf("⚙️ Generating %d large test objects...\n", count) 41 | objects := make([]LargeObject, count) 42 | 43 | for i := 0; i < count; i++ { 44 | dataSize := 5*1024 + rand.Intn(5*1024) // 5-10KB 45 | metaSize := 20 + rand.Intn(20) // 20-40 entries 46 | tagCount := 5 + rand.Intn(10) // 5-15 tags 47 | 48 | metadata := make(map[string]string, metaSize) 49 | for 
j := 0; j < metaSize; j++ { 50 | key := fmt.Sprintf("meta-key-%d", j) 51 | value := fmt.Sprintf("meta-value-%d-%d-%s", i, j, strings.Repeat("x", 10)) // Shorter value 52 | metadata[key] = value 53 | } 54 | 55 | tags := make([]string, tagCount) 56 | for j := 0; j < tagCount; j++ { 57 | tags[j] = fmt.Sprintf("tag-%d-%s", j, strings.Repeat("y", 5)) // Shorter tag 58 | } 59 | 60 | data := make([]byte, dataSize) 61 | // rand.Read(data) // Skip filling for faster generation in demo 62 | 63 | objects[i] = LargeObject{ 64 | ID: i + 1, 65 | Name: fmt.Sprintf("Object-%d", i+1), // Shorter name 66 | Data: data, 67 | Metadata: metadata, 68 | Timestamp: time.Now().Add(-time.Duration(rand.Intn(10000)) * time.Minute), 69 | Tags: tags, 70 | Processed: false, 71 | } 72 | } 73 | fmt.Println("✅ Generation complete.") 74 | return objects 75 | } 76 | 77 | // --- Processing Logic (Common) --- 78 | 79 | // processObject simulates processing an object, potentially using pooled resources 80 | func processObject( 81 | obj LargeObject, 82 | builder *strings.Builder, // Pass builder explicitly 83 | tempBuffer []byte, // Pass buffer explicitly 84 | tempStrings []string, // Pass strings slice explicitly 85 | tempMap1 map[string]int, // Pass maps explicitly 86 | tempMap2 map[string][]byte, 87 | tempMap3 map[string]string, 88 | allKeys []string, 89 | ) (ProcessedResult, error) { 90 | // Reset builder and slices 91 | builder.Reset() 92 | tempStrings = tempStrings[:0] 93 | allKeys = allKeys[:0] 94 | 95 | // Clear maps 96 | for k := range tempMap1 { 97 | delete(tempMap1, k) 98 | } 99 | for k := range tempMap2 { 100 | delete(tempMap2, k) 101 | } 102 | for k := range tempMap3 { 103 | delete(tempMap3, k) 104 | } 105 | 106 | // Simulate processing that requires temporary allocations 107 | for i := 0; i < 5; i++ { // Reduced iterations for demo speed 108 | // Use the provided buffer 109 | workBuffer := tempBuffer 110 | if len(workBuffer) < 1024 { // Ensure minimum size 111 | workBuffer = make([]byte, 1024) 112 | } 113 | 114 | // Do some work with the buffer 115 | for j := 0; j < 100; j++ { // Reduced inner loop 116 | if j < len(obj.Data) { 117 | workBuffer[j] = obj.Data[j] ^ byte(i) 118 | } else { 119 | workBuffer[j] = byte(j % 256) 120 | } 121 | } 122 | 123 | // Use the provided string slice 124 | workStrings := tempStrings 125 | if cap(workStrings) < 5 { // Ensure minimum capacity 126 | workStrings = make([]string, 0, 5) 127 | } 128 | for j := 0; j < 5; j++ { // Reduced inner loop 129 | workStrings = append(workStrings, fmt.Sprintf("Item%d", j)) 130 | } 131 | 132 | fmt.Fprintf(builder, "B%d ", i) // Shorter status 133 | tempStrings = workStrings // Update slice reference if reallocated 134 | } 135 | 136 | // Use the provided maps 137 | for _, tag := range obj.Tags { 138 | tempMap1[tag] = len(tag) 139 | tempMap2[tag] = []byte(tag) 140 | tempMap3[tag] = strings.ToUpper(tag) 141 | } 142 | for key, value := range obj.Metadata { 143 | tempMap1[key] = len(key) + len(value) 144 | tempMap2[key] = []byte(value) 145 | tempMap3[key] = strings.ToUpper(key) + ":" + value 146 | } 147 | 148 | // Use the provided keys slice 149 | workKeys := allKeys 150 | if cap(workKeys) < len(tempMap1) { 151 | workKeys = make([]string, 0, len(tempMap1)) 152 | } 153 | for k := range tempMap1 { 154 | workKeys = append(workKeys, k) 155 | } 156 | // Simplified processing for demo 157 | _, err := fmt.Fprintf(builder, "Keys:%d ", len(workKeys)) 158 | 159 | if err != nil { 160 | return ProcessedResult{}, err 161 | } 162 | 163 | allKeys = workKeys // Update slice 
reference if reallocated 164 | _ = allKeys 165 | 166 | // Create a processed result 167 | result := ProcessedResult{ 168 | ObjectID: obj.ID, 169 | SizeBytes: len(obj.Data), 170 | TagCount: len(obj.Tags), 171 | MetaCount: len(obj.Metadata), 172 | Status: "Processed", // Simplified status 173 | ProcessedAt: time.Now(), 174 | } 175 | 176 | return result, nil 177 | } 178 | 179 | // --- Non-Pooled Implementation --- 180 | 181 | // processBatchWithoutPooling processes a batch of objects without pooling 182 | func processBatchWithoutPooling(ctx context.Context, batch []LargeObject) ([]ProcessedResult, error) { 183 | batchStart := time.Now() 184 | fmt.Printf("📦 Processing batch of %d records (No Pooling)...\n", len(batch)) 185 | results := make([]ProcessedResult, len(batch)) 186 | 187 | for i, obj := range batch { 188 | // Allocate resources for each object 189 | builder := &strings.Builder{} 190 | tempBuffer := make([]byte, 1024) 191 | tempStrings := make([]string, 0, 5) 192 | tempMap1 := make(map[string]int) 193 | tempMap2 := make(map[string][]byte) 194 | tempMap3 := make(map[string]string) 195 | allKeys := make([]string, 0, 50) 196 | 197 | result, err := processObject(obj, builder, tempBuffer, tempStrings, tempMap1, tempMap2, tempMap3, allKeys) 198 | if err != nil { 199 | return nil, err 200 | } 201 | results[i] = result 202 | } 203 | 204 | fmt.Printf("✅ Batch complete (No Pooling) in %v.\n", time.Since(batchStart)) 205 | return results, nil 206 | } 207 | 208 | // --- Pooled Implementation --- 209 | 210 | // PoolResources holds the pools needed for processing 211 | type PoolResources struct { 212 | builderPool *fluxus.ObjectPool[*strings.Builder] 213 | bufferPool *fluxus.ObjectPool[[]byte] 214 | stringsPool *fluxus.ObjectPool[[]string] 215 | map1Pool *fluxus.ObjectPool[map[string]int] 216 | map2Pool *fluxus.ObjectPool[map[string][]byte] 217 | map3Pool *fluxus.ObjectPool[map[string]string] 218 | keysPool *fluxus.ObjectPool[[]string] 219 | } 220 | 221 | // createPoolResources initializes all necessary pools 222 | func createPoolResources() *PoolResources { 223 | fmt.Println("🔧 Creating object pools...") 224 | resources := &PoolResources{ 225 | builderPool: fluxus.NewObjectPool(func() *strings.Builder { return &strings.Builder{} }), 226 | bufferPool: fluxus.NewObjectPool(func() []byte { return make([]byte, 1024) }), 227 | stringsPool: fluxus.NewObjectPool(func() []string { return make([]string, 0, 10) }), 228 | map1Pool: fluxus.NewObjectPool(func() map[string]int { return make(map[string]int, 50) }), 229 | map2Pool: fluxus.NewObjectPool(func() map[string][]byte { return make(map[string][]byte, 50) }), 230 | map3Pool: fluxus.NewObjectPool(func() map[string]string { return make(map[string]string, 50) }), 231 | keysPool: fluxus.NewObjectPool(func() []string { return make([]string, 0, 50) }), 232 | } 233 | // Pre-warm pools slightly 234 | fluxus.PreWarmPool(resources.builderPool, 10) 235 | fluxus.PreWarmPool(resources.bufferPool, 10) 236 | fmt.Println("🔥 Pools pre-warmed.") 237 | return resources 238 | } 239 | 240 | // processBatchWithPooling processes a batch of objects with pooling 241 | func processBatchWithPooling(pools *PoolResources) func(ctx context.Context, batch []LargeObject) ([]ProcessedResult, error) { 242 | return func(ctx context.Context, batch []LargeObject) ([]ProcessedResult, error) { 243 | batchStart := time.Now() 244 | fmt.Printf("📦 Processing batch of %d records (Pooling)...\n", len(batch)) 245 | results := make([]ProcessedResult, len(batch)) 246 | 247 | for i, obj := range batch { 
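			// Get/Put discipline for the pooled resources below: everything
			// taken from a pool is returned before the next iteration, and the
			// Puts run even when processObject fails, so an error does not leak
			// pooled objects. A defer-based variant would be safer if more
			// early returns were ever added to this loop.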
248 | // Get resources from pools 249 | builder := pools.builderPool.Get() 250 | tempBuffer := pools.bufferPool.Get() 251 | tempStrings := pools.stringsPool.Get() 252 | tempMap1 := pools.map1Pool.Get() 253 | tempMap2 := pools.map2Pool.Get() 254 | tempMap3 := pools.map3Pool.Get() 255 | allKeys := pools.keysPool.Get() 256 | 257 | result, err := processObject(obj, builder, tempBuffer, tempStrings, tempMap1, tempMap2, tempMap3, allKeys) 258 | 259 | // Return resources to pools 260 | pools.builderPool.Put(builder) 261 | pools.bufferPool.Put(tempBuffer) 262 | pools.stringsPool.Put(tempStrings) 263 | pools.map1Pool.Put(tempMap1) 264 | pools.map2Pool.Put(tempMap2) 265 | pools.map3Pool.Put(tempMap3) 266 | pools.keysPool.Put(allKeys) 267 | 268 | if err != nil { 269 | return nil, err 270 | } 271 | results[i] = result 272 | } 273 | fmt.Printf("✅ Batch complete (Pooling) in %v.\n", time.Since(batchStart)) 274 | return results, nil 275 | } 276 | } 277 | 278 | // --- Demonstration Functions --- 279 | 280 | // runProcessingDemo runs the processing with a given buffer stage 281 | func runProcessingDemo(name string, ctx context.Context, buffer fluxus.Stage[[]LargeObject, []ProcessedResult], objects []LargeObject) (time.Duration, int64, uint64) { 282 | fmt.Printf("\n▶️ Running demo: %s\n", name) 283 | // Force GC before measurement 284 | runtime.GC() 285 | var memStatsBefore runtime.MemStats 286 | runtime.ReadMemStats(&memStatsBefore) 287 | gcTotalPauseNsBefore := memStatsBefore.PauseTotalNs 288 | gcStart := memStatsBefore.NumGC 289 | 290 | startTime := time.Now() 291 | _, err := buffer.Process(ctx, objects) 292 | if err != nil { 293 | log.Printf("❌ Processing error in %s: %v", name, err) 294 | } 295 | duration := time.Since(startTime) 296 | 297 | // Force GC after measurement 298 | runtime.GC() 299 | var memStatsAfter runtime.MemStats 300 | runtime.ReadMemStats(&memStatsAfter) 301 | gcCount := memStatsAfter.NumGC - gcStart 302 | gcPauseNs := memStatsAfter.PauseTotalNs - gcTotalPauseNsBefore 303 | allocDelta := memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc 304 | 305 | fmt.Printf("🏁 %s finished in %v.\n", name, duration) 306 | fmt.Printf(" - Allocations: %d MB (%d bytes/object)\n", allocDelta/(1024*1024), allocDelta/uint64(len(objects))) 307 | fmt.Printf(" - GC Runs: %d\n", gcCount) 308 | fmt.Printf(" - GC Pause: %v\n", time.Duration(gcPauseNs)) 309 | 310 | return duration, int64(allocDelta), uint64(gcCount) 311 | } 312 | 313 | func main() { 314 | rand.New(rand.NewSource(time.Now().UnixNano())) 315 | ctx := context.Background() 316 | 317 | fmt.Println("🌀 Fluxus Pooling Demonstration") 318 | fmt.Println("==============================") 319 | fmt.Println("This example compares processing large objects with and without") 320 | fmt.Println("object pooling to demonstrate the impact on performance and memory usage.") 321 | 322 | // Generate test data 323 | objectCount := 500 // Reduced count for faster demo 324 | objects := generateLargeObjects(objectCount) 325 | batchSize := 50 326 | 327 | // --- Run Without Pooling --- 328 | bufferNoPooling := fluxus.NewBuffer(batchSize, processBatchWithoutPooling) 329 | durationNoPool, allocNoPool, gcNoPool := runProcessingDemo("Without Pooling", ctx, bufferNoPooling, objects) 330 | 331 | // --- Run With Pooling --- 332 | poolResources := createPoolResources() 333 | bufferWithPooling := fluxus.NewPooledBuffer(batchSize, processBatchWithPooling(poolResources)) 334 | durationPool, allocPool, gcPool := runProcessingDemo("With Pooling", ctx, bufferWithPooling, objects) 335 | 
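	// For reference, the pooled pattern above reduces to this shape (assuming
	// the NewObjectPool/Get/Put signatures used in this file):
	//
	//	pool := fluxus.NewObjectPool(func() []byte { return make([]byte, 1024) })
	//	buf := pool.Get() // may invoke the factory on a cold pool
	//	pool.Put(buf)     // return for reuse; contents are NOT zeroed
	//
	// which is why processObject explicitly resets builders, slices and maps.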
336 | // --- Comparison --- 337 | fmt.Println("\n📊 Comparison Results") 338 | fmt.Println("--------------------") 339 | fmt.Printf(" | %-15s | %-15s | Improvement\n", "Without Pooling", "With Pooling") 340 | fmt.Println("---------------------|-----------------|-----------------|------------") 341 | fmt.Printf("Duration | %-15v | %-15v | %.2fx\n", 342 | durationNoPool.Round(time.Millisecond), 343 | durationPool.Round(time.Millisecond), 344 | float64(durationNoPool)/float64(durationPool)) 345 | fmt.Printf("Allocations (MB) | %-15d | %-15d | %.2fx less\n", 346 | allocNoPool/(1024*1024), 347 | allocPool/(1024*1024), 348 | float64(allocNoPool)/float64(allocPool)) 349 | fmt.Printf("GC Runs | %-15d | %-15d | %d fewer\n", 350 | gcNoPool, gcPool, gcNoPool-gcPool) 351 | 352 | // --- Profiling Info --- 353 | // Setup CPU profiling 354 | cpuFile, err := os.Create("pooling_cpu.prof") 355 | if err != nil { 356 | log.Printf("⚠️ Could not create CPU profile: %v", err) 357 | } else { 358 | defer cpuFile.Close() 359 | if err := pprof.StartCPUProfile(cpuFile); err != nil { 360 | log.Printf("⚠️ Could not start CPU profile: %v", err) 361 | } else { 362 | // Rerun the pooled version briefly for profiling 363 | fmt.Println("\n⏱️ Running pooled version again for CPU profiling...") 364 | ctxProfile, cancel := context.WithTimeout(ctx, 5*time.Second) 365 | runProcessingDemo("Profiling Run", ctxProfile, bufferWithPooling, objects) 366 | cancel() 367 | pprof.StopCPUProfile() 368 | fmt.Println("✅ CPU profile written to pooling_cpu.prof") 369 | } 370 | } 371 | 372 | // Create memory profile 373 | memFile, err := os.Create("pooling_mem.prof") 374 | if err != nil { 375 | log.Printf("⚠️ Could not create memory profile: %v", err) 376 | } else { 377 | defer memFile.Close() 378 | runtime.GC() // Run GC right before heap dump 379 | if err := pprof.WriteHeapProfile(memFile); err != nil { 380 | log.Printf("⚠️ Could not write memory profile: %v", err) 381 | } else { 382 | fmt.Println("✅ Memory profile written to pooling_mem.prof") 383 | } 384 | } 385 | 386 | fmt.Println("\n💡 To analyze profiles, use: go tool pprof ") 387 | fmt.Println("\nDemo Complete!") 388 | } 389 | -------------------------------------------------------------------------------- /example/ratelimit/ratelimit.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/synoptiq/go-fluxus" 10 | "golang.org/x/time/rate" 11 | ) 12 | 13 | // APIService represents a service that makes API calls 14 | type APIService struct { 15 | name string 16 | processingTimeMs int 17 | callCount int 18 | mu sync.Mutex 19 | } 20 | 21 | // NewAPIService creates a new API service simulator 22 | func NewAPIService(name string, processingTimeMs int) *APIService { 23 | return &APIService{ 24 | name: name, 25 | processingTimeMs: processingTimeMs, 26 | } 27 | } 28 | 29 | // Call simulates making an API call with some processing time 30 | func (s *APIService) Call(ctx context.Context, request string) (string, error) { 31 | // Increment call count 32 | s.mu.Lock() 33 | s.callCount++ 34 | currentCount := s.callCount 35 | s.mu.Unlock() 36 | 37 | // Simulate processing delay 38 | select { 39 | case <-time.After(time.Duration(s.processingTimeMs) * time.Millisecond): 40 | // Continue after delay 41 | case <-ctx.Done(): 42 | return "", ctx.Err() 43 | } 44 | 45 | // Return successful response 46 | return fmt.Sprintf("[%s] Response #%d for request: %s", s.name, currentCount, 
request), nil 47 | } 48 | 49 | // RateLimitedAPIClient wraps a service with rate limiting 50 | type RateLimitedAPIClient struct { 51 | service *APIService 52 | rateLimiter *fluxus.RateLimiter[string, string] 53 | limit rate.Limit 54 | burst int 55 | } 56 | 57 | // NewRateLimitedAPIClient creates a new API client with rate limiting 58 | func NewRateLimitedAPIClient(service *APIService, rps float64, burst int) *RateLimitedAPIClient { 59 | // Create a stage for the service 60 | serviceStage := fluxus.StageFunc[string, string](service.Call) 61 | 62 | // Wrap with rate limiter 63 | rateLimiter := fluxus.NewRateLimiter( 64 | serviceStage, 65 | rate.Limit(rps), // Requests per second 66 | burst, // Burst capacity 67 | fluxus.WithLimiterTimeout[string, string](2*time.Second), // Wait up to 2 seconds for a token 68 | ) 69 | 70 | return &RateLimitedAPIClient{ 71 | service: service, 72 | rateLimiter: rateLimiter, 73 | limit: rate.Limit(rps), 74 | burst: burst, 75 | } 76 | } 77 | 78 | // Call makes a rate-limited call to the service 79 | func (c *RateLimitedAPIClient) Call(ctx context.Context, request string) (string, error) { 80 | return c.rateLimiter.Process(ctx, request) 81 | } 82 | 83 | // UpdateLimit updates the rate limit for this client 84 | func (c *RateLimitedAPIClient) UpdateLimit(newRPS float64) { 85 | c.limit = rate.Limit(newRPS) 86 | c.rateLimiter.SetLimit(c.limit) 87 | fmt.Printf("🔄 Rate limit updated to %.1f RPS\n", newRPS) 88 | } 89 | 90 | // UpdateBurst updates the burst limit for this client 91 | func (c *RateLimitedAPIClient) UpdateBurst(newBurst int) { 92 | c.burst = newBurst 93 | c.rateLimiter.SetBurst(newBurst) 94 | fmt.Printf("🔄 Burst limit updated to %d\n", newBurst) 95 | } 96 | 97 | // DisplayStatus shows the current rate limit settings 98 | func (c *RateLimitedAPIClient) DisplayStatus() { 99 | fmt.Printf("📊 Client Status: %.1f RPS, Burst: %d, Service: %s\n", 100 | float64(c.limit), c.burst, c.service.name) 101 | } 102 | 103 | // RunBurstDemo demonstrates a burst of requests 104 | func RunBurstDemo(client *RateLimitedAPIClient, requestCount int) { 105 | fmt.Printf("\n🚀 Starting burst demo with %d requests...\n", requestCount) 106 | 107 | startTime := time.Now() 108 | 109 | // Create a context with timeout 110 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 111 | defer cancel() 112 | 113 | // Send requests sequentially 114 | successCount := 0 115 | failureCount := 0 116 | 117 | for i := 1; i <= requestCount; i++ { 118 | requestID := fmt.Sprintf("burst-req-%d", i) 119 | 120 | // Measure this specific request 121 | requestStart := time.Now() 122 | 123 | result, err := client.Call(ctx, requestID) 124 | 125 | elapsed := time.Since(requestStart) 126 | 127 | if err != nil { 128 | fmt.Printf("❌ Request %2d failed after %7.1fms: %v\n", 129 | i, float64(elapsed.Microseconds())/1000, err) 130 | failureCount++ 131 | } else { 132 | fmt.Printf("✅ Request %2d completed in %7.1fms: %s\n", 133 | i, float64(elapsed.Microseconds())/1000, result) 134 | successCount++ 135 | } 136 | } 137 | 138 | totalTime := time.Since(startTime) 139 | 140 | fmt.Printf("\n📋 Burst Demo Results:\n") 141 | fmt.Printf(" Total time: %.2f seconds\n", totalTime.Seconds()) 142 | fmt.Printf(" Successful requests: %d\n", successCount) 143 | fmt.Printf(" Failed requests: %d\n", failureCount) 144 | fmt.Printf(" Effective rate: %.2f RPS\n", float64(successCount)/totalTime.Seconds()) 145 | } 146 | 147 | // RunConcurrentDemo demonstrates concurrent requests with rate limiting 148 | func 
RunConcurrentDemo(client *RateLimitedAPIClient, concurrency, requestsPerWorker int) { 149 | totalRequests := concurrency * requestsPerWorker 150 | fmt.Printf("\n🔄 Starting concurrent demo with %d workers, %d requests each (%d total)...\n", 151 | concurrency, requestsPerWorker, totalRequests) 152 | 153 | startTime := time.Now() 154 | 155 | // Create a context with timeout 156 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 157 | defer cancel() 158 | 159 | // Create wait group for workers 160 | var wg sync.WaitGroup 161 | wg.Add(concurrency) 162 | 163 | // Tracking results 164 | var ( 165 | successCount int64 166 | failureCount int64 167 | totalLatencyMs int64 168 | ) 169 | 170 | // Mutex for thread-safe updates 171 | var mu sync.Mutex 172 | 173 | // Launch workers 174 | for w := 1; w <= concurrency; w++ { 175 | go func(workerID int) { 176 | defer wg.Done() 177 | 178 | for i := 1; i <= requestsPerWorker; i++ { 179 | requestID := fmt.Sprintf("worker-%d-req-%d", workerID, i) 180 | 181 | requestStart := time.Now() 182 | _, err := client.Call(ctx, requestID) 183 | elapsed := time.Since(requestStart) 184 | 185 | mu.Lock() 186 | if err != nil { 187 | failureCount++ 188 | fmt.Printf("❌ Worker %2d: Request %2d failed after %7.1fms: %v\n", 189 | workerID, i, float64(elapsed.Microseconds())/1000, err) 190 | } else { 191 | successCount++ 192 | totalLatencyMs += elapsed.Milliseconds() 193 | fmt.Printf("✅ Worker %2d: Request %2d completed in %7.1fms\n", 194 | workerID, i, float64(elapsed.Microseconds())/1000) 195 | } 196 | mu.Unlock() 197 | 198 | // Small sleep to make output more readable 199 | time.Sleep(10 * time.Millisecond) 200 | } 201 | }(w) 202 | } 203 | 204 | // Wait for all workers to finish 205 | wg.Wait() 206 | 207 | totalTime := time.Since(startTime) 208 | 209 | fmt.Printf("\n📋 Concurrent Demo Results:\n") 210 | fmt.Printf(" Total time: %.2f seconds\n", totalTime.Seconds()) 211 | fmt.Printf(" Successful requests: %d\n", successCount) 212 | fmt.Printf(" Failed requests: %d\n", failureCount) 213 | fmt.Printf(" Effective rate: %.2f RPS\n", float64(successCount)/totalTime.Seconds()) 214 | 215 | if successCount > 0 { 216 | fmt.Printf(" Average latency: %.2f ms\n", float64(totalLatencyMs)/float64(successCount)) 217 | } 218 | } 219 | 220 | // RunDynamicRateDemo demonstrates changing rate limits on the fly 221 | func RunDynamicRateDemo(client *RateLimitedAPIClient) { 222 | fmt.Printf("\n🔄 Starting dynamic rate limiting demo...\n") 223 | 224 | // Initial settings 225 | client.DisplayStatus() 226 | 227 | // Run with initial rate 228 | fmt.Println("\n1️⃣ Initial rate limit:") 229 | RunBurstDemo(client, 5) 230 | 231 | // Decrease rate limit 232 | client.UpdateLimit(1.0) // 1 request per second 233 | client.UpdateBurst(1) // No bursting 234 | fmt.Println("\n2️⃣ Decreased rate limit:") 235 | RunBurstDemo(client, 5) 236 | 237 | // Increase rate limit 238 | client.UpdateLimit(10.0) // 10 requests per second 239 | client.UpdateBurst(5) // Burst of 5 240 | fmt.Println("\n3️⃣ Increased rate limit:") 241 | RunBurstDemo(client, 10) 242 | } 243 | 244 | func main() { 245 | fmt.Println("Fluxus Rate Limiter Demonstration") 246 | fmt.Println("=================================") 247 | fmt.Println("This example demonstrates rate limiting to control the flow of requests") 248 | fmt.Println("to a service, preventing it from being overwhelmed while allowing bursts") 249 | fmt.Println("of traffic when capacity is available.") 250 | 251 | // Create an API service with 100ms processing time 252 | 
service := NewAPIService("ExampleAPI", 100) 253 | 254 | // Create a rate-limited client 255 | // Start with 3 RPS and burst capacity of 2 256 | client := NewRateLimitedAPIClient(service, 3.0, 2) 257 | 258 | // Part 1: Simple burst demo 259 | RunBurstDemo(client, 8) 260 | 261 | // Part 2: Concurrent requests demo 262 | RunConcurrentDemo(client, 3, 4) 263 | 264 | // Part 3: Dynamic rate limiting demo 265 | RunDynamicRateDemo(client) 266 | 267 | fmt.Println("\nDemo Complete!") 268 | } 269 | -------------------------------------------------------------------------------- /example/retry/retry.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "math/rand" 9 | "os" 10 | "time" 11 | 12 | "github.com/synoptiq/go-fluxus" 13 | ) 14 | 15 | // TransientError represents a temporary failure that can be retried 16 | type TransientError struct { 17 | message string 18 | } 19 | 20 | func (e TransientError) Error() string { 21 | return fmt.Sprintf("transient error: %s", e.message) 22 | } 23 | 24 | // PermanentError represents a permanent failure that should not be retried 25 | type PermanentError struct { 26 | message string 27 | } 28 | 29 | func (e PermanentError) Error() string { 30 | return fmt.Sprintf("permanent error: %s", e.message) 31 | } 32 | 33 | // FlakyService simulates a service that fails intermittently 34 | type FlakyService struct { 35 | name string 36 | transientErrorRate float64 37 | permanentErrorRate float64 38 | successfulAttempts int 39 | failedAttempts int 40 | recoveringThreshold int // After this many calls, the service starts behaving better 41 | } 42 | 43 | // NewFlakyService creates a new flaky service simulator 44 | func NewFlakyService(name string, transientRate, permanentRate float64, recoveringThreshold int) *FlakyService { 45 | return &FlakyService{ 46 | name: name, 47 | transientErrorRate: transientRate, 48 | permanentErrorRate: permanentRate, 49 | recoveringThreshold: recoveringThreshold, 50 | } 51 | } 52 | 53 | // Call simulates calling the flaky service 54 | func (s *FlakyService) Call(ctx context.Context, request string) (string, error) { 55 | // Check for context cancellation 56 | select { 57 | case <-ctx.Done(): 58 | return "", ctx.Err() 59 | default: 60 | // Continue with the call 61 | } 62 | 63 | // Simulate some processing time 64 | processingTime := 100 + rand.Intn(200) 65 | time.Sleep(time.Duration(processingTime) * time.Millisecond) 66 | 67 | // Determine if the service should start recovering 68 | totalAttempts := s.successfulAttempts + s.failedAttempts 69 | if totalAttempts >= s.recoveringThreshold { 70 | // Reduce error rates once we've had enough attempts 71 | s.transientErrorRate /= 2 72 | s.permanentErrorRate /= 2 73 | } 74 | 75 | // Determine outcome 76 | r := rand.Float64() 77 | 78 | if r < s.permanentErrorRate { 79 | // Permanent failure 80 | s.failedAttempts++ 81 | errorMsg := fmt.Sprintf("%s failed with permanent error (attempt #%d for %s)", 82 | s.name, totalAttempts+1, request) 83 | return "", PermanentError{message: errorMsg} 84 | } else if r < s.permanentErrorRate+s.transientErrorRate { 85 | // Transient failure 86 | s.failedAttempts++ 87 | errorMsg := fmt.Sprintf("%s failed with transient error (attempt #%d for %s)", 88 | s.name, totalAttempts+1, request) 89 | return "", TransientError{message: errorMsg} 90 | } 91 | 92 | // Success 93 | s.successfulAttempts++ 94 | return fmt.Sprintf("%s successfully processed %s (attempt #%d)", 95 
| s.name, request, totalAttempts+1), nil 96 | } 97 | 98 | // RetryingServiceClient wraps a service with retry capabilities 99 | type RetryingServiceClient struct { 100 | service *FlakyService 101 | retry *fluxus.Retry[string, string] 102 | } 103 | 104 | // NewRetryingServiceClient creates a new service client with retry logic 105 | func NewRetryingServiceClient(service *FlakyService, maxAttempts int) *RetryingServiceClient { 106 | // Create a stage that calls the service 107 | serviceStage := fluxus.StageFunc[string, string](service.Call) 108 | 109 | // Wrap with retry logic 110 | retry := fluxus.NewRetry(serviceStage, maxAttempts) 111 | 112 | // Only retry on transient errors 113 | retry.WithShouldRetry(func(err error) bool { 114 | var transientErr TransientError 115 | return errors.As(err, &transientErr) 116 | }) 117 | 118 | // Use exponential backoff 119 | retry.WithBackoff(func(attempt int) int { 120 | // Base delay is 100ms, doubles each attempt with some jitter 121 | baseDelay := 100 122 | maxJitter := baseDelay / 2 123 | delay := baseDelay * (1 << attempt) // 100, 200, 400, 800, ... 124 | jitter := rand.Intn(maxJitter) 125 | 126 | return delay + jitter 127 | }) 128 | 129 | return &RetryingServiceClient{ 130 | service: service, 131 | retry: retry, 132 | } 133 | } 134 | 135 | // Call sends a request to the service with retries 136 | func (c *RetryingServiceClient) Call(ctx context.Context, request string) (string, error) { 137 | return c.retry.Process(ctx, request) 138 | } 139 | 140 | // runDemo demonstrates the retry mechanism with a series of requests 141 | func runDemo(service *FlakyService, client *RetryingServiceClient, numRequests int) { 142 | fmt.Printf("\nRunning %d requests with retries to %s\n", numRequests, service.name) 143 | fmt.Printf("----------------------------------------------\n") 144 | 145 | successes := 0 146 | transientFailures := 0 147 | permanentFailures := 0 148 | 149 | for i := 1; i <= numRequests; i++ { 150 | requestID := fmt.Sprintf("request-%d", i) 151 | 152 | // Create context with timeout 153 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 154 | 155 | // Call service with retries 156 | fmt.Printf("\nSending %s... 
", requestID) 157 | result, err := client.Call(ctx, requestID) 158 | 159 | if err != nil { 160 | var transientErr TransientError 161 | var permanentErr PermanentError 162 | 163 | if errors.As(err, &transientErr) { 164 | transientFailures++ 165 | fmt.Printf("FAILED after retries (transient): %v\n", err) 166 | } else if errors.As(err, &permanentErr) { 167 | permanentFailures++ 168 | fmt.Printf("❌ FAILED permanently: %v\n", err) 169 | } else if errors.Is(err, context.DeadlineExceeded) { 170 | fmt.Printf("⌛ TIMEOUT: %v\n", err) 171 | permanentFailures++ 172 | } else { 173 | fmt.Printf("❌ ERROR: %v\n", err) 174 | permanentFailures++ 175 | } 176 | } else { 177 | successes++ 178 | fmt.Printf("✅ SUCCESS: %s\n", result) 179 | } 180 | 181 | cancel() // Clean up the context 182 | 183 | // Slight pause between requests 184 | time.Sleep(200 * time.Millisecond) 185 | } 186 | 187 | fmt.Printf("\nResults:\n") 188 | fmt.Printf(" Total requests: %d\n", numRequests) 189 | fmt.Printf(" Successes: %d (%.1f%%)\n", successes, float64(successes)/float64(numRequests)*100) 190 | fmt.Printf(" Permanent failures: %d (%.1f%%)\n", permanentFailures, float64(permanentFailures)/float64(numRequests)*100) 191 | fmt.Printf(" Transient failures: %d (%.1f%%)\n", transientFailures, float64(transientFailures)/float64(numRequests)*100) 192 | fmt.Printf(" Service stats: %d successful attempts, %d failed attempts\n", 193 | service.successfulAttempts, service.failedAttempts) 194 | } 195 | 196 | func main() { 197 | // Set random seed for deterministic results 198 | rand.New(rand.NewSource(time.Now().UnixNano())) 199 | 200 | // Configure logging 201 | log.SetOutput(os.Stdout) 202 | log.SetFlags(log.Ltime | log.Lmicroseconds) 203 | 204 | fmt.Println("Fluxus Retry Mechanism Demonstration") 205 | fmt.Println("====================================") 206 | fmt.Println("This example demonstrates the retry mechanism with exponential backoff") 207 | fmt.Println("and intelligent retry policies. 
The simulated service begins with high") 208 | fmt.Println("failure rates but improves over time as more requests are processed.") 209 | fmt.Println("") 210 | fmt.Println("- Green checkmarks indicate success") 211 | fmt.Println("- Yellow warnings indicate transient errors (retried)") 212 | fmt.Println("- Red X's indicate permanent errors (not retried)") 213 | 214 | // Create a flaky service 215 | // Initially 50% transient errors, 20% permanent errors, improves after 10 requests 216 | service := NewFlakyService("ExampleAPI", 0.5, 0.2, 10) 217 | 218 | // Create a client with retry logic 219 | client := NewRetryingServiceClient(service, 5) // Max 5 attempts 220 | 221 | // Run the demo with 20 requests 222 | runDemo(service, client, 20) 223 | 224 | fmt.Println("\nDemo Complete!") 225 | } 226 | -------------------------------------------------------------------------------- /example/timeout/timeout.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "time" 9 | 10 | "github.com/synoptiq/go-fluxus" 11 | ) 12 | 13 | // SlowService represents a service with unpredictable response times 14 | type SlowService struct { 15 | name string 16 | minLatencyMs int 17 | maxLatencyMs int 18 | failureRate float64 19 | callCount int 20 | timeoutCount int 21 | successCount int 22 | naturalFailures int 23 | } 24 | 25 | // NewSlowService creates a new slow service simulator 26 | func NewSlowService(name string, minLatencyMs, maxLatencyMs int, failureRate float64) *SlowService { 27 | return &SlowService{ 28 | name: name, 29 | minLatencyMs: minLatencyMs, 30 | maxLatencyMs: maxLatencyMs, 31 | failureRate: failureRate, 32 | } 33 | } 34 | 35 | // Call simulates a service call with variable response time 36 | func (s *SlowService) Call(ctx context.Context, request string) (string, error) { 37 | s.callCount++ 38 | 39 | // First check if this call will fail naturally 40 | if rand.Float64() < s.failureRate { 41 | s.naturalFailures++ 42 | return "", fmt.Errorf("%s failed to process request: %s (natural failure)", s.name, request) 43 | } 44 | 45 | // Determine processing time 46 | processingTime := s.minLatencyMs + rand.Intn(s.maxLatencyMs-s.minLatencyMs+1) 47 | 48 | // Create a timer for the expected processing time 49 | timer := time.NewTimer(time.Duration(processingTime) * time.Millisecond) 50 | defer timer.Stop() 51 | 52 | // Simulate processing 53 | select { 54 | case <-timer.C: 55 | // Processing completed successfully 56 | s.successCount++ 57 | return fmt.Sprintf("[%s] Successfully processed %s in %d ms", 58 | s.name, request, processingTime), nil 59 | 60 | case <-ctx.Done(): 61 | // Context was cancelled (likely timeout) 62 | s.timeoutCount++ 63 | return "", ctx.Err() 64 | } 65 | } 66 | 67 | // GetStats returns service statistics 68 | func (s *SlowService) GetStats() map[string]int { 69 | return map[string]int{ 70 | "calls": s.callCount, 71 | "successes": s.successCount, 72 | "timeouts": s.timeoutCount, 73 | "natural_failures": s.naturalFailures, 74 | } 75 | } 76 | 77 | // TimeoutProtectedClient wraps a service with timeout protection 78 | type TimeoutProtectedClient struct { 79 | service *SlowService 80 | timeout *fluxus.Timeout[string, string] 81 | } 82 | 83 | // NewTimeoutProtectedClient creates a new client with timeout protection 84 | func NewTimeoutProtectedClient(service *SlowService, timeoutDuration time.Duration) *TimeoutProtectedClient { 85 | // Create a stage for the service 86 | 
serviceStage := fluxus.StageFunc[string, string](service.Call) 87 | 88 | // Wrap with timeout 89 | timeoutStage := fluxus.NewTimeout(serviceStage, timeoutDuration) 90 | 91 | return &TimeoutProtectedClient{ 92 | service: service, 93 | timeout: timeoutStage, 94 | } 95 | } 96 | 97 | // Call makes a call to the service with timeout protection 98 | func (c *TimeoutProtectedClient) Call(ctx context.Context, request string) (string, error) { 99 | return c.timeout.Process(ctx, request) 100 | } 101 | 102 | // RunComparisonDemo demonstrates the effect of different timeout settings 103 | func RunComparisonDemo(service *SlowService, requestCount int, timeouts []time.Duration) { 104 | fmt.Printf("\n🕒 Running timeout comparison for %d requests\n", requestCount) 105 | fmt.Printf("Service: %s (latency %d-%d ms, failure rate %.1f%%)\n\n", 106 | service.name, service.minLatencyMs, service.maxLatencyMs, service.failureRate*100) 107 | 108 | // Create base context 109 | baseCtx := context.Background() 110 | 111 | // Run tests with different timeouts 112 | for _, timeout := range timeouts { 113 | fmt.Printf("Testing with timeout: %v\n", timeout) 114 | 115 | // Create a client with this timeout 116 | client := NewTimeoutProtectedClient(service, timeout) 117 | 118 | // Track results 119 | successCount := 0 120 | timeoutCount := 0 121 | naturalFailureCount := 0 122 | totalSuccessLatency := 0 123 | 124 | // Process requests 125 | for i := 1; i <= requestCount; i++ { 126 | requestID := fmt.Sprintf("req-%d", i) 127 | 128 | startTime := time.Now() 129 | result, err := client.Call(baseCtx, requestID) 130 | duration := time.Since(startTime) 131 | 132 | if err != nil { 133 | if errors.Is(err, context.DeadlineExceeded) { 134 | timeoutCount++ 135 | fmt.Printf(" ⏱️ Request %2d timed out after %7.1f ms\n", 136 | i, float64(duration.Microseconds())/1000) 137 | } else { 138 | naturalFailureCount++ 139 | fmt.Printf(" ❌ Request %2d failed after %7.1f ms: %v\n", 140 | i, float64(duration.Microseconds())/1000, err) 141 | } 142 | } else { 143 | successCount++ 144 | totalSuccessLatency += int(duration.Milliseconds()) 145 | fmt.Printf(" ✅ Request %2d completed in %7.1f ms: %s\n", 146 | i, float64(duration.Microseconds())/1000, result) 147 | } 148 | } 149 | 150 | // Print summary for this timeout 151 | fmt.Printf("\n Summary (timeout=%v):\n", timeout) 152 | fmt.Printf(" Successful: %d (%.1f%%)\n", 153 | successCount, float64(successCount)*100/float64(requestCount)) 154 | fmt.Printf(" Timeouts: %d (%.1f%%)\n", 155 | timeoutCount, float64(timeoutCount)*100/float64(requestCount)) 156 | fmt.Printf(" Natural failures: %d (%.1f%%)\n", 157 | naturalFailureCount, float64(naturalFailureCount)*100/float64(requestCount)) 158 | 159 | if successCount > 0 { 160 | fmt.Printf(" Avg. 
latency for successes: %.1f ms\n", 161 | float64(totalSuccessLatency)/float64(successCount)) 162 | } 163 | 164 | fmt.Println() 165 | } 166 | 167 | // Verify service stats 168 | stats := service.GetStats() 169 | fmt.Printf("Service stats after all tests:\n") 170 | fmt.Printf(" Total calls: %d\n", stats["calls"]) 171 | fmt.Printf(" Successful responses: %d\n", stats["successes"]) 172 | fmt.Printf(" Timeouts: %d\n", stats["timeouts"]) 173 | fmt.Printf(" Natural failures: %d\n", stats["natural_failures"]) 174 | } 175 | 176 | // RunChainedTimeoutsDemo demonstrates timeouts in a chained pipeline 177 | func RunChainedTimeoutsDemo() { 178 | fmt.Println("\n🔄 Running chained timeouts demo") 179 | 180 | // Create multiple services with different characteristics 181 | authService := NewSlowService("AuthService", 50, 150, 0.05) 182 | databaseService := NewSlowService("DatabaseService", 100, 400, 0.1) 183 | processingService := NewSlowService("ProcessingService", 200, 700, 0.05) 184 | 185 | // Create timeout stages for each service 186 | authStage := fluxus.NewTimeout( 187 | fluxus.StageFunc[string, string](authService.Call), 188 | 200*time.Millisecond, 189 | ) 190 | 191 | dbStage := fluxus.NewTimeout( 192 | fluxus.StageFunc[string, string](databaseService.Call), 193 | 500*time.Millisecond, 194 | ) 195 | 196 | processingStage := fluxus.NewTimeout( 197 | fluxus.StageFunc[string, string](processingService.Call), 198 | 800*time.Millisecond, 199 | ) 200 | 201 | // Chain stages together with data transformations 202 | stage1 := fluxus.StageFunc[string, string](func(ctx context.Context, input string) (string, error) { 203 | // Authentication stage 204 | authResult, err := authStage.Process(ctx, fmt.Sprintf("auth:%s", input)) 205 | if err != nil { 206 | return "", fmt.Errorf("auth failed: %w", err) 207 | } 208 | return authResult, nil 209 | }) 210 | 211 | stage2 := fluxus.StageFunc[string, string](func(ctx context.Context, input string) (string, error) { 212 | // Database lookup stage 213 | dbResult, err := dbStage.Process(ctx, fmt.Sprintf("db:%s", input)) 214 | if err != nil { 215 | return "", fmt.Errorf("database lookup failed: %w", err) 216 | } 217 | return dbResult, nil 218 | }) 219 | 220 | stage3 := fluxus.StageFunc[string, string](func(ctx context.Context, input string) (string, error) { 221 | // Processing stage 222 | processResult, err := processingStage.Process(ctx, fmt.Sprintf("process:%s", input)) 223 | if err != nil { 224 | return "", fmt.Errorf("processing failed: %w", err) 225 | } 226 | return processResult, nil 227 | }) 228 | 229 | // Chain all stages 230 | chainedStage := fluxus.Chain( 231 | stage1, 232 | fluxus.Chain(stage2, stage3), 233 | ) 234 | 235 | // Create a pipeline with global timeout 236 | pipeline := fluxus.NewPipeline( 237 | fluxus.NewTimeout(chainedStage, 2*time.Second), 238 | ) 239 | 240 | // Process some requests 241 | for i := 1; i <= 5; i++ { 242 | fmt.Printf("\nRequest %d:\n", i) 243 | 244 | startTime := time.Now() 245 | requestID := fmt.Sprintf("request-%d", i) 246 | 247 | ctx := context.Background() 248 | result, err := pipeline.Process(ctx, requestID) 249 | 250 | duration := time.Since(startTime) 251 | 252 | if err != nil { 253 | fmt.Printf("❌ Pipeline failed after %.1f ms: %v\n", 254 | float64(duration.Microseconds())/1000, err) 255 | } else { 256 | fmt.Printf("✅ Pipeline completed in %.1f ms: %s\n", 257 | float64(duration.Microseconds())/1000, result) 258 | } 259 | } 260 | 261 | // Print service stats 262 | fmt.Println("\nService statistics:") 263 | fmt.Printf(" Auth 
Service: %d calls, %d successes, %d timeouts, %d natural failures\n",
264 | authService.callCount, authService.successCount,
265 | authService.timeoutCount, authService.naturalFailures)
266 | 
267 | fmt.Printf(" Database Service: %d calls, %d successes, %d timeouts, %d natural failures\n",
268 | databaseService.callCount, databaseService.successCount,
269 | databaseService.timeoutCount, databaseService.naturalFailures)
270 | 
271 | fmt.Printf(" Processing Service: %d calls, %d successes, %d timeouts, %d natural failures\n",
272 | processingService.callCount, processingService.successCount,
273 | processingService.timeoutCount, processingService.naturalFailures)
274 | }
275 | 
276 | func main() {
277 | // Note: math/rand auto-seeds its global generator since Go 1.20; no
278 | // manual seeding is needed here.
279 | 
280 | fmt.Println("Fluxus Timeout Mechanism Demonstration")
281 | fmt.Println("======================================")
282 | fmt.Println("This example demonstrates timeout handling to protect against")
283 | fmt.Println("slow or unresponsive services, allowing graceful failure when")
284 | fmt.Println("services don't respond within expected timeframes.")
285 | 
286 | // Create a service with variable latency
287 | // Latency between 200-800ms with 5% natural failure rate
288 | service := NewSlowService("VariableLatencyAPI", 200, 800, 0.05)
289 | 
290 | // Test different timeout settings
291 | timeouts := []time.Duration{
292 | 300 * time.Millisecond, // Very aggressive
293 | 500 * time.Millisecond, // Moderate
294 | 1000 * time.Millisecond, // Lenient
295 | }
296 | 
297 | // Run comparison demo
298 | RunComparisonDemo(service, 10, timeouts)
299 | 
300 | // Run chained timeouts demo
301 | RunChainedTimeoutsDemo()
302 | 
303 | fmt.Println("\nDemo Complete!")
304 | }
305 | 
--------------------------------------------------------------------------------
/example/tracing/tracing.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "math/rand"
8 | "os"
9 | "time"
10 | 
11 | "github.com/synoptiq/go-fluxus"
12 | 
13 | "go.opentelemetry.io/otel"
14 | "go.opentelemetry.io/otel/attribute"
15 | "go.opentelemetry.io/otel/codes"
16 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
17 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
18 | "go.opentelemetry.io/otel/sdk/resource"
19 | tracesdk "go.opentelemetry.io/otel/sdk/trace"
20 | semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
21 | )
22 | 
23 | // initTracer creates and registers a new trace provider with OTLP exporter
24 | func initTracer() (*tracesdk.TracerProvider, error) {
25 | fmt.Println("🔧 Initializing OpenTelemetry Tracer...")
26 | otlpEndpoint := os.Getenv("OTLP_ENDPOINT")
27 | if otlpEndpoint == "" {
28 | otlpEndpoint = "localhost:4317" // Default OTLP gRPC endpoint
29 | }
30 | fmt.Printf(" Using OTLP endpoint: %s\n", otlpEndpoint)
31 | 
32 | ctx := context.Background()
33 | traceExporter, err := otlptrace.New(
34 | ctx,
35 | otlptracegrpc.NewClient(
36 | otlptracegrpc.WithEndpoint(otlpEndpoint),
37 | otlptracegrpc.WithInsecure(), // Use insecure for local demo
38 | ),
39 | )
40 | if err != nil {
41 | return nil, fmt.Errorf("❌ failed to create trace exporter: %w", err)
42 | }
43 | 
44 | tp := tracesdk.NewTracerProvider(
45 | tracesdk.WithBatcher(traceExporter),
46 | tracesdk.WithSampler(tracesdk.AlwaysSample()), // Sample all traces for demo
47 | tracesdk.WithResource(resource.NewWithAttributes(
48 | semconv.SchemaURL,
49 |
semconv.ServiceNameKey.String("fluxus-tracing-example"), 50 | attribute.String("environment", "demo"), 51 | )), 52 | ) 53 | 54 | otel.SetTracerProvider(tp) 55 | fmt.Println("✅ Tracer initialized and registered globally.") 56 | return tp, nil 57 | } 58 | 59 | // simulateAPICall simulates an API call with some random delay and potential failure 60 | func simulateAPICall(ctx context.Context, apiName string) (map[string]interface{}, error) { 61 | // Create a span for the API call (will be child of pipeline stage span) 62 | tracer := otel.Tracer("api-client-simulator") 63 | _, span := tracer.Start(ctx, fmt.Sprintf("API Call: %s", apiName)) 64 | defer span.End() 65 | 66 | fmt.Printf(" 📞 Calling API: %s...\n", apiName) 67 | delay := 50 + rand.Intn(150) // Reduced delay for faster demo 68 | time.Sleep(time.Duration(delay) * time.Millisecond) 69 | 70 | span.SetAttributes( 71 | attribute.String("api.name", apiName), 72 | attribute.Int("api.latency_ms", delay), 73 | ) 74 | 75 | if rand.Float32() < 0.1 { // 10% failure rate 76 | err := fmt.Errorf("API %s failed simulation", apiName) 77 | span.RecordError(err) 78 | span.SetStatus(codes.Error, err.Error()) 79 | fmt.Printf(" ❌ API %s failed!\n", apiName) 80 | return nil, err 81 | } 82 | 83 | result := map[string]interface{}{ 84 | "api": apiName, 85 | "success": true, 86 | "latency": delay, 87 | "data": fmt.Sprintf("Sample data from %s", apiName), 88 | } 89 | span.SetStatus(codes.Ok, "Success") 90 | fmt.Printf(" ✅ API %s succeeded (%d ms).\n", apiName, delay) 91 | return result, nil 92 | } 93 | 94 | // --- API Simulation Functions --- 95 | // CORRECTED: Changed signature to accept Request struct 96 | func fetchUserData(ctx context.Context, _ Request) (map[string]interface{}, error) { 97 | // In a real scenario, you would use req.UserID here 98 | return simulateAPICall(ctx, "user-service") 99 | } 100 | 101 | // CORRECTED: Changed signature to accept Request struct 102 | func fetchProductData(ctx context.Context, _ Request) (map[string]interface{}, error) { 103 | // In a real scenario, you would use req.ProductID here 104 | return simulateAPICall(ctx, "product-service") 105 | } 106 | 107 | func fetchPricingData(ctx context.Context, _ string) (map[string]interface{}, error) { 108 | return simulateAPICall(ctx, "pricing-service") 109 | } 110 | func fetchRecommendations(ctx context.Context, _ string) (map[string]interface{}, error) { 111 | return simulateAPICall(ctx, "recommendation-service") 112 | } 113 | 114 | // Request represents the input to the pipeline 115 | type Request struct { 116 | UserID string 117 | ProductID string 118 | } 119 | 120 | // Response represents the output of the pipeline 121 | type Response struct { 122 | UserData map[string]interface{} 123 | ProductData map[string]interface{} 124 | PricingData map[string]interface{} 125 | Recommendations map[string]interface{} 126 | ProcessingTimeMs int64 127 | } 128 | 129 | // buildTracedPipeline constructs the example pipeline with tracing enabled 130 | func buildTracedPipeline() *fluxus.Pipeline[Request, Response] { 131 | fmt.Println("🛠️ Building traced pipeline...") 132 | 133 | // Stage 1: Fetch user and product data in parallel 134 | // CORRECTED: These conversions now work because the function signatures match 135 | userDataStage := fluxus.StageFunc[Request, map[string]interface{}](fetchUserData) 136 | productDataStage := fluxus.StageFunc[Request, map[string]interface{}](fetchProductData) 137 | 138 | // Wrap stages with tracing 139 | tracedUserDataStage := fluxus.NewTracedStage( 140 | 
userDataStage, 141 | fluxus.WithTracerName[Request, map[string]interface{}]("fetch-user-data"), 142 | fluxus.WithTracerAttributes[Request, map[string]interface{}](attribute.String("service.type", "user")), 143 | ) 144 | fmt.Println(" - Wrapped user data stage with tracing.") 145 | 146 | tracedProductDataStage := fluxus.NewTracedStage( 147 | productDataStage, 148 | fluxus.WithTracerName[Request, map[string]interface{}]("fetch-product-data"), 149 | fluxus.WithTracerAttributes[Request, map[string]interface{}](attribute.String("service.type", "product")), 150 | ) 151 | fmt.Println(" - Wrapped product data stage with tracing.") 152 | 153 | // Fan out for parallel fetching 154 | dataFetchStages := []fluxus.Stage[Request, map[string]interface{}]{ 155 | tracedUserDataStage, 156 | tracedProductDataStage, 157 | } 158 | dataFanOut := fluxus.NewFanOut(dataFetchStages...) 159 | 160 | // Add tracing to the fan-out operation itself 161 | tracedDataFanOut := fluxus.NewTracedFanOut( 162 | dataFanOut, 163 | "parallel-initial-fetch", 164 | attribute.String("operation", "data-fetch"), 165 | ) 166 | fmt.Println(" - Created traced fan-out for initial data fetch.") 167 | 168 | // Stage 2: Process the initial data and make additional API calls 169 | processDataStage := fluxus.StageFunc[[]map[string]interface{}, Response](func(ctx context.Context, results []map[string]interface{}) (Response, error) { 170 | fmt.Println(" ⚙️ Entering enrichment stage...") 171 | if len(results) != 2 { 172 | return Response{}, fmt.Errorf("expected 2 results from fan-out, got %d", len(results)) 173 | } 174 | 175 | response := Response{UserData: results[0], ProductData: results[1]} 176 | 177 | // --- Get UserID and ProductID from the context or initial request if needed --- 178 | // For this example, we'll use placeholder IDs as before, but in a real app 179 | // you might get these from the initial Request passed via context or results. 
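The placeholder IDs that follow sidestep a real question: how request-scoped values reach a stage deep in the pipeline. Below is a minimal sketch of the context-passing approach the surrounding comments allude to, using this file's `Request` type and an unexported key type so the key cannot collide with other packages (`withInitialRequest` and `initialRequestFrom` are illustrative helpers, not fluxus API):

```go
// ctxKey is unexported, so no other package can construct the same key.
type ctxKey struct{}

// withInitialRequest stores the pipeline's initial Request on the context.
func withInitialRequest(ctx context.Context, req Request) context.Context {
	return context.WithValue(ctx, ctxKey{}, req)
}

// initialRequestFrom recovers it; ok is false if nothing was stored.
func initialRequestFrom(ctx context.Context) (Request, bool) {
	req, ok := ctx.Value(ctxKey{}).(Request)
	return req, ok
}
```

The caller would wrap the context once before `pipeline.Process` with `ctx = withInitialRequest(ctx, request)`, and this stage would then call `initialRequestFrom(ctx)` instead of hard-coding IDs. Most linters flag string-keyed `context.WithValue` calls like the commented example that follows, which is why the private key type is preferred.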
180 | productID := "product-xyz" // Simplified for demo 181 | userID := "user-abc" // Simplified for demo 182 | // Example: If the Request struct was passed down via context: 183 | // if initialRequest, ok := ctx.Value("initialRequest").(Request); ok { 184 | // userID = initialRequest.UserID 185 | // productID = initialRequest.ProductID 186 | // } 187 | 188 | // Fetch pricing data (will create its own span via simulateAPICall) 189 | pricingData, err := fetchPricingData(ctx, productID) // Pass the correct ID 190 | if err != nil { 191 | return response, err 192 | } 193 | response.PricingData = pricingData 194 | 195 | // Fetch recommendations (will create its own span via simulateAPICall) 196 | recommendations, err := fetchRecommendations(ctx, userID) // Pass the correct ID 197 | if err != nil { 198 | return response, err 199 | } 200 | response.Recommendations = recommendations 201 | 202 | // Calculate total simulated latency 203 | totalLatency := int64(0) 204 | if l, ok := response.UserData["latency"].(int); ok { 205 | totalLatency += int64(l) 206 | } 207 | if l, ok := response.ProductData["latency"].(int); ok { 208 | totalLatency += int64(l) 209 | } 210 | if l, ok := response.PricingData["latency"].(int); ok { 211 | totalLatency += int64(l) 212 | } 213 | if l, ok := response.Recommendations["latency"].(int); ok { 214 | totalLatency += int64(l) 215 | } 216 | response.ProcessingTimeMs = totalLatency 217 | 218 | fmt.Println(" ✅ Enrichment stage complete.") 219 | return response, nil 220 | }) 221 | 222 | // Wrap enrichment stage with tracing 223 | tracedProcessDataStage := fluxus.NewTracedStage( 224 | processDataStage, 225 | fluxus.WithTracerName[[]map[string]interface{}, Response]("process-and-enrich-data"), 226 | fluxus.WithTracerAttributes[[]map[string]interface{}, Response](attribute.String("operation", "enrichment")), 227 | ) 228 | fmt.Println(" - Wrapped enrichment stage with tracing.") 229 | 230 | // Chain all stages together 231 | chainedStage := fluxus.Chain(tracedDataFanOut, tracedProcessDataStage) 232 | fmt.Println(" - Chained fan-out and enrichment stages.") 233 | 234 | // Create a pipeline with the chained stage 235 | pipeline := fluxus.NewPipeline(chainedStage) 236 | fmt.Println("✅ Pipeline built successfully.") 237 | return pipeline 238 | } 239 | 240 | func main() { 241 | fmt.Println("🛰️ Fluxus OpenTelemetry Tracing Demonstration") 242 | fmt.Println("===========================================") 243 | fmt.Println("This example shows how to integrate OpenTelemetry tracing into a Fluxus pipeline.") 244 | fmt.Println("Each stage and simulated API call will generate spans.") 245 | 246 | // Initialize OpenTelemetry tracing 247 | tp, err := initTracer() 248 | if err != nil { 249 | log.Fatalf("❌ Failed to initialize tracer: %v", err) 250 | } 251 | // Ensure tracer provider is shut down cleanly on exit 252 | defer func() { 253 | fmt.Println("🔌 Shutting down tracer provider...") 254 | shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 255 | defer cancel() 256 | if err := tp.Shutdown(shutdownCtx); err != nil { 257 | log.Printf("⚠️ Error shutting down tracer provider: %v", err) 258 | } else { 259 | fmt.Println("✅ Tracer provider shut down.") 260 | } 261 | }() 262 | 263 | // Create a sample request 264 | request := Request{UserID: "user123", ProductID: "product456"} 265 | 266 | // Build the traced pipeline 267 | pipeline := buildTracedPipeline() 268 | 269 | // --- Process the request --- 270 | fmt.Println("\n▶️ Processing request through the pipeline...") 271 | 
tracer := otel.Tracer("main-processor") // Get a tracer for the main operation 272 | ctx, rootSpan := tracer.Start(context.Background(), "HandleProductViewRequest") 273 | rootSpan.SetAttributes( 274 | attribute.String("user.id", request.UserID), 275 | attribute.String("product.id", request.ProductID), 276 | ) 277 | 278 | // Optional: Pass the initial request down via context if needed by later stages 279 | // ctx = context.WithValue(ctx, "initialRequest", request) 280 | 281 | startTime := time.Now() 282 | result, err := pipeline.Process(ctx, request) // Pass the context with the root span 283 | duration := time.Since(startTime) 284 | 285 | if err != nil { 286 | fmt.Printf("❌ Pipeline processing failed after %v: %v\n", duration, err) 287 | rootSpan.RecordError(err) 288 | rootSpan.SetStatus(codes.Error, "Pipeline failed") 289 | } else { 290 | fmt.Printf("✅ Pipeline processing successful in %v.\n", duration) 291 | rootSpan.SetAttributes(attribute.Int64("total_simulated_latency_ms", result.ProcessingTimeMs)) 292 | rootSpan.SetStatus(codes.Ok, "Success") 293 | } 294 | rootSpan.End() // End the root span 295 | 296 | // --- Output Results --- 297 | if err == nil { 298 | fmt.Println("\n📊 Processing Summary:") 299 | fmt.Printf(" User: %s, Product: %s\n", request.UserID, request.ProductID) 300 | fmt.Printf(" Total Simulated API Latency: %d ms\n", result.ProcessingTimeMs) 301 | // You could print more details from the result if needed 302 | } 303 | 304 | // --- Trace Viewing Instructions --- 305 | fmt.Println("\n📍 Trace Viewing Instructions:") 306 | fmt.Println(" Traces have been exported via OTLP.") 307 | fmt.Println(" Ensure an OTLP collector (like Jaeger, Zipkin, Tempo, etc.) is running") 308 | fmt.Println(" and accessible at the configured endpoint (default: localhost:4317).") 309 | fmt.Println(" Example Jaeger setup (Docker):") 310 | fmt.Println(" docker run -d --name jaeger -p 16686:16686 -p 4317:4317 jaegertracing/all-in-one:latest") 311 | fmt.Println(" View traces in your backend UI (e.g., http://localhost:16686 for Jaeger).") 312 | 313 | fmt.Println("\nDemo Complete!") 314 | // Add a small delay to ensure traces are flushed before exit 315 | time.Sleep(2 * time.Second) 316 | } 317 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/synoptiq/go-fluxus 2 | 3 | go 1.23.4 4 | 5 | require ( 6 | github.com/mattn/go-sqlite3 v1.14.27 7 | github.com/stretchr/testify v1.10.0 8 | go.opentelemetry.io/otel v1.35.0 9 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 10 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 11 | go.opentelemetry.io/otel/sdk v1.35.0 12 | go.opentelemetry.io/otel/trace v1.35.0 13 | golang.org/x/time v0.11.0 14 | ) 15 | 16 | require ( 17 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect 18 | github.com/davecgh/go-spew v1.1.1 // indirect 19 | github.com/go-logr/logr v1.4.2 // indirect 20 | github.com/go-logr/stdr v1.2.2 // indirect 21 | github.com/google/uuid v1.6.0 // indirect 22 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect 23 | github.com/pmezard/go-difflib v1.0.0 // indirect 24 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 25 | go.opentelemetry.io/otel/metric v1.35.0 // indirect 26 | go.opentelemetry.io/proto/otlp v1.5.0 // indirect 27 | golang.org/x/net v0.35.0 // indirect 28 | golang.org/x/sys v0.30.0 // indirect 29 | golang.org/x/text v0.22.0 // indirect 30 | 
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect 31 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect 32 | google.golang.org/grpc v1.71.1 // indirect 33 | google.golang.org/protobuf v1.36.5 // indirect 34 | gopkg.in/yaml.v3 v3.0.1 // indirect 35 | ) 36 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= 2 | github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 6 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 7 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 8 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 9 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 10 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 11 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 12 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 13 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 14 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 15 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 16 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= 17 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= 18 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 19 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 20 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 21 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 22 | github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= 23 | github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= 24 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 25 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 26 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 27 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 28 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 29 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 30 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 31 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 32 | go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= 33 | go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= 34 | 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= 35 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= 36 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= 37 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= 38 | go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= 39 | go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= 40 | go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= 41 | go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= 42 | go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= 43 | go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= 44 | go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= 45 | go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= 46 | go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= 47 | go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= 48 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 49 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 50 | golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= 51 | golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= 52 | golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= 53 | golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 54 | golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= 55 | golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= 56 | golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= 57 | golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= 58 | google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= 59 | google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= 60 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= 61 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= 62 | google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= 63 | google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= 64 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 65 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 66 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 67 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 68 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 69 | gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
70 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
71 | 
--------------------------------------------------------------------------------
/logo.svg:
--------------------------------------------------------------------------------
[SVG markup not preserved in this dump; the file renders the project's "FLUXUS" wordmark logo.]
--------------------------------------------------------------------------------
/metrics.go:
--------------------------------------------------------------------------------
1 | package fluxus
2 | 
3 | import (
4 | "context"
5 | "time"
6 | )
7 | 
8 | // MetricsCollector defines an interface for collecting metrics about pipeline operations.
9 | // This allows for integration with various monitoring systems like Prometheus, StatsD, etc.
10 | type MetricsCollector interface {
11 | // StageStarted is called when a stage begins processing.
12 | StageStarted(ctx context.Context, stageName string)
13 | 
14 | // StageCompleted is called when a stage successfully completes processing.
15 | StageCompleted(ctx context.Context, stageName string, duration time.Duration)
16 | 
17 | // StageError is called when a stage encounters an error.
18 | StageError(ctx context.Context, stageName string, err error)
19 | 
20 | // RetryAttempt is called for each retry attempt.
21 | RetryAttempt(ctx context.Context, stageName string, attempt int, err error)
22 | 
23 | // BufferBatchProcessed is called when a batch is processed in a buffer.
24 | BufferBatchProcessed(ctx context.Context, batchSize int, duration time.Duration)
25 | 
26 | // FanOutStarted is called when a fan-out operation begins.
27 | FanOutStarted(ctx context.Context, numStages int)
28 | 
29 | // FanOutCompleted is called when a fan-out operation completes.
30 | FanOutCompleted(ctx context.Context, numStages int, duration time.Duration)
31 | 
32 | // FanInStarted is called when a fan-in operation begins.
33 | FanInStarted(ctx context.Context, numInputs int)
34 | 
35 | // FanInCompleted is called when a fan-in operation completes.
36 | FanInCompleted(ctx context.Context, numInputs int, duration time.Duration)
37 | }
38 | 
39 | // NoopMetricsCollector is a metrics collector that does nothing.
40 | // It's useful as a default when no metrics collection is needed.
41 | type NoopMetricsCollector struct{}
42 | 
43 | // Ensure NoopMetricsCollector implements MetricsCollector
44 | var _ MetricsCollector = (*NoopMetricsCollector)(nil)
45 | 
46 | // StageStarted implements MetricsCollector.
47 | func (*NoopMetricsCollector) StageStarted(_ context.Context, _ string) {}
48 | 
49 | // StageCompleted implements MetricsCollector.
50 | func (*NoopMetricsCollector) StageCompleted(_ context.Context, _ string, _ time.Duration) {
51 | }
52 | 
53 | // StageError implements MetricsCollector.
54 | func (*NoopMetricsCollector) StageError(_ context.Context, _ string, _ error) {}
55 | 
56 | // RetryAttempt implements MetricsCollector.
57 | func (*NoopMetricsCollector) RetryAttempt(_ context.Context, _ string, _ int, _ error) {
58 | }
59 | 
60 | // BufferBatchProcessed implements MetricsCollector.
61 | func (*NoopMetricsCollector) BufferBatchProcessed(_ context.Context, _ int, _ time.Duration) {
62 | }
63 | 
64 | // FanOutStarted implements MetricsCollector.
65 | func (*NoopMetricsCollector) FanOutStarted(_ context.Context, _ int) {}
66 | 
67 | // FanOutCompleted implements MetricsCollector.
68 | func (*NoopMetricsCollector) FanOutCompleted(_ context.Context, _ int, _ time.Duration) { 69 | } 70 | 71 | // FanInStarted implements MetricsCollector. 72 | func (*NoopMetricsCollector) FanInStarted(_ context.Context, _ int) {} 73 | 74 | // FanInCompleted implements MetricsCollector. 75 | func (*NoopMetricsCollector) FanInCompleted(_ context.Context, _ int, _ time.Duration) { 76 | } 77 | 78 | // DefaultMetricsCollector is the default metrics collector used when none is provided. 79 | var DefaultMetricsCollector MetricsCollector = &NoopMetricsCollector{} 80 | 81 | // MetricatedStage wraps any Stage with metrics collection 82 | type MetricatedStage[I, O any] struct { 83 | // The underlying stage 84 | stage Stage[I, O] 85 | 86 | // Name for metrics and logging 87 | name string 88 | 89 | // Metrics collector 90 | metricsCollector MetricsCollector 91 | } 92 | 93 | // MetricatedStageOption is a function that configures a MetricatedStage. 94 | type MetricatedStageOption[I, O any] func(*MetricatedStage[I, O]) 95 | 96 | // WithMetricsCollector adds a metrics collector to the metricated stage. 97 | func WithMetricsCollector[I, O any](collector MetricsCollector) MetricatedStageOption[I, O] { 98 | return func(ms *MetricatedStage[I, O]) { 99 | ms.metricsCollector = collector 100 | } 101 | } 102 | 103 | // WithStageName adds a name to the metricated stage for metrics and logging. 104 | func WithStageName[I, O any](name string) MetricatedStageOption[I, O] { 105 | return func(ms *MetricatedStage[I, O]) { 106 | ms.name = name 107 | } 108 | } 109 | 110 | // NewMetricatedStage creates a new metricated stage that wraps another stage. 111 | func NewMetricatedStage[I, O any]( 112 | stage Stage[I, O], 113 | options ...MetricatedStageOption[I, O], 114 | ) *MetricatedStage[I, O] { 115 | ms := &MetricatedStage[I, O]{ 116 | stage: stage, 117 | name: "metricated_stage", 118 | metricsCollector: DefaultMetricsCollector, 119 | } 120 | 121 | // Apply options 122 | for _, option := range options { 123 | option(ms) 124 | } 125 | 126 | return ms 127 | } 128 | 129 | // Process implements the Stage interface for MetricatedStage. 130 | func (ms *MetricatedStage[I, O]) Process(ctx context.Context, input I) (O, error) { 131 | // Track starting time for metrics 132 | startTime := time.Now() 133 | 134 | // Signal stage start 135 | if ms.metricsCollector != nil { 136 | ms.metricsCollector.StageStarted(ctx, ms.name) 137 | } 138 | 139 | // Process the request using the underlying stage 140 | output, err := ms.stage.Process(ctx, input) 141 | 142 | // Record metrics based on the result 143 | if ms.metricsCollector != nil { 144 | if err != nil { 145 | ms.metricsCollector.StageError(ctx, ms.name, err) 146 | } else { 147 | ms.metricsCollector.StageCompleted(ctx, ms.name, time.Since(startTime)) 148 | } 149 | } 150 | 151 | return output, err 152 | } 153 | 154 | // Factory functions for creating metricated versions of specific stages 155 | 156 | // MetricatedFanOutStage wraps a FanOut with additional fan-out specific metrics 157 | type MetricatedFanOutStage[I, O any] struct { 158 | stage *FanOut[I, O] 159 | name string 160 | metricsCollector MetricsCollector 161 | } 162 | 163 | // NewMetricatedFanOut creates a metricated wrapper around a FanOut stage. 
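// A hedged usage sketch (stageA/stageB and the names are illustrative;
// NewFanOut is the variadic constructor used in example/tracing):
//
//	fanOut := NewFanOut(stageA, stageB) // both Stage[string, string]
//	stage := NewMetricatedFanOut(fanOut,
//		WithStageName[string, []string]("parallel-fetch"),
//		WithMetricsCollector[string, []string](collector),
//	)
//	results, err := stage.Process(ctx, "input") // []string, one per inner stage
//
// This reports FanOutStarted/FanOutCompleted alongside the usual stage metrics.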
164 | func NewMetricatedFanOut[I, O any]( 165 | fanOut *FanOut[I, O], 166 | options ...MetricatedStageOption[I, []O], 167 | ) Stage[I, []O] { 168 | // Create a proxy for handling options 169 | ms := &MetricatedStage[I, []O]{ 170 | stage: fanOut, 171 | name: "metricated_fan_out", 172 | metricsCollector: DefaultMetricsCollector, 173 | } 174 | 175 | // Apply options 176 | for _, option := range options { 177 | option(ms) 178 | } 179 | 180 | // Create the specialized stage 181 | return &MetricatedFanOutStage[I, O]{ 182 | stage: fanOut, 183 | name: ms.name, 184 | metricsCollector: ms.metricsCollector, 185 | } 186 | } 187 | 188 | // Process implements the Stage interface for MetricatedFanOutStage 189 | func (ms *MetricatedFanOutStage[I, O]) Process(ctx context.Context, input I) ([]O, error) { 190 | // Track starting time for metrics 191 | startTime := time.Now() 192 | 193 | // Signal stage start 194 | if ms.metricsCollector != nil { 195 | ms.metricsCollector.StageStarted(ctx, ms.name) 196 | ms.metricsCollector.FanOutStarted(ctx, len(ms.stage.stages)) 197 | } 198 | 199 | // Process the request using the underlying stage 200 | output, err := ms.stage.Process(ctx, input) 201 | 202 | // Record metrics based on the result 203 | if ms.metricsCollector != nil { 204 | if err != nil { 205 | ms.metricsCollector.StageError(ctx, ms.name, err) 206 | } else { 207 | duration := time.Since(startTime) 208 | ms.metricsCollector.StageCompleted(ctx, ms.name, duration) 209 | ms.metricsCollector.FanOutCompleted(ctx, len(ms.stage.stages), duration) 210 | } 211 | } 212 | 213 | return output, err 214 | } 215 | 216 | // MetricatedFanInStage wraps a FanIn with additional fan-in specific metrics 217 | type MetricatedFanInStage[I, O any] struct { 218 | stage *FanIn[I, O] 219 | name string 220 | metricsCollector MetricsCollector 221 | } 222 | 223 | // NewMetricatedFanIn creates a metricated wrapper around a FanIn stage. 
224 | func NewMetricatedFanIn[I, O any]( 225 | fanIn *FanIn[I, O], 226 | options ...MetricatedStageOption[[]I, O], 227 | ) Stage[[]I, O] { 228 | // Create a proxy for handling options 229 | ms := &MetricatedStage[[]I, O]{ 230 | stage: fanIn, 231 | name: "metricated_fan_in", 232 | metricsCollector: DefaultMetricsCollector, 233 | } 234 | 235 | // Apply options 236 | for _, option := range options { 237 | option(ms) 238 | } 239 | 240 | // Create the specialized stage 241 | return &MetricatedFanInStage[I, O]{ 242 | stage: fanIn, 243 | name: ms.name, 244 | metricsCollector: ms.metricsCollector, 245 | } 246 | } 247 | 248 | // Process implements the Stage interface for MetricatedFanInStage 249 | func (ms *MetricatedFanInStage[I, O]) Process(ctx context.Context, inputs []I) (O, error) { 250 | // Track starting time for metrics 251 | startTime := time.Now() 252 | 253 | // Signal stage start 254 | if ms.metricsCollector != nil { 255 | ms.metricsCollector.StageStarted(ctx, ms.name) 256 | ms.metricsCollector.FanInStarted(ctx, len(inputs)) 257 | } 258 | 259 | // Process the request using the underlying stage 260 | output, err := ms.stage.Process(ctx, inputs) 261 | 262 | // Record metrics based on the result 263 | if ms.metricsCollector != nil { 264 | if err != nil { 265 | ms.metricsCollector.StageError(ctx, ms.name, err) 266 | } else { 267 | duration := time.Since(startTime) 268 | ms.metricsCollector.StageCompleted(ctx, ms.name, duration) 269 | ms.metricsCollector.FanInCompleted(ctx, len(inputs), duration) 270 | } 271 | } 272 | 273 | return output, err 274 | } 275 | 276 | // MetricatedBufferStage wraps a Buffer with buffer-specific metrics 277 | type MetricatedBufferStage[I, O any] struct { 278 | stage *Buffer[I, O] 279 | name string 280 | metricsCollector MetricsCollector 281 | } 282 | 283 | // NewMetricatedBuffer creates a metricated wrapper around a Buffer stage. 
284 | func NewMetricatedBuffer[I, O any]( 285 | buffer *Buffer[I, O], 286 | options ...MetricatedStageOption[[]I, []O], 287 | ) Stage[[]I, []O] { 288 | // Create a proxy for handling options 289 | ms := &MetricatedStage[[]I, []O]{ 290 | stage: buffer, 291 | name: "metricated_buffer", 292 | metricsCollector: DefaultMetricsCollector, 293 | } 294 | 295 | // Apply options 296 | for _, option := range options { 297 | option(ms) 298 | } 299 | 300 | // Create the specialized stage 301 | return &MetricatedBufferStage[I, O]{ 302 | stage: buffer, 303 | name: ms.name, 304 | metricsCollector: ms.metricsCollector, 305 | } 306 | } 307 | 308 | // Process implements the Stage interface for MetricatedBufferStage 309 | func (ms *MetricatedBufferStage[I, O]) Process(ctx context.Context, inputs []I) ([]O, error) { 310 | // Track starting time for metrics 311 | startTime := time.Now() 312 | 313 | // Signal stage start 314 | if ms.metricsCollector != nil { 315 | ms.metricsCollector.StageStarted(ctx, ms.name) 316 | } 317 | 318 | // Process the request using the underlying buffer stage 319 | // Note: The Buffer will call BufferBatchProcessed internally for each batch 320 | output, err := ms.stage.Process(ctx, inputs) 321 | 322 | // Record metrics based on the result 323 | if ms.metricsCollector != nil { 324 | if err != nil { 325 | ms.metricsCollector.StageError(ctx, ms.name, err) 326 | } else { 327 | ms.metricsCollector.StageCompleted(ctx, ms.name, time.Since(startTime)) 328 | } 329 | } 330 | 331 | return output, err 332 | } 333 | 334 | // MetricatedRetryStage wraps a Retry with additional retry-specific metrics 335 | type MetricatedRetryStage[I, O any] struct { 336 | stage *Retry[I, O] 337 | name string 338 | metricsCollector MetricsCollector 339 | } 340 | 341 | // NewMetricatedRetry creates a metricated wrapper around a Retry stage. 
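// Besides the standard stage metrics, the wrapper reports a RetryAttempt
// event before every attempt. To count attempts, Process temporarily swaps
// the Retry's underlying stage for a counting wrapper and restores it
// afterwards, so a single metricated retry should not be shared by
// concurrent Process calls. Sketch (flakyStage and collector are
// placeholders):
//
//	retry := NewRetry(flakyStage, 3) // up to 3 attempts
//	stage := NewMetricatedRetry(retry,
//		WithStageName[string, string]("fetch_with_retry"),
//		WithMetricsCollector[string, string](collector),
//	)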
342 | func NewMetricatedRetry[I, O any]( 343 | retry *Retry[I, O], 344 | options ...MetricatedStageOption[I, O], 345 | ) Stage[I, O] { 346 | // Create a proxy for handling options 347 | ms := &MetricatedStage[I, O]{ 348 | stage: retry, 349 | name: "metricated_retry", 350 | metricsCollector: DefaultMetricsCollector, 351 | } 352 | 353 | // Apply options 354 | for _, option := range options { 355 | option(ms) 356 | } 357 | 358 | // Create the specialized stage 359 | return &MetricatedRetryStage[I, O]{ 360 | stage: retry, 361 | name: ms.name, 362 | metricsCollector: ms.metricsCollector, 363 | } 364 | } 365 | 366 | // Process implements the Stage interface for MetricatedRetryStage 367 | func (ms *MetricatedRetryStage[I, O]) Process(ctx context.Context, input I) (O, error) { 368 | // Track starting time for metrics 369 | startTime := time.Now() 370 | 371 | // Signal stage start 372 | if ms.metricsCollector != nil { 373 | ms.metricsCollector.StageStarted(ctx, ms.name) 374 | } 375 | 376 | // Create a counter for tracking attempts 377 | attemptCount := 0 378 | 379 | // Temporarily replace the original stage with our counted version 380 | originalStage := ms.stage.stage 381 | 382 | // Create a wrapper stage that counts attempts 383 | countingStage := StageFunc[I, O](func(ctx context.Context, input I) (O, error) { 384 | // Increment attempt counter before each attempt 385 | attemptCount++ 386 | 387 | // Record the attempt 388 | if ms.metricsCollector != nil { 389 | ms.metricsCollector.RetryAttempt(ctx, ms.name, attemptCount, nil) 390 | } 391 | 392 | // Forward to the original stage 393 | return originalStage.Process(ctx, input) 394 | }) 395 | 396 | // Temporarily replace the stage with our counting version 397 | ms.stage.stage = countingStage 398 | 399 | // Process using the modified retry stage 400 | output, err := ms.stage.Process(ctx, input) 401 | 402 | // Restore the original stage 403 | ms.stage.stage = originalStage 404 | 405 | // Record metrics based on the result 406 | if ms.metricsCollector != nil { 407 | if err != nil { 408 | ms.metricsCollector.StageError(ctx, ms.name, err) 409 | } else { 410 | ms.metricsCollector.StageCompleted(ctx, ms.name, time.Since(startTime)) 411 | } 412 | } 413 | 414 | return output, err 415 | } 416 | -------------------------------------------------------------------------------- /metrics_test.go: -------------------------------------------------------------------------------- 1 | package fluxus_test 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "strings" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/synoptiq/go-fluxus" 13 | ) 14 | 15 | // mockMetricsCollector records metrics for testing 16 | type mockMetricsCollector struct { 17 | stageStarted int64 18 | stageCompleted int64 19 | stageErrors int64 20 | retryAttempts int64 21 | fanOutStarted int64 22 | fanOutCompleted int64 23 | fanInStarted int64 24 | fanInCompleted int64 25 | bufferBatchCalled int64 26 | } 27 | 28 | func (m *mockMetricsCollector) StageStarted(_ context.Context, _ string) { 29 | atomic.AddInt64(&m.stageStarted, 1) 30 | } 31 | 32 | func (m *mockMetricsCollector) StageCompleted(_ context.Context, _ string, _ time.Duration) { 33 | atomic.AddInt64(&m.stageCompleted, 1) 34 | } 35 | 36 | func (m *mockMetricsCollector) StageError(_ context.Context, _ string, _ error) { 37 | atomic.AddInt64(&m.stageErrors, 1) 38 | } 39 | 40 | func (m *mockMetricsCollector) RetryAttempt(_ context.Context, _ string, _ int, _ error) { 41 | atomic.AddInt64(&m.retryAttempts, 1) 42 | } 43 | 44 | 
func (m *mockMetricsCollector) BufferBatchProcessed(_ context.Context, _ int, _ time.Duration) { 45 | atomic.AddInt64(&m.bufferBatchCalled, 1) 46 | } 47 | 48 | func (m *mockMetricsCollector) FanOutStarted(_ context.Context, _ int) { 49 | atomic.AddInt64(&m.fanOutStarted, 1) 50 | } 51 | 52 | func (m *mockMetricsCollector) FanOutCompleted(_ context.Context, _ int, _ time.Duration) { 53 | atomic.AddInt64(&m.fanOutCompleted, 1) 54 | } 55 | 56 | func (m *mockMetricsCollector) FanInStarted(_ context.Context, _ int) { 57 | atomic.AddInt64(&m.fanInStarted, 1) 58 | } 59 | 60 | func (m *mockMetricsCollector) FanInCompleted(_ context.Context, _ int, _ time.Duration) { 61 | atomic.AddInt64(&m.fanInCompleted, 1) 62 | } 63 | 64 | // TestMetricatedStage tests the basic MetricatedStage functionality 65 | func TestMetricatedStage(t *testing.T) { 66 | // Create a mock metrics collector 67 | collector := &mockMetricsCollector{} 68 | 69 | // Create a simple stage 70 | stage := fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 71 | return strings.ToUpper(input), nil 72 | }) 73 | 74 | // Wrap it with metrics 75 | metricated := fluxus.NewMetricatedStage( 76 | stage, 77 | fluxus.WithStageName[string, string]("test_stage"), 78 | fluxus.WithMetricsCollector[string, string](collector), 79 | ) 80 | 81 | // Process input 82 | result, err := metricated.Process(context.Background(), "hello") 83 | if err != nil { 84 | t.Fatalf("Unexpected error: %v", err) 85 | } 86 | 87 | // Verify result 88 | if result != "HELLO" { 89 | t.Errorf("Expected 'HELLO', got '%s'", result) 90 | } 91 | 92 | // Verify metrics 93 | if collector.stageStarted != 1 { 94 | t.Errorf("Expected StageStarted to be called once, got %d", collector.stageStarted) 95 | } 96 | if collector.stageCompleted != 1 { 97 | t.Errorf("Expected StageCompleted to be called once, got %d", collector.stageCompleted) 98 | } 99 | if collector.stageErrors != 0 { 100 | t.Errorf("Expected StageError not to be called, got %d", collector.stageErrors) 101 | } 102 | } 103 | 104 | // TestMetricatedStageError tests error metrics 105 | func TestMetricatedStageError(t *testing.T) { 106 | // Create a mock metrics collector 107 | collector := &mockMetricsCollector{} 108 | 109 | // Create a stage that returns an error 110 | expectedErr := errors.New("test error") 111 | stage := fluxus.StageFunc[string, string](func(_ context.Context, _ string) (string, error) { 112 | return "", expectedErr 113 | }) 114 | 115 | // Wrap it with metrics 116 | metricated := fluxus.NewMetricatedStage( 117 | stage, 118 | fluxus.WithStageName[string, string]("error_stage"), 119 | fluxus.WithMetricsCollector[string, string](collector), 120 | ) 121 | 122 | // Process input 123 | _, err := metricated.Process(context.Background(), "hello") 124 | if !errors.Is(err, expectedErr) { 125 | t.Fatalf("Expected error %v, got %v", expectedErr, err) 126 | } 127 | 128 | // Verify metrics 129 | if collector.stageStarted != 1 { 130 | t.Errorf("Expected StageStarted to be called once, got %d", collector.stageStarted) 131 | } 132 | if collector.stageCompleted != 0 { 133 | t.Errorf("Expected StageCompleted not to be called, got %d", collector.stageCompleted) 134 | } 135 | if collector.stageErrors != 1 { 136 | t.Errorf("Expected StageError to be called once, got %d", collector.stageErrors) 137 | } 138 | } 139 | 140 | // TestMetricatedFanOut tests the FanOut metrics 141 | func TestMetricatedFanOut(t *testing.T) { 142 | // Create a mock metrics collector 143 | collector := &mockMetricsCollector{} 
144 | 145 | // Create stages for FanOut 146 | stage1 := fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 147 | return strings.ToUpper(input), nil 148 | }) 149 | 150 | stage2 := fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 151 | return strings.ToLower(input), nil 152 | }) 153 | 154 | // Create a FanOut 155 | fanOut := fluxus.NewFanOut(stage1, stage2) 156 | 157 | // Wrap with metrics 158 | metricated := fluxus.NewMetricatedFanOut( 159 | fanOut, 160 | fluxus.WithStageName[string, []string]("test_fan_out"), 161 | fluxus.WithMetricsCollector[string, []string](collector), 162 | ) 163 | 164 | // Process input 165 | results, err := metricated.Process(context.Background(), "Hello") 166 | if err != nil { 167 | t.Fatalf("Unexpected error: %v", err) 168 | } 169 | 170 | // Verify results 171 | if len(results) != 2 || results[0] != "HELLO" || results[1] != "hello" { 172 | t.Errorf("Unexpected results: %v", results) 173 | } 174 | 175 | // Verify metrics 176 | if collector.stageStarted != 1 { 177 | t.Errorf("Expected StageStarted to be called once, got %d", collector.stageStarted) 178 | } 179 | if collector.stageCompleted != 1 { 180 | t.Errorf("Expected StageCompleted to be called once, got %d", collector.stageCompleted) 181 | } 182 | if collector.fanOutStarted != 1 { 183 | t.Errorf("Expected FanOutStarted to be called once, got %d", collector.fanOutStarted) 184 | } 185 | if collector.fanOutCompleted != 1 { 186 | t.Errorf("Expected FanOutCompleted to be called once, got %d", collector.fanOutCompleted) 187 | } 188 | } 189 | 190 | // TestMetricatedFanIn tests the FanIn metrics 191 | func TestMetricatedFanIn(t *testing.T) { 192 | // Create a mock metrics collector 193 | collector := &mockMetricsCollector{} 194 | 195 | // Create a FanIn 196 | fanIn := fluxus.NewFanIn(func(inputs []string) (string, error) { 197 | return strings.Join(inputs, ", "), nil 198 | }) 199 | 200 | // Wrap with metrics 201 | metricated := fluxus.NewMetricatedFanIn( 202 | fanIn, 203 | fluxus.WithStageName[[]string, string]("test_fan_in"), 204 | fluxus.WithMetricsCollector[[]string, string](collector), 205 | ) 206 | 207 | // Process input 208 | result, err := metricated.Process(context.Background(), []string{"hello", "world"}) 209 | if err != nil { 210 | t.Fatalf("Unexpected error: %v", err) 211 | } 212 | 213 | // Verify result 214 | if result != "hello, world" { 215 | t.Errorf("Expected 'hello, world', got '%s'", result) 216 | } 217 | 218 | // Verify metrics 219 | if collector.stageStarted != 1 { 220 | t.Errorf("Expected StageStarted to be called once, got %d", collector.stageStarted) 221 | } 222 | if collector.stageCompleted != 1 { 223 | t.Errorf("Expected StageCompleted to be called once, got %d", collector.stageCompleted) 224 | } 225 | if collector.fanInStarted != 1 { 226 | t.Errorf("Expected FanInStarted to be called once, got %d", collector.fanInStarted) 227 | } 228 | if collector.fanInCompleted != 1 { 229 | t.Errorf("Expected FanInCompleted to be called once, got %d", collector.fanInCompleted) 230 | } 231 | } 232 | 233 | // TestMetricatedRetry tests the Retry metrics 234 | func TestMetricatedRetry(t *testing.T) { 235 | // Create a mock metrics collector 236 | collector := &mockMetricsCollector{} 237 | 238 | // Create a stage that fails a few times then succeeds 239 | attemptCount := 0 240 | maxFailures := 2 241 | stage := fluxus.StageFunc[string, string](func(_ context.Context, _ string) (string, error) { 242 | attemptCount++ 243 | if attemptCount 
<= maxFailures { 244 | return "", fmt.Errorf("attempt %d failed", attemptCount) 245 | } 246 | return fmt.Sprintf("Success on attempt %d", attemptCount), nil 247 | }) 248 | 249 | // Create a retry stage 250 | retry := fluxus.NewRetry(stage, maxFailures+1) 251 | 252 | // Wrap with metrics 253 | metricated := fluxus.NewMetricatedRetry( 254 | retry, 255 | fluxus.WithStageName[string, string]("test_retry"), 256 | fluxus.WithMetricsCollector[string, string](collector), 257 | ) 258 | 259 | // Process input 260 | result, err := metricated.Process(context.Background(), "test") 261 | if err != nil { 262 | t.Fatalf("Unexpected error: %v", err) 263 | } 264 | 265 | // Verify result 266 | expected := "Success on attempt 3" 267 | if result != expected { 268 | t.Errorf("Expected '%s', got '%s'", expected, result) 269 | } 270 | 271 | // Verify metrics 272 | if collector.stageStarted != 1 { 273 | t.Errorf("Expected StageStarted to be called once, got %d", collector.stageStarted) 274 | } 275 | if collector.stageCompleted != 1 { 276 | t.Errorf("Expected StageCompleted to be called once, got %d", collector.stageCompleted) 277 | } 278 | if collector.retryAttempts != int64(maxFailures+1) { 279 | t.Errorf("Expected RetryAttempt to be called %d times, got %d", maxFailures+1, collector.retryAttempts) 280 | } 281 | } 282 | 283 | // BenchmarkMetricatedStage benchmarks the overhead of adding metrics to a stage 284 | func BenchmarkMetricatedStage(b *testing.B) { 285 | // Create a simple stage 286 | stage := fluxus.StageFunc[string, string](func(_ context.Context, input string) (string, error) { 287 | return strings.ToUpper(input), nil 288 | }) 289 | 290 | // Benchmark without metrics 291 | b.Run("WithoutMetrics", func(b *testing.B) { 292 | ctx := context.Background() 293 | input := "benchmark" 294 | b.ResetTimer() 295 | for i := 0; i < b.N; i++ { 296 | _, _ = stage.Process(ctx, input) 297 | } 298 | }) 299 | 300 | // Benchmark with metrics 301 | b.Run("WithMetrics", func(b *testing.B) { 302 | // Create a no-op metrics collector to avoid external dependencies in benchmark 303 | collector := &mockMetricsCollector{} 304 | metricated := fluxus.NewMetricatedStage( 305 | stage, 306 | fluxus.WithStageName[string, string]("benchmark_stage"), 307 | fluxus.WithMetricsCollector[string, string](collector), 308 | ) 309 | 310 | ctx := context.Background() 311 | input := "benchmark" 312 | b.ResetTimer() 313 | for i := 0; i < b.N; i++ { 314 | _, _ = metricated.Process(ctx, input) 315 | } 316 | }) 317 | } 318 | -------------------------------------------------------------------------------- /pool.go: -------------------------------------------------------------------------------- 1 | package fluxus 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "runtime" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | ) 11 | 12 | // PoolStats defines an interface for objects that provide pool statistics 13 | type PoolStats interface { 14 | // Stats returns statistics about the pool usage 15 | Stats() map[string]int64 16 | 17 | // Name returns the name of the pool 18 | Name() string 19 | } 20 | 21 | // ObjectPool provides a generic object pool for frequent allocation reduction. 22 | // This implementation includes statistics and optimizations for performance. 
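// A minimal usage sketch (the factory, name, and capacity are illustrative):
//
//	pool := NewObjectPool(
//		func() *bytes.Buffer { return new(bytes.Buffer) },
//		WithPoolName[*bytes.Buffer]("buffer_pool"),
//		WithMaxCapacity[*bytes.Buffer](128),
//	)
//	buf := pool.Get()
//	buf.Reset() // callers are responsible for resetting reused objects
//	defer pool.Put(buf)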
23 | type ObjectPool[T any] struct { 24 | pool sync.Pool 25 | name string 26 | gets int64 // Atomic counter for gets 27 | puts int64 // Atomic counter for puts 28 | misses int64 // Atomic counter for cache misses 29 | maxCapacity int // Optional max capacity to limit memory usage 30 | currentCount int64 // Current number of objects in the pool 31 | } 32 | 33 | // ObjectPoolOption is a function that configures an ObjectPool. 34 | type ObjectPoolOption[T any] func(*ObjectPool[T]) 35 | 36 | // WithPoolName adds a name to the object pool for debugging and metrics. 37 | func WithPoolName[T any](name string) ObjectPoolOption[T] { 38 | return func(p *ObjectPool[T]) { 39 | p.name = name 40 | } 41 | } 42 | 43 | // WithMaxCapacity sets a maximum capacity for the pool to limit memory usage. 44 | func WithMaxCapacity[T any](maxCapacity int) ObjectPoolOption[T] { 45 | return func(p *ObjectPool[T]) { 46 | p.maxCapacity = maxCapacity 47 | } 48 | } 49 | 50 | // NewObjectPool creates a new optimized ObjectPool with the given factory function. 51 | func NewObjectPool[T any](factory func() T, options ...ObjectPoolOption[T]) *ObjectPool[T] { 52 | pool := &ObjectPool[T]{ 53 | pool: sync.Pool{ 54 | New: func() interface{} { 55 | return factory() 56 | }, 57 | }, 58 | name: "generic_pool", 59 | maxCapacity: -1, // No limit by default 60 | } 61 | 62 | // Apply options 63 | for _, option := range options { 64 | option(pool) 65 | } 66 | 67 | return pool 68 | } 69 | 70 | // Get retrieves an object from the pool. 71 | func (p *ObjectPool[T]) Get() T { 72 | atomic.AddInt64(&p.gets, 1) 73 | 74 | // Get an object from the pool 75 | objInterface := p.pool.Get() // Use a different name to avoid confusion 76 | 77 | // Perform the type assertion and check the result 78 | obj, ok := objInterface.(T) 79 | if !ok { 80 | // This shouldn't happen if the pool is used correctly (i.e., only T types are Put), 81 | // but handle it defensively. Record a miss and return the zero value for T. 82 | atomic.AddInt64(&p.misses, 1) 83 | // Note: currentCount is NOT decremented here because we didn't successfully "get" 84 | // an object of the correct type from the pool's perspective. 85 | var zero T 86 | return zero 87 | } 88 | 89 | // Decrement count only if we successfully got an object of the correct type 90 | // Note: This assumes currentCount tracks items *available* in the sync.Pool, 91 | // which might be slightly inaccurate as sync.Pool's internal count isn't exposed. 92 | // If currentCount is meant to track items *logically* belonging to the pool, 93 | // the decrement might happen earlier or be handled differently. 94 | // However, based on the Put logic, decrementing here seems intended. 95 | atomic.AddInt64(&p.currentCount, -1) 96 | 97 | return obj // Return the already asserted object 98 | } 99 | 100 | // Put returns an object to the pool if there's capacity available. 101 | func (p *ObjectPool[T]) Put(obj T) { 102 | // Check if we should limit the pool size 103 | if p.maxCapacity > 0 { 104 | if count := atomic.LoadInt64(&p.currentCount); count >= int64(p.maxCapacity) { 105 | // Pool is at capacity, don't put the object back 106 | // It will be garbage collected 107 | return 108 | } 109 | } 110 | 111 | atomic.AddInt64(&p.puts, 1) 112 | atomic.AddInt64(&p.currentCount, 1) 113 | p.pool.Put(obj) 114 | } 115 | 116 | // Stats returns statistics about the pool usage. 
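// The returned map contains the keys "gets", "puts", "misses",
// "current_size" and "hit_ratio" (an integer percentage), as computed below.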
117 | func (p *ObjectPool[T]) Stats() map[string]int64 { 118 | gets := atomic.LoadInt64(&p.gets) 119 | 120 | // Avoid division by zero 121 | hitRatio := int64(0) 122 | if gets > 0 { 123 | hitRatio = int64(float64(gets-atomic.LoadInt64(&p.misses)) / float64(gets) * 100) 124 | } 125 | 126 | return map[string]int64{ 127 | "gets": gets, 128 | "puts": atomic.LoadInt64(&p.puts), 129 | "misses": atomic.LoadInt64(&p.misses), 130 | "current_size": atomic.LoadInt64(&p.currentCount), 131 | "hit_ratio": hitRatio, 132 | } 133 | } 134 | 135 | // Name returns the name of the pool 136 | func (p *ObjectPool[T]) Name() string { 137 | return p.name 138 | } 139 | 140 | // Ensure ObjectPool implements PoolStats 141 | var _ PoolStats = (*ObjectPool[string])(nil) 142 | 143 | // SlicePool is a specialized pool for slices with optimized handling. 144 | type SlicePool[T any] struct { 145 | *ObjectPool[[]T] 146 | initialCapacity int 147 | } 148 | 149 | // NewSlicePool creates a new optimized SlicePool. 150 | func NewSlicePool[T any](initialCapacity int, options ...ObjectPoolOption[[]T]) *SlicePool[T] { 151 | factory := func() []T { 152 | return make([]T, 0, initialCapacity) 153 | } 154 | 155 | return &SlicePool[T]{ 156 | ObjectPool: NewObjectPool(factory, options...), 157 | initialCapacity: initialCapacity, 158 | } 159 | } 160 | 161 | // Get retrieves a slice from the pool and ensures it has the right characteristics. 162 | func (p *SlicePool[T]) Get() []T { 163 | // Get a slice from the base pool 164 | slice := p.ObjectPool.Get() 165 | 166 | // Reset length to 0 but preserve capacity 167 | if len(slice) > 0 { 168 | slice = slice[:0] 169 | } 170 | 171 | return slice 172 | } 173 | 174 | // GetWithCapacity gets a slice with at least the specified capacity. 175 | func (p *SlicePool[T]) GetWithCapacity(minCapacity int) []T { 176 | slice := p.Get() 177 | 178 | // If we need more capacity, allocate a new slice 179 | if cap(slice) < minCapacity { 180 | // Don't put the too-small slice back - let GC handle it 181 | return make([]T, 0, minCapacity) 182 | } 183 | 184 | return slice 185 | } 186 | 187 | // PooledStage is a stage that uses object pooling for its internal operations. 188 | type PooledStage[I, O any] struct { 189 | // The underlying stage 190 | stage Stage[I, O] 191 | // Pools this stage manages 192 | pools []PoolStats 193 | } 194 | 195 | // PooledStageOption is a function that configures a PooledStage. 196 | type PooledStageOption[I, O any] func(*PooledStage[I, O]) 197 | 198 | // NewPooledStage creates a new pooled stage that wraps another stage. 199 | func NewPooledStage[I, O any](stage Stage[I, O], options ...PooledStageOption[I, O]) *PooledStage[I, O] { 200 | ps := &PooledStage[I, O]{ 201 | stage: stage, 202 | pools: make([]PoolStats, 0), 203 | } 204 | 205 | // Apply options 206 | for _, option := range options { 207 | option(ps) 208 | } 209 | 210 | return ps 211 | } 212 | 213 | // RegisterPool adds an object pool to be managed by this stage. 214 | func (s *PooledStage[I, O]) RegisterPool(pool PoolStats) { 215 | s.pools = append(s.pools, pool) 216 | } 217 | 218 | // Process implements the Stage interface for PooledStage. 219 | func (s *PooledStage[I, O]) Process(ctx context.Context, input I) (O, error) { 220 | // Process the input using the underlying stage 221 | output, err := s.stage.Process(ctx, input) 222 | return output, err 223 | } 224 | 225 | // GetStats returns statistics about all pools managed by this stage. 
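// Each registered pool appears under the key "pool_<name>". Sketch
// (innerStage is a placeholder Stage[string, string]):
//
//	ps := NewPooledStage(innerStage)
//	ps.RegisterPool(NewSlicePool[string](64, WithPoolName[[]string]("results")))
//	stats := ps.GetStats() // e.g. stats["pool_results"]["gets"]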
226 | func (s *PooledStage[I, O]) GetStats() map[string]map[string]int64 { 227 | stats := make(map[string]map[string]int64) 228 | 229 | for _, pool := range s.pools { 230 | poolName := fmt.Sprintf("pool_%s", pool.Name()) 231 | stats[poolName] = pool.Stats() 232 | } 233 | 234 | return stats 235 | } 236 | 237 | // PooledBuffer is an optimized buffer implementation using object pools. 238 | type PooledBuffer[I, O any] struct { 239 | // Basic buffer configuration 240 | batchSize int 241 | processor func(ctx context.Context, batch []I) ([]O, error) 242 | errHandler func(error) error 243 | 244 | // Pools for internal use 245 | resultPool *SlicePool[O] 246 | 247 | // Stats and metrics 248 | name string 249 | metricsCollector MetricsCollector 250 | processedBatches int64 251 | processedItems int64 252 | } 253 | 254 | // PooledBufferOption is a function that configures a PooledBuffer. 255 | type PooledBufferOption[I, O any] func(*PooledBuffer[I, O]) 256 | 257 | // WithBufferName adds a name to the pooled buffer. 258 | func WithBufferName[I, O any](name string) PooledBufferOption[I, O] { 259 | return func(b *PooledBuffer[I, O]) { 260 | b.name = name 261 | } 262 | } 263 | 264 | // WithBufferMetricsCollector adds a metrics collector to the pooled buffer. 265 | func WithBufferMetricsCollector[I, O any](collector MetricsCollector) PooledBufferOption[I, O] { 266 | return func(b *PooledBuffer[I, O]) { 267 | b.metricsCollector = collector 268 | } 269 | } 270 | 271 | // WithBufferErrorHandler adds a custom error handler to the pooled buffer. 272 | func WithBufferErrorHandler[I, O any](handler func(error) error) PooledBufferOption[I, O] { 273 | return func(b *PooledBuffer[I, O]) { 274 | b.errHandler = handler 275 | } 276 | } 277 | 278 | // NewPooledBuffer creates a new pooled buffer with optimal settings. 279 | func NewPooledBuffer[I, O any]( 280 | batchSize int, 281 | processor func(ctx context.Context, batch []I) ([]O, error), 282 | options ...PooledBufferOption[I, O], 283 | ) *PooledBuffer[I, O] { 284 | // Ensure reasonable batch size 285 | if batchSize <= 0 { 286 | batchSize = runtime.NumCPU() * 10 // Default to CPU count * 10 287 | } 288 | 289 | pb := &PooledBuffer[I, O]{ 290 | batchSize: batchSize, 291 | processor: processor, 292 | errHandler: func(err error) error { return err }, 293 | resultPool: NewSlicePool[O](batchSize * 2), // Double for efficiency 294 | name: "pooled_buffer", 295 | metricsCollector: DefaultMetricsCollector, 296 | } 297 | 298 | // Apply options 299 | for _, option := range options { 300 | option(pb) 301 | } 302 | 303 | return pb 304 | } 305 | 306 | // Name returns the name of the pooled buffer. 307 | func (pb *PooledBuffer[I, O]) Name() string { 308 | return pb.name 309 | } 310 | 311 | // Stats returns statistics about the buffer usage. 312 | func (pb *PooledBuffer[I, O]) Stats() map[string]int64 { 313 | stats := map[string]int64{ 314 | "processed_batches": atomic.LoadInt64(&pb.processedBatches), 315 | "processed_items": atomic.LoadInt64(&pb.processedItems), 316 | } 317 | 318 | // Add pool stats if available 319 | if poolStats := pb.resultPool.ObjectPool.Stats(); poolStats != nil { 320 | for k, v := range poolStats { 321 | stats["result_pool_"+k] = v 322 | } 323 | } 324 | 325 | return stats 326 | } 327 | 328 | // Process implements the Stage interface for PooledBuffer. 
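// Inputs are processed in batches of up to batchSize; each batch's duration
// is reported via BufferBatchProcessed, and the combined results are copied
// out of the pooled slice before that slice is returned to the pool.
// Construction sketch (the batch size and processor are illustrative):
//
//	pb := NewPooledBuffer(50,
//		func(_ context.Context, batch []int) ([]int, error) {
//			out := make([]int, 0, len(batch))
//			for _, v := range batch {
//				out = append(out, v*2) // double each item
//			}
//			return out, nil
//		},
//		WithBufferName[int, int]("doubler"),
//	)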
329 | func (pb *PooledBuffer[I, O]) Process(ctx context.Context, inputs []I) ([]O, error) { 330 | // Track processing start time for metrics 331 | startTime := time.Now() 332 | 333 | // Handle empty input case 334 | inputLen := len(inputs) 335 | if inputLen == 0 { 336 | return []O{}, nil 337 | } 338 | 339 | // Calculate expected output capacity 340 | estimatedOutputSize := (inputLen + pb.batchSize - 1) / pb.batchSize * pb.batchSize 341 | 342 | // Get a results slice from the pool with the right capacity 343 | allResults := pb.resultPool.GetWithCapacity(estimatedOutputSize) 344 | 345 | // Process inputs in batches 346 | for i := 0; i < inputLen; i += pb.batchSize { 347 | // Check for context cancellation 348 | if ctx.Err() != nil { 349 | pb.resultPool.Put(allResults) // Return the slice to the pool 350 | return nil, pb.errHandler(ctx.Err()) 351 | } 352 | 353 | // Get batch bounds 354 | end := i + pb.batchSize 355 | if end > inputLen { 356 | end = inputLen 357 | } 358 | 359 | // Get the current batch (just a slice of the original - no allocation) 360 | batch := inputs[i:end] 361 | 362 | // Track batch processing for metrics 363 | batchStartTime := time.Now() 364 | 365 | // Process the batch 366 | results, err := pb.processor(ctx, batch) 367 | 368 | if err != nil { 369 | // Return the results slice to the pool if we're not going to use it 370 | pb.resultPool.Put(allResults) 371 | return nil, pb.errHandler(fmt.Errorf("batch processing error at offset %d: %w", i, err)) 372 | } 373 | 374 | // Record batch metrics 375 | if pb.metricsCollector != nil { 376 | pb.metricsCollector.BufferBatchProcessed(ctx, len(batch), time.Since(batchStartTime)) 377 | } 378 | 379 | // Update stats 380 | atomic.AddInt64(&pb.processedBatches, 1) 381 | atomic.AddInt64(&pb.processedItems, int64(len(batch))) 382 | 383 | // Append batch results to our pooled results slice 384 | allResults = append(allResults, results...) 385 | } 386 | 387 | // Create a new slice for the final results - we can't return the pooled slice directly 388 | finalResults := make([]O, len(allResults)) 389 | copy(finalResults, allResults) 390 | 391 | // Return the buffer to the pool 392 | pb.resultPool.Put(allResults) 393 | 394 | // Record overall metrics 395 | if pb.metricsCollector != nil { 396 | pb.metricsCollector.StageCompleted(ctx, pb.name, time.Since(startTime)) 397 | } 398 | 399 | return finalResults, nil 400 | } 401 | 402 | // PreWarmPool creates and returns the specified number of objects to the pool. 403 | // This can help reduce allocation overhead during high-load periods. 404 | func PreWarmPool[T any](pool *ObjectPool[T], count int) { 405 | objects := make([]T, count) 406 | 407 | // Get objects from the pool (forcing creation) 408 | for i := 0; i < count; i++ { 409 | objects[i] = pool.Get() 410 | } 411 | 412 | // Return them to the pool 413 | for i := 0; i < count; i++ { 414 | pool.Put(objects[i]) 415 | } 416 | } 417 | 418 | // PreWarmSlicePool creates and returns the specified number of slices to the slice pool. 
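// As with PreWarmPool, this is typically done once at startup, before the
// pool sees concurrent traffic. Sketch:
//
//	pool := NewSlicePool[byte](4096)
//	PreWarmSlicePool(pool, 32) // pre-allocate 32 []byte slices of capacity 4096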
419 | func PreWarmSlicePool[T any](pool *SlicePool[T], count int) { 420 | slices := make([][]T, count) 421 | 422 | // Get slices from the pool (forcing creation) 423 | for i := 0; i < count; i++ { 424 | slices[i] = pool.Get() 425 | } 426 | 427 | // Return them to the pool 428 | for i := 0; i < count; i++ { 429 | pool.Put(slices[i]) 430 | } 431 | } 432 | -------------------------------------------------------------------------------- /rate_limiter.go: -------------------------------------------------------------------------------- 1 | package fluxus 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "golang.org/x/time/rate" 9 | ) 10 | 11 | // RateLimiter is a stage decorator that limits the rate of requests to a stage. 12 | type RateLimiter[I, O any] struct { 13 | stage Stage[I, O] 14 | limiter *rate.Limiter 15 | timeout time.Duration 16 | mu sync.RWMutex 17 | } 18 | 19 | // RateLimiterOption is a function that configures a RateLimiter. 20 | type RateLimiterOption[I, O any] func(*RateLimiter[I, O]) 21 | 22 | // WithLimiterTimeout sets a timeout for waiting for the limiter. 23 | func WithLimiterTimeout[I, O any](timeout time.Duration) RateLimiterOption[I, O] { 24 | return func(rl *RateLimiter[I, O]) { 25 | rl.timeout = timeout 26 | } 27 | } 28 | 29 | // NewRateLimiter creates a new rate limiter with the given stage. 30 | // r is the rate limit (e.g., 10 means 10 requests per second) 31 | // b is the maximum burst size (maximum number of tokens that can be consumed in a single burst) 32 | func NewRateLimiter[I, O any]( 33 | stage Stage[I, O], 34 | r rate.Limit, 35 | b int, 36 | options ...RateLimiterOption[I, O], 37 | ) *RateLimiter[I, O] { 38 | rl := &RateLimiter[I, O]{ 39 | stage: stage, 40 | limiter: rate.NewLimiter(r, b), 41 | timeout: time.Second, // Default timeout 42 | } 43 | 44 | // Apply options 45 | for _, option := range options { 46 | option(rl) 47 | } 48 | 49 | return rl 50 | } 51 | 52 | // Process implements the Stage interface for RateLimiter. 53 | func (rl *RateLimiter[I, O]) Process(ctx context.Context, input I) (O, error) { 54 | var zero O 55 | 56 | // Wait for rate limiter or timeout 57 | limiterCtx := ctx 58 | if rl.timeout > 0 { 59 | var cancel context.CancelFunc 60 | limiterCtx, cancel = context.WithTimeout(ctx, rl.timeout) 61 | defer cancel() 62 | } 63 | 64 | // Try to acquire a token 65 | if err := rl.limiter.Wait(limiterCtx); err != nil { 66 | return zero, err 67 | } 68 | 69 | // Forward the request to the underlying stage 70 | output, err := rl.stage.Process(ctx, input) 71 | 72 | return output, err 73 | } 74 | 75 | // SetLimit updates the rate limit. 76 | func (rl *RateLimiter[I, O]) SetLimit(r rate.Limit) { 77 | rl.mu.Lock() 78 | defer rl.mu.Unlock() 79 | rl.limiter.SetLimit(r) 80 | } 81 | 82 | // SetBurst updates the burst limit. 83 | func (rl *RateLimiter[I, O]) SetBurst(b int) { 84 | rl.mu.Lock() 85 | defer rl.mu.Unlock() 86 | rl.limiter.SetBurst(b) 87 | } 88 | 89 | // Allow checks if a request can be processed without blocking. 
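// Allow consumes a token when it returns true, independently of Process
// (which waits for its own token). Sketch of non-blocking load shedding
// around a limiter built with, e.g., NewRateLimiter(stage, rate.Limit(10), 5):
//
//	if !rl.Allow() {
//		// reject immediately instead of queueing behind the limiter
//	}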
90 | func (rl *RateLimiter[I, O]) Allow() bool { 91 | return rl.limiter.Allow() 92 | } 93 | -------------------------------------------------------------------------------- /tracing.go: -------------------------------------------------------------------------------- 1 | package fluxus 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "go.opentelemetry.io/otel" 9 | "go.opentelemetry.io/otel/attribute" 10 | "go.opentelemetry.io/otel/codes" 11 | "go.opentelemetry.io/otel/trace" 12 | ) 13 | 14 | // TracedStage wraps any Stage with OpenTelemetry tracing 15 | type TracedStage[I, O any] struct { 16 | // The underlying stage 17 | stage Stage[I, O] 18 | 19 | // Name for tracing 20 | name string 21 | 22 | // Tracer to use 23 | tracer trace.Tracer 24 | 25 | // Attributes to add to spans 26 | attributes []attribute.KeyValue 27 | } 28 | 29 | // TracedStageOption is a function that configures a TracedStage. 30 | type TracedStageOption[I, O any] func(*TracedStage[I, O]) 31 | 32 | // WithTracerName sets a custom name for the TracedStage. 33 | func WithTracerName[I, O any](name string) TracedStageOption[I, O] { 34 | return func(ts *TracedStage[I, O]) { 35 | ts.name = name 36 | } 37 | } 38 | 39 | // WithTracer sets a custom tracer for the TracedStage. 40 | func WithTracer[I, O any](tracer trace.Tracer) TracedStageOption[I, O] { 41 | return func(ts *TracedStage[I, O]) { 42 | ts.tracer = tracer 43 | } 44 | } 45 | 46 | // WithTracerAttributes adds custom attributes to spans created by the TracedStage. 47 | func WithTracerAttributes[I, O any](attrs ...attribute.KeyValue) TracedStageOption[I, O] { 48 | return func(ts *TracedStage[I, O]) { 49 | ts.attributes = append(ts.attributes, attrs...) 50 | } 51 | } 52 | 53 | // NewTracedStage creates a new TracedStage that wraps the given stage. 54 | func NewTracedStage[I, O any]( 55 | stage Stage[I, O], 56 | options ...TracedStageOption[I, O], 57 | ) *TracedStage[I, O] { 58 | ts := &TracedStage[I, O]{ 59 | stage: stage, 60 | name: "fluxus.stage", 61 | tracer: otel.Tracer("github.com/synoptiq/go-fluxus"), 62 | attributes: []attribute.KeyValue{}, 63 | } 64 | 65 | // Apply options 66 | for _, option := range options { 67 | option(ts) 68 | } 69 | 70 | return ts 71 | } 72 | 73 | // Process implements the Stage interface for TracedStage. 74 | func (ts *TracedStage[I, O]) Process(ctx context.Context, input I) (O, error) { 75 | // Create a span for this stage 76 | ctx, span := ts.tracer.Start( 77 | ctx, 78 | ts.name, 79 | trace.WithAttributes(ts.attributes...), 80 | ) 81 | defer span.End() 82 | 83 | // Start timing 84 | startTime := time.Now() 85 | 86 | // Process the stage 87 | output, err := ts.stage.Process(ctx, input) 88 | 89 | // Record duration 90 | duration := time.Since(startTime) 91 | span.SetAttributes(attribute.Float64("duration_ms", float64(duration.Milliseconds()))) 92 | 93 | // Record error if any 94 | if err != nil { 95 | span.RecordError(err) 96 | span.SetStatus(codes.Error, err.Error()) 97 | } else { 98 | span.SetStatus(codes.Ok, "") 99 | } 100 | 101 | return output, err 102 | } 103 | 104 | // Factory functions for creating traced versions of specific stages 105 | 106 | // TracedFanOutStage wraps a FanOut with additional fan-out specific tracing 107 | type TracedFanOutStage[I, O any] struct { 108 | stage *FanOut[I, O] 109 | name string 110 | tracer trace.Tracer 111 | attributes []attribute.KeyValue 112 | } 113 | 114 | // NewTracedFanOut creates a traced wrapper around a FanOut stage. 
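// The span records num_stages and concurrency when it starts and
// num_results on success. Sketch (fanOut and the attribute are
// illustrative):
//
//	stage := NewTracedFanOut(fanOut, "parallel_enrich",
//		attribute.String("component", "enrichment"),
//	)
//	results, err := stage.Process(ctx, input)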
115 | func NewTracedFanOut[I, O any]( 116 | fanOut *FanOut[I, O], 117 | name string, 118 | attributes ...attribute.KeyValue, 119 | ) Stage[I, []O] { 120 | return &TracedFanOutStage[I, O]{ 121 | stage: fanOut, 122 | name: name, 123 | tracer: otel.Tracer("github.com/synoptiq/go-fluxus"), 124 | attributes: attributes, 125 | } 126 | } 127 | 128 | // WithTracer sets a custom tracer for the traced fan-out stage. 129 | func (ts *TracedFanOutStage[I, O]) WithTracer(tracer trace.Tracer) *TracedFanOutStage[I, O] { 130 | ts.tracer = tracer 131 | return ts 132 | } 133 | 134 | // Process implements the Stage interface for TracedFanOutStage 135 | func (ts *TracedFanOutStage[I, O]) Process(ctx context.Context, input I) ([]O, error) { 136 | // Create a span for the fan-out operation 137 | ctx, span := ts.tracer.Start( 138 | ctx, 139 | ts.name, 140 | trace.WithAttributes( 141 | append( 142 | ts.attributes, 143 | attribute.Int("num_stages", len(ts.stage.stages)), 144 | attribute.Int("concurrency", ts.stage.concurrency), 145 | )..., 146 | ), 147 | ) 148 | defer span.End() 149 | 150 | // Start timing 151 | startTime := time.Now() 152 | 153 | // Process the fan-out 154 | outputs, err := ts.stage.Process(ctx, input) 155 | 156 | // Record duration 157 | duration := time.Since(startTime) 158 | span.SetAttributes(attribute.Float64("duration_ms", float64(duration.Milliseconds()))) 159 | 160 | // Record error if any 161 | if err != nil { 162 | span.RecordError(err) 163 | span.SetStatus(codes.Error, err.Error()) 164 | } else { 165 | span.SetStatus(codes.Ok, "") 166 | span.SetAttributes(attribute.Int("num_results", len(outputs))) 167 | } 168 | 169 | return outputs, err 170 | } 171 | 172 | // TracedFanInStage wraps a FanIn with additional fan-in specific tracing 173 | type TracedFanInStage[I, O any] struct { 174 | stage *FanIn[I, O] 175 | name string 176 | tracer trace.Tracer 177 | attributes []attribute.KeyValue 178 | } 179 | 180 | // NewTracedFanIn creates a traced wrapper around a FanIn stage. 181 | func NewTracedFanIn[I, O any]( 182 | fanIn *FanIn[I, O], 183 | name string, 184 | attributes ...attribute.KeyValue, 185 | ) Stage[[]I, O] { 186 | return &TracedFanInStage[I, O]{ 187 | stage: fanIn, 188 | name: name, 189 | tracer: otel.Tracer("github.com/synoptiq/go-fluxus"), 190 | attributes: attributes, 191 | } 192 | } 193 | 194 | // WithTracer sets a custom tracer for the traced fan-in stage. 
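// Because NewTracedFanIn returns the Stage[[]I, O] interface, keep or
// recover a concrete *TracedFanInStage to call WithTracer. Hedged sketch
// (customTracer is a placeholder trace.Tracer):
//
//	st := NewTracedFanIn(fanIn, "merge_results")
//	if tfi, ok := st.(*TracedFanInStage[string, string]); ok {
//		tfi.WithTracer(customTracer)
//	}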
195 | func (ts *TracedFanInStage[I, O]) WithTracer(tracer trace.Tracer) *TracedFanInStage[I, O] { 196 | ts.tracer = tracer 197 | return ts 198 | } 199 | 200 | // Process implements the Stage interface for TracedFanInStage 201 | func (ts *TracedFanInStage[I, O]) Process(ctx context.Context, inputs []I) (O, error) { 202 | // Create a span for the fan-in operation 203 | ctx, span := ts.tracer.Start( 204 | ctx, 205 | ts.name, 206 | trace.WithAttributes( 207 | append( 208 | ts.attributes, 209 | attribute.Int("num_inputs", len(inputs)), 210 | )..., 211 | ), 212 | ) 213 | defer span.End() 214 | 215 | // Start timing 216 | startTime := time.Now() 217 | 218 | // Process the fan-in 219 | output, err := ts.stage.Process(ctx, inputs) 220 | 221 | // Record duration 222 | duration := time.Since(startTime) 223 | span.SetAttributes(attribute.Float64("duration_ms", float64(duration.Milliseconds()))) 224 | 225 | // Record error if any 226 | if err != nil { 227 | span.RecordError(err) 228 | span.SetStatus(codes.Error, err.Error()) 229 | } else { 230 | span.SetStatus(codes.Ok, "") 231 | } 232 | 233 | return output, err 234 | } 235 | 236 | // TracedBufferStage wraps a Buffer with buffer-specific tracing 237 | type TracedBufferStage[I, O any] struct { 238 | stage *Buffer[I, O] 239 | name string 240 | tracer trace.Tracer 241 | attributes []attribute.KeyValue 242 | } 243 | 244 | // NewTracedBuffer creates a traced wrapper around a Buffer stage. 245 | func NewTracedBuffer[I, O any]( 246 | buffer *Buffer[I, O], 247 | name string, 248 | attributes ...attribute.KeyValue, 249 | ) Stage[[]I, []O] { 250 | return &TracedBufferStage[I, O]{ 251 | stage: buffer, 252 | name: name, 253 | tracer: otel.Tracer("github.com/synoptiq/go-fluxus"), 254 | attributes: attributes, 255 | } 256 | } 257 | 258 | // WithTracer sets a custom tracer for the traced buffer stage. 259 | func (ts *TracedBufferStage[I, O]) WithTracer(tracer trace.Tracer) *TracedBufferStage[I, O] { 260 | ts.tracer = tracer 261 | return ts 262 | } 263 | 264 | // Process implements the Stage interface for TracedBufferStage 265 | func (ts *TracedBufferStage[I, O]) Process(ctx context.Context, inputs []I) ([]O, error) { 266 | // Create a span for the buffer operation 267 | ctx, span := ts.tracer.Start( 268 | ctx, 269 | ts.name, 270 | trace.WithAttributes( 271 | append( 272 | ts.attributes, 273 | attribute.Int("num_inputs", len(inputs)), 274 | attribute.Int("batch_size", ts.stage.batchSize), 275 | )..., 276 | ), 277 | ) 278 | defer span.End() 279 | 280 | // Start timing 281 | startTime := time.Now() 282 | 283 | // Process the buffer 284 | outputs, err := ts.stage.Process(ctx, inputs) 285 | 286 | // Record duration 287 | duration := time.Since(startTime) 288 | span.SetAttributes( 289 | attribute.Float64("duration_ms", float64(duration.Milliseconds())), 290 | attribute.Int("num_outputs", len(outputs)), 291 | ) 292 | 293 | // Record error if any 294 | if err != nil { 295 | span.RecordError(err) 296 | span.SetStatus(codes.Error, err.Error()) 297 | } else { 298 | span.SetStatus(codes.Ok, "") 299 | } 300 | 301 | return outputs, err 302 | } 303 | 304 | // TracedRetryStage wraps a Retry with additional retry-specific tracing 305 | type TracedRetryStage[I, O any] struct { 306 | stage *Retry[I, O] 307 | name string 308 | tracer trace.Tracer 309 | attributes []attribute.KeyValue 310 | } 311 | 312 | // NewTracedRetry creates a traced wrapper around a Retry stage. 
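// Each attempt gets its own child span named "<name>.attempt.<n>", and the
// parent span records the total attempt count. As with the metricated
// variant, Process temporarily swaps the Retry's underlying stage to count
// attempts, so a traced retry should not be shared by concurrent Process
// calls. Sketch (retry is a placeholder *Retry[string, string]):
//
//	stage := NewTracedRetry(retry, "fetch_retry",
//		attribute.String("target", "upstream_api"),
//	)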
313 | func NewTracedRetry[I, O any]( 314 | retry *Retry[I, O], 315 | name string, 316 | attributes ...attribute.KeyValue, 317 | ) Stage[I, O] { 318 | return &TracedRetryStage[I, O]{ 319 | stage: retry, 320 | name: name, 321 | tracer: otel.Tracer("github.com/synoptiq/go-fluxus"), 322 | attributes: attributes, 323 | } 324 | } 325 | 326 | // WithTracer sets a custom tracer for the traced retry stage. 327 | func (ts *TracedRetryStage[I, O]) WithTracer(tracer trace.Tracer) *TracedRetryStage[I, O] { 328 | ts.tracer = tracer 329 | return ts 330 | } 331 | 332 | // Process implements the Stage interface for TracedRetryStage 333 | func (ts *TracedRetryStage[I, O]) Process(ctx context.Context, input I) (O, error) { 334 | // Create a span for the retry operation 335 | ctx, span := ts.tracer.Start( 336 | ctx, 337 | ts.name, 338 | trace.WithAttributes( 339 | append( 340 | ts.attributes, 341 | attribute.Int("max_attempts", ts.stage.maxAttempts), 342 | )..., 343 | ), 344 | ) 345 | defer span.End() 346 | 347 | // Create a counter for tracking attempts 348 | attemptCount := 0 349 | 350 | // Temporarily replace the original stage with our counted version 351 | originalStage := ts.stage.stage 352 | 353 | // Create a wrapper stage that counts attempts and creates spans for each attempt 354 | countingStage := StageFunc[I, O](func(ctx context.Context, input I) (O, error) { 355 | // Increment attempt counter before each attempt 356 | attemptCount++ 357 | 358 | // Create a child span for the retry attempt 359 | attemptCtx, attemptSpan := ts.tracer.Start( 360 | ctx, 361 | fmt.Sprintf("%s.attempt.%d", ts.name, attemptCount), 362 | trace.WithAttributes( 363 | attribute.Int("attempt", attemptCount), 364 | ), 365 | ) 366 | 367 | // Process the attempt 368 | output, err := originalStage.Process(attemptCtx, input) 369 | 370 | // Record result in the span 371 | if err != nil { 372 | attemptSpan.RecordError(err) 373 | attemptSpan.SetStatus(codes.Error, err.Error()) 374 | attemptSpan.SetAttributes(attribute.Bool("success", false)) 375 | } else { 376 | attemptSpan.SetStatus(codes.Ok, "") 377 | attemptSpan.SetAttributes(attribute.Bool("success", true)) 378 | } 379 | 380 | // End the attempt span 381 | attemptSpan.End() 382 | 383 | return output, err 384 | }) 385 | 386 | // Temporarily replace the stage with our counting version 387 | ts.stage.stage = countingStage 388 | 389 | // Start timing 390 | startTime := time.Now() 391 | 392 | // Process using the modified retry stage 393 | output, err := ts.stage.Process(ctx, input) 394 | 395 | // Restore the original stage 396 | ts.stage.stage = originalStage 397 | 398 | // Record duration and attempts 399 | duration := time.Since(startTime) 400 | span.SetAttributes( 401 | attribute.Float64("duration_ms", float64(duration.Milliseconds())), 402 | attribute.Int("attempts", attemptCount), 403 | ) 404 | 405 | // Record error if any 406 | if err != nil { 407 | span.RecordError(err) 408 | span.SetStatus(codes.Error, err.Error()) 409 | } else { 410 | span.SetStatus(codes.Ok, "") 411 | } 412 | 413 | return output, err 414 | } 415 | 416 | // TracedPipelineStage wraps a Pipeline with pipeline-specific tracing 417 | type TracedPipelineStage[I, O any] struct { 418 | stage *Pipeline[I, O] 419 | name string 420 | tracer trace.Tracer 421 | attributes []attribute.KeyValue 422 | } 423 | 424 | // NewTracedPipeline creates a traced wrapper around a Pipeline stage. 
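// Sketch (pipeline construction via the Pipeline API is assumed, not shown
// here):
//
//	traced := NewTracedPipeline(pipeline, "ingest_pipeline",
//		attribute.String("service", "ingest"),
//	)
//	out, err := traced.Process(ctx, input)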
425 | func NewTracedPipeline[I, O any]( 426 | pipeline *Pipeline[I, O], 427 | name string, 428 | attributes ...attribute.KeyValue, 429 | ) Stage[I, O] { 430 | return &TracedPipelineStage[I, O]{ 431 | stage: pipeline, 432 | name: name, 433 | tracer: otel.Tracer("github.com/synoptiq/go-fluxus"), 434 | attributes: attributes, 435 | } 436 | } 437 | 438 | // WithTracer sets a custom tracer for the traced pipeline stage. 439 | func (ts *TracedPipelineStage[I, O]) WithTracer(tracer trace.Tracer) *TracedPipelineStage[I, O] { 440 | ts.tracer = tracer 441 | return ts 442 | } 443 | 444 | // Process implements the Stage interface for TracedPipelineStage 445 | func (ts *TracedPipelineStage[I, O]) Process(ctx context.Context, input I) (O, error) { 446 | // Create a span for the pipeline operation 447 | ctx, span := ts.tracer.Start( 448 | ctx, 449 | ts.name, 450 | trace.WithAttributes(ts.attributes...), 451 | ) 452 | defer span.End() 453 | 454 | // Start timing 455 | startTime := time.Now() 456 | 457 | // Process the pipeline 458 | output, err := ts.stage.Process(ctx, input) 459 | 460 | // Record duration 461 | duration := time.Since(startTime) 462 | span.SetAttributes(attribute.Float64("duration_ms", float64(duration.Milliseconds()))) 463 | 464 | // Record error if any 465 | if err != nil { 466 | span.RecordError(err) 467 | span.SetStatus(codes.Error, err.Error()) 468 | } else { 469 | span.SetStatus(codes.Ok, "") 470 | } 471 | 472 | return output, err 473 | } 474 | --------------------------------------------------------------------------------