├── .github └── workflows │ └── build.yml ├── .gitignore ├── .golangci.yml ├── LICENSE.txt ├── README.md ├── UPGRADE_GUIDE.md ├── circuit.go ├── circuit_stress_test.go ├── circuit_test.go ├── closers.go ├── closers ├── doc.go ├── hystrix │ ├── circuit_test.go │ ├── closer.go │ ├── closer_test.go │ ├── config.go │ ├── config_test.go │ ├── doc.go │ ├── example_test.go │ ├── opener.go │ └── opener_test.go └── simplelogic │ ├── closers.go │ ├── closers_test.go │ └── doc.go ├── closers_test.go ├── config.go ├── config_test.go ├── doc.go ├── errors.go ├── errors_test.go ├── example └── main.go ├── example_test.go ├── faststats ├── atomic.go ├── atomic_test.go ├── doc.go ├── rolling_bucket.go ├── rolling_bucket_test.go ├── rolling_counter.go ├── rolling_counter_test.go ├── rolling_percentile.go ├── rolling_percentile_test.go ├── rolling_stress_test.go ├── timedcheck.go └── timedcheck_test.go ├── go.mod ├── go.sum ├── gowrapper.go ├── gowrapper_test.go ├── internal ├── clock │ ├── clock.go │ └── clock_test.go ├── evar │ ├── evar.go │ └── evar_test.go └── testhelp │ ├── testhelp.go │ └── testhelp_test.go ├── manager.go ├── manager_stress_test.go ├── manager_test.go ├── metriceventstream ├── doc.go ├── example_test.go ├── metriceventstream.go └── metriceventstream_test.go ├── metrics.go ├── metrics ├── doc.go ├── responsetimeslo │ ├── doc.go │ ├── example_test.go │ ├── responsetime.go │ └── responsetime_test.go └── rolling │ ├── doc.go │ ├── rolling.go │ └── rolling_test.go └── renovate.json /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | permissions: 4 | # Required: allow read access to the content for analysis. 5 | contents: read 6 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 7 | pull-requests: read 8 | # Optional: Allow write access to checks to allow the action to annotate code in the PR. 
9 | checks: write 10 | 11 | on: 12 | push: 13 | pull_request: 14 | 15 | jobs: 16 | test: 17 | strategy: 18 | matrix: 19 | # Note: We support the three latest Go versions 20 | go-version: [1.22.x, 1.23.x, 1.24.x] 21 | name: Test 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Install Go 25 | uses: actions/setup-go@v5 26 | with: 27 | go-version: ${{ matrix.go-version }} 28 | - name: Check out code 29 | uses: actions/checkout@v4 30 | - name: Build 31 | run: go build -mod=readonly ./... 32 | - name: Verify 33 | run: go mod verify 34 | - name: Test 35 | run: env "GORACE=halt_on_error=1" go test -v -race -count 10 ./... 36 | - name: golangci-lint 37 | uses: golangci/golangci-lint-action@v8 38 | - name: Output coverage 39 | run: go test -v -covermode=count -coverprofile=coverage.out ./... 40 | - name: upload coverage 41 | uses: shogo82148/actions-goveralls@v1 42 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' && matrix.go-version == '1.24.x' 43 | with: 44 | path-to-profile: coverage.out 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /coverage.out 3 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | default: none 4 | enable: 5 | - dupl 6 | - errcheck 7 | - gochecknoinits 8 | - goconst 9 | - gocritic 10 | - gocyclo 11 | - gosec 12 | - govet 13 | - ineffassign 14 | - misspell 15 | - nakedret 16 | - prealloc 17 | - revive 18 | - staticcheck 19 | - unconvert 20 | - unparam 21 | - unused 22 | exclusions: 23 | generated: lax 24 | presets: 25 | - comments 26 | - common-false-positives 27 | - legacy 28 | - std-error-handling 29 | rules: 30 | - linters: 31 | - gosec 32 | text: G404 33 | - linters: 34 | - gosec 35 | text: G114 36 | - linters: 37 | - 
revive 38 | text: unused-parameter 39 | paths: 40 | - third_party$ 41 | - builtin$ 42 | - examples$ 43 | formatters: 44 | enable: 45 | - gofmt 46 | - goimports 47 | exclusions: 48 | generated: lax 49 | paths: 50 | - third_party$ 51 | - builtin$ 52 | - examples$ 53 | -------------------------------------------------------------------------------- /UPGRADE_GUIDE.md: -------------------------------------------------------------------------------- 1 | # Upgrading from v3 -> v4 2 | 3 | ## Gopkg.toml removed 4 | 5 | The `Gopkg.toml` file and support for [dep](https://github.com/golang/dep) has been 6 | removed. Please use `go.mod` instead. 7 | 8 | ## Remove the "/v3" root directory 9 | 10 | The `/v3` directory has been removed and things have moved to the root directory. This should not 11 | be a problem if you are using the `go.mod` file. 12 | 13 | ## Move statsd implementation to another library 14 | 15 | The statsd implementation has been moved to a separate library since the statsd interface was not stable. 16 | If you need statsd metrics, use the implementation [here](https://github.com/cep21/circuit-statsd). 17 | 18 | ## Add ctx to the stats interfaces 19 | 20 | All metric and circuit interfaces now take a context as the first parameter. For example, the call 21 | `Success(now time.Time, duration time.Duration)` is now `Success(ctx context.Context, now time.Time, duration time.Duration)` 22 | and the call `Closed(now time.Time)` is now `Closed(ctx context.Context, now time.Time)`. 23 | 24 | If you have a custom metric implementation, you will need to add a context to your interface. 25 | 26 | ## Move benchmarks to their own repo 27 | 28 | The benchmarks have been moved to their own repo. You can find them [here](https://github.com/cep21/circuit-benchmarks). 29 | 30 | ## Use Go's builtin atomic package 31 | 32 | The atomics package previously implemented atomics manually. This is now using go 1.19's builtin atomics package. 
33 | 34 | ## External API changes to `Circuit` 35 | 36 | The following APIs have changed: 37 | 38 | * `func (c *Circuit) CloseCircuit()` is now `func (c *Circuit) CloseCircuit(ctx context.Context)` 39 | * `func (c *Circuit) OpenCircuit()` is now `func (c *Circuit) OpenCircuit(ctx context.Context)` -------------------------------------------------------------------------------- /circuit_stress_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "strings" 8 | "sync" 9 | "sync/atomic" 10 | "testing" 11 | "time" 12 | 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | // TestConcurrentExecutions tests the circuit under high concurrency 17 | func TestConcurrentExecutions(t *testing.T) { 18 | concurrency := 100 19 | iterations := 1000 20 | 21 | c := NewCircuitFromConfig("concurrent-test", Config{}) 22 | 23 | var wg sync.WaitGroup 24 | var successCount int64 25 | var failureCount int64 26 | 27 | // Start multiple goroutines to hammer the circuit 28 | for i := 0; i < concurrency; i++ { 29 | wg.Add(1) 30 | go func() { 31 | defer wg.Done() 32 | 33 | for j := 0; j < iterations; j++ { 34 | success := j%3 != 0 // Introduce some failures 35 | err := c.Execute(context.Background(), func(ctx context.Context) error { 36 | if !success { 37 | return errors.New("intentional failure") 38 | } 39 | return nil 40 | }, nil) 41 | 42 | if err == nil { 43 | atomic.AddInt64(&successCount, 1) 44 | } else { 45 | atomic.AddInt64(&failureCount, 1) 46 | } 47 | } 48 | }() 49 | } 50 | 51 | wg.Wait() 52 | 53 | t.Logf("Total executions: %d, Successes: %d, Failures: %d", 54 | concurrency*iterations, successCount, failureCount) 55 | 56 | // Ensure we get the expected counts 57 | require.Equal(t, int64(concurrency*iterations), successCount+failureCount, 58 | "Total executions should match successes + failures") 59 | } 60 | 61 | // TestRaceOnConfigChange tests for race conditions when 
configuration changes during operation 62 | func TestRaceOnConfigChange(t *testing.T) { 63 | c := NewCircuitFromConfig("config-race-test", Config{}) 64 | 65 | var wg sync.WaitGroup 66 | configChanges := 100 67 | executions := 1000 68 | 69 | // Goroutine that constantly updates configuration 70 | wg.Add(1) 71 | go func() { 72 | defer wg.Done() 73 | for i := 0; i < configChanges; i++ { 74 | // Update various configuration values 75 | timeout := time.Millisecond * time.Duration(50+i%100) 76 | c.SetConfigThreadSafe(Config{ 77 | Execution: ExecutionConfig{ 78 | Timeout: timeout, 79 | }, 80 | Fallback: FallbackConfig{ 81 | MaxConcurrentRequests: int64(10 + i%20), 82 | }, 83 | Metrics: MetricsCollectors{}, 84 | }) 85 | time.Sleep(time.Millisecond) 86 | } 87 | }() 88 | 89 | // Multiple goroutines executing the circuit 90 | for i := 0; i < 10; i++ { 91 | wg.Add(1) 92 | go func() { 93 | defer wg.Done() 94 | 95 | for j := 0; j < executions; j++ { 96 | _ = c.Execute(context.Background(), func(ctx context.Context) error { 97 | time.Sleep(time.Millisecond) 98 | return nil 99 | }, nil) 100 | } 101 | }() 102 | } 103 | 104 | wg.Wait() 105 | } 106 | 107 | // TestCircuitStateTransitionRace tests for race conditions during circuit state transitions 108 | func TestCircuitStateTransitionRace(t *testing.T) { 109 | // Create a circuit that will open after 20 consecutive failures 110 | c := NewCircuitFromConfig("state-transition-race", Config{}) 111 | 112 | var wg sync.WaitGroup 113 | goroutines := 100 114 | 115 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 116 | defer cancel() 117 | 118 | // Start with consistent failures to trigger circuit opening 119 | for i := 0; i < 50; i++ { 120 | _ = c.Execute(ctx, func(ctx context.Context) error { 121 | return errors.New("intentional failure") 122 | }, nil) 123 | } 124 | 125 | // Now create multiple goroutines that will hit the circuit as it's changing state 126 | circuitOpenObserved := int64(0) 127 | 
circuitClosedObserved := int64(0) 128 | 129 | for i := 0; i < goroutines; i++ { 130 | wg.Add(1) 131 | go func(id int) { 132 | defer wg.Done() 133 | 134 | // Half will try to trigger success (to close circuit) 135 | // Half will continue to fail 136 | shouldFail := id%2 == 0 137 | 138 | for j := 0; j < 100; j++ { 139 | err := c.Execute(ctx, func(ctx context.Context) error { 140 | if shouldFail { 141 | return errors.New("intentional failure") 142 | } 143 | time.Sleep(time.Millisecond) 144 | return nil 145 | }, nil) 146 | 147 | // Check if circuit is open by using the CircuitOpen method on the err 148 | if err != nil { 149 | if cerr, ok := err.(Error); ok && cerr.CircuitOpen() { 150 | atomic.AddInt64(&circuitOpenObserved, 1) 151 | } 152 | } else if err == nil { 153 | atomic.AddInt64(&circuitClosedObserved, 1) 154 | } 155 | } 156 | }(i) 157 | } 158 | 159 | wg.Wait() 160 | 161 | t.Logf("Circuit open observed: %d, Circuit closed observed: %d", 162 | circuitOpenObserved, circuitClosedObserved) 163 | } 164 | 165 | // TestContextCancellationStress tests how the circuit handles many context cancellations 166 | func TestContextCancellationStress(t *testing.T) { 167 | c := NewCircuitFromConfig("context-cancel-test", Config{}) 168 | 169 | var wg sync.WaitGroup 170 | goroutines := 100 171 | iterations := 100 172 | 173 | timeoutCount := int64(0) 174 | successCount := int64(0) 175 | failureCount := int64(0) 176 | 177 | for i := 0; i < goroutines; i++ { 178 | wg.Add(1) 179 | go func() { 180 | defer wg.Done() 181 | 182 | for j := 0; j < iterations; j++ { 183 | // Random timeout between 1-10ms 184 | timeout := time.Duration(1+j%10) * time.Millisecond 185 | ctx, cancel := context.WithTimeout(context.Background(), timeout) 186 | 187 | // Function that takes 0-20ms to complete 188 | err := c.Execute(ctx, func(ctx context.Context) error { 189 | sleepTime := time.Duration(j%20) * time.Millisecond 190 | select { 191 | case <-ctx.Done(): 192 | return ctx.Err() 193 | case 
<-time.After(sleepTime): 194 | return nil 195 | } 196 | }, nil) 197 | 198 | switch { 199 | case errors.Is(err, context.DeadlineExceeded): 200 | atomic.AddInt64(&timeoutCount, 1) 201 | case err != nil: 202 | atomic.AddInt64(&failureCount, 1) 203 | default: 204 | atomic.AddInt64(&successCount, 1) 205 | } 206 | 207 | cancel() // Always cancel to clean up resources 208 | } 209 | }() 210 | } 211 | 212 | wg.Wait() 213 | 214 | t.Logf("Timeouts: %d, Successes: %d, Failures: %d", 215 | timeoutCount, successCount, failureCount) 216 | } 217 | 218 | // TestFallbackUnderStress tests the fallback mechanism under high concurrency 219 | func TestFallbackUnderStress(t *testing.T) { 220 | c := NewCircuitFromConfig("fallback-stress-test", Config{ 221 | Fallback: FallbackConfig{ 222 | MaxConcurrentRequests: 10, // Limit fallback concurrency 223 | }, 224 | }) 225 | 226 | var wg sync.WaitGroup 227 | goroutines := 50 228 | iterations := 100 229 | 230 | fallbackCount := int64(0) 231 | fallbackRejectionCount := int64(0) 232 | 233 | for i := 0; i < goroutines; i++ { 234 | wg.Add(1) 235 | go func() { 236 | defer wg.Done() 237 | 238 | for j := 0; j < iterations; j++ { 239 | err := c.Execute(context.Background(), 240 | // Main function always fails 241 | func(ctx context.Context) error { 242 | return errors.New("intentional failure") 243 | }, 244 | // Fallback function does some work and succeeds 245 | func(ctx context.Context, err error) error { 246 | // Small delay to increase contention 247 | time.Sleep(time.Millisecond * 5) 248 | return nil 249 | }) 250 | 251 | if err == nil { 252 | atomic.AddInt64(&fallbackCount, 1) 253 | } else if strings.Contains(err.Error(), "fallback") { 254 | // Check for fallback rejection using the error string 255 | atomic.AddInt64(&fallbackRejectionCount, 1) 256 | } 257 | } 258 | }() 259 | } 260 | 261 | wg.Wait() 262 | 263 | t.Logf("Fallback successes: %d, Fallback rejections: %d", 264 | fallbackCount, fallbackRejectionCount) 265 | 266 | // We should have some 
successful fallbacks 267 | require.Greater(t, fallbackCount, int64(0)) 268 | 269 | // With the concurrency limit, we should also see some rejections 270 | require.Greater(t, fallbackRejectionCount, int64(0)) 271 | } 272 | 273 | // TestManyCircuitsStress tests creating and using many circuits simultaneously 274 | func TestManyCircuitsStress(t *testing.T) { 275 | manager := Manager{} 276 | 277 | circuitCount := 50 278 | goroutinesPerCircuit := 20 279 | iterations := 100 280 | 281 | var wg sync.WaitGroup 282 | 283 | // Metrics to track 284 | var totalExecutions int64 285 | var circuitOpens int64 286 | 287 | // Create and use many circuits simultaneously 288 | for c := 0; c < circuitCount; c++ { 289 | circuitName := fmt.Sprintf("stress-circuit-%d", c) 290 | _ = manager.MustCreateCircuit(circuitName) 291 | 292 | // Each circuit gets multiple goroutines hitting it 293 | for g := 0; g < goroutinesPerCircuit; g++ { 294 | wg.Add(1) 295 | go func(circuitID, goroutineID int) { 296 | defer wg.Done() 297 | 298 | localCircuit := manager.GetCircuit(fmt.Sprintf("stress-circuit-%d", circuitID)) 299 | if localCircuit == nil { 300 | t.Errorf("Failed to get circuit %d", circuitID) 301 | return 302 | } 303 | 304 | // Determine if this goroutine causes failures 305 | causeFailures := goroutineID%4 == 0 306 | 307 | for i := 0; i < iterations; i++ { 308 | err := localCircuit.Execute(context.Background(), func(ctx context.Context) error { 309 | if causeFailures { 310 | return errors.New("intentional failure") 311 | } 312 | return nil 313 | }, nil) 314 | 315 | atomic.AddInt64(&totalExecutions, 1) 316 | 317 | // Check if circuit is open 318 | if err != nil { 319 | if cerr, ok := err.(Error); ok && cerr.CircuitOpen() { 320 | atomic.AddInt64(&circuitOpens, 1) 321 | } 322 | } 323 | } 324 | }(c, g) 325 | } 326 | } 327 | 328 | wg.Wait() 329 | 330 | t.Logf("Total executions across all circuits: %d", totalExecutions) 331 | t.Logf("Circuit open rejections: %d", circuitOpens) 332 | 333 | 
require.Equal(t, int64(circuitCount*goroutinesPerCircuit*iterations), totalExecutions) 334 | } 335 | 336 | // TestNestedCircuitStress tests nested circuit patterns under concurrency 337 | func TestNestedCircuitStress(t *testing.T) { 338 | manager := Manager{} 339 | 340 | outerCircuit := manager.MustCreateCircuit("outer") 341 | innerCircuit := manager.MustCreateCircuit("inner") 342 | 343 | var wg sync.WaitGroup 344 | goroutines := 50 345 | iterations := 100 346 | 347 | var successCount int64 348 | var failureCount int64 349 | 350 | for g := 0; g < goroutines; g++ { 351 | wg.Add(1) 352 | go func() { 353 | defer wg.Done() 354 | 355 | for i := 0; i < iterations; i++ { 356 | // Determine success/failure pattern 357 | innerShouldFail := i%5 == 0 358 | outerShouldFail := i%7 == 0 359 | 360 | err := outerCircuit.Execute(context.Background(), func(outerCtx context.Context) error { 361 | if outerShouldFail { 362 | return errors.New("outer circuit failure") 363 | } 364 | 365 | // Call the inner circuit from within the outer circuit 366 | return innerCircuit.Execute(outerCtx, func(innerCtx context.Context) error { 367 | if innerShouldFail { 368 | return errors.New("inner circuit failure") 369 | } 370 | return nil 371 | }, nil) 372 | }, nil) 373 | 374 | if err == nil { 375 | atomic.AddInt64(&successCount, 1) 376 | } else { 377 | atomic.AddInt64(&failureCount, 1) 378 | } 379 | } 380 | }() 381 | } 382 | 383 | wg.Wait() 384 | 385 | t.Logf("Nested circuit executions - Success: %d, Failure: %d", 386 | successCount, failureCount) 387 | 388 | require.Equal(t, int64(goroutines*iterations), successCount+failureCount) 389 | } 390 | -------------------------------------------------------------------------------- /closers.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 5 | "time" 6 | ) 7 | 8 | // ClosedToOpen receives events and controls if the circuit should open or close as a result of those events. 
9 | // Return true if the circuit should open, false if the circuit should close. 10 | type ClosedToOpen interface { 11 | RunMetrics 12 | Metrics 13 | // ShouldOpen will attempt to open a circuit that is currently closed, after a bad request comes in. Only called 14 | // after bad requests, never called after a successful request 15 | ShouldOpen(ctx context.Context, now time.Time) bool 16 | // Prevent a single request from going through while the circuit is closed. 17 | // Even though the circuit is closed, and we want to allow the circuit to remain closed, we still prevent this 18 | // command from happening. The error will return as a short circuit to the caller, as well as trigger fallback 19 | // logic. This could be useful if your circuit is closed, but some external force wants you to pretend to be open. 20 | Prevent(ctx context.Context, now time.Time) bool 21 | } 22 | 23 | // OpenToClosed controls logic that tries to close an open circuit 24 | type OpenToClosed interface { 25 | RunMetrics 26 | Metrics 27 | // ShouldClose is called after a request is allowed to go through, and the circuit is open. If the circuit should 28 | // now close, return true. If the circuit should remain open, return false. 
29 | ShouldClose(ctx context.Context, now time.Time) bool 30 | // Allow a single request while remaining in the closed state 31 | Allow(ctx context.Context, now time.Time) bool 32 | } 33 | 34 | func neverOpensFactory() ClosedToOpen { 35 | return neverOpens{} 36 | } 37 | 38 | type neverOpens struct{} 39 | 40 | var _ ClosedToOpen = neverOpens{} 41 | 42 | func (c neverOpens) Prevent(_ context.Context, _ time.Time) bool { 43 | return false 44 | } 45 | 46 | func (c neverOpens) Success(_ context.Context, _ time.Time, _ time.Duration) {} 47 | func (c neverOpens) ErrFailure(_ context.Context, _ time.Time, _ time.Duration) {} 48 | func (c neverOpens) ErrTimeout(_ context.Context, _ time.Time, _ time.Duration) {} 49 | func (c neverOpens) ErrBadRequest(_ context.Context, _ time.Time, _ time.Duration) {} 50 | func (c neverOpens) ErrInterrupt(_ context.Context, _ time.Time, _ time.Duration) {} 51 | func (c neverOpens) ErrConcurrencyLimitReject(_ context.Context, _ time.Time) {} 52 | func (c neverOpens) ErrShortCircuit(_ context.Context, _ time.Time) {} 53 | func (c neverOpens) Opened(_ context.Context, _ time.Time) {} 54 | func (c neverOpens) Closed(_ context.Context, _ time.Time) {} 55 | 56 | func (c neverOpens) ShouldOpen(_ context.Context, _ time.Time) bool { 57 | return false 58 | } 59 | 60 | func neverClosesFactory() OpenToClosed { 61 | return neverCloses{} 62 | } 63 | 64 | type neverCloses struct{} 65 | 66 | var _ OpenToClosed = neverCloses{} 67 | 68 | func (c neverCloses) Allow(_ context.Context, _ time.Time) bool { 69 | return false 70 | } 71 | 72 | func (c neverCloses) Success(_ context.Context, _ time.Time, _ time.Duration) {} 73 | func (c neverCloses) ErrFailure(_ context.Context, _ time.Time, _ time.Duration) {} 74 | func (c neverCloses) ErrTimeout(_ context.Context, _ time.Time, _ time.Duration) {} 75 | func (c neverCloses) ErrBadRequest(_ context.Context, _ time.Time, _ time.Duration) {} 76 | func (c neverCloses) ErrInterrupt(_ context.Context, _ time.Time, _ 
time.Duration) {} 77 | func (c neverCloses) ErrConcurrencyLimitReject(_ context.Context, _ time.Time) {} 78 | func (c neverCloses) ErrShortCircuit(_ context.Context, _ time.Time) {} 79 | func (c neverCloses) Opened(_ context.Context, _ time.Time) {} 80 | func (c neverCloses) Closed(_ context.Context, _ time.Time) {} 81 | func (c neverCloses) ShouldClose(_ context.Context, _ time.Time) bool { 82 | return false 83 | } 84 | -------------------------------------------------------------------------------- /closers/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package closers contains subpackages that control circuit open and close logic. 3 | */ 4 | package closers 5 | -------------------------------------------------------------------------------- /closers/hystrix/circuit_test.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sync" 7 | "sync/atomic" 8 | "testing" 9 | "time" 10 | 11 | "github.com/cep21/circuit/v4" 12 | "github.com/cep21/circuit/v4/internal/testhelp" 13 | ) 14 | 15 | func TestCloser_closes(t *testing.T) { 16 | f := Factory{ 17 | ConfigureOpener: ConfigureOpener{ 18 | RequestVolumeThreshold: 1, 19 | }, 20 | } 21 | h := circuit.Manager{ 22 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{ 23 | f.Configure, 24 | }, 25 | } 26 | c := h.MustCreateCircuit("TestCircuitCloses") 27 | 28 | if c.IsOpen() { 29 | t.Fatal("Circuit should not start out open") 30 | } 31 | err := c.Execute(context.Background(), testhelp.AlwaysFails, nil) 32 | if err == nil { 33 | t.Fatal("Circuit should have failed if run fails") 34 | } 35 | if !c.IsOpen() { 36 | t.Fatal("Circuit should be open after having failed once") 37 | } 38 | err = c.Execute(context.Background(), testhelp.AlwaysPasses, nil) 39 | if err == nil { 40 | t.Fatal("Circuit should be open") 41 | } 42 | } 43 | 44 | func TestCircuitAttemptsToReopen(t 
*testing.T) { 45 | // Use a more reliable sleep window to reduce timing flakiness 46 | sleepWindow := 10 * time.Millisecond 47 | 48 | c := circuit.NewCircuitFromConfig("TestCircuitAttemptsToReopen", circuit.Config{ 49 | General: circuit.GeneralConfig{ 50 | OpenToClosedFactory: CloserFactory(ConfigureCloser{ 51 | SleepWindow: sleepWindow, 52 | }), 53 | ClosedToOpenFactory: OpenerFactory(ConfigureOpener{ 54 | RequestVolumeThreshold: 1, 55 | }), 56 | }, 57 | }) 58 | 59 | // Verify initial state 60 | if c.IsOpen() { 61 | t.Fatal("Circuit should not start out open") 62 | } 63 | 64 | // Open the circuit with a failure 65 | err := c.Execute(context.Background(), testhelp.AlwaysFails, nil) 66 | if err == nil { 67 | t.Fatal("Circuit should have failed if run fails") 68 | } 69 | if !c.IsOpen() { 70 | t.Fatal("Circuit should be open after having failed once") 71 | } 72 | 73 | // Verify circuit is open 74 | err = c.Execute(context.Background(), testhelp.AlwaysPasses, nil) 75 | if err == nil { 76 | t.Fatal("Circuit should be open") 77 | } 78 | 79 | // Wait slightly longer than the sleep window to ensure it takes effect 80 | time.Sleep(sleepWindow * 2) 81 | 82 | // Try for 20 sec with increasing delay to avoid excessive CPU usage 83 | var i int 84 | maxAttempts := 20 85 | for i = 0; i < maxAttempts; i++ { 86 | err = c.Execute(context.Background(), testhelp.AlwaysPasses, nil) 87 | if err == nil { 88 | t.Logf("Circuit reopened after %d attempts", i+1) 89 | break 90 | } 91 | 92 | // Exponential backoff (100ms, 200ms, 400ms...) 93 | // Calculate backoff multiplier: 1, 2, 4, 8, etc. 
94 | var backoffMultiplier int64 = 1 95 | shift := i / 3 96 | if shift > 0 { 97 | if shift > 10 { 98 | shift = 10 // Cap the shift at 10 to prevent overflow 99 | } 100 | backoffMultiplier = int64(1) << shift // Use int64 to avoid overflow 101 | } 102 | sleepTime := time.Millisecond * 100 * time.Duration(backoffMultiplier) 103 | if sleepTime > time.Second { 104 | sleepTime = time.Second // Cap at 1 second 105 | } 106 | time.Sleep(sleepTime) 107 | } 108 | 109 | if i == maxAttempts { 110 | t.Fatalf("Circuit did not reopen after %d attempts and %s sleep window", 111 | maxAttempts, sleepWindow) 112 | } 113 | } 114 | 115 | func TestCircuitAttemptsToReopenOnlyOnce(t *testing.T) { 116 | c := circuit.NewCircuitFromConfig("TestCircuitAttemptsToReopenOnlyOnce", circuit.Config{ 117 | General: circuit.GeneralConfig{ 118 | OpenToClosedFactory: CloserFactory(ConfigureCloser{ 119 | SleepWindow: time.Millisecond, 120 | }), 121 | ClosedToOpenFactory: OpenerFactory(ConfigureOpener{ 122 | RequestVolumeThreshold: 1, 123 | }), 124 | }, 125 | }) 126 | if c.IsOpen() { 127 | t.Fatal("Circuit should not start out open") 128 | } 129 | err := c.Execute(context.Background(), testhelp.AlwaysFails, nil) 130 | if err == nil { 131 | t.Fatal("Circuit should have failed if run fails") 132 | } 133 | if !c.IsOpen() { 134 | t.Fatal("Circuit should be open after having failed once") 135 | } 136 | err = c.Execute(context.Background(), testhelp.AlwaysPasses, nil) 137 | if err == nil { 138 | t.Fatal("Circuit should be open") 139 | } 140 | 141 | time.Sleep(time.Millisecond * 3) 142 | err = c.Execute(context.Background(), testhelp.AlwaysFails, nil) 143 | if err == nil { 144 | t.Fatal("Circuit should try to reopen, but fail") 145 | } 146 | err = c.Execute(context.Background(), testhelp.AlwaysPasses, nil) 147 | if err == nil { 148 | t.Fatal("Circuit should only try to reopen once") 149 | } 150 | } 151 | 152 | func TestLargeSleepWindow(t *testing.T) { 153 | c := 
circuit.NewCircuitFromConfig("TestLargeSleepWindow", circuit.Config{ 154 | General: circuit.GeneralConfig{ 155 | OpenToClosedFactory: CloserFactory(ConfigureCloser{ 156 | SleepWindow: time.Hour, 157 | }), 158 | ClosedToOpenFactory: OpenerFactory(ConfigureOpener{ 159 | RequestVolumeThreshold: 1, 160 | ErrorThresholdPercentage: 1, 161 | }), 162 | }, 163 | }) 164 | 165 | err := c.Execute(context.Background(), testhelp.AlwaysFails, testhelp.AlwaysPassesFallback) 166 | if err != nil { 167 | t.Errorf("I expect this to not fail since it has a fallback") 168 | } 169 | 170 | if !c.IsOpen() { 171 | t.Fatalf("I expect the circuit to now be open, since the previous failure happened") 172 | } 173 | 174 | wg := sync.WaitGroup{} 175 | // Create many goroutines that never fail 176 | for i := 0; i < 10; i++ { 177 | wg.Add(1) 178 | go func() { 179 | defer wg.Done() 180 | for i := 0; i < 20*2; i++ { 181 | err := c.Execute(context.Background(), testhelp.SleepsForX(time.Millisecond/10), nil) 182 | if err == nil { 183 | t.Errorf("I expect this to always fail, now that it's in the failure state") 184 | } 185 | time.Sleep(time.Millisecond / 10) 186 | } 187 | }() 188 | } 189 | wg.Wait() 190 | } 191 | 192 | func TestSleepDurationWorks(t *testing.T) { 193 | concurrentThreads := 10 194 | sleepWindow := time.Millisecond * 25 195 | c := circuit.NewCircuitFromConfig("TestSleepDurationWorks", circuit.Config{ 196 | Execution: circuit.ExecutionConfig{ 197 | MaxConcurrentRequests: int64(concurrentThreads), 198 | }, 199 | Fallback: circuit.FallbackConfig{ 200 | MaxConcurrentRequests: int64(concurrentThreads), 201 | }, 202 | General: circuit.GeneralConfig{ 203 | OpenToClosedFactory: CloserFactory(ConfigureCloser{ 204 | SleepWindow: sleepWindow * 2, 205 | }), 206 | ClosedToOpenFactory: OpenerFactory(ConfigureOpener{ 207 | RequestVolumeThreshold: 1, 208 | ErrorThresholdPercentage: 1, 209 | }), 210 | }, 211 | }) 212 | 213 | // Once failing, c should never see more than one request every 40 ms 214 | // If 
I wait 110 ms, I should see exactly 2 requests (the one at 40 and at 80) 215 | doNotPassTime := time.Now().Add(sleepWindow * 4) 216 | err := c.Execute(context.Background(), testhelp.AlwaysFails, testhelp.AlwaysPassesFallback) 217 | if err != nil { 218 | t.Errorf("I expect this to not fail since it has a fallback") 219 | } 220 | 221 | if c.OpenToClose.(*Closer).Config().SleepWindow != time.Millisecond*50 { 222 | t.Errorf("I expect a 30 ms sleep window") 223 | } 224 | 225 | bc := testhelp.BehaviorCheck{ 226 | RunFunc: testhelp.AlwaysFails, 227 | } 228 | 229 | var lastRequestTime atomic.Value 230 | lastRequestTime.Store(time.Now()) 231 | ctx := context.Background() 232 | c.OpenCircuit(ctx) 233 | if !c.IsOpen() { 234 | t.Errorf("circuit should be open after I open it") 235 | } 236 | 237 | wg := sync.WaitGroup{} 238 | for ct := 0; ct < concurrentThreads; ct++ { 239 | testhelp.DoTillTime(doNotPassTime, &wg, func() { 240 | err := c.Execute(context.Background(), func(_ context.Context) error { 241 | now := time.Now() 242 | if now.Sub(lastRequestTime.Load().(time.Time)) < sleepWindow { 243 | t.Errorf("I am getting too many requests: %s", time.Since(lastRequestTime.Load().(time.Time))) 244 | } 245 | lastRequestTime.Store(now) 246 | if !c.IsOpen() { 247 | t.Error("This circuit should never close itself") 248 | } 249 | return errors.New("failure") 250 | }, testhelp.AlwaysPassesFallback) 251 | if err != nil { 252 | t.Errorf("The fallback was fine. 
It should not fail (but should stay open): %s", err) 253 | } 254 | }) 255 | } 256 | wg.Wait() 257 | if bc.TotalRuns > 3 { 258 | t.Error("Too many requests", bc.TotalRuns) 259 | } 260 | } 261 | 262 | func TestCircuitRecovers(t *testing.T) { 263 | concurrentThreads := 25 264 | sleepWindow := time.Millisecond * 5 265 | c := circuit.NewCircuitFromConfig("TestCircuitRecovers", circuit.Config{ 266 | General: circuit.GeneralConfig{ 267 | OpenToClosedFactory: CloserFactory(ConfigureCloser{ 268 | // // This should allow a new request every 10 milliseconds 269 | SleepWindow: time.Millisecond * 5, 270 | }), 271 | ClosedToOpenFactory: OpenerFactory(ConfigureOpener{ 272 | RequestVolumeThreshold: 1, 273 | ErrorThresholdPercentage: 1, 274 | }), 275 | }, 276 | Execution: circuit.ExecutionConfig{ 277 | MaxConcurrentRequests: int64(concurrentThreads), 278 | }, 279 | Fallback: circuit.FallbackConfig{ 280 | MaxConcurrentRequests: int64(concurrentThreads), 281 | }, 282 | }) 283 | 284 | // This is when the circuit starts working again 285 | startWorkingTime := time.Now().Add(sleepWindow * 2) 286 | // This is the latest that the circuit should keep failing requests 287 | circuitOkTime := startWorkingTime.Add(sleepWindow).Add(time.Millisecond * 200) 288 | 289 | // Give some buffer so time.AfterFunc can get called 290 | doNotPassTime := time.Now().Add(time.Millisecond * 250) 291 | err := c.Execute(context.Background(), testhelp.AlwaysFails, testhelp.AlwaysPassesFallback) 292 | if err != nil { 293 | t.Errorf("I expect this to not fail since it has a fallback") 294 | } 295 | if !c.IsOpen() { 296 | t.Errorf("I expect the circuit to open after that one, first failure") 297 | } 298 | 299 | workingAtThisTime := func(t time.Time) bool { 300 | return t.After(startWorkingTime) 301 | } 302 | 303 | failure := errors.New("a failure") 304 | bc := testhelp.BehaviorCheck{ 305 | RunFunc: func(_ context.Context) error { 306 | if workingAtThisTime(time.Now()) { 307 | return nil 308 | } 309 | return failure 
310 | }, 311 | } 312 | 313 | wg := sync.WaitGroup{} 314 | for ct := 0; ct < concurrentThreads; ct++ { 315 | testhelp.DoTillTime(doNotPassTime, &wg, func() { 316 | isCircuitOk := time.Now().After(circuitOkTime) 317 | justBeforeTime := time.Now() 318 | err := c.Execute(context.Background(), bc.Run, nil) 319 | if err != nil { 320 | if isCircuitOk { 321 | t.Fatalf("Should not get an error after this time: The circuit should be ok: %s", err) 322 | } 323 | if circuitOkTime.Before(justBeforeTime) { 324 | t.Fatalf("Should not get an error after the circuit healed itself") 325 | } 326 | } 327 | if err == nil { 328 | if time.Now().Before(startWorkingTime) { 329 | t.Fatalf("The circuit should not work before I correct the service") 330 | } 331 | } 332 | }) 333 | } 334 | wg.Wait() 335 | } 336 | -------------------------------------------------------------------------------- /closers/hystrix/closer.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "sync" 7 | "time" 8 | 9 | "github.com/cep21/circuit/v4" 10 | "github.com/cep21/circuit/v4/faststats" 11 | ) 12 | 13 | // Closer is hystrix's default half-open logic: try again every X ms 14 | type Closer struct { 15 | // Tracks when we should try to close an open circuit again 16 | reopenCircuitCheck faststats.TimedCheck 17 | 18 | concurrentSuccessfulAttempts faststats.AtomicInt64 19 | closeOnCurrentCount faststats.AtomicInt64 20 | 21 | mu sync.Mutex 22 | config ConfigureCloser 23 | } 24 | 25 | // CloserFactory creates Closer closer 26 | func CloserFactory(config ConfigureCloser) func() circuit.OpenToClosed { 27 | return func() circuit.OpenToClosed { 28 | s := Closer{} 29 | config.Merge(defaultConfigureCloser) 30 | s.SetConfigNotThreadSafe(config) 31 | return &s 32 | } 33 | } 34 | 35 | var _ circuit.OpenToClosed = &Closer{} 36 | 37 | // ConfigureCloser configures values for Closer 38 | type ConfigureCloser struct { 39 | // 
AfterFunc should simulate time.AfterFunc 40 | AfterFunc func(time.Duration, func()) *time.Timer `json:"-"` 41 | 42 | // SleepWindow is https://github.com/Netflix/Hystrix/wiki/Configuration#circuitbreakersleepwindowinmilliseconds 43 | SleepWindow time.Duration 44 | // HalfOpenAttempts is how many attempts to allow per SleepWindow 45 | HalfOpenAttempts int64 46 | // RequiredConcurrentSuccessful is how many consecutive passing requests are required before the circuit is closed 47 | RequiredConcurrentSuccessful int64 48 | } 49 | 50 | // Merge this configuration with another 51 | func (c *ConfigureCloser) Merge(other ConfigureCloser) { 52 | if c.SleepWindow == 0 { 53 | c.SleepWindow = other.SleepWindow 54 | } 55 | if c.HalfOpenAttempts == 0 { 56 | c.HalfOpenAttempts = other.HalfOpenAttempts 57 | } 58 | if c.RequiredConcurrentSuccessful == 0 { 59 | c.RequiredConcurrentSuccessful = other.RequiredConcurrentSuccessful 60 | } 61 | if c.AfterFunc == nil { 62 | c.AfterFunc = other.AfterFunc 63 | } 64 | } 65 | 66 | var defaultConfigureCloser = ConfigureCloser{ 67 | SleepWindow: 5 * time.Second, 68 | HalfOpenAttempts: 1, 69 | RequiredConcurrentSuccessful: 1, 70 | } 71 | 72 | // MarshalJSON returns closer information in a JSON format 73 | func (s *Closer) MarshalJSON() ([]byte, error) { 74 | return json.Marshal(map[string]interface{}{ 75 | "config": s.Config(), 76 | "concurrentSuccessfulAttempts": s.concurrentSuccessfulAttempts.Get(), 77 | }) 78 | } 79 | 80 | var _ json.Marshaler = &Closer{} 81 | 82 | // Opened circuit. It should now check to see if it should ever allow various requests in an attempt to become closed 83 | func (s *Closer) Opened(_ context.Context, now time.Time) { 84 | s.concurrentSuccessfulAttempts.Set(0) 85 | s.reopenCircuitCheck.SleepStart(now) 86 | } 87 | 88 | // Closed circuit. It can turn off now. 
89 | func (s *Closer) Closed(_ context.Context, now time.Time) { 90 | s.concurrentSuccessfulAttempts.Set(0) 91 | s.reopenCircuitCheck.SleepStart(now) 92 | } 93 | 94 | // Allow checks for half open state. 95 | // The circuit is currently open. Check and return true if this request should be allowed. This will signal 96 | // the circuit in a "half-open" state, allowing that one request. 97 | // If any requests are allowed, the circuit moves into a half open state. 98 | func (s *Closer) Allow(_ context.Context, now time.Time) (shouldAllow bool) { 99 | return s.reopenCircuitCheck.Check(now) 100 | } 101 | 102 | // Success any time runFunc was called and appeared healthy 103 | func (s *Closer) Success(_ context.Context, _ time.Time, _ time.Duration) { 104 | s.concurrentSuccessfulAttempts.Add(1) 105 | } 106 | 107 | // ErrBadRequest is ignored 108 | func (s *Closer) ErrBadRequest(_ context.Context, _ time.Time, _ time.Duration) { 109 | } 110 | 111 | // ErrInterrupt is ignored 112 | func (s *Closer) ErrInterrupt(_ context.Context, _ time.Time, _ time.Duration) { 113 | } 114 | 115 | // ErrConcurrencyLimitReject is ignored 116 | func (s *Closer) ErrConcurrencyLimitReject(_ context.Context, _ time.Time) { 117 | } 118 | 119 | // ErrShortCircuit is ignored 120 | func (s *Closer) ErrShortCircuit(_ context.Context, _ time.Time) { 121 | } 122 | 123 | // ErrFailure resets the consecutive Successful count 124 | func (s *Closer) ErrFailure(_ context.Context, _ time.Time, _ time.Duration) { 125 | s.concurrentSuccessfulAttempts.Set(0) 126 | } 127 | 128 | // ErrTimeout resets the consecutive Successful count 129 | func (s *Closer) ErrTimeout(_ context.Context, _ time.Time, _ time.Duration) { 130 | s.concurrentSuccessfulAttempts.Set(0) 131 | } 132 | 133 | // ShouldClose is true if we have enough successful attempts in a row. 
134 | func (s *Closer) ShouldClose(_ context.Context, _ time.Time) bool { 135 | return s.concurrentSuccessfulAttempts.Get() >= s.closeOnCurrentCount.Get() 136 | } 137 | 138 | // Config returns the current configuration. Use SetConfigThreadSafe to modify the current configuration. 139 | func (s *Closer) Config() ConfigureCloser { 140 | s.mu.Lock() 141 | defer s.mu.Unlock() 142 | return s.config 143 | } 144 | 145 | // SetConfigThreadSafe resets the sleep duration during reopen attempts 146 | func (s *Closer) SetConfigThreadSafe(config ConfigureCloser) { 147 | s.mu.Lock() 148 | defer s.mu.Unlock() 149 | s.config = config 150 | s.reopenCircuitCheck.TimeAfterFunc = config.AfterFunc 151 | s.reopenCircuitCheck.SetSleepDuration(config.SleepWindow) 152 | s.reopenCircuitCheck.SetEventCountToAllow(config.HalfOpenAttempts) 153 | s.closeOnCurrentCount.Set(config.RequiredConcurrentSuccessful) 154 | } 155 | 156 | // SetConfigNotThreadSafe just calls SetConfigThreadSafe. It is not safe to call while the circuit is active. 
157 | func (s *Closer) SetConfigNotThreadSafe(config ConfigureCloser) { 158 | s.SetConfigThreadSafe(config) 159 | } 160 | -------------------------------------------------------------------------------- /closers/hystrix/closer_test.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "sync" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func TestCloser_MarshalJSON(t *testing.T) { 12 | c := Closer{ 13 | config: ConfigureCloser{ 14 | HalfOpenAttempts: 12345, 15 | }, 16 | } 17 | asJSON, err := c.MarshalJSON() 18 | if err != nil { 19 | t.Fatal("unexpected error marshalling JSON") 20 | } 21 | if !strings.Contains(string(asJSON), "12345") { 22 | t.Fatal("Expect JSON to contain 12345") 23 | } 24 | } 25 | 26 | func TestCloser_NoPanics(t *testing.T) { 27 | defer func() { 28 | if r := recover(); r != nil { 29 | t.Fatal("Expected no panic") 30 | } 31 | }() 32 | ctx := context.Background() 33 | c := Closer{} 34 | wg := sync.WaitGroup{} 35 | // None of these should panic 36 | for i := 0; i < 10; i++ { 37 | wg.Add(1) 38 | go func() { 39 | defer wg.Done() 40 | c.ErrBadRequest(ctx, time.Now(), time.Second) 41 | c.ErrInterrupt(ctx, time.Now(), time.Second) 42 | c.ErrConcurrencyLimitReject(ctx, time.Now()) 43 | }() 44 | } 45 | wg.Wait() 46 | } 47 | 48 | func assertBool(t *testing.T, b bool, msg string) { 49 | if !b { 50 | t.Fatal(msg) 51 | } 52 | } 53 | 54 | func TestCloser_ConcurrentAttempts(t *testing.T) { 55 | ctx := context.Background() 56 | now := time.Now() 57 | 58 | c := Closer{} 59 | c.SetConfigNotThreadSafe(ConfigureCloser{ 60 | RequiredConcurrentSuccessful: 3, 61 | }) 62 | c.Opened(ctx, now) 63 | assertBool(t, !c.ShouldClose(ctx, now), "Expected the circuit to not yet close") 64 | c.Success(ctx, now, time.Second) 65 | assertBool(t, !c.ShouldClose(ctx, now), "Expected the circuit to not yet close") 66 | c.Success(ctx, now, time.Second) 67 | assertBool(t, !c.ShouldClose(ctx, now), 
"Expected the circuit to not yet close") 68 | c.Success(ctx, now, time.Second) 69 | assertBool(t, c.ShouldClose(ctx, now), "Expected the circuit to now close") 70 | 71 | // None of these should matter 72 | c.ErrBadRequest(ctx, now, time.Second) 73 | c.ErrInterrupt(ctx, now, time.Second) 74 | c.ErrConcurrencyLimitReject(ctx, now) 75 | assertBool(t, c.ShouldClose(ctx, now), "Expected the circuit to now close") 76 | 77 | c.ErrTimeout(ctx, now, time.Second) 78 | // Should reset closer 79 | assertBool(t, !c.ShouldClose(ctx, now), "Expected the circuit to not yet close") 80 | } 81 | 82 | func TestCloser_AfterFunc(t *testing.T) { 83 | ctx := context.Background() 84 | t.Run("afterfunc is used", func(t *testing.T) { 85 | var invocations int 86 | c := Closer{} 87 | c.SetConfigNotThreadSafe(ConfigureCloser{ 88 | AfterFunc: func(d time.Duration, f func()) *time.Timer { 89 | invocations++ 90 | return time.AfterFunc(d, f) 91 | }, 92 | RequiredConcurrentSuccessful: 3, 93 | }) 94 | 95 | now := time.Now() 96 | c.Opened(ctx, now) 97 | c.Success(ctx, now, time.Second) 98 | c.Success(ctx, now, time.Second) 99 | c.Success(ctx, now, time.Second) 100 | c.Success(ctx, now, time.Second) 101 | 102 | if invocations == 0 { 103 | t.Error("Expected mock AfterFunc to be used") 104 | } 105 | t.Log("invocations: ", invocations) 106 | }) 107 | t.Run("afterfunc is set if previously nil", func(t *testing.T) { 108 | var ( 109 | countD int 110 | c = ConfigureCloser{AfterFunc: nil} 111 | d = ConfigureCloser{AfterFunc: func(d time.Duration, f func()) *time.Timer { 112 | countD++ 113 | return time.AfterFunc(d+1, f) 114 | }} 115 | ) 116 | c.Merge(d) 117 | _ = c.AfterFunc(time.Second, func() {}) 118 | 119 | if countD != 1 { 120 | t.Errorf("expected merge to assign newer AfterFunc") 121 | } 122 | }) 123 | t.Run("afterfunc is not merged if already set", func(t *testing.T) { 124 | var ( 125 | countC, countD int 126 | 127 | c = ConfigureCloser{AfterFunc: func(d time.Duration, f func()) *time.Timer { 128 | 
countC++ 129 | return time.AfterFunc(d, f) 130 | }} 131 | d = ConfigureCloser{AfterFunc: func(d time.Duration, f func()) *time.Timer { 132 | countD++ 133 | return time.AfterFunc(d+1, f) 134 | }} 135 | ) 136 | c.Merge(d) 137 | _ = c.AfterFunc(time.Second, func() {}) 138 | 139 | if countD > 0 { 140 | t.Errorf("expected merge to maintain an already set AfterFunc") 141 | } 142 | if countC != 1 { 143 | t.Errorf("expected post-merge to invoke initially set AfterFunc") 144 | } 145 | }) 146 | } 147 | -------------------------------------------------------------------------------- /closers/hystrix/config.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import "github.com/cep21/circuit/v4" 4 | 5 | // Factory aids making hystrix circuit logic 6 | type Factory struct { 7 | ConfigureCloser ConfigureCloser 8 | ConfigureOpener ConfigureOpener 9 | CreateConfigureCloser []func(circuitName string) ConfigureCloser 10 | CreateConfigureOpener []func(circuitName string) ConfigureOpener 11 | } 12 | 13 | // Configure creates a circuit configuration constructor that uses hystrix open/close logic 14 | func (c *Factory) Configure(circuitName string) circuit.Config { 15 | return circuit.Config{ 16 | General: circuit.GeneralConfig{ 17 | OpenToClosedFactory: c.createCloser(circuitName), 18 | ClosedToOpenFactory: c.createOpener(circuitName), 19 | }, 20 | } 21 | } 22 | 23 | func (c *Factory) createCloser(circuitName string) func() circuit.OpenToClosed { 24 | finalConfig := ConfigureCloser{} 25 | // Merge in reverse order so the most recently appending constructor is more important 26 | for i := len(c.CreateConfigureCloser) - 1; i >= 0; i-- { 27 | finalConfig.Merge(c.CreateConfigureCloser[i](circuitName)) 28 | } 29 | finalConfig.Merge(c.ConfigureCloser) 30 | return CloserFactory(finalConfig) 31 | } 32 | 33 | func (c *Factory) createOpener(circuitName string) func() circuit.ClosedToOpen { 34 | finalConfig := ConfigureOpener{} 35 | // 
Merge in reverse order so the most recently appending constructor is more important 36 | for i := len(c.CreateConfigureOpener) - 1; i >= 0; i-- { 37 | finalConfig.Merge(c.CreateConfigureOpener[i](circuitName)) 38 | } 39 | finalConfig.Merge(c.ConfigureOpener) 40 | return OpenerFactory(finalConfig) 41 | } 42 | -------------------------------------------------------------------------------- /closers/hystrix/config_test.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestFactory(t *testing.T) { 9 | f := Factory{ 10 | CreateConfigureCloser: []func(circuitName string) ConfigureCloser{ 11 | func(_ string) ConfigureCloser { 12 | return ConfigureCloser{ 13 | SleepWindow: time.Second, 14 | } 15 | }, 16 | }, 17 | CreateConfigureOpener: []func(circuitName string) ConfigureOpener{ 18 | func(_ string) ConfigureOpener { 19 | return ConfigureOpener{ 20 | RequestVolumeThreshold: 10, 21 | } 22 | }, 23 | }, 24 | } 25 | cfg := f.Configure("testing") 26 | x := cfg.General.OpenToClosedFactory().(*Closer) 27 | if x.config.SleepWindow != time.Second { 28 | t.Fatal("Expected a second sleep window") 29 | } 30 | 31 | y := cfg.General.ClosedToOpenFactory().(*Opener) 32 | if y.config.RequestVolumeThreshold != 10 { 33 | t.Fatal("Expected 10 request volume threshold") 34 | } 35 | } 36 | 37 | func TestConfigureOpener(t *testing.T) { 38 | now := time.Now() 39 | c := ConfigureOpener{ 40 | RequestVolumeThreshold: 10, 41 | Now: func() time.Time { 42 | return now 43 | }, 44 | } 45 | if !c.now().Equal(now) { 46 | t.Fatal("now not using set Now") 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /closers/hystrix/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package hystrix is a Go implementation of Netflix's Hystrix logic for circuit breakers. 
It creates openers and closers 3 | for the circuit that behave the same as Netflix's Hystrix Java implementation. The easiest way to learn how to use this 4 | package is to look at the examples, especially ExampleFactory. 5 | */ 6 | package hystrix 7 | -------------------------------------------------------------------------------- /closers/hystrix/example_test.go: -------------------------------------------------------------------------------- 1 | package hystrix_test 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/cep21/circuit/v4" 8 | "github.com/cep21/circuit/v4/closers/hystrix" 9 | ) 10 | 11 | // This example configures the circuit to use Hystrix open/close logic with the default Hystrix parameters 12 | func ExampleFactory() { 13 | configuration := hystrix.Factory{ 14 | // Hystrix open logic is to open the circuit after an % of errors 15 | ConfigureOpener: hystrix.ConfigureOpener{ 16 | // We change the default to wait for 10 requests, not 20, before checking to close 17 | RequestVolumeThreshold: 10, 18 | // The default values match what hystrix does by default 19 | }, 20 | // Hystrix close logic is to sleep then check 21 | ConfigureCloser: hystrix.ConfigureCloser{ 22 | // The default values match what hystrix does by default 23 | }, 24 | } 25 | h := circuit.Manager{ 26 | // Tell the manager to use this configuration factory whenever it makes a new circuit 27 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{configuration.Configure}, 28 | } 29 | // This circuit will inherit the configuration from the example 30 | c := h.MustCreateCircuit("hystrix-circuit") 31 | fmt.Println("This is a hystrix configured circuit", c.Name()) 32 | // Output: This is a hystrix configured circuit hystrix-circuit 33 | } 34 | 35 | // Most configuration properties on [the Hystrix Configuration page](https://github.com/Netflix/Hystrix/wiki/Configuration) that say 36 | // they are modifyable at runtime can be changed on the Circuit in a thread safe way. 
Most of the ones that cannot are 37 | // related to stat collection. 38 | // 39 | // This example shows how to update hystrix configuration at runtime. 40 | func ExampleCloser_SetConfigThreadSafe() { 41 | // Start off using the defaults 42 | configuration := hystrix.Factory{} 43 | h := circuit.Manager{ 44 | // Tell the manager to use this configuration factory whenever it makes a new circuit 45 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{configuration.Configure}, 46 | } 47 | c := h.MustCreateCircuit("hystrix-circuit") 48 | fmt.Println("The default sleep window", c.OpenToClose.(*hystrix.Closer).Config().SleepWindow) 49 | // This configuration update function is thread safe. We can modify this at runtime while the circuit is active 50 | c.OpenToClose.(*hystrix.Closer).SetConfigThreadSafe(hystrix.ConfigureCloser{ 51 | SleepWindow: time.Second * 3, 52 | }) 53 | fmt.Println("The new sleep window", c.OpenToClose.(*hystrix.Closer).Config().SleepWindow) 54 | // Output: 55 | // The default sleep window 5s 56 | // The new sleep window 3s 57 | } 58 | -------------------------------------------------------------------------------- /closers/hystrix/opener.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "sync" 7 | "time" 8 | 9 | "github.com/cep21/circuit/v4" 10 | "github.com/cep21/circuit/v4/faststats" 11 | ) 12 | 13 | // Opener is ClosedToOpen that opens a circuit after a threshold and % error has been 14 | // reached. It is the default hystrix implementation. 
15 | type Opener struct { 16 | errorsCount faststats.RollingCounter 17 | legitimateAttemptsCount faststats.RollingCounter 18 | 19 | errorPercentage faststats.AtomicInt64 20 | requestVolumeThreshold faststats.AtomicInt64 21 | 22 | mu sync.Mutex 23 | config ConfigureOpener 24 | } 25 | 26 | var _ circuit.ClosedToOpen = &Opener{} 27 | 28 | // OpenerFactory creates a err % opener 29 | func OpenerFactory(config ConfigureOpener) func() circuit.ClosedToOpen { 30 | return func() circuit.ClosedToOpen { 31 | s := Opener{} 32 | config.Merge(defaultConfigureOpener) 33 | s.SetConfigNotThreadSafe(config) 34 | return &s 35 | } 36 | } 37 | 38 | // ConfigureOpener configures Opener 39 | type ConfigureOpener struct { 40 | // ErrorThresholdPercentage is https://github.com/Netflix/Hystrix/wiki/Configuration#circuitbreakererrorthresholdpercentage 41 | ErrorThresholdPercentage int64 42 | // RequestVolumeThreshold is https://github.com/Netflix/Hystrix/wiki/Configuration#circuitbreakerrequestvolumethreshold 43 | RequestVolumeThreshold int64 44 | // Now should simulate time.Now 45 | Now func() time.Time `json:"-"` 46 | // RollingDuration is https://github.com/Netflix/Hystrix/wiki/Configuration#metricsrollingstatstimeinmilliseconds 47 | RollingDuration time.Duration 48 | // NumBuckets is https://github.com/Netflix/Hystrix/wiki/Configuration#metricsrollingstatsnumbuckets 49 | NumBuckets int 50 | } 51 | 52 | func (c *ConfigureOpener) now() time.Time { 53 | if c.Now == nil { 54 | return time.Now() 55 | } 56 | return c.Now() 57 | } 58 | 59 | // Merge this configuration with another 60 | func (c *ConfigureOpener) Merge(other ConfigureOpener) { 61 | if c.ErrorThresholdPercentage == 0 { 62 | c.ErrorThresholdPercentage = other.ErrorThresholdPercentage 63 | } 64 | if c.RequestVolumeThreshold == 0 { 65 | c.RequestVolumeThreshold = other.RequestVolumeThreshold 66 | } 67 | if c.Now == nil { 68 | c.Now = other.Now 69 | } 70 | if c.RollingDuration == 0 { 71 | c.RollingDuration = other.RollingDuration 72 | 
} 73 | if c.NumBuckets == 0 { 74 | c.NumBuckets = other.NumBuckets 75 | } 76 | } 77 | 78 | var defaultConfigureOpener = ConfigureOpener{ 79 | RequestVolumeThreshold: 20, 80 | ErrorThresholdPercentage: 50, 81 | Now: time.Now, 82 | NumBuckets: 10, 83 | RollingDuration: 10 * time.Second, 84 | } 85 | 86 | // MarshalJSON returns opener information in a JSON format 87 | func (e *Opener) MarshalJSON() ([]byte, error) { 88 | cfg := e.Config() 89 | return json.Marshal(map[string]interface{}{ 90 | "config": cfg, 91 | "attempts": &e.legitimateAttemptsCount, 92 | "errors": &e.errorsCount, 93 | "err_%": e.errPercentage(cfg.now()), 94 | }) 95 | } 96 | 97 | var _ json.Marshaler = &Opener{} 98 | 99 | // Closed resets the error and attempt count 100 | func (e *Opener) Closed(_ context.Context, now time.Time) { 101 | e.errorsCount.Reset(now) 102 | e.legitimateAttemptsCount.Reset(now) 103 | } 104 | 105 | // Opened resets the error and attempt count 106 | func (e *Opener) Opened(_ context.Context, now time.Time) { 107 | e.errorsCount.Reset(now) 108 | e.legitimateAttemptsCount.Reset(now) 109 | } 110 | 111 | // Success increases the number of correct attempts 112 | func (e *Opener) Success(_ context.Context, now time.Time, _ time.Duration) { 113 | e.legitimateAttemptsCount.Inc(now) 114 | } 115 | 116 | // Prevent never returns true 117 | func (e *Opener) Prevent(_ context.Context, _ time.Time) (shouldAllow bool) { 118 | return false 119 | } 120 | 121 | // ErrBadRequest is ignored 122 | func (e *Opener) ErrBadRequest(_ context.Context, _ time.Time, _ time.Duration) {} 123 | 124 | // ErrInterrupt is ignored 125 | func (e *Opener) ErrInterrupt(_ context.Context, _ time.Time, _ time.Duration) {} 126 | 127 | // ErrFailure increases error count for the circuit 128 | func (e *Opener) ErrFailure(_ context.Context, now time.Time, _ time.Duration) { 129 | e.legitimateAttemptsCount.Inc(now) 130 | e.errorsCount.Inc(now) 131 | } 132 | 133 | // ErrTimeout increases error count for the circuit 134 | 
func (e *Opener) ErrTimeout(_ context.Context, now time.Time, _ time.Duration) { 135 | e.legitimateAttemptsCount.Inc(now) 136 | e.errorsCount.Inc(now) 137 | } 138 | 139 | // ErrConcurrencyLimitReject is ignored 140 | func (e *Opener) ErrConcurrencyLimitReject(_ context.Context, _ time.Time) {} 141 | 142 | // ErrShortCircuit is ignored 143 | func (e *Opener) ErrShortCircuit(_ context.Context, _ time.Time) {} 144 | 145 | // ShouldOpen returns true if rolling count >= threshold and 146 | // error % is high enough. 147 | func (e *Opener) ShouldOpen(_ context.Context, now time.Time) bool { 148 | attemptCount := e.legitimateAttemptsCount.RollingSumAt(now) 149 | if attemptCount == 0 || attemptCount < e.requestVolumeThreshold.Get() { 150 | // not enough requests. Will not open circuit 151 | return false 152 | } 153 | return int64(e.errPercentage(now)*100) >= e.errorPercentage.Get() 154 | } 155 | 156 | func (e *Opener) errPercentage(now time.Time) float64 { 157 | attemptCount := e.legitimateAttemptsCount.RollingSumAt(now) 158 | if attemptCount == 0 { 159 | // not enough requests (can't make a percent of zero) 160 | return -1 161 | } 162 | 163 | errCount := e.errorsCount.RollingSumAt(now) 164 | return float64(errCount) / float64(attemptCount) 165 | } 166 | 167 | // SetConfigThreadSafe modifies error % and request volume threshold 168 | func (e *Opener) SetConfigThreadSafe(props ConfigureOpener) { 169 | e.mu.Lock() 170 | defer e.mu.Unlock() 171 | e.config = props 172 | e.errorPercentage.Set(props.ErrorThresholdPercentage) 173 | e.requestVolumeThreshold.Set(props.RequestVolumeThreshold) 174 | } 175 | 176 | // SetConfigNotThreadSafe recreates the buckets. It is not safe to call while the circuit is active. 
177 | func (e *Opener) SetConfigNotThreadSafe(props ConfigureOpener) { 178 | e.SetConfigThreadSafe(props) 179 | now := props.Now() 180 | rollingCounterBucketWidth := time.Duration(props.RollingDuration.Nanoseconds() / int64(props.NumBuckets)) 181 | e.errorsCount = faststats.NewRollingCounter(rollingCounterBucketWidth, props.NumBuckets, now) 182 | e.legitimateAttemptsCount = faststats.NewRollingCounter(rollingCounterBucketWidth, props.NumBuckets, now) 183 | } 184 | 185 | // Config returns the current configuration. To update configuration, please call SetConfigThreadSafe or 186 | // SetConfigNotThreadSafe 187 | func (e *Opener) Config() ConfigureOpener { 188 | e.mu.Lock() 189 | defer e.mu.Unlock() 190 | return e.config 191 | } 192 | -------------------------------------------------------------------------------- /closers/hystrix/opener_test.go: -------------------------------------------------------------------------------- 1 | package hystrix 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestOpener_MarshalJSON(t *testing.T) { 11 | ctx := context.Background() 12 | o := Opener{} 13 | _, err := o.MarshalJSON() 14 | if err != nil { 15 | t.Fatal("expect no error doing initial marshal") 16 | } 17 | // 3 failures should exist in the output 18 | o.ErrFailure(ctx, time.Now(), time.Second) 19 | o.ErrFailure(ctx, time.Now(), time.Second) 20 | o.ErrFailure(ctx, time.Now(), time.Second) 21 | b, err := o.MarshalJSON() 22 | if err != nil { 23 | t.Fatal("expect no error doing marshal") 24 | } 25 | if !strings.Contains(string(b), "3") { 26 | t.Fatal("expect a 3 back") 27 | } 28 | } 29 | 30 | func TestOpener(t *testing.T) { 31 | ctx := context.Background() 32 | o := OpenerFactory(ConfigureOpener{ 33 | RequestVolumeThreshold: 3, 34 | })().(*Opener) 35 | if o.Config().RequestVolumeThreshold != 3 { 36 | t.Fatal("Should start at 3") 37 | } 38 | now := time.Now() 39 | if o.ShouldOpen(ctx, now) { 40 | t.Fatal("Should not start open") 41 | } 42 | 
o.ErrTimeout(ctx, now, time.Second) 43 | o.ErrFailure(ctx, now, time.Second) 44 | if o.ShouldOpen(ctx, now) { 45 | t.Fatal("Not enough requests to open") 46 | } 47 | // These should be ignored 48 | o.ErrBadRequest(ctx, now, time.Second) 49 | o.ErrInterrupt(ctx, now, time.Second) 50 | o.ErrConcurrencyLimitReject(ctx, now) 51 | if o.ShouldOpen(ctx, now) { 52 | t.Fatal("Not enough requests to open") 53 | } 54 | o.ErrFailure(ctx, now, time.Second) 55 | if !o.ShouldOpen(ctx, now) { 56 | t.Fatal("should now open") 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /closers/simplelogic/closers.go: -------------------------------------------------------------------------------- 1 | package simplelogic 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/cep21/circuit/v4" 8 | "github.com/cep21/circuit/v4/faststats" 9 | ) 10 | 11 | // ConsecutiveErrOpener is simple closed->open logic that opens on consecutive error counts 12 | type ConsecutiveErrOpener struct { 13 | consecutiveCount faststats.AtomicInt64 14 | closeThreshold faststats.AtomicInt64 15 | } 16 | 17 | // ConsecutiveErrOpenerFactory constructs a new ConsecutiveErrOpener 18 | func ConsecutiveErrOpenerFactory(config ConfigConsecutiveErrOpener) func() circuit.ClosedToOpen { 19 | return func() circuit.ClosedToOpen { 20 | ret := &ConsecutiveErrOpener{} 21 | config.Merge(defaultConfigConsecutiveErrOpener) 22 | ret.SetConfigThreadSafe(config) 23 | return ret 24 | } 25 | } 26 | 27 | // ConfigConsecutiveErrOpener configures a ConsecutiveErrOpener 28 | type ConfigConsecutiveErrOpener struct { 29 | ErrorThreshold int64 30 | } 31 | 32 | // Merge this config with another 33 | func (c *ConfigConsecutiveErrOpener) Merge(other ConfigConsecutiveErrOpener) { 34 | if c.ErrorThreshold == 0 { 35 | c.ErrorThreshold = other.ErrorThreshold 36 | } 37 | } 38 | 39 | var defaultConfigConsecutiveErrOpener = ConfigConsecutiveErrOpener{ 40 | ErrorThreshold: 10, 41 | } 42 | 43 | // Closed 
resets the consecutive error count 44 | func (c *ConsecutiveErrOpener) Closed(_ context.Context, _ time.Time) { 45 | c.consecutiveCount.Set(0) 46 | } 47 | 48 | // Prevent always returns false 49 | func (c *ConsecutiveErrOpener) Prevent(_ context.Context, _ time.Time) bool { 50 | return false 51 | } 52 | 53 | // Success resets the consecutive error count 54 | func (c *ConsecutiveErrOpener) Success(_ context.Context, _ time.Time, _ time.Duration) { 55 | c.consecutiveCount.Set(0) 56 | } 57 | 58 | // ErrBadRequest is ignored 59 | func (c *ConsecutiveErrOpener) ErrBadRequest(_ context.Context, _ time.Time, _ time.Duration) { 60 | } 61 | 62 | // ErrInterrupt is ignored 63 | func (c *ConsecutiveErrOpener) ErrInterrupt(_ context.Context, _ time.Time, _ time.Duration) { 64 | } 65 | 66 | // ErrConcurrencyLimitReject is ignored 67 | func (c *ConsecutiveErrOpener) ErrConcurrencyLimitReject(_ context.Context, _ time.Time) {} 68 | 69 | // ErrShortCircuit is ignored 70 | func (c *ConsecutiveErrOpener) ErrShortCircuit(_ context.Context, _ time.Time) {} 71 | 72 | // ErrFailure increments the consecutive error counter 73 | func (c *ConsecutiveErrOpener) ErrFailure(_ context.Context, _ time.Time, _ time.Duration) { 74 | c.consecutiveCount.Add(1) 75 | } 76 | 77 | // ErrTimeout increments the consecutive error counter 78 | func (c *ConsecutiveErrOpener) ErrTimeout(_ context.Context, _ time.Time, _ time.Duration) { 79 | c.consecutiveCount.Add(1) 80 | } 81 | 82 | // Opened resets the error counter 83 | func (c *ConsecutiveErrOpener) Opened(_ context.Context, _ time.Time) { 84 | c.consecutiveCount.Set(0) 85 | } 86 | 87 | // ShouldOpen returns true if enough consecutive errors have returned 88 | func (c *ConsecutiveErrOpener) ShouldOpen(_ context.Context, _ time.Time) bool { 89 | return c.consecutiveCount.Get() >= c.closeThreshold.Get() 90 | } 91 | 92 | // SetConfigThreadSafe updates the error threshold 93 | func (c *ConsecutiveErrOpener) SetConfigThreadSafe(props 
ConfigConsecutiveErrOpener) { 94 | c.closeThreshold.Set(props.ErrorThreshold) 95 | } 96 | 97 | // SetConfigNotThreadSafe updates the error threshold 98 | func (c *ConsecutiveErrOpener) SetConfigNotThreadSafe(props ConfigConsecutiveErrOpener) { 99 | c.SetConfigThreadSafe(props) 100 | } 101 | 102 | var _ circuit.ClosedToOpen = &ConsecutiveErrOpener{} 103 | -------------------------------------------------------------------------------- /closers/simplelogic/closers_test.go: -------------------------------------------------------------------------------- 1 | package simplelogic 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/cep21/circuit/v4" 10 | ) 11 | 12 | func TestConsecutiveErrOpenerFactory(t *testing.T) { 13 | f := ConsecutiveErrOpenerFactory(ConfigConsecutiveErrOpener{ 14 | ErrorThreshold: 5, 15 | }) 16 | opener := f().(*ConsecutiveErrOpener) 17 | if opener.closeThreshold.Get() != 5 { 18 | t.Errorf("Expected threshold to be 5, got %d", opener.closeThreshold.Get()) 19 | } 20 | 21 | // Test default config 22 | f = ConsecutiveErrOpenerFactory(ConfigConsecutiveErrOpener{}) 23 | opener = f().(*ConsecutiveErrOpener) 24 | if opener.closeThreshold.Get() != 10 { 25 | t.Errorf("Expected default threshold to be 10, got %d", opener.closeThreshold.Get()) 26 | } 27 | } 28 | 29 | func TestConsecutiveErrOpener_Merge(t *testing.T) { 30 | c := &ConfigConsecutiveErrOpener{} 31 | c.Merge(ConfigConsecutiveErrOpener{ 32 | ErrorThreshold: 15, 33 | }) 34 | if c.ErrorThreshold != 15 { 35 | t.Errorf("Expected threshold to be 15, got %d", c.ErrorThreshold) 36 | } 37 | 38 | // Don't override if already set 39 | c = &ConfigConsecutiveErrOpener{ 40 | ErrorThreshold: 5, 41 | } 42 | c.Merge(ConfigConsecutiveErrOpener{ 43 | ErrorThreshold: 15, 44 | }) 45 | if c.ErrorThreshold != 5 { 46 | t.Errorf("Expected threshold to remain 5, got %d", c.ErrorThreshold) 47 | } 48 | } 49 | 50 | func TestConsecutiveErrOpener_ShouldOpen(t *testing.T) { 51 | ctx := 
context.Background() 52 | now := time.Now() 53 | 54 | opener := &ConsecutiveErrOpener{} 55 | opener.SetConfigThreadSafe(ConfigConsecutiveErrOpener{ 56 | ErrorThreshold: 3, 57 | }) 58 | 59 | // Initially should not open 60 | if opener.ShouldOpen(ctx, now) { 61 | t.Error("Circuit should not open initially") 62 | } 63 | 64 | // Add errors and check when it should open 65 | opener.ErrFailure(ctx, now, time.Second) 66 | if opener.ShouldOpen(ctx, now) { 67 | t.Error("Circuit should not open after 1 error") 68 | } 69 | 70 | opener.ErrTimeout(ctx, now, time.Second) 71 | if opener.ShouldOpen(ctx, now) { 72 | t.Error("Circuit should not open after 2 errors") 73 | } 74 | 75 | opener.ErrFailure(ctx, now, time.Second) 76 | if !opener.ShouldOpen(ctx, now) { 77 | t.Error("Circuit should open after 3 errors") 78 | } 79 | 80 | // Reset on success 81 | opener.Success(ctx, now, time.Second) 82 | if opener.ShouldOpen(ctx, now) { 83 | t.Error("Circuit should not open after success resets counter") 84 | } 85 | 86 | // Reset when closed 87 | opener.ErrFailure(ctx, now, time.Second) 88 | opener.ErrFailure(ctx, now, time.Second) 89 | opener.Closed(ctx, now) 90 | if opener.ShouldOpen(ctx, now) { 91 | t.Error("Circuit should not open after closed resets counter") 92 | } 93 | 94 | // Reset when opened 95 | opener.ErrFailure(ctx, now, time.Second) 96 | opener.ErrFailure(ctx, now, time.Second) 97 | opener.ErrFailure(ctx, now, time.Second) 98 | if !opener.ShouldOpen(ctx, now) { 99 | t.Error("Circuit should open after 3 errors") 100 | } 101 | opener.Opened(ctx, now) 102 | if opener.ShouldOpen(ctx, now) { 103 | t.Error("Circuit should not open after opened resets counter") 104 | } 105 | } 106 | 107 | func TestConsecutiveErrOpener_Config(t *testing.T) { 108 | opener := &ConsecutiveErrOpener{} 109 | 110 | // Test thread-safe config 111 | opener.SetConfigThreadSafe(ConfigConsecutiveErrOpener{ 112 | ErrorThreshold: 7, 113 | }) 114 | if opener.closeThreshold.Get() != 7 { 115 | t.Errorf("Expected 
threshold to be 7, got %d", opener.closeThreshold.Get()) 116 | } 117 | 118 | // Test non-thread-safe config 119 | opener.SetConfigNotThreadSafe(ConfigConsecutiveErrOpener{ 120 | ErrorThreshold: 9, 121 | }) 122 | if opener.closeThreshold.Get() != 9 { 123 | t.Errorf("Expected threshold to be 9, got %d", opener.closeThreshold.Get()) 124 | } 125 | } 126 | 127 | func TestConsecutiveErrOpener_OtherMethods(t *testing.T) { 128 | ctx := context.Background() 129 | now := time.Now() 130 | opener := &ConsecutiveErrOpener{} 131 | 132 | // Test methods that don't affect counts 133 | if opener.Prevent(ctx, now) { 134 | t.Error("Prevent should always return false") 135 | } 136 | 137 | // These should not affect the counter 138 | opener.ErrBadRequest(ctx, now, time.Second) 139 | opener.ErrInterrupt(ctx, now, time.Second) 140 | opener.ErrConcurrencyLimitReject(ctx, now) 141 | opener.ErrShortCircuit(ctx, now) 142 | 143 | // These should not increment the counter 144 | if opener.consecutiveCount.Get() != 0 { 145 | t.Error("Methods should not have incremented error counter") 146 | } 147 | } 148 | 149 | func TestConsecutiveErrOpener_CircuitIntegration(t *testing.T) { 150 | configuration := circuit.Config{ 151 | General: circuit.GeneralConfig{ 152 | OpenToClosedFactory: nil, // Use default 153 | ClosedToOpenFactory: ConsecutiveErrOpenerFactory(ConfigConsecutiveErrOpener{ 154 | ErrorThreshold: 2, 155 | }), 156 | }, 157 | } 158 | 159 | h := circuit.Manager{} 160 | c := h.MustCreateCircuit("SimpleLogic", configuration) 161 | 162 | // Circuit should start closed 163 | if c.IsOpen() { 164 | t.Error("Circuit should start in a closed state") 165 | } 166 | 167 | // Bad requests shouldn't count towards failures 168 | err := c.Execute(context.Background(), func(_ context.Context) error { 169 | return circuit.SimpleBadRequest{} // Bad requests don't count 170 | }, nil) 171 | if err == nil { 172 | t.Error("Expected an error from bad request") 173 | } 174 | if c.IsOpen() { 175 | t.Error("Circuit 
should remain closed after a bad request") 176 | } 177 | 178 | // Two failures should open the circuit 179 | for i := 0; i < 2; i++ { 180 | err = c.Execute(context.Background(), func(_ context.Context) error { 181 | return fmt.Errorf("failure") 182 | }, nil) 183 | if err == nil { 184 | t.Error("Expected an error from failure") 185 | } 186 | } 187 | 188 | // Circuit should now be open 189 | if !c.IsOpen() { 190 | t.Error("Circuit should be open after threshold failures") 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /closers/simplelogic/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package simplelogic is a holding place for close and open circuit logic that is otherwise simple in use or complexity. 3 | */ 4 | package simplelogic 5 | -------------------------------------------------------------------------------- /closers_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestNeverOpen(t *testing.T) { 12 | ctx := context.Background() 13 | c := neverOpensFactory() 14 | require.False(t, c.ShouldOpen(ctx, time.Now())) 15 | require.False(t, c.Prevent(ctx, time.Now())) 16 | } 17 | 18 | func TestNeverClose(t *testing.T) { 19 | ctx := context.Background() 20 | c := neverClosesFactory() 21 | require.False(t, c.Allow(ctx, time.Now())) 22 | require.False(t, c.ShouldClose(ctx, time.Now())) 23 | } 24 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/cep21/circuit/v4/faststats" 7 | ) 8 | 9 | // Config controls how a circuit operates 10 | type Config struct { 11 | General GeneralConfig 12 | Execution 
ExecutionConfig 13 | Fallback FallbackConfig 14 | Metrics MetricsCollectors 15 | } 16 | 17 | // GeneralConfig controls the general logic of the circuit. Things specific to metrics, execution, or fallback are 18 | // in their own configs 19 | type GeneralConfig struct { 20 | // if disabled, Execute functions pass to just calling runFunc and do no tracking or fallbacks 21 | // Note: Java Manager calls this "Enabled". I call it "Disabled" so the zero struct can fill defaults 22 | Disabled bool `json:",omitempty"` 23 | // ForceOpen is https://github.com/Netflix/Hystrix/wiki/Configuration#circuitbreakerforceopen 24 | ForceOpen bool `json:",omitempty"` 25 | // ForcedClosed is https://github.com/Netflix/Hystrix/wiki/Configuration#circuitbreakerforceclosed 26 | ForcedClosed bool `json:",omitempty"` 27 | // GoLostErrors can receive errors that would otherwise be lost by `Go` executions. For example, if Go returns 28 | // early but some long time later an error or panic eventually happens. 29 | GoLostErrors func(err error, panics interface{}) `json:"-"` 30 | // ClosedToOpenFactory creates logic that determines if the circuit should go from Closed to Open state. 31 | // By default, it never opens 32 | ClosedToOpenFactory func() ClosedToOpen `json:"-"` 33 | // OpenToClosedFactory creates logic that determines if the circuit should go from Open to Closed state. 34 | // By default, it never closes 35 | OpenToClosedFactory func() OpenToClosed `json:"-"` 36 | // CustomConfig is anything you want. 37 | CustomConfig map[interface{}]interface{} `json:"-"` 38 | // TimeKeeper returns the current way to keep time. You only want to modify this for testing. 
39 | TimeKeeper TimeKeeper `json:"-"` 40 | } 41 | 42 | // ExecutionConfig is https://github.com/Netflix/Hystrix/wiki/Configuration#execution 43 | type ExecutionConfig struct { 44 | // ExecutionTimeout is https://github.com/Netflix/Hystrix/wiki/Configuration#execution.isolation.thread.timeoutInMilliseconds 45 | Timeout time.Duration 46 | // MaxConcurrentRequests is https://github.com/Netflix/Hystrix/wiki/Configuration#executionisolationsemaphoremaxconcurrentrequests 47 | MaxConcurrentRequests int64 48 | // Normally if the parent context is canceled before a timeout is reached, we don't consider the circuit 49 | // unhealthy. Set this to true to consider those circuits unhealthy. 50 | IgnoreInterrupts bool `json:",omitempty"` 51 | // IsErrInterrupt should return true if the error from the original context should be considered an interrupt error. 52 | // The error passed in will be a non-nil error returned by calling `Err()` on the context passed into Run. 53 | // The default behavior is to consider all errors from the original context interrupt caused errors. 54 | // Default behaviour: 55 | // IsErrInterrupt: function(e err) bool { return true } 56 | IsErrInterrupt func(originalContextError error) bool `json:"-"` 57 | } 58 | 59 | // FallbackConfig is https://github.com/Netflix/Hystrix/wiki/Configuration#fallback 60 | type FallbackConfig struct { 61 | // Enabled is opposite of https://github.com/Netflix/Hystrix/wiki/Configuration#circuitbreakerenabled 62 | // Note: Java Manager calls this "Enabled". I call it "Disabled" so the zero struct can fill defaults 63 | Disabled bool `json:",omitempty"` 64 | // MaxConcurrentRequests is https://github.com/Netflix/Hystrix/wiki/Configuration#fallback.isolation.semaphore.maxConcurrentRequests 65 | MaxConcurrentRequests int64 66 | } 67 | 68 | // MetricsCollectors can receive metrics during a circuit. They should be fast, as they will 69 | // block circuit operation during function calls. 
70 | type MetricsCollectors struct { 71 | Run []RunMetrics `json:"-"` 72 | Fallback []FallbackMetrics `json:"-"` 73 | Circuit []Metrics `json:"-"` 74 | } 75 | 76 | // TimeKeeper allows overriding time to test the circuit 77 | type TimeKeeper struct { 78 | // Now should simulate time.Now 79 | Now func() time.Time 80 | // AfterFunc should simulate time.AfterFunc 81 | AfterFunc func(time.Duration, func()) *time.Timer 82 | } 83 | 84 | // Configurable is anything that can receive configuration changes while live 85 | type Configurable interface { 86 | // SetConfigThreadSafe can be called while the circuit is currently being used and will modify things that are 87 | // safe to change live. 88 | SetConfigThreadSafe(props Config) 89 | // SetConfigNotThreadSafe should only be called when the circuit is not in use: otherwise it will fail -race 90 | // detection 91 | SetConfigNotThreadSafe(props Config) 92 | } 93 | 94 | func (t *TimeKeeper) merge(other TimeKeeper) { 95 | if t.Now == nil { 96 | t.Now = other.Now 97 | } 98 | if t.AfterFunc == nil { 99 | t.AfterFunc = other.AfterFunc 100 | } 101 | } 102 | 103 | func (c *ExecutionConfig) merge(other ExecutionConfig) { 104 | if !c.IgnoreInterrupts { 105 | c.IgnoreInterrupts = other.IgnoreInterrupts 106 | } 107 | if c.IsErrInterrupt == nil { 108 | c.IsErrInterrupt = other.IsErrInterrupt 109 | } 110 | if c.MaxConcurrentRequests == 0 { 111 | c.MaxConcurrentRequests = other.MaxConcurrentRequests 112 | } 113 | if c.Timeout == 0 { 114 | c.Timeout = other.Timeout 115 | } 116 | } 117 | 118 | func (c *FallbackConfig) merge(other FallbackConfig) { 119 | if c.MaxConcurrentRequests == 0 { 120 | c.MaxConcurrentRequests = other.MaxConcurrentRequests 121 | } 122 | if !c.Disabled { 123 | c.Disabled = other.Disabled 124 | } 125 | } 126 | 127 | func (g *GeneralConfig) mergeCustomConfig(other GeneralConfig) { 128 | if len(other.CustomConfig) != 0 { 129 | if g.CustomConfig == nil { 130 | g.CustomConfig = make(map[interface{}]interface{}, 
len(other.CustomConfig)) 131 | } 132 | for k, v := range other.CustomConfig { 133 | if _, exists := g.CustomConfig[k]; !exists { 134 | g.CustomConfig[k] = v 135 | } 136 | } 137 | } 138 | } 139 | 140 | func (g *GeneralConfig) merge(other GeneralConfig) { 141 | if g.ClosedToOpenFactory == nil { 142 | g.ClosedToOpenFactory = other.ClosedToOpenFactory 143 | } 144 | if g.OpenToClosedFactory == nil { 145 | g.OpenToClosedFactory = other.OpenToClosedFactory 146 | } 147 | g.mergeCustomConfig(other) 148 | 149 | if !g.ForceOpen { 150 | g.ForceOpen = other.ForceOpen 151 | } 152 | 153 | if !g.ForcedClosed { 154 | g.ForcedClosed = other.ForcedClosed 155 | } 156 | 157 | if !g.Disabled { 158 | g.Disabled = other.Disabled 159 | } 160 | 161 | if g.GoLostErrors == nil { 162 | g.GoLostErrors = other.GoLostErrors 163 | } 164 | g.TimeKeeper.merge(other.TimeKeeper) 165 | } 166 | 167 | func (m *MetricsCollectors) merge(other MetricsCollectors) { 168 | m.Run = append(m.Run, other.Run...) 169 | m.Fallback = append(m.Fallback, other.Fallback...) 170 | m.Circuit = append(m.Circuit, other.Circuit...) 171 | } 172 | 173 | // Merge these properties with another command's properties. Anything set to the zero value, will takes values from 174 | // other. 175 | func (c *Config) Merge(other Config) *Config { 176 | c.Execution.merge(other.Execution) 177 | c.Fallback.merge(other.Fallback) 178 | c.Metrics.merge(other.Metrics) 179 | c.General.merge(other.General) 180 | return c 181 | } 182 | 183 | // atomicCircuitConfig is used during circuit operations and allows atomic read/write operations. 
This lets users 184 | // change config at runtime without requiring locks on common operations 185 | type atomicCircuitConfig struct { 186 | Execution struct { 187 | ExecutionTimeout faststats.AtomicInt64 188 | MaxConcurrentRequests faststats.AtomicInt64 189 | } 190 | Fallback struct { 191 | Disabled faststats.AtomicBoolean 192 | MaxConcurrentRequests faststats.AtomicInt64 193 | } 194 | CircuitBreaker struct { 195 | ForceOpen faststats.AtomicBoolean 196 | ForcedClosed faststats.AtomicBoolean 197 | Disabled faststats.AtomicBoolean 198 | } 199 | GoSpecific struct { 200 | IgnoreInterrupts faststats.AtomicBoolean 201 | } 202 | } 203 | 204 | func (a *atomicCircuitConfig) reset(config Config) { 205 | a.CircuitBreaker.ForcedClosed.Set(config.General.ForcedClosed) 206 | a.CircuitBreaker.ForceOpen.Set(config.General.ForceOpen) 207 | a.CircuitBreaker.Disabled.Set(config.General.Disabled) 208 | 209 | a.Execution.ExecutionTimeout.Set(config.Execution.Timeout.Nanoseconds()) 210 | a.Execution.MaxConcurrentRequests.Set(config.Execution.MaxConcurrentRequests) 211 | 212 | a.GoSpecific.IgnoreInterrupts.Set(config.Execution.IgnoreInterrupts) 213 | 214 | a.Fallback.Disabled.Set(config.Fallback.Disabled) 215 | a.Fallback.MaxConcurrentRequests.Set(config.Fallback.MaxConcurrentRequests) 216 | } 217 | 218 | var defaultExecutionConfig = ExecutionConfig{ 219 | Timeout: time.Second, 220 | MaxConcurrentRequests: 10, 221 | } 222 | 223 | var defaultFallbackConfig = FallbackConfig{ 224 | MaxConcurrentRequests: 10, 225 | } 226 | 227 | var defaultGoSpecificConfig = GeneralConfig{ 228 | ClosedToOpenFactory: neverOpensFactory, 229 | OpenToClosedFactory: neverClosesFactory, 230 | TimeKeeper: TimeKeeper{ 231 | Now: time.Now, 232 | AfterFunc: time.AfterFunc, 233 | }, 234 | } 235 | 236 | var defaultCommandProperties = Config{ 237 | Execution: defaultExecutionConfig, 238 | Fallback: defaultFallbackConfig, 239 | General: defaultGoSpecificConfig, 240 | } 241 | 
-------------------------------------------------------------------------------- /config_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGeneralConfig_Merge(t *testing.T) { 10 | 11 | t.Run("respect Disabled field of args cfg", func(t *testing.T) { 12 | cfg := GeneralConfig{} 13 | 14 | cfg.merge(GeneralConfig{Disabled: true}) 15 | 16 | assert.True(t, cfg.Disabled, "expect to be true") 17 | }) 18 | 19 | t.Run("respect Disabled field of receiver cfg", func(t *testing.T) { 20 | cfg := GeneralConfig{Disabled: true} 21 | 22 | cfg.merge(GeneralConfig{Disabled: false}) 23 | 24 | assert.True(t, cfg.Disabled, "expect to be true") 25 | }) 26 | 27 | t.Run("respect ForceOpen field of args cfg", func(t *testing.T) { 28 | cfg := GeneralConfig{} 29 | 30 | cfg.merge(GeneralConfig{ForceOpen: true}) 31 | 32 | assert.True(t, cfg.ForceOpen, "expect to be true") 33 | }) 34 | 35 | t.Run("respect ForceOpen field of receiver cfg", func(t *testing.T) { 36 | cfg := GeneralConfig{ForceOpen: true} 37 | 38 | cfg.merge(GeneralConfig{ForceOpen: false}) 39 | 40 | assert.True(t, cfg.ForceOpen, "expect to be true") 41 | }) 42 | 43 | t.Run("respect ForceClosed field of args cfg", func(t *testing.T) { 44 | cfg := GeneralConfig{} 45 | 46 | cfg.merge(GeneralConfig{ForcedClosed: true}) 47 | 48 | assert.True(t, cfg.ForcedClosed, "expect to be true") 49 | }) 50 | 51 | t.Run("respect ForceClosed field of receiver cfg", func(t *testing.T) { 52 | cfg := GeneralConfig{ForcedClosed: true} 53 | 54 | cfg.merge(GeneralConfig{ForcedClosed: false}) 55 | 56 | assert.True(t, cfg.ForcedClosed, "expect to be true") 57 | }) 58 | 59 | } 60 | 61 | func TestExecutionConfig_Merge(t *testing.T) { 62 | 63 | t.Run("isErrInterrupt check function", func(t *testing.T) { 64 | cfg := ExecutionConfig{} 65 | 66 | cfg.merge(ExecutionConfig{IsErrInterrupt: func(e error) bool {
return e != nil }}) 67 | 68 | assert.NotNil(t, cfg.IsErrInterrupt) 69 | }) 70 | 71 | t.Run("ignore isErrInterrupt if previously set", func(t *testing.T) { 72 | fn1 := func(err error) bool { return true } 73 | fn2 := func(err error) bool { return false } 74 | 75 | cfg := ExecutionConfig{ 76 | IsErrInterrupt: fn1, 77 | } 78 | 79 | cfg.merge(ExecutionConfig{IsErrInterrupt: fn2}) 80 | 81 | assert.NotNil(t, cfg.IsErrInterrupt) 82 | assert.True(t, cfg.IsErrInterrupt(nil)) 83 | }) 84 | } 85 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package circuit is a Go implementation of the circuit breaker pattern. Most documentation is available on 3 | the GitHub README page https://github.com/cep21/circuit/blob/master/README.md 4 | 5 | # Use case 6 | 7 | Netflix describes most use cases on their wiki for Hystrix at https://github.com/Netflix/Hystrix/wiki. Quoting the wiki: 8 | 9 | Give protection from and control over latency and failure from dependencies accessed (typically over the network) via third-party client libraries. 10 | Stop cascading failures in a complex distributed system. 11 | Fail fast and rapidly recover. 12 | Fallback and gracefully degrade when possible. 13 | Enable near real-time monitoring, alerting, and operational control. 14 | 15 | It is a great library for microservice applications that require a large number of calls to many, small services where 16 | any one of these calls could fail, or any of these services could be down or degraded. 17 | 18 | # Getting started 19 | 20 | The godoc contains many examples. Look at them for a good start on how to get started integrating and using the 21 | Hystrix library for Go. 22 | 23 | # Circuit Flowchart 24 | 25 | A circuit starts Closed. 
The default logic is to open a circuit if more than 20 requests have come in during a 10-second 26 | window, and over 50% of requests during that 10-second window are failing. 27 | 28 | Once failed, the circuit waits 10 seconds before allowing a single request. If that request succeeds, then the circuit 29 | closes. If it fails, then the circuit waits another 10 seconds before allowing another request (and so on). 30 | 31 | Almost every part of this flow can be configured. See the CommandProperties struct for information. 32 | 33 | # Metric tracking 34 | 35 | All circuits record circuit stats that you can fetch out of the Circuit at any time. In addition, you can also inject 36 | your own circuit stat trackers by modifying the MetricsCollectors structure. 37 | */ 38 | package circuit 39 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | var errThrottledConcurrentCommands = &circuitError{concurrencyLimitReached: true, msg: "throttling connections to command"} 9 | var errCircuitOpen = &circuitError{circuitOpen: true, msg: "circuit is open"} 10 | 11 | // circuitError is used for internally generated errors 12 | type circuitError struct { 13 | concurrencyLimitReached bool 14 | circuitOpen bool 15 | msg string 16 | } 17 | 18 | var _ Error = &circuitError{} 19 | 20 | // Error is the type of error returned by internal errors using the circuit library. 21 | type Error interface { 22 | error 23 | // ConcurrencyLimitReached returns true if this error is because the concurrency limit has been reached. 24 | ConcurrencyLimitReached() bool 25 | // CircuitOpen returns true if this error is because the circuit is open. 
26 | CircuitOpen() bool 27 | } 28 | 29 | func (m *circuitError) Error() string { 30 | return fmt.Sprintf("%s: concurrencyReached=%t circuitOpen=%t", m.msg, m.ConcurrencyLimitReached(), m.CircuitOpen()) 31 | } 32 | 33 | func (m *circuitError) ConcurrencyLimitReached() bool { 34 | return m.concurrencyLimitReached 35 | } 36 | 37 | func (m *circuitError) CircuitOpen() bool { 38 | return m.circuitOpen 39 | } 40 | 41 | // BadRequest is implemented by an error returned by runFunc if you want to consider the requestor bad, not the circuit 42 | // bad. See http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/exception/HystrixBadRequestException.html 43 | // and https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation for information. 44 | type BadRequest interface { 45 | BadRequest() bool 46 | } 47 | 48 | // IsBadRequest returns true if the error is of type BadRequest 49 | func IsBadRequest(err error) bool { 50 | if err == nil { 51 | return false 52 | } 53 | var br BadRequest 54 | return errors.As(err, &br) && br.BadRequest() 55 | } 56 | 57 | // SimpleBadRequest is a simple wrapper for an error to mark it as a bad request 58 | type SimpleBadRequest struct { 59 | Err error 60 | } 61 | 62 | // Cause returns the wrapped error 63 | func (s SimpleBadRequest) Cause() error { 64 | return s.Err 65 | } 66 | 67 | // Cause returns the wrapped error 68 | func (s SimpleBadRequest) Error() string { 69 | return s.Err.Error() 70 | } 71 | 72 | // BadRequest always returns true 73 | func (s SimpleBadRequest) BadRequest() bool { 74 | return true 75 | } 76 | 77 | var _ error = &SimpleBadRequest{} 78 | var _ BadRequest = &SimpleBadRequest{} 79 | 80 | var _ error = &circuitError{} 81 | -------------------------------------------------------------------------------- /errors_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "testing" 7 | 8 | 
"github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestIsBadRequest(t *testing.T) { 12 | require.False(t, IsBadRequest(nil)) 13 | require.False(t, IsBadRequest(errors.New("not bad"))) 14 | require.False(t, IsBadRequest(errThrottledConcurrentCommands)) 15 | require.False(t, IsBadRequest(errCircuitOpen)) 16 | require.False(t, IsBadRequest(&circuitError{})) 17 | require.True(t, IsBadRequest(&SimpleBadRequest{})) 18 | wrappedErr := fmt.Errorf("wrapped: %w", &SimpleBadRequest{}) 19 | require.True(t, IsBadRequest(wrappedErr)) 20 | require.False(t, IsBadRequest(fmt.Errorf("wrapped: %w", errors.New("not bad")))) 21 | } 22 | -------------------------------------------------------------------------------- /example/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Run this simple Go program to see what circuits look like. You can explore their hystrix stream and expvar values. 3 | */ 4 | package main 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "expvar" 10 | "flag" 11 | "log" 12 | "math/rand" 13 | "net" 14 | "net/http" 15 | "sync/atomic" 16 | "time" 17 | 18 | "github.com/cep21/circuit/v4" 19 | "github.com/cep21/circuit/v4/closers/hystrix" 20 | "github.com/cep21/circuit/v4/metriceventstream" 21 | "github.com/cep21/circuit/v4/metrics/rolling" 22 | ) 23 | 24 | // nolint:lll 25 | const exampleURL = "http://localhost:7979/hystrix-dashboard/monitor/monitor.html?streams=%5B%7B%22name%22%3A%22%22%2C%22stream%22%3A%22http%3A%2F%2Flocalhost%3A8123%2Fhystrix.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%5D" 26 | 27 | func main() { 28 | f := rolling.StatFactory{} 29 | h := circuit.Manager{ 30 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{f.CreateConfig}, 31 | } 32 | expvar.Publish("hystrix", h.Var()) 33 | es := metriceventstream.MetricEventStream{ 34 | Manager: &h, 35 | } 36 | go func() { 37 | log.Fatal(es.Start()) 38 | }() 39 | interval := flag.Duration("interval", time.Millisecond*100, "Setup 
duration between metric ticks") 40 | flag.Parse() 41 | createBackgroundCircuits(&h, *interval) 42 | http.Handle("/hystrix.stream", &es) 43 | sock, err := net.Listen("tcp", "127.0.0.1:8123") 44 | if err != nil { 45 | panic(err) 46 | } 47 | log.Println("Serving on socket :8123") 48 | log.Println("To view the stream, execute: ") 49 | log.Println(" curl http://127.0.0.1:8123/hystrix.stream") 50 | log.Println() 51 | log.Println("To view expvar metrics, visit expvar in your browser") 52 | log.Println(" http://127.0.0.1:8123/debug/vars") 53 | log.Println() 54 | log.Println("To view a dashboard, follow the instructions at https://github.com/Netflix/Hystrix/wiki/Dashboard#run-via-gradle") 55 | log.Println(" git clone git@github.com:Netflix/Hystrix.git") 56 | log.Println(" cd Hystrix/hystrix-dashboard") 57 | log.Println(" ../gradlew jettyRun") 58 | log.Println() 59 | log.Println("Then, add the stream http://127.0.0.1:8123/hystrix.stream") 60 | log.Println() 61 | log.Println("A URL directly to the page usually looks something like this") 62 | log.Printf(" %s\n", exampleURL) 63 | log.Fatal(http.Serve(sock, nil)) 64 | } 65 | 66 | func mustFail(err error) { 67 | if err == nil { 68 | panic("Expected a failure") 69 | } 70 | } 71 | 72 | func mustPass(err error) { 73 | if err != nil { 74 | panic(err) 75 | } 76 | } 77 | 78 | func setupAlwaysFails(h *circuit.Manager, tickInterval time.Duration) { 79 | failureCircuit := h.MustCreateCircuit("always-fails", circuit.Config{ 80 | General: circuit.GeneralConfig{ 81 | OpenToClosedFactory: hystrix.CloserFactory(hystrix.ConfigureCloser{}), 82 | ClosedToOpenFactory: hystrix.OpenerFactory(hystrix.ConfigureOpener{}), 83 | }, 84 | }) 85 | go func() { 86 | for range time.Tick(tickInterval) { 87 | mustFail(failureCircuit.Execute(context.Background(), func(ctx context.Context) error { 88 | return errors.New("a failure") 89 | }, nil)) 90 | } 91 | }() 92 | } 93 | 94 | func setupBadRequest(h *circuit.Manager, tickInterval time.Duration) { 95 | 
failingBadRequest := h.MustCreateCircuit("always-fails-bad-request", circuit.Config{ 96 | General: circuit.GeneralConfig{ 97 | OpenToClosedFactory: hystrix.CloserFactory(hystrix.ConfigureCloser{}), 98 | ClosedToOpenFactory: hystrix.OpenerFactory(hystrix.ConfigureOpener{}), 99 | }, 100 | }) 101 | go func() { 102 | for range time.Tick(tickInterval) { 103 | mustFail(failingBadRequest.Execute(context.Background(), func(ctx context.Context) error { 104 | return circuit.SimpleBadRequest{Err: errors.New("bad user input")} 105 | }, nil)) 106 | } 107 | }() 108 | } 109 | 110 | func setupFailsOriginalContext(h *circuit.Manager, tickInterval time.Duration) { 111 | failingOriginalContextCanceled := h.MustCreateCircuit("always-fails-original-context", circuit.Config{ 112 | General: circuit.GeneralConfig{ 113 | OpenToClosedFactory: hystrix.CloserFactory(hystrix.ConfigureCloser{}), 114 | ClosedToOpenFactory: hystrix.OpenerFactory(hystrix.ConfigureOpener{}), 115 | }, 116 | }) 117 | go func() { 118 | for range time.Tick(tickInterval) { 119 | endedContext, cancel := context.WithCancel(context.Background()) 120 | cancel() 121 | mustFail(failingOriginalContextCanceled.Execute(endedContext, func(ctx context.Context) error { 122 | return errors.New("a failure, but it's not my fault") 123 | }, nil)) 124 | } 125 | }() 126 | } 127 | 128 | func setupAlwaysPasses(h *circuit.Manager, tickInterval time.Duration) { 129 | passingCircuit := h.MustCreateCircuit("always-passes", circuit.Config{}) 130 | go func() { 131 | for range time.Tick(tickInterval) { 132 | mustPass(passingCircuit.Execute(context.Background(), func(ctx context.Context) error { 133 | return nil 134 | }, nil)) 135 | } 136 | }() 137 | } 138 | 139 | func setupTimesOut(h *circuit.Manager, tickInterval time.Duration) { 140 | timeOutCircuit := h.MustCreateCircuit("always-times-out", circuit.Config{ 141 | Execution: circuit.ExecutionConfig{ 142 | Timeout: time.Millisecond, 143 | }, 144 | }) 145 | go func() { 146 | for range 
time.Tick(tickInterval) { 147 | mustFail(timeOutCircuit.Execute(context.Background(), func(ctx context.Context) error { 148 | <-ctx.Done() 149 | return ctx.Err() 150 | }, nil)) 151 | } 152 | }() 153 | } 154 | 155 | func setupFallsBack(h *circuit.Manager, tickInterval time.Duration) { 156 | fallbackCircuit := h.MustCreateCircuit("always-falls-back", circuit.Config{ 157 | Execution: circuit.ExecutionConfig{ 158 | Timeout: time.Millisecond, 159 | }, 160 | }) 161 | go func() { 162 | for range time.Tick(tickInterval) { 163 | mustPass(fallbackCircuit.Execute(context.Background(), func(ctx context.Context) error { 164 | return errors.New("a failure") 165 | }, func(ctx context.Context, err error) error { 166 | return nil 167 | })) 168 | } 169 | }() 170 | } 171 | 172 | func setupRandomExecutionTime(h *circuit.Manager, tickInterval time.Duration) { 173 | randomExecutionTime := h.MustCreateCircuit("random-execution-time", circuit.Config{ 174 | Execution: circuit.ExecutionConfig{}, 175 | }) 176 | go func() { 177 | for range time.Tick(tickInterval) { 178 | mustPass(randomExecutionTime.Execute(context.Background(), func(ctx context.Context) error { 179 | select { 180 | // Some time between 0 and 50ms 181 | case <-time.After(time.Duration(int64(float64(time.Millisecond.Nanoseconds()*50) * rand.Float64()))): 182 | return nil 183 | case <-ctx.Done(): 184 | return ctx.Err() 185 | } 186 | }, func(ctx context.Context, err error) error { 187 | return nil 188 | })) 189 | } 190 | }() 191 | } 192 | 193 | func setupFloppyCircuit(h *circuit.Manager, tickInterval time.Duration) { 194 | // Flop every 3 seconds, try to recover very quickly 195 | floppyCircuit := h.MustCreateCircuit("floppy-circuit", circuit.Config{ 196 | General: circuit.GeneralConfig{ 197 | OpenToClosedFactory: hystrix.CloserFactory(hystrix.ConfigureCloser{ 198 | // // This should allow a new request every 10 milliseconds 199 | SleepWindow: time.Millisecond * 10, 200 | }), 201 | ClosedToOpenFactory: 
hystrix.OpenerFactory(hystrix.ConfigureOpener{ 202 | RequestVolumeThreshold: 2, 203 | }), 204 | }, 205 | }) 206 | floppyCircuitPasses := int64(1) 207 | go func() { 208 | isPassing := true 209 | for range time.Tick(time.Second * 3) { 210 | if isPassing { 211 | atomic.StoreInt64(&floppyCircuitPasses, 0) 212 | } else { 213 | atomic.StoreInt64(&floppyCircuitPasses, 1) 214 | } 215 | isPassing = !isPassing 216 | } 217 | }() 218 | for i := 0; i < 10; i++ { 219 | go func() { 220 | totalErrors := 0 221 | for range time.Tick(tickInterval) { 222 | // Errors flop back and forth 223 | err := floppyCircuit.Execute(context.Background(), func(ctx context.Context) error { 224 | if atomic.LoadInt64(&floppyCircuitPasses) == 1 { 225 | return nil 226 | } 227 | return errors.New("i'm failing now") 228 | }, func(ctx context.Context, err error) error { 229 | return nil 230 | }) 231 | if err != nil { 232 | totalErrors++ 233 | } 234 | } 235 | }() 236 | } 237 | } 238 | 239 | func setupThrottledCircuit(h *circuit.Manager, tickInterval time.Duration) { 240 | throttledCircuit := h.MustCreateCircuit("throttled-circuit", circuit.Config{ 241 | Execution: circuit.ExecutionConfig{ 242 | MaxConcurrentRequests: 2, 243 | }, 244 | }) 245 | // 100 threads, every 100ms, someone will get throttled 246 | for i := 0; i < 100; i++ { 247 | go func() { 248 | totalErrors := 0 249 | for range time.Tick(tickInterval) { 250 | // Some pass (not throttled) and some don't (throttled) 251 | err := throttledCircuit.Execute(context.Background(), func(ctx context.Context) error { 252 | select { 253 | // Some time between 0 and 50ms 254 | case <-time.After(time.Duration(int64(float64(time.Millisecond.Nanoseconds()*50) * rand.Float64()))): 255 | return nil 256 | case <-ctx.Done(): 257 | return ctx.Err() 258 | } 259 | }, nil) 260 | if err != nil { 261 | totalErrors++ 262 | } 263 | } 264 | }() 265 | } 266 | } 267 | 268 | func createBackgroundCircuits(h *circuit.Manager, tickInterval time.Duration) { 269 | setupAlwaysFails(h, 
tickInterval) 270 | setupBadRequest(h, tickInterval) 271 | setupFailsOriginalContext(h, tickInterval) 272 | setupAlwaysPasses(h, tickInterval) 273 | setupTimesOut(h, tickInterval) 274 | setupFallsBack(h, tickInterval) 275 | setupRandomExecutionTime(h, tickInterval) 276 | setupFloppyCircuit(h, tickInterval) 277 | setupThrottledCircuit(h, tickInterval) 278 | } 279 | -------------------------------------------------------------------------------- /example_test.go: -------------------------------------------------------------------------------- 1 | package circuit_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "expvar" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | "net/http/httptest" 12 | "time" 13 | 14 | "github.com/cep21/circuit/v4" 15 | "github.com/cep21/circuit/v4/metrics/rolling" 16 | ) 17 | 18 | // This is a full example of using a circuit around HTTP requests. 19 | func Example_http() { 20 | h := circuit.Manager{} 21 | c := h.MustCreateCircuit("hello-http", circuit.Config{ 22 | Execution: circuit.ExecutionConfig{ 23 | // Timeout after 3 seconds 24 | Timeout: time.Second * 3, 25 | }, 26 | }) 27 | 28 | testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { 29 | _, _ = io.WriteString(rw, "hello world") 30 | })) 31 | defer testServer.Close() 32 | 33 | var body bytes.Buffer 34 | runErr := c.Run(context.Background(), func(ctx context.Context) error { 35 | req, err := http.NewRequest("GET", testServer.URL, nil) 36 | if err != nil { 37 | return circuit.SimpleBadRequest{Err: err} 38 | } 39 | req = req.WithContext(ctx) 40 | resp, err := http.DefaultClient.Do(req) 41 | if err != nil { 42 | return err 43 | } 44 | if resp.StatusCode >= 400 && resp.StatusCode <= 499 { 45 | return circuit.SimpleBadRequest{Err: errors.New("server found your request invalid")} 46 | } 47 | if resp.StatusCode < 200 || resp.StatusCode > 299 { 48 | return fmt.Errorf("invalid status code: %d", resp.StatusCode) 49 | } 50 | if _, err := 
io.Copy(&body, resp.Body); err != nil { 51 | return err 52 | } 53 | return resp.Body.Close() 54 | }) 55 | if runErr == nil { 56 | fmt.Printf("We saw a body\n") 57 | return 58 | } 59 | fmt.Printf("There was an error with the request: %s\n", runErr) 60 | // Output: We saw a body 61 | } 62 | 63 | // This example shows how to create a hello-world circuit from the circuit manager 64 | func ExampleManager_MustCreateCircuit_helloworld() { 65 | // Manages all our circuits 66 | h := circuit.Manager{} 67 | // Create a circuit with a unique name 68 | c := h.MustCreateCircuit("hello-world") 69 | // Call the circuit 70 | errResult := c.Execute(context.Background(), func(ctx context.Context) error { 71 | return nil 72 | }, nil) 73 | fmt.Println("Result of execution:", errResult) 74 | // Output: Result of execution: 75 | } 76 | 77 | // If the context passed into a circuit function ends, before the circuit can 78 | // finish, it does not count the circuit as unhealthy. You can disable this 79 | // behavior with the `IgnoreInterrupts` flag. 80 | // 81 | // This example proves that terminating a circuit call early because the passed in context died does not, by default, 82 | // count as an error on the circuit. 
It also demonstrates setting up internal stat collection by default for all 83 | // circuits 84 | func ExampleCircuit_noearlyterminate() { 85 | // Inject stat collection to prove these failures don't count 86 | f := rolling.StatFactory{} 87 | manager := circuit.Manager{ 88 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{ 89 | f.CreateConfig, 90 | }, 91 | } 92 | c := manager.MustCreateCircuit("don't fail me bro") 93 | // The passed in context times out in one millisecond 94 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) 95 | defer cancel() 96 | errResult := c.Execute(ctx, func(ctx context.Context) error { 97 | select { 98 | case <-ctx.Done(): 99 | // This will return early, with an error, since the parent context was canceled after 1 ms 100 | return ctx.Err() 101 | case <-time.After(time.Hour): 102 | panic("We never actually get this far") 103 | } 104 | }, nil) 105 | rs := f.RunStats("don't fail me bro") 106 | fmt.Println("errResult is", errResult) 107 | fmt.Println("The error and timeout count is", rs.ErrTimeouts.TotalSum()+rs.ErrFailures.TotalSum()) 108 | // Output: errResult is context deadline exceeded 109 | // The error and timeout count is 0 110 | } 111 | 112 | // This example shows how fallbacks execute to return alternate errors or provide 113 | // logic when the circuit is open. 
114 | func ExampleCircuit_Execute_fallbackhelloworld() { 115 | // You can create circuits without using the manager 116 | c := circuit.NewCircuitFromConfig("hello-world-fallback", circuit.Config{}) 117 | errResult := c.Execute(context.Background(), func(ctx context.Context) error { 118 | return errors.New("this will fail") 119 | }, func(ctx context.Context, err error) error { 120 | fmt.Println("Circuit failed with error, but fallback returns nil") 121 | return nil 122 | }) 123 | fmt.Println("Execution result:", errResult) 124 | // Output: Circuit failed with error, but fallback returns nil 125 | // Execution result: 126 | } 127 | 128 | // This example shows execute failing (marking the circuit with a failure), but not returning an error 129 | // back to the user since the fallback was able to execute. For this case, we try to load the size of the 130 | // largest message a user can send, but fall back to 140 if the load fails. 131 | func ExampleCircuit_Execute_fallback() { 132 | c := circuit.NewCircuitFromConfig("divider", circuit.Config{}) 133 | var maximumMessageSize int 134 | err := c.Execute(context.Background(), func(_ context.Context) error { 135 | return errors.New("your circuit failed") 136 | }, func(ctx context.Context, err2 error) error { 137 | maximumMessageSize = 140 138 | return nil 139 | }) 140 | fmt.Printf("value=%d err=%v", maximumMessageSize, err) 141 | // Output: value=140 err= 142 | } 143 | 144 | // This example shows execute failing (marking the circuit with a failure), but not returning an error 145 | // back to the user since the fallback was able to execute. For this case, we try to load the size of the 146 | // largest message a user can send, but fall back to 140 if the load fails. 
147 | func ExampleCircuit_Execute_helloworld() { 148 | c := circuit.NewCircuitFromConfig("hello-world", circuit.Config{}) 149 | err := c.Execute(context.Background(), func(_ context.Context) error { 150 | return nil 151 | }, nil) 152 | fmt.Printf("err=%v", err) 153 | // Output: err= 154 | } 155 | 156 | // It is recommended to use `circuit.Execute` and a context aware function. If, however, you want to exit 157 | // your run function early and leave it hanging (possibly forever), then you can call `circuit.Go`. 158 | func ExampleCircuit_Go() { 159 | h := circuit.Manager{} 160 | c := h.MustCreateCircuit("untrusting-circuit", circuit.Config{ 161 | Execution: circuit.ExecutionConfig{ 162 | // Time out the context after a few ms 163 | Timeout: time.Millisecond * 30, 164 | }, 165 | }) 166 | 167 | errResult := c.Go(context.Background(), func(ctx context.Context) error { 168 | // Sleep 30 seconds, way longer than our timeout 169 | time.Sleep(time.Second * 30) 170 | return nil 171 | }, nil) 172 | fmt.Printf("err=%v", errResult) 173 | // Output: err=context deadline exceeded 174 | } 175 | 176 | // Code executed with `Execute` does not spawn a goroutine and panics naturally go up the call stack to the caller. 177 | // This is also true for `Go`, where we attempt to recover and throw panics on the same stack that 178 | // calls Go. This example will panic, and the panic can be caught up the stack. 
179 | func ExampleCircuit_Execute_panics() { 180 | h := circuit.Manager{} 181 | c := h.MustCreateCircuit("panic_up") 182 | 183 | defer func() { 184 | r := recover() 185 | if r != nil { 186 | fmt.Println("I recovered from a panic", r) 187 | } 188 | }() 189 | _ = c.Execute(context.Background(), func(ctx context.Context) error { 190 | panic("oh no") 191 | }, nil) 192 | // Output: I recovered from a panic oh no 193 | } 194 | 195 | // You can use DefaultCircuitProperties to set configuration dynamically for any circuit 196 | func ExampleCommandPropertiesConstructor() { 197 | myFactory := func(circuitName string) circuit.Config { 198 | timeoutsByName := map[string]time.Duration{ 199 | "v1": time.Second, 200 | "v2": time.Second * 2, 201 | } 202 | customTimeout := timeoutsByName[circuitName] 203 | if customTimeout == 0 { 204 | // Just return empty if you don't want to set any config 205 | return circuit.Config{} 206 | } 207 | return circuit.Config{ 208 | Execution: circuit.ExecutionConfig{ 209 | Timeout: customTimeout, 210 | }, 211 | } 212 | } 213 | 214 | // Manager manages circuits with unique names 215 | h := circuit.Manager{ 216 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{myFactory}, 217 | } 218 | h.MustCreateCircuit("v1") 219 | fmt.Println("The timeout of v1 is", h.GetCircuit("v1").Config().Execution.Timeout) 220 | // Output: The timeout of v1 is 1s 221 | } 222 | 223 | // Many configuration variables can be set at runtime in a thread safe way 224 | func ExampleCircuit_SetConfigThreadSafe() { 225 | h := circuit.Manager{} 226 | c := h.MustCreateCircuit("changes-at-runtime", circuit.Config{}) 227 | // ... 
later on (during live) 228 | c.SetConfigThreadSafe(circuit.Config{ 229 | Execution: circuit.ExecutionConfig{ 230 | MaxConcurrentRequests: int64(12), 231 | }, 232 | }) 233 | } 234 | 235 | // Even though Go executes inside a goroutine, we catch that panic and bubble it up the same 236 | // call stack that called Go 237 | func ExampleCircuit_Go_panics() { 238 | c := circuit.NewCircuitFromConfig("panic_up", circuit.Config{}) 239 | 240 | defer func() { 241 | r := recover() 242 | if r != nil { 243 | fmt.Println("I recovered from a panic", r) 244 | } 245 | }() 246 | _ = c.Go(context.Background(), func(ctx context.Context) error { 247 | panic("oh no") 248 | }, nil) 249 | // Output: I recovered from a panic oh no 250 | } 251 | 252 | // This example shows how to return errors in a circuit without considering the circuit at fault. 253 | // Here, even if someone tries to divide by zero, the circuit will not consider it a failure even if the 254 | // function returns non nil error. 255 | func ExampleBadRequest() { 256 | c := circuit.NewCircuitFromConfig("divider", circuit.Config{}) 257 | divideInCircuit := func(numerator, denominator int) (int, error) { 258 | var result int 259 | err := c.Run(context.Background(), func(ctx context.Context) error { 260 | if denominator == 0 { 261 | // This error type is not counted as a failure of the circuit 262 | return &circuit.SimpleBadRequest{ 263 | Err: errors.New("someone tried to divide by zero"), 264 | } 265 | } 266 | result = numerator / denominator 267 | return nil 268 | }) 269 | return result, err 270 | } 271 | _, err := divideInCircuit(10, 0) 272 | fmt.Println("Result of 10/0 is", err) 273 | // Output: Result of 10/0 is someone tried to divide by zero 274 | } 275 | 276 | // If you wanted to publish hystrix information on Expvar, you can register your manager. 
277 | func ExampleManager_Var() { 278 | h := circuit.Manager{} 279 | expvar.Publish("hystrix", h.Var()) 280 | // Output: 281 | } 282 | 283 | // Implement interfaces CmdMetricCollector or FallbackMetricCollector to know what happens with commands or fallbacks. 284 | // 285 | // Then pass those implementations to configure. 286 | func ExampleConfig_custommetrics() { 287 | config := circuit.Config{ 288 | Metrics: circuit.MetricsCollectors{ 289 | Run: []circuit.RunMetrics{ 290 | // Here is where I would insert my custom metric collector 291 | }, 292 | }, 293 | } 294 | circuit.NewCircuitFromConfig("custom-metrics", config) 295 | // Output: 296 | } 297 | 298 | // Shows how to check if an error is part of the circuit library. 299 | func ExampleError_checking() { 300 | x := errors.New("an error") 301 | if _, ok := x.(circuit.Error); ok { 302 | fmt.Println("this error is a circuit library error, not the result of runFunc or fallbackFunc") 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /faststats/atomic.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "strconv" 6 | "sync/atomic" 7 | "time" 8 | ) 9 | 10 | // AtomicBoolean is a helper struct to simulate atomic operations on a boolean 11 | type AtomicBoolean struct { 12 | atomic.Bool 13 | } 14 | 15 | // Get the current atomic value 16 | func (a *AtomicBoolean) Get() bool { 17 | return a.Load() 18 | } 19 | 20 | // Set the atomic boolean value 21 | func (a *AtomicBoolean) Set(value bool) { 22 | a.Store(value) 23 | } 24 | 25 | // String returns "true" or "false" 26 | func (a *AtomicBoolean) String() string { 27 | return strconv.FormatBool(a.Get()) 28 | } 29 | 30 | var _ json.Marshaler = &AtomicBoolean{} 31 | var _ json.Unmarshaler = &AtomicBoolean{} 32 | 33 | // MarshalJSON encodes this value in a thread safe way as a json bool 34 | func (a *AtomicBoolean) MarshalJSON() ([]byte, 
error) { 35 | return json.Marshal(a.Get()) 36 | } 37 | 38 | // UnmarshalJSON decodes this value in a thread safe way as a json bool 39 | func (a *AtomicBoolean) UnmarshalJSON(b []byte) error { 40 | var into bool 41 | if err := json.Unmarshal(b, &into); err != nil { 42 | return err 43 | } 44 | a.Set(into) 45 | return nil 46 | } 47 | 48 | // AtomicInt64 is a helper struct to simulate atomic operations on an int64 49 | // Note that I could have used `type AtomicInt642 int64`, but I did not want to make it easy 50 | // to do + and - operations so easily without using atomic functions. 51 | type AtomicInt64 struct { 52 | atomic.Int64 53 | } 54 | 55 | var _ json.Marshaler = &AtomicInt64{} 56 | var _ json.Unmarshaler = &AtomicInt64{} 57 | 58 | // MarshalJSON encodes this value as an int in a thread safe way 59 | func (a *AtomicInt64) MarshalJSON() ([]byte, error) { 60 | return json.Marshal(a.Get()) 61 | } 62 | 63 | // UnmarshalJSON decodes this value as an int in a thread safe way 64 | func (a *AtomicInt64) UnmarshalJSON(b []byte) error { 65 | var into int64 66 | if err := json.Unmarshal(b, &into); err != nil { 67 | return err 68 | } 69 | a.Set(into) 70 | return nil 71 | } 72 | 73 | // Get the current int64 74 | func (a *AtomicInt64) Get() int64 { 75 | return a.Load() 76 | } 77 | 78 | // String returns the integer as a string in a thread safe way 79 | func (a *AtomicInt64) String() string { 80 | return strconv.FormatInt(a.Get(), 10) 81 | } 82 | 83 | // Set the current store to a value 84 | func (a *AtomicInt64) Set(value int64) { 85 | a.Store(value) 86 | } 87 | 88 | // Duration returns the currently stored value as a time.Duration 89 | func (a *AtomicInt64) Duration() time.Duration { 90 | return time.Duration(a.Get()) 91 | } 92 | -------------------------------------------------------------------------------- /faststats/atomic_test.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | 
"encoding/json" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestAtomicInt64(t *testing.T) { 12 | var x AtomicInt64 13 | x.Add(1) 14 | if x.Get() != 1 { 15 | t.Error("Expect 1 after an add") 16 | } 17 | if x.Swap(100) != 1 { 18 | t.Error("expect 1 back after a swap") 19 | } 20 | x.Set(time.Second.Nanoseconds()) 21 | if x.Duration() != time.Second { 22 | t.Error("expected to get second after a set") 23 | } 24 | asBytes, err := json.Marshal(&x) 25 | if err != nil { 26 | t.Error("unknown error marshalling", err) 27 | } 28 | require.Equal(t, []byte("1000000000"), asBytes) 29 | var y AtomicInt64 30 | if err := json.Unmarshal(asBytes, &y); err != nil { 31 | t.Error("unknown error unmarshalling", err) 32 | } 33 | if y.Get() != x.Get() { 34 | t.Error("Did not JSON encode correctly") 35 | } 36 | y.Set(1) 37 | if y.String() != "1" { 38 | t.Error("String value inconsistent") 39 | } 40 | } 41 | 42 | func TestAtomicBoolean(t *testing.T) { 43 | var b AtomicBoolean 44 | b.Set(true) 45 | if !b.Get() { 46 | t.Error("Could not set") 47 | } 48 | if b.String() != "true" { 49 | t.Error("Could not convert to string") 50 | } 51 | asBytes, err := json.Marshal(&b) 52 | if err != nil { 53 | t.Error("Could not json marshal") 54 | } 55 | require.Equal(t, []byte("true"), asBytes) 56 | var c AtomicBoolean 57 | if err := json.Unmarshal(asBytes, &c); err != nil { 58 | t.Error("Could not unmarshal") 59 | } 60 | if !c.Get() { 61 | t.Error("Value not stored in correctly") 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /faststats/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package faststats contains helpers to calculate circuit statistics quickly (usually atomically). 
3 | */ 4 | package faststats 5 | -------------------------------------------------------------------------------- /faststats/rolling_bucket.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | // RollingBuckets simulates a time rolling list of buckets of items. It is safe to use JSON to encode this object 9 | // in a thread safe way. 10 | // 11 | // This implementation cheats in order to not take a lock. It is correct, but only if the total size of the buckets (NumBuckets * BucketWidth) 12 | // is less than any duration of how long Advance will take to execute. In anything but super small bucket sizes this should 13 | // be fine. The common case, where (NumBuckets * BucketWidth >= 1sec) should always work. 14 | type RollingBuckets struct { 15 | NumBuckets int 16 | StartTime time.Time 17 | BucketWidth time.Duration 18 | LastAbsIndex AtomicInt64 19 | } 20 | 21 | var _ fmt.Stringer = &RollingBuckets{} 22 | 23 | func (r *RollingBuckets) String() string { 24 | return fmt.Sprintf("RollingBucket(num=%d, width=%s)", r.NumBuckets, r.BucketWidth) 25 | } 26 | 27 | // Advance to now, clearing buckets as needed 28 | func (r *RollingBuckets) Advance(now time.Time, clearBucket func(int)) int { 29 | if r.NumBuckets == 0 { 30 | return -1 31 | } 32 | diff := now.Sub(r.StartTime) 33 | if diff < 0 { 34 | // This point is before init. That is invalid. We should ignore it. 35 | return -1 36 | } 37 | absIndex := int(diff.Nanoseconds() / r.BucketWidth.Nanoseconds()) 38 | lastAbsVal := int(r.LastAbsIndex.Get()) 39 | indexDiff := absIndex - lastAbsVal 40 | if indexDiff == 0 { 41 | // We are at the right time 42 | return absIndex % r.NumBuckets 43 | } 44 | if indexDiff < 0 { 45 | // This point is backwards in time. We should return a valid 46 | // index past where we are 47 | if indexDiff >= r.NumBuckets { 48 | // We rolled past the list. 
This point is before the start 49 | // of our rolling window. We should just do what ... ignore it? 50 | return -1 51 | } 52 | return absIndex % r.NumBuckets 53 | } 54 | for i := 0; i < r.NumBuckets && lastAbsVal < absIndex; i++ { 55 | if !r.LastAbsIndex.CompareAndSwap(int64(lastAbsVal), int64(lastAbsVal)+1) { 56 | // someone else is swapping 57 | return r.Advance(now, clearBucket) 58 | } 59 | lastAbsVal++ 60 | clearBucket(lastAbsVal % r.NumBuckets) 61 | } 62 | // indexDiff > 0 at this point. We have to roll our window forward 63 | // Cleared all the buckets. Try to advance back to wherever we need 64 | r.LastAbsIndex.CompareAndSwap(int64(lastAbsVal), int64(absIndex)) 65 | return r.Advance(now, clearBucket) 66 | } 67 | 68 | func (r *RollingBuckets) Store(bucket *RollingBuckets) { 69 | r.NumBuckets = bucket.NumBuckets 70 | r.StartTime = bucket.StartTime 71 | r.BucketWidth = bucket.BucketWidth 72 | r.LastAbsIndex.Store(bucket.LastAbsIndex.Get()) 73 | } 74 | -------------------------------------------------------------------------------- /faststats/rolling_bucket_test.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestRollingBuckets_String(t *testing.T) { 10 | x := RollingBuckets{ 11 | NumBuckets: 101, 12 | } 13 | if !strings.Contains(x.String(), "101") { 14 | t.Fatal("expected string 101 in output") 15 | } 16 | } 17 | 18 | func TestRollingBuckets_Advance(t *testing.T) { 19 | now := time.Now() 20 | x := RollingBuckets{ 21 | NumBuckets: 5, 22 | StartTime: now, 23 | BucketWidth: time.Second, 24 | } 25 | clearBucketCount := 0 26 | clearIgnore := func(_ int) { 27 | clearBucketCount++ 28 | } 29 | if nextBucket := x.Advance(now.Add(-time.Second), nil); nextBucket != -1 { 30 | t.Fatal("expected negative bucket in the past") 31 | } 32 | 33 | // advance 1 forward 34 | if nextBucket := x.Advance(now.Add(time.Second*1), clearIgnore); nextBucket 
!= 1 { 35 | t.Fatalf("Should get to bucket 1, not %d", nextBucket) 36 | } 37 | 38 | // advance 10 forward, then backwards should be negative again 39 | if nextBucket := x.Advance(now.Add(time.Second*10), clearIgnore); nextBucket != 0 { 40 | t.Fatal("bucket zero at index 10") 41 | } 42 | 43 | if clearBucketCount != 6 { 44 | t.Fatalf("Expect 6 clears, not %d", clearBucketCount) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /faststats/rolling_counter.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | // RollingCounter uses a slice of buckets to keep track of counts of an event over time with a sliding window 12 | type RollingCounter struct { 13 | // The len(buckets) is constant and not mutable 14 | // The values of the individual buckets are atomic, so they do not take the mutex 15 | buckets []AtomicInt64 16 | 17 | // Neither of these need to be locked (atomic operations) 18 | rollingSum AtomicInt64 19 | totalSum AtomicInt64 20 | 21 | rollingBucket RollingBuckets 22 | } 23 | 24 | // NewRollingCounter initializes a rolling counter with a bucket width and # of buckets 25 | func NewRollingCounter(bucketWidth time.Duration, numBuckets int, now time.Time) RollingCounter { 26 | return RollingCounter{ 27 | buckets: make([]AtomicInt64, numBuckets), 28 | rollingBucket: RollingBuckets{ 29 | NumBuckets: numBuckets, 30 | BucketWidth: bucketWidth, 31 | StartTime: now, 32 | }, 33 | } 34 | } 35 | 36 | var _ json.Marshaler = &RollingCounter{} 37 | var _ json.Unmarshaler = &RollingCounter{} 38 | var _ fmt.Stringer = &RollingCounter{} 39 | 40 | type jsonCounter struct { 41 | Buckets []AtomicInt64 42 | RollingSum *AtomicInt64 43 | TotalSum *AtomicInt64 44 | RollingBucket *RollingBuckets 45 | } 46 | 47 | // MarshalJSON JSON encodes a counter. It is thread safe. 
func (r *RollingCounter) MarshalJSON() ([]byte, error) {
	// Pointers are marshaled so the atomic types' thread safe MarshalJSON
	// implementations are used for the sums and the bucket index.
	return json.Marshal(jsonCounter{
		Buckets:       r.buckets,
		RollingSum:    &r.rollingSum,
		TotalSum:      &r.totalSum,
		RollingBucket: &r.rollingBucket,
	})
}

// UnmarshalJSON stores the previous JSON encoding. Note, this is *NOT* thread safe.
func (r *RollingCounter) UnmarshalJSON(b []byte) error {
	var into jsonCounter
	if err := json.Unmarshal(b, &into); err != nil {
		return err
	}
	// Replace the bucket slice wholesale, then copy the atomic values over.
	r.buckets = into.Buckets
	r.rollingSum.Store(into.RollingSum.Get())
	r.totalSum.Store(into.TotalSum.Get())
	r.rollingBucket.Store(into.RollingBucket)
	return nil
}

// String for debugging
func (r *RollingCounter) String() string {
	return r.StringAt(time.Now())
}

// StringAt converts the counter to a string at a given time.
func (r *RollingCounter) StringAt(now time.Time) string {
	b := r.GetBuckets(now)
	parts := make([]string, 0, len(r.buckets))
	for _, v := range b {
		parts = append(parts, strconv.FormatInt(v, 10))
	}
	return fmt.Sprintf("rolling_sum=%d total_sum=%d parts=(%s)", r.RollingSumAt(now), r.TotalSum(), strings.Join(parts, ","))
}

// Inc adds a single event to the current bucket
func (r *RollingCounter) Inc(now time.Time) {
	// The all-time total counts every event, even when there are no buckets.
	r.totalSum.Add(1)
	if len(r.buckets) == 0 {
		return
	}
	// Advance clears any buckets that have rolled out of the window and
	// returns the bucket index for now.
	idx := r.rollingBucket.Advance(now, r.clearBucket)
	if idx < 0 {
		// now falls outside the rolling window (for example, before the
		// window's start time), so the event only shows up in totalSum.
		return
	}
	r.buckets[idx].Add(1)
	r.rollingSum.Add(1)
}

// RollingSumAt returns the total number of events in the rolling time window
func (r *RollingCounter) RollingSumAt(now time.Time) int64 {
	// Advance first so expired buckets are subtracted from rollingSum
	// before it is read.
	r.rollingBucket.Advance(now, r.clearBucket)
	return r.rollingSum.Get()
}

// RollingSum returns the total number of events in the rolling time window (With time time.Now())
func (r *RollingCounter) RollingSum() int64 {
	r.rollingBucket.Advance(time.Now(), r.clearBucket)
	return r.rollingSum.Get()
}

// TotalSum returns the total number of events of all time
func (r *RollingCounter) TotalSum() int64 {
	return r.totalSum.Get()
}

// GetBuckets returns a copy of the buckets in order backwards in time
func (r *RollingCounter) GetBuckets(now time.Time) []int64 {
	r.rollingBucket.Advance(now, r.clearBucket)
	// ret[0] is the current bucket; each following entry walks one bucket
	// further into the past, wrapping around the circular slice.
	startIdx := int(r.rollingBucket.LastAbsIndex.Get() % int64(r.rollingBucket.NumBuckets))
	ret := make([]int64, r.rollingBucket.NumBuckets)
	for i := 0; i < r.rollingBucket.NumBuckets; i++ {
		idx := startIdx - i
		if idx < 0 {
			idx += r.rollingBucket.NumBuckets
		}
		ret[i] = r.buckets[idx].Get()
	}
	return ret
}

// clearBucket zeroes a single bucket and removes its previous contents from
// the rolling sum. It is passed as the clear callback to RollingBuckets.Advance.
func (r *RollingCounter) clearBucket(idx int) {
	toDec := r.buckets[idx].Swap(0)
	r.rollingSum.Add(-toDec)
}

// Reset the counter to all zero values.
137 | func (r *RollingCounter) Reset(now time.Time) { 138 | r.rollingBucket.Advance(now, r.clearBucket) 139 | for i := 0; i < r.rollingBucket.NumBuckets; i++ { 140 | r.clearBucket(i) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /faststats/rolling_counter_test.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "runtime" 6 | "strconv" 7 | "strings" 8 | "sync" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | func TestRollingCounter_Empty(t *testing.T) { 14 | x := RollingCounter{} 15 | now := time.Now() 16 | s := x.RollingSumAt(now) 17 | if s != 0 { 18 | t.Errorf("expect to start with empty sum %d", s) 19 | } 20 | x.Inc(time.Now()) 21 | if x.TotalSum() != 1 { 22 | t.Error("Total sum should work even on empty structure") 23 | } 24 | } 25 | 26 | func TestRollingCounter_MovingBackwards(t *testing.T) { 27 | now := time.Now() 28 | x := NewRollingCounter(time.Millisecond, 10, now) 29 | x.Inc(now) 30 | x.Inc(now.Add(time.Millisecond * 2)) 31 | x.Inc(now) 32 | endTime := now.Add(time.Millisecond * 2) 33 | b := x.GetBuckets(endTime) 34 | if b[0] != 1 { 35 | t.Error("Expect one value at current bucket") 36 | } 37 | if b[2] != 2 { 38 | t.Error("expect 2 values at 2 back buckets") 39 | } 40 | } 41 | 42 | type atomicTime struct { 43 | t time.Time 44 | mu sync.Mutex 45 | } 46 | 47 | func (a *atomicTime) Add(d time.Duration) time.Time { 48 | a.mu.Lock() 49 | defer a.mu.Unlock() 50 | a.t = a.t.Add(d) 51 | return a.t 52 | } 53 | 54 | func (a *atomicTime) Get() time.Time { 55 | a.mu.Lock() 56 | defer a.mu.Unlock() 57 | return a.t 58 | } 59 | 60 | func TestRollingCounter_NormalConsistency(t *testing.T) { 61 | // Start now at 1970 62 | now := atomicTime{t: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)} 63 | bucketSize := 100 64 | numBuckets := 10 65 | x := NewRollingCounter(time.Millisecond*time.Duration(bucketSize), numBuckets+1, now.Get()) 66 
| concurrent := int64(100) 67 | for k := 0; k < bucketSize; k++ { 68 | wg := sync.WaitGroup{} 69 | for i := 0; i < numBuckets; i++ { 70 | wg.Add(1) 71 | go func() { 72 | defer wg.Done() 73 | for j := 0; j < int(concurrent); j++ { 74 | newTime := now.Add(time.Duration(time.Millisecond.Nanoseconds() / concurrent)) 75 | x.Inc(newTime) 76 | } 77 | time.Sleep(time.Nanosecond) 78 | }() 79 | } 80 | wg.Wait() 81 | } 82 | newNow := now.Get() 83 | expectedValue := bucketSize * numBuckets * int(concurrent) 84 | if x.RollingSumAt(newNow) != int64(expectedValue) { 85 | t.Log(x.StringAt(newNow)) 86 | t.Error("small rolling sum", x.RollingSumAt(newNow), "when we want", expectedValue) 87 | } 88 | } 89 | 90 | func BenchmarkRollingCounter(b *testing.B) { 91 | type rollingCounterTestCase struct { 92 | name string 93 | bucketSize time.Duration 94 | numBuckets int 95 | } 96 | concurrents := []int{1, 50} 97 | runs := []rollingCounterTestCase{ 98 | { 99 | name: "super-small-buckets", 100 | bucketSize: time.Nanosecond, 101 | numBuckets: 20, 102 | }, 103 | { 104 | name: "normal-rate", 105 | bucketSize: time.Nanosecond * 100, 106 | numBuckets: 10, 107 | }, 108 | { 109 | name: "default", 110 | bucketSize: time.Millisecond * 100, 111 | numBuckets: 10, 112 | }, 113 | } 114 | for _, run := range runs { 115 | run := run 116 | b.Run(run.name, func(b *testing.B) { 117 | for _, concurrent := range concurrents { 118 | concurrent := concurrent 119 | b.Run(strconv.Itoa(concurrent), func(b *testing.B) { 120 | now := time.Now() 121 | x := NewRollingCounter(run.bucketSize, run.numBuckets, now) 122 | wg := sync.WaitGroup{} 123 | addAmount := AtomicInt64{} 124 | for i := 0; i < concurrent; i++ { 125 | wg.Add(1) 126 | go func() { 127 | defer wg.Done() 128 | for i := 0; i < b.N/concurrent; i++ { 129 | x.Inc(now.Add(time.Duration(addAmount.Add(1)))) 130 | } 131 | }() 132 | } 133 | wg.Wait() 134 | }) 135 | } 136 | }) 137 | } 138 | } 139 | 140 | func doTillTime(endTime time.Time, wg *sync.WaitGroup, f func()) { 
141 | wg.Add(1) 142 | go func() { 143 | defer wg.Done() 144 | for time.Now().Before(endTime) { 145 | f() 146 | // Don't need to sleep. Just busy loop. But let another thread take over if it wants (to get some concurrency) 147 | runtime.Gosched() 148 | } 149 | }() 150 | } 151 | 152 | func TestRollingCounter_Race(t *testing.T) { 153 | startTime := time.Now() 154 | x := NewRollingCounter(time.Millisecond, 10, startTime) 155 | wg := sync.WaitGroup{} 156 | concurrent := 50 157 | doNotPassTime := startTime.Add(time.Millisecond * 50) 158 | for i := 0; i < concurrent; i++ { 159 | doTillTime(doNotPassTime, &wg, func() { 160 | x.Inc(time.Now()) 161 | }) 162 | doTillTime(doNotPassTime, &wg, func() { 163 | x.TotalSum() 164 | }) 165 | doTillTime(doNotPassTime, &wg, func() { 166 | x.RollingSumAt(time.Now()) 167 | }) 168 | doTillTime(doNotPassTime, &wg, func() { 169 | x.GetBuckets(time.Now()) 170 | }) 171 | doTillTime(doNotPassTime, &wg, func() { 172 | b, err := json.Marshal(&x) 173 | if err != nil { 174 | t.Error("Expected non nil error", err) 175 | } 176 | var x RollingCounter 177 | if err := json.Unmarshal(b, &x); err != nil { 178 | t.Error("Expected non nil error", err) 179 | } 180 | }) 181 | } 182 | wg.Wait() 183 | } 184 | 185 | func TestRollingCounter_IncPast(t *testing.T) { 186 | now := time.Now() 187 | x := NewRollingCounter(time.Millisecond, 4, now) 188 | x.Inc(now) 189 | if x.RollingSumAt(now) != 1 { 190 | t.Errorf("Should see a single item after adding by 1") 191 | } 192 | x.Inc(now.Add(time.Millisecond * 100)) 193 | if x.RollingSumAt(now) != 1 { 194 | t.Errorf("Should see one item, saw %d", x.RollingSumAt(now)) 195 | } 196 | } 197 | 198 | func TestRollingCounter_Inc(t *testing.T) { 199 | now := time.Now() 200 | x := NewRollingCounter(time.Millisecond, 10, now) 201 | if x.String() != "rolling_sum=0 total_sum=0 parts=(0,0,0,0,0,0,0,0,0,0)" { 202 | t.Errorf("String() function does not work: %s", x.String()) 203 | } 204 | x.Inc(now) 205 | if x.RollingSumAt(now) != 1 { 206 
| t.Errorf("Should see a single item after adding by 1") 207 | } 208 | x.Inc(now) 209 | if ans := x.RollingSumAt(now); ans != 2 { 210 | t.Errorf("Should see two items now, not %d", ans) 211 | } 212 | x.Inc(now.Add(-time.Second)) 213 | if ans := x.RollingSumAt(now); ans != 2 { 214 | t.Errorf("Should see two items now, not %d", ans) 215 | } 216 | if x.RollingSum() != 2 { 217 | t.Errorf("Should see two items still") 218 | } 219 | 220 | x.Reset(now) 221 | if ans := x.RollingSumAt(now); ans != 0 { 222 | t.Errorf("Should reset to zero") 223 | } 224 | } 225 | 226 | func expectBuckets(t *testing.T, now time.Time, in *RollingCounter, b []int64) { 227 | a := in.GetBuckets(now) 228 | if len(a) != len(b) { 229 | t.Fatalf("Len not right: %d vs %d", len(a), len(b)) 230 | } 231 | p1 := make([]string, 0, len(b)) 232 | p2 := make([]string, 0, len(b)) 233 | for i := range b { 234 | p1 = append(p1, strconv.FormatInt(a[i], 10)) 235 | p2 = append(p2, strconv.FormatInt(b[i], 10)) 236 | } 237 | c1 := strings.Join(p1, ",") 238 | c2 := strings.Join(p2, ",") 239 | if c1 != c2 { 240 | t.Fatalf("buckets not as expected: seen=(%s) vs expected=(%s)", c1, c2) 241 | } 242 | } 243 | 244 | func TestRollingCounter_MoveForward(t *testing.T) { 245 | startTime := time.Now() 246 | x := NewRollingCounter(time.Millisecond, 4, startTime) 247 | 248 | expectBuckets(t, startTime, &x, []int64{0, 0, 0, 0}) 249 | x.Inc(startTime) 250 | x.Inc(startTime) 251 | if x.RollingSumAt(startTime) != 2 { 252 | t.Errorf("Should see two items after adding by 1 twice") 253 | } 254 | expectBuckets(t, startTime, &x, []int64{2, 0, 0, 0}) 255 | 256 | nextTime := startTime.Add(time.Millisecond) 257 | x.Inc(nextTime) 258 | if x.RollingSumAt(nextTime) != 3 { 259 | t.Errorf("Should see a sum of 3 after advancing") 260 | } 261 | if x.TotalSum() != 3 { 262 | t.Errorf("Should see a sum of 3 after advancing") 263 | } 264 | expectBuckets(t, nextTime, &x, []int64{1, 2, 0, 0}) 265 | 266 | moveCloseToEnd := startTime.Add(time.Millisecond * 
3) 267 | 268 | x.Inc(moveCloseToEnd) 269 | expectBuckets(t, moveCloseToEnd, &x, []int64{1, 0, 1, 2}) 270 | if x.RollingSumAt(moveCloseToEnd) != 4 { 271 | t.Errorf("Should see a sum of 3 after advancing close to the end") 272 | } 273 | 274 | movePastOneBucket := startTime.Add(time.Millisecond * 4) 275 | x.Inc(movePastOneBucket) 276 | expectBuckets(t, movePastOneBucket, &x, []int64{1, 1, 0, 1}) 277 | if x.RollingSumAt(movePastOneBucket) != 3 { 278 | t.Errorf("Should see a sum of 3 after advancing close to the end again") 279 | } 280 | 281 | movePastAllButOneBucket := movePastOneBucket.Add(time.Millisecond * 3) 282 | x.Inc(movePastAllButOneBucket) 283 | expectBuckets(t, movePastAllButOneBucket, &x, []int64{1, 0, 0, 1}) 284 | if x.RollingSumAt(movePastAllButOneBucket) != 2 { 285 | t.Errorf("Should see a sum of 2 after advancing close to the end") 286 | } 287 | 288 | movePastAllBuckets := movePastAllButOneBucket.Add(time.Millisecond * 4) 289 | x.Inc(movePastAllBuckets) 290 | expectBuckets(t, movePastAllBuckets, &x, []int64{1, 0, 0, 0}) 291 | if s := x.RollingSumAt(movePastAllBuckets); s != 1 { 292 | t.Errorf("Should see a sum of 1 after advancing past all the buckets, saw %d", s) 293 | } 294 | } 295 | -------------------------------------------------------------------------------- /faststats/rolling_percentile.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "expvar" 6 | "fmt" 7 | "math" 8 | "sort" 9 | "strings" 10 | "time" 11 | 12 | "github.com/cep21/circuit/v4/internal/evar" 13 | ) 14 | 15 | // RollingPercentile is a bucketed array of time.Duration that cycles over time 16 | type RollingPercentile struct { 17 | buckets []durationsBucket 18 | rollingBucket RollingBuckets 19 | } 20 | 21 | // SortedDurations is a sorted list of time.Duration that allows fast Percentile operations 22 | type SortedDurations []time.Duration 23 | 24 | var _ fmt.Stringer = SortedDurations(nil) 
25 | 26 | func (s SortedDurations) String() string { 27 | ret := make([]string, 0, len(s)) 28 | for _, d := range s { 29 | ret = append(ret, d.String()) 30 | } 31 | return "(" + strings.Join(ret, ",") + ")" 32 | } 33 | 34 | // Mean (average) of the current list 35 | func (s SortedDurations) Mean() time.Duration { 36 | if len(s) == 0 { 37 | // A meaningless value for a meaningless list 38 | return -1 39 | } 40 | sum := int64(0) 41 | for _, d := range s { 42 | sum += d.Nanoseconds() 43 | } 44 | return time.Duration(sum / int64(len(s))) 45 | } 46 | 47 | // Min returns the first (smallest) item, or -1 if the list is empty 48 | func (s SortedDurations) Min() time.Duration { 49 | if len(s) == 0 { 50 | return -1 51 | } 52 | return s[0] 53 | } 54 | 55 | // Max returns the last (largest) item, or -1 if the list is empty 56 | func (s SortedDurations) Max() time.Duration { 57 | if len(s) == 0 { 58 | return -1 59 | } 60 | return s[len(s)-1] 61 | } 62 | 63 | // Var allows exposing the durations on expvar 64 | func (s SortedDurations) Var() expvar.Var { 65 | return expvar.Func(func() interface{} { 66 | return map[string]string{ 67 | // Convert to string because it's easier to read 68 | "min": s.Min().String(), 69 | "p25": s.Percentile(.25).String(), 70 | "p50": s.Percentile(.5).String(), 71 | "p90": s.Percentile(.9).String(), 72 | "p99": s.Percentile(.99).String(), 73 | "max": s.Max().String(), 74 | "mean": s.Mean().String(), 75 | } 76 | }) 77 | } 78 | 79 | // Percentile returns a p [0.0 - 1.0] percentile of the list 80 | func (s SortedDurations) Percentile(p float64) time.Duration { 81 | if len(s) == 0 { 82 | // A meaningless value for a meaningless list 83 | return -1 84 | } 85 | if len(s) == 1 { 86 | return s[0] 87 | } 88 | if p <= 0 { 89 | return s[0] 90 | } 91 | if p >= 100 { 92 | return s[len(s)-1] 93 | } 94 | absoluteIndex := p / 100 * float64(len(s)-1) 95 | 96 | // The real value is now an approximation between here 97 | // For example, if absoluteIndex is 5.5, then we 
want to return a value 98 | // exactly between the [5] and [6] index of the array. 99 | // 100 | // However, if the absoluteIndex is 5.1, then we want to return a value 101 | // that is closer to [5], but still has a tiny part of [6] 102 | firstValue := s[int(math.Floor(absoluteIndex))] 103 | secondValue := s[int(math.Ceil(absoluteIndex))] 104 | 105 | firstWeight := absoluteIndex - math.Floor(absoluteIndex) 106 | return firstValue + time.Duration(int64(float64(secondValue-firstValue)*firstWeight)) 107 | } 108 | 109 | // NewRollingPercentile creates a new rolling percentile bucketer 110 | func NewRollingPercentile(bucketWidth time.Duration, numBuckets int, bucketSize int, now time.Time) RollingPercentile { 111 | return RollingPercentile{ 112 | buckets: makeBuckets(numBuckets, bucketSize), 113 | rollingBucket: RollingBuckets{ 114 | NumBuckets: numBuckets, 115 | BucketWidth: bucketWidth, 116 | StartTime: now, 117 | }, 118 | } 119 | } 120 | 121 | func makeBuckets(numBuckets int, bucketSize int) []durationsBucket { 122 | ret := make([]durationsBucket, numBuckets) 123 | for i := 0; i < numBuckets; i++ { 124 | ret[i] = newDurationsBucket(bucketSize) 125 | } 126 | return ret 127 | } 128 | 129 | // Var allows exposing a rolling percentile snapshot on expvar 130 | func (r *RollingPercentile) Var() expvar.Var { 131 | return expvar.Func(func() interface{} { 132 | return map[string]interface{}{ 133 | "snap": evar.ForExpvar(r.Snapshot()), 134 | } 135 | }) 136 | } 137 | 138 | // SortedDurations creates a raw []time.Duration in sorted order that is stored in these buckets 139 | func (r *RollingPercentile) SortedDurations(now time.Time) []time.Duration { 140 | if len(r.buckets) == 0 { 141 | return nil 142 | } 143 | r.rollingBucket.Advance(now, r.clearBucket) 144 | ret := make([]time.Duration, 0, len(r.buckets)*10) 145 | for idx := range r.buckets { 146 | ret = append(ret, r.buckets[idx].Durations()...) 
147 | } 148 | sort.Slice(ret, func(i, j int) bool { 149 | return ret[i] < ret[j] 150 | }) 151 | return ret 152 | } 153 | 154 | // Snapshot the current rolling buckets, allowing easy p99 calculations 155 | func (r *RollingPercentile) Snapshot() SortedDurations { 156 | return r.SnapshotAt(time.Now()) 157 | } 158 | 159 | // SnapshotAt is an optimization on Snapshot that takes the current time 160 | func (r *RollingPercentile) SnapshotAt(now time.Time) SortedDurations { 161 | return SortedDurations(r.SortedDurations(now)) 162 | } 163 | 164 | func (r *RollingPercentile) clearBucket(idx int) { 165 | r.buckets[idx].clear() 166 | } 167 | 168 | // AddDuration adds a duration to the rolling buckets 169 | func (r *RollingPercentile) AddDuration(d time.Duration, now time.Time) { 170 | if len(r.buckets) == 0 { 171 | return 172 | } 173 | idx := r.rollingBucket.Advance(now, r.clearBucket) 174 | r.buckets[idx].addDuration(d) 175 | } 176 | 177 | // Reset the counter to all zero values. 178 | func (r *RollingPercentile) Reset(now time.Time) { 179 | r.rollingBucket.Advance(now, r.clearBucket) 180 | for i := 0; i < r.rollingBucket.NumBuckets; i++ { 181 | r.clearBucket(i) 182 | } 183 | } 184 | 185 | // durationsBucket supports atomically adding durations to a size limited list 186 | type durationsBucket struct { 187 | // durations is a fixed size and cannot change during operation 188 | durationsSomeInvalid []AtomicInt64 189 | currentIndex AtomicInt64 190 | } 191 | 192 | var _ json.Marshaler = &durationsBucket{} 193 | var _ json.Unmarshaler = &durationsBucket{} 194 | var _ fmt.Stringer = &durationsBucket{} 195 | 196 | func newDurationsBucket(bucketSize int) durationsBucket { 197 | return durationsBucket{ 198 | durationsSomeInvalid: make([]AtomicInt64, bucketSize), 199 | } 200 | } 201 | 202 | // String displays the current index 203 | func (b *durationsBucket) String() string { 204 | return fmt.Sprintf("durationsBucket(idx=%d)", b.currentIndex.Get()) 205 | } 206 | 207 | type forMarshal 
struct { 208 | DurationsSomeInvalid []int64 209 | CurrentIndex int64 210 | } 211 | 212 | // MarshalJSON returns the durations as JSON. It is thread safe. 213 | func (b *durationsBucket) MarshalJSON() ([]byte, error) { 214 | m := forMarshal{ 215 | DurationsSomeInvalid: make([]int64, len(b.durationsSomeInvalid)), 216 | } 217 | m.CurrentIndex = b.currentIndex.Get() 218 | for idx := range b.durationsSomeInvalid { 219 | m.DurationsSomeInvalid[idx] = b.durationsSomeInvalid[idx].Get() 220 | } 221 | return json.Marshal(m) 222 | } 223 | 224 | // UnmarshalJSON stores JSON encoded durations into the bucket. It is thread safe *only* if durations length matches. 225 | func (b *durationsBucket) UnmarshalJSON(data []byte) error { 226 | var m forMarshal 227 | if err := json.Unmarshal(data, &m); err != nil { 228 | return err 229 | } 230 | if len(b.durationsSomeInvalid) != len(m.DurationsSomeInvalid) { 231 | b.durationsSomeInvalid = make([]AtomicInt64, len(m.DurationsSomeInvalid)) 232 | } 233 | for idx := range m.DurationsSomeInvalid { 234 | b.durationsSomeInvalid[idx].Set(m.DurationsSomeInvalid[idx]) 235 | } 236 | b.currentIndex.Set(m.CurrentIndex) 237 | return nil 238 | } 239 | 240 | func (b *durationsBucket) Durations() []time.Duration { 241 | maxIndex := b.currentIndex.Get() 242 | if maxIndex > int64(len(b.durationsSomeInvalid)) { 243 | maxIndex = int64(len(b.durationsSomeInvalid)) 244 | } 245 | ret := make([]time.Duration, maxIndex) 246 | for i := 0; i < int(maxIndex); i++ { 247 | ret[i] = b.durationsSomeInvalid[i].Duration() 248 | } 249 | return ret 250 | } 251 | 252 | // IterateDurations allows executing a callback on the rolling durations bucket, returning a cursor you can pass into 253 | // future iteration calls 254 | func (b *durationsBucket) IterateDurations(startingIndex int64, callback func(time.Duration)) int64 { 255 | lastAbsoluteIndex := b.currentIndex.Get() - 1 256 | // work backwards from this value till we get to starting index 257 | for i := lastAbsoluteIndex; i 
>= startingIndex; i-- { 258 | arrayIndex := i % int64(len(b.durationsSomeInvalid)) 259 | val := b.durationsSomeInvalid[arrayIndex].Duration() 260 | callback(val) 261 | } 262 | return lastAbsoluteIndex + 1 263 | } 264 | 265 | func (b *durationsBucket) clear() { 266 | b.currentIndex.Set(0) 267 | } 268 | 269 | func (b *durationsBucket) addDuration(d time.Duration) { 270 | if len(b.durationsSomeInvalid) == 0 { 271 | return 272 | } 273 | nextIndex := b.currentIndex.Add(1) - 1 274 | arrayIndex := nextIndex % int64(len(b.durationsSomeInvalid)) 275 | b.durationsSomeInvalid[arrayIndex].Set(d.Nanoseconds()) 276 | } 277 | -------------------------------------------------------------------------------- /faststats/rolling_percentile_test.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "reflect" 6 | "strings" 7 | "sync" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestRollingPercentile_Fresh(t *testing.T) { 13 | now := time.Now() 14 | x := NewRollingPercentile(time.Second, 10, 100, now) 15 | snap := x.SnapshotAt(now) 16 | expectSnap(t, "at empty", snap, 0, -1, map[float64]time.Duration{ 17 | 50: -1, 18 | }) 19 | } 20 | 21 | func TestRollingPercentile_Reset(t *testing.T) { 22 | now := time.Now() 23 | x := NewRollingPercentile(time.Second, 10, 100, now) 24 | x.AddDuration(time.Second, now) 25 | expectSnap(t, "at first", x.SnapshotAt(now), 1, time.Second, map[float64]time.Duration{ 26 | 0.0: time.Second, 27 | 1.0: time.Second, 28 | }) 29 | x.Reset(now) 30 | expectSnap(t, "at first", x.SnapshotAt(now), 0, -1, map[float64]time.Duration{ 31 | 0.0: -1, 32 | }) 33 | } 34 | 35 | func TestDurationsBucket_String(t *testing.T) { 36 | x := newDurationsBucket(10) 37 | x.addDuration(time.Second) 38 | dur := x.Durations() 39 | if !reflect.DeepEqual(dur, []time.Duration{time.Second}) { 40 | t.Fatalf("unexpected durations") 41 | } 42 | if x.String() != "durationsBucket(idx=1)" { 43 | 
t.Fatalf("unexpected string value: %s", x.String()) 44 | } 45 | 46 | b, err := json.Marshal(&x) 47 | if err != nil { 48 | t.Fatalf("Expect no error: %s", err) 49 | } 50 | var y durationsBucket 51 | if err := json.Unmarshal(b, &y); err != nil { 52 | t.Fatal("unexpected error marshalling", err) 53 | } 54 | if !reflect.DeepEqual(y.Durations(), x.Durations()) { 55 | t.Fatal("expected same durations") 56 | } 57 | } 58 | 59 | func TestDurationsBucket_IterateDurations(t *testing.T) { 60 | x := newDurationsBucket(10) 61 | x.IterateDurations(0, func(_ time.Duration) { 62 | t.Fatal("nothing in there") 63 | }) 64 | c := 0 65 | x.addDuration(time.Second) 66 | x.IterateDurations(0, func(d time.Duration) { 67 | c++ 68 | if d != time.Second { 69 | t.Fatal("Expected a second") 70 | } 71 | }) 72 | if c != 1 { 73 | t.Fatal("Expected 1 counter") 74 | } 75 | } 76 | 77 | func TestSortedDurations_asJSON(t *testing.T) { 78 | x := SortedDurations{ 79 | time.Second, time.Millisecond, 80 | } 81 | t.Log(x.String()) 82 | b, err := json.Marshal(x) 83 | if err != nil { 84 | t.Error("Could not marshal durations", err) 85 | } 86 | t.Log(string(b)) 87 | } 88 | 89 | func TestRollingPercentile_Empty(t *testing.T) { 90 | x := RollingPercentile{} 91 | x.AddDuration(time.Millisecond, time.Now()) 92 | snap := x.SnapshotAt(time.Now()) 93 | expectSnap(t, "at empty", snap, 0, -1, map[float64]time.Duration{ 94 | 50: -1, 95 | }) 96 | } 97 | 98 | func TestRollingPercentile_Race(t *testing.T) { 99 | now := time.Now() 100 | x := NewRollingPercentile(time.Millisecond, 10, 100, now) 101 | wg := sync.WaitGroup{} 102 | concurrent := 50 103 | doNotPassTime := time.Now().Add(time.Millisecond * 50) 104 | for i := 0; i < concurrent; i++ { 105 | doTillTime(doNotPassTime, &wg, func() { 106 | x.AddDuration(time.Second, time.Now()) 107 | }) 108 | doTillTime(doNotPassTime, &wg, func() { 109 | x.SnapshotAt(time.Now()) 110 | }) 111 | doTillTime(doNotPassTime, &wg, func() { 112 | //nolint:staticcheck 113 | _, err := 
json.Marshal(&x) 114 | if err != nil { 115 | t.Error("unable to marshal", err) 116 | } 117 | }) 118 | doTillTime(doNotPassTime, &wg, func() { 119 | s := x.Var().String() 120 | if !strings.Contains(s, "snap") { 121 | t.Error("expected to contain snap") 122 | } 123 | }) 124 | } 125 | wg.Wait() 126 | } 127 | 128 | func TestRollingPercentile_AddDuration(t *testing.T) { 129 | now := time.Now() 130 | x := NewRollingPercentile(time.Second, 10, 100, now) 131 | x.AddDuration(time.Second*2, now) 132 | snap := x.SnapshotAt(now) 133 | expectSnap(t, "at one item", snap, 1, time.Second*2, map[float64]time.Duration{ 134 | 0: time.Second * 2, 135 | 99: time.Second * 2, 136 | 100: time.Second * 2, 137 | }) 138 | 139 | x.AddDuration(time.Second, now) 140 | snap = x.SnapshotAt(now) 141 | expectSnap(t, "at second item", snap, 2, time.Second*3/2, map[float64]time.Duration{ 142 | 0: time.Second, 143 | 50: time.Second + time.Second/2, 144 | 100: time.Second * 2, 145 | }) 146 | 147 | x.AddDuration(time.Second*3, now) 148 | snap = x.SnapshotAt(now) 149 | expectSnap(t, "at third item", snap, 3, time.Second*2, map[float64]time.Duration{ 150 | 0: time.Second, 151 | 25: time.Second + time.Second/2, 152 | 50: time.Second * 2, 153 | 75: time.Second*2 + time.Second/2, 154 | 100: time.Second * 3, 155 | }) 156 | } 157 | 158 | func expectSnap(t *testing.T, name string, snap SortedDurations, size int, mean time.Duration, percentiles map[float64]time.Duration) { 159 | if len(snap) != size { 160 | t.Errorf("Unexpected size: %d vs %d for %s", len(snap), size, name) 161 | } 162 | if mean != snap.Mean() { 163 | t.Fatalf("Unexpected mean: saw=%d vs expected=%d for %s at %s", snap.Mean(), mean, name, snap) 164 | } 165 | for p, expected := range percentiles { 166 | per := snap.Percentile(p) 167 | if per != expected { 168 | t.Errorf("Unexpected percentile %f: %d vs %d for %s", p, per, expected, name) 169 | } 170 | } 171 | } 172 | 173 | func TestRollingPercentile_Movement(t *testing.T) { 174 | // 100 ms per 
bucket 175 | now := time.Now() 176 | x := NewRollingPercentile(time.Millisecond*100, 10, 100, now) 177 | x.AddDuration(time.Millisecond, now) 178 | x.AddDuration(time.Millisecond*3, now) 179 | x.AddDuration(time.Millisecond*2, now.Add(time.Millisecond*500)) 180 | x.AddDuration(time.Millisecond*4, now.Add(time.Millisecond*900)) 181 | // should have vlaues 1, 2, 3, 4 182 | 183 | snap := x.SnapshotAt(now.Add(time.Millisecond * 900)) 184 | expectSnap(t, "at start", snap, 4, time.Millisecond*10/4, map[float64]time.Duration{ 185 | 0: time.Millisecond, 186 | 1.0 / 3.0 * 100: time.Millisecond * 2, 187 | 50: time.Millisecond*2 + time.Millisecond/2, 188 | 2.0 / 3.0 * 100: time.Millisecond * 3, 189 | 100: time.Millisecond * 4, 190 | }) 191 | 192 | x.AddDuration(time.Millisecond*5, now.Add(time.Millisecond*1001)) 193 | snap = x.SnapshotAt(now.Add(time.Millisecond * 1001)) 194 | // The first two values should fall off, and we should add one new one 195 | // expect [2, 4, 5] 196 | expectSnap(t, "after falling off", snap, 3, time.Millisecond*11/3, map[float64]time.Duration{ 197 | 0: time.Millisecond * 2, 198 | 50: time.Millisecond * 4, 199 | 100: time.Millisecond * 5, 200 | }) 201 | 202 | snap = x.SnapshotAt(now.Add(time.Hour)) 203 | // All values should fall off 204 | expectSnap(t, "after all falling off", snap, 0, -1, map[float64]time.Duration{ 205 | 0: -1, 206 | 50: -1, 207 | 100: -1, 208 | }) 209 | } 210 | -------------------------------------------------------------------------------- /faststats/rolling_stress_test.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | // TestRollingCounterConcurrency tests that RollingCounter is thread-safe 13 | func TestRollingCounterConcurrency(t *testing.T) { 14 | numBuckets := 10 15 | bucketWidth := time.Millisecond * 10 16 | 17 | counter := 
NewRollingCounter(bucketWidth, numBuckets, time.Now()) 18 | 19 | // Set up concurrent increments 20 | goroutines := 100 21 | incrementsPerRoutine := 1000 22 | 23 | var wg sync.WaitGroup 24 | var totalIncrements int64 25 | 26 | // Start multiple goroutines that increment the counter 27 | for g := 0; g < goroutines; g++ { 28 | wg.Add(1) 29 | go func() { 30 | defer wg.Done() 31 | 32 | for i := 0; i < incrementsPerRoutine; i++ { 33 | // Use different increment values 34 | incrementBy := int64(1) // RollingCounter only supports Inc(now), not with a value 35 | counter.Inc(time.Now()) 36 | atomic.AddInt64(&totalIncrements, incrementBy) 37 | 38 | // Add a small sleep occasionally to allow buckets to roll 39 | if i%100 == 0 { 40 | time.Sleep(bucketWidth / 2) 41 | } 42 | } 43 | }() 44 | } 45 | 46 | wg.Wait() 47 | 48 | // Sleep slightly longer than the entire window to ensure all increments have rolled out 49 | time.Sleep(bucketWidth * time.Duration(numBuckets+1)) 50 | 51 | // The TotalSum is never reset and should match our total increments 52 | require.Equal(t, totalIncrements, counter.TotalSum()) 53 | 54 | t.Logf("Successfully processed %d concurrent increments", totalIncrements) 55 | } 56 | 57 | // TestRollingBucketConcurrency tests that RollingBuckets are thread-safe 58 | func TestRollingBucketConcurrency(t *testing.T) { 59 | // Skip this test since we don't have direct access to bucket functionality 60 | t.Skip("RollingBucket implementation not directly accessible") 61 | } 62 | 63 | // TestRollingPercentileConcurrency tests that RollingPercentile is thread-safe 64 | func TestRollingPercentileConcurrency(t *testing.T) { 65 | numBuckets := 10 66 | bucketWidth := time.Millisecond * 10 67 | bucketSize := 100 68 | 69 | // Default bucket size 100 70 | percentile := NewRollingPercentile(bucketWidth, numBuckets, bucketSize, time.Now()) 71 | 72 | // Set up concurrent adds 73 | goroutines := 50 74 | addsPerRoutine := 500 75 | 76 | var wg sync.WaitGroup 77 | 78 | // Start multiple 
goroutines that add values 79 | for g := 0; g < goroutines; g++ { 80 | wg.Add(1) 81 | go func(id int) { 82 | defer wg.Done() 83 | 84 | for i := 0; i < addsPerRoutine; i++ { 85 | // Add a range of values 86 | value := int64((id*100 + i) % 1000) 87 | now := time.Now() 88 | percentile.AddDuration(time.Duration(value), now) 89 | 90 | // Concurrent reads while adding 91 | if i%10 == 0 { 92 | // Use Snapshot which returns SortedDurations with proper methods 93 | snap := percentile.SnapshotAt(now) 94 | _ = snap.Percentile(0.5) 95 | _ = snap.Mean() 96 | _ = snap.Min() // Max not directly accessible 97 | } 98 | 99 | // Occasionally sleep to allow buckets to roll 100 | if i%50 == 0 { 101 | time.Sleep(bucketWidth / 5) 102 | } 103 | } 104 | }(g) 105 | } 106 | 107 | // Start additional goroutines that just read percentiles 108 | readGoroutines := 10 109 | for g := 0; g < readGoroutines; g++ { 110 | wg.Add(1) 111 | go func() { 112 | defer wg.Done() 113 | 114 | // Read different percentiles 115 | percentiles := []float64{0.5, 0.9, 0.95, 0.99} 116 | 117 | for i := 0; i < 1000; i++ { 118 | now := time.Now() 119 | snap := percentile.SnapshotAt(now) 120 | for _, p := range percentiles { 121 | _ = snap.Percentile(p) 122 | } 123 | _ = snap.Mean() 124 | _ = snap.Min() // No direct Max method 125 | 126 | time.Sleep(bucketWidth / 10) 127 | } 128 | }() 129 | } 130 | 131 | wg.Wait() 132 | 133 | // Sleep slightly longer than the entire window to ensure all values have rolled out 134 | time.Sleep(bucketWidth * time.Duration(numBuckets+1)) 135 | 136 | // After all buckets have rolled, the percentile should be empty 137 | // There's no direct Max method, so we'll check if the snapshot is empty 138 | snap := percentile.Snapshot() 139 | if len(snap) > 0 { 140 | t.Errorf("Expected empty snapshot after rollout, got %v entries", len(snap)) 141 | } 142 | } 143 | 144 | // TestTimedCheckConcurrency tests that TimedCheck is thread-safe 145 | func TestTimedCheckConcurrency(t *testing.T) { 146 | // Skip 
since we don't have direct access to TimedCheck 147 | t.Skip("TimedCheck not directly accessible for testing") 148 | } 149 | 150 | // TestRollingCounterBucketRolloverRace tests for race conditions during bucket rollover 151 | func TestRollingCounterBucketRolloverRace(t *testing.T) { 152 | // Create a counter with very small buckets for frequent rollovers 153 | numBuckets := 5 154 | bucketWidth := time.Millisecond * 5 155 | 156 | counter := NewRollingCounter(bucketWidth, numBuckets, time.Now()) 157 | 158 | // Start threads that constantly increment 159 | goroutines := 30 160 | duration := time.Millisecond * 300 // Run for 300ms 161 | 162 | var wg sync.WaitGroup 163 | var running int32 = 1 164 | var totalAdded int64 165 | 166 | for g := 0; g < goroutines; g++ { 167 | wg.Add(1) 168 | go func() { 169 | defer wg.Done() 170 | 171 | for atomic.LoadInt32(&running) == 1 { 172 | // Mix of operations 173 | counter.Inc(time.Now()) 174 | atomic.AddInt64(&totalAdded, 1) 175 | 176 | if counter.TotalSum() < 0 { 177 | t.Errorf("Counter sum went negative: %d", counter.TotalSum()) 178 | } 179 | } 180 | }() 181 | } 182 | 183 | // Start more threads that read percentiles during rollover 184 | readThreads := 10 185 | for g := 0; g < readThreads; g++ { 186 | wg.Add(1) 187 | go func() { 188 | defer wg.Done() 189 | 190 | for atomic.LoadInt32(&running) == 1 { 191 | sum := counter.TotalSum() 192 | if sum < 0 { 193 | t.Errorf("Counter sum went negative: %d", sum) 194 | } 195 | 196 | // Also check rolling sum 197 | now := time.Now() 198 | rollingSum := counter.RollingSumAt(now) 199 | if rollingSum < 0 { 200 | t.Errorf("Rolling sum went negative: %d", rollingSum) 201 | } 202 | } 203 | }() 204 | } 205 | 206 | // Let it run for the duration 207 | time.Sleep(duration) 208 | atomic.StoreInt32(&running, 0) 209 | 210 | wg.Wait() 211 | 212 | t.Logf("Added %d items during high-frequency rollover test", totalAdded) 213 | } 214 | 
-------------------------------------------------------------------------------- /faststats/timedcheck.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | // TimedCheck lets X events happen every sleepDuration units of time. For optimizations, it uses TimeAfterFunc to reset 11 | // an internal atomic boolean for when events are allowed. This timer could run a little bit behind real time since 12 | // it depends on when the OS decides to trigger the timer. 13 | type TimedCheck struct { 14 | sleepDuration AtomicInt64 15 | eventCountToAllow AtomicInt64 16 | 17 | isFastFail AtomicBoolean 18 | isFailFastVersion AtomicInt64 19 | 20 | TimeAfterFunc func(time.Duration, func()) *time.Timer 21 | 22 | // All 3 of these variables must be accessed with the RWMutex 23 | nextOpenTime time.Time 24 | currentlyAllowedEventCount int64 25 | lastSetTimer *time.Timer 26 | mu sync.RWMutex 27 | } 28 | 29 | var _ json.Marshaler = &TimedCheck{} 30 | var _ json.Unmarshaler = &TimedCheck{} 31 | var _ fmt.Stringer = &TimedCheck{} 32 | 33 | // marshalStruct is used by JSON marshalling 34 | type marshalStruct struct { 35 | SleepDuration int64 36 | EventCountToAllow int64 37 | NextOpenTime time.Time 38 | CurrentlyAllowedEventCount int64 39 | } 40 | 41 | func (c *TimedCheck) String() string { 42 | c.mu.Lock() 43 | defer c.mu.Unlock() 44 | return fmt.Sprintf("TimedCheck(open=%s)", c.nextOpenTime) 45 | } 46 | 47 | // MarshalJSON writes the object as JSON. It is thread safe. 
48 | func (c *TimedCheck) MarshalJSON() ([]byte, error) { 49 | c.mu.Lock() 50 | defer c.mu.Unlock() 51 | return json.Marshal(marshalStruct{ 52 | SleepDuration: c.sleepDuration.Get(), 53 | EventCountToAllow: c.eventCountToAllow.Get(), 54 | NextOpenTime: c.nextOpenTime, 55 | CurrentlyAllowedEventCount: c.currentlyAllowedEventCount, 56 | }) 57 | } 58 | 59 | // UnmarshalJSON changes the object from JSON. It is *NOT* thread safe. 60 | func (c *TimedCheck) UnmarshalJSON(b []byte) error { 61 | var into marshalStruct 62 | if err := json.Unmarshal(b, &into); err != nil { 63 | return err 64 | } 65 | c.mu.Lock() 66 | defer c.mu.Unlock() 67 | c.sleepDuration.Set(into.SleepDuration) 68 | c.eventCountToAllow.Set(into.EventCountToAllow) 69 | c.nextOpenTime = into.NextOpenTime 70 | c.currentlyAllowedEventCount = into.CurrentlyAllowedEventCount 71 | return nil 72 | } 73 | 74 | // SetSleepDuration modifies how long time timed check will sleep. It will not change 75 | // alredy sleeping checks, but will change during the next check. 
76 | func (c *TimedCheck) SetSleepDuration(newDuration time.Duration) { 77 | c.sleepDuration.Set(newDuration.Nanoseconds()) 78 | } 79 | 80 | func (c *TimedCheck) afterFunc(d time.Duration, f func()) *time.Timer { 81 | if c.TimeAfterFunc == nil { 82 | return time.AfterFunc(d, f) 83 | } 84 | return c.TimeAfterFunc(d, f) 85 | } 86 | 87 | // SetEventCountToAllow configures how many times Check() can return true before moving time 88 | // to the next interval 89 | func (c *TimedCheck) SetEventCountToAllow(newCount int64) { 90 | c.eventCountToAllow.Set(newCount) 91 | } 92 | 93 | // SleepStart resets the checker to trigger after now + sleepDuration 94 | func (c *TimedCheck) SleepStart(now time.Time) { 95 | c.mu.Lock() 96 | c.resetOpenTimeWithLock(now) 97 | c.mu.Unlock() 98 | } 99 | 100 | func (c *TimedCheck) resetOpenTimeWithLock(now time.Time) { 101 | if c.lastSetTimer != nil { 102 | c.lastSetTimer.Stop() 103 | c.lastSetTimer = nil 104 | } 105 | c.nextOpenTime = now.Add(c.sleepDuration.Duration()) 106 | c.currentlyAllowedEventCount = 0 107 | c.isFastFail.Set(true) 108 | currentVersion := c.isFailFastVersion.Add(1) 109 | c.lastSetTimer = c.afterFunc(c.sleepDuration.Duration(), func() { 110 | // If sleep start is called again, don't reset from an old version 111 | if currentVersion == c.isFailFastVersion.Get() { 112 | c.isFastFail.Set(false) 113 | } 114 | }) 115 | } 116 | 117 | // Check returns true if a check is allowed at this time 118 | func (c *TimedCheck) Check(now time.Time) bool { 119 | if c.isFastFail.Get() { 120 | return false 121 | } 122 | c.mu.RLock() 123 | // Common condition fast check 124 | if c.nextOpenTime.After(now) { 125 | c.mu.RUnlock() 126 | return false 127 | } 128 | c.mu.RUnlock() 129 | 130 | c.mu.Lock() 131 | defer c.mu.Unlock() 132 | if c.nextOpenTime.After(now) { 133 | return false 134 | } 135 | c.currentlyAllowedEventCount++ 136 | if c.currentlyAllowedEventCount >= c.eventCountToAllow.Get() { 137 | c.resetOpenTimeWithLock(now) 138 | } 139 | return 
true 140 | } 141 | -------------------------------------------------------------------------------- /faststats/timedcheck_test.go: -------------------------------------------------------------------------------- 1 | package faststats 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "sync" 7 | "testing" 8 | "time" 9 | 10 | "github.com/cep21/circuit/v4/internal/clock" 11 | "github.com/cep21/circuit/v4/internal/testhelp" 12 | ) 13 | 14 | func TestTimedCheck_Empty(t *testing.T) { 15 | x := TimedCheck{} 16 | now := time.Now() 17 | if !x.Check(now) { 18 | t.Error("First check should pass on empty object") 19 | } 20 | } 21 | 22 | func TestTimedCheck_MarshalJSON(t *testing.T) { 23 | x := TimedCheck{} 24 | if x.String() != fmt.Sprintf("TimedCheck(open=%s)", time.Time{}) { 25 | t.Fatal("unexpected toString", x.String()) 26 | } 27 | x.SetSleepDuration(time.Second) 28 | x.SetEventCountToAllow(12) 29 | b, err := json.Marshal(&x) 30 | if err != nil { 31 | t.Fatal("unexpected err", err) 32 | } 33 | var y TimedCheck 34 | if err := json.Unmarshal(b, &y); err != nil { 35 | t.Fatal("unexpected err", err) 36 | } 37 | if y.eventCountToAllow.Get() != 12 { 38 | t.Fatal("expect 10 event counts to allow") 39 | } 40 | if y.sleepDuration.Get() != time.Second.Nanoseconds() { 41 | t.Fatal("expect 1 sec sleep duration") 42 | } 43 | } 44 | 45 | func TestTimedCheck_Check(t *testing.T) { 46 | c := clock.MockClock{} 47 | x := TimedCheck{ 48 | TimeAfterFunc: c.AfterFunc, 49 | } 50 | x.SetSleepDuration(time.Second) 51 | now := time.Now() 52 | c.Set(now) 53 | x.SleepStart(now) 54 | if x.Check(now) { 55 | t.Fatal("Should not check at first") 56 | } 57 | if x.Check(c.Set(now.Add(time.Millisecond * 999))) { 58 | t.Fatal("Should not check close to end") 59 | } 60 | if !x.Check(c.Set(now.Add(time.Second))) { 61 | t.Fatal("Should check at barrier") 62 | } 63 | if x.Check(c.Set(now.Add(time.Second))) { 64 | t.Fatal("Should only check once") 65 | } 66 | if x.Check(c.Set(now.Add(time.Second + 
time.Millisecond))) { 67 | t.Fatal("Should only double check") 68 | } 69 | if !x.Check(c.Set(now.Add(time.Second * 2))) { 70 | t.Fatal("Should check again at 2 sec") 71 | } 72 | } 73 | 74 | func TestTimedCheck(t *testing.T) { 75 | sleepDuration := time.Millisecond * 100 76 | now := time.Now() 77 | neverFinishesBefore := now.Add(sleepDuration) 78 | // Travis is so slow we need a big buffer 79 | alwaysFinishesBy := now.Add(sleepDuration + time.Second) 80 | x := TimedCheck{} 81 | x.SetEventCountToAllow(1) 82 | x.SetSleepDuration(sleepDuration) 83 | x.SleepStart(time.Now()) 84 | hasFinished := false 85 | var wg sync.WaitGroup 86 | testhelp.DoTillTime(alwaysFinishesBy, &wg, func() { 87 | if x.Check(time.Now()) { 88 | if time.Now().Before(neverFinishesBefore) { 89 | t.Error("It should never finish by this time") 90 | } 91 | hasFinished = true 92 | } 93 | }) 94 | wg.Wait() 95 | if !hasFinished { 96 | t.Error("It should be finished by this late") 97 | } 98 | } 99 | 100 | func TestTimedCheckRaces(_ *testing.T) { 101 | x := TimedCheck{} 102 | x.SetSleepDuration(time.Nanosecond * 100) 103 | endTime := time.Now().Add(time.Millisecond * 50) 104 | wg := sync.WaitGroup{} 105 | doTillTime(endTime, &wg, func() { 106 | x.Check(time.Now()) 107 | }) 108 | doTillTime(endTime, &wg, func() { 109 | x.SetEventCountToAllow(2) 110 | }) 111 | doTillTime(endTime, &wg, func() { 112 | x.SetSleepDuration(time.Millisecond * 100) 113 | }) 114 | wg.Wait() 115 | } 116 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cep21/circuit/v4 2 | 3 | go 1.21 4 | 5 | require github.com/stretchr/testify v1.10.0 6 | 7 | require ( 8 | github.com/davecgh/go-spew v1.1.1 // indirect 9 | github.com/pmezard/go-difflib v1.0.0 // indirect 10 | gopkg.in/yaml.v3 v3.0.1 // indirect 11 | ) 12 | -------------------------------------------------------------------------------- 
/go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 5 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 6 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 7 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 8 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 9 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 10 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 11 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 12 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 13 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 14 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 15 | -------------------------------------------------------------------------------- /gowrapper.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/cep21/circuit/v4/faststats" 7 | ) 8 | 9 | // goroutineWrapper contains logic to wrap normal run methods inside a goroutine so they can end early 10 | // if the goroutine continues to run 11 | type goroutineWrapper struct { 12 | skipCatchPanics faststats.AtomicBoolean 13 | lostErrors func(err error, panics interface{}) 14 | } 15 | 16 | func (g *goroutineWrapper) 
run(runFunc func(context.Context) error) func(context.Context) error { 17 | if runFunc == nil { 18 | return nil 19 | } 20 | return func(ctx context.Context) error { 21 | var panicResult chan interface{} 22 | if !g.skipCatchPanics.Get() { 23 | panicResult = make(chan interface{}, 1) 24 | } 25 | runFuncErr := make(chan error, 1) 26 | go func() { 27 | if panicResult != nil { 28 | defer func() { 29 | if r := recover(); r != nil { 30 | panicResult <- r 31 | } 32 | }() 33 | } 34 | runFuncErr <- runFunc(ctx) 35 | }() 36 | select { 37 | case <-ctx.Done(): 38 | // runFuncErr is a lost error. 39 | if g.lostErrors != nil { 40 | go g.waitForErrors(runFuncErr, panicResult) 41 | } 42 | return ctx.Err() 43 | case err := <-runFuncErr: 44 | return err 45 | case panicVal := <-panicResult: 46 | panic(panicVal) 47 | } 48 | } 49 | } 50 | 51 | func (g *goroutineWrapper) fallback(runFunc func(context.Context, error) error) func(context.Context, error) error { 52 | if runFunc == nil { 53 | return nil 54 | } 55 | return func(ctx context.Context, err error) error { 56 | return g.run(func(funcCtx context.Context) error { 57 | return runFunc(funcCtx, err) 58 | })(ctx) 59 | } 60 | } 61 | 62 | func (g *goroutineWrapper) waitForErrors(runFuncErr chan error, panicResults chan interface{}) { 63 | select { 64 | case err := <-runFuncErr: 65 | g.lostErrors(err, nil) 66 | case panicResult := <-panicResults: 67 | g.lostErrors(nil, panicResult) 68 | } 69 | close(runFuncErr) 70 | close(panicResults) 71 | } 72 | -------------------------------------------------------------------------------- /gowrapper_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "reflect" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | type errWaiter struct { 12 | err error 13 | panics interface{} 14 | 15 | errChan chan error 16 | panicChan chan interface{} 17 | } 18 | 19 | func (e *errWaiter) init() { 20 | e.errChan = make(chan 
error, 1) 21 | e.panicChan = make(chan interface{}, 1) 22 | } 23 | 24 | func (e *errWaiter) lostErrors(err error, panics interface{}) { 25 | if err == nil && panics == nil { 26 | panic("expect one") 27 | } 28 | if e.err != nil || e.panics != nil { 29 | panic("unexpected double set") 30 | } 31 | e.err = err 32 | e.panics = panics 33 | close(e.errChan) 34 | close(e.panicChan) 35 | } 36 | 37 | func Test_goroutineWrapper_waitForErrors(t *testing.T) { 38 | type args struct { 39 | runFuncErr chan error 40 | panicResults chan interface{} 41 | } 42 | type testRun struct { 43 | lostCapture errWaiter 44 | name string 45 | args args 46 | gorun func(t *testRun) 47 | expected errWaiter 48 | } 49 | tests := []testRun{ 50 | { 51 | name: "onFuncErr", 52 | args: args{ 53 | runFuncErr: make(chan error), 54 | panicResults: make(chan interface{}), 55 | }, 56 | gorun: func(t *testRun) { 57 | t.args.runFuncErr <- errors.New("bad") 58 | }, 59 | expected: errWaiter{ 60 | err: errors.New("bad"), 61 | }, 62 | }, 63 | { 64 | name: "onPanic", 65 | args: args{ 66 | runFuncErr: make(chan error), 67 | panicResults: make(chan interface{}), 68 | }, 69 | gorun: func(t *testRun) { 70 | t.args.panicResults <- "bad panic" 71 | }, 72 | expected: errWaiter{ 73 | panics: "bad panic", 74 | }, 75 | }, 76 | } 77 | for _, tt := range tests { 78 | tt := tt 79 | t.Run(tt.name, func(t *testing.T) { 80 | g := &goroutineWrapper{ 81 | lostErrors: tt.lostCapture.lostErrors, 82 | } 83 | tt.lostCapture.init() 84 | go tt.gorun(&tt) 85 | g.waitForErrors(tt.args.runFuncErr, tt.args.panicResults) 86 | // Reset these so deep equal works ... 
87 | tt.lostCapture.panicChan = nil 88 | tt.lostCapture.errChan = nil 89 | // --- end reset 90 | if !reflect.DeepEqual(tt.expected, tt.lostCapture) { 91 | t.Errorf("goroutineWrapper.waitForErrors failure %v vs %v", tt.expected, tt.lostCapture) 92 | } 93 | }) 94 | } 95 | } 96 | 97 | // noinspection GoNilness 98 | func Test_goroutineWrapper_nil(t *testing.T) { 99 | var g goroutineWrapper 100 | if g.run(nil) != nil { 101 | t.Error("expect nil when given nil") 102 | } 103 | if g.fallback(nil) != nil { 104 | t.Error("expect nil when given nil on fallback") 105 | } 106 | } 107 | 108 | // noinspection GoNilness 109 | func Test_goroutineWrapper_foreground(t *testing.T) { 110 | var g goroutineWrapper 111 | err := g.fallback(func(ctx context.Context, err error) error { 112 | return errors.New("bob") 113 | })(context.Background(), errors.New("ignore me")) 114 | if err.Error() != "bob" { 115 | t.Errorf("expected bob back, not %s", err.Error()) 116 | } 117 | } 118 | 119 | func Test_goroutineWrapper_run(t *testing.T) { 120 | deadCtx, onEnd := context.WithCancel(context.Background()) 121 | onEnd() 122 | type args struct { 123 | runFunc func(context.Context) error 124 | } 125 | tests := []struct { 126 | name string 127 | lostCapture errWaiter 128 | ctx context.Context 129 | args args 130 | want error 131 | expectPanic interface{} 132 | }{ 133 | { 134 | name: "normal", 135 | args: args{ 136 | runFunc: func(ctx context.Context) error { 137 | return nil 138 | }, 139 | }, 140 | }, 141 | { 142 | name: "normal_error", 143 | args: args{ 144 | runFunc: func(ctx context.Context) error { 145 | return errors.New("bad") 146 | }, 147 | }, 148 | want: errors.New("bad"), 149 | }, 150 | { 151 | name: "timeout", 152 | args: args{ 153 | runFunc: func(ctx context.Context) error { 154 | time.Sleep(time.Hour) 155 | return nil 156 | }, 157 | }, 158 | ctx: deadCtx, 159 | want: context.Canceled, 160 | }, 161 | { 162 | name: "panic", 163 | args: args{ 164 | runFunc: func(ctx context.Context) error { 165 | 
panic("bob") 166 | }, 167 | }, 168 | expectPanic: "bob", 169 | }, 170 | } 171 | for _, tt := range tests { 172 | tt := tt 173 | t.Run(tt.name, func(t *testing.T) { 174 | g := &goroutineWrapper{ 175 | lostErrors: tt.lostCapture.lostErrors, 176 | } 177 | f := g.run(tt.args.runFunc) 178 | if tt.expectPanic != nil { 179 | defer func() { 180 | if err := recover(); err != nil { 181 | if err != tt.expectPanic { 182 | panic(err) 183 | } 184 | } else { 185 | panic("i expect to panic!") 186 | } 187 | }() 188 | } 189 | if tt.ctx == nil { 190 | tt.ctx = context.Background() 191 | } 192 | if got := f(tt.ctx); !reflect.DeepEqual(got, tt.want) { 193 | t.Errorf("goroutineWrapper.run() = %v, want %v", got, tt.want) 194 | } 195 | }) 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /internal/clock/clock.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | // MockClock allows mocking time for testing 9 | type MockClock struct { 10 | currentTime time.Time 11 | callbacks []timedCallbacks 12 | mu sync.Mutex 13 | } 14 | 15 | type timedCallbacks struct { 16 | when time.Time 17 | f func() 18 | } 19 | 20 | // Set the current time 21 | func (m *MockClock) Set(t time.Time) time.Time { 22 | // Note: do this after the lock is released 23 | defer m.triggerCallbacks() 24 | m.mu.Lock() 25 | defer m.mu.Unlock() 26 | m.currentTime = t 27 | return m.currentTime 28 | } 29 | 30 | // Add some time, triggering sleeping callbacks 31 | func (m *MockClock) Add(d time.Duration) time.Time { 32 | return m.Set(m.Now().Add(d)) 33 | } 34 | 35 | func (m *MockClock) triggerCallbacks() { 36 | var newArray []timedCallbacks 37 | var toCall []timedCallbacks 38 | m.mu.Lock() 39 | for _, c := range m.callbacks { 40 | if m.currentTime.Before(c.when) { 41 | newArray = append(newArray, c) 42 | } else { 43 | toCall = append(toCall, c) 44 | } 45 | } 46 | m.callbacks = 
newArray 47 | m.mu.Unlock() 48 | for _, cb := range toCall { 49 | cb.f() 50 | } 51 | } 52 | 53 | // Now simulates time.Now() 54 | func (m *MockClock) Now() time.Time { 55 | m.mu.Lock() 56 | defer m.mu.Unlock() 57 | return m.currentTime 58 | } 59 | 60 | // AfterFunc simulates time.AfterFunc 61 | func (m *MockClock) AfterFunc(d time.Duration, f func()) *time.Timer { 62 | m.mu.Lock() 63 | defer m.mu.Unlock() 64 | if d == 0 { 65 | f() 66 | return nil 67 | } 68 | m.callbacks = append(m.callbacks, timedCallbacks{when: m.currentTime.Add(d), f: f}) 69 | // Do not use what is returned ... 70 | return nil 71 | } 72 | 73 | // AfterFunc simulates time.After 74 | func (m *MockClock) After(d time.Duration) <-chan time.Time { 75 | c := make(chan time.Time, 1) 76 | m.AfterFunc(d, func() { 77 | c <- m.Now() 78 | }) 79 | return c 80 | } 81 | 82 | // TickUntil will tick the mock clock until shouldStop returns false. Real sleep should be very small 83 | func TickUntil(m *MockClock, shouldStop func() bool, realSleep time.Duration, mockIncr time.Duration) { 84 | for !shouldStop() { 85 | time.Sleep(realSleep) 86 | m.Add(mockIncr) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /internal/clock/clock_test.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestMockClock_Set(t *testing.T) { 10 | m := &MockClock{} 11 | now := time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC) 12 | 13 | // Test setting the time 14 | result := m.Set(now) 15 | if !result.Equal(now) { 16 | t.Errorf("Expected time %v, got %v", now, result) 17 | } 18 | if !m.Now().Equal(now) { 19 | t.Errorf("Expected time %v, got %v", now, m.Now()) 20 | } 21 | 22 | // Test setting time again 23 | later := now.Add(time.Hour) 24 | result = m.Set(later) 25 | if !result.Equal(later) { 26 | t.Errorf("Expected time %v, got %v", later, result) 27 | } 28 | if 
!m.Now().Equal(later) { 29 | t.Errorf("Expected time %v, got %v", later, m.Now()) 30 | } 31 | } 32 | 33 | func TestMockClock_Add(t *testing.T) { 34 | m := &MockClock{} 35 | now := time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC) 36 | m.Set(now) 37 | 38 | // Test adding time 39 | result := m.Add(time.Hour) 40 | expected := now.Add(time.Hour) 41 | if !result.Equal(expected) { 42 | t.Errorf("Expected time %v, got %v", expected, result) 43 | } 44 | if !m.Now().Equal(expected) { 45 | t.Errorf("Expected time %v, got %v", expected, m.Now()) 46 | } 47 | } 48 | 49 | func TestMockClock_AfterFunc(t *testing.T) { 50 | m := &MockClock{} 51 | now := time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC) 52 | m.Set(now) 53 | 54 | var callCount int 55 | var mu sync.Mutex 56 | 57 | incrementCallCount := func() { 58 | mu.Lock() 59 | defer mu.Unlock() 60 | callCount++ 61 | } 62 | 63 | getCallCount := func() int { 64 | mu.Lock() 65 | defer mu.Unlock() 66 | return callCount 67 | } 68 | 69 | // Test AfterFunc with immediate execution 70 | m.AfterFunc(0, incrementCallCount) 71 | if count := getCallCount(); count != 1 { 72 | t.Errorf("Expected call count to be 1, got %d", count) 73 | } 74 | 75 | // Test AfterFunc with delayed execution 76 | m.AfterFunc(time.Hour, incrementCallCount) 77 | if count := getCallCount(); count != 1 { 78 | t.Errorf("Function should not be called before time advances, got count %d", count) 79 | } 80 | 81 | // Add half the time - callback shouldn't fire yet 82 | m.Add(30 * time.Minute) 83 | if count := getCallCount(); count != 1 { 84 | t.Errorf("Function should not be called before time reaches target, got count %d", count) 85 | } 86 | 87 | // Add remaining time - callback should fire 88 | m.Add(30 * time.Minute) 89 | if count := getCallCount(); count != 2 { 90 | t.Errorf("Function should be called after time reaches target, got count %d", count) 91 | } 92 | } 93 | 94 | func TestMockClock_After(t *testing.T) { 95 | m := &MockClock{} 96 | now := time.Date(2023, 1, 1, 12, 0, 0, 
0, time.UTC) 97 | m.Set(now) 98 | 99 | // Test After channel 100 | ch := m.After(time.Hour) 101 | 102 | // Shouldn't be any value yet 103 | select { 104 | case <-ch: 105 | t.Fatal("Channel should not have a value yet") 106 | default: 107 | // This is correct 108 | } 109 | 110 | // Add time and check channel 111 | later := now.Add(time.Hour) 112 | m.Add(time.Hour) 113 | 114 | select { 115 | case receivedTime := <-ch: 116 | if !receivedTime.Equal(later) { 117 | t.Errorf("Expected received time %v, got %v", later, receivedTime) 118 | } 119 | default: 120 | t.Fatal("Channel should have a value after time advances") 121 | } 122 | } 123 | 124 | func TestMockClock_triggerCallbacks(t *testing.T) { 125 | m := &MockClock{} 126 | now := time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC) 127 | m.Set(now) 128 | 129 | var calls []int 130 | var mu sync.Mutex 131 | 132 | addCall := func(i int) { 133 | mu.Lock() 134 | defer mu.Unlock() 135 | calls = append(calls, i) 136 | } 137 | 138 | m.AfterFunc(time.Hour, func() { addCall(1) }) 139 | m.AfterFunc(2*time.Hour, func() { addCall(2) }) 140 | m.AfterFunc(3*time.Hour, func() { addCall(3) }) 141 | 142 | // Add enough time to trigger first callback 143 | m.Add(65 * time.Minute) // Just past 1 hour 144 | 145 | mu.Lock() 146 | if len(calls) != 1 || calls[0] != 1 { 147 | t.Errorf("Expected calls to be [1], got %v", calls) 148 | } 149 | mu.Unlock() 150 | 151 | // Add enough time to trigger second callback 152 | m.Add(65 * time.Minute) // Now at 2h10m 153 | 154 | mu.Lock() 155 | if len(calls) != 2 || calls[0] != 1 || calls[1] != 2 { 156 | t.Errorf("Expected calls to be [1, 2], got %v", calls) 157 | } 158 | mu.Unlock() 159 | 160 | // Add enough time to trigger third callback 161 | m.Add(65 * time.Minute) // Now at 3h15m 162 | 163 | mu.Lock() 164 | if len(calls) != 3 || calls[0] != 1 || calls[1] != 2 || calls[2] != 3 { 165 | t.Errorf("Expected calls to be [1, 2, 3], got %v", calls) 166 | } 167 | mu.Unlock() 168 | } 169 | 170 | func TestTickUntil(t 
// ExpvarToVal is a helper to extract the root value() from an expvar
func ExpvarToVal(in expvar.Var) interface{} {
	type valuer interface {
		Value() interface{}
	}
	v, ok := in.(valuer)
	if !ok {
		return nil
	}
	return v.Value()
}

// ForExpvar is a helper to extract the root value() from any interface
func ForExpvar(in interface{}) interface{} {
	type hasVar interface {
		Var() expvar.Var
	}
	withVar, ok := in.(hasVar)
	if !ok {
		return in
	}
	return ExpvarToVal(withVar.Var())
}

// mockExpvar is a test expvar.Var whose Value() returns a canned value.
type mockExpvar struct {
	val interface{}
}

func (m *mockExpvar) String() string {
	return "mock"
}

func (m *mockExpvar) Value() interface{} {
	return m.val
}

// hasVarType is a test type exposing an expvar.Var via Var().
type hasVarType struct {
	v expvar.Var
}

func (h hasVarType) Var() expvar.Var {
	return h.v
}

// TestExpvarToVal checks Value() unwrapping and the nil result for vars without Value().
func TestExpvarToVal(t *testing.T) {
	mock := &mockExpvar{val: 42}
	if got := ExpvarToVal(mock); got != 42 {
		t.Errorf("Expected result to be 42, got %v", got)
	}

	// A var without a Value() interface{} method - use a unique name to avoid collision
	plain := expvar.NewString("test_" + t.Name() + "_" + fmt.Sprintf("%d", time.Now().UnixNano()))
	if got := ExpvarToVal(plain); got != nil {
		t.Errorf("Expected result to be nil, got %v", got)
	}
}

// TestForExpvar checks Var() unwrapping and pass-through of plain values.
func TestForExpvar(t *testing.T) {
	wrapped := hasVarType{v: &mockExpvar{val: "test-value"}}
	if got := ForExpvar(wrapped); got != "test-value" {
		t.Errorf("Expected result to be 'test-value', got %v", got)
	}

	if got := ForExpvar("direct-value"); got != "direct-value" {
		t.Errorf("Expected result to be 'direct-value', got %v", got)
	}
}
-------------------------------------------------------------------------------- /internal/testhelp/testhelp.go: -------------------------------------------------------------------------------- 1 | package testhelp 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "runtime" 9 | "sync" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | // BehaviorCheck tracks Run commands to help you test their logic 15 | type BehaviorCheck struct { 16 | TotalRuns int64 17 | totalErrors int64 18 | LongestRunDuration time.Duration 19 | MostConcurrent int64 20 | currentConcurrent int64 21 | 22 | mu sync.Mutex 23 | RunFunc func(ctx context.Context) error 24 | } 25 | 26 | // MustTesting errors if err != nil 27 | func MustTesting(t *testing.T, err error) { 28 | if err != nil { 29 | t.Errorf("unexpected error: %s", err) 30 | } 31 | } 32 | 33 | // MustNotTesting errors if err == nil 34 | func MustNotTesting(t *testing.T, err error) { 35 | if err == nil { 36 | t.Errorf("Saw nil, expected an error") 37 | } 38 | } 39 | 40 | // Run is a runFunc. 
Use this as the runFunc for your circuit 41 | func (b *BehaviorCheck) Run(ctx context.Context) (err error) { 42 | start := time.Now() 43 | defer func() { 44 | end := time.Now() 45 | thisRun := end.Sub(start) 46 | 47 | b.mu.Lock() 48 | defer b.mu.Unlock() 49 | 50 | if err != nil { 51 | b.totalErrors++ 52 | } 53 | if b.LongestRunDuration < thisRun { 54 | b.LongestRunDuration = thisRun 55 | } 56 | b.currentConcurrent-- 57 | }() 58 | b.mu.Lock() 59 | b.TotalRuns++ 60 | b.currentConcurrent++ 61 | if b.currentConcurrent > b.MostConcurrent { 62 | b.MostConcurrent = b.currentConcurrent 63 | } 64 | b.mu.Unlock() 65 | return b.RunFunc(ctx) 66 | } 67 | 68 | // SleepsForX waits for a duration, or until the passed in context fails 69 | func SleepsForX(d time.Duration) func(context.Context) error { 70 | return func(ctx context.Context) error { 71 | select { 72 | case <-ctx.Done(): 73 | return ctx.Err() 74 | case <-time.After(d): 75 | return nil 76 | } 77 | } 78 | } 79 | 80 | // AlwaysPassesFallback is a fallback circuit that always passes 81 | func AlwaysPassesFallback(_ context.Context, _ error) error { 82 | return nil 83 | } 84 | 85 | // AlwaysFailsFallback is a fallback circuit that always fails 86 | func AlwaysFailsFallback(_ context.Context, err error) error { 87 | return fmt.Errorf("failed: %s", err) 88 | } 89 | 90 | var errFailure = errors.New("alwaysFails failure") 91 | 92 | // AlwaysFails is a runFunc that always fails 93 | func AlwaysFails(_ context.Context) error { 94 | return errFailure 95 | } 96 | 97 | // AlwaysPasses is a runFunc that always passes 98 | func AlwaysPasses(_ context.Context) error { 99 | return nil 100 | } 101 | 102 | // DoTillTime concurrently calls f() in a forever loop until endTime 103 | func DoTillTime(endTime time.Time, wg *sync.WaitGroup, f func()) { 104 | wg.Add(1) 105 | go func() { 106 | defer wg.Done() 107 | for time.Now().Before(endTime) { 108 | f() 109 | // Don't need to sleep. Just busy loop. 
But let another thread take over if it wants (to get some concurrency) 110 | if rand.Float64() > .5 { 111 | runtime.Gosched() 112 | } 113 | } 114 | }() 115 | } 116 | -------------------------------------------------------------------------------- /internal/testhelp/testhelp_test.go: -------------------------------------------------------------------------------- 1 | package testhelp 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "strings" 7 | "sync" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestMustTesting(t *testing.T) { 13 | mockT := &testing.T{} 14 | // Should not cause an error 15 | MustTesting(mockT, nil) 16 | 17 | // Should cause an error 18 | MustTesting(mockT, errors.New("test error")) 19 | // Note: we can't easily check that the mock testing.T recorded an error 20 | } 21 | 22 | func TestMustNotTesting(t *testing.T) { 23 | mockT := &testing.T{} 24 | // Should cause an error 25 | MustNotTesting(mockT, nil) 26 | 27 | // Should not cause an error 28 | MustNotTesting(mockT, errors.New("test error")) 29 | // Note: we can't easily check that the mock testing.T recorded an error 30 | } 31 | 32 | func TestBehaviorCheck_Run(t *testing.T) { 33 | b := &BehaviorCheck{ 34 | RunFunc: func(ctx context.Context) error { 35 | return nil 36 | }, 37 | } 38 | 39 | // Test success case 40 | err := b.Run(context.Background()) 41 | if err != nil { 42 | t.Errorf("Expected no error, got %v", err) 43 | } 44 | if b.TotalRuns != 1 { 45 | t.Errorf("Expected 1 total run, got %d", b.TotalRuns) 46 | } 47 | if b.totalErrors != 0 { 48 | t.Errorf("Expected 0 total errors, got %d", b.totalErrors) 49 | } 50 | if b.MostConcurrent != 1 { 51 | t.Errorf("Expected most concurrent to be 1, got %d", b.MostConcurrent) 52 | } 53 | if b.currentConcurrent != 0 { 54 | t.Errorf("Expected current concurrent to be 0, got %d", b.currentConcurrent) 55 | } 56 | 57 | // Test error case 58 | b.RunFunc = func(ctx context.Context) error { 59 | return errors.New("test error") 60 | } 61 | 62 | err = 
b.Run(context.Background()) 63 | if err == nil { 64 | t.Error("Expected an error") 65 | } 66 | if b.TotalRuns != 2 { 67 | t.Errorf("Expected 2 total runs, got %d", b.TotalRuns) 68 | } 69 | if b.totalErrors != 1 { 70 | t.Errorf("Expected 1 total error, got %d", b.totalErrors) 71 | } 72 | 73 | // Test concurrency tracking 74 | wg := sync.WaitGroup{} 75 | for i := 0; i < 5; i++ { 76 | wg.Add(1) 77 | go func() { 78 | defer wg.Done() 79 | if runErr := b.Run(context.Background()); runErr == nil { 80 | t.Error("I expecte an error from running b") 81 | } 82 | }() 83 | } 84 | wg.Wait() 85 | 86 | if b.TotalRuns != 7 { 87 | t.Errorf("Expected 7 total runs, got %d", b.TotalRuns) 88 | } 89 | if b.totalErrors != 6 { 90 | t.Errorf("Expected 6 total errors, got %d", b.totalErrors) 91 | } 92 | if b.MostConcurrent < 1 { 93 | t.Errorf("Expected most concurrent to be at least 1, got %d", b.MostConcurrent) 94 | } 95 | } 96 | 97 | func TestSleepsForX(t *testing.T) { 98 | // Test normal sleep 99 | start := time.Now() 100 | fn := SleepsForX(100 * time.Millisecond) 101 | err := fn(context.Background()) 102 | elapsed := time.Since(start) 103 | if err != nil { 104 | t.Errorf("Expected no error, got %v", err) 105 | } 106 | if elapsed < 100*time.Millisecond { 107 | t.Errorf("Expected sleep of at least 100ms, got %v", elapsed) 108 | } 109 | 110 | // Test context cancellation 111 | ctx, cancel := context.WithCancel(context.Background()) 112 | start = time.Now() 113 | go func() { 114 | time.Sleep(50 * time.Millisecond) 115 | cancel() 116 | }() 117 | fn = SleepsForX(10 * time.Second) 118 | err = fn(ctx) 119 | elapsed = time.Since(start) 120 | 121 | if err == nil { 122 | t.Error("Expected an error due to context cancellation") 123 | } 124 | if err != context.Canceled { 125 | t.Errorf("Expected context.Canceled error, got %v", err) 126 | } 127 | if elapsed >= 10*time.Second { 128 | t.Errorf("Expected earlier termination than 10s, got %v", elapsed) 129 | } 130 | } 131 | 132 | func 
TestAlwaysPassesFallback(t *testing.T) { 133 | err := AlwaysPassesFallback(context.Background(), errors.New("test error")) 134 | if err != nil { 135 | t.Errorf("Expected no error, got %v", err) 136 | } 137 | } 138 | 139 | func TestAlwaysFailsFallback(t *testing.T) { 140 | origErr := errors.New("original error") 141 | err := AlwaysFailsFallback(context.Background(), origErr) 142 | if err == nil { 143 | t.Error("Expected an error") 144 | } 145 | if err != nil { 146 | errorText := err.Error() 147 | if !strings.Contains(errorText, "original error") { 148 | t.Errorf("Expected error to contain 'original error', got: %v", err) 149 | } 150 | } 151 | } 152 | 153 | func TestAlwaysFails(t *testing.T) { 154 | err := AlwaysFails(context.Background()) 155 | if err == nil { 156 | t.Error("Expected an error") 157 | } 158 | if err != errFailure { 159 | t.Errorf("Expected errFailure, got %v", err) 160 | } 161 | } 162 | 163 | func TestAlwaysPasses(t *testing.T) { 164 | err := AlwaysPasses(context.Background()) 165 | if err != nil { 166 | t.Errorf("Expected no error, got %v", err) 167 | } 168 | } 169 | 170 | func TestDoTillTime(t *testing.T) { 171 | var counter int 172 | wg := &sync.WaitGroup{} 173 | 174 | endTime := time.Now().Add(500 * time.Millisecond) 175 | DoTillTime(endTime, wg, func() { 176 | counter++ 177 | }) 178 | 179 | wg.Wait() 180 | if counter <= 0 { 181 | t.Error("Function should have executed at least once") 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /manager.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "errors" 5 | "expvar" 6 | "sync" 7 | ) 8 | 9 | // CommandPropertiesConstructor is a generic function that can create command properties to configure a circuit by name 10 | // It is safe to leave not configured properties their empty value. 
11 | type CommandPropertiesConstructor func(circuitName string) Config 12 | 13 | // Manager manages circuits with unique names 14 | type Manager struct { 15 | // DefaultCircuitProperties is a list of Config constructors called, in reverse order, 16 | // to append or modify configuration for your circuit. 17 | DefaultCircuitProperties []CommandPropertiesConstructor 18 | 19 | circuitMap map[string]*Circuit 20 | // mu locks circuitMap, not DefaultCircuitProperties 21 | mu sync.RWMutex 22 | } 23 | 24 | // AllCircuits returns every hystrix circuit tracked 25 | func (h *Manager) AllCircuits() []*Circuit { 26 | if h == nil { 27 | return nil 28 | } 29 | h.mu.RLock() 30 | defer h.mu.RUnlock() 31 | ret := make([]*Circuit, 0, len(h.circuitMap)) 32 | for _, c := range h.circuitMap { 33 | ret = append(ret, c) 34 | } 35 | return ret 36 | } 37 | 38 | // Var allows you to expose all your hystrix circuits on expvar 39 | func (h *Manager) Var() expvar.Var { 40 | return expvar.Func(func() interface{} { 41 | h.mu.RLock() 42 | defer h.mu.RUnlock() 43 | ret := make(map[string]interface{}) 44 | for k, v := range h.circuitMap { 45 | ev := expvarToVal(v.Var()) 46 | if ev != nil { 47 | ret[k] = ev 48 | } 49 | } 50 | return ret 51 | }) 52 | } 53 | 54 | // GetCircuit returns the circuit with a given name, or nil if the circuit does not exist. You should not call this 55 | // in live code. Instead, store the circuit somewhere and use the circuit directly. 56 | func (h *Manager) GetCircuit(name string) *Circuit { 57 | if h == nil { 58 | return nil 59 | } 60 | h.mu.RLock() 61 | defer h.mu.RUnlock() 62 | return h.circuitMap[name] 63 | } 64 | 65 | // MustCreateCircuit calls CreateCircuit, but panics if the circuit name already exists 66 | func (h *Manager) MustCreateCircuit(name string, config ...Config) *Circuit { 67 | c, err := h.CreateCircuit(name, config...) 
68 | if err != nil { 69 | panic(err) 70 | } 71 | return c 72 | } 73 | 74 | // CreateCircuit creates a new circuit, or returns error if a circuit with that name already exists 75 | func (h *Manager) CreateCircuit(name string, configs ...Config) (*Circuit, error) { 76 | h.mu.Lock() 77 | defer h.mu.Unlock() 78 | if h.circuitMap == nil { 79 | h.circuitMap = make(map[string]*Circuit, 5) 80 | } 81 | finalConfig := Config{} 82 | for _, c := range configs { 83 | finalConfig.Merge(c) 84 | } 85 | // Merge in reverse order so the most recently appending constructor is more important 86 | for i := len(h.DefaultCircuitProperties) - 1; i >= 0; i-- { 87 | finalConfig.Merge(h.DefaultCircuitProperties[i](name)) 88 | } 89 | _, exists := h.circuitMap[name] 90 | if exists { 91 | return nil, errors.New("circuit with that name already exists") 92 | } 93 | h.circuitMap[name] = NewCircuitFromConfig(name, finalConfig) 94 | return h.circuitMap[name], nil 95 | } 96 | -------------------------------------------------------------------------------- /manager_stress_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "sync" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | // TestManagerCircuitCreationStress tests high-concurrency circuit creation 16 | func TestManagerCircuitCreationStress(t *testing.T) { 17 | mgr := Manager{} 18 | 19 | // Create many circuits concurrently to test race conditions in creation 20 | goroutines := 100 21 | circuitsPerRoutine := 50 22 | 23 | var wg sync.WaitGroup 24 | uniqueCircuits := make(map[string]struct{}) 25 | var mu sync.Mutex 26 | 27 | for g := 0; g < goroutines; g++ { 28 | wg.Add(1) 29 | go func(routineNum int) { 30 | defer wg.Done() 31 | 32 | for i := 0; i < circuitsPerRoutine; i++ { 33 | // Some goroutines will try to create the same circuit names 34 | circuitNum := i % 
(circuitsPerRoutine / 2) 35 | circuitName := fmt.Sprintf("stress-circuit-%d", routineNum*1000+circuitNum) 36 | 37 | // Try both creation methods 38 | if i%2 == 0 { 39 | c, err := mgr.CreateCircuit(circuitName) 40 | if err == nil && c != nil { 41 | mu.Lock() 42 | uniqueCircuits[circuitName] = struct{}{} 43 | mu.Unlock() 44 | } 45 | } else if mgr.GetCircuit(circuitName) == nil { 46 | // We need to check if the circuit exists first 47 | // Doesn't exist yet, so create it 48 | func() { 49 | defer func() { 50 | // Recover from any panics 51 | _ = recover() 52 | }() 53 | c := mgr.MustCreateCircuit(circuitName) 54 | if c != nil { 55 | mu.Lock() 56 | uniqueCircuits[circuitName] = struct{}{} 57 | mu.Unlock() 58 | } 59 | }() 60 | } 61 | 62 | // Verify we can get the circuit 63 | c := mgr.GetCircuit(circuitName) 64 | require.NotNil(t, c) 65 | } 66 | }(g) 67 | } 68 | 69 | wg.Wait() 70 | 71 | t.Logf("Created %d unique circuits", len(uniqueCircuits)) 72 | 73 | // Verify all circuits can be used 74 | for name := range uniqueCircuits { 75 | c := mgr.GetCircuit(name) 76 | require.NotNil(t, c) 77 | 78 | err := c.Execute(context.Background(), func(ctx context.Context) error { 79 | return nil 80 | }, nil) 81 | require.NoError(t, err) 82 | } 83 | } 84 | 85 | // TestManagerConcurrentCircuitAccess tests concurrent access to the same circuits 86 | func TestManagerConcurrentCircuitAccess(t *testing.T) { 87 | mgr := Manager{} 88 | 89 | // Create a fixed number of circuits 90 | circuitCount := 20 91 | for i := 0; i < circuitCount; i++ { 92 | mgr.MustCreateCircuit(fmt.Sprintf("shared-circuit-%d", i)) 93 | } 94 | 95 | // Access them concurrently 96 | goroutines := 50 97 | operationsPerRoutine := 1000 98 | 99 | var wg sync.WaitGroup 100 | var operationCounter int64 101 | 102 | for g := 0; g < goroutines; g++ { 103 | wg.Add(1) 104 | go func() { 105 | defer wg.Done() 106 | 107 | for i := 0; i < operationsPerRoutine; i++ { 108 | // Randomly access one of the circuits 109 | circuitNum := i % 
circuitCount 110 | circuitName := fmt.Sprintf("shared-circuit-%d", circuitNum) 111 | 112 | c := mgr.GetCircuit(circuitName) 113 | require.NotNil(t, c) 114 | 115 | // Perform an operation 116 | err := c.Execute(context.Background(), func(ctx context.Context) error { 117 | // Mix success and failure 118 | if i%7 == 0 { 119 | return errors.New("random failure") 120 | } 121 | return nil 122 | }, nil) 123 | 124 | // Don't care about the error, just that we performed the operation 125 | atomic.AddInt64(&operationCounter, 1) 126 | _ = err 127 | 128 | if i%11 == 0 { 129 | // Occasionally check if exists 130 | exists := mgr.GetCircuit(circuitName) != nil 131 | require.True(t, exists) 132 | 133 | // Also try a non-existent circuit 134 | exists = mgr.GetCircuit("non-existent-circuit") != nil 135 | require.False(t, exists) 136 | } 137 | } 138 | }() 139 | } 140 | 141 | wg.Wait() 142 | 143 | t.Logf("Performed %d operations on %d shared circuits", operationCounter, circuitCount) 144 | require.Equal(t, int64(goroutines*operationsPerRoutine), operationCounter) 145 | } 146 | 147 | // TestManagerCircuitRunningMetrics tests that the manager correctly tracks running metrics 148 | func TestManagerCircuitRunningMetrics(t *testing.T) { 149 | mgr := Manager{} 150 | 151 | // Create some circuits 152 | circuitCount := 5 153 | for i := 0; i < circuitCount; i++ { 154 | mgr.MustCreateCircuit(fmt.Sprintf("metric-circuit-%d", i)) 155 | } 156 | 157 | // Update metrics from multiple goroutines 158 | goroutines := 20 159 | updatesPerRoutine := 100 160 | 161 | var wg sync.WaitGroup 162 | 163 | // Run many goroutines that will put load on the circuits 164 | for g := 0; g < goroutines; g++ { 165 | wg.Add(1) 166 | go func(id int) { 167 | defer wg.Done() 168 | 169 | for i := 0; i < updatesPerRoutine; i++ { 170 | // Cycle through circuits 171 | circuitNum := i % circuitCount 172 | circuitName := fmt.Sprintf("metric-circuit-%d", circuitNum) 173 | 174 | c := mgr.GetCircuit(circuitName) 175 | 176 | // 
Different goroutines will hit circuits with different patterns 177 | var err error 178 | switch id % 4 { 179 | case 0: 180 | // Always successful 181 | err = c.Execute(context.Background(), func(ctx context.Context) error { 182 | return nil 183 | }, nil) 184 | case 1: 185 | // Always error 186 | err = c.Execute(context.Background(), func(ctx context.Context) error { 187 | return errors.New("failure") 188 | }, nil) 189 | case 2: 190 | // Slow execution (possible timeout) 191 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*5) 192 | err = c.Execute(ctx, func(ctx context.Context) error { 193 | select { 194 | case <-time.After(time.Millisecond * 10): 195 | return nil 196 | case <-ctx.Done(): 197 | return ctx.Err() 198 | } 199 | }, nil) 200 | cancel() 201 | case 3: 202 | // Mix of success/failure 203 | shouldFail := i%2 == 0 204 | err = c.Execute(context.Background(), func(ctx context.Context) error { 205 | if shouldFail { 206 | return errors.New("conditional failure") 207 | } 208 | return nil 209 | }, nil) 210 | } 211 | 212 | // Don't assert on err, we're just generating metrics 213 | _ = err 214 | } 215 | }(g) 216 | } 217 | 218 | wg.Wait() 219 | 220 | // Get the manager's metrics - just validate it works 221 | metrics := mgr.Var() 222 | require.NotNil(t, metrics) 223 | } 224 | 225 | // TestManagerConcurrentFactoryConfiguration tests the manager's handling of 226 | // circuit creation with concurrent configuration factories 227 | func TestManagerConcurrentFactoryConfiguration(t *testing.T) { 228 | // Create factories that vary the configuration 229 | factoryCount := 5 230 | factories := make([]CommandPropertiesConstructor, factoryCount) 231 | 232 | for i := 0; i < factoryCount; i++ { 233 | timeoutValue := time.Millisecond * time.Duration(20*(i+1)) 234 | factories[i] = func(circuitName string) Config { 235 | return Config{ 236 | Execution: ExecutionConfig{ 237 | Timeout: timeoutValue, 238 | }, 239 | } 240 | } 241 | } 242 | 243 | mgr := 
Manager{ 244 | DefaultCircuitProperties: factories, 245 | } 246 | 247 | // Create circuits concurrently 248 | goroutines := 30 249 | circuitsPerRoutine := 10 250 | 251 | var wg sync.WaitGroup 252 | var mu sync.Mutex 253 | circuitTimeouts := make(map[string]time.Duration) 254 | 255 | for g := 0; g < goroutines; g++ { 256 | wg.Add(1) 257 | go func(id int) { 258 | defer wg.Done() 259 | 260 | for i := 0; i < circuitsPerRoutine; i++ { 261 | circuitName := fmt.Sprintf("factory-circuit-%d-%d", id, i) 262 | 263 | // Create the circuit - this should apply all factories 264 | c := mgr.MustCreateCircuit(circuitName) 265 | 266 | // Check its timeout 267 | timeout := c.Config().Execution.Timeout 268 | mu.Lock() 269 | circuitTimeouts[circuitName] = timeout 270 | mu.Unlock() 271 | 272 | // Execute it 273 | ctx, cancel := context.WithTimeout(context.Background(), timeout*2) 274 | err := c.Execute(ctx, func(ctx context.Context) error { 275 | sleepTime := timeout / 2 // Should finish in time 276 | time.Sleep(sleepTime) 277 | return nil 278 | }, nil) 279 | cancel() 280 | 281 | require.NoError(t, err) 282 | } 283 | }(g) 284 | } 285 | 286 | wg.Wait() 287 | 288 | t.Logf("Created %d circuits with factories", len(circuitTimeouts)) 289 | require.Equal(t, goroutines*circuitsPerRoutine, len(circuitTimeouts)) 290 | } 291 | 292 | // TestRaceOnParallelCircuitControlPlane tests for race conditions 293 | // from goroutines performing control plane operations like circuit creation and deletion 294 | // while other goroutines perform data plane operations 295 | func TestRaceOnParallelCircuitControlPlane(t *testing.T) { 296 | mgr := Manager{} 297 | 298 | // Use atomic operations to coordinate goroutines 299 | var controlPlaneOps, dataPlaneOps int64 300 | var running int32 = 1 301 | 302 | // Control plane goroutine that creates circuits 303 | go func() { 304 | for atomic.LoadInt32(&running) == 1 { 305 | // Create circuit 306 | circuitName := fmt.Sprintf("test-circuit-%d", 
atomic.LoadInt64(&controlPlaneOps)) 307 | _, _ = mgr.CreateCircuit(circuitName) 308 | 309 | // Let it be used for a bit 310 | time.Sleep(time.Microsecond) 311 | 312 | // We don't have a delete API, but we can stop using this circuit 313 | // and create new ones to stress the manager 314 | 315 | atomic.AddInt64(&controlPlaneOps, 1) 316 | } 317 | }() 318 | 319 | // Data plane goroutines 320 | dataPlaneCount := 5 321 | var wg sync.WaitGroup 322 | 323 | for i := 0; i < dataPlaneCount; i++ { 324 | wg.Add(1) 325 | go func() { 326 | defer wg.Done() 327 | 328 | for atomic.LoadInt32(&running) == 1 { 329 | // Get all circuits 330 | allCircuits := mgr.AllCircuits() 331 | 332 | // Try to use each one 333 | for _, c := range allCircuits { 334 | // Just try to use it, don't care about result 335 | _ = c.Execute(context.Background(), func(ctx context.Context) error { 336 | return nil 337 | }, nil) 338 | } 339 | 340 | // Check if a specific circuit exists 341 | circuitNum := atomic.LoadInt64(&dataPlaneOps) % 100 342 | circuitName := fmt.Sprintf("test-circuit-%d", circuitNum) 343 | c := mgr.GetCircuit(circuitName) 344 | if c != nil { 345 | // Try to use it 346 | _ = c.Execute(context.Background(), func(ctx context.Context) error { 347 | return nil 348 | }, nil) 349 | } 350 | 351 | atomic.AddInt64(&dataPlaneOps, 1) 352 | } 353 | }() 354 | } 355 | 356 | // Let it run for a short time 357 | time.Sleep(time.Millisecond * 500) 358 | atomic.StoreInt32(&running, 0) 359 | 360 | wg.Wait() 361 | 362 | t.Logf("Performed %d control plane operations and %d data plane operations", 363 | atomic.LoadInt64(&controlPlaneOps), atomic.LoadInt64(&dataPlaneOps)) 364 | } 365 | -------------------------------------------------------------------------------- /manager_test.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | func TestManager_Empty(t *testing.T) { 9 | h := Manager{} 10 | if 
h.GetCircuit("does_not_exist") != nil { 11 | t.Error("found a circuit that does not exist") 12 | } 13 | } 14 | 15 | func TestManager_Var(t *testing.T) { 16 | h := Manager{} 17 | c := h.MustCreateCircuit("hello-world", Config{}) 18 | if !strings.Contains(h.Var().String(), "hello-world") { 19 | t.Error("Var() does not seem to work for hystrix", h.Var()) 20 | } 21 | if !strings.Contains(c.Var().String(), "hello-world") { 22 | t.Error("Var() does not seem to work for circuits") 23 | } 24 | } 25 | 26 | func TestManager_AllCircuits(t *testing.T) { 27 | h := Manager{} 28 | c := h.MustCreateCircuit("hello-world", Config{}) 29 | if len(h.AllCircuits()) != 1 { 30 | t.Error("unexpected number of circuits") 31 | } 32 | if h.AllCircuits()[0] != c { 33 | t.Error("unexpected circuit") 34 | } 35 | } 36 | 37 | func TestSimpleCreate(t *testing.T) { 38 | h := Manager{} 39 | c := h.MustCreateCircuit("hello-world", Config{}) 40 | if c.Name() != "hello-world" { 41 | t.Error("unexpected name") 42 | } 43 | c = h.GetCircuit("hello-world") 44 | if c.Name() != "hello-world" { 45 | t.Error("unexpected name") 46 | } 47 | } 48 | 49 | func TestDoubleCreate(t *testing.T) { 50 | h := Manager{} 51 | h.MustCreateCircuit("hello-world", Config{}) 52 | var foundErr interface{} 53 | func() { 54 | defer func() { 55 | foundErr = recover() 56 | }() 57 | h.MustCreateCircuit("hello-world", Config{}) 58 | }() 59 | if foundErr == nil { 60 | t.Error("Expect panic when must creating twice") 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /metriceventstream/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package metriceventstream allows exposing your circuit's health as a metric stream that you can visualize with the 3 | hystrix dashboard. Note, you do not have to use hystrix open/close logic to take advantage of this. 
4 | */ 5 | package metriceventstream 6 | -------------------------------------------------------------------------------- /metriceventstream/example_test.go: -------------------------------------------------------------------------------- 1 | package metriceventstream_test 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | 7 | "github.com/cep21/circuit/v4" 8 | "github.com/cep21/circuit/v4/metriceventstream" 9 | "github.com/cep21/circuit/v4/metrics/rolling" 10 | ) 11 | 12 | // This example creates an event stream handler, starts it, then later closes the handler 13 | func ExampleMetricEventStream() { 14 | // metriceventstream uses rolling stats to report circuit information 15 | sf := rolling.StatFactory{} 16 | h := circuit.Manager{ 17 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{sf.CreateConfig}, 18 | } 19 | es := metriceventstream.MetricEventStream{ 20 | Manager: &h, 21 | } 22 | go func() { 23 | if err := es.Start(); err != nil { 24 | log.Fatal(err) 25 | } 26 | }() 27 | // ES is a http.Handler, so you can pass it directly to your mux 28 | http.Handle("/hystrix.stream", &es) 29 | // ... 
30 | if err := es.Close(); err != nil { 31 | log.Fatal(err) 32 | } 33 | // Output: 34 | } 35 | -------------------------------------------------------------------------------- /metriceventstream/metriceventstream_test.go: -------------------------------------------------------------------------------- 1 | package metriceventstream 2 | 3 | import ( 4 | "context" 5 | "net/http/httptest" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/cep21/circuit/v4" 11 | ) 12 | 13 | func TestMetricEventStream(t *testing.T) { 14 | h := &circuit.Manager{} 15 | c := h.MustCreateCircuit("hello-world", circuit.Config{}) 16 | if err := c.Execute(context.Background(), func(_ context.Context) error { 17 | return nil 18 | }, nil); err != nil { 19 | t.Error("no error expected from always passes") 20 | } 21 | 22 | eventStream := MetricEventStream{ 23 | Manager: h, 24 | TickDuration: time.Millisecond * 10, 25 | } 26 | eventStreamStartResult := make(chan error) 27 | go func() { 28 | eventStreamStartResult <- eventStream.Start() 29 | }() 30 | 31 | recorder := httptest.NewRecorder() 32 | req := httptest.NewRequest("GET", "http://localhost:8080/hystrix.stream", nil) 33 | // Just get 500 ms of data 34 | reqContext, cancelData := context.WithTimeout(context.Background(), time.Millisecond*100) 35 | defer cancelData() 36 | req = req.WithContext(reqContext) 37 | eventStream.ServeHTTP(recorder, req) 38 | 39 | bodyOfRequest := recorder.Body.String() 40 | if !strings.Contains(bodyOfRequest, "hello-world") { 41 | t.Error("Did not see my hello world circuit in the body") 42 | } 43 | if err := eventStream.Close(); err != nil { 44 | t.Error("no error expected from closing event stream") 45 | } 46 | // And finally wait for start to end 47 | <-eventStreamStartResult 48 | } 49 | -------------------------------------------------------------------------------- /metrics.go: -------------------------------------------------------------------------------- 1 | package circuit 2 | 3 | import ( 4 | "context" 
5 | "expvar" 6 | "time" 7 | ) 8 | 9 | // RunMetricsCollection send metrics to multiple RunMetrics 10 | type RunMetricsCollection []RunMetrics 11 | 12 | var _ RunMetrics = &RunMetricsCollection{} 13 | 14 | type varable interface { 15 | Var() expvar.Var 16 | } 17 | 18 | func expvarToVal(in expvar.Var) interface{} { 19 | type iv interface { 20 | Value() interface{} 21 | } 22 | if rawVal, ok := in.(iv); ok { 23 | return rawVal.Value() 24 | } 25 | return nil 26 | } 27 | 28 | // Var exposes run collectors as expvar 29 | func (r RunMetricsCollection) Var() expvar.Var { 30 | return expvar.Func(func() interface{} { 31 | ret := make([]interface{}, 0, len(r)) 32 | for _, c := range r { 33 | if v, ok := c.(varable); ok { 34 | asVal := expvarToVal(v.Var()) 35 | if asVal != nil { 36 | ret = append(ret, asVal) 37 | } 38 | } 39 | } 40 | return ret 41 | }) 42 | } 43 | 44 | // Success sends Success to all collectors 45 | func (r RunMetricsCollection) Success(ctx context.Context, now time.Time, duration time.Duration) { 46 | for _, c := range r { 47 | c.Success(ctx, now, duration) 48 | } 49 | } 50 | 51 | // ErrConcurrencyLimitReject sends ErrConcurrencyLimitReject to all collectors 52 | func (r RunMetricsCollection) ErrConcurrencyLimitReject(ctx context.Context, now time.Time) { 53 | for _, c := range r { 54 | c.ErrConcurrencyLimitReject(ctx, now) 55 | } 56 | } 57 | 58 | // ErrFailure sends ErrFailure to all collectors 59 | func (r RunMetricsCollection) ErrFailure(ctx context.Context, now time.Time, duration time.Duration) { 60 | for _, c := range r { 61 | c.ErrFailure(ctx, now, duration) 62 | } 63 | } 64 | 65 | // ErrShortCircuit sends ErrShortCircuit to all collectors 66 | func (r RunMetricsCollection) ErrShortCircuit(ctx context.Context, now time.Time) { 67 | for _, c := range r { 68 | c.ErrShortCircuit(ctx, now) 69 | } 70 | } 71 | 72 | // ErrTimeout sends ErrTimeout to all collectors 73 | func (r RunMetricsCollection) ErrTimeout(ctx context.Context, now time.Time, duration 
time.Duration) { 74 | for _, c := range r { 75 | c.ErrTimeout(ctx, now, duration) 76 | } 77 | } 78 | 79 | // ErrBadRequest sends ErrBadRequest to all collectors 80 | func (r RunMetricsCollection) ErrBadRequest(ctx context.Context, now time.Time, duration time.Duration) { 81 | for _, c := range r { 82 | c.ErrBadRequest(ctx, now, duration) 83 | } 84 | } 85 | 86 | // ErrInterrupt sends ErrInterrupt to all collectors 87 | func (r RunMetricsCollection) ErrInterrupt(ctx context.Context, now time.Time, duration time.Duration) { 88 | for _, c := range r { 89 | c.ErrInterrupt(ctx, now, duration) 90 | } 91 | } 92 | 93 | // FallbackMetricsCollection sends fallback metrics to all collectors 94 | type FallbackMetricsCollection []FallbackMetrics 95 | 96 | var _ FallbackMetrics = &FallbackMetricsCollection{} 97 | 98 | // Success sends Success to all collectors 99 | func (r FallbackMetricsCollection) Success(ctx context.Context, now time.Time, duration time.Duration) { 100 | for _, c := range r { 101 | c.Success(ctx, now, duration) 102 | } 103 | } 104 | 105 | // ErrConcurrencyLimitReject sends ErrConcurrencyLimitReject to all collectors 106 | func (r FallbackMetricsCollection) ErrConcurrencyLimitReject(ctx context.Context, now time.Time) { 107 | for _, c := range r { 108 | c.ErrConcurrencyLimitReject(ctx, now) 109 | } 110 | } 111 | 112 | // ErrFailure sends ErrFailure to all collectors 113 | func (r FallbackMetricsCollection) ErrFailure(ctx context.Context, now time.Time, duration time.Duration) { 114 | for _, c := range r { 115 | c.ErrFailure(ctx, now, duration) 116 | } 117 | } 118 | 119 | // Var exposes run collectors as expvar 120 | func (r FallbackMetricsCollection) Var() expvar.Var { 121 | return expvar.Func(func() interface{} { 122 | ret := make([]interface{}, 0, len(r)) 123 | for _, c := range r { 124 | if v, ok := c.(varable); ok { 125 | asVal := expvarToVal(v.Var()) 126 | if asVal != nil { 127 | ret = append(ret, asVal) 128 | } 129 | } 130 | } 131 | return ret 132 | }) 
133 | } 134 | 135 | // MetricsCollection allows reporting multiple circuit metrics at once 136 | type MetricsCollection []Metrics 137 | 138 | var _ Metrics = &MetricsCollection{} 139 | 140 | // Closed sends Closed to all collectors 141 | func (r MetricsCollection) Closed(ctx context.Context, now time.Time) { 142 | for _, c := range r { 143 | c.Closed(ctx, now) 144 | } 145 | } 146 | 147 | // Opened sends Opened to all collectors 148 | func (r MetricsCollection) Opened(ctx context.Context, now time.Time) { 149 | for _, c := range r { 150 | c.Opened(ctx, now) 151 | } 152 | } 153 | 154 | // Metrics reports internal circuit metric events 155 | type Metrics interface { 156 | // Closed is called when the circuit transitions from Open to Closed. 157 | Closed(ctx context.Context, now time.Time) 158 | // Opened is called when the circuit transitions from Closed to Opened. 159 | Opened(ctx context.Context, now time.Time) 160 | } 161 | 162 | // RunMetrics is guaranteed to execute one (and only one) of the following functions each time the circuit 163 | // attempts to call a run function. Methods with durations are when run was actually executed. Methods without 164 | // durations never called run, probably because of the circuit. 165 | type RunMetrics interface { 166 | // Success each time `Execute` does not return an error 167 | Success(ctx context.Context, now time.Time, duration time.Duration) 168 | // ErrFailure each time a runFunc (the circuit part) ran, but failed 169 | ErrFailure(ctx context.Context, now time.Time, duration time.Duration) 170 | // ErrTimeout increments the number of timeouts that occurred in the circuit breaker. 
171 | ErrTimeout(ctx context.Context, now time.Time, duration time.Duration) 172 | // ErrBadRequest counts occurrences of http://netflix.github.io/Hystrix/javadoc/com/netflix/hystrix/exception/HystrixBadRequestException.html 173 | // See https://github.com/Netflix/Hystrix/wiki/How-To-Use#error-propagation 174 | ErrBadRequest(ctx context.Context, now time.Time, duration time.Duration) 175 | // ErrInterrupt means the request ended, not because the runFunc failed, but probably because the original 176 | // context canceled. Your circuit returned an error, but it's probably because someone else killed the context, 177 | // and not that your circuit is broken. Java Manager doesn't have an equivalent for this, but it would be like if 178 | // an interrupt was called on the thread. 179 | // 180 | // A note on stat tracking: you may or may not consider this duration valid. Yes, that's how long it executed, 181 | // but the circuit never finished correctly since it was asked to end early, so the value is smaller than the 182 | // circuit would have otherwise taken. 183 | ErrInterrupt(ctx context.Context, now time.Time, duration time.Duration) 184 | // If `Execute` returns an error, it will increment one of the following metrics 185 | 186 | // ErrConcurrencyLimitReject each time a circuit is rejected due to concurrency limits 187 | ErrConcurrencyLimitReject(ctx context.Context, now time.Time) 188 | // ErrShortCircuit each time runFunc is not called because the circuit was open. 189 | ErrShortCircuit(ctx context.Context, now time.Time) 190 | } 191 | 192 | // FallbackMetrics is guaranteed to execute one (and only one) of the following functions each time a fallback is executed. 193 | // Methods with durations are when the fallback is actually executed. Methods without durations are when the fallback was 194 | // never called, probably because of some circuit condition.
195 | type FallbackMetrics interface { 196 | // All `fallback` calls will implement one of the following metrics 197 | 198 | // Success each time fallback is called and succeeds. 199 | Success(ctx context.Context, now time.Time, duration time.Duration) 200 | // ErrFailure each time fallback callback fails. 201 | ErrFailure(ctx context.Context, now time.Time, duration time.Duration) 202 | // ErrConcurrencyLimitReject each time fallback fails due to concurrency limit 203 | ErrConcurrencyLimitReject(ctx context.Context, now time.Time) 204 | } 205 | 206 | var _ FallbackMetrics = RunMetrics(nil) 207 | -------------------------------------------------------------------------------- /metrics/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package metrics contains implementations of MetricsCollectors to aid circuit health detection. 3 | */ 4 | package metrics 5 | -------------------------------------------------------------------------------- /metrics/responsetimeslo/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package responsetimeslo contains a MetricsCollector that tracks a SLO metric for circuits. 3 | */ 4 | package responsetimeslo 5 | -------------------------------------------------------------------------------- /metrics/responsetimeslo/example_test.go: -------------------------------------------------------------------------------- 1 | package responsetimeslo_test 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/cep21/circuit/v4" 7 | "github.com/cep21/circuit/v4/metrics/responsetimeslo" 8 | ) 9 | 10 | // This example creates a SLO tracker that counts failures at less than 20 ms. You 11 | // will need to provide your own Collectors. 
12 | func ExampleFactory() { 13 | sloTrackerFactory := responsetimeslo.Factory{ 14 | Config: responsetimeslo.Config{ 15 | // Consider requests faster than 20 ms as passing 16 | MaximumHealthyTime: time.Millisecond * 20, 17 | }, 18 | // Pass in your collector here: for example, statsd 19 | CollectorConstructors: nil, 20 | } 21 | h := circuit.Manager{ 22 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{sloTrackerFactory.CommandProperties}, 23 | } 24 | h.MustCreateCircuit("circuit-with-slo") 25 | // Output: 26 | } 27 | -------------------------------------------------------------------------------- /metrics/responsetimeslo/responsetime.go: -------------------------------------------------------------------------------- 1 | package responsetimeslo 2 | 3 | import ( 4 | "context" 5 | "expvar" 6 | "sync" 7 | "time" 8 | 9 | "github.com/cep21/circuit/v4" 10 | "github.com/cep21/circuit/v4/faststats" 11 | ) 12 | 13 | // Tracker sets up a response time SLO that has a reasonable meaning for hystrix. Use it for an SLO like 14 | // "99% of requests should respond correctly within 300 ms". 15 | // 16 | // Define a maximum time that a healthy request is allowed to take. This should be less than the maximum "break" point 17 | // of the circuit. Only Successful requests <= that time are counted as healthy. 18 | // 19 | // Requests that are interrupted, or have bad input, are not considered healthy or unhealthy. It's like they don't 20 | // happen. All other types of errors are blamed on the down stream service, or the Run method's request time. They 21 | // will count as failing the SLA. 
22 | type Tracker struct { 23 | MaximumHealthyTime faststats.AtomicInt64 24 | MeetsSLOCount faststats.AtomicInt64 25 | FailsSLOCount faststats.AtomicInt64 26 | Collectors []Collector 27 | 28 | mu sync.Mutex 29 | config Config 30 | } 31 | 32 | // Config controls how SLO is tracked by default for a Tracker 33 | type Config struct { 34 | // MaximumHealthyTime is the maximum amount of time a request can take and still be considered healthy 35 | MaximumHealthyTime time.Duration 36 | } 37 | 38 | var defaultConfig = Config{ 39 | MaximumHealthyTime: time.Millisecond * 250, 40 | } 41 | 42 | // Merge this configuration with another, changing any values that are non zero into other's value 43 | func (c *Config) Merge(other Config) { 44 | if c.MaximumHealthyTime == 0 { 45 | c.MaximumHealthyTime = other.MaximumHealthyTime 46 | } 47 | } 48 | 49 | // Factory creates SLO monitors for a circuit 50 | type Factory struct { 51 | Config Config 52 | ConfigConstructor []func(circuitName string) Config 53 | CollectorConstructors []func(circuitName string) Collector 54 | } 55 | 56 | var _ circuit.RunMetrics = &Tracker{} 57 | 58 | func (r *Factory) getConfig(circuitName string) Config { 59 | finalConfig := Config{} 60 | // Merge in reverse order so the most recently appending constructor is more important 61 | for i := len(r.ConfigConstructor) - 1; i >= 0; i-- { 62 | finalConfig.Merge(r.ConfigConstructor[i](circuitName)) 63 | } 64 | finalConfig.Merge(r.Config) 65 | finalConfig.Merge(defaultConfig) 66 | return finalConfig 67 | } 68 | 69 | // CommandProperties appends SLO tracking to a circuit 70 | func (r *Factory) CommandProperties(circuitName string) circuit.Config { 71 | collectors := make([]Collector, 0, len(r.CollectorConstructors)) 72 | for _, constructor := range r.CollectorConstructors { 73 | collectors = append(collectors, constructor(circuitName)) 74 | } 75 | tracker := &Tracker{ 76 | Collectors: collectors, 77 | } 78 | 79 | cfg := r.getConfig(circuitName) 80 | 
tracker.SetConfigThreadSafe(cfg) 81 | return circuit.Config{ 82 | Metrics: circuit.MetricsCollectors{ 83 | Run: []circuit.RunMetrics{tracker}, 84 | }, 85 | } 86 | } 87 | 88 | // Var returns something to pass to expvar 89 | func (r *Tracker) Var() expvar.Var { 90 | return expvar.Func(func() interface{} { 91 | return map[string]interface{}{ 92 | "config": r.Config(), 93 | "pass": r.MeetsSLOCount.Get(), 94 | "fail": r.FailsSLOCount.Get(), 95 | } 96 | }) 97 | } 98 | 99 | // Success adds a healthy check if duration <= maximum healthy time 100 | func (r *Tracker) Success(_ context.Context, _ time.Time, duration time.Duration) { 101 | if duration.Nanoseconds() <= r.MaximumHealthyTime.Get() { 102 | r.healthy() 103 | return 104 | } 105 | r.failure() 106 | } 107 | 108 | func (r *Tracker) failure() { 109 | r.FailsSLOCount.Add(1) 110 | for _, c := range r.Collectors { 111 | c.Failed() 112 | } 113 | } 114 | 115 | func (r *Tracker) healthy() { 116 | r.MeetsSLOCount.Add(1) 117 | for _, c := range r.Collectors { 118 | c.Passed() 119 | } 120 | } 121 | 122 | // ErrFailure is always a failure 123 | func (r *Tracker) ErrFailure(_ context.Context, _ time.Time, _ time.Duration) { 124 | r.failure() 125 | } 126 | 127 | // ErrTimeout is always a failure 128 | func (r *Tracker) ErrTimeout(_ context.Context, _ time.Time, _ time.Duration) { 129 | r.failure() 130 | } 131 | 132 | // ErrConcurrencyLimitReject is always a failure 133 | func (r *Tracker) ErrConcurrencyLimitReject(_ context.Context, _ time.Time) { 134 | // Your endpoint could be healthy, but because we can't process commands fast enough, you're considered unhealthy. 135 | // This one could honestly go either way, but generally if a service cannot process commands fast enough, it's not 136 | // doing what you want. 137 | r.failure() 138 | } 139 | 140 | // ErrShortCircuit is always a failure 141 | func (r *Tracker) ErrShortCircuit(_ context.Context, _ time.Time) { 142 | // We had to end the request early. 
It's possible the endpoint we want is healthy, but because we had to trip 143 | // our circuit, due to past misbehavior, it is still the endpoint's fault we cannot satisfy this request, so it 144 | // fails the SLO. 145 | r.failure() 146 | } 147 | 148 | // ErrBadRequest is ignored 149 | func (r *Tracker) ErrBadRequest(_ context.Context, _ time.Time, _ time.Duration) {} 150 | 151 | // SetConfigThreadSafe updates the configuration stored in the tracker 152 | func (r *Tracker) SetConfigThreadSafe(config Config) { 153 | r.mu.Lock() 154 | defer r.mu.Unlock() 155 | r.config = config 156 | r.MaximumHealthyTime.Set(config.MaximumHealthyTime.Nanoseconds()) 157 | } 158 | 159 | // Config returns the tracker's config 160 | func (r *Tracker) Config() Config { 161 | r.mu.Lock() 162 | defer r.mu.Unlock() 163 | return r.config 164 | } 165 | 166 | // ErrInterrupt is only a failure if healthy time has passed 167 | func (r *Tracker) ErrInterrupt(_ context.Context, _ time.Time, duration time.Duration) { 168 | // If it is interrupted but past the healthy time, then it is as good as unhealthy 169 | if duration.Nanoseconds() > r.MaximumHealthyTime.Get() { 170 | r.failure() 171 | } 172 | // Cannot consider this value healthy, since it didn't return 173 | } 174 | 175 | // Collector can collect metrics about the happy SLO of a request.
176 | type Collector interface { 177 | // Failed the SLO 178 | Failed() 179 | // Passed the SLO (responded correctly fast enough) 180 | Passed() 181 | } 182 | -------------------------------------------------------------------------------- /metrics/responsetimeslo/responsetime_test.go: -------------------------------------------------------------------------------- 1 | package responsetimeslo 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func checkSLO(t *testing.T, r *Tracker, expectFail int64, expectPass int64) { 10 | if r.FailsSLOCount.Get() != expectFail { 11 | t.Error("Unexpected failing count", r.FailsSLOCount.Get(), expectFail) 12 | } 13 | if r.MeetsSLOCount.Get() != expectPass { 14 | t.Error("Unexpected meets count", r.MeetsSLOCount.Get(), expectPass) 15 | } 16 | } 17 | 18 | func TestTracker(t *testing.T) { 19 | r := &Tracker{} 20 | ctx := context.Background() 21 | r.MaximumHealthyTime.Set(time.Second.Nanoseconds()) 22 | r.ErrInterrupt(ctx, time.Now(), time.Second) 23 | checkSLO(t, r, 0, 0) 24 | r.ErrInterrupt(ctx, time.Now(), time.Second*2) 25 | checkSLO(t, r, 1, 0) 26 | r.ErrBadRequest(ctx, time.Now(), time.Second*2) 27 | checkSLO(t, r, 1, 0) 28 | r.ErrConcurrencyLimitReject(ctx, time.Now()) 29 | checkSLO(t, r, 2, 0) 30 | r.ErrFailure(ctx, time.Now(), time.Nanosecond) 31 | checkSLO(t, r, 3, 0) 32 | r.ErrShortCircuit(ctx, time.Now()) 33 | checkSLO(t, r, 4, 0) 34 | r.ErrTimeout(ctx, time.Now(), time.Second) 35 | checkSLO(t, r, 5, 0) 36 | r.Success(ctx, time.Now(), time.Second) 37 | checkSLO(t, r, 5, 1) 38 | r.Success(ctx, time.Now(), time.Second*2) 39 | checkSLO(t, r, 6, 1) 40 | 41 | if r.Var().String() == "" { 42 | t.Error("Expect something out of Var") 43 | } 44 | 45 | } 46 | -------------------------------------------------------------------------------- /metrics/rolling/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package rolling contains a MetricsCollector that tracks in 
memory rolling stats about a circuit. 3 | */ 4 | package rolling 5 | -------------------------------------------------------------------------------- /metrics/rolling/rolling_test.go: -------------------------------------------------------------------------------- 1 | package rolling 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/cep21/circuit/v4" 11 | "github.com/cep21/circuit/v4/internal/testhelp" 12 | ) 13 | 14 | func TestHappyCircuit(t *testing.T) { 15 | s := StatFactory{} 16 | c := circuit.NewCircuitFromConfig("TestHappyCircuit", s.CreateConfig("")) 17 | err := c.Execute(context.Background(), testhelp.AlwaysPasses, nil) 18 | if err != nil { 19 | t.Error("saw error from circuit that always passes") 20 | } 21 | cmdMetrics := FindCommandMetrics(c) 22 | errCount := cmdMetrics.ErrorsAt(time.Now()) 23 | if errCount != 0 { 24 | t.Error("Happy circuit shouldn't make errors") 25 | } 26 | if cmdMetrics.Successes.TotalSum() != 1 { 27 | t.Error("Should see a success total") 28 | } 29 | if cmdMetrics.Successes.RollingSumAt(time.Now()) != 1 { 30 | t.Error("Should see a success rolling") 31 | } 32 | requestCount := cmdMetrics.LegitimateAttemptsAt(time.Now()) 33 | if requestCount != 1 { 34 | t.Error("happy circuit should still count as a request") 35 | } 36 | } 37 | 38 | func TestBadRequest(t *testing.T) { 39 | s := StatFactory{} 40 | c := circuit.NewCircuitFromConfig("TestBadRequest", s.CreateConfig("")) 41 | err := c.Execute(context.Background(), func(_ context.Context) error { 42 | return circuit.SimpleBadRequest{ 43 | Err: errors.New("this request is bad"), 44 | } 45 | }, nil) 46 | if err == nil { 47 | t.Error("I really expected an error here!") 48 | } 49 | cmdMetrics := FindCommandMetrics(c) 50 | errCount := cmdMetrics.ErrorsAt(time.Now()) 51 | if errCount != 0 { 52 | t.Error("bad requests shouldn't be errors!") 53 | } 54 | requestCount := cmdMetrics.LegitimateAttemptsAt(time.Now()) 55 | if requestCount != 0 { 56 | 
t.Error("bad requests should not count as legit requests!") 57 | } 58 | requestCount = cmdMetrics.ErrBadRequests.RollingSumAt(time.Now()) 59 | if requestCount != 1 { 60 | t.Error("bad requests should count as backed out requests!") 61 | } 62 | } 63 | 64 | func TestFallbackCircuit(t *testing.T) { 65 | s := StatFactory{} 66 | c := circuit.NewCircuitFromConfig("TestFallbackCircuit", s.CreateConfig("")) 67 | err := c.Execute(context.Background(), testhelp.AlwaysFails, testhelp.AlwaysPassesFallback) 68 | if err != nil { 69 | t.Error("saw error from circuit that has happy fallback", err) 70 | } 71 | cmdMetrics := FindCommandMetrics(c) 72 | fallbackMetrics := FindFallbackMetrics(c) 73 | if cmdMetrics.ErrorsAt(time.Now()) != 1 { 74 | t.Error("Even if fallback happens, and works ok, we should still count an error in the circuit") 75 | } 76 | if cmdMetrics.ErrFailures.RollingSumAt(time.Now()) != 1 { 77 | t.Error("Even if fallback happens, and works ok, we should still increment an error in stats") 78 | } 79 | if fallbackMetrics.ErrFailures.TotalSum() != 0 { 80 | t.Error("expected no fallback error") 81 | } 82 | if fallbackMetrics.Successes.TotalSum() != 1 { 83 | t.Error("expected fallback success") 84 | } 85 | if fallbackMetrics.Successes.RollingSumAt(time.Now()) != 1 { 86 | t.Error("expected fallback success") 87 | } 88 | } 89 | 90 | func TestCircuitIgnoreContextFailures(t *testing.T) { 91 | s := StatFactory{} 92 | h := circuit.Manager{ 93 | DefaultCircuitProperties: []circuit.CommandPropertiesConstructor{s.CreateConfig}, 94 | } 95 | c := h.MustCreateCircuit("TestFailingCircuit", circuit.Config{ 96 | Execution: circuit.ExecutionConfig{ 97 | Timeout: time.Hour, 98 | }, 99 | }) 100 | rootCtx, cancel := context.WithTimeout(context.Background(), time.Millisecond*3) 101 | defer cancel() 102 | err := c.Execute(rootCtx, testhelp.SleepsForX(time.Second), nil) 103 | if err == nil { 104 | t.Error("saw no error from circuit that should end in an error") 105 | } 106 | cmdMetrics := 
FindCommandMetrics(c) 107 | if cmdMetrics.ErrorsAt(time.Now()) != 0 { 108 | t.Error("if the root context dies, it shouldn't be an error") 109 | } 110 | if cmdMetrics.ErrInterrupts.TotalSum() != 1 { 111 | t.Error("Total sum should count the interrupt") 112 | } 113 | if cmdMetrics.ErrInterrupts.RollingSumAt(time.Now()) != 1 { 114 | t.Error("rolling sum should count the interrupt") 115 | } 116 | } 117 | 118 | func TestStatFactory_RunStats(t *testing.T) { 119 | s := StatFactory{} 120 | if s.RunStats("hello") != nil { 121 | t.Error("expected nil stats") 122 | } 123 | s.CreateConfig("hello") 124 | if s.RunStats("hello") == nil { 125 | t.Error("expected non nil stats") 126 | } 127 | } 128 | 129 | func TestStatFactory_FallbackStats(t *testing.T) { 130 | s := StatFactory{} 131 | if s.FallbackStats("hello") != nil { 132 | t.Error("expected nil stats") 133 | } 134 | s.CreateConfig("hello") 135 | if s.FallbackStats("hello") == nil { 136 | t.Error("expected non nil stats") 137 | } 138 | } 139 | 140 | func TestFindCommandMetrics(t *testing.T) { 141 | var c circuit.Circuit 142 | if stats := FindCommandMetrics(&c); stats != nil { 143 | t.Error("expect no stats on empty circuit") 144 | } 145 | } 146 | 147 | func TestFindFallbackMetrics(t *testing.T) { 148 | var c circuit.Circuit 149 | if stats := FindFallbackMetrics(&c); stats != nil { 150 | t.Error("expect no stats on empty circuit") 151 | } 152 | } 153 | 154 | func TestRunStats_Var(t *testing.T) { 155 | r := RunStats{} 156 | varOut := r.Var().String() 157 | if !strings.Contains(varOut, "ErrFailures") { 158 | t.Fatal("expect to see failures in var stats") 159 | } 160 | } 161 | 162 | func TestRunStats_Config(t *testing.T) { 163 | var r RunStats 164 | c := RunStatsConfig{ 165 | RollingStatsNumBuckets: 10, 166 | } 167 | c.Merge(defaultRunStatsConfig) 168 | r.SetConfigNotThreadSafe(c) 169 | if r.Config().RollingStatsNumBuckets != 10 { 170 | t.Fatal("expect 10 rolling stats buckets") 171 | } 172 | } 173 | 174 | func 
TestRunStats_ErrConcurrencyLimitReject(t *testing.T) { 175 | ctx := context.Background() 176 | var r RunStats 177 | r.SetConfigNotThreadSafe(defaultRunStatsConfig) 178 | now := time.Now() 179 | r.ErrConcurrencyLimitReject(ctx, now) 180 | if r.ErrConcurrencyLimitRejects.TotalSum() != 1 { 181 | t.Errorf("expect a limit reject") 182 | } 183 | } 184 | 185 | func TestRunStats_ErrShortCircuit(t *testing.T) { 186 | ctx := context.Background() 187 | var r RunStats 188 | r.SetConfigNotThreadSafe(defaultRunStatsConfig) 189 | now := time.Now() 190 | r.ErrShortCircuit(ctx, now) 191 | if r.ErrShortCircuits.TotalSum() != 1 { 192 | t.Errorf("expect a short circuit") 193 | } 194 | } 195 | 196 | func TestRunStats_ErrTimeout(t *testing.T) { 197 | ctx := context.Background() 198 | var r RunStats 199 | r.SetConfigNotThreadSafe(defaultRunStatsConfig) 200 | now := time.Now() 201 | r.ErrTimeout(ctx, now, time.Second) 202 | if r.ErrTimeouts.TotalSum() != 1 { 203 | t.Errorf("expect a error timeout") 204 | } 205 | if r.Latencies.Snapshot().Max() != time.Second { 206 | t.Errorf("expect 1 sec latency") 207 | } 208 | } 209 | 210 | func TestRunStats_ErrorPercentage(t *testing.T) { 211 | ctx := context.Background() 212 | var r RunStats 213 | if r.ErrorPercentage() != 0.0 { 214 | t.Errorf("Expect no errors") 215 | } 216 | r.SetConfigNotThreadSafe(defaultRunStatsConfig) 217 | now := time.Now() 218 | r.ErrTimeout(ctx, now, time.Second) 219 | if r.ErrorPercentage() != 1.0 { 220 | t.Errorf("Expect all errors") 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | --------------------------------------------------------------------------------