├── .circleci
│   └── config.yml
├── .gitignore
├── LICENSE
├── README.md
├── doc.go
├── sleep.go
├── sleep_test.go
├── timeline.go
└── timeline_test.go

/.circleci/config.yml:
--------------------------------------------------------------------------------
version: 2
jobs:
  build:
    working_directory: /go/src/github.com/segmentio/timers
    docker:
      - image: circleci/golang
    steps:
      - checkout
      - setup_remote_docker: { reusable: true, docker_layer_caching: true }
      - run: go get -v -t ./...
      - run: go vet ./...
      - run: go test -v -race ./...

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

# Emacs
*~

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2017 Segment

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# timers [![CircleCI](https://circleci.com/gh/segmentio/timers.svg?style=shield)](https://circleci.com/gh/segmentio/timers) [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/timers)](https://goreportcard.com/report/github.com/segmentio/timers) [![GoDoc](https://godoc.org/github.com/segmentio/timers?status.svg)](https://godoc.org/github.com/segmentio/timers)

> **Note**
> Segment has paused maintenance on this project, but may return it to an active status in the future. Issues and pull requests from external contributors are not being considered, although internal contributions may appear from time to time. The project remains available under its open source license for anyone to use.

## Motivations

The Go standard library offers good timer management abstractions through the
[time](https://golang.org/pkg/time/) and [context](https://golang.org/pkg/context/)
packages. However, those are built as general-purpose solutions that fit most
programs; they aren't designed for very high performance applications, and they
can become sources of inefficiency for high-traffic services.

Take as an example the common pattern of using `context.WithTimeout` to acquire
a context that will control the time limit for an HTTP request. Creating such a
context constructs a new timer within the Go runtime and allocates a few
hundred bytes of memory on the heap. A large portion of the CPU time and memory
allocations then ends up being spent on creating timers which in most cases
will never fire, since the normal behavior is for the request to succeed rather
than time out.

This is where the `timers` package comes into play, offering timer management
abstractions which are both compatible with code built on top of the standard
library and designed for efficiency.

## Timelines

Timelines are a key abstraction for efficient timer management. They expose APIs
to create background contexts that expire on a given deadline, but instead of
creating a new context for every deadline, a timeline shares contexts between
deadlines that fall within the same time window. This means that concurrent
operations which are intended to expire at roughly the same time do not need to
create and manage their own contexts; they can share one that the timeline has
already set to expire near their own deadline.

The trade-off is in the accuracy of the expirations: when a new context is
created, the runtime does its best to expire it exactly at the time it was set
for, whereas a timeline uses a configurable resolution to group expiration
times together under a single timer.
There are use cases where a program needs timers that are as accurate as
possible, but often (and especially when managing request timeouts) a program
has no issue dealing with a 10 second timeout that triggers after 11 seconds
instead of 10.
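
## Example

Here is a sketch of how a service could share a single timeline across all of
its outgoing requests; the URL, timeout value, and use of the default HTTP
client are arbitrary placeholders, only the `Timeline`, `Timeout`, and `Cancel`
calls come from this package:

```go
package main

import (
	"net/http"
	"time"

	"github.com/segmentio/timers"
)

func main() {
	// All requests taking their timeout from this timeline share contexts
	// that expire within the same one-second window.
	timeline := timers.Timeline{Resolution: 1 * time.Second}
	defer timeline.Cancel()

	req, err := http.NewRequest("GET", "https://example.com", nil)
	if err != nil {
		panic(err)
	}

	// The context expires roughly 10s from now, plus up to the timeline's
	// resolution.
	req = req.WithContext(timeline.Timeout(10 * time.Second))

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	res.Body.Close()
}
```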

--------------------------------------------------------------------------------
/doc.go:
--------------------------------------------------------------------------------
// Package timers exposes efficient data structures for managing timers.
package timers

--------------------------------------------------------------------------------
/sleep.go:
--------------------------------------------------------------------------------
package timers

import (
	"context"
	"time"
)

// Sleep puts the calling goroutine to sleep until the given duration has
// passed, or until the context is canceled, whichever comes first. If the
// context is canceled first, Sleep returns the context's error.
func Sleep(ctx context.Context, duration time.Duration) (err error) {
	timer := time.NewTimer(duration)
	select {
	case <-timer.C:
	case <-ctx.Done():
		err = ctx.Err()
	}
	timer.Stop()
	return
}

--------------------------------------------------------------------------------
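
For illustration, a short sketch (not part of the repository) of how `Sleep`
composes with a context created from a timeline; the `waitUntilReady` helper
and the readiness check are hypothetical:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/segmentio/timers"
)

// waitUntilReady polls a readiness check once per second until it reports
// ready or the context is canceled, in which case Sleep returns the context's
// error and the loop stops.
func waitUntilReady(ctx context.Context, ready func() bool) error {
	for !ready() {
		if err := timers.Sleep(ctx, 1*time.Second); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// LowRes groups timeouts into one-second windows, which is plenty
	// accurate for this kind of polling.
	ctx := timers.LowRes.Timeout(5 * time.Second)

	err := waitUntilReady(ctx, func() bool { return false })
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true after roughly 5-6s
}
```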
/sleep_test.go:
--------------------------------------------------------------------------------
package timers

import (
	"context"
	"testing"
	"time"
)

func TestSleep(t *testing.T) {
	t.Run("timeout", testSleepTimeout)
	t.Run("cancel", testSleepCancel)
}

func testSleepTimeout(t *testing.T) {
	t.Parallel()

	const sleepDuration = 100 * time.Microsecond

	then := time.Now()
	err := Sleep(context.Background(), sleepDuration)
	now := time.Now()

	if err != nil {
		t.Errorf("unexpected error returned from Sleep, expected nil but got %q", err)
	}

	if elapsed := now.Sub(then); elapsed < sleepDuration {
		t.Errorf("not enough time has passed since sleep was called, expected more than %s but got %s", sleepDuration, elapsed)
	}
}

func testSleepCancel(t *testing.T) {
	t.Parallel()

	const sleepDuration = 100 * time.Millisecond
	const abortDuration = sleepDuration / 2

	ctx, cancel := context.WithTimeout(context.Background(), abortDuration)
	defer cancel()

	then := time.Now()
	err := Sleep(ctx, sleepDuration)
	now := time.Now()

	if ctxErr := ctx.Err(); err != ctxErr {
		t.Errorf("unexpected error returned from Sleep, expected %q but got %q", ctxErr, err)
	}

	if elapsed := now.Sub(then); elapsed >= sleepDuration {
		t.Errorf("too much time has passed since sleep was called, expected less than %s but got %s", sleepDuration, elapsed)
	}
}

--------------------------------------------------------------------------------
/timeline.go:
--------------------------------------------------------------------------------
package timers

import (
	"context"
	"math/rand"
	"sync"
	"sync/atomic"
	"time"
)

// Timeline is a data structure that maintains a cache of deadlines represented
// by background contexts. A Timeline has a resolution attribute representing
// the accuracy of the deadlines it maintains. All deadlines that fall within
// the same resolution window share the same context, making it very efficient
// to create thousands, or even millions of them, since the runtime only needs
// to maintain a single timer per resolution window.
//
// Timelines are safe to use concurrently from multiple goroutines, however they
// should not be copied after first use.
//
// The zero value is a valid timeline with a resolution of 100ms.
type Timeline struct {
	// Resolution represents the accuracy of timers managed by this timeline.
	// The lower the resolution the more accurate the timers are, but it also
	// means the timeline will put more pressure on the runtime and use more
	// memory.
	Resolution time.Duration

	// Background configures the parent context of the contexts created by the
	// timeline. If nil, the default background context is used instead.
	Background context.Context

	mutex     sync.RWMutex
	deadlines map[int64]deadline

	cleanupLock int64
	cleanupTime int64
}

var (
	// HighRes is a timeline configured for high resolution timers, with 10
	// millisecond accuracy.
	HighRes = Timeline{
		Resolution: 10 * time.Millisecond,
	}

	// LowRes is a timeline configured for low resolution timers, with 1 second
	// accuracy. This timeline is typically useful for network timeouts.
	//
	// Here is an example of how the timeline may be used to set a timeout on an
	// http request:
	//
	//	req = req.WithContext(timers.LowRes.Timeout(10 * time.Second))
	//	res, err := httpClient.Do(req)
	//
	LowRes = Timeline{
		Resolution: 1 * time.Second,
	}
)

// Cancel cancels all contexts and releases all internal resources managed by
// the timeline.
func (t *Timeline) Cancel() {
	t.mutex.Lock()
	deadlines := t.deadlines
	t.deadlines = nil
	t.mutex.Unlock()

	for _, d := range deadlines {
		d.cancel()
	}
}

// Timeout returns a context which expires after the given amount of time has
// passed, plus up to the timeline's resolution.
func (t *Timeline) Timeout(timeout time.Duration) context.Context {
	now := time.Now()
	return t.Context(now.Add(timeout), now)
}

// Deadline returns a context which expires when the given deadline is reached,
// plus up to the timeline's resolution.
func (t *Timeline) Deadline(deadline time.Time) context.Context {
	return t.Context(deadline, time.Now())
}

// Context returns a context which expires when the given deadline is reached,
// using `now` as the current time.
func (t *Timeline) Context(at time.Time, now time.Time) context.Context {
	r := int64(t.resolution())
	k := ((at.UnixNano() / r) + 1) * r

	t.mutex.RLock()
	d, ok := t.deadlines[k]
	t.mutex.RUnlock()

	if ok { // fast path
		return d.context
	}

	background := t.background()
	expiration := jitterTime(time.Unix(0, k), time.Duration(r))
	newDeadline := makeDeadline(background, expiration)

	t.mutex.Lock()
	d, ok = t.deadlines[k]
	if !ok {
		if t.deadlines == nil {
			t.deadlines = make(map[int64]deadline)
		}
		t.deadlines[k] = newDeadline
	}
	t.mutex.Unlock()
	if ok {
		newDeadline.cancel()
	} else {
		d = newDeadline
	}

	if cleanupTime := t.loadCleanupTime(); cleanupTime.IsZero() || cleanupTime.Before(now) {
		if t.tryLockCleanup() {
			t.storeCleanupTime(t.nextCleanupTime(cleanupTime))
			t.cleanup(now)
			t.unlockCleanup()
		}
	}

	return d.context
}

func (t *Timeline) nextCleanupTime(lastCleanupTime time.Time) time.Time {
	return lastCleanupTime.Add(100 * t.resolution())
}

func (t *Timeline) loadCleanupTime() time.Time {
	return time.Unix(0, atomic.LoadInt64(&t.cleanupTime))
}

func (t *Timeline) storeCleanupTime(cleanupTime time.Time) {
	atomic.StoreInt64(&t.cleanupTime, cleanupTime.UnixNano())
}

func (t *Timeline) tryLockCleanup() bool {
	return atomic.CompareAndSwapInt64(&t.cleanupLock, 0, 1)
}

func (t *Timeline) unlockCleanup() {
	atomic.StoreInt64(&t.cleanupLock, 0)
}

func (t *Timeline) cleanup(now time.Time) {
	type timestampAndDeadline struct {
		timestamp int64
		deadline  deadline
	}

	expired := []timestampAndDeadline{}
	r := t.resolution()

	t.mutex.RLock()
	for k, d := range t.deadlines {
		if deadline, _ := d.context.Deadline(); now.After(deadline.Add(r)) {
			expired = append(expired, timestampAndDeadline{
				timestamp: k,
				deadline:  d,
			})
		}
	}
	t.mutex.RUnlock()

	if len(expired) != 0 {
		t.mutex.Lock()
		for _, x := range expired {
			delete(t.deadlines, x.timestamp)
		}
		t.mutex.Unlock()
		for _, x := range expired {
			x.deadline.cancel()
		}
	}
}

func (t *Timeline) resolution() time.Duration {
	if r := t.Resolution; r != 0 {
		return r
	}
	return 100 * time.Millisecond
}

func (t *Timeline) background() context.Context {
	if b := t.Background; b != nil {
		return b
	}
	return context.Background()
}

type deadline struct {
	context context.Context
	cancel  context.CancelFunc
}

func makeDeadline(parent context.Context, expiration time.Time) deadline {
	context, cancel := context.WithDeadline(parent, expiration)
	return deadline{
		context: context,
		cancel:  cancel,
	}
}

var (
	jitterMutex sync.Mutex
	jitterRand  = rand.New(
		rand.NewSource(time.Now().UnixNano()),
	)
)

func jitter(d time.Duration) time.Duration {
	jitterMutex.Lock()
	x := time.Duration(jitterRand.Int63n(int64(d)))
	jitterMutex.Unlock()
	return x
}

func jitterTime(t time.Time, d time.Duration) time.Time {
	return t.Add(jitter(d))
}

--------------------------------------------------------------------------------
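
To make the sharing behavior concrete, here is a small standalone sketch (not
part of the package) of the rounding that `Context` applies to deadlines; the
reference time and resolution below are arbitrary:

```go
package main

import (
	"fmt"
	"time"
)

// bucketKey mirrors the rounding performed by Timeline.Context: a deadline is
// rounded up to the next resolution boundary, and all deadlines that round to
// the same boundary share a single context (and therefore a single timer).
func bucketKey(at time.Time, resolution time.Duration) int64 {
	r := int64(resolution)
	return ((at.UnixNano() / r) + 1) * r
}

func main() {
	res := 1 * time.Second
	base := time.Unix(1700000000, 100e6) // arbitrary reference, 100ms past the second

	d1 := base
	d2 := base.Add(300 * time.Millisecond) // still within the same 1s window

	fmt.Println(bucketKey(d1, res) == bucketKey(d2, res)) // true: both share one context
}
```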
/timeline_test.go:
--------------------------------------------------------------------------------
package timers

import (
	"context"
	"sync"
	"testing"
	"time"
)

func TestTimeline(t *testing.T) {
	tests := []struct {
		scenario string
		function func(*testing.T)
	}{
		{
			scenario: "scheduling a deadline every 10ms triggers as expected",
			function: testTimeline10ms,
		},
		{
			scenario: "multiple goroutines waiting on the same timeline are all notified when the deadline expires",
			function: testTimelineMulti,
		},
		{
			scenario: "canceling a timeline cancels all contexts of this timeline",
			function: testTimelineCancel,
		},
		{
			scenario: "canceling the background context of a timeline also cancels contexts that it created",
			function: testTimelineBackground,
		},
	}

	for _, test := range tests {
		testFunc := test.function
		t.Run(test.scenario, func(t *testing.T) {
			t.Parallel()
			testFunc(t)
		})
	}
}

func testTimeline10ms(t *testing.T) {
	timeline := Timeline{Resolution: 1 * time.Millisecond}
	defer timeline.Cancel()

	for i := 0; i != 100; i++ {
		t0 := time.Now()

		ctx := timeline.Timeout(10 * time.Millisecond)
		<-ctx.Done()

		t1 := time.Now()
		d, _ := ctx.Deadline()

		for j, delay := range []time.Duration{d.Sub(t0), t1.Sub(t0)} {
			if delay < (10 * time.Millisecond) {
				t.Error("the delay is too short, expected > 10ms, got", delay, "at", j, "/", i)
			}
			if delay > (15 * time.Millisecond) {
				t.Error("the delay is too large, expected < 15ms, got", delay, "at", j, "/", i)
			}
		}

		if err := ctx.Err(); err != context.DeadlineExceeded {
			t.Error("bad context error:", err)
		}
	}
}

func testTimelineMulti(t *testing.T) {
	timeline := Timeline{Resolution: 10 * time.Millisecond}
	defer timeline.Cancel()

	wg := sync.WaitGroup{}
	deadline := time.Now().Add(100 * time.Millisecond)

	for i := 0; i != 10; i++ {
		wg.Add(1)
		go func(ctx context.Context) {
			<-ctx.Done()
			wg.Done()
		}(timeline.Deadline(deadline))
	}

	wg.Wait()
}

func testTimelineCancel(t *testing.T) {
	timeline := Timeline{}

	ctx1 := timeline.Timeout(1 * time.Second)
	ctx2 := timeline.Timeout(2 * time.Second)
	ctx3 := timeline.Timeout(3 * time.Second)

	timeline.Cancel()

	for _, ctx := range []context.Context{ctx1, ctx2, ctx3} {
		if err := ctx.Err(); err != context.Canceled {
			t.Error("bad context error:", err)
		}
	}
}

func testTimelineBackground(t *testing.T) {
	background, cancel := context.WithCancel(context.Background())
	timeline := Timeline{Background: background}

	ctx1 := timeline.Timeout(1 * time.Second)
	ctx2 := timeline.Timeout(2 * time.Second)
	ctx3 := timeline.Timeout(3 * time.Second)

	cancel()

	for _, ctx := range []context.Context{ctx1, ctx2, ctx3} {
		if err := ctx.Err(); err != context.Canceled {
			t.Error("bad context error:", err)
		}
	}
}

func BenchmarkTimeline(b *testing.B) {
	timeouts := []time.Duration{
		100 * time.Millisecond,
		250 * time.Millisecond,
		500 * time.Millisecond,
		1 * time.Second,
		10 * time.Second,
	}

	timeline := Timeline{}
	defer timeline.Cancel()

	b.RunParallel(func(pb *testing.PB) {
		for i := 0; pb.Next(); i++ {
			timeline.Timeout(timeouts[i%len(timeouts)])
		}
	})
}

--------------------------------------------------------------------------------