├── .github └── workflows │ ├── static-analysis.yml │ └── test.yml ├── LICENSE.md ├── README.md ├── doc.go ├── go.mod ├── go.sum ├── group.go ├── group_test.go └── heap_test.go /.github/workflows/static-analysis.yml: -------------------------------------------------------------------------------- 1 | name: Static Analysis 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | pull_request: 8 | branches: 9 | - '*' 10 | 11 | jobs: 12 | build: 13 | strategy: 14 | matrix: 15 | go-version: [1.18] 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - name: Set up Go 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: ${{ matrix.go-version }} 23 | id: go 24 | 25 | - name: Check out code into the Go module directory 26 | uses: actions/checkout@v2 27 | 28 | - name: Install staticcheck 29 | run: go install honnef.co/go/tools/cmd/staticcheck@latest 30 | 31 | - name: Print staticcheck version 32 | run: staticcheck -version 33 | 34 | - name: Run staticcheck 35 | run: staticcheck ./... 36 | 37 | - name: Run go vet 38 | run: go vet ./... 39 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | pull_request: 8 | branches: 9 | - '*' 10 | 11 | jobs: 12 | build: 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | go-version: [1.18] 17 | os: [ubuntu-latest, macos-latest] 18 | runs-on: ${{ matrix.os }} 19 | 20 | steps: 21 | - name: Set up Go 22 | uses: actions/setup-go@v2 23 | with: 24 | go-version: ${{ matrix.go-version }} 25 | id: go 26 | 27 | - name: Check out code into the Go module directory 28 | uses: actions/checkout@v2 29 | 30 | # Run basic tests, we just want to make sure there is parity on Linux and 31 | # macOS. 32 | - name: Run tests 33 | run: go test ./... 
34 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (C) 2020-2022 Matt Layher 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # schedgroup [![Test Status](https://github.com/mdlayher/schedgroup/workflows/Test/badge.svg)](https://github.com/mdlayher/schedgroup/actions) [![Go Reference](https://pkg.go.dev/badge/github.com/mdlayher/schedgroup.svg)](https://pkg.go.dev/github.com/mdlayher/schedgroup) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/schedgroup)](https://goreportcard.com/report/github.com/mdlayher/schedgroup) 2 | 3 | Package `schedgroup` provides a goroutine worker pool which schedules tasks 4 | to be performed at or after a specified time. MIT Licensed. 5 | 6 | Special thanks to Egon Elbre from #performance on Gophers Slack for [two](https://play.golang.org/p/YyeSWuDil-b) 7 | [prototypes](https://play.golang.org/p/4iYBO6Cgj8m) of this idea, based 8 | on Go's `container/heap` package. Egon's prototypes heavily influenced the final 9 | design of this package. 10 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // Package schedgroup provides a goroutine worker pool which schedules tasks 2 | // to be performed at or after a specified time. 3 | // 4 | // Special thanks to Egon Elbre from #performance on Gophers Slack for two 5 | // prototypes (https://play.golang.org/p/YyeSWuDil-b, https://play.golang.org/p/4iYBO6Cgj8m) 6 | // of this idea, based on Go's container/heap package. Egon's prototypes 7 | // heavily influenced the final design of this package. 
8 | package schedgroup 9 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mdlayher/schedgroup 2 | 3 | go 1.18 4 | 5 | require github.com/google/go-cmp v0.5.7 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= 2 | github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= 3 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 4 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 5 | -------------------------------------------------------------------------------- /group.go: -------------------------------------------------------------------------------- 1 | package schedgroup 2 | 3 | import ( 4 | "container/heap" 5 | "context" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | ) 10 | 11 | // Although unnecessary, explicit break labels should be used in all select 12 | // statements in this package so that test coverage tools are able to identify 13 | // which cases have been triggered. 14 | 15 | // A Group is a goroutine worker pool which schedules tasks to be performed 16 | // after a specified time. A Group must be created with the New constructor. 17 | // Once Wait is called, New must be called to create a new Group to schedule 18 | // more tasks. 19 | type Group struct { 20 | // Atomics must come first per sync/atomic. 21 | waiting uint32 22 | 23 | // Context/cancelation support. 24 | ctx context.Context 25 | cancel func() 26 | 27 | // Task runner and a heap of tasks to be run. 
28 | wg sync.WaitGroup 29 | mu sync.Mutex 30 | tasks tasks 31 | 32 | // Signals for when a task is added and how many tasks remain on the heap. 33 | addC chan struct{} 34 | lenC chan int 35 | } 36 | 37 | // New creates a new Group which will use ctx for cancelation. If cancelation 38 | // is not a concern, use context.Background(). 39 | func New(ctx context.Context) *Group { 40 | // Monitor goroutine context and cancelation. 41 | mctx, cancel := context.WithCancel(ctx) 42 | 43 | g := &Group{ 44 | ctx: ctx, 45 | cancel: cancel, 46 | 47 | addC: make(chan struct{}), 48 | lenC: make(chan int), 49 | } 50 | 51 | g.wg.Add(1) 52 | go func() { 53 | defer g.wg.Done() 54 | g.monitor(mctx) 55 | }() 56 | 57 | return g 58 | } 59 | 60 | // Delay schedules a function to run at or after the specified delay. Delay 61 | // is a convenience wrapper for Schedule which adds delay to the current time. 62 | // Specifying a negative delay will cause the task to be scheduled immediately. 63 | // 64 | // If Delay is called after a call to Wait, Delay will panic. 65 | func (g *Group) Delay(delay time.Duration, fn func()) { 66 | g.Schedule(time.Now().Add(delay), fn) 67 | } 68 | 69 | // Schedule schedules a function to run at or after the specified time. 70 | // Specifying a past time will cause the task to be scheduled immediately. 71 | // 72 | // If Schedule is called after a call to Wait, Schedule will panic. 73 | func (g *Group) Schedule(when time.Time, fn func()) { 74 | if atomic.LoadUint32(&g.waiting) != 0 { 75 | panic("schedgroup: attempted to schedule task after Group.Wait was called") 76 | } 77 | 78 | g.mu.Lock() 79 | defer g.mu.Unlock() 80 | 81 | heap.Push(&g.tasks, task{ 82 | Deadline: when, 83 | Call: fn, 84 | }) 85 | 86 | // Notify monitor that a new task has been pushed on to the heap. 
87 | select { 88 | case g.addC <- struct{}{}: 89 | break 90 | default: 91 | break 92 | } 93 | } 94 | 95 | // Wait waits for the completion of all scheduled tasks, or for cancelation of 96 | // the context passed to New. Wait will only return errors due to context 97 | // cancelation. If no context is associated with the Group, Wait never returns 98 | // an error. 99 | // 100 | // Once Wait is called, any further calls to Delay or Schedule will panic. If 101 | // Wait is called more than once, Wait will panic. 102 | func (g *Group) Wait() error { 103 | if v := atomic.SwapUint32(&g.waiting, 1); v != 0 { 104 | panic("schedgroup: multiple calls to Group.Wait") 105 | } 106 | 107 | // Context cancelation takes priority. 108 | if err := g.ctx.Err(); err != nil { 109 | return err 110 | } 111 | 112 | // See if the task heap is already empty. If so, we can exit early. 113 | g.mu.Lock() 114 | if g.tasks.Len() == 0 { 115 | // Release the mutex immediately so that any running jobs are able to 116 | // complete and send on g.lenC. 117 | g.mu.Unlock() 118 | g.cancel() 119 | g.wg.Wait() 120 | return nil 121 | } 122 | g.mu.Unlock() 123 | 124 | // Wait on context cancelation or for the number of items in the heap 125 | // to reach 0. 126 | var n int 127 | for { 128 | select { 129 | case <-g.ctx.Done(): 130 | return g.ctx.Err() 131 | case n = <-g.lenC: 132 | // Context cancelation takes priority. 133 | if err := g.ctx.Err(); err != nil { 134 | return err 135 | } 136 | } 137 | 138 | if n == 0 { 139 | // No more tasks left, cancel the monitor goroutine and wait for 140 | // all tasks to complete. 141 | g.cancel() 142 | g.wg.Wait() 143 | return nil 144 | } 145 | } 146 | } 147 | 148 | // monitor triggers tasks from the heap as their deadlines arrive, until ctx 149 | // is canceled. 150 | func (g *Group) monitor(ctx context.Context) { 151 | t := time.NewTimer(0) 152 | defer t.Stop() 153 | 154 | for { 155 | if ctx.Err() != nil { 156 | // Context canceled.
157 | return 158 | } 159 | 160 | now := time.Now() 161 | var tickC <-chan time.Time 162 | 163 | // Start any tasks that are ready as of now. 164 | next := g.trigger(now) 165 | if !next.IsZero() { 166 | // Wait until the next scheduled task is ready. 167 | t.Reset(next.Sub(now)) 168 | tickC = t.C 169 | } else { 170 | t.Stop() 171 | } 172 | 173 | select { 174 | case <-ctx.Done(): 175 | // Context canceled. 176 | return 177 | case <-g.addC: 178 | // A new task was added, check task heap again. 179 | //lint:ignore SA4011 intentional break for code coverage 180 | break 181 | case <-tickC: 182 | // An existing task should be ready as of now. 183 | //lint:ignore SA4011 intentional break for code coverage 184 | break 185 | } 186 | } 187 | } 188 | 189 | // trigger checks for scheduled tasks and runs them if they are scheduled 190 | // on or after the time specified by now. 191 | func (g *Group) trigger(now time.Time) time.Time { 192 | g.mu.Lock() 193 | defer func() { 194 | // Notify how many tasks are left on the heap so Wait can stop when 195 | // appropriate. 196 | select { 197 | case g.lenC <- g.tasks.Len(): 198 | break 199 | default: 200 | // Wait hasn't been called. 201 | break 202 | } 203 | 204 | g.mu.Unlock() 205 | }() 206 | 207 | for g.tasks.Len() > 0 { 208 | next := &g.tasks[0] 209 | if next.Deadline.After(now) { 210 | // Earliest scheduled task is not ready. 211 | return next.Deadline 212 | } 213 | 214 | // This task is ready, pop it from the heap and run it. 215 | t := heap.Pop(&g.tasks).(task) 216 | g.wg.Add(1) 217 | go func() { 218 | defer g.wg.Done() 219 | t.Call() 220 | }() 221 | } 222 | 223 | return time.Time{} 224 | } 225 | 226 | // A task is a function which is called after the specified deadline. 227 | type task struct { 228 | Deadline time.Time 229 | Call func() 230 | } 231 | 232 | // tasks implements heap.Interface. 
233 | type tasks []task 234 | 235 | var _ heap.Interface = &tasks{} 236 | 237 | func (pq tasks) Len() int { return len(pq) } 238 | func (pq tasks) Less(i, j int) bool { return pq[i].Deadline.Before(pq[j].Deadline) } 239 | func (pq tasks) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } 240 | func (pq *tasks) Push(x interface{}) { *pq = append(*pq, x.(task)) } 241 | func (pq *tasks) Pop() (item interface{}) { 242 | n := len(*pq) 243 | item, *pq = (*pq)[n-1], (*pq)[:n-1] 244 | return item 245 | } 246 | -------------------------------------------------------------------------------- /group_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Matt Layher 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package schedgroup_test 15 | 16 | import ( 17 | "context" 18 | "fmt" 19 | "log" 20 | "sync" 21 | "sync/atomic" 22 | "testing" 23 | "time" 24 | 25 | "github.com/google/go-cmp/cmp" 26 | "github.com/mdlayher/schedgroup" 27 | ) 28 | 29 | func TestGroupScheduling(t *testing.T) { 30 | t.Parallel() 31 | 32 | sg := schedgroup.New(context.Background()) 33 | 34 | // Schedule N tasks that should be roughly spread duration apart, with a bit 35 | // of leeway in each direction. While 10ms leeway worked fine on Linux, we 36 | // saw a difference of 64ms in the darwin GitHub Actions runner. 
37 | const ( 38 | n = 5 39 | spread = 100 * time.Millisecond 40 | leeway = 100 * time.Millisecond 41 | ) 42 | 43 | timeC := make(chan time.Time, n) 44 | 45 | var wg sync.WaitGroup 46 | wg.Add(1) 47 | defer wg.Wait() 48 | 49 | go func() { 50 | defer func() { 51 | close(timeC) 52 | wg.Done() 53 | }() 54 | 55 | // Produce the current time when a task is fired. 56 | for i := 0; i < n; i++ { 57 | sg.Delay(time.Duration(i+1)*spread, func() { 58 | timeC <- time.Now() 59 | }) 60 | } 61 | 62 | if err := sg.Wait(); err != nil { 63 | panicf("failed to wait: %v", err) 64 | } 65 | }() 66 | 67 | var ( 68 | last time.Time 69 | recv int 70 | ) 71 | 72 | for tv := range timeC { 73 | recv++ 74 | 75 | if !last.IsZero() { 76 | diff := tv.Sub(last) 77 | 78 | // Assume that each task should have been scheduled roughly spread 79 | // seconds apart, with some leeway. 80 | if diff < spread-leeway || diff > spread+leeway { 81 | t.Fatalf("expected roughly %s +/- %s difference, but got: %v", spread, leeway, diff) 82 | } 83 | } 84 | 85 | last = tv 86 | } 87 | 88 | if diff := cmp.Diff(n, recv); diff != "" { 89 | t.Fatalf("unexpected number of received values (-want +got):\n%s", diff) 90 | } 91 | } 92 | 93 | func TestGroupContextCancelImmediate(t *testing.T) { 94 | t.Parallel() 95 | 96 | // Context canceled before the Group is created, so no tasks should ever run. 97 | ctx, cancel := context.WithCancel(context.Background()) 98 | cancel() 99 | 100 | sg := schedgroup.New(ctx) 101 | 102 | for i := 0; i < 5; i++ { 103 | sg.Schedule(time.Now(), func() { 104 | panic("should not be called") 105 | }) 106 | time.Sleep(2 * time.Millisecond) 107 | } 108 | 109 | if err := sg.Wait(); err != context.Canceled { 110 | t.Fatalf("expected context canceled, but got: %v", err) 111 | } 112 | } 113 | 114 | func TestGroupSchedulePast(t *testing.T) { 115 | t.Parallel() 116 | 117 | sg := schedgroup.New(context.Background()) 118 | 119 | const n = 2 120 | 121 | // Each task will signal on a channel when it is run. 
122 | sigC := make(chan struct{}, n) 123 | signal := func() { 124 | sigC <- struct{}{} 125 | } 126 | 127 | // Any negative delay or time in the past will cause the task to be 128 | // scheduled immediately. 129 | sg.Delay(-1*time.Second, signal) 130 | sg.Schedule(time.Now().Add(-1*time.Second), signal) 131 | 132 | if err := sg.Wait(); err != nil { 133 | t.Fatalf("failed to wait: %v", err) 134 | } 135 | 136 | if diff := cmp.Diff(n, len(sigC)); diff != "" { 137 | t.Fatalf("unexpected number of tasks run (-want +got):\n%s", diff) 138 | } 139 | } 140 | 141 | func TestGroupScheduledTasksContextCancel(t *testing.T) { 142 | t.Parallel() 143 | 144 | ctx, cancel := context.WithCancel(context.Background()) 145 | defer cancel() 146 | 147 | sg := schedgroup.New(ctx) 148 | 149 | // Assume we want to process work repeatedly but eventually our caller 150 | // wants no more tasks to be scheduled. 151 | waitC := make(chan struct{}) 152 | var count int32 153 | for i := 0; i < 10; i++ { 154 | sg.Delay(1*time.Millisecond, func() { 155 | waitC <- struct{}{} 156 | atomic.AddInt32(&count, 1) 157 | }) 158 | 159 | // Blocks until closed halfway through. Any further sends will result 160 | // in a panic, failing the test. 161 | <-waitC 162 | 163 | if i == 5 { 164 | cancel() 165 | close(waitC) 166 | } 167 | } 168 | 169 | if err := sg.Wait(); err != context.Canceled { 170 | t.Fatalf("expected context canceled, but got: %v", err) 171 | } 172 | 173 | if diff := cmp.Diff(6, int(atomic.LoadInt32(&count))); diff != "" { 174 | t.Fatalf("unexpected number of tasks scheduled (-want +got):\n%s", diff) 175 | } 176 | } 177 | 178 | func TestGroupWaitContextDeadlineExceeded(t *testing.T) { 179 | t.Parallel() 180 | 181 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) 182 | defer cancel() 183 | 184 | sg := schedgroup.New(ctx) 185 | 186 | // This task is scheduled now and should run. 
187 | doneC := make(chan struct{}) 188 | sg.Schedule(time.Now(), func() { 189 | close(doneC) 190 | }) 191 | 192 | // This task is delayed and should not run. 193 | sg.Delay(1*time.Second, func() { 194 | panic("should not be called") 195 | }) 196 | 197 | // Make sure the first task ran and then expect deadline exceeded. 198 | <-doneC 199 | if err := sg.Wait(); err != context.DeadlineExceeded { 200 | t.Fatalf("expected deadline exceeded, but got: %v", err) 201 | } 202 | } 203 | 204 | func TestGroupWaitNoContext(t *testing.T) { 205 | t.Parallel() 206 | 207 | sg := schedgroup.New(context.Background()) 208 | 209 | timer := time.AfterFunc(5*time.Second, func() { 210 | panic("took too long") 211 | }) 212 | defer timer.Stop() 213 | 214 | // Make sure both tasks complete before Wait unblocks. 215 | doneC := make(chan struct{}, 2) 216 | done := func() { 217 | doneC <- struct{}{} 218 | } 219 | 220 | sg.Schedule(time.Now(), done) 221 | sg.Delay(50*time.Millisecond, done) 222 | 223 | <-doneC 224 | <-doneC 225 | 226 | if err := sg.Wait(); err != nil { 227 | t.Fatalf("failed to wait: %v", err) 228 | } 229 | 230 | } 231 | 232 | func TestGroupScheduleAfterWaitPanic(t *testing.T) { 233 | t.Parallel() 234 | 235 | sg := schedgroup.New(context.Background()) 236 | if err := sg.Wait(); err != nil { 237 | t.Fatalf("failed to wait: %v", err) 238 | } 239 | 240 | defer func() { 241 | r := recover() 242 | if r == nil { 243 | t.Fatal("no panic occurred") 244 | } 245 | 246 | // Probably better than exporting the message. 
247 | const want = "schedgroup: attempted to schedule task after Group.Wait was called" 248 | 249 | if diff := cmp.Diff(want, r); diff != "" { 250 | t.Fatalf("unexpected panic (-want +got):\n%s", diff) 251 | } 252 | }() 253 | 254 | sg.Schedule(time.Now(), func() { 255 | panic("should not be scheduled") 256 | }) 257 | } 258 | 259 | func TestGroupDoubleWaitPanic(t *testing.T) { 260 | t.Parallel() 261 | 262 | sg := schedgroup.New(context.Background()) 263 | if err := sg.Wait(); err != nil { 264 | t.Fatalf("failed to wait: %v", err) 265 | } 266 | 267 | defer func() { 268 | r := recover() 269 | if r == nil { 270 | t.Fatal("no panic occurred") 271 | } 272 | 273 | // Probably better than exporting the message. 274 | const want = "schedgroup: multiple calls to Group.Wait" 275 | 276 | if diff := cmp.Diff(want, r); diff != "" { 277 | t.Fatalf("unexpected panic (-want +got):\n%s", diff) 278 | } 279 | }() 280 | 281 | sg.Wait() 282 | panic("wait did not panic") 283 | } 284 | 285 | func TestGroupScheduleNoTasks(t *testing.T) { 286 | t.Parallel() 287 | 288 | // Ensure Groups that schedule no work do not hang, as was previously the 289 | // case between monitor and Wait. 290 | const n = 8 291 | var wg sync.WaitGroup 292 | wg.Add(n) 293 | defer wg.Wait() 294 | 295 | for i := 0; i < n; i++ { 296 | go func() { 297 | defer wg.Done() 298 | 299 | for j := 0; j < 1024; j++ { 300 | sg := schedgroup.New(context.Background()) 301 | if err := sg.Wait(); err != nil { 302 | panicf("failed to wait: %v", err) 303 | } 304 | } 305 | }() 306 | } 307 | } 308 | 309 | func TestGroupWaitAfterScheduled(t *testing.T) { 310 | t.Parallel() 311 | 312 | sg := schedgroup.New(context.Background()) 313 | 314 | // This job should be done before Wait can be called due to the signal send
316 | doneC := make(chan struct{}, 2) 317 | sg.Schedule(time.Now(), func() { 318 | doneC <- struct{}{} 319 | }) 320 | 321 | <-doneC 322 | time.Sleep(100 * time.Millisecond) 323 | 324 | if err := sg.Wait(); err != nil { 325 | t.Fatalf("failed to wait: %v", err) 326 | } 327 | } 328 | 329 | // This example demonstrates typical use of a Group. 330 | func ExampleGroup_wait() { 331 | // Create a Group which will not use a context for cancelation. 332 | sg := schedgroup.New(context.Background()) 333 | 334 | // Schedule tasks to run in 100, 200, and 300 milliseconds which will print 335 | // the number n to the screen. 336 | for i := 0; i < 3; i++ { 337 | n := i + 1 338 | sg.Delay(time.Duration(n)*100*time.Millisecond, func() { 339 | fmt.Println(n) 340 | }) 341 | } 342 | 343 | // Wait for all of the scheduled tasks to complete. 344 | if err := sg.Wait(); err != nil { 345 | log.Fatalf("failed to wait: %v", err) 346 | } 347 | 348 | // Output: 349 | // 1 350 | // 2 351 | // 3 352 | } 353 | 354 | // This example demonstrates how context cancelation/timeout affects a Group. 355 | func ExampleGroup_cancelation() { 356 | // Create a Group which will use a context's timeout for cancelation. 357 | ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) 358 | defer cancel() 359 | sg := schedgroup.New(ctx) 360 | 361 | // Schedule multiple tasks to occur at different times relative to a point 362 | // in time. 363 | start := time.Now() 364 | 365 | // Schedule a task which will not be run before a timeout occurs. 366 | sg.Schedule(start.Add(1*time.Second), func() { 367 | // This panic would normally crash the program, but this task will 368 | // never be run. 369 | panic("this shouldn't happen!") 370 | }) 371 | 372 | // Schedule tasks which will occur before timeout. Tasks which are scheduled 373 | // for an earlier time will occur first.
374 | sg.Schedule(start.Add(200*time.Millisecond), func() { 375 | fmt.Println("world") 376 | }) 377 | 378 | sg.Schedule(start.Add(100*time.Millisecond), func() { 379 | fmt.Println("hello") 380 | }) 381 | 382 | // Wait for task completion or timeout. 383 | switch err := sg.Wait(); err { 384 | case nil: 385 | panic("all tasks should not have completed!") 386 | case context.DeadlineExceeded: 387 | // No problem, we expected this to occur. 388 | fmt.Println("timeout!") 389 | default: 390 | log.Fatalf("failed to wait: %v", err) 391 | } 392 | 393 | // Output: 394 | // hello 395 | // world 396 | // timeout! 397 | } 398 | 399 | func panicf(format string, a ...interface{}) { 400 | panic(fmt.Sprintf(format, a...)) 401 | } 402 | -------------------------------------------------------------------------------- /heap_test.go: -------------------------------------------------------------------------------- 1 | package schedgroup 2 | 3 | import ( 4 | "container/heap" 5 | "testing" 6 | "time" 7 | 8 | "github.com/google/go-cmp/cmp" 9 | ) 10 | 11 | func Test_tasksHeap(t *testing.T) { 12 | newTask := func(d time.Duration) task { 13 | return task{ 14 | // Static start time for consistency, no call function. 15 | Deadline: time.Unix(0, 0).Add(d * time.Second), 16 | } 17 | } 18 | 19 | tests := []struct { 20 | name string 21 | in, want []task 22 | }{ 23 | { 24 | name: "ordered", 25 | in: []task{ 26 | newTask(1), 27 | newTask(2), 28 | newTask(3), 29 | }, 30 | want: []task{ 31 | newTask(1), 32 | newTask(2), 33 | newTask(3), 34 | }, 35 | }, 36 | { 37 | name: "unordered", 38 | in: []task{ 39 | newTask(3), 40 | newTask(1), 41 | newTask(2), 42 | }, 43 | want: []task{ 44 | newTask(1), 45 | newTask(2), 46 | newTask(3), 47 | }, 48 | }, 49 | } 50 | 51 | for _, tt := range tests { 52 | t.Run(tt.name, func(t *testing.T) { 53 | // Push and pop all tasks to verify the heap.Interface implementation. 
54 | var tasks tasks 55 | for _, v := range tt.in { 56 | heap.Push(&tasks, v) 57 | } 58 | 59 | var got []task 60 | for tasks.Len() > 0 { 61 | got = append(got, heap.Pop(&tasks).(task)) 62 | } 63 | 64 | if diff := cmp.Diff(tt.want, got); diff != "" { 65 | t.Fatalf("unexpected output tasks (-want +got):\n%s", diff) 66 | } 67 | }) 68 | } 69 | } 70 | --------------------------------------------------------------------------------