├── .travis.yml
├── LICENSE
├── README.md
├── examples
│   ├── first.go
│   └── second.go
├── grpool.go
└── grpool_test.go

/.travis.yml:
--------------------------------------------------------------------------------
language: go

go:
- 1.5
- 1.6
- 1.7

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2017 Ivan Pusic

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# grpool
[![Build Status](https://travis-ci.org/ivpusic/grpool.svg?branch=master)](https://travis-ci.org/ivpusic/grpool)

Lightweight goroutine pool.

Clients submit jobs to a job queue. The dispatcher takes each job and hands it to the first available worker. When a worker finishes its job, it returns itself to the worker pool.

The number of workers and the size of the job queue are both configurable.

## Docs
https://godoc.org/github.com/ivpusic/grpool

## Installation
```
go get github.com/ivpusic/grpool
```

## Simple example
```Go
package main

import (
    "fmt"
    "time"

    "github.com/ivpusic/grpool"
)

func main() {
    // number of workers, and size of job queue
    pool := grpool.NewPool(100, 50)

    // release resources used by pool
    defer pool.Release()

    // submit one or more jobs to pool
    for i := 0; i < 10; i++ {
        count := i

        pool.JobQueue <- func() {
            fmt.Printf("I am worker! Number %d\n", count)
        }
    }

    // dummy wait until jobs are finished
    time.Sleep(1 * time.Second)
}
```
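`JobQueue` is a plain buffered channel whose capacity is the second argument to `NewPool`, so a send blocks once the queue is full. If blocking is not acceptable, a `select` with a `default` branch gives a non-blocking submit. This is a minimal sketch, not one of the library's own examples:

```Go
package main

import (
    "fmt"

    "github.com/ivpusic/grpool"
)

func main() {
    pool := grpool.NewPool(5, 10)
    defer pool.Release()

    job := func() { fmt.Println("hello from the pool") }

    // JobQueue is a buffered channel, so we can try a non-blocking send.
    select {
    case pool.JobQueue <- job:
        // job accepted; in a real program, wait for completion as in the examples below
    default:
        // queue is full: drop the job, retry later, or fall back to a blocking send
        fmt.Println("job queue full, job not submitted")
    }
}
```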
## Example with waiting jobs to finish
```Go
package main

import (
    "fmt"

    "github.com/ivpusic/grpool"
)

func main() {
    // number of workers, and size of job queue
    pool := grpool.NewPool(100, 50)
    defer pool.Release()

    // how many jobs we should wait for
    pool.WaitCount(10)

    // submit one or more jobs to pool
    for i := 0; i < 10; i++ {
        count := i

        pool.JobQueue <- func() {
            // mark the job as done, so we know how many jobs have finished
            defer pool.JobDone()

            fmt.Printf("hello %d\n", count)
        }
    }

    // wait until JobDone has been called for all jobs
    pool.WaitAll()
}
```

## License
*MIT*

--------------------------------------------------------------------------------
/examples/first.go:
--------------------------------------------------------------------------------
package grpool

import (
    "fmt"
    "runtime"
    "time"

    "github.com/ivpusic/grpool"
)

func first() {
    numCPUs := runtime.NumCPU()
    runtime.GOMAXPROCS(numCPUs)

    // number of workers, and size of job queue
    pool := grpool.NewPool(100, 50)

    // release resources used by pool
    defer pool.Release()

    // submit one or more jobs to pool
    for i := 0; i < 10; i++ {
        count := i

        pool.JobQueue <- func() {
            fmt.Printf("I am worker! Number %d\n", count)
        }
    }

    // dummy wait until jobs are finished
    time.Sleep(1 * time.Second)
}

--------------------------------------------------------------------------------
/examples/second.go:
--------------------------------------------------------------------------------
package grpool

import (
    "fmt"
    "runtime"

    "github.com/ivpusic/grpool"
)

func second() {
    numCPUs := runtime.NumCPU()
    runtime.GOMAXPROCS(numCPUs)

    // number of workers, and size of job queue
    pool := grpool.NewPool(100, 50)
    defer pool.Release()

    // how many jobs we should wait for
    pool.WaitCount(10)

    // submit one or more jobs to pool
    for i := 0; i < 10; i++ {
        count := i

        pool.JobQueue <- func() {
            // mark the job as done, so we know how many jobs have finished
            defer pool.JobDone()

            fmt.Printf("hello %d\n", count)
        }
    }

    // wait until JobDone has been called for all jobs
    pool.WaitAll()
}

--------------------------------------------------------------------------------
/grpool.go:
--------------------------------------------------------------------------------
package grpool

import "sync"

// worker is a goroutine instance which can accept client jobs.
type worker struct {
    workerPool chan *worker
    jobChannel chan Job
    stop       chan struct{}
}

func (w *worker) start() {
    go func() {
        var job Job
        for {
            // worker is free, add it back to the pool
            w.workerPool <- w

            select {
            case job = <-w.jobChannel:
                job()
            case <-w.stop:
                w.stop <- struct{}{}
                return
            }
        }
    }()
}

func newWorker(pool chan *worker) *worker {
    return &worker{
        workerPool: pool,
        jobChannel: make(chan Job),
        stop:       make(chan struct{}),
    }
}

// dispatcher accepts jobs from clients and delivers each job to the first free worker.
type dispatcher struct {
    workerPool chan *worker
    jobQueue   chan Job
    stop       chan struct{}
}

func (d *dispatcher) dispatch() {
    for {
        select {
        case job := <-d.jobQueue:
            // wait for a free worker, then hand the job over
            worker := <-d.workerPool
            worker.jobChannel <- job
        case <-d.stop:
            // stop every worker and wait for its acknowledgement
            for i := 0; i < cap(d.workerPool); i++ {
                worker := <-d.workerPool

                worker.stop <- struct{}{}
                <-worker.stop
            }

            d.stop <- struct{}{}
            return
        }
    }
}

func newDispatcher(workerPool chan *worker, jobQueue chan Job) *dispatcher {
    d := &dispatcher{
        workerPool: workerPool,
        jobQueue:   jobQueue,
        stop:       make(chan struct{}),
    }

    for i := 0; i < cap(d.workerPool); i++ {
        worker := newWorker(d.workerPool)
        worker.start()
    }

    go d.dispatch()
    return d
}

// Job represents a user request: a function which should be executed by some worker.
type Job func()

// Pool is a pool of goroutine workers fed from a shared job queue.
type Pool struct {
    JobQueue   chan Job
    dispatcher *dispatcher
    wg         sync.WaitGroup
}

// NewPool makes a pool of goroutine workers.
// numWorkers - how many workers will be created for this pool
// jobQueueLen - how many jobs the queue can hold before sends on JobQueue block
//
// The returned Pool contains the JobQueue reference, which you can use to send jobs to the pool.
func NewPool(numWorkers int, jobQueueLen int) *Pool {
    jobQueue := make(chan Job, jobQueueLen)
    workerPool := make(chan *worker, numWorkers)

    pool := &Pool{
        JobQueue:   jobQueue,
        dispatcher: newDispatcher(workerPool, jobQueue),
    }

    return pool
}

// JobDone marks one job as finished. If you are using WaitAll, call this
// method every time a job is done.
//
// If you are not using WaitAll, we assume you have your own way of synchronizing.
func (p *Pool) JobDone() {
    p.wg.Done()
}

// WaitCount sets how many jobs WaitAll should wait for.
// It uses sync.WaitGroup Add/Done/Wait under the hood.
func (p *Pool) WaitCount(count int) {
    p.wg.Add(count)
}

// WaitAll blocks until JobDone has been called for every job counted by WaitCount.
func (p *Pool) WaitAll() {
    p.wg.Wait()
}

// Release stops all workers and releases resources used by the pool.
func (p *Pool) Release() {
    p.dispatcher.stop <- struct{}{}
    <-p.dispatcher.stop
}
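Because `Job` is simply `func()`, a job can report results through anything its closure captures, such as a results channel. The sketch below builds only on the API above and is not part of the package itself:

```Go
package main

import (
    "fmt"

    "github.com/ivpusic/grpool"
)

func main() {
    pool := grpool.NewPool(4, 10)
    defer pool.Release()

    results := make(chan int, 10)

    for i := 0; i < 10; i++ {
        n := i
        pool.JobQueue <- func() {
            // each job reports its result on the shared channel
            results <- n * n
        }
    }

    // collect exactly as many results as jobs submitted
    for i := 0; i < 10; i++ {
        fmt.Println(<-results)
    }
}
```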
--------------------------------------------------------------------------------
/grpool_test.go:
--------------------------------------------------------------------------------
package grpool

import (
    "io/ioutil"
    "log"
    "runtime"
    "sync/atomic"
    "testing"

    "github.com/stretchr/testify/assert"
)

func init() {
    println("setting GOMAXPROCS to NumCPU")
    numCPUs := runtime.NumCPU()
    runtime.GOMAXPROCS(numCPUs)
}

func TestNewWorker(t *testing.T) {
    pool := make(chan *worker)
    worker := newWorker(pool)
    worker.start()
    assert.NotNil(t, worker)

    worker = <-pool
    assert.NotNil(t, worker, "Worker should register itself to the pool")

    called := false
    done := make(chan bool)

    job := func() {
        called = true
        done <- true
    }

    worker.jobChannel <- job
    <-done
    assert.Equal(t, true, called)
}

func TestNewPool(t *testing.T) {
    pool := NewPool(1000, 10000)
    defer pool.Release()

    iterations := 1000000
    pool.WaitCount(iterations)
    var counter uint64 = 0

    for i := 0; i < iterations; i++ {
        arg := uint64(1)

        job := func() {
            defer pool.JobDone()
            atomic.AddUint64(&counter, arg)
            assert.Equal(t, uint64(1), arg)
        }

        pool.JobQueue <- job
    }

    pool.WaitAll()

    counterFinal := atomic.LoadUint64(&counter)
    assert.Equal(t, uint64(iterations), counterFinal)
}

func TestRelease(t *testing.T) {
    grNum := runtime.NumGoroutine()
    pool := NewPool(5, 10)
    defer func() {
        pool.Release()

        // Release returns only after every worker has acknowledged the stop signal
        assert.Equal(t, grNum, runtime.NumGoroutine(), "All goroutines should be released after Release() call")
    }()

    pool.WaitCount(1000)

    for i := 0; i < 1000; i++ {
        job := func() {
            defer pool.JobDone()
        }

        pool.JobQueue <- job
    }

    pool.WaitAll()
}

func BenchmarkPool(b *testing.B) {
    // Testing with just 1 worker goroutine
    // to benchmark the non-parallel part of the code
    pool := NewPool(1, 10)
    defer pool.Release()

    log.SetOutput(ioutil.Discard)

    for n := 0; n < b.N; n++ {
        pool.JobQueue <- func() {
            log.Printf("I am worker! Number %d\n", n)
        }
    }
}

--------------------------------------------------------------------------------
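The benchmark above drives a single worker from one goroutine to measure the non-parallel path. To also measure concurrent submission, a parallel variant could use the standard `testing.B.RunParallel` helper. The following is a sketch under that assumption, not part of the repository's test suite; it would live in its own file inside the `grpool` package:

```Go
package grpool

import (
    "runtime"
    "testing"
)

func BenchmarkPoolParallel(b *testing.B) {
    pool := NewPool(runtime.NumCPU(), 100)
    defer pool.Release()

    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            // submit a job and wait for it, so the benchmark measures
            // dispatch + execution rather than just queueing
            done := make(chan struct{})
            pool.JobQueue <- func() { close(done) }
            <-done
        }
    })
}
```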