├── closer ├── closer.go └── main.go ├── dirsize ├── concurrent │ └── main.go └── single │ └── main.go ├── noThrottle └── main.go └── throttle ├── go.mod ├── go.sum ├── main.go └── throttle.go /closer/closer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | ) 7 | 8 | // Taken from https://github.com/dgraph-io/ristretto/blob/67fef616c676b6848c3fd026d16b8f7d7ef6ae87/z/z.go#L65-L134 9 | 10 | // Closer holds the two things we need to close a goroutine and wait for it to 11 | // finish: a chan to tell the goroutine to shut down, and a WaitGroup with 12 | // which to wait for it to finish shutting down. 13 | // 14 | // How to use Closer? 15 | // -> Create a new closer. 16 | // -> Pass the closer to the goroutine. 17 | // -> Inside the goroutine, check if the closer has been closed by HasBeenClosed() function. 18 | // -> If closed, mark closer as done using closer.Done() and return from the goroutine. 19 | // -> To stop the goroutine, use the closer.Signal() method. 20 | // -> To stop and wait for the goroutine to finish, call closer.SignalAndWait() method. 21 | type Closer struct { 22 | waiting sync.WaitGroup 23 | 24 | ctx context.Context 25 | cancel context.CancelFunc 26 | } 27 | 28 | // NewCloser constructs a new Closer, with an initial count on the WaitGroup. 29 | func NewCloser(initial int) *Closer { 30 | ret := &Closer{} 31 | ret.ctx, ret.cancel = context.WithCancel(context.Background()) 32 | ret.waiting.Add(initial) 33 | return ret 34 | } 35 | 36 | // AddRunning Add()'s delta to the WaitGroup. 37 | func (lc *Closer) AddRunning(delta int) { 38 | lc.waiting.Add(delta) 39 | } 40 | 41 | // Ctx can be used to get a context, which would automatically get cancelled when Signal is called. 42 | func (lc *Closer) Ctx() context.Context { 43 | return lc.ctx 44 | } 45 | 46 | // Signal signals the HasBeenClosed signal. 
47 | func (lc *Closer) Signal() { 48 | lc.cancel() 49 | } 50 | 51 | // HasBeenClosed gets signaled when Signal() is called. 52 | func (lc *Closer) HasBeenClosed() <-chan struct{} { 53 | return lc.ctx.Done() 54 | } 55 | 56 | // Done calls Done() on the WaitGroup. 57 | func (lc *Closer) Done() { 58 | lc.waiting.Done() 59 | } 60 | 61 | // Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done 62 | // calls to balance out.) 63 | func (lc *Closer) Wait() { 64 | lc.waiting.Wait() 65 | } 66 | 67 | // SignalAndWait calls Signal(), then Wait(). 68 | func (lc *Closer) SignalAndWait() { 69 | lc.Signal() 70 | lc.Wait() 71 | } 72 | -------------------------------------------------------------------------------- /closer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/signal" 7 | "syscall" 8 | "time" 9 | ) 10 | 11 | func doWork(cl *Closer) { 12 | defer cl.Done() 13 | 14 | i := 0 15 | for { 16 | select { 17 | case <-cl.HasBeenClosed(): 18 | // Close active connection. 19 | // Close open file descriptors. 20 | fmt.Println("Exiting doWork. Bye Bye!") 21 | return 22 | default: 23 | // Simulate some work. 24 | time.Sleep(time.Millisecond * 500) 25 | fmt.Printf("Processed Items = %+v\n", i) 26 | i++ 27 | } 28 | } 29 | } 30 | 31 | func main() { 32 | cl := NewCloser(1) 33 | 34 | go doWork(cl) 35 | 36 | // Gracefully exit on CTRL+C. 37 | // Use a buffered channel: signal.Notify sends without blocking, so an 38 | // unbuffered channel can miss a signal delivered before the receiver is ready (go vet flags this). 39 | sigCh := make(chan os.Signal, 1) 40 | signal.Notify(sigCh, os.Interrupt, syscall.SIGINT) 41 | go func() { 42 | <-sigCh 43 | // Signal the goroutine to stop. 44 | cl.Signal() 45 | }() 46 | 47 | // Wait for the goroutines to finish. 48 | cl.Wait() 49 | } 50 | -------------------------------------------------------------------------------- /dirsize/concurrent/main.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Alan A. A. Donovan & Brian W. Kernighan. 
2 | // License: https://creativecommons.org/licenses/by-nc-sa/4.0/ 3 | 4 | // See page 250. 5 | 6 | // This command computes the disk usage of the files in a directory. 7 | package main 8 | 9 | // This variant traverses all directories in parallel. 10 | // It uses a concurrency-limiting counting semaphore 11 | // to avoid opening too many files at once. 12 | 13 | import ( 14 | "flag" 15 | "fmt" 16 | "io/ioutil" 17 | "os" 18 | "path/filepath" 19 | "sync" 20 | "time" 21 | ) 22 | 23 | //!+ 24 | func main() { 25 | flag.Parse() 26 | // ...determine roots... 27 | 28 | // Determine the initial directories. 29 | roots := flag.Args() 30 | if len(roots) == 0 { 31 | roots = []string{"."} 32 | } 33 | 34 | now := time.Now() 35 | 36 | //!+ 37 | // Traverse each root of the file tree in parallel. 38 | fileSizes := make(chan int64) 39 | var n sync.WaitGroup 40 | for _, root := range roots { 41 | n.Add(1) 42 | go walkDir(root, &n, fileSizes) 43 | } 44 | go func() { 45 | n.Wait() 46 | close(fileSizes) 47 | }() 48 | //!- 49 | 50 | var nfiles, nbytes int64 51 | for size := range fileSizes { 52 | nfiles++ 53 | nbytes += size 54 | } 55 | 56 | fmt.Println("Total time taken: ", time.Since(now)) 57 | printDiskUsage(nfiles, nbytes) // final totals 58 | //!+ 59 | } 60 | 61 | //!- 62 | 63 | func printDiskUsage(nfiles, nbytes int64) { 64 | fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9) 65 | } 66 | 67 | // walkDir recursively walks the file tree rooted at dir 68 | // and sends the size of each found file on fileSizes. 
69 | //!+walkDir 70 | func walkDir(dir string, n *sync.WaitGroup, fileSizes chan<- int64) { 71 | time.Sleep(100 * time.Millisecond) 72 | defer n.Done() 73 | for _, entry := range dirents(dir) { 74 | if entry.IsDir() { 75 | n.Add(1) 76 | subdir := filepath.Join(dir, entry.Name()) 77 | go walkDir(subdir, n, fileSizes) 78 | } else { 79 | fileSizes <- entry.Size() 80 | } 81 | } 82 | } 83 | 84 | //!-walkDir 85 | 86 | //!+sema 87 | // sema is a counting semaphore for limiting concurrency in dirents. 88 | var sema = make(chan struct{}, 20) 89 | 90 | // dirents returns the entries of directory dir. 91 | func dirents(dir string) []os.FileInfo { 92 | sema <- struct{}{} // acquire token 93 | defer func() { <-sema }() // release token 94 | // ... 95 | //!-sema 96 | 97 | entries, err := ioutil.ReadDir(dir) 98 | if err != nil { 99 | fmt.Fprintf(os.Stderr, "du: %v\n", err) 100 | return nil 101 | } 102 | return entries 103 | } 104 | -------------------------------------------------------------------------------- /dirsize/single/main.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Alan A. A. Donovan & Brian W. Kernighan. 2 | // License: https://creativecommons.org/licenses/by-nc-sa/4.0/ 3 | 4 | // See page 247. 5 | 6 | //!+main 7 | 8 | // The command computes the disk usage of the files in a directory. 9 | package main 10 | 11 | import ( 12 | "flag" 13 | "fmt" 14 | "io/ioutil" 15 | "os" 16 | "path/filepath" 17 | "time" 18 | ) 19 | 20 | func main() { 21 | flag.Parse() 22 | // Determine the initial directories. 
23 | roots := flag.Args() 24 | if len(roots) == 0 { 25 | roots = []string{"."} 26 | } 27 | 28 | now := time.Now() 29 | 30 | var nfiles, nbytes int64 31 | for _, root := range roots { 32 | nf, nb := walkDir(root) 33 | nfiles += nf 34 | nbytes += nb 35 | } 36 | 37 | fmt.Println("Total time taken: ", time.Since(now)) 38 | printDiskUsage(nfiles, nbytes) 39 | } 40 | 41 | func printDiskUsage(nfiles, nbytes int64) { 42 | fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9) 43 | } 44 | 45 | //!-main 46 | 47 | //!+walkDir 48 | 49 | // walkDir recursively walks the file tree rooted at dir 50 | // and returns the size of each found file and the number of files. 51 | func walkDir(dir string) (numFiles int64, size int64) { 52 | time.Sleep(100 * time.Millisecond) 53 | for _, entry := range dirents(dir) { 54 | if entry.IsDir() { 55 | subdir := filepath.Join(dir, entry.Name()) 56 | nf, fs := walkDir(subdir) 57 | numFiles += nf 58 | size += fs 59 | } else { 60 | numFiles++ 61 | size += entry.Size() 62 | } 63 | } 64 | return 65 | } 66 | 67 | // dirents returns the entries of directory dir. 68 | func dirents(dir string) []os.FileInfo { 69 | entries, err := ioutil.ReadDir(dir) 70 | if err != nil { 71 | fmt.Fprintf(os.Stderr, "du1: %v\n", err) 72 | return nil 73 | } 74 | return entries 75 | } 76 | -------------------------------------------------------------------------------- /noThrottle/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "time" 7 | ) 8 | 9 | func doWork() { 10 | // Simulate some work. 
11 | time.Sleep(time.Second) 12 | } 13 | 14 | func printStats() { 15 | for { 16 | time.Sleep(time.Millisecond * 500) 17 | fmt.Printf("Active number of goroutines = %+v\n", runtime.NumGoroutine()) 18 | } 19 | } 20 | 21 | func main() { 22 | go printStats() 23 | 24 | for i := 0; i < 100; i++ { 25 | fmt.Printf("Processing item number = %+v\n", i) 26 | go doWork() 27 | } 28 | 29 | time.Sleep(time.Second * 2) 30 | } 31 | -------------------------------------------------------------------------------- /throttle/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/jarifibrahim/workshop 2 | 3 | go 1.15 4 | -------------------------------------------------------------------------------- /throttle/go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 2 | github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= 3 | github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= 4 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 5 | github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 6 | github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 7 | github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 8 | github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 9 | github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= 10 | github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 11 | github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 12 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 14 | github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= 15 | github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= 16 | github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= 17 | github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= 18 | github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 19 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 20 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 21 | github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= 22 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 23 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 24 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 25 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 26 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 27 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 28 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 29 | github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 30 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 31 | github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 32 | github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 33 | github.com/pkg/errors v0.8.1 
h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 34 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 35 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 36 | github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 37 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 38 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 39 | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 40 | github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 41 | github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 42 | github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 43 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 44 | github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 45 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 46 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 47 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 48 | github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= 49 | github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= 50 | golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 51 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 52 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= 53 | golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 54 | golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 55 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 56 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= 57 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 58 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 59 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 60 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 61 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 62 | -------------------------------------------------------------------------------- /throttle/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "time" 7 | ) 8 | 9 | func doWork(th *Throttle) { 10 | // Mark this job as done. 11 | defer th.Done(nil) 12 | 13 | // Simulate some work. 14 | time.Sleep(time.Second) 15 | } 16 | 17 | func printStats() { 18 | for { 19 | time.Sleep(time.Millisecond * 500) 20 | fmt.Printf("Active number of goroutines = %+v\n", runtime.NumGoroutine()) 21 | } 22 | } 23 | 24 | func main() { 25 | go printStats() 26 | 27 | // Create a new throttle variable. 28 | th := NewThrottle(3) 29 | 30 | for i := 0; i < 100; i++ { 31 | // Gate keeper. Do not let more than 3 goroutines to start. 32 | th.Do() 33 | 34 | fmt.Printf("Processing item number = %+v\n", i) 35 | go doWork(th) 36 | } 37 | 38 | // Wait for all the jobs to finish. 
39 | th.Finish() 40 | } 41 | -------------------------------------------------------------------------------- /throttle/throttle.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "sync" 4 | 5 | // Taken from https://github.com/dgraph-io/badger/blob/2d88aea98099cc021fe39a783629759c90d9e139/y/y.go#L195-L263 6 | 7 | // Throttle allows a limited number of workers to run at a time. It also 8 | // provides a mechanism to check for errors encountered by workers and wait for 9 | // them to finish. 10 | // 11 | // How to use Throttle? 12 | // -> Call throttle.Do before starting the goroutine. 13 | // -> Call throttle.Done in the goroutine. 14 | // -> Call throttle.Finish to wait for all routines to finish. 15 | type Throttle struct { 16 | once sync.Once 17 | wg sync.WaitGroup 18 | ch chan struct{} 19 | errCh chan error 20 | finishErr error 21 | } 22 | 23 | // NewThrottle creates a new throttle with a max number of workers. 24 | func NewThrottle(max int) *Throttle { 25 | return &Throttle{ 26 | ch: make(chan struct{}, max), 27 | errCh: make(chan error, max), 28 | } 29 | } 30 | 31 | // Do should be called by workers before they start working. It blocks if there 32 | // are already maximum number of workers working. If it detects an error from 33 | // previously Done workers, it would return it. 34 | func (t *Throttle) Do() error { 35 | for { 36 | select { 37 | case t.ch <- struct{}{}: 38 | t.wg.Add(1) 39 | return nil 40 | case err := <-t.errCh: 41 | if err != nil { 42 | return err 43 | } 44 | } 45 | } 46 | } 47 | 48 | // Done should be called by workers when they finish working. They can also 49 | // pass the error status of work done. 50 | func (t *Throttle) Done(err error) { 51 | if err != nil { 52 | t.errCh <- err 53 | } 54 | select { 55 | case <-t.ch: 56 | default: 57 | panic("Throttle Do Done mismatch") 58 | } 59 | t.wg.Done() 60 | } 61 | 62 | // Finish waits until all workers have finished working. 
It returns any error passed by Done. 63 | // If Finish is called multiple times, it waits for the workers to finish only once (on the first call). 64 | // Subsequent calls return the same error as was found on the first call. 65 | func (t *Throttle) Finish() error { 66 | t.once.Do(func() { 67 | t.wg.Wait() 68 | close(t.ch) 69 | close(t.errCh) 70 | for err := range t.errCh { 71 | if err != nil { 72 | t.finishErr = err 73 | return 74 | } 75 | } 76 | }) 77 | 78 | return t.finishErr 79 | } 80 | --------------------------------------------------------------------------------