├── .gitignore
├── README.md
├── patterns
├── bridgechannel
│ └── main.go
├── cancellation
│ └── main.go
├── confinement
│ ├── adhoc
│ │ └── main.go
│ └── lexical
│ │ ├── data
│ │ └── main.go
│ │ └── main.go
├── contextpackage
│ └── cancel
│ │ └── main.go
├── errorhandler
│ ├── main.go
│ └── returnerror
│ │ └── main.go
├── fanoutfanin
│ ├── main.go
│ └── samples
│ │ ├── 001
│ │ └── main.go
│ │ ├── 002
│ │ └── main.go
│ │ └── 003
│ │ └── main.go
├── heartbeats
│ └── main.go
├── orchannel
│ └── main.go
├── ordonechannel
│ └── main.go
├── pipelines
│ ├── channels
│ │ └── main.go
│ └── repeat
│ │ └── main.go
├── queuing
│ └── main.go
├── replicatedrequests
│ └── main.go
└── teechannel
│ └── main.go
└── sync
├── broadcast
└── main.go
├── cond
└── main.go
├── mutex
├── README.md
├── c
│ ├── Makefile
│ ├── README.md
│ ├── c11threads.h
│ ├── mutex.c
│ └── test.c
├── main.go
└── rust
│ ├── dataracefree
│ ├── Cargo.lock
│ ├── Cargo.toml
│ └── src
│ │ └── main.rs
│ └── mutex
│ ├── Cargo.lock
│ ├── Cargo.toml
│ └── src
│ └── main.rs
├── once
└── main.go
├── pool
├── main.go
├── network.go
└── network_test.go
└── rwmutex
└── main.go
/.gitignore:
--------------------------------------------------------------------------------
1 | *.o
2 | mutex.out
3 | target/
4 | .vscode
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Go Concurrency Guide
3 |
4 | This guide is built on top of some examples from the books `Concurrency in Go` and `The Go Programming Language`
5 |
6 | - [Race Condition and Data Race](#race-condition-and-data-race)
7 | - [Memory Access Synchronization](#memory-access-synchronization)
8 | - [Mutex](#mutex)
9 | - [WaitGroup](#waitgroup)
10 | - [RWMutex](#rwmutex)
11 | - [Cond](#cond)
12 | - [Pool](#pool)
13 | - [Deadlocks, Livelocks and Starvation](#deadlocks-livelocks-and-starvation)
14 | - [Deadlocks](#deadlocks)
15 | - [Livelocks](#livelocks)
16 | - [Starvation](#starvation)
17 | - [Channels](#channels)
18 | - [Patterns](#patterns)
19 | - [Confinement](#confinement)
20 | - [Cancellation](#cancellation)
21 | - [OR Channel](#or-channel)
22 | - [Error Handling](#error-handling)
23 | - [Pipelines](#pipelines)
24 | - [Fan-in and Fan-out](#fan-in-and-fan-out)
25 | - [Or done channel](#or-done-channel)
26 | - [Tee channel](#tee-channel)
27 | - [Bridge channel](#bridge-channel)
28 | - [Queuing](#queuing)
29 | - [Context package](#context-package)
30 | - [HeartBeats](#heartbeats)
31 | - [Replicated Requests](#replicated-requests)
32 | - [Scheduler Runtime](#scheduler-runtime)
33 | - [References](#references)
34 |
35 |
36 | ## Race Condition and Data Race
37 |
38 | Race conditions occur when two or more operations must execute in the correct order, but the program has not been written so that this order is guaranteed to be maintained.
39 |
40 | A data race is when one concurrent operation attempts to read a variable while, at some undetermined time, another concurrent operation is attempting to write to the same variable. The `main` func runs in the main goroutine.
41 |
42 | ```go
43 | func main() {
44 | var data int
45 | go func() {
46 | data++
47 | }()
48 |
49 | if data == 0 {
50 | fmt.Printf("the value is %d", data)
51 | }
52 | }
53 | ```
54 |
55 |
56 | ## Memory Access Synchronization
57 |
58 | The sync package contains the concurrency primitives that are most useful for low-level memory access synchronization.
59 | A critical section is the place in your code that has access to shared memory
60 |
61 |
62 | ### Mutex
63 |
64 | Mutex stands for “mutual exclusion” and is a way to protect critical sections of your program.
65 |
66 | ```go
67 | type Counter struct {
68 | mu sync.Mutex
69 | value int
70 | }
71 |
72 | func (c *Counter) Increment() {
73 | c.mu.Lock()
74 | defer c.mu.Unlock()
75 | c.value++
76 | }
77 | ```
78 |
79 |
80 | ### WaitGroup
81 |
82 | Call Add to register a group of goroutines to wait for, and Wait to block until they have all called Done
83 |
84 | ```go
85 | var wg sync.WaitGroup
86 | for _, salutation := range []string{"hello", "greetings", "good day"} {
87 | wg.Add(1)
88 | go func(salutation string) {
89 | defer wg.Done()
90 | fmt.Println(salutation)
91 | }(salutation)
92 | }
93 | wg.Wait()
94 | ```
95 |
96 |
97 | ### RWMutex
98 |
99 | RWMutex provides more fine-grained memory control, making it possible to request a read-only lock
100 |
101 | ```go
102 | producer := func(wg *sync.WaitGroup, l sync.Locker) {
103 | defer wg.Done()
104 | for i := 5; i > 0; i-- {
105 | l.Lock()
106 | l.Unlock()
107 | time.Sleep(1)
108 | }
109 | }
110 |
111 | observer := func(wg *sync.WaitGroup, l sync.Locker) {
112 | defer wg.Done()
113 | l.Lock()
114 | defer l.Unlock()
115 | }
116 |
117 | test := func(count int, mutex, rwMutex sync.Locker) time.Duration {
118 | var wg sync.WaitGroup
119 | wg.Add(count+1)
120 | beginTestTime := time.Now()
121 | go producer(&wg, mutex)
122 | for i := count; i > 0; i-- {
123 | go observer(&wg, rwMutex)
124 | }
125 |
126 | wg.Wait()
127 | return time.Since(beginTestTime)
128 | }
129 |
130 | tw := tabwriter.NewWriter(os.Stdout, 0, 1, 2, ' ', 0)
131 | defer tw.Flush()
132 |
133 | var m sync.RWMutex
134 | fmt.Fprintf(tw, "Readers\tRWMutex\tMutex\n")
135 |
136 | for i := 0; i < 20; i++ {
137 | count := int(math.Pow(2, float64(i)))
138 | fmt.Fprintf(
139 | tw,
140 | "%d\t%v\t%v\n",
141 | count,
142 | test(count, &m, m.RLocker()),
143 | test(count, &m, &m),
144 | )
145 | }
146 | ```
147 |
148 |
149 | ### Cond
150 |
151 | It would be better if there were some kind of way for a goroutine to efficiently sleep until it was signaled to wake and check its condition. This is exactly what the Cond type does for us.
152 |
153 | Broadcast is the Cond method that notifies goroutines blocked on a Wait call that the condition has been triggered.
154 |
155 | ```go
156 | type Button struct {
157 | Clicked *sync.Cond
158 | }
159 |
160 | func main() {
161 | button := Button{
162 | Clicked: sync.NewCond(&sync.Mutex{}),
163 | }
164 |
165 | // running on goroutine every function that passed/registered
166 | // and wait, not exit until that goroutine is confirmed to be running
167 | subscribe := func(c *sync.Cond, param string, fn func(s string)) {
168 | var goroutineRunning sync.WaitGroup
169 | goroutineRunning.Add(1)
170 |
171 | go func(p string) {
172 | goroutineRunning.Done()
173 | c.L.Lock() // critical section
174 | defer c.L.Unlock()
175 |
176 | fmt.Println("Registered and wait ... ")
177 | c.Wait()
178 |
179 | fn(p)
180 | }(param)
181 |
182 | goroutineRunning.Wait()
183 | }
184 |
185 | var clickRegistered sync.WaitGroup
186 |
187 | for _, v := range []string{
188 | "Maximizing window.",
189 | "Displaying annoying dialog box!",
190 | "Mouse clicked."} {
191 |
192 | clickRegistered.Add(1)
193 |
194 | subscribe(button.Clicked, v, func(s string) {
195 | fmt.Println(s)
196 | clickRegistered.Done()
197 | })
198 | }
199 |
200 | button.Clicked.Broadcast()
201 |
202 | clickRegistered.Wait()
203 | }
204 | ```
205 | [cond samples](https://github.com/luk4z7/go-concurrency-guide/blob/main/sync/cond/main.go)
206 |
207 |
208 | ### Once
209 |
210 | Ensuring that only one execution will be carried out even among several goroutines
211 |
212 | ```go
213 | var count int
214 |
215 | increment := func() {
216 | count++
217 | }
218 |
219 | var once sync.Once
220 |
221 | var increments sync.WaitGroup
222 | increments.Add(100)
223 |
224 | for i := 0; i < 100; i++ {
225 | go func() {
226 | defer increments.Done()
227 | once.Do(increment)
228 | }()
229 | }
230 |
231 | increments.Wait()
232 | fmt.Printf("Count is %d\n", count)
233 | ```
234 |
235 |
236 | ### Pool
237 |
238 | Manages a pool of reusable instances, limiting how many need to be created
239 |
240 | ```go
241 | package main
242 |
243 | import (
244 | "fmt"
245 | "sync"
246 | )
247 |
248 | func main() {
249 | myPool := &sync.Pool{
250 | New: func() interface{} {
251 | fmt.Println("Creating new instance.")
252 |
253 | return struct{}{}
254 | },
255 | }
256 |
257 | // Get call New function defined in pool if there is no instance started
258 | myPool.Get()
259 | instance := myPool.Get()
260 | fmt.Println("instance", instance)
261 |
262 | // here we put a previously retrieved instance back into the pool,
263 | // this increases the number of instances available to one
264 | myPool.Put(instance)
265 |
266 | // when this call is executed, we will reuse the
267 | // previously allocated instance and put it back in the pool
268 | myPool.Get()
269 |
270 | var numCalcsCreated int
271 | calcPool := &sync.Pool{
272 | New: func() interface{} {
273 | fmt.Println("new calc pool")
274 |
275 | numCalcsCreated += 1
276 | mem := make([]byte, 1024)
277 |
278 | return &mem
279 | },
280 | }
281 |
282 | fmt.Println("calcPool.New", calcPool.New())
283 |
284 | calcPool.Put(calcPool.New())
285 | calcPool.Put(calcPool.New())
286 | calcPool.Put(calcPool.New())
287 | calcPool.Put(calcPool.New())
288 |
289 | calcPool.Get()
290 |
291 | const numWorkers = 1024 * 1024
292 | var wg sync.WaitGroup
293 | wg.Add(numWorkers)
294 |
295 | for i := numWorkers; i > 0; i-- {
296 | go func() {
297 | defer wg.Done()
298 |
299 | mem := calcPool.Get().(*[]byte)
300 | defer calcPool.Put(mem)
301 |
302 | // Assume something interesting, but quick is being done with
303 | // this memory.
304 | }()
305 | }
306 |
307 | wg.Wait()
308 | fmt.Printf("%d calculators were created.", numCalcsCreated)
309 | }
310 | ```
311 | [sync samples](https://github.com/luk4z7/go-concurrency-guide/tree/main/sync)
312 |
313 |
314 |
315 | ## Deadlocks, Livelocks, and Starvation
316 |
317 | ### Deadlocks
318 |
319 | A deadlocked program is one in which all concurrent processes are waiting on one another.
320 |
321 | ```go
322 | package main
323 |
324 | import (
325 | "fmt"
326 | "sync"
327 | "time"
328 | )
329 |
330 | type value struct {
331 | mu sync.Mutex
332 | value int
333 | }
334 |
335 | func main() {
336 | var wg sync.WaitGroup
337 | printSum := func(v1, v2 *value) {
338 | defer wg.Done()
339 | v1.mu.Lock()
340 | defer v1.mu.Unlock()
341 |
342 | // deadlock
343 | time.Sleep(2 * time.Second)
344 | v2.mu.Lock()
345 | defer v2.mu.Unlock()
346 |
347 | fmt.Printf("sum=%v\n", v1.value+v2.value)
348 | }
349 |
350 | var a, b value
351 | wg.Add(2)
352 | go printSum(&a, &b)
353 | go printSum(&b, &a)
354 |
355 | wg.Wait()
356 | }
357 | ```
358 |
359 | ```go
360 | package main
361 |
362 | func main() {
363 | message := make(chan string)
364 |
365 | // A goroutine ( main goroutine ) trying to send message to channel
366 | message <- "message" // fatal error: all goroutines are asleep - deadlock!
367 | }
368 | ```
369 |
370 | ```go
371 | package main
372 |
373 | func main() {
374 | message := make(chan string)
375 |
376 | // A goroutine ( main goroutine ) trying to receive message from channel
377 | <-message // fatal error: all goroutines are asleep - deadlock!
378 | }
379 | ```
380 |
381 |
382 | ### Livelocks
383 |
384 | Livelocks are programs that are actively performing concurrent operations, but these operations do nothing to move the state of the program forward.
385 |
386 | ```go
387 | package main
388 |
389 | import (
390 | "bytes"
391 | "fmt"
392 | "sync"
393 | "sync/atomic"
394 | "time"
395 | )
396 |
397 | func main() {
398 | cadence := sync.NewCond(&sync.Mutex{})
399 | go func() {
400 | for range time.Tick(1 * time.Millisecond) {
401 | cadence.Broadcast()
402 | }
403 | }()
404 |
405 | takeStep := func() {
406 | cadence.L.Lock()
407 | cadence.Wait()
408 | cadence.L.Unlock()
409 | }
410 |
411 | tryDir := func(dirName string, dir *int32, out *bytes.Buffer) bool {
412 | fmt.Fprintf(out, " %v", dirName)
413 | atomic.AddInt32(dir, 1)
414 | takeStep()
415 |
416 | if atomic.LoadInt32(dir) == 1 {
417 | fmt.Fprint(out, " . Success!")
418 |
419 | return true
420 | }
421 |
422 | takeStep()
423 | atomic.AddInt32(dir, -1)
424 |
425 | return false
426 | }
427 |
428 | var left, right int32
429 | tryLeft := func(out *bytes.Buffer) bool {
430 | return tryDir("left", &left, out)
431 | }
432 |
433 | tryRight := func(out *bytes.Buffer) bool {
434 | return tryDir("right", &right, out)
435 | }
436 |
437 | walk := func(walking *sync.WaitGroup, name string) {
438 | var out bytes.Buffer
439 | defer func() {
440 | fmt.Println(out.String())
441 | }()
442 | defer walking.Done()
443 |
444 | fmt.Fprintf(&out, "%v is trying to scoot:", name)
445 | for i := 0; i < 5; i++ {
446 | if tryLeft(&out) || tryRight(&out) {
447 | return
448 | }
449 | }
450 |
451 | fmt.Fprintf(&out, "\n%v tosses her hands up in exasperation", name)
452 | }
453 |
454 | var peopleInHallway sync.WaitGroup
455 | peopleInHallway.Add(2)
456 |
457 | go walk(&peopleInHallway, "Alice")
458 | go walk(&peopleInHallway, "Barbara")
459 | peopleInHallway.Wait()
460 | }
461 | ```
462 |
463 |
464 | ### Starvation
465 |
466 | Starvation is any situation where a concurrent process cannot get all the resources it needs to perform work.
467 |
468 | ```go
469 | package main
470 |
471 | import (
472 | "fmt"
473 | "sync"
474 | "time"
475 | )
476 |
477 | func main() {
478 | fmt.Println("vim-go")
479 |
480 | var wg sync.WaitGroup
481 | var sharedLock sync.Mutex
482 | const runtime = 1 * time.Second
483 |
484 | greedyWorker := func() {
485 | defer wg.Done()
486 |
487 | var count int
488 | for begin := time.Now(); time.Since(begin) <= runtime; {
489 | sharedLock.Lock()
490 | time.Sleep(3 * time.Nanosecond)
491 | sharedLock.Unlock()
492 | count++
493 | }
494 |
495 | fmt.Printf("Greedy worker was able to execute %v work loops\n", count)
496 | }
497 |
498 | politeWorker := func() {
499 | defer wg.Done()
500 |
501 | var count int
502 | for begin := time.Now(); time.Since(begin) <= runtime; {
503 | sharedLock.Lock()
504 | time.Sleep(1 * time.Nanosecond)
505 | sharedLock.Unlock()
506 |
507 | sharedLock.Lock()
508 | time.Sleep(1 * time.Nanosecond)
509 | sharedLock.Unlock()
510 |
511 | sharedLock.Lock()
512 | time.Sleep(1 * time.Nanosecond)
513 | sharedLock.Unlock()
514 |
515 | count++
516 | }
517 |
518 | fmt.Printf("Polite worker was able to execute %v work loops \n", count)
519 | }
520 |
521 | wg.Add(2)
522 | go greedyWorker()
523 | go politeWorker()
524 | wg.Wait()
525 | }
526 | ```
527 |
528 |
529 | ## Channels
530 |
531 | Channels are one of the synchronization primitives in Go derived from Hoare’s CSP. While they can be used to synchronize access of the memory, they are best used to communicate information between goroutines, default value for channel: `nil`.
532 |
533 | To declare a channel to read and send
534 | ```go
535 | stream := make(chan interface{})
536 | ```
537 |
538 | To declare unidirectional channel that only can read
539 | ```go
540 | stream := make(<-chan interface{})
541 | ```
542 |
543 | To declare unidirectional channel that only can send
544 | ```go
545 | stream := make(chan<- interface{})
546 | ```
547 |
548 | It is not common to see unidirectional channels instantiated directly; they usually appear as function parameters, because Go converts bidirectional channels to unidirectional ones implicitly
549 | ```go
550 | var receiveChan <-chan interface{}
551 | var sendChan chan<- interface{}
552 | dataStream := make(chan interface{})
553 |
554 | // Valid statements:
555 | receiveChan = dataStream
556 | sendChan = dataStream
557 | ```
558 |
559 | To receive
560 | ```go
561 | <-stream
562 | ```
563 |
564 | to send
565 | ```go
566 | stream <- "Hello world"
567 | ```
568 |
569 | Ranging over a channel
570 | the for-range loop exits when the channel is closed
571 |
572 | ```go
573 | intStream := make(chan int)
574 | go func() {
575 | defer close(intStream)
576 | for i := 1; i <= 5; i++ {
577 | intStream <- i
578 | }
579 | }()
580 |
581 | for integer := range intStream {
582 | fmt.Printf("%v ", integer)
583 | }
584 | ```
585 |
586 | **unbuffered channel**
587 | A send operation on an unbuffered channel blocks the sending goroutine, until another goroutine performs a corresponding receive on the same channel; at that point, the value is passed, and both goroutines can continue. On the other hand, if a receive operation is attempted beforehand, the receiving goroutine is blocked until another goroutine performs a send on the same channel. Communication over an unbuffered channel makes the sending and receiving goroutines synchronize. Because of this, unbuffered channels are sometimes called synchronous channels. When a value is sent over an unbuffered channel, the reception of the value takes place before the sending goroutine wakes up again. In discussions of concurrency, when we say that x occurs before y, we do not simply mean that x occurs before y in time; we mean that this is guaranteed and that all your previous effects like updates to variables will complete and you can count on them. When x does not occur before y or after y, we say that x is concurrent with y. This is not to say that x and y are necessarily simultaneous; it just means that we can't assume anything about your order
588 |
589 | **buffered channel**
590 | on a buffered channel, a write blocks when the channel is full, and a read blocks when it is empty
591 |
592 | ```go
593 | var dataStream chan interface{}
594 | dataStream = make(chan interface{}, 4)
595 | ```
596 |
597 | both reading from and sending to a nil channel cause a deadlock
598 |
599 | ```go
600 | var dataStream chan interface{}
601 | <-dataStream // This panics with: fatal error: all goroutines are asleep - deadlock!
602 | ```
603 | ```
604 | goroutine 1 [chan receive (nil chan)]:
605 | main.main()
606 | /tmp/babel-23079IVB/go-src-23079O4q.go:9 +0x3f
607 | exit status 2
608 | ```
609 |
610 | ```go
611 | var dataStream chan interface{}
612 | dataStream <- struct{}{} // This produces: fatal error: all goroutines are asleep - deadlock!
613 | ```
614 | ```
615 | goroutine 1 [chan send (nil chan)]:
616 | main.main()
617 | /tmp/babel-23079IVB/go-src-23079dnD.go:9 +0x77
618 | exit status 2
619 | ```
620 |
621 | and closing a nil channel causes a panic
622 |
623 | ```go
624 | var dataStream chan interface{}
625 | close(dataStream) // This produces: panic: close of nil channel
626 | ```
627 | ```
628 | goroutine 1 [running]:
629 | panic(0x45b0c0, 0xc42000a160)
630 | /usr/local/lib/go/src/runtime/panic.go:500 +0x1a1
631 | main.main()
632 | /tmp/babel-23079IVB/go-src-230794uu.go:9 +0x2a
633 | exit status 2
634 | ```
635 |
636 | Table with result of channel operations
637 |
638 | Operation | Channel State | Result
639 | ----------|--------------------|-------------
640 | Read | nil | Block
641 | _ | Open and Not Empty | Value
642 | _ | Open and Empty | Block
643 | _ | Closed | default value, false
644 | _ | Write Only | Compilation Error
645 | Write | nil | Block
646 | _ | Open and Full | Block
647 | _ | Open and Not Full | Write Value
648 | _ | Closed | panic
649 | _ | Receive Only | Compilation Error
650 | Close | nil | panic
651 | _ | Open and Not Empty | Closes Channel; reads succeed until channel is drained, then reads produce default value
652 | _ | Open and Empty | Closes Channel; reads produces default value
653 | _ | Closed | panic
654 |
655 |
656 | `TIP: Cannot close a receive-only channel`
657 |
658 | * Let's start with channel owners. The goroutine that has a channel must:
659 | * 1 - Instantiate the channel.
660 | * 2 - Perform writes, or pass ownership to another goroutine.
661 | * 3 - Close the channel.
662 | * 4 - Encapsulate the previous three things in this list and expose them via a reader channel.
663 |
664 | * When assigning channel owners responsibilities, a few things happen:
665 | * 1 - Because we’re the one initializing the channel, we remove the risk of deadlocking by writing to a nil channel.
666 | * 2 - Because we’re the one initializing the channel, we remove the risk of panicing by closing a nil channel.
667 | * 3 - Because we’re the one who decides when the channel gets closed, we remove the risk of panicing by writing to a closed channel.
668 | * 4 - Because we’re the one who decides when the channel gets closed, we remove the risk of panicing by closing a channel more than once.
669 | * 5 - We wield the type checker at compile time to prevent improper writes to our channel.
670 |
671 | ```go
672 | chanOwner := func() <-chan int {
673 | resultStream := make(chan int, 5)
674 | go func() {
675 | defer close(resultStream)
676 | for i := 0; i <= 5; i++ {
677 | resultStream <- i
678 | }
679 | }()
680 | return resultStream
681 | }
682 |
683 | resultStream := chanOwner()
684 | for result := range resultStream {
685 | fmt.Printf("Received: %d\n", result)
686 | }
687 |
688 | fmt.Println("Done receiving!")
689 | ```
690 |
691 | The creation of channel owners explicitly tends to have greater control of when that channel should be closed and its operation, avoiding the delegation of these functions to other methods/functions of the system, avoiding reading closed channels or sending data to the same already finalized
692 |
693 |
694 | **select**
695 |
696 | the select cases do not work the same as a switch, which is sequential; execution will not automatically fall through if none of the criteria are met.
697 |
698 | ```go
699 | var c1, c2 <-chan interface{}
700 | var c3 chan<- interface{}
701 | select {
702 | case <- c1:
703 | // Do something
704 | case <- c2:
705 | // Do something
706 | case c3<- struct{}{}:
707 |
708 | }
709 | ```
710 |
711 | Instead, all channel reads and writes are considered simultaneously to see if any of them are ready: channels filled or closed in the case of reads and channels not at capacity in the case of writes. If none of the channels are ready, the entire select command is blocked. Then, when one of the channels is ready, the operation will proceed and its corresponding instructions will be executed.
712 |
713 | ```go
714 | start := time.Now()
715 | c := make(chan interface{})
716 | go func() {
717 | time.Sleep(5*time.Second)
718 | close(c)
719 | }()
720 |
721 | fmt.Println("Blocking on read...")
722 | select {
723 | case <-c:
724 | fmt.Printf("Unblocked %v later.\n", time.Since(start))
725 | }
726 | ```
727 |
728 | questions when work with select and channels
729 |
730 | 1 - What happens when multiple channels have something to read?
731 |
732 | ```go
733 | c1 := make(chan interface{}); close(c1)
734 | c2 := make(chan interface{}); close(c2)
735 |
736 | var c1Count, c2Count int
737 | for i := 1000; i >= 0; i-- {
738 | select {
739 | case <-c1:
740 | c1Count++
741 | case <-c2:
742 | c2Count++
743 | }
744 | }
745 |
746 | fmt.Printf("c1Count: %d\nc2Count: %d\n", c1Count, c2Count)
747 | ```
748 |
749 | This produces:
750 | c1Count: 505
751 | c2Count: 496
752 |
753 | Roughly half the reads come from c1 and half from c2; the exact split cannot be predicted and will rarely be identical for both. The runtime knows nothing about the intent of the two channels — both ready (closed) as in our example — so it introduces randomness:
754 | the Go runtime performs a pseudo-random uniform selection over the set of ready select cases. This means that from your set of cases, each one has the same chance of being selected as all the others.
755 |
756 | A good way to do this is to introduce a random variable into your equation - in this case, which channel to select from. By weighing the chance that each channel is used equally, all Go programs that use the select statement will perform well in the average case.
757 |
758 |
759 | 2 - What if there are never any channels that become ready?
760 |
761 | ```go
762 | var c <-chan int
763 | select {
764 | case <-c:
765 | case <-time.After(1 * time.Second):
766 | fmt.Println("Timed out.")
767 | }
768 |
769 | ```
770 |
771 | To solve the problem of the channels being blocked, the default can be used to perform some other operation, or in the first example
772 | a time out with time.After
773 |
774 | 3 - What if we want to do something but no channels are currently ready? use `default`
775 |
776 | ```go
777 | start := time.Now()
778 | var c1, c2 <-chan int
779 | select {
780 | case <-c1:
781 | case <-c2:
782 | default:
783 | fmt.Printf("In default after %v\n\n", time.Since(start))
784 | }
785 | ```
786 |
787 | exit a select block
788 |
789 | ```go
790 | done := make(chan interface{})
791 | go func() {
792 | time.Sleep(5*time.Second)
793 | close(done)
794 | }()
795 |
796 | workCounter := 0
797 | loop:
798 | for {
799 | select {
800 | case <-done:
801 | break loop
802 | default:
803 | }
804 |
805 | // Simulate work
806 | workCounter++
807 | time.Sleep(1*time.Second)
808 | }
809 |
810 | fmt.Printf("Achieved %v cycles of work before signalled to stop.\n", workCounter)
811 | ```
812 |
813 | block forever
814 |
815 | ```go
816 | select {}
817 | ```
818 |
819 | **GOMAXPROCS**
820 | Prior to Go 1.5, GOMAXPROCS defaulted to one, and usually you’d find this snippet in most Go programs:
821 |
822 | ```go
823 | runtime.GOMAXPROCS(runtime.NumCPU())
824 | ```
825 |
826 | This function controls the number of operating system threads that will host so-called “Work Queues.”
827 | [documentation](https://pkg.go.dev/runtime#GOMAXPROCS)
828 |
829 |
830 | [Use a sync.Mutex or a channel?](https://github.com/golang/go/wiki/MutexOrChannel)
831 |
832 | As a general guide, though:
833 |
834 | Channel | Mutex
835 | --------|-------
836 | passing ownership of data,
distributing units of work,
communicating async results | caches,
state
837 |
838 |
839 | _"Do not communicate by sharing memory; instead, share memory by communicating. (copies)"_
840 |
841 |
842 |
843 | ## Patterns
844 |
845 | ### Confinement
846 |
847 | Confinement is the simple yet powerful idea of ensuring information is only ever available from one concurrent process.
848 | There are two kinds of confinement possible: ad hoc and lexical.
849 |
850 | Ad hoc confinement is when you achieve confinement through a convention
851 |
852 | ```go
853 | data := make([]int, 4)
854 |
855 | loopData := func(handleData chan<- int) {
856 | defer close(handleData)
857 | for i := range data {
858 | handleData <- data[i]
859 | }
860 | }
861 |
862 | handleData := make(chan int)
863 | go loopData(handleData)
864 |
865 | for num := range handleData {
866 | fmt.Println(num)
867 | }
868 | ```
869 |
870 | Lexical confinement involves using lexical scope to expose only the correct data and concurrency primitives for multiple concurrent processes to use.
871 |
872 | ```go
873 | chanOwner := func() <-chan int {
874 | results := make(chan int, 5)
875 | go func() {
876 | defer close(results)
877 |
878 | for i := 0; i <= 5; i++ {
879 | results <- i
880 | }
881 | }()
882 | return results
883 | }
884 |
885 | consumer := func(results <-chan int) {
886 | for result := range results {
887 | fmt.Printf("Received: %d\n", result)
888 | }
889 | fmt.Println("Done receiving!")
890 | }
891 |
892 | results := chanOwner()
893 | consumer(results)
894 | ```
895 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/confinement)
896 |
897 |
898 | ### Cancellation
899 |
900 | ```go
901 | package main
902 |
903 | func main() {
904 | doWork := func(
905 | done <-chan interface{},
906 | strings <-chan string,
907 | ) <-chan interface{} {
908 | terminated := make(chan interface{})
909 | go func() {
910 | defer fmt.Println("doWork exited.")
911 | defer close(terminated)
912 | for {
913 |
914 | select {
915 | case s := <-strings:
916 | // Do something interesting
917 | fmt.Println(s)
918 | case <-done:
919 | return
920 | }
921 | }
922 | }()
923 | return terminated
924 | }
925 |
926 | done := make(chan interface{})
927 | terminated := doWork(done, nil)
928 |
929 | go func() {
930 | // Cancel the operation after 1 second.
931 | time.Sleep(1 * time.Second)
932 | fmt.Println("Canceling doWork goroutine...")
933 | close(done)
934 | }()
935 |
936 | <-terminated
937 | fmt.Println("Done.")
938 | }
939 | ```
940 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/cancellation)
941 |
942 |
943 | ### OR Channel
944 |
945 | At times you may find yourself wanting to combine one or more done channels into a single done channel that closes if any of its component channels close.
946 |
947 | ```go
948 | package main
949 |
950 | import (
951 | "fmt"
952 | "time"
953 | )
954 |
955 | func main() {
956 | var or func(channels ...<-chan interface{}) <-chan interface{}
957 |
958 | or = func(channels ...<-chan interface{}) <-chan interface{} {
959 | switch len(channels) {
960 | case 0:
961 | return nil
962 | case 1:
963 | return channels[0]
964 | }
965 |
966 | orDone := make(chan interface{})
967 | go func() {
968 | defer close(orDone)
969 |
970 | switch len(channels) {
971 | case 2:
972 | select {
973 | case <-channels[0]:
974 | case <-channels[1]:
975 | }
976 | default:
977 | select {
978 | case <-channels[0]:
979 | case <-channels[1]:
980 | case <-channels[2]:
981 |
982 | case <-or(append(channels[3:], orDone)...):
983 | }
984 | }
985 | }()
986 |
987 | return orDone
988 | }
989 |
990 | sig := func(after time.Duration) <-chan interface{} {
991 | c := make(chan interface{})
992 | go func() {
993 | defer close(c)
994 | time.Sleep(after)
995 | }()
996 | return c
997 | }
998 |
999 | start := time.Now()
1000 | <-or(
1001 | sig(2*time.Hour),
1002 | sig(5*time.Minute),
1003 | sig(1*time.Second),
1004 | sig(1*time.Hour),
1005 | sig(1*time.Minute),
1006 | )
1007 |
1008 | fmt.Printf("done after %v", time.Since(start))
1009 | }
1010 | ```
1011 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/orchannel)
1012 |
1013 |
1014 | ### Error Handling
1015 |
1016 | ```go
1017 | package main
1018 |
1019 | import (
1020 | "fmt"
1021 | "net/http"
1022 | )
1023 |
1024 | type Result struct {
1025 | Error error
1026 | Response *http.Response
1027 | }
1028 |
1029 | func main() {
1030 | checkStatus := func(done <-chan interface{}, urls ...string) <-chan Result {
1031 | results := make(chan Result)
1032 | go func() {
1033 | defer close(results)
1034 |
1035 | for _, url := range urls {
1036 | var result Result
1037 | resp, err := http.Get(url)
1038 | result = Result{Error: err, Response: resp}
1039 |
1040 | select {
1041 | case <-done:
1042 | return
1043 | case results <- result:
1044 | }
1045 | }
1046 | }()
1047 |
1048 | return results
1049 | }
1050 |
1051 | done := make(chan interface{})
1052 | defer close(done)
1053 |
1054 | urls := []string{"https://www.google.com", "https://badhost"}
1055 | for result := range checkStatus(done, urls...) {
1056 | if result.Error != nil {
1057 | fmt.Printf("error: %v", result.Error)
1058 | continue
1059 | }
1060 |
1061 | fmt.Printf("Response: %v\n", result.Response.Status)
1062 | }
1063 | }
1064 | ```
1065 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/errorhandler)
1066 |
1067 |
1068 | ### Pipelines
1069 |
1070 | A pipeline is just another tool you can use to form an abstraction in your system.
1071 |
1072 | ```go
1073 | multiply := func(values []int, multiplier int) []int {
1074 | multipliedValues := make([]int, len(values))
1075 | for i, v := range values {
1076 | multipliedValues[i] = v * multiplier
1077 | }
1078 |
1079 | return multipliedValues
1080 | }
1081 |
1082 | add := func(values []int, additive int) []int {
1083 | addedValues := make([]int, len(values))
1084 | for i, v := range values {
1085 | addedValues[i] = v + additive
1086 | }
1087 |
1088 | return addedValues
1089 | }
1090 |
1091 | ints := []int{1, 2, 3, 4}
1092 | for _, v := range add(multiply(ints, 2), 1) {
1093 | fmt.Println(v)
1094 | }
1095 | ```
1096 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/pipelines)
1097 |
1098 |
1099 | ### Fan-in and Fan-out
1100 |
1101 | Fan-out is a term to describe the process of starting multiple goroutines to handle pipeline input, and fan-in is a term to describe the process of combining multiple outputs into one channel.
1102 |
1103 | ```go
1104 | package main
1105 |
1106 | import (
1107 | "fmt"
1108 | )
1109 |
1110 | type data int
1111 |
1112 | // distribute work items to multiple uniform actors
1113 | // no data shall be processed twice!
1114 | // received wch
1115 | // response res
1116 | func worker(wch <-chan data, res chan<- data) {
1117 | for {
1118 | w, ok := <-wch
1119 | if !ok {
1120 | return // return when is closed
1121 | }
1122 |
1123 | w *= 2
1124 | res <- w
1125 | }
1126 | }
1127 |
1128 | func main() {
1129 | work := []data{1, 2, 3, 4, 5}
1130 |
1131 | const numWorkers = 3
1132 |
1133 | wch := make(chan data, len(work))
1134 | res := make(chan data, len(work))
1135 |
1136 | // fan-out, one input channel for all actors
1137 | for i := 0; i < numWorkers; i++ {
1138 | go worker(wch, res)
1139 | }
1140 |
1141 | // fan-out, one input channel for all actors
1142 | for _, w := range work {
1143 | fmt.Println("send to wch : ", w)
1144 | wch <- w
1145 | }
1146 | close(wch)
1147 |
1148 | // fan-in, one result channel
1149 | for range work {
1150 | w := <-res
1151 | fmt.Println("receive from res : ", w)
1152 | }
1153 | }
1154 | ```
1155 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/fanoutfanin)
1156 |
1157 |
1158 | ### Or done channel
1159 |
Or-done is a way to encapsulate the verbosity of the for/select loop used to detect when a channel has been closed, while also avoiding goroutine leaks. The code below can be replaced by a closure that encapsulates that verbosity:
1161 |
1162 | ```go
1163 | for val := range myChan {
1164 | // Do something with val
1165 | }
1166 |
1167 | loop:
1168 | for {
1169 | select {
1170 | case <-done:
1171 | break loop
1172 | case maybeVal, ok := <-myChan:
1173 | if ok == false {
1174 | return // or maybe break from for
1175 | }
1176 | // Do something with val
1177 | }
1178 | }
1179 | ```
1180 |
1181 | can be created an isolation, a function/method, closure, creating a single goroutine
1182 |
1183 | ```go
1184 | orDone := func(done, c <-chan interface{}) <-chan interface{} {
1185 | valStream := make(chan interface{})
1186 | go func() {
1187 | defer close(valStream)
1188 | for {
1189 | select {
1190 | case <-done:
1191 | return
1192 | case v, ok := <-c:
1193 | if ok == false {
1194 | return
1195 | }
1196 | select {
1197 | case valStream <- v:
1198 | case <-done:
1199 | }
1200 | }
1201 | }
1202 | }()
1203 |
1204 | return valStream
1205 | }
1206 |
1207 | for val := range orDone(done, myChan) {
1208 | // Do something with val
1209 | }
1210 | ```
1211 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/ordonechannel)
1212 |
1213 |
1214 | ### Tee channel
1215 |
Pass it a channel to read from, and it will return two separate channels that will receive the same values:
1217 |
1218 | ```go
1219 | tee := func(done <-chan interface{}, in <-chan interface{}) (_, _ <-chan interface{}) {
1220 |
1221 | out1 := make(chan interface{})
1222 | out2 := make(chan interface{})
1223 |
1224 | go func() {
1225 | defer close(out1)
1226 | defer close(out2)
1227 | for val := range orDone(done, in) {
1228 | var out1, out2 = out1, out2
1229 | for i := 0; i < 2; i++ {
1230 | select {
1231 | case <-done:
1232 | case out1 <- val:
1233 | out1 = nil
1234 | case out2 <- val:
1235 | out2 = nil
1236 | }
1237 | }
1238 | }
1239 | }()
1240 |
1241 | return out1, out2
1242 | }
1243 | ```
1244 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/teechannel)
1245 |
1246 |
1247 | ### Bridge channel
1248 |
With this pattern it is possible to create a function that destructures a channel of channels into a single channel
1250 |
1251 | ```go
1252 | bridge := func(done <-chan interface{}, chanStream <-chan <-chan interface{}) <-chan interface{} {
1253 | valStream := make(chan interface{})
1254 | go func() {
1255 | defer close(valStream)
1256 | for {
1257 | var stream <-chan interface{}
1258 | select {
1259 | case maybeStream, ok := <-chanStream:
1260 | if ok == false {
1261 | return
1262 | }
1263 | stream = maybeStream
1264 |
1265 | case <-done:
1266 | return
1267 | }
1268 |
1269 | for val := range orDone(done, stream) {
1270 | select {
1271 | case valStream <- val:
1272 | case <-done:
1273 | }
1274 | }
1275 | }
1276 | }()
1277 |
1278 | return valStream
1279 | }
1280 |
1281 | genVals := func() <-chan <-chan interface{} {
1282 | chanStream := make(chan (<-chan interface{}))
1283 | go func() {
1284 | defer close(chanStream)
1285 | for i := 0; i < 10; i++ {
1286 | stream := make(chan interface{}, 1)
1287 | stream <- i
1288 | close(stream)
1289 | chanStream <- stream
1290 | }
1291 | }()
1292 |
1293 | return chanStream
1294 | }
1295 |
1296 | done := make(chan interface{})
1297 | defer close(done)
1298 |
1299 | for v := range bridge(done, genVals()) {
1300 | fmt.Printf("%v ", v)
1301 | }
1302 | ```
1303 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/bridgechannel)
1304 |
1305 |
1306 | ### Queuing
1307 |
A buffered channel is a kind of queue. Adding queuing prematurely can hide synchronization issues such as deadlocks. Here we use the queue to limit concurrent processing: when `limit <- struct{}{}` fills the buffer, the sender waits until a slot is released by `<-limit`; if we removed the limit, all 50 goroutines would run at the same time.
1310 |
1311 | ```go
1312 | package main
1313 |
1314 | import (
1315 | "fmt"
1316 | "runtime"
1317 | "sync"
1318 | "time"
1319 | )
1320 |
1321 | func main() {
1322 | var wg sync.WaitGroup
1323 | limit := make(chan interface{}, runtime.NumCPU())
1324 |
1325 | fmt.Printf("Started, Limit %d\n", cap(limit))
1326 |
1327 | workers := func(l chan<- interface{}, wg *sync.WaitGroup) {
1328 | for i := 0; i <= 50; i++ {
1329 | i := i
1330 |
1331 | limit <- struct{}{}
1332 | wg.Add(1)
1333 |
1334 | go func(x int, w *sync.WaitGroup) {
1335 | defer w.Done()
1336 |
1337 | time.Sleep(1 * time.Second)
1338 | fmt.Printf("Process %d\n", i)
1339 |
1340 | <-limit
1341 | }(i, wg)
1342 | }
1343 | }
1344 |
1345 | workers(limit, &wg)
1346 | wg.Wait()
1347 |
1348 | fmt.Println("Finished")
1349 | }
1350 | ```
1351 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/queuing)
1352 |
1353 |
1354 | ### Context package
1355 |
1356 | in concurrent programs it’s often necessary to preempt operations because of timeouts, cancellation, or failure of another portion of the system. We’ve looked at the idiom of creating a done channel, which flows through your program and cancels all blocking concurrent operations. This works well, but it’s also somewhat limited.
1357 |
It would be useful if we could communicate extra information alongside the simple notification to cancel: why the cancellation was occurring, or whether or not our function has a deadline by which it needs to complete.
1359 |
1360 | see below an example to pass value into context, the context package serves two primary purposes:
1361 | - To provide an API for canceling branches of your call-graph.
1362 | - To provide a data-bag for transporting request-scoped data through your call-graph
1363 |
1364 |
1365 | ```go
1366 | package main
1367 |
1368 | import (
1369 | "context"
1370 | "fmt"
1371 | )
1372 |
1373 | func main() {
1374 | ProcessRequest("jane", "abc123")
1375 | }
1376 |
1377 | func ProcessRequest(userID, authToken string) {
1378 | ctx := context.WithValue(context.Background(), "userID", userID)
1379 | ctx = context.WithValue(ctx, "authToken", authToken)
1380 | HandleResponse(ctx)
1381 | }
1382 |
1383 | func HandleResponse(ctx context.Context) {
1384 | fmt.Printf(
1385 | "handling response for %v (%v)",
1386 | ctx.Value("userID"),
1387 | ctx.Value("authToken"),
1388 | )
1389 | }
1390 | ```
1391 |
1392 | another example with `Timeout`, cancellation in a function has three aspects:
1393 |
1394 | - A goroutine’s parent may want to cancel it.
1395 | - A goroutine may want to cancel its children.
1396 | - Any blocking operations within a goroutine need to be preemptable so that it may be canceled.
1397 |
1398 | The context package helps manage all three of these.
1399 |
1400 | ```go
1401 | package main
1402 |
1403 | import (
1404 | "context"
1405 | "fmt"
1406 | "sync"
1407 | "time"
1408 | )
1409 |
1410 | func main() {
1411 | var wg sync.WaitGroup
1412 | ctx, cancel := context.WithCancel(context.Background())
1413 | defer cancel()
1414 |
1415 | wg.Add(1)
1416 | go func() {
1417 | defer wg.Done()
1418 |
1419 | if err := printGreeting(ctx); err != nil {
1420 | fmt.Printf("cannot print greeting: %v\n", err)
1421 | cancel()
1422 | }
1423 | }()
1424 |
1425 | wg.Add(1)
1426 | go func() {
1427 | defer wg.Done()
1428 | if err := printFarewell(ctx); err != nil {
1429 | fmt.Printf("cannot print farewell: %v\n", err)
1430 | }
1431 | }()
1432 |
1433 | wg.Wait()
1434 | }
1435 |
1436 | func printGreeting(ctx context.Context) error {
1437 | greeting, err := genGreeting(ctx)
1438 | if err != nil {
1439 | return err
1440 | }
1441 | fmt.Printf("%s world!\n", greeting)
1442 |
1443 | return nil
1444 | }
1445 |
1446 | func printFarewell(ctx context.Context) error {
1447 | farewell, err := genFarewell(ctx)
1448 | if err != nil {
1449 | return err
1450 | }
1451 | fmt.Printf("%s world!\n", farewell)
1452 |
1453 | return nil
1454 | }
1455 |
1456 | func genGreeting(ctx context.Context) (string, error) {
1457 | ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
1458 | defer cancel()
1459 |
1460 | switch locale, err := locale(ctx); {
1461 | case err != nil:
1462 | return "", err
1463 | case locale == "EN/US":
1464 | return "hello", nil
1465 | }
1466 |
1467 | return "", fmt.Errorf("unsupported locale")
1468 | }
1469 |
1470 | func genFarewell(ctx context.Context) (string, error) {
1471 | switch locale, err := locale(ctx); {
1472 | case err != nil:
1473 | return "", err
1474 | case locale == "EN/US":
1475 | return "goodbye", nil
1476 | }
1477 |
1478 | return "", fmt.Errorf("unsupported locale")
1479 | }
1480 |
1481 | func locale(ctx context.Context) (string, error) {
1482 | if deadline, ok := ctx.Deadline(); ok {
1483 | if deadline.Sub(time.Now().Add(1*time.Minute)) <= 0 {
1484 | return "", context.DeadlineExceeded
1485 | }
1486 | }
1487 |
1488 | select {
1489 | case <-ctx.Done():
1490 | return "", ctx.Err()
1491 | case <-time.After(1 * time.Minute):
1492 | }
1493 |
1494 | return "EN/US", nil
1495 | }
1496 | ```
1497 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/contextpackage)
1498 |
1499 |
1500 | ### HeartBeats
1501 |
1502 | Heartbeats are a way for concurrent processes to signal life to outside parties. They get their name from human anatomy wherein a heartbeat signifies life to an observer. Heartbeats have been around since before Go, and remain useful within it.
1503 |
1504 | There are two different types of heartbeats:
1505 | - Heartbeats that occur on a time interval.
1506 | - Heartbeats that occur at the beginning of a unit of work
1507 |
1508 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/heartbeats)
1509 |
1510 |
1511 | ### Replicated Requests
1512 |
1513 | You should only replicate requests like this to handlers that have different runtime conditions: different processes, machines, paths to a data store, or access to different data stores. While this can be expensive to set up and maintain, if speed is your goal this is a valuable technique. Also, this naturally provides fault tolerance and scalability.
1514 |
The only caveat to this approach is that all handlers need to have an equal opportunity to fulfill the request. In other words, you won't have a chance to get the fastest time from a handler that can't fulfill the request. As I mentioned, whatever resources the handlers are using to do their work also need to be replicated. A different symptom of the same problem is uniformity. If your handlers are very similar, the chances that either one is an outlier are lower.
1516 |
1517 | ```go
1518 | package main
1519 |
1520 | import (
1521 | "fmt"
1522 | "math/rand"
1523 | "sync"
1524 | "time"
1525 | )
1526 |
1527 | func main() {
1528 |
1529 | doWork := func(done <-chan interface{}, id int, wg *sync.WaitGroup, result chan<- int) {
1530 | started := time.Now()
1531 | defer wg.Done()
1532 |
1533 | // Simulate random load
1534 | simulatedLoadTime := time.Duration(1+rand.Intn(5)) * time.Second
1535 | select {
1536 | case <-done:
1537 | case <-time.After(simulatedLoadTime):
1538 | }
1539 |
1540 | select {
1541 | case <-done:
1542 | case result <- id:
1543 | }
1544 |
1545 | took := time.Since(started)
1546 | // Display how long handlers would have taken
1547 | if took < simulatedLoadTime {
1548 | took = simulatedLoadTime
1549 |
1550 | }
1551 |
1552 | fmt.Printf("%v took %v\n", id, took)
1553 | }
1554 |
1555 | done := make(chan interface{})
1556 | result := make(chan int)
1557 |
1558 | var wg sync.WaitGroup
1559 | wg.Add(10)
1560 |
1561 | // Here we start 10 handlers to handle our requests.
1562 | for i := 0; i < 10; i++ {
1563 | go doWork(done, i, &wg, result)
1564 | }
1565 |
1566 | // This line grabs the first returned value from the group of handlers.
1567 | firstReturned := <-result
1568 |
1569 | // Here we cancel all the remaining handlers.
1570 | // This ensures they don’t continue to do unnecessary work.
1571 | close(done)
1572 | wg.Wait()
1573 |
1574 | fmt.Printf("Received an answer from #%v\n", firstReturned)
1575 | }
1576 | ```
1577 | [sample](https://github.com/luk4z7/go-concurrency-guide/tree/main/patterns/replicatedrequests)
1578 |
1579 |
1580 |
1581 | ## Scheduler Runtime
1582 |
1583 | Go will handle multiplexing goroutines onto OS threads for you.
1584 |
1585 | The algorithm it uses to do this is known as a work `stealing strategy`.
1586 |
1587 | fair scheduling. In an effort to ensure all processors were equally utilized, we could evenly distribute the load between all available processors. Imagine there are n processors and x tasks to perform. In the fair scheduling strategy, each processor would get x/n tasks:
1588 |
1589 | Go models concurrency using a fork-join model.
1590 |
1591 | As a refresher, remember that Go follows a fork-join model for concurrency. Forks are when goroutines are started, and join points are when two or more goroutines are synchronized through channels or types in the sync package. The work stealing algorithm follows a few basic rules. Given a thread of execution:
1592 |
1593 | At a fork point, add tasks to the tail of the deque associated with the thread.
1594 |
1595 |
1596 | Go scheduler’s job is to distribute runnable goroutines over multiple worker OS threads that runs on one or more processors. In multi-threaded computation, two paradigms have emerged in scheduling: work sharing and work stealing.
1597 |
1598 | - Work-sharing: When a processor generates new threads, it attempts to migrate some of them to the other processors with the hopes of them being utilized by the idle/underutilized processors.
1599 | - Work-stealing: An underutilized processor actively looks for other processor’s threads and “steal” some.
1600 |
1601 | The migration of threads occurs less frequently with work stealing than with work sharing. When all processors have work to run, no threads are being migrated. And as soon as there is an idle processor, migration is considered.
1602 |
1603 | Go has a work-stealing scheduler since 1.1, contributed by Dmitry Vyukov. This article will go in depth explaining what work-stealing schedulers are and how Go implements one.
1604 |
1605 |
1606 | **Scheduling basics**
1607 |
Go has an M:N scheduler that can also utilize multiple processors. At any time, M goroutines need to be scheduled on N OS threads that run on at most GOMAXPROCS processors. The Go scheduler uses the following terminology for goroutines, threads and processors:
1609 |
1610 | - G: goroutine
1611 | - M: OS thread (machine)
1612 | - P: processor
1613 |
1614 | There is a P-specific local and a global goroutine queue. Each M should be assigned to a P. Ps may have no Ms if they are blocked or in a system call. At any time, there are at most GOMAXPROCS number of P. At any time, only one M can run per P. More Ms can be created by the scheduler if required.
1615 | [runtime doc](https://github.com/golang/go/blob/master/src/runtime/proc.go)
1616 |
1617 |
1618 | **Why have a scheduler?**
1619 |
1620 | goroutines are user-space threads
1621 | conceptually similar to kernel threads managed by the OS, but managed entirely by the Go runtime
1622 |
1623 | lighter-weight and cheaper than kernel threads.
1624 |
1625 | * smaller memory footprint:
1626 | * initial goroutine stack = 2KB; default thread stack = 8KB
1627 | * state tracking overhead
* faster creation, destruction, context switches
1629 | * goroutines switches = ~tens of ns; thread switches = ~ a us.
1630 |
The Go scheduler puts goroutines on kernel threads, which run on the CPU
1632 |
1633 |
1634 |
1635 | ### References:
1636 |
1637 |
1638 | [Go Programming Language](https://www.gopl.io)
1639 |
1640 |
1641 | [Go Concurrency in Go](https://katherine.cox-buday.com/concurrency-in-go)
1642 |
1643 |
--------------------------------------------------------------------------------
/patterns/bridgechannel/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// orDone wraps a read from channel c so that ranging over the returned
// channel terminates either when c is closed or when done is closed,
// preventing a leaked goroutine in either case.
var orDone = func(done, c <-chan interface{}) <-chan interface{} {
	valStream := make(chan interface{})
	go func() {
		defer close(valStream)
		for {
			select {
			case v, ok := <-c:
				if !ok {
					// Upstream finished: close valStream via the defer.
					return
				}
				select {
				case valStream <- v:
				case <-done:
				}
			case <-done:
				fmt.Println("finish")
				return
			}
		}
	}()

	return valStream
}
30 |
func main() {

	// bridge consumes a channel of channels and flattens the values from
	// each inner channel, in order, onto a single output channel.
	bridge := func(done <-chan interface{}, chanStream <-chan <-chan interface{}) <-chan interface{} {
		valStream := make(chan interface{})
		go func() {
			defer close(valStream)
			// This loop is responsible for pulling channels off of chanStream
			// and providing them to a nested loop for use
			for {
				var stream <-chan interface{}
				select {
				case maybeStream, ok := <-chanStream:
					if ok == false {
						// chanStream itself was closed: no more inner channels.
						return
					}
					stream = maybeStream

				case <-done:
					return
				}

				// Drain the current inner channel; orDone stops the drain
				// early if done is closed.
				for val := range orDone(done, stream) {
					select {
					case valStream <- val:
					case <-done:
					}
				}
			}
		}()

		return valStream
	}

	// Here’s an example that creates a series of 10 channels, each with one
	// element written to it, and passes the channels into the bridge function
	genVals := func() <-chan <-chan interface{} {
		chanStream := make(chan (<-chan interface{}))
		go func() {
			defer close(chanStream)
			for i := 0; i < 10; i++ {
				// Buffer of one lets us write the value and close the inner
				// channel without waiting for a reader.
				stream := make(chan interface{}, 1)
				stream <- i
				close(stream)
				chanStream <- stream
			}
		}()
		return chanStream
	}

	done := make(chan interface{})
	defer close(done)

	// Prints: 0 1 2 3 4 5 6 7 8 9
	for v := range bridge(done, genVals()) {
		fmt.Printf("%v ", v)
	}

}
--------------------------------------------------------------------------------
/patterns/cancellation/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
func main() {

	// doWork consumes strings until done is closed; the channel it returns
	// is closed only when the internal goroutine has fully terminated.
	doWork := func(done <-chan interface{}, strings <-chan string) <-chan interface{} {
		terminated := make(chan interface{})

		go func() {
			// These defers run when done is closed, so the goroutine
			// shuts down cleanly and signals its termination.
			defer fmt.Println("doWork exited.")
			defer close(terminated)

			for {
				select {
				case s := <-strings:
					// Do something interesting
					fmt.Println(s)
				case <-done:
					// Returning here ends the goroutine.
					return
				}
			}
		}()

		fmt.Println("doWork initiate ...")

		// A receive on terminated blocks until it is closed above.
		return terminated
	}

	done := make(chan interface{})
	// strings is nil here: a receive from a nil channel blocks forever, so
	// the select inside doWork can only ever fire on the done case.
	terminated := doWork(done, nil)

	go func() {
		// Cancel the operation after 5 seconds.
		time.Sleep(5 * time.Second)
		fmt.Println("Canceling doWork goroutine...")
		// Without this close the final receive below would deadlock.
		close(done)
	}()

	fmt.Println("initiate ...")
	d := <-terminated
	fmt.Println(d)
	fmt.Println("Done.")
}
--------------------------------------------------------------------------------
/patterns/confinement/adhoc/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | )
6 |
func main() {
	// Ad hoc confinement: data is visible to everything in this scope, but
	// by convention only loopData reads it and only it writes the channel.
	// (Previously a make([]int, 4) was allocated here and immediately
	// discarded by reassignment — a dead allocation, now removed.)
	data := []int{1, 2, 3, 4, 5}

	// loopData owns the sending side of handleData and closes it once the
	// slice is exhausted.
	loopData := func(handleData chan<- int) {
		defer close(handleData)
		for i := range data {
			handleData <- data[i]
		}
	}

	handleData := make(chan int)
	go loopData(handleData)

	// Consume until loopData closes the channel.
	for num := range handleData {
		fmt.Println(num)
	}
}
--------------------------------------------------------------------------------
/patterns/confinement/lexical/data/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "sync"
7 | )
8 |
func main() {
	// printData receives its own sub-slice, so each goroutine is lexically
	// confined to data it exclusively owns — no synchronization needed.
	printData := func(wg *sync.WaitGroup, data []byte) {
		defer wg.Done()

		// Build the string byte by byte, then print it in one call.
		var buff bytes.Buffer
		for _, b := range data {
			fmt.Fprintf(&buff, "%c", b)
		}
		fmt.Println(buff.String())
	}

	var wg sync.WaitGroup
	wg.Add(2)
	data := []byte("golang")

	// Non-overlapping halves: "gol" and "ang". The print order of the two
	// goroutines is not deterministic.
	go printData(&wg, data[:3])
	go printData(&wg, data[3:])

	wg.Wait()
}
--------------------------------------------------------------------------------
/patterns/confinement/lexical/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
func main() {
	// chanOwner confines the writable side of the channel to the goroutine
	// it starts; consumers only ever see a receive-only channel.
	chanOwner := func() <-chan int {
		results := make(chan int, 5)
		go func() {
			defer close(results)

			// Emit 0..5, one per second, then close.
			for i := 0; i <= 5; i++ {
				time.Sleep(1 * time.Second)
				results <- i
			}
		}()

		return results
	}

	consumer := func(results <-chan int) {
		// NOTE(review): this prints the channel value itself (an address),
		// not its contents — possibly left over from debugging.
		fmt.Println(results)

		for result := range results {
			fmt.Printf("Received: %d\n", result)
		}

		fmt.Println("Done receiving!")
	}

	fmt.Println("initiate channel")
	results := chanOwner()

	fmt.Println("consumer ready - OK")
	consumer(results)
}
--------------------------------------------------------------------------------
/patterns/contextpackage/cancel/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 | "time"
8 | )
9 |
func main() {
	var wg sync.WaitGroup
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wg.Add(1)
	go func() {
		defer wg.Done()

		// If greeting fails, cancel the shared context so the farewell
		// branch is preempted as well.
		if err := printGreeting(ctx); err != nil {
			fmt.Printf("cannot print greeting: %v\n", err)
			cancel()
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := printFarewell(ctx); err != nil {
			fmt.Printf("cannot print farewell: %v\n", err)
		}
	}()

	wg.Wait()
}
35 |
36 | func printGreeting(ctx context.Context) error {
37 | greeting, err := genGreeting(ctx)
38 | if err != nil {
39 | return err
40 | }
41 | fmt.Printf("%s world!\n", greeting)
42 | return nil
43 | }
44 |
45 | func printFarewell(ctx context.Context) error {
46 | farewell, err := genFarewell(ctx)
47 | if err != nil {
48 | return err
49 | }
50 | fmt.Printf("%s world!\n", farewell)
51 | return nil
52 |
53 | }
54 |
55 | func genGreeting(ctx context.Context) (string, error) {
56 | ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
57 | defer cancel()
58 |
59 | switch locale, err := locale(ctx); {
60 | case err != nil:
61 | return "", err
62 | case locale == "EN/US":
63 | return "hello", nil
64 | }
65 | return "", fmt.Errorf("unsupported locale")
66 | }
67 |
68 | func genFarewell(ctx context.Context) (string, error) {
69 | switch locale, err := locale(ctx); {
70 | case err != nil:
71 | return "", err
72 | case locale == "EN/US":
73 | return "goodbye", nil
74 | }
75 | return "", fmt.Errorf("unsupported locale")
76 | }
77 |
78 | func locale(ctx context.Context) (string, error) {
79 | if deadline, ok := ctx.Deadline(); ok {
80 | if deadline.Sub(time.Now().Add(1*time.Minute)) <= 0 {
81 | return "", context.DeadlineExceeded
82 | }
83 | }
84 |
85 | select {
86 | case <-ctx.Done():
87 | return "", ctx.Err()
88 | case <-time.After(1 * time.Minute):
89 | }
90 | return "EN/US", nil
91 | }
--------------------------------------------------------------------------------
/patterns/errorhandler/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | )
7 |
func main() {
	// checkStatus demonstrates the problematic pattern: errors are handled
	// (just printed) inside the goroutine, out of reach of the caller. The
	// returnerror variant in this repo sends them back instead.
	checkStatus := func(done <-chan interface{}, urls ...string) <-chan *http.Response {
		responses := make(chan *http.Response)

		go func() {
			defer close(responses)

			for _, url := range urls {
				resp, err := http.Get(url)
				if err != nil {
					// The goroutine has no good option here but to log
					// and move on to the next URL.
					fmt.Println(err)
					continue
				}

				// Send the response unless done preempts the send.
				select {
				case <-done:
					return
				case responses <- resp:
				}
			}
		}()

		return responses
	}

	done := make(chan interface{})
	defer close(done)

	urls := []string{"https://www.google.com", "https://badhost"}
	for response := range checkStatus(done, urls...) {
		fmt.Printf("Response: %v\n", response.Status)
	}
}
--------------------------------------------------------------------------------
/patterns/errorhandler/returnerror/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | )
7 |
// Result couples an HTTP response with the error from the same request,
// so both can travel through a single channel back to the caller.
type Result struct {
	Error    error
	Response *http.Response
}
12 |
13 | func main() {
14 |
15 | checkStatus := func(done <-chan interface{}, urls ...string) <-chan Result {
16 | results := make(chan Result)
17 |
18 | go func() {
19 | defer close(results)
20 |
21 | for _, url := range urls {
22 | resp, err := http.Get(url)
23 | result := Result{
24 | Error: err,
25 | Response: resp,
26 | }
27 |
28 | // Enviando somente para o channel também funciona
29 | // results <- result
30 |
31 | select {
32 | case <-done:
33 | return
34 | case results <- result:
35 | }
36 | }
37 | }()
38 |
39 | return results
40 | }
41 |
42 | done := make(chan interface{})
43 | defer close(done)
44 |
45 | urls := []string{"https://www.google.com", "https://badhost"}
46 | for result := range checkStatus(done, urls...) {
47 | if result.Error != nil {
48 | fmt.Printf("error: %v", result.Error)
49 | continue
50 | }
51 |
52 | fmt.Printf("Response: %v\n", result.Response.Status)
53 | }
54 | }
--------------------------------------------------------------------------------
/patterns/fanoutfanin/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "runtime"
7 | "sync"
8 | "time"
9 | )
10 |
// toString converts a stream of interface{} values to a stream of strings
// via type assertion, honoring done for early shutdown.
var toString = func(done <-chan interface{}, valueStream <-chan interface{}) <-chan string {
	stringStream := make(chan string)

	go func() {
		defer close(stringStream)

		for v := range valueStream {
			s := v.(string)
			select {
			case stringStream <- s:
			case <-done:
				return
			}
		}
	}()

	return stringStream
}
28 |
// toInt converts a stream of interface{} values to a stream of ints via
// type assertion, honoring done for early shutdown.
var toInt = func(done <-chan interface{}, valueStream <-chan interface{}) <-chan int {
	intStream := make(chan int)

	go func() {
		defer close(intStream)

		for v := range valueStream {
			n := v.(int)
			select {
			case intStream <- n:
			case <-done:
				return
			}
		}
	}()

	return intStream
}
46 |
// repeat cycles over the given values forever, emitting them one by one
// until done is closed.
var repeat = func(done <-chan interface{}, values ...interface{}) <-chan interface{} {
	valueStream := make(chan interface{})

	go func() {
		defer close(valueStream)

		for {
			for _, next := range values {
				select {
				case valueStream <- next:
				case <-done:
					return
				}
			}
		}
	}()

	return valueStream
}
66 |
// take pulls at most num values off of valueStream and then closes its
// output. Combined with generators like repeat, it bounds an otherwise
// infinite pipeline.
var take = func(done <-chan interface{}, valueStream <-chan interface{}, num int) <-chan interface{} {
	takeStream := make(chan interface{})

	go func() {
		defer close(takeStream)

		for i := 0; i < num; i++ {
			// Receive and send in two separate selects. The previous form
			// `case takeStream <- <-valueStream:` evaluated the receive
			// while setting up the select, so done could not preempt a
			// blocked receive, and a closed valueStream made it forward
			// zero values instead of terminating.
			var v interface{}
			var ok bool
			select {
			case <-done:
				return
			case v, ok = <-valueStream:
				if !ok {
					return
				}
			}

			select {
			case <-done:
				return
			case takeStream <- v:
			}
		}
	}()

	return takeStream
}
87 |
// repeatFn generates an endless stream by invoking fn once per element
// sent, stopping only when done is closed.
var repeatFn = func(done <-chan interface{}, fn func() interface{}) <-chan interface{} {
	valueStream := make(chan interface{})

	go func() {
		defer close(valueStream)

		for {
			next := fn()
			select {
			case valueStream <- next:
			case <-done:
				return
			}
		}
	}()

	return valueStream
}
106 |
107 | // 1º
108 | // Here we take in our standard done channel to allow our goroutines to be torn down,
109 | // and then a variadic slice of interface{} channels to fan-in.
// fanIn multiplexes any number of input channels onto one output channel.
// The output closes once every input has been drained or done is closed.
var fanIn = func(done <-chan interface{}, channels ...<-chan interface{}) <-chan interface{} {

	// The WaitGroup tracks one goroutine per input channel so we know
	// when the shared output can be closed.
	var wg sync.WaitGroup
	multiplexedStream := make(chan interface{})

	// drain forwards everything from a single input channel onto the
	// shared output, quitting early when done closes.
	drain := func(c <-chan interface{}) {
		defer wg.Done()
		for v := range c {
			select {
			case multiplexedStream <- v:
			case <-done:
				return
			}
		}
	}

	// Fan in: one goroutine per input channel.
	wg.Add(len(channels))
	for _, c := range channels {
		go drain(c)
	}

	// Close the output only after every drain goroutine has finished.
	go func() {
		wg.Wait()
		close(multiplexedStream)
	}()

	return multiplexedStream
}
150 |
// primeFinder, despite its name, is a pass-through stage: it forwards
// values from valueStream unchanged, boxing each int into an interface{}.
// It stands in for a real (expensive) prime-checking stage in this demo.
// NOTE(review): `case intStream <- <-valueStream:` evaluates the inner
// receive while the select is being set up, so done cannot preempt a
// blocked receive, and a closed valueStream would make this stage forward
// zero values forever. See take for the two-select fix.
var primeFinder = func(done <-chan interface{}, valueStream <-chan int) <-chan interface{} {
	intStream := make(chan interface{})

	go func() {
		defer close(intStream)

		for {
			select {
			case <-done:
				return
			case intStream <- <-valueStream:
			}
		}
	}()

	return intStream
}
168 |
// v1 runs the pipeline with a single primeFinder stage — no fan-out.
func v1() {
	fmt.Println("V1")
	// Note: this local `rand` shadows the math/rand package from this point
	// on; inside the closure body, rand.Intn still refers to the package.
	rand := func() interface{} { return rand.Intn(50000000) }

	done := make(chan interface{})
	defer close(done)

	start := time.Now()

	randIntStream := toInt(done, repeatFn(done, rand))
	fmt.Println("Primes:")
	// take bounds the otherwise infinite stream to 10 values.
	for prime := range take(done, primeFinder(done, randIntStream), 10) {
		fmt.Printf("\t%d\n", prime)
	}

	fmt.Printf("Search took: %v\n", time.Since(start))
}
187 |
// v2 runs the same pipeline but fans out to one primeFinder per CPU and
// fans their outputs back in through fanIn.
func v2() {
	fmt.Println("V2")
	done := make(chan interface{})
	defer close(done)

	start := time.Now()

	// Shadows the math/rand package, as in v1.
	rand := func() interface{} { return rand.Intn(50000000) }

	randIntStream := toInt(done, repeatFn(done, rand))

	// With multiple finder goroutines we also have multiple channels,
	// but our range over primes is only expecting one channel.
	// This brings us to the fan-in portion of the pattern.
	numFinders := runtime.NumCPU()
	fmt.Printf("Spinning up %d prime finders.\n", numFinders)

	// All finders share randIntStream, so no value is processed twice.
	finders := make([]<-chan interface{}, numFinders)
	fmt.Println("Primes:")
	for i := 0; i < numFinders; i++ {
		finders[i] = primeFinder(done, randIntStream)
	}

	// Fanning in means multiplexing multiple streams of data into a single
	// stream: fanIn receives the finders... channels and returns one stream.
	for prime := range take(done, fanIn(done, finders...), 10) {
		fmt.Printf("\t%d\n", prime)
	}

	fmt.Printf("Search took: %v\n", time.Since(start))

}
222 |
// main runs the sequential (v1) and fanned-out (v2) searches back to
// back so their reported timings can be compared.
func main() {
	v1()
	v2()
}
--------------------------------------------------------------------------------
/patterns/fanoutfanin/samples/001/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// data is the unit of work exchanged between the producer and the workers.
type data int

// worker drains wch, doubling each item and publishing the result on res.
// It returns when wch is closed and fully drained. Several workers can
// share the same wch: each item is received by exactly one of them, so
// no data is processed twice.
//
// Ranging over the channel is the idiomatic replacement for the manual
// `w, ok := <-wch` receive-and-check loop.
func worker(wch <-chan data, res chan<- data) {
	for w := range wch {
		res <- w * 2
	}
}
24 |
25 | func main() {
26 | work := []data{1, 2, 3, 4, 5}
27 |
28 | const numWorkers = 3
29 |
30 | wch := make(chan data, len(work))
31 | res := make(chan data, len(work))
32 |
33 | // fan-out, one input channel for all actors
34 | for i := 0; i < numWorkers; i++ {
35 | go worker(wch, res)
36 | }
37 |
38 | // fan-out, one input channel for all actors
39 | for _, w := range work {
40 | fmt.Println("send to wch : ", w)
41 | wch <- w
42 | }
43 | close(wch)
44 |
45 | // fan-in, one result channel
46 | for range work {
47 | w := <-res
48 | fmt.Println("receive from res : ", w)
49 | }
50 | }
--------------------------------------------------------------------------------
/patterns/fanoutfanin/samples/002/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "time"
7 | )
8 |
// send the same message to different actors
// all actors receive the same work item
// example, send a single signal
// security scanner

// goFile is one Go source file to be scanned: its name and full text.
type goFile struct {
	name string
	content string
}
17 |
// mockScan pretends to scan something: roughly nine calls out of ten
// report a clean result, the rest report a vulnerability.
func mockScan() string {
	alert := rand.Intn(100) > 90
	if alert {
		return "ALERT - vulnerability found"
	}
	return "OK - All Correct"
}
25 |
// scanSQLInjection runs the (mock) SQL-injection scan on one file and
// publishes a human-readable report line on res.
func scanSQLInjection(data goFile, res chan<- string) {
	res <- fmt.Sprintf("SQL injection scan: %s scanned, result: %s", data.name, mockScan())
}
29 |
// scanTimingExploits runs the (mock) timing-exploit scan on one file
// and publishes a human-readable report line on res.
func scanTimingExploits(data goFile, res chan<- string) {
	res <- fmt.Sprintf("Timing exploits scan: %s scanned, result: %s", data.name, mockScan())
}
33 |
// scanAuth runs the (mock) authentication scan on one file and
// publishes a human-readable report line on res.
func scanAuth(data goFile, res chan<- string) {
	res <- fmt.Sprintf("Authentication scan: %s scanned, result: %s", data.name, mockScan())
}
37 |
38 | func main() {
39 | si := []goFile{
40 | {name: "utils.go", content: "package utils\n\nfunc Util() {}"},
41 | {name: "helper.go", content: "package Helper\n\nfunc Helper() {}"},
42 | {name: "misc.go", content: "package Misc\n\nfunc Misc() {}"},
43 | {name: "various.go", content: "package Various\n\nfunc Various() {}"},
44 | }
45 |
46 | res := make(chan string, len(si)*3)
47 |
48 | for _, d := range si {
49 | d := d
50 |
51 | // fan-out pass the input directly
52 | go scanSQLInjection(d, res) // fan-in common result channel
53 | go scanTimingExploits(d, res)
54 | go scanAuth(d, res)
55 | }
56 |
57 | // Scatter-Gather
58 | for i := 0; i < cap(res); i++ {
59 | fmt.Println(<-res)
60 | }
61 |
62 | fmt.Println("main: done")
63 |
64 | NumberOfTheWeekInMonth(time.Now())
65 | }
66 |
67 | func NumberOfTheWeekInMonth(now time.Time) int {
68 |
69 | // beginningOfTheMonth := time.Date(now.Year(), now.Month(), 1, 1, 1, 1, 1, time.UTC)
70 | // _, thisWeek := now.ISOWeek()
71 | // _, beginningWeek := beginningOfTheMonth.ISOWeek()
72 |
73 | _, w := now.ISOWeek()
74 | data := fmt.Sprintf("%s%d", now.Format("200601"), w)
75 |
76 | fmt.Println(data)
77 |
78 | return 1
79 | }
--------------------------------------------------------------------------------
/patterns/fanoutfanin/samples/003/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "golang.org/x/sync/errgroup"
5 |
6 | "fmt"
7 | "math/rand"
8 | "time"
9 | )
10 |
// send the same message to different actors
// all actors receive the same work item
// example, send a single signal
// security scanner

// goFile is one Go source file to be scanned: its name and full text.
type goFile struct {
	name string
	data string
}
19 |
// mockScan fakes a scan result: about 10% of calls report a finding,
// the rest report a clean pass.
func mockScan() string {
	if rand.Intn(100) <= 90 {
		return "OK - All Correct"
	}
	return "ALERT - vulnerability found"
}
27 |
28 | func scanSQLInjection(data <-chan goFile, res chan<- string) error {
29 | for d := range data {
30 | res <- fmt.Sprintf("SQL injection scan: %s scanned, result: %s", d.name, mockScan())
31 | }
32 |
33 | close(res)
34 | return nil
35 | }
36 |
37 | func scanTimingExploits(data <-chan goFile, res chan<- string) error {
38 | for d := range data {
39 | res <- fmt.Sprintf("Timing exploits scan: %s scanned, result: %s", d.name, mockScan())
40 | }
41 |
42 | close(res)
43 | return nil
44 | }
45 |
46 | func scanAuth(data <-chan goFile, res chan<- string) error {
47 | for d := range data {
48 | res <- fmt.Sprintf("Authentication scan: %s scanned, result: %s", d.name, mockScan())
49 | }
50 |
51 | close(res)
52 | return nil
53 | }
54 |
// main fans the same stream of files out to three scanner goroutines,
// merges their three result streams back into one, and waits for the
// whole graph to finish via a single errgroup.
func main() {
	si := []goFile{
		{name: "utils.go", data: "package utils\n\nfunc Util() {}"},
		{name: "helper.go", data: "package Helper\n\nfunc Helper() {}"},
		{name: "misc.go", data: "package Misc\n\nfunc Misc() {}"},
		{name: "various.go", data: "package Various\n\nfunc Various() {}"},
	}

	// Three chans to simulate existing output channels
	// that we must read from
	input := make(chan goFile, len(si))
	res1 := make(chan string, len(si))
	res2 := make(chan string, len(si))
	res3 := make(chan string, len(si))

	// fanOut duplicates every item on input onto three channels, one
	// per scanner below.
	chans := fanOut(input, 3)
	var g errgroup.Group

	// Spawn the actors; each one closes its own result channel on exit.
	g.Go(func() error {
		return scanSQLInjection(chans[0], res1)
	})

	g.Go(func() error {
		return scanTimingExploits(chans[1], res2)
	})

	g.Go(func() error {
		return scanAuth(chans[2], res3)
	})

	// Start sending work items; closing input lets fanOut (and, in
	// turn, the scanners) terminate.
	g.Go(func() error {
		for _, d := range si {
			input <- d
		}

		close(input)
		return nil
	})

	// Drain the merged results; fanIn closes res once all three result
	// channels have been closed by their scanners.
	g.Go(func() error {
		res := fanIn(res1, res2, res3)
		for r := range res {
			fmt.Println(r)
		}

		return nil
	})

	err := g.Wait()
	if err != nil {
		panic(err)
	}

	fmt.Println("main: done")
}
112 |
113 | func fanIn[T any](chans ...chan T) chan T {
114 | res := make(chan T)
115 | var g errgroup.Group
116 |
117 | for _, c := range chans {
118 | c := c
119 |
120 | g.Go(func() error {
121 | for s := range c {
122 | res <- s
123 | }
124 |
125 | return nil
126 | })
127 | }
128 |
129 | go func() {
130 | g.Wait()
131 | close(res)
132 | }()
133 |
134 | return res
135 | }
136 |
// fanOut broadcasts every item received on ch to n freshly created
// output channels and returns them. Each output has a buffer of one.
//
// Delivery is best-effort: if an output channel is not drained within
// 100ms, that item is dropped for that channel, so one slow consumer
// cannot stall the others. All outputs are closed after ch is closed
// and drained.
func fanOut[T any](ch chan T, n int) []chan T {
	chans := make([]chan T, 0, n)

	for i := 0; i < n; i++ {
		chans = append(chans, make(chan T, 1))
	}

	go func() {
		// Receive the values from the channel passed in as a parameter
		// and offer each one to every output channel in turn.
		for item := range ch {
			for _, c := range chans {
				select {
				case c <- item:
				// NOTE(review): dropping after 100ms appears to be a
				// deliberate best-effort choice — confirm consumers
				// tolerate occasional loss.
				case <-time.After(100 * time.Millisecond):
				}
			}
		}

		for _, c := range chans {
			close(c)
		}
	}()

	return chans
}
--------------------------------------------------------------------------------
/patterns/heartbeats/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "time"
7 | )
8 |
// typedef enumerates the runnable heartbeat demos in this file; main
// dispatches on a value of this type.
type typedef int

// One constant per doWork*/receive* pairing exercised in main.
const (
	doWork1 typedef = iota
	doWork2
	doWork3
	doWork4
	doWork5
)
18 |
19 | var (
20 | activeDoWork typedef = doWork1
21 |
	// doWork1Fn is the interval-based heartbeat worker: it emits a pulse
	// every pulseInterval and a result (a timestamp) every two intervals,
	// until done is closed.
	doWork1Fn = func(done <-chan interface{}, pulseInterval time.Duration) (<-chan interface{}, <-chan time.Time) {
		// Here we set up a channel to send heartbeats on. We return this out of doWork.
		heartbeat := make(chan interface{})
		results := make(chan time.Time)

		go func() {
			defer close(heartbeat)
			defer close(results)

			// Here we set the heartbeat to pulse at the pulseInterval we were given.
			// Every pulseInterval there will be something to read on this channel.
			pulse := time.Tick(pulseInterval)

			// This is just another ticker used to simulate work coming in.
			// We choose a duration greater than the pulseInterval so that we
			// can see some heartbeats coming out of the goroutine.
			workGen := time.Tick(2 * pulseInterval)

			sendPulse := func() {
				select {
				case heartbeat <- struct{}{}:
				// Note that we include a default clause.
				// We must always guard against the fact that no one
				// may be listening to our heartbeat.
				// The results emitted from the goroutine are critical, but the pulses are not.
				default:
					fmt.Println("without listener")
				}
			}
			// Redundant — sendPulse is used below; kept as-is.
			_ = sendPulse

			sendResult := func(r time.Time) {
				for {
					select {
					case <-done:
						return

					// note 5
					// Just like with done channels, anytime you perform a send or receive,
					// you also need to include a case for the heartbeat’s pulse.
					case <-pulse:
						sendPulse()
					// Sending `heartbeat <- struct{}{}` directly here would be
					// equivalent, but blocking: it would stall this worker
					// whenever no one is listening to the heartbeat.
					case results <- r:
						return
					}
				}
			}

			// To simulate an unhealthy worker whose heartbeat stops, swap
			// this loop for the bounded one commented out below.
			// for i := 0; i < 2; i++ {
			for {
				select {
				case <-done:
					return

				// the same as note 5
				case <-pulse:
					sendPulse()
				// heartbeat <- struct{}{}
				case r := <-workGen:
					sendResult(r)

				}
			}
		}()

		return heartbeat, results
	}
93 |
	// doWork2Fn emits ten random ints on workStream, offering a
	// non-blocking pulse on the one-slot-buffered heartbeat before each
	// send, so a late listener still sees at least one pulse.
	doWork2Fn = func(done <-chan interface{}) (<-chan interface{}, <-chan int) {
		// Here we create the heartbeat channel with a buffer of one.
		// This ensures that there’s always at least one pulse sent out
		// even if no one is listening in time for the send to occur.
		heartbeatStream := make(chan interface{}, 1)
		workStream := make(chan int)

		go func() {
			defer close(heartbeatStream)
			defer close(workStream)

			for i := 0; i < 10; i++ {
				// Here we set up a separate select block for the heartbeat.
				// We don’t want to include this in the same select block as
				// the send on results because if the receiver isn’t ready for the result,
				// they’ll receive a pulse instead, and the current value of the result will be lost.
				// We also don’t include a case statement for the done channel since we have a
				// default case that will just fall through.
				select {
				case heartbeatStream <- struct{}{}:

				// Once again we guard against the fact that no one may be
				// listening to our heartbeats.
				// Because our heartbeat channel was created with a buffer of one,
				// if someone is listening, but not in time for the first pulse,
				// they’ll still be notified of a pulse.
				default:

				}

				select {
				case <-done:
					return
				case workStream <- rand.Intn(10):
				}
			}
		}()

		return heartbeatStream, workStream
	}
134 |
	// doWork3Fn streams nums on intStream, sending a non-blocking pulse
	// on the buffered heartbeat before each value. The initial sleep
	// simulates a slow, nondeterministic start-up.
	doWork3Fn = func(done <-chan interface{}, nums ...int) (<-chan interface{}, <-chan int) {
		heartbeat := make(chan interface{}, 1)
		intStream := make(chan int)

		go func() {
			defer close(heartbeat)
			defer close(intStream)

			// Here we simulate some kind of delay before the goroutine can begin working.
			// In practice this can be all kinds of things and is nondeterministic.
			// I’ve seen delays caused by CPU load, disk contention, network latency, and goblins.
			time.Sleep(2 * time.Second)

			for _, n := range nums {
				// Non-blocking pulse: the one-slot buffer keeps the latest
				// pulse available for a listener that arrives late.
				select {
				case heartbeat <- struct{}{}:
				default:
				}

				select {
				case <-done:
					return
				case intStream <- n:
				}
			}

		}()

		return heartbeat, intStream
	}
165 |
	// doWork4Fn streams nums while also emitting interval-based pulses,
	// so a consumer can tell a slow worker from a dead one even before
	// the first value is ready.
	doWork4Fn = func(done <-chan interface{}, pulseInterval time.Duration, nums ...int) (
		<-chan interface{}, <-chan int) {

		heartbeat := make(chan interface{}, 1)

		intStream := make(chan int)
		go func() {
			defer close(heartbeat)
			defer close(intStream)

			// Simulated slow start-up, as in doWork3Fn.
			time.Sleep(2 * time.Second)
			pulse := time.Tick(pulseInterval)

		// We’re using a label here to make continuing from the inner loop a little simpler.
		numLoop:
			for _, n := range nums {

				// We require two loops: one to range over our list of numbers,
				// and this inner loop to run until the number is successfully sent on the intStream.
				for {
					select {
					case <-done:
						return
					case <-pulse:
						// Non-blocking pulse so a missing listener cannot
						// stall the worker.
						select {
						case heartbeat <- struct{}{}:
						default:
						}
					case intStream <- n:
						// Here we continue executing the outer loop.
						continue numLoop
					}
				}
			}
		}()

		return heartbeat, intStream
	}
204 |
205 | receive1 = func(heartbeat <-chan interface{}, results <-chan time.Time, timeout time.Duration) {
206 | for {
207 | select {
208 |
209 | // Here we select on the heartbeat.
210 | // When there are no results,
211 | // we are at least guaranteed a message from the
212 | // heartbeat channel every timeout/2.
213 | // If we don’t receive it, we know there’s
214 | // something wrong with the goroutine itself.
215 | case _, ok := <-heartbeat:
216 | // os pulsos que são enviados por sendPulse(), somente serão retornados
217 | // caso tenha um listener, caso contrário entrada no default do switch
218 | if ok == false {
219 | return
220 | }
221 |
222 | fmt.Println("pulse")
223 |
224 | // Here we select from the results channel;
225 | // nothing fancy going on here.
226 | case r, ok := <-results:
227 | if ok == false {
228 | return
229 | }
230 |
231 | fmt.Printf("results in second %v \n", r.Second())
232 |
233 | // Here we time out if we haven’t received
234 | // either a heartbeat or a new result.
235 | case <-time.After(timeout):
236 | fmt.Println("worker goroutine is not healthy!")
237 |
238 | return
239 | }
240 | }
241 | }
242 |
243 | receive2 = func(heartbeat <-chan interface{}, results <-chan int) {
244 | for {
245 | select {
246 | case _, ok := <-heartbeat:
247 | if ok {
248 | fmt.Println("pulse")
249 | } else {
250 | return
251 | }
252 | case r, ok := <-results:
253 | if ok {
254 | fmt.Printf("results %v\n", r)
255 | } else {
256 | return
257 | }
258 | }
259 | }
260 | }
261 |
262 | receive3 = func(heartbeat <-chan interface{}, results <-chan int, intSlice []int) {
263 | for i, expected := range intSlice {
264 | select {
265 | case r := <-results:
266 | if r != expected {
267 | fmt.Printf("index %v: expected %v, but received %v \n", i, expected, r)
268 | }
269 |
270 | // Here we time out after what we think is a reasonable duration
271 | // to prevent a broken goroutine from deadlocking our test.
272 | case <-time.After(1 * time.Second):
273 | fmt.Printf("test timed out")
274 | }
275 | }
276 | }
277 |
278 | receive4 = func(heartbeat <-chan interface{}, results <-chan int, intSlice []int) {
279 | i := 0
280 | for r := range results {
281 | if expected := intSlice[i]; r != expected {
282 | fmt.Sprintf("index %v: expected %v, but received %v,", i, expected, r)
283 | }
284 | i++
285 | }
286 | }
287 |
288 | receive5 = func(heartbeat <-chan interface{}, results <-chan int, timeout time.Duration, intSlice []int) {
289 | i := 0
290 | for {
291 | select {
292 | case r, ok := <-results:
293 | if ok == false {
294 | return
295 | } else if expected := intSlice[i]; r != expected {
296 | fmt.Sprintf("index %v: expected %v, but received %v,", i, expected, r)
297 | }
298 | i++
299 |
300 | // We also select on the heartbeat here to keep the timeout from occuring.
301 | case <-heartbeat:
302 | case <-time.After(timeout):
303 | fmt.Println("test timed out")
304 | }
305 | }
306 | }
307 | )
308 |
// main wires one of the five heartbeat producer/consumer pairings,
// selected by activeDoWork, with a shared cancellation channel that
// closes after ten seconds and a two-second liveness timeout.
func main() {
	// Notice that because we might be sending out multiple pulses
	// while we wait for input, or multiple pulses while waiting to send results,
	// all the select statements need to be within for loops.
	done := make(chan interface{})

	// We set up the standard done channel and close it after 10 seconds.
	// This gives our goroutine time to do some work.
	time.AfterFunc(10*time.Second, func() {
		close(done)
	})

	// Here we set our timeout period.
	// We’ll use this to couple our heartbeat interval to our timeout.
	const timeout = 2 * time.Second

	switch activeDoWork {
	case doWork1:
		// We pass in timeout/2 here.
		// This gives our heartbeat an extra tick to
		// respond so that our timeout isn’t too sensitive.
		heartbeat, results := doWork1Fn(done, timeout/2)
		receive1(heartbeat, results, timeout)

	case doWork2:
		heartbeat, results := doWork2Fn(done)
		receive2(heartbeat, results)

	case doWork3:
		intSlice := []int{0, 1, 2, 3, 5}
		heartbeat, results := doWork3Fn(done, intSlice...)

		receive3(heartbeat, results, intSlice)

	case doWork4:
		// NOTE(review): this case reuses doWork3Fn rather than a
		// doWork4-specific producer — confirm that is intentional.
		intSlice := []int{0, 1, 2, 3, 5}
		heartbeat, results := doWork3Fn(done, intSlice...)

		// Block until the first pulse confirms the worker has started.
		<-heartbeat
		receive4(heartbeat, results, intSlice)
	case doWork5:
		intSlice := []int{0, 1, 2, 3, 5}
		const timeout = 2 * time.Second

		heartbeat, results := doWork4Fn(done, timeout/2, intSlice...)

		// We still wait for the first heartbeat to occur to indicate we’ve entered the goroutine’s loop.
		<-heartbeat
		receive5(heartbeat, results, timeout, intSlice)
	}
}
--------------------------------------------------------------------------------
/patterns/orchannel/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
// main builds a recursive or-channel: a single channel that closes as
// soon as ANY of its input channels closes.
func main() {
	// or receives a variadic slice of channels and returns one channel
	// that closes when the first of them closes.
	var or func(channels ...<-chan interface{}) <-chan interface{}

	or = func(channels ...<-chan interface{}) <-chan interface{} {

		// Base cases: with zero channels return nil; with exactly one,
		// return that single channel unchanged.
		switch len(channels) {
		case 0:
			return nil
		case 1:
			return channels[0]
		}

		orDone := make(chan interface{})
		go func() {
			defer close(orDone)

			switch len(channels) {
			case 2:
				select {
				case <-channels[0]:
				case <-channels[1]:
				}
			default:
				// More than three channels: wait on the first three and
				// recurse for the rest. orDone is appended to the recursive
				// call so the whole tree unwinds once any channel fires.
				select {
				case <-channels[0]:
				case <-channels[1]:
				case <-channels[2]:

				case <-or(append(channels[3:], orDone)...):
				}
			}
		}()

		return orDone
	}

	// sig returns a channel that closes after the given duration.
	sig := func(after time.Duration) <-chan interface{} {
		c := make(chan interface{})
		go func() {
			defer close(c)
			time.Sleep(after)
		}()
		return c
	}

	start := time.Now()
	// Blocks only until the fastest signal (one second) fires.
	<-or(
		sig(2*time.Hour),
		sig(5*time.Minute),
		sig(1*time.Second),
		sig(1*time.Hour),
		sig(1*time.Minute),
	)

	fmt.Printf("done after %v", time.Since(start))
}
--------------------------------------------------------------------------------
/patterns/ordonechannel/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// main demonstrates the or-done-channel pattern: orDone wraps a channel
// so the consumer can simply range over it, with cancellation handled
// in one place instead of at every read site.
func main() {
	orDone := func(done, c <-chan interface{}) <-chan interface{} {
		valStream := make(chan interface{})
		go func() {
			defer close(valStream)
			for {
				select {
				case <-done:
					fmt.Println("finish")
					return
				case v, ok := <-c:
					if ok == false {
						return
					}
					// Forward the value, but stay cancellable while the
					// consumer is not ready to receive it.
					select {
					case valStream <- v:
					case <-done:
					}
				}
			}
		}()
		return valStream
	}

	done := make(chan interface{})
	myChan := make(chan interface{})
	// Cancel immediately: a single send on done unblocks orDone's
	// select, so the range below ends without receiving any values.
	go func() {
		done <- struct{}{}
	}()

	for val := range orDone(done, myChan) {
		// Do something with val
		fmt.Println(val)
	}
}
--------------------------------------------------------------------------------
/patterns/pipelines/channels/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// main wires a four-stage pipeline — generate → ×2 → +1 → ×2 — and
// prints each value as it falls out the end.
func main() {

	// generator converts a slice of ints into a stream of values on a
	// channel, stopping early if done is closed.
	generator := func(done <-chan interface{}, integers ...int) <-chan int {
		intStream := make(chan int)

		go func() {
			defer close(intStream)

			for _, n := range integers {
				select {
				case intStream <- n:
				case <-done:
					return
				}
			}
		}()

		return intStream
	}

	// multiply forwards each incoming value multiplied by multiplier.
	multiply := func(done <-chan interface{}, intStream <-chan int, multiplier int) <-chan int {
		out := make(chan int)

		go func() {
			defer close(out)

			for n := range intStream {
				select {
				case out <- n * multiplier:
				case <-done:
					return
				}
			}
		}()

		return out
	}

	// add forwards each incoming value plus additive.
	add := func(done <-chan interface{}, intStream <-chan int, additive int) <-chan int {
		out := make(chan int)

		go func() {
			defer close(out)

			for n := range intStream {
				select {
				case out <- n + additive:
				case <-done:
					return
				}
			}
		}()

		return out
	}

	done := make(chan interface{})
	defer close(done)

	// Compose the stages: ((n * 2) + 1) * 2 for n in 1..4.
	pipeline := multiply(done, add(done, multiply(done, generator(done, 1, 2, 3, 4), 2), 1), 2)

	for v := range pipeline {
		fmt.Println(v)
	}
}
--------------------------------------------------------------------------------
/patterns/pipelines/repeat/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | )
7 |
// toString re-types a stream of interface{} values as strings,
// forwarding until done is closed or the input is exhausted.
var toString = func(done <-chan interface{}, valueStream <-chan interface{}) <-chan string {
	stringStream := make(chan string)

	go func() {
		defer close(stringStream)

		for value := range valueStream {
			s := value.(string)
			select {
			case stringStream <- s:
			case <-done:
				return
			}
		}
	}()

	return stringStream
}
25 |
// repeat endlessly cycles over values, emitting them one at a time on
// the returned channel until done is closed.
var repeat = func(done <-chan interface{}, values ...interface{}) <-chan interface{} {
	valueStream := make(chan interface{})

	go func() {
		defer close(valueStream)

		for {
			for _, value := range values {
				select {
				case valueStream <- value:
				case <-done:
					return
				}
			}
		}
	}()

	return valueStream
}
45 |
// take forwards at most num values from valueStream, then closes its
// output. It stops early if done is closed.
//
// The receive and the send are two separate selects: the previous
// `takeStream <- <-valueStream` form evaluated the receive before the
// select chose a case, so it could not be cancelled while waiting for
// input, and a value that had already been received could be silently
// dropped when done won the race.
var take = func(done <-chan interface{}, valueStream <-chan interface{}, num int) <-chan interface{} {
	takeStream := make(chan interface{})

	go func() {
		defer close(takeStream)

		for i := 0; i < num; i++ {
			var v interface{}
			select {
			case <-done:
				return
			case v = <-valueStream:
			}

			select {
			case <-done:
				return
			case takeStream <- v:
			}
		}
	}()

	return takeStream
}
63 |
// repeatFn calls fn forever, publishing each result on the returned
// channel until done is closed.
var repeatFn = func(done <-chan interface{}, fn func() interface{}) <-chan interface{} {
	valueStream := make(chan interface{})

	go func() {
		defer close(valueStream)

		for {
			next := fn()
			select {
			case valueStream <- next:
			case <-done:
				return
			}
		}
	}()

	return valueStream
}
81 |
82 | func main() {
83 |
84 | done := make(chan interface{})
85 | defer close(done)
86 |
87 | for num := range take(done, repeat(done, 1), 10) {
88 | fmt.Printf("%v ", num)
89 | }
90 |
91 | rand := func() interface{} {
92 | return rand.Int()
93 | }
94 |
95 | for num := range take(done, repeatFn(done, rand), 10) {
96 | fmt.Println(num)
97 | }
98 |
99 | var message string
100 | for token := range toString(done, take(done, repeat(done,
101 | "I", "am."), 5)) {
102 | message += token
103 | }
104 |
105 | fmt.Printf("message: %s...", message)
106 |
107 | }
--------------------------------------------------------------------------------
/patterns/queuing/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "runtime"
6 | "sync"
7 | "time"
8 | )
9 |
// main runs 51 one-second jobs while a buffered channel caps the
// number of jobs in flight at runtime.NumCPU().
func main() {
	var wg sync.WaitGroup
	limit := make(chan interface{}, runtime.NumCPU())

	fmt.Printf("Started, Limit %d\n", cap(limit))

	// workers acquires a slot on l before launching each job; the job
	// releases the slot when it finishes. The parameter is now
	// bidirectional and actually used: the original declared it
	// send-only and silently fell back to the captured `limit`.
	workers := func(l chan interface{}, wg *sync.WaitGroup) {
		for i := 0; i <= 50; i++ {
			l <- struct{}{} // acquire a slot (blocks once the limit is reached)
			wg.Add(1)

			go func(x int, w *sync.WaitGroup) {
				defer w.Done()

				time.Sleep(1 * time.Second)
				// Print the parameter instead of a captured copy of the
				// loop variable — same value, one less indirection.
				fmt.Printf("Process %d\n", x)

				<-l // release the slot
			}(i, wg)
		}
	}

	workers(limit, &wg)
	wg.Wait()

	fmt.Println("Finished")
}
--------------------------------------------------------------------------------
/patterns/replicatedrequests/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "sync"
7 | "time"
8 | )
9 |
// main replicates one request across ten identical handlers and takes
// whichever answer arrives first, cancelling the rest.
func main() {

	// doWork simulates one handler with a random 1-5s service time.
	// Both waits select on done so a cancelled handler exits promptly.
	doWork := func(done <-chan interface{}, id int, wg *sync.WaitGroup, result chan<- int) {
		started := time.Now()
		defer wg.Done()

		// Simulate random load
		simulatedLoadTime := time.Duration(1+rand.Intn(5)) * time.Second
		select {
		case <-done:
		case <-time.After(simulatedLoadTime):
		}

		select {
		case <-done:
		case result <- id:
		}

		took := time.Since(started)
		// Display how long handlers would have taken (a cancelled handler
		// returns early, so report at least its simulated load time).
		if took < simulatedLoadTime {
			took = simulatedLoadTime

		}

		fmt.Printf("%v took %v\n", id, took)
	}

	done := make(chan interface{})
	result := make(chan int)

	var wg sync.WaitGroup
	wg.Add(10)

	// Here we start 10 handlers to handle our requests.
	for i := 0; i < 10; i++ {
		go doWork(done, i, &wg, result)
	}

	// This line grabs the first returned value from the group of handlers.
	firstReturned := <-result

	// Here we cancel all the remaining handlers.
	// This ensures they don’t continue to do unnecessary work.
	close(done)
	wg.Wait()

	fmt.Printf("Received an answer from #%v\n", firstReturned)
}
--------------------------------------------------------------------------------
/patterns/teechannel/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// repeat cycles over values forever, emitting them one at a time on
// the returned channel until done is closed.
var repeat = func(done <-chan interface{}, values ...interface{}) <-chan interface{} {
	valueStream := make(chan interface{})

	go func() {
		defer close(valueStream)

		for {
			for _, value := range values {
				select {
				case valueStream <- value:
				case <-done:
					return
				}
			}
		}
	}()

	return valueStream
}
26 |
// take forwards at most num values from valueStream, then closes its
// output. It stops early if done is closed.
//
// Receive and send are split into two selects: the previous
// `takeStream <- <-valueStream` form evaluated the receive before the
// select chose a case, so it could not be cancelled while waiting for
// input, and an already-received value could be silently dropped when
// done won the race.
var take = func(done <-chan interface{}, valueStream <-chan interface{}, num int) <-chan interface{} {
	takeStream := make(chan interface{})

	go func() {
		defer close(takeStream)

		for i := 0; i < num; i++ {
			var v interface{}
			select {
			case <-done:
				return
			case v = <-valueStream:
			}

			select {
			case <-done:
				return
			case takeStream <- v:
			}
		}
	}()

	return takeStream
}
44 |
// main demonstrates the tee-channel pattern: tee splits one input
// stream into two output streams that each receive every value.
func main() {
	// orDone wraps a channel with done-handling (same helper as the
	// ordonechannel example) so tee can simply range over its input.
	orDone := func(done, c <-chan interface{}) <-chan interface{} {
		valStream := make(chan interface{})
		go func() {
			defer close(valStream)
			for {
				select {
				case <-done:
					fmt.Println("finish")
					return
				case v, ok := <-c:
					if ok == false {
						return
					}
					select {
					case valStream <- v:
					case <-done:
					}
				}
			}
		}()

		return valStream
	}

	tee := func(done <-chan interface{}, in <-chan interface{}) (_, _ <-chan interface{}) {

		out1 := make(chan interface{})
		out2 := make(chan interface{})

		go func() {
			defer close(out1)
			defer close(out2)
			for val := range orDone(done, in) {
				// Shadow the outer channels with local copies; a copy is
				// set to nil once it has received the value, which makes
				// its select case permanently block and guarantees the
				// OTHER channel gets the value on the second pass.
				var out1, out2 = out1, out2
				for i := 0; i < 2; i++ {
					select {
					case <-done:
					case out1 <- val:
						out1 = nil
					case out2 <- val:
						out2 = nil
					}
				}
			}
		}()

		return out1, out2
	}

	done := make(chan interface{})
	defer close(done)

	out1, out2 := tee(done, take(done, repeat(done, 1, 2), 4))

	// Both outputs must be drained in lockstep: each iteration reads
	// one value from out1 and the matching value from out2.
	for vall := range out1 {
		fmt.Printf("out1: %v, out2: %v\n", vall, <-out2)
	}
}
--------------------------------------------------------------------------------
/sync/broadcast/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | )
7 |
// Button models a UI button whose Clicked condition is broadcast to
// every registered subscriber when the button is pressed.
type Button struct {
	Clicked *sync.Cond
}
11 |
// main uses sync.Cond's Broadcast to deliver a single "click" event to
// every registered subscriber at once.
func main() {
	button := Button{
		Clicked: sync.NewCond(&sync.Mutex{}),
	}

	// subscribe runs fn on its own goroutine once the condition fires,
	// and does not return until that goroutine is confirmed to be
	// running.
	subscribe := func(c *sync.Cond, param string, fn func(s string)) {
		var goroutineRunning sync.WaitGroup
		goroutineRunning.Add(1)

		go func(p string) {
			// NOTE(review): Done is signalled before Lock/Wait below, so
			// "running" does not yet mean "waiting on the Cond"; a
			// Broadcast racing this window could be missed — confirm
			// that is acceptable for this demo.
			goroutineRunning.Done()
			c.L.Lock() // critical section
			defer c.L.Unlock()

			// fmt.Println("Registered and wait ... ")
			c.Wait()

			fn(p)
		}(param)

		goroutineRunning.Wait()
	}

	var clickRegistered sync.WaitGroup

	for _, v := range []string{
		"Maximizing window.",
		"Displaying annoying dialog box!",
		"Mouse clicked."} {

		clickRegistered.Add(1)

		subscribe(button.Clicked, v, func(s string) {
			fmt.Println(s)
			clickRegistered.Done()
		})
	}

	// One Broadcast wakes every waiter registered above.
	button.Clicked.Broadcast()

	clickRegistered.Wait()
}
--------------------------------------------------------------------------------
/sync/cond/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | "time"
7 | )
8 |
// a goroutine that is waiting for a signal, and a goroutine that is sending signals.
// Say we have a queue of fixed length 2, and 10 items we want to push onto the queue
func main() {
	c := sync.NewCond(&sync.Mutex{})
	queue := make([]interface{}, 0, 10)

	// removeFromQueue pops the oldest item after a delay, then signals
	// the producer that capacity is available again.
	removeFromQueue := func(delay time.Duration) {
		time.Sleep(delay)
		c.L.Lock() // critical section

		queue = queue[1:]

		fmt.Println("Removed from queue")

		c.L.Unlock()
		c.Signal() // let a goroutine waiting on the condition know that something has occurred
	}

	for i := 0; i < 10; i++ {
		c.L.Lock() // critical section

		// When the queue length reaches two, the main goroutine is
		// suspended until a signal on the condition has been sent
		for len(queue) == 2 {
			c.Wait()
		}

		fmt.Println("Adding to queue")
		queue = append(queue, struct{}{})

		go removeFromQueue(1 * time.Second)

		c.L.Unlock()
	}
}
--------------------------------------------------------------------------------
/sync/mutex/README.md:
--------------------------------------------------------------------------------
1 | ## Mutex
2 |
3 | [Home](https://github.com/luk4z7/go-concurrency-guide)
4 | [Sync](https://github.com/luk4z7/go-concurrency-guide/tree/main/sync)
5 |
Here we can take some of the many examples of how to perform `memory access synchronization` safely to avoid data race errors and race conditions.
7 |
8 | In Go we can use the `sync` package to protect critical sections like the example [complete code](https://github.com/luk4z7/go-concurrency-guide/blob/main/sync/mutex/main.go)
9 |
10 | ```go
11 | var count int
12 | var lock sync.Mutex
13 |
14 | func main() {
15 | increment := func() {
16 | lock.Lock()
17 | defer lock.Unlock()
18 | count++
19 | fmt.Printf("Incrementing: %d\n", count)
20 | }
21 | ```
22 |
23 |
24 | Another view from other languages
25 |
26 | Few languages tell us this at compile time like `rust` which prevents us from accessing the same variable on multiple threads, this code causes an error because at compile time a data race is detected, it blocked us from writing to the same variable simultaneously, [mutex code](https://github.com/luk4z7/go-concurrency-guide/blob/main/sync/mutex/rust/mutex/src/main.rs)
27 |
This example returns the following error, [complete code](https://github.com/luk4z7/go-concurrency-guide/blob/main/sync/mutex/rust/dataracefree/src/main.rs)
29 | ```rust
30 | // error -> closure may outlive the current function, but it borrows `vec`, which is owned by the current function
31 | fn main() {
    let mut vec: Vec<i32> = Vec::new();
33 |
34 | thread::spawn(|| {
35 | add_vec(&mut vec);
36 | });
37 |
38 | vec.push(34)
39 | }
40 |
fn add_vec(vec: &mut Vec<i32>) {
42 | vec.push(42);
43 | }
44 | ```
45 |
46 | C Example using C11 `threads.h` implementation over POSIX threads from [c11threads](https://github.com/jtsiomb/c11threads)
47 | ```c++
48 | int increment_the_counter() {
49 | int r = mtx_lock(&mtx);
50 | if (r != 0) return r;
51 |
52 | // With the mutex locked we can safely poke the counter.
53 | the_counter++;
54 |
55 | return mtx_unlock(&mtx);
56 | }
57 |
58 | int read_the_counter(int *value) {
59 | int r = mtx_lock(&mtx);
60 | if (r != 0) return r;
61 |
62 | // With the mutex locked we can safely read the counter.
63 | *value = the_counter;
64 |
65 | return mtx_unlock(&mtx);
66 | }
67 | ```
68 |
69 | [complete c code](https://github.com/luk4z7/go-concurrency-guide/tree/main/sync/mutex/c)
70 |
--------------------------------------------------------------------------------
/sync/mutex/c/Makefile:
--------------------------------------------------------------------------------
# Build the mutex demo, run it, then clean up (build chains into `run`,
# which chains into `clean`).
obj = mutex.o
bin = mutex

CFLAGS = -std=gnu99 -pedantic -Wall -g
LDFLAGS = -lpthread

# Link the binary from the objects, then immediately execute it.
$(bin): $(obj)
	$(CC) -o $@ $(obj) $(LDFLAGS) && make run

# Recompile when the source or the vendored c11threads header changes.
mutex.o: mutex.c c11threads.h

.PHONY: run
run:
	./mutex && make clean

.PHONY: clean
clean:
	rm -f $(obj) $(bin)
19 |
--------------------------------------------------------------------------------
/sync/mutex/c/README.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | make
3 | ```
4 |
5 | output:
6 | ```bash
7 | cc -std=gnu99 -pedantic -Wall -g -c -o mutex.o mutex.c
8 | cc -o mutex mutex.o -lpthread && make run
9 | ./mutex && make clean
10 | start thread test
11 | hello from thread 0
12 | hello from thread 1
13 | the_counter: 2
14 | hello from thread 2
15 | hello from thread 3
16 | the_counter: 4
17 | the_counter: 4
18 | the_counter: 2
19 | thread 1 done
20 | thread 0 done
21 | thread 3 done
22 | thread 2 done
23 | end thread test
24 |
25 | rm -f mutex.o mutex
26 | ```
--------------------------------------------------------------------------------
/sync/mutex/c/c11threads.h:
--------------------------------------------------------------------------------
1 | /*
2 | Author: John Tsiombikas
3 |
4 | I place this piece of code in the public domain. Feel free to use as you see
5 | fit. I'd appreciate it if you keep my name at the top of the code somehwere,
6 | but whatever.
7 |
8 | Main project site: https://github.com/jtsiomb/c11threads
9 | */
10 |
11 | #ifndef C11THREADS_H_
12 | #define C11THREADS_H_
13 |
#include <time.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>	/* for sched_yield */
#include <sys/time.h>
19 |
20 | #define thread_local _Thread_local
21 | #define ONCE_FLAG_INIT PTHREAD_ONCE_INIT
22 |
23 | #ifdef __APPLE__
24 | /* Darwin doesn't implement timed mutexes currently */
25 | #define C11THREADS_NO_TIMED_MUTEX
26 | #endif
27 |
28 | #ifdef C11THREADS_NO_TIMED_MUTEX
29 | #define PTHREAD_MUTEX_TIMED_NP PTHREAD_MUTEX_NORMAL
30 | #define C11THREADS_TIMEDLOCK_POLL_INTERVAL 5000000 /* 5 ms */
31 | #endif
32 |
/* types */
/* C11 thread types mapped one-to-one onto their pthread equivalents. */
typedef pthread_t thrd_t;
typedef pthread_mutex_t mtx_t;
typedef pthread_cond_t cnd_t;
typedef pthread_key_t tss_t;
typedef pthread_once_t once_flag;

/* thread entry point and thread-specific-storage destructor signatures */
typedef int (*thrd_start_t)(void*);
typedef void (*tss_dtor_t)(void*);

/* mutex type flags for mtx_init (bitwise-combinable) */
enum {
	mtx_plain = 0,
	mtx_recursive = 1,
	mtx_timed = 2,
};

/* status codes returned by the thrd_/mtx_/cnd_/tss_ functions */
enum {
	thrd_success,
	thrd_timedout,
	thrd_busy,
	thrd_error,
	thrd_nomem
};
56 |
57 |
/* ---- thread management ---- */

/* Create a new thread running func(arg); translates pthread_create's
   error codes into thrd_* status values. */
static inline int thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
{
	int res = pthread_create(thr, 0, (void*(*)(void*))func, arg);
	if(res == 0) {
		return thrd_success;
	}
	return res == ENOMEM ? thrd_nomem : thrd_error;
}

/* Terminate the calling thread, smuggling the int result through the
   pthread exit pointer. */
static inline void thrd_exit(int res)
{
	pthread_exit((void*)(long)res);
}

/* Join a thread; if res is non-NULL it receives the thread's result. */
static inline int thrd_join(thrd_t thr, int *res)
{
	void *retval;

	if(pthread_join(thr, &retval) != 0) {
		return thrd_error;
	}
	if(res) {
		*res = (int)(long)retval;
	}
	return thrd_success;
}

/* Detach a thread so its resources are reclaimed when it exits. */
static inline int thrd_detach(thrd_t thr)
{
	return pthread_detach(thr) == 0 ? thrd_success : thrd_error;
}

/* Handle of the calling thread. */
static inline thrd_t thrd_current(void)
{
	return pthread_self();
}

/* Non-zero when both handles refer to the same thread. */
static inline int thrd_equal(thrd_t a, thrd_t b)
{
	return pthread_equal(a, b);
}

/* Sleep for the requested duration: 0 on success, -1 if interrupted
   (remainder stored in rem_out when non-NULL), -2 on other errors. */
static inline int thrd_sleep(const struct timespec *ts_in, struct timespec *rem_out)
{
	if(nanosleep(ts_in, rem_out) < 0) {
		if(errno == EINTR) return -1;
		return -2;
	}
	return 0;
}

/* Yield the processor to another ready thread. */
static inline void thrd_yield(void)
{
	sched_yield();
}
115 |
/* ---- mutexes ---- */

/* Initialize a mutex with the given mtx_* type flags; the recursive
   and timed flags map onto the corresponding pthread mutex types. */
static inline int mtx_init(mtx_t *mtx, int type)
{
	int res;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);

	if(type & mtx_timed) {
		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_TIMED_NP);
	}
	if(type & mtx_recursive) {
		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	}

	res = pthread_mutex_init(mtx, &attr) == 0 ? thrd_success : thrd_error;
	pthread_mutexattr_destroy(&attr);
	return res;
}

static inline void mtx_destroy(mtx_t *mtx)
{
	pthread_mutex_destroy(mtx);
}

/* Blocking lock; a detected self-deadlock is reported as thrd_busy. */
static inline int mtx_lock(mtx_t *mtx)
{
	int res = pthread_mutex_lock(mtx);
	if(res == EDEADLK) {
		return thrd_busy;
	}
	return res == 0 ? thrd_success : thrd_error;
}

/* Non-blocking lock attempt; thrd_busy when already held. */
static inline int mtx_trylock(mtx_t *mtx)
{
	int res = pthread_mutex_trylock(mtx);
	if(res == EBUSY) {
		return thrd_busy;
	}
	return res == 0 ? thrd_success : thrd_error;
}

/* Lock with an absolute deadline ts.  Where the platform lacks
   pthread_mutex_timedlock (Darwin), emulate it by polling trylock
   every C11THREADS_TIMEDLOCK_POLL_INTERVAL ns until the deadline. */
static inline int mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
{
	int res;
#ifdef C11THREADS_NO_TIMED_MUTEX
	/* fake a timedlock by polling trylock in a loop and waiting for a bit */
	struct timeval now;
	struct timespec sleeptime;

	sleeptime.tv_sec = 0;
	sleeptime.tv_nsec = C11THREADS_TIMEDLOCK_POLL_INTERVAL;

	while((res = pthread_mutex_trylock(mtx)) == EBUSY) {
		gettimeofday(&now, NULL);

		if(now.tv_sec > ts->tv_sec || (now.tv_sec == ts->tv_sec &&
					(now.tv_usec * 1000) >= ts->tv_nsec)) {
			return thrd_timedout;
		}

		nanosleep(&sleeptime, NULL);
	}
#else
	if((res = pthread_mutex_timedlock(mtx, ts)) == ETIMEDOUT) {
		return thrd_timedout;
	}
#endif
	return res == 0 ? thrd_success : thrd_error;
}

static inline int mtx_unlock(mtx_t *mtx)
{
	return pthread_mutex_unlock(mtx) == 0 ? thrd_success : thrd_error;
}
193 |
/* ---- condition variables ---- */
/* Thin wrappers translating pthread condition-variable results into
   thrd_* status codes. */

static inline int cnd_init(cnd_t *cond)
{
	return pthread_cond_init(cond, 0) == 0 ? thrd_success : thrd_error;
}

static inline void cnd_destroy(cnd_t *cond)
{
	pthread_cond_destroy(cond);
}

/* Wake one waiter. */
static inline int cnd_signal(cnd_t *cond)
{
	return pthread_cond_signal(cond) == 0 ? thrd_success : thrd_error;
}

/* Wake all waiters. */
static inline int cnd_broadcast(cnd_t *cond)
{
	return pthread_cond_broadcast(cond) == 0 ? thrd_success : thrd_error;
}

/* Atomically release mtx and wait; mtx is re-acquired before return. */
static inline int cnd_wait(cnd_t *cond, mtx_t *mtx)
{
	return pthread_cond_wait(cond, mtx) == 0 ? thrd_success : thrd_error;
}

/* As cnd_wait, but gives up at the absolute deadline ts. */
static inline int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
{
	int res;

	if((res = pthread_cond_timedwait(cond, mtx, ts)) != 0) {
		return res == ETIMEDOUT ? thrd_timedout : thrd_error;
	}
	return thrd_success;
}
230 |
/* ---- thread-specific data ---- */

/* Create a TSS key; dtor (may be NULL) runs at thread exit for
   non-NULL stored values. */
static inline int tss_create(tss_t *key, tss_dtor_t dtor)
{
	return pthread_key_create(key, dtor) == 0 ? thrd_success : thrd_error;
}

static inline void tss_delete(tss_t key)
{
	pthread_key_delete(key);
}

/* Set the calling thread's value for key. */
static inline int tss_set(tss_t key, void *val)
{
	return pthread_setspecific(key, val) == 0 ? thrd_success : thrd_error;
}

/* Get the calling thread's value for key (NULL if never set). */
static inline void *tss_get(tss_t key)
{
	return pthread_getspecific(key);
}

/* ---- misc ---- */

/* Run func exactly once process-wide, coordinated through flag. */
static inline void call_once(once_flag *flag, void (*func)(void))
{
	pthread_once(flag, func);
}
259 |
#if __STDC_VERSION__ < 201112L || defined(C11THREADS_NO_TIMED_MUTEX)
/* TODO take base into account */
/* Fallback timespec_get for pre-C11 libcs, built on gettimeofday.
   Declared `static inline`: the original bare `inline` provides no
   external definition in C99/C11, so any call the compiler chooses
   not to inline becomes an undefined-reference link error. */
static inline int timespec_get(struct timespec *ts, int base)
{
	struct timeval tv;

	gettimeofday(&tv, 0);

	ts->tv_sec = tv.tv_sec;
	ts->tv_nsec = tv.tv_usec * 1000;
	return base;
}
#endif /* not C11 */
273 |
274 | #endif /* C11THREADS_H_ */
275 |
--------------------------------------------------------------------------------
/sync/mutex/c/mutex.c:
--------------------------------------------------------------------------------
#include <stdio.h>
2 | #include "c11threads.h"
3 |
4 | // Locks a mutex, blocking if necessary until it becomes free.
5 | // int mtx_lock(mtx_t *mutex);
6 |
7 | // Unlocks a mutex.
8 | // int mtx_unlock(mtx_t *mutex);
9 |
10 | int tfunc(void *arg);
11 |
12 | mtx_t mtx;
13 | mtx_t startup_mtx;
14 | int the_counter;
15 |
/* Spawns four worker threads that exercise the mutex-protected
   counter, then joins them all before exiting. */
int main(void) {
	int i;
	thrd_t threads[4];

	printf("start thread test\n");

	/* each worker receives its index as the thread argument */
	for(i=0; i<4; i++) {
		thrd_create(threads + i, tfunc, (void*)(long)i);
	}

	/* wait for every worker; their return codes are discarded */
	for(i=0; i<4; i++) {
		thrd_join(threads[i], 0);
	}

	printf("end thread test\n\n");

	return 0;
}
34 |
35 | // Code to initialize the_mutex omitted.
36 | int increment_the_counter() {
37 | int r = mtx_lock(&mtx);
38 | if (r != 0) return r;
39 |
40 | // With the mutex locked we can safely poke the counter.
41 | the_counter++;
42 |
43 | return mtx_unlock(&mtx);
44 | }
45 |
46 | int read_the_counter(int *value) {
47 | int r = mtx_lock(&mtx);
48 | if (r != 0) return r;
49 |
50 | // With the mutex locked we can safely read the counter.
51 | *value = the_counter;
52 |
53 | return mtx_unlock(&mtx);
54 | }
55 |
56 | int tfunc(void *arg)
57 | {
58 | int num = (long)arg;
59 | int value = 0;
60 | struct timespec dur;
61 |
62 | mtx_init(&mtx, mtx_timed);
63 | mtx_init(&mtx, mtx_plain);
64 |
65 | increment_the_counter();
66 | printf("hello from thread %d\n", num);
67 |
68 | read_the_counter(&value);
69 | printf("the_counter: %d\n", value);
70 |
71 | dur.tv_sec = 1;
72 | dur.tv_nsec = 0;
73 | thrd_sleep(&dur, 0);
74 |
75 | printf("thread %d done\n", num);
76 |
77 | return 0;
78 | }
79 |
--------------------------------------------------------------------------------
/sync/mutex/c/test.c:
--------------------------------------------------------------------------------
#include <stdio.h>
2 | #include "c11threads.h"
3 |
4 | int tfunc(void *arg);
5 | void run_timed_test();
6 | int hold_mutex_three_seconds(void* arg);
7 |
8 | mtx_t mtx;
9 | mtx_t startup_mtx;
10 |
/* Runs a plain four-thread smoke test, then the timed-mutex test. */
int main(void)
{
	int i;
	thrd_t threads[4];

	printf("start thread test\n");

	/* each worker receives its index as the thread argument */
	for(i=0; i<4; i++) {
		thrd_create(threads + i, tfunc, (void*)(long)i);
	}

	for(i=0; i<4; i++) {
		thrd_join(threads[i], 0);
	}

	printf("end thread test\n\n");

	printf("start timed mutex test\n");
	run_timed_test();
	printf("stop timed mutex test\n");

	return 0;
}
34 |
/* tfunc is a trivial worker: announce, sleep four seconds, announce
   again, return 0. */
int tfunc(void *arg)
{
	int num = (long)arg;
	struct timespec dur;

	printf("hello from thread %d\n", num);

	dur.tv_sec = 4;
	dur.tv_nsec = 0;
	thrd_sleep(&dur, 0);

	printf("thread %d done\n", num);
	return 0;
}
49 |
/* hold_mutex_three_seconds grabs the shared mutex, then releases
   startup_mtx to tell the spawner we hold it, keeps it for three
   seconds, and lets go.
   NOTE(review): unlocking startup_mtx here, a mutex locked by the
   MAIN thread, is undefined behavior for plain pthread mutexes — it
   is being used as a de-facto semaphore; confirm this is intended. */
int hold_mutex_three_seconds(void* arg) {
	struct timespec dur;
	mtx_lock(&mtx);
	mtx_unlock(&startup_mtx);
	dur.tv_sec = 3;
	dur.tv_nsec = 0;
	thrd_sleep(&dur, 0);
	mtx_unlock(&mtx);

	return 0;
}
61 |
62 | void run_timed_test()
63 | {
64 | thrd_t thread;
65 | struct timespec ts;
66 | struct timespec dur;
67 |
68 | mtx_init(&mtx, mtx_timed);
69 | mtx_init(&mtx, mtx_plain);
70 |
71 | mtx_lock(&startup_mtx);
72 | thrd_create(&thread, hold_mutex_three_seconds, &mtx);
73 |
74 | mtx_lock(&startup_mtx);
75 | timespec_get(&ts, 0);
76 | ts.tv_sec = ts.tv_sec + 2;
77 | ts.tv_nsec = 0;
78 | if (mtx_timedlock(&mtx,&ts)==thrd_timedout) {
79 | printf("thread has locked mutex & we timed out waiting for it\n");
80 | }
81 |
82 | dur.tv_sec = 4;
83 | dur.tv_nsec = 0;
84 | thrd_sleep(&dur, 0);
85 |
86 | if (mtx_timedlock(&mtx,&ts)==thrd_success) {
87 | printf("thread no longer has mutex & we grabbed it\n");
88 | }
89 |
90 | mtx_destroy(&mtx);
91 | }
--------------------------------------------------------------------------------
/sync/mutex/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | )
7 |
// count is the shared counter mutated by the goroutines in main.
var count int

// lock guards every access to count.
var lock sync.Mutex
10 |
11 | func main() {
12 |
13 | increment := func() {
14 | lock.Lock()
15 | defer lock.Unlock()
16 | count++
17 | fmt.Printf("Incrementing: %d\n", count)
18 | }
19 |
20 | decrement := func() {
21 | lock.Lock()
22 | defer lock.Unlock()
23 | count--
24 | fmt.Printf("Decrementing: %d\n", count)
25 | }
26 |
27 | // Increment
28 | var arithmetic sync.WaitGroup
29 | for i := 0; i <= 5; i++ {
30 | arithmetic.Add(1)
31 | go func() {
32 | defer arithmetic.Done()
33 | increment()
34 | }()
35 | }
36 |
37 | // Decrement
38 | for i := 0; i <= 5; i++ {
39 | arithmetic.Add(1)
40 | go func() {
41 | defer arithmetic.Done()
42 | decrement()
43 | }()
44 | }
45 |
46 | arithmetic.Wait()
47 | fmt.Println("Arithmetic complete.")
48 | }
--------------------------------------------------------------------------------
/sync/mutex/rust/dataracefree/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3
4 |
5 | [[package]]
6 | name = "dataracefree"
7 | version = "0.1.0"
8 |
--------------------------------------------------------------------------------
/sync/mutex/rust/dataracefree/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "dataracefree"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 |
--------------------------------------------------------------------------------
/sync/mutex/rust/dataracefree/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::thread;
2 |
3 | fn main() {
4 | let mut vec: Vec = Vec::new();
5 |
6 | // ------- value moved into closure here
7 | thread::spawn(move || {
8 | // --- variable moved due to use in closure
9 | add_vec(&mut vec);
10 | });
11 |
12 | // vec.push(34)
13 | // value borrowed here after move
14 | }
15 |
/// Appends the sentinel value 42 to the given vector.
/// (Generic parameter `<i32>` restored — a bare `Vec` in a signature
/// does not compile; it was stripped by HTML extraction.)
fn add_vec(vec: &mut Vec<i32>) {
    vec.push(42);
}
19 |
--------------------------------------------------------------------------------
/sync/mutex/rust/mutex/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3
4 |
5 | [[package]]
6 | name = "mutex"
7 | version = "0.1.0"
8 |
--------------------------------------------------------------------------------
/sync/mutex/rust/mutex/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "mutex"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 |
--------------------------------------------------------------------------------
/sync/mutex/rust/mutex/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::sync::mpsc::channel;
2 | use std::sync::{Arc, Mutex};
3 | use std::thread;
4 |
5 | const N: usize = 10;
6 |
// Spawn a few threads to increment a shared variable (non-atomically), and
// let the main thread know once all increments are done.
//
// Here we're using an Arc to share memory among threads, and the data inside
// the Arc is protected with a mutex.
fn main() {
    let data = Arc::new(Mutex::new(0));

    // The channel is used purely as a completion signal, sent by
    // whichever thread performs the final (N-th) increment.
    let (tx, rx) = channel();
    for _ in 0..N {
        // each thread gets its own Arc handle and Sender clone
        let (data, tx) = (Arc::clone(&data), tx.clone());
        thread::spawn(move || {
            // The shared state can only be accessed once the lock is held.
            // Our non-atomic increment is safe because we're the only thread
            // which can access the shared state when the lock is held.
            //
            // We unwrap() the return value to assert that we are not expecting
            // threads to ever fail while holding the lock.
            let mut data = data.lock().unwrap();
            *data += 1;
            if *data == N {
                tx.send(()).unwrap();
            }
            // the lock is unlocked here when `data` goes out of scope.
        });
    }

    // block until the final increment has been reported
    rx.recv().unwrap();
    println!("value: {:?} \n", data)
}
37 |
--------------------------------------------------------------------------------
/sync/once/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | )
7 |
// main demonstrates sync.Once: although one hundred goroutines call
// once.Do(increment), the increment body runs exactly once, so the
// final count printed is 1.
func main() {
	var count int

	increment := func() {
		count++
	}

	var once sync.Once
	var increments sync.WaitGroup

	const calls = 100
	increments.Add(calls)
	for n := 0; n < calls; n++ {
		go func() {
			defer increments.Done()
			once.Do(increment)
		}()
	}

	increments.Wait()
	fmt.Printf("Count is %d\n", count)

	// circular reference (would deadlock if uncommented):
	// var onceA, onceB sync.Once
	// var initB func()
	// initA := func() { onceB.Do(initB) }
	// initB = func() { onceA.Do(initA) }
	// onceA.Do(initA)
}
--------------------------------------------------------------------------------
/sync/pool/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | )
7 |
// main demonstrates sync.Pool: reusing instances via Get/Put, and how
// warming a pool bounds the number of expensive allocations under a
// large number of concurrent workers.
func main() {
	myPool := &sync.Pool{
		New: func() interface{} {
			fmt.Println("Creating new instance.")
			return struct{}{}
		},
	}

	// Get invokes the New function defined in the pool if there is no
	// instance available
	myPool.Get()
	instance := myPool.Get()
	fmt.Println("instance", instance)

	// putting a previously retrieved instance back makes it available
	// for reuse by later Gets
	myPool.Put(instance)
	// this call reuses the instance put back above
	myPool.Get()

	// numCalcsCreated is incremented from New, which the worker
	// goroutines below invoke concurrently through Get — the original
	// unsynchronized `numCalcsCreated += 1` was a data race, so the
	// counter is now guarded by a mutex.
	var numCalcsCreated int
	var numCalcsMu sync.Mutex
	calcPool := &sync.Pool{
		New: func() interface{} {
			numCalcsMu.Lock()
			numCalcsCreated++
			numCalcsMu.Unlock()

			mem := make([]byte, 1024)
			return &mem
		},
	}

	fmt.Println("calcPool.New", calcPool.New())

	// warm the pool with four buffers (plus the direct New above)
	calcPool.Put(calcPool.New())
	calcPool.Put(calcPool.New())
	calcPool.Put(calcPool.New())
	calcPool.Put(calcPool.New())

	calcPool.Get()

	const numWorkers = 1024 * 1024
	var wg sync.WaitGroup
	wg.Add(numWorkers)
	for i := numWorkers; i > 0; i-- {
		go func() {
			defer wg.Done()

			mem := calcPool.Get().(*[]byte)
			defer calcPool.Put(mem)

			// Assume something interesting, but quick is being done with
			// this memory.
		}()
	}

	// wg.Wait establishes happens-before for the final read below
	wg.Wait()
	fmt.Printf("%d calculators were created.", numCalcsCreated)
}
--------------------------------------------------------------------------------
/sync/pool/network.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "net"
7 | "sync"
8 | "time"
9 | )
10 |
// connectToService simulates dialing a slow downstream service by
// sleeping for one second, then returns a placeholder connection value.
func connectToService() interface{} {
	time.Sleep(time.Second)
	return struct{}{}
}
16 |
17 | func warmServiceConnCache() *sync.Pool {
18 | p := &sync.Pool{
19 | New: connectToService,
20 | }
21 |
22 | for i := 0; i < 10; i++ {
23 | p.Put(p.New())
24 | }
25 |
26 | return p
27 | }
28 |
// startNetworkDaemon2 serves on localhost:8081 using a pre-warmed
// connection pool: each accepted request borrows a service connection
// from the pool instead of dialing a fresh one.  The returned
// WaitGroup completes once the listener is accepting connections.
func startNetworkDaemon2() *sync.WaitGroup {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		connPool := warmServiceConnCache()

		server, err := net.Listen("tcp", "localhost:8081")
		if err != nil {
			log.Fatalf("cannot listen: %v", err)
		}
		defer server.Close()

		// signal readiness only after Listen has succeeded
		wg.Done()

		for {
			conn, err := server.Accept()
			if err != nil {
				log.Printf("cannot accept connection: %v", err)
				continue
			}

			// borrow a pooled "connection" for the duration of the request
			svcConn := connPool.Get()
			fmt.Fprintln(conn, "")
			connPool.Put(svcConn)
			conn.Close()
		}
	}()

	return &wg
}
59 |
// startNetworkDaemon1 serves on localhost:8080 and pays the full
// (simulated) service-dial cost on every request — the baseline the
// pooled daemon above is benchmarked against.  The returned WaitGroup
// completes once the listener is accepting connections.
func startNetworkDaemon1() *sync.WaitGroup {
	var wg sync.WaitGroup
	wg.Add(1)

	go func() {
		server, err := net.Listen("tcp", "localhost:8080")
		if err != nil {
			log.Fatalf("cannot listen: %v", err)
		}
		defer server.Close()

		// signal readiness only after Listen has succeeded
		wg.Done()

		for {
			conn, err := server.Accept()
			if err != nil {
				log.Printf("cannot accept connection: %v", err)
				continue
			}

			// dial (simulate) a fresh service connection per request
			connectToService()
			fmt.Fprintln(conn, "")
			conn.Close()
		}
	}()

	return &wg
}
--------------------------------------------------------------------------------
/sync/pool/network_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io/ioutil"
5 | "net"
6 | "testing"
7 | )
8 |
// init starts both network daemons before any benchmark runs and
// blocks until each listener is accepting connections.
func init() {
	daemonStarted1 := startNetworkDaemon1()
	daemonStarted1.Wait()

	daemonStarted2 := startNetworkDaemon2()
	daemonStarted2.Wait()
}
16 |
// BenchmarkNetworkRequest1 measures request latency against the daemon
// that dials a fresh service connection per request (port 8080).
func BenchmarkNetworkRequest1(b *testing.B) {
	for i := 0; i < b.N; i++ {
		conn, err := net.Dial("tcp", "localhost:8080")
		if err != nil {
			b.Fatalf("cannot dial host: %v", err)
		}

		// read to EOF so the server side completes the exchange
		if _, err := ioutil.ReadAll(conn); err != nil {
			b.Fatalf("cannot read: %v", err)
		}

		conn.Close()
	}
}
31 |
// BenchmarkNetworkRequest2 measures request latency against the daemon
// backed by the warmed connection pool (port 8081), for comparison
// with the per-request-dial baseline above.
func BenchmarkNetworkRequest2(b *testing.B) {
	for i := 0; i < b.N; i++ {
		conn, err := net.Dial("tcp", "localhost:8081")
		if err != nil {
			b.Fatalf("cannot dial host: %v", err)
		}

		// read to EOF so the server side completes the exchange
		if _, err := ioutil.ReadAll(conn); err != nil {
			b.Fatalf("cannot read: %v", err)
		}

		conn.Close()
	}
}
--------------------------------------------------------------------------------
/sync/rwmutex/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "os"
7 | "sync"
8 | "text/tabwriter"
9 | "time"
10 | )
11 |
// main benchmarks RWMutex read-locking against plain Mutex locking for
// an exponentially growing number of reader goroutines and prints a
// comparison table.
func main() {
	// sync.Locker has two methods, Lock and Unlock.
	// producer briefly takes and releases the write lock five times.
	// NOTE: time.Sleep(1) pauses for one *nanosecond* (the original
	// comment claimed one second); it merely yields so the producer is
	// the less active party.
	producer := func(wg *sync.WaitGroup, l sync.Locker) {
		defer wg.Done()
		for i := 5; i > 0; i-- {
			l.Lock()
			l.Unlock()
			time.Sleep(1) // less active; 1 ns pause between lock cycles
		}
	}

	// observer acquires and releases the supplied lock exactly once.
	observer := func(wg *sync.WaitGroup, l sync.Locker) {
		defer wg.Done()
		l.Lock()
		defer l.Unlock()
	}

	// test times one producer against `count` observers using the
	// supplied locks and returns the elapsed wall time.
	test := func(count int, mutex, rwMutex sync.Locker) time.Duration {
		var wg sync.WaitGroup
		wg.Add(count + 1)

		beginTestTime := time.Now()

		go producer(&wg, mutex)

		for i := count; i > 0; i-- {
			go observer(&wg, rwMutex)
		}

		wg.Wait()

		return time.Since(beginTestTime)
	}

	tw := tabwriter.NewWriter(os.Stdout, 0, 1, 2, ' ', 0)
	defer tw.Flush()

	var m sync.RWMutex
	// header label fixed: the original printed "RWMutext"
	fmt.Fprintf(tw, "Readers\tRWMutex\tMutex\n")
	for i := 0; i < 20; i++ {
		count := int(math.Pow(2, float64(i)))
		fmt.Fprintf(
			tw,
			"%d\t%v\t%v\n",
			count,
			test(count, &m, m.RLocker()), // observers share the read lock
			test(count, &m, &m),          // observers contend on the write lock
		)
	}
}
--------------------------------------------------------------------------------