├── .gitignore
├── LICENSE
├── README.md
├── batch.go
├── batch_items.go
├── batch_test.go
├── batch_utils.go
├── consumer.go
├── example
├── go.mod
├── go.sum
└── main.go
├── logger
└── logger.go
├── options.go
├── producer.go
├── semaphore.go
└── supply.go
/.gitignore:
--------------------------------------------------------------------------------
1 | vendor
2 | go-batch
3 | goreportcard-cli
4 | go-batch-go-tmp-umask
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Deeptiman Pattnaik
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # go-batch
2 | go-batch is a batch processing library written in Go. The process execution has multiple stages to release a Batch to the client.
3 |
4 |  [](https://goreportcard.com/report/github.com/Deeptiman/go-batch)
5 |
6 | ## Features
7 |
8 | 1. Client can use this library as an asynchronous batch processing for their application use case.
9 | 2. There are no restrictions on applying batch processing matrices to the library. The client can define the maximum no of items for a batch using the BatchOptions.
10 | 3. The library has a Workerpool that speeds up batch processing in concurrent scenarios.
11 |
12 | ## Demo
13 |
14 | [](https://asciinema.org/a/2vi5gAHjsuTrB3tCBTGeSW6hq)
15 |
16 | ## Stages
17 |
18 | 1. Batch Reader receives the resource payload from the client and marshals the payload item into the BatchItem object.
19 |
20 | ``````````````````````````
21 | type BatchItems struct {
22 | Id int
23 | BatchNo int
24 | Item interface{}
25 | }
26 |
27 | ``````````````````````````
28 | 2. BatchProducer has a Watcher channel that receives the marshalled payload from the Batch reader. Watcher marks each BatchItem with a BatchNo and adds it to the []BatchItems array. After the batch itemCounter++ increases to the MaxItems [DefaultMaxItems: 100], the Batch gets
29 | released to the Consumer callback function.
30 |
31 | 3. BatchConsumer has a ConsumerFunc that gets invoked by BatchProducer as a callback function to send the prepared []BatchItems arrays. Then, the Consumer channel sends the
32 | []BatchItems to the Worker channel.
33 |
34 | 4. Workerline is a sync.WaitGroup that synchronizes the workers to send the []BatchItems to the supply chain.
35 |
36 | 5. BatchSupplyChannel works as a bidirectional channel that requests the []BatchItems from the Workerline and receives them in the response.
37 |
38 | 6. ClientSupplyChannel is the delivery channel that works as a supply line to send the []BatchItems, which the client receives by listening to the channel.
39 |
40 |
41 | ## Go Docs
42 |
43 | Documentation at pkg.go.dev
44 |
45 | ## Installation
46 |
47 | go get github.com/Deeptiman/go-batch
48 |
49 | ## Example
50 |
51 | ````````````````````````````````````````````````````
52 | b := batch.NewBatch(batch.WithMaxItems(100))
53 | go b.StartBatchProcessing()
54 |
55 | for i := 1; i <= 1000; i++ {
56 | b.Item <- &Resources{
57 | id: i,
58 | name: fmt.Sprintf("%s%d", "R-", i),
59 | flag: false,
60 | }
61 | }
62 | b.Close()
63 | ````````````````````````````````````````````````````
64 |
65 | ## Note
66 | - In this version release, the library doesn't support starting concurrent BatchProcessing sessions.
67 |
68 | ## License
69 |
70 | This project is licensed under the MIT License
71 |
--------------------------------------------------------------------------------
/batch.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | import (
4 | log "github.com/Deeptiman/go-batch/logger"
5 | "time"
6 | )
7 |
// Batch struct defines the structure payload for a Batch.
//
// Item: channel that receives the resource objects from the client.
// Id: each item a client sends for processing is marked with an incrementing Id.
// Semaphore: read/write locking handled by the Semaphore object; it synchronizes a batch processing session.
// Islocked: set to true while a batch processing session runs, restricting concurrent batch processing.
// Producer: the BatchItem objects are sent to the Producer for further processing.
// Consumer: the Consumer arranges the prepared []BatchItems for the Workerline.
// Log: the library logs through its logrus-based Logger ("github.com/sirupsen/logrus").
type Batch struct {
	Item      chan interface{}
	Id        int
	Semaphore *Semaphore
	Islocked  bool
	Producer  *BatchProducer
	Consumer  *BatchConsumer
	Log       *log.Logger
}
26 |
27 | // NewBatch creates a new Batch object with BatchProducer & BatchConsumer. The BatchOptions
28 | // sets the MaxItems for a batch and maximum wait time for a batch to complete set by MaxWait.
29 | func NewBatch(opts ...BatchOptions) *Batch {
30 |
31 | b := &Batch{
32 | Item: make(chan interface{}),
33 | Log: log.NewLogger(),
34 | }
35 |
36 | c := NewBatchConsumer()
37 |
38 | p := NewBatchProducer(c.ConsumerFunc)
39 |
40 | for _, opt := range opts {
41 | opt(p)
42 | }
43 |
44 | b.Producer = p
45 | b.Consumer = c
46 | b.Semaphore = NewSemaphore(int(p.MaxItems))
47 |
48 | items = make([]BatchItems, 0, p.MaxItems)
49 |
50 | return b
51 | }
52 |
53 | // StartBatchProcessing function to begin the BatchProcessing library and to start the Producer/
54 | // Consumer listeners. The ReadItems goroutine will receive the item from a source that keeps
55 | // listening infinitely.
56 | func (b *Batch) StartBatchProcessing() {
57 |
58 | b.Semaphore.Lock()
59 | defer b.Semaphore.Unlock()
60 |
61 | if b.Islocked {
62 | panic("Concurrent batch processing is not allowed!")
63 | }
64 |
65 | go b.Producer.WatchProducer()
66 | go b.Consumer.StartConsumer()
67 | go b.ReadItems()
68 | }
69 |
// Unlock clears the Islocked flag so that another batch processing
// iteration is allowed to start.
func (b *Batch) Unlock() {
	b.Islocked = false
}
74 |
75 | // ReadItems function will run infinitely to listen to the Resource channel and the received
76 | // object marshaled with BatchItem and then send to the Producer Watcher channel for further
77 | // processing.
78 | func (b *Batch) ReadItems() {
79 |
80 | b.Islocked = true
81 |
82 | for {
83 |
84 | select {
85 | case item := <-b.Item:
86 | b.Id++
87 | go func(item interface{}) {
88 | b.Producer.Watcher <- &BatchItems{
89 | Id: b.Id,
90 | Item: item,
91 | }
92 | }(item)
93 | time.Sleep(time.Duration(100) * time.Millisecond)
94 | }
95 | }
96 | }
97 |
// SetDebugLogLevel switches the internal logger from the default Info
// level to Debug.
func (b *Batch) SetDebugLogLevel() {
	b.Log.SetLogLevel(log.Debug)
}
102 |
// StopProducer signals the Producer's Quit channel to exit the Producer line.
func (b *Batch) StopProducer() {
	b.Producer.Quit <- true
}
107 |
// Stop launches the StopProducer goroutine to quit the processing pipeline.
func (b *Batch) Stop() {
	go b.StopProducer()
}
112 |
113 | // Close is the exit function to terminate the batch processing.
114 | func (b *Batch) Close() {
115 | //b.Log.WithFields(log.Fields{"Remaining Items": len(items)}).Warn("CheckRemainingItems")
116 | b.Log.Infoln("CheckRemainingItems", "Remaining=", len(items))
117 |
118 | done := make(chan bool)
119 |
120 | go b.Producer.CheckRemainingItems(done)
121 |
122 | select {
123 | case <-done:
124 | b.Log.Warn("Done")
125 | b.Semaphore.Lock()
126 | b.Stop()
127 | close(b.Item)
128 | b.Islocked = false
129 | b.Semaphore.Unlock()
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
/batch_items.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
// BatchItems defines a single batch item payload: a per-item Id, the overall
// BatchNo of the batch it was released in, and the client's original item.
type BatchItems struct {
	Id      int         `json:"id"`
	BatchNo int         `json:"batchNo"`
	Item    interface{} `json:"item"`
}
9 |
--------------------------------------------------------------------------------
/batch_test.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | import (
4 | "fmt"
5 | //"time"
6 | "testing"
7 | )
8 |
// Resources is the sample client payload type used by the tests.
type Resources struct {
	id   int
	name string
	flag bool
}
15 |
// TestBatch exercises the end-to-end batch pipeline.
func TestBatch(t *testing.T) {

	// Batch-1: feed more items (15) than one batch holds (10) so the
	// remainder must be flushed by Close.
	t.Run("Batch-1", func(t *testing.T) {
		t.Parallel()

		mFlag := 10
		rFlag := 15

		b := NewBatch(WithMaxItems(uint64(mFlag)))

		b.StartBatchProcessing()

		for i := 1; i <= rFlag; i++ {
			b.Item <- &Resources{
				id:   i,
				name: fmt.Sprintf("%s%d", "R-", i),
				flag: false,
			}
		}
		b.Close()
	})

	// Batch-2: a second StartBatchProcessing on a running Batch is expected
	// to panic, since concurrent sessions are not supported.
	t.Run("Batch-2", func(t *testing.T) {
		t.Parallel()

		mFlag := 10
		rFlag := 100

		b := NewBatch(WithMaxItems(uint64(mFlag)))

		b.StartBatchProcessing()
		for i := 1; i <= rFlag; i++ {
			b.Item <- &Resources{
				id:   i,
				name: fmt.Sprintf("%s%d", "R-", i),
				flag: false,
			}
		}

		// panics records whether the nested session was rejected.
		var panics bool
		func() {
			defer func() {
				if r := recover(); r != nil {
					t.Log("Recover--", r)
					panics = true
				}
			}()

			b.StartBatchProcessing()
			for i := 1; i <= rFlag; i++ {
				b.Item <- &Resources{
					id:   i,
					name: fmt.Sprintf("%s%d", "R-", i),
					flag: false,
				}
			}
		}()

		if !panics {
			t.Error("Testing Concurrent batch processing should fail")
		}

		b.Close()
	})
}
81 |
82 | func benchmarkBatch(numResources, maxItems int, b *testing.B) {
83 | for n := 0; n < b.N; n++ {
84 |
85 | b := NewBatch(WithMaxItems(uint64(maxItems)))
86 |
87 | b.StartBatchProcessing()
88 | for i := 1; i <= numResources; i++ {
89 | b.Item <- &Resources{
90 | id: i,
91 | name: fmt.Sprintf("%s%d", "R-", i),
92 | flag: false,
93 | }
94 | }
95 | b.Close()
96 | }
97 | }
98 |
// BenchmarkBatchR100M5 benchmarks 100 resources with batch size 5.
func BenchmarkBatchR100M5(b *testing.B) {
	benchmarkBatch(100, 5, b)
}

// BenchmarkBatchR10M5 benchmarks 10 resources with batch size 5.
func BenchmarkBatchR10M5(b *testing.B) {
	benchmarkBatch(10, 5, b)
}

// BenchmarkBatchR100M10 benchmarks 100 resources with batch size 10.
func BenchmarkBatchR100M10(b *testing.B) {
	benchmarkBatch(100, 10, b)
}
110 |
// BenchmarkBatch runs a set of sub-benchmarks over increasing workloads.
func BenchmarkBatch(b *testing.B) {

	b.ResetTimer()
	b.Run("Bench-1", func(b *testing.B) {
		benchmarkBatch(100, 5, b)
	})

	b.Run("Bench-2", func(b *testing.B) {
		benchmarkBatch(500, 50, b)
	})

	b.Run("Bench-3", func(b *testing.B) {
		benchmarkBatch(3000, 300, b)
	})

	b.Run("Bench-4", func(b *testing.B) {
		benchmarkBatch(10000, 100, b)
	})
}
130 |
--------------------------------------------------------------------------------
/batch_utils.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | import "encoding/json"
4 |
// getJsonString marshals item to its JSON representation. On a marshal
// failure it returns the empty string; previously the error was silently
// discarded and a partial result could leak through.
func getJsonString(item interface{}) string {
	batchItem, err := json.Marshal(item)
	if err != nil {
		return ""
	}
	return string(batchItem)
}
9 |
--------------------------------------------------------------------------------
/consumer.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | import (
4 | "context"
5 | log "github.com/Deeptiman/go-batch/logger"
6 | "os"
7 | "os/signal"
8 | "sync"
9 | "syscall"
10 | )
11 |
12 | var (
13 | DefaultWorkerPool = 10
14 | )
15 |
// BatchConsumer defines the Consumer line for batch processing. It owns the
// Workerline that manages concurrent delivery when large sets of []BatchItems
// must be sent to the client.
//
// ConsumerCh: receives the []BatchItems from the Producer line.
// BatchWorkerCh: worker channel feeding the pool managed by the Workerline [sync.WaitGroup].
// Supply: the final stage that delivers []BatchItems to the client.
// Workerline: WaitGroup that synchronizes the workers sending []BatchItems to the supply chain.
// TerminateCh: listens for os.Signal so processing can terminate on a graceful shutdown.
// Quit: exit channel that ends the Consumer loop.
// Log: logrus-based logger used by the library.
type BatchConsumer struct {
	ConsumerCh    chan []BatchItems
	BatchWorkerCh chan []BatchItems
	Supply        *BatchSupply
	Workerline    *sync.WaitGroup
	TerminateCh   chan os.Signal
	Quit          chan bool
	Log           *log.Logger
}
35 |
// BatchSupply defines the supply line for the final delivery of []BatchItems
// to the client.
//
// BatchSupplyCh: bidirectional channel used to request []BatchItems from the Workerline and receive them in response.
// ClientSupplyCh: delivery channel that carries released []BatchItems; the client receives them by listening on it.
type BatchSupply struct {
	BatchSupplyCh  chan chan []BatchItems
	ClientSupplyCh chan []BatchItems
}
44 |
45 | // NewBatchConsumer defines several types of production channels those are works at a different
46 | // stages to release a Batch to the client. The ConsumerCh received the Batch and send it to the
47 | // Workers channel. Then, the Workerline arranges the worker under a waitGroup to release the Batch
48 | // to the Supply channel.
49 | //
50 | //
51 | // The BatchSupply has a bidirectional channel that requests a Batch from
52 | // the Worker channel and receives a Batch via response channel. Also, BatchSupply has a Client
53 | // channel that sends the released Batch to the Client. The client needs to listen to the ClientSupplyCh
54 | // to receive batch instantly.
55 | func NewBatchConsumer() *BatchConsumer {
56 |
57 | return &BatchConsumer{
58 | ConsumerCh: make(chan []BatchItems, 1),
59 | BatchWorkerCh: make(chan []BatchItems, DefaultWorkerPool),
60 | Supply: NewBatchSupply(),
61 | Workerline: &sync.WaitGroup{},
62 | TerminateCh: make(chan os.Signal, 1),
63 | Quit: make(chan bool, 1),
64 | Log: log.NewLogger(),
65 | }
66 | }
67 |
68 | // NewBatchSupply will create the BatchSupply object that has two sets of supply channels. The
69 | // BatchSupplyCh will work as a bidirectional channel to request for a []BatchItems from the
70 | // Workerline and gets the batch items from the response channel. The ClientSupplyCh will send
71 | // received the []BatchItems from the BatchSupplyCh to the client.
72 | func NewBatchSupply() *BatchSupply {
73 | return &BatchSupply{
74 | BatchSupplyCh: make(chan chan []BatchItems, 100),
75 | ClientSupplyCh: make(chan []BatchItems, 1),
76 | }
77 | }
78 |
79 | // StartConsumer will create the Wokerpool [DefaultWorkerPool: 10] to handle the large set of
80 | // []BatchItems that gets created fequently in highly concurrent scenarios. Also, starts the
81 | // ConsumerCh channel listener to the incoming []BatchItems from the Producer line.
82 | //
83 | // signal.Notify(c.TerminateCh, syscall.SIGINT, syscall.SIGTERM)
84 | // <-c.TerminateCh
85 | //
86 | // To handle the graceful shutdown, the BatchConsumer supports os.Signal. So, the TerminateCh
87 | // works as a terminate channel in case of certain os.Signal received [syscall.SIGINT, syscall.SIGTERM].
88 | // This logic will help the Workerline to complete the remaining work before going for a shutdown.
89 | func (c *BatchConsumer) StartConsumer() {
90 |
91 | ctx, cancel := context.WithCancel(context.Background())
92 |
93 | go c.ConsumerBatch(ctx)
94 |
95 | c.Workerline.Add(DefaultWorkerPool)
96 | for i := 0; i < DefaultWorkerPool; i++ {
97 | go c.WorkerFunc(i)
98 | }
99 |
100 | signal.Notify(c.TerminateCh, syscall.SIGINT, syscall.SIGTERM)
101 | <-c.TerminateCh
102 | cancel()
103 | os.Exit(0)
104 | c.Workerline.Wait()
105 | }
106 |
// ConsumerFunc is the callback invoked by the Producer line with a released
// []BatchItems; it forwards the batch to the ConsumerCh for further
// processing.
func (c *BatchConsumer) ConsumerFunc(items []BatchItems) {
	c.ConsumerCh <- items
}
112 |
113 | // ConsumerBatch has the <-c.ConsumerCh receive channel to receives the newly created []BatchItems.
114 | // After that, the []BatchItems gets send to the WorkerCh to send the batch item to the supply line.
115 | //
116 | // This also supports the termination of the Consumer line in case of graceful shutdown or to exit
117 | // the batch processing forcefully.
118 | //
119 | // <-ctx.Done(): get called during a graceful shutdown scenarios and closes the worker channel
120 | // <-c.Quit: Exit the batch processing during a forceful request from the client.
121 | func (c *BatchConsumer) ConsumerBatch(ctx context.Context) {
122 |
123 | for {
124 | select {
125 | case batchItems := <-c.ConsumerCh:
126 | c.Log.Infoln("BatchConsumer", "Receive Batch Items:", len(batchItems))
127 |
128 | c.BatchWorkerCh <- batchItems
129 | case <-ctx.Done():
130 | c.Log.Warn("Request cancel signal received!")
131 | close(c.BatchWorkerCh)
132 | return
133 | case <-c.Quit:
134 | c.Log.Warn("Quit BatchConsumer")
135 | close(c.BatchWorkerCh)
136 | return
137 | }
138 | }
139 | }
140 |
141 | // WorkerFunc is the final production of []BatchItems. Each WorkerChannel sends their released
142 | // []BatchItems to the SupplyChannel.
143 | func (c *BatchConsumer) WorkerFunc(index int) {
144 | defer c.Workerline.Done()
145 |
146 | for batch := range c.BatchWorkerCh {
147 |
148 | c.Log.Debugln("Workerline", "Worker=", index, "Batch=", len(batch))
149 |
150 | go c.GetBatchSupply()
151 |
152 | select {
153 | case supplyCh := <-c.Supply.BatchSupplyCh:
154 | supplyCh <- batch
155 | }
156 | }
157 | }
158 |
// Shutdown blocks until an os.Signal [syscall.SIGINT, syscall.SIGTERM] is
// received on the TerminateCh, supporting a graceful shutdown.
// NOTE(review): the warning is logged before the signal arrives, and
// signal.Notify is also registered in StartConsumer — confirm both
// registrations are intended.
func (c *BatchConsumer) Shutdown() {

	c.Log.Warn("Shutdown signal received!")
	signal.Notify(c.TerminateCh, syscall.SIGINT, syscall.SIGTERM)
	<-c.TerminateCh
}
165 |
--------------------------------------------------------------------------------
/example/go.mod:
--------------------------------------------------------------------------------
1 | module go-batch/example
2 |
3 | go 1.13
4 |
5 | require (
6 | github.com/Deeptiman/go-batch v0.0.0-20210418211829-87cf3272ea64
7 | github.com/sirupsen/logrus v1.8.1
8 | )
9 |
--------------------------------------------------------------------------------
/example/go.sum:
--------------------------------------------------------------------------------
1 | github.com/Deeptiman/go-batch v0.0.0-20210418211829-87cf3272ea64 h1:aQJfxdsXOay/cL+ZpIrgaBrBr0cVM8XFYxlPqgqpgJE=
2 | github.com/Deeptiman/go-batch v0.0.0-20210418211829-87cf3272ea64/go.mod h1:ign/MnzCAlatt1t3OV4ei6bS7b9AK6j2KfViPjT369E=
3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
5 | github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
6 | github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
7 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
8 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
9 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
10 |
--------------------------------------------------------------------------------
/example/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | log "github.com/sirupsen/logrus"
7 | batch "github.com/Deeptiman/go-batch"
8 | )
9 |
// Resources is the sample client payload type used by the example.
type Resources struct {
	id   int
	name string
	flag bool
}
16 |
17 | func main() {
18 |
19 | var rFlag, mFlag int
20 | flag.IntVar(&rFlag, "r", 10, "No of resources")
21 | flag.IntVar(&mFlag, "m", 10, "Maximum items")
22 | flag.Parse()
23 |
24 | logs := log.New()
25 |
26 | logs.Infoln("Batch Processing Example !")
27 |
28 | b := batch.NewBatch(batch.WithMaxItems(uint64(mFlag)))
29 |
30 | b.StartBatchProcessing()
31 |
32 | // go func() {
33 |
34 | // select {
35 | // case <-time.After(time.Duration(2) * time.Second):
36 | // fmt.Println("Run Batch processing again!")
37 | // b.StartBatchProcessing()
38 | // }
39 |
40 | // }()
41 |
42 | go func() {
43 |
44 | // Infinite loop to listen to the Consumer Client Supply Channel that releases
45 | // the []BatchItems for each iteration.
46 | for {
47 | for bt := range b.Consumer.Supply.ClientSupplyCh {
48 | logs.WithFields(log.Fields{"Batch": bt}).Warn("Client")
49 | }
50 | }
51 | }()
52 |
53 | for i := 1; i <= rFlag; i++ {
54 | b.Item <- &Resources{
55 | id: i,
56 | name: fmt.Sprintf("%s%d", "R-", i),
57 | flag: false,
58 | }
59 | }
60 | b.Close()
61 | }
--------------------------------------------------------------------------------
/logger/logger.go:
--------------------------------------------------------------------------------
1 | package logger
2 |
3 | import (
4 | "github.com/sirupsen/logrus"
5 | )
6 |
// LogLevel selects the logger verbosity.
type LogLevel int

const (
	// Info is the default level; Debug enables debug-level output.
	Info LogLevel = iota
	Debug
)

// Logger is a thin wrapper around a logrus.Logger.
type Logger struct {
	log *logrus.Logger
}

// NewLogger creates a Logger with colored text output.
func NewLogger() *Logger {
	log := logrus.New()

	// NOTE(review): DisableTimestamp is true while TimestampFormat and
	// FullTimestamp are also set, so the timestamp settings have no
	// effect — confirm which behavior is intended.
	log.SetFormatter(&logrus.TextFormatter{
		DisableColors:    false,
		ForceColors:      true,
		DisableTimestamp: true,
		TimestampFormat:  "2006-01-02 15:04:05",
		FullTimestamp:    true,
	})

	return &Logger{
		log: log,
	}
}
33 |
// SetLogLevel raises the underlying logrus level to Debug when level is
// Debug; any other value leaves the default level untouched.
func (l *Logger) SetLogLevel(level LogLevel) {
	if level == Debug {
		l.log.Level = logrus.DebugLevel
	}
}
39 |
// Trace is a no-op placeholder; trace-level logging is not implemented.
func (l *Logger) Trace(format string, args ...interface{}) {

}
43 |
// Debug logs args at debug level.
func (l *Logger) Debug(args ...interface{}) {
	l.log.Debug(args...)
}

// Debugf logs a formatted message at debug level.
func (l *Logger) Debugf(format string, args ...interface{}) {
	l.log.Debugf(format, args...)
}

// Debugln logs args at debug level, space-separated and newline-terminated.
func (l *Logger) Debugln(args ...interface{}) {
	l.log.Debugln(args...)
}

// Info logs args at info level.
func (l *Logger) Info(args ...interface{}) {
	l.log.Info(args...)
}

// Infof logs a formatted message at info level.
func (l *Logger) Infof(format string, args ...interface{}) {
	l.log.Infof(format, args...)
}

// Infoln logs args at info level, space-separated and newline-terminated.
func (l *Logger) Infoln(args ...interface{}) {
	l.log.Infoln(args...)
}
67 |
68 | func (l *Logger) Warn(format string, args ...interface{}) {
69 | l.log.Warn(args...)
70 | }
71 |
72 | func (l *Logger) Warnf(format string, args ...interface{}) {
73 | l.log.Warnf(format, args...)
74 | }
75 |
76 | func (l *Logger) Warnln(format string, args ...interface{}) {
77 | l.log.Warnln(args...)
78 | }
79 |
80 | func (l *Logger) Fatal(format string, args ...interface{}) {
81 | l.log.Fatal(args...)
82 | }
83 |
84 | func (l *Logger) Fatalf(format string, args ...interface{}) {
85 | l.log.Fatalf(format, args...)
86 | }
87 |
88 | func (l *Logger) Fatalln(format string, args ...interface{}) {
89 | l.log.Fatalln(args...)
90 | }
91 |
92 | func (l *Logger) Error(format string, args ...interface{}) {
93 | l.log.Error(args...)
94 | }
95 |
96 | func (l *Logger) Errorf(format string, args ...interface{}) {
97 | l.log.Errorf(format, args...)
98 | }
99 |
100 | func (l *Logger) Errorln(format string, args ...interface{}) {
101 | l.log.Errorln(args...)
102 | }
103 |
// WithField creates a logrus entry carrying the given field.
// NOTE(review): the returned *logrus.Entry is discarded, so this method
// currently has no observable effect — confirm whether it should return
// the entry or log through it.
func (l *Logger) WithField(key string, value interface{}) {
	l.log.WithField(key, value)
}

// WithFields creates a logrus entry carrying the given fields.
// NOTE(review): the returned *logrus.Entry is discarded, so this method
// currently has no observable effect.
func (l *Logger) WithFields(fields logrus.Fields) {
	l.log.WithFields(fields)
}
111 |
--------------------------------------------------------------------------------
/options.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | import "time"
4 |
// BatchOptions is a functional option applied to a BatchProducer at
// construction time.
type BatchOptions func(b *BatchProducer)

// WithMaxItems sets the maximum number of items packed into a single batch.
func WithMaxItems(maxItems uint64) BatchOptions {
	return func(b *BatchProducer) {
		b.MaxItems = maxItems
	}
}

// WithMaxWait sets the maximum time to wait before a partial batch is released.
func WithMaxWait(maxWait time.Duration) BatchOptions {
	return func(b *BatchProducer) {
		b.MaxWait = maxWait
	}
}
18 |
--------------------------------------------------------------------------------
/producer.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | import (
4 | "sync/atomic"
5 | "time"
6 | log "github.com/Deeptiman/go-batch/logger"
7 | )
8 |
var (
	DefaultMaxItems = uint64(100)                     // maximum no of items packed inside a Batch
	DefaultMaxWait  = time.Duration(30) * time.Second // seconds to wait before a partial batch is released
	DefaultBatchNo  = int32(1)                        // starting batch number

	// items is the package-level buffer of pending batch items shared by
	// the producer; this shared state is why only one batch processing
	// session may run at a time.
	items []BatchItems
)
16 |
// ConsumerFunc is the callback function type invoked by the Producer to hand
// a released []BatchItems to the Consumer.
type ConsumerFunc func(items []BatchItems)

// BatchProducer defines the Producer fields required to create a []BatchItems object.
//
// Watcher: receiver channel that gets the marshalled BatchItems object from the Batch reader.
// MaxItems: maximum number of BatchItems packed into a released Batch.
// BatchNo: every released []BatchItems is marked with this integer batch number.
// MaxWait: timeout after which a partially filled batch is released.
// ConsumerFunc: callback invoked to deliver a released batch to the Consumer.
// Quit: exit channel that ends the Producer loop.
// Log: logrus-based logger used by the library.
type BatchProducer struct {
	Watcher      chan *BatchItems
	MaxItems     uint64
	BatchNo      int32
	MaxWait      time.Duration
	ConsumerFunc ConsumerFunc
	Quit         chan bool
	Log          *log.Logger
}
38 |
39 | // NewBatchProducer defines the producer line for creating a Batch. There will be a Watcher
40 | // channel that receives the incoming BatchItem from the source. The ConsumerFunc works as a
41 | // callback function to the Consumer line to release the newly created set of BatchItems.
42 | //
43 | //
44 | // Each Batch is registered with a BatchNo that gets created when the Batch itemCounter++ increases
45 | // to the MaxItems value.
46 | func NewBatchProducer(callBackFn ConsumerFunc, opts ...BatchOptions) *BatchProducer {
47 |
48 | return &BatchProducer{
49 | Watcher: make(chan *BatchItems),
50 | ConsumerFunc: callBackFn,
51 | MaxItems: DefaultMaxItems,
52 | MaxWait: DefaultMaxWait,
53 | BatchNo: DefaultBatchNo,
54 | Quit: make(chan bool),
55 | Log: log.NewLogger(),
56 | }
57 | }
58 |
// WatchProducer listens on the Watcher channel for BatchItem objects coming
// from the Batch read loop. Each item is stamped with the current BatchNo and
// appended to the package-level items buffer. When the buffer fills a batch
// (item.Id / MaxItems reaches the current BatchNo), the batch is released to
// the Consumer callback and the BatchNo advances.
//
// If no item arrives within MaxWait [DefaultMaxWait: 30 sec], any partially
// filled batch is released so items are not held indefinitely.
func (p *BatchProducer) WatchProducer() {

	for {

		select {
		case item := <-p.Watcher:

			item.BatchNo = int(p.getBatchNo())
			p.Log.Debugln("BatchProducer", "Id=", item.Id, "Batch Break=", item.Id/int(p.MaxItems), "BatchNo=", item.BatchNo, "Item=", item.Item)

			items = append(items, *item)

			// Release on the batch boundary for the current batch number.
			// NOTE(review): this check relies on item Ids being strictly
			// sequential — confirm against ReadItems' Id assignment.
			if (item.Id / int(p.MaxItems)) == item.BatchNo {
				p.Log.Infoln("BatchReady", "BatchNo=", item.BatchNo)
				items = p.releaseBatch(items)
				p.createBatchNo()
			}

		case <-time.After(p.MaxWait):
			// Timeout: flush the partial batch, if any.
			p.Log.Infoln("MaxWait", "Items=", len(items))
			if len(items) == 0 {
				continue
			}

			items = p.releaseBatch(items)
		case <-p.Quit:
			p.Log.Warn("Quit BatchProducer")

			return
		}
	}
}
99 |
// releaseBatch sends the prepared []BatchItems to the Consumer line via the
// callback, then returns the slice truncated to zero length (items[:0]) so
// the next set of batch processing can reuse it.
func (p *BatchProducer) releaseBatch(items []BatchItems) []BatchItems {

	p.ConsumerFunc(items)
	return p.resetItem(items)
}
108 |
109 | // resetItem to slice the []BatchItems to empty.
110 | func (p *BatchProducer) resetItem(items []BatchItems) []BatchItems {
111 | items = items[:0]
112 | return items
113 | }
114 |
115 | // CheckRemainingItems is a force re-check function on remaining batch items that are available
116 | // for processing.
117 | func (p *BatchProducer) CheckRemainingItems(done chan bool) {
118 |
119 | if len(items) >= 1 {
120 | p.releaseBatch(items)
121 | time.Sleep(time.Duration(100) * time.Millisecond)
122 | }
123 |
124 | done <- true
125 | }
126 |
// createBatchNo atomically increments the current BatchNo by 1.
func (p *BatchProducer) createBatchNo() {
	atomic.AddInt32(&p.BatchNo, 1)
}
131 |
// getBatchNo atomically loads the current BatchNo.
func (p *BatchProducer) getBatchNo() int32 {
	return atomic.LoadInt32(&p.BatchNo)
}
136 |
--------------------------------------------------------------------------------
/semaphore.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
// empty is a zero-size token pushed through the semaphore channel.
type empty struct{}

// Semaphore implements a counting semaphore on top of a buffered channel.
// waiter records the full capacity so Lock/Unlock can acquire or release
// every slot at once.
type Semaphore struct {
	blockch chan empty
	waiter  int
}

// NewSemaphore creates a semaphore with capacity n.
func NewSemaphore(n int) *Semaphore {
	return &Semaphore{
		blockch: make(chan empty, n),
		waiter:  n,
	}
}

// Acquire takes n slots, blocking while the channel buffer is full.
func (s *Semaphore) Acquire(n int) {
	for i := 0; i < n; i++ {
		s.blockch <- empty{}
	}
}

// Release frees n previously acquired slots.
func (s *Semaphore) Release(n int) {
	for i := 0; i < n; i++ {
		<-s.blockch
	}
}

// Lock acquires every slot, acting as an exclusive (write) lock.
func (s *Semaphore) Lock() {
	s.Acquire(s.waiter)
}

// Unlock releases every slot taken by Lock.
func (s *Semaphore) Unlock() {
	s.Release(s.waiter)
}

// RLock acquires a single slot, acting as a shared (read) lock.
func (s *Semaphore) RLock() {
	s.Acquire(1)
}

// RUnlock releases the slot taken by RLock.
func (s *Semaphore) RUnlock() {
	s.Release(1)
}
51 |
--------------------------------------------------------------------------------
/supply.go:
--------------------------------------------------------------------------------
1 | package batch
2 |
3 | // GetBatchSupply request the WorkerChannel for the released []BatchItems. The BatchSupplyChannel
4 | // works as a bidirectional channel to request/response for the final []BatchItems product.
5 | // The ClientSupplyChannel will send the []BatchItems to the client.
6 | func (c *BatchConsumer) GetBatchSupply() {
7 |
8 | supplyCh := make(chan []BatchItems)
9 |
10 | defer close(supplyCh)
11 |
12 | c.Supply.BatchSupplyCh <- supplyCh
13 |
14 | select {
15 | case supply := <-supplyCh:
16 | c.Log.Debugln("BatchSupply", "Supply=", len(supply))
17 |
18 | c.Supply.ClientSupplyCh <- supply
19 | }
20 | }
21 |
--------------------------------------------------------------------------------