├── .gitignore
├── go.mod
├── .editorconfig
├── pool.go
├── pool_test.go
├── .github
│   └── workflows
│       └── go.yml
├── LICENSE
├── examples
│   └── webapp.go
├── balancer.go
├── README.md
├── worker.go
├── types.go
└── balancer_test.go
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | coverage.txt
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/susamn/rio
2 |
3 | go 1.12
4 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*.go]
4 | indent_style = tab
5 | indent_size = 4
6 | insert_final_newline = true
7 |
8 | [*.{yml,yaml}]
9 | indent_style = space
10 | indent_size = 2
11 | insert_final_newline = true
12 | trim_trailing_whitespace = true
--------------------------------------------------------------------------------
/pool.go:
--------------------------------------------------------------------------------
1 | package rio
2 |
3 | // Pool is the balancer's set of workers. It implements heap.Interface,
4 | // acting as a min-heap priority queue keyed on each worker's pending count.
5 | type Pool []*Worker
6 |
7 | func (p Pool) Len() int {
8 | return len(p)
9 | }
10 |
11 | func (p Pool) Less(i, j int) bool {
12 | return p[i].pending < p[j].pending
13 | }
14 |
15 | // Swap also keeps each worker's index field in sync, so that heap.Fix and
16 | // heap.Remove keep working after the heap reorders the slice.
17 | func (p Pool) Swap(i, j int) {
18 | p[i], p[j] = p[j], p[i]
19 | p[i].index = i
20 | p[j].index = j
21 | }
22 |
23 | func (p *Pool) Push(x interface{}) {
24 | item := x.(*Worker)
25 | item.index = len(*p)
26 | *p = append(*p, item)
27 | }
28 |
29 | func (p *Pool) Pop() interface{} {
30 | old := *p
31 | n := len(old)
32 | item := old[n-1]
33 | item.index = -1 // for safety
34 | *p = old[0 : n-1]
35 | return item
36 | }
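37 |
38 | // A sketch of how the balancer drives this heap (see balancer.go): Pop hands
39 | // back the most lightly loaded worker, and Push re-inserts it once its
40 | // pending count has been updated:
41 | //
42 | //	w := heap.Pop(&pool).(*Worker)
43 | //	w.pending++
44 | //	heap.Push(&pool, w)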
--------------------------------------------------------------------------------
/pool_test.go:
--------------------------------------------------------------------------------
1 | package rio
2 |
3 | import (
4 | "container/heap"
5 | "fmt"
6 | "testing"
7 | )
8 |
9 | func TestPool(t *testing.T) {
10 | // Some workers and their pending counts.
11 | requests := []*Request{
12 | {}, {}, {}, {}, {},
13 | }
14 |
15 | // Create a priority queue, put the workers in it, and
16 | // establish the priority queue (heap) invariants.
17 | pool := make(Pool, len(requests))
18 | for i := range requests {
19 | pool[i] = &Worker{
20 | pending: i,
21 | index: i,
22 | }
23 | }
24 | heap.Init(&pool)
25 |
26 | for _, v := range pool {
27 | fmt.Println(v.pending)
28 | }
29 |
30 | // Bump one worker's load and restore the heap invariants.
31 | for i := range pool {
32 | if i == 2 {
33 | pool[i].pending = 5
34 | }
35 | heap.Fix(&pool, i)
36 | }
37 | item := heap.Pop(&pool).(*Worker)
38 |
39 | // The most lightly loaded worker should still have zero pending requests.
40 | if item.pending != 0 {
41 | t.Fail()
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
1 | name: Go
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | tags:
7 | - '*'
8 | pull_request:
9 | branches: [ master ]
10 |
11 | jobs:
12 |
13 | build:
14 | name: Build
15 | runs-on: ubuntu-latest
16 | steps:
17 |
18 | - name: Set up Go 1.13
19 | uses: actions/setup-go@v1
20 | with:
21 | go-version: 1.13
22 | id: go
23 |
24 | - name: Check out code into the Go module directory
25 | uses: actions/checkout@v2
26 |
27 | - name: Get dependencies
28 | run: |
29 | go get -v -t -d ./...
30 | if [ -f Gopkg.toml ]; then
31 | curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
32 | dep ensure
33 | fi
34 |
35 | - name: Build
36 | run: go build -v .
37 |
38 | - name: Test & Coverage Report
39 | run: go test -race -coverprofile=coverage.txt -covermode=atomic
40 |
41 | - name: Codecov
42 | uses: codecov/codecov-action@v1.0.6
43 |
44 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Supratim Samanta
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/examples/webapp.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/susamn/rio"
7 | "log"
8 | "net/http"
9 | "time"
10 | )
11 | func main() {
12 | http.HandleFunc("/graphql", SampleHandler)
13 | log.Fatal(http.ListenAndServe(":7070", nil))
14 | }
15 |
16 | func backEndCall1(id string) (name string) {
17 | time.Sleep(5 * time.Millisecond) // simulated backend latency, kept below the task timeout
18 | return "RIO"
19 | }
20 |
21 | func backEndCall2(name, locationId string) (streetAddress string) {
22 | time.Sleep(5 * time.Millisecond) // simulated backend latency, kept below the task timeout
23 | return "Route 66"
24 | }
25 |
26 | func GetNameById(id string) rio.Callback {
27 | return func(bconn *rio.BridgeConnection) *rio.FutureTaskResponse {
28 | response := backEndCall1(id)
29 | return &rio.FutureTaskResponse{
30 | Data: response,
31 | ResponseCode: 200,
32 | }
33 |
34 | }
35 | }
36 |
37 | func GetStreetAddressByNameAndLocationId(name, locationId string) rio.Callback {
38 | return func(bconn *rio.BridgeConnection) *rio.FutureTaskResponse {
39 | var innerName string
40 |
41 | if bconn != nil {
42 | innerName = bconn.Data[0].(string)
43 | } else {
44 | innerName = name
45 | }
46 |
47 | if innerName != "" && locationId != "" {
48 | response := backEndCall2(innerName, locationId)
49 | return &rio.FutureTaskResponse{
50 | Data: response,
51 | ResponseCode: 200,
52 | }
53 | } else {
54 | return rio.EMPTY_CALLBACK_RESPONSE
55 | }
56 |
57 | }
58 | }
59 |
60 | // Bridges
61 | func Call1ToCall2(response interface{}) *rio.BridgeConnection {
62 | bridge := make([]interface{}, 1)
63 | typedResponse := response.(string)
64 | bridge[0] = typedResponse
65 | return &rio.BridgeConnection{
66 | Data: bridge,
67 | Error: nil,
68 | }
69 | }
70 |
71 | func SampleHandler(w http.ResponseWriter, r *http.Request) {
72 | // Create the load balancer. In a real application create it once and share it (see the note at the end of this file).
73 | balancer := rio.GetBalancer(10, 2) // 10 workers, each buffering up to 2 tasks
74 |
75 | // Setup the callbacks
76 | callback1 := GetNameById("Some Name")
77 | callback2 := GetStreetAddressByNameAndLocationId(rio.EMPTY_ARG_PLACEHOLDER, "Some Location ID")
78 |
79 | // Set up the pipeline
80 | request := rio.BuildRequests(context.Background(),
81 | rio.NewFutureTask(callback1).WithMilliSecondTimeout(10).WithRetry(3)).
82 | FollowedBy(Call1ToCall2, rio.NewFutureTask(callback2).WithMilliSecondTimeout(20))
83 |
84 | // Post job
85 | balancer.PostJob(request)
86 |
87 | // Wait for response
88 | <-request.CompletedChannel
89 |
90 | // Responses
91 | response1, err := request.GetResponse(0)
92 | if err == nil {
93 | // Do something with the response
94 | fmt.Println(response1)
95 | }
96 | response2, err := request.GetResponse(1)
97 | if err == nil {
98 | // Do something with the response
99 | fmt.Println(response2)
100 | }
101 |
102 | }
103 |
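104 | // NOTE: rio.GetBalancer is resource-intensive and is meant to be called only
105 | // once per process. A more realistic setup would share a single balancer
106 | // between handlers; a minimal sketch:
107 | //
108 | //	var balancer = rio.GetBalancer(10, 2)
109 | //
110 | // SampleHandler would then use that shared instance instead of creating its
111 | // own on every request.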
--------------------------------------------------------------------------------
/balancer.go:
--------------------------------------------------------------------------------
1 | package rio
2 |
3 | import (
4 | "container/heap"
5 | "fmt"
6 | "log"
7 | "time"
8 | )
9 |
10 | // The balancer struct. It is created via GetBalancer and dispatches incoming requests to its workers
11 | type Balancer struct {
12 |
13 | // It's the pool of workers, which is itself a priority queue based on a min-heap.
14 | pool Pool
15 |
16 | // This channel is used to receive a request instance from the caller. After getting the request, it is
17 | // dispatched to the most lightly loaded worker
18 | jobChannel chan *Request
19 |
20 | // This channel is used by the workers. After processing a task, a worker uses this channel to let the balancer know
21 | // that it is done and able to take new requests from its request channel
22 | done chan *Worker
23 |
24 | // It's the number of queued requests
25 | queuedItems int
26 |
27 | // The close channel. When the Close method is called by any goroutine with a boolean channel, the
28 | // balancer waits for all pending requests to be processed, then closes all the workers, stops its own
29 | // loop and finally responds by sending true on the caller's channel, confirming that all the inner
30 | // loops are closed and the balancer is shut down.
31 | closeChannel chan chan bool
32 | }
33 |
34 | // Use this method to create an instance of the balancer/load balancer. Call it only once per
35 | // process, as it is resource-intensive.
36 | func GetBalancer(workerCount, taskPerWorker int) *Balancer {
37 | b := &Balancer{
38 | done: make(chan *Worker),
39 | jobChannel: make(chan *Request),
40 | closeChannel: make(chan chan bool),
41 | }
42 | p := make([]*Worker, workerCount)
43 | for i := 0; i < workerCount; i++ {
44 | w := &Worker{
45 | requests: make(chan *Request, taskPerWorker),
46 | pending: 0,
47 | index: i,
48 | Name: fmt.Sprintf("Worker-%d", i),
49 | done: b.done,
50 | closeChannel: make(chan chan bool),
51 | }
52 | p[i] = w
53 | w.Run()
54 | }
55 | b.pool = p
56 | b.balance()
57 | return b
58 | }
59 |
60 | // Use this method from the caller side to queue a new job/request. It will be validated and if found proper, will be
61 | // passed to a worker to be processed. This method returns immediately
62 | func (b *Balancer) PostJob(job *Request) error {
63 | // Reject invalid requests before they ever reach a worker
64 | if err := job.Validate(); err != nil {
65 | return err
66 | }
67 | b.jobChannel <- job
68 | return nil
69 | }
70 |
71 | // Use this method to close/shut down a balancer. When this is called, the balancer waits for all the requests to be
72 | // processed, then closes all the workers, stops its own loop and finally responds by sending true
73 | // on the channel passed by the caller, confirming that all the inner loops are closed and the balancer is shut down.
74 | // Once shut down, sending a request to it will raise a panic
75 | func (b *Balancer) Close(cb chan bool) {
76 | b.closeChannel <- cb
77 | }
78 |
79 | // Unexported method. Only used by the balancer to manage the posted requests.
80 | func (b *Balancer) balance() {
81 | go func() {
82 | for {
83 | select {
84 | case req := <-b.jobChannel:
85 | b.dispatch(req)
86 | b.queuedItems++
87 | case w := <-b.done:
88 | b.completed(w)
89 | b.queuedItems--
90 | case cb := <-b.closeChannel:
91 | if b.queuedItems > 0 {
92 | time.AfterFunc(1*time.Second, func() { b.closeChannel <- cb })
93 | } else {
94 | for _, w := range b.pool {
95 | c := make(chan bool)
96 | w.Close(c)
97 | <-c
98 | fmt.Println("")
99 | }
100 | cb <- true
101 | log.Println("Closing balancer")
102 | return
103 | }
104 | }
105 | }
106 | }()
107 |
108 | }
109 |
110 | // Balancer uses this method to send a validated request to the most lightly loaded worker
111 | func (b *Balancer) dispatch(req *Request) {
112 | w := heap.Pop(&b.pool).(*Worker)
113 | log.Printf("Dispatching request to [%s]", w.Name)
114 | w.DoWork(req)
115 | w.pending++
116 | heap.Push(&b.pool, w)
117 | }
118 |
119 | // When a worker completes a task, it reports back to the balancer and its pending count is decreased by 1
120 | func (b *Balancer) completed(w *Worker) {
121 | w.pending--
122 | worker := heap.Remove(&b.pool, w.index)
123 | heap.Push(&b.pool, worker)
124 | }
125 |
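126 | // A typical shutdown sequence from the calling goroutine, as a sketch:
127 | //
128 | //	done := make(chan bool)
129 | //	balancer.Close(done)
130 | //	<-done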
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Table of Contents
2 |
3 | 1. [Introduction](#org8f950e8)
4 |     1. [What is RIO?](#org64d7944)
5 |     2. [Concern](#org23a1e51)
6 |         1. [An asynchronous job processor](#orgf7dc6c9)
7 |         2. [Easy management of these goroutines and chaining them](#orgc2a657c)
8 |
9 |
10 |
11 |
12 | # Introduction
13 |
14 |
15 | [codecov](https://codecov.io/gh/susamn/rio) · [go.dev reference](https://pkg.go.dev/github.com/susamn/rio?tab=doc)
16 |
17 | [Go Report Card](https://goreportcard.com/report/github.com/susamn/rio) · [code-inspector status](https://www.code-inspector.com/project/5768/status/svg) · [code-inspector score](https://www.code-inspector.com/project/5768/score/svg) · [Codacy](https://app.codacy.com/manual/susamn/rio?utm_source=github.com&utm_medium=referral&utm_content=susamn/rio&utm_campaign=Badge_Grade_Dashboard)
18 |
19 |
20 |
21 | ## What is RIO?
22 |
23 | Rio is a lightweight job scheduler and job chaining library. It is mainly built for Go web apps, but it can
24 | easily be molded to serve any application needing job scheduling. The library is an asynchronous job processor, which makes
25 | all the backend calls asynchronously with retry, timeout and context cancellation functionality. It also provides very
26 | easy semantics to join multiple datasources based on their output and input types, while keeping the
27 | datasources decoupled from one another. This makes creating new APIs or resolvers for GraphQL APIs a breeze.
28 |
29 |
30 |
31 |
32 | ## Concern
33 |
34 | Many times we write web apps which connect to different data sources, combine the data obtained from them and
35 | then do some more work. In the process, we write a lot of boilerplate to transform one data type to another. In the
36 | absence of a proper job scheduler, we also spawn goroutines haphazardly and without proper management. All of this leads
37 | to unmanageable code, which becomes even harder to update when a new member joins the team.
38 |
39 | Rio tries to solve this problem by introducing two concepts.
40 |
41 |
42 |
43 |
44 | ### An asynchronous job processor
45 |
46 | This is the piece which runs multiple jobs asynchronously (based on Rob Pike's talk at Google I/O 2010). It has a
47 | priority queue (`balancer.go` and `pool.go`) which hands off incoming requests to a set of managed workers. The balancer
48 | hands off each new job to the most lightly loaded worker.
49 |
50 |
51 |
52 |
53 | ### Easy management of these goroutines and chaining them
54 |
55 | How many times do we do this:
56 |
57 |     call service 1 in goroutine 1
58 |     wait and get response from goroutine 1
59 |     call service 2 in goroutine 2, taking a piece of data from service call 1
60 |     wait and get response from goroutine 2
61 |     call service 3 in goroutine 3, taking a piece of data from service call 2
62 |     wait and get response from goroutine 3
63 |
64 | You get the idea; this only delays things more and does a lot of context switching. Rio helps here by chaining multiple
65 | calls together by means of closures and function types, and runs them in one goroutine.
66 |
67 | Many will wonder whether this is going to be slower than doing multiple goroutine calls. I think not; it can even be faster.
68 | Think of the previous example: if you do not get a response from service 1, can you invoke service 2, and if service 2 fails,
69 | can you call service 3? No, because there is a data dependency between these calls.
70 |
71 | Rio chains dependent jobs together by introducing this pattern.
72 |
73 |     request := rio.BuildRequests(context,
74 |         rio.NewFutureTask(callback1).WithTimeOut(100 ms).WithRetry(3))
75 |         .FollowedBy(bridge1, rio.NewFutureTask(callback2))
76 |         .FollowedBy(bridge2, rio.NewFutureTask(callback3))
77 |
78 | where each bridge knows how to convert the previous callback's response into the next callback's input.
79 |
80 | In the example in `examples/webapp.go`, the chaining pattern looks like this:
81 |
82 |     request := rio.BuildRequests(context.Background(),
83 |         rio.NewFutureTask(callback1).WithMilliSecondTimeout(10).WithRetry(3)).
84 |         FollowedBy(Call1ToCall2, rio.NewFutureTask(callback2).WithMilliSecondTimeout(20))
85 |
86 | Once the chaining is done, post the job to the load balancer:
87 |
88 |     balancer.PostJob(request)
89 |     <-request.CompletedChannel
90 |
91 | Once the call chain completes, the request comes back with responses for all these calls in a slice, and you can do one of the following:
92 |
93 | 1. Only one job response
94 |
95 |     request.GetOnlyResponse()
96 |
97 | or
98 |
99 | 2. Multiple job responses
100 |
101 |     request.GetResponse(index) // index 0, 1, 2, ...
102 |
103 | If any job fails, the response will be an empty response, specifically `rio.EMPTY_CALLBACK_RESPONSE`.
104 |
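105 | Both getters also return an error when nothing is available. A minimal sketch, reusing the request from above:
106 |
107 |     response, err := request.GetOnlyResponse()
108 |     if err != nil {
109 |         // no response was produced, e.g. the task timed out or the context was cancelled
110 |     }
111 |
112 | Finally, when the application shuts down, close the balancer gracefully; it waits for queued jobs to finish before stopping:
113 |
114 |     closeChannel := make(chan bool)
115 |     balancer.Close(closeChannel)
116 |     <-closeChannel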
--------------------------------------------------------------------------------
/worker.go:
--------------------------------------------------------------------------------
1 | package rio
2 |
3 | import (
4 | "log"
5 | "time"
6 | )
7 |
8 | // The worker struct, it has all the attributes that are needed by a worker to do its thing
9 | type Worker struct {
10 |
11 | // The name of the worker. It is assigned by the balancer when it is created.
12 | Name string
13 |
14 | // The request channel of the worker. The balancer sends the requests in this channel
15 | requests chan *Request
16 |
17 | // This is the count that tells how many requests are still buffered for the worker to work on
18 | pending int
19 |
20 | // The index value is used by the priority queue to move it back and forth in the heap
21 | index int
22 |
23 | // It's a copy of the balancer's done channel, passed to all the workers
24 | done chan *Worker
25 |
26 | // It's the close channel to close a worker. It's used by the balancer only, hence unexported
27 | closeChannel chan chan bool
28 | }
29 |
30 | // The balancer calls this method to queue a new request to the worker
31 | func (w *Worker) DoWork(request *Request) {
32 | w.requests <- request
33 | }
34 |
35 | // The close method; when called, it shuts the worker down
36 | func (w *Worker) Close(cb chan bool) {
37 | w.closeChannel <- cb
38 | }
39 |
40 | // The run method, which actually processes the requests. The balancer calls it
41 | // right after the worker is created
42 | func (w *Worker) Run() {
43 | go func() {
44 | for {
45 | select {
46 | case callback := <-w.closeChannel:
47 | close(w.closeChannel)
48 | close(w.requests)
49 | log.Println("Closing worker : ", w.Name)
50 | callback <- true
51 | return
52 |
53 | case r := <-w.requests:
54 |
55 | // Pre-allocate the responses slice with capacity for one response per task
56 | r.Responses = make([]*Response, 0, len(r.Tasks))
57 |
58 | // The initial bridge, which is nil for the first call
59 | var bridgeConnection *BridgeConnection
60 |
61 | // Single request processing channel
62 | ch := make(chan *Response)
63 |
64 | currentTask := r.Tasks[0]
65 | currentTimer := time.NewTimer(currentTask.Timeout)
66 | doTask(ch, currentTask, bridgeConnection)
67 |
68 | w.loop(currentTimer, r, bridgeConnection, currentTask, ch)
69 |
70 | }
71 |
72 | }
73 | }()
74 | }
75 |
76 | // This method handles the individual tasks, their timeouts and the request context
77 | func (w *Worker) loop(currentTimer *time.Timer, r *Request, bridgeConnection *BridgeConnection, currentTask *FutureTask, ch chan *Response) {
78 | for {
79 | select {
80 | case <-r.Ctx.Done():
81 | log.Println("Context cancelled")
82 | w.done <- w
83 | r.CompletedChannel <- true
84 | return
85 | case <-currentTimer.C:
86 | log.Println("Timeout")
87 | w.done <- w
88 | r.CompletedChannel <- true
89 | return
90 | case response := <-ch:
91 | currentTimer.Stop()
92 | if len(r.Tasks) == 1 { // only the last task of the chain remains
93 | if response.Error != nil && currentTask.RetryCount > 0 {
94 | currentTask.RetryCount--
95 | log.Println("Retrying task")
96 | currentTimer = time.NewTimer(currentTask.Timeout)
97 | doTask(ch, currentTask, bridgeConnection)
98 | } else {
99 | r.Responses = append(r.Responses, response)
100 | w.done <- w
101 | r.CompletedChannel <- true
102 | return
103 | }
104 |
105 | } else {
106 | if response.Error != nil && currentTask.RetryCount > 0 {
107 | currentTask.RetryCount--
108 | log.Println("Retrying task")
109 | currentTimer = time.NewTimer(currentTask.Timeout)
110 | doTask(ch, currentTask, bridgeConnection)
111 | } else {
112 | r.Responses = append(r.Responses, response)
113 | r.Tasks = r.Tasks[1:]
114 | bridge := r.Bridges[0]
115 | if len(r.Bridges) > 1 {
116 | r.Bridges = r.Bridges[1:]
117 | }
118 | if bridge == nil || response.Data == nil {
119 | // Without a bridge, or without data from the parent call, the chain
120 | // cannot continue; report back so the caller is not left blocked forever
121 | log.Println("Cannot continue the chain: nil bridge or nil parent response")
122 | w.done <- w
123 | r.CompletedChannel <- true
124 | return
125 | }
126 | bridgeConnection = bridge(response.Data)
127 |
128 | if bridgeConnection.Error == nil {
129 | currentTask = r.Tasks[0]
130 | currentTimer = time.NewTimer(currentTask.Timeout)
131 | doTask(ch, currentTask, bridgeConnection)
132 | } else {
133 | for i := 0; i < len(r.Tasks); i++ {
134 | r.Responses = append(r.Responses, &Response{
135 | ResponseTime: -1,
136 | ResponseCode: -1,
137 | Data: nil,
138 | Error: bridgeConnection.Error,
139 | })
140 | }
141 | w.done <- w
142 | r.CompletedChannel <- true
143 | return
144 | }
145 | }
146 |
147 | }
148 |
149 | }
150 | }
151 | }
152 |
153 | // This method handles the execution of the actual network call
154 | func doTask(ch chan *Response, task *FutureTask, bridgeConnection *BridgeConnection) {
155 | // The actual network call happens here
156 | go func() {
157 | var futureTaskResponse *FutureTaskResponse
158 | preTime := time.Now()
159 | if task.ReplicaCount > 1 {
160 | // Buffer the channel so the slower replicas do not block and leak goroutines
161 | replicaChannel := make(chan *FutureTaskResponse, task.ReplicaCount)
162 | for i := 0; i < task.ReplicaCount; i++ {
163 | go func() { replicaChannel <- task.Callback(bridgeConnection) }()
164 | }
165 | futureTaskResponse = <-replicaChannel
166 | } else {
167 | futureTaskResponse = task.Callback(bridgeConnection)
168 | }
169 |
170 | ch <- &Response{
171 | ResponseTime: time.Since(preTime),
172 | ResponseCode: futureTaskResponse.ResponseCode,
173 | Data: futureTaskResponse.Data,
174 | Error: futureTaskResponse.Error,
175 | }
176 | }()
177 | }
178 |
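179 | // The overall flow for one request, as a sketch:
180 | //
181 | //	doTask(ch, tasks[0], nil)          // kick off the first task
182 | //	response := <-ch                   // wait for it, racing its timer
183 | //	bconn := bridges[0](response.Data) // convert the output for the next task
184 | //	doTask(ch, tasks[1], bconn)        // and so on, until the chain ends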
--------------------------------------------------------------------------------
/types.go:
--------------------------------------------------------------------------------
1 | package rio
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "log"
8 | "time"
9 | )
10 |
11 | // This is a response which will be available in the future from a FutureTask
12 | type FutureTaskResponse struct {
13 | ResponseCode int
14 | Data interface{}
15 | Error error
16 | }
17 |
18 | // During callback chaining, use this to set up the callbacks, see the example
19 | var EMPTY_ARG_PLACEHOLDER = ""
20 |
21 | // Use this to send an empty response when a callback fails
22 | var EMPTY_CALLBACK_RESPONSE = &FutureTaskResponse{
23 | ResponseCode: -1,
24 | Data: nil,
25 | Error: errors.New("The callback didn't run due to argument unavailability"),
26 | }
27 |
28 | // This is a task which will be executed in the future
29 | type FutureTask struct {
30 | Name string
31 | Callback Callback
32 | Timeout time.Duration
33 | RetryCount int
34 | ReplicaCount int
35 | }
36 |
37 | // It's how two callbacks communicate with each other: a function which knows how to convert
38 | // one callback's response into the input of the next
39 | type Bridge func(interface{}) *BridgeConnection
40 |
41 | // It's the type that will be used by the consumers to create the service closures
42 | type Callback func(*BridgeConnection) *FutureTaskResponse
43 |
44 | // It's the container that a Bridge fills with data for the next callback
45 | type BridgeConnection struct {
46 | Data []interface{}
47 | Error error
48 | }
49 |
50 | // Request is the one that is sent to the *balancer* to be processed concurrently
51 | type Request struct {
52 | Tasks []*FutureTask
53 | Bridges []Bridge
54 | Responses []*Response
55 | CompletedChannel chan bool
56 | Ctx context.Context
57 | }
58 |
59 | // Response is the one that is handed back to the caller, for example a graphql layer
60 | type Response struct {
61 | ResponseTime time.Duration
62 | ResponseCode int
63 | Data interface{}
64 | Error error
65 | }
66 |
67 | // GetResponse gives a response from the request based on an index. Use this method when multiple
68 | // tasks were sent to the balancer to be processed. When the execution is done, the response for each queued
69 | // task is available at the corresponding position.
70 | func (r *Request) GetResponse(index int) (*Response, error) {
71 | // Guard against a request that produced no responses at all
72 | if len(r.Responses) == 0 {
73 | return nil, errors.New("no response obtained from the process, the response slice is empty")
74 | }
75 | // Responses are stored in the same order the tasks were queued
76 | if index > len(r.Responses)-1 {
77 | return nil, fmt.Errorf("no response available at index position: %d", index)
78 | }
79 | return r.Responses[index], nil
80 | }
81 |
82 | // GetOnlyResponse is used to get the one and only response from the request object, use this when there is only 1 task
83 | func (r *Request) GetOnlyResponse() (*Response, error) {
84 | // There should be exactly one response when a single task was queued
85 | if len(r.Responses) == 0 {
86 | return nil, errors.New("no response obtained from the process, the response slice is empty")
87 | }
88 | return r.Responses[0], nil
89 | }
90 |
91 | // Use this method to create a new task. It takes a callback in the form of a closure.
92 | func NewFutureTask(callback Callback) *FutureTask {
93 | return &FutureTask{Callback: callback}
94 | }
95 |
96 | // Use this method to create a new named task. It takes a name and a callback in the form of a closure.
97 | func NewNamedFutureTask(name string, callback Callback) *FutureTask {
98 | return &FutureTask{Callback: callback, Name: name}
99 | }
100 |
101 | // Add timeout for the task in the form of milliseconds
102 | func (f *FutureTask) WithMilliSecondTimeout(t int) *FutureTask {
103 | f.Timeout = time.Duration(t) * time.Millisecond
104 | return f
105 | }
106 |
107 | // Add timeout for the task in the form of seconds
108 | func (f *FutureTask) WithSecondTimeout(t int) *FutureTask {
109 | f.Timeout = time.Duration(t) * time.Second
110 | return f
111 | }
112 |
113 | // Add a retry count to the task. If the task fails, it will be retried this many times. The failure information comes
114 | // from the task itself.
115 | func (f *FutureTask) WithRetry(c int) *FutureTask {
116 | f.RetryCount = c
117 | return f
118 | }
119 |
120 | // Add replica calls. Use this when successive calls to a service may show very different response times
121 | // and only the fastest response is needed. The worker will call the service concurrently, this many times, and only
122 | // the fastest response will be picked.
123 | func (f *FutureTask) WithReplica(c int) *FutureTask {
124 | f.ReplicaCount = c
125 | return f
126 | }
127 |
128 | // Use this method to build a request instance, which is sent to the balancer to be processed. It creates a request
129 | // with a single task; when job chaining is required, append further tasks with FollowedBy.
130 | func BuildRequests(context context.Context, task *FutureTask) *Request {
131 | tasks := make([]*FutureTask, 0, 1)
132 | tasks = append(tasks, task)
133 | return &Request{Ctx: context, Tasks: tasks, CompletedChannel: make(chan bool)}
134 | }
135 |
136 | // This method validates the posted job/request. If validation fails, the balancer returns the error to the
137 | // calling goroutine immediately, otherwise it sends the request to the workers.
138 | func (r Request) Validate() error {
139 | // The completed channel is how the worker reports back, so a request
140 | // without one can never be waited on
141 | if r.CompletedChannel == nil {
142 | return errors.New("the request CompletedChannel is nil")
143 | }
144 | if len(r.Tasks) == 0 {
145 | return errors.New("please provide some tasks to process, the task list is empty")
146 | }
147 | // For a FollowedBy chain of n tasks there must be exactly (n-1) bridges,
148 | // one between every pair of consecutive tasks
149 | if length := len(r.Tasks); length > 1 && len(r.Bridges) != length-1 {
150 | return fmt.Errorf("provided task count: %d, bridge count: %d, expected "+
151 | "bridge count: %d", length, len(r.Bridges), length-1)
152 | }
153 | return nil
154 | }
155 |
156 | // This construct is used to create task chaining. If task2 depends on task1 in terms of data and the execution is to
157 | // happen like task1-->task2, then use this method to chain them together by means of a Bridge type
158 | func (r *Request) FollowedBy(bridge Bridge, task *FutureTask) *Request {
159 | if bridge == nil || task == nil {
160 | log.Println("Error : Please provide the bridges and tasks properly")
161 | return nil
162 | }
163 | if r.Bridges == nil {
164 | r.Bridges = make([]Bridge, 0, 1)
165 | }
166 | r.Bridges = append(r.Bridges, bridge)
167 | r.Tasks = append(r.Tasks, task)
168 | return r
169 | }
170 |
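171 | // A minimal sketch of a Callback and a Bridge pair (see examples/webapp.go for
172 | // a fuller version). The bridge converts the previous task's response into the
173 | // input of the next task's callback:
174 | //
175 | //	callback := func(bconn *BridgeConnection) *FutureTaskResponse {
176 | //		return &FutureTaskResponse{ResponseCode: 200, Data: "hello"}
177 | //	}
178 | //	bridge := func(data interface{}) *BridgeConnection {
179 | //		return &BridgeConnection{Data: []interface{}{data}}
180 | //	}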
--------------------------------------------------------------------------------
/balancer_test.go:
--------------------------------------------------------------------------------
1 | package rio
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "runtime"
8 | "testing"
9 | "time"
10 | )
11 |
12 | func BenchmarkBalancerSingleTask(b *testing.B) {
13 |
14 | balancer := GetBalancer(5, 3)
15 | for i := 0; i < b.N; i++ {
16 |
17 | var tasks = make([]*FutureTask, 1)
18 | tasks[0] = &FutureTask{Callback: Task3, Timeout: time.Duration(1) * time.Second, RetryCount: 0}
19 |
20 | completeChannel := make(chan bool)
21 |
22 | ctx := context.Background()
23 |
24 | request := &Request{
25 | Tasks: tasks,
26 | Bridges: nil,
27 | Responses: nil,
28 | CompletedChannel: completeChannel,
29 | Ctx: ctx,
30 | }
31 |
32 | balancer.PostJob(request)
33 |
34 | <-request.CompletedChannel
35 |
36 | fmt.Println(request.Responses[0])
37 |
38 | }
39 | closeChannel := make(chan bool)
40 | balancer.Close(closeChannel)
41 | <-closeChannel
42 |
43 | }
44 |
45 | func BenchmarkMultipleChainedTask(b *testing.B) {
46 | balancer := GetBalancer(10, 2)
47 | for i := 0; i < b.N; i++ {
48 |
49 | var tasks = make([]*FutureTask, 4)
50 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
51 | tasks[1] = &FutureTask{Callback: Task2, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
52 | tasks[2] = &FutureTask{Callback: Task3, Timeout: time.Duration(100) * time.Second, RetryCount: 1}
53 | tasks[3] = &FutureTask{Callback: Task4, Timeout: time.Duration(100) * time.Second}
54 |
55 | var bridges = make([]Bridge, 3)
56 | bridges[0] = Bridge1
57 | bridges[1] = Bridge2
58 | bridges[2] = Bridge3
59 |
60 | completeChannel := make(chan bool)
61 |
62 | ctx := context.Background()
63 |
64 | request := &Request{
65 | Tasks: tasks,
66 | Bridges: bridges,
67 | Responses: nil,
68 | CompletedChannel: completeChannel,
69 | Ctx: ctx,
70 | }
71 |
72 | balancer.PostJob(request)
73 |
74 | <-request.CompletedChannel
75 |
76 | fmt.Println(request.Responses[0], request.Responses[1], request.Responses[2], request.Responses[3])
77 |
78 | if request.Responses[0].Data.(string) != "Response 1" ||
79 | request.Responses[1].Data.(string) != "Response 2" ||
80 | request.Responses[2].Data.(string) != "Response 3" ||
81 | request.Responses[3].Data.(string) != "Response 4" {
82 | b.Fail()
83 | }
84 | }
85 | }
86 |
87 | func TestWithSingleTaskWithRetry(t *testing.T) {
88 | balancer := GetBalancer(1, 1)
89 |
90 | var tasks = make([]*FutureTask, 1)
91 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
92 |
93 | var bridges = make([]Bridge, 1)
94 | bridges[0] = Bridge4
95 |
96 | completeChannel := make(chan bool)
97 |
98 | ctx := context.Background()
99 |
100 | request := &Request{
101 | Tasks: tasks,
102 | Bridges: bridges,
103 | Responses: nil,
104 | CompletedChannel: completeChannel,
105 | Ctx: ctx,
106 | }
107 |
108 | balancer.PostJob(request)
109 |
110 | <-request.CompletedChannel
111 |
112 | fmt.Println(request.Responses[0])
113 |
114 | fmt.Println("Goroutines count : ", runtime.NumGoroutine())
115 |
116 | closeChannel := make(chan bool)
117 | balancer.Close(closeChannel)
118 | <-closeChannel
119 |
120 | fmt.Println("Goroutines count : ", runtime.NumGoroutine())
121 |
122 | }
123 |
124 | func TestWithSingleTaskWithTimeout(t *testing.T) {
125 |
126 | balancer := GetBalancer(1, 1)
127 |
128 | var tasks = make([]*FutureTask, 1)
129 | tasks[0] = &FutureTask{Callback: Task7, Timeout: time.Duration(2) * time.Second, RetryCount: 2}
130 |
131 | completeChannel := make(chan bool)
132 |
133 | ctx := context.Background()
134 |
135 | request := &Request{
136 | Tasks: tasks,
137 | Bridges: nil,
138 | Responses: nil,
139 | CompletedChannel: completeChannel,
140 | Ctx: ctx,
141 | }
142 |
143 | balancer.PostJob(request)
144 |
145 | <-request.CompletedChannel
146 |
147 | _, err := request.GetResponse(0)
148 |
149 | if err == nil {
150 | t.Fail()
151 | }
152 |
153 | closeChannel := make(chan bool)
154 | balancer.Close(closeChannel)
155 | <-closeChannel
156 |
157 | }
158 |
159 | func TestWithSingleTaskWithContextCancel(t *testing.T) {
160 | balancer := GetBalancer(1, 1)
161 |
162 | var tasks = make([]*FutureTask, 1)
163 | tasks[0] = &FutureTask{Callback: Task7, Timeout: time.Duration(20) * time.Second, RetryCount: 2}
164 |
165 | completeChannel := make(chan bool)
166 |
167 | ctx, cancel := context.WithCancel(context.Background())
168 |
169 | request := &Request{
170 | Tasks: tasks,
171 | Bridges: nil,
172 | Responses: nil,
173 | CompletedChannel: completeChannel,
174 | Ctx: ctx,
175 | }
176 |
177 | balancer.PostJob(request)
178 |
179 | go func() {
180 | time.Sleep(time.Duration(4) * time.Second)
181 | cancel()
182 | }()
183 |
184 | <-request.CompletedChannel
185 |
186 | _, err := request.GetResponse(0)
187 |
188 | if err == nil {
189 | t.Fail()
190 | }
191 |
192 | closeChannel := make(chan bool)
193 | balancer.Close(closeChannel)
194 | <-closeChannel
195 |
196 | }
197 |
198 | func TestWithMultipleChainedTasks(t *testing.T) {
199 |
200 | balancer := GetBalancer(10, 2)
201 |
202 | var tasks = make([]*FutureTask, 4)
203 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
204 | tasks[1] = &FutureTask{Callback: Task2, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
205 | tasks[2] = &FutureTask{Callback: Task3, Timeout: time.Duration(100) * time.Second, RetryCount: 1}
206 | tasks[3] = &FutureTask{Callback: Task4, Timeout: time.Duration(100) * time.Second}
207 |
208 | var bridges = make([]Bridge, 3)
209 | bridges[0] = Bridge1
210 | bridges[1] = Bridge2
211 | bridges[2] = Bridge3
212 |
213 | completeChannel := make(chan bool)
214 |
215 | ctx := context.Background()
216 |
217 | request := &Request{
218 | Tasks: tasks,
219 | Bridges: bridges,
220 | Responses: nil,
221 | CompletedChannel: completeChannel,
222 | Ctx: ctx,
223 | }
224 |
225 | balancer.PostJob(request)
226 |
227 | <-request.CompletedChannel
228 |
229 | fmt.Println(request.Responses[0], request.Responses[1], request.Responses[2], request.Responses[3])
230 |
231 | if request.Responses[0].Data.(string) != "Response 1" ||
232 | request.Responses[1].Data.(string) != "Response 2" ||
233 | request.Responses[2].Data.(string) != "Response 3" ||
234 | request.Responses[3].Data.(string) != "Response 4" {
235 | t.Fail()
236 |
237 | }
238 |
239 | closeChannel := make(chan bool)
240 | balancer.Close(closeChannel)
241 | <-closeChannel
242 |
243 | }
244 |
245 | func TestWithMultipleChainedTasksWithThirdTaskTimedOut(t *testing.T) {
246 |
247 | balancer := GetBalancer(10, 2)
248 |
249 | var tasks = make([]*FutureTask, 4)
250 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
251 | tasks[1] = &FutureTask{Callback: Task2, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
252 | tasks[2] = &FutureTask{Callback: Task7, Timeout: time.Duration(3) * time.Second, RetryCount: 1}
253 | tasks[3] = &FutureTask{Callback: Task4, Timeout: time.Duration(100) * time.Second}
254 |
255 | var bridges = make([]Bridge, 3)
256 | bridges[0] = Bridge1
257 | bridges[1] = Bridge2
258 | bridges[2] = Bridge3
259 |
260 | completeChannel := make(chan bool)
261 |
262 | ctx := context.Background()
263 |
264 | request := &Request{
265 | Tasks: tasks,
266 | Bridges: bridges,
267 | Responses: nil,
268 | CompletedChannel: completeChannel,
269 | Ctx: ctx,
270 | }
271 |
272 | balancer.PostJob(request)
273 |
274 | <-request.CompletedChannel
275 |
276 | fmt.Println(request.Responses[0], request.Responses[1])
277 |
278 | _, err := request.GetResponse(2)
279 |
280 | if err == nil {
281 | t.Fail()
282 | }
283 |
284 | closeChannel := make(chan bool)
285 | balancer.Close(closeChannel)
286 | <-closeChannel
287 |
288 | }
289 |
290 | func TestWithMultipleChainedTaskAndBridgeData(t *testing.T) {
291 | balancer := GetBalancer(1, 1)
292 |
293 | var tasks = make([]*FutureTask, 3)
294 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
295 | tasks[1] = &FutureTask{Callback: Task5, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
296 | tasks[2] = &FutureTask{Callback: Task6, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
297 |
298 | var bridges = make([]Bridge, 2)
299 | bridges[0] = Bridge4
300 | bridges[1] = Bridge5
301 |
302 | completeChannel := make(chan bool)
303 |
304 | ctx := context.Background()
305 |
306 | request := &Request{
307 | Tasks: tasks,
308 | Bridges: bridges,
309 | Responses: nil,
310 | CompletedChannel: completeChannel,
311 | Ctx: ctx,
312 | }
313 |
314 | balancer.PostJob(request)
315 |
316 | <-request.CompletedChannel
317 |
318 | r1, _ := request.GetResponse(0)
319 | r2, _ := request.GetResponse(1)
320 | r3, _ := request.GetResponse(2)
321 |
322 | if r1.Data.(string) != "Response 1" ||
323 | len(r2.Data.([]interface{})) != 3 ||
324 | len(r3.Data.([]interface{})) != 2 {
325 | t.Fail()
326 | }
327 |
328 | closeChannel := make(chan bool)
329 | balancer.Close(closeChannel)
330 | <-closeChannel
331 |
332 | }
333 |
334 | func TestWithMultipleChainedTaskAndBridgeDataFromDifferentGoroutines(t *testing.T) {
335 | balancer := GetBalancer(10, 100)
336 | finished := make(chan bool)
337 | for i := 0; i < 100; i++ {
338 | go func() {
339 | for j := 0; j < 100; j++ {
340 | var tasks = make([]*FutureTask, 3)
341 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
342 | tasks[1] = &FutureTask{Callback: Task5, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
343 | tasks[2] = &FutureTask{Callback: Task6, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
344 |
345 | var bridges = make([]Bridge, 2)
346 | bridges[0] = Bridge4
347 | bridges[1] = Bridge5
348 |
349 | request := &Request{
350 | Tasks: tasks,
351 | Bridges: bridges,
352 | Responses: nil,
353 | CompletedChannel: make(chan bool),
354 | Ctx: context.Background(),
355 | }
356 |
357 | balancer.PostJob(request)
358 | <-request.CompletedChannel
359 |
360 | r1, _ := request.GetResponse(0)
361 | r2, _ := request.GetResponse(1)
362 | r3, _ := request.GetResponse(2)
363 | if r1.Data.(string) != "Response 1" ||
364 | len(r2.Data.([]interface{})) != 3 ||
365 | len(r3.Data.([]interface{})) != 2 {
366 | t.Fail()
367 | }
368 | }
369 | finished <- true
370 | }()
371 | }
372 |
373 | // Wait for all the posting goroutines to finish before closing the balancer;
374 | // otherwise t.Fail could be called after the test has already returned
375 | for i := 0; i < 100; i++ {
376 | <-finished
377 | }
378 |
379 | closeChannel := make(chan bool)
380 | balancer.Close(closeChannel)
381 | <-closeChannel
382 | }
383 |
384 | func TestWithMultipleChainedTaskAndBridgeDataSecondCallFailed(t *testing.T) {
385 | balancer := GetBalancer(1, 1)
386 |
387 | var tasks = make([]*FutureTask, 4)
388 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
389 | tasks[1] = &FutureTask{Callback: Task5, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
390 | tasks[2] = &FutureTask{Callback: Task6, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
391 | tasks[3] = &FutureTask{Callback: Task7, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
392 |
393 | var bridges = make([]Bridge, 3)
394 | bridges[0] = Bridge6
395 | bridges[1] = Bridge7
396 | bridges[2] = Bridge8
397 |
398 | completeChannel := make(chan bool)
399 |
400 | context := context.Background()
401 |
402 | request := &Request{
403 | Tasks: tasks,
404 | Bridges: bridges,
405 | Responses: nil,
406 | CompletedChannel: completeChannel,
407 | Ctx: context,
408 | }
409 |
410 | balancer.PostJob(request)
411 |
412 | <-request.CompletedChannel
413 |
414 | r1, _ := request.GetResponse(0)
415 | r2, _ := request.GetResponse(1)
416 | r3, _ := request.GetResponse(2)
417 | r4, _ := request.GetResponse(3)
418 |
419 | fmt.Println(r1, r2, r3, r4)
420 |
421 | if r3.Error.Error() != "Test Error" || r4.Error.Error() != "Test Error" {
422 | t.Fail()
423 | }
424 |
425 | closeChannel := make(chan bool)
426 | balancer.Close(closeChannel)
427 | <-closeChannel
428 |
429 | }
430 |
431 | // Negative Test Cases
432 | func TestWithInsufficientBridges(t *testing.T) {
433 | balancer := GetBalancer(10, 2)
434 |
435 | var tasks = make([]*FutureTask, 4)
436 | tasks[0] = &FutureTask{Callback: Task1, Timeout: time.Duration(100) * time.Second, RetryCount: 2}
437 | tasks[1] = &FutureTask{Callback: Task2, Timeout: time.Duration(100) * time.Second, RetryCount: 0}
438 | tasks[2] = &FutureTask{Callback: Task3, Timeout: time.Duration(100) * time.Second, RetryCount: 1}
439 | tasks[3] = &FutureTask{Callback: Task4, Timeout: time.Duration(100) * time.Second}
440 |
441 | var bridges = make([]Bridge, 2)
442 | bridges[0] = Bridge1
443 | bridges[1] = Bridge2
444 |
445 | completeChannel := make(chan bool)
446 |
447 | ctx := context.Background()
448 |
449 | request := &Request{
450 | Tasks: tasks,
451 | Bridges: bridges,
452 | Responses: nil,
453 | CompletedChannel: completeChannel,
454 | Ctx: ctx,
455 | }
456 |
457 | err := balancer.PostJob(request)
458 | if err == nil {
459 | t.Fail()
460 | } else {
461 | fmt.Println(err)
462 | }
463 |
464 | closeChannel := make(chan bool)
465 | balancer.Close(closeChannel)
466 | <-closeChannel
467 |
468 | }
469 |
470 | func TestWithEmptyTasks(t *testing.T) {
471 | balancer := GetBalancer(1, 2)
472 |
473 | completeChannel := make(chan bool)
474 | request := &Request{
475 | Tasks: []*FutureTask{},
476 | Bridges: nil,
477 | Responses: nil,
478 | CompletedChannel: completeChannel,
479 | Ctx: nil,
480 | }
481 |
482 | err := balancer.PostJob(request)
483 | if err == nil {
484 | t.Fail()
485 | } else {
486 | fmt.Println(err)
487 | }
488 |
489 | closeChannel := make(chan bool)
490 | balancer.Close(closeChannel)
491 | <-closeChannel
492 |
493 | }
494 |
495 | func TestWithEmptyCompletedChannel(t *testing.T) {
496 | balancer := GetBalancer(1, 2)
497 |
498 | request := &Request{
499 | Tasks: []*FutureTask{},
500 | Bridges: nil,
501 | Responses: nil,
502 | CompletedChannel: nil,
503 | Ctx: nil,
504 | }
505 |
506 | err := balancer.PostJob(request)
507 | if err == nil {
508 | t.Fail()
509 | } else {
510 | fmt.Println(err)
511 | }
512 |
513 | closeChannel := make(chan bool)
514 | balancer.Close(closeChannel)
515 | <-closeChannel
516 |
517 | }
518 |
519 | func Bridge1(interface{}) *BridgeConnection {
520 | return &BridgeConnection{}
521 | }
522 |
523 | func Bridge2(interface{}) *BridgeConnection {
524 | return &BridgeConnection{}
525 | }
526 |
527 | func Bridge3(interface{}) *BridgeConnection {
528 | return &BridgeConnection{}
529 | }
530 |
531 | func Bridge4(interface{}) *BridgeConnection {
532 | return &BridgeConnection{
533 | Data: []interface{}{"1", 2, 3.0},
534 | Error: nil,
535 | }
536 | }
537 |
538 | func Bridge5(interface{}) *BridgeConnection {
539 | return &BridgeConnection{
540 | Data: []interface{}{"1", "2"},
541 | Error: nil,
542 | }
543 | }
544 |
545 | func Bridge6(interface{}) *BridgeConnection {
546 | return &BridgeConnection{
547 | Data: []interface{}{"1", 2, 3.0},
548 | Error: nil,
549 | }
550 | }
551 |
552 | func Bridge7(interface{}) *BridgeConnection {
553 | return &BridgeConnection{
554 | Data: []interface{}{"1", "2"},
555 | Error: errors.New("Test Error"),
556 | }
557 | }
558 |
559 | func Bridge8(interface{}) *BridgeConnection {
560 | return &BridgeConnection{
561 | Data: []interface{}{"1", "2"},
562 | Error: nil,
563 | }
564 | }
565 |
566 | func Task1(*BridgeConnection) *FutureTaskResponse {
567 | fmt.Print("Task 1-->")
568 | return &FutureTaskResponse{
569 | ResponseCode: 404,
570 | Data: "Response 1",
571 | Error: errors.New(""),
572 | }
573 | }
574 |
575 | func Task2(*BridgeConnection) *FutureTaskResponse {
576 |
577 | fmt.Println("Task 2-->")
578 | return &FutureTaskResponse{
579 | ResponseCode: 500,
580 | Data: "Response 2",
581 | Error: nil,
582 | }
583 | }
584 |
585 | func Task3(*BridgeConnection) *FutureTaskResponse {
586 | fmt.Print("Task 3-->")
587 | return &FutureTaskResponse{
588 | ResponseCode: 200,
589 | Data: "Response 3",
590 | Error: errors.New(""),
591 | }
592 | }
593 |
594 | func Task4(*BridgeConnection) *FutureTaskResponse {
595 | fmt.Println("Task 4-->")
596 | return &FutureTaskResponse{
597 | ResponseCode: 404,
598 | Data: "Response 4",
599 | Error: nil,
600 | }
601 | }
602 |
603 | func Task5(bconn *BridgeConnection) *FutureTaskResponse {
604 | fmt.Print("Task 5-->")
605 | d1 := bconn.Data[0].(string)
606 | d2 := bconn.Data[1].(int)
607 | d3 := bconn.Data[2].(float64)
608 | return &FutureTaskResponse{
609 | ResponseCode: 200,
610 | Data: []interface{}{d1, d2, d3},
611 | Error: nil,
612 | }
613 | }
614 |
615 | func Task6(bconn *BridgeConnection) *FutureTaskResponse {
616 | fmt.Println("Task 6-->")
617 | d1 := bconn.Data[0].(string)
618 | d2 := bconn.Data[1].(string)
619 | return &FutureTaskResponse{
620 | ResponseCode: 200,
621 | Data: []interface{}{d1, d2},
622 | Error: nil,
623 | }
624 | }
625 |
626 | func Task7(*BridgeConnection) *FutureTaskResponse {
627 | fmt.Print("Task 7-->")
628 | time.Sleep(time.Duration(5) * time.Second)
629 | return &FutureTaskResponse{
630 | ResponseCode: 404,
631 | Data: "Response 7",
632 | Error: errors.New(""),
633 | }
634 | }
635 |
--------------------------------------------------------------------------------